content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
library(reshape2)
library(ncdf4)
library(MuMIn)
library(zoo)
library(nlme)
library(dplyr)
library(chron)
library(fields)
library(maps)
library(mapdata)
library(tidyr)
# Load and process the PDO (Pacific Decadal Oscillation) index.
# "~pdo" is a fixed-layout text file: line 31 holds column names, the
# following 119 lines hold one row per year starting in 1900.
# NOTE(review): "names" shadows base::names within this script session.
names <- read.table("~pdo", skip=30, nrows=1, as.is = T)
pdo <- read.table("~pdo", skip=31, nrows=119, fill=T, col.names = names)
# Overwrite YEAR with clean numeric years; the raw file marks some years
# with asterisks, which would otherwise make the column non-numeric.
pdo$YEAR <- 1900:(1899+nrow(pdo)) # drop asterisks!
# Reshape from wide (one column per month) to long form, ordered by year.
pdo <- pdo %>%
gather(month, value, -YEAR) %>%
arrange(YEAR)
# and get FMA (Feb-Mar-Apr) mean values, one per year
pdo <- filter(pdo, month %in% c("FEB", "MAR", "APR"))
pdo.FMA <- tapply(pdo$value, pdo$YEAR, mean)
# load the NCEP NCAR wind stress data
# start with U (eastward wind stress component)
nc <- nc_open("U_Stress_NCEP.nc")
# view dates (middle of month):
d <- ncvar_get(nc, "TIME")
# Convert numeric time to chron dates; assumes TIME is days since 1-1-0001
# -- TODO confirm against the file's time-units attribute.
d <- dates(d, origin = c(1,1,0001))
# get all the data - they have already been subsetted by date and area in my version
tauX <- ncvar_get(nc, "UFLX")
x <- ncvar_get(nc, "LON94_126") # view longitudes (degrees East)
y <- ncvar_get(nc, "LAT69_82") # view latitudes
# process!
tauX <- aperm(tauX, 3:1) # First, reverse order of dimensions ("transpose" array)
tauX <- matrix(tauX, nrow=dim(tauX)[1], ncol=prod(dim(tauX)[2:3])) # Change to matrix
# Keep track of corresponding latitudes and longitudes of each column:
lat <- rep(y, length(x)) # Vector of latitudes
lon <- rep(x, each = length(y)) # Vector of longitudes
dimnames(tauX) <- list(as.character(d), paste("N", lat, "E", lon, sep=""))
m1 <- months(d)
y1 <- years(d)
# decimal year (computed but not used below; kept for reference)
dec.yr1 <- as.numeric(as.character(y1)) + (as.numeric(m1)-0.5)/12
# and define the seasons for analysis
win <- c("Nov", "Dec", "Jan") # using NDJ as wind period to relate to FMA PDO
# define winter years: Nov/Dec are assigned to the following year's winter
win.y1 <- as.numeric(as.character(y1))
win.y1[m1 %in% c("Nov", "Dec")] <- win.y1[m1 %in% c("Nov", "Dec")] + 1
# restrict to our selected winter months
tauX <- tauX[m1 %in% win,]
# restrict the indexing vector of winter years
win.y1 <- win.y1[m1 %in% win]
# and get annual means of these winter values
# NOTE: ff reads win.y1 from the enclosing environment.
ff <- function(x) tapply(x, win.y1, mean)
tauX <- apply(tauX, 2, ff)
# now regress on the PDO for 1950:1988 and 1989:2010
# get rid of NAs for regression
land <- is.na(colMeans(tauX)) # Logical vector that's true over land!
# For analysis, we only use the columns of the matrix with non-missing values:
tauX <- tauX[,!land]
# Era-specific regression slopes, one per ocean grid cell (grown in the loop).
regr.early.X <- regr.late.X <- regr.heat.X <- NA # vectors for regression coefficients in both eras
X.pvals <- NA # object to catch p values
for(j in 1:ncol(tauX)){
# j <- 1
# Subset for the cell of interest. Rows 2:71 pair winters with pdo.FMA[50:119];
# eras span 40/25/5 years -- assumes tauX has 71 winter rows (TODO confirm).
temp <- data.frame(tauX=tauX[2:71, j], pdo=pdo.FMA[50:119], era=c(rep("early", 40), rep("late", 25), rep("heat",5)))
mod <- gls(tauX ~ pdo*era, data=temp, corAR1()) # again, autocorrelated residuals allowed
# Baseline ("early") era slope on pdo; late/heat slopes add the interactions.
# Row indices into tTable assume the default factor-level ordering -- verify.
regr.early.X[j] <- summary(mod)$tTable[2,1]
regr.late.X[j] <- regr.early.X[j] + summary(mod)$tTable[6,1]
regr.heat.X[j] <- regr.early.X[j] + summary(mod)$tTable[5,1]
# X.pvals[j] <- summary(mod)$tTable[4,4]
}
# And now the northward (V) wind stress; mirrors the U-stress processing above.
nc <- nc_open("V_Stress_NCEP.nc")
# view dates (middle of month):
dv <- ncvar_get(nc, "TIME")
# Bug fix: the original passed `d` (the U-file dates) here instead of `dv`,
# which made the identical() sanity check below trivially TRUE.
dv <- dates(dv, origin = c(1,1,0001))
# check that the V-file dates really match the U-file dates
identical(d, dv)
# get all the data - they have already been subsetted by date and area in my version
tauY <- ncvar_get(nc, "VFLX")
xv <- ncvar_get(nc, "LON94_126") # view longitudes (degrees East)
yv <- ncvar_get(nc, "LAT69_82") # view latitudes
# check that the V grid matches the U grid
identical(xv, x)
identical(yv, y)
# process: reverse dimension order, then flatten to a (time x grid-cell) matrix
tauY <- aperm(tauY, 3:1)
tauY <- matrix(tauY, nrow = dim(tauY)[1], ncol = prod(dim(tauY)[2:3]))
# reuse the lat/lon column labels and dates computed for the U file
dimnames(tauY) <- list(as.character(d), paste("N", lat, "E", lon, sep = ""))
# restrict to the NDJ winter months and average within winter years
tauY <- tauY[m1 %in% win, ]
tauY <- apply(tauY, 2, ff)
# drop the land (all-NA) cells identified from the U-stress matrix
tauY <- tauY[, !land]
# era-specific regression slopes of tauY on the FMA PDO, one per ocean cell
regr.early.Y <- regr.late.Y <- regr.heat.Y <- NA
Y.pvals <- NA # object to catch p values (currently unused; kept for parity with X)
for (j in 1:ncol(tauY)) {
# Subset for the cell of interest. Rows 2:71 pair winters with pdo.FMA[50:119];
# eras span 40/25/5 years -- assumes tauY has 71 winter rows (TODO confirm).
temp <- data.frame(tauY = tauY[2:71, j], pdo = pdo.FMA[50:119],
era = c(rep("early", 40), rep("late", 25), rep("heat", 5)))
mod <- gls(tauY ~ pdo * era, data = temp, corAR1()) # AR(1) residuals allowed
# Baseline ("early") slope; late/heat slopes add the interaction terms.
regr.early.Y[j] <- summary(mod)$tTable[2, 1]
regr.late.Y[j] <- regr.early.Y[j] + summary(mod)$tTable[6, 1]
regr.heat.Y[j] <- regr.early.Y[j] + summary(mod)$tTable[5, 1]
# Y.pvals[j] <- summary(mod)$tTable[4,4]
}
# Now plot the combined regression coefficients
# combine the regression coefficients for the two directions
# (vector magnitude of the X/Y slopes; directional sign is discarded)
regr.early.XY <- sqrt(regr.early.X^2 + regr.early.Y^2)
regr.late.XY <- sqrt(regr.late.X^2 + regr.late.Y^2)
regr.heat.XY <- sqrt(regr.heat.X^2 + regr.heat.Y^2)
# set up the color scheme: tim.colors palette with the center band muted to grays
new.col <- tim.colors(64)
grays <- c("gray98", "gray97", "gray96", "gray95", "gray94", "gray93", "gray92", "gray91", "gray90", "gray89", "gray88")
new.col[27:36] <- c(grays[5:1], grays[1:5])
# setup the layout: legend margin/line, axis text size, tick length
l.mar <- 3
l.cex <- 0.8
l.l <- 0.2
tc.l <- -0.2
# two panel layout
png("ncep ncar vs pdo.png", 8, 4, units="in", res=300)
par(mar=c(1.5,2.5,1,0.5), tcl=tc.l, mfrow=c(1,2), oma=c(0,0,0,0.2))
zlim <- range(regr.early.XY, regr.late.XY)
# Panel 1: early era. Rebuild the full grid with NA over land.
# NOTE(review): tauY has land columns dropped, so z starts shorter than `land`;
# the logical assignment extends z to the full grid length (R fills with NA).
# Works, but rep(NA, length(land)) would be clearer -- confirm.
z <- rep(NA, ncol(tauY))
z[!land] <- regr.early.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1950-1988", cex=0.8)
# Panel 2: late era, same construction
z <- rep(NA, ncol(tauY))
z[!land] <- regr.late.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1989-2013", cex=0.8)
dev.off()
# three panel layout: early / late / heat eras on one on-screen device
# NOTE(review): quartz() is a macOS-only graphics device.
quartz()
par(mar=c(1.5,2.5,1,0.5), tcl=tc.l, mfrow=c(1,3), oma=c(0,0,0,0.2))
zlim <- range(regr.early.XY, regr.late.XY, regr.heat.XY)
# Panel 1: early era (see note in the two-panel section about z's length)
z <- rep(NA, ncol(tauY))
z[!land] <- regr.early.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1950-1988", cex=0.8)
# Panel 2: late era
z <- rep(NA, ncol(tauY))
z[!land] <- regr.late.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1989-2013", cex=0.8)
# Panel 3: heatwave era
z <- rep(NA, ncol(tauY))
z[!land] <- regr.heat.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 2014-2018", cex=0.8)
|
/code_archive/NCEP NCAR wind stress regression on PDO.R
|
no_license
|
OcnAtmCouplingAndClimateForcing/non-som
|
R
| false
| false
| 11,178
|
r
|
# NOTE(review): this section is a duplicated copy of the script that also
# appears earlier in this file (a dataset-extraction artifact); it re-runs
# the PDO load and U-stress regression unchanged.
library(reshape2)
library(ncdf4)
library(MuMIn)
library(zoo)
library(nlme)
library(dplyr)
library(chron)
library(fields)
library(maps)
library(mapdata)
library(tidyr)
# load and process the PDO
names <- read.table("~pdo", skip=30, nrows=1, as.is = T)
pdo <- read.table("~pdo", skip=31, nrows=119, fill=T, col.names = names)
pdo$YEAR <- 1900:(1899+nrow(pdo)) # drop asterisks!
pdo <- pdo %>%
gather(month, value, -YEAR) %>%
arrange(YEAR)
# and get FMA mean values
pdo <- filter(pdo, month %in% c("FEB", "MAR", "APR"))
pdo.FMA <- tapply(pdo$value, pdo$YEAR, mean)
# load the NCEP NCAR wind stress data
# start with U
nc <- nc_open("U_Stress_NCEP.nc")
# view dates (middle of month):
d <- ncvar_get(nc, "TIME")
d <- dates(d, origin = c(1,1,0001))
# get all the data - they have already been subsetted by date and area in my version
tauX <- ncvar_get(nc, "UFLX")
x <- ncvar_get(nc, "LON94_126") # view longitudes (degrees East)
y <- ncvar_get(nc, "LAT69_82") # view latitudes
# process!
tauX <- aperm(tauX, 3:1) # First, reverse order of dimensions ("transpose" array)
tauX <- matrix(tauX, nrow=dim(tauX)[1], ncol=prod(dim(tauX)[2:3])) # Change to matrix
# Keep track of corresponding latitudes and longitudes of each column:
lat <- rep(y, length(x)) # Vector of latitudes
lon <- rep(x, each = length(y)) # Vector of longitudes
dimnames(tauX) <- list(as.character(d), paste("N", lat, "E", lon, sep=""))
m1 <- months(d)
y1 <- years(d)
dec.yr1 <- as.numeric(as.character(y1)) + (as.numeric(m1)-0.5)/12
# and define the seasons for analysis
win <- c("Nov", "Dec", "Jan") # using NDJ as wind period to relate to FMA PDO
# define winter years
win.y1 <- as.numeric(as.character(y1))
win.y1[m1 %in% c("Nov", "Dec")] <- win.y1[m1 %in% c("Nov", "Dec")] + 1
# restrict to our selected winter months
tauX <- tauX[m1 %in% win,]
# restrict the indexing vector of winter years
win.y1 <- win.y1[m1 %in% win]
# and get annual means of these winter values
ff <- function(x) tapply(x, win.y1, mean)
tauX <- apply(tauX, 2, ff)
# now regress on the PDO for 1950:1988 and 1989:2010
# get rid of NAs for regression
land <- is.na(colMeans(tauX)) # Logical vector that's true over land!
# For analysis, we only use the columns of the matrix with non-missing values:
tauX <- tauX[,!land]
regr.early.X <- regr.late.X <- regr.heat.X <- NA # vectors for regression coefficients in both eras
X.pvals <- NA # object to catch p values
for(j in 1:ncol(tauX)){
# j <- 1
# subset for cell of interest; rows 2:71 pair winters with pdo.FMA[50:119]
temp <- data.frame(tauX=tauX[2:71, j], pdo=pdo.FMA[50:119], era=c(rep("early", 40), rep("late", 25), rep("heat",5)))
mod <- gls(tauX ~ pdo*era, data=temp, corAR1()) # again, autocorrelated residuals allowed
regr.early.X[j] <- summary(mod)$tTable[2,1]
regr.late.X[j] <- regr.early.X[j] + summary(mod)$tTable[6,1]
regr.heat.X[j] <- regr.early.X[j] + summary(mod)$tTable[5,1]
# X.pvals[j] <- summary(mod)$tTable[4,4]
}
# And now the northward (V) wind stress; mirrors the U-stress processing above.
nc <- nc_open("V_Stress_NCEP.nc")
# view dates (middle of month):
dv <- ncvar_get(nc, "TIME")
# Bug fix: the original passed `d` (the U-file dates) here instead of `dv`,
# which made the identical() sanity check below trivially TRUE.
dv <- dates(dv, origin = c(1,1,0001))
# check that the V-file dates really match the U-file dates
identical(d, dv)
# get all the data - they have already been subsetted by date and area in my version
tauY <- ncvar_get(nc, "VFLX")
xv <- ncvar_get(nc, "LON94_126") # view longitudes (degrees East)
yv <- ncvar_get(nc, "LAT69_82") # view latitudes
# check that the V grid matches the U grid
identical(xv, x)
identical(yv, y)
# process: reverse dimension order, then flatten to a (time x grid-cell) matrix
tauY <- aperm(tauY, 3:1)
tauY <- matrix(tauY, nrow = dim(tauY)[1], ncol = prod(dim(tauY)[2:3]))
# reuse the lat/lon column labels and dates computed for the U file
dimnames(tauY) <- list(as.character(d), paste("N", lat, "E", lon, sep = ""))
# restrict to the NDJ winter months and average within winter years
tauY <- tauY[m1 %in% win, ]
tauY <- apply(tauY, 2, ff)
# drop the land (all-NA) cells identified from the U-stress matrix
tauY <- tauY[, !land]
# era-specific regression slopes of tauY on the FMA PDO, one per ocean cell
regr.early.Y <- regr.late.Y <- regr.heat.Y <- NA
Y.pvals <- NA # object to catch p values (currently unused; kept for parity with X)
for (j in 1:ncol(tauY)) {
# Subset for the cell of interest. Rows 2:71 pair winters with pdo.FMA[50:119];
# eras span 40/25/5 years -- assumes tauY has 71 winter rows (TODO confirm).
temp <- data.frame(tauY = tauY[2:71, j], pdo = pdo.FMA[50:119],
era = c(rep("early", 40), rep("late", 25), rep("heat", 5)))
mod <- gls(tauY ~ pdo * era, data = temp, corAR1()) # AR(1) residuals allowed
# Baseline ("early") slope; late/heat slopes add the interaction terms.
regr.early.Y[j] <- summary(mod)$tTable[2, 1]
regr.late.Y[j] <- regr.early.Y[j] + summary(mod)$tTable[6, 1]
regr.heat.Y[j] <- regr.early.Y[j] + summary(mod)$tTable[5, 1]
# Y.pvals[j] <- summary(mod)$tTable[4,4]
}
# NOTE(review): duplicated copy of the plotting section (dataset-extraction
# artifact); behavior is unchanged from the copy earlier in the file.
# Now plot the combined regression coefficients
# combine the regression coefficients for the two directions
regr.early.XY <- sqrt(regr.early.X^2 + regr.early.Y^2)
regr.late.XY <- sqrt(regr.late.X^2 + regr.late.Y^2)
regr.heat.XY <- sqrt(regr.heat.X^2 + regr.heat.Y^2)
# set up the color scheme
new.col <- tim.colors(64)
grays <- c("gray98", "gray97", "gray96", "gray95", "gray94", "gray93", "gray92", "gray91", "gray90", "gray89", "gray88")
new.col[27:36] <- c(grays[5:1], grays[1:5])
# setup the layout
l.mar <- 3
l.cex <- 0.8
l.l <- 0.2
tc.l <- -0.2
# two panel layout
png("ncep ncar vs pdo.png", 8, 4, units="in", res=300)
par(mar=c(1.5,2.5,1,0.5), tcl=tc.l, mfrow=c(1,2), oma=c(0,0,0,0.2))
zlim <- range(regr.early.XY, regr.late.XY)
z <- rep(NA, ncol(tauY))
z[!land] <- regr.early.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1950-1988", cex=0.8)
z <- rep(NA, ncol(tauY))
z[!land] <- regr.late.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1989-2013", cex=0.8)
dev.off()
# three panel layout (quartz() is a macOS-only graphics device)
quartz()
par(mar=c(1.5,2.5,1,0.5), tcl=tc.l, mfrow=c(1,3), oma=c(0,0,0,0.2))
zlim <- range(regr.early.XY, regr.late.XY, regr.heat.XY)
z <- rep(NA, ncol(tauY))
z[!land] <- regr.early.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1950-1988", cex=0.8)
z <- rep(NA, ncol(tauY))
z[!land] <- regr.late.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 1989-2013", cex=0.8)
z <- rep(NA, ncol(tauY))
z[!land] <- regr.heat.XY
z <- t(matrix(z,length(y))) # Re-shape to a matrix with latitudes in columns, longitudes in rows
image.plot(x,y,z, col=new.col, zlim=c(-zlim[2],zlim[2]), ylab="", xlab="", yaxt="n", xaxt="n",legend.mar=l.mar, legend.line=l.l, axis.args=list(cex.axis=l.cex, tcl=tc.l, mgp=c(3,0.3,0)))
contour(x, y, z, add=T, drawlabels = F, lwd=0.7, col="grey")
map('world2Hires', 'Canada', fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'usa',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'USSR',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Japan',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'Mexico',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires', 'China',fill=T,xlim=c(130,250), ylim=c(20,70),add=T, lwd=0.5, col="darkgoldenrod3")
map('world2Hires',fill=F, xlim=c(130,250), ylim=c(20,66),add=T, lwd=1)
mtext("Wind stress-PDO 2014-2018", cex=0.8)
|
#' Get the weight-at-age data frame for the acoustic survey
#'
#' @param d A data frame of acoustic survey specimens with columns
#'   `year`, `month`, `sex`, `age`, `length`, and `weight` (grams --
#'   TODO confirm units; values are divided by 1000 to get kg).
#'
#' @return A data frame with columns `Source` (always "CAN_acoustic"),
#'   `Weight_kg`, `Sex` ("M"/"F"), `Age_yrs`, `Length_cm`, `Month`, and
#'   `Year`, with incomplete records removed and restricted to survey years.
#' @export
get_surv_wa <- function(d){
  # Historical survey years, plus every odd year after 2012 present in the data
  keep_yrs <- c(1995, 1998, 2001, 2003, 2009, 2011, 2012)
  all_yrs <- unique(d$year)
  remain_yrs <- all_yrs[all_yrs > 2012]
  # as.logical(x %% 2) keeps odd years only
  keep_yrs <- sort(c(keep_yrs, remain_yrs[as.logical(remain_yrs %% 2)]))
  d %>%
    transmute(Source = "CAN_acoustic",
              Weight_kg = weight / 1000,
              Sex = ifelse(is.na(sex), NA_character_, ifelse(sex == 1, "M", "F")),
              Age_yrs = age,
              Length_cm = length,
              Month = month,
              Year = year) %>%
    # Keep only complete records. (A redundant duplicate !is.na(Weight_kg)
    # condition in the original has been removed; behavior is identical.)
    filter(!is.na(Weight_kg),
           !is.na(Sex),
           !is.na(Age_yrs),
           !is.na(Length_cm),
           !is.na(Month),
           !is.na(Year)) %>%
    filter(Year %in% keep_yrs)
}
#' Get the weight-at-age data frame for commercial data.
#'
#' @param d A data frame of commercial specimens with columns
#'   `major_stat_area_code`, `minor_stat_area_code`, `gear_desc`,
#'   `trip_sub_type_desc`, `vessel_id`, `trip_start_date`, `sex`, `age`,
#'   `length`, and `weight` (grams -- TODO confirm units).
#'
#' @return a data frame with Source set to `CAN_shoreside`, `CAN_freezer`,
#'   `CAN_jv`, and `CAN_polish` for the four fishery types
#'   (the original doc mentioned only two; four are produced)
#' @importFrom lubridate year month
#' @export
get_comm_wa <- function(d){
  major_areas <- c("02", "03", "04", "05", "06", "07", "08", "09")
  # Include BC offshore and St. of Juan de Fuca (Major 1, Minor 20)
  map(c("CAN_shoreside", "CAN_freezer", "CAN_jv", "CAN_polish"), ~{
    k <- d %>%
      filter(major_stat_area_code %in% major_areas |
               (major_stat_area_code == "01" & minor_stat_area_code == "20")) %>%
      filter(gear_desc == "MIDWATER TRAWL")
    if(.x == "CAN_shoreside"){
      # Domestic trips on vessels that are NOT freezer trawlers
      # (freezer_trawlers appears to be a package-level data object -- confirm)
      k <- k %>%
        filter(trip_sub_type_desc %in% c("OBSERVED DOMESTIC", "NON - OBSERVED DOMESTIC")) %>%
        filter(!vessel_id %in% freezer_trawlers$GFBIO.ID)
    }else if(.x == "CAN_freezer"){
      # Domestic trips on freezer trawlers
      k <- k %>%
        filter(trip_sub_type_desc %in% c("OBSERVED DOMESTIC", "NON - OBSERVED DOMESTIC")) %>%
        filter(vessel_id %in% freezer_trawlers$GFBIO.ID)
    }else if(.x == "CAN_jv"){
      k <- k %>%
        filter(trip_sub_type_desc == "OBSERVED J-V")
    }else{
      # CAN_polish
      k <- k %>%
        filter(trip_sub_type_desc %in% c("POLISH COMM NATIONAL", "POLISH COMMERCIAL SUPPLEMENTAL"))
    }
    k %>% transmute(Source = .x,
                    Weight_kg = weight / 1000,
                    Sex = ifelse(is.na(sex), NA_character_, ifelse(sex == 1, "M", "F")),
                    Age_yrs = age,
                    Length_cm = length,
                    Month = month(trip_start_date),
                    Year = year(trip_start_date)) %>%
      # Keep only complete records. (A redundant duplicate !is.na(Weight_kg)
      # condition in the original has been removed; behavior is identical.)
      filter(!is.na(Weight_kg),
             !is.na(Sex),
             !is.na(Age_yrs),
             !is.na(Length_cm),
             !is.na(Month),
             !is.na(Year))
  }) %>% bind_rows
}
|
/R/get-wa.R
|
no_license
|
pbs-assess/hakedata
|
R
| false
| false
| 2,734
|
r
|
#' Get the weight-at-age data frame for the acoustic survey
#'
#' @param d A data frame of acoustic survey specimens with columns
#'   `year`, `month`, `sex`, `age`, `length`, and `weight` (grams --
#'   TODO confirm units; values are divided by 1000 to get kg).
#'
#' @return A data frame with columns `Source` (always "CAN_acoustic"),
#'   `Weight_kg`, `Sex` ("M"/"F"), `Age_yrs`, `Length_cm`, `Month`, and
#'   `Year`, with incomplete records removed and restricted to survey years.
#' @export
get_surv_wa <- function(d){
  # Historical survey years, plus every odd year after 2012 present in the data
  keep_yrs <- c(1995, 1998, 2001, 2003, 2009, 2011, 2012)
  all_yrs <- unique(d$year)
  remain_yrs <- all_yrs[all_yrs > 2012]
  # as.logical(x %% 2) keeps odd years only
  keep_yrs <- sort(c(keep_yrs, remain_yrs[as.logical(remain_yrs %% 2)]))
  d %>%
    transmute(Source = "CAN_acoustic",
              Weight_kg = weight / 1000,
              Sex = ifelse(is.na(sex), NA_character_, ifelse(sex == 1, "M", "F")),
              Age_yrs = age,
              Length_cm = length,
              Month = month,
              Year = year) %>%
    # Keep only complete records. (A redundant duplicate !is.na(Weight_kg)
    # condition in the original has been removed; behavior is identical.)
    filter(!is.na(Weight_kg),
           !is.na(Sex),
           !is.na(Age_yrs),
           !is.na(Length_cm),
           !is.na(Month),
           !is.na(Year)) %>%
    filter(Year %in% keep_yrs)
}
#' Get the weight-at-age data frame for commercial data.
#'
#' @param d A data frame of commercial specimens with columns
#'   `major_stat_area_code`, `minor_stat_area_code`, `gear_desc`,
#'   `trip_sub_type_desc`, `vessel_id`, `trip_start_date`, `sex`, `age`,
#'   `length`, and `weight` (grams -- TODO confirm units).
#'
#' @return a data frame with Source set to `CAN_shoreside`, `CAN_freezer`,
#'   `CAN_jv`, and `CAN_polish` for the four fishery types
#'   (the original doc mentioned only two; four are produced)
#' @importFrom lubridate year month
#' @export
get_comm_wa <- function(d){
  major_areas <- c("02", "03", "04", "05", "06", "07", "08", "09")
  # Include BC offshore and St. of Juan de Fuca (Major 1, Minor 20)
  map(c("CAN_shoreside", "CAN_freezer", "CAN_jv", "CAN_polish"), ~{
    k <- d %>%
      filter(major_stat_area_code %in% major_areas |
               (major_stat_area_code == "01" & minor_stat_area_code == "20")) %>%
      filter(gear_desc == "MIDWATER TRAWL")
    if(.x == "CAN_shoreside"){
      # Domestic trips on vessels that are NOT freezer trawlers
      # (freezer_trawlers appears to be a package-level data object -- confirm)
      k <- k %>%
        filter(trip_sub_type_desc %in% c("OBSERVED DOMESTIC", "NON - OBSERVED DOMESTIC")) %>%
        filter(!vessel_id %in% freezer_trawlers$GFBIO.ID)
    }else if(.x == "CAN_freezer"){
      # Domestic trips on freezer trawlers
      k <- k %>%
        filter(trip_sub_type_desc %in% c("OBSERVED DOMESTIC", "NON - OBSERVED DOMESTIC")) %>%
        filter(vessel_id %in% freezer_trawlers$GFBIO.ID)
    }else if(.x == "CAN_jv"){
      k <- k %>%
        filter(trip_sub_type_desc == "OBSERVED J-V")
    }else{
      # CAN_polish
      k <- k %>%
        filter(trip_sub_type_desc %in% c("POLISH COMM NATIONAL", "POLISH COMMERCIAL SUPPLEMENTAL"))
    }
    k %>% transmute(Source = .x,
                    Weight_kg = weight / 1000,
                    Sex = ifelse(is.na(sex), NA_character_, ifelse(sex == 1, "M", "F")),
                    Age_yrs = age,
                    Length_cm = length,
                    Month = month(trip_start_date),
                    Year = year(trip_start_date)) %>%
      # Keep only complete records. (A redundant duplicate !is.na(Weight_kg)
      # condition in the original has been removed; behavior is identical.)
      filter(!is.na(Weight_kg),
             !is.na(Sex),
             !is.na(Age_yrs),
             !is.na(Length_cm),
             !is.na(Month),
             !is.na(Year))
  }) %>% bind_rows
}
|
library(tm)
# Load the Harry Potter saga texts as a corpus (one document per book).
hp=VCorpus(DirSource("/home/abdelali/Documents/ETU/Logiciel R/tm_Harrypotter/tm/",encoding = "UTF-8"))
inspect(hp)
# Data cleaning: whitespace, punctuation, case, stop words, numbers.
hp=tm_map(hp,stripWhitespace)
hp=tm_map(hp, removePunctuation)
hp=tm_map(hp, content_transformer(tolower))
hp=tm_map(hp, removeWords, stopwords("english"))
hp=tm_map(hp, removeNumbers)
hp=tm_map(hp, stripWhitespace)
# Stemming: deliberately skipped -- it had a negative effect on the data.
library(SnowballC)
#hp=tm_map(hp,stemDocument)
# Document-term matrix (rows = books, columns = terms):
TerDocHp=DocumentTermMatrix(hp)
# DocTerHp=TermDocumentMatrix(hp)
dtm=inspect(TerDocHp)
dtm[1:5,1:5]
# Drop sparse terms (keep terms present in at least ~40% of documents).
TerDocHp=removeSparseTerms(TerDocHp, 0.6)
# Frequent terms: terms appearing at least 100 times in document 2.
findFreqTerms(TerDocHp[2,],100)
# Number of documents in which each term appears:
freq.saga=sapply(as.data.frame(dtm),function(x){length(which(x > 0))})
# Terms that appear in more than 5 documents:
freq.saga[which(freq.saga>5)]
## One way to detect the characters and the co-occurrences of their
## appearances across the saga:
## Load the list of real Harry Potter characters:
act=VCorpus(DirSource("/home/abdelali/Documents/ETU/Logiciel R/actors/actor",encoding = "UTF-8"))
#actors=read.csv("/home/abdelali/Documents/ETU/Logiciel R/castActors_idbm.csv",sep=",")
#actorsC=VCorpus(DataframeSource(actors$Harry.Potter.and.the.Sorcerer.s.Stone))
# Apply the same cleaning as for the books.
act=tm_map(act, removePunctuation)
act=tm_map(act, content_transformer(tolower))
act=tm_map(act, removeWords, stopwords("english"))
name.personnage.saga.HP=tm_map(act,stripWhitespace)
# The books and the true occurrences of the characters:
name.personnage.dtm=DocumentTermMatrix(act)
name.personnage=inspect(name.personnage.dtm)
name.personnage[,1:10]
# Frequent characters: number of documents each character name appears in.
freq.actor <- sapply(as.data.frame(name.personnage),function(x){length(which(x > 0))})
freq.actor2=freq.actor[which(freq.actor>2)]
# Association between the real characters and their co-occurrence in the novels.
cher=findAssocs(TerDocHp,names(freq.actor2),corlimit = 0.9)
data.frame(cher$harry)
# Less frequent characters:
# namePerso=colnames(name.personnage[1,which(freq.actor>2)]) # first-book characters appearing at least twice
namePerso=names(which(name.personnage[1,]>0)) # characters of the first book
# namePerso_tous=colnames(name.personnage)
# For the whole saga:
charachterHP_saga= which(TerDocHp$dimnames$Terms %in% namePerso)
# Bug fix: the original indexed with `charachterHP`, which is not defined
# until later in the script; use the `_saga` index computed just above.
CharMatTD_saga=TerDocHp[,charachterHP_saga]
CharMat_saga=inspect(TerDocHp[,charachterHP_saga])
## Harry Potter and the Chamber of Secrets
# Characters of the first book
#------------------------------------------
namePerso=names(which(name.personnage[1,]>0))
# Filter the terms down to the real characters of the novel:
charachterHP = which(TerDocHp[1,]$dimnames$Terms %in% namePerso)
CharMatTD=TerDocHp[1,charachterHP]
CharMat=inspect(TerDocHp[1,charachterHP])
# Graph: intensity of the relationships between characters
library(igraph)
# Term-term matrix (co-occurrence counts between character names)
# CharMat[CharMat>=1]<-1
# termmatrix=CharMat_in %*% t(CharMat_in)
termmatrix=crossprod(as.matrix(CharMat))
# Zero the diagonal so self-links are excluded from the graph.
diag(termmatrix)<-0
termmatrix[1:5,1:5]
termmatrix["harry", "dobby"]
termmatrix["harry", "ron"]
# Weighted undirected co-occurrence graph
g<-graph.adjacency(termmatrix,weighted = T, mode = "undirected")
set.seed(3952)
g<-simplify(g)
g
V(g)$label <-V(g)$name
V(g)$degree <- degree(g)
plot(g,layout=layout.kamada.kawai)
# Customize the graph: scale labels by degree, fade edges by weight.
V(g)$label.cex <- 2.2 * V(g)$degree / max(V(g)$degree)+ .2
V(g)$label.color <- rgb(0, 0, .2, .8)
V(g)$frame.color <- NA
egam <- (log(E(g)$weight)+.4) / max(log(E(g)$weight)+.4)
E(g)$color <- rgb(.5, .5, 0, egam)
E(g)$width <- egam
plot(g, layout=layout.kamada.kawai)
# Social network analysis metrics:
betweenness(g,directed = F)
closeness(g, mode = "out")
degree(g, mode = "out")
## "HARRY"'s friends across the whole saga:
##----------------------------------------
# Terms associated with "harry" (correlation >= 0.5):
Harry=findAssocs(TerDocHp,"harry",0.5)
# Keep only the real character names among the associated terms.
relation_avec_harry=names(Harry$harry)[ names(Harry$harry) %in% colnames(name.personnage)]
tdm_harry=TerDocHp[,colnames(TerDocHp)%in% relation_avec_harry]
inspect(tdm_harry)
# Term-term (co-occurrence) matrix:
TTM=crossprod(as.matrix(tdm_harry))
diag(TTM)<-0
TTM[1:10,1:10]
# Weighted undirected graph of Harry's associates
g_harry<-graph.adjacency(TTM,weighted = T, mode = "undirected")
g_harry<-simplify(g_harry)
set.seed(10000)
V(g_harry)$label <-V(g_harry)$name
V(g_harry)$degree <- degree(g_harry)
plot(g_harry,layout=layout.kamada.kawai)
# Social network analysis metrics:
sort(betweenness(g_harry,directed = F),decreasing = T)
sort(closeness(g_harry, mode = "out"), decreasing = T)
sort(degree(g_harry, mode = "out"),decreasing = T)
## Relationships between Harry Potter's professors:
#---------------------------------
# Terms strongly associated with "professor" (correlation >= 0.85)
professor=findAssocs(TerDocHp,"professor",0.85)
# Keep only the real character names among the associated terms.
relation_avec_professor=names(professor$professor)[ names(professor$professor) %in% colnames(name.personnage)]
tdm_professor=TerDocHp[,colnames(TerDocHp)%in% relation_avec_professor]
inspect(tdm_professor)
# Term-term (co-occurrence) matrix:
TTM_pro=crossprod(as.matrix(tdm_professor))
diag(TTM_pro)<-0
TTM_pro
set.seed(10000)
# Weighted undirected co-occurrence graph of the professors
g_professor<-graph.adjacency(TTM_pro,weighted = T, mode = "undirected")
g_professor<-simplify(g_professor)
V(g_professor)$label <-V(g_professor)$name
V(g_professor)$degree <- degree(g_professor)
plot(g_professor,layout=layout.kamada.kawai)
# Social network analysis metrics:
sort(betweenness(g_professor,directed = F),decreasing = T)
sort(closeness(g_professor, mode = "out"),decreasing = F)
degree(g_professor, mode = "out")
# Modularity of the professor community (random-walk community detection)
wc <- walktrap.community(g_professor)
wc
modularity(wc)
membership(wc)
plot(g_professor, vertex.color=membership(wc))
# # Customize the graph:
# V(g_professor)$label.cex <- 2.2 * V(g_professor)$degree / max(V(g_professor)$degree)+ .2
# V(g_professor)$label.color <- rgb(0, 0, .2, .8)
# V(g_professor)$frame.color <- NA
# egam <- (log(E(g_professor)$weight)+.4) / max(log(E(g_professor)$weight)+.4)
# E(g_professor)$color <- rgb(.5, .5, 0, egam)
# E(g_professor)$width <- egam
# plot(g_professor, layout=layout.kamada.kawai)
|
/textMining_Harry potter.R
|
no_license
|
alimanager/Rproject
|
R
| false
| false
| 6,110
|
r
|
library(tm)
# Load the Harry Potter saga corpus (one document per book):
hp=VCorpus(DirSource("/home/abdelali/Documents/ETU/Logiciel R/tm_Harrypotter/tm/",encoding = "UTF-8"))
inspect(hp)
# Clean the text:
hp=tm_map(hp,stripWhitespace)
hp=tm_map(hp, removePunctuation)
hp=tm_map(hp, content_transformer(tolower))
hp=tm_map(hp, removeWords, stopwords("english"))
hp=tm_map(hp, removeNumbers)
hp=tm_map(hp, stripWhitespace)
# Stemming: deliberately skipped, it had a negative effect on the data.
library(SnowballC)
#hp=tm_map(hp,stemDocument)
# Document-term matrix:
TerDocHp=DocumentTermMatrix(hp)
# DocTerHp=TermDocumentMatrix(hp)
dtm=inspect(TerDocHp)
dtm[1:5,1:5]
# Drop very sparse terms (sparsity threshold 0.6):
TerDocHp=removeSparseTerms(TerDocHp, 0.6)
# Terms occurring at least 100 times in document 2:
findFreqTerms(TerDocHp[2,],100)
# Number of documents each term appears in:
freq.saga=sapply(as.data.frame(dtm),function(x){length(which(x > 0))})
# Terms appearing in more than five documents:
freq.saga[which(freq.saga>5)]
## Detect the real characters and the co-occurrence of their appearances in the saga.
## Load the list of genuine Harry Potter characters:
act=VCorpus(DirSource("/home/abdelali/Documents/ETU/Logiciel R/actors/actor",encoding = "UTF-8"))
#actors=read.csv("/home/abdelali/Documents/ETU/Logiciel R/castActors_idbm.csv",sep=",")
#actorsC=VCorpus(DataframeSource(actors$Harry.Potter.and.the.Sorcerer.s.Stone))
# Apply the same cleaning pipeline used for the book corpus:
act=tm_map(act, removePunctuation)
act=tm_map(act, content_transformer(tolower))
act=tm_map(act, removeWords, stopwords("english"))
name.personnage.saga.HP=tm_map(act,stripWhitespace)
# Books versus true character occurrences:
name.personnage.dtm=DocumentTermMatrix(act)
name.personnage=inspect(name.personnage.dtm)
name.personnage[,1:10]
# Number of books each character appears in:
freq.actor <- sapply(as.data.frame(name.personnage),function(x){length(which(x > 0))})
freq.actor2=freq.actor[which(freq.actor>2)]
# Association between the real characters and their co-occurrence in the novels:
cher=findAssocs(TerDocHp,names(freq.actor2),corlimit = 0.9)
data.frame(cher$harry)
# Less frequent characters:
# namePerso=colnames(name.personnage[1,which(freq.actor>2)]) # book-1 characters appearing at least twice
namePerso=names(which(name.personnage[1,]>0)) # characters of the first book
# namePerso_tous=colnames(name.personnage)
# For the whole saga:
charachterHP_saga= which(TerDocHp$dimnames$Terms %in% namePerso)
# BUG FIX: the two lines below previously indexed with 'charachterHP', a
# variable defined only later in the script (so this section errored when
# run top-to-bottom); they must use 'charachterHP_saga' computed just above.
CharMatTD_saga=TerDocHp[,charachterHP_saga]
CharMat_saga=inspect(TerDocHp[,charachterHP_saga])
## Harry Potter and the Chamber of Secrets
# Characters of the first book:
#------------------------------------------
namePerso=names(which(name.personnage[1,]>0))
# Restrict DTM terms to the real character names of this book:
charachterHP = which(TerDocHp[1,]$dimnames$Terms %in% namePerso)
CharMatTD=TerDocHp[1,charachterHP]
CharMat=inspect(TerDocHp[1,charachterHP])
# Graph: strength of the relationships between characters
library(igraph)
# Term-term matrix
# CharMat[CharMat>=1]<-1
# termmatrix=CharMat_in %*% t(CharMat_in)
termmatrix=crossprod(as.matrix(CharMat))
diag(termmatrix)<-0  # zero the diagonal so self-links are ignored
termmatrix[1:5,1:5]  # spot-check a corner of the matrix
termmatrix["harry", "dobby"]
termmatrix["harry", "ron"]
# Build an undirected weighted graph from the adjacency matrix.
g<-graph.adjacency(termmatrix,weighted = T, mode = "undirected")
set.seed(3952)  # fixed seed so the force-directed layout is reproducible
g<-simplify(g)  # drop loops and multi-edges
g
V(g)$label <-V(g)$name
V(g)$degree <- degree(g)
plot(g,layout=layout.kamada.kawai)
# Customize the graph appearance:
V(g)$label.cex <- 2.2 * V(g)$degree / max(V(g)$degree)+ .2
V(g)$label.color <- rgb(0, 0, .2, .8)
V(g)$frame.color <- NA
# Edge alpha and width scaled by (normalized) log edge weight.
egam <- (log(E(g)$weight)+.4) / max(log(E(g)$weight)+.4)
E(g)$color <- rgb(.5, .5, 0, egam)
E(g)$width <- egam
plot(g, layout=layout.kamada.kawai)
# Social-network-analysis metrics:
betweenness(g,directed = F)
closeness(g, mode = "out")
degree(g, mode = "out")
## Harry's "friends" across the whole saga:
##----------------------------------------
# Terms associated with "harry":
Harry=findAssocs(TerDocHp,"harry",0.5)
# Keep only the real character names:
relation_avec_harry=names(Harry$harry)[ names(Harry$harry) %in% colnames(name.personnage)]
tdm_harry=TerDocHp[,colnames(TerDocHp)%in% relation_avec_harry]
inspect(tdm_harry)
# Term-term matrix:
TTM=crossprod(as.matrix(tdm_harry))
diag(TTM)<-0
TTM[1:10,1:10]
g_harry<-graph.adjacency(TTM,weighted = T, mode = "undirected")
g_harry<-simplify(g_harry)
set.seed(10000)
V(g_harry)$label <-V(g_harry)$name
V(g_harry)$degree <- degree(g_harry)
plot(g_harry,layout=layout.kamada.kawai)
# Social-network-analysis metrics, ranked:
sort(betweenness(g_harry,directed = F),decreasing = T)
sort(closeness(g_harry, mode = "out"), decreasing = T)
sort(degree(g_harry, mode = "out"),decreasing = T)
## Relationships among the Harry Potter professors:
#---------------------------------
professor=findAssocs(TerDocHp,"professor",0.85)
relation_avec_professor=names(professor$professor)[ names(professor$professor) %in% colnames(name.personnage)]
tdm_professor=TerDocHp[,colnames(TerDocHp)%in% relation_avec_professor]
inspect(tdm_professor)
# Term-term matrix:
TTM_pro=crossprod(as.matrix(tdm_professor))
diag(TTM_pro)<-0
TTM_pro
set.seed(10000)
g_professor<-graph.adjacency(TTM_pro,weighted = T, mode = "undirected")
g_professor<-simplify(g_professor)
V(g_professor)$label <-V(g_professor)$name
V(g_professor)$degree <- degree(g_professor)
plot(g_professor,layout=layout.kamada.kawai)
sort(betweenness(g_professor,directed = F),decreasing = T)
sort(closeness(g_professor, mode = "out"),decreasing = F)
degree(g_professor, mode = "out")
# Community structure (modularity) within the professor network:
wc <- walktrap.community(g_professor)
wc
modularity(wc)
membership(wc)
plot(g_professor, vertex.color=membership(wc))
# # Graph customization (disabled):
# V(g_professor)$label.cex <- 2.2 * V(g_professor)$degree / max(V(g_professor)$degree)+ .2
# V(g_professor)$label.color <- rgb(0, 0, .2, .8)
# V(g_professor)$frame.color <- NA
# egam <- (log(E(g_professor)$weight)+.4) / max(log(E(g_professor)$weight)+.4)
# E(g_professor)$color <- rgb(.5, .5, 0, egam)
# E(g_professor)$width <- egam
# plot(g_professor, layout=layout.kamada.kawai)
|
# Match scoreboard: scrape 2016 Brasileirao fixture text from BOL.
library(rvest)
library(xml2)
library(stringr)
# Fixtures page for the 2016 Brazilian championship.
url<- 'https://noticias.bol.uol.com.br/esporte/campeonato/brasileirao/2016/jogos/'
webpage<- read_html(url)
# Each '.confronto' node holds one match-up; extract its visible text.
res_html <- html_nodes(webpage,'.confronto')
res_data <- html_text(res_html)
# Strip embedded carriage returns / newlines from the scraped strings.
res_data<- str_replace_all(res_data, "[\r\n]", "")
head(res_data)
# BUG FIX: removed the stray truncated token `read_ht`, which evaluated a
# non-existent object and aborted the script.
|
/web-scrapping.R
|
no_license
|
acf77/web-scrapping
|
R
| false
| false
| 328
|
r
|
# Match scoreboard: scrape 2016 Brasileirao fixture text from BOL.
library(rvest)
library(xml2)
library(stringr)
# Fixtures page for the 2016 Brazilian championship.
url<- 'https://noticias.bol.uol.com.br/esporte/campeonato/brasileirao/2016/jogos/'
webpage<- read_html(url)
# Each '.confronto' node holds one match-up; extract its visible text.
res_html <- html_nodes(webpage,'.confronto')
res_data <- html_text(res_html)
# Strip embedded carriage returns / newlines from the scraped strings.
res_data<- str_replace_all(res_data, "[\r\n]", "")
head(res_data)
# BUG FIX: removed the stray truncated token `read_ht`, which evaluated a
# non-existent object and aborted the script.
|
# Forecast the residual ARIMA model (m.res) 12 steps ahead and build a 95%
# prediction interval.  Call predict() once and reuse it (the original called
# it twice, doing the forecast computation redundantly).
fc <- predict(m.res, n.ahead = 12)
res.p <- fc$pred
se <- fc$se
ures <- res.p + 1.96*se   # upper residual bound
lres <- res.p - 1.96*se   # lower residual bound
# Regression coefficients (intercept C[1,1], slope C[2,1]) from the employment model.
C <- summary(m.employ)$coefficients
sigma <- sqrt(deviance(m.employ))   # residual scale (kept for reference; unused below)
# Future values of the predictor series (periods 193-204).
Xt1P <- read.table("~/Dropbox/R/Time Series Project/Xt1.p", quote="\"")
Xt1.p <- ts(Xt1P$V1, start = c(193), end = c(204))
# Regression-implied mean forecast of Xt2.
Xt2p <- C[2,1]*Xt1.p + C[1,1]
L <- lres + Xt2p
U <- ures + Xt2p
mina <- min(Xt2, L)
maxa <- max(Xt2, U)
# Observed hold-out values for comparison.
Xt20 <- read.table("~/Dropbox/R/Time Series Project/Xt20", quote="\"")
Xt20 <- ts(Xt20$V1, start = c(193), end = c(204))
plot(res.p + Xt2p,
     ylim=c(mina,maxa), ylab="Natural Resources and Mining Employment")
points(Xt20)
lines(U, col='red', lty="dashed")
lines(L, col='red', lty="dashed")
# Refit: ARIMA(2,1,0) on Xt1, AR(1) coefficient fixed at 0, Xt2 as regressor.
m.m <- arima(Xt1, order=c(2,1,0), transform.pars = FALSE, fixed=c(0,NA,NA), xreg=data.frame(Xt2))
m.m
|
/predict.R
|
no_license
|
zwangcode/R_at_UC
|
R
| false
| false
| 795
|
r
|
# Forecast the residual ARIMA model (m.res) 12 steps ahead and build a 95%
# prediction interval.  Call predict() once and reuse it (the original called
# it twice, doing the forecast computation redundantly).
fc <- predict(m.res, n.ahead = 12)
res.p <- fc$pred
se <- fc$se
ures <- res.p + 1.96*se   # upper residual bound
lres <- res.p - 1.96*se   # lower residual bound
# Regression coefficients (intercept C[1,1], slope C[2,1]) from the employment model.
C <- summary(m.employ)$coefficients
sigma <- sqrt(deviance(m.employ))   # residual scale (kept for reference; unused below)
# Future values of the predictor series (periods 193-204).
Xt1P <- read.table("~/Dropbox/R/Time Series Project/Xt1.p", quote="\"")
Xt1.p <- ts(Xt1P$V1, start = c(193), end = c(204))
# Regression-implied mean forecast of Xt2.
Xt2p <- C[2,1]*Xt1.p + C[1,1]
L <- lres + Xt2p
U <- ures + Xt2p
mina <- min(Xt2, L)
maxa <- max(Xt2, U)
# Observed hold-out values for comparison.
Xt20 <- read.table("~/Dropbox/R/Time Series Project/Xt20", quote="\"")
Xt20 <- ts(Xt20$V1, start = c(193), end = c(204))
plot(res.p + Xt2p,
     ylim=c(mina,maxa), ylab="Natural Resources and Mining Employment")
points(Xt20)
lines(U, col='red', lty="dashed")
lines(L, col='red', lty="dashed")
# Refit: ARIMA(2,1,0) on Xt1, AR(1) coefficient fixed at 0, Xt2 as regressor.
m.m <- arima(Xt1, order=c(2,1,0), transform.pars = FALSE, fixed=c(0,NA,NA), xreg=data.frame(Xt2))
m.m
|
DisableHIT <-
disable <-
function(hit = NULL,
         hit.type = NULL,
         annotation = NULL,
         response.group = NULL,
         verbose = getOption('MTurkR.verbose', TRUE),
         ...) {
    # Disable MTurk HITs identified directly by HITId ('hit'), by HITTypeId
    # ('hit.type'), or by RequesterAnnotation ('annotation').  The latter two
    # selectors are resolved to HITIds via SearchHITs().
    #
    # Returns a data.frame with one row per HIT: HITId and Valid (factor
    # 'TRUE'/'FALSE' indicating whether the API request succeeded).
    # Relies on the package-level SearchHITs() and request() helpers.
    operation <- "DisableHIT"
    if(!is.null(response.group)) {
        if(any(!response.group %in%
               c("Minimal", "HITQuestion", "HITDetail", "HITAssignmentSummary"))) {
            stop("ResponseGroup must be in c(Minimal,HITQuestion,HITDetail,HITAssignmentSummary)")
        }
    }
    # Reject the degenerate cases: no selector at all, or all three at once.
    if((is.null(hit) && is.null(hit.type) && is.null(annotation)) ||
       (!is.null(hit) && !is.null(hit.type) && !is.null(annotation))) {
        stop("Must provide 'hit' xor 'hit.type' xor 'annotation'")
    } else if(!is.null(hit)) {
        if(is.factor(hit))
            hit <- as.character(hit)
        hitlist <- hit
    } else if(!is.null(hit.type)) {
        if(is.factor(hit.type))
            hit.type <- as.character(hit.type)
        hitsearch <- SearchHITs(verbose = FALSE, return.qual.dataframe = FALSE, ...)
        hitlist <- hitsearch$HITs$HITId[hitsearch$HITs$HITTypeId %in% hit.type]
    } else if(!is.null(annotation)) {
        if(is.factor(annotation))
            annotation <- as.character(annotation)
        hitsearch <- SearchHITs(verbose = FALSE, return.qual.dataframe = FALSE, ...)
        hitlist <- hitsearch$HITs$HITId[hitsearch$HITs$RequesterAnnotation %in% annotation]
    }
    if(length(hitlist) == 0)
        stop("No HITs found for HITType")
    HITs <- setNames(data.frame(matrix(ncol = 2, nrow = length(hitlist))), c("HITId", "Valid"))
    for(i in seq_along(hitlist)) {
        GETiteration <- paste("&HITId=", hitlist[i], sep = "")
        if(!is.null(response.group)) {
            if(length(response.group) == 1) {
                GETiteration <- paste(GETiteration, "&ResponseGroup=",
                                      response.group, sep = "")
            } else {
                # BUG FIX: this inner loop previously reused 'i' as its index,
                # clobbering the outer HIT index so results were written to
                # the wrong rows and some HITs were skipped.
                for(j in seq_along(response.group)) {
                    GETiteration <- paste(GETiteration, "&ResponseGroup", j - 1,
                                          "=", response.group[j], sep = "")
                }
            }
        }
        request <- request(operation, GETparameters = GETiteration, ...)
        if(is.null(request$valid))
            return(request)
        if(is.null(response.group))
            request$ResponseGroup <- c("Minimal")
        else
            request$ResponseGroup <- response.group
        HITs[i, ] <- c(hitlist[i], request$valid)
        if(request$valid && verbose)
            message(i, ": HIT ", hitlist[i], " Disabled")
        else if(!request$valid && verbose)
            warning(i, ": Invalid Request for HIT ", hitlist[i])
    }
    HITs$Valid <- factor(HITs$Valid, levels = c('TRUE', 'FALSE'))
    return(HITs)
}
|
/MTurkR/R/DisableHIT.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,859
|
r
|
DisableHIT <-
disable <-
function(hit = NULL,
         hit.type = NULL,
         annotation = NULL,
         response.group = NULL,
         verbose = getOption('MTurkR.verbose', TRUE),
         ...) {
    # Disable MTurk HITs identified directly by HITId ('hit'), by HITTypeId
    # ('hit.type'), or by RequesterAnnotation ('annotation').  The latter two
    # selectors are resolved to HITIds via SearchHITs().
    #
    # Returns a data.frame with one row per HIT: HITId and Valid (factor
    # 'TRUE'/'FALSE' indicating whether the API request succeeded).
    # Relies on the package-level SearchHITs() and request() helpers.
    operation <- "DisableHIT"
    if(!is.null(response.group)) {
        if(any(!response.group %in%
               c("Minimal", "HITQuestion", "HITDetail", "HITAssignmentSummary"))) {
            stop("ResponseGroup must be in c(Minimal,HITQuestion,HITDetail,HITAssignmentSummary)")
        }
    }
    # Reject the degenerate cases: no selector at all, or all three at once.
    if((is.null(hit) && is.null(hit.type) && is.null(annotation)) ||
       (!is.null(hit) && !is.null(hit.type) && !is.null(annotation))) {
        stop("Must provide 'hit' xor 'hit.type' xor 'annotation'")
    } else if(!is.null(hit)) {
        if(is.factor(hit))
            hit <- as.character(hit)
        hitlist <- hit
    } else if(!is.null(hit.type)) {
        if(is.factor(hit.type))
            hit.type <- as.character(hit.type)
        hitsearch <- SearchHITs(verbose = FALSE, return.qual.dataframe = FALSE, ...)
        hitlist <- hitsearch$HITs$HITId[hitsearch$HITs$HITTypeId %in% hit.type]
    } else if(!is.null(annotation)) {
        if(is.factor(annotation))
            annotation <- as.character(annotation)
        hitsearch <- SearchHITs(verbose = FALSE, return.qual.dataframe = FALSE, ...)
        hitlist <- hitsearch$HITs$HITId[hitsearch$HITs$RequesterAnnotation %in% annotation]
    }
    if(length(hitlist) == 0)
        stop("No HITs found for HITType")
    HITs <- setNames(data.frame(matrix(ncol = 2, nrow = length(hitlist))), c("HITId", "Valid"))
    for(i in seq_along(hitlist)) {
        GETiteration <- paste("&HITId=", hitlist[i], sep = "")
        if(!is.null(response.group)) {
            if(length(response.group) == 1) {
                GETiteration <- paste(GETiteration, "&ResponseGroup=",
                                      response.group, sep = "")
            } else {
                # BUG FIX: this inner loop previously reused 'i' as its index,
                # clobbering the outer HIT index so results were written to
                # the wrong rows and some HITs were skipped.
                for(j in seq_along(response.group)) {
                    GETiteration <- paste(GETiteration, "&ResponseGroup", j - 1,
                                          "=", response.group[j], sep = "")
                }
            }
        }
        request <- request(operation, GETparameters = GETiteration, ...)
        if(is.null(request$valid))
            return(request)
        if(is.null(response.group))
            request$ResponseGroup <- c("Minimal")
        else
            request$ResponseGroup <- response.group
        HITs[i, ] <- c(hitlist[i], request$valid)
        if(request$valid && verbose)
            message(i, ": HIT ", hitlist[i], " Disabled")
        else if(!request$valid && verbose)
            warning(i, ": Invalid Request for HIT ", hitlist[i])
    }
    HITs$Valid <- factor(HITs$Valid, levels = c('TRUE', 'FALSE'))
    return(HITs)
}
\name{NMES1988}
\alias{NMES1988}
\title{Demand for Medical Care in NMES 1988}
\description{
Cross-section data originating from the US National Medical Expenditure Survey (NMES)
conducted in 1987 and 1988. The NMES is based upon a representative, national
probability sample of the civilian non-institutionalized population and individuals
admitted to long-term care facilities during 1987. The data are a subsample of
individuals ages 66 and over all of whom are covered by Medicare
(a public insurance program providing substantial protection against health-care costs).
}
\usage{data("NMES1988")}
\format{
A data frame containing 4,406 observations on 19 variables.
\describe{
\item{visits}{Number of physician office visits.}
\item{nvisits}{Number of non-physician office visits.}
\item{ovisits}{Number of physician hospital outpatient visits.}
\item{novisits}{Number of non-physician hospital outpatient visits.}
\item{emergency}{Emergency room visits.}
\item{hospital}{Number of hospital stays.}
\item{health}{Factor indicating self-perceived health status, levels are
\code{"poor"}, \code{"average"} (reference category), \code{"excellent"}.}
\item{chronic}{Number of chronic conditions.}
\item{adl}{Factor indicating whether the individual has a condition that
limits activities of daily living (\code{"limited"}) or not (\code{"normal"}).}
\item{region}{Factor indicating region, levels are \code{northeast},
\code{midwest}, \code{west}, \code{other} (reference category).}
\item{age}{Age in years (divided by 10).}
\item{afam}{Factor. Is the individual African-American?}
\item{gender}{Factor indicating gender.}
\item{married}{Factor. Is the individual married?}
\item{school}{Number of years of education.}
\item{income}{Family income in USD 10,000.}
\item{employed}{Factor. Is the individual employed?}
\item{insurance}{Factor. Is the individual covered by private insurance?}
\item{medicaid}{Factor. Is the individual covered by Medicaid?}
}
}
\source{
Journal of Applied Econometrics Data Archive for Deb and Trivedi (1997).
\url{http://qed.econ.queensu.ca/jae/1997-v12.3/deb-trivedi/}
}
\references{
Cameron, A.C. and Trivedi, P.K. (1998). \emph{Regression Analysis of Count Data}.
Cambridge: Cambridge University Press.
Deb, P., and Trivedi, P.K. (1997). Demand for Medical Care by the
Elderly: A Finite Mixture Approach. \emph{Journal of Applied Econometrics},
\bold{12}, 313--336.
Zeileis, A., Kleiber, C., and Jackman, S. (2008). Regression Models
for Count Data in R. \emph{Journal of Statistical Software}, \bold{27}(8).
URL \url{http://www.jstatsoft.org/v27/i08/}.
}
\seealso{\code{\link{CameronTrivedi1998}}}
\examples{
## packages
library("MASS")
library("pscl")
## select variables for analysis
data("NMES1988")
nmes <- NMES1988[, c(1, 7:8, 13, 15, 18)]
## dependent variable
hist(nmes$visits, breaks = 0:(max(nmes$visits)+1) - 0.5)
plot(table(nmes$visits))
## convenience transformations for exploratory graphics
clog <- function(x) log(x + 0.5)
cfac <- function(x, breaks = NULL) {
if(is.null(breaks)) breaks <- unique(quantile(x, 0:10/10))
x <- cut(x, breaks, include.lowest = TRUE, right = FALSE)
levels(x) <- paste(breaks[-length(breaks)], ifelse(diff(breaks) > 1,
c(paste("-", breaks[-c(1, length(breaks))] - 1, sep = ""), "+"), ""), sep = "")
return(x)
}
## bivariate visualization
par(mfrow = c(3, 2))
plot(clog(visits) ~ health, data = nmes, varwidth = TRUE)
plot(clog(visits) ~ cfac(chronic), data = nmes)
plot(clog(visits) ~ insurance, data = nmes, varwidth = TRUE)
plot(clog(visits) ~ gender, data = nmes, varwidth = TRUE)
plot(cfac(visits, c(0:2, 4, 6, 10, 100)) ~ school, data = nmes, breaks = 9)
par(mfrow = c(1, 1))
## Poisson regression
nmes_pois <- glm(visits ~ ., data = nmes, family = poisson)
summary(nmes_pois)
## LM test for overdispersion
dispersiontest(nmes_pois)
dispersiontest(nmes_pois, trafo = 2)
## sandwich covariance matrix
coeftest(nmes_pois, vcov = sandwich)
## quasipoisson model
nmes_qpois <- glm(visits ~ ., data = nmes, family = quasipoisson)
## NegBin regression
nmes_nb <- glm.nb(visits ~ ., data = nmes)
## hurdle regression
nmes_hurdle <- hurdle(visits ~ . | chronic + insurance + school + gender,
data = nmes, dist = "negbin")
## zero-inflated regression model
nmes_zinb <- zeroinfl(visits ~ . | chronic + insurance + school + gender,
data = nmes, dist = "negbin")
## compare estimated coefficients
fm <- list("ML-Pois" = nmes_pois, "Quasi-Pois" = nmes_qpois, "NB" = nmes_nb,
"Hurdle-NB" = nmes_hurdle, "ZINB" = nmes_zinb)
round(sapply(fm, function(x) coef(x)[1:7]), digits = 3)
## associated standard errors
round(cbind("ML-Pois" = sqrt(diag(vcov(nmes_pois))),
"Adj-Pois" = sqrt(diag(sandwich(nmes_pois))),
sapply(fm[-1], function(x) sqrt(diag(vcov(x)))[1:7])),
digits = 3)
## log-likelihoods and number of estimated parameters
rbind(logLik = sapply(fm, function(x) round(logLik(x), digits = 0)),
Df = sapply(fm, function(x) attr(logLik(x), "df")))
## predicted number of zeros
round(c("Obs" = sum(nmes$visits < 1),
"ML-Pois" = sum(dpois(0, fitted(nmes_pois))),
"Adj-Pois" = NA,
"Quasi-Pois" = NA,
"NB" = sum(dnbinom(0, mu = fitted(nmes_nb), size = nmes_nb$theta)),
"NB-Hurdle" = sum(predict(nmes_hurdle, type = "prob")[,1]),
"ZINB" = sum(predict(nmes_zinb, type = "prob")[,1])))
## coefficients of zero-augmentation models
t(sapply(fm[4:5], function(x) round(x$coefficients$zero, digits = 3)))
}
\keyword{datasets}
|
/man/NMES1988.Rd
|
no_license
|
arubhardwaj/AER
|
R
| false
| false
| 5,508
|
rd
|
\name{NMES1988}
\alias{NMES1988}
\title{Demand for Medical Care in NMES 1988}
\description{
Cross-section data originating from the US National Medical Expenditure Survey (NMES)
conducted in 1987 and 1988. The NMES is based upon a representative, national
probability sample of the civilian non-institutionalized population and individuals
admitted to long-term care facilities during 1987. The data are a subsample of
individuals ages 66 and over all of whom are covered by Medicare
(a public insurance program providing substantial protection against health-care costs).
}
\usage{data("NMES1988")}
\format{
A data frame containing 4,406 observations on 19 variables.
\describe{
\item{visits}{Number of physician office visits.}
\item{nvisits}{Number of non-physician office visits.}
\item{ovisits}{Number of physician hospital outpatient visits.}
\item{novisits}{Number of non-physician hospital outpatient visits.}
\item{emergency}{Emergency room visits.}
\item{hospital}{Number of hospital stays.}
\item{health}{Factor indicating self-perceived health status, levels are
\code{"poor"}, \code{"average"} (reference category), \code{"excellent"}.}
\item{chronic}{Number of chronic conditions.}
\item{adl}{Factor indicating whether the individual has a condition that
limits activities of daily living (\code{"limited"}) or not (\code{"normal"}).}
\item{region}{Factor indicating region, levels are \code{northeast},
\code{midwest}, \code{west}, \code{other} (reference category).}
\item{age}{Age in years (divided by 10).}
\item{afam}{Factor. Is the individual African-American?}
\item{gender}{Factor indicating gender.}
\item{married}{Factor. Is the individual married?}
\item{school}{Number of years of education.}
\item{income}{Family income in USD 10,000.}
\item{employed}{Factor. Is the individual employed?}
\item{insurance}{Factor. Is the individual covered by private insurance?}
\item{medicaid}{Factor. Is the individual covered by Medicaid?}
}
}
\source{
Journal of Applied Econometrics Data Archive for Deb and Trivedi (1997).
\url{http://qed.econ.queensu.ca/jae/1997-v12.3/deb-trivedi/}
}
\references{
Cameron, A.C. and Trivedi, P.K. (1998). \emph{Regression Analysis of Count Data}.
Cambridge: Cambridge University Press.
Deb, P., and Trivedi, P.K. (1997). Demand for Medical Care by the
Elderly: A Finite Mixture Approach. \emph{Journal of Applied Econometrics},
\bold{12}, 313--336.
Zeileis, A., Kleiber, C., and Jackman, S. (2008). Regression Models
for Count Data in R. \emph{Journal of Statistical Software}, \bold{27}(8).
URL \url{http://www.jstatsoft.org/v27/i08/}.
}
\seealso{\code{\link{CameronTrivedi1998}}}
\examples{
## packages
library("MASS")
library("pscl")
## select variables for analysis
data("NMES1988")
nmes <- NMES1988[, c(1, 7:8, 13, 15, 18)]
## dependent variable
hist(nmes$visits, breaks = 0:(max(nmes$visits)+1) - 0.5)
plot(table(nmes$visits))
## convenience transformations for exploratory graphics
clog <- function(x) log(x + 0.5)
cfac <- function(x, breaks = NULL) {
if(is.null(breaks)) breaks <- unique(quantile(x, 0:10/10))
x <- cut(x, breaks, include.lowest = TRUE, right = FALSE)
levels(x) <- paste(breaks[-length(breaks)], ifelse(diff(breaks) > 1,
c(paste("-", breaks[-c(1, length(breaks))] - 1, sep = ""), "+"), ""), sep = "")
return(x)
}
## bivariate visualization
par(mfrow = c(3, 2))
plot(clog(visits) ~ health, data = nmes, varwidth = TRUE)
plot(clog(visits) ~ cfac(chronic), data = nmes)
plot(clog(visits) ~ insurance, data = nmes, varwidth = TRUE)
plot(clog(visits) ~ gender, data = nmes, varwidth = TRUE)
plot(cfac(visits, c(0:2, 4, 6, 10, 100)) ~ school, data = nmes, breaks = 9)
par(mfrow = c(1, 1))
## Poisson regression
nmes_pois <- glm(visits ~ ., data = nmes, family = poisson)
summary(nmes_pois)
## LM test for overdispersion
dispersiontest(nmes_pois)
dispersiontest(nmes_pois, trafo = 2)
## sandwich covariance matrix
coeftest(nmes_pois, vcov = sandwich)
## quasipoisson model
nmes_qpois <- glm(visits ~ ., data = nmes, family = quasipoisson)
## NegBin regression
nmes_nb <- glm.nb(visits ~ ., data = nmes)
## hurdle regression
nmes_hurdle <- hurdle(visits ~ . | chronic + insurance + school + gender,
data = nmes, dist = "negbin")
## zero-inflated regression model
nmes_zinb <- zeroinfl(visits ~ . | chronic + insurance + school + gender,
data = nmes, dist = "negbin")
## compare estimated coefficients
fm <- list("ML-Pois" = nmes_pois, "Quasi-Pois" = nmes_qpois, "NB" = nmes_nb,
"Hurdle-NB" = nmes_hurdle, "ZINB" = nmes_zinb)
round(sapply(fm, function(x) coef(x)[1:7]), digits = 3)
## associated standard errors
round(cbind("ML-Pois" = sqrt(diag(vcov(nmes_pois))),
"Adj-Pois" = sqrt(diag(sandwich(nmes_pois))),
sapply(fm[-1], function(x) sqrt(diag(vcov(x)))[1:7])),
digits = 3)
## log-likelihoods and number of estimated parameters
rbind(logLik = sapply(fm, function(x) round(logLik(x), digits = 0)),
Df = sapply(fm, function(x) attr(logLik(x), "df")))
## predicted number of zeros
round(c("Obs" = sum(nmes$visits < 1),
"ML-Pois" = sum(dpois(0, fitted(nmes_pois))),
"Adj-Pois" = NA,
"Quasi-Pois" = NA,
"NB" = sum(dnbinom(0, mu = fitted(nmes_nb), size = nmes_nb$theta)),
"NB-Hurdle" = sum(predict(nmes_hurdle, type = "prob")[,1]),
"ZINB" = sum(predict(nmes_zinb, type = "prob")[,1])))
## coefficients of zero-augmentation models
t(sapply(fm[4:5], function(x) round(x$coefficients$zero, digits = 3)))
}
\keyword{datasets}
|
# ~/github/dora/notebooks/AD-family.UM0463F/src/fimoUtils.R
#------------------------------------------------------------------------------------------------------------------------
library(RPostgreSQL)
library(GenomicRanges)
library(TReNA)
library(FimoClient)
library(RUnit)
library(BSgenome.Hsapiens.UCSC.hg38)
hg38 = BSgenome.Hsapiens.UCSC.hg38
library(SNPlocs.Hsapiens.dbSNP144.GRCh38)
dbSNP <- SNPlocs.Hsapiens.dbSNP144.GRCh38
#------------------------------------------------------------------------------------------------------------------------
# Lazy, idempotent setup: each guard only runs if the object is absent from
# the workspace, so the file can be re-sourced cheaply during development.
if(!exists("tbl.snps")){
   # SNPs of interest in hg38 coordinates; the bed "name" field is "rsid-wt-mut"
   # and is split into separate wt / mut / rsid columns below.
   tbl.snps <- read.table("~/github/dora/datasets/AD-family.UM0463F/snps59.hg38.bed", sep="\t", as.is=TRUE)
   colnames(tbl.snps) <- c("chrom", "start", "end", "name");
   tokens <- strsplit(tbl.snps$name, "-", fixed=TRUE)
   tbl.snps$wt <- unlist(lapply(tokens, "[", 2))
   tbl.snps$mut <- unlist(lapply(tokens, "[", 3))
   tbl.snps$rsid <- unlist(lapply(tokens, "[", 1))
}
if(!exists("fimo.service")){
   # Remote FIMO motif-matching service; the sanity query must match nothing.
   fimo.service <- FimoClient("whovian", 5558, quiet=TRUE)
   checkEquals(nrow(requestMatch(fimo.service, list(bogus='NNNNN'))), 0)
}
if(!exists("chrom.lengths")){
   # NOTE(review): chromosome lengths are taken from the hg19 TxDb although the
   # SNP table above is hg38 -- confirm this mismatch is intentional.
   library(TxDb.Hsapiens.UCSC.hg19.knownGene) # version 3.2.2
   tbl.seqInfo <- seqlevels(seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene))
   seqlengths(seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene))[paste("chr", c(1:22, "X", "Y"), sep="")]
   chrom.lengths <- as.list(seqlengths(seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene))[paste("chr", c(1:22), sep="")])
}
# Postgres connection to the GTF gene-annotation database.
db.gtf <- dbConnect(PostgreSQL(), user= "trena", password="trena", dbname="gtf", host="whovian")
tmp <- checkEquals(dbListTables(db.gtf), "hg38human")
if(!exists("tbl.geneInfo")){
   # All protein-coding gene records from the hg38 annotation table.
   query <- "select * from hg38human where moleculetype='gene' and gene_biotype='protein_coding'"
   tbl.geneInfo <- dbGetQuery(db.gtf, query)
   checkTrue(nrow(tbl.geneInfo) > 19000)
   checkTrue(ncol(tbl.geneInfo) > 25)
}
#------------------------------------------------------------------------------------------------------------------------
# Run the complete unit-test suite for this file, in order; the value of the
# final test is returned, matching the original behavior.
runTests <- function() {
   test.createWtAndMutSequences()
   test.doComparativeFimo()
   test.toBed9()
   test.intersectWithFootprints()
} # runTests
#------------------------------------------------------------------------------------------------------------------------
doComparativeFimo <- function(chrom, base, wt, mut, snpName, flank, quiet=TRUE)
{
   # Query the FIMO service for motif matches in two sequences centered on a
   # SNP: one carrying the reference ("wt") allele and one the mutant allele,
   # each embedded in 'flank' bases of hg38 context on either side.  The SNP
   # is then classified by whether motifs are lost and/or gained.
   #
   # Args:
   #   chrom, base : hg38 chromosome name and position of the SNP
   #   wt, mut     : single reference and alternate bases
   #   snpName     : identifier (e.g. rsid) attached to the result table
   #   flank       : number of context bases on each side of the SNP
   #   quiet       : suppress progress printing when TRUE
   #
   # Returns list(chrom, base, wt, mut, flank,
   #   status = one of "noMotif", "noChange", "loss", "gain", "lossAndGain",
   #   table  = the raw FIMO match data.frame, possibly empty).
   #wt.sequence <- getSequenceByLoc(dna.service, chrom, base-flank, base+flank)
   ref.sequence <- as.character(getSeq(hg38, chrom, base-flank, base+flank))
   # Splice each allele into the middle base of the retrieved context.
   wt.sequence <- paste(substr(ref.sequence, 1, flank), wt, substr(ref.sequence, flank+2, 1+(2*flank)), sep="")
   mut.sequence <- paste(substr(ref.sequence, 1, flank), mut, substr(ref.sequence, flank+2, 1+(2*flank)), sep="")
   if(!quiet){
      # printf is assumed to be defined elsewhere in the project (sprintf-style).
      printf("--- doComparativeFimo %s:%d %s/%s", chrom, base, wt, mut)
      printf("wt: %s", wt)
      printf("mut: %s", mut)
      printf("retrieved: %s %s-%s-%s", wt.sequence, substr(wt.sequence, 1, flank),
             substr(wt.sequence, flank+1, flank+1), substr(wt.sequence, flank+2, 1+(2*flank)))
      printf("left flank: %s", substr(wt.sequence, 1, flank))
      printf("right flank: %s", substr(wt.sequence, flank+1, nchar(wt.sequence)))
      printf("wt as expected: %s", substr(wt.sequence, flank+1, flank+1) == wt)
   }
   if(mut.sequence == wt.sequence) { # due to the flakey data in tbl.snps.level_1.RData
      # Mutant identical to reference: nothing to compare; return empty table.
      printf(" suspicious igap report at %s:%d - mutation same as reference", chrom, base)
      result <- data.frame()
   }
   else{
      # Submit both sequences in one request; results are distinguished by
      # sequence.name ("wt" vs "mut").
      query <- list(wt=wt.sequence, mut=mut.sequence)
      result <- requestMatch(fimo.service, query)
      result$X.pattern.name <- as.character(result$X.pattern.name)
      if(!quiet) print(result)
   }
   status <- "noMotif"
   if(nrow(result) == 0){
      if(!quiet) printf(" no motifs in wt or mut")
   }
   else{
      # Compare the motif sets found in the wt and mut sequences.
      wt.motifs <- unique(subset(result, sequence.name=="wt")$X.pattern.name)
      mut.motifs <- unique(subset(result, sequence.name=="mut")$X.pattern.name)
      novel.mut.motifs <- setdiff(mut.motifs, wt.motifs)
      lost.wt.motifs <- setdiff(wt.motifs, mut.motifs)
      if(!quiet){
         printf(" lost.wt.motifs: %s", paste(lost.wt.motifs, collapse=","))
         printf(" novel.mut.motifs: %s", paste(novel.mut.motifs, collapse=","))
      }
      # Exactly one of the four conditions below holds; status is set accordingly.
      if((length(lost.wt.motifs) == 0) & length(novel.mut.motifs) == 0)
         status <- "noChange"
      if((length(lost.wt.motifs) > 0) & length(novel.mut.motifs) > 0)
         status <- "lossAndGain"
      if((length(lost.wt.motifs) > 0) & length(novel.mut.motifs) == 0)
         status <- "loss"
      if((length(lost.wt.motifs) == 0) & length(novel.mut.motifs) > 0)
         status <- "gain"
   } # else: nrow > 0
   if(nrow(result) > 0)
      result$snpName <- snpName
   return(list(chrom=chrom, base=base, wt=wt, mut=mut, flank=flank, status=status, table=result))
} # doComparativeFimo
#------------------------------------------------------------------------------------------------------------------------
test.doComparativeFimo <- function(chrom, base, wt, mut)
{
   # Unit test for doComparativeFimo, driven by row 4 of the global tbl.snps.
   # NOTE(review): the chrom/base/wt/mut parameters are accepted but never
   # used -- all inputs come from tbl.snps[4,] via with().
   printf("--- test.doComparativeFimo")
   x <- with(tbl.snps[4,], doComparativeFimo(chrom, start, wt, mut, rsid, 10))
   checkEquals(sort(names(x)), c("base", "chrom", "flank", "mut", "status", "table", "wt"))
   checkEquals(x$chrom, "chr1")
   checkEquals(x$base, 167474522)
   checkEquals(x$flank, 10)
   checkEquals(x$mut, "G")
   checkEquals(x$wt, "A")
   checkTrue(is(x$table, "data.frame"))
   checkEquals(x$status, "gain")
} # test.doComparativeFimo
#------------------------------------------------------------------------------------------------------------------------
createWtAndMutSequences <- function(chrom, base, wt.base, mut.base, flank=7)
{
   # Build the reference-allele and mutant-allele sequences around a SNP:
   # fetch base +/- flank from hg38, then splice mut.base into the center.
   # (wt.base is accepted for symmetry but the reference sequence is taken
   # directly from the genome, so it is not consulted.)
   # Returns list(wt, mut, wtBase, mutBase) where the *Base entries are the
   # center characters actually present in each returned sequence.
   ref <- as.character(getSeq(hg38, chrom, base - flank, base + flank))
   left.context  <- substr(ref, 1, flank)
   right.context <- substr(ref, flank + 2, 1 + (2 * flank))
   mutated <- paste(left.context, mut.base, right.context, sep="")
   center <- flank + 1
   list(wt=ref,
        mut=mutated,
        wtBase=substr(ref, center, center),
        mutBase=substr(mutated, center, center))
} # createWtAndMutSequences
#------------------------------------------------------------------------------------------------------------------------
test.createWtAndMutSequences <- function(chrom, base, flank=7)
{
   # Check that wt/mut sequence construction splices the expected center base.
   # NOTE(review): the chrom/base/flank parameters are immediately shadowed by
   # locals read from tbl.snps, so the arguments are effectively unused.
   printf("--- test.createWtAndMutSequences")
   snp <- tbl.snps[30,]
   chrom <- snp$chrom
   base <- snp$start
   mut <- snp$mut
   wt <- snp$wt
   seqs <- createWtAndMutSequences(chrom, base, wt, mut, flank=3)
   #    | (marker: position of the spliced center base)
   checkEquals(seqs$wt, "TCAGGCC")
   checkEquals(seqs$mut, "TCAAGCC")
   checkEquals(seqs$wtBase, wt)
   checkEquals(seqs$mutBase, mut)
   seqs <- createWtAndMutSequences(chrom, base, wt, mut, flank=7)
   #        | (marker: position of the spliced center base)
   checkEquals(seqs$wt, "CAAATCAGGCCTCTG")
   checkEquals(seqs$mut, "CAAATCAAGCCTCTG")
   checkEquals(seqs$wtBase, wt)
   checkEquals(seqs$mutBase, mut)
   # Spot-check splicing across the first 30 SNPs in the table.
   for (r in 1:30){
      #printf(" ------ tbl.snps, row %d", r)
      wt <- tbl.snps$wt[r]
      mut <- tbl.snps$mut[r]
      seqs <- createWtAndMutSequences(tbl.snps$chrom[r], tbl.snps$start[r], wt, mut, flank=3)
      #checkEquals(seqs$wtBase, wt)
      checkEquals(seqs$mutBase, mut)
   } # for r
} # test.createWtAndMutSequences
#------------------------------------------------------------------------------------------------------------------------
# Translate a snp table (columns chrom, start, rsid, status) into the 9-column
# BED format; each snp becomes a 1-base feature whose itemRgb color encodes
# its comparative-fimo status.
toBed9 <- function(tbl)
{
   # itemRgb strings, keyed by status
   status.colors <- c(noMotif="220,220,220",    # gray
                      loss="255,0,0",           # red
                      gain="0,220,0",           # green
                      lossAndGain="255,153,0",  # orange
                      noChange="255,0,255")
   bed.start <- tbl$start - 1    # BED intervals are 0-based, half-open
   bed.end <- tbl$start
   # [[ extraction: an unrecognized status raises an error rather than NA
   bed.color <- vapply(tbl$status, function(s) status.colors[[s]], character(1), USE.NAMES=FALSE)
   data.frame(chrom=tbl$chrom,
              start=bed.start,
              end=bed.end,
              name=paste(tbl$rsid, tbl$status, sep="_"),
              score=1,
              strand=rep(".", nrow(tbl)),
              thickStart=bed.start,
              thickEnd=bed.end,
              color=bed.color,
              stringsAsFactors=FALSE)
} # toBed9
#------------------------------------------------------------------------------------------------------------------------
# Unit test for toBed9(): run live comparative fimo on the first 5 snps to
# obtain realistic status values, then check the shape and spot-check row 1.
test.toBed9 <- function()
{
   printf("--- test.toBed9")
   tbl.test <- tbl.snps[1:5,]
   status <- vector(mode="character", length=nrow(tbl.test))
   for(r in 1:nrow(tbl.test)){
      x <- doComparativeFimo(tbl.test$chrom[r], tbl.test$start[r], tbl.test$wt[r],
                             tbl.test$mut[r], tbl.test$rsid[r], 10)
      status[[r]] <- x$status
      }
   tbl.test$status <- unlist(status)
   tbl.b9 <- toBed9(tbl.test)
   checkEquals(dim(tbl.b9), c(nrow(tbl.test), 9))
   checkEquals(colnames(tbl.b9), c("chrom","start","end","name","score","strand","thickStart","thickEnd","color"))
   # spot check first row, 3 values.  The previous code passed the 2nd and 3rd
   # checkEquals calls as extra '...' arguments to with(), where they were
   # silently ignored and never evaluated; the braced block runs all three.
   with(tbl.b9[1,], {
      checkEquals(name, "rs145175987_loss")
      checkEquals(color, "255,0,0")
      checkEquals(start, 167461077)
      })
} # test.toBed9
#------------------------------------------------------------------------------------------------------------------------
# Run doComparativeFimo over every snp in the global tbl.snps, attach the
# resulting status to each row, convert to BED9 and write to 'outputFile'
# (tab-separated, no header, no quotes).  Side effect: writes the file.
assessAllSNPs.write.bed9 <- function(outputFile="UM0463_snps_assayed.bed")
{
   status <- vector(mode="character", length=nrow(tbl.snps))
   for(r in 1:nrow(tbl.snps)){
      printf("comparative fimo on snp %d", r)
      x <- doComparativeFimo(tbl.snps$chrom[r], tbl.snps$start[r], tbl.snps$wt[r],
                             tbl.snps$mut[r], tbl.snps$rsid[r], 10)   # flank of 10 bases
      status[[r]] <- x$status
      }
   tbl.snps$status <- unlist(status)   # modifies a local copy only; the global tbl.snps is unchanged
   tbl.b9 <- toBed9(tbl.snps)
   checkEquals(dim(tbl.b9), c(nrow(tbl.snps), 9))
   checkEquals(colnames(tbl.b9), c("chrom","start","end","name","score","strand","thickStart","thickEnd","color"))
   printf("writing sample file b9.bed (%d, %d)", nrow(tbl.b9), ncol(tbl.b9))
   write.table(tbl.b9, quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE, file=outputFile)
   # system("scp b9.bed pshannon@whovian:/local/httpd/vhosts/pshannon/annotations/UM0463_snps_assayed.bed")
} # assessAllSNPs.write.bed9
#------------------------------------------------------------------------------------------------------------------------
# Intersect assayed-snp locations (BED9) with footprint regions (BED4),
# optionally widening each footprint by 'shoulder' bases on both sides.
#
# Args:
#   fp.bed4.file         : footprint file, 4 tab-separated columns, no header
#   assayedSnps.bed9.file: snp file, 9 tab-separated columns, no header
#   outputFile           : if non-NULL, the intersected rows are also written
#                          there (tab-separated, no header, no quotes)
#   shoulder             : bases added to each side of every footprint
# Returns the intersected snp rows; invisibly when outputFile is given.
# (Previously the outputFile branch returned NULL, so callers that assigned
# the result while also writing a file silently captured nothing.)
intersectWithFootprints <- function(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=0)
{
   tbl.fp <- read.table(fp.bed4.file, sep="\t", header=FALSE, as.is=TRUE)
   colnames(tbl.fp) <- c("chrom", "start", "end", "score")
   tbl.snp <- read.table(assayedSnps.bed9.file, sep="\t", header=FALSE, as.is=TRUE)
   colnames(tbl.snp) <- c("chrom", "start", "end", "name", "score", "strand", "thickStart", "thickEnd", "color")
   gr1 <- with(tbl.snp, GRanges(seqnames=chrom, IRanges(start=start, end=end)))
   gr2 <- with(tbl.fp, GRanges(seqnames=chrom, IRanges(start=start-shoulder, end=end+shoulder)))
   tbl.overlaps <- as.data.frame(findOverlaps(gr1, gr2, type='any'))
   indices <- unique(tbl.overlaps$queryHits)
   printf("number of snps in fps: %d", length(indices))
   tbl.out <- tbl.snp[indices,]
   if(!is.null(outputFile)){
      write.table(tbl.out, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE, file=outputFile)
      return(invisible(tbl.out))
      }
   tbl.out
} # intersectWithFootprints
#------------------------------------------------------------------------------------------------------------------------
# Unit test for intersectWithFootprints() against two footprint files (brain,
# lymphoblast) at shoulders 0 and 10.  Requires that assessAllSNPs.write.bed9()
# has already produced the assayed-snp bed file.  The expected row counts and
# names appear to be golden values from a prior run -- TODO confirm.
test.intersectWithFootprints <- function()
{
   printf("--- test.intersectWithFootprints")
   assayedSnps.bed9.file <- "UM0463_snps_assayed.bed"
   checkTrue(file.exists(assayedSnps.bed9.file))
   # brain footprints, exact overlap only
   fp.bed4.file <- "lizBlue.brain.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 0;
   tbl.snpI.brain.0 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.brain.0), c(3, 9))
   checkEquals(tbl.snpI.brain.0$name, c("rs192547848_loss", "rs10918708_gain", "rs75653497_loss"))
   # lymphoblast footprints, exact overlap only
   fp.bed4.file <- "lizBlue.lymphoblast.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 0;
   tbl.snpI.lymphoblast.0 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.lymphoblast.0), c(4, 9))
   checkEquals(tbl.snpI.lymphoblast.0$name, c("rs192547848_loss", "rs61497249_noMotif", "rs75653497_loss", "rs10918754_noMotif"))
   # widening each footprint by 10 bases picks up additional nearby snps
   fp.bed4.file <- "lizBlue.brain.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 10;
   tbl.snpI.brain.10 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.brain.10), c(8, 9))
   checkEquals(tbl.snpI.brain.10$name, c("rs114187767_noMotif", "rs192547848_loss", "rs115514202_noMotif", "rs10918708_gain",
                                         "rs61497249_noMotif", "rs17477053_gain", "rs75653497_loss", "rs12745388_noMotif"))
   fp.bed4.file <- "lizBlue.lymphoblast.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 10;
   tbl.snpI.lymphoblast.10 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.lymphoblast.10), c(7, 9))
   checkEquals(tbl.snpI.lymphoblast.10$name, c("rs114187767_noMotif", "rs192547848_loss", "rs115514202_noMotif",
                                               "rs79796415_noMotif", "rs61497249_noMotif", "rs75653497_loss", "rs10918754_noMotif"))
} # test.intersectWithFootprints
#------------------------------------------------------------------------------------------------------------------------
# Run the full snp-assessment pipeline and write one BED9 file per
# tissue/shoulder combination; optionally scp all results to the web host.
# Fixes: 'filenames$all' referenced an undefined variable (filenames.out$all
# intended); 'assayedSnps.bed9.file' was used without ever being assigned;
# 'outFile' was a typo for 'outfile'.
createAllAssayedSnpBed9Files <- function(copyToWhovian=FALSE)
{
   filenames.in <- list(brain.fp.bed="lizBlue.brain.bed",
                        lymphoblast.fp.bed="lizBlue.lymphoblast.bed")
   checkTrue(all(file.exists(as.character(filenames.in))))
   filenames.out <- list(all="UM0463_snps_assayed.bed",
                         brain0="UM0463_brain_snps_0.bed",
                         lymphoblast0="UM0463_lymphoblast_snps_0.bed",
                         brain12="UM0463_brain_snps_12.bed",
                         lymphoblast12="UM0463_lymphoblast_snps_12.bed")
   # assess every snp once; this file is the input to all intersections below
   assayedSnps.bed9.file <- filenames.out$all
   assessAllSNPs.write.bed9(outputFile=assayedSnps.bed9.file)
   checkTrue(file.exists(assayedSnps.bed9.file))
   # each job: footprint file x shoulder width -> its own output file
   jobs <- list(list(fp=filenames.in$brain.fp.bed,       shoulder=0,  out=filenames.out$brain0),
                list(fp=filenames.in$lymphoblast.fp.bed, shoulder=0,  out=filenames.out$lymphoblast0),
                list(fp=filenames.in$brain.fp.bed,       shoulder=12, out=filenames.out$brain12),
                list(fp=filenames.in$lymphoblast.fp.bed, shoulder=12, out=filenames.out$lymphoblast12))
   for(job in jobs){
      intersectWithFootprints(job$fp, assayedSnps.bed9.file, outputFile=job$out, shoulder=job$shoulder)
      checkTrue(file.exists(job$out))
      } # for job
   if(copyToWhovian){
      for(f in as.character(filenames.out)){
         cmd <- sprintf("scp %s pshannon@whovian:/local/httpd/vhosts/pshannon/annotations/", f)
         printf("-- about to execute: %s", cmd)
         system(cmd)
         } # for f
      } # if copyToWhovian
} # createAllAssayedSnpBed9Files
#------------------------------------------------------------------------------------------------------------------------
#if(!interactive())
# runTests()
|
/datasets/AD-family.UM0463F/fimoUtils.R
|
no_license
|
PriceLab/dora
|
R
| false
| false
| 16,107
|
r
|
# ~/github/dora/notebooks/AD-family.UM0463F/src/fimoUtils.R
#------------------------------------------------------------------------------------------------------------------------
library(RPostgreSQL)
library(GenomicRanges)
library(TReNA)
library(FimoClient)
library(RUnit)
library(BSgenome.Hsapiens.UCSC.hg38)
hg38 = BSgenome.Hsapiens.UCSC.hg38
library(SNPlocs.Hsapiens.dbSNP144.GRCh38)
dbSNP <- SNPlocs.Hsapiens.dbSNP144.GRCh38
#------------------------------------------------------------------------------------------------------------------------
# --- One-time setup; each exists() guard makes re-sourcing this file cheap ---
if(!exists("tbl.snps")){
   # 59 assayed snps; the bed 'name' field packs "rsid-wt-mut", unpacked below
   tbl.snps <- read.table("~/github/dora/datasets/AD-family.UM0463F/snps59.hg38.bed", sep="\t", as.is=TRUE)
   colnames(tbl.snps) <- c("chrom", "start", "end", "name");
   tokens <- strsplit(tbl.snps$name, "-", fixed=TRUE)
   tbl.snps$wt <- unlist(lapply(tokens, "[", 2))
   tbl.snps$mut <- unlist(lapply(tokens, "[", 3))
   tbl.snps$rsid <- unlist(lapply(tokens, "[", 1))
   }
if(!exists("fimo.service")){
   # remote FIMO matcher; sanity check: a nonsense query must match nothing
   fimo.service <- FimoClient("whovian", 5558, quiet=TRUE)
   checkEquals(nrow(requestMatch(fimo.service, list(bogus='NNNNN'))), 0)
   }
if(!exists("chrom.lengths")){
   library(TxDb.Hsapiens.UCSC.hg19.knownGene) # version 3.2.2
   # NOTE(review): despite its name, tbl.seqInfo holds seqlevels (chromosome
   # names), and the following seqlengths expression's value is discarded --
   # confirm intent; only chrom.lengths is used downstream.
   tbl.seqInfo <- seqlevels(seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene))
   seqlengths(seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene))[paste("chr", c(1:22, "X", "Y"), sep="")]
   chrom.lengths <- as.list(seqlengths(seqinfo(TxDb.Hsapiens.UCSC.hg19.knownGene))[paste("chr", c(1:22), sep="")])
   }
# gtf annotation database on whovian; fail fast if the expected table is absent
db.gtf <- dbConnect(PostgreSQL(), user= "trena", password="trena", dbname="gtf", host="whovian")
tmp <- checkEquals(dbListTables(db.gtf), "hg38human")
if(!exists("tbl.geneInfo")){
   # all protein-coding genes, with plausibility checks on the row/column counts
   query <- "select * from hg38human where moleculetype='gene' and gene_biotype='protein_coding'"
   tbl.geneInfo <- dbGetQuery(db.gtf, query)
   checkTrue(nrow(tbl.geneInfo) > 19000)
   checkTrue(ncol(tbl.geneInfo) > 25)
   }
#------------------------------------------------------------------------------------------------------------------------
# Run every unit test in this file, in rough dependency order.
runTests <- function()
{
   test.createWtAndMutSequences()
   test.doComparativeFimo()
   test.toBed9()
   test.intersectWithFootprints()
} # runTests
#------------------------------------------------------------------------------------------------------------------------
# Run FIMO motif matching on the wild-type and mutant forms of the
# (2*flank + 1)-base sequence centered at chrom:base, and classify the snp's
# effect on motif content.
#
# Args:
#   chrom, base : snp location (hg38 coordinates)
#   wt, mut     : wild-type and mutant alleles (single bases)
#   snpName     : identifier (e.g. rsid) attached to the result table
#   flank       : bases of genomic context on each side of the snp
#   quiet       : if FALSE, print diagnostic detail
# Returns a list: chrom, base, wt, mut, flank,
#   status - "noMotif", "noChange", "loss", "gain" or "lossAndGain"
#   table  - raw FIMO match table (empty data.frame when wt == mut)
doComparativeFimo <- function(chrom, base, wt, mut, snpName, flank, quiet=TRUE)
{
   #wt.sequence <- getSequenceByLoc(dna.service, chrom, base-flank, base+flank)
   ref.sequence <- as.character(getSeq(hg38, chrom, base-flank, base+flank))
   # splice the reported wt and mut alleles into the central position; the
   # reference base itself is not assumed to equal the reported wt allele
   wt.sequence <- paste(substr(ref.sequence, 1, flank), wt, substr(ref.sequence, flank+2, 1+(2*flank)), sep="")
   mut.sequence <- paste(substr(ref.sequence, 1, flank), mut, substr(ref.sequence, flank+2, 1+(2*flank)), sep="")
   if(!quiet){
      printf("--- doComparativeFimo %s:%d %s/%s", chrom, base, wt, mut)
      printf("wt: %s", wt)
      printf("mut: %s", mut)
      printf("retrieved: %s %s-%s-%s", wt.sequence, substr(wt.sequence, 1, flank),
             substr(wt.sequence, flank+1, flank+1), substr(wt.sequence, flank+2, 1+(2*flank)))
      printf("left flank: %s", substr(wt.sequence, 1, flank))
      printf("right flank: %s", substr(wt.sequence, flank+1, nchar(wt.sequence)))
      printf("wt as expected: %s", substr(wt.sequence, flank+1, flank+1) == wt)
      }
   if(mut.sequence == wt.sequence) { # due to the flakey data in tbl.snps.level_1.RData
      printf(" suspicious igap report at %s:%d - mutation same as reference", chrom, base)
      result <- data.frame()
      }
   else{
      # one FIMO query carrying both sequences; matches come back tagged with
      # sequence.name "wt" or "mut"
      query <- list(wt=wt.sequence, mut=mut.sequence)
      result <- requestMatch(fimo.service, query)
      result$X.pattern.name <- as.character(result$X.pattern.name)
      if(!quiet) print(result)
      }
   status <- "noMotif"
   if(nrow(result) == 0){
      if(!quiet) printf(" no motifs in wt or mut")
      }
   else{
      # compare motif sets found in the two sequences to classify the effect
      wt.motifs <- unique(subset(result, sequence.name=="wt")$X.pattern.name)
      mut.motifs <- unique(subset(result, sequence.name=="mut")$X.pattern.name)
      novel.mut.motifs <- setdiff(mut.motifs, wt.motifs)
      lost.wt.motifs <- setdiff(wt.motifs, mut.motifs)
      if(!quiet){
         printf(" lost.wt.motifs: %s", paste(lost.wt.motifs, collapse=","))
         printf(" novel.mut.motifs: %s", paste(novel.mut.motifs, collapse=","))
         }
      if((length(lost.wt.motifs) == 0) & length(novel.mut.motifs) == 0)
         status <- "noChange"
      if((length(lost.wt.motifs) > 0) & length(novel.mut.motifs) > 0)
         status <- "lossAndGain"
      if((length(lost.wt.motifs) > 0) & length(novel.mut.motifs) == 0)
         status <- "loss"
      if((length(lost.wt.motifs) == 0) & length(novel.mut.motifs) > 0)
         status <- "gain"
      } # else: nrow > 0
   if(nrow(result) > 0)
      result$snpName <- snpName
   return(list(chrom=chrom, base=base, wt=wt, mut=mut, flank=flank, status=status, table=result))
} # doComparativeFimo
#------------------------------------------------------------------------------------------------------------------------
# Unit test for doComparativeFimo(), using snp 4 of the global tbl.snps
# (chr1:167474522 A>G), expected to create a novel motif ("gain").
# NOTE(review): the chrom/base/wt/mut parameters are unused; runTests() calls
# this with no arguments.
test.doComparativeFimo <- function(chrom, base, wt, mut)
{
   printf("--- test.doComparativeFimo")
   x <- with(tbl.snps[4,], doComparativeFimo(chrom, start, wt, mut, rsid, 10))
   checkEquals(sort(names(x)), c("base", "chrom", "flank", "mut", "status", "table", "wt"))
   checkEquals(x$chrom, "chr1")
   checkEquals(x$base, 167474522)
   checkEquals(x$flank, 10)
   checkEquals(x$mut, "G")
   checkEquals(x$wt, "A")
   checkTrue(is(x$table, "data.frame"))
   checkEquals(x$status, "gain")
} # test.doComparativeFimo
#------------------------------------------------------------------------------------------------------------------------
# Build the wild-type and mutant sequences around a snp: the hg38 reference
# supplies the wild-type; the mutant swaps the central base for mut.base.
# NOTE(review): the wt.base argument is never used -- the wild-type base is
# whatever hg38 carries at 'base'; callers compare via the returned wtBase.
# Returns a list: wt, mut (full sequences of length 2*flank+1) and
# wtBase, mutBase (the single central base of each).
createWtAndMutSequences <- function(chrom, base, wt.base, mut.base, flank=7)
{
   #wt.sequence <- getSequenceByLoc(dna.service, chrom, base-flank, base+flank)
   wt.sequence <- as.character(getSeq(hg38, chrom, base-flank, base+flank))
   # splice mut.base into the central position (flank+1)
   mut.sequence <- paste(substr(wt.sequence, 1, flank), mut.base, substr(wt.sequence, flank+2, 1+(2*flank)), sep="")
   retrieved.wtBase <- substr(wt.sequence, flank + 1, flank + 1)
   retrieved.mutBase <- substr(mut.sequence, flank + 1, flank + 1)
   list(wt=wt.sequence, mut=mut.sequence, wtBase=retrieved.wtBase, mutBase=retrieved.mutBase)
} # createWtAndMutSequences
#------------------------------------------------------------------------------------------------------------------------
# Unit test for createWtAndMutSequences(), driven by snp 30 of the global
# tbl.snps at two flank widths, then a mutant-base check over the first 30 snps.
# NOTE(review): the chrom/base/flank parameters are never used -- all values
# come from tbl.snps -- and runTests() calls this with no arguments; confirm
# whether the signature should be emptied.
test.createWtAndMutSequences <- function(chrom, base, flank=7)
{
   printf("--- test.createWtAndMutSequences")
   snp <- tbl.snps[30,]
   chrom <- snp$chrom
   base <- snp$start
   mut <- snp$mut
   wt <- snp$wt
   # flank=3: 7-base sequence, snp at position 4 (marked by the bar below)
   seqs <- createWtAndMutSequences(chrom, base, wt, mut, flank=3)
   #                      |
   checkEquals(seqs$wt,  "TCAGGCC")
   checkEquals(seqs$mut, "TCAAGCC")
   checkEquals(seqs$wtBase, wt)
   checkEquals(seqs$mutBase, mut)
   # flank=7: 15-base sequence, snp at position 8
   seqs <- createWtAndMutSequences(chrom, base, wt, mut, flank=7)
   #                          |
   checkEquals(seqs$wt,  "CAAATCAGGCCTCTG")
   checkEquals(seqs$mut, "CAAATCAAGCCTCTG")
   checkEquals(seqs$wtBase, wt)
   checkEquals(seqs$mutBase, mut)
   # the mutant base must always be spliced in correctly; the wt check is
   # disabled, presumably because some reported wt alleles differ from the
   # hg38 reference -- TODO confirm
   for (r in 1:30){
      wt <- tbl.snps$wt[r]
      mut <- tbl.snps$mut[r]
      seqs <- createWtAndMutSequences(tbl.snps$chrom[r], tbl.snps$start[r], wt, mut, flank=3)
      #checkEquals(seqs$wtBase, wt)
      checkEquals(seqs$mutBase, mut)
      } # for r
} # test.createWtAndMutSequences
#------------------------------------------------------------------------------------------------------------------------
# Convert snp rows (columns chrom, start, rsid, status) into a 9-column BED
# data.frame; the comparative-fimo status selects the itemRgb color.
toBed9 <- function(tbl)
{
   color.map <- list(noMotif="220,220,220",   # gray
                     loss="255,0,0",          # red
                     gain="0,220,0",          # green
                     lossAndGain="255,153,0", # orange
                     noChange="255,0,255")    # magenta
   # BED coordinates are 0-based, half-open: [start-1, start) covers the snp
   snp.start <- tbl$start - 1
   snp.end   <- tbl$start
   row.colors <- unlist(lapply(tbl$status, function(s) color.map[[s]]))
   data.frame(chrom=tbl$chrom,
              start=snp.start,
              end=snp.end,
              name=paste(tbl$rsid, tbl$status, sep="_"),
              score=1,
              strand=rep(".", nrow(tbl)),
              thickStart=snp.start,
              thickEnd=snp.end,
              color=row.colors,
              stringsAsFactors=FALSE)
} # toBed9
#------------------------------------------------------------------------------------------------------------------------
# Unit test for toBed9(): run live comparative fimo on the first 5 snps to
# obtain realistic status values, then check the shape and spot-check row 1.
test.toBed9 <- function()
{
   printf("--- test.toBed9")
   tbl.test <- tbl.snps[1:5,]
   status <- vector(mode="character", length=nrow(tbl.test))
   for(r in 1:nrow(tbl.test)){
      x <- doComparativeFimo(tbl.test$chrom[r], tbl.test$start[r], tbl.test$wt[r],
                             tbl.test$mut[r], tbl.test$rsid[r], 10)
      status[[r]] <- x$status
      }
   tbl.test$status <- unlist(status)
   tbl.b9 <- toBed9(tbl.test)
   checkEquals(dim(tbl.b9), c(nrow(tbl.test), 9))
   checkEquals(colnames(tbl.b9), c("chrom","start","end","name","score","strand","thickStart","thickEnd","color"))
   # spot check first row, 3 values.  The previous code passed the 2nd and 3rd
   # checkEquals calls as extra '...' arguments to with(), where they were
   # silently ignored and never evaluated; the braced block runs all three.
   with(tbl.b9[1,], {
      checkEquals(name, "rs145175987_loss")
      checkEquals(color, "255,0,0")
      checkEquals(start, 167461077)
      })
} # test.toBed9
#------------------------------------------------------------------------------------------------------------------------
# Run doComparativeFimo over every snp in the global tbl.snps, attach the
# resulting status to each row, convert to BED9 and write to 'outputFile'
# (tab-separated, no header, no quotes).  Side effect: writes the file.
assessAllSNPs.write.bed9 <- function(outputFile="UM0463_snps_assayed.bed")
{
   status <- vector(mode="character", length=nrow(tbl.snps))
   for(r in 1:nrow(tbl.snps)){
      printf("comparative fimo on snp %d", r)
      x <- doComparativeFimo(tbl.snps$chrom[r], tbl.snps$start[r], tbl.snps$wt[r],
                             tbl.snps$mut[r], tbl.snps$rsid[r], 10)   # flank of 10 bases
      status[[r]] <- x$status
      }
   tbl.snps$status <- unlist(status)   # modifies a local copy only; the global tbl.snps is unchanged
   tbl.b9 <- toBed9(tbl.snps)
   checkEquals(dim(tbl.b9), c(nrow(tbl.snps), 9))
   checkEquals(colnames(tbl.b9), c("chrom","start","end","name","score","strand","thickStart","thickEnd","color"))
   printf("writing sample file b9.bed (%d, %d)", nrow(tbl.b9), ncol(tbl.b9))
   write.table(tbl.b9, quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE, file=outputFile)
   # system("scp b9.bed pshannon@whovian:/local/httpd/vhosts/pshannon/annotations/UM0463_snps_assayed.bed")
} # assessAllSNPs.write.bed9
#------------------------------------------------------------------------------------------------------------------------
# Intersect assayed-snp locations (BED9) with footprint regions (BED4),
# optionally widening each footprint by 'shoulder' bases on both sides.
#
# Args:
#   fp.bed4.file         : footprint file, 4 tab-separated columns, no header
#   assayedSnps.bed9.file: snp file, 9 tab-separated columns, no header
#   outputFile           : if non-NULL, the intersected rows are also written
#                          there (tab-separated, no header, no quotes)
#   shoulder             : bases added to each side of every footprint
# Returns the intersected snp rows; invisibly when outputFile is given.
# (Previously the outputFile branch returned NULL, so callers that assigned
# the result while also writing a file silently captured nothing.)
intersectWithFootprints <- function(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=0)
{
   tbl.fp <- read.table(fp.bed4.file, sep="\t", header=FALSE, as.is=TRUE)
   colnames(tbl.fp) <- c("chrom", "start", "end", "score")
   tbl.snp <- read.table(assayedSnps.bed9.file, sep="\t", header=FALSE, as.is=TRUE)
   colnames(tbl.snp) <- c("chrom", "start", "end", "name", "score", "strand", "thickStart", "thickEnd", "color")
   gr1 <- with(tbl.snp, GRanges(seqnames=chrom, IRanges(start=start, end=end)))
   gr2 <- with(tbl.fp, GRanges(seqnames=chrom, IRanges(start=start-shoulder, end=end+shoulder)))
   tbl.overlaps <- as.data.frame(findOverlaps(gr1, gr2, type='any'))
   indices <- unique(tbl.overlaps$queryHits)
   printf("number of snps in fps: %d", length(indices))
   tbl.out <- tbl.snp[indices,]
   if(!is.null(outputFile)){
      write.table(tbl.out, sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE, file=outputFile)
      return(invisible(tbl.out))
      }
   tbl.out
} # intersectWithFootprints
#------------------------------------------------------------------------------------------------------------------------
# Unit test for intersectWithFootprints() against two footprint files (brain,
# lymphoblast) at shoulders 0 and 10.  Requires that assessAllSNPs.write.bed9()
# has already produced the assayed-snp bed file.  The expected row counts and
# names appear to be golden values from a prior run -- TODO confirm.
test.intersectWithFootprints <- function()
{
   printf("--- test.intersectWithFootprints")
   assayedSnps.bed9.file <- "UM0463_snps_assayed.bed"
   checkTrue(file.exists(assayedSnps.bed9.file))
   # brain footprints, exact overlap only
   fp.bed4.file <- "lizBlue.brain.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 0;
   tbl.snpI.brain.0 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.brain.0), c(3, 9))
   checkEquals(tbl.snpI.brain.0$name, c("rs192547848_loss", "rs10918708_gain", "rs75653497_loss"))
   # lymphoblast footprints, exact overlap only
   fp.bed4.file <- "lizBlue.lymphoblast.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 0;
   tbl.snpI.lymphoblast.0 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.lymphoblast.0), c(4, 9))
   checkEquals(tbl.snpI.lymphoblast.0$name, c("rs192547848_loss", "rs61497249_noMotif", "rs75653497_loss", "rs10918754_noMotif"))
   # widening each footprint by 10 bases picks up additional nearby snps
   fp.bed4.file <- "lizBlue.brain.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 10;
   tbl.snpI.brain.10 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.brain.10), c(8, 9))
   checkEquals(tbl.snpI.brain.10$name, c("rs114187767_noMotif", "rs192547848_loss", "rs115514202_noMotif", "rs10918708_gain",
                                         "rs61497249_noMotif", "rs17477053_gain", "rs75653497_loss", "rs12745388_noMotif"))
   fp.bed4.file <- "lizBlue.lymphoblast.bed";
   checkTrue(file.exists(fp.bed4.file))
   shoulder <- 10;
   tbl.snpI.lymphoblast.10 <- intersectWithFootprints(fp.bed4.file, assayedSnps.bed9.file, outputFile=NULL, shoulder=shoulder)
   checkEquals(dim(tbl.snpI.lymphoblast.10), c(7, 9))
   checkEquals(tbl.snpI.lymphoblast.10$name, c("rs114187767_noMotif", "rs192547848_loss", "rs115514202_noMotif",
                                               "rs79796415_noMotif", "rs61497249_noMotif", "rs75653497_loss", "rs10918754_noMotif"))
} # test.intersectWithFootprints
#------------------------------------------------------------------------------------------------------------------------
# Run the full snp-assessment pipeline and write one BED9 file per
# tissue/shoulder combination; optionally scp all results to the web host.
# Fixes: 'filenames$all' referenced an undefined variable (filenames.out$all
# intended); 'assayedSnps.bed9.file' was used without ever being assigned;
# 'outFile' was a typo for 'outfile'.
createAllAssayedSnpBed9Files <- function(copyToWhovian=FALSE)
{
   filenames.in <- list(brain.fp.bed="lizBlue.brain.bed",
                        lymphoblast.fp.bed="lizBlue.lymphoblast.bed")
   checkTrue(all(file.exists(as.character(filenames.in))))
   filenames.out <- list(all="UM0463_snps_assayed.bed",
                         brain0="UM0463_brain_snps_0.bed",
                         lymphoblast0="UM0463_lymphoblast_snps_0.bed",
                         brain12="UM0463_brain_snps_12.bed",
                         lymphoblast12="UM0463_lymphoblast_snps_12.bed")
   # assess every snp once; this file is the input to all intersections below
   assayedSnps.bed9.file <- filenames.out$all
   assessAllSNPs.write.bed9(outputFile=assayedSnps.bed9.file)
   checkTrue(file.exists(assayedSnps.bed9.file))
   # each job: footprint file x shoulder width -> its own output file
   jobs <- list(list(fp=filenames.in$brain.fp.bed,       shoulder=0,  out=filenames.out$brain0),
                list(fp=filenames.in$lymphoblast.fp.bed, shoulder=0,  out=filenames.out$lymphoblast0),
                list(fp=filenames.in$brain.fp.bed,       shoulder=12, out=filenames.out$brain12),
                list(fp=filenames.in$lymphoblast.fp.bed, shoulder=12, out=filenames.out$lymphoblast12))
   for(job in jobs){
      intersectWithFootprints(job$fp, assayedSnps.bed9.file, outputFile=job$out, shoulder=job$shoulder)
      checkTrue(file.exists(job$out))
      } # for job
   if(copyToWhovian){
      for(f in as.character(filenames.out)){
         cmd <- sprintf("scp %s pshannon@whovian:/local/httpd/vhosts/pshannon/annotations/", f)
         printf("-- about to execute: %s", cmd)
         system(cmd)
         } # for f
      } # if copyToWhovian
} # createAllAssayedSnpBed9Files
#------------------------------------------------------------------------------------------------------------------------
#if(!interactive())
# runTests()
|
#' Conformal Grid
#'
#' Given a polygon, find the orthogonal grid from a square projected into the
#' polygon.
#'
#' Given a orthogonal grid in (u, v) space, \code{conformal_grid} finds and
#' allows one to plot the mapped grid into the polygon in (x, y) space. The (u,
#' v) space is a square with vertices at (-1, -1), (1, -1), (1, 1), and (-1, 1).
#'
#' @param x a \code{sccm_pg} or \code{sccm_ch} object
#' @param ubreaks,vbreaks Either an integer value or numeric vector for the
#' location of the grid lines. If an integer value is given,
#' then the dimension will have equidistant grid lines. Else, specify specific
#' values in the u and/or v space between -1, and 1.
#' @param n number of data points to use on each margin of the grid.
#'
#' @return a \code{sccm_cg} object. A list with two elements, the \code{mapping}
#' is the result of a call to \code{\link{p2p}}, and \code{plotting_data}, a
#' \code{data.frame} which would be easy to use if you want to plot the
#' result yourself instead of using the \code{plot.sccm_cg} method.
#'
#' @example examples/conformal_grid.R
#'
#' @export
conformal_grid <- function(x, ubreaks = 7, vbreaks = 7, n = 100) {
  # S3 generic: dispatches on class(x); see conformal_grid.sccm_pg for the
  # polygon implementation.
  UseMethod("conformal_grid")
}
#' @export
conformal_grid.sccm_pg <- function(x, ubreaks = 7, vbreaks = 7, n = 100) {
  # Validate/expand the u breaks: a vector must lie strictly inside (-1, 1);
  # a single positive count is expanded to that many equidistant interior lines.
  if (length(ubreaks) > 1) {
    ubrksok <- all(ubreaks > -1 & ubreaks < 1)
  } else {
    ubrksok <- ubreaks > 0
    ubreaks <- -1 + 2 * seq(1, ubreaks, by = 1) / (ubreaks + 1)
  }
  # Same treatment for the v breaks.
  if (length(vbreaks) > 1) {
    vbrksok <- all(vbreaks > -1 & vbreaks < 1)
  } else {
    vbrksok <- vbreaks > 0
    vbreaks <- -1 + 2 * seq(1, vbreaks, by = 1) / (vbreaks + 1)
  }
  if (!all(c(ubrksok, vbrksok))) {
    stop("ubreaks or vbreaks are nonsensical.")
  }
  # Sample each grid line with n points; 0.999 keeps endpoints just inside the
  # square's boundary.  The deprecated dplyr mutate_()/.dots idiom is replaced
  # with plain base-R column assignment (and partial-matched 'length=' with
  # 'length.out='); column order (u, v, grp) is unchanged.
  u_lines <- expand.grid(u = ubreaks, v = seq(-0.999, 0.999, length.out = n))
  u_lines$grp <- paste("u", u_lines$u)
  v_lines <- expand.grid(u = seq(-0.999, 0.999, length.out = n), v = vbreaks)
  v_lines$grp <- paste("v", v_lines$v)
  .data <- rbind(u_lines, v_lines)
  # Map the (u, v) points from the canonical square into the target polygon.
  mapping <- p2p(.data[, 1:2],
                 pg1 = polygon(c(-1, 1, 1, -1), c(-1, -1, 1, 1)),
                 pg2 = x)
  plotting_data <- .data
  plotting_data$x  <- mapping$mapped[, 1]
  plotting_data$y  <- mapping$mapped[, 2]
  plotting_data$dx <- mapping$disked[, 1]
  plotting_data$dy <- mapping$disked[, 2]
  out <- list(mapping = mapping, plotting_data = plotting_data)
  class(out) <- c("sccm_cg", class(out))
  out
}
#' @export
plot.sccm_cg <- function(x, ...) {
  # Draw the target polygon, then trace every mapped grid line over it.
  plot(x$mapping$pg2, ...)
  pd <- x$plotting_data
  for (line.id in unique(pd$grp)) {
    keep <- pd$grp == line.id
    lines(pd$x[keep], pd$y[keep])
  }
}
|
/R/conformal_grid.R
|
no_license
|
dewittpe/sccm
|
R
| false
| false
| 2,848
|
r
|
#' Conformal Grid
#'
#' Given a polygon, find the orthogonal grid from a square projected into the
#' polygon.
#'
#' Given an orthogonal grid in (u, v) space, \code{conformal_grid} finds and
#' allows one to plot the mapped grid into the polygon in (x, y) space. The (u,
#' v) space is a square with vertices at (-1, -1), (1, -1), (1, 1), and (-1, 1).
#'
#' @param x a \code{sccm_pg} or \code{sccm_ch} object
#' @param ubreaks,vbreaks Either an integer value or numeric vector for the
#' location of the grid lines. If an integer value is given,
#' then the dimension will have equidistant grid lines. Else, specify specific
#' values in the u and/or v space between -1, and 1.
#' @param n number of data points to use on each margin of the grid.
#'
#' @return a \code{sccm_cg} object. A list with two elements, the \code{mapping}
#' is the result of a call to \code{\link{p2p}}, and \code{plotting_data}, a
#' \code{data.frame} which would be easy to use if you want to plot the
#' result yourself instead of using the \code{plot.sccm_cg} method.
#'
#' @example examples/conformal_grid.R
#'
#' @export
conformal_grid <- function(x, ubreaks = 7, vbreaks = 7, n = 100) {
  # S3 generic: dispatches on class(x); see conformal_grid.sccm_pg for the
  # polygon implementation.
  UseMethod("conformal_grid")
}
#' @export
conformal_grid.sccm_pg <- function(x, ubreaks = 7, vbreaks = 7, n = 100) {
  # Validate/expand the u breaks: a vector must lie strictly inside (-1, 1);
  # a single positive count is expanded to that many equidistant interior lines.
  if (length(ubreaks) > 1) {
    ubrksok <- all(ubreaks > -1 & ubreaks < 1)
  } else {
    ubrksok <- ubreaks > 0
    ubreaks <- -1 + 2 * seq(1, ubreaks, by = 1) / (ubreaks + 1)
  }
  # Same treatment for the v breaks.
  if (length(vbreaks) > 1) {
    vbrksok <- all(vbreaks > -1 & vbreaks < 1)
  } else {
    vbrksok <- vbreaks > 0
    vbreaks <- -1 + 2 * seq(1, vbreaks, by = 1) / (vbreaks + 1)
  }
  if (!all(c(ubrksok, vbrksok))) {
    stop("ubreaks or vbreaks are nonsensical.")
  }
  # Sample each grid line with n points; 0.999 keeps endpoints just inside the
  # square's boundary.  The deprecated dplyr mutate_()/.dots idiom is replaced
  # with plain base-R column assignment (and partial-matched 'length=' with
  # 'length.out='); column order (u, v, grp) is unchanged.
  u_lines <- expand.grid(u = ubreaks, v = seq(-0.999, 0.999, length.out = n))
  u_lines$grp <- paste("u", u_lines$u)
  v_lines <- expand.grid(u = seq(-0.999, 0.999, length.out = n), v = vbreaks)
  v_lines$grp <- paste("v", v_lines$v)
  .data <- rbind(u_lines, v_lines)
  # Map the (u, v) points from the canonical square into the target polygon.
  mapping <- p2p(.data[, 1:2],
                 pg1 = polygon(c(-1, 1, 1, -1), c(-1, -1, 1, 1)),
                 pg2 = x)
  plotting_data <- .data
  plotting_data$x  <- mapping$mapped[, 1]
  plotting_data$y  <- mapping$mapped[, 2]
  plotting_data$dx <- mapping$disked[, 1]
  plotting_data$dy <- mapping$disked[, 2]
  out <- list(mapping = mapping, plotting_data = plotting_data)
  class(out) <- c("sccm_cg", class(out))
  out
}
#' @export
plot.sccm_cg <- function(x, ...) {
  # Draw the target polygon, then overlay each mapped grid line; 'grp'
  # identifies the points belonging to one u- or v-line.
  plot(x$mapping$pg2, ...)
  for (grp in unique(x$plotting_data$grp)) {
    lines(x$plotting_data$x[x$plotting_data$grp == grp], x$plotting_data$y[x$plotting_data$grp == grp])
  }
}
|
# plot4.R
# Read household power consumption data, keep 2007-02-01 and 2007-02-02,
# and write a 2x2 panel of time-series plots to plot4.png.
dat=read.csv("./data/household_power_consumption.txt",as.is=T,
             sep=";",na.strings = "?")
dat=dat[dat$Date %in% c("1/2/2007","2/2/2007"),]
# combine the Date and Time columns into a single POSIXlt timestamp (waktu)
dat$waktu=strptime(paste(dat$Date,dat$Time),"%d/%m/%Y %H:%M:%S")
# English weekday abbreviations on the time axes, regardless of system locale
Sys.setlocale("LC_TIME","English")
## Open PNG device; create 'plot4.png' in working directory
png(file = "plot4.png")
op <- par(mfrow = c(2, 2))   # 2x2 grid of panels, filled row by row
plot(dat$waktu,dat$Global_active_power,type="l",
     xlab = NA,
     ylab = "Global Active Power")
plot(dat$waktu,dat$Voltage,type="l",
     xlab = "datetime",
     ylab = "Voltage")
# third panel: all three sub-metering series overlaid
plot(dat$waktu,dat$Sub_metering_1,type="l",
     xlab = NA,
     ylab = "Energy sub metering")
lines(dat$waktu,dat$Sub_metering_2,col="red")
lines(dat$waktu,dat$Sub_metering_3,col="blue")
text_legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
legend("topright", legend = text_legend,lwd=1,bty="n", col = c("black","red","blue"))
plot(dat$waktu,dat$Global_reactive_power,type="l",
     xlab = "datetime",
     ylab = "Global_reactive_power")
par(op)   # restore previous plotting parameters
dev.off() ## Close the device
|
/plot4.R
|
no_license
|
mfarkhann/ExData_Plotting1
|
R
| false
| false
| 1,175
|
r
|
# plot4.R
# Read household power consumption data, keep 2007-02-01 and 2007-02-02,
# and write a 2x2 panel of time-series plots to plot4.png.
dat=read.csv("./data/household_power_consumption.txt",as.is=T,
             sep=";",na.strings = "?")
dat=dat[dat$Date %in% c("1/2/2007","2/2/2007"),]
# combine the Date and Time columns into a single POSIXlt timestamp (waktu)
dat$waktu=strptime(paste(dat$Date,dat$Time),"%d/%m/%Y %H:%M:%S")
# English weekday abbreviations on the time axes, regardless of system locale
Sys.setlocale("LC_TIME","English")
## Open PNG device; create 'plot4.png' in working directory
png(file = "plot4.png")
op <- par(mfrow = c(2, 2))   # 2x2 grid of panels, filled row by row
plot(dat$waktu,dat$Global_active_power,type="l",
     xlab = NA,
     ylab = "Global Active Power")
plot(dat$waktu,dat$Voltage,type="l",
     xlab = "datetime",
     ylab = "Voltage")
# third panel: all three sub-metering series overlaid
plot(dat$waktu,dat$Sub_metering_1,type="l",
     xlab = NA,
     ylab = "Energy sub metering")
lines(dat$waktu,dat$Sub_metering_2,col="red")
lines(dat$waktu,dat$Sub_metering_3,col="blue")
text_legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
legend("topright", legend = text_legend,lwd=1,bty="n", col = c("black","red","blue"))
plot(dat$waktu,dat$Global_reactive_power,type="l",
     xlab = "datetime",
     ylab = "Global_reactive_power")
par(op)   # restore previous plotting parameters
dev.off() ## Close the device
|
library(randomForest)
library(caret)
library(aws.s3)
library(readr)   # read_csv()/cols()/col_double() below were called without readr attached
library(dplyr)   # %>% is used unqualified throughout this script

# Fund-level OIP ratings; only the fund id and its rating are kept.
FundRatings <- read_csv("~/R_workspaces/AI_Targetting/FundRatings.csv",
                        col_types = cols(AverageMarketValueRating = col_double(),
                                         NumberPositions = col_double(), OIPRating = col_double(),
                                         OIPRatingOffset = col_double(), StyleRating = col_double(),
                                         TotalMarketValueRating = col_double(),TurnoverRating = col_double()
                                         )
                        ) %>% dplyr::select(Fund_ID = FactSetFundId, FundRating = OIPRating)
# Replace NA cells in 'data.df' with fallback values taken from 'value.df':
# row j of value.df supplies the fill value (its 2nd column) for column j of
# data.df.  Correspondence is positional; value.df's 1st column carries the
# variable name but is not consulted.
#
# Args:
#   data.df  : data.frame possibly containing NAs
#   value.df : data.frame with one row per column of data.df; column 2 holds
#              the fill value (typically the training-set mean)
# Returns a list:
#   data  - data.df with NAs replaced
#   Count - one-column matrix: per-row count of cells that were filled
#
# Fixes: '1:NCOL(...)' replaced with seq_len() (safe for zero columns); the
# previously unbraced 'if' made the fill assignment look conditional when it
# always ran (harmlessly, as a no-op on an empty index) -- now both statements
# are explicitly inside the guard.
fill_missing_if_any <- function(data.df, value.df){
   count <- matrix(0, nrow = NROW(data.df), ncol = 1)
   for(j in seq_len(NCOL(data.df))) {
      indx <- which(is.na(data.df[, j]))
      if(length(indx) > 0) {
         count[indx, 1] <- count[indx, 1] + 1
         data.df[indx, j] <- value.df[j, 2]
      }
   }
   list(data = data.df, Count = count)
}
# Score one ticker against one fund's fitted random-forest model, reporting
# the predicted class and the fraction of trees voting for it.
#
# Args:
#   ticker        : symbol to score; matched against Indicator_Data$Symbol
#   model_path    : directory prefix for local model files (ignored when
#                   Models_on_S3 is TRUE)
#   fund          : model key -- "<modeling-dir>/<fundId>.txt" on S3, or the
#                   file stem under model_path locally
#   Models_on_S3  : read the serialized caret model from S3 instead of disk
#   bucket        : S3 bucket name (used only when Models_on_S3 is TRUE)
#   Indicator_Data: data.frame of per-symbol indicator variables
# Returns a one-row data.frame: prediction, vote percentage, and bookkeeping
# fields (training size, count of imputed values, etc.).
my_pred <- function(ticker, model_path, fund, Models_on_S3 = FALSE, bucket = NULL, Indicator_Data) {
   if(Models_on_S3 == TRUE) {
      fname <- fund
      mm <- unserialize(get_object(fname, bucket = bucket))
   } else {
      fname <- paste0(model_path, fund, ".txt")
      mm <- unserialize(charToRaw(readChar(fname, file.info(fname)$size)))
   }
   # predictor names: every trainingData column except the outcome (last)
   varNames <- names(mm$trainingData)[1:(NCOL(mm$trainingData) -1)]
   # per-variable training-set means, used to impute missing indicator values
   mm_avg <- data.frame(colMeans(mm$trainingData[, 1:(NCOL(mm$trainingData) -1)], na.rm = TRUE)) %>%
      tibble::rownames_to_column(var = "VarNames")
   data4model0 <- Indicator_Data %>% dplyr::filter(Symbol == ticker) %>% dplyr::select_(.dots = varNames)
   # Filling missing data if there is any.
   data4model <- fill_missing_if_any(data.df = data4model0, value.df = mm_avg)
   newX <- data4model$data
   # if(any(is.na(newX))) {
   #    missingValue <- TRUE
   #    numMissingValue <- length(which(is.na(newX)))
   #    # Filling in missing values by the average of the variable in the training set of the model
   #    avg <- colMeans(mm$trainingData[, 1:(NCOL(mm$trainingData) -1)], na.rm = TRUE)
   #    newX[, which(is.na(newX))] <- avg[which(is.na(newX))]
   # } else {
   #    missingValue <- FALSE
   #    numMissingValue <- 0
   # }
   # predict.all=TRUE also returns each individual tree's vote
   pred <- predict(mm$finalModel, newdata = newX ,predict.all=TRUE)
   votes <- apply(pred$individual,1,function(x){table(x)})
   ntrees <- mm$finalModel$ntree
   # apply() returns a list when the per-row tables have unequal lengths
   # (apparently when some row saw votes for only one class -- TODO confirm);
   # normalize to a 2 x n FALSE/TRUE matrix of vote fractions
   if(is.list(votes)){
      out<-c()
      for (i in 1:2){
         out<-cbind(out,sapply(votes,function(x) x[i]))
      }
      out[is.na(out)] <- 0
      rownames(out) <- NULL
      colnames(out) <- c("FALSE", "TRUE")
      VotesPerc <- t(out)/ntrees
   } else VotesPerc <- votes/ntrees
   # fund key is "<modeling-dir>/<fundId>.txt"; the dir is "<n>_<modeling>"
   fund_tmp <- strsplit(fund, split = "/", fixed = TRUE) %>% unlist()
   fund_id <- gsub(x = fund_tmp[2], pattern = ".txt", replacement = "", fixed = TRUE)
   modeling <- strsplit(fund_tmp[1], split = "_", fixed = TRUE)[[1]][2]
   res <- data.frame(model = fund,
                     modeling = modeling,
                     list_votes_debug = is.list(votes),
                     Fund_ID = fund_id,
                     N_Training = NROW(mm$trainingData),
                     MissingValues = data4model$Count,
                     NumVar = NROW(mm_avg),
                     Symbol = ticker,
                     ClassPred = pred$aggregate,
                     VotesPerc_false = t(VotesPerc)[, 1],
                     VotesPerc_true = t(VotesPerc)[, 2],
                     stringsAsFactors = FALSE) %>%
      dplyr::mutate(VotePerc = ifelse(ClassPred %in% c("TRUE", "Sell"),VotesPerc_true, VotesPerc_false )) %>%
      dplyr::select(-VotesPerc_false, -VotesPerc_true)
   return(res)
}
# Load one fitted caret model (from S3 or local disk) and return its variable
# importance as a data.frame with columns VariableName, Importance, FundId.
# Fix: removed a leftover browser() call that halted every invocation in an
# interactive debugger.
var_imp <- function(model_path, fund, Models_on_S3 = FALSE, bucket = NULL) {
   if(Models_on_S3 == TRUE) {
      fname <- fund
      mm <- unserialize(get_object(fname, bucket = bucket))
   } else {
      fname <- paste0(model_path, fund, ".txt")
      mm <- unserialize(charToRaw(readChar(fname, file.info(fname)$size)))
   }
   res_tmp <- caret::varImp(mm)
   # fund keys look like "<modeling-dir>/<fundId>.txt"; recover the fund id
   fund_tmp <- strsplit(fund, split = "/", fixed = TRUE) %>% unlist()
   fund_id <- gsub(x = fund_tmp[2], pattern = ".txt", replacement = "", fixed = TRUE)
   res <- res_tmp$importance %>% tibble::rownames_to_column(var = "VariableName") %>% dplyr::mutate(FundId = fund_id)
   names(res)[2] <- "Importance"
   return(res)
}
# --- Enumerate all model objects stored in the S3 bucket --------------------
# get_bucket() returns at most 1000 keys per request, so page through the
# listing using the last key seen as the 'marker' for the next request.
# NOTE(review): the first clause of the while condition is redundant -- any
# count equal to 1000 is necessarily divisible by 1000.
S3bucket <- get_bucket(bucket = 'q4quant-ai-targeting-models')
files_list <- NULL
files_list_tmp <- data.frame(FileName = sapply(S3bucket, FUN = function(x){x$Key}), stringsAsFactors = FALSE)
while((NROW(files_list_tmp) %% 1000 == 0) && NROW(files_list_tmp) == 1000) {
   files_list <- rbind(files_list, files_list_tmp)
   S3bucket_tmp <- get_bucket(bucket = 'q4quant-ai-targeting-models', marker = files_list$FileName[NROW(files_list)])
   files_list_tmp <- data.frame(FileName = sapply(S3bucket_tmp, FUN = function(x){x$Key}), stringsAsFactors = FALSE)
}
files_list <- rbind(files_list, files_list_tmp)   # append the final partial page
rm(files_list_tmp)
# Partition the keys into the three model families; drop the bare directory keys.
model_entry <- files_list %>% dplyr::filter(grepl(pattern = "1_EntryPosition/", x = FileName), FileName != "1_EntryPosition/")
model_change <- files_list %>% dplyr::filter(grepl(pattern = "2_ChangePosition/", x = FileName), FileName != "2_ChangePosition/")
model_exit <- files_list %>% dplyr::filter(grepl(pattern = "3_ExitPosition/", x = FileName), FileName != "3_ExitPosition/")
#modelPath <- list(EntryModelPath, ChangeModelPath, ExitModelPath)
models <- list(model_entry, model_change, model_exit)
# Determining Variable Importance for the Fitted Models
res_varImp <- vector(mode = "list", length = 3)
for(j in 1:3){
res_varImp[[j]] <- lapply(X = models[[j]]$FileName, FUN = var_imp, model_path = NULL, Models_on_S3 = TRUE,
bucket = "q4quant-ai-targeting-models") %>% dplyr::bind_rows()
}
res_varImp[[1]] <- res_varImp[[1]] %>% dplyr::arrange(FundId, desc(Importance))
res_varImp[[2]] <- res_varImp[[2]] %>% dplyr::arrange(FundId, desc(Importance))
res_varImp[[3]] <- res_varImp[[3]] %>% dplyr::arrange(FundId, desc(Importance))
# res_varImp_final <- vector(mode = "list", length = 3)
# for(j in 1:3){
# res_varImp_final[[j]] <- dplyr::arrange(res_varImp[[1]][[1]], desc(Importance))
# for(k in 1:length(res_varImp[[1]])){
# if(k %% 10 == 0) print(paste(j, k, sep = "-"))
# res_varImp_final[[j]] <- qpcR:::cbind.na(res_varImp_final[[j]],
# dplyr::arrange(res_varImp[[j]][[k]], desc(Importance)))
# }
# }
library(openxlsx)
# https://cran.r-project.org/web/packages/openxlsx/vignettes/Introduction.pdf
# Creating a workbook ----
wb_Results <- createWorkbook()
sheet_entry <- addWorksheet(wb_Results, sheetName = "Entry")
sheet_change <- addWorksheet(wb_Results, sheetName = "Change")
sheet_exit <- addWorksheet(wb_Results, sheetName = "Exit")
writeData(wb = wb_Results, x = as.data.frame(res_varImp[[1]]), sheet = sheet_entry, rowNames=FALSE)
writeData(wb = wb_Results, x = as.data.frame(res_varImp[[2]]), sheet = sheet_change, rowNames=FALSE)
writeData(wb = wb_Results, x = as.data.frame(res_varImp[[3]]), sheet = sheet_exit, rowNames=FALSE)
saveWorkbook(wb_Results, file = "Funds_VariableRanking.xlsx", overwrite = TRUE)
rm(sheet_entry, sheet_change, sheet_exit, wb_Results)
#### Getting Funds prediction for symbolList
# Score every fund model of each family against the ticker list.
res <- vector(mode = "list", length = 3)
for (j in 1:3) {
  # BUG FIX: the original assigned to `res[[i]]`, but the loop variable is
  # `j`; `i` was a stale leftover from another loop, so every iteration
  # wrote to whichever slot `i` happened to name (or errored if unset).
  res[[j]] <- lapply(X = models[[j]]$FileName, FUN = my_pred, ticker = SymbolList, model_path = NULL, Models_on_S3 = TRUE,
                     bucket = "q4quant-ai-targeting-models", Indicator_Data = Mrk_Data) %>% dplyr::bind_rows()
}
# NOTE(review): the next two lines look like leftover interactive debugging
# (they load one specific model into `mm`, which is never used afterwards);
# confirm whether they can be removed.
fname <- "./ModelResults/04B8FN-E.txt"
mm <- unserialize(charToRaw(readChar(fname, file.info(fname)$size)))
# Build the Entry-model recommendation table: normalize fund ids, join model
# statistics / fund names / ratings, then compute a 0-100 score per row.
# NOTE(review): `res_tmp` is not defined anywhere in this script --
# presumably it should be `res[[1]]` from the loop above; verify.
res[[1]] <- res_tmp %>% dplyr::mutate(Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID)) %>%
dplyr::left_join(supportiveData$GlobalModelStat[[1]], by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(supportiveData$FundName, by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(FundRatings, by = c("Fund_ID")) %>%
dplyr::select(-Pred_Accuracy) %>%
# Raw score = true-positive rate times the winning vote share, weighted by
# the fund rating; then shift/clip to the [Score_shift, p_97.5] band and
# rescale to 0-100.
dplyr::mutate(Score = TP * VotePerc,
Score_with_fund = round(Score * FundRating, digits = 0),
p_2.5 = quantile(Score_with_fund, probs = 0.025, na.rm = TRUE),
#Score_shift = p_2.5
Score_shift = 0) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp = max(Score_with_fund - Score_shift, 0, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(p_97.5 = quantile(Score_tmp, probs = 0.975, na.rm = TRUE)) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp2 = min(Score_tmp, p_97.5, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(Score_final = round(100 * Score_tmp2/max(Score_tmp2, na.rm = TRUE), digits = 0)) %>%
# dplyr::group_by(Symbol, ClassPred) %>%
# dplyr::mutate(ScalingMin = min(Score_with_fund, na.rm = TRUE),
# ScalingFactor = max(Score_with_fund - ScalingMin, na.rm = TRUE),
# Scaled_Score_with_fund = round(100 * (Score_with_fund - ScalingMin)/ScalingFactor, digits = 0)) %>%
# dplyr::ungroup() %>%
dplyr::arrange(Symbol, desc(ClassPred),desc(Score_final))
# Change-model table; same shape as above, but "Sell" predictions are scored
# with TP and "Buy"/other with TN.
# NOTE(review): `res_tmp2` is undefined here (likely `res[[2]]`), and the
# Fund_ID gsub mutation is specified twice with identical arguments -- the
# second application only matters for nested patterns like "-E-1-1"; confirm
# whether that double replacement is intentional.
res[[2]] <- res_tmp2 %>% dplyr::mutate(Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID),
Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID)) %>%
dplyr::left_join(supportiveData$GlobalModelStat[[2]], by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(supportiveData$FundName, by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(FundRatings, by = c("Fund_ID")) %>%
dplyr::select(-Pred_Accuracy) %>%
dplyr::mutate(Score = ifelse(ClassPred == "Sell", TP * VotePerc, TN * VotePerc),
Score_with_fund = round(Score * FundRating, digits = 0),
p_2.5 = quantile(Score_with_fund, probs = 0.025, na.rm = TRUE),
#Score_shift = p_2.5
Score_shift = 0) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp = max(Score_with_fund - Score_shift, 0, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(p_97.5 = quantile(Score_tmp, probs = 0.975, na.rm = TRUE)) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp2 = min(Score_tmp, p_97.5, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(Score_final = round(100 * Score_tmp2/max(Score_tmp2, na.rm = TRUE), digits = 0)) %>%
# dplyr::group_by(Symbol, ClassPred) %>%
# dplyr::mutate(ScalingMin = min(Score_with_fund, na.rm = TRUE),
# ScalingFactor = max(Score_with_fund - ScalingMin, na.rm = TRUE),
# Scaled_Score_with_fund = round(100 * (Score_with_fund - ScalingMin)/ScalingFactor, digits = 0)) %>%
# dplyr::ungroup() %>%
dplyr::arrange(Symbol, ClassPred, desc(Score_final))
# Exit-model table; identical scoring to the Entry table.
# NOTE(review): `res_tmp3` is undefined here (likely `res[[3]]`); same
# duplicated gsub mutation as above.
res[[3]] <- res_tmp3 %>% dplyr::mutate(Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID),
Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID)) %>%
dplyr::left_join(supportiveData$GlobalModelStat[[3]], by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(supportiveData$FundName, by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(FundRatings, by = c("Fund_ID")) %>%
dplyr::select(-Pred_Accuracy) %>%
dplyr::mutate(Score = TP * VotePerc,
Score_with_fund = round(Score * FundRating, digits = 0),
p_2.5 = quantile(Score_with_fund, probs = 0.025, na.rm = TRUE),
#Score_shift = p_2.5
Score_shift = 0) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp = max(Score_with_fund - Score_shift, 0, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(p_97.5 = quantile(Score_tmp, probs = 0.975, na.rm = TRUE)) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp2 = min(Score_tmp, p_97.5, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(Score_final = round(100 * Score_tmp2/max(Score_tmp2, na.rm = TRUE), digits = 0)) %>%
# dplyr::group_by(Symbol, ClassPred) %>%
# dplyr::mutate(ScalingMin = min(Score_with_fund, na.rm = TRUE),
# ScalingFactor = max(Score_with_fund - ScalingMin, na.rm = TRUE),
# Scaled_Score_with_fund = round(100 * (Score_with_fund - ScalingMin)/ScalingFactor, digits = 0)) %>%
# dplyr::ungroup() %>%
dplyr::arrange(Symbol, desc(ClassPred),desc(Score_final))
library(ggplot2)
# Histogram of the final score distribution for each model family.
# NOTE(review): the `..count..` pronoun is deprecated in ggplot2 >= 3.4;
# `after_stat(count)` is the modern equivalent.
p1 <- ggplot(res[[1]], aes(x = Score_final)) + geom_histogram(aes(y = (..count..)/sum(..count..)), binwidth = 5) +
scale_y_continuous(labels = scales::percent) +
labs(title="Histogram of Final Score for Entry Models", x ="Score final", y = "Frequency") +
ggthemes::theme_solarized(light = TRUE)
p1
p2 <- ggplot(res[[2]], aes(x = Score_final)) + geom_histogram(aes(y = (..count..)/sum(..count..)), binwidth = 5) +
scale_y_continuous(labels = scales::percent) +
labs(title="Histogram of Final Score for Change Models", x ="Score final", y = "Frequency") +
ggthemes::theme_solarized(light = TRUE)
p2
p3 <- ggplot(res[[3]], aes(x = Score_final)) + geom_histogram(aes(y = (..count..)/sum(..count..)), binwidth = 5) +
scale_y_continuous(labels = scales::percent) +
labs(title="Histogram of Final Score for Exit Models", x ="Score final", y = "Frequency") +
ggthemes::theme_solarized(light = TRUE)
p3
library(openxlsx)
# https://cran.r-project.org/web/packages/openxlsx/vignettes/Introduction.pdf
# Creating a workbook ----
# One data sheet plus one histogram sheet per model family.
wb_Results <- createWorkbook()
sheet_entry <- addWorksheet(wb_Results, sheetName = "Entry")
sheet_change <- addWorksheet(wb_Results, sheetName = "Change")
sheet_exit <- addWorksheet(wb_Results, sheetName = "Exit")
sheet_entry_hist <- addWorksheet(wb_Results, sheetName = "Entry hist")
sheet_change_hist <- addWorksheet(wb_Results, sheetName = "Change hist")
sheet_exit_hist <- addWorksheet(wb_Results, sheetName = "Exit hist")
# insertPlot() embeds the most recently *printed* plot, hence the explicit
# print() immediately before each call.
print(p1)
insertPlot(wb = wb_Results, sheet = sheet_entry_hist, startRow = 4,startCol = 3, width = 6, height = 5)
writeData(wb = wb_Results, x = as.data.frame(res[[1]]), sheet = sheet_entry, rowNames=FALSE)
print(p2)
insertPlot(wb = wb_Results, sheet = sheet_change_hist, startRow = 4,startCol = 3, width = 6, height = 5)
writeData(wb = wb_Results, x = as.data.frame(res[[2]]), sheet = sheet_change, rowNames=FALSE)
print(p3)
insertPlot(wb = wb_Results, sheet = sheet_exit_hist, startRow = 4,startCol = 3, width = 6, height = 5)
writeData(wb = wb_Results, x = as.data.frame(res[[3]]), sheet = sheet_exit, rowNames=FALSE)
saveWorkbook(wb_Results, file = "Fund_Recommendation_AI_Targeting_1_.xlsx", overwrite = TRUE)
rm(sheet_entry, sheet_entry_hist, sheet_change, sheet_change_hist, sheet_exit,sheet_exit_hist, wb_Results)
|
/VariableImportance_FundAnalysis.R
|
no_license
|
ahmed-Q4/AI_Targetting
|
R
| false
| false
| 15,574
|
r
|
library(randomForest)
library(caret)
library(aws.s3)
# FIX: read_csv()/cols()/col_double() below come from readr and the bare
# pipe `%>%` from magrittr (re-exported by dplyr); neither package was
# attached, so this script only worked in sessions where they happened to
# be loaded already.
library(readr)
library(dplyr)
# Fund-level ratings; keep only the fund id and the OIP rating that is later
# used to weight prediction scores.
FundRatings <- read_csv("~/R_workspaces/AI_Targetting/FundRatings.csv",
                        col_types = cols(AverageMarketValueRating = col_double(),
                                         NumberPositions = col_double(), OIPRating = col_double(),
                                         OIPRatingOffset = col_double(), StyleRating = col_double(),
                                         TotalMarketValueRating = col_double(), TurnoverRating = col_double()
                        )
) %>% dplyr::select(Fund_ID = FactSetFundId, FundRating = OIPRating)
fill_missing_if_any <- function(data.df, value.df){
  # Impute NAs column by column: row j of `value.df` supplies (in its second
  # column) the fill value for column j of `data.df`. Also tallies, per row,
  # how many columns were missing before imputation.
  #
  # Returns a list:
  #   data  - `data.df` with every NA replaced by its column's fill value
  #   Count - one-column matrix of per-row missing-value counts
  n_missing_per_row <- matrix(0, nrow = NROW(data.df), ncol = 1)
  for (col in seq_len(NCOL(data.df))) {
    missing_rows <- which(is.na(data.df[, col]))
    if (length(missing_rows) > 0) {
      n_missing_per_row[missing_rows, 1] <- n_missing_per_row[missing_rows, 1] + 1
    }
    data.df[missing_rows, col] <- value.df[col, 2]
  }
  list(data = data.df, Count = n_missing_per_row)
}
my_pred <- function(ticker, model_path, fund, Models_on_S3 = FALSE, bucket = NULL, Indicator_Data) {
  # Score one ticker with one serialized caret/randomForest model and return
  # a one-row data.frame with the predicted class and its vote share.
  #
  # Args:
  #   ticker: symbol to score (matched against Indicator_Data$Symbol).
  #   model_path: directory prefix for locally stored models (ignored when
  #     Models_on_S3 = TRUE).
  #   fund: model identifier / S3 key of the form "<n>_<family>/<fund_id>.txt".
  #   Models_on_S3: read the serialized model from S3 instead of local disk?
  #   bucket: S3 bucket name (only used when Models_on_S3 = TRUE).
  #   Indicator_Data: data.frame of model inputs, one row per symbol.
  if(Models_on_S3 == TRUE) {
    fname <- fund
    mm <- unserialize(get_object(fname, bucket = bucket))
  } else {
    fname <- paste0(model_path, fund, ".txt")
    mm <- unserialize(charToRaw(readChar(fname, file.info(fname)$size)))
  }
  # Predictor names: every trainingData column except the last (the outcome).
  varNames <- names(mm$trainingData)[1:(NCOL(mm$trainingData) -1)]
  # Training-set column means, used to impute missing predictor values.
  mm_avg <- data.frame(colMeans(mm$trainingData[, 1:(NCOL(mm$trainingData) -1)], na.rm = TRUE)) %>%
    tibble::rownames_to_column(var = "VarNames")
  # MODERNIZED: dplyr::select_(.dots = ...) is deprecated; select(all_of())
  # is the supported way to select by a character vector of names.
  data4model0 <- Indicator_Data %>% dplyr::filter(Symbol == ticker) %>% dplyr::select(dplyr::all_of(varNames))
  # Filling missing data if there is any.
  data4model <- fill_missing_if_any(data.df = data4model0, value.df = mm_avg)
  newX <- data4model$data
  # Per-tree predictions so vote shares can be computed.
  pred <- predict(mm$finalModel, newdata = newX ,predict.all=TRUE)
  votes <- apply(pred$individual,1,function(x){table(x)})
  ntrees <- mm$finalModel$ntree
  # When rows disagree on the set of observed classes, `votes` comes back as
  # a list; normalize it to a FALSE/TRUE count matrix (absent class -> 0).
  if(is.list(votes)){
    out<-c()
    for (i in 1:2){
      out<-cbind(out,sapply(votes,function(x) x[i]))
    }
    out[is.na(out)] <- 0
    rownames(out) <- NULL
    colnames(out) <- c("FALSE", "TRUE")
    VotesPerc <- t(out)/ntrees
  } else VotesPerc <- votes/ntrees
  # Fund id and model family are encoded in the key: "<n>_<family>/<id>.txt".
  fund_tmp <- strsplit(fund, split = "/", fixed = TRUE) %>% unlist()
  fund_id <- gsub(x = fund_tmp[2], pattern = ".txt", replacement = "", fixed = TRUE)
  modeling <- strsplit(fund_tmp[1], split = "_", fixed = TRUE)[[1]][2]
  res <- data.frame(model = fund,
                    modeling = modeling,
                    list_votes_debug = is.list(votes),
                    Fund_ID = fund_id,
                    N_Training = NROW(mm$trainingData),
                    MissingValues = data4model$Count,
                    NumVar = NROW(mm_avg),
                    Symbol = ticker,
                    ClassPred = pred$aggregate,
                    VotesPerc_false = t(VotesPerc)[, 1],
                    VotesPerc_true = t(VotesPerc)[, 2],
                    stringsAsFactors = FALSE) %>%
    # VotePerc = vote share of the predicted class.
    dplyr::mutate(VotePerc = ifelse(ClassPred %in% c("TRUE", "Sell"),VotesPerc_true, VotesPerc_false )) %>%
    dplyr::select(-VotesPerc_false, -VotesPerc_true)
  return(res)
}
var_imp <- function(model_path, fund, Models_on_S3 = FALSE, bucket = NULL) {
  # Extract the per-variable importance table for one serialized caret model.
  #
  # Args:
  #   model_path: directory prefix for locally stored models (ignored when
  #     Models_on_S3 = TRUE).
  #   fund: model identifier. When reading from S3 this is the full object
  #     key ("<folder>/<fund_id>.txt"); locally it is the bare fund id.
  #   Models_on_S3: read the serialized model from S3 instead of local disk?
  #   bucket: S3 bucket name (only used when Models_on_S3 = TRUE).
  #
  # Returns: data.frame with columns VariableName, Importance, FundId.
  if (Models_on_S3 == TRUE) {
    fname <- fund
    mm <- unserialize(get_object(fname, bucket = bucket))
  } else {
    fname <- paste0(model_path, fund, ".txt")
    mm <- unserialize(charToRaw(readChar(fname, file.info(fname)$size)))
  }
  # BUG FIX: removed a leftover `browser()` debugging call that halted every
  # invocation in interactive sessions.
  res_tmp <- caret::varImp(mm)
  # Getting ID of fund to include in results: the S3 key looks like
  # "<folder>/<fund_id>.txt".
  fund_tmp <- strsplit(fund, split = "/", fixed = TRUE) %>% unlist()
  fund_id <- gsub(x = fund_tmp[2], pattern = ".txt", replacement = "", fixed = TRUE)
  res <- res_tmp$importance %>% tibble::rownames_to_column(var = "VariableName") %>% dplyr::mutate(FundId = fund_id)
  names(res)[2] <- "Importance"
  return(res)
}
# List every object key in the models bucket. get_bucket() returns at most
# 1000 keys per request, so keep paging (via `marker`) while a full page of
# exactly 1000 keys comes back.
S3bucket <- get_bucket(bucket = 'q4quant-ai-targeting-models')
files_list <- NULL
files_list_tmp <- data.frame(FileName = sapply(S3bucket, FUN = function(x){x$Key}), stringsAsFactors = FALSE)
# FIX: the original condition `(NROW(x) %% 1000 == 0) && NROW(x) == 1000`
# is redundant -- it is logically equivalent to `NROW(x) == 1000`.
while (NROW(files_list_tmp) == 1000) {
  files_list <- rbind(files_list, files_list_tmp)
  # Resume listing after the last key collected so far.
  S3bucket_tmp <- get_bucket(bucket = 'q4quant-ai-targeting-models', marker = files_list$FileName[NROW(files_list)])
  files_list_tmp <- data.frame(FileName = sapply(S3bucket_tmp, FUN = function(x){x$Key}), stringsAsFactors = FALSE)
}
# Append the final (partial) page.
files_list <- rbind(files_list, files_list_tmp)
# Drop the last page buffer; `files_list` now holds every object key.
rm(files_list_tmp)
# Split the key list by model family: each family lives under its own S3
# "folder" prefix. The bare folder placeholder keys are excluded.
model_entry <- files_list %>% dplyr::filter(grepl(pattern = "1_EntryPosition/", x = FileName), FileName != "1_EntryPosition/")
model_change <- files_list %>% dplyr::filter(grepl(pattern = "2_ChangePosition/", x = FileName), FileName != "2_ChangePosition/")
model_exit <- files_list %>% dplyr::filter(grepl(pattern = "3_ExitPosition/", x = FileName), FileName != "3_ExitPosition/")
#modelPath <- list(EntryModelPath, ChangeModelPath, ExitModelPath)
models <- list(model_entry, model_change, model_exit)
# Determining Variable Importance for the Fitted Models
# One stacked data.frame per family (entry/change/exit), built by running
# var_imp() over every model file of that family directly from S3.
res_varImp <- vector(mode = "list", length = 3)
for(j in 1:3){
res_varImp[[j]] <- lapply(X = models[[j]]$FileName, FUN = var_imp, model_path = NULL, Models_on_S3 = TRUE,
bucket = "q4quant-ai-targeting-models") %>% dplyr::bind_rows()
}
# Rank variables within each fund, most important first.
res_varImp[[1]] <- res_varImp[[1]] %>% dplyr::arrange(FundId, desc(Importance))
res_varImp[[2]] <- res_varImp[[2]] %>% dplyr::arrange(FundId, desc(Importance))
res_varImp[[3]] <- res_varImp[[3]] %>% dplyr::arrange(FundId, desc(Importance))
# res_varImp_final <- vector(mode = "list", length = 3)
# for(j in 1:3){
# res_varImp_final[[j]] <- dplyr::arrange(res_varImp[[1]][[1]], desc(Importance))
# for(k in 1:length(res_varImp[[1]])){
# if(k %% 10 == 0) print(paste(j, k, sep = "-"))
# res_varImp_final[[j]] <- qpcR:::cbind.na(res_varImp_final[[j]],
# dplyr::arrange(res_varImp[[j]][[k]], desc(Importance)))
# }
# }
library(openxlsx)
# https://cran.r-project.org/web/packages/openxlsx/vignettes/Introduction.pdf
# Creating a workbook ----
# One worksheet per model family holding its ranked variable-importance table.
wb_Results <- createWorkbook()
sheet_entry <- addWorksheet(wb_Results, sheetName = "Entry")
sheet_change <- addWorksheet(wb_Results, sheetName = "Change")
sheet_exit <- addWorksheet(wb_Results, sheetName = "Exit")
writeData(wb = wb_Results, x = as.data.frame(res_varImp[[1]]), sheet = sheet_entry, rowNames=FALSE)
writeData(wb = wb_Results, x = as.data.frame(res_varImp[[2]]), sheet = sheet_change, rowNames=FALSE)
writeData(wb = wb_Results, x = as.data.frame(res_varImp[[3]]), sheet = sheet_exit, rowNames=FALSE)
saveWorkbook(wb_Results, file = "Funds_VariableRanking.xlsx", overwrite = TRUE)
rm(sheet_entry, sheet_change, sheet_exit, wb_Results)
#### Getting Funds prediction for symbolList
# Score every fund model of each family against the ticker list.
res <- vector(mode = "list", length = 3)
for (j in 1:3) {
  # BUG FIX: the original assigned to `res[[i]]`, but the loop variable is
  # `j`; `i` was a stale leftover from another loop, so every iteration
  # wrote to whichever slot `i` happened to name (or errored if unset).
  res[[j]] <- lapply(X = models[[j]]$FileName, FUN = my_pred, ticker = SymbolList, model_path = NULL, Models_on_S3 = TRUE,
                     bucket = "q4quant-ai-targeting-models", Indicator_Data = Mrk_Data) %>% dplyr::bind_rows()
}
# NOTE(review): the next two lines look like leftover interactive debugging
# (they load one specific model into `mm`, which is never used afterwards);
# confirm whether they can be removed.
fname <- "./ModelResults/04B8FN-E.txt"
mm <- unserialize(charToRaw(readChar(fname, file.info(fname)$size)))
# Build the Entry-model recommendation table: normalize fund ids, join model
# statistics / fund names / ratings, then compute a 0-100 score per row.
# NOTE(review): `res_tmp` is not defined anywhere in this script --
# presumably it should be `res[[1]]` from the loop above; verify.
res[[1]] <- res_tmp %>% dplyr::mutate(Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID)) %>%
dplyr::left_join(supportiveData$GlobalModelStat[[1]], by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(supportiveData$FundName, by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(FundRatings, by = c("Fund_ID")) %>%
dplyr::select(-Pred_Accuracy) %>%
# Raw score = true-positive rate times the winning vote share, weighted by
# the fund rating; then shift/clip to the [Score_shift, p_97.5] band and
# rescale to 0-100.
dplyr::mutate(Score = TP * VotePerc,
Score_with_fund = round(Score * FundRating, digits = 0),
p_2.5 = quantile(Score_with_fund, probs = 0.025, na.rm = TRUE),
#Score_shift = p_2.5
Score_shift = 0) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp = max(Score_with_fund - Score_shift, 0, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(p_97.5 = quantile(Score_tmp, probs = 0.975, na.rm = TRUE)) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp2 = min(Score_tmp, p_97.5, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(Score_final = round(100 * Score_tmp2/max(Score_tmp2, na.rm = TRUE), digits = 0)) %>%
# dplyr::group_by(Symbol, ClassPred) %>%
# dplyr::mutate(ScalingMin = min(Score_with_fund, na.rm = TRUE),
# ScalingFactor = max(Score_with_fund - ScalingMin, na.rm = TRUE),
# Scaled_Score_with_fund = round(100 * (Score_with_fund - ScalingMin)/ScalingFactor, digits = 0)) %>%
# dplyr::ungroup() %>%
dplyr::arrange(Symbol, desc(ClassPred),desc(Score_final))
# Change-model table; same shape as above, but "Sell" predictions are scored
# with TP and "Buy"/other with TN.
# NOTE(review): `res_tmp2` is undefined here (likely `res[[2]]`), and the
# Fund_ID gsub mutation is specified twice with identical arguments -- the
# second application only matters for nested patterns like "-E-1-1"; confirm
# whether that double replacement is intentional.
res[[2]] <- res_tmp2 %>% dplyr::mutate(Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID),
Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID)) %>%
dplyr::left_join(supportiveData$GlobalModelStat[[2]], by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(supportiveData$FundName, by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(FundRatings, by = c("Fund_ID")) %>%
dplyr::select(-Pred_Accuracy) %>%
dplyr::mutate(Score = ifelse(ClassPred == "Sell", TP * VotePerc, TN * VotePerc),
Score_with_fund = round(Score * FundRating, digits = 0),
p_2.5 = quantile(Score_with_fund, probs = 0.025, na.rm = TRUE),
#Score_shift = p_2.5
Score_shift = 0) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp = max(Score_with_fund - Score_shift, 0, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(p_97.5 = quantile(Score_tmp, probs = 0.975, na.rm = TRUE)) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp2 = min(Score_tmp, p_97.5, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(Score_final = round(100 * Score_tmp2/max(Score_tmp2, na.rm = TRUE), digits = 0)) %>%
# dplyr::group_by(Symbol, ClassPred) %>%
# dplyr::mutate(ScalingMin = min(Score_with_fund, na.rm = TRUE),
# ScalingFactor = max(Score_with_fund - ScalingMin, na.rm = TRUE),
# Scaled_Score_with_fund = round(100 * (Score_with_fund - ScalingMin)/ScalingFactor, digits = 0)) %>%
# dplyr::ungroup() %>%
dplyr::arrange(Symbol, ClassPred, desc(Score_final))
# Exit-model table; identical scoring to the Entry table.
# NOTE(review): `res_tmp3` is undefined here (likely `res[[3]]`); same
# duplicated gsub mutation as above.
res[[3]] <- res_tmp3 %>% dplyr::mutate(Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID),
Fund_ID = gsub(pattern = "-E-1", replacement = "-E", x = Fund_ID)) %>%
dplyr::left_join(supportiveData$GlobalModelStat[[3]], by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(supportiveData$FundName, by = c("Fund_ID" = "Fund")) %>%
dplyr::left_join(FundRatings, by = c("Fund_ID")) %>%
dplyr::select(-Pred_Accuracy) %>%
dplyr::mutate(Score = TP * VotePerc,
Score_with_fund = round(Score * FundRating, digits = 0),
p_2.5 = quantile(Score_with_fund, probs = 0.025, na.rm = TRUE),
#Score_shift = p_2.5
Score_shift = 0) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp = max(Score_with_fund - Score_shift, 0, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(p_97.5 = quantile(Score_tmp, probs = 0.975, na.rm = TRUE)) %>%
dplyr::rowwise() %>%
dplyr::mutate(Score_tmp2 = min(Score_tmp, p_97.5, na.rm = TRUE)) %>%
dplyr::ungroup() %>%
dplyr::mutate(Score_final = round(100 * Score_tmp2/max(Score_tmp2, na.rm = TRUE), digits = 0)) %>%
# dplyr::group_by(Symbol, ClassPred) %>%
# dplyr::mutate(ScalingMin = min(Score_with_fund, na.rm = TRUE),
# ScalingFactor = max(Score_with_fund - ScalingMin, na.rm = TRUE),
# Scaled_Score_with_fund = round(100 * (Score_with_fund - ScalingMin)/ScalingFactor, digits = 0)) %>%
# dplyr::ungroup() %>%
dplyr::arrange(Symbol, desc(ClassPred),desc(Score_final))
library(ggplot2)
# Histogram of the final score distribution for each model family.
# NOTE(review): the `..count..` pronoun is deprecated in ggplot2 >= 3.4;
# `after_stat(count)` is the modern equivalent.
p1 <- ggplot(res[[1]], aes(x = Score_final)) + geom_histogram(aes(y = (..count..)/sum(..count..)), binwidth = 5) +
scale_y_continuous(labels = scales::percent) +
labs(title="Histogram of Final Score for Entry Models", x ="Score final", y = "Frequency") +
ggthemes::theme_solarized(light = TRUE)
p1
p2 <- ggplot(res[[2]], aes(x = Score_final)) + geom_histogram(aes(y = (..count..)/sum(..count..)), binwidth = 5) +
scale_y_continuous(labels = scales::percent) +
labs(title="Histogram of Final Score for Change Models", x ="Score final", y = "Frequency") +
ggthemes::theme_solarized(light = TRUE)
p2
p3 <- ggplot(res[[3]], aes(x = Score_final)) + geom_histogram(aes(y = (..count..)/sum(..count..)), binwidth = 5) +
scale_y_continuous(labels = scales::percent) +
labs(title="Histogram of Final Score for Exit Models", x ="Score final", y = "Frequency") +
ggthemes::theme_solarized(light = TRUE)
p3
library(openxlsx)
# https://cran.r-project.org/web/packages/openxlsx/vignettes/Introduction.pdf
# Creating a workbook ----
# One data sheet plus one histogram sheet per model family.
wb_Results <- createWorkbook()
sheet_entry <- addWorksheet(wb_Results, sheetName = "Entry")
sheet_change <- addWorksheet(wb_Results, sheetName = "Change")
sheet_exit <- addWorksheet(wb_Results, sheetName = "Exit")
sheet_entry_hist <- addWorksheet(wb_Results, sheetName = "Entry hist")
sheet_change_hist <- addWorksheet(wb_Results, sheetName = "Change hist")
sheet_exit_hist <- addWorksheet(wb_Results, sheetName = "Exit hist")
# insertPlot() embeds the most recently *printed* plot, hence the explicit
# print() immediately before each call.
print(p1)
insertPlot(wb = wb_Results, sheet = sheet_entry_hist, startRow = 4,startCol = 3, width = 6, height = 5)
writeData(wb = wb_Results, x = as.data.frame(res[[1]]), sheet = sheet_entry, rowNames=FALSE)
print(p2)
insertPlot(wb = wb_Results, sheet = sheet_change_hist, startRow = 4,startCol = 3, width = 6, height = 5)
writeData(wb = wb_Results, x = as.data.frame(res[[2]]), sheet = sheet_change, rowNames=FALSE)
print(p3)
insertPlot(wb = wb_Results, sheet = sheet_exit_hist, startRow = 4,startCol = 3, width = 6, height = 5)
writeData(wb = wb_Results, x = as.data.frame(res[[3]]), sheet = sheet_exit, rowNames=FALSE)
saveWorkbook(wb_Results, file = "Fund_Recommendation_AI_Targeting_1_.xlsx", overwrite = TRUE)
rm(sheet_entry, sheet_entry_hist, sheet_change, sheet_change_hist, sheet_exit,sheet_exit_hist, wb_Results)
|
## # dada2 processing of 096 pilot study
## following tutorial at
## https://benjjneb.github.io/dada2/tutorial.html
## Two changes had to happen from last time
## https://benjjneb.github.io/dada2/faq.html#can-i-use-dada2-with-my-454-or-ion-torrent-data
## filterAndTrim(..., maxLen=XXX) # XXX depends on the chemistry # lets set to 500, well bring trunclen down to 350.
## https://github.com/benjjneb/dada2/issues/275
## dada(..., HOMOPOLYMER_GAP_PENALTY=-1, BAND_SIZE=32)
library('dada2'); packageVersion("dada2")
library('ggplot2'); packageVersion('ggplot2')
# NOTE: setwd() in a script is fragile -- re-running this block moves the
# working directory up another level each time.
setwd('..')
path <- "for_dada2"
# Forward-read fastq files live directly under `path`.
fnFs00 <- list.files(path)
fnFs0 <- fnFs00[grep("fastq", list.files(path))]
# BUG FIX: the original stripped '\\.fasta' from the file names, but the
# files selected above are fastq files, so the pattern never matched and the
# sample names kept their full extension. Strip the fastq extension
# (including any .gz suffix) instead.
sample.names <- sapply(fnFs0, function(file) gsub('\\.fastq.*$', "", file))
fnFs <- file.path(path, fnFs0)
filt_path <- file.path(path, "filtered")
filtFs <- file.path(filt_path, paste0(sample.names, "_F_filt.fastq.gz"))
print('filter and trim sequences')
# trimLeft drops primer bases; maxLen=500 / truncLen=300 follow the
# 454 / Ion Torrent guidance linked in the header.
out <- filterAndTrim(fnFs, filtFs, trimLeft = 22, truncLen=300, maxLen=500, maxN=0, maxEE=2, truncQ=2, rm.phix=TRUE,
compress=TRUE, multithread=TRUE)
# Learn the error-rate model from the filtered reads (timed with proc.time).
print('learning errors')  # FIX: typo "learing" in the original status message
ptm <- proc.time()        # style: `<-` instead of `=` for assignment
errF <- learnErrors(filtFs, multithread=TRUE)
proc.time() - ptm
save(errF, file = 'proc096/errF-Mar2018.Rdata')
# Diagnostic plot of observed vs. expected error rates.
pdf('figures/errF-Mar2018.pdf') # nreads shouldn't matter, there are only 306339 of them
plotErrors(errF, nominalQ=TRUE)
dev.off()
print('dereplicating sequences')
derepFs <- derepFastq(filtFs, verbose=TRUE)
names(derepFs) <- sample.names
print('run dada function')
# HOMOPOLYMER_GAP_PENALTY / BAND_SIZE follow the dada2 recommendation for
# 454 / Ion Torrent chemistry (see header links); pool = TRUE improves
# sensitivity to variants that are rare within any single sample.
ptm <- proc.time()
dadaFs <- dada(derepFs, err=errF, multithread=TRUE, HOMOPOLYMER_GAP_PENALTY=-1, BAND_SIZE=32, pool = TRUE)
proc.time() - ptm
seqtab <- makeSequenceTable(dadaFs)
print('remove chimeras')
ptm <- proc.time()
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
proc.time() - ptm # 3 seconds
# Read-count bookkeeping: how many reads survive each pipeline stage.
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), rowSums(seqtab), rowSums(seqtab.nochim))
colnames(track) <- c("input", "filtered", "denoised", "tabled", "nonchim")
rownames(track) <- sample.names
#track[10:20,]
track
write.csv(track, 'proc096/trackMar2018.csv')
write.csv(seqtab.nochim, "data1/seqtab.nochimMar2018.csv")
|
/scripts/dada2work-March2018Run.R
|
permissive
|
cramjaco/Nyvac_096_Microbiome
|
R
| false
| false
| 2,229
|
r
|
## # dada2 processing of 096 pilot study
## following tutorial at
## https://benjjneb.github.io/dada2/tutorial.html
## Two changes had to happen from last time
## https://benjjneb.github.io/dada2/faq.html#can-i-use-dada2-with-my-454-or-ion-torrent-data
## filterAndTrim(..., maxLen=XXX) # XXX depends on the chemistry # lets set to 500, well bring trunclen down to 350.
## https://github.com/benjjneb/dada2/issues/275
## dada(..., HOMOPOLYMER_GAP_PENALTY=-1, BAND_SIZE=32)
library('dada2'); packageVersion("dada2")
library('ggplot2'); packageVersion('ggplot2')
# NOTE: setwd() in a script is fragile -- re-running this block moves the
# working directory up another level each time.
setwd('..')
path <- "for_dada2"
# Forward-read fastq files live directly under `path`.
fnFs00 <- list.files(path)
fnFs0 <- fnFs00[grep("fastq", list.files(path))]
# BUG FIX: the original stripped '\\.fasta' from the file names, but the
# files selected above are fastq files, so the pattern never matched and the
# sample names kept their full extension. Strip the fastq extension
# (including any .gz suffix) instead.
sample.names <- sapply(fnFs0, function(file) gsub('\\.fastq.*$', "", file))
fnFs <- file.path(path, fnFs0)
filt_path <- file.path(path, "filtered")
filtFs <- file.path(filt_path, paste0(sample.names, "_F_filt.fastq.gz"))
print('filter and trim sequences')
# trimLeft drops primer bases; maxLen=500 / truncLen=300 follow the
# 454 / Ion Torrent guidance linked in the header.
out <- filterAndTrim(fnFs, filtFs, trimLeft = 22, truncLen=300, maxLen=500, maxN=0, maxEE=2, truncQ=2, rm.phix=TRUE,
compress=TRUE, multithread=TRUE)
# Learn the error-rate model from the filtered reads (timed with proc.time).
print('learning errors')  # FIX: typo "learing" in the original status message
ptm <- proc.time()        # style: `<-` instead of `=` for assignment
errF <- learnErrors(filtFs, multithread=TRUE)
proc.time() - ptm
save(errF, file = 'proc096/errF-Mar2018.Rdata')
# Diagnostic plot of observed vs. expected error rates.
pdf('figures/errF-Mar2018.pdf') # nreads shouldn't matter, there are only 306339 of them
plotErrors(errF, nominalQ=TRUE)
dev.off()
print('dereplicating sequences')
derepFs <- derepFastq(filtFs, verbose=TRUE)
names(derepFs) <- sample.names
print('run dada function')
# HOMOPOLYMER_GAP_PENALTY / BAND_SIZE follow the dada2 recommendation for
# 454 / Ion Torrent chemistry (see header links); pool = TRUE improves
# sensitivity to variants that are rare within any single sample.
ptm <- proc.time()
dadaFs <- dada(derepFs, err=errF, multithread=TRUE, HOMOPOLYMER_GAP_PENALTY=-1, BAND_SIZE=32, pool = TRUE)
proc.time() - ptm
seqtab <- makeSequenceTable(dadaFs)
print('remove chimeras')
ptm <- proc.time()
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
proc.time() - ptm # 3 seconds
# Read-count bookkeeping: how many reads survive each pipeline stage.
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), rowSums(seqtab), rowSums(seqtab.nochim))
colnames(track) <- c("input", "filtered", "denoised", "tabled", "nonchim")
rownames(track) <- sample.names
#track[10:20,]
track
write.csv(track, 'proc096/trackMar2018.csv')
write.csv(seqtab.nochim, "data1/seqtab.nochimMar2018.csv")
|
####################################################################
#' Compare Variables with their Distributions
#'
#' Compare the distribution of a target variable vs another variable. This
#' function automatically splits into quantiles for numerical variables.
#' Custom and tidyverse friendly.
#'
#' @param data Dataframe
#' @param ... Variables. Main (target variable) and secondary (values
#' variable) to group by
#' @param type Integer. 1 for both plots, 2 for the counts plot only, 3 for
#' the percentages plot only.
#' @param top Integer. Filter and plot the most n frequent for categorical values
#' @param breaks Integer. Number of splits for numerical values
#' @param na.rm Boolean. Ignore NAs if needed
#' @param force Character. Force class on the values data. Choose between 'none',
#' 'character', 'numeric', 'date'
#' @param trim Integer. Trim words until the nth character for categorical values
#' (applies for both, target and values)
#' @param clean Boolean. Use lares::cleanText for categorical values (applies
#' for both, target and values)
#' @param abc Boolean. Do you wish to sort by alphabetical order?
#' @param custom_colours Boolean. Use custom colours function?
#' @param results Boolean. Return results data.frame?
#' @param save Boolean. Save the output plot in our working directory
#' @param subdir Character. Into which subdirectory do you wish to save the plot to?
#' @export
distr <- function(data, ...,
type = 1,
top = 10,
breaks = 10,
na.rm = FALSE,
force = "none",
trim = 0,
clean = FALSE,
abc = FALSE,
custom_colours = FALSE,
results = FALSE,
save = FALSE,
subdir = NA) {
options(scipen=999)
vars <- quos(...)
# Validate if we can continue with given data:
if (length(vars) > 2) {
stop("Please, select only one or two variables to continue...")
}
# Functions
force_class <- function(value, class = "none") {
if (class != "none") {
if (grepl("char|fact", class) & is.numeric(value)) {
value <- as.character(value)
}
if (grepl("num|int", class) & !is.numeric(value)) {
value <- as.numeric(value)
}
if (grepl("dat|day|time", class)) {
value <- gsub(" .*", "", as.character(value))
value <- lubridate::date(value)
}
}
return(value)
}
fxtrim <- function(value, trim, targets = NA) {
if (trim > 0) {
if (!is.numeric(value)) {
value <- substr(value, 1, trim)
}
if (!is.numeric(targets) & !is.na(targets)) {
targets <- substr(targets, 1, trim)
}
message(paste("Chopping everything to", trim, "characters..."))
}
return(value)
}
fxclean <- function(value, clean = FALSE, targets = NA) {
if (clean == TRUE) {
if (!is.numeric(value)) {
value <- cleanText(value, spaces = F)
}
if (!is.numeric(targets) & !is.na(targets)) {
targets <- cleanText(targets, spaces = F)
}
}
return(value)
}
fxna_rm <- function(df, na.rm = FALSE){
if (na.rm == TRUE) {
df <- df[complete.cases(df), ]
}
return(df)
}
# When we only have one variable
if (length(vars) == 1) {
value <- data %>% select(!!!vars[[1]])
variable_name <- colnames(value)
value <- value[,1] # do.call("c", value)
value <- force_class(value, force)
value <- fxtrim(value, trim)
value <- fxclean(value, clean)
df <- data.frame(value = value, dummy = 0)
df <- fxna_rm(df, na.rm)
is.Date <- function(x) inherits(x, "Date")
is.POSIXct <- function(x) inherits(x, "POSIXct")
is.POSIXlt <- function(x) inherits(x, "POSIXlt")
if (is.numeric(value) | is.Date(value) | is.POSIXct(value) | is.POSIXlt(value)) {
# Continuous and date values
if (!is.numeric(value)) {
p <- ggplot(df, aes(x = value))
} else {
p <- ggplot(df, aes(x = date(value)))
}
p <- p + theme_minimal() +
geom_density(fill = "deepskyblue", alpha = 0.7, adjust = 1/3) +
labs(y = "", x = "", fill = "Density",
title = paste("Density Distribution"),
subtitle = paste("Variable:", variable_name),
caption = paste("Obs:", formatNum(nrow(df), 0)))
print(p)
} else {
# Discrete values
df %>% freqs(value, plot = T, results = F,
variable_name = variable_name,
abc = abc, top = top)
}
# Return table with results?
if (results == TRUE) {
output <- df %>% freqs(value, top = top)
return(output)
}
}
# When we only have 2 variables
if (length(vars) == 2) {
targets <- data %>% select(!!!vars[[1]])
targets_name <- colnames(targets)
targets <- targets[,1]
value <- data %>% select(!!!vars[[2]])
variable_name <- colnames(value)
# Transformations
value <- value[,1] # do.call("c", value)
value <- force_class(value, force)
value <- fxtrim(value, trim)
value <- fxclean(value, clean)
if (length(targets) != length(value)) {
message("The targets and value vectors should be the same length.")
stop(message(paste("Currently, targets has", length(targets),
"rows and value has", length(value))))
}
# For num-num distributions or too many unique target variables
if (length(unique(targets)) >= 8) {
if (is.numeric(targets) & is.numeric(value)) {
df <- data.frame(x = targets, y = value)
df <- fxna_rm(df, na.rm = TRUE)
p <- ggplot(df, aes(x = x, y = y)) +
stat_density_2d(aes(fill = ..level..), geom = "polygon") +
theme_minimal() +
labs(title = "2D Distribution Plot",
x = targets_name, y = variable_name,
subtitle = paste("For", variable_name, "vs.", targets_name),
caption = paste("Obs:", nrow(df))) +
scale_x_continuous(labels = scales::comma) +
scale_y_continuous(labels = scales::comma)
return(p)
}
stop("You should use a 'target' variable with max 8 different values.")
}
# Only n numeric values, really numeric?
if (is.numeric(value) & length(unique(value)) <= 8) {
value <- force_class(value, class = "char")
}
# Turn numeric variables into quantiles
if (is.numeric(value)) {
breaks <- ifelse(top != 10, top, breaks)
value <- quants(value, breaks, return = "labels")
cuts <- length(unique(value[!is.na(value)]))
if (cuts != breaks) {
message(paste("When dividing", variable_name, "into", breaks, "quantiles,",
cuts, "cuts/groups are possible."))
}
top <- top + 1
}
# Caption for plots
caption <- paste0("Variables: ", targets_name, " vs. ", variable_name,
". Obs: ", formatNum(nrow(df), 0))
# Finally, we have our data.frame
df <- data.frame(targets = targets, value = value)
df <- fxna_rm(df, na.rm)
freqs <- df %>%
group_by(value, targets) %>%
tally() %>% arrange(desc(n)) %>%
mutate(p = round(100*n/sum(n),2)) %>% ungroup() %>%
mutate(row = row_number(),
order = ifelse(grepl("\\(|\\)", value),
as.numeric(as.character(substr(gsub(",.*", "", value), 2, 100))), row))
if(length(unique(value)) > top & !is.numeric(value)) {
message(paste("Filtering the", top, "most frequent values. Use `top` to overrule."))
which <- df %>% group_by(value) %>% tally() %>% arrange(desc(n)) %>% slice(1:top)
freqs <- freqs %>%
mutate(value = ifelse(value %in% which$value, as.character(value), "OTHERS")) %>%
group_by(value, targets) %>% select(-row, -order) %>%
summarise(n = sum(n)) %>%
mutate(p = round(100*n/sum(n),2)) %>%
ungroup() %>% arrange(desc(n)) %>%
mutate(row = row_number(),
order = row_number())
}
# Sort values alphabetically or ascending if numeric
if (abc == TRUE) {
freqs <- freqs %>% mutate(order = rank(value))
}
# Counter plot
if(type %in% c(1,2)) {
count <- ggplot(freqs, aes(
x = reorder(as.character(value), order), y=n,
fill = tolower(as.character(targets)),
label = n, ymax = max(n) * 1.1)) +
geom_col(position = "dodge") +
geom_text(colour = "black",
check_overlap = TRUE,
position = position_dodge(0.9),
size=3, vjust = -0.15) +
labs(x = "", y = "Counter", fill = targets_name, caption = caption) +
theme_minimal() + theme(legend.position = "top") + guides(colour = FALSE) +
theme(axis.title.y = element_text(size = rel(0.8), angle = 90))
# Give an angle to labels when more than...
if (length(unique(value)) >= 7) {
count <- count + theme(axis.text.x = element_text(angle = 45, hjust=1))
}
# Custom colours if wanted...
if (custom_colours == TRUE) {
count <- count + gg_fill_customs()
} else {
count <- count + scale_fill_brewer(palette = "Blues")
}
}
# Proportions (%) plot
if (type %in% c(1,3)) {
prop <- freqs %>%
group_by(value) %>%
mutate(size = sum(n)/sum(freqs$n)) %>%
ggplot(aes(x = reorder(value, -order),
y = as.numeric(p/100),
fill = tolower(as.character(targets)),
label = p)) +
geom_col(position = "fill") +
geom_text(aes(size = size,
colour = ifelse(custom_colours, tolower(as.character(targets)), "none")),
check_overlap = TRUE,
position = position_stack(vjust = 0.5)) +
scale_size(range = c(1.8, 3.5)) +
theme_minimal() + coord_flip() +
labs(x = "Proportions", y = "", fill = targets_name, caption = caption) +
theme(legend.position = "top") + ylim(0, 1) + guides(colour = FALSE, size = FALSE) +
theme(axis.title.y = element_text(size = rel(0.8), angle = 90)) +
gg_text_customs()
# Show limit caption when more values than top
if (length(unique(value)) > top) {
count <- count + labs(caption = paste("Showing the", top, "most frequent values"))
}
# Show a reference line if levels = 2; quite useful when data is unbalanced (not 50/50)
if (length(unique(targets)) == 2) {
distr <- df %>% freqs(targets)
h <- signif(100 - distr$pcum[1], 3)
prop <- prop +
geom_hline(yintercept = h/100, colour = "purple",
linetype = "dotted", alpha = 0.8) +
geom_label(aes(0, h/100, label = h, vjust = -0.05),
size = 2.3, fill="white", alpha = 0.8)
}
# Custom colours if wanted...
if (custom_colours == TRUE) {
prop <- prop + gg_fill_customs()
} else {
prop <- prop + scale_fill_brewer(palette = "Blues")
}
}
# Export file name and folder
if (save == TRUE) {
file_name <- paste0(
"viz_distr_",
cleanText(targets_name), ".vs.",
cleanText(variable_name),
case_when(type == 2 ~ "_c", type == 3 ~ "_p", TRUE ~ ""),".png")
if (!is.na(subdir)) {
options(warn=-1)
dir.create(file.path(getwd(), subdir), recursive = T)
file_name <- paste(subdir, file_name, sep="/")
}
}
# Plot the results and save if needed
if (type == 1) {
prop <- prop + guides(fill=FALSE)
count <- count + labs(caption = "")
if (save == TRUE) {
png(file_name, height = 1000, width = 1300, res = 200)
gridExtra::grid.arrange(count, prop, ncol = 1, nrow = 2)
dev.off()
}
invisible(gridExtra::grid.arrange(count, prop, ncol = 1, nrow = 2))
}
if (type == 2) {
if (save == TRUE) {
count <- count +
ggsave(file_name, width = 8, height = 6)
}
plot(count)
}
if (type == 3) {
if (save == TRUE) {
prop <- prop +
ggsave(file_name, width = 8, height = 6)
}
plot(prop)
}
# Return table with results?
if (results == TRUE) {
table <- freqs %>% select(-order)
return(table)
}
}
}
|
/R/distribution.R
|
no_license
|
Transconnectome/lares
|
R
| false
| false
| 12,531
|
r
|
####################################################################
#' Compare Variables with their Distributions
#'
#' Compare the distribution of a target variable vs another variable. This
#' function automatically splits into quantiles for numerical variables.
#' Custom and tidyverse friendly.
#'
#' @param data Dataframe
#' @param ... Variables. Main (target variable) and secondary (values
#' variable) to group by
#' @param type Integer. 1 for both plots, 2 for counter plot only, 3 for
#' percentages plot only.
#' @param top Integer. Filter and plot the most n frequent for categorical values
#' @param breaks Integer. Number of splits for numerical values
#' @param na.rm Boolean. Ignore NAs if needed
#' @param force Character. Force class on the values data. Choose between 'none',
#' 'character', 'numeric', 'date'
#' @param trim Integer. Trim words until the nth character for categorical values
#' (applies for both, target and values)
#' @param clean Boolean. Use lares::cleanText for categorical values (applies
#' for both, target and values)
#' @param abc Boolean. Do you wish to sort by alphabetical order?
#' @param custom_colours Boolean. Use custom colours function?
#' @param results Boolean. Return results data.frame?
#' @param save Boolean. Save the output plot in our working directory
#' @param subdir Character. Into which subdirectory do you wish to save the plot to?
#' @export
distr <- function(data, ...,
                  type = 1,
                  top = 10,
                  breaks = 10,
                  na.rm = FALSE,
                  force = "none",
                  trim = 0,
                  clean = FALSE,
                  abc = FALSE,
                  custom_colours = FALSE,
                  results = FALSE,
                  save = FALSE,
                  subdir = NA) {
  # NOTE(review): this changes the scipen option globally and never restores
  # it; consider options(old) via on.exit() in a future revision.
  options(scipen=999)
  vars <- quos(...)
  # Validate if we can continue with given data:
  if (length(vars) > 2) {
    stop("Please, select only one or two variables to continue...")
  }
  # Helper: coerce `value` into the class requested via `force`.
  force_class <- function(value, class = "none") {
    if (class != "none") {
      if (grepl("char|fact", class) & is.numeric(value)) {
        value <- as.character(value)
      }
      if (grepl("num|int", class) & !is.numeric(value)) {
        value <- as.numeric(value)
      }
      if (grepl("dat|day|time", class)) {
        value <- gsub(" .*", "", as.character(value))
        value <- lubridate::date(value)
      }
    }
    return(value)
  }
  # Helper: truncate categorical values to `trim` characters.
  fxtrim <- function(value, trim, targets = NA) {
    if (trim > 0) {
      if (!is.numeric(value)) {
        value <- substr(value, 1, trim)
      }
      if (!is.numeric(targets) & !is.na(targets)) {
        targets <- substr(targets, 1, trim)
      }
      message(paste("Chopping everything to", trim, "characters..."))
    }
    return(value)
  }
  # Helper: normalize categorical text with lares::cleanText.
  fxclean <- function(value, clean = FALSE, targets = NA) {
    if (clean == TRUE) {
      if (!is.numeric(value)) {
        value <- cleanText(value, spaces = F)
      }
      if (!is.numeric(targets) & !is.na(targets)) {
        targets <- cleanText(targets, spaces = F)
      }
    }
    return(value)
  }
  # Helper: drop incomplete rows when na.rm = TRUE.
  fxna_rm <- function(df, na.rm = FALSE){
    if (na.rm == TRUE) {
      df <- df[complete.cases(df), ]
    }
    return(df)
  }
  # When we only have one variable
  if (length(vars) == 1) {
    value <- data %>% select(!!!vars[[1]])
    variable_name <- colnames(value)
    value <- value[,1] # do.call("c", value)
    value <- force_class(value, force)
    value <- fxtrim(value, trim)
    value <- fxclean(value, clean)
    df <- data.frame(value = value, dummy = 0)
    df <- fxna_rm(df, na.rm)
    is.Date <- function(x) inherits(x, "Date")
    is.POSIXct <- function(x) inherits(x, "POSIXct")
    is.POSIXlt <- function(x) inherits(x, "POSIXlt")
    if (is.numeric(value) | is.Date(value) | is.POSIXct(value) | is.POSIXlt(value)) {
      # Continuous and date values
      # Fix: branches were inverted -- lubridate::date() errors on plain
      # numeric input; only date/time values need the date() normalization.
      if (is.numeric(value)) {
        p <- ggplot(df, aes(x = value))
      } else {
        p <- ggplot(df, aes(x = date(value)))
      }
      p <- p + theme_minimal() +
        geom_density(fill = "deepskyblue", alpha = 0.7, adjust = 1/3) +
        labs(y = "", x = "", fill = "Density",
             title = paste("Density Distribution"),
             subtitle = paste("Variable:", variable_name),
             caption = paste("Obs:", formatNum(nrow(df), 0)))
      print(p)
    } else {
      # Discrete values
      df %>% freqs(value, plot = T, results = F,
                   variable_name = variable_name,
                   abc = abc, top = top)
    }
    # Return table with results?
    if (results == TRUE) {
      output <- df %>% freqs(value, top = top)
      return(output)
    }
  }
  # When we only have 2 variables
  if (length(vars) == 2) {
    targets <- data %>% select(!!!vars[[1]])
    targets_name <- colnames(targets)
    targets <- targets[,1]
    value <- data %>% select(!!!vars[[2]])
    variable_name <- colnames(value)
    # Transformations
    value <- value[,1] # do.call("c", value)
    value <- force_class(value, force)
    value <- fxtrim(value, trim)
    value <- fxclean(value, clean)
    # Fix: trim/clean are documented to apply to the target variable too,
    # but the target was never transformed before.
    targets <- fxtrim(targets, trim)
    targets <- fxclean(targets, clean)
    if (length(targets) != length(value)) {
      message("The targets and value vectors should be the same length.")
      # Fix: stop(message(...)) raised an error with an empty message
      # because message() returns NULL.
      stop(paste("Currently, targets has", length(targets),
                 "rows and value has", length(value)))
    }
    # For num-num distributions or too many unique target variables
    if (length(unique(targets)) >= 8) {
      if (is.numeric(targets) & is.numeric(value)) {
        df <- data.frame(x = targets, y = value)
        df <- fxna_rm(df, na.rm = TRUE)
        p <- ggplot(df, aes(x = x, y = y)) +
          stat_density_2d(aes(fill = ..level..), geom = "polygon") +
          theme_minimal() +
          labs(title = "2D Distribution Plot",
               x = targets_name, y = variable_name,
               subtitle = paste("For", variable_name, "vs.", targets_name),
               caption = paste("Obs:", nrow(df))) +
          scale_x_continuous(labels = scales::comma) +
          scale_y_continuous(labels = scales::comma)
        return(p)
      }
      stop("You should use a 'target' variable with max 8 different values.")
    }
    # Only n numeric values, really numeric?
    if (is.numeric(value) & length(unique(value)) <= 8) {
      value <- force_class(value, class = "char")
    }
    # Turn numeric variables into quantiles
    if (is.numeric(value)) {
      breaks <- ifelse(top != 10, top, breaks)
      value <- quants(value, breaks, return = "labels")
      cuts <- length(unique(value[!is.na(value)]))
      if (cuts != breaks) {
        message(paste("When dividing", variable_name, "into", breaks, "quantiles,",
                      cuts, "cuts/groups are possible."))
      }
      top <- top + 1
    }
    # Finally, we have our data.frame
    df <- data.frame(targets = targets, value = value)
    df <- fxna_rm(df, na.rm)
    # Caption for plots
    # Fix: this used nrow(df) BEFORE df was created in this branch, which
    # errored whenever only the two-variable branch ran; build it after df.
    caption <- paste0("Variables: ", targets_name, " vs. ", variable_name,
                      ". Obs: ", formatNum(nrow(df), 0))
    freqs <- df %>%
      group_by(value, targets) %>%
      tally() %>% arrange(desc(n)) %>%
      mutate(p = round(100*n/sum(n),2)) %>% ungroup() %>%
      mutate(row = row_number(),
             order = ifelse(grepl("\\(|\\)", value),
                            as.numeric(as.character(substr(gsub(",.*", "", value), 2, 100))), row))
    if(length(unique(value)) > top & !is.numeric(value)) {
      message(paste("Filtering the", top, "most frequent values. Use `top` to overrule."))
      # Renamed from `which` to avoid shadowing base::which
      keep <- df %>% group_by(value) %>% tally() %>% arrange(desc(n)) %>% slice(1:top)
      freqs <- freqs %>%
        mutate(value = ifelse(value %in% keep$value, as.character(value), "OTHERS")) %>%
        group_by(value, targets) %>% select(-row, -order) %>%
        summarise(n = sum(n)) %>%
        mutate(p = round(100*n/sum(n),2)) %>%
        ungroup() %>% arrange(desc(n)) %>%
        mutate(row = row_number(),
               order = row_number())
    }
    # Sort values alphabetically or ascending if numeric
    if (abc == TRUE) {
      freqs <- freqs %>% mutate(order = rank(value))
    }
    # Counter plot
    if(type %in% c(1,2)) {
      count <- ggplot(freqs, aes(
        x = reorder(as.character(value), order), y=n,
        fill = tolower(as.character(targets)),
        label = n, ymax = max(n) * 1.1)) +
        geom_col(position = "dodge") +
        geom_text(colour = "black",
                  check_overlap = TRUE,
                  position = position_dodge(0.9),
                  size=3, vjust = -0.15) +
        labs(x = "", y = "Counter", fill = targets_name, caption = caption) +
        theme_minimal() + theme(legend.position = "top") + guides(colour = FALSE) +
        theme(axis.title.y = element_text(size = rel(0.8), angle = 90))
      # Give an angle to labels when more than...
      if (length(unique(value)) >= 7) {
        count <- count + theme(axis.text.x = element_text(angle = 45, hjust=1))
      }
      # Show limit caption when more values than top
      # Fix: this adjustment lived in the proportions section, where `count`
      # does not exist when type == 3 (undefined-object error).
      if (length(unique(value)) > top) {
        count <- count + labs(caption = paste("Showing the", top, "most frequent values"))
      }
      # Custom colours if wanted...
      if (custom_colours == TRUE) {
        count <- count + gg_fill_customs()
      } else {
        count <- count + scale_fill_brewer(palette = "Blues")
      }
    }
    # Proportions (%) plot
    if (type %in% c(1,3)) {
      prop <- freqs %>%
        group_by(value) %>%
        mutate(size = sum(n)/sum(freqs$n)) %>%
        ggplot(aes(x = reorder(value, -order),
                   y = as.numeric(p/100),
                   fill = tolower(as.character(targets)),
                   label = p)) +
        geom_col(position = "fill") +
        geom_text(aes(size = size,
                      colour = ifelse(custom_colours, tolower(as.character(targets)), "none")),
                  check_overlap = TRUE,
                  position = position_stack(vjust = 0.5)) +
        scale_size(range = c(1.8, 3.5)) +
        theme_minimal() + coord_flip() +
        labs(x = "Proportions", y = "", fill = targets_name, caption = caption) +
        theme(legend.position = "top") + ylim(0, 1) + guides(colour = FALSE, size = FALSE) +
        theme(axis.title.y = element_text(size = rel(0.8), angle = 90)) +
        gg_text_customs()
      # Show a reference line if levels = 2; quite useful when data is unbalanced (not 50/50)
      if (length(unique(targets)) == 2) {
        distr <- df %>% freqs(targets)
        h <- signif(100 - distr$pcum[1], 3)
        prop <- prop +
          geom_hline(yintercept = h/100, colour = "purple",
                     linetype = "dotted", alpha = 0.8) +
          geom_label(aes(0, h/100, label = h, vjust = -0.05),
                     size = 2.3, fill="white", alpha = 0.8)
      }
      # Custom colours if wanted...
      if (custom_colours == TRUE) {
        prop <- prop + gg_fill_customs()
      } else {
        prop <- prop + scale_fill_brewer(palette = "Blues")
      }
    }
    # Export file name and folder
    if (save == TRUE) {
      file_name <- paste0(
        "viz_distr_",
        cleanText(targets_name), ".vs.",
        cleanText(variable_name),
        case_when(type == 2 ~ "_c", type == 3 ~ "_p", TRUE ~ ""),".png")
      if (!is.na(subdir)) {
        # NOTE(review): warnings are disabled globally here and never
        # restored; dir.create() only warns when the directory exists.
        options(warn=-1)
        dir.create(file.path(getwd(), subdir), recursive = T)
        file_name <- paste(subdir, file_name, sep="/")
      }
    }
    # Plot the results and save if needed
    if (type == 1) {
      prop <- prop + guides(fill=FALSE)
      count <- count + labs(caption = "")
      if (save == TRUE) {
        png(file_name, height = 1000, width = 1300, res = 200)
        gridExtra::grid.arrange(count, prop, ncol = 1, nrow = 2)
        dev.off()
      }
      invisible(gridExtra::grid.arrange(count, prop, ncol = 1, nrow = 2))
    }
    if (type == 2) {
      if (save == TRUE) {
        count <- count +
          ggsave(file_name, width = 8, height = 6)
      }
      plot(count)
    }
    if (type == 3) {
      if (save == TRUE) {
        prop <- prop +
          ggsave(file_name, width = 8, height = 6)
      }
      plot(prop)
    }
    # Return table with results?
    if (results == TRUE) {
      table <- freqs %>% select(-order)
      return(table)
    }
  }
}
|
##################################################
# Summarize analyses of CNBSS-I, and CNBSS-II
##################################################
# NOTE(review): `ind.FU` (an index 1..5 into the follow-up vectors) is read
# below but never defined in this file -- presumably set by the sourcing
# script; confirm before running stand-alone.
##################################################
# CNBSS-I data:
##################################################
# Rows = screening rounds 1-5; values taken from Baines 2016 Table 2A
# (third column appears to be the number of participants per round --
# TODO confirm the meaning of the first two columns against the table).
cnbss40 <- matrix(c(98, 19, 25214,
                    39, 16, 22424,
                    44, 8, 22066,
                    52, 10, 21839,
                    26, NA , 14146),
                  byrow=TRUE,
                  ncol=3)
## from Baines 2016 Table 2A; years of FU: 1,2,3,4,5, 10, 15, 20
FU.40=c(9,59,112,168,221) # cumulative numbers of observed cases
a<-FU.40[2:5]-FU.40[1] # cumulative starting in year two of follow up
a<-round(a*14111/24742) # rescale back to the number of people who remained in screening until 5th scree
FU.40[2:5]<-FU.40[1]+a # cumulative numbers adjusted
cnbss40[5,2]<-FU.40[ind.FU] # complete the matrix (fills the NA in round 5)
# Convert cumulative counts up to year ind.FU into per-year counts via first
# differences; with only one year of follow-up keep the single value.
if(ind.FU>1){
  FU.40 <- c(FU.40[1],FU.40[2:ind.FU]-FU.40[1:(ind.FU-1)]) # annual rates (instead of cumulative numbers)
} else {
  FU.40 <- c(FU.40[1])
}
##################################################
# CNBSS-II data:
##################################################
# Same layout as cnbss40, for the CNBSS-II cohort (Baines 2016 Table 2B).
cnbss50 <- matrix(c(142, 15, 19711,
                    66, 10, 17669,
                    43, 9, 17347,
                    54, 9, 17193,
                    28, NA, 9876),
                  byrow=TRUE,
                  ncol=3)
## from Baines 2016 Table 2B; years of FU: 1,2,3,4,5, 10, 15, 20
FU.50=c(5, 48, 78, 123, 181) # observed numbers
a<-FU.50[2:5]-FU.50[1] # cumulative starting in year two of follow up
a<-round(a*9843/19111) # rescale back to the number of people who remained in screening until 5th screen
FU.50[2:5]<-FU.50[1]+a # cumulative numbers adjusted
cnbss50[5,2]<-FU.50[ind.FU] # complete the matrix (fills the NA in round 5)
# Same cumulative-to-annual conversion as for CNBSS-I above.
if(ind.FU>1){
  FU.50 <- c(FU.50[1],FU.50[2:ind.FU]-FU.50[1:(ind.FU-1)]) # annual rates (instead of cumulative numbers)
} else {
  FU.50 <- c(FU.50[1])
}
|
/trials_FU.R
|
no_license
|
roman-gulati/FH_Duke
|
R
| false
| false
| 1,973
|
r
|
##################################################
# Summarize analyses of CNBSS-I, and CNBSS-II
##################################################
# NOTE(review): `ind.FU` (an index 1..5 into the follow-up vectors) is read
# below but never defined in this file -- presumably set by the sourcing
# script; confirm before running stand-alone.
##################################################
# CNBSS-I data:
##################################################
# Rows = screening rounds 1-5; values taken from Baines 2016 Table 2A
# (third column appears to be the number of participants per round --
# TODO confirm the meaning of the first two columns against the table).
cnbss40 <- matrix(c(98, 19, 25214,
                    39, 16, 22424,
                    44, 8, 22066,
                    52, 10, 21839,
                    26, NA , 14146),
                  byrow=TRUE,
                  ncol=3)
## from Baines 2016 Table 2A; years of FU: 1,2,3,4,5, 10, 15, 20
FU.40=c(9,59,112,168,221) # cumulative numbers of observed cases
a<-FU.40[2:5]-FU.40[1] # cumulative starting in year two of follow up
a<-round(a*14111/24742) # rescale back to the number of people who remained in screening until 5th scree
FU.40[2:5]<-FU.40[1]+a # cumulative numbers adjusted
cnbss40[5,2]<-FU.40[ind.FU] # complete the matrix (fills the NA in round 5)
# Convert cumulative counts up to year ind.FU into per-year counts via first
# differences; with only one year of follow-up keep the single value.
if(ind.FU>1){
  FU.40 <- c(FU.40[1],FU.40[2:ind.FU]-FU.40[1:(ind.FU-1)]) # annual rates (instead of cumulative numbers)
} else {
  FU.40 <- c(FU.40[1])
}
##################################################
# CNBSS-II data:
##################################################
# Same layout as cnbss40, for the CNBSS-II cohort (Baines 2016 Table 2B).
cnbss50 <- matrix(c(142, 15, 19711,
                    66, 10, 17669,
                    43, 9, 17347,
                    54, 9, 17193,
                    28, NA, 9876),
                  byrow=TRUE,
                  ncol=3)
## from Baines 2016 Table 2B; years of FU: 1,2,3,4,5, 10, 15, 20
FU.50=c(5, 48, 78, 123, 181) # observed numbers
a<-FU.50[2:5]-FU.50[1] # cumulative starting in year two of follow up
a<-round(a*9843/19111) # rescale back to the number of people who remained in screening until 5th screen
FU.50[2:5]<-FU.50[1]+a # cumulative numbers adjusted
cnbss50[5,2]<-FU.50[ind.FU] # complete the matrix (fills the NA in round 5)
# Same cumulative-to-annual conversion as for CNBSS-I above.
if(ind.FU>1){
  FU.50 <- c(FU.50[1],FU.50[2:ind.FU]-FU.50[1:(ind.FU-1)]) # annual rates (instead of cumulative numbers)
} else {
  FU.50 <- c(FU.50[1])
}
|
# Copyright 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @export
generics::glance
# Internal helper: build a one-row summary for a single fitted tmbfit
# object. Computes AIC and the small-sample corrected AICc from the
# parameter count, log-likelihood and number of observations.
.glance_tmbfit <- function(x, nobs) {
  dist_name <- .dist_tmbfit(x)
  k <- npars(x)
  ll <- logLik(x)
  aic_value <- 2 * k - 2 * ll
  # AICc = AIC + 2k(k + 1) / (n - k - 1)
  aicc_value <- aic_value + 2 * k * (k + 1) / (nobs - k - 1)
  tibble(
    dist = dist_name,
    npars = k,
    nobs = nobs,
    log_lik = ll,
    aic = aic_value,
    aicc = aicc_value
  )
}
#' Get a tibble summarizing each distribution
#'
#' Returns a tibble with one row per fitted distribution, including
#' information criteria (AIC/AICc), delta values and Akaike weights.
#'
#' @inheritParams params
#' @return A tidy tibble of the distributions.
#' @family generics
#' @seealso [`ssd_gof()`]
#' @export
#' @examples
#' fits <- ssd_fit_dists(ssddata::ccme_boron)
#' glance(fits)
glance.fitdists <- function(x, ...) {
  chk_unused(...)
  n <- nobs(x)
  # One summary row per fitted distribution.
  summaries <- bind_rows(lapply(x, .glance_tmbfit, nobs = n))
  summaries$delta <- summaries$aicc - min(summaries$aicc)
  # AICc is NA when nobs == npars + 1; fall back to plain AIC deltas
  # when all fits share the same number of parameters.
  if (is.na(summaries$delta[1]) && all(summaries$npars == summaries$npars[1])) {
    summaries$delta <- summaries$aic - min(summaries$aic)
  }
  rel_lik <- exp(-summaries$delta / 2)
  summaries$weight <- rel_lik / sum(rel_lik) # Akaike weights
  summaries
}
|
/R/glance.R
|
permissive
|
beckyfisher/ssdtools
|
R
| false
| false
| 1,659
|
r
|
# Copyright 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @export
generics::glance
# Internal helper: build a one-row summary for a single fitted tmbfit
# object. Computes AIC and the small-sample corrected AICc from the
# parameter count, log-likelihood and number of observations.
.glance_tmbfit <- function(x, nobs) {
  dist_name <- .dist_tmbfit(x)
  k <- npars(x)
  ll <- logLik(x)
  aic_value <- 2 * k - 2 * ll
  # AICc = AIC + 2k(k + 1) / (n - k - 1)
  aicc_value <- aic_value + 2 * k * (k + 1) / (nobs - k - 1)
  tibble(
    dist = dist_name,
    npars = k,
    nobs = nobs,
    log_lik = ll,
    aic = aic_value,
    aicc = aicc_value
  )
}
#' Get a tibble summarizing each distribution
#'
#' Returns a tibble with one row per fitted distribution, including
#' information criteria (AIC/AICc), delta values and Akaike weights.
#'
#' @inheritParams params
#' @return A tidy tibble of the distributions.
#' @family generics
#' @seealso [`ssd_gof()`]
#' @export
#' @examples
#' fits <- ssd_fit_dists(ssddata::ccme_boron)
#' glance(fits)
glance.fitdists <- function(x, ...) {
  chk_unused(...)
  n <- nobs(x)
  # One summary row per fitted distribution.
  summaries <- bind_rows(lapply(x, .glance_tmbfit, nobs = n))
  summaries$delta <- summaries$aicc - min(summaries$aicc)
  # AICc is NA when nobs == npars + 1; fall back to plain AIC deltas
  # when all fits share the same number of parameters.
  if (is.na(summaries$delta[1]) && all(summaries$npars == summaries$npars[1])) {
    summaries$delta <- summaries$aic - min(summaries$aic)
  }
  rel_lik <- exp(-summaries$delta / 2)
  summaries$weight <- rel_lik / sum(rel_lik) # Akaike weights
  summaries
}
|
##############################################################################################
##############################################################################################
#####  CREATED 1/28/2018            #####
#install.packages("lqa")
###predictors correlation of 0.2
# Simulation driver set-up for comparing IPTW, AIPTW and PENCOMP estimators
# with adaptive-lasso variable selection (case 2b, sample size 1000).
# NOTE(review): rm(list=ls()) wipes the entire workspace -- avoid sourcing
# this file from an interactive session whose objects you care about.
rm(list=ls())
library("lqa") # version 1.0-3
library(MASS) # version 3.3.1
library("mgcv")
require(glmnet)
#########################################################
#########################################################
# Input/output directories and helper-function sources used by the run.
DIREC="/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2b/"
DIRECOUT="/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2b/sampleSize1000"
funLoc = "Functions/"
source(paste0(DIREC, funLoc, "addFun.R"))
source(paste0(DIREC, funLoc, "formulaConstruct.R"))
source(paste0(DIREC, funLoc, "simDataAll.R"))
source(paste0(DIREC, funLoc, "variableSelectY.R"))
source(paste0(DIREC, funLoc, "variableSelectT.R"))
source(paste0(DIREC, funLoc, "variableSelectT2.R"))
source(paste0(DIREC, funLoc, "allLasso.R"))
source(paste0(DIREC, funLoc, "pencompFit.R"))
# Capacity of the result matrices (one row per simulation replicate).
numRun=500
############IPTW, AIPTW and PENCOMP estimators###############
###standard CI
# Each matrix has 4 columns per replicate; given how rows are filled later
# in this script, they appear to be: point estimate, bootstrap SE, and the
# lower/upper 95% confidence limits -- confirm against the loop body.
estFinal_iptw=matrix(NA, nrow=numRun, ncol=4)
estFinal_aiptw=matrix(NA, nrow=numRun, ncol=4)
estFinal_pencomp=matrix(NA, nrow=numRun, ncol=4)
###bagging estimator
estFinal_iptw_bag=matrix(NA, nrow=numRun, ncol=4)
estFinal_aiptw_bag=matrix(NA, nrow=numRun, ncol=4)
estFinal_pencomp_bag=matrix(NA, nrow=numRun, ncol=4)
###percentiles
estFinal_iptw_per=matrix(NA, nrow=numRun, ncol=4)
estFinal_aiptw_per=matrix(NA, nrow=numRun, ncol=4)
estFinal_pencomp_per=matrix(NA, nrow=numRun, ncol=4)
###Rubin's combining rule
estFinal_pencomp_rubin=matrix(NA, nrow=numRun, ncol=4)
# Average variable-selection results per candidate predictor (20 predictors,
# matching numPred in the simulation settings used later in the script).
varSelPropY=matrix(NA, nrow=numRun, ncol = 20)
varSelPropT=matrix(NA, nrow=numRun, ncol = 20)
# Range of replicate indices processed by this job (subset of 1..numRun).
start=1
end=100
for(d in start:end)
{
tryCatch (
{
numT=1000
sampleSize=1000
numPred=20 ##number of predictors
level="high"
simdatG=simulateDate(sampleSize=sampleSize, numPred=numPred, overlapL=level, seed.num=d, rho=0, treatEff=2)
simdat=simdatG[[1]]
varList=simdatG[[2]]
outcome.varname="Y"
treat.varname="A"
splineTerm="s(pslogit, bs=\"ps\", k=15)" ###
firstNum="treat" ###adaptive lasso on the outcome first and then propensity score model
Method="REML"
##for both propensity and prediction models
modelType="allLasso" ###seperate adaptive lasso on the propensity and prediction models
outcomeVarList0=NULL
outcomeVarList1=NULL
propenVarList=NULL
##############################################################################################################################
####################################################################################################################
# print out IPTW, AIPTW and PENCOMP estimates corresponding to smallest wAMD value
estimate.out=NULL
estimate.out=pencompAllLasso(dataOR=simdat, data=simdat, varList=varList, propenVarList=propenVarList,
outcomeVarList0=outcomeVarList0, outcomeVarList1=outcomeVarList1,
treat.varname=treat.varname, outcome.varname=outcome.varname)
if( typeof(estimate.out) == "list" ){ ###if output is list, should be right
estFinal_iptw[d,1]=estimate.out$out[1]
estFinal_aiptw[d,1]=estimate.out$out[2]
estFinal_pencomp[d,1]=estimate.out$out[3]
}
estimate.boot=matrix(NA, nrow=numT, ncol=3) ###IPTW, AIPTW and PENCOMP estimates from each bootstrap sample
pencomp.rubin=matrix(NA, nrow=numT, ncol=2) ###variance of PENCOMP
pencomp.numKnot=matrix(NA, nrow=numT, ncol=2) ###number of knots in PENCOMP
varSelectTreat=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in propensity model
varSelectY1=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in the outcome model Y0
varSelectY0=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in the outcome modely1
countMat=matrix(NA, ncol=nrow(simdat), nrow=numT)
for(ind in 1:numT){
tryCatch (
{
set.seed(ind)
bootSample = simdat[sample(1:nrow(simdat),replace=T),] ###random bootstraps
tempCount=numeric(nrow(bootSample))
for(countIndex in 1:length(tempCount)){
tempCount[countIndex] = sum(bootSample$id2==countIndex)
}
mulResult = pencompAllLasso(dataOR=simdat, data=bootSample, varList=varList, propenVarList=propenVarList,
outcomeVarList0=outcomeVarList0, outcomeVarList1=outcomeVarList1,
treat.varname=treat.varname, outcome.varname=outcome.varname )
if( typeof(mulResult) == "list" ){ ###if output is list, should be right
estimate.boot[ind,] = (mulResult$out)[c(1, 2, 3)]
pencomp.rubin[ind,] = (mulResult$out)[c(4, 5)]
varSelectTreat[ind,]=mulResult$varTreat
varSelectY1[ind,]=mulResult$varY1
varSelectY0[ind,]=mulResult$varY0
countMat[ind,]=tempCount
pencomp.numKnot[ind,]=mulResult$numK ###number of knots in PENCOMP
}
}
,
error=function(e) { }
)
}
if(d < 10){
####store bootstrap estimates
bootResult=cbind(estimate.out$out[1], estimate.out$out[2], estimate.out$out[3], estimate.boot, pencomp.rubin, pencomp.numKnot)
write.table(bootResult, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, ".txt",sep=""), row.name=F, quote=F, sep="\t",
col.names = c("iptwOR", "aiptwOR","pencompOR", "iptw", "aiptw","pencompBoot", "pencompRubin", "pencompRubinVar", "K0", "K1"))
}
####store counts of each datapoint in each bootstrap (for calculating Brad Efron's CI)
#write.table(countMat, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "countMat.txt",sep=""), row.name=F, quote=F, sep="\t")
varSelPropY[d,]=colMeans(varSelectY0, na.rm = T)
varSelPropT[d,]=colMeans(varSelectTreat, na.rm = T)
write.table(varSelPropY, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectY.txt",sep=""), row.name=F, quote=F, col.names = varList,
sep="\t")
write.table(varSelPropT, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectTreat.txt",sep=""), row.name=F, quote=F, col.names = varList,
sep="\t")
####store coefficients of outcome model Y1
# write.table(varSelectY1, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "varSelectY1.txt",sep=""), row.name=F, quote=F, col.names = varList,
# sep="\t")
#########standard confidence interval, Rubin's combining rule for PENCOMP
estFinal_pencomp_rubin[d,]=processPENCOMP(t(pencomp.rubin))
estFinal_iptw[d, 2:4]=c( sd(estimate.boot[,1], na.rm = T), estFinal_iptw[d,1] + c(-1, 1)*1.96*sd(estimate.boot[, 1], na.rm = T) )
estFinal_aiptw[d, 2:4]=c( sd(estimate.boot[,2], na.rm = T), estFinal_aiptw[d,1] + c(-1, 1)*1.96*sd(estimate.boot[,2], na.rm = T) )
estFinal_pencomp[d, 2:4]=c( sd(estimate.boot[,3], na.rm = T), estFinal_pencomp[d,1] + c(-1, 1)*1.96*sd(estimate.boot[,3], na.rm = T) )
##############################################
#### bagging estimator accounting for model selection
estFinal_iptw_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,1], sampleSize=sampleSize)
estFinal_aiptw_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,2], sampleSize=sampleSize)
estFinal_pencomp_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,3], sampleSize=sampleSize)
##############################################
#### confidence interval based on quantiles
estFinal_iptw_per[d,]=percentile(estimate=estimate.boot[,1])
estFinal_aiptw_per[d,]=percentile(estimate=estimate.boot[,2])
estFinal_pencomp_per[d,]=percentile(estimate=estimate.boot[,3])
resultTable=NULL
resultTable=data.frame(estFinal_pencomp, estFinal_pencomp_bag, estFinal_pencomp_per, estFinal_pencomp_rubin)
write.table(resultTable, paste(DIRECOUT, "/Results/pencomp_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t")
resultTable=NULL
resultTable=data.frame(estFinal_iptw, estFinal_iptw_bag, estFinal_iptw_per)
write.table(resultTable, paste(DIRECOUT, "/Results/iptw_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t")
resultTable=NULL
resultTable=data.frame(estFinal_aiptw, estFinal_aiptw_bag, estFinal_aiptw_per)
write.table(resultTable, paste(DIRECOUT, "/Results/aiptw_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t")
}
,
error=function(e) { }
)
}
|
/simulation/case2b/sampleSize1000/allLasso_high_start_1.R
|
no_license
|
TingtingKayla/Stats_Robust_Causal_Estimation
|
R
| false
| false
| 8,699
|
r
|
##############################################################################################
##############################################################################################
#####   CREATED 1/28/2018   #####
# Setup for the case2b / sampleSize1000 adaptive-lasso simulation: attaches the
# modelling packages, sources the project helper functions, and pre-allocates
# the result matrices for the IPTW, AIPTW and PENCOMP estimators.
#install.packages("lqa")
### predictors correlation of 0.2
rm(list = ls())
library("lqa")  # version 1.0-3
library(MASS)   # version 3.3.1
library("mgcv")
library(glmnet) # library() rather than require(): a missing package must fail loudly
#########################################################
#########################################################
# Project input/output locations (absolute paths; adjust per machine).
DIREC <- "/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2b/"
DIRECOUT <- "/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2b/sampleSize1000"
funLoc <- "Functions/"
# Helper functions: data simulation, variable selection, and the PENCOMP fitters.
source(paste0(DIREC, funLoc, "addFun.R"))
source(paste0(DIREC, funLoc, "formulaConstruct.R"))
source(paste0(DIREC, funLoc, "simDataAll.R"))
source(paste0(DIREC, funLoc, "variableSelectY.R"))
source(paste0(DIREC, funLoc, "variableSelectT.R"))
source(paste0(DIREC, funLoc, "variableSelectT2.R"))
source(paste0(DIREC, funLoc, "allLasso.R"))
source(paste0(DIREC, funLoc, "pencompFit.R"))
numRun <- 500  # rows reserved per result matrix (loop below fills rows start:end)
############ IPTW, AIPTW and PENCOMP estimators ###############
### standard bootstrap CI: columns = (point estimate, SE, lower, upper)
estFinal_iptw <- matrix(NA, nrow = numRun, ncol = 4)
estFinal_aiptw <- matrix(NA, nrow = numRun, ncol = 4)
estFinal_pencomp <- matrix(NA, nrow = numRun, ncol = 4)
### bagging estimator (accounts for model selection)
estFinal_iptw_bag <- matrix(NA, nrow = numRun, ncol = 4)
estFinal_aiptw_bag <- matrix(NA, nrow = numRun, ncol = 4)
estFinal_pencomp_bag <- matrix(NA, nrow = numRun, ncol = 4)
### percentile (quantile-based) CIs
estFinal_iptw_per <- matrix(NA, nrow = numRun, ncol = 4)
estFinal_aiptw_per <- matrix(NA, nrow = numRun, ncol = 4)
estFinal_pencomp_per <- matrix(NA, nrow = numRun, ncol = 4)
### Rubin's combining rule (PENCOMP only)
estFinal_pencomp_rubin <- matrix(NA, nrow = numRun, ncol = 4)
# Average selection frequency of each of the 20 candidate predictors, per replicate.
varSelPropY <- matrix(NA, nrow = numRun, ncol = 20)
varSelPropT <- matrix(NA, nrow = numRun, ncol = 20)
start <- 1
end <- 100
# Main simulation loop: for each replicate d, simulate a dataset, compute the
# IPTW / AIPTW / PENCOMP point estimates on it, then bootstrap numT times to
# build standard, bagged, percentile, and Rubin-combined confidence intervals.
# Result tables are rewritten to disk after every replicate so partial runs
# are recoverable.
for (d in start:end)
{
  tryCatch(
    {
      numT <- 1000        # number of bootstrap samples per replicate
      sampleSize <- 1000  # simulated sample size
      numPred <- 20       # number of candidate predictors
      level <- "high"     # overlap level of the treated/control propensity distributions
      # NOTE(review): the helper really is named simulateDate (sic) in simDataAll.R.
      simdatG <- simulateDate(sampleSize = sampleSize, numPred = numPred, overlapL = level, seed.num = d, rho = 0, treatEff = 2)
      simdat <- simdatG[[1]]   # simulated data frame
      varList <- simdatG[[2]]  # names of the candidate predictors
      outcome.varname <- "Y"
      treat.varname <- "A"
      splineTerm <- "s(pslogit, bs=\"ps\", k=15)"  # penalized spline on the propensity logit
      firstNum <- "treat"  # adaptive lasso on the outcome first and then propensity score model
      Method <- "REML"
      ## for both propensity and prediction models
      modelType <- "allLasso"  # separate adaptive lasso on the propensity and prediction models
      outcomeVarList0 <- NULL
      outcomeVarList1 <- NULL
      propenVarList <- NULL
      ##############################################################################################################################
      ####################################################################################################################
      # Point estimates on the full simulated data (corresponding to smallest wAMD value).
      estimate.out <- NULL
      estimate.out <- pencompAllLasso(dataOR = simdat, data = simdat, varList = varList, propenVarList = propenVarList,
                                      outcomeVarList0 = outcomeVarList0, outcomeVarList1 = outcomeVarList1,
                                      treat.varname = treat.varname, outcome.varname = outcome.varname)
      if (typeof(estimate.out) == "list") {  # a list return signals a successful fit
        estFinal_iptw[d, 1] <- estimate.out$out[1]
        estFinal_aiptw[d, 1] <- estimate.out$out[2]
        estFinal_pencomp[d, 1] <- estimate.out$out[3]
      }
      estimate.boot <- matrix(NA, nrow = numT, ncol = 3)    # IPTW, AIPTW and PENCOMP estimates from each bootstrap sample
      pencomp.rubin <- matrix(NA, nrow = numT, ncol = 2)    # PENCOMP estimate and its variance (for Rubin's rule)
      pencomp.numKnot <- matrix(NA, nrow = numT, ncol = 2)  # number of knots in PENCOMP
      varSelectTreat <- matrix(NA, nrow = numT, ncol = length(varList))  # coefficients of variables in the propensity model
      varSelectY1 <- matrix(NA, nrow = numT, ncol = length(varList))     # coefficients of variables in the outcome model Y1
      varSelectY0 <- matrix(NA, nrow = numT, ncol = length(varList))     # coefficients of variables in the outcome model Y0
      countMat <- matrix(NA, ncol = nrow(simdat), nrow = numT)  # per-bootstrap inclusion count of each observation
      for (ind in 1:numT) {
        tryCatch(
          {
            # NOTE(review): the same bootstrap seeds 1..numT are reused for every
            # replicate d -- confirm this is intended rather than seed.num-dependent.
            set.seed(ind)
            bootSample <- simdat[sample(1:nrow(simdat), replace = TRUE), ]  # random bootstrap resample
            tempCount <- numeric(nrow(bootSample))
            for (countIndex in 1:length(tempCount)) {
              tempCount[countIndex] <- sum(bootSample$id2 == countIndex)
            }
            mulResult <- pencompAllLasso(dataOR = simdat, data = bootSample, varList = varList, propenVarList = propenVarList,
                                         outcomeVarList0 = outcomeVarList0, outcomeVarList1 = outcomeVarList1,
                                         treat.varname = treat.varname, outcome.varname = outcome.varname)
            if (typeof(mulResult) == "list") {  # a list return signals a successful fit
              estimate.boot[ind, ] <- (mulResult$out)[c(1, 2, 3)]
              pencomp.rubin[ind, ] <- (mulResult$out)[c(4, 5)]
              varSelectTreat[ind, ] <- mulResult$varTreat
              varSelectY1[ind, ] <- mulResult$varY1
              varSelectY0[ind, ] <- mulResult$varY0
              countMat[ind, ] <- tempCount
              pencomp.numKnot[ind, ] <- mulResult$numK  # number of knots in PENCOMP
            }
          },
          # Individual bootstrap fits may fail; the corresponding row simply stays NA.
          error = function(e) { }
        )
      }
      if (d < 10) {
        #### store the raw bootstrap estimates for the first few replicates only
        bootResult <- cbind(estimate.out$out[1], estimate.out$out[2], estimate.out$out[3], estimate.boot, pencomp.rubin, pencomp.numKnot)
        write.table(bootResult, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, ".txt", sep = ""), row.names = FALSE, quote = FALSE, sep = "\t",
                    col.names = c("iptwOR", "aiptwOR", "pencompOR", "iptw", "aiptw", "pencompBoot", "pencompRubin", "pencompRubinVar", "K0", "K1"))
      }
      #### store counts of each datapoint in each bootstrap (for calculating Brad Efron's CI)
      #write.table(countMat, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "countMat.txt",sep=""), row.name=F, quote=F, sep="\t")
      varSelPropY[d, ] <- colMeans(varSelectY0, na.rm = TRUE)
      varSelPropT[d, ] <- colMeans(varSelectTreat, na.rm = TRUE)
      write.table(varSelPropY, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectY.txt", sep = ""), row.names = FALSE, quote = FALSE, col.names = varList,
                  sep = "\t")
      write.table(varSelPropT, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectTreat.txt", sep = ""), row.names = FALSE, quote = FALSE, col.names = varList,
                  sep = "\t")
      #### store coefficients of outcome model Y1
      # write.table(varSelectY1, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "varSelectY1.txt",sep=""), row.name=F, quote=F, col.names = varList,
      #             sep="\t")
      ######### standard confidence interval; Rubin's combining rule for PENCOMP
      estFinal_pencomp_rubin[d, ] <- processPENCOMP(t(pencomp.rubin))
      estFinal_iptw[d, 2:4] <- c(sd(estimate.boot[, 1], na.rm = TRUE), estFinal_iptw[d, 1] + c(-1, 1) * 1.96 * sd(estimate.boot[, 1], na.rm = TRUE))
      estFinal_aiptw[d, 2:4] <- c(sd(estimate.boot[, 2], na.rm = TRUE), estFinal_aiptw[d, 1] + c(-1, 1) * 1.96 * sd(estimate.boot[, 2], na.rm = TRUE))
      estFinal_pencomp[d, 2:4] <- c(sd(estimate.boot[, 3], na.rm = TRUE), estFinal_pencomp[d, 1] + c(-1, 1) * 1.96 * sd(estimate.boot[, 3], na.rm = TRUE))
      ##############################################
      #### bagging estimator accounting for model selection
      estFinal_iptw_bag[d, ] <- bagging2(countMat = countMat, estimate = estimate.boot[, 1], sampleSize = sampleSize)
      estFinal_aiptw_bag[d, ] <- bagging2(countMat = countMat, estimate = estimate.boot[, 2], sampleSize = sampleSize)
      estFinal_pencomp_bag[d, ] <- bagging2(countMat = countMat, estimate = estimate.boot[, 3], sampleSize = sampleSize)
      ##############################################
      #### confidence interval based on bootstrap quantiles
      estFinal_iptw_per[d, ] <- percentile(estimate = estimate.boot[, 1])
      estFinal_aiptw_per[d, ] <- percentile(estimate = estimate.boot[, 2])
      estFinal_pencomp_per[d, ] <- percentile(estimate = estimate.boot[, 3])
      # Rewrite the cumulative result tables after every replicate.
      resultTable <- NULL
      resultTable <- data.frame(estFinal_pencomp, estFinal_pencomp_bag, estFinal_pencomp_per, estFinal_pencomp_rubin)
      write.table(resultTable, paste(DIRECOUT, "/Results/pencomp_", modelType, "_", level, "_start_", start, ".txt", sep = ""), row.names = FALSE, quote = FALSE, sep = "\t")
      resultTable <- NULL
      resultTable <- data.frame(estFinal_iptw, estFinal_iptw_bag, estFinal_iptw_per)
      write.table(resultTable, paste(DIRECOUT, "/Results/iptw_", modelType, "_", level, "_start_", start, ".txt", sep = ""), row.names = FALSE, quote = FALSE, sep = "\t")
      resultTable <- NULL
      resultTable <- data.frame(estFinal_aiptw, estFinal_aiptw_bag, estFinal_aiptw_per)
      write.table(resultTable, paste(DIRECOUT, "/Results/aiptw_", modelType, "_", level, "_start_", start, ".txt", sep = ""), row.names = FALSE, quote = FALSE, sep = "\t")
    },
    # Report (rather than silently drop) a failed replicate; its result rows stay NA.
    error = function(e) message("replicate ", d, " failed: ", conditionMessage(e))
  )
}
|
# source('src/final.models.r')
#
# Real and fake cross-tabulations by model
#
# How many replications is the simulation?
n <- 2    # small value kept for quick debugging runs
n <- 100  # production replication count (deliberately overrides the debug value)
set.seed(202)

# Stack n independent fake cross-tabulations from one model.
# Replaces the original grow-by-rbind loop (quadratic copying) with a single
# bind; crosstabs.fake.fn is still called n times in the same order, so the
# RNG stream -- and therefore the simulated tables -- are unchanged.
replicate.crosstabs <- function(model, fake.participant) {
  do.call(rbind, lapply(seq_len(n), function(i) crosstabs.fake.fn(model, fake.participant)))
}

# Model 1: expected posture. Real counts of expected == 0/1 per cell, plus odds.
crosstabs.real.1 <- dcast(expected.posture.df, sex + task + privacy + cleanliness ~ as.numeric(expected), value.var = 'sex', fun.aggregate = length)
crosstabs.real.1$odds <- crosstabs.real.1[,'1'] / crosstabs.real.1[,'0']
crosstabs.fake.1 <- replicate.crosstabs(model.1, model.1.fake.participant)

# Model 2: observed posture (hover vs not).
crosstabs.real.2 <- dcast(posture.df, sex + task + privacy + cleanliness ~ as.numeric(posture == 'hover'), value.var = 'sex', fun.aggregate = length)
crosstabs.real.2$odds <- crosstabs.real.2[,'1'] / crosstabs.real.2[,'0']
crosstabs.fake.2 <- replicate.crosstabs(model.2, model.2.fake.participant)

# Plot, and relate the real to the fake
print('Plotting model 1')
pdf('graphs/simulations.1.pdf')
id.vars <- c('sex', 'task', 'privacy', 'cleanliness')
d_ply(crosstabs.fake.1, id.vars, function(crosstabs.fake.slice) {
  print(nrow(crosstabs.fake.slice))
  print(plot.crosstabs(crosstabs.real.1, crosstabs.fake.slice))
})
dev.off()
print('Plotting model 2')
pdf('graphs/simulations.2.pdf')
id.vars <- c('sex', 'task', 'privacy', 'cleanliness')
d_ply(crosstabs.fake.2, id.vars, function(crosstabs.fake.slice) {
  print(plot.crosstabs(crosstabs.real.2, crosstabs.fake.slice))
})
dev.off()
|
/analyze/src/model.fit.r
|
no_license
|
tlevine/toilet-posture-release
|
R
| false
| false
| 1,575
|
r
|
# source('src/final.models.r')
#
# Real and fake cross-tabulations by model
#
# How many replications is the simulation?
n <- 2    # small value kept for quick debugging runs
n <- 100  # production replication count (deliberately overrides the debug value)
set.seed(202)

# Stack n independent fake cross-tabulations from one model.
# Replaces the original grow-by-rbind loop (quadratic copying) with a single
# bind; crosstabs.fake.fn is still called n times in the same order, so the
# RNG stream -- and therefore the simulated tables -- are unchanged.
replicate.crosstabs <- function(model, fake.participant) {
  do.call(rbind, lapply(seq_len(n), function(i) crosstabs.fake.fn(model, fake.participant)))
}

# Model 1: expected posture. Real counts of expected == 0/1 per cell, plus odds.
crosstabs.real.1 <- dcast(expected.posture.df, sex + task + privacy + cleanliness ~ as.numeric(expected), value.var = 'sex', fun.aggregate = length)
crosstabs.real.1$odds <- crosstabs.real.1[,'1'] / crosstabs.real.1[,'0']
crosstabs.fake.1 <- replicate.crosstabs(model.1, model.1.fake.participant)

# Model 2: observed posture (hover vs not).
crosstabs.real.2 <- dcast(posture.df, sex + task + privacy + cleanliness ~ as.numeric(posture == 'hover'), value.var = 'sex', fun.aggregate = length)
crosstabs.real.2$odds <- crosstabs.real.2[,'1'] / crosstabs.real.2[,'0']
crosstabs.fake.2 <- replicate.crosstabs(model.2, model.2.fake.participant)

# Plot, and relate the real to the fake
print('Plotting model 1')
pdf('graphs/simulations.1.pdf')
id.vars <- c('sex', 'task', 'privacy', 'cleanliness')
d_ply(crosstabs.fake.1, id.vars, function(crosstabs.fake.slice) {
  print(nrow(crosstabs.fake.slice))
  print(plot.crosstabs(crosstabs.real.1, crosstabs.fake.slice))
})
dev.off()
print('Plotting model 2')
pdf('graphs/simulations.2.pdf')
id.vars <- c('sex', 'task', 'privacy', 'cleanliness')
d_ply(crosstabs.fake.2, id.vars, function(crosstabs.fake.slice) {
  print(plot.crosstabs(crosstabs.real.2, crosstabs.fake.slice))
})
dev.off()
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## X=makeCacheMatrix() creates a reassignable matrix that is meant to hold its inverse matrix
## X$set(x) allows us to reset the stored matrix
## X$get() returns the stored matrix
## getinverse and setinverse shouldn't be used directly, use cacheSolve(X) instead
makeCacheMatrix <- function(x = matrix()) {
  # A cache-aware matrix wrapper: keeps `x` plus a memo slot for its inverse
  # inside this function's environment. Fill the memo via cacheSolve().
  # Returns a list of four accessor closures sharing that environment.
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Swap in a new matrix and drop the now-stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Write a short comment describing this function
## Computes and caches the inverse of a matrix created via makeCacheMatrix
## Usage: X = makeCacheMatrix()
## cacheSolve(X) ## Computes, caches and returns the inverse of X
## cacheSolve(X) ## uses the cached value
## X$set(y)        ## Resets the matrix stored in X, and clears the cache
## cacheSolve(X) ## Computes, caches and returns the inverse of the new matrix in X
## cacheSolve(X) ## uses the new cached value
## See after the code for this function if interested in an alternative way to do the caching
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held in `x` (a makeCacheMatrix object),
  # computing it at most once: subsequent calls reuse the stored result.
  # Extra arguments in `...` are forwarded to solve() on a cache miss.
  cached <- x$getinverse()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
### In my opinion, a more elegant solution exists, where cacheSolve is incorporated
### into makeCacheMatrix's getinverse. Sample code follows, commented so it does not execute.
### The idea is that getinverse simply calls an internal function called f. That function defaults to
### a function called getinverseDefault, which when executed computes the inverse of the matrix,
### and then replaces the function f with a version that just accesses this computed inverse.
### Subsequent calls to getinverse will just use this overwritten f.
### A call to X$set resets f back to this default.
###
### Only one function needed with this technique
# alternateCacheMatrix <- function(x = matrix()) {
# f <- getinverseDefault <- function() {
# inv <- solve(x)
# f <<- function() { message("cached"); inv }
# inv
# }
# set <- function(y) {
# x <<- y
# f <<- getinverseDefault
# }
# get <- function() x
# getinverse <- function() { f() }
# list(set = set, get = get,
# getinverse = getinverse)
# }
#
# y = alternateCacheMatrix(matrix(rnorm(16), 4))
# y$get()
# y$getinverse()
# y$getinverse()
# y$set(matrix(rnorm(16), 4))
# y$get()
# y$getinverse()
# y$getinverse()
|
/cachematrix.R
|
no_license
|
skiadas/ProgrammingAssignment2
|
R
| false
| false
| 2,934
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## X=makeCacheMatrix() creates a reassignable matrix that is meant to hold its inverse matrix
## X$set(x) allows us to reset the stored matrix
## X$get() returns the stored matrix
## getinverse and setinverse shouldn't be used directly, use cacheSolve(X) instead
makeCacheMatrix <- function(x = matrix()) {
  # Constructor for a matrix container that can memoise its inverse.
  # The matrix and the cached inverse live in this call's environment;
  # the returned list exposes four closures over that shared state.
  inv <- NULL
  set <- function(y) {
    x <<- y       # replace the payload matrix
    inv <<- NULL  # any previously cached inverse is no longer valid
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv <<- inverse
  }
  getinverse <- function() {
    inv
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## Computes and caches the inverse of a matrix created via makeCacheMatrix
## Usage: X = makeCacheMatrix()
## cacheSolve(X) ## Computes, caches and returns the inverse of X
## cacheSolve(X) ## uses the cached value
## X$set(y)        ## Resets the matrix stored in X, and clears the cache
## cacheSolve(X) ## Computes, caches and returns the inverse of the new matrix in X
## cacheSolve(X) ## uses the new cached value
## See after the code for this function if interested in an alternative way to do the caching
cacheSolve <- function(x, ...) {
  # Inverse of the matrix wrapped by `x`, served from the cache when possible.
  # On a cache miss, computes solve(x$get(), ...) and stores it for next time.
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
  } else {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  }
  inv
}
### In my opinion, a more elegant solution exists, where cacheSolve is incorporated
### into makeCacheMatrix's getinverse. Sample code follows, commented so it does not execute.
### The idea is that getinverse simply calls an internal function called f. That function defaults to
### a function called getinverseDefault, which when executed computes the inverse of the matrix,
### and then replaces the function f with a version that just accesses this computed inverse.
### Subsequent calls to getinverse will just use this overwritten f.
### A call to X$set resets f back to this default.
###
### Only one function needed with this technique
# alternateCacheMatrix <- function(x = matrix()) {
# f <- getinverseDefault <- function() {
# inv <- solve(x)
# f <<- function() { message("cached"); inv }
# inv
# }
# set <- function(y) {
# x <<- y
# f <<- getinverseDefault
# }
# get <- function() x
# getinverse <- function() { f() }
# list(set = set, get = get,
# getinverse = getinverse)
# }
#
# y = alternateCacheMatrix(matrix(rnorm(16), 4))
# y$get()
# y$getinverse()
# y$getinverse()
# y$set(matrix(rnorm(16), 4))
# y$get()
# y$getinverse()
# y$getinverse()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_game.R, R/montyhall.R
\name{create_game}
\alias{create_game}
\title{Create a new Monty Hall Problem game.}
\usage{
create_game()
create_game()
}
\arguments{
\item{...}{no arguments are used by the function.}
}
\value{
The function returns a length 3 character vector
indicating the positions of goats and the car.
The function returns a length 3 character vector
indicating the positions of goats and the car.
}
\description{
create_game() generates a new game that consists of two doors
with goats behind them, and one with a car.
\code{create_game()} generates a new game that consists of two doors
with goats behind them, and one with a car.
}
\details{
The game setup replicates the game on the TV show "Let's
Make a Deal" where there are three doors for a contestant
to choose from, one of which has a car behind it and two
have goats. The contestant selects a door, then the host
opens a door to reveal a goat, and then the contestant is
given an opportunity to stay with their original selection
or switch to the other unopened door. There was a famous
debate about whether it was optimal to stay or switch when
given the option to switch, so this simulation was created
to test both strategies.
The game setup replicates the game on the TV show "Let's
Make a Deal" where there are three doors for a contestant
to choose from, one of which has a car behind it and two
have goats. The contestant selects a door, then the host
opens a door to reveal a goat, and then the contestant is
given an opportunity to stay with their original selection
or switch to the other unopened door. There was a famous
debate about whether it was optimal to stay or switch when
given the option to switch, so this simulation was created
to test both strategies.
}
\examples{
create_game()
create_game()
}
|
/man/create_game.Rd
|
no_license
|
AhmedRashwanASU/montyhallgame
|
R
| false
| true
| 1,886
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_game.R, R/montyhall.R
\name{create_game}
\alias{create_game}
\title{Create a new Monty Hall Problem game.}
\usage{
create_game()
create_game()
}
\arguments{
\item{...}{no arguments are used by the function.}
}
\value{
The function returns a length 3 character vector
indicating the positions of goats and the car.
The function returns a length 3 character vector
indicating the positions of goats and the car.
}
\description{
create_game() generates a new game that consists of two doors
with goats behind them, and one with a car.
\code{create_game()} generates a new game that consists of two doors
with goats behind them, and one with a car.
}
\details{
The game setup replicates the game on the TV show "Let's
Make a Deal" where there are three doors for a contestant
to choose from, one of which has a car behind it and two
have goats. The contestant selects a door, then the host
opens a door to reveal a goat, and then the contestant is
given an opportunity to stay with their original selection
or switch to the other unopened door. There was a famous
debate about whether it was optimal to stay or switch when
given the option to switch, so this simulation was created
to test both strategies.
The game setup replicates the game on the TV show "Let's
Make a Deal" where there are three doors for a contestant
to choose from, one of which has a car behind it and two
have goats. The contestant selects a door, then the host
opens a door to reveal a goat, and then the contestant is
given an opportunity to stay with their original selection
or switch to the other unopened door. There was a famous
debate about whether it was optimal to stay or switch when
given the option to switch, so this simulation was created
to test both strategies.
}
\examples{
create_game()
create_game()
}
|
remove(list = ls())
graphics.off()
# ggplot2 / cowplot were used below but never attached; make the dependency explicit.
library(ggplot2)
library(cowplot)  # provides save_plot()
# Raw XTT assay slopes for membrane +/- NQR +/- AIR12 preparations.
data <- read.csv("mbp_NQR_AIR12_XTT.csv", header = TRUE)
#############
###CALCULS###
#############
# Convert the measured slope ("pente") to superoxide production (µmol/mg/h).
# NOTE(review): 24200 and 43.6 are assay constants (presumably the extinction
# coefficient and a normalisation factor) -- confirm against the lab protocol.
data$result <- (data$pente * 60 * 10^6) / (24200 * 43.6)
# Mean and standard error of the production rate per addition ("ajout").
# Renamed from `mean` to avoid shadowing base::mean.
mean_result <- aggregate(data$result, list(data$ajout), mean)
StandErr <- function(x) {
  # Standard error of the mean.
  sd(x) / sqrt(length(x))
}
se <- aggregate(data$result, list(data$ajout), StandErr)
result <- cbind(mean_result, se[, 2])
colnames(result) <- c("ajout", "moyenne", "se")
# Order and relabel the treatments for plotting.
result$ajout <- factor(result$ajout,
                       levels = c("mbp", "NQR", "AIR12"),
                       labels = c("mb", "mb + NQR", "mb + NQR + AIR12"))
#############
###BARPLOT###
#############
g <- ggplot(data = result,
            aes(x = ajout,
                y = moyenne)) +
  geom_bar(stat = "identity",
           position = "dodge",
           color = "black",
           width = 0.6) +
  scale_x_discrete(name = "") +
  scale_y_continuous(name = "Superoxide production (µmol/mg/h)\n",
                     expand = c(0, 0)) +
  # Upper error bar only. Use bare column names inside aes(): `result$moyenne`
  # bypasses ggplot's data masking and breaks if the data are ever subset.
  geom_errorbar(data = result,
                position = position_dodge(0.75),
                aes(x = ajout,
                    ymin = moyenne,
                    ymax = moyenne + se),
                width = 0.05)
save_plot("mbp_NQR_AIR12_XTT.png", g, base_aspect_ratio = 1.3)
###########
###STATS###
###########
# Normality per group (Shapiro-Wilk p-values); `result` here is the column of `data`.
shapiroTest <- aggregate(result ~ ajout, data = data,
                         function(x) shapiro.test(x)$p.value)
# all groups normal
# Homogeneity of variances between the NQR and AIR12 additions
bartlett.test(list(data$result[data$ajout == "NQR"],
                   data$result[data$ajout == "AIR12"]))
# equal variances across groups
data2 <- data[, -2]  # drop the raw slope column; keep ajout + result
stats <- aov(result ~ ajout, data = data2)
summary(stats)
TukeyHSD(stats)
# 3 data sets -> 3 significantly different groups
|
/mbp/XTT.R
|
no_license
|
CathyCat88/thesis
|
R
| false
| false
| 1,806
|
r
|
remove(list = ls())
graphics.off()
# ggplot2 / cowplot were used below but never attached; make the dependency explicit.
library(ggplot2)
library(cowplot)  # provides save_plot()
# Raw XTT assay slopes for membrane +/- NQR +/- AIR12 preparations.
data <- read.csv("mbp_NQR_AIR12_XTT.csv", header = TRUE)
#############
###CALCULS###
#############
# Convert the measured slope ("pente") to superoxide production (µmol/mg/h).
# NOTE(review): 24200 and 43.6 are assay constants (presumably the extinction
# coefficient and a normalisation factor) -- confirm against the lab protocol.
data$result <- (data$pente * 60 * 10^6) / (24200 * 43.6)
# Mean and standard error of the production rate per addition ("ajout").
# Renamed from `mean` to avoid shadowing base::mean.
mean_result <- aggregate(data$result, list(data$ajout), mean)
StandErr <- function(x) {
  # Standard error of the mean.
  sd(x) / sqrt(length(x))
}
se <- aggregate(data$result, list(data$ajout), StandErr)
result <- cbind(mean_result, se[, 2])
colnames(result) <- c("ajout", "moyenne", "se")
# Order and relabel the treatments for plotting.
result$ajout <- factor(result$ajout,
                       levels = c("mbp", "NQR", "AIR12"),
                       labels = c("mb", "mb + NQR", "mb + NQR + AIR12"))
#############
###BARPLOT###
#############
g <- ggplot(data = result,
            aes(x = ajout,
                y = moyenne)) +
  geom_bar(stat = "identity",
           position = "dodge",
           color = "black",
           width = 0.6) +
  scale_x_discrete(name = "") +
  scale_y_continuous(name = "Superoxide production (µmol/mg/h)\n",
                     expand = c(0, 0)) +
  # Upper error bar only. Use bare column names inside aes(): `result$moyenne`
  # bypasses ggplot's data masking and breaks if the data are ever subset.
  geom_errorbar(data = result,
                position = position_dodge(0.75),
                aes(x = ajout,
                    ymin = moyenne,
                    ymax = moyenne + se),
                width = 0.05)
save_plot("mbp_NQR_AIR12_XTT.png", g, base_aspect_ratio = 1.3)
###########
###STATS###
###########
# Normality per group (Shapiro-Wilk p-values); `result` here is the column of `data`.
shapiroTest <- aggregate(result ~ ajout, data = data,
                         function(x) shapiro.test(x)$p.value)
# all groups normal
# Homogeneity of variances between the NQR and AIR12 additions
bartlett.test(list(data$result[data$ajout == "NQR"],
                   data$result[data$ajout == "AIR12"]))
# equal variances across groups
data2 <- data[, -2]  # drop the raw slope column; keep ajout + result
stats <- aov(result ~ ajout, data = data2)
summary(stats)
TukeyHSD(stats)
# 3 data sets -> 3 significantly different groups
|
## Libraries ----------------------------------------------------------------
library(DataExplorer)
library(e1071)
library("kernlab")
library(neuralnet)
library(caret)
## Load Dataset ----------------------------------------------------------------
# UCI "mushroom" data: 22 categorical predictors plus the edible/poisonous class.
# NOTE(review): read.csv converts the hyphenated col.names to dot-separated
# syntactic names (e.g. "cap-shape" -> cap.shape), which the rest of the script uses.
mushroom_df <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data", sep = ',', header = FALSE, col.names = c("classes", "cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment", "gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring", "stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type", "veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"))
head(mushroom_df)
# Print the distinct levels of every column for a first look at the data.
for (x in colnames(mushroom_df)){
print(unique(mushroom_df[x]))
}
## Convert "classes" to Factor ----------------------------------------------------------------------------------
mushroom_df$classes <- factor(mushroom_df$classes, levels = c("e","p"), labels=c("edible","poisonous"))
unique(mushroom_df$classes)
## Remove Unnecessary Columns, Handle Missing Values, and Data Exploration ----------------------------------------------------------------
#mushroom_df$veil.type has only one level
mushroom_df$veil.type <- NULL #remove veil.type column
#missing values = "?"
mushroom_df[mushroom_df == "?"] <- NA
str(mushroom_df)
introduce(mushroom_df)
plot_intro(mushroom_df)
sum(is.na(mushroom_df))
plot_missing(mushroom_df)
#replace missing values with new level: "m" = missing
#since missing value is in stalk.root column and no other values = "m", we can use "m" to represent missing values
mushroom_df$stalk.root[is.na(mushroom_df$stalk.root)] <- "m" #https://stackoverflow.com/questions/8161836/how-do-i-replace-na-values-with-zeros-in-an-r-dataframe
sum(is.na(mushroom_df))
plot_missing(mushroom_df)
plot_bar(mushroom_df)
## SVM ------------------------------------------------------------------------------------------------------
## Create Training/Testing Dataset and Labels----------------------------------------------------------------
# Stratified 70/30 split on the class label via caret::createDataPartition;
# seed fixed for reproducibility of the partition.
set.seed(789)
index <- createDataPartition(mushroom_df$classes, p =0.7, list = FALSE)
mushroomTrain <- mushroom_df[index,] #index for training set
train_labels <- mushroom_df[1][index,]
dim(mushroomTrain)
mushroomTest <- mushroom_df[-index,] #not index for test set
test_labels <- mushroom_df[-index,1]
dim(mushroomTest)
# Linear SVM - e1071
# Each section below fits one SVM kernel on mushroomTrain, predicts on the
# held-out mushroomTest, and prints a caret confusion matrix vs test_labels.
svm_linear_model = svm(classes~., data = mushroomTrain, kernel = "linear", scale = FALSE) #http://uc-r.github.io/svm
summary(svm_linear_model)
#plot(svm_linear_model, mushroomTrain, Petal.Width ~ Petal.Length, slice = list(Sepal.Width = 3, Sepal.Length = 4))
linear_pred <- predict(svm_linear_model, mushroomTest)
confusionMatrix(linear_pred, test_labels)
plot(svm_linear_model, mushroomTrain)
# Linear SVM - Kernlab ("vanilladot" = plain linear kernel)
svm_linear_model_2 <- ksvm(classes ~ ., data = mushroomTrain, kernel = "vanilladot")
svm_linear_model_2
linear_pred_2 <- predict(svm_linear_model_2, mushroomTest)
table(linear_pred_2, test_labels)
confusionMatrix(linear_pred_2, test_labels)
# Gaussian RBF (Radial) SVM - Kernlab
svm_rbf_model <- ksvm(classes ~ ., data = mushroomTrain, kernel = "rbfdot")
svm_rbf_model
rbf_pred <- predict(svm_rbf_model, mushroomTest)
table(rbf_pred, test_labels)
confusionMatrix(rbf_pred, test_labels)
# Polynomial SVM - Kernlab
svm_poly_model <- ksvm(classes ~ ., data = mushroomTrain, kernel = "polydot")
svm_poly_model
poly_pred <- predict(svm_poly_model, mushroomTest)
table(poly_pred, test_labels)
confusionMatrix(poly_pred, test_labels)
# Sigmoid SVM - Kernlab ("tanhdot" = hyperbolic tangent kernel)
svm_sigmoid_model <- ksvm(classes ~ ., data = mushroomTrain, kernel = "tanhdot")
svm_sigmoid_model
sigmoid_pred <- predict(svm_sigmoid_model, mushroomTest)
table(sigmoid_pred, test_labels)
confusionMatrix(sigmoid_pred, test_labels)
## ---------------------------------------------------------------------
## ANN ----------------------------------------------------------------
#convert categorical variables (except the predicted class/label)
mushroom_ann_df <- subset(mushroom_df[2:22])
class <- subset(mushroom_df[1])
dmy <- dummyVars(~.,mushroom_ann_df, fullRank=T)
trsf <- data.frame(predict(dmy, newdata = mushroom_ann_df))
#combine class and dummy variables
final_mushroom_ann <- cbind(trsf, class)
dim(final_mushroom_ann)
#create train and test data sets
ind = sample(2, nrow(final_mushroom_ann),replace=TRUE,prob=c(0.7,0.3))
trainset = final_mushroom_ann[ind == 1,]
dim(trainset)
testset = final_mushroom_ann[ind==2,]
dim(testset)
#add classes label to trainset
#unique(final_mushroom_ann$classes)
#trainset$poisonous = trainset$classes == "poisonous"
#trainset$edible = trainset$classes == "edible"
#dim(trainset)
# Train model
train_columns <- colnames(trainset[-96])
nn_form <- as.formula(paste("classes~", paste(train_columns, collapse = "+")))
nn_model <- neuralnet(nn_form, data = trainset, hidden = 3)
nn_model$result.matrix
plot(nn_model, rep="best"))
#visualize the generalized weights plot (Yu-Wei)
#par(mfrow=c(3,2))
#gwplot(nn_model, selected.covariate = "cap.shapec")
#gwplot(nn_model, selected.covariate = "cap.shapef")
#gwplot(nn_model, selected.covariate = "cap.shapek")
#gwplot(nn_model, selected.covariate = "cap.shapes")
#gwplot(nn_model, selected.covariate = "cap.shapex")
#par(mfrow=c(1,1)) #reset
#predictions
nn_pred = compute(nn_model, testset[-96])$net.result #remove "classes" column
nn_predicition = c("edible","poisonous")[apply(nn_pred, 1, which.max)] #obtain other possible labels by finding the column with the greatest probability
pred_table = table(testset$classes, nn_predicition)#generate classification table
pred_table
classAgreement(pred_table)
confusionMatrix(pred_table)
#ANN Model #2
nn_model_2 <- neuralnet(nn_form , data = trainset , startweights = NULL, hidden=3, err.fct="sse", act.fct="logistic", linear.output = FALSE)
plot(nn_model_2, rep="best")
nn_pred_2 = compute(nn_model_2, testset[-96])$net.result #remove "classes" column
nn_predicition_2 = c("edible","poisonous")[apply(nn_pred_2, 1, which.max)] #obtain other possible labels by finding the column with the greatest probability:
pred_table_2 = table(testset$classes, nn_predicition_2)#generate classification table
pred_table_2
classAgreement(pred_table)
confusionMatrix(pred_table)
##----------------------------------------------------------------
|
/Supervised_Learning/SVM_ANN.R
|
no_license
|
tshrode37/Machine-Learning
|
R
| false
| false
| 6,654
|
r
|
## Libraries ----------------------------------------------------------------
library(DataExplorer)
library(e1071)
library("kernlab")
library(neuralnet)
library(caret)
## Load Dataset ----------------------------------------------------------------
# UCI mushroom dataset: 22 categorical attributes plus the edible/poisonous class.
mushroom_df <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.data", sep = ',', header = FALSE, col.names = c("classes", "cap-shape", "cap-surface", "cap-color", "bruises", "odor", "gill-attachment", "gill-spacing", "gill-size", "gill-color", "stalk-shape", "stalk-root", "stalk-surface-above-ring", "stalk-surface-below-ring", "stalk-color-above-ring", "stalk-color-below-ring", "veil-type", "veil-color", "ring-number", "ring-type", "spore-print-color", "population", "habitat"))
head(mushroom_df)
# Inspect the distinct levels of every column.
for (x in colnames(mushroom_df)){
print(unique(mushroom_df[x]))
}
## Convert "classes" to Factor ----------------------------------------------------------------------------------
mushroom_df$classes <- factor(mushroom_df$classes, levels = c("e","p"), labels=c("edible","poisonous"))
unique(mushroom_df$classes)
## Remove Unnecessary Columns, Handle Missing Values, and Data Exploration ----------------------------------------------------------------
#mushroom_df$veil.type has only one level
mushroom_df$veil.type <- NULL #remove veil.type column
#missing values = "?"
# Recode the dataset's "?" placeholder to NA across all columns.
mushroom_df[mushroom_df == "?"] <- NA
str(mushroom_df)
introduce(mushroom_df)
plot_intro(mushroom_df)
sum(is.na(mushroom_df))
plot_missing(mushroom_df)
#replace missing values with new level: "m" = missing
#since missing value is in stalk.root column and no other values = "m", we can use "m" to represent missing values
mushroom_df$stalk.root[is.na(mushroom_df$stalk.root)] <- "m" #https://stackoverflow.com/questions/8161836/how-do-i-replace-na-values-with-zeros-in-an-r-dataframe
sum(is.na(mushroom_df))
plot_missing(mushroom_df)
plot_bar(mushroom_df)
## SVM ------------------------------------------------------------------------------------------------------
## Create Training/Testing Dataset and Labels----------------------------------------------------------------
set.seed(789)
# Stratified 70/30 split on the class label.
index <- createDataPartition(mushroom_df$classes, p =0.7, list = FALSE)
mushroomTrain <- mushroom_df[index,] #index for training set
train_labels <- mushroom_df[1][index,]
dim(mushroomTrain)
mushroomTest <- mushroom_df[-index,] #not index for test set
test_labels <- mushroom_df[-index,1]
dim(mushroomTest)
# Linear SVM - e1071
svm_linear_model = svm(classes~., data = mushroomTrain, kernel = "linear", scale = FALSE) #http://uc-r.github.io/svm
summary(svm_linear_model)
#plot(svm_linear_model, mushroomTrain, Petal.Width ~ Petal.Length, slice = list(Sepal.Width = 3, Sepal.Length = 4))
linear_pred <- predict(svm_linear_model, mushroomTest)
confusionMatrix(linear_pred, test_labels)
plot(svm_linear_model, mushroomTrain)
# Linear SVM - Kernlab
svm_linear_model_2 <- ksvm(classes ~ ., data = mushroomTrain, kernel = "vanilladot")
svm_linear_model_2
linear_pred_2 <- predict(svm_linear_model_2, mushroomTest)
table(linear_pred_2, test_labels)
confusionMatrix(linear_pred_2, test_labels)
# Gaussian RBF (Radial) SVM - Kernlab
svm_rbf_model <- ksvm(classes ~ ., data = mushroomTrain, kernel = "rbfdot")
svm_rbf_model
rbf_pred <- predict(svm_rbf_model, mushroomTest)
table(rbf_pred, test_labels)
confusionMatrix(rbf_pred, test_labels)
# Polynomial SVM - Kernlab
svm_poly_model <- ksvm(classes ~ ., data = mushroomTrain, kernel = "polydot")
svm_poly_model
poly_pred <- predict(svm_poly_model, mushroomTest)
table(poly_pred, test_labels)
confusionMatrix(poly_pred, test_labels)
# Sigmoid SVM - Kernlab
svm_sigmoid_model <- ksvm(classes ~ ., data = mushroomTrain, kernel = "tanhdot")
svm_sigmoid_model
sigmoid_pred <- predict(svm_sigmoid_model, mushroomTest)
table(sigmoid_pred, test_labels)
confusionMatrix(sigmoid_pred, test_labels)
## ---------------------------------------------------------------------
## ANN ----------------------------------------------------------------
#convert categorical variables (except the predicted class/label)
# Columns 2:22 are the categorical predictors; column 1 is the class label.
mushroom_ann_df <- subset(mushroom_df[2:22])
# NOTE: 'class' shadows base::class() for the rest of this session.
class <- subset(mushroom_df[1])
# One-hot encode all predictors; fullRank=T drops one level per factor.
dmy <- dummyVars(~.,mushroom_ann_df, fullRank=T)
trsf <- data.frame(predict(dmy, newdata = mushroom_ann_df))
#combine class and dummy variables
final_mushroom_ann <- cbind(trsf, class)
dim(final_mushroom_ann)
#create train and test data sets
# Random ~70/30 split: each row is assigned group 1 (train) or 2 (test).
ind = sample(2, nrow(final_mushroom_ann),replace=TRUE,prob=c(0.7,0.3))
trainset = final_mushroom_ann[ind == 1,]
dim(trainset)
testset = final_mushroom_ann[ind==2,]
dim(testset)
#add classes label to trainset
#unique(final_mushroom_ann$classes)
#trainset$poisonous = trainset$classes == "poisonous"
#trainset$edible = trainset$classes == "edible"
#dim(trainset)
# Train model
# Build the formula "classes ~ <all dummy columns>"; column 96 is the label.
train_columns <- colnames(trainset[-96])
nn_form <- as.formula(paste("classes~", paste(train_columns, collapse = "+")))
# Single hidden layer with 3 units.
nn_model <- neuralnet(nn_form, data = trainset, hidden = 3)
nn_model$result.matrix
# Visualise the trained network (best repetition).
plot(nn_model, rep="best")  # FIX: removed stray closing parenthesis (was a syntax error)
#visualize the generalized weights plot (Yu-Wei)
#par(mfrow=c(3,2))
#gwplot(nn_model, selected.covariate = "cap.shapec")
#gwplot(nn_model, selected.covariate = "cap.shapef")
#gwplot(nn_model, selected.covariate = "cap.shapek")
#gwplot(nn_model, selected.covariate = "cap.shapes")
#gwplot(nn_model, selected.covariate = "cap.shapex")
#par(mfrow=c(1,1)) #reset
#predictions
# Drop the "classes" column (96) before feeding the test set to the network.
nn_pred <- compute(nn_model, testset[-96])$net.result
# Label each row by the output column with the greatest probability.
nn_prediction <- c("edible","poisonous")[apply(nn_pred, 1, which.max)]
pred_table <- table(testset$classes, nn_prediction) #generate classification table
pred_table
classAgreement(pred_table)
confusionMatrix(pred_table)
#ANN Model #2: explicit SSE error, logistic activation, non-linear output.
nn_model_2 <- neuralnet(nn_form , data = trainset , startweights = NULL, hidden=3, err.fct="sse", act.fct="logistic", linear.output = FALSE)
plot(nn_model_2, rep="best")
nn_pred_2 <- compute(nn_model_2, testset[-96])$net.result
nn_prediction_2 <- c("edible","poisonous")[apply(nn_pred_2, 1, which.max)]
pred_table_2 <- table(testset$classes, nn_prediction_2) #generate classification table
pred_table_2
classAgreement(pred_table_2)  # FIX: was re-evaluating model 1's table (pred_table)
confusionMatrix(pred_table_2) # FIX: was re-evaluating model 1's table (pred_table)
|
/10-Previsao.R
|
no_license
|
vitorsouzzas/AAULA10
|
R
| false
| false
| 3,309
|
r
| ||
# Numeric vector with four example values.
a <- c(6, 5, 7, 4)
# Character vector with three family-member labels.
b <- c("mama", "papa", "hijo")
|
/vectores.R
|
no_license
|
cristhianfgy/Practica2
|
R
| false
| false
| 43
|
r
|
# Numeric vector with four example values.
a <- c(6, 5, 7, 4)
# Character vector with three family-member labels.
b <- c("mama", "papa", "hijo")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3.R
\name{haystack}
\alias{haystack}
\alias{haystack.matrix}
\alias{haystack.data.frame}
\alias{haystack.Seurat}
\alias{haystack.SingleCellExperiment}
\title{The main Haystack function}
\usage{
haystack(x, ...)
\method{haystack}{matrix}(
x,
dim1 = 1,
dim2 = 2,
detection,
method = "highD",
use.advanced.sampling = NULL,
dir.randomization = NULL,
scale = TRUE,
grid.points = 100,
grid.method = "centroid",
...
)
\method{haystack}{data.frame}(
x,
dim1 = 1,
dim2 = 2,
detection,
method = "highD",
use.advanced.sampling = NULL,
dir.randomization = NULL,
scale = TRUE,
grid.points = 100,
grid.method = "centroid",
...
)
\method{haystack}{Seurat}(
x,
assay = "RNA",
slot = "data",
coord = "pca",
cutoff = 1,
method = NULL,
...
)
\method{haystack}{SingleCellExperiment}(x, assay = "counts", coord = "TSNE", cutoff = 1, method = NULL, ...)
}
\arguments{
\item{x}{a matrix or other object from which coordinates of cells can be extracted.}
\item{...}{further parameters passed down to methods.}
\item{dim1}{column index or name of matrix for x-axis coordinates.}
\item{dim2}{column index or name of matrix for y-axis coordinates.}
\item{detection}{A logical matrix showing which genes (rows) are detected in which cells (columns)}
\item{method}{choose between highD (default) and 2D haystack.}
\item{use.advanced.sampling}{If NULL naive sampling is used. If a vector is given (of length = no. of cells) sampling is done according to the values in the vector.}
\item{dir.randomization}{If NULL, no output is made about the random sampling step. If not NULL, files related to the randomizations are printed to this directory.}
\item{scale}{Logical (default=TRUE) indicating whether input coordinates in x should be scaled to mean 0 and standard deviation 1.}
\item{grid.points}{An integer specifying the number of centers (gridpoints) to be used for estimating the density distributions of cells. Default is set to 100.}
\item{grid.method}{The method to decide grid points for estimating the density in the high-dimensional space. Should be "centroid" (default) or "seeding".}
\item{assay}{name of assay data for Seurat method.}
\item{slot}{name of slot for assay data for Seurat method.}
\item{coord}{name of coordinates slot for specific methods.}
\item{cutoff}{cutoff for detection.}
}
\value{
An object of class "haystack"
}
\description{
The main Haystack function
}
|
/man/haystack.Rd
|
permissive
|
JunjuanZheng/singleCellHaystack
|
R
| false
| true
| 2,515
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3.R
\name{haystack}
\alias{haystack}
\alias{haystack.matrix}
\alias{haystack.data.frame}
\alias{haystack.Seurat}
\alias{haystack.SingleCellExperiment}
\title{The main Haystack function}
\usage{
haystack(x, ...)
\method{haystack}{matrix}(
x,
dim1 = 1,
dim2 = 2,
detection,
method = "highD",
use.advanced.sampling = NULL,
dir.randomization = NULL,
scale = TRUE,
grid.points = 100,
grid.method = "centroid",
...
)
\method{haystack}{data.frame}(
x,
dim1 = 1,
dim2 = 2,
detection,
method = "highD",
use.advanced.sampling = NULL,
dir.randomization = NULL,
scale = TRUE,
grid.points = 100,
grid.method = "centroid",
...
)
\method{haystack}{Seurat}(
x,
assay = "RNA",
slot = "data",
coord = "pca",
cutoff = 1,
method = NULL,
...
)
\method{haystack}{SingleCellExperiment}(x, assay = "counts", coord = "TSNE", cutoff = 1, method = NULL, ...)
}
\arguments{
\item{x}{a matrix or other object from which coordinates of cells can be extracted.}
\item{...}{further parameters passed down to methods.}
\item{dim1}{column index or name of matrix for x-axis coordinates.}
\item{dim2}{column index or name of matrix for y-axis coordinates.}
\item{detection}{A logical matrix showing which genes (rows) are detected in which cells (columns)}
\item{method}{choose between highD (default) and 2D haystack.}
\item{use.advanced.sampling}{If NULL naive sampling is used. If a vector is given (of length = no. of cells) sampling is done according to the values in the vector.}
\item{dir.randomization}{If NULL, no output is made about the random sampling step. If not NULL, files related to the randomizations are printed to this directory.}
\item{scale}{Logical (default=TRUE) indicating whether input coordinates in x should be scaled to mean 0 and standard deviation 1.}
\item{grid.points}{An integer specifying the number of centers (gridpoints) to be used for estimating the density distributions of cells. Default is set to 100.}
\item{grid.method}{The method to decide grid points for estimating the density in the high-dimensional space. Should be "centroid" (default) or "seeding".}
\item{assay}{name of assay data for Seurat method.}
\item{slot}{name of slot for assay data for Seurat method.}
\item{coord}{name of coordinates slot for specific methods.}
\item{cutoff}{cutoff for detection.}
}
\value{
An object of class "haystack"
}
\description{
The main Haystack function
}
|
#==============================================================================#
# #
# Title : Automate Git #
# Purpose : Define function for automating Git processes. #
# Notes : . #
# Author : chrimaho #
# Created : 09/May/2020 #
# References : . #
# Sources : . #
# Edited : 09/May/2020 - Initial creation #
# #
#==============================================================================#
# Start ----
GitSync <- function(repo=rprojroot::find_rstudio_root_file(), untracked=TRUE, stage=TRUE, commit=TRUE, pull=TRUE, push=TRUE) {
    #' @title Sync Git
    #' @description Automate the Sync process for Git.
    #' @note This will run through the Git stages in sequence: `untracked`, `stage`, `commit`, `pull`, `push`.
    #' @param repo character. The working directory to be Sync'd. Must be a valid system path.
    #' @param untracked logical. Do you want to run the `untracked` process?
    #' @param stage logical. Do you want to run the `stage` process?
    #' @param commit logical. Do you want to run the `commit` process?
    #' @param pull logical. Do you want to run the `pull` process?
    #' @param push logical. Do you want to run the `push` process?
    #' @return A character string for success or failure.
    # Confirm required packages are loaded ----
    require(git2r)
    require(rprojroot)
    require(rstudioapi)
    require(assertthat)
    # Validations ----
    assert_that(is.character(repo))
    assert_that(is.logical(untracked))
    assert_that(is.logical(stage))
    assert_that(is.logical(commit))
    assert_that(is.logical(pull))
    assert_that(is.logical(push))
    assert_that(dir.exists(repo), msg="'repo' must be a valid system directory.")
    # Confirm valid repo directory ----
    if (!file.exists(paste0(repo, "/.git/config"))) {
        stop(paste0("You have not supplied a valid repo directory. '", repo, "'"))
    }
    # FIX: initialise the item counters up front. Previously 'num' was only
    # assigned inside the 'untracked' and 'stage' sections, so calling
    # GitSync(untracked=FALSE, stage=FALSE, push=TRUE) failed in the 'push'
    # section with "object 'num' not found".
    num <- 0L
    num2 <- 0L
    # NOTE(review): status()/add()/commit()/push() below are called without a
    # 'repo' argument, so they operate on the current working directory rather
    # than the 'repo' parameter — confirm the caller's wd is the repo.
    # Get credentials from user input ----
    get_Credentials <- function() {
        # Get & Set username
        if (is.null(getOption("git_username"))) {
            username <- showPrompt(title="Username", message="Enter your username:", default="")
        } else {
            username <- getOption("git_username")
        }
        # Get & Set password
        if (is.null(getOption("git_password"))) {
            password <- askForPassword(prompt="Enter your password:")
        } else {
            password <- getOption("git_password")
        }
        # Save username
        if (is.null(getOption("git_username"))) {
            confirm <- showQuestion(title="Save username?"
                                    ,message=paste0("Would you like to save your Git username to the `options()` environment?","\n\n"
                                                    ,"If 'Yes', then you will not be prompted for your username next time.","\n"
                                                    ,"If 'No', then you will be prompted for your username again next time.","\n\n"
                                                    ,"Note: `options()` are not pushed to Git.","\n"
                                                    ,"However, they are still retrievable by others on your computer.","\n"
                                                    ,"So be careful."
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_username=username)
            }
        } else if (username != getOption("git_username")) {
            confirm <- showQuestion(title="Update username?"
                                    ,message=paste0("The username you have provided is different to what is saved.","\n"
                                                    ,"Would you like to update this new username to memory?"
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_username=username)
            }
        }
        # Save password
        if (is.null(getOption("git_password"))) {
            confirm <- showQuestion(title="Save password?"
                                    ,message=paste0("Would you like to save your Git password to the `options()` environment?","\n\n"
                                                    ,"If 'Yes', then you will not be prompted for your password next time.","\n"
                                                    ,"If 'No', then you will be prompted for your password again next time.","\n\n"
                                                    ,"Note: `options()` are not pushed to Git.","\n"
                                                    ,"However, they are still retrievable by others on your computer.","\n"
                                                    ,"So be careful."
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_password=password)
            }
        } else if (password != getOption("git_password")) {
            confirm <- showQuestion(title="Update password?"
                                    ,message=paste0("The password you have provided is different to what is saved.","\n"
                                                    ,"Would you like to update this new password to memory?"
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_password=password)
            }
        }
        # Set Credentials
        credentials <- cred_user_pass(username=username, password=password)
        # Return
        return(credentials)
    }
    # NOTE: values returned from the status() command are as follows: ----
    # 1. "untracked" means new files which have not yet been added to GitHub.
    # 2. "unstaged" means existing files which have been modified but not yet ready to be committed to GitHub.
    # 3. "staged" means files that are staged and ready to be committed.
    # Process untracked ----
    if (untracked == TRUE) {
        num <- length(unlist(status()["untracked"]))
        if (num > 0) {
            writeLines(paste0("There are ", num, " Untracked items to be processed."))
            for (i in 1:num) {
                writeLines(paste0("    ", i, ": ",unlist(status()["untracked"])[i]))
            }
            git2r::add(repo, unlist(status()["untracked"]))
            writeLines(paste0("Items have been Staged."))
            CommitComment <- showPrompt(title="Git Commit Comment", message="Enter a comment for the commit message.")
            if (!nchar(trimws(CommitComment), keepNA = TRUE) %in% c("NA","0",NA,0)) {
                CommitComment <- paste(Sys.time(), CommitComment, sep = " - ")
            } else {
                CommitComment <- paste(Sys.time(), "Initial commit", sep = " - ")
            }
            commit(message = CommitComment)
            writeLines(paste0("Items have been Committed."))
            push(credentials = get_Credentials())
            writeLines(paste0("Items have been Pushed."))
        }
    }
    # Process stage ----
    if (stage == TRUE) {
        num <- length(unlist(status()["unstaged"]))
        if (num > 0) {
            writeLines(paste0("There are ", num, " Tracked items to be processed."))
            for (i in 1:num) {
                writeLines(paste0("    ", i, ": ", unlist(status()["unstaged"])[i]))
            }
        }
        if (!is.null(unlist(status()["unstaged"]))) {
            git2r::add(repo, unlist(status()["unstaged"]))
            num2 <- length(unlist(status()["unstaged"]))
            if (num2 == 0) {
                writeLines(paste0("Items have been Staged."))
            } else if (num == num2) {
                stop ("Something went wrong with the Staging.")
            }
        }
    }
    # Process commit ----
    if (commit == TRUE) {
        if (!is.null(unlist(status()["staged"]))) {
            CommitComment <- showPrompt(title="Git Commit Comment", message="Enter a comment for the commit message.")
            if (!nchar(trimws(CommitComment), keepNA = TRUE) %in% c("NA","0",NA,0)) {
                CommitComment <- paste(Sys.time(), CommitComment, sep = " - ")
            } else {
                CommitComment <- paste(Sys.time(), "Update", sep = " - ")
            }
            commit(message = CommitComment)
            num2 <- length(unlist(status()["staged"]))
            if (num2 == 0) {
                writeLines(paste0("Items have been Committed."))
            } else if (num == num2) {
                stop ("Something went wrong with Committing.")
            }
        }
    }
    # Process pull ----
    # tryCatch() is utilised because the error message when executing pull() or push() is not very helpful: "too many redirects or authentication replays". The main issue is usually that the credentials are incorrect or missing.
    if (pull == TRUE) {
        # FIX: result renamed from 'pull' so it no longer shadows the 'pull' argument.
        pull_result <- tryCatch (
            expr = {
                git2r::pull(credentials = get_Credentials())
            },
            error = function (err) {
                message (paste0("Error when Pulling from GitHub. Try checking your credentials and try again.","\n","Message thrown: "))
                stop (err)
            },
            warning = function (war) {
                message ("There was a Warning when Pulling from GitHub.")
                return (war)
            },
            finally = {
                # It was successful. Move on.
            }
        )
        if (unlist(pull_result["up_to_date"]) == TRUE) {
            writeLines(paste0("There are no discrepancies with the Master branch."))
        } else {
            stop ("Something went wrong with pulling the repo. Please manually check, merge the code, validate discrepancies, then re-try.")
        }
    }
    # Process push ----
    if (push == TRUE) {
        if (num > 0) {
            tryCatch(
                expr = {
                    push(credentials = get_Credentials())
                },
                error = function(err) {
                    message (paste0("Error when Pushing to GitHub. Try checking your credentials and try again.","\n","Message thrown: "))
                    stop (err)
                },
                warning = function (war) {
                    message ("There was a Warning when Pushing to GitHub.")
                    return (war)
                },
                finally = {
                    # It was successful. Move on.
                }
            )
            num2 <- length(unlist(status()))
            if (num2 == 0) {
                writeLines(paste0("Items have been Pushed."))
            } else if (num == num2) {
                stop ("Something went wrong with Pushing.")
            }
        }
    }
    # Return ----
    return(writeLines(paste0("Successfully updated.")))
}
GitSync()
|
/AutomateGit.R
|
no_license
|
rmasiniexpert/VanillaNeuralNetworksInR
|
R
| false
| false
| 11,953
|
r
|
#==============================================================================#
# #
# Title : Automate Git #
# Purpose : Define function for automating Git processes. #
# Notes : . #
# Author : chrimaho #
# Created : 09/May/2020 #
# References : . #
# Sources : . #
# Edited : 09/May/2020 - Initial creation #
# #
#==============================================================================#
# Start ----
GitSync <- function(repo=rprojroot::find_rstudio_root_file(), untracked=TRUE, stage=TRUE, commit=TRUE, pull=TRUE, push=TRUE) {
    #' @title Sync Git
    #' @description Automate the Sync process for Git.
    #' @note This will run through the Git stages in sequence: `untracked`, `stage`, `commit`, `pull`, `push`.
    #' @param repo character. The working directory to be Sync'd. Must be a valid system path.
    #' @param untracked logical. Do you want to run the `untracked` process?
    #' @param stage logical. Do you want to run the `stage` process?
    #' @param commit logical. Do you want to run the `commit` process?
    #' @param pull logical. Do you want to run the `pull` process?
    #' @param push logical. Do you want to run the `push` process?
    #' @return A character string for success or failure.
    # Confirm required packages are loaded ----
    require(git2r)
    require(rprojroot)
    require(rstudioapi)
    require(assertthat)
    # Validations ----
    assert_that(is.character(repo))
    assert_that(is.logical(untracked))
    assert_that(is.logical(stage))
    assert_that(is.logical(commit))
    assert_that(is.logical(pull))
    assert_that(is.logical(push))
    assert_that(dir.exists(repo), msg="'repo' must be a valid system directory.")
    # Confirm valid repo directory ----
    if (!file.exists(paste0(repo, "/.git/config"))) {
        stop(paste0("You have not supplied a valid repo directory. '", repo, "'"))
    }
    # FIX: initialise the item counters up front. Previously 'num' was only
    # assigned inside the 'untracked' and 'stage' sections, so calling
    # GitSync(untracked=FALSE, stage=FALSE, push=TRUE) failed in the 'push'
    # section with "object 'num' not found".
    num <- 0L
    num2 <- 0L
    # NOTE(review): status()/add()/commit()/push() below are called without a
    # 'repo' argument, so they operate on the current working directory rather
    # than the 'repo' parameter — confirm the caller's wd is the repo.
    # Get credentials from user input ----
    get_Credentials <- function() {
        # Get & Set username
        if (is.null(getOption("git_username"))) {
            username <- showPrompt(title="Username", message="Enter your username:", default="")
        } else {
            username <- getOption("git_username")
        }
        # Get & Set password
        if (is.null(getOption("git_password"))) {
            password <- askForPassword(prompt="Enter your password:")
        } else {
            password <- getOption("git_password")
        }
        # Save username
        if (is.null(getOption("git_username"))) {
            confirm <- showQuestion(title="Save username?"
                                    ,message=paste0("Would you like to save your Git username to the `options()` environment?","\n\n"
                                                    ,"If 'Yes', then you will not be prompted for your username next time.","\n"
                                                    ,"If 'No', then you will be prompted for your username again next time.","\n\n"
                                                    ,"Note: `options()` are not pushed to Git.","\n"
                                                    ,"However, they are still retrievable by others on your computer.","\n"
                                                    ,"So be careful."
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_username=username)
            }
        } else if (username != getOption("git_username")) {
            confirm <- showQuestion(title="Update username?"
                                    ,message=paste0("The username you have provided is different to what is saved.","\n"
                                                    ,"Would you like to update this new username to memory?"
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_username=username)
            }
        }
        # Save password
        if (is.null(getOption("git_password"))) {
            confirm <- showQuestion(title="Save password?"
                                    ,message=paste0("Would you like to save your Git password to the `options()` environment?","\n\n"
                                                    ,"If 'Yes', then you will not be prompted for your password next time.","\n"
                                                    ,"If 'No', then you will be prompted for your password again next time.","\n\n"
                                                    ,"Note: `options()` are not pushed to Git.","\n"
                                                    ,"However, they are still retrievable by others on your computer.","\n"
                                                    ,"So be careful."
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_password=password)
            }
        } else if (password != getOption("git_password")) {
            confirm <- showQuestion(title="Update password?"
                                    ,message=paste0("The password you have provided is different to what is saved.","\n"
                                                    ,"Would you like to update this new password to memory?"
                                                    )
                                    ,ok="Yes"
                                    ,cancel="No"
                                    )
            if (confirm == TRUE) {
                options(git_password=password)
            }
        }
        # Set Credentials
        credentials <- cred_user_pass(username=username, password=password)
        # Return
        return(credentials)
    }
    # NOTE: values returned from the status() command are as follows: ----
    # 1. "untracked" means new files which have not yet been added to GitHub.
    # 2. "unstaged" means existing files which have been modified but not yet ready to be committed to GitHub.
    # 3. "staged" means files that are staged and ready to be committed.
    # Process untracked ----
    if (untracked == TRUE) {
        num <- length(unlist(status()["untracked"]))
        if (num > 0) {
            writeLines(paste0("There are ", num, " Untracked items to be processed."))
            for (i in 1:num) {
                writeLines(paste0("    ", i, ": ",unlist(status()["untracked"])[i]))
            }
            git2r::add(repo, unlist(status()["untracked"]))
            writeLines(paste0("Items have been Staged."))
            CommitComment <- showPrompt(title="Git Commit Comment", message="Enter a comment for the commit message.")
            if (!nchar(trimws(CommitComment), keepNA = TRUE) %in% c("NA","0",NA,0)) {
                CommitComment <- paste(Sys.time(), CommitComment, sep = " - ")
            } else {
                CommitComment <- paste(Sys.time(), "Initial commit", sep = " - ")
            }
            commit(message = CommitComment)
            writeLines(paste0("Items have been Committed."))
            push(credentials = get_Credentials())
            writeLines(paste0("Items have been Pushed."))
        }
    }
    # Process stage ----
    if (stage == TRUE) {
        num <- length(unlist(status()["unstaged"]))
        if (num > 0) {
            writeLines(paste0("There are ", num, " Tracked items to be processed."))
            for (i in 1:num) {
                writeLines(paste0("    ", i, ": ", unlist(status()["unstaged"])[i]))
            }
        }
        if (!is.null(unlist(status()["unstaged"]))) {
            git2r::add(repo, unlist(status()["unstaged"]))
            num2 <- length(unlist(status()["unstaged"]))
            if (num2 == 0) {
                writeLines(paste0("Items have been Staged."))
            } else if (num == num2) {
                stop ("Something went wrong with the Staging.")
            }
        }
    }
    # Process commit ----
    if (commit == TRUE) {
        if (!is.null(unlist(status()["staged"]))) {
            CommitComment <- showPrompt(title="Git Commit Comment", message="Enter a comment for the commit message.")
            if (!nchar(trimws(CommitComment), keepNA = TRUE) %in% c("NA","0",NA,0)) {
                CommitComment <- paste(Sys.time(), CommitComment, sep = " - ")
            } else {
                CommitComment <- paste(Sys.time(), "Update", sep = " - ")
            }
            commit(message = CommitComment)
            num2 <- length(unlist(status()["staged"]))
            if (num2 == 0) {
                writeLines(paste0("Items have been Committed."))
            } else if (num == num2) {
                stop ("Something went wrong with Committing.")
            }
        }
    }
    # Process pull ----
    # tryCatch() is utilised because the error message when executing pull() or push() is not very helpful: "too many redirects or authentication replays". The main issue is usually that the credentials are incorrect or missing.
    if (pull == TRUE) {
        # FIX: result renamed from 'pull' so it no longer shadows the 'pull' argument.
        pull_result <- tryCatch (
            expr = {
                git2r::pull(credentials = get_Credentials())
            },
            error = function (err) {
                message (paste0("Error when Pulling from GitHub. Try checking your credentials and try again.","\n","Message thrown: "))
                stop (err)
            },
            warning = function (war) {
                message ("There was a Warning when Pulling from GitHub.")
                return (war)
            },
            finally = {
                # It was successful. Move on.
            }
        )
        if (unlist(pull_result["up_to_date"]) == TRUE) {
            writeLines(paste0("There are no discrepancies with the Master branch."))
        } else {
            stop ("Something went wrong with pulling the repo. Please manually check, merge the code, validate discrepancies, then re-try.")
        }
    }
    # Process push ----
    if (push == TRUE) {
        if (num > 0) {
            tryCatch(
                expr = {
                    push(credentials = get_Credentials())
                },
                error = function(err) {
                    message (paste0("Error when Pushing to GitHub. Try checking your credentials and try again.","\n","Message thrown: "))
                    stop (err)
                },
                warning = function (war) {
                    message ("There was a Warning when Pushing to GitHub.")
                    return (war)
                },
                finally = {
                    # It was successful. Move on.
                }
            )
            num2 <- length(unlist(status()))
            if (num2 == 0) {
                writeLines(paste0("Items have been Pushed."))
            } else if (num == num2) {
                stop ("Something went wrong with Pushing.")
            }
        }
    }
    # Return ----
    return(writeLines(paste0("Successfully updated.")))
}
GitSync()
|
/data/man.r
|
no_license
|
unix-history/tropix-cmd
|
R
| false
| false
| 2,569
|
r
| ||
# Write diagnostic plots (via mcmcplot1) for the parameters of an MCMC run to
# an HTML report and open the report in the default browser.
#
# mcmcout : mcmc/mcmc.list or anything convert.mcmc.list() understands.
# parms/regex/random/leaf.marker : parameter selection, passed to parms2plot().
# dir/filename/extension/title/heading : report location and headings.
# col/lty/xlim/ylim/style/greek : plotting options forwarded to mcmcplot1().
# Returns the "file://" URL of the report, invisibly.
mcmcplot <- function(mcmcout, parms=NULL, regex=NULL, random=NULL, leaf.marker="[\\[_]", dir=tempdir(), filename="MCMCoutput", extension="html", title=NULL, heading=title, col=NULL, lty=1, xlim=NULL, ylim=NULL, style=c("gray", "plain"), greek=FALSE){
    ## This must come before mcmcout is evaluated in any other expression
    if (is.null(title))
        title <- paste0("MCMC Plots: ", deparse(substitute(mcmcout)))  # idiom: paste0 over paste(..., sep="")
    if (is.null(heading))
        heading <- title
    style <- match.arg(style)
    ## Turn off graphics device if interrupted in the middle of plotting
    current.devices <- dev.list()
    on.exit( sapply(dev.list(), function(dev) if(!(dev %in% current.devices)) dev.off(dev)) )
    ## Convert input mcmcout to mcmc.list object
    mcmcout <- convert.mcmc.list(mcmcout)
    nchains <- length(mcmcout)
    if (is.null(col)){
        col <- mcmcplotsPalette(nchains)
    }
    css.file <- system.file("MCMCoutput.css", package="mcmcplots")
    css.file <- paste0("file:///", css.file)  # idiom: paste0 over paste(..., sep="")
    htmlfile <- .html.begin(dir, filename, extension, title=title, cssfile=css.file)
    ## Select parameters for plotting
    if (is.null(varnames(mcmcout))){
        warning("Argument 'mcmcout' did not have valid variable names, so names have been created for you.")
        varnames(mcmcout) <- varnames(mcmcout, allow.null=FALSE)
    }
    parnames <- parms2plot(varnames(mcmcout), parms, regex, random, leaf.marker, do.unlist=FALSE)
    if (length(parnames)==0)
        stop("No parameters matched arguments 'parms' or 'regex'.")
    np <- length(unlist(parnames))
    ## Report header and table of contents (one entry per parameter group).
    cat('\n<div id="outer">\n', file=htmlfile, append=TRUE)
    cat('<h1>', heading, '</h1>', sep="", file=htmlfile, append=TRUE)
    cat('<div id="toc">\n', file=htmlfile, append=TRUE)
    cat('\n<h2>Table of Contents</h2>', file=htmlfile, append=TRUE)
    cat('<ul id="toc_items">\n', file=htmlfile, append=TRUE)
    for (group.name in names(parnames)) {
        cat(sprintf('<li class="toc_item"><a href="#%s">%s</a></li>\n', group.name, group.name), file=htmlfile, append=TRUE)
    }
    cat('</ul></div>\n', file=htmlfile, append=TRUE)
    cat('<div class="main">\n', file=htmlfile, append=TRUE)
    htmlwidth <- 640
    htmlheight <- 480
    ## One PNG per parameter, embedded into the report.
    for (group.name in names(parnames)) {
        cat(sprintf('<h2><a name="%s">Plots for %s</a></h2>\n', group.name, group.name), file=htmlfile, append=TRUE)
        for (p in parnames[[group.name]]) {
            ## Console progress indicator (overwrites the current line).
            pctdone <- round(100*match(p, unlist(parnames))/np)
            cat("\r", rep(" ", getOption("width")), sep="")
            cat("\rPreparing plots for ", group.name, ". ", pctdone, "% complete.", sep="")
            gname <- paste0(p, ".png")  # idiom: paste0 over paste(..., sep="")
            png(file.path(dir, gname), width=htmlwidth, height=htmlheight)
            ## Capture plotting errors so one bad parameter does not abort the report.
            plot_err <- tryCatch({
                mcmcplot1(mcmcout[, p, drop=FALSE], col=col, lty=lty, xlim=xlim, ylim=ylim, style=style, greek=greek)
            }, error=function(e) {e})
            dev.off()
            if (inherits(plot_err, "error")) {
                cat(sprintf('<p class="plot_err">%s. %s</p>', p, plot_err),
                    file=htmlfile, append=TRUE)
            } else {
                .html.img(file=htmlfile, class="mcmcplot", src=gname,
                          width=htmlwidth, height=htmlheight)
            }
        }
    }
    cat("\r", rep(" ", getOption("width")), "\r", sep="")
    cat('\n</div>\n</div>\n', file=htmlfile, append=TRUE)
    .html.end(htmlfile)
    full.name.path <- paste0("file://", htmlfile)  # idiom: paste0 over paste(..., sep="")
    browseURL(full.name.path)
    invisible(full.name.path)
}
|
/R/mcmcplot.R
|
no_license
|
cran/mcmcplots
|
R
| false
| false
| 3,674
|
r
|
# Generate an HTML page of MCMC diagnostic plots (one PNG per parameter),
# write it under `dir`, open it in the default browser, and invisibly
# return the file:// URL of the page.
# NOTE(review): relies on package-internal helpers not visible here
# (.html.begin, .html.img, .html.end, parms2plot, convert.mcmc.list,
# mcmcplot1, mcmcplotsPalette) -- behavior of those is assumed, not shown.
mcmcplot <- function(mcmcout, parms=NULL, regex=NULL, random=NULL, leaf.marker="[\\[_]", dir=tempdir(), filename="MCMCoutput", extension="html", title=NULL, heading=title, col=NULL, lty=1, xlim=NULL, ylim=NULL, style=c("gray", "plain"), greek=FALSE){
    ## This must come before mcmcout is evaluated in any other expression
    ## (deparse(substitute(...)) captures the caller's expression text).
    if (is.null(title))
        title <- paste("MCMC Plots: ", deparse(substitute(mcmcout)), sep="")
    if (is.null(heading))
        heading <- title
    style <- match.arg(style)
    ## Turn off graphics device if interrupted in the middle of plotting
    current.devices <- dev.list()
    on.exit( sapply(dev.list(), function(dev) if(!(dev %in% current.devices)) dev.off(dev)) )
    ## Convert input mcmcout to mcmc.list object
    mcmcout <- convert.mcmc.list(mcmcout)
    nchains <- length(mcmcout)
    ## Default: one color per chain from the package palette.
    if (is.null(col)){
        col <- mcmcplotsPalette(nchains)
    }
    ## Link the stylesheet shipped with the package into the generated page.
    css.file <- system.file("MCMCoutput.css", package="mcmcplots")
    css.file <- paste("file:///", css.file, sep="")
    htmlfile <- .html.begin(dir, filename, extension, title=title, cssfile=css.file)
    ## Select parameters for plotting
    if (is.null(varnames(mcmcout))){
        warning("Argument 'mcmcout' did not have valid variable names, so names have been created for you.")
        varnames(mcmcout) <- varnames(mcmcout, allow.null=FALSE)
    }
    parnames <- parms2plot(varnames(mcmcout), parms, regex, random, leaf.marker, do.unlist=FALSE)
    if (length(parnames)==0)
        stop("No parameters matched arguments 'parms' or 'regex'.")
    np <- length(unlist(parnames))
    ## Write the page header and a table of contents with one anchor link
    ## per parameter group.
    cat('\n<div id="outer">\n', file=htmlfile, append=TRUE)
    cat('<h1>', heading, '</h1>', sep="", file=htmlfile, append=TRUE)
    cat('<div id="toc">\n', file=htmlfile, append=TRUE)
    cat('\n<h2>Table of Contents</h2>', file=htmlfile, append=TRUE)
    cat('<ul id="toc_items">\n', file=htmlfile, append=TRUE)
    for (group.name in names(parnames)) {
        cat(sprintf('<li class="toc_item"><a href="#%s">%s</a></li>\n', group.name, group.name), file=htmlfile, append=TRUE)
    }
    cat('</ul></div>\n', file=htmlfile, append=TRUE)
    cat('<div class="main">\n', file=htmlfile, append=TRUE)
    htmlwidth <- 640
    htmlheight <- 480
    ## Main loop: one <h2> section per group, one PNG per parameter.
    for (group.name in names(parnames)) {
        cat(sprintf('<h2><a name="%s">Plots for %s</a></h2>\n', group.name, group.name), file=htmlfile, append=TRUE)
        for (p in parnames[[group.name]]) {
            ## Console progress indicator (overwrites the current line).
            pctdone <- round(100*match(p, unlist(parnames))/np)
            cat("\r", rep(" ", getOption("width")), sep="")
            cat("\rPreparing plots for ", group.name, ". ", pctdone, "% complete.", sep="")
            gname <- paste(p, ".png", sep="")
            png(file.path(dir, gname), width=htmlwidth, height=htmlheight)
            ## Capture plotting errors so one bad parameter does not abort
            ## the whole page; the error is reported inline in the HTML.
            plot_err <- tryCatch({
                mcmcplot1(mcmcout[, p, drop=FALSE], col=col, lty=lty, xlim=xlim, ylim=ylim, style=style, greek=greek)
            }, error=function(e) {e})
            dev.off()
            if (inherits(plot_err, "error")) {
                cat(sprintf('<p class="plot_err">%s. %s</p>', p, plot_err),
                    file=htmlfile, append=TRUE)
            } else {
                .html.img(file=htmlfile, class="mcmcplot", src=gname,
                          width=htmlwidth, height=htmlheight)
            }
        }
    }
    ## Clear the progress line and close out the HTML document.
    cat("\r", rep(" ", getOption("width")), "\r", sep="")
    cat('\n</div>\n</div>\n', file=htmlfile, append=TRUE)
    .html.end(htmlfile)
    ## Open the finished page in the browser; return the URL invisibly so
    ## the call pipes/assigns cleanly.
    full.name.path <- paste("file://", htmlfile, sep="")
    browseURL(full.name.path)
    invisible(full.name.path)
}
|
#++++++++++++++++++++++++++++++++++
# Catch text-encoding errors:
# Returns tolower(x) when the conversion succeeds, and NA when tolower()
# throws (e.g. on invalidly encoded strings), instead of aborting.
catch.error <- function(x){
  # tryCatch yields the lowered value on success and NA on error, so
  # tolower() is evaluated only once (the original called it twice:
  # once inside tryCatch and again after the inherits() check).
  tryCatch(tolower(x), error = function(e) NA)
}
#++++++++++++++++++++++++++++++++++
|
/catch_error.R
|
no_license
|
dtpapp/wordcloud2
|
R
| false
| false
| 477
|
r
|
#++++++++++++++++++++++++++++++++++
# Catch text-encoding errors:
# Returns tolower(x) when the conversion succeeds, and NA when tolower()
# throws (e.g. on invalidly encoded strings), instead of aborting.
catch.error <- function(x){
  # tryCatch yields the lowered value on success and NA on error, so
  # tolower() is evaluated only once (the original called it twice:
  # once inside tryCatch and again after the inherits() check).
  tryCatch(tolower(x), error = function(e) NA)
}
#++++++++++++++++++++++++++++++++++
|
# TOOL acgh-survival-test.R: "Survival test for called copy number data" (Statistical test for survival and called copy number data. The testing is recommended to be performed after running the Identify common regions from called copy number data tool.)
# INPUT regions.tsv: regions.tsv TYPE GENE_EXPRS
# INPUT META phenodata.tsv: phenodata.tsv TYPE GENERIC
# OUTPUT survival-test.tsv: survival-test.tsv
# PARAMETER survival: survival TYPE METACOLUMN_SEL DEFAULT survival (Phenodata column with survival data)
# PARAMETER status: status TYPE METACOLUMN_SEL DEFAULT status (Phenodata column with patient status: alive=0, dead=1)
# PARAMETER number.of.permutations: number.of.permutations TYPE INTEGER DEFAULT 10000 (The number of permutations. At least 10000 recommended for final calculations.)
# PARAMETER test.aberrations: test.aberrations TYPE [1: gains, -1: losses, 0: both] DEFAULT 0 (Whether to test only for gains or losses, or both.)
# Ilari Scheinin <firstname.lastname@gmail.com>
# 2013-04-04
# Adapted for Galaxy by Saskia Hiltemann (SH)
# Fetch commandline arguments
# Positional args: 1=scripts dir, 2=regions file, 3=phenodata file,
# 4=survival column name, 5=status column name, 6=permutation count,
# 7=aberration type (1/-1/0).
args <- commandArgs(trailingOnly = TRUE)
scriptsdir <- args[1]
# NOTE(review): scriptsdir is read from the command line but never used below.
inputfile <- args[2]
phenofile <- args[3]
#colsurvival <- as.numeric(args[4])
#colstatus <- as.numeric(args[5])
survival <- args[4]
status <- args[5]
number.of.permutations <- as.numeric(args[6])
test.aberrations <- args[7]
file <- inputfile
dat <- read.table(file, header=TRUE, sep='\t', quote='', row.names=1, as.is=TRUE, check.names=FALSE)
phenodata <- read.table(phenofile, header=TRUE, sep='\t', check.names=FALSE)
#survival<-colnames(phenodata)[colsurvival] #SH
#status<-colnames(phenodata)[colstatus] #SH
# Data columns start at the first 'chip.' or 'flag.' column; everything
# before that is region annotation.
first.data.col <- min(grep('^chip\\.', names(dat)), grep('^flag\\.', names(dat)))
data.info <- dat[,1:(first.data.col-1)]
# The call matrix: one 'flag.' column per sample.
calls <- as.matrix(dat[,grep('^flag\\.', colnames(dat))])
# first try parallel computing
# prob stays TRUE if the parallel attempt fails for any reason
# (e.g. CGHtestpar not installed); we then fall back below.
prob <- TRUE
try({
library(CGHtestpar)
pvs <- pvalstest_logrank(calls, data.info, dataclinvar=phenodata, whtime=which(colnames(phenodata) == survival), whstatus=which(colnames(phenodata) == status), lgonly=as.integer(test.aberrations), niter=number.of.permutations, ncpus=4)
fdrs <- fdrperm(pvs)
prob <- FALSE
}, silent=TRUE)
# if problems, fall back to sequential computing
if (prob) {
library(CGHtest)
pvs <- pvalstest_logrank(calls, data.info, dataclinvar=phenodata, whtime=which(colnames(phenodata) == survival), whstatus=which(colnames(phenodata) == status), lgonly=as.integer(test.aberrations), niter=number.of.permutations)
fdrs <- fdrperm(pvs)
}
# Append the original data columns to the FDR results and write the output;
# scipen=10 avoids scientific notation in the written table.
fdrs <- cbind(fdrs, dat[,first.data.col:ncol(dat)])
options(scipen=10)
write.table(fdrs, file='survival-test.tsv', quote=FALSE, sep='\t')
# EOF
|
/Chipster/Rscripts/acgh12-survival-test.R
|
no_license
|
thehyve/arraycgh-galaxy
|
R
| false
| false
| 2,763
|
r
|
# TOOL acgh-survival-test.R: "Survival test for called copy number data" (Statistical test for survival and called copy number data. The testing is recommended to be performed after running the Identify common regions from called copy number data tool.)
# INPUT regions.tsv: regions.tsv TYPE GENE_EXPRS
# INPUT META phenodata.tsv: phenodata.tsv TYPE GENERIC
# OUTPUT survival-test.tsv: survival-test.tsv
# PARAMETER survival: survival TYPE METACOLUMN_SEL DEFAULT survival (Phenodata column with survival data)
# PARAMETER status: status TYPE METACOLUMN_SEL DEFAULT status (Phenodata column with patient status: alive=0, dead=1)
# PARAMETER number.of.permutations: number.of.permutations TYPE INTEGER DEFAULT 10000 (The number of permutations. At least 10000 recommended for final calculations.)
# PARAMETER test.aberrations: test.aberrations TYPE [1: gains, -1: losses, 0: both] DEFAULT 0 (Whether to test only for gains or losses, or both.)
# Ilari Scheinin <firstname.lastname@gmail.com>
# 2013-04-04
# Adapted for Galaxy by Saskia Hiltemann (SH)
# Fetch commandline arguments
# Positional args: 1=scripts dir, 2=regions file, 3=phenodata file,
# 4=survival column name, 5=status column name, 6=permutation count,
# 7=aberration type (1/-1/0).
args <- commandArgs(trailingOnly = TRUE)
scriptsdir <- args[1]
# NOTE(review): scriptsdir is read from the command line but never used below.
inputfile <- args[2]
phenofile <- args[3]
#colsurvival <- as.numeric(args[4])
#colstatus <- as.numeric(args[5])
survival <- args[4]
status <- args[5]
number.of.permutations <- as.numeric(args[6])
test.aberrations <- args[7]
file <- inputfile
dat <- read.table(file, header=TRUE, sep='\t', quote='', row.names=1, as.is=TRUE, check.names=FALSE)
phenodata <- read.table(phenofile, header=TRUE, sep='\t', check.names=FALSE)
#survival<-colnames(phenodata)[colsurvival] #SH
#status<-colnames(phenodata)[colstatus] #SH
# Data columns start at the first 'chip.' or 'flag.' column; everything
# before that is region annotation.
first.data.col <- min(grep('^chip\\.', names(dat)), grep('^flag\\.', names(dat)))
data.info <- dat[,1:(first.data.col-1)]
# The call matrix: one 'flag.' column per sample.
calls <- as.matrix(dat[,grep('^flag\\.', colnames(dat))])
# first try parallel computing
# prob stays TRUE if the parallel attempt fails for any reason
# (e.g. CGHtestpar not installed); we then fall back below.
prob <- TRUE
try({
library(CGHtestpar)
pvs <- pvalstest_logrank(calls, data.info, dataclinvar=phenodata, whtime=which(colnames(phenodata) == survival), whstatus=which(colnames(phenodata) == status), lgonly=as.integer(test.aberrations), niter=number.of.permutations, ncpus=4)
fdrs <- fdrperm(pvs)
prob <- FALSE
}, silent=TRUE)
# if problems, fall back to sequential computing
if (prob) {
library(CGHtest)
pvs <- pvalstest_logrank(calls, data.info, dataclinvar=phenodata, whtime=which(colnames(phenodata) == survival), whstatus=which(colnames(phenodata) == status), lgonly=as.integer(test.aberrations), niter=number.of.permutations)
fdrs <- fdrperm(pvs)
}
# Append the original data columns to the FDR results and write the output;
# scipen=10 avoids scientific notation in the written table.
fdrs <- cbind(fdrs, dat[,first.data.col:ncol(dat)])
options(scipen=10)
write.table(fdrs, file='survival-test.tsv', quote=FALSE, sep='\t')
# EOF
|
# test_ISOLocalName.R
# Author: Emmanuel Blondel <emmanuel.blondel1@gmail.com>
#
# Description: Unit tests for ISOLocalName.R
#=======================
require(geometa, quietly = TRUE)
require(testthat)
context("ISOLocalName")
test_that("encoding",{
  testthat::skip_on_cran()
  #encoding
  # Build an ISOLocalName object and check its class and stored value.
  md <- ISOLocalName$new(value = "myvalue")
  expect_is(md, "ISOLocalName")
  expect_equal(md$value, "myvalue")
  # Encoding should produce an XML node.
  xml <- md$encode()
  expect_is(xml, "XMLInternalNode")
  #decoding
  # Round-trip: rebuild the object from XML and compare with the original.
  md2 <- ISOLocalName$new(xml = xml)
  xml2 <- md2$encode()  # NOTE(review): xml2 is computed but never compared -- TODO confirm intent
  expect_true(ISOAbstractObject$compare(md, md2))
})
|
/tests/testthat/test_ISOLocalName.R
|
no_license
|
cran/geometa
|
R
| false
| false
| 617
|
r
|
# test_ISOLocalName.R
# Author: Emmanuel Blondel <emmanuel.blondel1@gmail.com>
#
# Description: Unit tests for ISOLocalName.R
#=======================
require(geometa, quietly = TRUE)
require(testthat)
context("ISOLocalName")
test_that("encoding",{
  testthat::skip_on_cran()
  #encoding
  # Build an ISOLocalName object and check its class and stored value.
  md <- ISOLocalName$new(value = "myvalue")
  expect_is(md, "ISOLocalName")
  expect_equal(md$value, "myvalue")
  # Encoding should produce an XML node.
  xml <- md$encode()
  expect_is(xml, "XMLInternalNode")
  #decoding
  # Round-trip: rebuild the object from XML and compare with the original.
  md2 <- ISOLocalName$new(xml = xml)
  xml2 <- md2$encode()  # NOTE(review): xml2 is computed but never compared -- TODO confirm intent
  expect_true(ISOAbstractObject$compare(md, md2))
})
|
#' @title Closure
#'
#' @description
#' \code{propr.clo} divides each row of \code{X} by its row sum
#'
#' @details If \code{check} is \code{TRUE} then this function will stop if
#' there are any negative or \code{NA} values in \code{X}
#' @param X A matrix or dataframe of positive numeric values
#' @param check A logical scalar
#' @return A version of \code{X} where each row has been scaled so they sum to 1.
#' @examples
#' X <- matrix(1:12, nrow=3)
#' x <- propr.clo(X)
#' rowSums(x)
#' @export
propr.clo <- function(X, check=FALSE){
    if(check){
        # Test for NA first: if X contains NAs, any(X < 0) evaluates to NA
        # and if(NA) aborts with an uninformative "missing value" error
        # before the intended "NA values found" message could be raised.
        if(any(is.na(X))) stop("NA values found")
        if(any(X < 0)) stop("negative values found")
    }
    # Divide each row (margin 1) by its row sum so every row sums to 1.
    return(sweep(X, 1, rowSums(X), "/"))
}
#######################################################################################
#' @title Centred logratio transformation
#'
#' @description
#' \code{propr.clr} takes the log of each row of X and centres it (i.e., subtracts the mean).
#'
#' @details If \code{check} is \code{TRUE} then this function will stop if
#' there are any negative or \code{NA} values in \code{X}
#' @param X A matrix or dataframe of positive numeric values
#' @param check A logical scalar
#' @return The logarithm of \code{X} where each row has been shifted to have mean 0.
#' \deqn{\mathrm{clr}(x) = \log x_i - \frac{1}{D}\sum_{i=1}^D \log x_i}
#' @examples
#' X <- matrix(1:12, nrow=3)
#' x <- propr.clr(X)
#' rowSums(x) # Pretty close to zero
#' apply(exp(x), 1, prod) # The row products of exp(x) will be 1
#' @export
propr.clr <- function(X, check=FALSE){
    if(check){
        # Test for NA first: if X contains NAs, any(X < 0) evaluates to NA
        # and if(NA) aborts with an uninformative "missing value" error
        # before the intended "NA values found" message could be raised.
        if(any(is.na(X))) stop("NA values found")
        if(any(X < 0)) stop("negative values found")
    }
    logX <- log(X)
    # Subtract each row's mean log value so rows are centred at 0.
    return(sweep(logX, 1, rowMeans(logX), "-"))
}
#######################################################################################
#' @title Variance of logratios
#'
#' @description
#' \code{propr.vlr} returns a matrix where element (i,j) is
#' the variance (over rows) of the log of the ratios of column i and j.
#'
#' @details If \code{check} is \code{TRUE} then this function will stop if
#' there are any negative or \code{NA} values in \code{X}.
#' @param X A matrix or dataframe of positive numeric values
#' @param check A logical scalar
#' @return The symmetric matrix
#' \eqn{\mathrm{Var}{\log(X_i/X_j)}}{Var(log(X_i/X_j))} where \eqn{X_i} and \eqn{X_j}
#' denote \emph{columns} \eqn{i} and \eqn{j} of \eqn{X}.
#' @examples
#' N <- 10 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d roughly proportional
#' X <- data.frame(a=(1:N), b=(1:N) * rnorm(N, 10, 0.1),
#'                 c=(N:1), d=(N:1) * rnorm(N, 10, 1.0))
#' round(propr.vlr(X),2)
#' @export
propr.vlr <- function(X, check=FALSE){
    if(check){
        # Test for NA first: if X contains NAs, any(X < 0) evaluates to NA
        # and if(NA) aborts with an uninformative "missing value" error
        # before the intended "NA values found" message could be raised.
        if(any(is.na(X))) stop("NA values found")
        if(any(X < 0)) stop("negative values found")
    }
    logX <- log(X)
    Cov <- stats::var(logX) ## Note the avoidance of compositions::var
    D <- ncol(logX)
    # Var(log(Xi) - log(Xj)) = Var_i + Var_j - 2*Cov_ij, computed for all
    # pairs at once by broadcasting the diagonal across columns and rows.
    VarCol <- matrix(rep(diag(Cov), D), ncol = D)
    return(-2 * Cov + VarCol + t(VarCol))
}
#######################################################################################
#' @title Symmetric phi statistic
#'
#' @description
#' \code{propr.phisym} computes a matrix whose (i,j) entry is the
#' symmetric phi statistic between columns i and j of \code{X}.
#' @details \code{X} should be the result of a centred logratio transformation
#' @param X A matrix or dataframe
#' @return TBA.
#' @examples
#' N <- 10 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d roughly proportional
#' X <- data.frame(a=(1:N), b=(1:N) * rnorm(N, 10, 0.1),
#'                 c=(N:1), d=(N:1) * rnorm(N, 10, 1.0))
#' round(propr.phisym(propr.clr(X)),2)
#' @export
propr.phisym <- function(X){
    ## Column covariance matrix of the (clr-transformed) data.
    covX <- stats::var(X)
    varX <- diag(covX)
    ## Concordance-type ratio 2*cov_ij / (var_i + var_j); this is exactly 1
    ## on the diagonal, so the diagonal of the result below is 0.
    rho <- (2 * covX) / outer(varX, varX, "+")
    (1 - rho) / (1 + rho)
}
#######################################################################################
#' @title Expected value of phi from Dirichlet log-ratio distributions
#'
#' @description
#' default returns dataframe of the lower-triangle of symmetrical phi metric,
#' alternatively returns matrix of the symmetrical phi metric
#' in either case, the value of phi is the expected value of a number of Dirichlet
#' Monte-Carlo replicates of the data. This reduces the problem of
#' 0-count and low-count features being highly variable because their
#' values range wildly and so the expected value is always large
#' @details requires the aldex.clr function from ALDEx2, along with the
#' ALDEx2 accessors getMonteCarloInstances() and numMCInstances()
#' @param aldex.clr an S3 object from the aldex.clr function
#' @param return either "df" (lower-triangle dataframe) or "mat" (full matrix)
#' we ignore all the other measures that are used for trouble-shooting phi
#' the sma.df function in particular is very time and memory intensive
#' @examples
#' # use a count table where the samples are by column, features by row
#' x <- aldex.clr(count.table, return="df")
#' # if return = df, returns a dataframe of the expected value of the lower
#' triangle of the propr.phisym function.
#' # if return = mat, returns the symmetric matrix
#' The number of Dirichlet Monte-Carlo replicates is
#' obtained from the aldex.clr object
propr.aldex.phi <- function(aldex.clr, return="df"){
    # calculate expected value of phi
    # a single high phi value will push the component out of consideration
    # a median is right out for memory considerations
    # get first value
    # NOTE(review): getMonteCarloInstances()/numMCInstances() come from
    # ALDEx2 and are assumed to be attached by the caller -- TODO confirm.
    sym.phi <- propr.phisym(t(sapply(getMonteCarloInstances(aldex.clr),
        function(y){y[,1]})))
    # sum the rest of the values as we proceed through the DIR MC instances
    # NOTE(review): assumes at least 2 MC instances; with a single instance
    # 2:numMCInstances() would count backwards (2,1) -- TODO confirm upstream.
    for(i in 2:numMCInstances(aldex.clr)){
        #print(i)
        sym.phi <- sym.phi + propr.phisym(t(sapply(getMonteCarloInstances(aldex.clr),
            function(y){y[,i]})))
    }
    ##### Done ALDEx2 stuff
    # make indices of the correct size
    lt <- which(col(sym.phi)<row(sym.phi), arr.ind=FALSE)
    lt.ind <- which(col(sym.phi)<row(sym.phi), arr.ind=TRUE)
    # dataframe to hold the info,
    # data is a set of vectors where only the lower triangle is kept, because the matrix
    # is symmetrical
    # this is needed so subsequent subset function works properly
    sma.df <- data.frame(row=factor(rownames(sym.phi)[lt.ind[,"row"]]),
        col=factor(colnames(sym.phi)[lt.ind[,"col"]]))
    #save the lower triangle as an expected value
    # (divide the running sum by the number of MC instances)
    sma.df$phi <- sym.phi[lt] / numMCInstances(aldex.clr)
    if(return=="df") return(sma.df)
    if(return=="mat") return(sym.phi / numMCInstances(aldex.clr))
}
#######################################################################################
#######################################################################################
#' @title Standardised Major Axis fits of pairs of columns
#'
#' @description
#' \code{propr.sma} returns a list whose elements are matrices whose elements (i,j)
#' relate to the Standardised Major Axis fits of columns i and j of \code{X}
#' @details \strong{Note:} \code{X} should be the result of a centred logratio transformation
#' @param X A matrix or dataframe
#' @return A list of three elements \code{b}, \code{p} and \code{r2}.
#' @examples
#' N <- 100 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d unrelated
#' a <- seq(from=5, to=15, len=N)
#' b <- a * rnorm(N, 1, 0.1)
#' c <- rnorm(N,10)
#' d <- rnorm(N,10)
#' X <- data.frame(a, b, c, d)
#' pairs(X)
#' pairs(propr.clr(X)) # Note the spurious correlation between variables c and d
#' propr.sma(propr.clr(X))
#' @export
propr.sma <- function(X){
    X.cor <- stats::cor(X, use="pairwise.complete.obs")
    X.var <- stats::cov(X, use="pairwise.complete.obs")
    X.sd <- sqrt(diag(X.var))
    # Following the approach of Warton et al. Biol. Rev. (2006), 81, pp. 259-291
    # r.rf2 = cor(X+Y, X-Y)^2
    #       = (var(X) - var(Y))^2 / ((var(X) + var(Y))^2 - 4cov(X,Y)^2)
    r.rf2 <-
        (outer(diag(X.var), diag(X.var), "-")^2 ) /
        (outer(diag(X.var), diag(X.var), "+")^2 - 4 * X.var^2 )
    # At this point the diagonal of r.rf2 will be 0/0 = NaN. The correlation should be 0
    diag(r.rf2) <- 0
    res.dof <- nrow(X) - 2
    # F statistic for the test of slope b = 1. Renamed from "F", which
    # shadowed the logical constant FALSE (a common R footgun).
    f.stat <- r.rf2/(1 - r.rf2) * res.dof
    list(b=sign(X.cor) * outer(X.sd, X.sd, "/"),  # slope = sign(s_xy) s_y/s_x
         p=1 - stats::pf(f.stat, 1, res.dof),     # p-value of the test that b = 1
         r2=X.cor^2)                              # the squared correlation coefficient
}
#######################################################################################
#' @title Pairwise proportionality, slope and other statistics of columns
#'
#' @description
#' \code{propr.phiDF} returns a dataframe of various statistics for each pair
#' of columns in \code{X}.
#' @details \strong{Note:} \code{X} is clr-transformed internally.
#' @param X A matrix or dataframe
#' @return A dataframe with one row per column pair (lower triangle) and
#' columns \code{row}, \code{col}, \code{b}, \code{p}, \code{r2}, \code{vlr},
#' \code{phi} and \code{phisym}.
#' @examples
#' N <- 100 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d unrelated
#' a <- seq(from=5, to=15, len=N)
#' b <- a * rnorm(N, 1, 0.1)
#' c <- rnorm(N,10)
#' d <- rnorm(N,10)
#' X <- data.frame(a, b, c, d)
#' pairs(X)
#' pairs(propr.clr(X)) # Note the spurious correlation between variables c and d
#' propr.phiDF(X)
#' # Note that phi and phisym are related to the slope and r^2 values:
#' with(propr.phiDF(X),
#'      all.equal(
#'          phisym,
#'          (1 + b^2 - 2*b*sqrt(r2))/(1 + b^2 + 2*b*sqrt(r2))
#'      )
#' )
#' with(propr.phiDF(X),
#'      all.equal(
#'          phi,
#'          (1 + b^2 - 2*b*sqrt(r2))
#'      )
#' )
#' @export
propr.phiDF <- function(X){
    ## Fixed: the original called the unprefixed helpers clr/sma/vlr/phisym,
    ## which are not defined in this file (they are named propr.*), so the
    ## function failed with "could not find function" errors.
    X.clr <- propr.clr(X)
    X.sma <- propr.sma(X.clr)
    X.vlr <- propr.vlr(X)
    X.clr.var <- apply(X.clr, 2, stats::var) # The variance of each column
    X.phi <- sweep(X.vlr, 2, X.clr.var, FUN="/")
    X.phisym <- propr.phisym(X.clr)
    ## Keep only the lower triangle: all of the matrices are symmetric
    ## (up to the phi normalisation), so one entry per pair suffices.
    lt <- which(col(X.sma$b)<row(X.sma$b),arr.ind=FALSE)
    lt.ind <- which(col(X.sma$b)<row(X.sma$b),arr.ind=TRUE)
    result <- data.frame(
        row=factor(rownames(X.sma$b)[lt.ind[,"row"]]),
        col=factor(colnames(X.sma$b)[lt.ind[,"col"]])
    )
    result$b <- X.sma$b[lt]
    result$p <- X.sma$p[lt]
    result$r2 <- X.sma$r2[lt]
    result$vlr <- X.vlr[lt]
    result$phi <- X.phi[lt]
    result$phisym <- X.phisym[lt]
    return(result)
}
|
/chunk/R/propr-functions.R
|
no_license
|
maunadasari/CoDa_microbiome_tutorial
|
R
| false
| false
| 10,203
|
r
|
#' @title Closure
#'
#' @description
#' \code{propr.clo} divides each row of \code{X} by its row sum
#'
#' @details If \code{check} is \code{TRUE} then this function will stop if
#' there are any negative or \code{NA} values in \code{X}
#' @param X A matrix or dataframe of positive numeric values
#' @param check A logical scalar
#' @return A version of \code{X} where each row has been scaled so they sum to 1.
#' @examples
#' X <- matrix(1:12, nrow=3)
#' x <- propr.clo(X)
#' rowSums(x)
#' @export
propr.clo <- function(X, check=FALSE){
    if(check){
        # Test for NA first: if X contains NAs, any(X < 0) evaluates to NA
        # and if(NA) aborts with an uninformative "missing value" error
        # before the intended "NA values found" message could be raised.
        if(any(is.na(X))) stop("NA values found")
        if(any(X < 0)) stop("negative values found")
    }
    # Divide each row (margin 1) by its row sum so every row sums to 1.
    return(sweep(X, 1, rowSums(X), "/"))
}
#######################################################################################
#' @title Centred logratio transformation
#'
#' @description
#' \code{propr.clr} takes the log of each row of X and centres it (i.e., subtracts the mean).
#'
#' @details If \code{check} is \code{TRUE} then this function will stop if
#' there are any negative or \code{NA} values in \code{X}
#' @param X A matrix or dataframe of positive numeric values
#' @param check A logical scalar
#' @return The logarithm of \code{X} where each row has been shifted to have mean 0.
#' \deqn{\mathrm{clr}(x) = \log x_i - \frac{1}{D}\sum_{i=1}^D \log x_i}
#' @examples
#' X <- matrix(1:12, nrow=3)
#' x <- propr.clr(X)
#' rowSums(x) # Pretty close to zero
#' apply(exp(x), 1, prod) # The row products of exp(x) will be 1
#' @export
propr.clr <- function(X, check=FALSE){
    if(check){
        # Test for NA first: if X contains NAs, any(X < 0) evaluates to NA
        # and if(NA) aborts with an uninformative "missing value" error
        # before the intended "NA values found" message could be raised.
        if(any(is.na(X))) stop("NA values found")
        if(any(X < 0)) stop("negative values found")
    }
    logX <- log(X)
    # Subtract each row's mean log value so rows are centred at 0.
    return(sweep(logX, 1, rowMeans(logX), "-"))
}
#######################################################################################
#' @title Variance of logratios
#'
#' @description
#' \code{propr.vlr} returns a matrix where element (i,j) is
#' the variance (over rows) of the log of the ratios of column i and j.
#'
#' @details If \code{check} is \code{TRUE} then this function will stop if
#' there are any negative or \code{NA} values in \code{X}.
#' @param X A matrix or dataframe of positive numeric values
#' @param check A logical scalar
#' @return The symmetric matrix
#' \eqn{\mathrm{Var}{\log(X_i/X_j)}}{Var(log(X_i/X_j))} where \eqn{X_i} and \eqn{X_j}
#' denote \emph{columns} \eqn{i} and \eqn{j} of \eqn{X}.
#' @examples
#' N <- 10 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d roughly proportional
#' X <- data.frame(a=(1:N), b=(1:N) * rnorm(N, 10, 0.1),
#'                 c=(N:1), d=(N:1) * rnorm(N, 10, 1.0))
#' round(propr.vlr(X),2)
#' @export
propr.vlr <- function(X, check=FALSE){
    if(check){
        # Test for NA first: if X contains NAs, any(X < 0) evaluates to NA
        # and if(NA) aborts with an uninformative "missing value" error
        # before the intended "NA values found" message could be raised.
        if(any(is.na(X))) stop("NA values found")
        if(any(X < 0)) stop("negative values found")
    }
    logX <- log(X)
    Cov <- stats::var(logX) ## Note the avoidance of compositions::var
    D <- ncol(logX)
    # Var(log(Xi) - log(Xj)) = Var_i + Var_j - 2*Cov_ij, computed for all
    # pairs at once by broadcasting the diagonal across columns and rows.
    VarCol <- matrix(rep(diag(Cov), D), ncol = D)
    return(-2 * Cov + VarCol + t(VarCol))
}
#######################################################################################
#' @title Symmetric phi statistic
#'
#' @description
#' \code{propr.phisym} computes a matrix whose (i,j) entry is the
#' symmetric phi statistic between columns i and j of \code{X}.
#' @details \code{X} should be the result of a centred logratio transformation
#' @param X A matrix or dataframe
#' @return TBA.
#' @examples
#' N <- 10 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d roughly proportional
#' X <- data.frame(a=(1:N), b=(1:N) * rnorm(N, 10, 0.1),
#'                 c=(N:1), d=(N:1) * rnorm(N, 10, 1.0))
#' round(propr.phisym(propr.clr(X)),2)
#' @export
propr.phisym <- function(X){
    ## Column covariance matrix of the (clr-transformed) data.
    covX <- stats::var(X)
    varX <- diag(covX)
    ## Concordance-type ratio 2*cov_ij / (var_i + var_j); this is exactly 1
    ## on the diagonal, so the diagonal of the result below is 0.
    rho <- (2 * covX) / outer(varX, varX, "+")
    (1 - rho) / (1 + rho)
}
#######################################################################################
#' @title Expected value of phi from Dirichlet log-ratio distributions
#'
#' @description
#' default returns dataframe of the lower-triangle of symmetrical phi metric,
#' alternatively returns matrix of the symmetrical phi metric
#' in either case, the value of phi is the expected value of a number of Dirichlet
#' Monte-Carlo replicates of the data. This reduces the problem of
#' 0-count and low-count features being highly variable because their
#' values range wildly and so the expected value is always large
#' @details requires the aldex.clr function from ALDEx2, along with the
#' ALDEx2 accessors getMonteCarloInstances() and numMCInstances()
#' @param aldex.clr an S3 object from the aldex.clr function
#' @param return either "df" (lower-triangle dataframe) or "mat" (full matrix)
#' we ignore all the other measures that are used for trouble-shooting phi
#' the sma.df function in particular is very time and memory intensive
#' @examples
#' # use a count table where the samples are by column, features by row
#' x <- aldex.clr(count.table, return="df")
#' # if return = df, returns a dataframe of the expected value of the lower
#' triangle of the propr.phisym function.
#' # if return = mat, returns the symmetric matrix
#' The number of Dirichlet Monte-Carlo replicates is
#' obtained from the aldex.clr object
propr.aldex.phi <- function(aldex.clr, return="df"){
    # calculate expected value of phi
    # a single high phi value will push the component out of consideration
    # a median is right out for memory considerations
    # get first value
    # NOTE(review): getMonteCarloInstances()/numMCInstances() come from
    # ALDEx2 and are assumed to be attached by the caller -- TODO confirm.
    sym.phi <- propr.phisym(t(sapply(getMonteCarloInstances(aldex.clr),
        function(y){y[,1]})))
    # sum the rest of the values as we proceed through the DIR MC instances
    # NOTE(review): assumes at least 2 MC instances; with a single instance
    # 2:numMCInstances() would count backwards (2,1) -- TODO confirm upstream.
    for(i in 2:numMCInstances(aldex.clr)){
        #print(i)
        sym.phi <- sym.phi + propr.phisym(t(sapply(getMonteCarloInstances(aldex.clr),
            function(y){y[,i]})))
    }
    ##### Done ALDEx2 stuff
    # make indices of the correct size
    lt <- which(col(sym.phi)<row(sym.phi), arr.ind=FALSE)
    lt.ind <- which(col(sym.phi)<row(sym.phi), arr.ind=TRUE)
    # dataframe to hold the info,
    # data is a set of vectors where only the lower triangle is kept, because the matrix
    # is symmetrical
    # this is needed so subsequent subset function works properly
    sma.df <- data.frame(row=factor(rownames(sym.phi)[lt.ind[,"row"]]),
        col=factor(colnames(sym.phi)[lt.ind[,"col"]]))
    #save the lower triangle as an expected value
    # (divide the running sum by the number of MC instances)
    sma.df$phi <- sym.phi[lt] / numMCInstances(aldex.clr)
    if(return=="df") return(sma.df)
    if(return=="mat") return(sym.phi / numMCInstances(aldex.clr))
}
#######################################################################################
#######################################################################################
#' @title Standardised Major Axis fits of pairs of columns
#'
#' @description
#' \code{propr.sma} returns a list whose elements are matrices whose elements (i,j)
#' relate to the Standardised Major Axis fits of columns i and j of \code{X}
#' @details \strong{Note:} \code{X} should be the result of a centred logratio transformation
#' @param X A matrix or dataframe
#' @return A list of three elements \code{b}, \code{p} and \code{r2}.
#' @examples
#' N <- 100 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d unrelated
#' a <- seq(from=5, to=15, len=N)
#' b <- a * rnorm(N, 1, 0.1)
#' c <- rnorm(N,10)
#' d <- rnorm(N,10)
#' X <- data.frame(a, b, c, d)
#' pairs(X)
#' pairs(propr.clr(X)) # Note the spurious correlation between variables c and d
#' propr.sma(propr.clr(X))
#' @export
propr.sma <- function(X){
    X.cor <- stats::cor(X, use="pairwise.complete.obs")
    X.var <- stats::cov(X, use="pairwise.complete.obs")
    X.sd <- sqrt(diag(X.var))
    # Following the approach of Warton et al. Biol. Rev. (2006), 81, pp. 259-291
    # r.rf2 = cor(X+Y, X-Y)^2
    #       = (var(X) - var(Y))^2 / ((var(X) + var(Y))^2 - 4cov(X,Y)^2)
    r.rf2 <-
        (outer(diag(X.var), diag(X.var), "-")^2 ) /
        (outer(diag(X.var), diag(X.var), "+")^2 - 4 * X.var^2 )
    # At this point the diagonal of r.rf2 will be 0/0 = NaN. The correlation should be 0
    diag(r.rf2) <- 0
    res.dof <- nrow(X) - 2
    # F statistic for the test of slope b = 1. Renamed from "F", which
    # shadowed the logical constant FALSE (a common R footgun).
    f.stat <- r.rf2/(1 - r.rf2) * res.dof
    list(b=sign(X.cor) * outer(X.sd, X.sd, "/"),  # slope = sign(s_xy) s_y/s_x
         p=1 - stats::pf(f.stat, 1, res.dof),     # p-value of the test that b = 1
         r2=X.cor^2)                              # the squared correlation coefficient
}
#######################################################################################
#' @title Pairwise proportionality, slope and other statistics of columns
#'
#' @description
#' \code{propr.phiDF} returns a dataframe of various statistics for each pair
#' of columns in \code{X}.
#' @details \strong{Note:} \code{X} is clr-transformed internally.
#' @param X A matrix or dataframe
#' @return A dataframe with one row per column pair (lower triangle) and
#' columns \code{row}, \code{col}, \code{b}, \code{p}, \code{r2}, \code{vlr},
#' \code{phi} and \code{phisym}.
#' @examples
#' N <- 100 # Number of observations
#' # Make a data frame with columns a and b roughly proportional
#' # and columns c and d unrelated
#' a <- seq(from=5, to=15, len=N)
#' b <- a * rnorm(N, 1, 0.1)
#' c <- rnorm(N,10)
#' d <- rnorm(N,10)
#' X <- data.frame(a, b, c, d)
#' pairs(X)
#' pairs(propr.clr(X)) # Note the spurious correlation between variables c and d
#' propr.phiDF(X)
#' # Note that phi and phisym are related to the slope and r^2 values:
#' with(propr.phiDF(X),
#'      all.equal(
#'          phisym,
#'          (1 + b^2 - 2*b*sqrt(r2))/(1 + b^2 + 2*b*sqrt(r2))
#'      )
#' )
#' with(propr.phiDF(X),
#'      all.equal(
#'          phi,
#'          (1 + b^2 - 2*b*sqrt(r2))
#'      )
#' )
#' @export
propr.phiDF <- function(X){
    ## Fixed: the original called the unprefixed helpers clr/sma/vlr/phisym,
    ## which are not defined in this file (they are named propr.*), so the
    ## function failed with "could not find function" errors.
    X.clr <- propr.clr(X)
    X.sma <- propr.sma(X.clr)
    X.vlr <- propr.vlr(X)
    X.clr.var <- apply(X.clr, 2, stats::var) # The variance of each column
    X.phi <- sweep(X.vlr, 2, X.clr.var, FUN="/")
    X.phisym <- propr.phisym(X.clr)
    ## Keep only the lower triangle: all of the matrices are symmetric
    ## (up to the phi normalisation), so one entry per pair suffices.
    lt <- which(col(X.sma$b)<row(X.sma$b),arr.ind=FALSE)
    lt.ind <- which(col(X.sma$b)<row(X.sma$b),arr.ind=TRUE)
    result <- data.frame(
        row=factor(rownames(X.sma$b)[lt.ind[,"row"]]),
        col=factor(colnames(X.sma$b)[lt.ind[,"col"]])
    )
    result$b <- X.sma$b[lt]
    result$p <- X.sma$p[lt]
    result$r2 <- X.sma$r2[lt]
    result$vlr <- X.vlr[lt]
    result$phi <- X.phi[lt]
    result$phisym <- X.phisym[lt]
    return(result)
}
|
#Lab 9 Questions
library(here)
library(palmerpenguins)
# Copy the lazy-loaded penguins dataset into the workspace.
penguins<- penguins
##---------Question 1----------------
# Bartlett test of homogeneity of body-mass variance across species.
bartlett.test(body_mass_g ~ species, data = penguins)
##---------Question 2-----------------
# Same test, across sexes.
bartlett.test(body_mass_g ~ sex, data = penguins)
##----------Question 3----------------
# Widen the bottom margin so the rotated (las = 2) group labels fit.
par(mar = c(8, 4, 2, 2))
boxplot(
  body_mass_g ~ sex * species,
  data = penguins,
  las = 2,
  xlab = NULL,
  ylab = "body mass(g)")
dev.off()
# Collect body-mass values per sex-by-species group (FUN = c keeps the raw
# vectors rather than summarising them).
species_sex = aggregate(
  body_mass_g ~ sex * species,
  data = penguins,
  FUN = c)
str(species_sex)
species_sex$body_mass_g
#run the bartlett test to test for homogeneity of variance
bartlett.test(species_sex$body_mass_g)
##-------------Question 4--------------
|
/individual_assignment/Lab 9 Questions.R
|
no_license
|
hparry/ECo_602_2020
|
R
| false
| false
| 712
|
r
|
#Lab 9 Questions
library(here)
library(palmerpenguins)
# Copy the lazy-loaded penguins dataset into the workspace.
penguins<- penguins
##---------Question 1----------------
# Bartlett test of homogeneity of body-mass variance across species.
bartlett.test(body_mass_g ~ species, data = penguins)
##---------Question 2-----------------
# Same test, across sexes.
bartlett.test(body_mass_g ~ sex, data = penguins)
##----------Question 3----------------
# Widen the bottom margin so the rotated (las = 2) group labels fit.
par(mar = c(8, 4, 2, 2))
boxplot(
  body_mass_g ~ sex * species,
  data = penguins,
  las = 2,
  xlab = NULL,
  ylab = "body mass(g)")
dev.off()
# Collect body-mass values per sex-by-species group (FUN = c keeps the raw
# vectors rather than summarising them).
species_sex = aggregate(
  body_mass_g ~ sex * species,
  data = penguins,
  FUN = c)
str(species_sex)
species_sex$body_mass_g
#run the bartlett test to test for homogeneity of variance
bartlett.test(species_sex$body_mass_g)
##-------------Question 4--------------
|
library(lmomco)
### Name: lmomsRCmark
### Title: Sample L-moments Moments for Right-Tail Censoring by a Marking
### Variable
### Aliases: lmomsRCmark
### Keywords: L-moment (sample) data censoring
### ** Examples
# Efron, B., 1988, Logistic regression, survival analysis, and the
# Kaplan-Meier curve: Journal of the American Statistical Association,
# v. 83, no. 402, pp.414--425
# Survival time measured in days for 51 patients with a marking
# variable in the "time,mark" ensemble. If marking variable is 1,
# then the time is right-censored by an unknown censoring threshold.
Efron <-
c(7,0, 34,0, 42,0, 63,0, 64,0, 74,1, 83,0, 84,0, 91,0,
108,0, 112,0, 129,0, 133,0, 133,0, 139,0, 140,0, 140,0,
146,0, 149,0, 154,0, 157,0, 160,0, 160,0, 165,0, 173,0,
176,0, 185,1, 218,0, 225,0, 241,0, 248,0, 273,0, 277,0,
279,1, 297,0, 319,1, 405,0, 417,0, 420,0, 440,0, 523,1,
523,0, 583,0, 594,0, 1101,0, 1116,1, 1146,0, 1226,1,
1349,1, 1412,1, 1417,1);
# Break up the ensembles into two vectors.
# FIX: the original assigned the times to `T`, masking R's TRUE alias;
# assign directly to a descriptive name instead.
ix <- seq(1,length(Efron),by=2)
Efron.data <- Efron[ix]        # survival times (days)
Efron.rcmark <- Efron[(ix+1)]  # 1 = right-censored, 0 = observed
# Sample L-moments: right-censored by the marking variable, unbiased,
# and computed while ignoring the censoring indicator.
lmr.RC <- lmomsRCmark(Efron.data, rcmark=Efron.rcmark)
lmr.ub <- lmoms(Efron.data)
lmr.noRC <- lmomsRCmark(Efron.data)
# Plot the data (censored points in red) and fitted Kappa distributions.
PP <- pp(Efron.data)  # plotting positions
plot(PP, Efron.data, col=(Efron.rcmark+1), ylab="DATA")
lines(PP, qlmomco(PP, lmom2par(lmr.noRC, type="kap")), lwd=3, col=8)
lines(PP, qlmomco(PP, lmom2par(lmr.ub, type="kap")))
lines(PP, qlmomco(PP, lmom2par(lmr.RC, type="kap")), lwd=2, col=2)
legend(0,1000,c("uncensored L-moments by indicator (Kappa distribution)",
"unbiased L-moments (Kappa)",
"right-censored L-moments by indicator (Kappa distribution)"),
lwd=c(3,1,2), col=c(8,1,2))
########
# Second example: streamflow left-censored at a detection limit, handled by
# flipping the data (flip=TRUE) so the right-censoring machinery applies.
ZF <- 5 # discharge of undetection of streamflow
Q <- c(rep(ZF,8), 116, 34, 56, 78, 909, 12, 56, 45, 560, 300, 2500)
Qc <- Q == ZF; Qc <- as.numeric(Qc)  # 1 = below detection (censored)
lmr <- lmoms(Q)
lmr.cen <- lmomsRCmark(Q, rcmark=Qc, flip=TRUE)
flip <- lmr.cen$flip
fit <- pargev(lmr); fit.cen <- pargev(lmr.cen)
# FIX: renamed `F` -> `nonexceed` (nonexceedance probabilities) so the
# FALSE alias is not shadowed.
nonexceed <- seq(0.001, 0.999, by=0.001)
Qfit <- qlmomco( nonexceed, fit )
Qfit.cen <- flip - qlmomco(1 - nonexceed, fit.cen) # remember to reverse qdf
plot(pp(Q),sort(Q), log="y", xlab="NONEXCEED PROB.", ylab="QUANTILE")
lines(nonexceed, Qfit); lines(nonexceed, Qfit.cen,col=2)
|
/data/genthat_extracted_code/lmomco/examples/lmomsRCmark.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,337
|
r
|
library(lmomco)
### Name: lmomsRCmark
### Title: Sample L-moments Moments for Right-Tail Censoring by a Marking
### Variable
### Aliases: lmomsRCmark
### Keywords: L-moment (sample) data censoring
### ** Examples
# Efron, B., 1988, Logistic regression, survival analysis, and the
# Kaplan-Meier curve: Journal of the American Statistical Association,
# v. 83, no. 402, pp.414--425
# Survival time measured in days for 51 patients with a marking
# variable in the "time,mark" ensemble. If marking variable is 1,
# then the time is right-censored by an unknown censoring threshold.
Efron <-
c(7,0, 34,0, 42,0, 63,0, 64,0, 74,1, 83,0, 84,0, 91,0,
108,0, 112,0, 129,0, 133,0, 133,0, 139,0, 140,0, 140,0,
146,0, 149,0, 154,0, 157,0, 160,0, 160,0, 165,0, 173,0,
176,0, 185,1, 218,0, 225,0, 241,0, 248,0, 273,0, 277,0,
279,1, 297,0, 319,1, 405,0, 417,0, 420,0, 440,0, 523,1,
523,0, 583,0, 594,0, 1101,0, 1116,1, 1146,0, 1226,1,
1349,1, 1412,1, 1417,1);
# Break up the ensembles into two vectors.
# FIX: the original assigned the times to `T`, masking R's TRUE alias;
# assign directly to a descriptive name instead.
ix <- seq(1,length(Efron),by=2)
Efron.data <- Efron[ix]        # survival times (days)
Efron.rcmark <- Efron[(ix+1)]  # 1 = right-censored, 0 = observed
# Sample L-moments: right-censored by the marking variable, unbiased,
# and computed while ignoring the censoring indicator.
lmr.RC <- lmomsRCmark(Efron.data, rcmark=Efron.rcmark)
lmr.ub <- lmoms(Efron.data)
lmr.noRC <- lmomsRCmark(Efron.data)
# Plot the data (censored points in red) and fitted Kappa distributions.
PP <- pp(Efron.data)  # plotting positions
plot(PP, Efron.data, col=(Efron.rcmark+1), ylab="DATA")
lines(PP, qlmomco(PP, lmom2par(lmr.noRC, type="kap")), lwd=3, col=8)
lines(PP, qlmomco(PP, lmom2par(lmr.ub, type="kap")))
lines(PP, qlmomco(PP, lmom2par(lmr.RC, type="kap")), lwd=2, col=2)
legend(0,1000,c("uncensored L-moments by indicator (Kappa distribution)",
"unbiased L-moments (Kappa)",
"right-censored L-moments by indicator (Kappa distribution)"),
lwd=c(3,1,2), col=c(8,1,2))
########
# Second example: streamflow left-censored at a detection limit, handled by
# flipping the data (flip=TRUE) so the right-censoring machinery applies.
ZF <- 5 # discharge of undetection of streamflow
Q <- c(rep(ZF,8), 116, 34, 56, 78, 909, 12, 56, 45, 560, 300, 2500)
Qc <- Q == ZF; Qc <- as.numeric(Qc)  # 1 = below detection (censored)
lmr <- lmoms(Q)
lmr.cen <- lmomsRCmark(Q, rcmark=Qc, flip=TRUE)
flip <- lmr.cen$flip
fit <- pargev(lmr); fit.cen <- pargev(lmr.cen)
# FIX: renamed `F` -> `nonexceed` (nonexceedance probabilities) so the
# FALSE alias is not shadowed.
nonexceed <- seq(0.001, 0.999, by=0.001)
Qfit <- qlmomco( nonexceed, fit )
Qfit.cen <- flip - qlmomco(1 - nonexceed, fit.cen) # remember to reverse qdf
plot(pp(Q),sort(Q), log="y", xlab="NONEXCEED PROB.", ylab="QUANTILE")
lines(nonexceed, Qfit); lines(nonexceed, Qfit.cen,col=2)
|
#setwd("~/Dropbox/ChihPingWang/Model1/abc_V2/simulation64V2/oubmbm/")
# ABC simulation driver for the OU-OU-CIR model (tree size 32), estimating
# the `alpha.x` parameter via the bcifcn helper.
setwd("~/Dropbox/FCU/Teaching/Mentoring/2019Spring/ChihPingWang/Model1/abc_V2/simulation_32_bciV2/ououcir/")
rm(list=ls())
library(TreeSim)
library(EasyABC)
library(coda)
library(Sim.DiffProc)
library(MCMCpack)
library(ape)
library(abc)
library(nlme)
library(adephylo)
library(maps)
library(phytools)  # FIX: was loaded twice in the original; one call suffices
library(phylobase)
source("~/Dropbox/FCU/Teaching/Mentoring/2019Spring/ChihPingWang/Model1/abc_V2/simulation_32_bciV2/bcifcn.r")
load("ououcirsimV2size32.RData")
# Summary statistics for a trait on a tree: moments of the raw trait and of
# its phylogenetically independent contrasts, plus Blomberg's K and Pagel's
# lambda.
sum.stat<-function(trait=trait,tree=tree){
names(trait)<-tree$tip.label
pic.trait<-pic(x=trait,phy=tree)
return(c(mean(trait),sd(trait),median(trait),skewness(trait),kurtosis(trait),mean(pic.trait),sd(pic.trait),median(pic.trait),skewness(pic.trait),kurtosis(pic.trait),phylosig(tree,x=trait,method = "K",test=T)$K,phylosig(tree,x=trait,method = "lambda",test=T)$lambda))
}
# NOTE(review): `rej`, `prior.params`, `root`, `tree`, `model.params.array`
# and `n` are not defined in this script -- presumably restored by load()
# above; verify against the .RData contents. Also `rep=50` shadows
# base::rep for the remainder of the script.
postTheta <- data.frame(rej$unadj.values)
rep=50
sims=1000
root=root
tree=tree
tol=0.1
model="ououcir"
true.reg.params=prior.params$reg.params
names(true.reg.params)<-c("b0","b1","b2")
paramname<-"alpha.x"
# Resample `rep` posterior draws of the focal parameter without replacement.
post.params.array=sample(postTheta[,paramname],rep,replace=FALSE)
true.model.params=prior.params$model.params
rownames(model.params.array)<-c("alpha.y", "alpha.x", "theta.x", "sigmasq.x","alpha.tau","theta.tau","sigmasq.tau")
sample_params<-bcifcn(model=model,paramname=paramname,rep=rep,post.params.array=post.params.array,true.model.params=true.model.params,true.reg.params=true.reg.params,root=root,tree=tree,sims=sims,tol=tol)
save.image(paste(model,n,"_",paramname,".rda",sep=""))
|
/simulation_32_bciV2/ououcir/ououcirabcSimV2size32bci.alpha.x.r
|
no_license
|
djhwueng/ououcir
|
R
| false
| false
| 1,632
|
r
|
#setwd("~/Dropbox/ChihPingWang/Model1/abc_V2/simulation64V2/oubmbm/")
# ABC simulation driver for the OU-OU-CIR model (tree size 32), estimating
# the `alpha.x` parameter via the bcifcn helper.
setwd("~/Dropbox/FCU/Teaching/Mentoring/2019Spring/ChihPingWang/Model1/abc_V2/simulation_32_bciV2/ououcir/")
rm(list=ls())
library(TreeSim)
library(EasyABC)
library(coda)
library(Sim.DiffProc)
library(MCMCpack)
library(ape)
library(abc)
library(nlme)
library(adephylo)
library(maps)
library(phytools)  # FIX: was loaded twice in the original; one call suffices
library(phylobase)
source("~/Dropbox/FCU/Teaching/Mentoring/2019Spring/ChihPingWang/Model1/abc_V2/simulation_32_bciV2/bcifcn.r")
load("ououcirsimV2size32.RData")
# Summary statistics for a trait on a tree: moments of the raw trait and of
# its phylogenetically independent contrasts, plus Blomberg's K and Pagel's
# lambda.
sum.stat<-function(trait=trait,tree=tree){
names(trait)<-tree$tip.label
pic.trait<-pic(x=trait,phy=tree)
return(c(mean(trait),sd(trait),median(trait),skewness(trait),kurtosis(trait),mean(pic.trait),sd(pic.trait),median(pic.trait),skewness(pic.trait),kurtosis(pic.trait),phylosig(tree,x=trait,method = "K",test=T)$K,phylosig(tree,x=trait,method = "lambda",test=T)$lambda))
}
# NOTE(review): `rej`, `prior.params`, `root`, `tree`, `model.params.array`
# and `n` are not defined in this script -- presumably restored by load()
# above; verify against the .RData contents. Also `rep=50` shadows
# base::rep for the remainder of the script.
postTheta <- data.frame(rej$unadj.values)
rep=50
sims=1000
root=root
tree=tree
tol=0.1
model="ououcir"
true.reg.params=prior.params$reg.params
names(true.reg.params)<-c("b0","b1","b2")
paramname<-"alpha.x"
# Resample `rep` posterior draws of the focal parameter without replacement.
post.params.array=sample(postTheta[,paramname],rep,replace=FALSE)
true.model.params=prior.params$model.params
rownames(model.params.array)<-c("alpha.y", "alpha.x", "theta.x", "sigmasq.x","alpha.tau","theta.tau","sigmasq.tau")
sample_params<-bcifcn(model=model,paramname=paramname,rep=rep,post.params.array=post.params.array,true.model.params=true.model.params,true.reg.params=true.reg.params,root=root,tree=tree,sims=sims,tol=tol)
save.image(paste(model,n,"_",paramname,".rda",sep=""))
|
#
# PART 1: Instance level
# Build interactive modelStudio dashboards (instance-level explanations) for
# four archived Titanic models, each explaining the same two observations,
# and save each dashboard as a self-contained HTML file.
library(DALEX)
library(modelStudio)
library(randomForest)
library(rms)
library(gbm)
# Two archived single-row observations to explain.
johny_d <- archivist::aread("pbiecek/models/e3596")
henry <- archivist::aread("pbiecek/models/a6538")
new_obs <- rbind(johny_d = johny_d, henry = henry)
# restore models for titanic
# Each stanza: download the archived explainer, force its type to
# "classification", build the dashboard, save it to HTML.
# NOTE(review): the four stanzas are identical except for hash and name --
# a candidate for a loop/helper in a future refactor.
titanic_lmr_v6 <- archivist::aread("pbiecek/models/51c50")
titanic_lmr_v6$model_info$type <- "classification"
ms <- modelStudio(titanic_lmr_v6, new_obs)
r2d3::save_d3_html(ms, file = "titanic_lmr_v6.html", selfcontained = TRUE,
title = "modelStudio for titanic_lmr_v6 and titanic")
titanic_rf_v3 <- archivist::aread("pbiecek/models/0e5d2")
titanic_rf_v3$model_info$type <- "classification"
ms <- modelStudio(titanic_rf_v3, new_obs)
r2d3::save_d3_html(ms, file = "titanic_rf_v3.html", selfcontained = TRUE,
title = "modelStudio for titanic_rf_v3 and titanic")
titanic_gbm_v6 <- archivist::aread("pbiecek/models/3d514")
titanic_gbm_v6$model_info$type <- "classification"
ms <- modelStudio(titanic_gbm_v6, new_obs)
r2d3::save_d3_html(ms, file = "titanic_gbm_v6.html", selfcontained = TRUE,
title = "modelStudio for titanic_gbm_v6 and titanic")
titanic_rf_v6 <- archivist::aread("pbiecek/models/9b971")
titanic_rf_v6$model_info$type <- "classification"
ms <- modelStudio(titanic_rf_v6, new_obs)
r2d3::save_d3_html(ms, file = "titanic_rf_v6.html", selfcontained = TRUE,
title = "modelStudio for titanic_rf_v6 and titanic")
|
/modelStudio/generateModelStudio.R
|
no_license
|
pbiecek/ema
|
R
| false
| false
| 1,500
|
r
|
#
# PART 1: Instance level
# Build interactive modelStudio dashboards (instance-level explanations) for
# four archived Titanic models, each explaining the same two observations,
# and save each dashboard as a self-contained HTML file.
library(DALEX)
library(modelStudio)
library(randomForest)
library(rms)
library(gbm)
# Two archived single-row observations to explain.
johny_d <- archivist::aread("pbiecek/models/e3596")
henry <- archivist::aread("pbiecek/models/a6538")
new_obs <- rbind(johny_d = johny_d, henry = henry)
# restore models for titanic
# Each stanza: download the archived explainer, force its type to
# "classification", build the dashboard, save it to HTML.
# NOTE(review): the four stanzas are identical except for hash and name --
# a candidate for a loop/helper in a future refactor.
titanic_lmr_v6 <- archivist::aread("pbiecek/models/51c50")
titanic_lmr_v6$model_info$type <- "classification"
ms <- modelStudio(titanic_lmr_v6, new_obs)
r2d3::save_d3_html(ms, file = "titanic_lmr_v6.html", selfcontained = TRUE,
title = "modelStudio for titanic_lmr_v6 and titanic")
titanic_rf_v3 <- archivist::aread("pbiecek/models/0e5d2")
titanic_rf_v3$model_info$type <- "classification"
ms <- modelStudio(titanic_rf_v3, new_obs)
r2d3::save_d3_html(ms, file = "titanic_rf_v3.html", selfcontained = TRUE,
title = "modelStudio for titanic_rf_v3 and titanic")
titanic_gbm_v6 <- archivist::aread("pbiecek/models/3d514")
titanic_gbm_v6$model_info$type <- "classification"
ms <- modelStudio(titanic_gbm_v6, new_obs)
r2d3::save_d3_html(ms, file = "titanic_gbm_v6.html", selfcontained = TRUE,
title = "modelStudio for titanic_gbm_v6 and titanic")
titanic_rf_v6 <- archivist::aread("pbiecek/models/9b971")
titanic_rf_v6$model_info$type <- "classification"
ms <- modelStudio(titanic_rf_v6, new_obs)
r2d3::save_d3_html(ms, file = "titanic_rf_v6.html", selfcontained = TRUE,
title = "modelStudio for titanic_rf_v6 and titanic")
|
library(vcfR);library(ape);library(progress);library(data.table)
library(plyr);library(ggplot2);library(ggrepel);library(zoo);library(cowplot)
# Whole-genome NJ-tree scan for Selasphorus hummingbirds: build a concatenated
# NJ tree, infer local NJ trees in nonoverlapping windows, weight topologies
# with twisst, and plot topology weights along the genome.
setwd("~/selasphorus_evolution/")
vcf <- read.vcfR("~/Dropbox/selasphorus/called_snps/selasphorus_het_depth_mq30_biallel_nomd.recode.vcf.gz")
#concatenated NJ tree
dna <- vcfR2DNAbin(vcf,unphased_as_NA = F,consensus = T,extract.haps = F)
dist <- dist.dna(dna,model="K80")
nj <- bionj(dist)
# Root on the S. calliope samples (outgroup).
nj <- root(nj,c("Scal1","Scal2","Scal3","Scal4","Scal5","Scal6","Scal7"))
plot(nj,cex=0.5)
#get local nj trees in nonoverlapping windows
trees <- list()  # NOTE(review): never populated below -- apparently unused.
pb <- progress_bar$new(total = length(unique(vcf@fix[,1])))
for(contig in unique(vcf@fix[,1])){
a <- vcf[vcf@fix[,1]==contig]
start <- 1;step <- 2e4
if(max(as.numeric(a@fix[,2]))>step){ #for contigs longer than step
for(i in seq(step,max(as.numeric(a@fix[,2])),step)){
b <- a[as.numeric(a@fix[,2])>start & as.numeric(a@fix[,2])<i]
nsnps <- nrow(b@gt)
c <- vcfR2DNAbin(b,unphased_as_NA = F,consensus = T,extract.haps = F,verbose=F)
dist <- dist.dna(c,model="K80")
# NOTE(review): the comment says ">50 SNPs" but the code requires >=20
# sites. Also `start` only advances when a window passes this filter, so
# after a skipped window the next window's lower bound lags behind `i`
# and windows start to overlap -- confirm this is intended.
if(length(c)>=20 & !is.infinite(max(dist)) & !is.na(max(dist))){ #require >50 SNPs
nj <- nj(dist)
write.tree(nj,paste0("analysis/twisst/20kb_trees.tre"),append=T)
write(x=paste0(contig,"\t",start,"\t",i),file="analysis/twisst/20kb_tree_regions.txt",append=T)
start <- start+step
}
}
} else { #for single-window contigs
start <- 1;i <- step
b <- a[as.numeric(a@fix[,2])>start & as.numeric(a@fix[,2])<i]
nsnps <- nrow(b@gt)
c <- vcfR2DNAbin(b,unphased_as_NA = F,consensus = T,extract.haps = F,verbose=F)
dist <- dist.dna(c,model="K80")
if(length(c)>=20 & !is.infinite(max(dist)) & !is.na(max(dist))){
nj <- nj(dist)
write.tree(nj,paste0("analysis/twisst/20kb_trees.tre"),append=T)
write(x=paste0(contig,"\t",start,"\t",i),file="analysis/twisst/20kb_tree_regions.txt",append=T)
start <- start+step
}
}
pb$tick()
}
#run twisst
# NOTE(review): the loop above writes 20kb_* files, but twisst and the
# plotting code below read 50kb_* files -- verify which window size is live.
system(paste0("cd ~/selasphorus_evolution/analysis/twisst/;\
/anaconda3/bin/python2 twisst.py -t 50kb_trees.tre -w 50kb.weights.csv -g rufus -g sasin -g sedentarius -g calliope --method complete --groupsFile pop_groups.tsv"))
################ plotting #################
tw <- fread("analysis/twisst/50kb.weights.csv")
regions <- fread("analysis/twisst/50kb_tree_regions.txt")
tw <- cbind(tw,regions)
# Map contigs onto zebra finch (Tgut2) chromosomes via the mummer summary.
chralign <- fread("data/wgs/CannaMUGM01_to_Tgut2_mummer_summary.txt")
# NOTE(review): `%>%` is used but neither magrittr nor dplyr is attached
# above -- confirm which attached package provides the pipe here.
tw <- merge(tw,chralign,by.x="V1",by.y="qName") %>% data.frame()
# Topology counts -> proportions; 2800 is the hard-coded total number of
# subtrees per window.
tw$topo1prop <- tw$topo1/2800
tw$topo2prop <- tw$topo2/2800
tw$topo3prop <- tw$topo3/2800
#chromosome order for pretty plots
chr_order <- c("1","1A","1B","2","3","4","4A",as.character(5:28),"Z","M","NA")
tw$chr <- factor(gsub("chr","",tw$refName),levels=chr_order)
tw <- arrange(tw,chr,refStart,V2)
tw$x <- 1:nrow(tw)
# 40-window rolling means to smooth the per-window weights.
tw$rolltopo1 <- rollmean(tw$topo1prop,40,fill = NA)
tw$rolltopo2 <- rollmean(tw$topo2prop,40,fill = NA)
tw$rolltopo3 <- rollmean(tw$topo3prop,40,fill = NA)
mtw <- tw[,c("V1","V2","refName","refStart","topo1prop","topo2prop","topo3prop","chr","x","rolltopo1","rolltopo2","rolltopo3")]
mtw <- melt(mtw,id.vars=c("V1","V2","refName","refStart","chr","x"))
mtw$topology <- NA
mtw$topology[mtw$variable %in% c("rolltopo1","topo1prop")] <- " rufus sasin sedentarius"
mtw$topology[mtw$variable %in% c("rolltopo2","topo2prop")] <- " rufus sedentarius sasin"
# NOTE(review): topo3 receives the same label string as topo1, and the
# factor() below therefore has duplicated levels (an error in modern R);
# topo3 was probably meant to carry a distinct topology label.
mtw$topology[mtw$variable %in% c("rolltopo3","topo3prop")] <- " rufus sasin sedentarius"
mtw$topology <- factor(mtw$topology,levels=c(" rufus sasin sedentarius"," rufus sedentarius sasin"," rufus sasin sedentarius"))
# Chromosome label positions along the concatenated x axis; chromosomes
# 21-28 are lumped into a single label.
chr_labels <- ddply(mtw,.(chr),summarize,mid=median(x),start=min(x),stop=max(x))
chr_labels$chr <- as.character(chr_labels$chr)
chr_labels$chr[chr_labels$chr %in% as.character(21:28)] <- "21-28"
chr_labels$mid[chr_labels$chr=="21-28"] <- median(chr_labels$mid[chr_labels$chr=="21-28"],na.rm=T)
chr_labels$start[chr_labels$chr=="21-28"] <- min(chr_labels$start[chr_labels$chr=="21-28"],na.rm=T)
chr_labels$stop[chr_labels$chr=="21-28"] <- max(chr_labels$stop[chr_labels$chr=="21-28"],na.rm=T)
chr_labels$start[chr_labels$chr=="1B"] <- 0
chr_labels$stop[chr_labels$chr=="1B"] <- 0
chr_labels$value <- -0.1 #y position in the plot
chr_labels <- subset(chr_labels,!is.na(chr) & !duplicated(chr))
# Small inset topology cartoons drawn beneath the main panel.
# NOTE(review): t1 and t2 plot the identical tree string in different
# colors; t2 was likely meant to show the second topology.
t1 <- function(){
treestr <- "((rufus,sasin_sasin),sasin_sedentarius);"
tree <- read.tree(text=treestr)
plot.phylo(tree,show.tip.label = F,direction="upwards",edge.color="orangered")
}
t2 <- function(){
treestr <- "((rufus,sasin_sasin),sasin_sedentarius);"
tree <- read.tree(text=treestr)
plot.phylo(tree,show.tip.label = F,direction="upwards",edge.color="gold")
}
t3 <- function(){
treestr <- "(rufus,(sasin_sasin,sasin_sedentarius));"
tree <- read.tree(text=treestr)
plot.phylo(tree,show.tip.label = F,direction="upwards",edge.color="steelblue2")
}
# Compose the genome-wide weight track with the three inset cartoons.
png("fig/nj_tree_scan_50kb.png",width=6.5,height=2,res=600,units = "in")
a <- ggplot(data=mtw,aes(x=x,y=value,color=topology))+
theme_classic()+
theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.line.x=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_text(size=8),
axis.text.y=element_text(size=6),
legend.title=element_text(size=8),
legend.text=element_text(size=6),
legend.position = "bottom",
legend.box.margin = margin(-20,0,0,0),
legend.key.size = unit(0,"mm"),
legend.background = element_blank())+
ylab("proportion of subtrees")+
ylim(-0.4,1)+
scale_color_manual(values = c("orangered","gold","steelblue2"))+
#facet_wrap(~topology,ncol=1)+
geom_step(data=subset(mtw,variable %in% c("topo1prop","topo2prop","topo3prop")),lwd=0.1,alpha=0.5)+
geom_step(data=subset(mtw,variable %in% c("rolltopo1","rolltopo2","rolltopo3")),lwd=0.4)+
#geom_smooth(se=F,method="loess",span=0.05,lwd=0.5)+
geom_segment(data=chr_labels,aes(x=start+10,xend=stop-10,y=value,yend=value,col=NA),col="black")+
geom_text_repel(data=chr_labels,aes(label=chr,x=mid,y=value,col=NA),force=2,ylim=c(-.45,-0.1),
col="black",size=2,angle=0,direction="y",box.padding = 0.15,
segment.size=0.2)
ggdraw()+
draw_plot(a,0,.05,1,0.95)+
draw_plot(t1,0.32,0,0.14,0.15)+
draw_plot(t2,0.49,0,0.14,0.15)+
draw_plot(t3,0.65,0,0.14,0.15)
dev.off()
#get stats on top topology by window
# For each window, record which topology won and with how many subtrees.
toptree <- c();maxsupport <- c()
for(i in 1:nrow(tw)){
row <- tw[i,]
maxtopo=max(row$topo1,row$topo2,row$topo3)
toptree[i] <- c("topo1","topo2","topo3")[row[,c("topo1","topo2","topo3")]==maxtopo]
maxsupport[i] <- maxtopo
}
# Proportion of windows won by each topology: Z chromosome vs genome-wide.
summary(factor(toptree[tw$chr=="Z"]))/nrow(tw[tw$chr=="Z",])
summary(factor(toptree))/nrow(tw)
###consensus tree
# trees <- read.tree("analysis/twisst/50kb_trees.tre")
# consensus_tree <- root(consensus(trees,p=0.2),outgroup=grep("cal",trees[[1]]$tip.label,value=T))
# plot(consensus_tree,cex=0.5)
|
/scripts/wgs_nj_tree.R
|
no_license
|
cjbattey/selasphorus_evolution
|
R
| false
| false
| 7,028
|
r
|
library(vcfR);library(ape);library(progress);library(data.table)
library(plyr);library(ggplot2);library(ggrepel);library(zoo);library(cowplot)
# Whole-genome NJ-tree scan for Selasphorus hummingbirds: build a concatenated
# NJ tree, infer local NJ trees in nonoverlapping windows, weight topologies
# with twisst, and plot topology weights along the genome.
setwd("~/selasphorus_evolution/")
vcf <- read.vcfR("~/Dropbox/selasphorus/called_snps/selasphorus_het_depth_mq30_biallel_nomd.recode.vcf.gz")
#concatenated NJ tree
dna <- vcfR2DNAbin(vcf,unphased_as_NA = F,consensus = T,extract.haps = F)
dist <- dist.dna(dna,model="K80")
nj <- bionj(dist)
# Root on the S. calliope samples (outgroup).
nj <- root(nj,c("Scal1","Scal2","Scal3","Scal4","Scal5","Scal6","Scal7"))
plot(nj,cex=0.5)
#get local nj trees in nonoverlapping windows
trees <- list()  # NOTE(review): never populated below -- apparently unused.
pb <- progress_bar$new(total = length(unique(vcf@fix[,1])))
for(contig in unique(vcf@fix[,1])){
a <- vcf[vcf@fix[,1]==contig]
start <- 1;step <- 2e4
if(max(as.numeric(a@fix[,2]))>step){ #for contigs longer than step
for(i in seq(step,max(as.numeric(a@fix[,2])),step)){
b <- a[as.numeric(a@fix[,2])>start & as.numeric(a@fix[,2])<i]
nsnps <- nrow(b@gt)
c <- vcfR2DNAbin(b,unphased_as_NA = F,consensus = T,extract.haps = F,verbose=F)
dist <- dist.dna(c,model="K80")
# NOTE(review): the comment says ">50 SNPs" but the code requires >=20
# sites. Also `start` only advances when a window passes this filter, so
# after a skipped window the next window's lower bound lags behind `i`
# and windows start to overlap -- confirm this is intended.
if(length(c)>=20 & !is.infinite(max(dist)) & !is.na(max(dist))){ #require >50 SNPs
nj <- nj(dist)
write.tree(nj,paste0("analysis/twisst/20kb_trees.tre"),append=T)
write(x=paste0(contig,"\t",start,"\t",i),file="analysis/twisst/20kb_tree_regions.txt",append=T)
start <- start+step
}
}
} else { #for single-window contigs
start <- 1;i <- step
b <- a[as.numeric(a@fix[,2])>start & as.numeric(a@fix[,2])<i]
nsnps <- nrow(b@gt)
c <- vcfR2DNAbin(b,unphased_as_NA = F,consensus = T,extract.haps = F,verbose=F)
dist <- dist.dna(c,model="K80")
if(length(c)>=20 & !is.infinite(max(dist)) & !is.na(max(dist))){
nj <- nj(dist)
write.tree(nj,paste0("analysis/twisst/20kb_trees.tre"),append=T)
write(x=paste0(contig,"\t",start,"\t",i),file="analysis/twisst/20kb_tree_regions.txt",append=T)
start <- start+step
}
}
pb$tick()
}
#run twisst
# NOTE(review): the loop above writes 20kb_* files, but twisst and the
# plotting code below read 50kb_* files -- verify which window size is live.
system(paste0("cd ~/selasphorus_evolution/analysis/twisst/;\
/anaconda3/bin/python2 twisst.py -t 50kb_trees.tre -w 50kb.weights.csv -g rufus -g sasin -g sedentarius -g calliope --method complete --groupsFile pop_groups.tsv"))
################ plotting #################
tw <- fread("analysis/twisst/50kb.weights.csv")
regions <- fread("analysis/twisst/50kb_tree_regions.txt")
tw <- cbind(tw,regions)
# Map contigs onto zebra finch (Tgut2) chromosomes via the mummer summary.
chralign <- fread("data/wgs/CannaMUGM01_to_Tgut2_mummer_summary.txt")
# NOTE(review): `%>%` is used but neither magrittr nor dplyr is attached
# above -- confirm which attached package provides the pipe here.
tw <- merge(tw,chralign,by.x="V1",by.y="qName") %>% data.frame()
# Topology counts -> proportions; 2800 is the hard-coded total number of
# subtrees per window.
tw$topo1prop <- tw$topo1/2800
tw$topo2prop <- tw$topo2/2800
tw$topo3prop <- tw$topo3/2800
#chromosome order for pretty plots
chr_order <- c("1","1A","1B","2","3","4","4A",as.character(5:28),"Z","M","NA")
tw$chr <- factor(gsub("chr","",tw$refName),levels=chr_order)
tw <- arrange(tw,chr,refStart,V2)
tw$x <- 1:nrow(tw)
# 40-window rolling means to smooth the per-window weights.
tw$rolltopo1 <- rollmean(tw$topo1prop,40,fill = NA)
tw$rolltopo2 <- rollmean(tw$topo2prop,40,fill = NA)
tw$rolltopo3 <- rollmean(tw$topo3prop,40,fill = NA)
mtw <- tw[,c("V1","V2","refName","refStart","topo1prop","topo2prop","topo3prop","chr","x","rolltopo1","rolltopo2","rolltopo3")]
mtw <- melt(mtw,id.vars=c("V1","V2","refName","refStart","chr","x"))
mtw$topology <- NA
mtw$topology[mtw$variable %in% c("rolltopo1","topo1prop")] <- " rufus sasin sedentarius"
mtw$topology[mtw$variable %in% c("rolltopo2","topo2prop")] <- " rufus sedentarius sasin"
# NOTE(review): topo3 receives the same label string as topo1, and the
# factor() below therefore has duplicated levels (an error in modern R);
# topo3 was probably meant to carry a distinct topology label.
mtw$topology[mtw$variable %in% c("rolltopo3","topo3prop")] <- " rufus sasin sedentarius"
mtw$topology <- factor(mtw$topology,levels=c(" rufus sasin sedentarius"," rufus sedentarius sasin"," rufus sasin sedentarius"))
# Chromosome label positions along the concatenated x axis; chromosomes
# 21-28 are lumped into a single label.
chr_labels <- ddply(mtw,.(chr),summarize,mid=median(x),start=min(x),stop=max(x))
chr_labels$chr <- as.character(chr_labels$chr)
chr_labels$chr[chr_labels$chr %in% as.character(21:28)] <- "21-28"
chr_labels$mid[chr_labels$chr=="21-28"] <- median(chr_labels$mid[chr_labels$chr=="21-28"],na.rm=T)
chr_labels$start[chr_labels$chr=="21-28"] <- min(chr_labels$start[chr_labels$chr=="21-28"],na.rm=T)
chr_labels$stop[chr_labels$chr=="21-28"] <- max(chr_labels$stop[chr_labels$chr=="21-28"],na.rm=T)
chr_labels$start[chr_labels$chr=="1B"] <- 0
chr_labels$stop[chr_labels$chr=="1B"] <- 0
chr_labels$value <- -0.1 #y position in the plot
chr_labels <- subset(chr_labels,!is.na(chr) & !duplicated(chr))
# Small inset topology cartoons drawn beneath the main panel.
# NOTE(review): t1 and t2 plot the identical tree string in different
# colors; t2 was likely meant to show the second topology.
t1 <- function(){
treestr <- "((rufus,sasin_sasin),sasin_sedentarius);"
tree <- read.tree(text=treestr)
plot.phylo(tree,show.tip.label = F,direction="upwards",edge.color="orangered")
}
t2 <- function(){
treestr <- "((rufus,sasin_sasin),sasin_sedentarius);"
tree <- read.tree(text=treestr)
plot.phylo(tree,show.tip.label = F,direction="upwards",edge.color="gold")
}
t3 <- function(){
treestr <- "(rufus,(sasin_sasin,sasin_sedentarius));"
tree <- read.tree(text=treestr)
plot.phylo(tree,show.tip.label = F,direction="upwards",edge.color="steelblue2")
}
# Compose the genome-wide weight track with the three inset cartoons.
png("fig/nj_tree_scan_50kb.png",width=6.5,height=2,res=600,units = "in")
a <- ggplot(data=mtw,aes(x=x,y=value,color=topology))+
theme_classic()+
theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.line.x=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_text(size=8),
axis.text.y=element_text(size=6),
legend.title=element_text(size=8),
legend.text=element_text(size=6),
legend.position = "bottom",
legend.box.margin = margin(-20,0,0,0),
legend.key.size = unit(0,"mm"),
legend.background = element_blank())+
ylab("proportion of subtrees")+
ylim(-0.4,1)+
scale_color_manual(values = c("orangered","gold","steelblue2"))+
#facet_wrap(~topology,ncol=1)+
geom_step(data=subset(mtw,variable %in% c("topo1prop","topo2prop","topo3prop")),lwd=0.1,alpha=0.5)+
geom_step(data=subset(mtw,variable %in% c("rolltopo1","rolltopo2","rolltopo3")),lwd=0.4)+
#geom_smooth(se=F,method="loess",span=0.05,lwd=0.5)+
geom_segment(data=chr_labels,aes(x=start+10,xend=stop-10,y=value,yend=value,col=NA),col="black")+
geom_text_repel(data=chr_labels,aes(label=chr,x=mid,y=value,col=NA),force=2,ylim=c(-.45,-0.1),
col="black",size=2,angle=0,direction="y",box.padding = 0.15,
segment.size=0.2)
ggdraw()+
draw_plot(a,0,.05,1,0.95)+
draw_plot(t1,0.32,0,0.14,0.15)+
draw_plot(t2,0.49,0,0.14,0.15)+
draw_plot(t3,0.65,0,0.14,0.15)
dev.off()
#get stats on top topology by window
# For each window, record which topology won and with how many subtrees.
toptree <- c();maxsupport <- c()
for(i in 1:nrow(tw)){
row <- tw[i,]
maxtopo=max(row$topo1,row$topo2,row$topo3)
toptree[i] <- c("topo1","topo2","topo3")[row[,c("topo1","topo2","topo3")]==maxtopo]
maxsupport[i] <- maxtopo
}
# Proportion of windows won by each topology: Z chromosome vs genome-wide.
summary(factor(toptree[tw$chr=="Z"]))/nrow(tw[tw$chr=="Z",])
summary(factor(toptree))/nrow(tw)
###consensus tree
# trees <- read.tree("analysis/twisst/50kb_trees.tre")
# consensus_tree <- root(consensus(trees,p=0.2),outgroup=grep("cal",trees[[1]]$tip.label,value=T))
# plot(consensus_tree,cex=0.5)
|
#' Builds a table of summary statistics
#'
#' Formats trend statistics (slopes with confidence intervals, R^2, p-values,
#' CV/power summaries, predicted last-year concentration) for the full series
#' and for the last ten years into LaTeX-ready character columns. Cells are
#' blanked out ("-") when more than half of the observations are below the
#' limit of detection.
#'
#' @param data A tibble with identifier columns `var`, `gen`, `loc` and
#'   list-columns `aggdata`, `linmod` and `linmod10`, as produced by the
#'   MoCiS pipeline (see \code{MoCiS::mocis}).
#' @param dec Number of decimal places used when printing estimates.
#' @return A tibble with one row per series and formatted summary columns.
#' @export
#' @examples
mocis_table <- function(data, dec = 2){
# Format "estimate (lower, upper)"; empty string for non-numeric input.
print_ci <- function(estimate, lower, upper){
ifelse(is.numeric(estimate),
paste0(round(estimate, dec), " (", round(lower, dec), ", ", round(upper, dec), ")"),
"")
}
# Format a single rounded value; empty string for non-numeric input.
print_value <- function(value){
ifelse(is.numeric(value),
paste(round(value, dec)),
"")
}
# Format a p-value, collapsing anything at or below 0.001 to "<0.001".
print_p_value <- function(value){
ifelse(is.numeric(value),
ifelse(value > 0.001, paste(round(value, 3)), "<0.001"),
"")
}
# Threshold: blank a cell when more than 50% of observations are below LOD.
max_all_lod = .5
data %>%
transmute(var = var,
gen = gen,
loc = loc,
# Full data
`Sampling site` = mocis_name_station(loc),
Species = mocis_name_species(gen),
`$n_{\\text{obs}}$` = map_dbl(aggdata, ~sum(.x$n)),
`$n_y$` = map_dbl(aggdata, ~length(unique(.x[["YEAR"]]))),
# Fraction of observations below LOD: full series and last 10 years.
prc_all_lod = map_dbl(aggdata, ~mean(.x[["all.lod"]])),
prc_all_lod10 = map_dbl(aggdata, ~mean(.x %>% filter(YEAR > (max(YEAR)-10)) %>% pull(all.lod))),
Years = map_chr(aggdata, ~paste(range(.x[["YEAR"]]), collapse = "-")),
`Slope (95$\\%$ CI)` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_ci(.x[["slope"]], .x[["lower"]], .x[["upper"]]))),
`$R^2$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["r2"]]))),
`$p$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_p_value(.x[["p"]]))),
CV = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["cv"]][1]))),
`LDT` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["cv"]][2]))),
YRQ = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["cv"]][3]))),
`Pow$_{\\text{tot}}$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["power"]][1]))),
`Pow$_{10y}$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["power"]][2]))),
# NOTE(review): `LDT$_{10y}$` is defined here AND again in the 10-year
# section below; the later definition wins in transmute() -- confirm one
# of the two was meant to have a different name.
`LDT$_{10y}$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["power"]][3]))),
`Conc$_{\\text{pred}}$ (95$\\%$ CI)` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_ci(.x[["yhat.last"]], .x[["yhat.last.lower"]], .x[["yhat.last.upper"]]))),
# Past ten years of data
`Slope$_{10y}$ (95$\\%$ CI)` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_ci(.x[["slope"]], .x[["lower"]], .x[["upper"]]))),
`LDT$_{10y}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["cv"]][2]))),
`YRQ$_{10}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["cv"]][3]))),
# NOTE(review): `Pow$_{tot}$` here vs `Pow$_{\\text{tot}}$` above -- the
# inconsistent LaTeX spelling creates two distinct columns; confirm.
`Pow$_{tot}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["power"]][1]))),
`$R^2_{10y}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["r2"]]))),
`$p_{10y}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_p_value(.x[["p"]]))
# ,
# `Yr$_{\\text{change}}$` = map_chr(changepoint, ~ifelse(is.null(.x), "",.x[["changepoint"]])))
)) %>%
select(-prc_all_lod, -prc_all_lod10)
}
|
/R/mocis_table.R
|
no_license
|
NRM-MOC/MoCiS.tools
|
R
| false
| false
| 4,161
|
r
|
#' Builds a table of summary statistics
#'
#' Formats trend statistics (slopes with confidence intervals, R^2, p-values,
#' CV/power summaries, predicted last-year concentration) for the full series
#' and for the last ten years into LaTeX-ready character columns. Cells are
#' blanked out ("-") when more than half of the observations are below the
#' limit of detection.
#'
#' @param data A tibble with identifier columns `var`, `gen`, `loc` and
#'   list-columns `aggdata`, `linmod` and `linmod10`, as produced by the
#'   MoCiS pipeline (see \code{MoCiS::mocis}).
#' @param dec Number of decimal places used when printing estimates.
#' @return A tibble with one row per series and formatted summary columns.
#' @export
#' @examples
mocis_table <- function(data, dec = 2){
# Format "estimate (lower, upper)"; empty string for non-numeric input.
print_ci <- function(estimate, lower, upper){
ifelse(is.numeric(estimate),
paste0(round(estimate, dec), " (", round(lower, dec), ", ", round(upper, dec), ")"),
"")
}
# Format a single rounded value; empty string for non-numeric input.
print_value <- function(value){
ifelse(is.numeric(value),
paste(round(value, dec)),
"")
}
# Format a p-value, collapsing anything at or below 0.001 to "<0.001".
print_p_value <- function(value){
ifelse(is.numeric(value),
ifelse(value > 0.001, paste(round(value, 3)), "<0.001"),
"")
}
# Threshold: blank a cell when more than 50% of observations are below LOD.
max_all_lod = .5
data %>%
transmute(var = var,
gen = gen,
loc = loc,
# Full data
`Sampling site` = mocis_name_station(loc),
Species = mocis_name_species(gen),
`$n_{\\text{obs}}$` = map_dbl(aggdata, ~sum(.x$n)),
`$n_y$` = map_dbl(aggdata, ~length(unique(.x[["YEAR"]]))),
# Fraction of observations below LOD: full series and last 10 years.
prc_all_lod = map_dbl(aggdata, ~mean(.x[["all.lod"]])),
prc_all_lod10 = map_dbl(aggdata, ~mean(.x %>% filter(YEAR > (max(YEAR)-10)) %>% pull(all.lod))),
Years = map_chr(aggdata, ~paste(range(.x[["YEAR"]]), collapse = "-")),
`Slope (95$\\%$ CI)` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_ci(.x[["slope"]], .x[["lower"]], .x[["upper"]]))),
`$R^2$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["r2"]]))),
`$p$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_p_value(.x[["p"]]))),
CV = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["cv"]][1]))),
`LDT` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["cv"]][2]))),
YRQ = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["cv"]][3]))),
`Pow$_{\\text{tot}}$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["power"]][1]))),
`Pow$_{10y}$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["power"]][2]))),
# NOTE(review): `LDT$_{10y}$` is defined here AND again in the 10-year
# section below; the later definition wins in transmute() -- confirm one
# of the two was meant to have a different name.
`LDT$_{10y}$` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_value(.x[["power"]][3]))),
`Conc$_{\\text{pred}}$ (95$\\%$ CI)` = ifelse(prc_all_lod > max_all_lod, "-",
map_chr(linmod, ~print_ci(.x[["yhat.last"]], .x[["yhat.last.lower"]], .x[["yhat.last.upper"]]))),
# Past ten years of data
`Slope$_{10y}$ (95$\\%$ CI)` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_ci(.x[["slope"]], .x[["lower"]], .x[["upper"]]))),
`LDT$_{10y}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["cv"]][2]))),
`YRQ$_{10}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["cv"]][3]))),
# NOTE(review): `Pow$_{tot}$` here vs `Pow$_{\\text{tot}}$` above -- the
# inconsistent LaTeX spelling creates two distinct columns; confirm.
`Pow$_{tot}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["power"]][1]))),
`$R^2_{10y}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_value(.x[["r2"]]))),
`$p_{10y}$` = ifelse(prc_all_lod10 > max_all_lod, "-",
map_chr(linmod10, ~print_p_value(.x[["p"]]))
# ,
# `Yr$_{\\text{change}}$` = map_chr(changepoint, ~ifelse(is.null(.x), "",.x[["changepoint"]])))
)) %>%
select(-prc_all_lod, -prc_all_lod10)
}
|
##########################################################################
###
### generate simulated survival data with a predefined censoring rate
### Author: Zihang Zhong
### Date: Aug 2020
### Specifications:
### - the main aim here is to root-find the censoring parameter for censoring distribution and generate the right censoring data
### - As the simulated righted censored data requires two independent survival distributions,one for event time EVENT,
### and the other for censoring time C. here, the baseline hazard function was set as from Weibull and the censoring
### time from uniform distribution. other distribution is also allowed but not discussed here.
### - considering two kinds of right-censored survival data:
### A) data with no consideration of covariates
### B) data incorporating with covariates
### B.1) all covariates are binary variables
### B.2) all covariates are normal variables -- default are standard normal
### B.3) all covariates are mixed, the mix of normal, binary, uniform or possion
###
###
### Structure:
### - 1) Support functions
### 1.1) data generation
### - CenDatNoCov
### - CenDatBin
### - CenDatNorm
### - CenDatMixed
### 1.2) core code to calculate the censoring parameter
### - CensProbBin
### - CensProbNorm
### - CensProbMixed
### - 2) verify the average censoring rate of simulated data is coincident with the nominal censoring rate by simulations
###
###
### Parameters:
### - alpha: the shape parameter for hazard function(weibull distribuion)
### - lambda: the scale parameter for hazard function(weibull distribuion)
### - cens.p: the nominal censoring rate
### - beta: the predefined coefficients of the covariates
### - class: the distribution of the covariates
### - para: the distribution parameters of the covariates
### - theta: the censoring parameter
### - size: the sample size
### - nSims: the number of simulations
###################################################################################
###################################################################################
# library(simsurv)
#====================================================================
#A) data with no covariates--CenDatNoCov
#====================================================================
# Generate right-censored Weibull survival data with a target censoring rate
# (no covariates).
#
# Event times T ~ Weibull(shape = alpha, scale = lambda); censoring times
# C ~ Uniform(0, theta). theta is root-found so that the marginal censoring
# probability equals the nominal rate:
#   P(cens) = lambda/(alpha*theta) * gamma(1/alpha) * pgamma((theta/lambda)^alpha, 1/alpha)
#
# Args:
#   alpha  - Weibull shape parameter of the event-time distribution.
#   lambda - Weibull scale parameter of the event-time distribution.
#   cens.p - nominal (target) censoring proportion in (0, 1).
#   size   - sample size to generate.
# Returns: list(theta = censoring parameter rounded to 3 decimals,
#               cens.data = data.frame(time, status)) with status = 1 for an
#               observed event, 0 for a censored observation.
CenDatNoCov <- function(alpha, lambda, cens.p, size) {
  # Root-find the Uniform(0, theta) upper bound giving the target censoring rate.
  cens.rate <- function(x) {
    lambda / (alpha * x) * pgamma((x / lambda)^alpha, 1 / alpha) * gamma(1 / alpha) - cens.p
  }
  theta <- round(uniroot(cens.rate, c(0.00001, 1000))$root, 3)
  # Draw independent event and censoring times; observe the minimum.
  event <- rweibull(size, alpha, lambda)
  cens  <- runif(size, 0, theta)
  cens.data <- data.frame(time   = pmin(event, cens),
                          status = as.numeric(event <= cens))
  list(theta = theta, cens.data = cens.data)
}
# Example: 30% nominal censoring, n = 200.
CenDatNoCov(alpha = 2, lambda = 4, cens.p = 0.3, size = 200)
# ---------------------------------------------------------------------------
# simulate 1000 times to verify that the value of theta is robust.
# ---------------------------------------------------------------------------
SimCensP <- numeric(1000)   # preallocated instead of grown with c()
set.seed(20200810)
for (i in 1:1000) {
  # BUG FIX: the original called CensDataNoCov(), which is not defined
  # anywhere in this file; the generator above is named CenDatNoCov().
  data <- CenDatNoCov(alpha = 2, lambda = 4, cens.p = 0.3, size = 200)$cens.data
  SimCensP[i] <- sum(data["status"]) / 200  # observed event rate per replicate
}
# Average censoring rate across replicates; should be close to cens.p = 0.3.
# (The original reported mean(SimCensP), i.e. the event rate ~0.7.)
1 - mean(SimCensP)
#====================================================================
# B) incorporating with covariates
#====================================================================
# ====================================================================
# B.1) all covariates are binary variables: generate the censoring data -- CenDatBin
# ====================================================================
# Generate right-censored Weibull survival data with binary covariates and a
# target censoring rate.
#
# Event times follow a Weibull PH model:
#   T | x ~ Weibull(alpha, lambda * exp(-(x'beta)/alpha)),
# covariates x_j ~ Bernoulli(p_j) independently; censoring C ~ Uniform(0, theta).
# theta is root-found so that the marginal censoring probability, averaged over
# all 2^k covariate patterns, equals cens.p.
#
# Args:
#   alpha, lambda - Weibull shape/scale of the baseline hazard.
#   cens.p        - nominal censoring proportion.
#   beta          - regression coefficients, one per binary covariate.
#   p             - Bernoulli success probabilities (same length as beta).
#   size          - sample size.
#   seed          - RNG seed for reproducibility.
# Returns: list(theta, cens.data) with columns id, x1..xk, EVENT, C, time, status.
CenDatBin <- function(alpha, lambda, cens.p, beta, p, size, seed = 20200812) {
  # (the original re-assigned every argument to itself; those no-ops are removed)
  n <- size
  # -------------------------------------------------------------------------
  # Marginal censoring probability for a given theta: sum the conditional
  # censoring probability over every 0/1 covariate pattern, weighted by the
  # pattern's Bernoulli probability mass.
  censProbBin <- function(theta) {
    beta.0 <- -alpha * log(lambda)
    LenCovar <- length(beta)
    # Enumerate all non-empty index subsets; finalInd column j lists the
    # indices of covariates equal to 1 in pattern j (zero-padded).
    finalInd <- c()
    for (i in 1:LenCovar) {
      ind <- combn(1:LenCovar, i)
      for (j in seq_len(ncol(ind))) {
        finalInd <- cbind(finalInd, c(ind[, j], rep(0, LenCovar - i)))
      }
    }
    # comb: LenCovar x 2^LenCovar matrix of covariate patterns; the last
    # (never-written) column stays all-zero and represents the empty pattern.
    comb <- matrix(0, nrow = LenCovar, ncol = 2^LenCovar)
    # BUG FIX (precedence): the original wrote `1: 2^LenCovar-1`, which parses
    # as (1:2^LenCovar) - 1 and only works because a 0 index is a silent no-op.
    # The intended range is the 2^LenCovar - 1 non-empty patterns.
    for (i in seq_len(2^LenCovar - 1)) {
      comb[finalInd[, i], i] <- 1
    }
    lambda.i <- exp(-(beta.0 + apply(beta * comb, 2, sum)) / alpha)
    # Conditional P(censored | pattern) for C ~ Uniform(0, theta).
    cond.cens.Prob <- (lambda.i / (alpha * theta)) *
      pgamma((theta / lambda.i)^alpha, 1 / alpha) * gamma(1 / alpha)
    # Probability mass of each covariate pattern.
    pmat <- matrix(p, nrow = LenCovar, ncol = 2^LenCovar)
    pdf.lambda.i <- apply(pmat^comb * (1 - pmat)^(1 - comb), 2, prod)
    t(cond.cens.Prob) %*% pdf.lambda.i
  }
  # -------------------------------------------------------------------------
  theta <- round(uniroot(function(x) censProbBin(x) - cens.p, c(0.0000001, 100))$root, 3)
  set.seed(seed)
  cov <- mapply(rbinom, n, 1, p)           # n x k matrix of Bernoulli draws
  colnames(cov) <- paste("x", 1:length(p), sep = "")
  EVENT <- rweibull(n, alpha, lambda * exp(-1 / alpha * (cov %*% beta)))
  data <- as.data.frame(cbind(id = 1:n, cov = cov, EVENT = EVENT, C = runif(n, 0, theta)))
  data$time <- ifelse(data$EVENT <= data$C, data$EVENT, data$C)
  data$status <- ifelse(data$EVENT <= data$C, 1, 0)
  return(list(theta = theta, cens.data = data))
}
# Example: four binary covariates, 30% nominal censoring, n = 200.
CenDatBin(alpha = 2, lambda = 4, cens.p = 0.3,
          beta = c(-0.1, 0.2, -0.3, 0.4), p = c(0.3, 0.4, 0.5, 0.6),
          size = 200, seed = 20200812)
# -----------------------------------------------------------------------------------
# simulations to verify that the average censoring rate matches the nominal 0.3
# -----------------------------------------------------------------------------------
cens.p <- c()
set.seed(20200812)
n <- 200
for (i in 1:1000) {
  data <- CenDatBin(alpha = 2, lambda = 4, cens.p = 0.3,
                    beta = c(-0.1, 0.2, -0.3, 0.4), p = c(0.3, 0.4, 0.5, 0.6),
                    size = n, seed = 2020 + i)$cens.data
  cens.p[i] <- sum(data["status"]) / n
}
1 - mean(cens.p)
# coxph(Surv(time,status))~x1+x2+x3+x4,data = data)
# ====================================================================
# B.2) all covariates are normal variables :generate the censoring data -- CenDatNorm
# default all are from standard normal distribution
# ====================================================================s
# Generate right-censored Weibull survival data with independent standard-normal
# covariates and a target censoring rate.
#
# Under the Weibull PH model the individual scale is
#   lambda.i = lambda * exp(-(x'beta)/alpha) = exp(-(beta.0 + x'beta)/alpha)
# with beta.0 = -alpha*log(lambda). Since x ~ N(0, I),
#   log(lambda.i) ~ N(-beta.0/alpha, sum(beta^2)/alpha^2),
# so lambda.i is log-normal and the marginal censoring probability is obtained
# by numerical integration against that log-normal density.
#
# Args:
#   alpha, lambda - Weibull shape/scale of the baseline hazard.
#   cens.p        - nominal censoring proportion.
#   beta          - regression coefficients (covariates are iid N(0, 1)).
#   size          - sample size.
#   seed          - RNG seed.
# Returns: list(theta, cens.data) with columns id, x1..xk, EVENT, C, time, status.
CenDatNorm <- function(alpha, lambda, cens.p, beta, size, seed = 20200812) {
  n <- size
  # -------------------------------------------------------------------------
  # Marginal censoring probability for a given theta.
  censProbNorm <- function(theta) {
    beta.0 <- -alpha * log(lambda)
    # BUG FIX: dlnorm()'s third argument is the SD of log(u), not the
    # variance; the original passed beta %*% beta / alpha^2 (the variance).
    sdlog <- sqrt(sum(beta^2)) / alpha
    PdfLambdai <- function(u) dlnorm(u, -beta.0 / alpha, sdlog)
    CondCensProb <- function(u) (u / (alpha * theta)) *
      pgamma((theta / u)^alpha, 1 / alpha) * gamma(1 / alpha)
    # Integrate over the log-normal support (u > 0); integrating from -Inf,
    # as the original did, can yield NaN from (theta/u)^alpha at u < 0 for
    # non-integer alpha.
    integrate(function(u) PdfLambdai(u) * CondCensProb(u), 0, Inf)$value
  }
  # -------------------------------------------------------------------------
  theta <- round(uniroot(function(x) censProbNorm(x) - cens.p, c(0.0001, 1000))$root, 3)
  set.seed(seed)
  cov <- mapply(rnorm, n, 0, rep(1, length(beta)))   # n x k matrix of N(0,1) draws
  colnames(cov) <- paste("x", 1:length(beta), sep = "")
  EVENT <- rweibull(n, alpha, lambda * exp(-1 / alpha * (cov %*% beta)))
  data <- as.data.frame(cbind(id = 1:n, cov = cov, EVENT = EVENT, C = runif(n, 0, theta)))
  data$time <- ifelse(data$EVENT <= data$C, data$EVENT, data$C)
  data$status <- ifelse(data$EVENT <= data$C, 1, 0)
  return(list(theta = theta, cens.data = data))
}
# Example: four standard-normal covariates, 30% nominal censoring, n = 200.
CenDatNorm(alpha = 2, lambda = 4, cens.p = 0.3,
           beta = c(-0.1, 0.2, -0.3, 0.4), size = 200, seed = 20200812)
# -----------------------------------------------------------------------------------
# simulations to verify that the average censoring rate matches the nominal 0.3
# -----------------------------------------------------------------------------------
cens.p <- c()
set.seed(20200810)
n <- 200
for (i in 1:1000) {
  data <- CenDatNorm(alpha = 2, lambda = 4, cens.p = 0.3,
                     beta = c(-0.1, 0.2, -0.3, 0.4),
                     size = n, seed = 202008 + i)$cens.data
  cens.p[i] <- sum(data["status"]) / n
}
1 - mean(cens.p)
# ====================================================================
# B.3) all covariates are mixed variables : generate the censoring data -- CenDatMixed
# - specify the distribution of all variables: "N" = normal,"B" = binary, "P"= possion, "U"= uniform
# - specify the distribution parameters in list for "B", "P", "U";for "U", the start and end should be set in matrix with two colomns
# ====================================================================
# Generate right-censored Weibull survival data with mixed-type covariates and
# a target censoring rate.
#
# Covariate types (argument `class`, one letter per covariate, in order):
#   "N" = standard normal, "B" = Bernoulli, "P" = Poisson, "U" = uniform.
# Distribution parameters come from `para`: para$B = success probabilities,
# para$P = Poisson means, para$U = matrix with one (min, max) row per uniform
# covariate. Covariates are bound in the fixed order N, B, P, U, and `beta`
# must follow that same order.
#
# The individual Weibull scale lambda.i = exp(-(beta.0 + x'beta)/alpha) has no
# closed-form density here, so it is approximated by a kernel-density estimate
# (loess-smoothed) from a large Monte-Carlo sample, and the marginal censoring
# probability is obtained by numerical integration.
#
# Returns: list(theta, cens.data) with columns id, x1..xk, EVENT, C, time, status.
CenDatMixed <- function(alpha, lambda, cens.p, beta, size,
                        class = c("N", "B", "P", "U"),
                        para = list(B = c(0.5), P = c(5), U = matrix(c(0, 1), ncol = 2, byrow = TRUE)),
                        seed = 20200812) {
  # Draw an n x k covariate matrix according to `class`/`para`.
  # BUG FIX: the original referenced a/b/c/d unconditionally in cbind(), which
  # raised "object not found" whenever a covariate type was absent; NULL
  # initialisation makes cbind() silently drop the missing pieces.
  draw.cov <- function(n) {
    a <- b <- d <- e <- NULL
    nNorm <- sum(class == "N")
    nBin  <- sum(class == "B")
    nPos  <- sum(class == "P")
    nUni  <- sum(class == "U")
    if (nNorm > 0) a <- as.matrix(mapply(rnorm, n, 0, rep(1, nNorm)))
    if (nBin  > 0) b <- as.matrix(mapply(rbinom, n, 1, para$B))
    if (nPos  > 0) d <- as.matrix(mapply(rpois, n, para$P))
    if (nUni  > 0) e <- as.matrix(mapply(runif, n, para$U[, 1], para$U[, 2]))
    cbind(a, b, d, e)
  }
  # -------------------------------------------------------------------------
  # Monte-Carlo estimate of the marginal censoring probability for a theta.
  CensProbMixed <- function(theta) {
    set.seed(20200810)   # fixed seed so the root-finder sees a smooth function of theta
    x <- draw.cov(10000)
    beta.0 <- -alpha * log(lambda)
    lambda.i <- exp(-(beta.0 + x %*% beta) / alpha)
    # Smooth density estimate of lambda.i; hoisted out of the integrand so it
    # is fitted once instead of on every integrate() evaluation (the original
    # refit density + loess inside PdfLambdai on each call).
    dens <- density(lambda.i, bw = "nrd0", kernel = "gaussian", na.rm = TRUE)
    y.loess <- loess(dens$y ~ dens$x, span = 0.1)
    PdfLambdai <- function(u) predict(y.loess, newdata = u)
    CondCensProb <- function(u) (u / (alpha * theta)) *
      pgamma((theta / u)^alpha, 1 / alpha) * gamma(1 / alpha)
    integrate(function(u) PdfLambdai(u) * CondCensProb(u),
              min(lambda.i), max(lambda.i))$value
  }
  # -------------------------------------------------------------------------
  theta <- round(uniroot(function(y) CensProbMixed(y) - cens.p, c(0.001, 100))$root, 3)
  set.seed(seed)
  cov <- draw.cov(size)   # same draw order (N, B, P, U) as the original code
  colnames(cov) <- paste("x", 1:length(beta), sep = "")
  EVENT <- rweibull(size, alpha, lambda * exp(-1 / alpha * (cov %*% beta)))
  data <- as.data.frame(cbind(id = 1:size, cov = cov, EVENT = EVENT, C = runif(size, 0, theta)))
  data$time <- ifelse(data$EVENT <= data$C, data$EVENT, data$C)
  data$status <- ifelse(data$EVENT <= data$C, 1, 0)
  return(list(theta = theta, cens.data = data))
}
# Example: one normal, one binary, one Poisson and one uniform covariate.
CenDatMixed(alpha = 2, lambda = 4, cens.p = 0.3, beta = c(-0.1, 0.2, -0.3, 0.4),
            size = 200,
            class = c("N", "B", "P", "U"),
            para = list(B = c(0.5), P = c(5), U = matrix(c(0, 1), ncol = 2, byrow = TRUE)),
            seed = 20200812)
# -----------------------------------------------------------------------------------
# simulations to verify that the average censoring rate matches the nominal 0.3
# -----------------------------------------------------------------------------------
cens.p <- c()
for (i in 1:1000) {
  data <- CenDatMixed(alpha = 2, lambda = 4, cens.p = 0.3,
                      beta = c(-0.1, 0.2, 0.5, -0.6, -0.3, 0.4), size = 200,
                      class = c("N", "B", "B", "P", "U", "U"),
                      para = list(B = c(0.5, 0.4), P = c(5),
                                  U = matrix(c(0, 1, 0.2, 0.6), ncol = 2, byrow = TRUE)),
                      seed = 202008 + i)$cens.data
  cens.p[i] <- sum(data["status"]) / 200
}
1 - mean(cens.p)
|
/predefined censoring rate.R
|
no_license
|
zihang1012/simulated-survival-data-with-predefiend-censoring-rate-
|
R
| false
| false
| 12,636
|
r
|
##########################################################################
###
### generate simulated survival data with predefined censoring rate
### Author: Zihang Zhong
### Date: Aug 2020
### Specifications:
### - the main aim here is to root-find the censoring parameter for censoring distribution and generate the right censoring data
### - As the simulated righted censored data requires two independent survival distributions,one for event time EVENT,
### and the other for censoring time C. here, the baseline hazard function was set as from Weibull and the censoring
### time from uniform distribution. other distribution is also allowed but not discussed here.
### - considering two kind right-censored survival data:
### A) data with no consideration of covariates
### B) data incorporating with covariates
### B.1) all covariates are binary variables
### B.2) all covariates are normal variables -- default are standard normal
### B.3) all covariates are mixed, the mix of normal, binary, uniform or possion
###
###
### Structure:
### - 1) Support functions
### 1.1) data generation
### - CenDatNoCov
### - CenDatBin
### - CenDatNorm
### - CenDatMixed
### 1.2) core code to calculate the censoring parameter
### - CensProbBin
### - CensProbNorm
### - CensProbMixed
### - 2) verify the average censoring rate of simulated data is coincident with the nominal censoring rate by simulations
###
###
### Parameters:
### - alpha: the shape parameter for hazard function(weibull distribuion)
### - lambda: the scale parameter for hazard function(weibull distribuion)
### - cens.p: the nominal censoring rate
### - beta: the predefined coefficients of the covariates
### - class: the distribution of the covariates
### - para: the distribution parameters of the covariates
### - theta: the censoring parameter
### - size: the sample size
### - nSims: the number of simulations
###################################################################################
###################################################################################
# library(simsurv)
#====================================================================
#A) data with no covariates--CenDatNoCov
#====================================================================
# Generate right-censored Weibull survival data with a target censoring rate
# (no covariates).
#
# Event times T ~ Weibull(shape = alpha, scale = lambda); censoring times
# C ~ Uniform(0, theta). theta is root-found so that the marginal censoring
# probability equals the nominal rate:
#   P(cens) = lambda/(alpha*theta) * gamma(1/alpha) * pgamma((theta/lambda)^alpha, 1/alpha)
#
# Args:
#   alpha  - Weibull shape parameter of the event-time distribution.
#   lambda - Weibull scale parameter of the event-time distribution.
#   cens.p - nominal (target) censoring proportion in (0, 1).
#   size   - sample size to generate.
# Returns: list(theta = censoring parameter rounded to 3 decimals,
#               cens.data = data.frame(time, status)) with status = 1 for an
#               observed event, 0 for a censored observation.
CenDatNoCov <- function(alpha, lambda, cens.p, size) {
  # Root-find the Uniform(0, theta) upper bound giving the target censoring rate.
  cens.rate <- function(x) {
    lambda / (alpha * x) * pgamma((x / lambda)^alpha, 1 / alpha) * gamma(1 / alpha) - cens.p
  }
  theta <- round(uniroot(cens.rate, c(0.00001, 1000))$root, 3)
  # Draw independent event and censoring times; observe the minimum.
  event <- rweibull(size, alpha, lambda)
  cens  <- runif(size, 0, theta)
  cens.data <- data.frame(time   = pmin(event, cens),
                          status = as.numeric(event <= cens))
  list(theta = theta, cens.data = cens.data)
}
# Example: 30% nominal censoring, n = 200.
CenDatNoCov(alpha = 2, lambda = 4, cens.p = 0.3, size = 200)
# ---------------------------------------------------------------------------
# simulate 1000 times to verify that the value of theta is robust.
# ---------------------------------------------------------------------------
SimCensP <- numeric(1000)   # preallocated instead of grown with c()
set.seed(20200810)
for (i in 1:1000) {
  # BUG FIX: the original called CensDataNoCov(), which is not defined
  # anywhere in this file; the generator above is named CenDatNoCov().
  data <- CenDatNoCov(alpha = 2, lambda = 4, cens.p = 0.3, size = 200)$cens.data
  SimCensP[i] <- sum(data["status"]) / 200  # observed event rate per replicate
}
# Average censoring rate across replicates; should be close to cens.p = 0.3.
# (The original reported mean(SimCensP), i.e. the event rate ~0.7.)
1 - mean(SimCensP)
#====================================================================
# B) incorporating with covariates
#====================================================================
# ====================================================================
# B.1) all covariates are binary variables: generate the censoring data -- CenDatBin
# ====================================================================
# Generate right-censored Weibull survival data with binary covariates and a
# target censoring rate.
#
# Event times follow a Weibull PH model:
#   T | x ~ Weibull(alpha, lambda * exp(-(x'beta)/alpha)),
# covariates x_j ~ Bernoulli(p_j) independently; censoring C ~ Uniform(0, theta).
# theta is root-found so that the marginal censoring probability, averaged over
# all 2^k covariate patterns, equals cens.p.
#
# Args:
#   alpha, lambda - Weibull shape/scale of the baseline hazard.
#   cens.p        - nominal censoring proportion.
#   beta          - regression coefficients, one per binary covariate.
#   p             - Bernoulli success probabilities (same length as beta).
#   size          - sample size.
#   seed          - RNG seed for reproducibility.
# Returns: list(theta, cens.data) with columns id, x1..xk, EVENT, C, time, status.
CenDatBin <- function(alpha, lambda, cens.p, beta, p, size, seed = 20200812) {
  # (the original re-assigned every argument to itself; those no-ops are removed)
  n <- size
  # -------------------------------------------------------------------------
  # Marginal censoring probability for a given theta: sum the conditional
  # censoring probability over every 0/1 covariate pattern, weighted by the
  # pattern's Bernoulli probability mass.
  censProbBin <- function(theta) {
    beta.0 <- -alpha * log(lambda)
    LenCovar <- length(beta)
    # Enumerate all non-empty index subsets; finalInd column j lists the
    # indices of covariates equal to 1 in pattern j (zero-padded).
    finalInd <- c()
    for (i in 1:LenCovar) {
      ind <- combn(1:LenCovar, i)
      for (j in seq_len(ncol(ind))) {
        finalInd <- cbind(finalInd, c(ind[, j], rep(0, LenCovar - i)))
      }
    }
    # comb: LenCovar x 2^LenCovar matrix of covariate patterns; the last
    # (never-written) column stays all-zero and represents the empty pattern.
    comb <- matrix(0, nrow = LenCovar, ncol = 2^LenCovar)
    # BUG FIX (precedence): the original wrote `1: 2^LenCovar-1`, which parses
    # as (1:2^LenCovar) - 1 and only works because a 0 index is a silent no-op.
    # The intended range is the 2^LenCovar - 1 non-empty patterns.
    for (i in seq_len(2^LenCovar - 1)) {
      comb[finalInd[, i], i] <- 1
    }
    lambda.i <- exp(-(beta.0 + apply(beta * comb, 2, sum)) / alpha)
    # Conditional P(censored | pattern) for C ~ Uniform(0, theta).
    cond.cens.Prob <- (lambda.i / (alpha * theta)) *
      pgamma((theta / lambda.i)^alpha, 1 / alpha) * gamma(1 / alpha)
    # Probability mass of each covariate pattern.
    pmat <- matrix(p, nrow = LenCovar, ncol = 2^LenCovar)
    pdf.lambda.i <- apply(pmat^comb * (1 - pmat)^(1 - comb), 2, prod)
    t(cond.cens.Prob) %*% pdf.lambda.i
  }
  # -------------------------------------------------------------------------
  theta <- round(uniroot(function(x) censProbBin(x) - cens.p, c(0.0000001, 100))$root, 3)
  set.seed(seed)
  cov <- mapply(rbinom, n, 1, p)           # n x k matrix of Bernoulli draws
  colnames(cov) <- paste("x", 1:length(p), sep = "")
  EVENT <- rweibull(n, alpha, lambda * exp(-1 / alpha * (cov %*% beta)))
  data <- as.data.frame(cbind(id = 1:n, cov = cov, EVENT = EVENT, C = runif(n, 0, theta)))
  data$time <- ifelse(data$EVENT <= data$C, data$EVENT, data$C)
  data$status <- ifelse(data$EVENT <= data$C, 1, 0)
  return(list(theta = theta, cens.data = data))
}
# Example: four binary covariates, 30% nominal censoring, n = 200.
CenDatBin(alpha = 2, lambda = 4, cens.p = 0.3,
          beta = c(-0.1, 0.2, -0.3, 0.4), p = c(0.3, 0.4, 0.5, 0.6),
          size = 200, seed = 20200812)
# -----------------------------------------------------------------------------------
# simulations to verify that the average censoring rate matches the nominal 0.3
# -----------------------------------------------------------------------------------
cens.p <- c()
set.seed(20200812)
n <- 200
for (i in 1:1000) {
  data <- CenDatBin(alpha = 2, lambda = 4, cens.p = 0.3,
                    beta = c(-0.1, 0.2, -0.3, 0.4), p = c(0.3, 0.4, 0.5, 0.6),
                    size = n, seed = 2020 + i)$cens.data
  cens.p[i] <- sum(data["status"]) / n
}
1 - mean(cens.p)
# coxph(Surv(time,status))~x1+x2+x3+x4,data = data)
# ====================================================================
# B.2) all covariates are normal variables :generate the censoring data -- CenDatNorm
# default all are from standard normal distribution
# ====================================================================s
# Generate right-censored Weibull survival data with independent standard-normal
# covariates and a target censoring rate.
#
# Under the Weibull PH model the individual scale is
#   lambda.i = lambda * exp(-(x'beta)/alpha) = exp(-(beta.0 + x'beta)/alpha)
# with beta.0 = -alpha*log(lambda). Since x ~ N(0, I),
#   log(lambda.i) ~ N(-beta.0/alpha, sum(beta^2)/alpha^2),
# so lambda.i is log-normal and the marginal censoring probability is obtained
# by numerical integration against that log-normal density.
#
# Args:
#   alpha, lambda - Weibull shape/scale of the baseline hazard.
#   cens.p        - nominal censoring proportion.
#   beta          - regression coefficients (covariates are iid N(0, 1)).
#   size          - sample size.
#   seed          - RNG seed.
# Returns: list(theta, cens.data) with columns id, x1..xk, EVENT, C, time, status.
CenDatNorm <- function(alpha, lambda, cens.p, beta, size, seed = 20200812) {
  n <- size
  # -------------------------------------------------------------------------
  # Marginal censoring probability for a given theta.
  censProbNorm <- function(theta) {
    beta.0 <- -alpha * log(lambda)
    # BUG FIX: dlnorm()'s third argument is the SD of log(u), not the
    # variance; the original passed beta %*% beta / alpha^2 (the variance).
    sdlog <- sqrt(sum(beta^2)) / alpha
    PdfLambdai <- function(u) dlnorm(u, -beta.0 / alpha, sdlog)
    CondCensProb <- function(u) (u / (alpha * theta)) *
      pgamma((theta / u)^alpha, 1 / alpha) * gamma(1 / alpha)
    # Integrate over the log-normal support (u > 0); integrating from -Inf,
    # as the original did, can yield NaN from (theta/u)^alpha at u < 0 for
    # non-integer alpha.
    integrate(function(u) PdfLambdai(u) * CondCensProb(u), 0, Inf)$value
  }
  # -------------------------------------------------------------------------
  theta <- round(uniroot(function(x) censProbNorm(x) - cens.p, c(0.0001, 1000))$root, 3)
  set.seed(seed)
  cov <- mapply(rnorm, n, 0, rep(1, length(beta)))   # n x k matrix of N(0,1) draws
  colnames(cov) <- paste("x", 1:length(beta), sep = "")
  EVENT <- rweibull(n, alpha, lambda * exp(-1 / alpha * (cov %*% beta)))
  data <- as.data.frame(cbind(id = 1:n, cov = cov, EVENT = EVENT, C = runif(n, 0, theta)))
  data$time <- ifelse(data$EVENT <= data$C, data$EVENT, data$C)
  data$status <- ifelse(data$EVENT <= data$C, 1, 0)
  return(list(theta = theta, cens.data = data))
}
# Example: four standard-normal covariates, 30% nominal censoring, n = 200.
CenDatNorm(alpha = 2, lambda = 4, cens.p = 0.3,
           beta = c(-0.1, 0.2, -0.3, 0.4), size = 200, seed = 20200812)
# -----------------------------------------------------------------------------------
# simulations to verify that the average censoring rate matches the nominal 0.3
# -----------------------------------------------------------------------------------
cens.p <- c()
set.seed(20200810)
n <- 200
for (i in 1:1000) {
  data <- CenDatNorm(alpha = 2, lambda = 4, cens.p = 0.3,
                     beta = c(-0.1, 0.2, -0.3, 0.4),
                     size = n, seed = 202008 + i)$cens.data
  cens.p[i] <- sum(data["status"]) / n
}
1 - mean(cens.p)
# ====================================================================
# B.3) all covariates are mixed variables : generate the censoring data -- CenDatMixed
# - specify the distribution of all variables: "N" = normal,"B" = binary, "P"= possion, "U"= uniform
# - specify the distribution parameters in list for "B", "P", "U";for "U", the start and end should be set in matrix with two colomns
# ====================================================================
# Generate right-censored Weibull survival data with mixed-type covariates and
# a target censoring rate.
#
# Covariate types (argument `class`, one letter per covariate, in order):
#   "N" = standard normal, "B" = Bernoulli, "P" = Poisson, "U" = uniform.
# Distribution parameters come from `para`: para$B = success probabilities,
# para$P = Poisson means, para$U = matrix with one (min, max) row per uniform
# covariate. Covariates are bound in the fixed order N, B, P, U, and `beta`
# must follow that same order.
#
# The individual Weibull scale lambda.i = exp(-(beta.0 + x'beta)/alpha) has no
# closed-form density here, so it is approximated by a kernel-density estimate
# (loess-smoothed) from a large Monte-Carlo sample, and the marginal censoring
# probability is obtained by numerical integration.
#
# Returns: list(theta, cens.data) with columns id, x1..xk, EVENT, C, time, status.
CenDatMixed <- function(alpha, lambda, cens.p, beta, size,
                        class = c("N", "B", "P", "U"),
                        para = list(B = c(0.5), P = c(5), U = matrix(c(0, 1), ncol = 2, byrow = TRUE)),
                        seed = 20200812) {
  # Draw an n x k covariate matrix according to `class`/`para`.
  # BUG FIX: the original referenced a/b/c/d unconditionally in cbind(), which
  # raised "object not found" whenever a covariate type was absent; NULL
  # initialisation makes cbind() silently drop the missing pieces.
  draw.cov <- function(n) {
    a <- b <- d <- e <- NULL
    nNorm <- sum(class == "N")
    nBin  <- sum(class == "B")
    nPos  <- sum(class == "P")
    nUni  <- sum(class == "U")
    if (nNorm > 0) a <- as.matrix(mapply(rnorm, n, 0, rep(1, nNorm)))
    if (nBin  > 0) b <- as.matrix(mapply(rbinom, n, 1, para$B))
    if (nPos  > 0) d <- as.matrix(mapply(rpois, n, para$P))
    if (nUni  > 0) e <- as.matrix(mapply(runif, n, para$U[, 1], para$U[, 2]))
    cbind(a, b, d, e)
  }
  # -------------------------------------------------------------------------
  # Monte-Carlo estimate of the marginal censoring probability for a theta.
  CensProbMixed <- function(theta) {
    set.seed(20200810)   # fixed seed so the root-finder sees a smooth function of theta
    x <- draw.cov(10000)
    beta.0 <- -alpha * log(lambda)
    lambda.i <- exp(-(beta.0 + x %*% beta) / alpha)
    # Smooth density estimate of lambda.i; hoisted out of the integrand so it
    # is fitted once instead of on every integrate() evaluation (the original
    # refit density + loess inside PdfLambdai on each call).
    dens <- density(lambda.i, bw = "nrd0", kernel = "gaussian", na.rm = TRUE)
    y.loess <- loess(dens$y ~ dens$x, span = 0.1)
    PdfLambdai <- function(u) predict(y.loess, newdata = u)
    CondCensProb <- function(u) (u / (alpha * theta)) *
      pgamma((theta / u)^alpha, 1 / alpha) * gamma(1 / alpha)
    integrate(function(u) PdfLambdai(u) * CondCensProb(u),
              min(lambda.i), max(lambda.i))$value
  }
  # -------------------------------------------------------------------------
  theta <- round(uniroot(function(y) CensProbMixed(y) - cens.p, c(0.001, 100))$root, 3)
  set.seed(seed)
  cov <- draw.cov(size)   # same draw order (N, B, P, U) as the original code
  colnames(cov) <- paste("x", 1:length(beta), sep = "")
  EVENT <- rweibull(size, alpha, lambda * exp(-1 / alpha * (cov %*% beta)))
  data <- as.data.frame(cbind(id = 1:size, cov = cov, EVENT = EVENT, C = runif(size, 0, theta)))
  data$time <- ifelse(data$EVENT <= data$C, data$EVENT, data$C)
  data$status <- ifelse(data$EVENT <= data$C, 1, 0)
  return(list(theta = theta, cens.data = data))
}
# Example: one normal, one binary, one Poisson and one uniform covariate.
CenDatMixed(alpha = 2, lambda = 4, cens.p = 0.3, beta = c(-0.1, 0.2, -0.3, 0.4),
            size = 200,
            class = c("N", "B", "P", "U"),
            para = list(B = c(0.5), P = c(5), U = matrix(c(0, 1), ncol = 2, byrow = TRUE)),
            seed = 20200812)
# -----------------------------------------------------------------------------------
# simulations to verify that the average censoring rate matches the nominal 0.3
# -----------------------------------------------------------------------------------
cens.p <- c()
for (i in 1:1000) {
  data <- CenDatMixed(alpha = 2, lambda = 4, cens.p = 0.3,
                      beta = c(-0.1, 0.2, 0.5, -0.6, -0.3, 0.4), size = 200,
                      class = c("N", "B", "B", "P", "U", "U"),
                      para = list(B = c(0.5, 0.4), P = c(5),
                                  U = matrix(c(0, 1, 0.2, 0.6), ncol = 2, byrow = TRUE)),
                      seed = 202008 + i)$cens.data
  cens.p[i] <- sum(data["status"]) / 200
}
1 - mean(cens.p)
|
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
########################################
#### CURRENT FILE: ON START SCRIPT #####
########################################
## Fill the DESCRIPTION ----
## Add meta data about your application
golem::fill_desc(
  pkg_name = "berlinxbikes", # The Name of the package containing the App
  pkg_title = "berlinxbikes", # The Title of the package containing the App
  # typo fix: "tgo" -> "to"
  pkg_description = "A shiny app to explore bike lane types and bike accidents in Berlin", # The Description of the package containing the App
  author_first_name = "Cédric", # Your First Name
  author_last_name = "Scherer", # Your Last Name
  author_email = "cedricphilippscherer@gmail.com", # Your Email
  repo_url = NULL # The URL of the GitHub Repo (optional)
)
## Set {golem} options ----
golem::set_golem_options()
## Create Common Files ----
## See ?usethis for more information
usethis::use_mit_license( name = "Golem User" ) # You can set another license here
usethis::use_readme_rmd( open = FALSE )
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge( "Experimental" )
usethis::use_news_md( open = FALSE )
## Use git ----
usethis::use_git()
## Init Testing Infrastructure ----
## Create a template for tests
golem::use_recommended_tests()
## Use Recommended Packages ----
golem::use_recommended_deps()
## Favicon ----
# If you want to change the favicon (default is golem's one)
golem::remove_favicon()
golem::use_favicon() # path = "path/to/ico". Can be an online file.
## Add helper functions ----
golem::use_utils_ui()
golem::use_utils_server()
# You're now set! ----
# go to dev/02_dev.R
rstudioapi::navigateToFile( "dev/02_dev.R" )
|
/dev/01_start.R
|
no_license
|
JohnCoene/berlinxbikes
|
R
| false
| false
| 2,006
|
r
|
# Building a Prod-Ready, Robust Shiny Application.
#
# README: each step of the dev files is optional, and you don't have to
# fill every dev scripts before getting started.
# 01_start.R should be filled at start.
# 02_dev.R should be used to keep track of your development during the project.
# 03_deploy.R should be used once you need to deploy your app.
#
#
########################################
#### CURRENT FILE: ON START SCRIPT #####
########################################
## Fill the DESCRIPTION ----
## Add meta data about your application
golem::fill_desc(
  pkg_name = "berlinxbikes", # The Name of the package containing the App
  pkg_title = "berlinxbikes", # The Title of the package containing the App
  # typo fix: "tgo" -> "to"
  pkg_description = "A shiny app to explore bike lane types and bike accidents in Berlin", # The Description of the package containing the App
  author_first_name = "Cédric", # Your First Name
  author_last_name = "Scherer", # Your Last Name
  author_email = "cedricphilippscherer@gmail.com", # Your Email
  repo_url = NULL # The URL of the GitHub Repo (optional)
)
## Set {golem} options ----
golem::set_golem_options()
## Create Common Files ----
## See ?usethis for more information
usethis::use_mit_license( name = "Golem User" ) # You can set another license here
usethis::use_readme_rmd( open = FALSE )
usethis::use_code_of_conduct()
usethis::use_lifecycle_badge( "Experimental" )
usethis::use_news_md( open = FALSE )
## Use git ----
usethis::use_git()
## Init Testing Infrastructure ----
## Create a template for tests
golem::use_recommended_tests()
## Use Recommended Packages ----
golem::use_recommended_deps()
## Favicon ----
# If you want to change the favicon (default is golem's one)
golem::remove_favicon()
golem::use_favicon() # path = "path/to/ico". Can be an online file.
## Add helper functions ----
golem::use_utils_ui()
golem::use_utils_server()
# You're now set! ----
# go to dev/02_dev.R
rstudioapi::navigateToFile( "dev/02_dev.R" )
|
##----------------------------------------------------------------------
# Volcano plot
##----------------------------------------------------------------------
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# Handling visibility
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# Hide the "fill names" option until name display itself is enabled.
shinyjs::hide("volcanoNamesFill")
# Keep the fill option's visibility in sync with the show-names checkbox.
observeEvent(input$volcanoNames, {
  if (input$volcanoNames) {
    shinyjs::show("volcanoNamesFill")
  } else {
    shinyjs::hide("volcanoNamesFill")
  }
})
# Swap the visible colour pickers and x-axis cutoff input between the
# methylation ("met") and expression ("exp") modes.
observeEvent(input$volcanoInputRb, {
  met.widgets <- c("colHypomethylated", "colHypermethylated", "volcanoxcutMet")
  exp.widgets <- c("colUpregulated", "colDownregulated", "volcanoxcutExp")
  if (input$volcanoInputRb == "met") {
    lapply(met.widgets, shinyjs::show)
    lapply(exp.widgets, shinyjs::hide)
  } else if (input$volcanoInputRb == "exp") {
    lapply(exp.widgets, shinyjs::show)
    lapply(met.widgets, shinyjs::hide)
  }
})
# If the user wants to hilght as probe or gene
# When the user picks probes/genes to highlight, force name display on and
# restrict the labels to the highlighted set; otherwise label significant points.
observe({
  highlighted <- !is.null(input$volcanoHighlight)
  if (highlighted) {
    updateCheckboxInput(session, "volcanoNames", value = TRUE)
    updateCheckboxInput(session, "volcanoNamesFill", value = TRUE)
  }
  updateSelectizeInput(session, 'volcanoShowHighlitgh',
                       selected = if (highlighted) "highlighted" else "significant")
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# File selection
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# Register the file-chooser backing input$volcanofile; roots follow the
# user-selected working directory and only csv/excel files are listed.
observe({
shinyFileChoose(input, 'volcanofile', roots=get.volumes(input$workingDir),
session=session, restrictions=system.file(package='base'),
filetypes=c('excel', 'csv'))
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# DATA INPUT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# Reactive: read the user-selected results csv (DMR or DEA output) into a
# data.frame; returns NULL when nothing is selected or the file is not a csv.
volcanodata <- reactive({
inFile <- input$volcanofile
if (is.null(inFile)) return(NULL)
file <- as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), inFile)$datapath)
# verify if the file is a csv
ext <- tools::file_ext(file)
if(ext != "csv"){
# NOTE(review): this alert targets the "dmrmessage"/"dmrAlert" anchors while
# the plotting code below uses "volcanomessage"/"volcanoAlert" -- this looks
# like a copy-paste slip; confirm which anchor exists in the volcano UI.
createAlert(session, "dmrmessage", "dmrAlert", title = "Data input error", style = "danger",
content = paste0("Sorry, but I'm expecting a csv file, but I got a: ",
ext), append = FALSE)
return(NULL)
}
withProgress(message = 'Loading data',
detail = 'This may take a while...', value = 0, {
# Drop the auto-named first column ("X1") if the csv round-trip added one.
df <- as.data.frame(read_csv(file)); df$X1 <- NULL
incProgress(1, detail = "Completed")
})
return(df)
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# UPDATING FIELDS AFTER DATA INPUT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# When a results file is chosen, pre-fill the cutoff inputs from its filename,
# which encodes the analysis parameters (see the pattern notes in the plotting
# code below):
#   DMR_results_<groupCol>_<group1>_<group2>_pcut_<p>_meancut_<m>.csv
#   DEA_result_<groupCol>_<group1>_<group2>_pcut_<p>_logFC.cut_<m>.csv
observeEvent(input$volcanofile, {
file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$volcanofile)$datapath))
if(length(file) > 0){
file <- unlist(str_split(file,"_"))
# Fields by position after splitting on "_": 4 = group1, 5 = group2,
# 7 = p-value cutoff, 9 = mean/logFC cutoff (with ".csv" still attached).
group1 <- file[4]
group2 <- file[5]
pcut <- file[7]
meancut <- gsub(".csv","",file[9])
# group1/group2 are parsed but unused here; the cutoffs seed both the
# methylation and expression x-cut inputs plus the shared y-cut.
updateNumericInput(session, "volcanoxcutMet", value = meancut)
updateNumericInput(session, "volcanoxcutExp", value = meancut)
updateNumericInput(session, "volcanoycut", value = pcut)
}
})
# automatically change the type based in the input
# Auto-select the input type from the filename: files produced by the DEA
# (expression) pipeline contain "DEA"; everything else is treated as
# methylation results.
observe({
if(!is.null(input$volcanofile)){
file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$volcanofile)$datapath))
selected <- "met"
if(grepl("DEA",file)) selected <- "exp"
updateRadioButtons(session, "volcanoInputRb", selected = selected)
}
})
# Update choises to select to highlight
# Populate the highlight selector: gene symbols for expression (DEA) results,
# probe IDs for methylation results. server = TRUE keeps the potentially large
# choice list on the server side.
observe({
data <- volcanodata()
if(!is.null(data)) {
file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})),
input$volcanofile)$datapath))
if(grepl("DEA",file)){
# Guard: only offer gene symbols when the column actually exists.
if("Gene_symbol" %in% colnames(data)){
updateSelectizeInput(session, 'volcanoHighlight',
choices = as.character(na.omit(unique(data$Gene_symbol))), server = TRUE)
}
} else {
updateSelectizeInput(session, 'volcanoHighlight',
choices = as.character(na.omit(unique(data$probeID))), server = TRUE)
}
}
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# STATUS BOXES
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# Refresh the "up" value box (hyper/upregulated count) on each plot-button press.
observeEvent(input$volcanoPlotBt , {
output$volcanoBoxUp <- renderValueBox({
# volcano.values() is isolated so the box only updates on the button press.
ret <- isolate({volcano.values()})
if(is.null(ret)) {
value <- 0
} else {
value <- ret$up
}
# NOTE(review): when ret is NULL, ret$label[2] evaluates to NULL (R returns
# NULL for NULL$x), so the box renders without a subtitle rather than erroring.
valueBox(
value = value,
subtitle = ret$label[2],
icon = icon("arrow-up"),
color = "red"
)
})
})
# Refresh the "not significant" value box on each plot-button press.
observeEvent(input$volcanoPlotBt , {
output$volcanoBoxInsig <- renderValueBox({
ret <- isolate({volcano.values()})
if(is.null(ret)) {
value <- 0
} else {
value <- ret$insig
}
valueBox(
value = value,
# ret$label[1] is passed positionally as the subtitle.
ret$label[1],
icon = icon("minus"),
color = "black"
)
})
})
# Refresh the "down" value box (hypo/downregulated count) on each plot-button press.
observeEvent(input$volcanoPlotBt , {
output$volcanoBoxDown <- renderValueBox({
ret <- isolate({volcano.values()})
if(is.null(ret)) {
value <- 0
} else {
value <- ret$down
}
valueBox(
value = value,
# ret$label[3] is passed positionally as the subtitle.
ret$label[3],
icon = icon("arrow-down"),
color = "olive"
)
})
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# PLOT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
volcano.values <- reactive({
if(input$volcanoPlotBt){
closeAlert(session, "volcanoAlert")
# read csv file with results
data <- volcanodata()
if(is.null(data)) return(NULL)
names.fill <- isolate({input$volcanoNamesFill})
if(isolate({input$volcanoInputRb})=="met") {
x.cut <- isolate({as.numeric(input$volcanoxcutMet)})
} else {
x.cut <- isolate({as.numeric(input$volcanoxcutExp)})
}
y.cut <- isolate({as.numeric(input$volcanoycut)})
# Set parameters based in the filename
# patterns are
# DEA_result_groupCol_group1_group2_pcut_0.05_logFC.cut_0.csv
# DMR_results_groupCol_group1_group2_pcut_0.05_meancut_0.3.csv
file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$volcanofile)$datapath))
file <- unlist(str_split(file,"_"))
groupCol <- file[3]
group1 <- file[4]
group2 <- file[5]
names <- NULL
# methylation pipeline
if(isolate({input$volcanoInputRb})=="met"){
group1.col <- gsub("[[:punct:]]| ", ".", group1)
group2.col <- gsub("[[:punct:]]| ", ".", group2)
diffcol <- paste("diffmean", group1.col, group2.col,sep = ".")
pcol <- paste("p.value.adj", group1.col, group2.col,sep = ".")
if(!(pcol %in% colnames(data) & diffcol %in% colnames(data) )) {
createAlert(session, "volcanomessage", "volcanoAlert", title = "Error", style = "success",
content = "We couldn't find the right columns in the data", append = FALSE)
}
if(isolate({input$volcanoNames})) names <- data$probeID
label <- c("Not Significant",
"Hypermethylated",
"Hypomethylated")
label[2:3] <- paste(label[2:3], "in", group2)
# Update data into a file
statuscol <- paste("status",group1.col,group2.col,sep = ".")
statuscol2 <- paste("status",group2.col,group1.col,sep = ".")
data[,statuscol] <- "Not Significant"
data[,statuscol2] <- "Not Significant"
# get significant data
sig <- data[,pcol] < y.cut
sig[is.na(sig)] <- FALSE
# hypermethylated samples compared to old state
hyper <- data[,diffcol] > x.cut
hyper[is.na(hyper)] <- FALSE
# hypomethylated samples compared to old state
hypo <- data[,diffcol] < (-x.cut)
hypo[is.na(hypo)] <- FALSE
if (any(hyper & sig)) data[hyper & sig,statuscol] <- paste("Hypermethylated","in", group2)
if (any(hyper & sig)) data[hyper & sig,statuscol2] <- paste("Hypomethylated","in", group1)
if (any(hypo & sig)) data[hypo & sig,statuscol] <- paste("Hypomethylated","in", group2)
if (any(hypo & sig)) data[hypo & sig,statuscol2] <- paste("Hypermethylated","in", group1)
insig.count <- nrow(data) - table(sig)["TRUE"]
up.count <- table(hyper & sig)["TRUE"]
down.count <- table(hypo & sig)["TRUE"]
rownames(data) <- data$probeID
if(isolate({input$volcanoSave})){
getPath <- parseDirPath(get.volumes(isolate({input$workingDir})), input$workingDir)
if (length(getPath) == 0) getPath <- paste0(Sys.getenv("HOME"),"/TCGAbiolinksGUI")
csv <- paste0(paste("DMR_results",
gsub("_",".",groupCol),
group1.col,
group2.col,
"pcut",y.cut,
"meancut",x.cut,
sep = "_"),
".csv")
write_csv(data,path = file.path(getPath, csv))
createAlert(session, "volcanomessage", "volcanoAlert", title = "File created", style = "success",
content = paste0(file.path(getPath, csv)), append = FALSE)
}
withProgress(message = 'Creating plot',
detail = 'This may take a while...', value = 0, {
p <- TCGAVisualize_volcano(x = data[,diffcol],
y = data[,pcol],
ylab = expression(paste(-Log[10],
" (FDR corrected -P values)")),
xlab = expression(paste(
"DNA Methylation difference (",beta,"-values)")
),
color = c(isolate({input$colinsignificant}),
isolate({input$colHypermethylated}),
isolate({input$colHypomethylated})),
title = paste("Volcano plot", "(", group2, "vs", group1,")"),
legend= "Legend",
label = label,
names = names,
names.fill = names.fill,
x.cut = x.cut,
y.cut = y.cut,
show.names = isolate({input$volcanoShowHighlitgh}),
highlight=isolate({input$volcanoHighlight}),
highlight.color = isolate({input$volcanoColHighlight}),
filename = NULL)
})
} else {
label <- c("Not Significant",
"Upregulated",
"Downregulated")
label[2:3] <- paste(label[2:3], "in", group2)
if(isolate({input$volcanoNames})) names <- as.character(data$Gene_symbol)
data$status <- "Insignificant"
data[data$logFC >= x.cut & data$FDR <= y.cut,"status"] <- paste0("Upregulated in ", group2)
data[data$logFC <= -x.cut & data$FDR <= y.cut,"status"] <- paste0("Downregulated in ", group2)
up.count <- table(data$logFC >= x.cut & data$FDR <= y.cut)["TRUE"]
if(is.na(up.count)) up.count <- 0
down.count <- table(data$logFC <= -x.cut & data$FDR <= y.cut)["TRUE"]
if(is.na(down.count)) down.count <- 0
insig.count <- nrow(data) - down.count - up.count
# Update data into a file
if(isolate({input$volcanoSave})){
getPath <- parseDirPath(get.volumes(isolate({input$workingDir})), input$workingDir)
if (length(getPath) == 0) getPath <- paste0(Sys.getenv("HOME"),"/TCGAbiolinksGUI")
out.filename <- paste0(paste("DEA_results",
gsub("_",".",groupCol),
gsub("_",".",group1),
gsub("_",".",group2),
"pcut", y.cut,
"logFC.cut",x.cut,
sep="_"),
".csv")
out.filename <- file.path(getPath,out.filename)
write_csv(data, path = out.filename)
createAlert(session, "volcanomessage", "volcanoAlert", title = "File created", style = "success",
content = paste0(out.filename), append = FALSE)
}
withProgress(message = 'Creating plot',
detail = 'This may take a while...', value = 0, {
p <- TCGAVisualize_volcano(x = data$logFC,
y = data$FDR,
ylab = expression(paste(-Log[10],
" (FDR corrected -P values)")),
xlab = " Gene expression fold change (Log2)",
color = c(isolate({input$colinsignificant}),
isolate({input$colUpregulated}),
isolate({input$colDownregulated})),
title = paste("Volcano plot", "(", group2, "vs", group1,")"),
legend= "Legend",
label = label,
names = names,
x.cut = x.cut,
y.cut = y.cut,
show.names = isolate({input$volcanoShowHighlitgh}),
highlight=isolate({input$volcanoHighlight}),
highlight.color = isolate({input$volcanoColHighlight}),
filename = NULL)
})
}
}
ret <- list(plot = p, up = up.count, down = down.count, insig =insig.count, label = label)
})
observeEvent(input$volcanoPlotBt , {
# Render the volcano plot computed by volcano.values() when the plot
# button is pressed; isolate() prevents re-rendering on unrelated input
# changes until the button is pressed again.
output$volcano.plot <- renderPlot({
ret <- isolate({volcano.values()})
if(is.null(ret)) return(NULL)
ret$plot
})
})
observeEvent(input$volcanoPlotBt , {
# Open the collapse panel and size the plot area with the user-chosen
# width (in percent) and height (in pixels).
updateCollapse(session, "collapseVolcano", open = "Volcano plot")
output$volcanoPlot <- renderUI({
plotOutput("volcano.plot", width = paste0(isolate({input$volcanowidth}), "%"), height = isolate({input$volcanoheight}))
})})
|
/inst/app/server/volcano.R
|
no_license
|
inambioinfo/TCGAbiolinksGUI
|
R
| false
| false
| 16,626
|
r
|
##----------------------------------------------------------------------
# Volcano plot
##----------------------------------------------------------------------
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# Handling visibility
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
shinyjs::hide("volcanoNamesFill") # hidden by default until "show names" is ticked
observeEvent(input$volcanoNames, {
# The "fill names" option only makes sense while names are being shown,
# so toggle its visibility together with the checkbox.
if(input$volcanoNames){
shinyjs::show("volcanoNamesFill")
} else {
shinyjs::hide("volcanoNamesFill")
}
})
observeEvent(input$volcanoInputRb, {
  # Show the widgets for the selected pipeline (methylation vs expression)
  # and hide the ones belonging to the other pipeline. Inputs other than
  # "met"/"exp" deliberately leave the visibility untouched.
  met.widgets <- c("colHypomethylated", "colHypermethylated", "volcanoxcutMet")
  exp.widgets <- c("colUpregulated", "colDownregulated", "volcanoxcutExp")
  if (input$volcanoInputRb == "met") {
    for (w in met.widgets) shinyjs::show(w)
    for (w in exp.widgets) shinyjs::hide(w)
  } else if (input$volcanoInputRb == "exp") {
    for (w in exp.widgets) shinyjs::show(w)
    for (w in met.widgets) shinyjs::hide(w)
  }
})
# If the user wants to highlight a probe or gene, names must be enabled
# and the plot restricted to the highlighted entries only.
observe({
if(!is.null(input$volcanoHighlight)){
updateCheckboxInput(session, "volcanoNames", value = TRUE)
updateCheckboxInput(session, "volcanoNamesFill", value = TRUE)
updateSelectizeInput(session, 'volcanoShowHighlitgh', selected = "highlighted")
} else {
# Nothing highlighted: fall back to labelling significant entries only.
updateSelectizeInput(session, 'volcanoShowHighlitgh', selected = "significant")
}
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# File selection
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
observe({
# Wire the file chooser to the available volumes; only excel/csv result
# files can be picked.
shinyFileChoose(input, 'volcanofile', roots=get.volumes(input$workingDir),
session=session, restrictions=system.file(package='base'),
filetypes=c('excel', 'csv'))
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# DATA INPUT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
volcanodata <- reactive({
  # Reads the DEA/DMR results csv selected by the user.
  # Returns a data.frame, or NULL when no file is selected or the file is
  # not a csv (in which case an alert is shown in the volcano tab).
  inFile <- input$volcanofile
  if (is.null(inFile)) return(NULL)
  file <- as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), inFile)$datapath)
  # verify if the file is a csv
  ext <- tools::file_ext(file)
  if(ext != "csv"){
    # BUGFIX: this alert was anchored to the DMR tab ("dmrmessage"/"dmrAlert"),
    # so it never appeared in the volcano tab; use the volcano anchors, which
    # every other alert in this file (and closeAlert) already uses.
    createAlert(session, "volcanomessage", "volcanoAlert", title = "Data input error", style = "danger",
                content = paste0("Sorry, but I'm expecting a csv file, but I got a: ",
                                 ext), append = FALSE)
    return(NULL)
  }
  withProgress(message = 'Loading data',
               detail = 'This may take a while...', value = 0, {
                 df <- as.data.frame(read_csv(file))
                 df$X1 <- NULL # drop the row-number column added on export
                 incProgress(1, detail = "Completed")
               })
  return(df)
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# UPDATING FIELDS AFTER DATA INPUT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
observeEvent(input$volcanofile, {
  # Pre-fill the cut-off fields from the selected file name, which encodes
  # the values used when the results were produced. Expected patterns:
  #   DEA_results_groupCol_group1_group2_pcut_0.05_logFC.cut_0.csv
  #   DMR_results_groupCol_group1_group2_pcut_0.05_meancut_0.3.csv
  # so after splitting on "_": p-cut is field 7 and x-cut is field 9.
  file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$volcanofile)$datapath))
  if(length(file) > 0){
    file <- unlist(str_split(file,"_"))
    group1 <- file[4]
    group2 <- file[5]
    pcut <- file[7]
    # BUGFIX: anchor the extension removal; the old gsub(".csv", ...) used an
    # unanchored regex where "." matches any character.
    meancut <- sub("\\.csv$", "", file[9])
    updateNumericInput(session, "volcanoxcutMet", value = meancut)
    updateNumericInput(session, "volcanoxcutExp", value = meancut)
    updateNumericInput(session, "volcanoycut", value = pcut)
  }
})
# automatically change the pipeline type based on the selected file
observe({
if(!is.null(input$volcanofile)){
file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$volcanofile)$datapath))
# DEA_* files come from the expression pipeline; everything else is
# treated as methylation (DMR) results.
selected <- "met"
if(grepl("DEA",file)) selected <- "exp"
updateRadioButtons(session, "volcanoInputRb", selected = selected)
}
})
# Refresh the set of IDs offered for highlighting whenever new data is
# loaded: gene symbols for DEA (expression) results, probe IDs otherwise.
observe({
  df <- volcanodata()
  if (is.null(df)) return(NULL)
  selected.file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})),
                                                        input$volcanofile)$datapath))
  if (grepl("DEA", selected.file)) {
    if ("Gene_symbol" %in% colnames(df)) {
      choices <- as.character(na.omit(unique(df$Gene_symbol)))
      updateSelectizeInput(session, 'volcanoHighlight', choices = choices, server = TRUE)
    }
  } else {
    choices <- as.character(na.omit(unique(df$probeID)))
    updateSelectizeInput(session, 'volcanoHighlight', choices = choices, server = TRUE)
  }
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# STATUS BOXES
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
observeEvent(input$volcanoPlotBt , {
  # Counter box: number of up-regulated / hypermethylated features from
  # the last volcano computation (0 when nothing has been computed yet).
  output$volcanoBoxUp <- renderValueBox({
    ret <- isolate({volcano.values()})
    value <- if (is.null(ret)) 0 else ret$up
    valueBox(
      value = value,
      subtitle = ret$label[2],
      icon = icon("arrow-up"),
      color = "red"
    )
  })
})
observeEvent(input$volcanoPlotBt , {
# Counter box: number of features that did not pass the cut-offs.
output$volcanoBoxInsig <- renderValueBox({
ret <- isolate({volcano.values()})
if(is.null(ret)) {
value <- 0
} else {
value <- ret$insig
}
# NOTE(review): when ret is NULL, ret$label[1] evaluates to NULL and the
# box is rendered without a subtitle -- consider a fallback label.
valueBox(
value = value,
ret$label[1],
icon = icon("minus"),
color = "black"
)
})
})
observeEvent(input$volcanoPlotBt , {
# Counter box: number of down-regulated / hypomethylated features.
output$volcanoBoxDown <- renderValueBox({
ret <- isolate({volcano.values()})
if(is.null(ret)) {
value <- 0
} else {
value <- ret$down
}
# NOTE(review): when ret is NULL, ret$label[3] evaluates to NULL and the
# box is rendered without a subtitle -- consider a fallback label.
valueBox(
value = value,
ret$label[3],
icon = icon("arrow-down"),
color = "olive"
)
})
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# PLOT
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
volcano.values <- reactive({
  # Builds the volcano plot and the up/down/insignificant counters from the
  # loaded DEA (expression) or DMR (methylation) results table.
  # Returns NULL until the plot button has been pressed or while no valid
  # file is loaded; otherwise a list(plot, up, down, insig, label).
  #
  # BUGFIX: the original fell through to `ret <- list(plot = p, ...)` with
  # `p` (and the counters) undefined when the button had not been pressed
  # yet, making the reactive error instead of returning NULL.
  if (!input$volcanoPlotBt) return(NULL)
  closeAlert(session, "volcanoAlert")
  # read csv file with results
  data <- volcanodata()
  if (is.null(data)) return(NULL)
  names.fill <- isolate({input$volcanoNamesFill})
  # x-axis cut-off depends on the pipeline: methylation difference vs logFC
  if (isolate({input$volcanoInputRb}) == "met") {
    x.cut <- isolate({as.numeric(input$volcanoxcutMet)})
  } else {
    x.cut <- isolate({as.numeric(input$volcanoxcutExp)})
  }
  y.cut <- isolate({as.numeric(input$volcanoycut)})
  # Set parameters based on the filename
  # patterns are
  # DEA_result_groupCol_group1_group2_pcut_0.05_logFC.cut_0.csv
  # DMR_results_groupCol_group1_group2_pcut_0.05_meancut_0.3.csv
  file <- basename(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$volcanofile)$datapath))
  file <- unlist(str_split(file, "_"))
  groupCol <- file[3]
  group1 <- file[4]
  group2 <- file[5]
  names <- NULL
  if (isolate({input$volcanoInputRb}) == "met") {
    # ----- methylation pipeline -----
    group1.col <- gsub("[[:punct:]]| ", ".", group1)
    group2.col <- gsub("[[:punct:]]| ", ".", group2)
    diffcol <- paste("diffmean", group1.col, group2.col, sep = ".")
    pcol <- paste("p.value.adj", group1.col, group2.col, sep = ".")
    if (!(pcol %in% colnames(data) & diffcol %in% colnames(data))) {
      # BUGFIX: this is an error, so use the "danger" style (was "success")
      # and stop here instead of failing below with an obscure subscript
      # error when the columns are accessed.
      createAlert(session, "volcanomessage", "volcanoAlert", title = "Error", style = "danger",
                  content = "We couldn't find the right columns in the data", append = FALSE)
      return(NULL)
    }
    if (isolate({input$volcanoNames})) names <- data$probeID
    label <- c("Not Significant",
               "Hypermethylated",
               "Hypomethylated")
    label[2:3] <- paste(label[2:3], "in", group2)
    # Status columns, one from each group's perspective
    statuscol <- paste("status", group1.col, group2.col, sep = ".")
    statuscol2 <- paste("status", group2.col, group1.col, sep = ".")
    data[, statuscol] <- "Not Significant"
    data[, statuscol2] <- "Not Significant"
    # get significant data (NA p-values treated as not significant)
    sig <- data[, pcol] < y.cut
    sig[is.na(sig)] <- FALSE
    # hypermethylated samples compared to old state
    hyper <- data[, diffcol] > x.cut
    hyper[is.na(hyper)] <- FALSE
    # hypomethylated samples compared to old state
    hypo <- data[, diffcol] < (-x.cut)
    hypo[is.na(hypo)] <- FALSE
    if (any(hyper & sig)) data[hyper & sig, statuscol] <- paste("Hypermethylated", "in", group2)
    if (any(hyper & sig)) data[hyper & sig, statuscol2] <- paste("Hypomethylated", "in", group1)
    if (any(hypo & sig)) data[hypo & sig, statuscol] <- paste("Hypomethylated", "in", group2)
    if (any(hypo & sig)) data[hypo & sig, statuscol2] <- paste("Hypermethylated", "in", group1)
    # BUGFIX: use sum() so the counters are 0 (not NA) when nothing passes
    # the cut-offs; the old table(...)["TRUE"] returned NA in that case,
    # unlike the expression branch which guarded against it.
    up.count <- sum(hyper & sig)
    down.count <- sum(hypo & sig)
    insig.count <- nrow(data) - sum(sig)
    rownames(data) <- data$probeID
    # Optionally persist the annotated table next to the working directory
    if (isolate({input$volcanoSave})) {
      getPath <- parseDirPath(get.volumes(isolate({input$workingDir})), input$workingDir)
      if (length(getPath) == 0) getPath <- paste0(Sys.getenv("HOME"), "/TCGAbiolinksGUI")
      csv <- paste0(paste("DMR_results",
                          gsub("_", ".", groupCol),
                          group1.col,
                          group2.col,
                          "pcut", y.cut,
                          "meancut", x.cut,
                          sep = "_"),
                    ".csv")
      write_csv(data, path = file.path(getPath, csv))
      createAlert(session, "volcanomessage", "volcanoAlert", title = "File created", style = "success",
                  content = paste0(file.path(getPath, csv)), append = FALSE)
    }
    withProgress(message = 'Creating plot',
                 detail = 'This may take a while...', value = 0, {
                   p <- TCGAVisualize_volcano(x = data[, diffcol],
                                              y = data[, pcol],
                                              ylab = expression(paste(-Log[10],
                                                                      " (FDR corrected -P values)")),
                                              xlab = expression(paste(
                                                "DNA Methylation difference (", beta, "-values)")
                                              ),
                                              color = c(isolate({input$colinsignificant}),
                                                        isolate({input$colHypermethylated}),
                                                        isolate({input$colHypomethylated})),
                                              title = paste("Volcano plot", "(", group2, "vs", group1, ")"),
                                              legend = "Legend",
                                              label = label,
                                              names = names,
                                              names.fill = names.fill,
                                              x.cut = x.cut,
                                              y.cut = y.cut,
                                              show.names = isolate({input$volcanoShowHighlitgh}),
                                              highlight = isolate({input$volcanoHighlight}),
                                              highlight.color = isolate({input$volcanoColHighlight}),
                                              filename = NULL)
                 })
  } else {
    # ----- expression (DEA) pipeline -----
    label <- c("Not Significant",
               "Upregulated",
               "Downregulated")
    label[2:3] <- paste(label[2:3], "in", group2)
    if (isolate({input$volcanoNames})) names <- as.character(data$Gene_symbol)
    data$status <- "Insignificant"
    data[data$logFC >= x.cut & data$FDR <= y.cut, "status"] <- paste0("Upregulated in ", group2)
    data[data$logFC <= -x.cut & data$FDR <= y.cut, "status"] <- paste0("Downregulated in ", group2)
    # sum(..., na.rm = TRUE) counts the TRUE entries and is 0 when nothing
    # passes, removing the need for the old is.na() guards.
    up.count <- sum(data$logFC >= x.cut & data$FDR <= y.cut, na.rm = TRUE)
    down.count <- sum(data$logFC <= -x.cut & data$FDR <= y.cut, na.rm = TRUE)
    insig.count <- nrow(data) - down.count - up.count
    # Optionally persist the annotated table next to the working directory
    if (isolate({input$volcanoSave})) {
      getPath <- parseDirPath(get.volumes(isolate({input$workingDir})), input$workingDir)
      if (length(getPath) == 0) getPath <- paste0(Sys.getenv("HOME"), "/TCGAbiolinksGUI")
      out.filename <- paste0(paste("DEA_results",
                                   gsub("_", ".", groupCol),
                                   gsub("_", ".", group1),
                                   gsub("_", ".", group2),
                                   "pcut", y.cut,
                                   "logFC.cut", x.cut,
                                   sep = "_"),
                             ".csv")
      out.filename <- file.path(getPath, out.filename)
      write_csv(data, path = out.filename)
      createAlert(session, "volcanomessage", "volcanoAlert", title = "File created", style = "success",
                  content = paste0(out.filename), append = FALSE)
    }
    withProgress(message = 'Creating plot',
                 detail = 'This may take a while...', value = 0, {
                   p <- TCGAVisualize_volcano(x = data$logFC,
                                              y = data$FDR,
                                              ylab = expression(paste(-Log[10],
                                                                      " (FDR corrected -P values)")),
                                              xlab = " Gene expression fold change (Log2)",
                                              color = c(isolate({input$colinsignificant}),
                                                        isolate({input$colUpregulated}),
                                                        isolate({input$colDownregulated})),
                                              title = paste("Volcano plot", "(", group2, "vs", group1, ")"),
                                              legend = "Legend",
                                              label = label,
                                              names = names,
                                              x.cut = x.cut,
                                              y.cut = y.cut,
                                              show.names = isolate({input$volcanoShowHighlitgh}),
                                              highlight = isolate({input$volcanoHighlight}),
                                              highlight.color = isolate({input$volcanoColHighlight}),
                                              filename = NULL)
                 })
  }
  list(plot = p, up = up.count, down = down.count, insig = insig.count, label = label)
})
observeEvent(input$volcanoPlotBt , {
# Render the volcano plot computed by volcano.values() when the plot
# button is pressed; isolate() prevents re-rendering on unrelated input
# changes until the button is pressed again.
output$volcano.plot <- renderPlot({
ret <- isolate({volcano.values()})
if(is.null(ret)) return(NULL)
ret$plot
})
})
observeEvent(input$volcanoPlotBt , {
# Open the collapse panel and size the plot area with the user-chosen
# width (in percent) and height (in pixels).
updateCollapse(session, "collapseVolcano", open = "Volcano plot")
output$volcanoPlot <- renderUI({
plotOutput("volcano.plot", width = paste0(isolate({input$volcanowidth}), "%"), height = isolate({input$volcanoheight}))
})})
|
shinyServer(function(input, output, session) {
# NOTE(review): this server script is work-in-progress and does NOT parse.
# The comments below flag the syntax problems rather than guessing at the
# intended logic; the code itself is left untouched.
dt <- reactive({
# Load the pre-computed table from disk.
dt <- readRDS("table.rds")
})
dt_filter <- reactive({
dt <- dt()
if(!is.null(input$placeholder)) {
# NOTE(review): the pipe below has no right-hand side -- the filter step
# (presumably driven by input$placeholder) is missing.
dt <- dt %>%
}
# NOTE(review): stray closing brace -- the braces of this reactive are
# unbalanced from here on.
}
dt
})
output$selected_grp <- renderUI({
# Group selector built from the distinct var_name values in the table.
grp_choice <- dt()$var_name %>% unique
selectizeInput("selected_grp",
"Choose Group",
selected = c("Preselected"),
choices = grp_choice,
multiple = TRUE
)
})
output$summarized_table <- DT::renderDataTable({
tmp <- dt_filter() %>%
(function(x){
group_by_(x, .dots = lapply(input$selected_groupbyvar, as.symbol)) %>%
# NOTE(review): summarize( is never closed and has no summary expressions;
# group_by_ is also deprecated -- prefer group_by(across(all_of(...))).
summarize(
})
session$onSessionEnded(function() {
# Stop the app when the browser session ends.
stopApp(NULL)
})
})
|
/server.R
|
no_license
|
octonana/personal_wip2
|
R
| false
| false
| 861
|
r
|
shinyServer(function(input, output, session) {
dt <- reactive({
dt <- readRDS("table.rds")
})
dt_filter <- reactive({
dt <- dt()
if(!is.null(input$placeholder)) {
dt <- dt %>%
}
}
dt
})
output$selected_grp <- renderUI({
grp_choice <- dt()$var_name %>% unique
selectizeInput("selected_grp",
"Choose Group",
selected = c("Preselected"),
choices = grp_choice,
multiple = TRUE
)
})
output$summarized_table <- DT::renderDataTable({
tmp <- dt_filter() %>%
(function(x){
group_by_(x, .dots = lapply(input$selected_groupbyvar, as.symbol)) %>%
summarize(
})
session$onSessionEnded(function() {
stopApp(NULL)
})
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export_results.R
\name{export_results}
\alias{export_results}
\title{Export experiment results to a dataframe or file.}
\usage{
export_results(experiment, strategies = NULL, tracks = "all", file = NULL)
}
\arguments{
\item{experiment}{An \code{rtrack_experiment} object from
\code{\link{read_experiment}}.}
\item{strategies}{An optional \code{rtrack_strategies} object generated by
\code{\link{call_strategy}}. If present, the strategies corresponding to
the tracks in the experiment (after subsetting using the \code{tracks}
parameter) will be extracted and returned in the results.}
\item{tracks}{Which tracks should be exported. Default, "all", exports the
entire experiment object. A subset of tracks can be specified using either
numeric indices or a vector of track IDs following usual R standards.}
\item{file}{The file to which the results will be written. If \code{NULL}
(the default), the data will be returned as a
\code{\link[base]{data.frame}}.}
}
\value{
A \code{data.frame} containing the experimental groups and factors
(as supplied in the original experiment description) together with the
summary metrics. This is returned invisibly if \code{file} is specified.
}
\description{
Binds experiment data together with analysis results and optionally writes
this to file.
}
\details{
If only the results matching a thresholded subset of strategies should be
exported, then this can be achieved by performing strategy calling and
thresholding separately and passing the \code{strategies$tracks} component of
the resulting \code{rtrack_strategies} object to this function as the
parameter \code{tracks}. This will restrict the output of
\code{export_results} to only the tracks where an above-threshold strategy
has been determined.
If the parameter \code{file} is supplied, the file extension will be used to
determine which format to save the file in. The formats ".csv", ".csv2" (see
\code{\link[utils]{write.table}} for details of the formats), ".tsv" (
tab-delimited text; can also be written as ".txt" or ".tab") and ".xlsx" are
supported. If the file extension is not in this list, the data will be
written as tab-delimited text with a warning. Note that the Excel ".xlsx"
format is supported, but the older ".xls" is not.
}
\examples{
require(Rtrack)
experiment.description <- system.file("extdata", "Minimal_experiment.xlsx",
package = "Rtrack")
experiment <- read_experiment(experiment.description, format = "excel",
project.dir = system.file("extdata", "", package = "Rtrack"))
# The code below returns a data.frame.
# Use the parameter 'file' to write to a file instead.
export_results(experiment)
}
\seealso{
\code{\link{call_strategy}}, \code{\link{threshold_strategies}}.
}
|
/man/export_results.Rd
|
no_license
|
rupertoverall/Rtrack
|
R
| false
| true
| 2,792
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export_results.R
\name{export_results}
\alias{export_results}
\title{Export experiment results to a dataframe or file.}
\usage{
export_results(experiment, strategies = NULL, tracks = "all", file = NULL)
}
\arguments{
\item{experiment}{An \code{rtrack_experiment} object from
\code{\link{read_experiment}}.}
\item{strategies}{An optional \code{rtrack_strategies} object generated by
\code{\link{call_strategy}}. If present, the strategies corresponding to
the tracks in the experiment (after subsetting using the \code{tracks}
parameter) will be extracted and returned in the results.}
\item{tracks}{Which tracks should be exported. Default, "all", exports the
entire experiment object. A subset of tracks can be specified using either
numeric indices or a vector of track IDs following usual R standards.}
\item{file}{The file to which the results will be written. If \code{NULL}
(the default), the data will be returned as a
\code{\link[base]{data.frame}}.}
}
\value{
A \code{data.frame} containing the experimental groups and factors
(as supplied in the original experiment description) together with the
summary metrics. This is returned invisibly if \code{file} is specified.
}
\description{
Binds experiment data together with analysis results and optionally writes
this to file.
}
\details{
If only the results matching a thresholded subset of strategies should be
exported, then this can be achieved by performing strategy calling and
thresholding separately and passing the \code{strategies$tracks} component of
the resulting \code{rtrack_strategies} object to this function as the
parameter \code{tracks}. This will restrict the output of
\code{export_results} to only the tracks where an above-threshold strategy
has been determined.
If the parameter \code{file} is supplied, the file extension will be used to
determine which format to save the file in. The formats ".csv", ".csv2" (see
\code{\link[utils]{write.table}} for details of the formats), ".tsv" (
tab-delimited text; can also be written as ".txt" or ".tab") and ".xlsx" are
supported. If the file extension is not in this list, the data will be
written as tab-delimited text with a warning. Note that the Excel ".xlsx"
format is supported, but the older ".xls" is not.
}
\examples{
require(Rtrack)
experiment.description <- system.file("extdata", "Minimal_experiment.xlsx",
package = "Rtrack")
experiment <- read_experiment(experiment.description, format = "excel",
project.dir = system.file("extdata", "", package = "Rtrack"))
# The code below returns a data.frame.
# Use the parameter 'file' to write to a file instead.
export_results(experiment)
}
\seealso{
\code{\link{call_strategy}}, \code{\link{threshold_strategies}}.
}
|
dataPath <- system.file("test_data", package="TPP")
# Load function input:
dat2 <- readRDS(file.path(dataPath, "panobinostat_2D_normResults2.rds")) # example input from an older experiment (12 rows)
# Start tests:
# Unit tests for TPP:::obtain_fcStr_from_df_annotation, which inspects the
# "importSettings" attribute of a results table and returns the column-name
# prefix of the fold-change columns (normalized if available, otherwise
# unmodified), erroring when no matching columns exist.
test_that("find_norm_fc_col", code={
# if both options are given, pick the column prefix for normalized fold changes:
new <- TPP:::obtain_fcStr_from_df_annotation(dat2)
expect_equal(new, "norm_rel_fc_protein_")
})
test_that("find_unmod_fc_col", code={
# if slot for normalized fcstr is NULL, pick column prefix for unmodified fold changes:
datIn <- dat2
attr(datIn, "importSettings")$fcStrNorm <- NULL # set field content to null
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "rel_fc_protein_")
})
test_that("missing_attr_entry", code={
# if the fcStrNorm entry is removed from the settings list entirely,
# fall back to the column prefix for unmodified fold changes:
datIn <- dat2
attr(datIn, "importSettings")["fcStrNorm"] <- NULL # remove field completely
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "rel_fc_protein_")
})
test_that("missing_unmod_str_but_valid_norm_str", code={
# if the fcStr (unmodified) entry is missing but fcStrNorm is valid,
# pick the column prefix for normalized fold changes:
datIn <- dat2
attr(datIn, "importSettings")["fcStr"] <- NULL # remove field completely
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "norm_rel_fc_protein_")
})
test_that("invalid_norm_FC_colname", code={
# specified fcStrNorm prefix is not correct because no such column exists in table:
datIn <- dat2
attr(datIn, "importSettings")["fcStrNorm"] <- "nonsense" # this string does not exist in column names
expect_error(TPP:::obtain_fcStr_from_df_annotation(datIn))
})
test_that("invalid_unmod_FC_colname", code={
# specified fcStr prefix is not correct because no such column exists in table
# (and no normalized prefix is available to fall back on):
datIn <- dat2
attr(datIn, "importSettings")$fcStr <- "nonsense" # this string does not exist in column names
attr(datIn, "importSettings")["fcStrNorm"] <- NULL # remove field completely
expect_error(TPP:::obtain_fcStr_from_df_annotation(datIn))
})
test_that("invalid_unmod_FC_colname_but_valid_norm_FC_colname", code={
# the fcStr entry is missing, but the valid fcStrNorm prefix is used instead:
datIn <- dat2
attr(datIn, "importSettings")["fcStr"] <- NULL # remove field completely
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "norm_rel_fc_protein_")
})
test_that("missing_setting_list", code={
# the whole importSettings attribute is missing, so an error is expected:
datIn <- dat2
attr(datIn, "importSettings") <- NULL # remove field completely
expect_error(TPP:::obtain_fcStr_from_df_annotation(datIn))
})
|
/tests/testthat/test_obtain_fcStr_from_df_annotation.R
|
no_license
|
andreaschrader/TPP
|
R
| false
| false
| 2,727
|
r
|
dataPath <- system.file("test_data", package="TPP")
# Load function input:
dat2 <- readRDS(file.path(dataPath, "panobinostat_2D_normResults2.rds")) # example input from an older experiment (12 rows)
# Start tests:
test_that("find_norm_fc_col", code={
# if both options are given, pick the column prefix for normalized fold changes:
new <- TPP:::obtain_fcStr_from_df_annotation(dat2)
expect_equal(new, "norm_rel_fc_protein_")
})
test_that("find_unmod_fc_col", code={
# if slot for normalized fcstr is NULL, pick column prefix for unmodified fold changes:
datIn <- dat2
attr(datIn, "importSettings")$fcStrNorm <- NULL # set field content to null
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "rel_fc_protein_")
})
test_that("missing_attr_entry", code={
# if slot for normalized fcstr is NULL, pick column prefix for unmodified fold changes:
datIn <- dat2
attr(datIn, "importSettings")["fcStrNorm"] <- NULL # remove field completely
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "rel_fc_protein_")
})
test_that("missing_unmod_str_but_valid_norm_str", code={
# if slot for normalized fcstr is NULL, pick column prefix for unmodified fold changes:
datIn <- dat2
attr(datIn, "importSettings")["fcStr"] <- NULL # remove field completely
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "norm_rel_fc_protein_")
})
test_that("invalid_norm_FC_colname", code={
# specified fcStr prefix is not correct because no such column exists in table:
datIn <- dat2
attr(datIn, "importSettings")["fcStrNorm"] <- "nonsense" # this string does not exist in column names
expect_error(TPP:::obtain_fcStr_from_df_annotation(datIn))
})
test_that("invalid_unmod_FC_colname", code={
# specified fcStr prefix is not correct because no such column exists in table:
datIn <- dat2
attr(datIn, "importSettings")$fcStr <- "nonsense" # this string does not exist in column names
attr(datIn, "importSettings")["fcStrNorm"] <- NULL # remove field completely
expect_error(TPP:::obtain_fcStr_from_df_annotation(datIn))
})
test_that("invalid_unmod_FC_colname_but_valid_norm_FC_colname", code={
# specified fcStr prefix is not correct because no such column exists in table:
datIn <- dat2
attr(datIn, "importSettings")["fcStr"] <- NULL # remove field completely
new <- TPP:::obtain_fcStr_from_df_annotation(datIn)
expect_equal(new, "norm_rel_fc_protein_")
})
test_that("missing_setting_list", code={
# specified fcStr prefix is not correct because no such column exists in table:
datIn <- dat2
attr(datIn, "importSettings") <- NULL # remove field completely
expect_error(TPP:::obtain_fcStr_from_df_annotation(datIn))
})
|
\name{ASMap-package}
\alias{ASMap-package}
\docType{package}
\title{
Additional functions for linkage map construction and manipulation of R/qtl
objects.
}
\description{
Additional functions for linkage map construction and manipulation of R/qtl
objects. This includes extremely fast linkage map clustering and marker
ordering using MSTmap (see Wu et al., 2008).
}
\details{
\tabular{ll}{
Package: \tab ASMap\cr
Type: \tab Package\cr
Version: \tab 1.0-4\cr
Date: \tab 2018-10-24\cr
License: \tab GPL 2\cr
}
Welcome to the ASMap package!
One of the fundamental reasons why this package exists was to utilize
and implement the source code for the Minimum Spanning Tree
algorithm derived in Wu et al. (2008) (reference
below) for linkage map construction. The algorithm is lightning quick at
linkage group clustering and optimal marker ordering and can handle large numbers of
markers.
The package contains two very efficient functions, \code{mstmap.data.frame}
and \code{mstmap.cross}, that provide users with a highly flexible set
of linkage map construction methods using the MSTmap
algorithm. \code{mstmap.data.frame} constructs a linkage map from
a data frame of genetic marker data and will use the entire
contents of the object to form linkage groups and optimally order
markers within each linkage group. \code{mstmap.cross} is a
linkage map construction function for \pkg{qtl} package objects and can
be used to construct linkage maps in a flexible number of ways.
See \code{?mstmap.cross} for complete details.
To complement the computationally efficient linkage map construction
functions, the package also contains functions \code{pullCross} and
\code{pushCross} that allow the pulling/pushing markers of different
types to and from the linkage map. This system gives users the ability
to initially pull markers aside that are not needed for immediate
construction and push them back later if required. There are also
functions for fast numerical and graphical diagnosis of unconstructed
and constructed linkage maps. Specifically, there is an improved
\code{heatMap} that graphically displays pairwise recombination
fractions and LOD scores with separate legends for
each. \code{profileGen} can be used to simultaneously profile multiple
statistics such as recombination counts and double recombination
counts for individual lines across the constructed linkage
map. \code{profileMark} allows simultaneous graphical visualization of
marker or interval statistics profiles across the genome or subsetted
for a predefined set of linkage groups. Graphical identification and
orientation of linkage groups using reference linkage maps can be conducted using
\code{alignCross}. All of these graphical functions utilize the power of
the advanced graphics package \pkg{lattice} to provide seamless multiple
displays.
Other miscellaneous utilities for \pkg{qtl} objects include
\itemize{
\item \code{mergeCross}: Merging of linkage groups
\item \code{breakCross}: Breaking of linkage groups
\item \code{combineMap}: Combining linkage maps
\item \code{quickEst}: Very quick estimation of genetic map distances
\item \code{genClones}: Reporting genotype clones
\item \code{fixClones}: Consensus genotypes for clonal groups
}
A comprehensive vignette showcasing the package is now available! It
contains detailed explanations of the functions in the package and how
they can be used to perform efficient map construction. There is a fully
worked example that involves pre-construction diagnostics, linkage map
construction and post construction diagnostics. This example also shows
how functions of the package can be used for post linkage map
construction techniques such as fine mapping and combining linkage maps.
The vignette has been succinctly summarised in the Journal of
Statistical Software publication Taylor and Butler (2017) referenced
below.
}
\author{
Julian Taylor, Dave Butler, Timothy Close, Yonghui Wu, Stefano Lonardi
Maintainer: Julian Taylor <julian.taylor@adelaide.edu.au>
}
\references{
Wu, Y., Bhat, P., Close, T.J, Lonardi, S. (2008) Efficient and Accurate
Construction of Genetic Linkage Maps from Minimum Spanning Tree of a
Graph. Plos Genetics, \bold{4}, Issue 10.
Taylor, J., Butler, D. (2017) R Package ASMap: Efficient Genetic
Linkage Map Construction and Diagnosis. Journal of Statistical Software,
\bold{79}(6), 1--29.
}
\keyword{package}
\seealso{
\code{\link[qtl]{qtl-package}}
}
|
/man/ASMap-package.Rd
|
no_license
|
cran/ASMap
|
R
| false
| false
| 4,532
|
rd
|
\name{ASMap-package}
\alias{ASMap-package}
\docType{package}
\title{
Additional functions for linkage map construction and manipulation of R/qtl
objects.
}
\description{
Additional functions for linkage map construction and manipulation of R/qtl
objects. This includes extremely fast linkage map clustering and marker
ordering using MSTmap (see Wu et al., 2008).
}
\details{
\tabular{ll}{
Package: \tab ASMap\cr
Type: \tab Package\cr
Version: \tab 1.0-4\cr
Date: \tab 2018-10-24\cr
License: \tab GPL 2\cr
}
Welcome to the ASMap package!
One of the fundamental reasons why this package exists was to utilize
and implement the source code for the Minimum Spanning Tree
algorithm derived in Wu et al. (2008) (reference
below) for linkage map construction. The algorithm is lightning quick at
linkage group clustering and optimal marker ordering and can handle large numbers of
markers.
The package contains two very efficient functions, \code{mstmap.data.frame}
and \code{mstmap.cross}, that provide users with a highly flexible set
of linkage map construction methods using the MSTmap
algorithm. \code{mstmap.data.frame} constructs a linkage map from
a data frame of genetic marker data and will use the entire
contents of the object to form linkage groups and optimally order
markers within each linkage group. \code{mstmap.cross} is a
linkage map construction function for \pkg{qtl} package objects and can
be used to construct linkage maps in a flexible number of ways.
See \code{?mstmap.cross} for complete details.
To complement the computationally efficient linkage map construction
functions, the package also contains functions \code{pullCross} and
\code{pushCross} that allow the pulling/pushing markers of different
types to and from the linkage map. This system gives users the ability
to initially pull markers aside that are not needed for immediate
construction and push them back later if required. There are also
functions for fast numerical and graphical diagnosis of unconstructed
and constructed linkage maps. Specifically, there is an improved
\code{heatMap} that graphically displays pairwise recombination
fractions and LOD scores with separate legends for
each. \code{profileGen} can be used to simultaneously profile multiple
statistics such as recombination counts and double recombination
counts for individual lines across the constructed linkage
map. \code{profileMark} allows simultaneous graphical visualization of
marker or interval statistics profiles across the genome or subsetted
for a predefined set of linkage groups. Graphical identification and
orientation of linkage groups using reference linkage maps can be conducted using
\code{alignCross}. All of these graphical functions utilize the power of
the advanced graphics package \pkg{lattice} to provide seamless multiple
displays.
Other miscellaneous utilities for \pkg{qtl} objects include
\itemize{
\item \code{mergeCross}: Merging of linkage groups
\item \code{breakCross}: Breaking of linkage groups
\item \code{combineMap}: Combining linkage maps
\item \code{quickEst}: Very quick estimation of genetic map distances
\item \code{genClones}: Reporting genotype clones
\item \code{fixClones}: Consensus genotypes for clonal groups
}
A comprehensive vignette showcasing the package is now available! It
contains detailed explanations of the functions in the package and how
they can be used to perform efficient map construction. There is a fully
worked example that involves pre-construction diagnostics, linkage map
construction and post construction diagnostics. This example also shows
how functions of the package can be used for post linkage map
construction techniques such as fine mapping and combining linkage maps.
The vignette has been succinctly summarised in the Journal of
Statistical Software publication Taylor and Butler (2017) referenced
below.
}
\author{
Julian Taylor, Dave Butler, Timothy Close, Yonghui Wu, Stefano Lonardi
Maintainer: Julian Taylor <julian.taylor@adelaide.edu.au>
}
\references{
Wu, Y., Bhat, P., Close, T.J, Lonardi, S. (2008) Efficient and Accurate
Construction of Genetic Linkage Maps from Minimum Spanning Tree of a
Graph. Plos Genetics, \bold{4}, Issue 10.
Taylor, J., Butler, D. (2017) R Package ASMap: Efficient Genetic
Linkage Map Construction and Diagnosis. Journal of Statistical Software,
\bold{79}(6), 1--29.
}
\keyword{package}
\seealso{
\code{\link[qtl]{qtl-package}}
}
|
#' Plot brain parcellations
#'
#' \code{ggseg} plots and returns a ggplot object of plotted
#' aparc areas.
#' @author Athanasia Mowinckel and Didac Pineiro
#'
#' @param data A data.frame to use for plot aesthetics. Must include a
#' column called "area" corresponding to aparc areas.
#'
#' @param atlas Either a string with the name of atlas to use,
#' or a data.frame containing atlas information (i.e. pre-loaded atlas).
#' @param plot.areas Character vector, plots only areas specified in the vector.
#' @param ... other options sent to ggplot2::geom_polygon for plotting, including
#' mapping aes (cannot include x and y aesthetics).
#' @param hemisphere String to choose hemisphere to plot. Any of c("left","right")[default].
#' @param view String to choose view of the data. Any of c("lateral","medial")[default].
#' @param position String choosing how to view the data. Either "dispersed"[default] or "stacked".
#' @param adapt.scales if \code{TRUE}, then the axes will
#' be hemisphere without ticks. If \code{FALSE}, then will be latitude
#' longitude values. Also affected by \code{position} argument
#'
#' @details
#' \describe{
#'
#' \item{`dkt`}{
#' The Desikan-Killiany Cortical Atlas [default], Freesurfer cortical segmentations.}
#'
#' \item{`yeo7`}{
#' Seven resting-state networks from Yeo et al. 2011, J. Neurophysiology}
#'
#' \item{`yeo17`}{
#' Seventeen resting-state networks from Yeo et al. 2011, J. Neurophysiology}
#'
#' \item{`aseg`}{
#' Freesurfer automatic subcortical segmentation of a brain volume}
#'
#' }
#'
#' @return a ggplot object
#'
#' @import ggplot2
#' @importFrom dplyr select group_by summarise_at vars funs mutate filter left_join "%>%"
#' @importFrom stats na.omit
#'
#' @examples
#' library(ggplot2)
#' ggseg()
#' ggseg(mapping=aes(fill=area))
#' ggseg(colour="black", size=.7, mapping=aes(fill=area)) + theme_void()
#' ggseg(atlas="yeo7")
#' ggseg(adapt.scales = FALSE, position = "stacked")
#' ggseg(adapt.scales = TRUE, position = "stacked")
#' ggseg(adapt.scales = TRUE)
#' ggseg(adapt.scales = FALSE)
#'
#' @seealso [ggplot()], [aes()], [geom_polygon()], [coord_fixed()] from the ggplot2 package
#'
#' @export
ggseg = function(data = NULL, atlas = "dkt",
                 plot.areas = NULL,
                 position = "dispersed",
                 view = c("lateral", "medial", "axial", "sagittal"),
                 hemisphere = c("right", "left"),
                 adapt.scales = TRUE, ...) {

  # Resolve the atlas argument: either an already-loaded atlas data.frame,
  # or the name of a bundled atlas object fetched from the search path.
  geobrain = if (!is.character(atlas)) {
    atlas
  } else {
    get(atlas)
  }

  if (position == "stacked") {
    # Stacking is only defined for atlases made up of medial/lateral views.
    if (any(!geobrain %>% dplyr::select(side) %>% unique %>% unlist() %in% c("medial", "lateral"))) {
      # Fixed message: the check above requires medial and *lateral* views
      # (the message previously said "axial", contradicting the condition).
      stop("Cannot stack atlas. Check if atlas has medial and lateral views.")
    }

    # Bounding-box extremes per hemisphere/side, used to derive the offsets
    # that move one hemisphere on top of the other.
    stack = geobrain %>%
      dplyr::group_by(hemi, side) %>%
      dplyr::summarise_at(dplyr::vars(long, lat), dplyr::funs(min, max))

    # Pad the vertical offset slightly for short groups so the stacked
    # hemispheres do not overlap.
    stack$lat_max[1] = ifelse(stack$lat_max[1] < 4.5,
                              stack$lat_max[1] + .5,
                              stack$lat_max[1])

    geobrain = geobrain %>%
      # Shift the right hemisphere upwards so it sits above the left.
      dplyr::mutate(lat = ifelse(hemi %in% "right",
                                 lat + (stack$lat_max[1]), lat)) %>%
      # Shift the right hemisphere horizontally, swapping the positions of
      # its medial and lateral views so they align with the left hemisphere.
      dplyr::mutate(long = ifelse(hemi %in% "right" & side %in% "lateral",
                                  long - stack$long_min[3], long),
                    long = ifelse(hemi %in% "right" & side %in% "medial",
                                  long + (stack$long_min[2] - stack$long_min[4]), long)
      )
  } # if stacked

  # Keep only the requested hemispheres and views.
  geobrain = geobrain %>%
    dplyr::filter(hemi %in% hemisphere, side %in% view)

  # If data has been supplied, merge it onto the atlas polygons.
  if (!is.null(data))
    geobrain = suppressWarnings(suppressMessages(
      geobrain %>%
        dplyr::full_join(data, copy = TRUE)
    ))

  # Optionally restrict the plot to a subset of areas.
  if (!is.null(plot.areas)) {
    if (any(!plot.areas %in% geobrain$area)) {
      # Collapse the requested areas into one string so the error message
      # reads cleanly even when several (possibly misspelled) areas were
      # supplied (previously a vector here produced a vectorized message).
      stop(paste("There is no", paste(plot.areas, collapse = ", "),
                 "in", atlas, "data. Check spelling. Options are:",
                 paste0(geobrain$area %>% unique, collapse = ", ")))
    }
    geobrain = geobrain %>% dplyr::filter(area %in% plot.areas)
  }

  # Create the plot; coord_fixed keeps polygon aspect ratio undistorted.
  gg = ggplot2::ggplot(data = geobrain, ggplot2::aes(x = long, y = lat, group = group)) +
    ggplot2::geom_polygon(...) +
    ggplot2::coord_fixed()

  # Scales may be adapted for more convenient viewing.
  if (adapt.scales) {
    if (position == "stacked") {
      # Label the y axis by hemisphere and the x axis by view (side),
      # placing each label at the centroid of its group.
      pos = list(
        x = geobrain %>%
          dplyr::group_by(hemi) %>%
          dplyr::summarise(val = mean(lat)),
        y = geobrain %>%
          dplyr::group_by(side) %>%
          dplyr::summarise(val = mean(long))
      )
      gg = gg +
        ggplot2::scale_y_continuous(
          breaks = pos$x$val,
          labels = pos$x$hemi) +
        ggplot2::scale_x_continuous(
          breaks = pos$y$val,
          labels = pos$y$side
        ) +
        ggplot2::labs(x = "side", y = "hemisphere")
    } else {
      # Dispersed layout: label the x axis by hemisphere only.
      pos = geobrain %>%
        dplyr::group_by(hemi) %>%
        dplyr::summarise_at(dplyr::vars(long, lat), dplyr::funs(mean))
      gg = gg +
        ggplot2::scale_x_continuous(
          breaks = pos$long,
          labels = pos$hemi) +
        ggplot2::scale_y_continuous(breaks = NULL) +
        ggplot2::labs(y = NULL, x = "hemisphere")
    }
  }

  gg + theme_brain()
}
|
/R/ggseg.R
|
permissive
|
brennanpincardiff/ggseg
|
R
| false
| false
| 5,503
|
r
|
#' Plot brain parcellations
#'
#' \code{ggseg} plots and returns a ggplot object of plotted
#' aparc areas.
#' @author Athanasia Mowinckel and Didac Pineiro
#'
#' @param data A data.frame to use for plot aesthetics. Must include a
#' column called "area" corresponding to aparc areas.
#'
#' @param atlas Either a string with the name of atlas to use,
#' or a data.frame containing atlas information (i.e. pre-loaded atlas).
#' @param plot.areas Character vector, plots only areas specified in the vector.
#' @param ... other options sent to ggplot2::geom_polygon for plotting, including
#' mapping aes (cannot include x and y aesthetics).
#' @param hemisphere String to choose hemisphere to plot. Any of c("left","right")[default].
#' @param view String to choose view of the data. Any of c("lateral","medial")[default].
#' @param position String choosing how to view the data. Either "dispersed"[default] or "stacked".
#' @param adapt.scales if \code{TRUE}, then the axes will
#' be hemisphere without ticks. If \code{FALSE}, then will be latitude
#' longitude values. Also affected by \code{position} argument
#'
#' @details
#' \describe{
#'
#' \item{`dkt`}{
#' The Desikan-Killiany Cortical Atlas [default], Freesurfer cortical segmentations.}
#'
#' \item{`yeo7`}{
#' Seven resting-state networks from Yeo et al. 2011, J. Neurophysiology}
#'
#' \item{`yeo17`}{
#' Seventeen resting-state networks from Yeo et al. 2011, J. Neurophysiology}
#'
#' \item{`aseg`}{
#' Freesurfer automatic subcortical segmentation of a brain volume}
#'
#' }
#'
#' @return a ggplot object
#'
#' @import ggplot2
#' @importFrom dplyr select group_by summarise_at vars funs mutate filter left_join "%>%"
#' @importFrom stats na.omit
#'
#' @examples
#' library(ggplot2)
#' ggseg()
#' ggseg(mapping=aes(fill=area))
#' ggseg(colour="black", size=.7, mapping=aes(fill=area)) + theme_void()
#' ggseg(atlas="yeo7")
#' ggseg(adapt.scales = FALSE, position = "stacked")
#' ggseg(adapt.scales = TRUE, position = "stacked")
#' ggseg(adapt.scales = TRUE)
#' ggseg(adapt.scales = FALSE)
#'
#' @seealso [ggplot()], [aes()], [geom_polygon()], [coord_fixed()] from the ggplot2 package
#'
#' @export
ggseg = function(data = NULL,atlas="dkt",
plot.areas=NULL,
position="dispersed",
view=c("lateral","medial","axial","sagittal"),
hemisphere = c("right","left"),
adapt.scales=TRUE,...){
# Resolve the atlas: either an already-loaded atlas data.frame, or the
# name of a bundled atlas object fetched from the search path via get().
geobrain = if(!is.character(atlas)){
atlas
}else{
get(atlas)
}
if(position=="stacked"){
# Stacking needs an atlas made up solely of medial/lateral views.
# NOTE(review): the error message mentions "axial" but the condition checks
# for medial/lateral -- the wording looks like a typo; confirm before changing.
if(any(!geobrain %>% dplyr::select(side) %>% unique %>% unlist() %in% c("medial","lateral"))){
stop("Cannot stack atlas. Check if atlas has medial and axial views.")
}
# Per-hemisphere/side coordinate extremes, used below to compute the
# offsets that stack one hemisphere on top of the other.
stack = geobrain %>%
dplyr::group_by(hemi,side) %>%
dplyr::summarise_at(dplyr::vars(long,lat),dplyr::funs(min,max))
# Pad the vertical offset slightly for short groups so the stacked
# hemispheres do not overlap.
stack$lat_max[1] = ifelse(stack$lat_max[1] < 4.5,
stack$lat_max[1]+.5,
stack$lat_max[1])
geobrain = geobrain %>%
# Shift the right hemisphere upwards so it sits above the left.
dplyr::mutate(lat=ifelse(hemi %in% "right",
lat + (stack$lat_max[1]), lat)) %>%
# Shift the right hemisphere horizontally, swapping the positions of its
# medial and lateral views so they align with the left hemisphere.
dplyr::mutate(long=ifelse(hemi %in% "right" & side %in% "lateral" ,
long - stack$long_min[3], long),
long=ifelse(hemi %in% "right" & side %in% "medial" ,
long +(stack$long_min[2]-stack$long_min[4]), long)
)
} # If stacked
# Keep only the requested hemispheres and views.
geobrain = geobrain %>%
dplyr::filter(hemi %in% hemisphere, side %in% view)
# If data has been supplied, merge it onto the atlas polygons.
# full_join keeps atlas polygons with no matching data row.
if(!is.null(data))
geobrain = suppressWarnings(suppressMessages(
geobrain %>%
dplyr::full_join(data, copy=TRUE)
))
# Optionally restrict the plot to a subset of areas; error out with the
# list of valid area names if an unknown area was requested.
if(!is.null(plot.areas)){
if(any(!plot.areas %in% geobrain$area)){
stop(paste("There is no", plot.areas,
"in", atlas,"data. Check spelling. Options are:",
paste0(geobrain$area %>% unique,collapse=", ")))
}
geobrain = geobrain %>% dplyr::filter(area %in% plot.areas)
}
# Create the plot; coord_fixed keeps the polygon aspect ratio undistorted.
gg = ggplot2::ggplot(data = geobrain, ggplot2::aes(x=long, y=lat, group=group)) +
ggplot2::geom_polygon(...) +
ggplot2::coord_fixed()
# Scales may be adapted for more convenient viewing: numeric lat/long axes
# are replaced with hemisphere/side labels placed at group centroids.
if(adapt.scales){
if(position == "stacked"){
pos = list(
x=geobrain %>%
dplyr::group_by(hemi) %>%
dplyr::summarise(val=mean(lat)),
y=geobrain %>%
dplyr::group_by(side) %>%
dplyr::summarise(val=mean(long))
)
gg = gg +
ggplot2::scale_y_continuous(
breaks=pos$x$val,
labels=pos$x$hemi) +
ggplot2::scale_x_continuous(
breaks=pos$y$val,
labels=pos$y$side
) +
ggplot2::labs(x="side", y="hemisphere")
}else{
# Dispersed layout: label the x axis by hemisphere only.
pos = geobrain %>%
dplyr::group_by(hemi) %>%
dplyr::summarise_at(dplyr::vars(long,lat),dplyr::funs(mean))
gg = gg +
ggplot2::scale_x_continuous(
breaks=pos$long,
labels=pos$hemi) +
ggplot2::scale_y_continuous(breaks=NULL)+
ggplot2::labs(y=NULL, x="hemisphere")
}
}
gg + theme_brain()
}
|
# 3/9/2018 JHZ
# Two-sample Mendelian randomisation: MMP-12 levels -> coronary heart disease.
# NOTE(review): setwd() in a script is fragile; all paths below assume u:/work.
setwd("u:/work")
# MMP-12 GWAS summary statistics; p-values are recovered from logP and the
# sample size is fixed at 3400.
mmp12 <- read.table("MMP12.dat",as.is=TRUE, col.names=c("MarkerName", "Allele1", "Allele2", "Effect", "StdErr", "logP"))
mmp12 <- within(mmp12, {phen <- "MMP12";P <- 10^logP;N <- 3400})
library(TwoSampleMR)
# Format the exposure data for TwoSampleMR.
# NOTE(review): eaf_col = "effect_allele_freq" is not a column created above --
# confirm whether allele frequencies were expected in MMP12.dat.
exposure_dat <- format_data(mmp12, type="exposure", snp_col = "MarkerName", effect_allele_col = "Allele1", other_allele_col = "Allele2",
eaf_col = "effect_allele_freq", beta_col = "Effect", se_col = "StdErr", pval_col = "P", samplesize_col = "N")
# List available outcomes and display the CARDIoGRAMplusC4D entries;
# outcome id 7 is used in the extraction below.
ao <- available_outcomes()
subset(ao,consortium=="CARDIoGRAMplusC4D")
# Pull outcome associations (allowing LD proxies) and harmonise alleles
# (action = 2 drops ambiguous palindromic SNPs where needed).
outcome_dat <- extract_outcome_data(exposure_dat$SNP, 7, proxies = 1, rsq = 0.8, align_alleles = 1, palindromes = 1, maf_threshold =
0.3)
dat <- harmonise_data(exposure_dat, outcome_dat, action = 2)
# MR estimates plus heterogeneity / pleiotropy diagnostics.
res_mr <- mr(dat)
mr_heterogeneity(dat)
mr_pleiotropy_test(dat)
res_single <- mr_singlesnp(dat)
res_loo <- mr_leaveoneout(dat)
# Diagnostic plots collected into a single PDF.
pdf("MMP12-MR.pdf")
mr_scatter_plot(res_mr, dat)
mr_forest_plot(res_single)
mr_leaveoneout_plot(res_loo)
mr_funnel_plot(res_single)
# Repeat the analysis with the MendelianRandomization package.
library(MendelianRandomization)
MRInputObject <- with(dat, mr_input(bx = beta.exposure, bxse = se.exposure, by = beta.outcome, byse = se.outcome,
exposure = "MMP-12", outcome = "Coronary heart disease", snps = SNP))
mr_ivw(MRInputObject, model = "default", robust = FALSE, penalized = FALSE, weights = "simple", distribution = "normal", alpha = 0.05)
mr_egger(MRInputObject, robust = FALSE, penalized = FALSE, distribution = "normal", alpha = 0.05)
mr_maxlik(MRInputObject, model = "default", distribution = "normal", alpha = 0.05)
mr_median(MRInputObject, weighting = "weighted", distribution = "normal", alpha = 0.05, iterations = 10000, seed = 314159265)
mr_allmethods(MRInputObject, method = "all")
mr_plot(MRInputObject, error = TRUE, orientate = FALSE, interactive = TRUE, labels = TRUE, line = "ivw")
dev.off()
|
/CAD/MMP12.R
|
no_license
|
jinghuazhao/Omics-analysis
|
R
| false
| false
| 1,900
|
r
|
# 3/9/2018 JHZ
# Two-sample Mendelian randomisation of MMP-12 levels on coronary heart disease.
setwd("u:/work")

# MMP-12 GWAS summary statistics; recover p-values from logP, fix N at 3400.
mmp12_sumstats <- read.table(
  "MMP12.dat", as.is = TRUE,
  col.names = c("MarkerName", "Allele1", "Allele2", "Effect", "StdErr", "logP")
)
mmp12_sumstats <- within(mmp12_sumstats, {phen <- "MMP12"; P <- 10^logP; N <- 3400})

library(TwoSampleMR)

# Format the exposure, list/inspect CARDIoGRAMplusC4D outcomes (id 7 is used),
# extract outcome associations with LD proxies, then harmonise alleles.
exposure_dat <- format_data(
  mmp12_sumstats, type = "exposure",
  snp_col = "MarkerName", effect_allele_col = "Allele1", other_allele_col = "Allele2",
  eaf_col = "effect_allele_freq", beta_col = "Effect", se_col = "StdErr",
  pval_col = "P", samplesize_col = "N"
)
ao <- available_outcomes()
subset(ao, consortium == "CARDIoGRAMplusC4D")
outcome_dat <- extract_outcome_data(
  exposure_dat$SNP, 7, proxies = 1, rsq = 0.8,
  align_alleles = 1, palindromes = 1, maf_threshold = 0.3
)
dat <- harmonise_data(exposure_dat, outcome_dat, action = 2)

# MR estimates plus heterogeneity / pleiotropy diagnostics.
res_mr <- mr(dat)
mr_heterogeneity(dat)
mr_pleiotropy_test(dat)
res_single <- mr_singlesnp(dat)
res_loo <- mr_leaveoneout(dat)

# Diagnostic plots collected into a single PDF.
pdf("MMP12-MR.pdf")
mr_scatter_plot(res_mr, dat)
mr_forest_plot(res_single)
mr_leaveoneout_plot(res_loo)
mr_funnel_plot(res_single)

# Repeat the analysis with the MendelianRandomization package.
library(MendelianRandomization)
mr_in <- with(dat, mr_input(
  bx = beta.exposure, bxse = se.exposure,
  by = beta.outcome, byse = se.outcome,
  exposure = "MMP-12", outcome = "Coronary heart disease", snps = SNP
))
mr_ivw(mr_in, model = "default", robust = FALSE, penalized = FALSE,
       weights = "simple", distribution = "normal", alpha = 0.05)
mr_egger(mr_in, robust = FALSE, penalized = FALSE, distribution = "normal", alpha = 0.05)
mr_maxlik(mr_in, model = "default", distribution = "normal", alpha = 0.05)
mr_median(mr_in, weighting = "weighted", distribution = "normal", alpha = 0.05,
          iterations = 10000, seed = 314159265)
mr_allmethods(mr_in, method = "all")
mr_plot(mr_in, error = TRUE, orientate = FALSE, interactive = TRUE,
        labels = TRUE, line = "ivw")
dev.off()
|
library(forecast)
library(zoo)
# Monthly Amtrak ridership, Jan 1991 - Mar 2004.
Amtrak.data <- read.csv("Amtrak data.csv")
ridership.ts <- ts(Amtrak.data$Ridership, start = c(1991, 1), end = c(2004, 3), freq = 12)
# Figure 5-4: the raw series next to its lag-12 (seasonal) difference, lag-1
# difference and twice-differenced versions, on a 2x2 panel.
par(mfrow = c(2,2))
plot(ridership.ts, ylab = "Ridership", xlab = "Time", bty = "l", xlim = c(1991,2004.25), main = "Ridership")
plot(diff(ridership.ts, lag = 12), ylab = "Lag-12", xlab = "Time", bty = "l", xlim = c(1991,2004.25), main = "Lag-12 Difference")
plot(diff(ridership.ts, lag = 1), ylab = "Lag-1", xlab = "Time", bty = "l", xlim = c(1991,2004.25), main = "Lag-1 Difference")
plot(diff(diff(ridership.ts, lag = 12), lag = 1), ylab = "Lag-12, then Lag-1", xlab = "Time", bty = "l", xlim = c(1991,2004.25), main = "Twice-Differenced (Lag-12, Lag-1)")
dev.off()
# Fit a linear trend to the deseasonalised (lag-12 differenced) series.
ridership.deseasonalized <- diff(ridership.ts, lag = 12)
summary(tslm(ridership.deseasonalized ~ trend))
|
/samples/Amtrak Fig 5-4.R
|
permissive
|
bmoretz/Time-Series-Forecasting
|
R
| false
| false
| 855
|
r
|
library(forecast)
library(zoo)

# Monthly Amtrak ridership, Jan 1991 - Mar 2004.
Amtrak.data <- read.csv("Amtrak data.csv")
ridership.ts <- ts(Amtrak.data$Ridership, start = c(1991, 1), end = c(2004, 3), freq = 12)

# Figure 5-4: raw series alongside lag-12, lag-1 and twice-differenced series.
# Small helper so each panel shares the common axis settings.
plot_panel <- function(series, ylab, main) {
  plot(series, ylab = ylab, xlab = "Time", bty = "l",
       xlim = c(1991, 2004.25), main = main)
}
par(mfrow = c(2, 2))
plot_panel(ridership.ts, "Ridership", "Ridership")
plot_panel(diff(ridership.ts, lag = 12), "Lag-12", "Lag-12 Difference")
plot_panel(diff(ridership.ts, lag = 1), "Lag-1", "Lag-1 Difference")
plot_panel(diff(diff(ridership.ts, lag = 12), lag = 1),
           "Lag-12, then Lag-1", "Twice-Differenced (Lag-12, Lag-1)")
dev.off()

# Fit a linear trend to the deseasonalised (lag-12 differenced) series.
ridership.deseasonalized <- diff(ridership.ts, lag = 12)
summary(tslm(ridership.deseasonalized ~ trend))
|
library(edgeR)
library(limma)
library(Glimma)

# Build interactive Glimma MA- and volcano plots for an edgeR differential
# expression contrast (sample1 vs sample2) of a Trinity raw-count matrix.

# DE results and raw counts, both ordered by row name so the rows align.
DE_results <- read.delim("Trans.raw.counts.matrix.sample1_vs_sample2.edgeR.DE_results",
                         row.names = 1, stringsAsFactors = FALSE, check.names = FALSE)
DE_results <- DE_results[order(row.names(DE_results)), ]
rawcounts <- read.table("Trans.raw.counts.matrix", header = TRUE, row.names = 1,
                        comment.char = '', check.names = FALSE)
rawcounts <- rawcounts[order(row.names(rawcounts)), ]

# Sample sheet: one row per sample ("Group", "Sample"); '#' lines are comments.
samples <- read.delim("sample.txt", header = FALSE, stringsAsFactors = FALSE,
                      comment.char = '#', check.names = FALSE)
colnames(samples) <- c("Group", "Sample")
#rawcounts <- rawcounts[,samples$Sample]

# Replace exact zeros with a tiny pseudocount.
# NOTE(review): edgeR normally expects integer counts; confirm the pseudocount
# is intended rather than the (commented-out) rounding/filtering alternatives.
rawcounts[rawcounts == 0] <- 0.000000001
rnaseqMatrix = rawcounts
#rnaseqMatrix = round(rawcounts)
#rnaseqMatrix = rnaseqMatrix[rowSums(rnaseqMatrix)>=2,]

# edgeR normalisation with a fixed common dispersion of 0.1, set both before
# the common and tagwise dispersion estimates (mirrors the original run).
conditions = factor(samples$Group)
exp_study = DGEList(counts = rnaseqMatrix, group = conditions)
exp_study = calcNormFactors(exp_study)
exp_study$common.dispersion = 0.1
exp_study = estimateCommonDisp(exp_study)
exp_study$common.dispersion = 0.1
exp_study = estimateTagwiseDisp(exp_study)

# CPM table restricted to the genes present in the DE results.
cpm_table <- cpm(exp_study)
cpm_table <- as.data.frame(cpm_table)
cpm_table = cpm_table[rownames(DE_results), ]

### Optional: merge the DE_results and cpm_table and save to CSV.
cpm.DE <- cbind(cpm_table, DE_results)
write.csv(cpm.DE, file = "cpm.DE_results.csv")

### Annotate genes and flag significance (FDR < 0.05) for plot colouring.
### (A dead self-assignment of DE_results$logCPM was removed here.)
DE_results$GeneID <- rownames(DE_results)
DE_results$Sig <- as.numeric(DE_results$FDR < 0.05)

### Make the Glimma MA-plot.
glMDPlot(DE_results, counts = cpm_table, samples = samples$Sample,
         anno = cpm.DE, groups = samples$Group,
         xval = "logCPM", yval = "logFC",
         display.columns = colnames(DE_results),
         folder = "glimma_MA", html = "MA-plot",
         search.by = "GeneID", status = DE_results$Sig, cols = c("black", "red"), launch = FALSE)

### Make the Glimma volcano plot.
glXYPlot(x = DE_results$logFC, y = -log10(DE_results$FDR), counts = cpm_table, samples = samples$Sample,
         anno = DE_results, groups = samples$Group,
         xlab = "log2(FC)", ylab = "-log10(FDR)",
         display.columns = colnames(DE_results),
         folder = "glimma_Volcano", html = "Volcano-plot",
         search.by = "GeneID", status = DE_results$Sig, cols = c("black", "red"), launch = FALSE)
|
/script_backup/Trinity/Rscript/glimma.R
|
no_license
|
kent5438/sharing-github
|
R
| false
| false
| 2,211
|
r
|
library(edgeR)
library(limma)
library(Glimma)
# Build interactive Glimma MA- and volcano plots for an edgeR differential
# expression contrast (sample1 vs sample2) of a Trinity raw-count matrix.
# DE results and raw counts, both ordered by row name so the rows align.
DE_results <- read.delim("Trans.raw.counts.matrix.sample1_vs_sample2.edgeR.DE_results", row.names=1, stringsAsFactors=FALSE, check.names=F)
DE_results <- DE_results[order(row.names(DE_results)),]
rawcounts <- read.table("Trans.raw.counts.matrix", header=T, row.names=1, com='', check.names=F)
rawcounts <- rawcounts[order(row.names(rawcounts)),]
# Sample sheet: one row per sample ("Group", "Sample"); '#' lines are comments.
samples <- read.delim("sample.txt", header=FALSE, stringsAsFactors=FALSE, com='#', check.names=F)
colnames(samples) <- c("Group","Sample")
#rawcounts <- rawcounts[,samples$Sample]
# Replace exact zeros with a tiny pseudocount.
# NOTE(review): edgeR normally expects integer counts; confirm the pseudocount
# is intended rather than the (commented-out) rounding/filtering alternatives.
rawcounts[rawcounts ==0] <- 0.000000001
rnaseqMatrix = rawcounts
#rnaseqMatrix = round(rawcounts)
#rnaseqMatrix = rnaseqMatrix[rowSums(rnaseqMatrix)>=2,]
# edgeR normalisation with a fixed common dispersion of 0.1, set both before
# the common and tagwise dispersion estimates.
conditions = factor(samples$Group)
exp_study = DGEList(counts=rnaseqMatrix, group=conditions)
exp_study = calcNormFactors(exp_study)
exp_study$common.dispersion=0.1
exp_study = estimateCommonDisp(exp_study)
exp_study$common.dispersion=0.1
exp_study = estimateTagwiseDisp(exp_study)
# CPM table restricted to the genes present in the DE results.
cpm_table <- cpm(exp_study)
cpm_table <- as.data.frame(cpm_table)
cpm_table = cpm_table[rownames(DE_results),]
### Optional: merge the DE_results and cpm_table and save to CSV.
cpm.DE <- cbind(cpm_table, DE_results)
write.csv(cpm.DE, file="cpm.DE_results.csv")
### Annotate genes and flag significance (FDR < 0.05) for plot colouring.
DE_results$GeneID <- rownames(DE_results)
# NOTE(review): the next line is a no-op self-assignment and can be removed.
DE_results$logCPM <- DE_results$logCPM
DE_results$Sig <- as.numeric(DE_results$FDR < 0.05)
### Make the Glimma MA-plot
glMDPlot(DE_results, counts=cpm_table, samples=samples$Sample,
anno=cpm.DE, groups=samples$Group,
xval="logCPM", yval="logFC",
display.columns=colnames(DE_results),
folder="glimma_MA", html="MA-plot",
search.by="GeneID", status=DE_results$Sig, cols=c("black","red"), launch=FALSE)
### Make the Glimma Volcano plot
glXYPlot(x=DE_results$logFC, y=-log10(DE_results$FDR), counts=cpm_table, samples=samples$Sample,
anno=DE_results, groups=samples$Group,
xlab="log2(FC)", ylab="-log10(FDR)",
display.columns=colnames(DE_results),
folder="glimma_Volcano", html="Volcano-plot",
search.by="GeneID", status=DE_results$Sig, cols=c("black","red"), launch=FALSE)
|
\name{fread}
\alias{fread}
\title{ Fast and friendly file finagler }
\description{
Similar to \code{read.table} but faster and more convenient. All controls such as \code{sep}, \code{colClasses} and \code{nrows} are automatically detected. \code{bit64::integer64} types are also detected and read directly without needing to read as character before converting.
Dates are read as character currently. They can be converted afterwards using the excellent \code{fasttime} package or standard base functions.
`fread` is for \emph{regular} delimited files; i.e., where every row has the same number of columns. In future, secondary separator (\code{sep2}) may be specified \emph{within} each column. Such columns will be read as type \code{list} where each cell is itself a vector.
}
\usage{
fread(input, sep="auto", sep2="auto", nrows=-1L, header="auto", na.strings="NA",
stringsAsFactors=FALSE, verbose=getOption("datatable.verbose"), autostart=1L,
skip=0L, select=NULL, drop=NULL, colClasses=NULL,
integer64=getOption("datatable.integer64"), # default: "integer64"
dec=if (sep!=".") "." else ",",
check.names=FALSE, encoding="unknown", quote="\"",
showProgress=getOption("datatable.showProgress"), # default: TRUE
data.table=getOption("datatable.fread.datatable") # default: TRUE
)
}
\arguments{
\item{input}{ Either the file name to read (containing no \\n character), a shell command that preprocesses the file (e.g. \code{fread("grep blah filename"))} or the input itself as a string (containing at least one \\n), see examples. In both cases, a length 1 character string. A filename input is passed through \code{\link[base]{path.expand}} for convenience and may be a URL starting http:// or file://. }
\item{sep}{ The separator between columns. Defaults to the first character in the set [\code{,\\t |;:}] that exists on line \code{autostart} outside quoted (\code{""}) regions, and separates the rows above \code{autostart} into a consistent number of fields, too. }
\item{sep2}{ The separator \emph{within} columns. A \code{list} column will be returned where each cell is a vector of values. This is much faster using less working memory than \code{strsplit} afterwards or similar techniques. For each column \code{sep2} can be different and is the first character in the same set above [\code{,\\t |;:}], other than \code{sep}, that exists inside each field outside quoted regions on line \code{autostart}. NB: \code{sep2} is not yet implemented. }
\item{nrows}{ The number of rows to read, by default -1 means all. Unlike \code{read.table}, it doesn't help speed to set this to the number of rows in the file (or an estimate), since the number of rows is automatically determined and is already fast. Only set \code{nrows} if you require the first 10 rows, for example. `nrows=0` is a special case that just returns the column names and types; e.g., a dry run for a large file or to quickly check format consistency of a set of files before starting to read any. }
\item{header}{ Does the first data line contain column names? Defaults according to whether every non-empty field on the first data line is type character. If so, or TRUE is supplied, any empty column names are given a default name. }
\item{na.strings}{ A character vector of strings which are to be interpreted as \code{NA} values. By default \code{",,"} for columns read as type character is read as a blank string (\code{""}) and \code{",NA,"} is read as \code{NA}. Typical alternatives might be \code{na.strings=NULL} (no coercion to NA at all!) or perhaps \code{na.strings=c("NA","N/A","null")}. }
\item{stringsAsFactors}{ Convert all character columns to factors? }
\item{verbose}{ Be chatty and report timings? }
\item{autostart}{ Any line number within the region of machine readable delimited text, by default 30. If the file is shorter or this line is empty (e.g. short files with trailing blank lines) then the last non empty line (with a non empty line above that) is used. This line and the lines above it are used to auto detect \code{sep}, \code{sep2} and the number of fields. It's extremely unlikely that \code{autostart} should ever need to be changed, we hope. }
\item{skip}{ If -1 (default) use the procedure described below starting on line \code{autostart} to find the first data row. \code{skip>=0} means ignore \code{autostart} and take line \code{skip+1} as the first data row (or column names according to header="auto"|TRUE|FALSE as usual). \code{skip="string"} searches for \code{"string"} in the file (e.g. a substring of the column names row) and starts on that line (inspired by read.xls in package gdata). }
\item{select}{ Vector of column names or numbers to keep, drop the rest. }
\item{drop}{ Vector of column names or numbers to drop, keep the rest. }
\item{colClasses}{ A character vector of classes (named or unnamed), as read.csv. Or a named list of vectors of column names or numbers, see examples. colClasses in fread is intended for rare overrides, not for routine use. fread will only promote a column to a higher type if colClasses requests it. It won't downgrade a column to a lower type since NAs would result. You have to coerce such columns afterwards yourself, if you really require data loss. }
\item{integer64}{ "integer64" (default) reads columns detected as containing integers larger than 2^31 as type \code{bit64::integer64}. Alternatively, \code{"double"|"numeric"} reads as \code{base::read.csv} does; i.e., possibly with loss of precision and if so silently. Or, "character". }
\item{dec}{ The decimal separator as in \code{base::read.csv}. If not "." (default) then usually ",". See details. }
\item{check.names}{ default is \code{FALSE}. If \code{TRUE}, it uses the base function \code{\link{make.unique}} to ensure that column names are all unique.}
\item{quote}{single character value, e.g., \code{"\""} (default) or empty string (\code{""}).
\emph{Single character value:} \code{character} columns can be quoted by the character specified in \code{quote}, e.g., \code{...,2,"Joe Bloggs",3.14,...} or not quoted, e.g., \code{...,2,Joe Bloggs,3.14,...}.
  Spaces and other whitespace (other than \code{sep} and \code{\\n}) may appear in an unquoted character field. In essence quoting character fields are \emph{required} only if \code{sep} or \code{\\n} appears in the string value. Quoting may be used to signify that numeric data should be read as text. A quoted field must start with quote and end with a quote that is also immediately followed by \code{sep} or \code{\\n}. Thus, unescaped quotes may be present in a quoted field, e.g., \code{...,2,"Joe, "Bloggs"",3.14,...}, as well as escaped quotes, e.g., \code{...,2,"Joe \",Bloggs\"",3.14,...}. If an embedded quote is followed by the separator inside a quoted field, the embedded quotes up to that point in that field must be balanced; e.g. \code{...,2,"www.blah?x="one",y="two"",3.14,...}.
  \emph{Empty string:} To disable quoting set \code{quote=""}. This is particularly useful in handling rare cases such as malformed quotes in columns, e.g., \code{...,2.3,""qq,...} or where a single double quote has a meaning, e.g., \code{...,2.3,"at the 5\" end of the gene",...} etc. \code{sep} or \code{\\n} are not escaped within quotes and could therefore lead to undesirable behaviour or \code{warnings/errors} if present.
}
\item{encoding}{ default is \code{"unknown"}. Other possible options are \code{"UTF-8"} and \code{"Latin-1"}. }
\item{showProgress}{ TRUE displays progress on the console using \code{\\r}. It is produced in fread's C code where the very nice (but R level) txtProgressBar and tkProgressBar are not easily available. }
\item{data.table}{ TRUE returns a \code{data.table}. FALSE returns a \code{data.frame}. }
}
\details{
Once the separator is found on line \code{autostart}, the number of columns is determined. Then the file is searched backwards from \code{autostart} until a row is found that doesn't have that number of columns. Thus, the first data row is found and any human readable banners are automatically skipped. This feature can be particularly useful for loading a set of files which may not all have consistently sized banners. Setting \code{skip>0} overrides this feature by setting \code{autostart=skip+1} and turning off the search upwards step.
The first 5 rows, middle 5 rows and last 5 rows are then read to determine column types. The lowest type for each column is chosen from the ordered list \code{integer}, \code{integer64}, \code{double}, \code{character}. This enables \code{fread} to allocate exactly the right number of rows, with columns of the right type, up front once. The file may of course \emph{still} contain data of a different type in rows other than first, middle and last 5. In that case, the column types are bumped mid read and the data read on previous rows is coerced. Setting \code{verbose=TRUE} reports the line and field number of each mid read type bump, and how long this type bumping took (if any).
There is no line length limit, not even a very large one. Since we are encouraging \code{list} columns (i.e. \code{sep2}) this has the potential to encourage longer line lengths. So the approach of scanning each line into a buffer first and then rescanning that buffer is not used. There are no buffers used in \code{fread}'s C code at all. The field width limit is limited by R itself: the maximum width of a character string (currently 2^31-1 bytes, 2GB).
The filename extension (such as .csv) is irrelevant for "auto" \code{sep} and \code{sep2}. Separator detection is entirely driven by the file contents. This can be useful when loading a set of different files which may not be named consistently, or may not have the extension .csv despite being csv. Some datasets have been collected over many years, one file per day for example. Sometimes the file name format has changed at some point in the past or even the format of the file itself. So the idea is that you can loop \code{fread} through a set of files and as long as each file is regular and delimited, \code{fread} can read them all. Whether they all stack is another matter but at least each one is read quickly without you needing to vary \code{colClasses} in \code{read.table} or \code{read.csv}.
All known line endings are detected automatically: \code{\\n} (*NIX including Mac), \code{\\r\\n} (Windows CRLF), \code{\\r} (old Mac) and \code{\\n\\r} (just in case). There is no need to convert input files first. \code{fread} running on any architecture will read a file from any architecture. Both \code{\\r} and \code{\\n} may be embedded in character strings (including column names) provided the field is quoted.
If an empty line is encountered then reading stops there, with warning if any text exists after the empty line such as a footer. The first line of any text discarded is included in the warning message.
On Windows, \code{fread(...,dec=",")} should just work. \code{fread} uses C function \code{strtod} to read numeric data; e.g., \code{1.23} or \code{1,23}. \code{strtod} retrieves the decimal separator (\code{.} or \code{,} usually) from the locale of the R session rather than as an argument passed to the \code{strtod} function. So for \code{fread(...,dec=",")} to work, \code{fread} changes this (and only this) R session's locale temporarily to a locale which provides the desired decimal separator. On Windows, "French_France.1252" is tried which should be available as standard (any locale with comma decimal separator would suffice) and on unix "fr_FR.utf8" (you may need to install this locale on unix). \code{fread()} is very careful to set the locale back again afterwards, even if the function fails with an error. The choice of locale is determined by \code{options()$datatable.fread.dec.locale}. This may be a \emph{vector} of locale names and if so they will be tried in turn until the desired \code{dec} is obtained; thus allowing more than two different decimal separators to be selected. This is a new feature in v1.9.6 and is experimental. In case of problems, turn it off with \code{options(datatable.fread.dec.experiment=FALSE)}.
}
\value{
A \code{data.table} by default. A \code{data.frame} when \code{data.table=FALSE}; e.g. \code{options(datatable.fread.datatable=FALSE)}.
}
\references{
Background :\cr
\url{http://cran.r-project.org/doc/manuals/R-data.html}\cr
\url{http://stackoverflow.com/questions/1727772/quickly-reading-very-large-tables-as-dataframes-in-r}\cr
\url{www.biostat.jhsph.edu/~rpeng/docs/R-large-tables.html}\cr
\url{https://stat.ethz.ch/pipermail/r-help/2007-August/138315.html}\cr
\url{http://www.cerebralmastication.com/2009/11/loading-big-data-into-r/}\cr
\url{http://stackoverflow.com/questions/9061736/faster-than-scan-with-rcpp}\cr
\url{http://stackoverflow.com/questions/415515/how-can-i-read-and-manipulate-csv-file-data-in-c}\cr
\url{http://stackoverflow.com/questions/9352887/strategies-for-reading-in-csv-files-in-pieces}\cr
\url{http://stackoverflow.com/questions/11782084/reading-in-large-text-files-in-r}\cr
\url{http://stackoverflow.com/questions/45972/mmap-vs-reading-blocks}\cr
\url{http://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access}\cr
\url{http://stackoverflow.com/a/9818473/403310}\cr
\url{http://stackoverflow.com/questions/9608950/reading-huge-files-using-memory-mapped-files}
finagler = "to get or achieve by guile or manipulation" \url{http://dictionary.reference.com/browse/finagler}
}
\seealso{ \code{\link[utils]{read.csv}}, \code{\link[base]{url}}, \code{\link[base]{Sys.setlocale}}
}
\examples{
\dontrun{
# Demo speedup
n=1e6
DT = data.table( a=sample(1:1000,n,replace=TRUE),
b=sample(1:1000,n,replace=TRUE),
c=rnorm(n),
d=sample(c("foo","bar","baz","qux","quux"),n,replace=TRUE),
e=rnorm(n),
f=sample(1:1000,n,replace=TRUE) )
DT[2,b:=NA_integer_]
DT[4,c:=NA_real_]
DT[3,d:=NA_character_]
DT[5,d:=""]
DT[2,e:=+Inf]
DT[3,e:=-Inf]
write.table(DT,"test.csv",sep=",",row.names=FALSE,quote=FALSE)
cat("File size (MB):", round(file.info("test.csv")$size/1024^2),"\n")
# 50 MB (1e6 rows x 6 columns)
system.time(DF1 <-read.csv("test.csv",stringsAsFactors=FALSE))
# 60 sec (first time in fresh R session)
system.time(DF1 <- read.csv("test.csv",stringsAsFactors=FALSE))
# 30 sec (immediate repeat is faster, varies)
system.time(DF2 <- read.table("test.csv",header=TRUE,sep=",",quote="",
stringsAsFactors=FALSE,comment.char="",nrows=n,
colClasses=c("integer","integer","numeric",
"character","numeric","integer")))
# 10 sec (consistently). All known tricks and known nrows, see references.
require(data.table)
system.time(DT <- fread("test.csv"))
# 3 sec (faster and friendlier)
require(sqldf)
system.time(SQLDF <- read.csv.sql("test.csv",dbname=NULL))
# 20 sec (friendly too, good defaults)
require(ff)
system.time(FFDF <- read.csv.ffdf(file="test.csv",nrows=n))
# 20 sec (friendly too, good defaults)
identical(DF1,DF2)
all.equal(as.data.table(DF1), DT)
identical(DF1,within(SQLDF,{b<-as.integer(b);c<-as.numeric(c)}))
identical(DF1,within(as.data.frame(FFDF),d<-as.character(d)))
# Scaling up ...
l = vector("list",10)
for (i in 1:10) l[[i]] = DT
DTbig = rbindlist(l)
tables()
write.table(DTbig,"testbig.csv",sep=",",row.names=FALSE,quote=FALSE)
# 500MB (10 million rows x 6 columns)
system.time(DF <- read.table("testbig.csv",header=TRUE,sep=",",
quote="",stringsAsFactors=FALSE,comment.char="",nrows=1e7,
colClasses=c("integer","integer","numeric",
"character","numeric","integer")))
# 100-200 sec (varies)
system.time(DT <- fread("testbig.csv"))
# 30-40 sec
all(mapply(all.equal, DF, DT))
# Real data example (Airline data)
# http://stat-computing.org/dataexpo/2009/the-data.html
download.file("http://stat-computing.org/dataexpo/2009/2008.csv.bz2",
destfile="2008.csv.bz2")
# 109MB (compressed)
system("bunzip2 2008.csv.bz2")
# 658MB (7,009,728 rows x 29 columns)
colClasses = sapply(read.csv("2008.csv",nrows=100),class)
# 4 character, 24 integer, 1 logical. Incorrect.
colClasses = sapply(read.csv("2008.csv",nrows=200),class)
# 5 character, 24 integer. Correct. Might have missed data only using 100 rows
# since read.table assumes colClasses is correct.
system.time(DF <- read.table("2008.csv", header=TRUE, sep=",",
quote="",stringsAsFactors=FALSE,comment.char="",nrows=7009730,
    colClasses=colClasses))
# 360 secs
system.time(DT <- fread("2008.csv"))
# 40 secs
table(sapply(DT,class))
# 5 character and 24 integer columns. Correct without needing to worry about colClasses
# issue above.
# Reads URLs directly :
fread("http://www.stats.ox.ac.uk/pub/datasets/csb/ch11b.dat")
}
# Reads text input directly :
fread("A,B\n1,2\n3,4")
# Reads pasted input directly :
fread("A,B
1,2
3,4
")
# Finds the first data line automatically :
fread("
This is perhaps a banner line or two or ten.
A,B
1,2
3,4
")
# Detects whether column names are present automatically :
fread("
1,2
3,4
")
# Numerical precision :
DT = fread("A\n1.010203040506070809010203040506\n") # silent loss of precision
DT[,sprintf("\%.15E",A)] # stored accurately as far as double precision allows
DT = fread("A\n1.46761e-313\n") # detailed warning about ERANGE; read as 'numeric'
DT[,sprintf("\%.15E",A)] # beyond what double precision can store accurately to 15 digits
# For greater accuracy use colClasses to read as character, then package Rmpfr.
# colClasses
data = "A,B,C,D\n1,3,5,7\n2,4,6,8\n"
fread(data, colClasses=c(B="character",C="character",D="character")) # as read.csv
fread(data, colClasses=list(character=c("B","C","D"))) # saves typing
fread(data, colClasses=list(character=2:4)) # same using column numbers
# drop
fread(data, colClasses=c("B"="NULL","C"="NULL")) # as read.csv
fread(data, colClasses=list(NULL=c("B","C"))) #
fread(data, drop=c("B","C")) # same but less typing, easier to read
fread(data, drop=2:3) # same using column numbers
# select
# (in read.csv you need to work out which to drop)
fread(data, select=c("A","D")) # less typing, easier to read
fread(data, select=c(1,4)) # same using column numbers
}
\keyword{ data }
|
/man/fread.Rd
|
no_license
|
vmugue/data.table
|
R
| false
| false
| 18,403
|
rd
|
\name{fread}
\alias{fread}
\title{ Fast and friendly file finagler }
\description{
Similar to \code{read.table} but faster and more convenient. All controls such as \code{sep}, \code{colClasses} and \code{nrows} are automatically detected. \code{bit64::integer64} types are also detected and read directly without needing to read as character before converting.
Dates are read as character currently. They can be converted afterwards using the excellent \code{fasttime} package or standard base functions.
`fread` is for \emph{regular} delimited files; i.e., where every row has the same number of columns. In future, secondary separator (\code{sep2}) may be specified \emph{within} each column. Such columns will be read as type \code{list} where each cell is itself a vector.
}
\usage{
fread(input, sep="auto", sep2="auto", nrows=-1L, header="auto", na.strings="NA",
stringsAsFactors=FALSE, verbose=getOption("datatable.verbose"), autostart=1L,
skip=0L, select=NULL, drop=NULL, colClasses=NULL,
integer64=getOption("datatable.integer64"), # default: "integer64"
dec=if (sep!=".") "." else ",",
check.names=FALSE, encoding="unknown", quote="\"",
showProgress=getOption("datatable.showProgress"), # default: TRUE
data.table=getOption("datatable.fread.datatable") # default: TRUE
)
}
\arguments{
\item{input}{ Either the file name to read (containing no \\n character), a shell command that preprocesses the file (e.g. \code{fread("grep blah filename"))} or the input itself as a string (containing at least one \\n), see examples. In both cases, a length 1 character string. A filename input is passed through \code{\link[base]{path.expand}} for convenience and may be a URL starting http:// or file://. }
\item{sep}{ The separator between columns. Defaults to the first character in the set [\code{,\\t |;:}] that exists on line \code{autostart} outside quoted (\code{""}) regions, and separates the rows above \code{autostart} into a consistent number of fields, too. }
\item{sep2}{ The separator \emph{within} columns. A \code{list} column will be returned where each cell is a vector of values. This is much faster using less working memory than \code{strsplit} afterwards or similar techniques. For each column \code{sep2} can be different and is the first character in the same set above [\code{,\\t |;:}], other than \code{sep}, that exists inside each field outside quoted regions on line \code{autostart}. NB: \code{sep2} is not yet implemented. }
\item{nrows}{ The number of rows to read, by default -1 means all. Unlike \code{read.table}, it doesn't help speed to set this to the number of rows in the file (or an estimate), since the number of rows is automatically determined and is already fast. Only set \code{nrows} if you require the first 10 rows, for example. `nrows=0` is a special case that just returns the column names and types; e.g., a dry run for a large file or to quickly check format consistency of a set of files before starting to read any. }
\item{header}{ Does the first data line contain column names? Defaults according to whether every non-empty field on the first data line is type character. If so, or TRUE is supplied, any empty column names are given a default name. }
\item{na.strings}{ A character vector of strings which are to be interpreted as \code{NA} values. By default \code{",,"} for columns read as type character is read as a blank string (\code{""}) and \code{",NA,"} is read as \code{NA}. Typical alternatives might be \code{na.strings=NULL} (no coercion to NA at all!) or perhaps \code{na.strings=c("NA","N/A","null")}. }
\item{stringsAsFactors}{ Convert all character columns to factors? }
\item{verbose}{ Be chatty and report timings? }
\item{autostart}{ Any line number within the region of machine readable delimited text, by default 30. If the file is shorter or this line is empty (e.g. short files with trailing blank lines) then the last non empty line (with a non empty line above that) is used. This line and the lines above it are used to auto detect \code{sep}, \code{sep2} and the number of fields. It's extremely unlikely that \code{autostart} should ever need to be changed, we hope. }
\item{skip}{ If -1 (default) use the procedure described below starting on line \code{autostart} to find the first data row. \code{skip>=0} means ignore \code{autostart} and take line \code{skip+1} as the first data row (or column names according to header="auto"|TRUE|FALSE as usual). \code{skip="string"} searches for \code{"string"} in the file (e.g. a substring of the column names row) and starts on that line (inspired by read.xls in package gdata). }
\item{select}{ Vector of column names or numbers to keep, drop the rest. }
\item{drop}{ Vector of column names or numbers to drop, keep the rest. }
\item{colClasses}{ A character vector of classes (named or unnamed), as read.csv. Or a named list of vectors of column names or numbers, see examples. colClasses in fread is intended for rare overrides, not for routine use. fread will only promote a column to a higher type if colClasses requests it. It won't downgrade a column to a lower type since NAs would result. You have to coerce such columns afterwards yourself, if you really require data loss. }
\item{integer64}{ "integer64" (default) reads columns detected as containing integers larger than 2^31 as type \code{bit64::integer64}. Alternatively, \code{"double"|"numeric"} reads as \code{base::read.csv} does; i.e., possibly with loss of precision and if so silently. Or, "character". }
\item{dec}{ The decimal separator as in \code{base::read.csv}. If not "." (default) then usually ",". See details. }
\item{check.names}{ default is \code{FALSE}. If \code{TRUE}, it uses the base function \code{\link{make.unique}} to ensure that column names are all unique.}
\item{quote}{single character value, e.g., \code{"\""} (default) or empty string (\code{""}).
\emph{Single character value:} \code{character} columns can be quoted by the character specified in \code{quote}, e.g., \code{...,2,"Joe Bloggs",3.14,...} or not quoted, e.g., \code{...,2,Joe Bloggs,3.14,...}.
  Spaces and other whitespace (other than \code{sep} and \code{\\n}) may appear in an unquoted character field. In essence quoting character fields are \emph{required} only if \code{sep} or \code{\\n} appears in the string value. Quoting may be used to signify that numeric data should be read as text. A quoted field must start with quote and end with a quote that is also immediately followed by \code{sep} or \code{\\n}. Thus, unescaped quotes may be present in a quoted field, e.g., \code{...,2,"Joe, "Bloggs"",3.14,...}, as well as escaped quotes, e.g., \code{...,2,"Joe \",Bloggs\"",3.14,...}. If an embedded quote is followed by the separator inside a quoted field, the embedded quotes up to that point in that field must be balanced; e.g. \code{...,2,"www.blah?x="one",y="two"",3.14,...}.
  \emph{Empty string:} To disable quoting set \code{quote=""}. This is particularly useful in handling rare cases such as malformed quotes in columns, e.g., \code{...,2.3,""qq,...} or where a single double quote has a meaning, e.g., \code{...,2.3,"at the 5\" end of the gene",...} etc. \code{sep} or \code{\\n} are not escaped within quotes and could therefore lead to undesirable behaviour or \code{warnings/errors} if present.
}
\item{encoding}{ default is \code{"unknown"}. Other possible options are \code{"UTF-8"} and \code{"Latin-1"}. }
\item{showProgress}{ TRUE displays progress on the console using \code{\\r}. It is produced in fread's C code where the very nice (but R level) txtProgressBar and tkProgressBar are not easily available. }
\item{data.table}{ TRUE returns a \code{data.table}. FALSE returns a \code{data.frame}. }
}
\details{
Once the separator is found on line \code{autostart}, the number of columns is determined. Then the file is searched backwards from \code{autostart} until a row is found that doesn't have that number of columns. Thus, the first data row is found and any human readable banners are automatically skipped. This feature can be particularly useful for loading a set of files which may not all have consistently sized banners. Setting \code{skip>0} overrides this feature by setting \code{autostart=skip+1} and turning off the search upwards step.
The first 5 rows, middle 5 rows and last 5 rows are then read to determine column types. The lowest type for each column is chosen from the ordered list \code{integer}, \code{integer64}, \code{double}, \code{character}. This enables \code{fread} to allocate exactly the right number of rows, with columns of the right type, up front once. The file may of course \emph{still} contain data of a different type in rows other than first, middle and last 5. In that case, the column types are bumped mid read and the data read on previous rows is coerced. Setting \code{verbose=TRUE} reports the line and field number of each mid read type bump, and how long this type bumping took (if any).
There is no line length limit, not even a very large one. Since we are encouraging \code{list} columns (i.e. \code{sep2}) this has the potential to encourage longer line lengths. So the approach of scanning each line into a buffer first and then rescanning that buffer is not used. There are no buffers used in \code{fread}'s C code at all. The field width limit is limited by R itself: the maximum width of a character string (currently 2^31-1 bytes, 2GB).
The filename extension (such as .csv) is irrelevant for "auto" \code{sep} and \code{sep2}. Separator detection is entirely driven by the file contents. This can be useful when loading a set of different files which may not be named consistently, or may not have the extension .csv despite being csv. Some datasets have been collected over many years, one file per day for example. Sometimes the file name format has changed at some point in the past or even the format of the file itself. So the idea is that you can loop \code{fread} through a set of files and as long as each file is regular and delimited, \code{fread} can read them all. Whether they all stack is another matter but at least each one is read quickly without you needing to vary \code{colClasses} in \code{read.table} or \code{read.csv}.
All known line endings are detected automatically: \code{\\n} (*NIX including Mac), \code{\\r\\n} (Windows CRLF), \code{\\r} (old Mac) and \code{\\n\\r} (just in case). There is no need to convert input files first. \code{fread} running on any architecture will read a file from any architecture. Both \code{\\r} and \code{\\n} may be embedded in character strings (including column names) provided the field is quoted.
If an empty line is encountered then reading stops there, with warning if any text exists after the empty line such as a footer. The first line of any text discarded is included in the warning message.
On Windows, \code{fread(...,dec=",")} should just work. \code{fread} uses C function \code{strtod} to read numeric data; e.g., \code{1.23} or \code{1,23}. \code{strtod} retrieves the decimal separator (\code{.} or \code{,} usually) from the locale of the R session rather than as an argument passed to the \code{strtod} function. So for \code{fread(...,dec=",")} to work, \code{fread} changes this (and only this) R session's locale temporarily to a locale which provides the desired decimal separator. On Windows, "French_France.1252" is tried which should be available as standard (any locale with comma decimal separator would suffice) and on unix "fr_FR.utf8" (you may need to install this locale on unix). \code{fread()} is very careful to set the locale back again afterwards, even if the function fails with an error. The choice of locale is determined by \code{options()$datatable.fread.dec.locale}. This may be a \emph{vector} of locale names and if so they will be tried in turn until the desired \code{dec} is obtained; thus allowing more than two different decimal separators to be selected. This is a new feature in v1.9.6 and is experimental. In case of problems, turn it off with \code{options(datatable.fread.dec.experiment=FALSE)}.
}
\value{
A \code{data.table} by default. A \code{data.frame} when \code{data.table=FALSE}; e.g. \code{options(datatable.fread.datatable=FALSE)}.
}
\references{
Background :\cr
\url{http://cran.r-project.org/doc/manuals/R-data.html}\cr
\url{http://stackoverflow.com/questions/1727772/quickly-reading-very-large-tables-as-dataframes-in-r}\cr
\url{www.biostat.jhsph.edu/~rpeng/docs/R-large-tables.html}\cr
\url{https://stat.ethz.ch/pipermail/r-help/2007-August/138315.html}\cr
\url{http://www.cerebralmastication.com/2009/11/loading-big-data-into-r/}\cr
\url{http://stackoverflow.com/questions/9061736/faster-than-scan-with-rcpp}\cr
\url{http://stackoverflow.com/questions/415515/how-can-i-read-and-manipulate-csv-file-data-in-c}\cr
\url{http://stackoverflow.com/questions/9352887/strategies-for-reading-in-csv-files-in-pieces}\cr
\url{http://stackoverflow.com/questions/11782084/reading-in-large-text-files-in-r}\cr
\url{http://stackoverflow.com/questions/45972/mmap-vs-reading-blocks}\cr
\url{http://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access}\cr
\url{http://stackoverflow.com/a/9818473/403310}\cr
\url{http://stackoverflow.com/questions/9608950/reading-huge-files-using-memory-mapped-files}
finagler = "to get or achieve by guile or manipulation" \url{http://dictionary.reference.com/browse/finagler}
}
\seealso{ \code{\link[utils]{read.csv}}, \code{\link[base]{url}}, \code{\link[base]{Sys.setlocale}}
}
\examples{
\dontrun{
# Demo speedup
n=1e6
DT = data.table( a=sample(1:1000,n,replace=TRUE),
b=sample(1:1000,n,replace=TRUE),
c=rnorm(n),
d=sample(c("foo","bar","baz","qux","quux"),n,replace=TRUE),
e=rnorm(n),
f=sample(1:1000,n,replace=TRUE) )
DT[2,b:=NA_integer_]
DT[4,c:=NA_real_]
DT[3,d:=NA_character_]
DT[5,d:=""]
DT[2,e:=+Inf]
DT[3,e:=-Inf]
write.table(DT,"test.csv",sep=",",row.names=FALSE,quote=FALSE)
cat("File size (MB):", round(file.info("test.csv")$size/1024^2),"\n")
# 50 MB (1e6 rows x 6 columns)
system.time(DF1 <-read.csv("test.csv",stringsAsFactors=FALSE))
# 60 sec (first time in fresh R session)
system.time(DF1 <- read.csv("test.csv",stringsAsFactors=FALSE))
# 30 sec (immediate repeat is faster, varies)
system.time(DF2 <- read.table("test.csv",header=TRUE,sep=",",quote="",
stringsAsFactors=FALSE,comment.char="",nrows=n,
colClasses=c("integer","integer","numeric",
"character","numeric","integer")))
# 10 sec (consistently). All known tricks and known nrows, see references.
require(data.table)
system.time(DT <- fread("test.csv"))
# 3 sec (faster and friendlier)
require(sqldf)
system.time(SQLDF <- read.csv.sql("test.csv",dbname=NULL))
# 20 sec (friendly too, good defaults)
require(ff)
system.time(FFDF <- read.csv.ffdf(file="test.csv",nrows=n))
# 20 sec (friendly too, good defaults)
identical(DF1,DF2)
all.equal(as.data.table(DF1), DT)
identical(DF1,within(SQLDF,{b<-as.integer(b);c<-as.numeric(c)}))
identical(DF1,within(as.data.frame(FFDF),d<-as.character(d)))
# Scaling up ...
l = vector("list",10)
for (i in 1:10) l[[i]] = DT
DTbig = rbindlist(l)
tables()
write.table(DTbig,"testbig.csv",sep=",",row.names=FALSE,quote=FALSE)
# 500MB (10 million rows x 6 columns)
system.time(DF <- read.table("testbig.csv",header=TRUE,sep=",",
quote="",stringsAsFactors=FALSE,comment.char="",nrows=1e7,
colClasses=c("integer","integer","numeric",
"character","numeric","integer")))
# 100-200 sec (varies)
system.time(DT <- fread("testbig.csv"))
# 30-40 sec
all(mapply(all.equal, DF, DT))
# Real data example (Airline data)
# http://stat-computing.org/dataexpo/2009/the-data.html
download.file("http://stat-computing.org/dataexpo/2009/2008.csv.bz2",
destfile="2008.csv.bz2")
# 109MB (compressed)
system("bunzip2 2008.csv.bz2")
# 658MB (7,009,728 rows x 29 columns)
colClasses = sapply(read.csv("2008.csv",nrows=100),class)
# 4 character, 24 integer, 1 logical. Incorrect.
colClasses = sapply(read.csv("2008.csv",nrows=200),class)
# 5 character, 24 integer. Correct. Might have missed data only using 100 rows
# since read.table assumes colClasses is correct.
system.time(DF <- read.table("2008.csv", header=TRUE, sep=",",
quote="",stringsAsFactors=FALSE,comment.char="",nrows=7009730,
colClasses=colClasses))
# 360 secs
system.time(DT <- fread("2008.csv"))
# 40 secs
table(sapply(DT,class))
# 5 character and 24 integer columns. Correct without needing to worry about colClasses
# issue above.
# Reads URLs directly :
fread("http://www.stats.ox.ac.uk/pub/datasets/csb/ch11b.dat")
}
# Reads text input directly :
fread("A,B\n1,2\n3,4")
# Reads pasted input directly :
fread("A,B
1,2
3,4
")
# Finds the first data line automatically :
fread("
This is perhaps a banner line or two or ten.
A,B
1,2
3,4
")
# Detects whether column names are present automatically :
fread("
1,2
3,4
")
# Numerical precision :
DT = fread("A\n1.010203040506070809010203040506\n") # silent loss of precision
DT[,sprintf("\%.15E",A)] # stored accurately as far as double precision allows
DT = fread("A\n1.46761e-313\n") # detailed warning about ERANGE; read as 'numeric'
DT[,sprintf("\%.15E",A)] # beyond what double precision can store accurately to 15 digits
# For greater accuracy use colClasses to read as character, then package Rmpfr.
# colClasses
data = "A,B,C,D\n1,3,5,7\n2,4,6,8\n"
fread(data, colClasses=c(B="character",C="character",D="character")) # as read.csv
fread(data, colClasses=list(character=c("B","C","D"))) # saves typing
fread(data, colClasses=list(character=2:4)) # same using column numbers
# drop
fread(data, colClasses=c("B"="NULL","C"="NULL")) # as read.csv
fread(data, colClasses=list(NULL=c("B","C"))) #
fread(data, drop=c("B","C")) # same but less typing, easier to read
fread(data, drop=2:3) # same using column numbers
# select
# (in read.csv you need to work out which to drop)
fread(data, select=c("A","D")) # less typing, easier to read
fread(data, select=c(1,4)) # same using column numbers
}
\keyword{ data }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segmentation.R
\name{extractSegmentSummary}
\alias{extractSegmentSummary}
\title{extractSegmentSummary()}
\usage{
extractSegmentSummary(final.objects)
}
\arguments{
\item{final.objects}{list of casper object}
}
\value{
list of loss and gain segments identified in all scales
}
\description{
generates coherent set of CNV segments using the pairwise comparison of all scales from BAF and expression signals
}
|
/man/extractSegmentSummary.Rd
|
no_license
|
derekN-um/CaSpER
|
R
| false
| true
| 504
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segmentation.R
\name{extractSegmentSummary}
\alias{extractSegmentSummary}
\title{extractSegmentSummary()}
\usage{
extractSegmentSummary(final.objects)
}
\arguments{
\item{final.objects}{list of casper object}
}
\value{
list of loss and gain segments identified in all scales
}
\description{
generates coherent set of CNV segments using the pairwise comparison of all scales from BAF and expression signals
}
|
library(dplyr)
library(tidyverse)
library(DT)
library(shinydashboard)
library(shiny)
library(ggplot2)
library(gganimate)
library(plotly)
library(colourpicker)
library(viridis)
library(RColorBrewer)
library(shinyWidgets)
library(devtools)
library(animation)
library(cowplot)
library(magick)
library(praise)
library(testthat)
theme_set(theme_classic())

### read perState data ###
# Per-state cancer table; rate1 mirrors the published crude rate,
# rate2 recomputes it as cases per 100,000 population.
state.df1999 <- read.csv(file = "./state1.df1999.csv") %>%
  mutate(rate1 = crude_rate, rate2 = count / population * 100000)

### read perAge data ###
age.df <- read.csv(file = "./age.df.csv") ## 1999 - 2015 ##
# Order the age bands explicitly so plots sort youngest to oldest.
age.df$age <- factor(
  age.df$age,
  levels = c(
    "<1", "1-4", "5-9", "10-14", "15-19", "20-24", "25-29", "30-34",
    "35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69",
    "70-74", "75-79", "80-84", "85+"
  )
)

## dropdown choice vectors derived from the per-state table
cancer.type <- sort(unique(state.df1999$site))             # cancer site/type
cancer.sex <- sort(unique(state.df1999$sex))               # sex
cancer.event_type <- sort(unique(state.df1999$event_type)) # event type
cancer.area <- sort(unique(state.df1999$area1))            # state/area
|
/global.R
|
no_license
|
AidenFather/US-Cancer-Statistics
|
R
| false
| false
| 1,228
|
r
|
library(dplyr)
library(tidyverse)
library(DT)
library(shinydashboard)
library(shiny)
library(ggplot2)
library(gganimate)
library(plotly)
library(colourpicker)
library(viridis)
library(RColorBrewer)
library(shinyWidgets)
library(devtools)
library(animation)
library(cowplot)
library(magick)
library(praise)
library(testthat)
theme_set(theme_classic())

### read perState data ###
# Per-state cancer table; rate1 mirrors the published crude rate,
# rate2 recomputes it as cases per 100,000 population.
state.df1999 <- read.csv(file = "./state1.df1999.csv") %>%
  mutate(rate1 = crude_rate, rate2 = count / population * 100000)

### read perAge data ###
age.df <- read.csv(file = "./age.df.csv") ## 1999 - 2015 ##
# Order the age bands explicitly so plots sort youngest to oldest.
age.df$age <- factor(
  age.df$age,
  levels = c(
    "<1", "1-4", "5-9", "10-14", "15-19", "20-24", "25-29", "30-34",
    "35-39", "40-44", "45-49", "50-54", "55-59", "60-64", "65-69",
    "70-74", "75-79", "80-84", "85+"
  )
)

## dropdown choice vectors derived from the per-state table
cancer.type <- sort(unique(state.df1999$site))             # cancer site/type
cancer.sex <- sort(unique(state.df1999$sex))               # sex
cancer.event_type <- sort(unique(state.df1999$event_type)) # event type
cancer.area <- sort(unique(state.df1999$area1))            # state/area
|
library(pracma)
library(tidyverse)
library(Directional)
# Disambiguate: always use pracma's 3-D cross product.
cross <- pracma::cross
# Sphere radius used to scale all points.
radius <- 3.5
# Unit-sphere z-coordinate for each of the four height conditions.
height_list <- c(-1/sqrt(2), -1/2, 1/2, 1/sqrt(2))
# Camera tilt per height condition, passed as radians to rot.matrix()
# (rads = TRUE below) -- TODO confirm units against stimulus generator.
view_angle <- c(0.108, 0.073, -0.073, -0.108)
num_per <- length(height_list)
rot_matrix <- function(height){
# Camera rotation for the given height index: rotate by the negated
# viewing angle, in radians.
# NOTE(review): rot.matrix() is assumed to come from the Directional
# package loaded above -- confirm.
rot.matrix(c(0, 0), theta = -view_angle[height], rads = TRUE)
}
gc_curvature <- function(separation, height) {
# Apparent vertical "sag" of the great-circle arc joining two
# symmetric points on the sphere, measured after applying the camera
# rotation for this height condition.
#
# separation: integer (1..5 in the pipeline below) controlling the
#             horizontal offset of the two endpoints
# height:     index into height_list / view_angle
# returns:    |z(left endpoint) - z(arc midpoint)| after rotation
unit_height <- height_list[height]
x_coord <- (1/10)*(separation + 1)
# y chosen so (x, y, z) lies on the unit sphere (front side, y < 0)
y_coord <- -sqrt(1 - x_coord^2 - unit_height^2)
left_pt <- radius*c(-x_coord, y_coord, unit_height)
right_pt <- radius*c(x_coord, y_coord, unit_height)
# Vector in the plane of the two points, perpendicular to left_pt,
# rescaled to the same length as left_pt
perp_temp <- cross(cross(left_pt, right_pt), left_pt)
perp <- Norm(left_pt)*perp_temp/Norm(perp_temp)
# Central angle between the endpoints (both have norm |left_pt|)
end_angle <- acos(dot(left_pt, right_pt)/(Norm(left_pt))^2)
mid_angle <- end_angle/2
# Midpoint of the great-circle arc between the two endpoints
max_height_unrot <- cos(mid_angle)*left_pt + sin(mid_angle)*perp
max_height_rot <- rot_matrix(height) %*% max_height_unrot
left_pt_rot <- rot_matrix(height) %*% left_pt
return(abs(left_pt_rot[3] - max_height_rot[3]))
}
# Get all the data for GC Curvature: one row per (separation, height)
# cell, with the rounded curvature and the two stimulus image filenames.
curve_data <- tibble::tibble(
separation = rep(1:5, num_per),
height = rep(1:num_per, each = 5),
# map2_dbl() returns a double vector directly -- no map() + unlist()
curvature = purrr::map2_dbl(
.x = separation,
.y = height,
.f = function(x, y) round(gc_curvature(x, y), 4)
)
) %>%
mutate(
GeoCurve = str_c("GeoCurve_Sphere_Sep", separation, "_Ht", height, ".jpg"),
ArcCurve = str_c("ArcCurve_Sphere_Sep", separation, "_Ht", height, ".jpg")
) %>%
select(GeoCurve, ArcCurve, everything())
# Write the stimulus-matching table for the 40-stimulus task.
write_csv(curve_data, "../docs/kid_IGEO_curvature_40stim.csv")
# ac_curvature <- function(separation, height, t) {
# unit_height <- height_list[height]
# x_coord <- (1/10)*(separation + 1)
# y_coord <- -sqrt(1 - x_coord^2 - unit_height^2)
# left_pt <- radius*c(-x_coord, y_coord, unit_height)
# right_pt <- radius*c(x_coord, y_coord, unit_height)
# back_pt <- radius*c(0, cos(t), sin(t))
# vec_1 <- left_pt - back_pt
# vec_2 <- right_pt - back_pt
# normal <- pracma::cross(vec_1, vec_2)
# unit_normal <- normal/Norm(normal)
# plane_constant <- pracma::dot(left_pt, unit_normal)
# center_pt <- plane_constant*unit_normal
# to_left <- left_pt - center_pt
# to_right <- right_pt - center_pt
# perp_temp <- cross(cross(to_left, to_right), to_left)
# perp <- Norm(to_left)*perp_temp/Norm(perp_temp)
# end_angle <- acos(dot(to_left, to_right)/(Norm(to_left))^2)
# mid_angle <- end_angle/2
# max_height <- center_pt + cos(mid_angle)*to_left + sin(mid_angle)*perp
# return(abs(max_height[3] - radius*unit_height))
# }
#RotationAngle = ViewAngle[[Height]];
#Turn = RotationTransform[-RotationAngle, {1, 0, 0}, {0, 0, 0}];
#mid_point_rot <- rot_matrix(size, separation, pull, spin) %*% mid_point_unrot
#highest_point <- abs(mid_point_rot[3])
#height <- abs(height_ac(size, separation, pull, spin))
#return(abs(highest_point - height)/length_ac(size, separation, pull))
|
/Updated_Kid_IGEO/r_code/matching_conditions_40stim.R
|
no_license
|
mattyj612/IGEO
|
R
| false
| false
| 2,962
|
r
|
library(pracma)
library(tidyverse)
library(Directional)
# Disambiguate: always use pracma's 3-D cross product.
cross <- pracma::cross
# Sphere radius used to scale all points.
radius <- 3.5
# Unit-sphere z-coordinate for each of the four height conditions.
height_list <- c(-1/sqrt(2), -1/2, 1/2, 1/sqrt(2))
# Camera tilt per height condition, passed as radians to rot.matrix()
# (rads = TRUE below) -- TODO confirm units against stimulus generator.
view_angle <- c(0.108, 0.073, -0.073, -0.108)
num_per <- length(height_list)
rot_matrix <- function(height){
# Camera rotation for the given height index: rotate by the negated
# viewing angle, in radians.
# NOTE(review): rot.matrix() is assumed to come from the Directional
# package loaded above -- confirm.
rot.matrix(c(0, 0), theta = -view_angle[height], rads = TRUE)
}
gc_curvature <- function(separation, height) {
# Apparent vertical "sag" of the great-circle arc joining two
# symmetric points on the sphere, measured after applying the camera
# rotation for this height condition.
#
# separation: integer (1..5 in the pipeline below) controlling the
#             horizontal offset of the two endpoints
# height:     index into height_list / view_angle
# returns:    |z(left endpoint) - z(arc midpoint)| after rotation
unit_height <- height_list[height]
x_coord <- (1/10)*(separation + 1)
# y chosen so (x, y, z) lies on the unit sphere (front side, y < 0)
y_coord <- -sqrt(1 - x_coord^2 - unit_height^2)
left_pt <- radius*c(-x_coord, y_coord, unit_height)
right_pt <- radius*c(x_coord, y_coord, unit_height)
# Vector in the plane of the two points, perpendicular to left_pt,
# rescaled to the same length as left_pt
perp_temp <- cross(cross(left_pt, right_pt), left_pt)
perp <- Norm(left_pt)*perp_temp/Norm(perp_temp)
# Central angle between the endpoints (both have norm |left_pt|)
end_angle <- acos(dot(left_pt, right_pt)/(Norm(left_pt))^2)
mid_angle <- end_angle/2
# Midpoint of the great-circle arc between the two endpoints
max_height_unrot <- cos(mid_angle)*left_pt + sin(mid_angle)*perp
max_height_rot <- rot_matrix(height) %*% max_height_unrot
left_pt_rot <- rot_matrix(height) %*% left_pt
return(abs(left_pt_rot[3] - max_height_rot[3]))
}
# Get all the data for GC Curvature: one row per (separation, height)
# cell, with the rounded curvature and the two stimulus image filenames.
curve_data <- tibble::tibble(
separation = rep(1:5, num_per),
height = rep(1:num_per, each = 5),
# map2_dbl() returns a double vector directly -- no map() + unlist()
curvature = purrr::map2_dbl(
.x = separation,
.y = height,
.f = function(x, y) round(gc_curvature(x, y), 4)
)
) %>%
mutate(
GeoCurve = str_c("GeoCurve_Sphere_Sep", separation, "_Ht", height, ".jpg"),
ArcCurve = str_c("ArcCurve_Sphere_Sep", separation, "_Ht", height, ".jpg")
) %>%
select(GeoCurve, ArcCurve, everything())
# Write the stimulus-matching table for the 40-stimulus task.
write_csv(curve_data, "../docs/kid_IGEO_curvature_40stim.csv")
# ac_curvature <- function(separation, height, t) {
# unit_height <- height_list[height]
# x_coord <- (1/10)*(separation + 1)
# y_coord <- -sqrt(1 - x_coord^2 - unit_height^2)
# left_pt <- radius*c(-x_coord, y_coord, unit_height)
# right_pt <- radius*c(x_coord, y_coord, unit_height)
# back_pt <- radius*c(0, cos(t), sin(t))
# vec_1 <- left_pt - back_pt
# vec_2 <- right_pt - back_pt
# normal <- pracma::cross(vec_1, vec_2)
# unit_normal <- normal/Norm(normal)
# plane_constant <- pracma::dot(left_pt, unit_normal)
# center_pt <- plane_constant*unit_normal
# to_left <- left_pt - center_pt
# to_right <- right_pt - center_pt
# perp_temp <- cross(cross(to_left, to_right), to_left)
# perp <- Norm(to_left)*perp_temp/Norm(perp_temp)
# end_angle <- acos(dot(to_left, to_right)/(Norm(to_left))^2)
# mid_angle <- end_angle/2
# max_height <- center_pt + cos(mid_angle)*to_left + sin(mid_angle)*perp
# return(abs(max_height[3] - radius*unit_height))
# }
#RotationAngle = ViewAngle[[Height]];
#Turn = RotationTransform[-RotationAngle, {1, 0, 0}, {0, 0, 0}];
#mid_point_rot <- rot_matrix(size, separation, pull, spin) %*% mid_point_unrot
#highest_point <- abs(mid_point_rot[3])
#height <- abs(height_ac(size, separation, pull, spin))
#return(abs(highest_point - height)/length_ac(size, separation, pull))
|
library(shiny)
library(shinyWidgets)
library(ggplot2)
library(dplyr)
library(patchwork)
library(rsconnect)
library(base)
library(lubridate)
library(shinyjs)
library(shinydashboard)
library(plotly)
library(htmlwidgets)
library(rlang)
library(forcats)
library(tidyr)
library(rlang)
library(survival)
library(ranger)
library(ggplot2)
library(dplyr)
library(ggfortify)
library(survminer)
library(rlist)
library(DT)
# Pre-computed objects saved from the analysis pipeline.
load("biclusterlist2.RData") # biclusters used when input$reg == "Up" (see server)
load("biclusterlist3.RData") # biclusters used when input$reg == "Down"
load("survival.RData") # per-sample survival table (sample, OS/DSS/DFI/PFI columns)
load("foolist2.RData") # per-bicluster copy-number tables for the "Up" plot
load("foolist3.RData") # per-bicluster copy-number tables for "Down" -- TODO confirm
load("BLCA_PrimaryTumors_Cleaned_H0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment.RData")
load("BLCA_PrimaryTumors_Cleaned_L0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment.RData")
load("locus.RData")
load("BP_info_H.RData") # top GO terms per bicluster (H = up-regulated set)
load("BP_info_L.RData") # top GO terms per bicluster (L = down-regulated set)
load("map.RData") # has a chrom column; source of chrom_list below
round_preserve_sum <- function(x, digits = 0) {
# Round each element of x to `digits` decimal places while keeping the
# (rounded) total unchanged: floor everything, then hand the leftover
# units to the elements with the largest fractional remainders
# (largest-remainder method).
scale <- 10 ^ digits
scaled <- x * scale
floored <- floor(scaled)
shortfall <- round(sum(scaled)) - sum(floored)
bump <- tail(order(scaled - floored), shortfall)
floored[bump] <- floored[bump] + 1
floored / scale
}
# Chromosome choices for the copy-number tab, taken from the first
# bicluster's chrom table (element [[2]]), plus an explicit "all" option.
chrom <- biclusterlist2[[1]][[2]]
chrom <- chrom$chrom # where did "All" go
chrom <- c(as.character(chrom),"all")
# Fixed fill palette keyed by copy-number category.
myColors <- c("#DD8D29", "#E2D200", "#46ACC8", "#E58601", "#B40F20","#808080")
names(myColors) <- c("high_amplification","amplification","no_change","1_copy_del","2_copy_del","Missing")
# Short handles for the two long bicluster result tables (H = up, L = down),
# filtered to biclusters containing more than 20 samples.
hbic <- BLCA_PrimaryTumors_Cleaned_H0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment
hbic_filtered <- hbic %>% filter(Samples.In.Bicluster>20)
lbic <- BLCA_PrimaryTumors_Cleaned_L0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment
lbic_filtered <- lbic %>% filter(Samples.In.Bicluster>20)
# Dropdown choices: every gene / GO term seen in each table, plus "all".
bladder_hgenes <- unique(BLCA_PrimaryTumors_Cleaned_H0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment$Gene.ID)
bladder_hgenes <- c(bladder_hgenes, "all")
bladder_lgenes <- unique(BLCA_PrimaryTumors_Cleaned_L0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment$Gene.ID)
bladder_lgenes <- c(bladder_lgenes, "all")
bladder_hBP <- unique(c(BP_info_H$GoTerm1,BP_info_H$GoTerm2,BP_info_H$GoTerm3,BP_info_H$GoTerm4,BP_info_H$GoTerm5))
bladder_hBP <- c(bladder_hBP, "all")
bladder_lBP <- unique(c(BP_info_L$GoTerm1,BP_info_L$GoTerm2,BP_info_L$GoTerm3,BP_info_L$GoTerm4,BP_info_L$GoTerm5))
bladder_lBP <- c(bladder_lBP, "all")
chrom_list <- unique(map$chrom)
# ---- Per-bicluster lookup tables -------------------------------------------
# Each list below maps a bicluster number (as a character name) to some
# property of that bicluster. Built with lapply()/setNames() rather than
# growing a list with list.append() inside a for-loop (the original
# O(n^2) append pattern, which also needed the rlist package here).

# Gene IDs belonging to each filtered bicluster.
genes_by_bicluster <- function(bic_df) {
ids <- unique(bic_df$Bicluster.No)
setNames(lapply(ids, function(i) bic_df$Gene.ID[bic_df$Bicluster.No == i]), ids)
}
hbic_gene_list <- genes_by_bicluster(hbic_filtered)
lbic_gene_list <- genes_by_bicluster(lbic_filtered)

# GoTerm1..GoTerm5 entries of the BP info table for each bicluster.
bps_by_bicluster <- function(bic_df, bp_df) {
ids <- unique(bic_df$Bicluster.No)
setNames(lapply(ids, function(i) {
hit <- bp_df[bp_df$Bicluster.No == i, , drop = FALSE]
c(hit$GoTerm1, hit$GoTerm2, hit$GoTerm3, hit$GoTerm4, hit$GoTerm5)
}), ids)
}
hbic_BP_list <- bps_by_bicluster(hbic_filtered, BP_info_H)
lbic_BP_list <- bps_by_bicluster(lbic_filtered, BP_info_L)

# Chromosomes accounting for > 80% of a bicluster's genes; element [[2]]
# of each bicluster entry holds a chrom/prop table.
chroms_by_bicluster <- function(bic_df, bic_list) {
ids <- unique(bic_df$Bicluster.No)
setNames(lapply(ids, function(i) {
tab <- bic_list[[i]][[2]]
prop <- setNames(tab$prop, tab$chrom)
names(which(prop > 0.8))
}), ids)
}
hbic_chrom_list <- chroms_by_bicluster(hbic_filtered, biclusterlist2)
lbic_chrom_list <- chroms_by_bicluster(lbic_filtered, biclusterlist3)
# shinydashboard UI: sidebar with four tabs; the two working tabs are
# "biopath" (survival analysis per bicluster) and "copynum" (copy-number
# distribution per bicluster). Input IDs here are read in server().
ui <- dashboardPage (
dashboardHeader(title="Bicluster Visualizations"),
dashboardSidebar(
sidebarMenu(
menuItem("Introduction", tabName = "intro"),
menuItem("Biological Pathways", tabName = "biopath"),
menuItem("Copy Number", tabName="copynum"),
menuItem("Contact", tabName = "contact"))),
dashboardBody(
tabItems(
tabItem("intro",h4("to be added")),
# --- Biological Pathways tab: filter biclusters by regulation
# direction, gene, or GO term, then plot survival curves. ---
tabItem("biopath",
useShinyjs(),
fluidRow(
column(3, selectInput(
inputId = "reg",
label = "Choose up or down regulated gene expression:",
selected="Up",
choices = c("Up","Down"))),
column(3,selectInput(
inputId = "variable",
label = "Select variable of interest",
choices = c("Overall survival", "Disease-specific survival", "Disease-free interval", "Progression-free interval"))),
# be able to input multiple genes
column(3, selectizeInput(
inputId = "gene",
label = "Gene of interest",
choices = bladder_hgenes,
multiple = TRUE)),
column(3, selectizeInput(
inputId = "path",
label = "Biological Pathway of interest",
choices = bladder_hBP,
multiple=TRUE)),
# change bic input choices to dynamic
column(3,selectInput(
inputId = "bic",
label = "Select the bicluster of interest:",
choices = unique(hbic_filtered$Bicluster.No)))),
sidebarPanel(
textOutput(outputId="bicinfo"),width = 3),
mainPanel(
tabsetPanel(
tabPanel("Visualization",plotOutput(outputId = "survivalvis"),width = 9),
tabPanel("Gene Information", dataTableOutput(outputId = "survivaltable"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;",width = 9)))),
# --- Copy Number tab: per-chromosome bar chart, optionally filled
# by copy-number category (see myColors palette above). ---
tabItem("copynum",
useShinyjs(),
fluidRow(
column(3, selectInput(
inputId = "reg_copy",
label = "Choose up or down regulated gene expression:",
selected="Up",
choices = c("Up","Down"))),
column(3,selectInput(
inputId = "num",
label = "Select the bicluster of interest:",
choices = unique(hbic_filtered$Bicluster.No))),
column(3,selectInput(
inputId = "withcopy",
label = "Fill with copy number?",
choices = c("Yes","No"))),
column(3,selectizeInput(
inputId = "chrom",
label = "Select the chromosome of interest:",
choices = chrom, multiple=TRUE
))),
sidebarPanel(
textOutput(outputId= "info"), width = 3),
mainPanel(
tabsetPanel(
tabPanel("Visualization",plotlyOutput(outputId = "mapvis"),width = 9),
tabPanel("Gene Information",dataTableOutput(outputId = "table"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;",width = 9)))),
tabItem("contact",h4("to be added")))))
server <- function(input, output, session) {
gene_list <- list()
observeEvent(input$reg,{
if(input$reg=="Up"){
biclist <- biclusterlist2
gene_list <- hbic_gene_list
path_list <- hbic_BP_list
updateSelectizeInput(session,"gene",choices = bladder_hgenes)
updateSelectizeInput(session,"path",choices = bladder_hBP)
# if the input gene is not in any of the biclusters, the bicluster list won't update
observeEvent(input$gene,
if (input$gene=="all") {
updateSelectInput(session,"bic",choices = unique(hbic_filtered$Bicluster.No))
}
else {
updated_bic<-c()
for (i in 1:length(gene_list)){
if (input$gene %in% gene_list[[i]]) {
updated_bic <- append(updated_bic, names(gene_list[i]))
}
}
updateSelectInput(session,"bic",choices = updated_bic)
})
observeEvent(input$path,
if (input$path=="all"){
updateSelectInput(session,"bic",choices = unique(hbic_filtered$Bicluster.No))
}
else {
updated_bic<-c()
for (i in 1:length(path_list)){
if (input$path %in% path_list[[i]]) {
updated_bic <- append(updated_bic, names(path_list[i]))
}
}
updateSelectInput(session,"bic",choices = updated_bic)
})
output$bicinfo <- renderText({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
surv_dat <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=surv_dat)
print(paste0("This bicluster contains: \n ",toString(nrow(biclist[[as.integer(input$bic)]][[5]]))," genes, \n ",toString(unique(biclist[[as.integer(input$bic)]][[1]]$Samples.In.Bicluster))," samples. \n","The KM-analysis outputs a p-value of ",round(surv_pvalue(km_fit,data=surv_dat)$pval,3)))
})
observeEvent(input$variable,
if (input$variable=="Progression-free interval"){
output$survivalvis <-
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = " Progression-free Interval Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
else if (input$variable=="Overall survival"){
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(OS.time,OS)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = "Overall Survival Times Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
else if (input$variable=="Disease-specific survival"){
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(DSS.time,DSS)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = "Disease-specific Survival Times Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
else if (input$variable=="Disease-free interval"){
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0)) %>% ggplot(aes(x=DFI.time,fill=as.factor(bicluster)))+geom_boxplot(alpha=0.5)+
scale_fill_manual(values = c("#78B7C5","#F2AD00"),name = paste0("Bicluster ",as_string(input$bic)), labels = c("No","Yes"),guide = guide_legend(reverse=TRUE)) +
theme(panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10))})
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(DFI.time,DFI)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = "Disease-free Interval Times Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
)
output$survivaltable <- renderDataTable({
datatable(biclist[[as.integer(input$bic)]][[1]]%>% select(Gene.ID,chrom),options = list(paging=FALSE))
})
}
else if(input$reg=="Down") {
biclist <- biclusterlist3
gene_list <- lbic_gene_list
path_list <- lbic_BP_list
updateSelectizeInput(session,"gene",choices = bladder_lgenes)
updateSelectizeInput(session,"path",choices = bladder_lBP)
updateSelectInput(session,"bic", choices = unique(lbic_filtered$Bicluster.No))
observeEvent(input$gene,
if (input$gene=="all") {
updateSelectInput(session,"bic",choices = unique(lbic_filtered$Bicluster.No))
}
else {
updated_bic<-c()
for (i in 1:length(gene_list)){
if (input$gene %in% gene_list[[i]]) {
updated_bic <- append(updated_bic, names(gene_list[i]))
}
}
updateSelectInput(session,"bic",choices = updated_bic)})
observeEvent(input$path,
if (input$path=="all") {
updateSelectInput(session,"bic",choices = unique(lbic_filtered$Bicluster.No))}
else {
updated_bic<-c()
for (i in 1:length(path_list)){
if (input$path %in% path_list[[i]]) {
updated_bic <- append(updated_bic, names(path_list[i]))
}
}
updateSelectInput(session,"bic",choices = updated_bic)
})
}
output$bicinfo <- renderText({
# biclist <- biclist[[as.integer(input$bic)]][[1]]
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
surv_dat <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=surv_dat)
print(paste0("This bicluster contains: \n ",toString(nrow(biclist[[as.integer(input$bic)]][[5]]))," genes, \n ",toString(unique(biclist[[as.integer(input$bic)]][[1]]$Samples.In.Bicluster))," samples. \n","The KM-analysis outputs a p-value of ",round(surv_pvalue(km_fit,data=surv_dat)$pval,3)))
})
observeEvent(input$variable,
if (input$variable=="Progression-free interval"){
output$survivalvis <-
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = " Progression-free Interval Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
else if (input$variable=="Overall survival"){
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(OS.time,OS)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = "Overall Survival Times Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
else if (input$variable=="Disease-specific survival"){
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(DSS.time,DSS)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = "Disease-specific Survival Times Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
else if (input$variable=="Disease-free interval"){
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0)) %>% ggplot(aes(x=DFI.time,fill=as.factor(bicluster)))+geom_boxplot(alpha=0.5)+
scale_fill_manual(values = c("#78B7C5","#F2AD00"),name = paste0("Bicluster ",as_string(input$bic)), labels = c("No","Yes"),guide = guide_legend(reverse=TRUE)) +
theme(panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10))})
output$survivalvis <- renderPlot({
samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
km_fit <- survfit(Surv(DFI.time,DFI)~bicluster, data=biclist)
autoplot(km_fit) +
labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
title = "Disease-free Interval Times Of \n Bladder Cancer Patients \n") +
theme(plot.title = element_text(hjust = 0.5),
axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
legend.title = element_text(face="bold", size = 10))})
}
)
output$survivaltable <- renderDataTable({
datatable(biclist[[as.integer(input$bic)]][[1]] %>% select(Gene.ID,chrom),options = list(paging=FALSE))
})})
# find biclusters witth more than 80% from the same chromosome.
# Give each bicluster like this a chromosome label
observeEvent(input$reg_copy,
if (input$reg_copy=="Up"){
biclist <- biclusterlist2
observeEvent(input$withcopy,
if (input$withcopy=="No"){
output$mapvis <- renderPlotly({
biclist <- biclist[[as.integer(input$num)]][[2]]
ggplotly(biclist %>% ggplot(aes(x=chrom,y=prop,fill=col))+geom_bar(stat='identity')+
labs(title = "") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),legend.position = "none",plot.title = element_text(size=10)) +
coord_cartesian(ylim=c(0,NA)) + scale_fill_manual(values = levels(as.factor(biclist$col)))+
xlab("Chromosome") +
ylab("Percentage") +
scale_y_continuous(labels=scales::percent_format()),tooltip="text")})
}
else {
output$mapvis <- renderPlotly({
foo <- foolist2[[as.integer(input$num)]]
ggplotly(foo %>% ggplot(aes(x=chrom,fill=copynumber,text=paste(copynumber,": \n",round(copyprop*100,1),"%")))+geom_bar()+theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10)) +
coord_cartesian(ylim=c(0,NA)) +
xlab("Chromosome") + scale_y_continuous(labels = function(x) x/10)+
ylab("Percentage")+ scale_fill_manual(values = myColors),tooltip="text")})})
observeEvent(input$chrom,
if (input$chrom=="all"){
updateSelectInput(session,"num",choices = hbic_filtered$Bicluster.No)
}
else {
updateSelectInput(session,"num",choices = names(which(hbic_chrom_list==input$chrom)))})
output$info <- renderText({
biclist <- biclist[[as.integer(input$num)]][[1]]
print(paste0("This bicluster contains: \n ",toString(nrow(biclist))," genes, \n ",toString(biclist$Samples.In.Bicluster[1])," samples."))
})
output$table <- renderDataTable({
biclist <- biclist[[as.integer(input$num)]][[1]]
datatable(biclist %>% select(Gene.ID,chrom),options = list(paging=FALSE))
})}
else if (input$reg_copy=="Down"){
biclist <- biclusterlist3
updateSelectInput(session,"num", choices = unique(lbic_filtered$Bicluster.No))
observeEvent(input$withcopy,
if (input$withcopy=="No"){
output$mapvis <- renderPlotly({
biclist <- biclist[[as.integer(input$num)]][[2]]
ggplotly(biclist %>% ggplot(aes(x=chrom,y=prop,fill=col))+geom_bar(stat='identity')+
labs(title = "") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),legend.position = "none",plot.title = element_text(size=10)) +
coord_cartesian(ylim=c(0,NA)) + scale_fill_manual(values = levels(as.factor(biclist$col)))+
xlab("Chromosome") +
ylab("Percentage") +
scale_y_continuous(labels=scales::percent_format()),tooltip="text")})
}
else {
output$mapvis <- renderPlotly({
foo <- foolist3[[as.integer(input$num)]]
ggplotly(foo %>% ggplot(aes(x=chrom,fill=copynumber,text=paste(copynumber,": \n",round(copyprop*100,1),"%")))+geom_bar()+theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10)) +
coord_cartesian(ylim=c(0,NA)) +
xlab("Chromosome") + scale_y_continuous(labels = function(x) x/10)+
ylab("Percentage")+ scale_fill_manual(values = myColors),tooltip="text")})})
observeEvent(input$chrom,
if (input$chrom=="all"){
updateSelectInput(session,"num",choices = lbic_filtered$Bicluster.No)
}
else {
updateSelectInput(session,"num",choices = names(which(lbic_chrom_list==input$chrom)))})
output$info <- renderText({
biclist <- biclist[[as.integer(input$num)]][[1]]
print(paste0("This bicluster contains: \n ",toString(nrow(biclist))," genes, \n ",toString(biclist$Samples.In.Bicluster[1])," samples."))
})
output$table <- renderDataTable({
biclist <- biclist[[as.integer(input$num)]][[1]]
datatable(biclist %>% select(Gene.ID,chrom),options = list(paging=FALSE))
})})
}
shinyApp(ui = ui, server = server)
|
/shiny_app/app.R
|
no_license
|
KhiabanianLab/TuBA-Portal
|
R
| false
| false
| 29,850
|
r
|
library(shiny)
library(shinyWidgets)
library(ggplot2)
library(dplyr)
library(patchwork)
library(rsconnect)
library(base)
library(lubridate)
library(shinyjs)
library(shinydashboard)
library(plotly)
library(htmlwidgets)
library(rlang)
library(forcats)
library(tidyr)
library(rlang)
library(survival)
library(ranger)
library(ggplot2)
library(dplyr)
library(ggfortify)
library(survminer)
library(rlist)
library(DT)
# Pre-computed R objects consumed by the app. Each .RData file must sit in the
# app directory. Contents (inferred from later use — confirm against the
# generating scripts): bicluster lists for up/down regulation, TCGA BLCA
# survival endpoints, per-bicluster copy-number summaries, TuBA output tables,
# GO biological-process annotations, and a gene-to-chromosome map.
load("biclusterlist2.RData")
load("biclusterlist3.RData")
load("survival.RData")
load("foolist2.RData")
load("foolist3.RData")
load("BLCA_PrimaryTumors_Cleaned_H0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment.RData")
load("BLCA_PrimaryTumors_Cleaned_L0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment.RData")
load("locus.RData")
load("BP_info_H.RData")
load("BP_info_L.RData")
load("map.RData")
#' Round a numeric vector while preserving its (rounded) total.
#'
#' Largest-remainder method: every value is floored at the requested
#' precision, then the shortfall between the rounded grand total and the
#' floored total is distributed, one unit at a time, to the elements with
#' the largest fractional parts. Ties are broken by `order()`'s stable
#' sort, exactly as in the original implementation.
#'
#' @param x Numeric vector to round.
#' @param digits Number of decimal places to keep (default 0).
#' @return Numeric vector the same length as `x` whose sum equals
#'   `round(sum(x), digits)`.
round_preserve_sum <- function(x, digits = 0) {
  scale <- 10 ^ digits
  scaled <- x * scale
  floored <- floor(scaled)
  # How many single units must be added back so the totals agree.
  shortfall <- round(sum(scaled)) - sum(floored)
  # Indices of the `shortfall` largest fractional remainders.
  bump <- tail(order(scaled - floored), shortfall)
  floored[bump] <- floored[bump] + 1
  floored / scale
}
# ---- Static lookups derived from the loaded .RData objects ----------------
# Chromosome choices for the copy-number tab, plus an "all" catch-all.
chrom <- biclusterlist2[[1]][[2]]
chrom <- chrom$chrom # where did "All" go
chrom <- c(as.character(chrom),"all")
# Fixed fill palette for the copy-number categories used by ggplot below.
myColors <- c("#DD8D29", "#E2D200", "#46ACC8", "#E58601", "#B40F20","#808080")
names(myColors) <- c("high_amplification","amplification","no_change","1_copy_del","2_copy_del","Missing")
# TuBA result tables for up- (H) and down- (L) regulated biclusters,
# restricted to biclusters with more than 20 samples.
hbic <- BLCA_PrimaryTumors_Cleaned_H0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment
hbic_filtered <- hbic %>% filter(Samples.In.Bicluster>20)
lbic <- BLCA_PrimaryTumors_Cleaned_L0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment
lbic_filtered <- lbic %>% filter(Samples.In.Bicluster>20)
# Dropdown choices: every gene seen in the H/L tables, plus "all".
bladder_hgenes <- unique(BLCA_PrimaryTumors_Cleaned_H0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment$Gene.ID)
bladder_hgenes <- c(bladder_hgenes, "all")
bladder_lgenes <- unique(BLCA_PrimaryTumors_Cleaned_L0_05_JcdInd0_3_MinGenes2_MinSamples2_GenesInBiclusters_EdgeBasedSampleEnrichment$Gene.ID)
bladder_lgenes <- c(bladder_lgenes, "all")
# Dropdown choices: top-5 GO biological-process terms per bicluster, plus "all".
bladder_hBP <- unique(c(BP_info_H$GoTerm1,BP_info_H$GoTerm2,BP_info_H$GoTerm3,BP_info_H$GoTerm4,BP_info_H$GoTerm5))
bladder_hBP <- c(bladder_hBP, "all")
bladder_lBP <- unique(c(BP_info_L$GoTerm1,BP_info_L$GoTerm2,BP_info_L$GoTerm3,BP_info_L$GoTerm4,BP_info_L$GoTerm5))
bladder_lBP <- c(bladder_lBP, "all")
chrom_list <- unique(map$chrom)
# Per-bicluster gene membership, keyed by Bicluster.No.
# NOTE(review): these loops grow lists with rlist::list.append — an
# lapply()/split() refactor would be cleaner, but the current code is
# correct; left as-is.
hbic_gene_list <- list()
for (i in unique(hbic_filtered$Bicluster.No)){
  new <- hbic_filtered %>% filter(Bicluster.No==i)
  genes <- new$Gene.ID
  hbic_gene_list <- list.append(hbic_gene_list,genes)
}
names(hbic_gene_list) <- unique(hbic_filtered$Bicluster.No)
lbic_gene_list <- list()
for (i in unique(lbic_filtered$Bicluster.No)){
  new <- lbic_filtered %>% filter(Bicluster.No==i)
  genes <- new$Gene.ID
  lbic_gene_list <- list.append(lbic_gene_list,genes)
}
names(lbic_gene_list) <- unique(lbic_filtered$Bicluster.No)
# Per-bicluster GO-term lists, keyed by Bicluster.No.
hbic_BP_list <- list()
for (i in unique(hbic_filtered$Bicluster.No)){
  new <- BP_info_H %>% filter(Bicluster.No==i)
  BPs <- c(new$GoTerm1, new$GoTerm2, new$GoTerm3, new$GoTerm4, new$GoTerm5)
  hbic_BP_list <- list.append(hbic_BP_list,BPs)
}
names(hbic_BP_list) <- unique(hbic_filtered$Bicluster.No)
lbic_BP_list<- list()
for (i in unique(lbic_filtered$Bicluster.No)){
  new <- BP_info_L %>% filter(Bicluster.No==i)
  BPs <- c(new$GoTerm1, new$GoTerm2, new$GoTerm3, new$GoTerm4, new$GoTerm5)
  lbic_BP_list <- list.append(lbic_BP_list,BPs)
}
names(lbic_BP_list) <- unique(lbic_filtered$Bicluster.No)
# Chromosome label per bicluster: chromosomes holding >80% of its genes.
hbic_chrom_list <- list()
for (i in unique(hbic_filtered$Bicluster.No)){
  new <- biclusterlist2[[i]][[2]]
  bic_chrom <- new$prop
  names(bic_chrom) <- new$chrom
  hbic_chrom_list <- list.append(hbic_chrom_list,names(which(bic_chrom>0.8)))
}
names(hbic_chrom_list) <- unique(hbic_filtered$Bicluster.No)
lbic_chrom_list <- list()
for (i in unique(lbic_filtered$Bicluster.No)){
  new <- biclusterlist3[[i]][[2]]
  bic_chrom <- new$prop
  names(bic_chrom) <- new$chrom
  lbic_chrom_list <- list.append(lbic_chrom_list,names(which(bic_chrom>0.8)))
}
names(lbic_chrom_list) <- unique(lbic_filtered$Bicluster.No)
# shinydashboard layout: sidebar with four pages; the two working pages
# ("Biological Pathways" and "Copy Number") each expose a row of selectors,
# an info sidebar, and a tabset with a plot and a gene table.
ui <- dashboardPage (
  dashboardHeader(title="Bicluster Visualizations"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Introduction", tabName = "intro"),
      menuItem("Biological Pathways", tabName = "biopath"),
      menuItem("Copy Number", tabName="copynum"),
      menuItem("Contact", tabName = "contact"))),
  dashboardBody(
    tabItems(
      tabItem("intro",h4("to be added")),
      # Survival-analysis page: choices default to the up-regulated (H) set;
      # the server swaps them when input$reg changes.
      tabItem("biopath",
              useShinyjs(),
              fluidRow(
                column(3, selectInput(
                  inputId = "reg",
                  label = "Choose up or down regulated gene expression:",
                  selected="Up",
                  choices = c("Up","Down"))),
                column(3,selectInput(
                  inputId = "variable",
                  label = "Select variable of interest",
                  choices = c("Overall survival", "Disease-specific survival", "Disease-free interval", "Progression-free interval"))),
                # be able to input multiple genes
                column(3, selectizeInput(
                  inputId = "gene",
                  label = "Gene of interest",
                  choices = bladder_hgenes,
                  multiple = TRUE)),
                column(3, selectizeInput(
                  inputId = "path",
                  label = "Biological Pathway of interest",
                  choices = bladder_hBP,
                  multiple=TRUE)),
                # change bic input choices to dynamic
                column(3,selectInput(
                  inputId = "bic",
                  label = "Select the bicluster of interest:",
                  choices = unique(hbic_filtered$Bicluster.No)))),
              sidebarPanel(
                textOutput(outputId="bicinfo"),width = 3),
              mainPanel(
                tabsetPanel(
                  tabPanel("Visualization",plotOutput(outputId = "survivalvis"),width = 9),
                  tabPanel("Gene Information", dataTableOutput(outputId = "survivaltable"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;",width = 9)))),
      # Copy-number page: chromosome-distribution plots per bicluster.
      tabItem("copynum",
              useShinyjs(),
              fluidRow(
                column(3, selectInput(
                  inputId = "reg_copy",
                  label = "Choose up or down regulated gene expression:",
                  selected="Up",
                  choices = c("Up","Down"))),
                column(3,selectInput(
                  inputId = "num",
                  label = "Select the bicluster of interest:",
                  choices = unique(hbic_filtered$Bicluster.No))),
                column(3,selectInput(
                  inputId = "withcopy",
                  label = "Fill with copy number?",
                  choices = c("Yes","No"))),
                column(3,selectizeInput(
                  inputId = "chrom",
                  label = "Select the chromosome of interest:",
                  choices = chrom, multiple=TRUE
                ))),
              sidebarPanel(
                textOutput(outputId= "info"), width = 3),
              mainPanel(
                tabsetPanel(
                  tabPanel("Visualization",plotlyOutput(outputId = "mapvis"),width = 9),
                  tabPanel("Gene Information",dataTableOutput(outputId = "table"),style = "height:500px; overflow-y: scroll;overflow-x: scroll;",width = 9)))),
      tabItem("contact",h4("to be added")))))
# Shiny server. Two sections: (1) the survival tab driven by input$reg /
# input$variable, (2) the copy-number tab driven by input$reg_copy /
# input$withcopy / input$chrom.
# NOTE(review): observers are registered inside other observers throughout;
# every outer trigger adds fresh inner observers, a known Shiny anti-pattern
# that can stack duplicate handlers. Flagged only; code left byte-identical.
server <- function(input, output, session) {
  gene_list <- list()
  # --- Survival tab: react to the up/down regulation choice -----------------
  observeEvent(input$reg,{
    if(input$reg=="Up"){
      biclist <- biclusterlist2
      gene_list <- hbic_gene_list
      path_list <- hbic_BP_list
      updateSelectizeInput(session,"gene",choices = bladder_hgenes)
      updateSelectizeInput(session,"path",choices = bladder_hBP)
      # if the input gene is not in any of the biclusters, the bicluster list won't update
      observeEvent(input$gene,
        if (input$gene=="all") {
          updateSelectInput(session,"bic",choices = unique(hbic_filtered$Bicluster.No))
        }
        else {
          # Keep only biclusters whose gene set contains the selected gene.
          updated_bic<-c()
          for (i in 1:length(gene_list)){
            if (input$gene %in% gene_list[[i]]) {
              updated_bic <- append(updated_bic, names(gene_list[i]))
            }
          }
          updateSelectInput(session,"bic",choices = updated_bic)
        })
      # Keep only biclusters annotated with the selected GO term.
      observeEvent(input$path,
        if (input$path=="all"){
          updateSelectInput(session,"bic",choices = unique(hbic_filtered$Bicluster.No))
        }
        else {
          updated_bic<-c()
          for (i in 1:length(path_list)){
            if (input$path %in% path_list[[i]]) {
              updated_bic <- append(updated_bic, names(path_list[i]))
            }
          }
          updateSelectInput(session,"bic",choices = updated_bic)
        })
      # Sidebar text: bicluster size plus KM log-rank p-value (PFI endpoint).
      output$bicinfo <- renderText({
        samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
        surv_dat <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
        km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=surv_dat)
        print(paste0("This bicluster contains: \n ",toString(nrow(biclist[[as.integer(input$bic)]][[5]]))," genes, \n ",toString(unique(biclist[[as.integer(input$bic)]][[1]]$Samples.In.Bicluster))," samples. \n","The KM-analysis outputs a p-value of ",round(surv_pvalue(km_fit,data=surv_dat)$pval,3)))
      })
      # One KM plot per endpoint; samples in the bicluster are coded 1.
      observeEvent(input$variable,
        if (input$variable=="Progression-free interval"){
          # NOTE(review): double assignment — the bare
          # `output$survivalvis <-` on the next line is redundant.
          output$survivalvis <-
            output$survivalvis <- renderPlot({
              samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
              biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
              km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=biclist)
              autoplot(km_fit) +
                labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                     title = " Progression-free Interval Of \n Bladder Cancer Patients \n") +
                theme(plot.title = element_text(hjust = 0.5),
                      axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                      axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                      legend.title = element_text(face="bold", size = 10))})
        }
        else if (input$variable=="Overall survival"){
          output$survivalvis <- renderPlot({
            samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
            biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
            km_fit <- survfit(Surv(OS.time,OS)~bicluster, data=biclist)
            autoplot(km_fit) +
              labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                   title = "Overall Survival Times Of \n Bladder Cancer Patients \n") +
              theme(plot.title = element_text(hjust = 0.5),
                    axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                    axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                    legend.title = element_text(face="bold", size = 10))})
        }
        else if (input$variable=="Disease-specific survival"){
          output$survivalvis <- renderPlot({
            samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
            biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
            km_fit <- survfit(Surv(DSS.time,DSS)~bicluster, data=biclist)
            autoplot(km_fit) +
              labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                   title = "Disease-specific Survival Times Of \n Bladder Cancer Patients \n") +
              theme(plot.title = element_text(hjust = 0.5),
                    axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                    axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                    legend.title = element_text(face="bold", size = 10))})
        }
        else if (input$variable=="Disease-free interval"){
          # NOTE(review): this first renderPlot (boxplot) is immediately
          # overwritten by the KM renderPlot below — dead code.
          output$survivalvis <- renderPlot({
            samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
            survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0)) %>% ggplot(aes(x=DFI.time,fill=as.factor(bicluster)))+geom_boxplot(alpha=0.5)+
              scale_fill_manual(values = c("#78B7C5","#F2AD00"),name = paste0("Bicluster ",as_string(input$bic)), labels = c("No","Yes"),guide = guide_legend(reverse=TRUE)) +
              theme(panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10))})
          output$survivalvis <- renderPlot({
            samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
            biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
            km_fit <- survfit(Surv(DFI.time,DFI)~bicluster, data=biclist)
            autoplot(km_fit) +
              labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                   title = "Disease-free Interval Times Of \n Bladder Cancer Patients \n") +
              theme(plot.title = element_text(hjust = 0.5),
                    axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                    axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                    legend.title = element_text(face="bold", size = 10))})
        }
      )
      output$survivaltable <- renderDataTable({
        datatable(biclist[[as.integer(input$bic)]][[1]]%>% select(Gene.ID,chrom),options = list(paging=FALSE))
      })
    }
    else if(input$reg=="Down") {
      # Mirror of the "Up" branch using the down-regulated (L) objects.
      biclist <- biclusterlist3
      gene_list <- lbic_gene_list
      path_list <- lbic_BP_list
      updateSelectizeInput(session,"gene",choices = bladder_lgenes)
      updateSelectizeInput(session,"path",choices = bladder_lBP)
      updateSelectInput(session,"bic", choices = unique(lbic_filtered$Bicluster.No))
      observeEvent(input$gene,
        if (input$gene=="all") {
          updateSelectInput(session,"bic",choices = unique(lbic_filtered$Bicluster.No))
        }
        else {
          updated_bic<-c()
          for (i in 1:length(gene_list)){
            if (input$gene %in% gene_list[[i]]) {
              updated_bic <- append(updated_bic, names(gene_list[i]))
            }
          }
          updateSelectInput(session,"bic",choices = updated_bic)})
      observeEvent(input$path,
        if (input$path=="all") {
          updateSelectInput(session,"bic",choices = unique(lbic_filtered$Bicluster.No))}
        else {
          updated_bic<-c()
          for (i in 1:length(path_list)){
            if (input$path %in% path_list[[i]]) {
              updated_bic <- append(updated_bic, names(path_list[i]))
            }
          }
          updateSelectInput(session,"bic",choices = updated_bic)
        })
    }
    output$bicinfo <- renderText({
      # biclist <- biclist[[as.integer(input$bic)]][[1]]
      samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
      surv_dat <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
      km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=surv_dat)
      print(paste0("This bicluster contains: \n ",toString(nrow(biclist[[as.integer(input$bic)]][[5]]))," genes, \n ",toString(unique(biclist[[as.integer(input$bic)]][[1]]$Samples.In.Bicluster))," samples. \n","The KM-analysis outputs a p-value of ",round(surv_pvalue(km_fit,data=surv_dat)$pval,3)))
    })
    # Same endpoint dispatch as above, re-registered for the current biclist.
    observeEvent(input$variable,
      if (input$variable=="Progression-free interval"){
        # NOTE(review): redundant double assignment, as in the "Up" branch.
        output$survivalvis <-
          output$survivalvis <- renderPlot({
            samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
            biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
            km_fit <- survfit(Surv(PFI.time,PFI)~bicluster, data=biclist)
            autoplot(km_fit) +
              labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                   title = " Progression-free Interval Of \n Bladder Cancer Patients \n") +
              theme(plot.title = element_text(hjust = 0.5),
                    axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                    axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                    legend.title = element_text(face="bold", size = 10))})
      }
      else if (input$variable=="Overall survival"){
        output$survivalvis <- renderPlot({
          samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
          biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
          km_fit <- survfit(Surv(OS.time,OS)~bicluster, data=biclist)
          autoplot(km_fit) +
            labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                 title = "Overall Survival Times Of \n Bladder Cancer Patients \n") +
            theme(plot.title = element_text(hjust = 0.5),
                  axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                  axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                  legend.title = element_text(face="bold", size = 10))})
      }
      else if (input$variable=="Disease-specific survival"){
        output$survivalvis <- renderPlot({
          samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
          biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
          km_fit <- survfit(Surv(DSS.time,DSS)~bicluster, data=biclist)
          autoplot(km_fit) +
            labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                 title = "Disease-specific Survival Times Of \n Bladder Cancer Patients \n") +
            theme(plot.title = element_text(hjust = 0.5),
                  axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                  axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                  legend.title = element_text(face="bold", size = 10))})
      }
      else if (input$variable=="Disease-free interval"){
        # NOTE(review): boxplot render is dead code, overwritten below.
        output$survivalvis <- renderPlot({
          samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
          survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0)) %>% ggplot(aes(x=DFI.time,fill=as.factor(bicluster)))+geom_boxplot(alpha=0.5)+
            scale_fill_manual(values = c("#78B7C5","#F2AD00"),name = paste0("Bicluster ",as_string(input$bic)), labels = c("No","Yes"),guide = guide_legend(reverse=TRUE)) +
            theme(panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10))})
        output$survivalvis <- renderPlot({
          samples <- colnames(biclist[[as.integer(input$bic)]][[5]])[2:length(colnames(biclist[[as.integer(input$bic)]][[5]]))]
          biclist <- survival %>% mutate(bicluster = ifelse(sample %in% samples,1,0))
          km_fit <- survfit(Surv(DFI.time,DFI)~bicluster, data=biclist)
          autoplot(km_fit) +
            labs(x = "\n Survival Time (Days) ", y = "Survival Probabilities \n",
                 title = "Disease-free Interval Times Of \n Bladder Cancer Patients \n") +
            theme(plot.title = element_text(hjust = 0.5),
                  axis.title.x = element_text(face="bold", colour="#FF7A33", size = 12),
                  axis.title.y = element_text(face="bold", colour="#FF7A33", size = 12),
                  legend.title = element_text(face="bold", size = 10))})
      }
    )
    output$survivaltable <- renderDataTable({
      datatable(biclist[[as.integer(input$bic)]][[1]] %>% select(Gene.ID,chrom),options = list(paging=FALSE))
    })})
  # find biclusters with more than 80% from the same chromosome.
  # Give each bicluster like this a chromosome label
  # --- Copy-number tab: react to the up/down regulation choice --------------
  observeEvent(input$reg_copy,
    if (input$reg_copy=="Up"){
      biclist <- biclusterlist2
      # Plot either the plain chromosome distribution or the copy-number fill.
      observeEvent(input$withcopy,
        if (input$withcopy=="No"){
          output$mapvis <- renderPlotly({
            biclist <- biclist[[as.integer(input$num)]][[2]]
            ggplotly(biclist %>% ggplot(aes(x=chrom,y=prop,fill=col))+geom_bar(stat='identity')+
                       labs(title = "") +
                       theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),legend.position = "none",plot.title = element_text(size=10)) +
                       coord_cartesian(ylim=c(0,NA)) + scale_fill_manual(values = levels(as.factor(biclist$col)))+
                       xlab("Chromosome") +
                       ylab("Percentage") +
                       scale_y_continuous(labels=scales::percent_format()),tooltip="text")})
        }
        else {
          output$mapvis <- renderPlotly({
            foo <- foolist2[[as.integer(input$num)]]
            ggplotly(foo %>% ggplot(aes(x=chrom,fill=copynumber,text=paste(copynumber,": \n",round(copyprop*100,1),"%")))+geom_bar()+theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10)) +
                       coord_cartesian(ylim=c(0,NA)) +
                       xlab("Chromosome") + scale_y_continuous(labels = function(x) x/10)+
                       ylab("Percentage")+ scale_fill_manual(values = myColors),tooltip="text")})})
      # Restrict bicluster choices to those labelled with the chosen chromosome.
      observeEvent(input$chrom,
        if (input$chrom=="all"){
          updateSelectInput(session,"num",choices = hbic_filtered$Bicluster.No)
        }
        else {
          updateSelectInput(session,"num",choices = names(which(hbic_chrom_list==input$chrom)))})
      output$info <- renderText({
        biclist <- biclist[[as.integer(input$num)]][[1]]
        print(paste0("This bicluster contains: \n ",toString(nrow(biclist))," genes, \n ",toString(biclist$Samples.In.Bicluster[1])," samples."))
      })
      output$table <- renderDataTable({
        biclist <- biclist[[as.integer(input$num)]][[1]]
        datatable(biclist %>% select(Gene.ID,chrom),options = list(paging=FALSE))
      })}
    else if (input$reg_copy=="Down"){
      # Mirror of the "Up" branch using the down-regulated (L) objects.
      biclist <- biclusterlist3
      updateSelectInput(session,"num", choices = unique(lbic_filtered$Bicluster.No))
      observeEvent(input$withcopy,
        if (input$withcopy=="No"){
          output$mapvis <- renderPlotly({
            biclist <- biclist[[as.integer(input$num)]][[2]]
            ggplotly(biclist %>% ggplot(aes(x=chrom,y=prop,fill=col))+geom_bar(stat='identity')+
                       labs(title = "") +
                       theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),legend.position = "none",plot.title = element_text(size=10)) +
                       coord_cartesian(ylim=c(0,NA)) + scale_fill_manual(values = levels(as.factor(biclist$col)))+
                       xlab("Chromosome") +
                       ylab("Percentage") +
                       scale_y_continuous(labels=scales::percent_format()),tooltip="text")})
        }
        else {
          output$mapvis <- renderPlotly({
            foo <- foolist3[[as.integer(input$num)]]
            ggplotly(foo %>% ggplot(aes(x=chrom,fill=copynumber,text=paste(copynumber,": \n",round(copyprop*100,1),"%")))+geom_bar()+theme(axis.text.x = element_text(angle = 45, hjust = 1), panel.background = element_rect(fill = "white",colour = "white",size = 0.5, linetype = "solid"), panel.grid.major = element_line(size = 0.5, linetype = 'solid',colour = "light grey"), panel.grid.minor = element_line(size = 0.25, linetype = 'solid',colour = "light grey"),plot.title = element_text(size=10)) +
                       coord_cartesian(ylim=c(0,NA)) +
                       xlab("Chromosome") + scale_y_continuous(labels = function(x) x/10)+
                       ylab("Percentage")+ scale_fill_manual(values = myColors),tooltip="text")})})
      observeEvent(input$chrom,
        if (input$chrom=="all"){
          updateSelectInput(session,"num",choices = lbic_filtered$Bicluster.No)
        }
        else {
          updateSelectInput(session,"num",choices = names(which(lbic_chrom_list==input$chrom)))})
      output$info <- renderText({
        biclist <- biclist[[as.integer(input$num)]][[1]]
        print(paste0("This bicluster contains: \n ",toString(nrow(biclist))," genes, \n ",toString(biclist$Samples.In.Bicluster[1])," samples."))
      })
      output$table <- renderDataTable({
        biclist <- biclist[[as.integer(input$num)]][[1]]
        datatable(biclist %>% select(Gene.ID,chrom),options = list(paging=FALSE))
      })})
}
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/center.R
\name{lawn_center}
\alias{lawn_center}
\title{Get center point}
\usage{
lawn_center(features, lint = FALSE)
}
\arguments{
\item{features}{input features, as a \code{\link{data-FeatureCollection}}}
\item{lint}{(logical) Lint or not. Uses geojsonhint. Takes up increasing time
as the object to get linted increases in size, so probably use by
default for small objects, but not for large if you know they are good geojson
objects. Default: \code{FALSE}}
}
\value{
a \code{\link{data-Point}} feature at the absolute center point of
all input features
}
\description{
Takes a \code{\link{data-FeatureCollection}} and returns the
absolute center point of all features
}
\examples{
lawn_center(lawn_data$points_average)
}
\seealso{
Other measurements: \code{\link{lawn_along}},
\code{\link{lawn_area}}, \code{\link{lawn_bbox_polygon}},
\code{\link{lawn_bbox}}, \code{\link{lawn_bearing}},
\code{\link{lawn_centroid}},
\code{\link{lawn_destination}},
\code{\link{lawn_distance}}, \code{\link{lawn_envelope}},
\code{\link{lawn_extent}},
\code{\link{lawn_line_distance}},
\code{\link{lawn_midpoint}},
\code{\link{lawn_point_on_surface}},
\code{\link{lawn_square}}
}
|
/man/lawn_center.Rd
|
permissive
|
jbousquin/lawn
|
R
| false
| true
| 1,265
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/center.R
\name{lawn_center}
\alias{lawn_center}
\title{Get center point}
\usage{
lawn_center(features, lint = FALSE)
}
\arguments{
\item{features}{input features, as a \code{\link{data-FeatureCollection}}}
\item{lint}{(logical) Lint or not. Uses geojsonhint. Takes up increasing time
as the object to get linted increases in size, so probably use by
default for small objects, but not for large if you know they are good geojson
objects. Default: \code{FALSE}}
}
\value{
a \code{\link{data-Point}} feature at the absolute center point of
all input features
}
\description{
Takes a \code{\link{data-FeatureCollection}} and returns the
absolute center point of all features
}
\examples{
lawn_center(lawn_data$points_average)
}
\seealso{
Other measurements: \code{\link{lawn_along}},
\code{\link{lawn_area}}, \code{\link{lawn_bbox_polygon}},
\code{\link{lawn_bbox}}, \code{\link{lawn_bearing}},
\code{\link{lawn_centroid}},
\code{\link{lawn_destination}},
\code{\link{lawn_distance}}, \code{\link{lawn_envelope}},
\code{\link{lawn_extent}},
\code{\link{lawn_line_distance}},
\code{\link{lawn_midpoint}},
\code{\link{lawn_point_on_surface}},
\code{\link{lawn_square}}
}
|
/*
File: Controls.r
Contains: Control Manager interfaces
Version: Technology: MacOS 7.x
Release: Universal Interfaces 3.0.1
Copyright: © 1985-1997 by Apple Computer, Inc., all rights reserved
Bugs?: Please include the the file and version information (from above) with
the problem description. Developers belonging to one of the Apple
developer programs can submit bug reports to:
devsupport@apple.com
*/
#ifndef __CONTROLS_R__
#define __CONTROLS_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#define popupFixedWidth 0x01
#define popupVariableWidth 0x02
#define popupUseAddResMenu 0x04
#define popupUseWFont 0x08
#define popupTitleBold 0x0100
#define popupTitleItalic 0x0200
#define popupTitleUnderline 0x0400
#define popupTitleOutline 0x0800
#define popupTitleShadow 0x1000
#define popupTitleCondense 0x2000
#define popupTitleExtend 0x4000
#define popupTitleNoStyle 0x8000
#define popupTitleLeftJust 0x00000000
#define popupTitleCenterJust 0x00000001
#define popupTitleRightJust 0x000000FF
#ifdef oldTemp
/*--------------------------cctb • Control Color old Lookup Table----------------------*/
type 'cctb' {
unsigned hex longint; /* CCSeed */
integer; /* ccReserved */
integer = $$Countof(ColorSpec) - 1; /* ctSize */
wide array ColorSpec {
integer cFrameColor, /* partcode */
cBodyColor,
cTextColor,
cElevatorColor;
unsigned integer; /* RGB: red */
unsigned integer; /* green */
unsigned integer; /* blue */
};
};
#else
/*----------------------------cctb • Control Color Lookup Table-------------------------*/
/* Current 'cctb' resource template: CCSeed and ccReserved are fixed at 0,
   and each ColorSpec entry carries the extended part-code list (fill
   pattern, arrows, thumb, hilite, title bar, tinge) followed by a 48-bit
   RGB triple. */
type 'cctb' {
unsigned hex longint = 0; /* CCSeed */
integer = 0; /* ccReserved */
integer = $$Countof(ColorSpec) - 1; /* ctSize: number of entries minus one */
wide array ColorSpec {
integer cFrameColor, /* partcode */
cBodyColor,
cTextColor,
cElevatorColor,
cFillPatColor,
cArrowsLight,
cArrowsDark,
cThumbLight,
cThumbDark,
cHiliteLight,
cHiliteDark,
cTitleBarLight,
cTitleBarDark,
cTingeLight,
cTingeDark;
unsigned integer; /* RGB: red */
unsigned integer; /* green */
unsigned integer; /* blue */
};
};
#endif
/*----------------------------CNTL • Control Template-----------------------------------*/
/* 'CNTL' resource template: the initial state of a control -- bounds
   rectangle, starting value, visibility flag, min/max range, CDEF proc id
   (symbolic values listed below), refcon and Pascal-string title. */
type 'CNTL' {
rect; /* Bounds */
integer; /* Value */
byte invisible, visible; /* visible */
fill byte;
integer; /* Max */
integer; /* Min */
integer pushButProc, /* ProcID */
checkBoxProc,
radioButProc,
pushButProcUseWFont = 8,
checkBoxProcUseWFont,
radioButProcUseWFont,
scrollBarProc = 16;
longint; /* RefCon */
pstring; /* Title */
};
#define popupMenuCDEFproc 1008 /* ProcID 1008 = 16 * 63 */
#endif /* __CONTROLS_R__ */
|
/3.0.1/Universal/Interfaces/RIncludes/Controls.r
|
no_license
|
elliotnunn/UniversalInterfaces
|
R
| false
| false
| 3,188
|
r
|
/*
File: Controls.r
Contains: Control Manager interfaces
Version: Technology: MacOS 7.x
Release: Universal Interfaces 3.0.1
Copyright: © 1985-1997 by Apple Computer, Inc., all rights reserved
Bugs?: Please include the file and version information (from above) with
the problem description. Developers belonging to one of the Apple
developer programs can submit bug reports to:
devsupport@apple.com
*/
#ifndef __CONTROLS_R__
#define __CONTROLS_R__
#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif
#define popupFixedWidth 0x01
#define popupVariableWidth 0x02
#define popupUseAddResMenu 0x04
#define popupUseWFont 0x08
#define popupTitleBold 0x0100
#define popupTitleItalic 0x0200
#define popupTitleUnderline 0x0400
#define popupTitleOutline 0x0800
#define popupTitleShadow 0x1000
#define popupTitleCondense 0x2000
#define popupTitleExtend 0x4000
#define popupTitleNoStyle 0x8000
#define popupTitleLeftJust 0x00000000
#define popupTitleCenterJust 0x00000001
#define popupTitleRightJust 0x000000FF
#ifdef oldTemp
/*--------------------------cctb • Control Color old Lookup Table----------------------*/
/* Legacy 'cctb' resource template: a control color table whose ColorSpec
   entries carry only the four classic part colors (frame, body, text,
   elevator), each followed by a 48-bit RGB triple. */
type 'cctb' {
unsigned hex longint; /* CCSeed */
integer; /* ccReserved */
integer = $$Countof(ColorSpec) - 1; /* ctSize: number of entries minus one */
wide array ColorSpec {
integer cFrameColor, /* partcode */
cBodyColor,
cTextColor,
cElevatorColor;
unsigned integer; /* RGB: red */
unsigned integer; /* green */
unsigned integer; /* blue */
};
};
#else
/*----------------------------cctb • Control Color Lookup Table-------------------------*/
/* Current 'cctb' resource template: CCSeed and ccReserved are fixed at 0,
   and each ColorSpec entry carries the extended part-code list (fill
   pattern, arrows, thumb, hilite, title bar, tinge) followed by a 48-bit
   RGB triple. */
type 'cctb' {
unsigned hex longint = 0; /* CCSeed */
integer = 0; /* ccReserved */
integer = $$Countof(ColorSpec) - 1; /* ctSize: number of entries minus one */
wide array ColorSpec {
integer cFrameColor, /* partcode */
cBodyColor,
cTextColor,
cElevatorColor,
cFillPatColor,
cArrowsLight,
cArrowsDark,
cThumbLight,
cThumbDark,
cHiliteLight,
cHiliteDark,
cTitleBarLight,
cTitleBarDark,
cTingeLight,
cTingeDark;
unsigned integer; /* RGB: red */
unsigned integer; /* green */
unsigned integer; /* blue */
};
};
#endif
/*----------------------------CNTL • Control Template-----------------------------------*/
/* 'CNTL' resource template: the initial state of a control -- bounds
   rectangle, starting value, visibility flag, min/max range, CDEF proc id
   (symbolic values listed below), refcon and Pascal-string title. */
type 'CNTL' {
rect; /* Bounds */
integer; /* Value */
byte invisible, visible; /* visible */
fill byte;
integer; /* Max */
integer; /* Min */
integer pushButProc, /* ProcID */
checkBoxProc,
radioButProc,
pushButProcUseWFont = 8,
checkBoxProcUseWFont,
radioButProcUseWFont,
scrollBarProc = 16;
longint; /* RefCon */
pstring; /* Title */
};
#define popupMenuCDEFproc 1008 /* ProcID 1008 = 16 * 63 */
#endif /* __CONTROLS_R__ */
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsk.R
\docType{class}
\name{rsk-class}
\alias{rsk-class}
\title{Class to Store Rsk Data}
\description{
Class for data stored in the ``Ruskin'' format used by RBR [1], including both
\code{rsk} SQLite files and the ASCII \code{txt} exported files.
A \code{rsk} object may be read with \code{\link{read.rsk}} or created with
\code{\link{as.rsk}}. Plots can be made with \code{\link{plot,rsk-method}}, while
\code{\link{summary,rsk-method}} produces statistical summaries and \code{show}
produces overviews. If atmospheric pressure has not been removed from the
data, the functions \code{\link{rskPatm}} may provide guidance as to its value;
however, this last function is no equal to decent record-keeping at sea. Data
may be retrieved with \code{\link{[[,rsk-method}} or replaced with
\code{\link{[[<-,rsk-method}}.
}
\references{
1. \href{https://www.rbr-global.com/products}{RBR website: www.rbr-global.com/products}
}
\seealso{
Other classes provided by \code{oce}: \code{\link{adp-class}},
\code{\link{adv-class}}, \code{\link{argo-class}},
\code{\link{bremen-class}}, \code{\link{cm-class}},
\code{\link{coastline-class}}, \code{\link{ctd-class}},
\code{\link{echosounder-class}},
\code{\link{lisst-class}}, \code{\link{lobo-class}},
\code{\link{met-class}}, \code{\link{oce-class}},
\code{\link{odf-class}}, \code{\link{sealevel-class}},
\code{\link{section-class}}, \code{\link{topo-class}},
\code{\link{windrose-class}}
Other things related to \code{rsk} data: \code{\link{[[,rsk-method}},
\code{\link{[[<-,rsk-method}}, \code{\link{as.rsk}},
\code{\link{plot,rsk-method}}, \code{\link{read.rsk}},
\code{\link{rskPatm}}, \code{\link{rskToc}},
\code{\link{rsk}}, \code{\link{subset,rsk-method}},
\code{\link{summary,rsk-method}}
}
\author{
Dan Kelley and Clark Richards
}
|
/pkgs/oce/man/rsk-class.Rd
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false
| true
| 1,890
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsk.R
\docType{class}
\name{rsk-class}
\alias{rsk-class}
\title{Class to Store Rsk Data}
\description{
Class for data stored in the ``Ruskin'' format used by RBR [1], including both
\code{rsk} SQLite files and the ASCII \code{txt} exported files.
A \code{rsk} object may be read with \code{\link{read.rsk}} or created with
\code{\link{as.rsk}}. Plots can be made with \code{\link{plot,rsk-method}}, while
\code{\link{summary,rsk-method}} produces statistical summaries and \code{show}
produces overviews. If atmospheric pressure has not been removed from the
data, the functions \code{\link{rskPatm}} may provide guidance as to its value;
however, this last function is no equal to decent record-keeping at sea. Data
may be retrieved with \code{\link{[[,rsk-method}} or replaced with
\code{\link{[[<-,rsk-method}}.
}
\references{
1. \href{https://www.rbr-global.com/products}{RBR website: www.rbr-global.com/products}
}
\seealso{
Other classes provided by \code{oce}: \code{\link{adp-class}},
\code{\link{adv-class}}, \code{\link{argo-class}},
\code{\link{bremen-class}}, \code{\link{cm-class}},
\code{\link{coastline-class}}, \code{\link{ctd-class}},
\code{\link{echosounder-class}},
\code{\link{lisst-class}}, \code{\link{lobo-class}},
\code{\link{met-class}}, \code{\link{oce-class}},
\code{\link{odf-class}}, \code{\link{sealevel-class}},
\code{\link{section-class}}, \code{\link{topo-class}},
\code{\link{windrose-class}}
Other things related to \code{rsk} data: \code{\link{[[,rsk-method}},
\code{\link{[[<-,rsk-method}}, \code{\link{as.rsk}},
\code{\link{plot,rsk-method}}, \code{\link{read.rsk}},
\code{\link{rskPatm}}, \code{\link{rskToc}},
\code{\link{rsk}}, \code{\link{subset,rsk-method}},
\code{\link{summary,rsk-method}}
}
\author{
Dan Kelley and Clark Richards
}
|
## create any required directories for the project
##
## checkdir: ensure that `directory` exists, creating it (and any missing
## parent directories) when needed.
##
## @param directory Path of the directory to ensure.
## @return TRUE if the directory was created by this call; FALSE if it
##   already existed. (dir.create() itself returns FALSE, with a warning,
##   if creation fails.)
checkdir <- function (directory) {
  ## The condition is a single logical, so a plain `if` is the idiomatic
  ## choice; ifelse() is meant for vectorized input and would evaluate and
  ## then subset its branch values.
  if (!dir.exists(directory)) {
    ## recursive = TRUE also creates missing parent directories, matching
    ## the stated intent of "create any required directories".
    dir.create(directory, recursive = TRUE)
  } else {
    FALSE
  }
}
|
/R/checkdir.R
|
no_license
|
SophieSt/Drilling-Rigs-from-Nightlights
|
R
| false
| false
| 158
|
r
|
## create any required directories for the project
##
## checkdir: ensure that `directory` exists, creating it (and any missing
## parent directories) when needed.
##
## @param directory Path of the directory to ensure.
## @return TRUE if the directory was created by this call; FALSE if it
##   already existed. (dir.create() itself returns FALSE, with a warning,
##   if creation fails.)
checkdir <- function (directory) {
  ## The condition is a single logical, so a plain `if` is the idiomatic
  ## choice; ifelse() is meant for vectorized input and would evaluate and
  ## then subset its branch values.
  if (!dir.exists(directory)) {
    ## recursive = TRUE also creates missing parent directories, matching
    ## the stated intent of "create any required directories".
    dir.create(directory, recursive = TRUE)
  } else {
    FALSE
  }
}
|
## celltype enrichment
## celltr: cell-type enrichment of gene modules via Fisher's exact test (FET).
##
## For every module in `clusters_list` (vectors of human ENSG ids) and every
## cell class in `hmscDF` (Zeisel et al. 2015 single-cell classes, loaded
## from disk), a 2x2 contingency table is built against the orthologue
## background `HUMQb` and tested with fisher.test(); p-values are then
## FDR-adjusted across modules within each cell class.
##
## Arguments:
##   clusters_list  named list of modules, each a vector of ENSG gene ids.
##   runname        tag used in the (commented-out) output file names.
##   inpath         directory holding HUMQb.Rdata and hmscDF_neuron.Rdata.
##   outpath        directory for the (commented-out) result tables.
##   selection      if TRUE, build a table of modules with FET FDR < 0.2 per
##                  cell class (only used by the commented-out write.table).
##
## Returns: data.frame of FET p-values with one column per cell class.
##
## NOTE(review): `Load` (capital L) is not base R -- presumably a project
## helper wrapping load() that brings HUMQb and hmscDF into scope; confirm.
celltr<-function(clusters_list,runname="",inpath='~/Dropbox/SHARED/tools/Data_to_load_CellFET/',outpath=getwd(),selection=F){ #inpath="~/Dropbox/SHARED/tools/Data_to_load_CellFET",
cat('\tNOTE:\tclusters_list only ENSG ids currently supported')
cat("\tCell background is the genes with one2one human orthologous of mice genes used to build the list of cell class enriched genes by Zeisel et al 2015(Science)\n")
# library(MetaDE)
# library('parallel')
###load data
Load(paste(inpath,"/HUMQb.Rdata",sep="")) #"HUMQb" human ENSid orthologous genes of mice background genes
Load(paste(inpath,"/hmscDF_neuron.Rdata",sep="")) #"hmscDF" human ENSid orthologous of mice single cell enriched by class dataframe
### create a matrix for results
## One row per module, 14 statistics per row (re-filled for each cell class).
cFET=matrix(nrow=length(clusters_list), ncol=14)
row.names(cFET)=names(clusters_list)
colnames(cFET)=c("cell class","FET p.value","FET FDR","OR","[95% OR CI] inf","OR [95% OR CI] sup",
"module cell enriched genes","module out cell enriched genes",
"non module cell enriched genes","non module out cell enriched genes",
"gene names of in modules cell enriched genes","module size","cell enriched genes size",
"% of genes in module in cell background"
)
resMsc=list()
for(ccl in 1:length(hmscDF)){ # ccl: cell class
cat('\t=====================',names(hmscDF)[ccl],'=====================',ccl,' of ',length(hmscDF),'\n')
cclENS=hmscDF[[ccl]]
### function to fill the matrix of results
#for(i in 1:length(clusters_list)){
## FUNC computes the 14-element result row for module i (see NOTE below on
## how the row reaches the caller).
FUNC=function(i){
Ms=length(clusters_list[[i]]) #Ms: module size
CB=HUMQb[,'hsapiens_homolog_ensembl_gene'] #CB: cell background
Cs=length(cclENS) #Cs: cell enriched genes size
MCBp=length(intersect(CB,clusters_list[[i]]))/Ms #MCBp: % of genes in module in cell background
#cFET
cat('\t\t',names(clusters_list)[i],'\n')
#calculate the number Mc of module i cell enriched genes(Mc: in module AND in cell class)
Mc=length(intersect(cclENS,clusters_list[[i]]))
McID=paste(unlist(HUMQb[which(CB %in% intersect(cclENS,clusters_list[[i]])),'external_gene_name']),collapse=", ")
#calculate the number NMc of remaining genes not in module but in cell class
NMc=length(cclENS)-Mc
#calculate the number Mnc of genes in module but not in cell class
Mnc=length(intersect(CB,clusters_list[[i]]))-Mc
#calculate the number NMnc of genes out of module AND not in cell class
NMnc=length(CB)-(Mc+NMc+Mnc)
# contingency matrice for Fisher Exact Test FET all DNMs and ns DNMs
matr=matrix(c(Mc,NMc,Mnc,NMnc), nrow=2)
#FET
#FisherM=fisher.test(matr,alternative="greater")
FisherM=fisher.test(matr)
Fisher.p=FisherM$p.value
Fisher.or=FisherM$estimate
Fisher.cinf=FisherM$conf.int[1]
Fisher.cis=FisherM$conf.int[2]
## NOTE(review): because of copy-on-modify, this touches a *local* copy of
## cFET, not the one in the enclosing scope; the value of the assignment
## (the RHS character vector) is what FUNC returns to lapply().
cFET[i,]=c(names(hmscDF)[ccl],Fisher.p,NA,Fisher.or,Fisher.cinf,Fisher.cis,Mc,Mnc,NMc,NMnc,McID,Ms,Cs,MCBp)
}
# cfet=mclapply(1:length(clusters_list),FUNC,mc.cores=detectCores())
cfet=lapply(1:length(clusters_list),FUNC)
#The cfet output object of the mclapply function is a list of n vectors cFET[i,] in the good order
for(i in 1:length(clusters_list)){
cFET[i,]=cfet[[i]]
}
## FDR across modules, within the current cell class only.
cFET[,"FET FDR"]=p.adjust(cFET[,"FET p.value"],method="fdr")
# write.table(cFET, sep='\t', file=paste(outpath,"/",names(hmscDF)[ccl],"_cFET_",runname,".txt",sep=""), row.names=TRUE, quote=FALSE, col.names=NA)
resMsc[[ccl]]=cFET
}
names(resMsc)=names(hmscDF)
## Per-class filtering at FDR < 0.2, only feeding the commented-out output;
## the c(select,NA)/is.na(rownames()) dance keeps the selection a matrix
## (with rownames) when exactly one row survives.
if(selection==T){
select=which(as.numeric(resMsc[[1]][,"FET FDR"]) < 0.2)
cat("number of selected modules for ", names(resMsc)[1]," :",length(select),'\n')
if(length(select)==1){
SignifT=resMsc[[1]][c(select,NA),]
SignifT=SignifT[-which(is.na(rownames(SignifT))==T),]
}else{
SignifT=resMsc[[1]][select,]
}
for(ccl in 2:length(resMsc)){
select=which(as.numeric(resMsc[[ccl]][,"FET FDR"]) < 0.2)
cat("number of selected modules for ", names(resMsc)[ccl]," :",length(select),'\n')
if(length(select)==1){
SignifT=rbind(SignifT,resMsc[[ccl]][c(select,NA),])
SignifT=SignifT[-which(is.na(rownames(SignifT))==T),]
}else{
SignifT=rbind(SignifT,resMsc[[ccl]][select,])
}
}
# write.table(SignifT, sep='\t', file=paste(outpath,'/significant_cFET_',runname,'.txt',sep=''), row.names=TRUE, quote=FALSE, col.names=NA)
}else{
allT=resMsc[[1]]
for(ccl in 2:length(resMsc)){
allT=rbind(allT,resMsc[[ccl]])
}
# write.table(allT, sep='\t', file=paste(outpath,'/ALL_cFET_',runname,'.txt',sep=''), row.names=TRUE, quote=FALSE, col.names=NA)
}
## Only the p-value column of each per-class matrix is returned.
return(as.data.frame(lapply(resMsc,function(x){x[,'FET p.value']})))
}
|
/R/celltr.R
|
no_license
|
ks4471/celltr
|
R
| false
| false
| 4,891
|
r
|
## celltype enrichment
## celltr: cell-type enrichment of gene modules via Fisher's exact test (FET).
##
## For every module in `clusters_list` (vectors of human ENSG ids) and every
## cell class in `hmscDF` (Zeisel et al. 2015 single-cell classes, loaded
## from disk), a 2x2 contingency table is built against the orthologue
## background `HUMQb` and tested with fisher.test(); p-values are then
## FDR-adjusted across modules within each cell class.
##
## Arguments:
##   clusters_list  named list of modules, each a vector of ENSG gene ids.
##   runname        tag used in the (commented-out) output file names.
##   inpath         directory holding HUMQb.Rdata and hmscDF_neuron.Rdata.
##   outpath        directory for the (commented-out) result tables.
##   selection      if TRUE, build a table of modules with FET FDR < 0.2 per
##                  cell class (only used by the commented-out write.table).
##
## Returns: data.frame of FET p-values with one column per cell class.
##
## NOTE(review): `Load` (capital L) is not base R -- presumably a project
## helper wrapping load() that brings HUMQb and hmscDF into scope; confirm.
celltr<-function(clusters_list,runname="",inpath='~/Dropbox/SHARED/tools/Data_to_load_CellFET/',outpath=getwd(),selection=F){ #inpath="~/Dropbox/SHARED/tools/Data_to_load_CellFET",
cat('\tNOTE:\tclusters_list only ENSG ids currently supported')
cat("\tCell background is the genes with one2one human orthologous of mice genes used to build the list of cell class enriched genes by Zeisel et al 2015(Science)\n")
# library(MetaDE)
# library('parallel')
###load data
Load(paste(inpath,"/HUMQb.Rdata",sep="")) #"HUMQb" human ENSid orthologous genes of mice background genes
Load(paste(inpath,"/hmscDF_neuron.Rdata",sep="")) #"hmscDF" human ENSid orthologous of mice single cell enriched by class dataframe
### create a matrix for results
## One row per module, 14 statistics per row (re-filled for each cell class).
cFET=matrix(nrow=length(clusters_list), ncol=14)
row.names(cFET)=names(clusters_list)
colnames(cFET)=c("cell class","FET p.value","FET FDR","OR","[95% OR CI] inf","OR [95% OR CI] sup",
"module cell enriched genes","module out cell enriched genes",
"non module cell enriched genes","non module out cell enriched genes",
"gene names of in modules cell enriched genes","module size","cell enriched genes size",
"% of genes in module in cell background"
)
resMsc=list()
for(ccl in 1:length(hmscDF)){ # ccl: cell class
cat('\t=====================',names(hmscDF)[ccl],'=====================',ccl,' of ',length(hmscDF),'\n')
cclENS=hmscDF[[ccl]]
### function to fill the matrix of results
#for(i in 1:length(clusters_list)){
## FUNC computes the 14-element result row for module i (see NOTE below on
## how the row reaches the caller).
FUNC=function(i){
Ms=length(clusters_list[[i]]) #Ms: module size
CB=HUMQb[,'hsapiens_homolog_ensembl_gene'] #CB: cell background
Cs=length(cclENS) #Cs: cell enriched genes size
MCBp=length(intersect(CB,clusters_list[[i]]))/Ms #MCBp: % of genes in module in cell background
#cFET
cat('\t\t',names(clusters_list)[i],'\n')
#calculate the number Mc of module i cell enriched genes(Mc: in module AND in cell class)
Mc=length(intersect(cclENS,clusters_list[[i]]))
McID=paste(unlist(HUMQb[which(CB %in% intersect(cclENS,clusters_list[[i]])),'external_gene_name']),collapse=", ")
#calculate the number NMc of remaining genes not in module but in cell class
NMc=length(cclENS)-Mc
#calculate the number Mnc of genes in module but not in cell class
Mnc=length(intersect(CB,clusters_list[[i]]))-Mc
#calculate the number NMnc of genes out of module AND not in cell class
NMnc=length(CB)-(Mc+NMc+Mnc)
# contingency matrice for Fisher Exact Test FET all DNMs and ns DNMs
matr=matrix(c(Mc,NMc,Mnc,NMnc), nrow=2)
#FET
#FisherM=fisher.test(matr,alternative="greater")
FisherM=fisher.test(matr)
Fisher.p=FisherM$p.value
Fisher.or=FisherM$estimate
Fisher.cinf=FisherM$conf.int[1]
Fisher.cis=FisherM$conf.int[2]
## NOTE(review): because of copy-on-modify, this touches a *local* copy of
## cFET, not the one in the enclosing scope; the value of the assignment
## (the RHS character vector) is what FUNC returns to lapply().
cFET[i,]=c(names(hmscDF)[ccl],Fisher.p,NA,Fisher.or,Fisher.cinf,Fisher.cis,Mc,Mnc,NMc,NMnc,McID,Ms,Cs,MCBp)
}
# cfet=mclapply(1:length(clusters_list),FUNC,mc.cores=detectCores())
cfet=lapply(1:length(clusters_list),FUNC)
#The cfet output object of the mclapply function is a list of n vectors cFET[i,] in the good order
for(i in 1:length(clusters_list)){
cFET[i,]=cfet[[i]]
}
## FDR across modules, within the current cell class only.
cFET[,"FET FDR"]=p.adjust(cFET[,"FET p.value"],method="fdr")
# write.table(cFET, sep='\t', file=paste(outpath,"/",names(hmscDF)[ccl],"_cFET_",runname,".txt",sep=""), row.names=TRUE, quote=FALSE, col.names=NA)
resMsc[[ccl]]=cFET
}
names(resMsc)=names(hmscDF)
## Per-class filtering at FDR < 0.2, only feeding the commented-out output;
## the c(select,NA)/is.na(rownames()) dance keeps the selection a matrix
## (with rownames) when exactly one row survives.
if(selection==T){
select=which(as.numeric(resMsc[[1]][,"FET FDR"]) < 0.2)
cat("number of selected modules for ", names(resMsc)[1]," :",length(select),'\n')
if(length(select)==1){
SignifT=resMsc[[1]][c(select,NA),]
SignifT=SignifT[-which(is.na(rownames(SignifT))==T),]
}else{
SignifT=resMsc[[1]][select,]
}
for(ccl in 2:length(resMsc)){
select=which(as.numeric(resMsc[[ccl]][,"FET FDR"]) < 0.2)
cat("number of selected modules for ", names(resMsc)[ccl]," :",length(select),'\n')
if(length(select)==1){
SignifT=rbind(SignifT,resMsc[[ccl]][c(select,NA),])
SignifT=SignifT[-which(is.na(rownames(SignifT))==T),]
}else{
SignifT=rbind(SignifT,resMsc[[ccl]][select,])
}
}
# write.table(SignifT, sep='\t', file=paste(outpath,'/significant_cFET_',runname,'.txt',sep=''), row.names=TRUE, quote=FALSE, col.names=NA)
}else{
allT=resMsc[[1]]
for(ccl in 2:length(resMsc)){
allT=rbind(allT,resMsc[[ccl]])
}
# write.table(allT, sep='\t', file=paste(outpath,'/ALL_cFET_',runname,'.txt',sep=''), row.names=TRUE, quote=FALSE, col.names=NA)
}
## Only the p-value column of each per-class matrix is returned.
return(as.data.frame(lapply(resMsc,function(x){x[,'FET p.value']})))
}
|
## Integration tests for get_awards(), a wrapper around the NSF Awards API.
## NOTE(review): these tests appear to hit the live NSF service -- failures
## may reflect network/API changes rather than regressions in get_awards().
context("Get NSF awards")
## Minimal call: only the required from_date argument.
test_that("get_awards runs with only from_date specified", {
award_info <- get_awards(from_date = "11/01/2017")
expect_is(award_info, "data.frame")
})
## Bounded date range via from_date + to_date.
test_that("get_awards runs with from_date and to_date specified", {
award_info <- get_awards(from_date = "11/01/2017",
to_date = "01/01/2018")
expect_is(award_info, "data.frame")
})
## Extra query string narrowing results to a single award id.
test_that("get_awards runs with query specified", {
award_info <- get_awards(from_date = "11/01/2017",
query = "id=1748653")
expect_is(award_info, "data.frame")
})
## print_fields should control the returned columns (here: id, title -> 2).
test_that("get_awards runs with only print_fields specified", {
award_info <- get_awards(from_date = "11/01/2017",
print_fields = "id,title")
expect_equal(length(award_info), 2)
})
## Invalid inputs (malformed date, wrong argument types) must raise errors.
test_that("Error checks work", {
expect_error(get_awards(from_date = "1-1-17"))
expect_error(get_awards(from_date = 2017))
expect_error(get_awards(query = "hi"))
expect_error(get_awards(print_fields = TRUE))
})
|
/tests/testthat/test_get_awards.R
|
permissive
|
NCEAS/datamgmt
|
R
| false
| false
| 1,055
|
r
|
## Integration tests for get_awards(), a wrapper around the NSF Awards API.
## NOTE(review): these tests appear to hit the live NSF service -- failures
## may reflect network/API changes rather than regressions in get_awards().
context("Get NSF awards")
## Minimal call: only the required from_date argument.
test_that("get_awards runs with only from_date specified", {
award_info <- get_awards(from_date = "11/01/2017")
expect_is(award_info, "data.frame")
})
## Bounded date range via from_date + to_date.
test_that("get_awards runs with from_date and to_date specified", {
award_info <- get_awards(from_date = "11/01/2017",
to_date = "01/01/2018")
expect_is(award_info, "data.frame")
})
## Extra query string narrowing results to a single award id.
test_that("get_awards runs with query specified", {
award_info <- get_awards(from_date = "11/01/2017",
query = "id=1748653")
expect_is(award_info, "data.frame")
})
## print_fields should control the returned columns (here: id, title -> 2).
test_that("get_awards runs with only print_fields specified", {
award_info <- get_awards(from_date = "11/01/2017",
print_fields = "id,title")
expect_equal(length(award_info), 2)
})
## Invalid inputs (malformed date, wrong argument types) must raise errors.
test_that("Error checks work", {
expect_error(get_awards(from_date = "1-1-17"))
expect_error(get_awards(from_date = 2017))
expect_error(get_awards(query = "hi"))
expect_error(get_awards(print_fields = TRUE))
})
|
\name{get_nom_scale}
\alias{get_nom_scale}
\title{Non-Metric Nominal Scale}
\usage{
get_nom_scale(y, x, Xdummy)
}
\arguments{
\item{y}{vector of values}
\item{x}{vector of the natural number series 1, 2, ..., p
obtained by means of the function myRank}
\item{Xdummy}{dummy matrix corresponding to x}
}
\value{
scaled matrix
}
\description{
Internal function not to be called by the user
}
\details{
Internal function. \code{get_nom_scale} is called by
\code{plspm}.
}
\note{
This function replaces the elements of x by the means
of y conditioned to the levels of x
}
\section{Warning}{
Do NOT use this function unless you are ME, a package
developer, or a jedi user who really knows what is doing
(seriously!)
}
\keyword{internal}
|
/man/get_nom_scale.Rd
|
no_license
|
gastonstat/plspm
|
R
| false
| false
| 766
|
rd
|
\name{get_nom_scale}
\alias{get_nom_scale}
\title{Non-Metric Nominal Scale}
\usage{
get_nom_scale(y, x, Xdummy)
}
\arguments{
\item{y}{vector of values}
\item{x}{vector of the natural number series 1, 2, ..., p
obtained by means of the function myRank}
\item{Xdummy}{dummy matrix corresponding to x}
}
\value{
scaled matrix
}
\description{
Internal function not to be called by the user
}
\details{
Internal function. \code{get_nom_scale} is called by
\code{plspm}.
}
\note{
This function replaces the elements of x by the means
of y conditioned to the levels of x
}
\section{Warning}{
Do NOT use this function unless you are ME, a package
developer, or a jedi user who really knows what is doing
(seriously!)
}
\keyword{internal}
|
## Data Science Capstone
library(NLP)
library(tm)
library(qdapRegex)
library(ggplot2)
library(plyr)
library(RColorBrewer)
library(wordcloud)
library(sqldf)
library(akmeans)
library(NbClust)
library(lsa)
library(ape)
library(arules)
library(fpc)
library(gridExtra)
library(dendextend)
##library(gridExtra)
## Mac
setwd("/Users/jpmoraga/Desktop/Data Science Capstone")
## Windows
## setwd("C:/Users/jmoraga/Desktop/respaldo jpmoraga/Data Science Certification/Data Science Capstone")
source("Functions.R")
## Mac
setwd("/Users/jpmoraga/Desktop/Data Science Capstone/Data")
## Windows
## setwd("C:/Users/jmoraga/Desktop/respaldo jpmoraga/Data Science Certification/Data Science Capstone/Data")
# filepath <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
#
# download.file(filepath,paste(getwd(),"Coursera-SwiftKey.zip",sep = "/"))
#
# unzip("Coursera-SwiftKey.zip")
##setwd("de_DE")
############################################# Quiz 1 ###################################################
setwd(paste(getwd(),"Final/en_US",sep = "/"))
rbind(file.info(list.files()[1]),file.info(list.files()[2]),file.info(list.files()[3]))
con1 <- file(list.files()[1], "r")
con2 <- file(list.files()[2], "r")
con3 <- file(list.files()[3], "r")
en_Blogs <- readLines(con1, encoding = "UTF-8")
en_News <- readLines(con2, encoding = "UTF-8")
en_Tweets <- readLines(con3, encoding = "UTF-8")
close(con1)
close(con2)
close(con3)
df <- data.frame(
list(Blogs = c(Size_bytes = object.size(en_Blogs),Lines = length(en_Blogs)),
News = c(Size_bytes = object.size(en_News),Lines = length(en_News)),
Tweets = c(Size_bytes = object.size(en_Tweets),Lines = length(en_Tweets)))
)
## Files size chart
bs <- barplot(c(Blogs=df$Blogs[1],News=df$News[1],Tweets=df$Tweets[1]), xlab = "Size (bytes)", main = "Files size", axes = FALSE)
text(x = bs, y = c(df$Blogs[1],df$News[1],df$Tweets[1]), labels = c(df$Blogs[1],df$News[1],df$Tweets[1]), pos = 1)
## Files lines chart
bl <- barplot(c(df$Blogs[2],df$News[2],df$Tweets[2]), xlab = "Lines", main = "Lines", axes = FALSE, ylim = c(0,3000000))
text(x = bl, y = c(df$Blogs[2],df$News[2],df$Tweets[2]), labels = c(df$Blogs[2],df$News[2],df$Tweets[2]), pos = 3)
head(sort(nchar(en_Blogs), decreasing = TRUE),1)
head(sort(nchar(en_News), decreasing = TRUE),1)
head(sort(nchar(en_Tweets), decreasing = TRUE),1)
sample_size <- 30000
set.seed(1)
sample_Blogs <- sample(en_Blogs, sample_size)
sample_News <- sample(en_News, sample_size)
sample_Tweets <- sample(en_Tweets, sample_size)
# # Create clean-up functions
# cleanText = function(x){
# # This simple function does not cover ambiguities such as 's or 'd
# x <- gsub("let's","let us",x)
# x <- gsub("I'm","I am",x)
# x <- gsub("'re", " are",x)
# x <- gsub("n't", " not",x)
# x <- gsub("'ll", " will",x)
# x <- gsub("'ve"," have",x)
# x <- gsub("’|“|â€", "", x)
# x <- gsub("[^a-zA-Z ]", "", x)
# return(x)
# }
## Clean text
sample_Blogs <- sapply(sample_Blogs, cleanText)
sample_News <- sapply(sample_News, cleanText)
sample_Tweets <- sapply(sample_Tweets, cleanText)
## Remove emoticons
sample_Blogs <- rm_emoticon(sample_Blogs, trim = TRUE ,clean = TRUE)
sample_News <- rm_emoticon(sample_News, trim = TRUE ,clean = TRUE)
sample_Tweets <- rm_emoticon(sample_Tweets, trim = TRUE ,clean = TRUE)
## Remove lines with length = 0
sample_Blogs <- sample_Blogs[nchar(sample_Blogs) != 0]
sample_News <- sample_News[nchar(sample_News) != 0]
sample_Tweets <- sample_Tweets[nchar(sample_Tweets) != 0]
## tolower
sample_Blogs <- tolower(sample_Blogs)
sample_News <- tolower(sample_News)
sample_Tweets <- tolower(sample_Tweets)
word_Blogs <- sapply(sample_Blogs, wordpunct_tokenizer)
word_News <- sapply(sample_News, wordpunct_tokenizer)
word_Tweets <- sapply(sample_Tweets, wordpunct_tokenizer)
########################## Analysis including stop words #############################
## Tokenization
Token_Blogs <- NULL
for(i in 1:length(sample_Blogs))
{
s <- sample_Blogs[i]
w <- word_Blogs[[i]]
s <- String(s)
s <- s[w]
Token_Blogs[[length(Token_Blogs)+1]] <- s
i = i + 1
}
Token_News <- NULL
for(i in 1:length(sample_News))
{
s <- sample_News[i]
w <- word_News[[i]]
s <- String(s)
s <- s[w]
Token_News[[length(Token_News)+1]] <- s
i = i + 1
}
Token_Tweets <- NULL
for(i in 1:length(sample_Tweets))
{
s <- sample_Tweets[i]
w <- word_Tweets[[i]]
s <- String(s)
s <- s[w]
Token_Tweets[[length(Token_Tweets)+1]] <- s
i = i + 1
}
############### Frequencies analysis including stopwords #################
## 1-gram
## Blogs
All_Blogs <- as.data.frame(unlist(Token_Blogs))
colnames(All_Blogs) <- "Words"
Freq_Blogs <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Blogs_bar <- Freq_Blogs[head(order(-Freq_Blogs$Words), 20),]
ggplot(Freq_Blogs_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart (Blogs)") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs$W, Freq_Blogs$Words, min.freq = 2000, max.words = 50, random.order=FALSE)
## News
All_News <- as.data.frame(unlist(Token_News))
colnames(All_News) <- "Words"
Freq_News <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_News_bar <- Freq_News[head(order(-Freq_News$Words), 20),]
ggplot(Freq_News_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News$W, Freq_News$Words, min.freq = 1000, max.words = 50, random.order=FALSE)
## Tweets
All_Tweets <- as.data.frame(unlist(Token_Tweets))
colnames(All_Tweets) <- "Words"
Freq_Tweets <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Tweets_bar <- Freq_Tweets[head(order(-Freq_Tweets$Words), 20),]
ggplot(Freq_Tweets_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets$W, Freq_Tweets$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## All together
All_Words <- rbind(All_Blogs, All_News, All_Tweets)
colnames(All_Words) <- "Words"
Freq_Words <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Words_bar <- Freq_Words[head(order(-Freq_Words$Words), 20),]
ggplot(Freq_Words_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words$W, Freq_Words$Words, min.freq = 2000, max.words = 50, random.order=FALSE)
## 2-gram
Token_Blogs2 <- Token_Blogs[lengths(Token_Blogs, use.names = TRUE) > 1]
Token_News2 <- Token_News[lengths(Token_News, use.names = TRUE) > 1]
Token_Tweets2 <- Token_Tweets[lengths(Token_Tweets, use.names = TRUE) > 1]
## Blogs 2
ng2_Blogs <- NULL
for (j in 1:length(Token_Blogs2))
{
ng2 <- ngrams(unlist(Token_Blogs2[[j]]), 2L)
n2 <- NULL
for (k in 1:length(ng2))
{
n1 <- NULL
n <- unlist(ng2[k])
n1 <- paste(n[1],n[2],sep = " ")
n2 <- rbind(n2,n1)
k = k + 1
}
ng2_Blogs <- rbind(ng2_Blogs,n2)
print(c(j,length(Token_Blogs2),j*100/length(Token_Blogs2), "2-blogs"))
}
All_Blogs2 <- as.data.frame(ng2_Blogs, row.names = seq(1:nrow(ng2_Blogs)))
colnames(All_Blogs2) <- "Words"
remove(ng2_Blogs)
Freq_Blogs2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Blogs_bar2 <- Freq_Blogs2[head(order(-Freq_Blogs2$Words), 20),]
ggplot(Freq_Blogs_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs2$W, Freq_Blogs2$Words, min.freq = 50, max.words = 30, random.order=FALSE)
## News 2
ng2_News <- NULL
for (j in 1:length(Token_News2))
{
ng2 <- ngrams(unlist(Token_News2[[j]]), 2L)
n2 <- NULL
for (k in 1:length(ng2))
{
n1 <- NULL
n <- unlist(ng2[k])
n1 <- paste(n[1],n[2],sep = " ")
n2 <- rbind(n2,n1)
k = k + 1
}
ng2_News <- rbind(ng2_News,n2)
print(c(j,length(Token_News2),j*100/length(Token_News2), "2-news"))
}
All_News2 <- as.data.frame(ng2_News, row.names = seq(1:nrow(ng2_News)))
colnames(All_News2) <- "Words"
remove(ng2_News)
Freq_News2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_News_bar2 <- Freq_News2[head(order(-Freq_News2$Words), 20),]
ggplot(Freq_News_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News2$W, Freq_News2$Words, min.freq = 50, max.words = 30, random.order=FALSE)
## Tweets 2
ng2_Tweets <- NULL
for (j in 1:length(Token_Tweets2))
{
ng2 <- ngrams(unlist(Token_Tweets2[[j]]), 2L)
n2 <- NULL
for (k in 1:length(ng2))
{
n1 <- NULL
n <- unlist(ng2[k])
n1 <- paste(n[1],n[2],sep = " ")
n2 <- rbind(n2,n1)
k = k + 1
}
ng2_Tweets <- rbind(ng2_Tweets,n2)
print(c(j,length(Token_Tweets2),j*100/length(Token_Tweets2), "2-tweets"))
}
All_Tweets2 <- as.data.frame(ng2_Tweets, row.names = seq(1:nrow(ng2_Tweets)))
colnames(All_Tweets2) <- "Words"
remove(ng2_Tweets)
Freq_Tweets2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Tweets_bar2 <- Freq_Tweets2[head(order(-Freq_Tweets2$Words), 20),]
ggplot(Freq_Tweets_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets2$W, Freq_Tweets2$Words, min.freq = 50, max.words = 30, random.order=FALSE)
## All together 2
All_Words2 <- rbind(All_Blogs2, All_News2, All_Tweets2)
colnames(All_Words2) <- "Words"
Freq_Words2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Words_bar2 <- Freq_Words2[head(order(-Freq_Words2$Words), 20),]
ggplot(Freq_Words_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words2$W, Freq_Words2$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## 3-gram
Token_Blogs3 <- Token_Blogs[lengths(Token_Blogs, use.names = TRUE) > 2]
Token_News3 <- Token_News[lengths(Token_News, use.names = TRUE) > 2]
Token_Tweets3 <- Token_Tweets[lengths(Token_Tweets, use.names = TRUE) > 2]
## Blogs 3: build every word trigram from the blog token lists, then chart
## the 20 most frequent trigrams and draw a wordcloud.
## Rewrite notes: the original grew a matrix with rbind() in two nested
## loops (O(n^2) copying) and manually incremented the loop counter, a
## no-op inside `for`. A preallocated list + vapply() does the same work
## in one pass; seq_along() also guards the empty-input case.
ng3_Blogs <- vector("list", length(Token_Blogs3))
for (j in seq_along(Token_Blogs3)) {
  ng3 <- ngrams(unlist(Token_Blogs3[[j]]), 3L)
  # Collapse each trigram (a 3-element character vector) into one string.
  ng3_Blogs[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Blogs3), j * 100 / length(Token_Blogs3), "3-blogs"))
}
All_Blogs3 <- data.frame(Words = unlist(ng3_Blogs), stringsAsFactors = FALSE)
remove(ng3_Blogs)
# Count each distinct trigram (W = trigram text, Words = count).
Freq_Blogs3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Blogs_bar3 <- Freq_Blogs3[head(order(-Freq_Blogs3$Words), 20), ]
ggplot(Freq_Blogs_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs3$W, Freq_Blogs3$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## News 3: build every word trigram from the news token lists, then chart
## the 20 most frequent trigrams and draw a wordcloud.
## Rewrite notes: replaces the O(n^2) rbind()-in-a-loop growth and the
## no-op manual counter increment; also adds unlist() for consistency with
## the blogs/tweets loops (a no-op on an already-flat character vector).
ng3_News <- vector("list", length(Token_News3))
for (j in seq_along(Token_News3)) {
  ng3 <- ngrams(unlist(Token_News3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_News[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_News3), j * 100 / length(Token_News3), "3-news"))
}
All_News3 <- data.frame(Words = unlist(ng3_News), stringsAsFactors = FALSE)
remove(ng3_News)
# Count each distinct trigram (W = trigram text, Words = count).
Freq_News3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_News_bar3 <- Freq_News3[head(order(-Freq_News3$Words), 20), ]
ggplot(Freq_News_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News3$W, Freq_News3$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## Tweets 3: build every word trigram from the tweet token lists, then
## chart the 20 most frequent trigrams and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng3_Tweets <- vector("list", length(Token_Tweets3))
for (j in seq_along(Token_Tweets3)) {
  ng3 <- ngrams(unlist(Token_Tweets3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_Tweets[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Tweets3), j * 100 / length(Token_Tweets3), "3-tweets"))
}
All_Tweets3 <- data.frame(Words = unlist(ng3_Tweets), stringsAsFactors = FALSE)
remove(ng3_Tweets)
# Count each distinct trigram (W = trigram text, Words = count).
Freq_Tweets3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Tweets_bar3 <- Freq_Tweets3[head(order(-Freq_Tweets3$Words), 20), ]
ggplot(Freq_Tweets_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets3$W, Freq_Tweets3$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## All together 3
## Pool the per-corpus trigram tables and count each distinct trigram.
## The colnames() call is commented out because each input already carries
## the "Words" column name, which rbind() preserves.
All_Words3 <- rbind(All_Blogs3, All_News3, All_Tweets3)
##colnames(All_Words3) <- "Words"
Freq_Words3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Words_bar3 <- Freq_Words3[head(order(-Freq_Words3$Words), 20),]
ggplot(Freq_Words_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words3$W, Freq_Words3$Words, min.freq = 50, max.words = 20, random.order=FALSE)
## 4-gram
## Keep only lines with at least 4 tokens (needed to form a 4-gram).
Token_Blogs4 <- Token_Blogs[lengths(Token_Blogs, use.names = TRUE) > 3]
Token_News4 <- Token_News[lengths(Token_News, use.names = TRUE) > 3]
Token_Tweets4 <- Token_Tweets[lengths(Token_Tweets, use.names = TRUE) > 3]
## Blogs 4: build every word 4-gram from the blog token lists and count
## them (no plots for the 4-gram sections).
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng4_Blogs <- vector("list", length(Token_Blogs4))
for (j in seq_along(Token_Blogs4)) {
  ng4 <- ngrams(unlist(Token_Blogs4[[j]]), 4L)
  # Collapse each 4-gram into one space-separated string.
  ng4_Blogs[[j]] <- vapply(ng4, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Blogs4), j * 100 / length(Token_Blogs4), "4-blogs"))
}
All_Blogs4 <- data.frame(Words = unlist(ng4_Blogs), stringsAsFactors = FALSE)
remove(ng4_Blogs)
# Count each distinct 4-gram (W = 4-gram text, Words = count).
Freq_Blogs4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs4 GROUP BY Words ORDER BY COUNT(Words) DESC")
## News 4: build every word 4-gram from the news token lists and count
## them.
## Rewrite notes: replaces the O(n^2) rbind()-in-a-loop growth and no-op
## counter increment; adds unlist() for consistency with the other loops
## (a no-op on an already-flat character vector).
ng4_News <- vector("list", length(Token_News4))
for (j in seq_along(Token_News4)) {
  ng4 <- ngrams(unlist(Token_News4[[j]]), 4L)
  # Collapse each 4-gram into one space-separated string.
  ng4_News[[j]] <- vapply(ng4, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_News4), j * 100 / length(Token_News4), "4-news"))
}
All_News4 <- data.frame(Words = unlist(ng4_News), stringsAsFactors = FALSE)
remove(ng4_News)
# Count each distinct 4-gram (W = 4-gram text, Words = count).
Freq_News4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News4 GROUP BY Words ORDER BY COUNT(Words) DESC")
## Tweets 4: build every word 4-gram from the tweet token lists and count
## them.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng4_Tweets <- vector("list", length(Token_Tweets4))
for (j in seq_along(Token_Tweets4)) {
  ng4 <- ngrams(unlist(Token_Tweets4[[j]]), 4L)
  # Collapse each 4-gram into one space-separated string.
  ng4_Tweets[[j]] <- vapply(ng4, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Tweets4), j * 100 / length(Token_Tweets4), "4-tweets"))
}
All_Tweets4 <- data.frame(Words = unlist(ng4_Tweets), stringsAsFactors = FALSE)
remove(ng4_Tweets)
# Count each distinct 4-gram (W = 4-gram text, Words = count).
Freq_Tweets4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets4 GROUP BY Words ORDER BY COUNT(Words) DESC")
## All together 4
## Pool the per-corpus 4-gram tables and count each distinct 4-gram.
All_Words4 <- rbind(All_Blogs4, All_News4, All_Tweets4)
##colnames(All_Words3) <- "Words"
Freq_Words4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words4 GROUP BY Words ORDER BY COUNT(Words) DESC")
############### Frequencies analysis without stopwords #################
## Remove stopwords
## Create function
## nsw = function(x){x[x %in% stopwords() == FALSE]}
# nsw() is presumably defined in Functions.R (sourced at startup) and
# matches the commented definition above -- TODO confirm.
# NOTE(review): these use Token_*2 (lines with >= 2 tokens), not the full
# Token_* lists, so 1-token lines are excluded from the no-stopword
# analysis -- verify this is intentional.
Token_Blogs_nsw <- lapply(Token_Blogs2,nsw)
Token_News_nsw <- lapply(Token_News2,nsw)
Token_Tweets_nsw <- lapply(Token_Tweets2,nsw)
## 1-gram
## Single-word frequencies after stopword removal; these sqldf calls have
## no ORDER BY, so ordering for the bar charts is done via order(-Words).
## Blogs
All_Blogs_nsw <- as.data.frame(unlist(Token_Blogs_nsw))
colnames(All_Blogs_nsw) <- "Words"
Freq_Blogs_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs_nsw GROUP BY Words")
Freq_Blogs_bar_nsw <- Freq_Blogs_nsw[head(order(-Freq_Blogs_nsw$Words), 20),]
ggplot(Freq_Blogs_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopword") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs_nsw$W, Freq_Blogs_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## News
All_News_nsw <- as.data.frame(unlist(Token_News_nsw))
colnames(All_News_nsw) <- "Words"
Freq_News_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News_nsw GROUP BY Words")
Freq_News_bar_nsw <- Freq_News_nsw[head(order(-Freq_News_nsw$Words), 20),]
ggplot(Freq_News_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News_nsw$W, Freq_News_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## Tweets
All_Tweets_nsw <- as.data.frame(unlist(Token_Tweets_nsw))
colnames(All_Tweets_nsw) <- "Words"
Freq_Tweets_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets_nsw GROUP BY Words")
Freq_Tweets_bar_nsw <- Freq_Tweets_nsw[head(order(-Freq_Tweets_nsw$Words), 20),]
ggplot(Freq_Tweets_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets_nsw$W, Freq_Tweets_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## All together
## Pool the three corpora and repeat the count/plot.
All_Words_nsw <- rbind(All_Blogs_nsw, All_News_nsw, All_Tweets_nsw)
colnames(All_Words_nsw) <- "Words"
Freq_Words_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words_nsw GROUP BY Words")
Freq_Words_bar_nsw <- Freq_Words_nsw[head(order(-Freq_Words_nsw$Words), 20),]
ggplot(Freq_Words_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words_nsw$W, Freq_Words_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## 2-gram
## Keep only lines that still have at least 2 tokens after stopword removal.
Token_Blogs_nsw2 <- Token_Blogs_nsw[lengths(Token_Blogs_nsw, use.names = TRUE) > 1]
Token_News_nsw2 <- Token_News_nsw[lengths(Token_News_nsw, use.names = TRUE) > 1]
Token_Tweets_nsw2 <- Token_Tweets_nsw[lengths(Token_Tweets_nsw, use.names = TRUE) > 1]
## Blogs 2 (no stopwords): build every bigram from the stopword-free blog
## tokens, count them, chart the top 20 and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth; drops the no-op `k = k + 1` and a
## commented-out debug loop header.
ng2_Blogs_nsw <- vector("list", length(Token_Blogs_nsw2))
for (j in seq_along(Token_Blogs_nsw2)) {
  ng2 <- ngrams(unlist(Token_Blogs_nsw2[[j]]), 2L)
  # Collapse each bigram into one space-separated string.
  ng2_Blogs_nsw[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Blogs_nsw2), j * 100 / length(Token_Blogs_nsw2), "2-blogs-nsw"))
}
All_Blogs2_nsw <- data.frame(Words = unlist(ng2_Blogs_nsw), stringsAsFactors = FALSE)
remove(ng2_Blogs_nsw)
# Count each distinct bigram; no ORDER BY here -- ordering for the bar
# chart is done below via order(-Words).
Freq_Blogs2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs2_nsw GROUP BY Words")
Freq_Blogs_bar2_nsw <- Freq_Blogs2_nsw[head(order(-Freq_Blogs2_nsw$Words), 20), ]
ggplot(Freq_Blogs_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs2_nsw$W, Freq_Blogs2_nsw$Words, min.freq = 50, max.words = 50, random.order = FALSE)
## News 2 (no stopwords): build every bigram from the stopword-free news
## tokens, count them, chart the top 20 and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng2_News_nsw <- vector("list", length(Token_News_nsw2))
for (j in seq_along(Token_News_nsw2)) {
  ng2 <- ngrams(unlist(Token_News_nsw2[[j]]), 2L)
  # Collapse each bigram into one space-separated string.
  ng2_News_nsw[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_News_nsw2), j * 100 / length(Token_News_nsw2), "2-news-nsw"))
}
All_News2_nsw <- data.frame(Words = unlist(ng2_News_nsw), stringsAsFactors = FALSE)
remove(ng2_News_nsw)
# Count each distinct bigram; ordering is applied below via order(-Words).
Freq_News2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News2_nsw GROUP BY Words")
Freq_News_bar2_nsw <- Freq_News2_nsw[head(order(-Freq_News2_nsw$Words), 20), ]
ggplot(Freq_News_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News2_nsw$W, Freq_News2_nsw$Words, min.freq = 50, max.words = 50, random.order = FALSE)
## Tweets 2 (no stopwords): build every bigram from the stopword-free
## tweet tokens, count them, chart the top 20 and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng2_Tweets_nsw <- vector("list", length(Token_Tweets_nsw2))
for (j in seq_along(Token_Tweets_nsw2)) {
  ng2 <- ngrams(unlist(Token_Tweets_nsw2[[j]]), 2L)
  # Collapse each bigram into one space-separated string.
  ng2_Tweets_nsw[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Tweets_nsw2), j * 100 / length(Token_Tweets_nsw2), "2-tweets-nsw"))
}
All_Tweets2_nsw <- data.frame(Words = unlist(ng2_Tweets_nsw), stringsAsFactors = FALSE)
remove(ng2_Tweets_nsw)
# Count each distinct bigram; ordering is applied below via order(-Words).
Freq_Tweets2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets2_nsw GROUP BY Words")
Freq_Tweets_bar2_nsw <- Freq_Tweets2_nsw[head(order(-Freq_Tweets2_nsw$Words), 20), ]
ggplot(Freq_Tweets_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets2_nsw$W, Freq_Tweets2_nsw$Words, min.freq = 50, max.words = 50, random.order = FALSE)
## All together 2
## Pool the per-corpus no-stopword bigram tables and count each bigram.
All_Words2_nsw <- rbind(All_Blogs2_nsw, All_News2_nsw, All_Tweets2_nsw)
colnames(All_Words2_nsw) <- "Words"
Freq_Words2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words2_nsw GROUP BY Words")
Freq_Words_bar2_nsw <- Freq_Words2_nsw[head(order(-Freq_Words2_nsw$Words), 20),]
ggplot(Freq_Words_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words2_nsw$W, Freq_Words2_nsw$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## 3-gram
## Keep only lines that still have at least 3 tokens after stopword removal.
Token_Blogs_nsw3 <- Token_Blogs_nsw[lengths(Token_Blogs_nsw, use.names = TRUE) > 2]
Token_News_nsw3 <- Token_News_nsw[lengths(Token_News_nsw, use.names = TRUE) > 2]
Token_Tweets_nsw3 <- Token_Tweets_nsw[lengths(Token_Tweets_nsw, use.names = TRUE) > 2]
## Blogs 3 (no stopwords): build every trigram from the stopword-free blog
## tokens, count them, chart the top 20 and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op counter increment; unlist() is
## added for consistency with the other loops (no-op on a flat vector).
ng3_Blogs_nsw <- vector("list", length(Token_Blogs_nsw3))
for (j in seq_along(Token_Blogs_nsw3)) {
  ng3 <- ngrams(unlist(Token_Blogs_nsw3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_Blogs_nsw[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Blogs_nsw3), j * 100 / length(Token_Blogs_nsw3), "3-blogs-nsw"))
}
All_Blogs3_nsw <- data.frame(Words = unlist(ng3_Blogs_nsw), stringsAsFactors = FALSE)
remove(ng3_Blogs_nsw)
# Count each distinct trigram; ordering is applied below via order(-Words).
Freq_Blogs3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs3_nsw GROUP BY Words")
Freq_Blogs_bar3_nsw <- Freq_Blogs3_nsw[head(order(-Freq_Blogs3_nsw$Words), 20), ]
ggplot(Freq_Blogs_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs3_nsw$W, Freq_Blogs3_nsw$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## News 3 (no stopwords): build every trigram from the stopword-free news
## tokens, count them, chart the top 20 and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op counter increment; unlist() is
## added for consistency with the other loops (no-op on a flat vector).
ng3_News_nsw <- vector("list", length(Token_News_nsw3))
for (j in seq_along(Token_News_nsw3)) {
  ng3 <- ngrams(unlist(Token_News_nsw3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_News_nsw[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_News_nsw3), j * 100 / length(Token_News_nsw3), "3-news-nsw"))
}
All_News3_nsw <- data.frame(Words = unlist(ng3_News_nsw), stringsAsFactors = FALSE)
remove(ng3_News_nsw)
# Count each distinct trigram; ordering is applied below via order(-Words).
Freq_News3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News3_nsw GROUP BY Words")
Freq_News_bar3_nsw <- Freq_News3_nsw[head(order(-Freq_News3_nsw$Words), 20), ]
ggplot(Freq_News_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News3_nsw$W, Freq_News3_nsw$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## Tweets 3 (no stopwords): build every trigram from the stopword-free
## tweet tokens, count them, chart the top 20 and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op counter increment; unlist() is
## added for consistency with the other loops (no-op on a flat vector).
ng3_Tweets_nsw <- vector("list", length(Token_Tweets_nsw3))
for (j in seq_along(Token_Tweets_nsw3)) {
  ng3 <- ngrams(unlist(Token_Tweets_nsw3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_Tweets_nsw[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Tweets_nsw3), j * 100 / length(Token_Tweets_nsw3), "3-tweets-nsw"))
}
All_Tweets3_nsw <- data.frame(Words = unlist(ng3_Tweets_nsw), stringsAsFactors = FALSE)
remove(ng3_Tweets_nsw)
# Count each distinct trigram; ordering is applied below via order(-Words).
Freq_Tweets3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets3_nsw GROUP BY Words")
Freq_Tweets_bar3_nsw <- Freq_Tweets3_nsw[head(order(-Freq_Tweets3_nsw$Words), 20), ]
ggplot(Freq_Tweets_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets3_nsw$W, Freq_Tweets3_nsw$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## All together 3
## Pool the per-corpus no-stopword trigram tables and count each trigram.
All_Words3_nsw <- rbind(All_Blogs3_nsw, All_News3_nsw, All_Tweets3_nsw)
colnames(All_Words3_nsw) <- "Words"
Freq_Words3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words3_nsw GROUP BY Words")
Freq_Words_bar3_nsw <- Freq_Words3_nsw[head(order(-Freq_Words3_nsw$Words), 20),]
ggplot(Freq_Words_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words3_nsw$W, Freq_Words3_nsw$Words, min.freq = 50, max.words = 20, random.order=FALSE)
########################## Cluster Analysis #############################
## Without stopwords
## Build one document per sampled line from the stopword-free tokens,
## turn each corpus into a term-document matrix, and cluster blog lines
## by cosine similarity of their term vectors.
# Token_Blogs_nsw1 <- as.vector(Token_Blogs_nsw)
# Token_News_nsw1 <- as.vector(Token_News_nsw)
# Token_Tweets_nsw1 <- as.vector(Token_Tweets_nsw)
Corp_Blogs <- VCorpus(VectorSource(Token_Blogs_nsw))
Corp_News <- VCorpus(VectorSource(Token_News_nsw))
Corp_Tweets <- VCorpus(VectorSource(Token_Tweets_nsw))
# NOTE: the Corp_* names are reused for corpus -> TDM -> matrix stages.
Corp_Blogs <- TermDocumentMatrix(Corp_Blogs)
Corp_News <- TermDocumentMatrix(Corp_News)
Corp_Tweets <- TermDocumentMatrix(Corp_Tweets)
# Drop terms absent from more than 98% of documents.
sparsity <- 0.98
Corp_Blogs <- removeSparseTerms(Corp_Blogs, sparsity)
Corp_News <- removeSparseTerms(Corp_News, sparsity)
Corp_Tweets <- removeSparseTerms(Corp_Tweets, sparsity)
Corp_Blogs <- as.matrix(Corp_Blogs)
Corp_News <- as.matrix(Corp_News)
Corp_Tweets <- as.matrix(Corp_Tweets)
# Drop documents (columns) that lost all their terms to the sparsity cut;
# cosine() would otherwise divide by a zero norm.
Corp_Blogs <- Corp_Blogs[,colSums(Corp_Blogs) != 0]
Corp_News <- Corp_News[,colSums(Corp_News) != 0]
Corp_Tweets <- Corp_Tweets[,colSums(Corp_Tweets) != 0]
dim(Corp_Blogs)
# Cosine similarity between documents, converted to a distance (1 - cos)
# for hierarchical clustering. Only the blog corpus is clustered below.
dcos <- cosine(Corp_Blogs)
dcos <- as.dist(1-dcos)
hc <- hclust(dcos, "ward.D2")
## http://artax.karlin.mff.cuni.cz/r-help/library/NbClust/html/NbClust.html
# NbClust picks the best cluster count by the "kl" index.
# NOTE(review): hclust uses "ward.D2" while NbClust uses "ward.D" --
# confirm the mismatch is intentional.
res <- NbClust(t(Corp_Blogs), dcos, distance = NULL, max.nc = 100, method = "ward.D", index = "kl")
res$Best.nc
## https://rpubs.com/gaston/dendrograms
# Cut the tree at the NbClust-recommended number of clusters.
hc_Blogs <- cutree(hc, k = res$Best.nc[1])
hc_Blogs_df <- as.data.frame(hc_Blogs)
## Cluster size
table(hc_Blogs_df)
## Cluster Dendrogram colour
dend <- as.dendrogram(hc)
d2=color_branches(dend,k=res$Best.nc[1]) # auto-coloring 5 clusters of branches.
plot(d2)
## Look into each cluster: pick one cluster id and chart the words that
## appear in more than 10% of that cluster's lines.
# NOTE: the original named this variable `c`, shadowing base::c(); renamed
# to cluster_id to avoid masking the concatenation function. The plotted
# title is unchanged because paste() receives the same value.
cluster_id <- 3
cluster <- subset(hc_Blogs_df, hc_Blogs_df$hc_Blogs == cluster_id)
# Gather the stopword-free tokens of the lines assigned to this cluster
# (row names of hc_Blogs_df index into Token_Blogs_nsw).
Words <- as.data.frame(unlist(Token_Blogs_nsw[as.integer(row.names(cluster))]))
colnames(Words) <- "Words"
Freq <- sqldf("SELECT Words W, COUNT(Words) Words FROM Words GROUP BY Words")
# Keep words with a count above 10% of the cluster size.
Freq_bar <- Freq[Freq$Words > nrow(cluster) * 0.1, ]
ggplot(Freq_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle(paste("Frequency chart Cluster", cluster_id, "(Size[", nrow(cluster), "])", sep = " ")) + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
|
/Code.R
|
no_license
|
jpmoraga/DataScienceCapstone
|
R
| false
| false
| 30,756
|
r
|
## Data Science Capstone
library(NLP)
library(tm)
library(qdapRegex)
library(ggplot2)
library(plyr)
library(RColorBrewer)
library(wordcloud)
library(sqldf)
library(akmeans)
library(NbClust)
library(lsa)
library(ape)
library(arules)
library(fpc)
library(gridExtra)
library(dendextend)
##library(gridExtra)
## Mac
# NOTE(review): hard-coded setwd() calls make the script machine-specific;
# kept as-is because downstream paths depend on them.
setwd("/Users/jpmoraga/Desktop/Data Science Capstone")
## Windows
## setwd("C:/Users/jmoraga/Desktop/respaldo jpmoraga/Data Science Certification/Data Science Capstone")
# Helper functions (cleanText, nsw, ...) are defined in Functions.R.
source("Functions.R")
## Mac
setwd("/Users/jpmoraga/Desktop/Data Science Capstone/Data")
## Windows
## setwd("C:/Users/jmoraga/Desktop/respaldo jpmoraga/Data Science Certification/Data Science Capstone/Data")
# filepath <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
#
# download.file(filepath,paste(getwd(),"Coursera-SwiftKey.zip",sep = "/"))
#
# unzip("Coursera-SwiftKey.zip")
##setwd("de_DE")
############################################# Quiz 1 ###################################################
## Read the three English corpora (blogs, news, tweets) from Final/en_US.
## list.files() order determines which file each connection reads --
## presumably blogs, news, twitter alphabetically; verify on disk.
setwd(paste(getwd(),"Final/en_US",sep = "/"))
rbind(file.info(list.files()[1]),file.info(list.files()[2]),file.info(list.files()[3]))
con1 <- file(list.files()[1], "r")
con2 <- file(list.files()[2], "r")
con3 <- file(list.files()[3], "r")
en_Blogs <- readLines(con1, encoding = "UTF-8")
en_News <- readLines(con2, encoding = "UTF-8")
en_Tweets <- readLines(con3, encoding = "UTF-8")
close(con1)
close(con2)
close(con3)
## Summary table: in-memory size (bytes) and line count for each corpus.
df <- data.frame(
list(Blogs = c(Size_bytes = object.size(en_Blogs),Lines = length(en_Blogs)),
News = c(Size_bytes = object.size(en_News),Lines = length(en_News)),
Tweets = c(Size_bytes = object.size(en_Tweets),Lines = length(en_Tweets)))
)
## Files size chart
bs <- barplot(c(Blogs=df$Blogs[1],News=df$News[1],Tweets=df$Tweets[1]), xlab = "Size (bytes)", main = "Files size", axes = FALSE)
text(x = bs, y = c(df$Blogs[1],df$News[1],df$Tweets[1]), labels = c(df$Blogs[1],df$News[1],df$Tweets[1]), pos = 1)
## Files lines chart
bl <- barplot(c(df$Blogs[2],df$News[2],df$Tweets[2]), xlab = "Lines", main = "Lines", axes = FALSE, ylim = c(0,3000000))
text(x = bl, y = c(df$Blogs[2],df$News[2],df$Tweets[2]), labels = c(df$Blogs[2],df$News[2],df$Tweets[2]), pos = 3)
# Character count of the longest line in each corpus.
head(sort(nchar(en_Blogs), decreasing = TRUE),1)
head(sort(nchar(en_News), decreasing = TRUE),1)
head(sort(nchar(en_Tweets), decreasing = TRUE),1)
## Draw a reproducible 30k-line sample from each corpus.
sample_size <- 30000
set.seed(1)
sample_Blogs <- sample(en_Blogs, sample_size)
sample_News <- sample(en_News, sample_size)
sample_Tweets <- sample(en_Tweets, sample_size)
# # Create clean-up functions
# cleanText = function(x){
# # This simple function does not cover ambiguities such as 's or 'd
# x <- gsub("let's","let us",x)
# x <- gsub("I'm","I am",x)
# x <- gsub("'re", " are",x)
# x <- gsub("n't", " not",x)
# x <- gsub("'ll", " will",x)
# x <- gsub("'ve"," have",x)
# x <- gsub("’|“|â€", "", x)
# x <- gsub("[^a-zA-Z ]", "", x)
# return(x)
# }
## Clean text
# cleanText() comes from Functions.R; the commented definition above
# presumably mirrors it (contraction expansion + non-letter removal) --
# TODO confirm against Functions.R.
sample_Blogs <- sapply(sample_Blogs, cleanText)
sample_News <- sapply(sample_News, cleanText)
sample_Tweets <- sapply(sample_Tweets, cleanText)
## Remove emoticons
sample_Blogs <- rm_emoticon(sample_Blogs, trim = TRUE ,clean = TRUE)
sample_News <- rm_emoticon(sample_News, trim = TRUE ,clean = TRUE)
sample_Tweets <- rm_emoticon(sample_Tweets, trim = TRUE ,clean = TRUE)
## Remove lines with length = 0
sample_Blogs <- sample_Blogs[nchar(sample_Blogs) != 0]
sample_News <- sample_News[nchar(sample_News) != 0]
sample_Tweets <- sample_Tweets[nchar(sample_Tweets) != 0]
## tolower
sample_Blogs <- tolower(sample_Blogs)
sample_News <- tolower(sample_News)
sample_Tweets <- tolower(sample_Tweets)
# Locate word spans in each line with NLP's word/punctuation tokenizer;
# the spans are applied to the String objects in the tokenization loops.
word_Blogs <- sapply(sample_Blogs, wordpunct_tokenizer)
word_News <- sapply(sample_News, wordpunct_tokenizer)
word_Tweets <- sapply(sample_Tweets, wordpunct_tokenizer)
########################## Analysis including stop words #############################
## Tokenization: for each sampled line, extract the word substrings
## located by wordpunct_tokenizer() (spans) from the NLP String object.
## Rewrite notes: the original grew each list one element at a time with
## Token[[length(Token)+1]] and manually incremented the loop index, a
## no-op inside `for`. The lists are now preallocated and indexed directly.
Token_Blogs <- vector("list", length(sample_Blogs))
for (i in seq_along(sample_Blogs)) {
  s <- String(sample_Blogs[i])
  # Subsetting a String by spans yields the token character vector.
  Token_Blogs[[i]] <- s[word_Blogs[[i]]]
}
Token_News <- vector("list", length(sample_News))
for (i in seq_along(sample_News)) {
  s <- String(sample_News[i])
  Token_News[[i]] <- s[word_News[[i]]]
}
Token_Tweets <- vector("list", length(sample_Tweets))
for (i in seq_along(sample_Tweets)) {
  s <- String(sample_Tweets[i])
  Token_Tweets[[i]] <- s[word_Tweets[[i]]]
}
############### Frequencies analysis including stopwords #################
## 1-gram
## Per-corpus word frequencies (stopwords kept). In every Freq_* table,
## W is the word and Words is its occurrence count.
## Blogs
All_Blogs <- as.data.frame(unlist(Token_Blogs))
colnames(All_Blogs) <- "Words"
Freq_Blogs <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Blogs_bar <- Freq_Blogs[head(order(-Freq_Blogs$Words), 20),]
ggplot(Freq_Blogs_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart (Blogs)") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs$W, Freq_Blogs$Words, min.freq = 2000, max.words = 50, random.order=FALSE)
## News
All_News <- as.data.frame(unlist(Token_News))
colnames(All_News) <- "Words"
Freq_News <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_News_bar <- Freq_News[head(order(-Freq_News$Words), 20),]
ggplot(Freq_News_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News$W, Freq_News$Words, min.freq = 1000, max.words = 50, random.order=FALSE)
## Tweets
All_Tweets <- as.data.frame(unlist(Token_Tweets))
colnames(All_Tweets) <- "Words"
Freq_Tweets <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Tweets_bar <- Freq_Tweets[head(order(-Freq_Tweets$Words), 20),]
ggplot(Freq_Tweets_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets$W, Freq_Tweets$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## All together
All_Words <- rbind(All_Blogs, All_News, All_Tweets)
colnames(All_Words) <- "Words"
Freq_Words <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Words_bar <- Freq_Words[head(order(-Freq_Words$Words), 20),]
ggplot(Freq_Words_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words$W, Freq_Words$Words, min.freq = 2000, max.words = 50, random.order=FALSE)
## 2-gram
## Keep only lines with at least 2 tokens (needed to form a bigram).
Token_Blogs2 <- Token_Blogs[lengths(Token_Blogs, use.names = TRUE) > 1]
Token_News2 <- Token_News[lengths(Token_News, use.names = TRUE) > 1]
Token_Tweets2 <- Token_Tweets[lengths(Token_Tweets, use.names = TRUE) > 1]
## Blogs 2: build every word bigram from the blog token lists, then chart
## the 20 most frequent bigrams and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng2_Blogs <- vector("list", length(Token_Blogs2))
for (j in seq_along(Token_Blogs2)) {
  ng2 <- ngrams(unlist(Token_Blogs2[[j]]), 2L)
  # Collapse each bigram into one space-separated string.
  ng2_Blogs[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Blogs2), j * 100 / length(Token_Blogs2), "2-blogs"))
}
All_Blogs2 <- data.frame(Words = unlist(ng2_Blogs), stringsAsFactors = FALSE)
remove(ng2_Blogs)
# Count each distinct bigram (W = bigram text, Words = count).
Freq_Blogs2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Blogs_bar2 <- Freq_Blogs2[head(order(-Freq_Blogs2$Words), 20), ]
ggplot(Freq_Blogs_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs2$W, Freq_Blogs2$Words, min.freq = 50, max.words = 30, random.order = FALSE)
## News 2: build every word bigram from the news token lists, then chart
## the 20 most frequent bigrams and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng2_News <- vector("list", length(Token_News2))
for (j in seq_along(Token_News2)) {
  ng2 <- ngrams(unlist(Token_News2[[j]]), 2L)
  # Collapse each bigram into one space-separated string.
  ng2_News[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_News2), j * 100 / length(Token_News2), "2-news"))
}
All_News2 <- data.frame(Words = unlist(ng2_News), stringsAsFactors = FALSE)
remove(ng2_News)
# Count each distinct bigram (W = bigram text, Words = count).
Freq_News2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_News_bar2 <- Freq_News2[head(order(-Freq_News2$Words), 20), ]
ggplot(Freq_News_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News2$W, Freq_News2$Words, min.freq = 50, max.words = 30, random.order = FALSE)
## Tweets 2: build every word bigram from the tweet token lists, then
## chart the 20 most frequent bigrams and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng2_Tweets <- vector("list", length(Token_Tweets2))
for (j in seq_along(Token_Tweets2)) {
  ng2 <- ngrams(unlist(Token_Tweets2[[j]]), 2L)
  # Collapse each bigram into one space-separated string.
  ng2_Tweets[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Tweets2), j * 100 / length(Token_Tweets2), "2-tweets"))
}
All_Tweets2 <- data.frame(Words = unlist(ng2_Tweets), stringsAsFactors = FALSE)
remove(ng2_Tweets)
# Count each distinct bigram (W = bigram text, Words = count).
Freq_Tweets2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Tweets_bar2 <- Freq_Tweets2[head(order(-Freq_Tweets2$Words), 20), ]
ggplot(Freq_Tweets_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets2$W, Freq_Tweets2$Words, min.freq = 50, max.words = 30, random.order = FALSE)
## All together 2
## Pool the per-corpus bigram tables and count each distinct bigram.
All_Words2 <- rbind(All_Blogs2, All_News2, All_Tweets2)
colnames(All_Words2) <- "Words"
Freq_Words2 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words2 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Words_bar2 <- Freq_Words2[head(order(-Freq_Words2$Words), 20),]
ggplot(Freq_Words_bar2, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words2") + ylab("Freq") + ggtitle("2-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words2$W, Freq_Words2$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## 3-gram
## Keep only lines with at least 3 tokens (needed to form a trigram).
Token_Blogs3 <- Token_Blogs[lengths(Token_Blogs, use.names = TRUE) > 2]
Token_News3 <- Token_News[lengths(Token_News, use.names = TRUE) > 2]
Token_Tweets3 <- Token_Tweets[lengths(Token_Tweets, use.names = TRUE) > 2]
## Blogs 3: build every word trigram from the blog token lists, then chart
## the 20 most frequent trigrams and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng3_Blogs <- vector("list", length(Token_Blogs3))
for (j in seq_along(Token_Blogs3)) {
  ng3 <- ngrams(unlist(Token_Blogs3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_Blogs[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Blogs3), j * 100 / length(Token_Blogs3), "3-blogs"))
}
All_Blogs3 <- data.frame(Words = unlist(ng3_Blogs), stringsAsFactors = FALSE)
remove(ng3_Blogs)
# Count each distinct trigram (W = trigram text, Words = count).
Freq_Blogs3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Blogs_bar3 <- Freq_Blogs3[head(order(-Freq_Blogs3$Words), 20), ]
ggplot(Freq_Blogs_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs3$W, Freq_Blogs3$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## News 3: build every word trigram from the news token lists, then chart
## the 20 most frequent trigrams and draw a wordcloud.
## Rewrite notes: replaces the O(n^2) rbind()-in-a-loop growth and no-op
## counter increment; unlist() added for consistency with the other loops
## (a no-op on an already-flat character vector).
ng3_News <- vector("list", length(Token_News3))
for (j in seq_along(Token_News3)) {
  ng3 <- ngrams(unlist(Token_News3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_News[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_News3), j * 100 / length(Token_News3), "3-news"))
}
All_News3 <- data.frame(Words = unlist(ng3_News), stringsAsFactors = FALSE)
remove(ng3_News)
# Count each distinct trigram (W = trigram text, Words = count).
Freq_News3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_News_bar3 <- Freq_News3[head(order(-Freq_News3$Words), 20), ]
ggplot(Freq_News_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News3$W, Freq_News3$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## Tweets 3: build every word trigram from the tweet token lists, then
## chart the 20 most frequent trigrams and draw a wordcloud.
## Rewrite notes: preallocated list + vapply() replaces the O(n^2)
## rbind()-in-a-loop growth and the no-op `k = k + 1` inside `for`.
ng3_Tweets <- vector("list", length(Token_Tweets3))
for (j in seq_along(Token_Tweets3)) {
  ng3 <- ngrams(unlist(Token_Tweets3[[j]]), 3L)
  # Collapse each trigram into one space-separated string.
  ng3_Tweets[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  # Progress report: index, total, percent done, stage tag.
  print(c(j, length(Token_Tweets3), j * 100 / length(Token_Tweets3), "3-tweets"))
}
All_Tweets3 <- data.frame(Words = unlist(ng3_Tweets), stringsAsFactors = FALSE)
remove(ng3_Tweets)
# Count each distinct trigram (W = trigram text, Words = count).
Freq_Tweets3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Tweets_bar3 <- Freq_Tweets3[head(order(-Freq_Tweets3$Words), 20), ]
ggplot(Freq_Tweets_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets3$W, Freq_Tweets3$Words, min.freq = 50, max.words = 20, random.order = FALSE)
## All together 3
## Pool the per-corpus trigram tables and count each distinct trigram.
## The colnames() call is commented out because each input already carries
## the "Words" column name, which rbind() preserves.
All_Words3 <- rbind(All_Blogs3, All_News3, All_Tweets3)
##colnames(All_Words3) <- "Words"
Freq_Words3 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words3 GROUP BY Words ORDER BY COUNT(Words) DESC")
Freq_Words_bar3 <- Freq_Words3[head(order(-Freq_Words3$Words), 20),]
ggplot(Freq_Words_bar3, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words3") + ylab("Freq") + ggtitle("3-gram Frequency chart") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words3$W, Freq_Words3$Words, min.freq = 50, max.words = 20, random.order=FALSE)
## 4-gram
## Keep only lines with at least 4 tokens (needed to form a 4-gram).
Token_Blogs4 <- Token_Blogs[lengths(Token_Blogs, use.names = TRUE) > 3]
Token_News4 <- Token_News[lengths(Token_News, use.names = TRUE) > 3]
Token_Tweets4 <- Token_Tweets[lengths(Token_Tweets, use.names = TRUE) > 3]
## Blogs 4
# Build blog 4-grams; preallocated chunk list replaces the O(n^2) rbind
# growth of the original.
ng4_chunks <- vector("list", length(Token_Blogs4))
for (j in seq_along(Token_Blogs4)) {
  ng4 <- ngrams(unlist(Token_Blogs4[[j]]), 4L)
  # vapply replaces the inner for-loop; the original's `k = k + 1` inside
  # `for (k in ...)` was a no-op because for() resets k each iteration.
  ng4_chunks[[j]] <- vapply(ng4, paste, character(1), collapse = " ")
  print(c(j, length(Token_Blogs4), j * 100 / length(Token_Blogs4), "4-blogs"))
}
All_Blogs4 <- data.frame(Words = unlist(ng4_chunks), stringsAsFactors = FALSE)
remove(ng4_chunks)
# 4-gram frequency table ("W" = n-gram text, "Words" = count).
Freq_Blogs4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs4 GROUP BY Words ORDER BY COUNT(Words) DESC")
## News 4
ng4_chunks <- vector("list", length(Token_News4))
for (j in seq_along(Token_News4)) {
  # BUG FIX: the original omitted unlist() here, unlike every sibling
  # n-gram loop in this script.
  ng4 <- ngrams(unlist(Token_News4[[j]]), 4L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng4_chunks[[j]] <- vapply(ng4, paste, character(1), collapse = " ")
  print(c(j, length(Token_News4), j * 100 / length(Token_News4), "4-news"))
}
All_News4 <- data.frame(Words = unlist(ng4_chunks), stringsAsFactors = FALSE)
remove(ng4_chunks)
# 4-gram frequency table ("W" = n-gram text, "Words" = count).
Freq_News4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News4 GROUP BY Words ORDER BY COUNT(Words) DESC")
## Tweets 4
ng4_chunks <- vector("list", length(Token_Tweets4))
for (j in seq_along(Token_Tweets4)) {
  ng4 <- ngrams(unlist(Token_Tweets4[[j]]), 4L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng4_chunks[[j]] <- vapply(ng4, paste, character(1), collapse = " ")
  print(c(j, length(Token_Tweets4), j * 100 / length(Token_Tweets4), "4-tweets"))
}
All_Tweets4 <- data.frame(Words = unlist(ng4_chunks), stringsAsFactors = FALSE)
remove(ng4_chunks)
Freq_Tweets4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets4 GROUP BY Words ORDER BY COUNT(Words) DESC")
## All together 4
# Pool the three corpora and compute corpus-wide 4-gram frequencies.
All_Words4 <- rbind(All_Blogs4, All_News4, All_Tweets4)
Freq_Words4 <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words4 GROUP BY Words ORDER BY COUNT(Words) DESC")
############### Frequencies analysis without stopwords #################
## Remove stopwords
## Create function
## nsw = function(x){x[x %in% stopwords() == FALSE]}
# nsw() (defined earlier in the file) drops stopwords from a token vector.
Token_Blogs_nsw <- lapply(Token_Blogs2,nsw)
Token_News_nsw <- lapply(Token_News2,nsw)
Token_Tweets_nsw <- lapply(Token_Tweets2,nsw)
## 1-gram
## Blogs
# Flatten all blog tokens into a one-column data frame of single words.
All_Blogs_nsw <- as.data.frame(unlist(Token_Blogs_nsw))
colnames(All_Blogs_nsw) <- "Words"
# Word frequencies ("W" = word, "Words" = count). Unlike the with-stopword
# runs, these queries have no ORDER BY; rows come out in GROUP BY order.
Freq_Blogs_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs_nsw GROUP BY Words")
Freq_Blogs_bar_nsw <- Freq_Blogs_nsw[head(order(-Freq_Blogs_nsw$Words), 20),]
ggplot(Freq_Blogs_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopword") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs_nsw$W, Freq_Blogs_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## News
All_News_nsw <- as.data.frame(unlist(Token_News_nsw))
colnames(All_News_nsw) <- "Words"
Freq_News_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News_nsw GROUP BY Words")
Freq_News_bar_nsw <- Freq_News_nsw[head(order(-Freq_News_nsw$Words), 20),]
ggplot(Freq_News_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News_nsw$W, Freq_News_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## Tweets
All_Tweets_nsw <- as.data.frame(unlist(Token_Tweets_nsw))
colnames(All_Tweets_nsw) <- "Words"
Freq_Tweets_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets_nsw GROUP BY Words")
Freq_Tweets_bar_nsw <- Freq_Tweets_nsw[head(order(-Freq_Tweets_nsw$Words), 20),]
ggplot(Freq_Tweets_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets_nsw$W, Freq_Tweets_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## All together
# Pool the three corpora for corpus-wide stopword-free word frequencies.
All_Words_nsw <- rbind(All_Blogs_nsw, All_News_nsw, All_Tweets_nsw)
colnames(All_Words_nsw) <- "Words"
Freq_Words_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words_nsw GROUP BY Words")
Freq_Words_bar_nsw <- Freq_Words_nsw[head(order(-Freq_Words_nsw$Words), 20),]
ggplot(Freq_Words_bar_nsw, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle("1-gram Frequency chart without stopwords") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words_nsw$W, Freq_Words_nsw$Words, min.freq = 50, max.words = 100, random.order=FALSE)
## 2-gram
# Only documents with at least 2 stopword-free tokens can give a 2-gram.
Token_Blogs_nsw2 <- Token_Blogs_nsw[lengths(Token_Blogs_nsw, use.names = TRUE) > 1]
Token_News_nsw2 <- Token_News_nsw[lengths(Token_News_nsw, use.names = TRUE) > 1]
Token_Tweets_nsw2 <- Token_Tweets_nsw[lengths(Token_Tweets_nsw, use.names = TRUE) > 1]
## Blogs 2
# Build stopword-free blog 2-grams; preallocated chunk list replaces the
# O(n^2) rbind growth of the original.
ng2_chunks <- vector("list", length(Token_Blogs_nsw2))
for (j in seq_along(Token_Blogs_nsw2)) {
  ng2 <- ngrams(unlist(Token_Blogs_nsw2[[j]]), 2L)
  # vapply replaces the inner for-loop; the original's `k = k + 1` inside
  # `for (k in ...)` was a no-op because for() resets k each iteration.
  ng2_chunks[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  print(c(j, length(Token_Blogs_nsw2), j * 100 / length(Token_Blogs_nsw2), "2-blogs-nsw"))
}
All_Blogs2_nsw <- data.frame(Words = unlist(ng2_chunks), stringsAsFactors = FALSE)
remove(ng2_chunks)
Freq_Blogs2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs2_nsw GROUP BY Words")
Freq_Blogs_bar2_nsw <- Freq_Blogs2_nsw[head(order(-Freq_Blogs2_nsw$Words), 20), ]
ggplot(Freq_Blogs_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") +
  ggtitle("2-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs2_nsw$W, Freq_Blogs2_nsw$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## News 2
ng2_chunks <- vector("list", length(Token_News_nsw2))
for (j in seq_along(Token_News_nsw2)) {
  ng2 <- ngrams(unlist(Token_News_nsw2[[j]]), 2L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng2_chunks[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  print(c(j, length(Token_News_nsw2), j * 100 / length(Token_News_nsw2), "2-news-nsw"))
}
All_News2_nsw <- data.frame(Words = unlist(ng2_chunks), stringsAsFactors = FALSE)
remove(ng2_chunks)
Freq_News2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News2_nsw GROUP BY Words")
Freq_News_bar2_nsw <- Freq_News2_nsw[head(order(-Freq_News2_nsw$Words), 20), ]
ggplot(Freq_News_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") +
  ggtitle("2-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News2_nsw$W, Freq_News2_nsw$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## Tweets 2
ng2_chunks <- vector("list", length(Token_Tweets_nsw2))
for (j in seq_along(Token_Tweets_nsw2)) {
  ng2 <- ngrams(unlist(Token_Tweets_nsw2[[j]]), 2L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng2_chunks[[j]] <- vapply(ng2, paste, character(1), collapse = " ")
  print(c(j, length(Token_Tweets_nsw2), j * 100 / length(Token_Tweets_nsw2), "2-tweets-nsw"))
}
All_Tweets2_nsw <- data.frame(Words = unlist(ng2_chunks), stringsAsFactors = FALSE)
remove(ng2_chunks)
Freq_Tweets2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets2_nsw GROUP BY Words")
Freq_Tweets_bar2_nsw <- Freq_Tweets2_nsw[head(order(-Freq_Tweets2_nsw$Words), 20), ]
ggplot(Freq_Tweets_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") +
  ggtitle("2-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets2_nsw$W, Freq_Tweets2_nsw$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## All together 2
# Pool the three corpora and compute corpus-wide stopword-free 2-gram
# frequencies.
All_Words2_nsw <- rbind(All_Blogs2_nsw, All_News2_nsw, All_Tweets2_nsw)
colnames(All_Words2_nsw) <- "Words"
Freq_Words2_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words2_nsw GROUP BY Words")
Freq_Words_bar2_nsw <- Freq_Words2_nsw[head(order(-Freq_Words2_nsw$Words), 20), ]
ggplot(Freq_Words_bar2_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words2") + ylab("Freq") +
  ggtitle("2-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words2_nsw$W, Freq_Words2_nsw$Words, min.freq = 50, max.words = 50, random.order=FALSE)
## 3-gram
# Only documents with at least 3 stopword-free tokens can give a 3-gram.
Token_Blogs_nsw3 <- Token_Blogs_nsw[lengths(Token_Blogs_nsw, use.names = TRUE) > 2]
Token_News_nsw3 <- Token_News_nsw[lengths(Token_News_nsw, use.names = TRUE) > 2]
Token_Tweets_nsw3 <- Token_Tweets_nsw[lengths(Token_Tweets_nsw, use.names = TRUE) > 2]
## Blogs 3
ng3_chunks <- vector("list", length(Token_Blogs_nsw3))
for (j in seq_along(Token_Blogs_nsw3)) {
  # BUG FIX: unlist() restored for consistency with the other n-gram loops
  # in this script.
  ng3 <- ngrams(unlist(Token_Blogs_nsw3[[j]]), 3L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng3_chunks[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  print(c(j, length(Token_Blogs_nsw3), j * 100 / length(Token_Blogs_nsw3), "3-blogs-nsw"))
}
All_Blogs3_nsw <- data.frame(Words = unlist(ng3_chunks), stringsAsFactors = FALSE)
remove(ng3_chunks)
Freq_Blogs3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Blogs3_nsw GROUP BY Words")
Freq_Blogs_bar3_nsw <- Freq_Blogs3_nsw[head(order(-Freq_Blogs3_nsw$Words), 20), ]
ggplot(Freq_Blogs_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") +
  ggtitle("3-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Blogs3_nsw$W, Freq_Blogs3_nsw$Words, min.freq = 50, max.words = 20, random.order=FALSE)
## News 3
ng3_chunks <- vector("list", length(Token_News_nsw3))
for (j in seq_along(Token_News_nsw3)) {
  # BUG FIX: unlist() restored for consistency with the other n-gram loops.
  ng3 <- ngrams(unlist(Token_News_nsw3[[j]]), 3L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng3_chunks[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  print(c(j, length(Token_News_nsw3), j * 100 / length(Token_News_nsw3), "3-news-nsw"))
}
All_News3_nsw <- data.frame(Words = unlist(ng3_chunks), stringsAsFactors = FALSE)
remove(ng3_chunks)
Freq_News3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_News3_nsw GROUP BY Words")
Freq_News_bar3_nsw <- Freq_News3_nsw[head(order(-Freq_News3_nsw$Words), 20), ]
ggplot(Freq_News_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") +
  ggtitle("3-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_News3_nsw$W, Freq_News3_nsw$Words, min.freq = 50, max.words = 20, random.order=FALSE)
## Tweets 3
ng3_chunks <- vector("list", length(Token_Tweets_nsw3))
for (j in seq_along(Token_Tweets_nsw3)) {
  # BUG FIX: unlist() restored for consistency with the other n-gram loops.
  ng3 <- ngrams(unlist(Token_Tweets_nsw3[[j]]), 3L)
  # vapply replaces the inner loop (whose `k = k + 1` was a no-op) and a
  # preallocated list replaces the O(n^2) rbind growth.
  ng3_chunks[[j]] <- vapply(ng3, paste, character(1), collapse = " ")
  print(c(j, length(Token_Tweets_nsw3), j * 100 / length(Token_Tweets_nsw3), "3-tweets-nsw"))
}
All_Tweets3_nsw <- data.frame(Words = unlist(ng3_chunks), stringsAsFactors = FALSE)
remove(ng3_chunks)
Freq_Tweets3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Tweets3_nsw GROUP BY Words")
Freq_Tweets_bar3_nsw <- Freq_Tweets3_nsw[head(order(-Freq_Tweets3_nsw$Words), 20), ]
ggplot(Freq_Tweets_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") +
  ggtitle("3-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Tweets3_nsw$W, Freq_Tweets3_nsw$Words, min.freq = 50, max.words = 20, random.order=FALSE)
## All together 3
# Pool the three corpora and compute corpus-wide stopword-free 3-gram
# frequencies.
All_Words3_nsw <- rbind(All_Blogs3_nsw, All_News3_nsw, All_Tweets3_nsw)
colnames(All_Words3_nsw) <- "Words"
Freq_Words3_nsw <- sqldf("SELECT Words W, COUNT(Words) Words FROM All_Words3_nsw GROUP BY Words")
Freq_Words_bar3_nsw <- Freq_Words3_nsw[head(order(-Freq_Words3_nsw$Words), 20), ]
ggplot(Freq_Words_bar3_nsw, aes(x = reorder(W, -Words), y = Words)) +
  geom_bar(stat = "identity") + xlab("Words3") + ylab("Freq") +
  ggtitle("3-gram Frequency chart without stopwords") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
wordcloud(Freq_Words3_nsw$W, Freq_Words3_nsw$Words, min.freq = 50, max.words = 20, random.order=FALSE)
########################## Cluster Analysis #############################
## Without stopwords
# Token_Blogs_nsw1 <- as.vector(Token_Blogs_nsw)
# Token_News_nsw1 <- as.vector(Token_News_nsw)
# Token_Tweets_nsw1 <- as.vector(Token_Tweets_nsw)
# Build term-document matrices from the stopword-free token lists (tm).
Corp_Blogs <- VCorpus(VectorSource(Token_Blogs_nsw))
Corp_News <- VCorpus(VectorSource(Token_News_nsw))
Corp_Tweets <- VCorpus(VectorSource(Token_Tweets_nsw))
Corp_Blogs <- TermDocumentMatrix(Corp_Blogs)
Corp_News <- TermDocumentMatrix(Corp_News)
Corp_Tweets <- TermDocumentMatrix(Corp_Tweets)
# Drop terms absent from more than 98% of documents.
sparsity <- 0.98
Corp_Blogs <- removeSparseTerms(Corp_Blogs, sparsity)
Corp_News <- removeSparseTerms(Corp_News, sparsity)
Corp_Tweets <- removeSparseTerms(Corp_Tweets, sparsity)
Corp_Blogs <- as.matrix(Corp_Blogs)
Corp_News <- as.matrix(Corp_News)
Corp_Tweets <- as.matrix(Corp_Tweets)
# Remove all-zero document columns so the cosine similarity is defined.
Corp_Blogs <- Corp_Blogs[,colSums(Corp_Blogs) != 0]
Corp_News <- Corp_News[,colSums(Corp_News) != 0]
Corp_Tweets <- Corp_Tweets[,colSums(Corp_Tweets) != 0]
dim(Corp_Blogs)
# Cosine dissimilarity (1 - similarity) between blog documents, then
# hierarchical clustering with Ward's criterion.
dcos <- cosine(Corp_Blogs)
dcos <- as.dist(1-dcos)
hc <- hclust(dcos, "ward.D2")
## http://artax.karlin.mff.cuni.cz/r-help/library/NbClust/html/NbClust.html
# Choose the number of clusters via the KL index (up to 100 clusters).
res <- NbClust(t(Corp_Blogs), dcos, distance = NULL, max.nc = 100, method = "ward.D", index = "kl")
res$Best.nc
## https://rpubs.com/gaston/dendrograms
hc_Blogs <- cutree(hc, k = res$Best.nc[1])
hc_Blogs_df <- as.data.frame(hc_Blogs)
## Cluster size
table(hc_Blogs_df)
## Cluster Dendrogram colour
dend <- as.dendrogram(hc)
d2=color_branches(dend,k=res$Best.nc[1]) # auto-coloring 5 clusters of branches.
plot(d2)
## Look into each cluster
# NOTE(review): `c <- 3` masks base::c() for the remainder of the session.
c <- 3
cluster <- subset(hc_Blogs_df,hc_Blogs_df$hc_Blogs == c)
# Collect the tokens of the documents assigned to cluster `c`.
Words <- as.data.frame(unlist(Token_Blogs_nsw[as.integer(row.names(cluster))]))
colnames(Words) <- "Words"
Freq <- sqldf("SELECT Words W, COUNT(Words) Words FROM Words GROUP BY Words")
# Keep words whose count exceeds 10% of the cluster size.
Freq_bar <- Freq[Freq$Words > nrow(cluster)*0.1,]
ggplot(Freq_bar, aes(x = reorder(W, -Words), y = Words)) + geom_bar(stat="identity") + xlab("Words") + ylab("Freq") + ggtitle(paste("Frequency chart Cluster",c,"(Size[",nrow(cluster),"])",sep = " ")) + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
|
# Summary of breast-data classification runs: for each power q, pick the
# lambda2 with the smallest average misclassification rate and report its
# sparsity (nzo) and timing.
# NOTE(review): setwd() to a machine-specific path makes this script
# non-portable; kept unchanged here.
setwd("D://GitHub//powerfamily//to server//classrate2")
setwd(paste(getwd(),"/breast",sep=""))
# lambda2 in c(1e-4,1e-3,1e-2,1e-1,1,5,10)
# Each .rda file defines one object (ans.avg / nzo.avg / time.avg) indexed
# by the lambda2 grid above; the outer parentheses make each copy
# auto-print when the script is run.
load("ans0.5.rda")
(ans.avg_05 = ans.avg)
load("ans1.rda")
(ans.avg_1 = ans.avg)
load("ans2.rda")
(ans.avg_2 = ans.avg)
load("ans5.rda")
(ans.avg_5 = ans.avg)
load("nzo0.5.rda")
(nzo.avg_05 = nzo.avg)
load("nzo1.rda")
(nzo.avg_1 = nzo.avg)
load("nzo2.rda")
(nzo.avg_2 = nzo.avg)
load("nzo5.rda")
(nzo.avg_5 = nzo.avg)
load("time0.5.rda")
(time.avg_05 = time.avg)
load("time1.rda")
(time.avg_1 = time.avg)
load("time2.rda")
(time.avg_2 = time.avg)
load("time5.rda")
(time.avg_5 = time.avg)
# Position of the best (lowest average error) lambda2 for each q.
i05 = which.min(ans.avg_05)
i1 = which.min(ans.avg_1)
i2 = which.min(ans.avg_2)
i5 = which.min(ans.avg_5)
loc.list = c(i05, i1, i2, i5)
qv.list = c(0.5, 1, 2, 5)
lambda2.list = c(1e-4,1e-3,1e-2,1e-1,1,5,10)[loc.list]
ans.list = c(ans.avg_05[i05], ans.avg_1[i1], ans.avg_2[i2], ans.avg_5[i5])
nzo.list = c(nzo.avg_05[i05], nzo.avg_1[i1], nzo.avg_2[i2], nzo.avg_5[i5])
time.list = c(time.avg_05[i05], time.avg_1[i1], time.avg_2[i2], time.avg_5[i5])
# One summary row per q: best lambda2 with its error, sparsity and runtime.
cbind(qv.list, lambda2.list, ans.list, nzo.list, time.list)
|
/to server/classrate2/1breast/T_classification_summary_breast.R
|
no_license
|
boxiang-wang/powerfamilyold
|
R
| false
| false
| 1,139
|
r
|
# Summary of breast-data classification runs: for each power q, pick the
# lambda2 with the smallest average misclassification rate and report its
# sparsity (nzo) and timing.
# NOTE(review): setwd() to a machine-specific path makes this script
# non-portable; kept unchanged here.
setwd("D://GitHub//powerfamily//to server//classrate2")
setwd(paste(getwd(),"/breast",sep=""))
# lambda2 in c(1e-4,1e-3,1e-2,1e-1,1,5,10)
# Each .rda file defines one object (ans.avg / nzo.avg / time.avg) indexed
# by the lambda2 grid above; the outer parentheses make each copy
# auto-print when the script is run.
load("ans0.5.rda")
(ans.avg_05 = ans.avg)
load("ans1.rda")
(ans.avg_1 = ans.avg)
load("ans2.rda")
(ans.avg_2 = ans.avg)
load("ans5.rda")
(ans.avg_5 = ans.avg)
load("nzo0.5.rda")
(nzo.avg_05 = nzo.avg)
load("nzo1.rda")
(nzo.avg_1 = nzo.avg)
load("nzo2.rda")
(nzo.avg_2 = nzo.avg)
load("nzo5.rda")
(nzo.avg_5 = nzo.avg)
load("time0.5.rda")
(time.avg_05 = time.avg)
load("time1.rda")
(time.avg_1 = time.avg)
load("time2.rda")
(time.avg_2 = time.avg)
load("time5.rda")
(time.avg_5 = time.avg)
# Position of the best (lowest average error) lambda2 for each q.
i05 = which.min(ans.avg_05)
i1 = which.min(ans.avg_1)
i2 = which.min(ans.avg_2)
i5 = which.min(ans.avg_5)
loc.list = c(i05, i1, i2, i5)
qv.list = c(0.5, 1, 2, 5)
lambda2.list = c(1e-4,1e-3,1e-2,1e-1,1,5,10)[loc.list]
ans.list = c(ans.avg_05[i05], ans.avg_1[i1], ans.avg_2[i2], ans.avg_5[i5])
nzo.list = c(nzo.avg_05[i05], nzo.avg_1[i1], nzo.avg_2[i2], nzo.avg_5[i5])
time.list = c(time.avg_05[i05], time.avg_1[i1], time.avg_2[i2], time.avg_5[i5])
# One summary row per q: best lambda2 with its error, sparsity and runtime.
cbind(qv.list, lambda2.list, ans.list, nzo.list, time.list)
|
#' Read the wind data from HKW
#'
#' @description Reads the wind speed or wind direction at one height level
#' from the K13A lidar files. Data can be downloaded from
#' \url{https://www.windopzee.net/meet-locaties/hollandse-kust-zuid-hkz/data/}.
#' @param dir directory of the files (allowed to be stored in subfolders).
#' @param h height of the measurement (63m,91m,116m,141m,166m,191m,216m,241m,266m,291m).
#'   NOTE(review): the default of 60 is not an available level, so calling
#'   the function without `h` returns FALSE -- confirm this is intended.
#' @param what choose between Speed and Direction. Wind speed as sqrt(u^2+v^2).
#' @return A data.frame with columns time, u (Speed) or dir (Direction) and
#'   h, restricted to complete cases; FALSE if `h` is not an available level.
#' @importFrom rlang .data
#' @author Marieke Dirksen
#' @export
read_K13A<-function(dir="D:/data/Lidar/K13/",h=60,what="Speed"){
  # Height levels present in the K13A files.
  h_opt<-c(63,91,116,141,166,191,216,241,266,291)
  if(!(h %in% h_opt)){
    message("Height level not included returning FALSE, try 63,91,116,141,166,191,216,241,266 or 291")
    return(FALSE)
  }
  # Read and stack every K13 file found below `dir`; the real header is on
  # line 2 and the first data row (units) is dropped.
  hkz<-list.files(dir,pattern="*K13*",
                  recursive = TRUE,full.names = TRUE)
  df<-do.call("rbind",lapply(hkz,function(x){data.table::fread(x,skip=1,header=TRUE)}))
  df<-df[-1,]
  # Timestamps contain letters (e.g. "T"/"Z" separators); strip them and
  # parse the remainder as POSIXct.
  t.vec<-df$`Timestamp(UTC)`
  t.vec<-gsub("[A-Z]"," ",t.vec)
  t.vec<-as.POSIXct(t.vec,format="%Y-%m-%d %H:%M:%S ")
  # Select the columns for the requested variable; out_name labels the value
  # column of the returned data.frame. The Speed/Direction branches of the
  # original were duplicated and are merged here.
  if(what=="Speed"){
    clmn.I<-df[,grep("*WsHor_avg$*", colnames(df))]
    out_name<-"u"
  } else if(what=="Direction"){
    clmn.I<-df[,grep("*_Wd$", colnames(df))]
    out_name<-"dir"
  } else {
    message("Unknown 'what', choose Speed or Direction, returning FALSE")
    return(FALSE)
  }
  u<-subset(df,select=clmn.I)
  # Column names encode the height (e.g. "K13A_H63_..."): extract the number.
  heights<-gsub("K13A_H","",names(u))
  heights<-as.numeric(gsub("[^0-9.-]", "", heights))
  I.h<-which(heights==h)
  if(length(I.h)==2){
    # Two columns match this height: drop the non-configurable reference one.
    message("Two columns with this height level, omitting the non-configurable reference height")
    I.ref<-grep(".*refh.*",colnames(u)[I.h])
    hg<-subset(u,select=I.h)
    hg<-subset(hg,select=-I.ref)
  } else {
    hg<-subset(u,select=I.h)
  }
  df_h<-cbind(t.vec,hg)
  df_h$h<-h
  # BUG FIX: the original labelled the Direction column "u" (instead of
  # "dir") and skipped the as.numeric() conversion whenever two columns
  # matched the requested height.
  names(df_h)<-c("time",out_name,"h")
  df_h[[out_name]]<-as.numeric(df_h[[out_name]])
  df_h<-df_h[stats::complete.cases(df_h),]
  return(df_h)
}
|
/R/read_K13a.R
|
no_license
|
MariekeDirk/WINS50_Lidar
|
R
| false
| false
| 2,890
|
r
|
#' Read the wind data from HKW
#'
#' @description Reads the wind speed or wind direction at one height level
#' from the K13A lidar files. Data can be downloaded from
#' \url{https://www.windopzee.net/meet-locaties/hollandse-kust-zuid-hkz/data/}.
#' @param dir directory of the files (allowed to be stored in subfolders).
#' @param h height of the measurement (63m,91m,116m,141m,166m,191m,216m,241m,266m,291m).
#'   NOTE(review): the default of 60 is not an available level, so calling
#'   the function without `h` returns FALSE -- confirm this is intended.
#' @param what choose between Speed and Direction. Wind speed as sqrt(u^2+v^2).
#' @return A data.frame with columns time, u (Speed) or dir (Direction) and
#'   h, restricted to complete cases; FALSE if `h` is not an available level.
#' @importFrom rlang .data
#' @author Marieke Dirksen
#' @export
read_K13A<-function(dir="D:/data/Lidar/K13/",h=60,what="Speed"){
  # Height levels present in the K13A files.
  h_opt<-c(63,91,116,141,166,191,216,241,266,291)
  if(!(h %in% h_opt)){
    message("Height level not included returning FALSE, try 63,91,116,141,166,191,216,241,266 or 291")
    return(FALSE)
  }
  # Read and stack every K13 file found below `dir`; the real header is on
  # line 2 and the first data row (units) is dropped.
  hkz<-list.files(dir,pattern="*K13*",
                  recursive = TRUE,full.names = TRUE)
  df<-do.call("rbind",lapply(hkz,function(x){data.table::fread(x,skip=1,header=TRUE)}))
  df<-df[-1,]
  # Timestamps contain letters (e.g. "T"/"Z" separators); strip them and
  # parse the remainder as POSIXct.
  t.vec<-df$`Timestamp(UTC)`
  t.vec<-gsub("[A-Z]"," ",t.vec)
  t.vec<-as.POSIXct(t.vec,format="%Y-%m-%d %H:%M:%S ")
  # Select the columns for the requested variable; out_name labels the value
  # column of the returned data.frame. The Speed/Direction branches of the
  # original were duplicated and are merged here.
  if(what=="Speed"){
    clmn.I<-df[,grep("*WsHor_avg$*", colnames(df))]
    out_name<-"u"
  } else if(what=="Direction"){
    clmn.I<-df[,grep("*_Wd$", colnames(df))]
    out_name<-"dir"
  } else {
    message("Unknown 'what', choose Speed or Direction, returning FALSE")
    return(FALSE)
  }
  u<-subset(df,select=clmn.I)
  # Column names encode the height (e.g. "K13A_H63_..."): extract the number.
  heights<-gsub("K13A_H","",names(u))
  heights<-as.numeric(gsub("[^0-9.-]", "", heights))
  I.h<-which(heights==h)
  if(length(I.h)==2){
    # Two columns match this height: drop the non-configurable reference one.
    message("Two columns with this height level, omitting the non-configurable reference height")
    I.ref<-grep(".*refh.*",colnames(u)[I.h])
    hg<-subset(u,select=I.h)
    hg<-subset(hg,select=-I.ref)
  } else {
    hg<-subset(u,select=I.h)
  }
  df_h<-cbind(t.vec,hg)
  df_h$h<-h
  # BUG FIX: the original labelled the Direction column "u" (instead of
  # "dir") and skipped the as.numeric() conversion whenever two columns
  # matched the requested height.
  names(df_h)<-c("time",out_name,"h")
  df_h[[out_name]]<-as.numeric(df_h[[out_name]])
  df_h<-df_h[stats::complete.cases(df_h),]
  return(df_h)
}
|
#Valuation Analysis
library(rgdal)
source("data_prep_and_joins.R")
# Carbon-biomass extraction at GEF treatment centroids (GeoJSON points).
GEF.seq.dta = readOGR("/home/aiddata/Desktop/Github/GEF/Data/GEF_treatment_centroids_carbon_extract.geojson", "OGRGeoJSON")
# GEF.noUS comes from data_prep_and_joins.R: keep treated sites only and
# attach the carbon extract by project id.
GEF.noUS.trt <- GEF.noUS[GEF.noUS$treatment == 1,]
GEF.noUS.seq <- merge(GEF.noUS.trt@data, GEF.seq.dta, by="id")
#Carbon sequestration model
# NOTE(review): renaming by hard-coded column position 391 is fragile, and
# the guard only prints on mismatch instead of stopping execution.
if(names(GEF.noUS.seq)[391] == "mean")
{
names(GEF.noUS.seq)[391] <- "CarbonBiomass"
} else {
print("Error!!!!!!!-----------------")
}
#Fit the carbon model, which provides the relationship between M T / HA and our
#Treatment terms (+ controls).
CarbonModel <- lm(CarbonBiomass ~ factor(GEZ_TERM) + latitude.x + longitude.x +
mean_patch_size2010 + lossyr25.na.categorical_2010 +
ltdr_yearly_ndvi_max.2010.mean, data=GEF.noUS.seq)
|
/CarbonSequestration.R
|
no_license
|
DanRunfola/gef-analysis-01
|
R
| false
| false
| 792
|
r
|
#Valuation Analysis
library(rgdal)
source("data_prep_and_joins.R")
# Carbon-biomass extraction at GEF treatment centroids (GeoJSON points).
GEF.seq.dta = readOGR("/home/aiddata/Desktop/Github/GEF/Data/GEF_treatment_centroids_carbon_extract.geojson", "OGRGeoJSON")
# GEF.noUS comes from data_prep_and_joins.R: keep treated sites only and
# attach the carbon extract by project id.
GEF.noUS.trt <- GEF.noUS[GEF.noUS$treatment == 1,]
GEF.noUS.seq <- merge(GEF.noUS.trt@data, GEF.seq.dta, by="id")
#Carbon sequestration model
# NOTE(review): renaming by hard-coded column position 391 is fragile, and
# the guard only prints on mismatch instead of stopping execution.
if(names(GEF.noUS.seq)[391] == "mean")
{
names(GEF.noUS.seq)[391] <- "CarbonBiomass"
} else {
print("Error!!!!!!!-----------------")
}
#Fit the carbon model, which provides the relationship between M T / HA and our
#Treatment terms (+ controls).
CarbonModel <- lm(CarbonBiomass ~ factor(GEZ_TERM) + latitude.x + longitude.x +
mean_patch_size2010 + lossyr25.na.categorical_2010 +
ltdr_yearly_ndvi_max.2010.mean, data=GEF.noUS.seq)
|
# Exercise: Okun's law with lagged regressors and residual-autocorrelation
# tests (Carter Hill, Principles of Econometrics, chapter 9).
setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RStats/learneconometrics/CarterHill_PrinciplesOfEconometrics/Chapter9_TimeSeries")
rm(list=ls())
#install.packages("nlts")
#install.packages("spdep")
#library(nlts)
#library(spdep)
library(lmtest)
library(foreign)
library(ggfortify)
# Data from: http://www.principlesofeconometrics.com/poe4/poe4stata.htm
## Part a)
okun <- read.dta("okun.dta")
g <- okun$g; length(g)
u <- okun$u; length(u)
# First difference of unemployment (one observation shorter than u).
du <- diff(u); length(du)
# Hand-built lag columns: NA padding aligns DU, DU(-1), G and G(-1).
okunNA_1 <- data.frame(DU=c(NA, du), DU_1=c(NA, NA, du[1:96]),
G=g, G_1=c(NA, g[1:97]))
head(okunNA_1); tail(okunNA_1)
okun.lm <- lm(data=okunNA_1, DU ~ DU_1 + G + G_1)
summary(okun.lm)
## Part b) correlogram of residuals of okun.lm
# TODO: how many lags to calculate?
acfs <- acf(okun.lm$residuals, lag.max=100, plot=FALSE); acfs
autoplot(acfs[2:95])
## Part c) LM test of residual autocorrelation
e <- okun.lm$residuals; e; length(e)
# Auxiliary-regression data: residuals plus their first four lags, aligned
# against the original regressors with NA padding.
errorsNA_4 <- data.frame(DU_1=c(NA, NA, du[1:96]),
G=g,
G_1=c(NA, g[1:97]),
E=c(NA, NA, e),
E_1=c(NA, NA, NA, e[1:95]),
E_2=c(NA, NA, NA, NA, e[1:94]),
E_3=c(NA, NA, NA, NA, NA, e[1:93]),
E_4=c(NA, NA, NA, NA, NA, NA, e[1:92]))
head(errorsNA_4, 10); tail(errorsNA_4)
# Lag 1
lagrange.1.lm <- lm(data=errorsNA_4, E ~ DU_1 + G + G_1 + E_1)
s <- summary(lagrange.1.lm); s
R2 <- s$r.squared; R2
# NOTE(review): assigning to T masks the built-in alias for TRUE.
T <- nrow(okunNA_1); T
# Breusch-Godfrey-style statistic LM = T * R^2.
LM <- T*R2; LM # TODO: what am I doing wrong?
# Lag 4
lagrange.4.lm <- lm(data=errorsNA_4, E ~ DU_1 + G + G_1 + E_1 + E_2 + E_3 + E_4)
s <- summary(lagrange.4.lm); s
R2 <- s$r.squared; R2
T <- nrow(okunNA_1); T
LM <- T*R2; LM # TODO: what am I doing wrong?
# Breusch-Godfrey test at orders 1..70; row n holds the chi-squared
# statistic and p-value for lag order n.
pValues <- data.frame(CHISQ=c(NA), P=c(NA)); pValues
for(n in 1:70){
bg <- bgtest(okun.lm, order=n, type="Chisq")
if(n == 1) {
pValues$CHISQ <- bg$statistic
pValues$P <- bg$p.value
} else {
pValues <- rbind(pValues, c(bg$statistic, bg$p.value))
#pValues <- rbind(pValues, bg$p.value)
}
}
pValues
any(pValues$P <= 0.05)
sum(pValues$P <= 0.05)
which(pValues$P <= 0.05) # so autocorrelation at lags 8,10, 11, 12, 13, 17
pValues$P[which(pValues$P <= 0.05)]
pValues$CHISQ[which(pValues$P <= 0.05)]
head(pValues, 4)
## Part d) reestimate the equation with DU_2 and G_2 added separately, then together
okunNA_2 <- okunNA_1
okunNA_2$DU_2 <- c(NA, NA, NA, du[1:95])
okunNA_2$G_2 <- c(NA, NA, g[1:96])
head(okunNA_2); tail(okunNA_2)
summary(lm(data=okunNA_2, DU ~ DU_1 + DU_2 + G + G_1))
summary(lm(data=okunNA_2, DU ~ DU_1 + G + G_1 + G_2))
summary(lm(data=okunNA_2, DU ~ DU_1 + DU_2 + G + G_1 + G_2))
# No, coefficients of DU_2 and G_2 are not significantly different than zero.
|
/CarterHill_PrinciplesOfEconometrics/Chapter9_StationaryTimeSeries/exercise9.21.R
|
no_license
|
statisticallyfit/REconometrics
|
R
| false
| false
| 2,902
|
r
|
# Exercise: Okun's law with lagged regressors and residual-autocorrelation
# tests (Carter Hill, Principles of Econometrics, chapter 9).
setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RStats/learneconometrics/CarterHill_PrinciplesOfEconometrics/Chapter9_TimeSeries")
rm(list=ls())
#install.packages("nlts")
#install.packages("spdep")
#library(nlts)
#library(spdep)
library(lmtest)
library(foreign)
library(ggfortify)
# Data from: http://www.principlesofeconometrics.com/poe4/poe4stata.htm
## Part a)
okun <- read.dta("okun.dta")
g <- okun$g; length(g)
u <- okun$u; length(u)
# First difference of unemployment (one observation shorter than u).
du <- diff(u); length(du)
# Hand-built lag columns: NA padding aligns DU, DU(-1), G and G(-1).
okunNA_1 <- data.frame(DU=c(NA, du), DU_1=c(NA, NA, du[1:96]),
G=g, G_1=c(NA, g[1:97]))
head(okunNA_1); tail(okunNA_1)
okun.lm <- lm(data=okunNA_1, DU ~ DU_1 + G + G_1)
summary(okun.lm)
## Part b) correlogram of residuals of okun.lm
# TODO: how many lags to calculate?
acfs <- acf(okun.lm$residuals, lag.max=100, plot=FALSE); acfs
autoplot(acfs[2:95])
## Part c) LM test of residual autocorrelation
e <- okun.lm$residuals; e; length(e)
# Auxiliary-regression data: residuals plus their first four lags, aligned
# against the original regressors with NA padding.
errorsNA_4 <- data.frame(DU_1=c(NA, NA, du[1:96]),
G=g,
G_1=c(NA, g[1:97]),
E=c(NA, NA, e),
E_1=c(NA, NA, NA, e[1:95]),
E_2=c(NA, NA, NA, NA, e[1:94]),
E_3=c(NA, NA, NA, NA, NA, e[1:93]),
E_4=c(NA, NA, NA, NA, NA, NA, e[1:92]))
head(errorsNA_4, 10); tail(errorsNA_4)
# Lag 1
lagrange.1.lm <- lm(data=errorsNA_4, E ~ DU_1 + G + G_1 + E_1)
s <- summary(lagrange.1.lm); s
R2 <- s$r.squared; R2
# NOTE(review): assigning to T masks the built-in alias for TRUE.
T <- nrow(okunNA_1); T
# Breusch-Godfrey-style statistic LM = T * R^2.
LM <- T*R2; LM # TODO: what am I doing wrong?
# Lag 4
lagrange.4.lm <- lm(data=errorsNA_4, E ~ DU_1 + G + G_1 + E_1 + E_2 + E_3 + E_4)
s <- summary(lagrange.4.lm); s
R2 <- s$r.squared; R2
T <- nrow(okunNA_1); T
LM <- T*R2; LM # TODO: what am I doing wrong?
# Breusch-Godfrey test at orders 1..70; row n holds the chi-squared
# statistic and p-value for lag order n.
pValues <- data.frame(CHISQ=c(NA), P=c(NA)); pValues
for(n in 1:70){
bg <- bgtest(okun.lm, order=n, type="Chisq")
if(n == 1) {
pValues$CHISQ <- bg$statistic
pValues$P <- bg$p.value
} else {
pValues <- rbind(pValues, c(bg$statistic, bg$p.value))
#pValues <- rbind(pValues, bg$p.value)
}
}
pValues
any(pValues$P <= 0.05)
sum(pValues$P <= 0.05)
which(pValues$P <= 0.05) # so autocorrelation at lags 8,10, 11, 12, 13, 17
pValues$P[which(pValues$P <= 0.05)]
pValues$CHISQ[which(pValues$P <= 0.05)]
head(pValues, 4)
## Part d) reestimate the equation with DU_2 and G_2 added separately, then together
okunNA_2 <- okunNA_1
okunNA_2$DU_2 <- c(NA, NA, NA, du[1:95])
okunNA_2$G_2 <- c(NA, NA, g[1:96])
head(okunNA_2); tail(okunNA_2)
summary(lm(data=okunNA_2, DU ~ DU_1 + DU_2 + G + G_1))
summary(lm(data=okunNA_2, DU ~ DU_1 + G + G_1 + G_2))
summary(lm(data=okunNA_2, DU ~ DU_1 + DU_2 + G + G_1 + G_2))
# No, coefficients of DU_2 and G_2 are not significantly different than zero.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_predx_colnames.R
\name{get_predx_colnames}
\alias{get_predx_colnames}
\title{Internal function returns the names of columns used for conversion to predx objects}
\usage{
get_predx_colnames(classes)
}
\arguments{
\item{classes}{A vector of predx classes names (e.g. 'Point', 'Binary').}
}
\value{
`predx` tibble
}
\description{
Internal function returns the names of columns used for conversion to predx objects
}
\examples{
get_predx_colnames(c("Binary", "Point", "BinCat"))
}
|
/man/get_predx_colnames.Rd
|
no_license
|
cdcepi/predx
|
R
| false
| true
| 559
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_predx_colnames.R
\name{get_predx_colnames}
\alias{get_predx_colnames}
\title{Internal function returns the names of columns used for conversion to predx objects}
\usage{
get_predx_colnames(classes)
}
\arguments{
\item{classes}{A vector of predx classes names (e.g. 'Point', 'Binary').}
}
\value{
`predx` tibble
}
\description{
Internal function returns the names of columns used for conversion to predx objects
}
\examples{
get_predx_colnames(c("Binary", "Point", "BinCat"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CanonicalFunctions.r
\name{Canonical.Rsq}
\alias{Canonical.Rsq}
\title{Internal function for rdacca.hp() to calculate the Rsquared, adjusted Rsquared, Pseudo-F value, AIC and AICc for canonical analysis (RDA, db-RDA and CCA)}
\usage{
Canonical.Rsq(dv, iv, method = "RDA", n.perm = 1000)
}
\arguments{
\item{dv}{Response variables. if method="dbRDA", dv is the "dist" matrix.}
\item{iv}{Explanatory variables, typically of environmental variables.}
\item{method}{The type of canonical analysis: RDA, dbRDA or CCA, the default is "RDA".}
\item{n.perm}{Number of permutations to use when computing the adjusted R-squared for a CCA.}
}
\description{
Internal function for rdacca.hp() to calculate the Rsquared, adjusted Rsquared, Pseudo-F value, AIC and AICc for canonical analysis (RDA, db-RDA and CCA)
}
\keyword{internal}
|
/man/Canonical.Rsq.Rd
|
no_license
|
laijiangshan/rdacca.hp
|
R
| false
| true
| 902
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CanonicalFunctions.r
\name{Canonical.Rsq}
\alias{Canonical.Rsq}
\title{Internal function for rdacca.hp() to calculate the Rsquared, adjusted Rsquared, Pseudo-F value, AIC and AICc for canonical analysis (RDA, db-RDA and CCA)}
\usage{
Canonical.Rsq(dv, iv, method = "RDA", n.perm = 1000)
}
\arguments{
\item{dv}{Response variables. if method="dbRDA", dv is the "dist" matrix.}
\item{iv}{Explanatory variables, typically of environmental variables.}
\item{method}{The type of canonical analysis: RDA, dbRDA or CCA, the default is "RDA".}
\item{n.perm}{Number of permutations to use when computing the adjusted R-squared for a CCA.}
}
\description{
Internal function for rdacca.hp() to calculate the Rsquared, adjusted Rsquared, Pseudo-F value, AIC and AICc for canonical analysis (RDA, db-RDA and CCA)
}
\keyword{internal}
|
# Step 1: read the comma-separated grade data (expects columns Name, Sex, Grade).
x <- read.table("Assignment 6 Dataset.txt", header = TRUE, sep = ",")

# Install plyr only when missing: an unconditional install.packages() on every
# run hits the network and CRAN needlessly.
if (!requireNamespace("plyr", quietly = TRUE)) {
  install.packages("plyr")
}
library(plyr)

# Step 2: add a per-sex average-grade column and save the result.
y <- ddply(x, "Sex", transform, Grade.Average = mean(Grade))
y
# BUG FIX: the original called write.table() twice on "Sorted_Average"; the
# first (space-separated) file was immediately overwritten by the sep = ","
# call, so only the comma-separated write is kept.
write.table(y, "Sorted_Average", sep = ",")

# Step 3: keep only students whose name contains an "i" or "I" and save them.
newx <- subset(x, grepl("[iI]", x$Name))
newx
write.table(newx, "DataSubset", sep = ",")
|
/module_08.R
|
no_license
|
gfoster4/LIS5937
|
R
| false
| false
| 336
|
r
|
#Step 1
x= read.table("Assignment 6 Dataset.txt", header=TRUE, sep=",")
install.packages("plyr")
library(plyr)
y = ddply(x, "Sex", transform, Grade.Average=mean(Grade))
y
write.table(y, "Sorted_Average")
write.table(y, "Sorted_Average", sep=",")
newx = subset(x, grepl("[iI]", x$Name))
newx
write.table(newx,"DataSubset", sep=",")
|
#****************************************************************************************************************************************************model
# Shiny UI fragment (Chinese-language MEPHAS app): linear-regression model
# builder page. The sidebar collects the response/explanatory variables and
# the intercept option; the main panel shows data confirmation plus model
# outputs (fit table, ANOVA, AIC stepwise selection, diagnostics, 3D scatter).
# NOTE(review): all user-facing strings are Chinese UI text and are left
# untouched; the output/input ids ("formula", "str", "fit", "step", "B1", ...)
# must match server-side render functions not visible in this file — confirm
# against the corresponding server script.
sidebarLayout(
sidebarPanel(
# Inline CSS: fixed-height formula preview plus scrollable result panes.
tags$head(tags$style("#formula {height: 50px; background: ghostwhite; color: blue;word-wrap: break-word;}")),
tags$head(tags$style("#str {overflow-y:scroll; max-height: 200px; background: lavender}")),
tags$head(tags$style("#fit {overflow-y:scroll; max-height: 500px; background: lavender;color: black;}")),
tags$head(tags$style("#step {overflow-y:scroll;max-height: 500px; background: lavender};")),
h4(tags$b("模型构建")),
p("使用上一个页面(“数据”选项卡)准备数据。"),
hr(),
# Step 1: variable selectors are built server-side (uiOutput) so that the
# choices track the currently loaded data set.
h4(tags$b("第1步 选择建模使用变量")),
uiOutput('y'),
uiOutput('x'),
#uiOutput('fx'),
# "-1" removes the intercept from the model formula; "" keeps it.
radioButtons("intercept", "3. (可选)保留或删除截距/常数项", ##> intercept or not
choices = c("删除截距/常数项" = "-1",
"保留截距/常数项" = ""),
selected = ""),
uiOutput('conf'),
hr(),
# Step 2: preview of the assembled model formula for the user to verify.
h4(tags$b("第2步 检查模型并生成结果")),
tags$b("有效模型示例: Y ~ X1 + X2"),
verbatimTextOutput("formula"),
p("公式中的'-1'表示截距/常数项已被删除"),
hr(),
# Step 3: action button that triggers model fitting on the server.
h4(tags$b("第3步 如果数据和模型准备就绪,单击蓝色按钮生成模型结果。")),
p(br()),
actionButton("B1", (tags$b("显示拟合结果>>")),class="btn btn-primary",icon=icon("bar-chart-o")),
p(br()),
p(br()),
hr()
),
mainPanel(
# Output 1: data confirmation — variable structure and a data preview table.
h4(tags$b("Output 1. 数据确认")),
tabsetPanel(
tabPanel("变量情报", br(),
verbatimTextOutput("str")
),
tabPanel("数据(一部分)", br(),
p("请在“数据”选项卡中编辑修改数据"),
DT::DTOutput("Xdata2")
)
),
hr(),
# Output 2: model results, one tab per view.
h4(tags$b("Output 2. 模型的结果")),
#actionButton("B1", h4(tags$b("1:出力2.モデルの結果を表示/更新をクリックしてください")), style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
# (The disabled button above carries a Japanese label: "click to
# display/update Output 2, the model results".)
tabsetPanel(
# Tab: coefficient estimates with explanatory HTML notes (CIs, R^2, F test).
tabPanel("模型估计", br(),
HTML(
"
<b> 说明 </b>
<ul>
<li> 每个变量的值为:估计系数(95%置信区间),T统计量(t=)和每个变量显著性的P值(P=)。</li>
<li> 对各变量进行T检验,P<0.05,表明该变量对模型有统计学意义。</li>
<li> 观察值表示样本数量</li>
<li> R<sup>2</sup>是线性回归模型的拟合优度度量,表示自变量共同解释的因变量方差的百分比。假设R2=0.49。这一结果暗示对49%的因变量方差已证明,剩下的51%仍未证明。</li>
<li> 经调整的R<sup>2</sup>用于比较包含不同自变量数的回归模型拟合优度。</li>
<li> F统计量(回归中总体显著性的F检验)对多个系数同时进行判断。
F=(R^2/(k-1))/(1-R^2)/(n-k);n为样本量;k为变量+常数项的数量</li>
</ul>
"
),
#verbatimTextOutput("フィット(Fit)")
p(tags$b("结果")),
htmlOutput("fit"),
downloadButton("downloadfit", "保存到CSV中"),
downloadButton("downloadfit.latex", "保存LaTex代码")
),
# Tab: fitted values table.
tabPanel("数据拟合", br(),
DT::DTOutput("fitdt0")
),
# Tab: ANOVA table with notes on DF/MS/F computation.
tabPanel("ANOVA", br(),
HTML(
"<b> 说明 </b>
<ul>
<li> DF<sub>变量</sub> = 1</li>
<li> DF<sub>残留误差</sub> = [样本值的个数] - [变量数] -1</li>
<li> MS = SS/DF</li>
<li> F = MS<sub>变量</sub> / MS<sub>残留误差</sub> </li>
<li> P值 < 0.05,则变量对于模型是有统计学意义的。</li>
</ul>"
),
p(tags$b("ANOVA表")),
DT::DTOutput("anova")),
# Tab: stepwise variable selection by AIC, with downloadable text output.
tabPanel("AIC变量选择", br(),
HTML(
"<b> 说明 </b>
<ul>
<li> 采用Akaike信息准则(AIC)进行逐步(Stepwise)模型选择。</li>
<li> 模型拟合根据其AIC值秩和,AIC值最低的模型有时被认为是“最佳”模型。</li>
</ul>"
),
p(tags$b("采用Akaike信息准则的模型选择")),
verbatimTextOutput("step"),
downloadButton("downloadsp", "保存TXT文件")
),
# Tab: residual diagnostics (Q-Q normal plot, residuals vs fitted).
tabPanel("诊断图", br(),
HTML(
"<b> 说明 </b>
<ul>
<li> 残差的Q-Q正态图检查残差的正态性。 点的线性表示数据是正态分布的。</li>
<li> 用残差vs拟合图发现异常值</li>
</ul>"
),
p(tags$b("1. 残差的Q-Q正态图")),
plotly::plotlyOutput("p.lm1"),
p(tags$b("2. 残差vs拟合图")),
plotly::plotlyOutput("p.lm2")
),
# Tab: 3D scatter of the response against two chosen predictors.
tabPanel("3D散点图", p(br()),
HTML(
"<b> 说明 </b>
<ul>
<li> 3D散点图显示了因变量(Y)与两个自变量(X1,X2)之间的关系。</li>
<li> 分组变量将点划分为组。</li>
</ul>"
),
uiOutput("vx1"),
uiOutput("vx2"),
uiOutput("vgroup"),
plotly::plotlyOutput("p.3dl")
)
)
)
)
|
/7_1MFSlr_cn/ui_model.R
|
permissive
|
ricciardi/mephas_web
|
R
| false
| false
| 4,601
|
r
|
#****************************************************************************************************************************************************model
sidebarLayout(
sidebarPanel(
tags$head(tags$style("#formula {height: 50px; background: ghostwhite; color: blue;word-wrap: break-word;}")),
tags$head(tags$style("#str {overflow-y:scroll; max-height: 200px; background: lavender}")),
tags$head(tags$style("#fit {overflow-y:scroll; max-height: 500px; background: lavender;color: black;}")),
tags$head(tags$style("#step {overflow-y:scroll;max-height: 500px; background: lavender};")),
h4(tags$b("模型构建")),
p("使用上一个页面(“数据”选项卡)准备数据。"),
hr(),
h4(tags$b("第1步 选择建模使用变量")),
uiOutput('y'),
uiOutput('x'),
#uiOutput('fx'),
radioButtons("intercept", "3. (可选)保留或删除截距/常数项", ##> intercept or not
choices = c("删除截距/常数项" = "-1",
"保留截距/常数项" = ""),
selected = ""),
uiOutput('conf'),
hr(),
h4(tags$b("第2步 检查模型并生成结果")),
tags$b("有效模型示例: Y ~ X1 + X2"),
verbatimTextOutput("formula"),
p("公式中的'-1'表示截距/常数项已被删除"),
hr(),
h4(tags$b("第3步 如果数据和模型准备就绪,单击蓝色按钮生成模型结果。")),
p(br()),
actionButton("B1", (tags$b("显示拟合结果>>")),class="btn btn-primary",icon=icon("bar-chart-o")),
p(br()),
p(br()),
hr()
),
mainPanel(
h4(tags$b("Output 1. 数据确认")),
tabsetPanel(
tabPanel("变量情报", br(),
verbatimTextOutput("str")
),
tabPanel("数据(一部分)", br(),
p("请在“数据”选项卡中编辑修改数据"),
DT::DTOutput("Xdata2")
)
),
hr(),
h4(tags$b("Output 2. 模型的结果")),
#actionButton("B1", h4(tags$b("1:出力2.モデルの結果を表示/更新をクリックしてください")), style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
tabsetPanel(
tabPanel("模型估计", br(),
HTML(
"
<b> 说明 </b>
<ul>
<li> 每个变量的值为:估计系数(95%置信区间),T统计量(t=)和每个变量显著性的P值(P=)。</li>
<li> 对各变量进行T检验,P<0.05,表明该变量对模型有统计学意义。</li>
<li> 观察值表示样本数量</li>
<li> R<sup>2</sup>是线性回归模型的拟合优度度量,表示自变量共同解释的因变量方差的百分比。假设R2=0.49。这一结果暗示对49%的因变量方差已证明,剩下的51%仍未证明。</li>
<li> 经调整的R<sup>2</sup>用于比较包含不同自变量数的回归模型拟合优度。</li>
<li> F统计量(回归中总体显著性的F检验)对多个系数同时进行判断。
F=(R^2/(k-1))/(1-R^2)/(n-k);n为样本量;k为变量+常数项的数量</li>
</ul>
"
),
#verbatimTextOutput("フィット(Fit)")
p(tags$b("结果")),
htmlOutput("fit"),
downloadButton("downloadfit", "保存到CSV中"),
downloadButton("downloadfit.latex", "保存LaTex代码")
),
tabPanel("数据拟合", br(),
DT::DTOutput("fitdt0")
),
tabPanel("ANOVA", br(),
HTML(
"<b> 说明 </b>
<ul>
<li> DF<sub>变量</sub> = 1</li>
<li> DF<sub>残留误差</sub> = [样本值的个数] - [变量数] -1</li>
<li> MS = SS/DF</li>
<li> F = MS<sub>变量</sub> / MS<sub>残留误差</sub> </li>
<li> P值 < 0.05,则变量对于模型是有统计学意义的。</li>
</ul>"
),
p(tags$b("ANOVA表")),
DT::DTOutput("anova")),
tabPanel("AIC变量选择", br(),
HTML(
"<b> 说明 </b>
<ul>
<li> 采用Akaike信息准则(AIC)进行逐步(Stepwise)模型选择。</li>
<li> 模型拟合根据其AIC值秩和,AIC值最低的模型有时被认为是“最佳”模型。</li>
</ul>"
),
p(tags$b("采用Akaike信息准则的模型选择")),
verbatimTextOutput("step"),
downloadButton("downloadsp", "保存TXT文件")
),
tabPanel("诊断图", br(),
HTML(
"<b> 说明 </b>
<ul>
<li> 残差的Q-Q正态图检查残差的正态性。 点的线性表示数据是正态分布的。</li>
<li> 用残差vs拟合图发现异常值</li>
</ul>"
),
p(tags$b("1. 残差的Q-Q正态图")),
plotly::plotlyOutput("p.lm1"),
p(tags$b("2. 残差vs拟合图")),
plotly::plotlyOutput("p.lm2")
),
tabPanel("3D散点图", p(br()),
HTML(
"<b> 说明 </b>
<ul>
<li> 3D散点图显示了因变量(Y)与两个自变量(X1,X2)之间的关系。</li>
<li> 分组变量将点划分为组。</li>
</ul>"
),
uiOutput("vx1"),
uiOutput("vx2"),
uiOutput("vgroup"),
plotly::plotlyOutput("p.3dl")
)
)
)
)
|
# makeCacheMatrix: wrap a matrix in a set of closures that can cache its
# inverse alongside the matrix itself.
#
# Returns a list of four accessor functions sharing one environment:
#   set(y)           -- replace the stored matrix and drop any cached inverse
#   get()            -- return the stored matrix
#   setinverse(inv)  -- store a computed inverse in the cache
#   getinverse()     -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# cacheSolve: return the inverse of the special "matrix" produced by
# makeCacheMatrix, computing it at most once.
#
# On a cache hit the stored inverse is returned (after emitting the
# "getting cached data" message); on a miss the inverse is computed with
# solve(), memoized via x$setinverse(), and returned. Any extra arguments
# in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute, memoize, then fall through to return.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
/cachematrix.R
|
no_license
|
singrisone/ProgrammingAssignment2
|
R
| false
| false
| 890
|
r
|
#makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if (!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
# Consistency check.R
# Author: Yunchen Xiao
# Assesses the consistency of the parameter estimates obtained on the main
# reference dataset by computing Monte-Carlo (MC) errors across three
# independent estimation runs (ori, ex1, ex2).

# NOTE(review): clearing the workspace inside a script is discouraged (it is
# a side effect on the caller's session); retained here only to match the
# original workflow.
rm(list = ls())

# Final-round parameter samples (10000 draws, all 3 chains) from each run.
paras.final.ori <- as.matrix(read.table("Round 5 parameters 10000 all 3 ori.txt",
                                        sep = "", header = TRUE))
paras.final.ex1 <- as.matrix(read.table("Round 6 parameters 10000 all 3 ex1.txt",
                                        sep = "", header = TRUE))
paras.final.ex2 <- as.matrix(read.table("Round 5 parameters 10000 all 3 ex2.txt",
                                        sep = "", header = TRUE))

# Column-wise (per-parameter) means of each run's samples.
paras.mean.ori <- apply(paras.final.ori, 2, mean)
paras.mean.ex1 <- apply(paras.final.ex1, 2, mean)
paras.mean.ex2 <- apply(paras.final.ex2, 2, mean)
paras.mean.mat <- rbind(paras.mean.ori, paras.mean.ex1,
                        paras.mean.ex2)

# MC errors: sd of the three run means divided by sqrt(number of runs).
sd(paras.mean.mat[, 1]) / sqrt(3) # 7.887399e-05
sd(paras.mean.mat[, 2]) / sqrt(3) # 0.00129537
sd(paras.mean.mat[, 3]) / sqrt(3) # 0.04156439
sd(paras.mean.mat[, 4]) / sqrt(3) # 0.0001512242
sd(paras.mean.mat[, 5]) / sqrt(3) # 0.0002559215
sd(paras.mean.mat[, 6]) / sqrt(3) # 0.1093293

# MC error percentage: sd of the run means relative to each parameter's
# reference value (0.01, 0.05, 10, 0.01, 0.1, 5), expressed as a percentage.
sd(paras.mean.mat[, 1]) / 0.01 * 100 # 1.37%
sd(paras.mean.mat[, 2]) / 0.05 * 100 # 4.49%
sd(paras.mean.mat[, 3]) / 10 * 100   # 0.72%
sd(paras.mean.mat[, 4]) / 0.01 * 100 # 2.62%
sd(paras.mean.mat[, 5]) / 0.1 * 100  # 0.44%
# BUG FIX: the original line was "sd(...)/5*10", inconsistent with the
# "* 100" percentage scaling used for every other parameter. With the same
# scaling as its siblings, the value originally reported as 0.38% is ~3.79%.
sd(paras.mean.mat[, 6]) / 5 * 100    # ~3.79% (0.38% in the original, 10x low)
|
/ABC-BCD/Consistency analysis/Consistency check.R
|
permissive
|
ycx12341/Data-Code-Figures-RSOS-rev
|
R
| false
| false
| 1,451
|
r
|
# Consistency check.R
# Author: Yunchen Xiao
# This .R file assesses the consistency of the parameter estimates obtained on
# the main reference dataset by calculating the Monte-Carlo errors.
rm(list = ls())
paras.final.ori <- as.matrix(read.table("Round 5 parameters 10000 all 3 ori.txt",
sep = "", header = TRUE))
paras.final.ex1 <- as.matrix(read.table("Round 6 parameters 10000 all 3 ex1.txt",
sep = "", header = TRUE))
paras.final.ex2 <- as.matrix(read.table("Round 5 parameters 10000 all 3 ex2.txt",
sep = "", header = TRUE))
paras.mean.ori <- apply(paras.final.ori, 2 ,mean)
paras.mean.ex1 <- apply(paras.final.ex1, 2, mean)
paras.mean.ex2 <- apply(paras.final.ex2, 2, mean)
paras.mean.mat <- rbind(paras.mean.ori, paras.mean.ex1,
paras.mean.ex2)
# MC errors:
sd(paras.mean.mat[,1])/sqrt(3) # 7.887399e-05
sd(paras.mean.mat[,2])/sqrt(3) # 0.00129537
sd(paras.mean.mat[,3])/sqrt(3) # 0.04156439
sd(paras.mean.mat[,4])/sqrt(3) # 0.0001512242
sd(paras.mean.mat[,5])/sqrt(3) # 0.0002559215
sd(paras.mean.mat[,6])/sqrt(3) # 0.1093293
# MC error percentage:
sd(paras.mean.mat[,1])/0.01*100 # 1.37%
sd(paras.mean.mat[,2])/0.05*100 # 4.49%
sd(paras.mean.mat[,3])/10*100 # 0.72%
sd(paras.mean.mat[,4])/0.01*100 # 2.62%
sd(paras.mean.mat[,5])/0.1*100 # 0.44%
sd(paras.mean.mat[,6])/5*10 # 0.38%
|
# Auto-extracted example script for the `Spleendata` dataset shipped with
# the SMVar package (runs the example section of its Rd documentation).
library(SMVar)
### Name: Spleendata
### Title: Spleendata
### Aliases: Spleendata
### Keywords: datasets
### ** Examples
# Load the dataset into the workspace, then attach it so its elements can be
# referenced by bare name.
data(Spleendata)
# NOTE(review): attach() is discouraged in non-interactive code (it mutates
# the search path and risks masking); acceptable here only because this is a
# generated documentation example.
attach(Spleendata)
|
/data/genthat_extracted_code/SMVar/examples/Spleendata.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 164
|
r
|
library(SMVar)
### Name: Spleendata
### Title: Spleendata
### Aliases: Spleendata
### Keywords: datasets
### ** Examples
data(Spleendata)
attach(Spleendata)
|
# |----------------------------------------------------------------------------------|
# | Project: Study of Diabetes in MES13 cells                                        |
# | Script: Analysis of microarray data from a LNCap/SETD7 KD experiment             |
# | Scientist: Chao Wang                                                             |
# | Author: Davit Sargsyan                                                           |
# | Created: 11/15/2017                                                              |
# |----------------------------------------------------------------------------------|
# NOTE(review): the Project line mentions MES13/diabetes, but the script
# analyzes LNCaP/SETD7 data — the header was possibly copied from another
# project; confirm with the authors.
# Header----
# Save console output to a log file
# sink(file = "tmp/log_lncap_setd7kd_venn_diagram_v1.txt")
# NOTE(review): library() is preferred over require() for hard dependencies
# (require() returns FALSE instead of erroring when a package is missing).
require(data.table)
require(ggplot2)
require(VennDiagram)
require(gridExtra)
# Part I: Data----
# Move up one directory to locate the data folder relative to the project
# root, then restore the working directory. NOTE(review): setwd() in scripts
# is fragile — this assumes the script is launched from one level below the
# project root.
wd <- getwd()
setwd("..")
DATA_HOME <- paste(getwd(),
"data/lncap.setd7kd",
sep = "/")
# Reset working directory
setwd(wd)
getwd()
# List available data files (printed for manual inspection).
lData <- dir(DATA_HOME)
lData
# LNCaP vs LNCaP+PEITC comparison: keep columns 1, 3, 6, 9 and read all
# columns as character to avoid type-guessing surprises.
dt1 <- fread(paste(DATA_HOME,
"LNCaP vs LNCaP PEITC.csv",
sep = "/"),
skip = 4,
header = TRUE,
colClasses = "character")[, c(1, 3, 6, 9)]
names(dt1)[1:2] <- c("LNCaP",
"LNCaP PEITC")
summary(dt1)
# Drop rows without an ID.
dt1 <- subset(dt1,
`Other ID` != "")
dt1
# Setd7 vs Setd7+PEITC comparison (same column selection; note skip = 5 here
# vs skip = 4 above — the source CSVs have different preamble lengths).
dt2 <- fread(paste(DATA_HOME,
"Setd7 vs Setd7 PEITC.csv",
sep = "/"),
skip = 5,
header = TRUE,
colClasses = "character")[, c(1, 3, 6, 9)]
names(dt2)[1:2] <- c("Setd7",
"Setd7 PEITC")
dt2 <- subset(dt2,
`Other ID` != "")
dt2
# Get mapping file: Affimetrix ID to gene name----
dt1.map <- fread(paste(DATA_HOME,
"LNCaP vs LNCaP PEITC ID-gene name.csv",
sep = "/"),
header = TRUE,
colClasses = "character")
dt1.map <- unique(dt1.map)
dt1.map <- subset(dt1.map,
Symbol != "")
dt2.map <- fread(paste(DATA_HOME,
"Setd7 vs Setd7 PEITC ID-gene name.csv",
sep = "/"),
header = TRUE,
colClasses = "character")
dt2.map <- unique(dt2.map)
dt2.map <- subset(dt2.map,
Symbol != "")
dt3.map <- fread(paste(DATA_HOME,
"LNCaP vs Setd7 ID-gene name.csv",
sep = "/"),
header = TRUE,
colClasses = "character")
dt3.map <- unique(dt3.map)
dt3.map <- subset(dt3.map,
Symbol != "")
# Combine the three ID-to-gene maps into one (outer joins keep all IDs).
dt.map <- merge(dt1.map,
dt2.map,
by = names(dt1.map),
all = TRUE)
dt.map <- merge(dt.map,
dt3.map,
by = names(dt.map),
all = TRUE)
names(dt.map)[1] <- "Other ID"
dt.map
# Map LNCaP----
# Merge data and mapping; inner joins drop IDs absent from the map.
dt1 <- merge(dt.map,
dt1,
by = "Other ID")
gene1 <- unique(dt1$`Other ID`)
dt2 <- merge(dt.map,
dt2,
by = "Other ID")
gene2 <- unique(dt2$`Other ID`)
# CHECKPOINT----
# Sanity check: the inner-join row count should equal the number of gene1
# IDs also present in gene2 (prints TRUE if consistent).
dtt <- merge(dt1,
dt2,
by = "Other ID")
nrow(dtt) == sum(gene1 %in% gene2)
# Draw Venn diagram----
# Two-set Venn of gene IDs found in each comparison; rendered on screen,
# then re-rendered into a 5x5in 300dpi PNG under tmp/.
p1 <- venn.diagram(x = list(LNCaP = gene1,
Setd7 = gene2),
filename = NULL,
fill = c("light blue", "grey"),
alpha = c(0.5, 0.5),
compression = "lzw+p",
main = "Number of Genes Found")
grid.arrange(gTree(children = p1))
png(filename = "tmp/venn_lncap_setd7.png",
height = 5,
width = 5,
units = 'in',
res = 300)
grid.arrange(gTree(children = p1))
graphics.off()
|
/source/lncap_setd7kd_venn_diagram_v1.R
|
no_license
|
KongLabRUSP/lncap.setd7kd
|
R
| false
| false
| 3,816
|
r
|
# |----------------------------------------------------------------------------------|
# | Project: Study of Diabetes in MES13 cells |
# | Script: Analysis of microarray data from a LNCap/SETD7 KD experiment |
# | Scientist: Chao Wang |
# | Author: Davit Sargsyan |
# | Created: 11/15/2017 |
# |----------------------------------------------------------------------------------|
# Header----
# Save consol output to a log file
# sink(file = "tmp/log_lncap_setd7kd_venn_diagram_v1.txt")
require(data.table)
require(ggplot2)
require(VennDiagram)
require(gridExtra)
# Part I: Data----
# Move up one directory
wd <- getwd()
setwd("..")
DATA_HOME <- paste(getwd(),
"data/lncap.setd7kd",
sep = "/")
# Reset working directory
setwd(wd)
getwd()
lData <- dir(DATA_HOME)
lData
dt1 <- fread(paste(DATA_HOME,
"LNCaP vs LNCaP PEITC.csv",
sep = "/"),
skip = 4,
header = TRUE,
colClasses = "character")[, c(1, 3, 6, 9)]
names(dt1)[1:2] <- c("LNCaP",
"LNCaP PEITC")
summary(dt1)
dt1 <- subset(dt1,
`Other ID` != "")
dt1
dt2 <- fread(paste(DATA_HOME,
"Setd7 vs Setd7 PEITC.csv",
sep = "/"),
skip = 5,
header = TRUE,
colClasses = "character")[, c(1, 3, 6, 9)]
names(dt2)[1:2] <- c("Setd7",
"Setd7 PEITC")
dt2 <- subset(dt2,
`Other ID` != "")
dt2
# Get mapping file: Affimetrix ID to gene name----
dt1.map <- fread(paste(DATA_HOME,
"LNCaP vs LNCaP PEITC ID-gene name.csv",
sep = "/"),
header = TRUE,
colClasses = "character")
dt1.map <- unique(dt1.map)
dt1.map <- subset(dt1.map,
Symbol != "")
dt2.map <- fread(paste(DATA_HOME,
"Setd7 vs Setd7 PEITC ID-gene name.csv",
sep = "/"),
header = TRUE,
colClasses = "character")
dt2.map <- unique(dt2.map)
dt2.map <- subset(dt2.map,
Symbol != "")
dt3.map <- fread(paste(DATA_HOME,
"LNCaP vs Setd7 ID-gene name.csv",
sep = "/"),
header = TRUE,
colClasses = "character")
dt3.map <- unique(dt3.map)
dt3.map <- subset(dt3.map,
Symbol != "")
dt.map <- merge(dt1.map,
dt2.map,
by = names(dt1.map),
all = TRUE)
dt.map <- merge(dt.map,
dt3.map,
by = names(dt.map),
all = TRUE)
names(dt.map)[1] <- "Other ID"
dt.map
# Map LNCaP----
# Merge data and mappiing
dt1 <- merge(dt.map,
dt1,
by = "Other ID")
gene1 <- unique(dt1$`Other ID`)
dt2 <- merge(dt.map,
dt2,
by = "Other ID")
gene2 <- unique(dt2$`Other ID`)
# CHECKPOINT----
dtt <- merge(dt1,
dt2,
by = "Other ID")
nrow(dtt) == sum(gene1 %in% gene2)
# Draw Venn diagram----
p1 <- venn.diagram(x = list(LNCaP = gene1,
Setd7 = gene2),
filename = NULL,
fill = c("light blue", "grey"),
alpha = c(0.5, 0.5),
compression = "lzw+p",
main = "Number of Genes Found")
grid.arrange(gTree(children = p1))
png(filename = "tmp/venn_lncap_setd7.png",
height = 5,
width = 5,
units = 'in',
res = 300)
grid.arrange(gTree(children = p1))
graphics.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setup_font.R
\name{setup_font}
\alias{setup_font}
\title{Setup a font to be used in Shiny or Markdown}
\usage{
setup_font(id, output_dir, variants = NULL)
}
\arguments{
\item{id}{Id of the font, correspond to column \code{id} from \code{\link{get_all_fonts}}.}
\item{output_dir}{Output directory where to save font and CSS files. Must be a directory.}
\item{variants}{Variant(s) to download, default is to include all available ones.}
}
\value{
Two directories will be created (if they do not exist): \strong{fonts} and \strong{css}.
}
\description{
Setup a font to be used in Shiny or Markdown
}
\examples{
\donttest{
# In a Shiny app, you can use the www/ directory
# in Markdown, use a subfolder of your Rmd directory
setup_font(
id = "open-sans-condensed",
output_dir = "path/to/www"
)
}
}
|
/man/setup_font.Rd
|
no_license
|
marinamerlo/gfonts
|
R
| false
| true
| 881
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setup_font.R
\name{setup_font}
\alias{setup_font}
\title{Setup a font to be used in Shiny or Markdown}
\usage{
setup_font(id, output_dir, variants = NULL)
}
\arguments{
\item{id}{Id of the font, correspond to column \code{id} from \code{\link{get_all_fonts}}.}
\item{output_dir}{Output directory where to save font and CSS files. Must be a directory.}
\item{variants}{Variant(s) to download, default is to include all available ones.}
}
\value{
Two directories will be created (if they do not exist): \strong{fonts} and \strong{css}.
}
\description{
Setup a font to be used in Shiny or Markdown
}
\examples{
\donttest{
# In a Shiny app, you can use the www/ directory
# in Markdown, use a subfolder of your Rmd directory
setup_font(
id = "open-sans-condensed",
output_dir = "path/to/www"
)
}
}
|
# Boxplots of mpg by cylinder count from the built-in mtcars dataset,
# written to PNG files in the current working directory.
input <- mtcars[,c('mpg','cyl')]
print(head(input))
# Basic boxplot: one box per number of cylinders (4, 6, 8).
png(file = "boxplot.png")
boxplot(mpg ~ cyl, data = mtcars, xlab = "Number of Cylinders",ylab = "Miles Per Gallon", main = "Mileage Data")
dev.off()
# Boxplot with Notch: notches give a rough visual test for differing medians;
# varwidth scales box width with group size.
png(file = "boxplot_with_notch.png")
boxplot(mpg ~ cyl, data = mtcars,
xlab = "Number of Cylinders",
ylab = "Miles Per Gallon",
main = "Mileage Data",
notch = TRUE,
varwidth = TRUE,
col = c("green","yellow","purple"),
# NOTE(review): names relabels the 4/6/8-cylinder groups in order, so
# "High" (mileage) corresponds to 4 cylinders — confirm the intended order.
names = c("High","Medium","Low")
)
dev.off()
|
/R_WorkSpace/learning/chart/box_plot_demo.R
|
no_license
|
jk983294/Explore
|
R
| false
| false
| 556
|
r
|
# Boxplots
input <- mtcars[,c('mpg','cyl')]
print(head(input))
png(file = "boxplot.png")
boxplot(mpg ~ cyl, data = mtcars, xlab = "Number of Cylinders",ylab = "Miles Per Gallon", main = "Mileage Data")
dev.off()
# Boxplot with Notch
png(file = "boxplot_with_notch.png")
boxplot(mpg ~ cyl, data = mtcars,
xlab = "Number of Cylinders",
ylab = "Miles Per Gallon",
main = "Mileage Data",
notch = TRUE,
varwidth = TRUE,
col = c("green","yellow","purple"),
names = c("High","Medium","Low")
)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/results_vm_sev_ols_mc.mvn_ci.R
\docType{data}
\name{results_vm_sev_ols_mc.mvn_ci}
\alias{results_vm_sev_ols_mc.mvn_ci}
\title{Results: Simple Mediation Model - Vale and Maurelli (1983) - Skewness = 3, Kurtosis = 21 - Complete Data - Monte Carlo Method Confidence Intervals with Ordinary Least Squares Parameter Estimates and Standard Errors}
\format{
A data frame with the following variables
\describe{
\item{taskid}{
Simulation task identification number.
}
\item{n}{
Sample size.
}
\item{simreps}{
Monte Carlo replications.
}
\item{taudot}{
Population slope of path from \code{x} to \code{y} \eqn{\left( \dot{\tau} \right)}
}
\item{beta}{
Population slope of path from \code{m} to \code{y} \eqn{\left( \beta \right)}
}
\item{alpha}{
Population slope of path from \code{x} to \code{m} \eqn{\left( \alpha \right)}
}
\item{alphabeta}{
Population indirect effect of \code{x} on \code{y} through \code{m} \eqn{\left( \alpha \beta \right)}
}
\item{sigma2x}{
Population variance of \code{x} \eqn{\left( \sigma_{x}^{2} \right)}
}
\item{sigma2epsilonm}{
Population error variance of \code{m} \eqn{\left( \sigma_{\varepsilon_{m}}^{2} \right)}
}
\item{sigma2epsilony}{
Population error variance of \code{y} \eqn{\left( \sigma_{\varepsilon_{y}}^{2} \right)}
}
\item{mux}{
Population mean of \code{x} \eqn{\left( \mu_x \right)}.
}
\item{deltam}{
Population intercept of \code{m} \eqn{\left( \delta_m \right)}.
}
\item{deltay}{
Population intercept of \code{y} \eqn{\left( \delta_y \right)}.
}
\item{est}{
Mean of the estimate of the indirect effect \eqn{\left( \hat{\alpha} \hat{\beta} \right)}.
}
\item{se}{
Mean of the estimate of standard error of the indirect effect \eqn{\left( \hat{\alpha} \hat{\beta} \right)}.
}
\item{reps}{
Monte Carlo method of bootstrap replications.
}
\item{ci_0.05}{
Mean of the lower limit confidence interval for the 99.9\% confidence interval.
}
\item{ci_0.5}{
Mean of the lower limit confidence interval for the 99\% confidence interval.
}
\item{ci_2.5}{
Mean of the lower limit confidence interval for the 95\% confidence interval.
}
\item{ci_97.5}{
Mean of the upper limit confidence interval for the 95\% confidence interval.
}
\item{ci_99.5}{
Mean of the upper limit confidence interval for the 99\% confidence interval.
}
\item{ci_99.95}{
Mean of the upper limit confidence interval for the 99.9\% confidence interval.
}
\item{zero_hit_99.9}{
Mean zero hit for the 99.9\% confidence interval.
}
\item{zero_hit_99}{
Mean zero hit for the 99\% confidence interval.
}
\item{zero_hit_95}{
Mean zero hit for the 95\% confidence interval.
}
\item{len_99.9}{
Mean confidence interval length for the 99.9\% confidence interval.
}
\item{len_99}{
Mean confidence interval length for the 99\% confidence interval.
}
\item{len_95}{
Mean confidence interval length for the 95\% confidence interval.
}
\item{shape_99.9}{
Mean confidence interval shape for the 99.9\% confidence interval.
}
\item{shape_99}{
Mean confidence interval shape for the 99\% confidence interval.
}
\item{shape_95}{
Mean confidence interval shape for the 95\% confidence interval.
}
\item{theta_hit_99.9}{
Mean theta hit for the 99.9\% confidence interval.
}
\item{theta_hit_99}{
Mean theta hit for the 99\% confidence interval.
}
\item{theta_hit_95}{
Mean theta hit for the 95\% confidence interval.
}
\item{theta_miss_99.9}{
Mean theta miss for the 99.9\% confidence interval.
}
\item{theta_miss_99}{
Mean theta miss for the 99\% confidence interval.
}
\item{theta_miss_95}{
Mean theta miss for the 95\% confidence interval.
}
\item{theta}{
Population parameter \eqn{\alpha \beta}.
}
\item{power_99.9}{
Mean power for the 99.9\% confidence interval.
}
\item{power_99}{
Mean power for the 99\% confidence interval.
}
\item{power_95}{
Mean power for the 95\% confidence interval.
}
\item{liberal_ll_99.9}{
Lower limit of the liberal criteria for the 99.9\% confidence interval.
}
\item{liberal_ul_99.9}{
Upper limit of the liberal criteria for the 99.9\% confidence interval.
}
\item{moderate_ll_99.9}{
Lower limit of the moderate criteria for the 99.9\% confidence interval.
}
\item{moderate_ul_99.9}{
Upper limit of the moderate criteria for the 99.9\% confidence interval.
}
\item{strict_ll_99.9}{
Lower limit of the strict criteria for the 99.9\% confidence interval.
}
\item{strict_ul_99.9}{
Upper limit of the strict criteria for the 99.9\% confidence interval.
}
\item{liberal_ll_99}{
Lower limit of the liberal criteria for the 99\% confidence interval.
}
\item{liberal_ul_99}{
Upper limit of the liberal criteria for the 99\% confidence interval.
}
\item{moderate_ll_99}{
Lower limit of the moderate criteria for the 99\% confidence interval.
}
\item{moderate_ul_99}{
Upper limit of the moderate criteria for the 99\% confidence interval.
}
\item{strict_ll_99}{
Lower limit of the strict criteria for the 99\% confidence interval.
}
\item{strict_ul_99}{
Upper limit of the strict criteria for the 99\% confidence interval.
}
\item{liberal_ll_95}{
Lower limit of the liberal criteria for the 95\% confidence interval.
}
\item{liberal_ul_95}{
Upper limit of the liberal criteria for the 95\% confidence interval.
}
\item{moderate_ll_95}{
Lower limit of the moderate criteria for the 95\% confidence interval.
}
\item{moderate_ul_95}{
Upper limit of the moderate criteria for the 95\% confidence interval.
}
\item{strict_ll_95}{
Lower limit of the strict criteria for the 95\% confidence interval.
}
\item{strict_ul_95}{
Upper limit of the strict criteria for the 95\% confidence interval.
}
\item{serlin_ll_95}{
Lower limit of the Serlin criteria for the 95\% confidence interval.
}
\item{serlin_ul_95}{
Upper limit of the Serlin criteria for the 95\% confidence interval.
}
\item{liberal_99.9}{
Logical. 1 if miss rate is inside the liberal robustness criteria for 99.9\% confidence interval.
}
\item{liberal_99}{
Logical. 1 if miss rate is inside the liberal robustness criteria for 99\% confidence interval.
}
\item{liberal_95}{
Logical. 1 if miss rate is inside the liberal robustness criteria for 95\% confidence interval.
}
\item{moderate_99.9}{
Logical. 1 if miss rate is inside the moderate robustness criteria for 99.9\% confidence interval.
}
\item{moderate_99}{
Logical. 1 if miss rate is inside the moderate robustness criteria for 99\% confidence interval.
}
\item{moderate_95}{
Logical. 1 if miss rate is inside the moderate robustness criteria for 95\% confidence interval.
}
\item{strict_99.9}{
Logical. 1 if miss rate is inside the strict robustness criteria for 99.9\% confidence interval.
}
\item{strict_99}{
Logical. 1 if miss rate is inside the strict robustness criteria for 99\% confidence interval.
}
\item{strict_95}{
Logical. 1 if miss rate is inside the strict robustness criteria for 95\% confidence interval.
}
\item{serlin_95}{
Logical. 1 if miss rate is inside the Serlin robustness criteria for 95\% confidence interval.
}
\item{missing}{
Type of missingness.
}
\item{std}{
Standardized vs. unstandardized indirect effect.
}
\item{Method}{
Method used. Fit in this case.
}
\item{n_label}{
Sample size labels.
}
\item{alpha_label}{
\eqn{\alpha} labels.
}
\item{beta_label}{
\eqn{\beta} labels.
}
\item{taudot_label}{
\eqn{\dot{\tau}} labels.
}
\item{theta_label}{
\eqn{\theta} labels.
}
}
}
\usage{
results_vm_sev_ols_mc.mvn_ci
}
\description{
Results: Simple Mediation Model - Vale and Maurelli (1983) - Skewness = 3, Kurtosis = 21 - Complete Data - Monte Carlo Method Confidence Intervals with Ordinary Least Squares Parameter Estimates and Standard Errors
}
\details{
The simple mediation model is given by
\deqn{
y_i = \delta_y + \dot{\tau} x_i + \beta m_i + \varepsilon_{y_{i}}
}
\deqn{
m_i = \delta_m + \alpha x_i + \varepsilon_{m_{i}}
}
The parameters for the mean structure are
\deqn{
\boldsymbol{\theta}_{\text{mean structure}} = \left\{ \mu_x, \delta_m, \delta_y \right\} .
}
The parameters for the covariance structure are
\deqn{
\boldsymbol{\theta}_{\text{covariance structure}} = \left\{ \dot{\tau}, \beta, \alpha, \sigma_{x}^{2},
\sigma_{\varepsilon_{m}}^{2}, \sigma_{\varepsilon_{y}}^{2} \right\} .
}
}
\examples{
data(results_vm_sev_ols_mc.mvn_ci, package = "jeksterslabRmedsimple")
head(results_vm_sev_ols_mc.mvn_ci)
str(results_vm_sev_ols_mc.mvn_ci)
}
\seealso{
Other results:
\code{\link{results_beta_fit.ols}},
\code{\link{results_beta_ols_mc.mvn_ci}},
\code{\link{results_exp_fit.ols}},
\code{\link{results_exp_ols_mc.mvn_ci}},
\code{\link{results_mvn_fit.ols}},
\code{\link{results_mvn_fit.sem}},
\code{\link{results_mvn_mar_fit.sem}},
\code{\link{results_mvn_mar_mc.mvn_ci}},
\code{\link{results_mvn_mar_nb.fiml_ci}},
\code{\link{results_mvn_mar_pb.mvn_ci}},
\code{\link{results_mvn_mcar_fit.sem}},
\code{\link{results_mvn_mcar_mc.mvn_ci}},
\code{\link{results_mvn_mcar_nb.fiml_ci}},
\code{\link{results_mvn_mcar_pb.mvn_ci}},
\code{\link{results_mvn_mnar_fit.sem}},
\code{\link{results_mvn_mnar_mc.mvn_ci}},
\code{\link{results_mvn_mnar_nb.fiml_ci}},
\code{\link{results_mvn_nb_ci}},
\code{\link{results_mvn_ols_mc.mvn_ci}},
\code{\link{results_mvn_pb.mvn_ci}},
\code{\link{results_mvn_sem_mc.mvn_ci}},
\code{\link{results_vm_mod_fit.ols}},
\code{\link{results_vm_mod_fit.sem.mlr}},
\code{\link{results_vm_mod_nb_ci}},
\code{\link{results_vm_mod_ols_mc.mvn_ci}},
\code{\link{results_vm_mod_pb.mvn_ci}},
\code{\link{results_vm_mod_sem_mc.mvn_ci}},
\code{\link{results_vm_sev_fit.ols}},
\code{\link{results_vm_sev_fit.sem.mlr}},
\code{\link{results_vm_sev_nb_ci}},
\code{\link{results_vm_sev_pb.mvn_ci}},
\code{\link{results_vm_sev_sem_mc.mvn_ci}}
}
\concept{results}
\keyword{results}
|
/man/results_vm_sev_ols_mc.mvn_ci.Rd
|
permissive
|
jeksterslabds/jeksterslabRmedsimple
|
R
| false
| true
| 9,644
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/results_vm_sev_ols_mc.mvn_ci.R
\docType{data}
\name{results_vm_sev_ols_mc.mvn_ci}
\alias{results_vm_sev_ols_mc.mvn_ci}
\title{Results: Simple Mediation Model - Vale and Maurelli (1983) - Skewness = 3, Kurtosis = 21 - Complete Data - Monte Carlo Method Confidence Intervals with Ordinary Least Squares Parameter Estimates and Standard Errors}
\format{
A data frame with the following variables
\describe{
\item{taskid}{
Simulation task identification number.
}
\item{n}{
Sample size.
}
\item{simreps}{
Monte Carlo replications.
}
\item{taudot}{
Population slope of path from \code{x} to \code{y} \eqn{\left( \dot{\tau} \right)}
}
\item{beta}{
Population slope of path from \code{m} to \code{y} \eqn{\left( \beta \right)}
}
\item{alpha}{
Population slope of path from \code{x} to \code{m} \eqn{\left( \alpha \right)}
}
\item{alphabeta}{
Population indirect effect of \code{x} on \code{y} through \code{m} \eqn{\left( \alpha \beta \right)}
}
\item{sigma2x}{
Population variance of \code{x} \eqn{\left( \sigma_{x}^{2} \right)}
}
\item{sigma2epsilonm}{
Population error variance of \code{m} \eqn{\left( \sigma_{\varepsilon_{m}}^{2} \right)}
}
\item{sigma2epsilony}{
Population error variance of \code{y} \eqn{\left( \sigma_{\varepsilon_{y}}^{2} \right)}
}
\item{mux}{
Population mean of \code{x} \eqn{\left( \mu_x \right)}.
}
\item{deltam}{
Population intercept of \code{m} \eqn{\left( \delta_m \right)}.
}
\item{deltay}{
Population intercept of \code{y} \eqn{\left( \delta_y \right)}.
}
\item{est}{
Mean of the estimate of the indirect effect \eqn{\left( \hat{\alpha} \hat{\beta} \right)}.
}
\item{se}{
Mean of the estimate of standard error of the indirect effect \eqn{\left( \hat{\alpha} \hat{\beta} \right)}.
}
\item{reps}{
Monte Carlo method of bootstrap replications.
}
\item{ci_0.05}{
Mean of the lower limit confidence interval for the 99.9\% confidence interval.
}
\item{ci_0.5}{
Mean of the lower limit confidence interval for the 99\% confidence interval.
}
\item{ci_2.5}{
Mean of the lower limit confidence interval for the 95\% confidence interval.
}
\item{ci_97.5}{
Mean of the upper limit confidence interval for the 95\% confidence interval.
}
\item{ci_99.5}{
Mean of the upper limit confidence interval for the 99\% confidence interval.
}
\item{ci_99.95}{
Mean of the upper limit confidence interval for the 99.9\% confidence interval.
}
\item{zero_hit_99.9}{
Mean zero hit for the 99.9\% confidence interval.
}
\item{zero_hit_99}{
Mean zero hit for the 99\% confidence interval.
}
\item{zero_hit_95}{
Mean zero hit for the 95\% confidence interval.
}
\item{len_99.9}{
Mean confidence interval length for the 99.9\% confidence interval.
}
\item{len_99}{
Mean confidence interval length for the 99\% confidence interval.
}
\item{len_95}{
Mean confidence interval length for the 95\% confidence interval.
}
\item{shape_99.9}{
Mean confidence interval shape for the 99.9\% confidence interval.
}
\item{shape_99}{
Mean confidence interval shape for the 99\% confidence interval.
}
\item{shape_95}{
Mean confidence interval shape for the 95\% confidence interval.
}
\item{theta_hit_99.9}{
Mean theta hit for the 99.9\% confidence interval.
}
\item{theta_hit_99}{
Mean theta hit for the 99\% confidence interval.
}
\item{theta_hit_95}{
Mean theta hit for the 95\% confidence interval.
}
\item{theta_miss_99.9}{
Mean theta miss for the 99.9\% confidence interval.
}
\item{theta_miss_99}{
Mean theta miss for the 99\% confidence interval.
}
\item{theta_miss_95}{
Mean theta miss for the 95\% confidence interval.
}
\item{theta}{
Population parameter \eqn{\alpha \beta}.
}
\item{power_99.9}{
Mean power for the 99.9\% confidence interval.
}
\item{power_99}{
Mean power for the 99\% confidence interval.
}
\item{power_95}{
Mean power for the 95\% confidence interval.
}
\item{liberal_ll_99.9}{
Lower limit of the liberal criteria for the 99.9\% confidence interval.
}
\item{liberal_ul_99.9}{
Upper limit of the liberal criteria for the 99.9\% confidence interval.
}
\item{moderate_ll_99.9}{
Lower limit of the moderate criteria for the 99.9\% confidence interval.
}
\item{moderate_ul_99.9}{
Upper limit of the moderate criteria for the 99.9\% confidence interval.
}
\item{strict_ll_99.9}{
Lower limit of the strict criteria for the 99.9\% confidence interval.
}
\item{strict_ul_99.9}{
Upper limit of the strict criteria for the 99.9\% confidence interval.
}
\item{liberal_ll_99}{
Lower limit of the liberal criteria for the 99\% confidence interval.
}
\item{liberal_ul_99}{
Upper limit of the liberal criteria for the 99\% confidence interval.
}
\item{moderate_ll_99}{
Lower limit of the moderate criteria for the 99\% confidence interval.
}
\item{moderate_ul_99}{
Upper limit of the moderate criteria for the 99\% confidence interval.
}
\item{strict_ll_99}{
Lower limit of the strict criteria for the 99\% confidence interval.
}
\item{strict_ul_99}{
Upper limit of the strict criteria for the 99\% confidence interval.
}
\item{liberal_ll_95}{
Lower limit of the liberal criteria for the 95\% confidence interval.
}
\item{liberal_ul_95}{
Upper limit of the liberal criteria for the 95\% confidence interval.
}
\item{moderate_ll_95}{
Lower limit of the moderate criteria for the 95\% confidence interval.
}
\item{moderate_ul_95}{
Upper limit of the moderate criteria for the 95\% confidence interval.
}
\item{strict_ll_95}{
Lower limit of the strict criteria for the 95\% confidence interval.
}
\item{strict_ul_95}{
Upper limit of the strict criteria for the 95\% confidence interval.
}
\item{serlin_ll_95}{
Lower limit of the Serlin criteria for the 95\% confidence interval.
}
\item{serlin_ul_95}{
Upper limit of the Serlin criteria for the 95\% confidence interval.
}
\item{liberal_99.9}{
Logical. 1 if miss rate is inside the liberal robustness criteria for 99.9\% confidence interval.
}
\item{liberal_99}{
Logical. 1 if miss rate is inside the liberal robustness criteria for 99\% confidence interval.
}
\item{liberal_95}{
Logical. 1 if miss rate is inside the liberal robustness criteria for 95\% confidence interval.
}
\item{moderate_99.9}{
Logical. 1 if miss rate is inside the moderate robustness criteria for 99.9\% confidence interval.
}
\item{moderate_99}{
Logical. 1 if miss rate is inside the moderate robustness criteria for 99\% confidence interval.
}
\item{moderate_95}{
Logical. 1 if miss rate is inside the moderate robustness criteria for 95\% confidence interval.
}
\item{strict_99.9}{
Logical. 1 if miss rate is inside the strict robustness criteria for 99.9\% confidence interval.
}
\item{strict_99}{
Logical. 1 if miss rate is inside the strict robustness criteria for 99\% confidence interval.
}
\item{strict_95}{
Logical. 1 if miss rate is inside the strict robustness criteria for 95\% confidence interval.
}
\item{serlin_95}{
Logical. 1 if miss rate is inside the Serlin robustness criteria for 95\% confidence interval.
}
\item{missing}{
Type of missingness.
}
\item{std}{
Standardized vs. unstandardized indirect effect.
}
\item{Method}{
Method used. Fit in this case.
}
\item{n_label}{
Sample size labels.
}
\item{alpha_label}{
\eqn{\alpha} labels.
}
\item{beta_label}{
\eqn{\beta} labels.
}
\item{taudot_label}{
\eqn{\dot{\tau}} labels.
}
\item{theta_label}{
\eqn{\theta} labels.
}
}
}
\usage{
results_vm_sev_ols_mc.mvn_ci
}
\description{
Results: Simple Mediation Model - Vale and Maurelli (1983) - Skewness = 3, Kurtosis = 21 - Complete Data - Monte Carlo Method Confidence Intervals with Ordinary Least Squares Parameter Estimates and Standard Errors
}
\details{
The simple mediation model is given by
\deqn{
y_i = \delta_y + \dot{\tau} x_i + \beta m_i + \varepsilon_{y_{i}}
}
\deqn{
m_i = \delta_m + \alpha x_i + \varepsilon_{m_{i}}
}
The parameters for the mean structure are
\deqn{
\boldsymbol{\theta}_{\text{mean structure}} = \left\{ \mu_x, \delta_m, \delta_y \right\} .
}
The parameters for the covariance structure are
\deqn{
\boldsymbol{\theta}_{\text{covariance structure}} = \left\{ \dot{\tau}, \beta, \alpha, \sigma_{x}^{2},
\sigma_{\varepsilon_{m}}^{2}, \sigma_{\varepsilon_{y}}^{2} \right\} .
}
}
\examples{
data(results_vm_sev_ols_mc.mvn_ci, package = "jeksterslabRmedsimple")
head(results_vm_sev_ols_mc.mvn_ci)
str(results_vm_sev_ols_mc.mvn_ci)
}
\seealso{
Other results:
\code{\link{results_beta_fit.ols}},
\code{\link{results_beta_ols_mc.mvn_ci}},
\code{\link{results_exp_fit.ols}},
\code{\link{results_exp_ols_mc.mvn_ci}},
\code{\link{results_mvn_fit.ols}},
\code{\link{results_mvn_fit.sem}},
\code{\link{results_mvn_mar_fit.sem}},
\code{\link{results_mvn_mar_mc.mvn_ci}},
\code{\link{results_mvn_mar_nb.fiml_ci}},
\code{\link{results_mvn_mar_pb.mvn_ci}},
\code{\link{results_mvn_mcar_fit.sem}},
\code{\link{results_mvn_mcar_mc.mvn_ci}},
\code{\link{results_mvn_mcar_nb.fiml_ci}},
\code{\link{results_mvn_mcar_pb.mvn_ci}},
\code{\link{results_mvn_mnar_fit.sem}},
\code{\link{results_mvn_mnar_mc.mvn_ci}},
\code{\link{results_mvn_mnar_nb.fiml_ci}},
\code{\link{results_mvn_nb_ci}},
\code{\link{results_mvn_ols_mc.mvn_ci}},
\code{\link{results_mvn_pb.mvn_ci}},
\code{\link{results_mvn_sem_mc.mvn_ci}},
\code{\link{results_vm_mod_fit.ols}},
\code{\link{results_vm_mod_fit.sem.mlr}},
\code{\link{results_vm_mod_nb_ci}},
\code{\link{results_vm_mod_ols_mc.mvn_ci}},
\code{\link{results_vm_mod_pb.mvn_ci}},
\code{\link{results_vm_mod_sem_mc.mvn_ci}},
\code{\link{results_vm_sev_fit.ols}},
\code{\link{results_vm_sev_fit.sem.mlr}},
\code{\link{results_vm_sev_nb_ci}},
\code{\link{results_vm_sev_pb.mvn_ci}},
\code{\link{results_vm_sev_sem_mc.mvn_ci}}
}
\concept{results}
\keyword{results}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/roommate.R
\name{roommate.checkPreferences}
\alias{roommate.checkPreferences}
\title{Check if preference order for a one-sided market is complete}
\usage{
roommate.checkPreferences(pref)
}
\arguments{
\item{pref}{is a matrix with the preference order of each individual in the
market. This argument is only required when \code{utils} is not provided.
If there are \code{n} individuals, then this matrix will be of dimension
\code{n-1} by \code{n}. The \code{i,j}th element refers to \code{j}'s
\code{i}th most favorite partner. Preference orders can either be specified
using R-indexing (starting at 1) or C++ indexing (starting at 0). The
matrix \code{pref} must be of dimension \code{n-1} by \code{n}. Otherwise,
the function will throw an error.}
}
\value{
a matrix with preference orderings with proper C++ indices or NULL if
the preference order is not complete.
}
\description{
Check if preference order for a one-sided market is complete
}
|
/man/roommate.checkPreferences.Rd
|
no_license
|
trinker/matchingR
|
R
| false
| false
| 1,036
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/roommate.R
\name{roommate.checkPreferences}
\alias{roommate.checkPreferences}
\title{Check if preference order for a one-sided market is complete}
\usage{
roommate.checkPreferences(pref)
}
\arguments{
\item{pref}{is a matrix with the preference order of each individual in the
market. This argument is only required when \code{utils} is not provided.
If there are \code{n} individuals, then this matrix will be of dimension
\code{n-1} by \code{n}. The \code{i,j}th element refers to \code{j}'s
\code{i}th most favorite partner. Preference orders can either be specified
using R-indexing (starting at 1) or C++ indexing (starting at 0). The
matrix \code{pref} must be of dimension \code{n-1} by \code{n}. Otherwise,
the function will throw an error.}
}
\value{
a matrix with preference orderings with proper C++ indices or NULL if
the preference order is not complete.
}
\description{
Check if preference order for a one-sided market is complete
}
|
source("data.R")
# Draw the four-panel household power-consumption figure and save it as
# plot4.png (480x480 px). Relies on ExData() from data.R to supply a table
# with DateTime, Global_active_power, Voltage, Sub_metering_1..3 and
# Global_reactive_power columns.
doPlot4 <- function() {
  tbl <- ExData()
  png(filename = "plot4.png", width = 480, height = 480, units = "px")
  par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
  with(tbl, {
    plot(DateTime, Global_active_power, xlab = "", ylab = "Global Active Power", type = "l")
    plot(DateTime, Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
    cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
    plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
    lines(DateTime, Sub_metering_2, type = "l", col = "red")
    lines(DateTime, Sub_metering_3, type = "l", col = "blue")
    # BUG FIX: legend colours must follow the order of 'cols'
    # (1 = black, 2 = red, 3 = blue); the original swapped blue and red,
    # mislabelling Sub_metering_2 and Sub_metering_3.
    legend("topright", lty = 1, lwd = 1, col = c("black", "red", "blue"), legend = cols, bty = "n")
    plot(DateTime, Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
  })
  dev.off()
}
doPlot4()
|
/plot4.r
|
no_license
|
labbyjr/ExData_Plotting1
|
R
| false
| false
| 922
|
r
|
source("data.R")
# Draw the four-panel household power-consumption figure and save it as
# plot4.png (480x480 px). Relies on ExData() from data.R to supply a table
# with DateTime, Global_active_power, Voltage, Sub_metering_1..3 and
# Global_reactive_power columns.
doPlot4 <- function() {
  tbl <- ExData()
  png(filename = "plot4.png", width = 480, height = 480, units = "px")
  par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
  with(tbl, {
    plot(DateTime, Global_active_power, xlab = "", ylab = "Global Active Power", type = "l")
    plot(DateTime, Voltage, xlab = "datetime", ylab = "Voltage", type = "l")
    cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
    plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
    lines(DateTime, Sub_metering_2, type = "l", col = "red")
    lines(DateTime, Sub_metering_3, type = "l", col = "blue")
    # BUG FIX: legend colours must follow the order of 'cols'
    # (1 = black, 2 = red, 3 = blue); the original swapped blue and red,
    # mislabelling Sub_metering_2 and Sub_metering_3.
    legend("topright", lty = 1, lwd = 1, col = c("black", "red", "blue"), legend = cols, bty = "n")
    plot(DateTime, Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
  })
  dev.off()
}
doPlot4()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expr-lang.R
\name{lang_standardise}
\alias{lang_standardise}
\title{Standardise a call.}
\usage{
lang_standardise(call = caller_frame())
}
\arguments{
\item{call}{Can be a call, a formula quoting a call in the
right-hand side, or a frame object from which to extract the call
expression. If not supplied, the calling frame is used.}
}
\value{
A tidy quote if \code{.call} is a tidy quote, a call otherwise.
}
\description{
This is essentially equivalent to \code{\link[base:match.call]{base::match.call()}}, but handles
primitive functions more gracefully.
}
\seealso{
\code{\link[=lang_homogenise]{lang_homogenise()}} for a version more suitable to
language analysis.
}
|
/man/lang_standardise.Rd
|
no_license
|
dpastoor/rlang
|
R
| false
| true
| 749
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expr-lang.R
\name{lang_standardise}
\alias{lang_standardise}
\title{Standardise a call.}
\usage{
lang_standardise(call = caller_frame())
}
\arguments{
\item{call}{Can be a call, a formula quoting a call in the
right-hand side, or a frame object from which to extract the call
expression. If not supplied, the calling frame is used.}
}
\value{
A tidy quote if \code{.call} is a tidy quote, a call otherwise.
}
\description{
This is essentially equivalent to \code{\link[base:match.call]{base::match.call()}}, but handles
primitive functions more gracefully.
}
\seealso{
\code{\link[=lang_homogenise]{lang_homogenise()}} for a version more suitable to
language analysis.
}
|
## Lattice Functions
#
# `xyplot`: this is the main function for creating scatterplots
# `bwplot`: box-and-whiskers plots ("boxplots")
# `histogram`: histograms
# `stripplot`: like a boxplot but with actual points
#- `dotplot`: plot dots on "violin strings"
#- `splom`: scatterplot matrix; like `pairs` in base plotting system
#- `levelplot`, `contourplot`: for plotting "image" data
## Simple Lattice Plot
library(lattice)
library(datasets)
## Simple scatterplot of ozone vs. wind speed
xyplot(Ozone ~ Wind, data = airquality)
## Simple Lattice Plot, conditioned on a factor
library(datasets)
library(lattice)
## Convert 'Month' to a factor variable
airquality <- transform(airquality, Month = factor(Month))
## One panel per month, laid out as a single row of 5 panels
xyplot(Ozone ~ Wind | Month, data = airquality, layout = c(5, 1))
## Lattice Behavior: lattice functions return a "trellis" object;
## the plot is only drawn when that object is printed
p <- xyplot(Ozone ~ Wind, data = airquality) ## Nothing happens!
print(p) ## Plot appears
xyplot(Ozone ~ Wind, data = airquality) ## Auto-printing
## Lattice Panel Functions: simulate two groups with different x-y relationships
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each = 50)
y <- x + f - f * x+ rnorm(100, sd = 0.5)
f <- factor(f, labels = c("Group 1", "Group 2"))
xyplot(y ~ x | f, layout = c(2, 1)) ## Plot with 2 panels
## Lattice Panel Functions
## Custom panel function
xyplot(y ~ x | f, panel = function(x, y, ...) {
panel.xyplot(x, y, ...) ## First call the default panel function for 'xyplot'
panel.abline(h = median(y), lty = 2) ## Add a horizontal line at the median
})
## Lattice Panel Functions: Regression line
## Custom panel function
xyplot(y ~ x | f, panel = function(x, y, ...) {
panel.xyplot(x, y, ...) ## First call default panel function
panel.lmline(x, y, col = 2) ## Overlay a simple linear regression line
})
## Many Panel Lattice Plot
library(lattice)
## NOTE(review): requires the MAACS environment dataset file in the working
## directory -- not distributed with this script; confirm availability before running
env <- readRDS("maacs_env.rds")
env <- transform(env, MxNum = factor(MxNum))
xyplot(log2(airmus) ~ VisitNum | MxNum, data = env, strip = FALSE, pch = 20, xlab = "Visit Number", ylab = expression(Log[2] * " Airborne Mouse Allergen"), main = "Mouse Allergen and Asthma Cohort Study (Baltimore City)")
|
/4. Exploratory Data Analysis/5.R
|
no_license
|
shubhamjanhere/Coursera-Data-Science-Specialization
|
R
| false
| false
| 2,071
|
r
|
## Lattice Functions
#
# `xyplot`: this is the main function for creating scatterplots
# `bwplot`: box-and-whiskers plots ("boxplots")
# `histogram`: histograms
# `stripplot`: like a boxplot but with actual points
#- `dotplot`: plot dots on "violin strings"
#- `splom`: scatterplot matrix; like `pairs` in base plotting system
#- `levelplot`, `contourplot`: for plotting "image" data
## Simple Lattice Plot
library(lattice)
library(datasets)
## Simple scatterplot of ozone vs. wind speed
xyplot(Ozone ~ Wind, data = airquality)
## Simple Lattice Plot, conditioned on a factor
library(datasets)
library(lattice)
## Convert 'Month' to a factor variable
airquality <- transform(airquality, Month = factor(Month))
## One panel per month, laid out as a single row of 5 panels
xyplot(Ozone ~ Wind | Month, data = airquality, layout = c(5, 1))
## Lattice Behavior: lattice functions return a "trellis" object;
## the plot is only drawn when that object is printed
p <- xyplot(Ozone ~ Wind, data = airquality) ## Nothing happens!
print(p) ## Plot appears
xyplot(Ozone ~ Wind, data = airquality) ## Auto-printing
## Lattice Panel Functions: simulate two groups with different x-y relationships
set.seed(10)
x <- rnorm(100)
f <- rep(0:1, each = 50)
y <- x + f - f * x+ rnorm(100, sd = 0.5)
f <- factor(f, labels = c("Group 1", "Group 2"))
xyplot(y ~ x | f, layout = c(2, 1)) ## Plot with 2 panels
## Lattice Panel Functions
## Custom panel function
xyplot(y ~ x | f, panel = function(x, y, ...) {
panel.xyplot(x, y, ...) ## First call the default panel function for 'xyplot'
panel.abline(h = median(y), lty = 2) ## Add a horizontal line at the median
})
## Lattice Panel Functions: Regression line
## Custom panel function
xyplot(y ~ x | f, panel = function(x, y, ...) {
panel.xyplot(x, y, ...) ## First call default panel function
panel.lmline(x, y, col = 2) ## Overlay a simple linear regression line
})
## Many Panel Lattice Plot
library(lattice)
## NOTE(review): requires the MAACS environment dataset file in the working
## directory -- not distributed with this script; confirm availability before running
env <- readRDS("maacs_env.rds")
env <- transform(env, MxNum = factor(MxNum))
xyplot(log2(airmus) ~ VisitNum | MxNum, data = env, strip = FALSE, pch = 20, xlab = "Visit Number", ylab = expression(Log[2] * " Airborne Mouse Allergen"), main = "Mouse Allergen and Asthma Cohort Study (Baltimore City)")
|
# Load the rhdf5 package used to read/write OMX (HDF5) matrices.
# library() errors immediately if the package is missing; the original used
# require(), which merely returns FALSE and would let the script fail later
# with a confusing "could not find function" error.
library(rhdf5)
# Set the working directory for this analysis script.
setwd("Z:/indiv/carlos/travel time/auto")
# Read the CSV of intrazonal travel times and extract the zone numbers
# ('origin') and the intrazonal time ('tt') for each internal zone.
intrazonalTTime <- read.csv("intrazonalInternalZones.csv")
origin <- intrazonalTTime$Origin
tt <- intrazonalTTime$Intratime
#List the contents of an OMX file: version, shape, and attribute tables for
#every matrix (stored under the /data group) and lookup (under /lookup).
#Returns a list(OMXVersion, Rows, Columns, Matrices, Lookups).
#NOTE(review): depends on getRootAttrOMX(), defined elsewhere in the OMX R API.
listOMX <- function( OMXFileName ) {
#Get the version and shape information from the file's root attributes
RootAttr <- getRootAttrOMX( OMXFileName )
Version <- RootAttr$VERSION
Shape <- RootAttr$SHAPE
#Use the h5ls function to read the contents of the file
Contents <- h5ls( OMXFileName )
MatrixContents <- Contents[ Contents$group == "/data", ]
LookupContents <- Contents[ Contents$group == "/lookup", ]
#Read the matrix attribute information
Names <- MatrixContents$name
Types <- MatrixContents$dclass
H5File <- H5Fopen( OMXFileName )
H5Group <- H5Gopen( H5File, "data" )
MatAttr <- list()
#Collect the optional "NA" and "Description" attributes of each matrix dataset
for( i in 1:length(Names) ) {
Attr <- list(type="matrix")
H5Data <- H5Dopen( H5Group, Names[i] )
if(H5Aexists(H5Data, "NA")) {
H5Attr <- H5Aopen( H5Data, "NA" )
Attr$navalue <- H5Aread( H5Attr )
H5Aclose( H5Attr )
}
if(H5Aexists(H5Data, "Description")) {
H5Attr <- H5Aopen( H5Data, "Description" )
Attr$description <- H5Aread( H5Attr )
H5Aclose( H5Attr )
}
MatAttr[[Names[i]]] <- Attr
H5Dclose( H5Data )
rm( Attr )
}
#Handles are closed in reverse order of opening
H5Gclose( H5Group )
H5Fclose( H5File )
#One data.frame row per matrix
MatAttr <- do.call( rbind, lapply( MatAttr, function(x) data.frame(x) ) )
rm( Names, Types )
#Read the lookup attribute information (lookups may be absent entirely)
H5File <- H5Fopen( OMXFileName )
H5Group <- H5Gopen( H5File, "lookup" )
Names <- LookupContents$name
Types <- LookupContents$dclass
LookupAttr <- list()
if(length(Names)>0) {
for( i in 1:length(Names) ) {
Attr <- list(type="lookup")
H5Data <- H5Dopen( H5Group, Names[i] )
if( H5Aexists( H5Data, "DIM" ) ) {
H5Attr <- H5Aopen( H5Data, "DIM" )
Attr$lookupdim <- H5Aread( H5Attr )
H5Aclose( H5Attr )
} else {
Attr$lookupdim <- ""
}
if( H5Aexists( H5Data, "Description" ) ) {
H5Attr <- H5Aopen( H5Data, "Description" )
Attr$description <- H5Aread( H5Attr )
H5Aclose( H5Attr )
} else {
Attr$description <- ""
}
LookupAttr[[Names[i]]] <- Attr
H5Dclose( H5Data )
rm( Attr )
}
H5Gclose( H5Group )
H5Fclose( H5File )
LookupAttr <- do.call( rbind, lapply( LookupAttr, function(x) data.frame(x) ) )
rm( Names, Types )
}
#Combine the results into a list
if(length(MatAttr)>0) {
MatInfo <- cbind( MatrixContents[,c("name","dclass","dim")], MatAttr )
} else {
MatInfo <- MatrixContents[,c("name","dclass","dim")]
}
if(length(LookupAttr)>0) {
LookupInfo <- cbind( LookupContents[,c("name","dclass","dim")], LookupAttr )
} else {
LookupInfo <- LookupContents[,c("name","dclass","dim")]
}
list( OMXVersion=Version, Rows=Shape[1], Columns=Shape[2], Matrices=MatInfo, Lookups=LookupInfo )
}
#Print a summary of the OMX file being edited
listOMX("auto_impedances_revised_intrazonal.omx")
# Read a lookup vector (e.g. zone numbers) from an OMX file, together with the
# name of the matrix dimension it indexes (stored in the dataset's "DIM"
# attribute, empty string when absent).
readLookupOMX <- function( OMXFileName, LookupName ) {
  # Path of the dataset inside the HDF5 "lookup" group
  lookup_path <- paste( "lookup", LookupName, sep="/" )
  lookup_values <- h5read( OMXFileName, lookup_path )
  # Walk the handle hierarchy file -> group -> dataset to reach the attribute
  file_handle <- H5Fopen( OMXFileName )
  group_handle <- H5Gopen( file_handle, "lookup" )
  data_handle <- H5Dopen( group_handle, LookupName )
  dim_name <- ""
  if ( H5Aexists( data_handle, "DIM" ) ) {
    attr_handle <- H5Aopen( data_handle, "DIM" )
    dim_name <- H5Aread( attr_handle )
    H5Aclose( attr_handle )
  }
  # Release handles in reverse order of acquisition
  H5Dclose( data_handle )
  H5Gclose( group_handle )
  H5Fclose( file_handle )
  # Return both the values and the dimension they index
  list( Lookup=lookup_values, LookupDim=dim_name )
}
lookup <- readLookupOMX("auto_impedances_revised_intrazonal.omx", "zone_number")
lookupVector <- lookup[[1]]
# Sanity check: the OMX lookup vector should match the CSV zone numbers
# one-for-one; a total absolute difference of 0 means the matrix indices
# line up with the Origin zone numbers. Vectorized (the original used a
# scalar accumulation loop over 1:length(origin), which also misbehaves
# when 'origin' is empty).
sum <- sum(abs(lookupVector[seq_along(origin)] - origin))
#Write a matrix (or selected cells of an existing matrix) to an OMX file.
#
#Args:
#  OMXFileName: path to the OMX (HDF5) file.
#  Matrix: the full matrix to store, or the values for an indexed write.
#  MatrixSaveName: dataset name under the "data" group.
#  RowIndex, ColIndex: optional 1-based indices for writing only part of an
#    already-existing matrix; both NULL means a full-matrix write.
#  NaValue: value used inside the file to represent NA.
#  Replace: must be TRUE to overwrite an existing matrix of the same name.
#  Description: description saved as a dataset attribute (full writes only).
#Returns TRUE on success; stops with an error on any validation failure.
#NOTE(review): depends on getRootAttrOMX(), defined elsewhere in the OMX R API.
writeMatrixOMX <- function( OMXFileName, Matrix, MatrixSaveName, RowIndex=NULL, ColIndex=NULL, NaValue=-1 ,
Replace=FALSE, Description="" ) {
#Get names of matrices already in the file
Contents <- h5ls( OMXFileName )
MatrixNames <- Contents$name[ Contents$group == "/data" ]
MatrixExists <- MatrixSaveName %in% MatrixNames
#Get the matrix dimensions specified in the file
RootAttr <- getRootAttrOMX( OMXFileName )
Shape <- RootAttr$SHAPE
#Refuse to clobber an existing matrix unless explicitly asked to
if( MatrixExists && !Replace ){
stop( paste("A matrix named '", MatrixSaveName, "' already exists. Value of 'Replace' argument must be TRUE in order to overwrite.", sep="") )
}
#Indexed writing (RowIndex/ColIndex given) only allowed into an existing full matrix
if( !( is.null( RowIndex ) && is.null( ColIndex ) ) ){
if( !MatrixExists ){
stop( "Indexed writing to a matrix only allowed if a full matrix of that name already exists." )
}
}
#Full-matrix write: store the whole matrix and attach the NA/Description attributes
if( is.null( RowIndex ) && is.null( ColIndex ) ){
#Check conformance of matrix dimensions with OMX file
if( !all( dim( Matrix ) == Shape ) ){
stop( paste( "Matrix dimensions not consistent with", OMXFileName, ":", Shape[1], "Rows,", Shape[2], "Cols" ) )
}
#Transpose matrix (the dataset is stored in transposed form) and encode NA
Matrix <- t( Matrix )
Matrix[ is.na( Matrix ) ] <- NaValue
#Write matrix to file, set chunking and compression
ItemName <- paste( "data", MatrixSaveName, sep="/" )
h5createDataset(OMXFileName, ItemName, dim(Matrix), chunk=c(nrow(Matrix),1), level=7)
h5write( Matrix, OMXFileName, ItemName)
#Add the NA storage value and matrix description as attributes to the matrix
H5File <- H5Fopen( OMXFileName )
H5Group <- H5Gopen( H5File, "data" )
H5Data <- H5Dopen( H5Group, MatrixSaveName )
h5writeAttribute( NaValue, H5Data, "NA" )
h5writeAttribute( Description, H5Data, "Description" )
#Close everything up before exiting (reverse order of opening)
H5Dclose( H5Data )
H5Gclose( H5Group )
H5Fclose( H5File )
#Otherwise write only to the indexed positions
} else {
if( is.null( RowIndex ) ) RowIndex <- seq_len( Shape[1] )
if( is.null( ColIndex ) ) ColIndex <- seq_len( Shape[2] )
#Check that indexes are within matrix dimension ranges
if( any( RowIndex <= 0 ) || ( max( RowIndex ) > Shape[1] ) ){
stop( "One or more values of 'RowIndex' are outside the index range of the matrix." )
}
if( any( ColIndex <= 0 ) || ( max( ColIndex ) > Shape[2] ) ){
stop( "One or more values of 'ColIndex' are outside the index range of the matrix." )
}
#Check that there are no duplicated indices
if( any( duplicated( RowIndex ) ) ){
stop( "Duplicated index values in 'RowIndex'. Not permitted." )
}
if( any( duplicated( ColIndex ) ) ){
stop( "Duplicated index values in 'ColIndex'. Not permitted." )
}
#BUG FIX: the matrix is stored in transposed form, so the stored dataset's
#first dimension corresponds to columns and its second to rows. The index
#list must therefore be reversed; the original passed list(RowIndex, ColIndex)
#unreversed, which mis-addresses any non-diagonal indexed write (the diagonal
#writes used by this script happened to work because RowIndex == ColIndex).
Indices <- list( ColIndex, RowIndex )
#Transpose matrix and convert NA to designated storage value
Matrix <- t( Matrix )
Matrix[ is.na( Matrix ) ] <- NaValue
#Write the matrix to the indexed positions
ItemName <- paste( "data", MatrixSaveName, sep="/" )
h5write( Matrix, OMXFileName, ItemName, index=Indices )
}
TRUE
}
# Write each intrazonal travel time onto the matrix diagonal
# (RowIndex == ColIndex), one cell per internal zone.
# seq_along() is safe when 'origin' is empty, unlike 1:length(origin).
for (i in seq_along(origin)) {
  writeMatrixOMX("auto_impedances_revised_intrazonal.omx", tt[i], "mf452_auto_imp", RowIndex = i, ColIndex = i, NaValue = -1,
                 Replace = TRUE, Description = "")
}
|
/omxManipulation/intrazonalToSkim.R
|
no_license
|
cllorca1/mtoEstimation
|
R
| false
| false
| 7,869
|
r
|
# Load the rhdf5 package used to read/write OMX (HDF5) matrices.
# library() errors immediately if the package is missing; the original used
# require(), which merely returns FALSE and would let the script fail later
# with a confusing "could not find function" error.
library(rhdf5)
# Set the working directory for this analysis script.
setwd("Z:/indiv/carlos/travel time/auto")
# Read the CSV of intrazonal travel times and extract the zone numbers
# ('origin') and the intrazonal time ('tt') for each internal zone.
intrazonalTTime <- read.csv("intrazonalInternalZones.csv")
origin <- intrazonalTTime$Origin
tt <- intrazonalTTime$Intratime
#List the contents of an OMX file: version, shape, and attribute tables for
#every matrix (stored under the /data group) and lookup (under /lookup).
#Returns a list(OMXVersion, Rows, Columns, Matrices, Lookups).
#NOTE(review): depends on getRootAttrOMX(), defined elsewhere in the OMX R API.
listOMX <- function( OMXFileName ) {
#Get the version and shape information from the file's root attributes
RootAttr <- getRootAttrOMX( OMXFileName )
Version <- RootAttr$VERSION
Shape <- RootAttr$SHAPE
#Use the h5ls function to read the contents of the file
Contents <- h5ls( OMXFileName )
MatrixContents <- Contents[ Contents$group == "/data", ]
LookupContents <- Contents[ Contents$group == "/lookup", ]
#Read the matrix attribute information
Names <- MatrixContents$name
Types <- MatrixContents$dclass
H5File <- H5Fopen( OMXFileName )
H5Group <- H5Gopen( H5File, "data" )
MatAttr <- list()
#Collect the optional "NA" and "Description" attributes of each matrix dataset
for( i in 1:length(Names) ) {
Attr <- list(type="matrix")
H5Data <- H5Dopen( H5Group, Names[i] )
if(H5Aexists(H5Data, "NA")) {
H5Attr <- H5Aopen( H5Data, "NA" )
Attr$navalue <- H5Aread( H5Attr )
H5Aclose( H5Attr )
}
if(H5Aexists(H5Data, "Description")) {
H5Attr <- H5Aopen( H5Data, "Description" )
Attr$description <- H5Aread( H5Attr )
H5Aclose( H5Attr )
}
MatAttr[[Names[i]]] <- Attr
H5Dclose( H5Data )
rm( Attr )
}
#Handles are closed in reverse order of opening
H5Gclose( H5Group )
H5Fclose( H5File )
#One data.frame row per matrix
MatAttr <- do.call( rbind, lapply( MatAttr, function(x) data.frame(x) ) )
rm( Names, Types )
#Read the lookup attribute information (lookups may be absent entirely)
H5File <- H5Fopen( OMXFileName )
H5Group <- H5Gopen( H5File, "lookup" )
Names <- LookupContents$name
Types <- LookupContents$dclass
LookupAttr <- list()
if(length(Names)>0) {
for( i in 1:length(Names) ) {
Attr <- list(type="lookup")
H5Data <- H5Dopen( H5Group, Names[i] )
if( H5Aexists( H5Data, "DIM" ) ) {
H5Attr <- H5Aopen( H5Data, "DIM" )
Attr$lookupdim <- H5Aread( H5Attr )
H5Aclose( H5Attr )
} else {
Attr$lookupdim <- ""
}
if( H5Aexists( H5Data, "Description" ) ) {
H5Attr <- H5Aopen( H5Data, "Description" )
Attr$description <- H5Aread( H5Attr )
H5Aclose( H5Attr )
} else {
Attr$description <- ""
}
LookupAttr[[Names[i]]] <- Attr
H5Dclose( H5Data )
rm( Attr )
}
H5Gclose( H5Group )
H5Fclose( H5File )
LookupAttr <- do.call( rbind, lapply( LookupAttr, function(x) data.frame(x) ) )
rm( Names, Types )
}
#Combine the results into a list
if(length(MatAttr)>0) {
MatInfo <- cbind( MatrixContents[,c("name","dclass","dim")], MatAttr )
} else {
MatInfo <- MatrixContents[,c("name","dclass","dim")]
}
if(length(LookupAttr)>0) {
LookupInfo <- cbind( LookupContents[,c("name","dclass","dim")], LookupAttr )
} else {
LookupInfo <- LookupContents[,c("name","dclass","dim")]
}
list( OMXVersion=Version, Rows=Shape[1], Columns=Shape[2], Matrices=MatInfo, Lookups=LookupInfo )
}
#Print a summary of the OMX file being edited
listOMX("auto_impedances_revised_intrazonal.omx")
# Read a lookup vector (e.g. zone numbers) from an OMX file, together with the
# name of the matrix dimension it indexes (stored in the dataset's "DIM"
# attribute, empty string when absent).
readLookupOMX <- function( OMXFileName, LookupName ) {
  # Path of the dataset inside the HDF5 "lookup" group
  lookup_path <- paste( "lookup", LookupName, sep="/" )
  lookup_values <- h5read( OMXFileName, lookup_path )
  # Walk the handle hierarchy file -> group -> dataset to reach the attribute
  file_handle <- H5Fopen( OMXFileName )
  group_handle <- H5Gopen( file_handle, "lookup" )
  data_handle <- H5Dopen( group_handle, LookupName )
  dim_name <- ""
  if ( H5Aexists( data_handle, "DIM" ) ) {
    attr_handle <- H5Aopen( data_handle, "DIM" )
    dim_name <- H5Aread( attr_handle )
    H5Aclose( attr_handle )
  }
  # Release handles in reverse order of acquisition
  H5Dclose( data_handle )
  H5Gclose( group_handle )
  H5Fclose( file_handle )
  # Return both the values and the dimension they index
  list( Lookup=lookup_values, LookupDim=dim_name )
}
lookup <- readLookupOMX("auto_impedances_revised_intrazonal.omx", "zone_number")
lookupVector <- lookup[[1]]
# Sanity check: the OMX lookup vector should match the CSV zone numbers
# one-for-one; a total absolute difference of 0 means the matrix indices
# line up with the Origin zone numbers. Vectorized (the original used a
# scalar accumulation loop over 1:length(origin), which also misbehaves
# when 'origin' is empty).
sum <- sum(abs(lookupVector[seq_along(origin)] - origin))
#Write a matrix (or selected cells of an existing matrix) to an OMX file.
#
#Args:
#  OMXFileName: path to the OMX (HDF5) file.
#  Matrix: the full matrix to store, or the values for an indexed write.
#  MatrixSaveName: dataset name under the "data" group.
#  RowIndex, ColIndex: optional 1-based indices for writing only part of an
#    already-existing matrix; both NULL means a full-matrix write.
#  NaValue: value used inside the file to represent NA.
#  Replace: must be TRUE to overwrite an existing matrix of the same name.
#  Description: description saved as a dataset attribute (full writes only).
#Returns TRUE on success; stops with an error on any validation failure.
#NOTE(review): depends on getRootAttrOMX(), defined elsewhere in the OMX R API.
writeMatrixOMX <- function( OMXFileName, Matrix, MatrixSaveName, RowIndex=NULL, ColIndex=NULL, NaValue=-1 ,
Replace=FALSE, Description="" ) {
#Get names of matrices already in the file
Contents <- h5ls( OMXFileName )
MatrixNames <- Contents$name[ Contents$group == "/data" ]
MatrixExists <- MatrixSaveName %in% MatrixNames
#Get the matrix dimensions specified in the file
RootAttr <- getRootAttrOMX( OMXFileName )
Shape <- RootAttr$SHAPE
#Refuse to clobber an existing matrix unless explicitly asked to
if( MatrixExists && !Replace ){
stop( paste("A matrix named '", MatrixSaveName, "' already exists. Value of 'Replace' argument must be TRUE in order to overwrite.", sep="") )
}
#Indexed writing (RowIndex/ColIndex given) only allowed into an existing full matrix
if( !( is.null( RowIndex ) && is.null( ColIndex ) ) ){
if( !MatrixExists ){
stop( "Indexed writing to a matrix only allowed if a full matrix of that name already exists." )
}
}
#Full-matrix write: store the whole matrix and attach the NA/Description attributes
if( is.null( RowIndex ) && is.null( ColIndex ) ){
#Check conformance of matrix dimensions with OMX file
if( !all( dim( Matrix ) == Shape ) ){
stop( paste( "Matrix dimensions not consistent with", OMXFileName, ":", Shape[1], "Rows,", Shape[2], "Cols" ) )
}
#Transpose matrix (the dataset is stored in transposed form) and encode NA
Matrix <- t( Matrix )
Matrix[ is.na( Matrix ) ] <- NaValue
#Write matrix to file, set chunking and compression
ItemName <- paste( "data", MatrixSaveName, sep="/" )
h5createDataset(OMXFileName, ItemName, dim(Matrix), chunk=c(nrow(Matrix),1), level=7)
h5write( Matrix, OMXFileName, ItemName)
#Add the NA storage value and matrix description as attributes to the matrix
H5File <- H5Fopen( OMXFileName )
H5Group <- H5Gopen( H5File, "data" )
H5Data <- H5Dopen( H5Group, MatrixSaveName )
h5writeAttribute( NaValue, H5Data, "NA" )
h5writeAttribute( Description, H5Data, "Description" )
#Close everything up before exiting (reverse order of opening)
H5Dclose( H5Data )
H5Gclose( H5Group )
H5Fclose( H5File )
#Otherwise write only to the indexed positions
} else {
if( is.null( RowIndex ) ) RowIndex <- seq_len( Shape[1] )
if( is.null( ColIndex ) ) ColIndex <- seq_len( Shape[2] )
#Check that indexes are within matrix dimension ranges
if( any( RowIndex <= 0 ) || ( max( RowIndex ) > Shape[1] ) ){
stop( "One or more values of 'RowIndex' are outside the index range of the matrix." )
}
if( any( ColIndex <= 0 ) || ( max( ColIndex ) > Shape[2] ) ){
stop( "One or more values of 'ColIndex' are outside the index range of the matrix." )
}
#Check that there are no duplicated indices
if( any( duplicated( RowIndex ) ) ){
stop( "Duplicated index values in 'RowIndex'. Not permitted." )
}
if( any( duplicated( ColIndex ) ) ){
stop( "Duplicated index values in 'ColIndex'. Not permitted." )
}
#BUG FIX: the matrix is stored in transposed form, so the stored dataset's
#first dimension corresponds to columns and its second to rows. The index
#list must therefore be reversed; the original passed list(RowIndex, ColIndex)
#unreversed, which mis-addresses any non-diagonal indexed write (the diagonal
#writes used by this script happened to work because RowIndex == ColIndex).
Indices <- list( ColIndex, RowIndex )
#Transpose matrix and convert NA to designated storage value
Matrix <- t( Matrix )
Matrix[ is.na( Matrix ) ] <- NaValue
#Write the matrix to the indexed positions
ItemName <- paste( "data", MatrixSaveName, sep="/" )
h5write( Matrix, OMXFileName, ItemName, index=Indices )
}
TRUE
}
# Write each intrazonal travel time onto the matrix diagonal
# (RowIndex == ColIndex), one cell per internal zone.
# seq_along() is safe when 'origin' is empty, unlike 1:length(origin).
for (i in seq_along(origin)) {
  writeMatrixOMX("auto_impedances_revised_intrazonal.omx", tt[i], "mf452_auto_imp", RowIndex = i, ColIndex = i, NaValue = -1,
                 Replace = TRUE, Description = "")
}
|
#Utility functions
#lmp
#readBinary
#readMetadata
#loadPackages
#
#
#
#
lmp <- function (modelobject) {
  # Overall p-value of a fitted linear model's F test.
  #
  # Args:
  #   modelobject: a fitted model of class "lm".
  # Returns: the F-statistic p-value as a bare numeric scalar.
  #
  # inherits() instead of class(x) != "lm": the latter yields a length > 1
  # condition (an error in R >= 4.2) for objects such as "mlm" that carry
  # several classes, and wrongly rejects subclasses of lm.
  if (!inherits(modelobject, "lm"))
    stop("Not an object of class 'lm' ")
  f <- summary(modelobject)$fstatistic
  p <- pf(f[1], f[2], f[3], lower.tail = FALSE)
  attributes(p) <- NULL  # strip the "value" name inherited from fstatistic
  return(p)
}
# S4 container for one tracking run: one data.frame per measured feature
# (rows = frames, columns = individuals; centroid-like slots hold two columns
# per individual), plus acquisition metadata and the frame rate.
.trak <-
  setClass(
    Class = "trak",
    slots = c(
      area = "data.frame",
      centroid = "data.frame",                # x/y position, 2 columns per individual
      direction = "data.frame",
      dropped_frames = "data.frame",          # logical: frame lost by the tracker
      majoraxislength = "data.frame",
      minoraxislength = "data.frame",
      orientation = "data.frame",             # radians (converted from degrees on load)
      orientation_continuous = "data.frame",  # unwrapped (continuous-phase) orientation
      radius = "data.frame",
      speed = "data.frame",
      speed.regressed = "data.frame",
      theta = "data.frame",
      time = 'numeric',                       # per-frame timestamps
      weightedcentroid = "data.frame",
      activity = "list",                      # filled by flies.activity()
      metadata = "data.frame",                # per-individual annotations from readMetadata()
      hz = "numeric"                          # acquisition frame rate
    ),
    package = 'RATrak'
  )
# Load a tracking dataset from raw autotracker binaries into a trak object.
# Currently handles speed, centroid, and time; speed is derived from the
# centroid track when no speed file is supplied.
readInfo <-
  function(speedBinFileName = NULL,
           centroidBinFileName = NULL,
           timeBinFileName = NULL,
           metadataFileName,
           wellCount,
           start = 1,
           end = wellCount,
           hz = 5,
           inferPhenos = FALSE,
           size.centroid = NA_integer_) {
    # Args:
    #   speedBinFileName / centroidBinFileName / timeBinFileName: paths to the
    #     raw binary files; any may be NULL, but at least one must be given.
    #   metadataFileName: delimited metadata table (see readMetadata()).
    #   wellCount: number of tracked individuals (columns).
    #   start, end: rows of the metadata table to keep.
    #   hz: acquisition frame rate, used to rescale derived speed to pixel/s.
    #   inferPhenos: if TRUE, run flies.activity() on the result.
    #   size.centroid: byte size passed to readBin(); WARNING - centroid
    #     precision differs across autotracker versions, so change this if the
    #     coordinates make no sense.
    # Returns: a .trak object.
    time <- numeric()
    centroid <- data.frame()
    speed <- data.frame()
    # Scalar presence flags; && (not elementwise &) is the correct operator
    # for these single-value conditions.
    haveSpeed <- !is.null(speedBinFileName)
    haveCentroid <- !is.null(centroidBinFileName)
    haveTime <- !is.null(timeBinFileName)
    if (!haveSpeed && !haveCentroid && !haveTime)
      stop('Neither speed, centroid, or time data was provided')
    if (haveTime && !haveSpeed && !haveCentroid)
      warning('Only time data provided')
    # Read whichever inputs exist (replaces the original 8-branch cascade).
    if (haveSpeed)
      speed <- readBinary(speedBinFileName, wellCount, dataType = 'speed')
    if (haveCentroid)
      centroid <-
        readBinary(centroidBinFileName,
                   wellCount,
                   dataType = 'centroid',
                   size.centroid = size.centroid)
    if (haveTime)
      time <- readBinary(timeBinFileName, dataType = 'time')
    # No speed file: derive speed from the centroid track.
    if (!haveSpeed && haveCentroid) {
      if (haveTime) {
        print('Only centroid and time data provided. Calculating speed')
        speed <- flies.calculateSpeed(as.matrix(centroid), time)
      } else {
        print('Only centroid data provided. Calculating speed')
        speed <-
          flies.calculateSpeed(as.matrix(centroid)) * hz #Rescale speed to pixel/s
      }
    }
    metadata <- readMetadata(metadataFileName, start, end)
    data <-
      .trak(
        speed = speed,
        centroid = centroid,
        metadata = metadata,
        time = time,
        hz = hz
      )
    if (inferPhenos)
      data <- flies.activity(data)
    return(data)
  }
# Return the path base+name when it names an existing regular file, else NULL.
checkIfFile <- function(name, base) {
  candidate <- paste0(base, name)
  if (!file_test("-f", candidate)) {
    return(NULL)
  }
  candidate
}
# readInfo setup for importing margo folderData
# https://www.biorxiv.org/content/10.1101/593046v1
readInfo.margo <-
  function(rawDataFolder,
           metadataFileName = NULL,
           wellCount,
           start = 1,
           end = wellCount,
           hz = 5,
           startFrame = 4,
           inferPhenos = FALSE,
           size.centroid = NA_integer_,
           featuresToIgnore=c("weightedcentroid","majoraxislength","minoraxislength", "direction",
                              "orientation","radius","theta","area")) {
    # Load every margo raw-data feature found in rawDataFolder into a trak
    # object. Feature files are matched by the "...__<feature>..." naming
    # convention; ignored or absent features become empty slots.
    #WARNING: The precision for the centroid data is not consistent across autotracker versions
    #If the centroid coordinates make no sense, try changing size.centroid
    #This is passed on as the size argument to readBin()
    files = list.files(rawDataFolder)
    features <- c('area', 'centroid', 'direction', 'dropped_frames', 'majoraxislength', 'minoraxislength', 'orientation', 'radius', 'speed', 'theta', 'time', 'weightedcentroid')
    #Load features from the files in rawDataFolder; each feature name becomes
    #a local variable via assign().
    for (name in features) {
      if (name %in% featuresToIgnore) { #Ignore this feature
        if(name == 'time')
          assign(name, numeric())
        else
          assign(name, data.frame())
      }
      else if(!any(grepl(pattern = paste0('.*__', name, '.*'), files, ignore.case = T))){ #Feature is not present in rawDataFolder
        if(name == 'time')
          assign(name, numeric())
        else
          assign(name, data.frame())
      }
      else{ #Read feature
        fileName <- grep(pattern = paste0('.*__', name, '.*'), files, value = T, ignore.case = T)
        message('Loading: ', fileName)
        assign(
          name,
          readBinary.margo(
            paste0(rawDataFolder, fileName),
            dataType = name,
            colCount = wellCount
          )
        )
      }
    }
    if(!is.null(metadataFileName))
      metadata <- readMetadata(metadataFileName, start, end)
    else{
      metadata <- data.frame()
      warning('No metadata provided')
    }
    orientation <- orientation * pi / 180  # margo stores degrees; convert to radians
    #Continuous phase: unwrap each individual's orientation trace in parallel.
    #FIX: the original had misplaced parentheses that passed `unwrap` and
    #`mc.cores` as arguments to as.data.frame() and dropped a closing paren,
    #which was a syntax error.
    orientation_continuous <-
      as.data.frame(do.call(cbind, mclapply(as.data.frame(orientation), unwrap, mc.cores = 16)))
    data <-
      .trak(
        area = area,
        centroid = centroid,
        direction = direction,
        dropped_frames = dropped_frames,
        majoraxislength = majoraxislength,
        minoraxislength = minoraxislength,
        orientation = orientation,
        orientation_continuous = orientation_continuous,
        radius = radius,
        speed = speed,
        speed.regressed = data.frame(),
        theta = theta,
        time = time,
        weightedcentroid = weightedcentroid,
        metadata = metadata,
        hz = hz
      )
    if (inferPhenos)
      data <- flies.activity(data)
    return(data)
  }
readBinary <-
  function(fileName,
           colCount,
           dataType,
           size.centroid = 4,
           size.speed_time = 4,
           startFrame = 2) {
    # Read one raw autotracker binary file.
    #
    # Args:
    #   fileName: path to the binary file.
    #   colCount: number of tracked individuals (columns).
    #   dataType: 'speed', 'centroid', or 'time'.
    #   size.centroid, size.speed_time: byte sizes for readBin() (precision
    #     varies across autotracker versions).
    #   startFrame: first frame to keep; earlier frames are discarded.
    # Returns: a data.frame ('speed'/'centroid') or numeric vector ('time').
    file <- file(fileName, "rb")
    # FIX: close the connection on every exit path; previously an unrecognized
    # dataType hit stop() before close() and leaked the connection.
    on.exit(close(file), add = TRUE)
    if (dataType == 'speed') {
      mat <-
        matrix(
          readBin(file, numeric(), n = 1e10, size = size.speed_time),
          ncol = colCount,
          byrow = TRUE
        )
      mat <-
        mat[startFrame:nrow(mat),] #Discard first few frames if needed
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
    else if (dataType == 'centroid') {
      mat.tmp <-
        matrix(
          readBin(file, numeric(), n = 1e10, size = size.centroid),
          ncol = colCount * 2,
          byrow = TRUE
        )
      #Reshape: file stores all x columns then all y columns; interleave to
      #x1,y1,x2,y2,...
      mat <- matrix(ncol = ncol(mat.tmp), nrow = nrow(mat.tmp))
      xCols <- seq(from = 1,
                   to = ncol(mat.tmp) - 1,
                   by = 2)
      yCols <- seq(from = 2,
                   to = ncol(mat.tmp),
                   by = 2)
      mat[, xCols] <- mat.tmp[, 1:colCount]
      mat[, yCols] <- mat.tmp[, (colCount + 1):(colCount * 2)]
      mat <-
        mat[startFrame:nrow(mat),] #Shift to correct for margo output
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
    else if (dataType == 'time') {
      time <- readBin(file, numeric(), n = 1e10, size = size.speed_time)
      return(time[startFrame:length(time)])
    }
    else
      stop(paste('datatype:',
                 dataType,
                 'was not recognized.'))
  }
#WARNING: The precision for the centroid data has been changed between single and double in different autotracker versions.
readBinary.margo <-
  function(fileName,
           colCount,
           dataType = NULL,
           size.centroid = 4,
           size.default = 4,
           startFrame = 1) {
    # Read one margo raw-data binary file.
    #
    # Args:
    #   fileName: path to the binary file.
    #   colCount: number of tracked individuals.
    #   dataType: feature name; 'centroid'/'weightedcentroid',
    #     'dropped_frames', 'time', or anything else (generic numeric matrix).
    #   size.centroid / size.default: byte sizes for readBin().
    #   startFrame: first frame kept (not applied to the generic branch,
    #     matching the original behavior).
    # Returns: a data.frame, or a numeric vector for 'time'.
    #
    # FIX: with the old code a missing dataType produced the cryptic
    # "argument is of length zero" error from `if (NULL == ...)`.
    if (is.null(dataType))
      stop("'dataType' must be supplied.")
    file <- file(fileName, "rb")
    on.exit(close(file), add = TRUE)  # release the connection on every exit path
    if (dataType == 'centroid' || dataType == "weightedcentroid") {
      mat <-
        matrix(
          readBin(file, numeric(), n = 1e10, size = size.centroid),
          ncol = colCount * 2,
          byrow = TRUE
        )
      #Reshape in place: file stores x1..xN,y1..yN; reorder columns to
      #x1,y1,x2,y2,... without a second copy of the matrix.
      xCols <- seq(from = 1,
                   to = ncol(mat) - 1,
                   by = 2)
      yCols <- seq(from = 2,
                   to = ncol(mat),
                   by = 2)
      mat <- mat[,order(c(xCols,yCols))]
      mat <- mat[startFrame:nrow(mat),] #Shift to correct for margo output
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
    else if (dataType == "dropped_frames") {
      #Be careful of syncing here -- the rounding may cause issues
      # NOTE(review): trailing bits beyond a multiple of colCount are dropped;
      # confirm this matches margo's padding convention.
      bits = (rawToBits(readBin(
        file, raw(), n = 1e10, size = 1
      )))
      mat <-
        matrix(bits[1:(DescTools::RoundTo(length(bits), colCount, trunc))], #Round to multiple of well count
               ncol = colCount,
               byrow = TRUE)
      mat = mat==1
      mat = mat[(startFrame):nrow(mat),] #Discard first few frames if needed
      return(as.data.frame(mat))
    }
    else if (dataType == "time") {
      time <- readBin(file, numeric(), n = 1e9, size = size.default)
      return(time[startFrame:length(time)])
    }
    else{
      mat <-
        matrix(
          readBin(file, numeric(), n = 1e9, size = size.default),
          ncol = colCount,
          byrow = TRUE
        )
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
  }
# Read a delimited metadata table, auto-detecting the separator (';', ',' or
# tab) from the header line, lower-casing the column names, and keeping rows
# start:end.
readMetadata <- function(fileName, start = 1, end) {
  headerLine <- readLines(fileName, n = 1)
  sep <-
    if (grepl(";", headerLine)) {
      ";"
    } else if (grepl(",", headerLine)) {
      ","
    } else if (grepl("\t", headerLine)) {
      "\t"
    } else {
      stop(paste('Could not determine field separator in', fileName))
    }
  meta <- read.table(fileName, header = TRUE, sep = sep)
  colnames(meta) <- tolower(colnames(meta))
  meta[start:end, ]
}
#This script provides a quick way to group load packages - it's not needed in the package but could be useful for end users in other applications
# Install any packages in `names` that are not yet present, then attach each
# one with library().
loadPackages <- function(names) {
  missing_pkgs <- setdiff(names, installed.packages()[, "Package"])
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs)
  }
  for (pkg in names) {
    library(pkg, character.only = TRUE)
  }
}
#
# Apply FUN (with extra arguments ...) to every slot of object x; returns a
# list named by slot.
# NOTE(review): propertyNames() is not base R -- confirm which attached
# package provides it.
classApply <- function(x, FUN, ...) {
  result <- list()
  for (slotName in propertyNames(class(x))) {
    result[[slotName]] <- FUN(slot(x, slotName), ...)
  }
  result
}
# Row-wise mean over the columns of x selected by the logical vector l.
groupMean <- function(x, l) {
  selected <- as.matrix(x[, l == TRUE])
  rowMeans(selected)
}
# Join the elements of x with underscores, skipping any leading empty
# accumulator state (so an empty x yields "").
paste_ <- function(x){
  joined <- ""
  for (piece in x) {
    joined <- if (joined == "") piece else paste(joined, piece, sep = "_")
  }
  return(joined)
}
# Pairwise dynamic-time-warping distance matrix over a list of trajectories.
#
# Args:
#   spgeom1: named list of coordinate matrices (one per trajectory).
#   spgeom2: optional second set; previously referenced but never declared,
#     which made the function error unconditionally. Two-set mode was never
#     implemented, so it now fails with a clear message.
# Returns: an upper-triangular matrix of normalized DTW distances with
#   row/column names taken from names(spgeom1). Requires foreach/doParallel
#   (a %dopar% backend) and the dtw package.
dtwDistance_parallel_listed <- function(spgeom1, spgeom2 = NULL) {
  # if second set of lines is not given, calculate pairwise distances within
  # first set of lines
  if (is.null(spgeom2)) {
    n_geoms <- length(spgeom1)
    distmat <- foreach(i=1:(n_geoms - 1),.combine='rbind',.packages = c("sp","dtw","foreach","doParallel")) %dopar% {
      # FIX: was `spgeoms[[i]]` -- an undefined name.
      crds1 <- spgeom1[[i]]
      temp <- foreach(j=(i + 1):n_geoms,.packages = c("sp","dtw"),.combine="c") %do% {
        # FIX: was `crds2 <- crds1 <- spgeoms[[j]]`, which overwrote crds1
        # so every alignment compared a trajectory with itself (distance 0).
        crds2 <- spgeom1[[j]]
        align <- dtw(crds1,crds2)
        align$normalizedDistance # normalized distance
      }
      # left-pad with zeros so every row has n_geoms entries
      c(rep(0,(n_geoms - length(temp))),temp)
    }
    distmat <- rbind(distmat,rep(0,n_geoms))
    distmat <- t(as.matrix(distmat))
  } else {
    stop("Distances between two trajectory sets are not implemented.")
  }
  ids <- names(spgeom1)
  colnames(distmat) <- ids
  rownames(distmat) <- ids
  return(distmat)
}
# Render an annotated video for one fly: crops the source video around the
# fly's track with ffmpeg, draws per-frame feature plots (selected via
# `variables`) next to each cropped frame, and stitches the frames back into
# an mp4 under temp/. Requires ffmpeg on PATH plus ggplot2/ggforce/cowplot/
# magick/lubridate in the session.
#
# Args:
#   trak: a .trak object holding the tracking data.
#   video_location: path to the source video.
#   fly_number: column index of the fly to render.
#   start, end: frame range to render.
#   width: half-window (in frames) shown in each feature plot.
#   variables: which feature traces to plot alongside the video.
generate_video <- function(trak,video_location,fly_number,start,end,width,variables = c("speed","orientation","direction")){
  # NOTE(review): td/time_start, y_max, x_max and list_of_plots are computed
  # but never used downstream -- candidates for removal once confirmed.
  td <- seconds_to_period(86400)
  time_start = sprintf('%02d:%02d:%02d',td@hour + 24*day(td), minute(td), second(td))
  # Bounding box of the fly's non-zero track, used to place the ffmpeg crop.
  y_min <-
    min(trak@centroid[which(trak@centroid[, 2 * fly_number] !=
                              0), 2 * fly_number])
  y_max <-
    max(trak@centroid[which(trak@centroid[, 2 * fly_number] !=
                              0), 2 * fly_number])
  x_min <-
    min(trak@centroid[which(trak@centroid[, 2 * fly_number - 1] !=
                              0), 2 * fly_number - 1])
  x_max <-
    max(trak@centroid[which(trak@centroid[, 2 * fly_number - 1] !=
                              0), 2 * fly_number - 1])
  start_y = y_min - 10
  start_x = x_min - 10
  vid_width = 120
  name_fly_vid = paste0("temp/Fly_Vid_Frame_%d.png")
  # Extract and crop the requested frame range to per-frame PNGs.
  cmd <-
    paste0('ffmpeg -ss ',
           start / trak@hz ,
           " -i ",
           video_location,
           " -t ",
           (end - start + 1) / trak@hz,
           ' -filter:v "crop=',
           vid_width,
           ':',
           vid_width,
           ':',
           start_x,
           ':',
           start_y,
           '\" ',
           name_fly_vid,
           ''
    )
  #print(cmd)
  system(cmd)
  list_of_plots <- list()
  for (i in start:end) {
    count <- i - start + 1
    current_name <- paste0("temp/Fly_Vid_Frame_", count,".png")
    temp <- list()
    j = 0
    if ("speed" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@speed[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Speed") +
        xlab("") +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(0,100)) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width)) +
        theme(axis.text.x=element_blank())
    }
    if ("majoraxislength" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@majoraxislength[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Major Axis") +
        xlab("") +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(0,50)) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width)) +
        theme(axis.text.x=element_blank())
    }
    if ("minoraxislength" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@minoraxislength[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Minor Axis") +
        xlab("") +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(0,20)) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width)) +
        theme(axis.text.x=element_blank())
    }
    if ("orientation" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@orientation[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Orientation") +
        xlab("") +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(-pi/2,pi/2)) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width)) +
        theme(axis.text.x=element_blank())
    }
    if ("orientation_continuous" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@orientation_continuous[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Cont. Orientation") +
        xlab("") +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(min(trak@orientation_continuous[(start-width):end,fly_number]),max(trak@orientation_continuous[start:end,fly_number]))) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width)) +
        theme(axis.text.x=element_blank())
    }
    if ("area" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@area[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Area") +
        xlab("") +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(min(trak@area[(start-width):(end+width),fly_number]),max(trak@area[(start-width):(end+width),fly_number]))) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width)) +
        theme(axis.text.x=element_blank())
    }
    if ("direction" %in% variables) {
      j <- j + 1
      temp[[j]] <- ggplot(NULL,aes(x=(i-width):(i+width),y=trak@direction[(i-width):(i+width),fly_number])) +
        geom_path() +
        theme_minimal() +
        ylab("Direction") +
        xlab(paste0("Frame Number")) +
        geom_vline(xintercept = i,color="red",alpha=0.5) +
        ylim(c(-pi,pi )) +
        scale_x_continuous(breaks = round(seq(min(i-width), (i+width), by = 10),1),limits=c(i-width,i+width))
    }
    # Overlay geometry for the cropped frame (coordinates relative to crop).
    centroid_x <- trak@centroid[i,2*fly_number-1]-start_x
    centroid_y <- trak@centroid[i,2*fly_number]-start_y
    vec_start_x <- trak@centroid[i-1,2*fly_number-1]-start_x
    vec_start_y <- trak@centroid[i-1,2*fly_number]-start_y
    majoraxislength <- trak@majoraxislength[i,fly_number]
    minoraxislength <- trak@minoraxislength[i,fly_number]
    # FIX: was `f1s_set1@orientation[...]`, a leftover global from an
    # interactive session; the ellipse must use this trak's orientation.
    orientation <- trak@orientation[i,fly_number]
    current_name <- paste0("temp/Fly_Vid_Frame_", count,".png")
    base <- image_ggplot(image_read(current_name)) +
      geom_point(aes(x=centroid_x,y=centroid_y),color="red") +
      geom_ellipse(aes(x0=centroid_x,y0=centroid_y,a = majoraxislength/2,b=minoraxislength/2,angle=orientation)) +
      geom_segment(aes(xend=centroid_x,yend=centroid_y,x=vec_start_x,y=vec_start_y),arrow = arrow(length=unit(0.10,"cm"),type="closed"))
    temp_plot <- plot_grid(plotlist = temp,align="v",ncol=1)
    output <- plot_grid(temp_plot,base,ncol=2,rel_widths = c(1,1),rel_heights = c(1,1))
    ggsave(paste0("temp/temp_",i,".png"),output,height = 8,width=16,units = "in",dpi=320)
  }
  # Stitch the composite frames back into an mp4 and clean up the temp PNGs.
  name_plots=paste0("temp/Fly",fly_number,"_Start",start,"_End",end,"_Time",format(Sys.time(), "%Y-%m-%d_%H%M%S"),".mp4")
  merge_cmd <- paste0("ffmpeg -framerate ", trak@hz ," -start_number ",start, " -i 'temp/temp_%d.png' -pix_fmt yuv420p " ,name_plots)
  #print(merge_cmd)
  system(merge_cmd,wait = T)
  system("rm temp/temp_*")
  system("rm temp/Fly_Vid_Frame_*")
}
# Decompose the step from the middle point of a three-point window (rows of
# d: p0, p1, p2) into components parallel and perpendicular to the p0 -> p1
# segment, each normalized by that segment's length.
v_parperp <- function(d){
  p0 <- d[1,]
  p1 <- d[2,]
  p2 <- d[3,]
  seg_len <- sqrt((p1[1] - p0[1])^2 + (p1[2] - p0[2])^2)
  perp_comp <- ((p1[2] - p0[2]) * (p2[1] - p1[1]) - (p2[2] - p1[2]) * (p1[1] - p0[1])) / seg_len
  par_comp <- ((p2[1] - p1[1]) * (p1[1] - p0[1]) + (p2[2] - p1[2]) * (p1[2] - p0[2])) / seg_len
  return(setNames(c(par_comp, perp_comp), c("v_par", "v_perp")))
}
# Euclidean distance between x1 and x2; x1 defaults to the origin, so
# euc.dist(x2 = v) gives the length of v.
euc.dist <- function(x1 = c(0,0), x2) {
  delta <- x1 - x2
  sqrt(sum(delta^2))
}
# Angle (radians, via atan2) of the unit normal at the middle point of a
# three-point window (rows: previous, current, next point).
normal <- function(window) {
  prev_pt <- window[1,]
  cur_pt <- window[2,]
  next_pt <- window[3,]
  nx <- -(((cur_pt[2] - prev_pt[2]) / euc.dist(prev_pt, cur_pt)) + ((next_pt[2] - cur_pt[2]) / euc.dist(cur_pt, next_pt)))
  ny <- ((cur_pt[1] - prev_pt[1]) / euc.dist(prev_pt, cur_pt)) + ((next_pt[1] - cur_pt[1]) / euc.dist(cur_pt, next_pt))
  unit <- c(nx, ny) / euc.dist(x2 = c(nx, ny))
  return(atan2(unit[2], unit[1]))
}
# Seconds from the clock time of `start_time` (parsed as "m/d/Y H:M:S" in
# EST) until 19:00:00 EST on the same day; negative when the start is after
# 19:00. Both instants are reduced to their "%H:%M:%S" strings and re-parsed
# on today's date so only the time-of-day difference remains.
get_time_diff <- function(start_time){
  parsed_start <- as.POSIXct(start_time,format="%m/%d/%Y %H:%M:%S",origin = "1970-01-01",tz="EST")
  start_clock <- strptime(format(parsed_start,origin = "1970-01-01", format="%H:%M:%S"),format="%H:%M:%S")
  section_anchor <- as.POSIXct("19:00:00",format="%H:%M:%S",origin = "1970-01-01",tz="EST")
  section_clock <- strptime(format(section_anchor, format="%H:%M:%S",origin = "1970-01-01"),format="%H:%M:%S")
  as.numeric(difftime(section_clock,start_clock,units = "s"))
}
unwrap <- function(data, tol = pi/1.2, step = pi)
{
  # Unwrap a phase-like series: whenever consecutive samples jump by at least
  # `tol`, shift the entire remainder of the series by +/- `step` so the
  # trace becomes continuous.
  #
  # Args:
  #   data: numeric vector of (wrapped) angles.
  #   tol: jump magnitude that triggers a correction.
  #   step: amount added/subtracted per detected wrap.
  # Returns: the unwrapped vector, same length as `data`.
  #
  # Vectorized rewrite of the original O(n^2) nested loops: shifting the
  # whole tail leaves later consecutive differences unchanged, so the
  # cumulative correction at each position is just the running sum of
  # per-gap corrections computed on the original differences.
  n <- length(data)
  if (n < 2) {
    return(data)  # nothing to unwrap (the old loop errored on these inputs)
  }
  jumps <- data[-n] - data[-1]  # jumps[a] = data[a] - data[a + 1]
  correction <- step * ((jumps >= tol) - (jumps <= -tol))
  data[-1] <- data[-1] + cumsum(correction)
  return(data)
}
|
/R/utils.R
|
no_license
|
Wolfffff/RATrak
|
R
| false
| false
| 23,977
|
r
|
#Utility functions
#lmp
#readBinary
#readMetadata
#loadPackages
#
#
#
#
lmp <- function (modelobject) {
  # Overall p-value of a fitted linear model's F test.
  #
  # Args:
  #   modelobject: a fitted model of class "lm".
  # Returns: the F-statistic p-value as a bare numeric scalar.
  #
  # inherits() instead of class(x) != "lm": the latter yields a length > 1
  # condition (an error in R >= 4.2) for objects such as "mlm" that carry
  # several classes, and wrongly rejects subclasses of lm.
  if (!inherits(modelobject, "lm"))
    stop("Not an object of class 'lm' ")
  f <- summary(modelobject)$fstatistic
  p <- pf(f[1], f[2], f[3], lower.tail = FALSE)
  attributes(p) <- NULL  # strip the "value" name inherited from fstatistic
  return(p)
}
# S4 container for one tracking run: one data.frame per measured feature
# (rows = frames, columns = individuals; centroid-like slots hold two columns
# per individual), plus acquisition metadata and the frame rate.
.trak <-
  setClass(
    Class = "trak",
    slots = c(
      area = "data.frame",
      centroid = "data.frame",                # x/y position, 2 columns per individual
      direction = "data.frame",
      dropped_frames = "data.frame",          # logical: frame lost by the tracker
      majoraxislength = "data.frame",
      minoraxislength = "data.frame",
      orientation = "data.frame",             # radians (converted from degrees on load)
      orientation_continuous = "data.frame",  # unwrapped (continuous-phase) orientation
      radius = "data.frame",
      speed = "data.frame",
      speed.regressed = "data.frame",
      theta = "data.frame",
      time = 'numeric',                       # per-frame timestamps
      weightedcentroid = "data.frame",
      activity = "list",                      # filled by flies.activity()
      metadata = "data.frame",                # per-individual annotations from readMetadata()
      hz = "numeric"                          # acquisition frame rate
    ),
    package = 'RATrak'
  )
# Load a tracking dataset from raw autotracker binaries into a trak object.
# Currently handles speed, centroid, and time; speed is derived from the
# centroid track when no speed file is supplied.
readInfo <-
  function(speedBinFileName = NULL,
           centroidBinFileName = NULL,
           timeBinFileName = NULL,
           metadataFileName,
           wellCount,
           start = 1,
           end = wellCount,
           hz = 5,
           inferPhenos = FALSE,
           size.centroid = NA_integer_) {
    # Args:
    #   speedBinFileName / centroidBinFileName / timeBinFileName: paths to the
    #     raw binary files; any may be NULL, but at least one must be given.
    #   metadataFileName: delimited metadata table (see readMetadata()).
    #   wellCount: number of tracked individuals (columns).
    #   start, end: rows of the metadata table to keep.
    #   hz: acquisition frame rate, used to rescale derived speed to pixel/s.
    #   inferPhenos: if TRUE, run flies.activity() on the result.
    #   size.centroid: byte size passed to readBin(); WARNING - centroid
    #     precision differs across autotracker versions, so change this if the
    #     coordinates make no sense.
    # Returns: a .trak object.
    time <- numeric()
    centroid <- data.frame()
    speed <- data.frame()
    # Scalar presence flags; && (not elementwise &) is the correct operator
    # for these single-value conditions.
    haveSpeed <- !is.null(speedBinFileName)
    haveCentroid <- !is.null(centroidBinFileName)
    haveTime <- !is.null(timeBinFileName)
    if (!haveSpeed && !haveCentroid && !haveTime)
      stop('Neither speed, centroid, or time data was provided')
    if (haveTime && !haveSpeed && !haveCentroid)
      warning('Only time data provided')
    # Read whichever inputs exist (replaces the original 8-branch cascade).
    if (haveSpeed)
      speed <- readBinary(speedBinFileName, wellCount, dataType = 'speed')
    if (haveCentroid)
      centroid <-
        readBinary(centroidBinFileName,
                   wellCount,
                   dataType = 'centroid',
                   size.centroid = size.centroid)
    if (haveTime)
      time <- readBinary(timeBinFileName, dataType = 'time')
    # No speed file: derive speed from the centroid track.
    if (!haveSpeed && haveCentroid) {
      if (haveTime) {
        print('Only centroid and time data provided. Calculating speed')
        speed <- flies.calculateSpeed(as.matrix(centroid), time)
      } else {
        print('Only centroid data provided. Calculating speed')
        speed <-
          flies.calculateSpeed(as.matrix(centroid)) * hz #Rescale speed to pixel/s
      }
    }
    metadata <- readMetadata(metadataFileName, start, end)
    data <-
      .trak(
        speed = speed,
        centroid = centroid,
        metadata = metadata,
        time = time,
        hz = hz
      )
    if (inferPhenos)
      data <- flies.activity(data)
    return(data)
  }
# Return the path base+name when it names an existing regular file, else NULL.
checkIfFile <- function(name, base) {
  candidate <- paste0(base, name)
  if (!file_test("-f", candidate)) {
    return(NULL)
  }
  candidate
}
# readInfo setup for importing margo folderData
# https://www.biorxiv.org/content/10.1101/593046v1
readInfo.margo <-
  function(rawDataFolder,
           metadataFileName = NULL,
           wellCount,
           start = 1,
           end = wellCount,
           hz = 5,
           startFrame = 4,
           inferPhenos = FALSE,
           size.centroid = NA_integer_,
           featuresToIgnore=c("weightedcentroid","majoraxislength","minoraxislength", "direction",
                              "orientation","radius","theta","area")) {
    # Load every margo raw-data feature found in rawDataFolder into a trak
    # object. Feature files are matched by the "...__<feature>..." naming
    # convention; ignored or absent features become empty slots.
    #WARNING: The precision for the centroid data is not consistent across autotracker versions
    #If the centroid coordinates make no sense, try changing size.centroid
    #This is passed on as the size argument to readBin()
    files = list.files(rawDataFolder)
    features <- c('area', 'centroid', 'direction', 'dropped_frames', 'majoraxislength', 'minoraxislength', 'orientation', 'radius', 'speed', 'theta', 'time', 'weightedcentroid')
    #Load features from the files in rawDataFolder; each feature name becomes
    #a local variable via assign().
    for (name in features) {
      if (name %in% featuresToIgnore) { #Ignore this feature
        if(name == 'time')
          assign(name, numeric())
        else
          assign(name, data.frame())
      }
      else if(!any(grepl(pattern = paste0('.*__', name, '.*'), files, ignore.case = T))){ #Feature is not present in rawDataFolder
        if(name == 'time')
          assign(name, numeric())
        else
          assign(name, data.frame())
      }
      else{ #Read feature
        fileName <- grep(pattern = paste0('.*__', name, '.*'), files, value = T, ignore.case = T)
        message('Loading: ', fileName)
        assign(
          name,
          readBinary.margo(
            paste0(rawDataFolder, fileName),
            dataType = name,
            colCount = wellCount
          )
        )
      }
    }
    if(!is.null(metadataFileName))
      metadata <- readMetadata(metadataFileName, start, end)
    else{
      metadata <- data.frame()
      warning('No metadata provided')
    }
    orientation <- orientation * pi / 180  # margo stores degrees; convert to radians
    #Continuous phase: unwrap each individual's orientation trace in parallel.
    #FIX: the original had misplaced parentheses that passed `unwrap` and
    #`mc.cores` as arguments to as.data.frame() and dropped a closing paren,
    #which was a syntax error.
    orientation_continuous <-
      as.data.frame(do.call(cbind, mclapply(as.data.frame(orientation), unwrap, mc.cores = 16)))
    data <-
      .trak(
        area = area,
        centroid = centroid,
        direction = direction,
        dropped_frames = dropped_frames,
        majoraxislength = majoraxislength,
        minoraxislength = minoraxislength,
        orientation = orientation,
        orientation_continuous = orientation_continuous,
        radius = radius,
        speed = speed,
        speed.regressed = data.frame(),
        theta = theta,
        time = time,
        weightedcentroid = weightedcentroid,
        metadata = metadata,
        hz = hz
      )
    if (inferPhenos)
      data <- flies.activity(data)
    return(data)
  }
readBinary <-
  function(fileName,
           colCount,
           dataType,
           size.centroid = 4,
           size.speed_time = 4,
           startFrame = 2) {
    # Read one raw autotracker binary file.
    #
    # Args:
    #   fileName: path to the binary file.
    #   colCount: number of tracked individuals (columns).
    #   dataType: 'speed', 'centroid', or 'time'.
    #   size.centroid, size.speed_time: byte sizes for readBin() (precision
    #     varies across autotracker versions).
    #   startFrame: first frame to keep; earlier frames are discarded.
    # Returns: a data.frame ('speed'/'centroid') or numeric vector ('time').
    file <- file(fileName, "rb")
    # FIX: close the connection on every exit path; previously an unrecognized
    # dataType hit stop() before close() and leaked the connection.
    on.exit(close(file), add = TRUE)
    if (dataType == 'speed') {
      mat <-
        matrix(
          readBin(file, numeric(), n = 1e10, size = size.speed_time),
          ncol = colCount,
          byrow = TRUE
        )
      mat <-
        mat[startFrame:nrow(mat),] #Discard first few frames if needed
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
    else if (dataType == 'centroid') {
      mat.tmp <-
        matrix(
          readBin(file, numeric(), n = 1e10, size = size.centroid),
          ncol = colCount * 2,
          byrow = TRUE
        )
      #Reshape: file stores all x columns then all y columns; interleave to
      #x1,y1,x2,y2,...
      mat <- matrix(ncol = ncol(mat.tmp), nrow = nrow(mat.tmp))
      xCols <- seq(from = 1,
                   to = ncol(mat.tmp) - 1,
                   by = 2)
      yCols <- seq(from = 2,
                   to = ncol(mat.tmp),
                   by = 2)
      mat[, xCols] <- mat.tmp[, 1:colCount]
      mat[, yCols] <- mat.tmp[, (colCount + 1):(colCount * 2)]
      mat <-
        mat[startFrame:nrow(mat),] #Shift to correct for margo output
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
    else if (dataType == 'time') {
      time <- readBin(file, numeric(), n = 1e10, size = size.speed_time)
      return(time[startFrame:length(time)])
    }
    else
      stop(paste('datatype:',
                 dataType,
                 'was not recognized.'))
  }
#WARNING: The precision for the centroid data has been changed between single and double in different autotracker versions.
readBinary.margo <-
  function(fileName,
           colCount,
           dataType = NULL,
           size.centroid = 4,
           size.default = 4,
           startFrame = 1) {
    # Read one margo raw-data binary file.
    #
    # Args:
    #   fileName: path to the binary file.
    #   colCount: number of tracked individuals.
    #   dataType: feature name; 'centroid'/'weightedcentroid',
    #     'dropped_frames', 'time', or anything else (generic numeric matrix).
    #   size.centroid / size.default: byte sizes for readBin().
    #   startFrame: first frame kept (not applied to the generic branch,
    #     matching the original behavior).
    # Returns: a data.frame, or a numeric vector for 'time'.
    #
    # FIX: with the old code a missing dataType produced the cryptic
    # "argument is of length zero" error from `if (NULL == ...)`.
    if (is.null(dataType))
      stop("'dataType' must be supplied.")
    file <- file(fileName, "rb")
    on.exit(close(file), add = TRUE)  # release the connection on every exit path
    if (dataType == 'centroid' || dataType == "weightedcentroid") {
      mat <-
        matrix(
          readBin(file, numeric(), n = 1e10, size = size.centroid),
          ncol = colCount * 2,
          byrow = TRUE
        )
      #Reshape in place: file stores x1..xN,y1..yN; reorder columns to
      #x1,y1,x2,y2,... without a second copy of the matrix.
      xCols <- seq(from = 1,
                   to = ncol(mat) - 1,
                   by = 2)
      yCols <- seq(from = 2,
                   to = ncol(mat),
                   by = 2)
      mat <- mat[,order(c(xCols,yCols))]
      mat <- mat[startFrame:nrow(mat),] #Shift to correct for margo output
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
    else if (dataType == "dropped_frames") {
      #Be careful of syncing here -- the rounding may cause issues
      # NOTE(review): trailing bits beyond a multiple of colCount are dropped;
      # confirm this matches margo's padding convention.
      bits = (rawToBits(readBin(
        file, raw(), n = 1e10, size = 1
      )))
      mat <-
        matrix(bits[1:(DescTools::RoundTo(length(bits), colCount, trunc))], #Round to multiple of well count
               ncol = colCount,
               byrow = TRUE)
      mat = mat==1
      mat = mat[(startFrame):nrow(mat),] #Discard first few frames if needed
      return(as.data.frame(mat))
    }
    else if (dataType == "time") {
      time <- readBin(file, numeric(), n = 1e9, size = size.default)
      return(time[startFrame:length(time)])
    }
    else{
      mat <-
        matrix(
          readBin(file, numeric(), n = 1e9, size = size.default),
          ncol = colCount,
          byrow = TRUE
        )
      mat[is.nan(mat)] <- 0
      return(as.data.frame(mat))
    }
  }
# Read a delimited metadata table, auto-detecting the separator (';', ',' or
# tab) from the header line, lower-casing the column names, and keeping rows
# start:end.
readMetadata <- function(fileName, start = 1, end) {
  headerLine <- readLines(fileName, n = 1)
  sep <-
    if (grepl(";", headerLine)) {
      ";"
    } else if (grepl(",", headerLine)) {
      ","
    } else if (grepl("\t", headerLine)) {
      "\t"
    } else {
      stop(paste('Could not determine field separator in', fileName))
    }
  meta <- read.table(fileName, header = TRUE, sep = sep)
  colnames(meta) <- tolower(colnames(meta))
  meta[start:end, ]
}
#This script provides a quick way to group load packages - it's not needed in the package but could be useful for end users in other applications
# Install any packages in `names` that are not yet present, then attach each
# one with library().
loadPackages <- function(names) {
  missing_pkgs <- setdiff(names, installed.packages()[, "Package"])
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs)
  }
  for (pkg in names) {
    library(pkg, character.only = TRUE)
  }
}
#
# Apply FUN (with extra arguments ...) to every slot of object x; returns a
# list named by slot.
# NOTE(review): propertyNames() is not base R -- confirm which attached
# package provides it.
classApply <- function(x, FUN, ...) {
  result <- list()
  for (slotName in propertyNames(class(x))) {
    result[[slotName]] <- FUN(slot(x, slotName), ...)
  }
  result
}
# Row-wise mean over the columns of x selected by the logical vector l.
groupMean <- function(x, l) {
  selected <- as.matrix(x[, l == TRUE])
  rowMeans(selected)
}
# Join the elements of x with underscores, skipping any leading empty
# accumulator state (so an empty x yields "").
paste_ <- function(x){
  joined <- ""
  for (piece in x) {
    joined <- if (joined == "") piece else paste(joined, piece, sep = "_")
  }
  return(joined)
}
# Pairwise dynamic-time-warping distance matrix over a list of trajectories.
#
# Args:
#   spgeom1: named list of coordinate matrices (one per trajectory).
#   spgeom2: optional second set; previously referenced but never declared,
#     which made the function error unconditionally. Two-set mode was never
#     implemented, so it now fails with a clear message.
# Returns: an upper-triangular matrix of normalized DTW distances with
#   row/column names taken from names(spgeom1). Requires foreach/doParallel
#   (a %dopar% backend) and the dtw package.
dtwDistance_parallel_listed <- function(spgeom1, spgeom2 = NULL) {
  # if second set of lines is not given, calculate pairwise distances within
  # first set of lines
  if (is.null(spgeom2)) {
    n_geoms <- length(spgeom1)
    distmat <- foreach(i=1:(n_geoms - 1),.combine='rbind',.packages = c("sp","dtw","foreach","doParallel")) %dopar% {
      # FIX: was `spgeoms[[i]]` -- an undefined name.
      crds1 <- spgeom1[[i]]
      temp <- foreach(j=(i + 1):n_geoms,.packages = c("sp","dtw"),.combine="c") %do% {
        # FIX: was `crds2 <- crds1 <- spgeoms[[j]]`, which overwrote crds1
        # so every alignment compared a trajectory with itself (distance 0).
        crds2 <- spgeom1[[j]]
        align <- dtw(crds1,crds2)
        align$normalizedDistance # normalized distance
      }
      # left-pad with zeros so every row has n_geoms entries
      c(rep(0,(n_geoms - length(temp))),temp)
    }
    distmat <- rbind(distmat,rep(0,n_geoms))
    distmat <- t(as.matrix(distmat))
  } else {
    stop("Distances between two trajectory sets are not implemented.")
  }
  ids <- names(spgeom1)
  colnames(distmat) <- ids
  rownames(distmat) <- ids
  return(distmat)
}
# One small-multiple panel: the time trace of `series` (one fly's column)
# in a window of +/- `width` frames around frame `i`, with a red vertical
# line at the current frame. `hide_x_axis` is FALSE only for the bottom
# panel, which also carries the x-axis label.
.make_trace_panel <- function(series, i, width, y_label, y_limits,
                              x_label = "", hide_x_axis = TRUE) {
  frames <- (i - width):(i + width)
  p <- ggplot(NULL, aes(x = frames, y = series[frames])) +
    geom_path() +
    theme_minimal() +
    ylab(y_label) +
    xlab(x_label) +
    geom_vline(xintercept = i, color = "red", alpha = 0.5) +
    ylim(y_limits) +
    scale_x_continuous(breaks = round(seq(min(i - width), (i + width), by = 10), 1),
                       limits = c(i - width, i + width))
  if (hide_x_axis) {
    p <- p + theme(axis.text.x = element_blank())
  }
  p
}

# Render an annotated video for one fly: crops the source video around the
# fly with ffmpeg, draws the tracked centroid / body ellipse / movement
# vector on each frame next to time-series panels of the requested
# `variables`, and stitches the composed frames into an .mp4 under temp/.
#
# Args:
#   trak: tracking object with @centroid (x/y column pair per fly),
#         @speed, @orientation, @direction, @majoraxislength,
#         @minoraxislength, @area, @orientation_continuous and @hz slots.
#   video_location: path to the source video for ffmpeg.
#   fly_number: which fly's columns to use.
#   start, end: frame range to render.
#   width: half-width (frames) of the sliding window in the panels.
#   variables: which time-series panels to draw, top to bottom.
# Side effects: writes and deletes files under temp/ and shells out to
#   ffmpeg, which must be on the PATH.
generate_video <- function(trak, video_location, fly_number, start, end, width,
                           variables = c("speed", "orientation", "direction")) {
  # Bounding box origin of this fly's (non-zero) track, padded by 10 px,
  # gives the crop offset for ffmpeg.
  y_col <- 2 * fly_number
  x_col <- 2 * fly_number - 1
  y_min <- min(trak@centroid[which(trak@centroid[, y_col] != 0), y_col])
  x_min <- min(trak@centroid[which(trak@centroid[, x_col] != 0), x_col])
  start_y <- y_min - 10
  start_x <- x_min - 10
  vid_width <- 120
  name_fly_vid <- "temp/Fly_Vid_Frame_%d.png"
  # Crop the relevant clip out of the source video into numbered PNGs.
  cmd <-
    paste0('ffmpeg -ss ',
           start / trak@hz,
           " -i ",
           video_location,
           " -t ",
           (end - start + 1) / trak@hz,
           ' -filter:v "crop=',
           vid_width, ':', vid_width, ':', start_x, ':', start_y,
           '" ',
           name_fly_vid)
  system(cmd)
  for (i in start:end) {
    count <- i - start + 1
    panels <- list()
    j <- 0
    if ("speed" %in% variables) {
      j <- j + 1
      panels[[j]] <- .make_trace_panel(trak@speed[, fly_number], i, width,
                                       "Speed", c(0, 100))
    }
    if ("majoraxislength" %in% variables) {
      j <- j + 1
      panels[[j]] <- .make_trace_panel(trak@majoraxislength[, fly_number], i, width,
                                       "Major Axis", c(0, 50))
    }
    if ("minoraxislength" %in% variables) {
      j <- j + 1
      panels[[j]] <- .make_trace_panel(trak@minoraxislength[, fly_number], i, width,
                                       "Minor Axis", c(0, 20))
    }
    if ("orientation" %in% variables) {
      j <- j + 1
      panels[[j]] <- .make_trace_panel(trak@orientation[, fly_number], i, width,
                                       "Orientation", c(-pi / 2, pi / 2))
    }
    if ("orientation_continuous" %in% variables) {
      j <- j + 1
      # NOTE(review): min/max use asymmetric windows ((start-width):end vs
      # start:end) in the original; preserved as-is — confirm intended.
      panels[[j]] <- .make_trace_panel(
        trak@orientation_continuous[, fly_number], i, width, "Cont. Orientation",
        c(min(trak@orientation_continuous[(start - width):end, fly_number]),
          max(trak@orientation_continuous[start:end, fly_number])))
    }
    if ("area" %in% variables) {
      j <- j + 1
      panels[[j]] <- .make_trace_panel(
        trak@area[, fly_number], i, width, "Area",
        c(min(trak@area[(start - width):(end + width), fly_number]),
          max(trak@area[(start - width):(end + width), fly_number])))
    }
    if ("direction" %in% variables) {
      j <- j + 1
      panels[[j]] <- .make_trace_panel(trak@direction[, fly_number], i, width,
                                       "Direction", c(-pi, pi),
                                       x_label = "Frame Number",
                                       hide_x_axis = FALSE)
    }
    # Tracked quantities for the current frame, in cropped-image coordinates.
    centroid_x <- trak@centroid[i, x_col] - start_x
    centroid_y <- trak@centroid[i, y_col] - start_y
    vec_start_x <- trak@centroid[i - 1, x_col] - start_x
    vec_start_y <- trak@centroid[i - 1, y_col] - start_y
    majoraxislength <- trak@majoraxislength[i, fly_number]
    minoraxislength <- trak@minoraxislength[i, fly_number]
    # Fixed: this previously read from the global `f1s_set1` object
    # instead of the `trak` argument.
    orientation <- trak@orientation[i, fly_number]
    current_name <- paste0("temp/Fly_Vid_Frame_", count, ".png")
    base <- image_ggplot(image_read(current_name)) +
      geom_point(aes(x = centroid_x, y = centroid_y), color = "red") +
      geom_ellipse(aes(x0 = centroid_x, y0 = centroid_y,
                       a = majoraxislength / 2, b = minoraxislength / 2,
                       angle = orientation)) +
      geom_segment(aes(xend = centroid_x, yend = centroid_y,
                       x = vec_start_x, y = vec_start_y),
                   arrow = arrow(length = unit(0.10, "cm"), type = "closed"))
    panel_stack <- plot_grid(plotlist = panels, align = "v", ncol = 1)
    output <- plot_grid(panel_stack, base, ncol = 2,
                        rel_widths = c(1, 1), rel_heights = c(1, 1))
    ggsave(paste0("temp/temp_", i, ".png"), output,
           height = 8, width = 16, units = "in", dpi = 320)
  }
  # Stitch the composed frames into an mp4, then clean up scratch PNGs.
  name_plots <- paste0("temp/Fly", fly_number, "_Start", start, "_End", end,
                       "_Time", format(Sys.time(), "%Y-%m-%d_%H%M%S"), ".mp4")
  merge_cmd <- paste0("ffmpeg -framerate ", trak@hz, " -start_number ", start,
                      " -i 'temp/temp_%d.png' -pix_fmt yuv420p ", name_plots)
  system(merge_cmd, wait = TRUE)
  system("rm temp/temp_*")
  system("rm temp/Fly_Vid_Frame_*")
}
# Decompose the step p1 -> p2 into components parallel and perpendicular
# to the preceding step p0 -> p1, normalized by the length of p0 -> p1.
# `d` is a 3-row matrix whose rows are the points p0, p1 and p2 (x, y).
# Returns a named vector c(v_par = ..., v_perp = ...).
v_parperp <- function(d) {
  prev_pt <- d[1, ]
  mid_pt <- d[2, ]
  next_pt <- d[3, ]
  step_in <- mid_pt - prev_pt    # p1 - p0
  step_out <- next_pt - mid_pt   # p2 - p1
  step_len <- sqrt(step_in[1]^2 + step_in[2]^2)
  parallel <- (step_out[1] * step_in[1] + step_out[2] * step_in[2]) / step_len
  perpendicular <- (step_in[2] * step_out[1] - step_out[2] * step_in[1]) / step_len
  out <- c(parallel, perpendicular)
  names(out) <- c("v_par", "v_perp")
  return(out)
}
# Euclidean distance between points x1 and x2; x1 defaults to the origin.
euc.dist <- function(x1 = c(0, 0), x2) {
  delta <- x1 - x2
  sqrt(sum(delta * delta))
}
# Angle (atan2, radians) of the averaged unit normal to the two segments
# p0 -> p1 and p1 -> p2, where the rows of `window` are p0, p1, p2.
# Relies on euc.dist() defined elsewhere in this file.
normal <- function(window) {
  p_0 <- window[1, ]
  p_1 <- window[2, ]
  p_2 <- window[3, ]
  len_a <- euc.dist(p_0, p_1)
  len_b <- euc.dist(p_1, p_2)
  # Rotate each segment's unit direction by 90 degrees and sum the pair.
  dir_x <- -((p_1[2] - p_0[2]) / len_a + (p_2[2] - p_1[2]) / len_b)
  dir_y <- (p_1[1] - p_0[1]) / len_a + (p_2[1] - p_1[1]) / len_b
  unit <- c(dir_x, dir_y) / euc.dist(x2 = c(dir_x, dir_y))
  return(atan2(unit[2], unit[1]))
}
# Seconds from the clock time embedded in `start_time` (a
# "%m/%d/%Y %H:%M:%S" string, interpreted in EST) until 19:00:00, with
# both clock times re-anchored to today's date in the local timezone.
# Negative when `start_time`'s clock time is after 19:00.
get_time_diff <- function(start_time) {
  # Parse the full timestamp in EST, keep only the clock time, and
  # re-anchor it to today's date via strptime.
  parsed <- as.POSIXct(start_time, format = "%m/%d/%Y %H:%M:%S",
                       origin = "1970-01-01", tz = "EST")
  clock <- strptime(format(parsed, origin = "1970-01-01", format = "%H:%M:%S"),
                    format = "%H:%M:%S")
  # Fixed 19:00:00 section start, run through the same round trip.
  anchor <- as.POSIXct("19:00:00", format = "%H:%M:%S",
                       origin = "1970-01-01", tz = "EST")
  anchor_clock <- strptime(format(anchor, format = "%H:%M:%S",
                                  origin = "1970-01-01"),
                           format = "%H:%M:%S")
  return(as.numeric(difftime(anchor_clock, clock, units = "s")))
}
# Phase-unwrap a sequence: wherever the backward difference
# data[a] - data[a+1] is <= -tol the remainder of the vector is shifted
# down by `step`, and wherever it is >= tol the remainder is shifted up.
#
# Because every shift applies uniformly from the jump onward, the
# decision at each position depends only on the ORIGINAL consecutive
# differences, so the cumulative shift can be computed in one vectorised
# pass. This replaces the original O(n^2) nested loops, which also
# errored on length-0/1 input via the 1:(data_length - 1) sequence.
#
# Args:
#   data: numeric vector (typically angles in radians).
#   tol: jump magnitude that triggers an adjustment.
#   step: amount added/subtracted per detected jump.
# Returns: numeric vector, same length as `data`.
unwrap <- function(data, tol = pi/1.2, step = pi)
{
  if (length(data) < 2) {
    return(data)
  }
  # data[a] - data[a+1] for each consecutive pair, on the original values
  backward_diff <- -diff(data)
  # +step where the series jumps up by >= tol, -step where it jumps down
  adjustment <- step * ((backward_diff >= tol) - (backward_diff <= -tol))
  data + c(0, cumsum(adjustment))
}
|
# Auto-generated fuzz/valgrind regression input for the CNull package:
# calls the sampling routine with m = NULL, zero repetitions and a fixed
# 8x3 matrix containing extreme double magnitudes, then prints the
# structure of the result. NOTE(review): requires the CNull package;
# ::: deliberately reaches an unexported function.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 3.83014228852198e+294, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615784827-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 329
|
r
|
# Auto-generated fuzz/valgrind regression input for the CNull package
# (duplicate dataset row): same fixed 8x3 extreme-value matrix fed to the
# unexported sampling routine; str() prints whatever comes back.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 3.83014228852198e+294, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
bc7a726c423bc5f4513df8a26f1bbaaf biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-002.qdimacs 1417 1747
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-002/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-002.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 131
|
r
|
bc7a726c423bc5f4513df8a26f1bbaaf biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.with-IOC.unfold-002.qdimacs 1417 1747
|
\name{bdoc}
\alias{bdoc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bayesian Discrete Ordered Classification of DNA Barcodes }
\description{
This package contains the "bdoc" function that will classify DNA barcodes in a test data set
to a species in the reference data set of DNA barcodes. This function will produce an assignment probability
together with plots of the posterior probabilities of belonging to any of the species in the reference data set. These plots can be used to determine if a test barcode comes
from a species not contained in the reference data set.
}
\usage{
bdoc(traindata, testdata, delta = 9.7e-08, epsilon = 0.2, priors = "equal", stoppingrule = TRUE, impute = 1, plot.file = "pdf")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{traindata}{ Contains a training dataset of type data.frame with the first column reserved for an ID (possibly genus), the second column reserved for the actual species name, and the remainder of the columns containing the nucleotide sequence of the DNA barcode.}
\item{testdata}{ Contains a test data set of type data.frame of the DNA barcodes to be classified. Note, column 1 should contain the first nucleotide, position 2 the second, and so on.}
\item{delta}{Scalar value between 0 and 0.1 used to adjust the conditional probabilities.}
\item{epsilon}{ Scalar value between 0 and 1 used to adjust the posterior probability calculations.}
\item{priors}{The prior probabilities to be used. This can be a vector of probabilities for each species in the reference data set (should sum to 1) or any of the following options: "equal" - to use prior probabilities all equal to 1/s if s is the number of species in the reference data set; "data" - to use prior probabilities equal to the prevalence of each species in the reference data set; "dir" - to use unequal, arbitrary probabilities generated from a Dirichlet(1,1,...,1) distribution.}
\item{stoppingrule}{Logical. By default stoppingrule=TRUE, which will terminate the sequential calculation when the posterior probability for a species in the reference data set equals 1. If set to FALSE, the calculation continues until the end of the barcode is reached.}
\item{impute}{Imputation method. If impute=1, the proportional allocation method will be used. If impute=2, the majority rule imputation will be performed.}
\item{plot.file}{ Type of posterior probability plot to be saved to the current directory. By default, plot.file="pdf", which will save a PDF of the posterior probability plot(s). Other options include: "jpg" - save a JPEG file of the plot(s); "png" - save a PNG file of the plot(s); "wmf" - save a WMF file of the plot(s); "ps" - save a PS file of the plot(s).}
}
\details{
The object "traindata" should be of type data.frame and contain the species-level identification in the second column. This column should be named "species" in order for the function to construct the correct conditional probabilities. The object "testdata" should be of type data.frame with only the barcodes of the DNA sequences to be classified. All the rest of the options have default values that are strongly recommended. Plots of the posterior probabilities for each of the barcodes in the test data set are constructed and saved with format plot.file to the current R directory. See example below.
}
\value{
\item{k }{The total number of barcodes in the test data set.}
\item{totaltime }{The total time used for all of the barcodes in the test data set.}
\item{imp }{The time used to impute the missing values.}
\item{like }{The time used construct and adjust the conditional probabilities.}
\item{class }{The time used to compute the posteriors and make the species level assignment.}
\item{delta }{The value used to adjust the conditional probabilities.}
\item{species.class }{A matrix of the species assignment as well as the probability of assignment for each barcode in the test data set.}
\item{priors }{A vector of the initial prior probabilities.}
\item{posteriors }{A list containing: 1. the species-level assignment for each barcode in the test data set; 2. the matrix of posterior probabilities at each position for each barcode in the test data set. See the example below.}
\item{Posterior Probability Plots }{Posterior probability plots are constructed and saved in the format of plot.file to the current R directory named "seq1", "seq2", and so on.}
}
\references{ Hebert, P., A. Cywinska, S. Ball, and J. deWaard (2003). Biological identifications
through DNA barcodes. Proc. R. Soc. Lond. (B) 270, 313-322. }
\author{ Michael Anderson and Suzanne Dubnicka }
\seealso{ \code{\link{data.frame}} }
\examples{
data(battraindata1)
data(battestdata1)
traindata<-battraindata1
#battraindata1 contains the genus (column 1) and species (column 2)
#barcode information for 758 bats representing 96 unique species.
#The length of each barcode is 659 nucleotides long.
testdata<-battestdata1
#battestdata1 contains the genus (column 1) and species (column 2)
#barcode information for 82 bats that were held out of battraindata1.
#The length of each barcode is 659 nucleotides long and to classify,
#the first two columns need to be removed as these will usually not
#be known.
#A quick view of how bdoc performs using the first 10 nucleotide
#positions of the DNA barcodes. NOTE: This is just for demonstrating
#bdoc usage in a shorter time frame than the entire data set would
#require. See the commented example below for bdoc performance on all
#nucleotide positions.
result<-bdoc(traindata[,1:12],testdata[,-c(1:2,13:661)]) #after this executes, plots of type
#plot.file names "seq1", "seq2",
#and so on can be found in the
#folder identified by getwd().
#Performance of bdoc on the training and test data sets using all nucleotides can be seen using the following.
#result<-bdoc(traindata,testdata[,-c(1:2)]) #after this executes, plots of type
#plot.file names "seq1", "seq2",
#and so on can be found in the
#folder identified by getwd().
#result$priors #displays the priors used for classification
result$species.class #gives the matrix of species assignments
#and probabilities.
#result$posteriors[[1]]$post #gives the matrix of posterior probabilities
#at each position for barcode 1. Change
#posteriors[[1]] to posteriors[[2]] for the
#posteriors for barcode 2, etc.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ classif }
% __ONLY ONE__ keyword per line
|
/man/bdoc.Rd
|
no_license
|
cran/bdoc
|
R
| false
| false
| 7,091
|
rd
|
\name{bdoc}
\alias{bdoc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bayesian Discrete Ordered Classification of DNA Barcodes }
\description{
This package contains the "bdoc" function that will classify DNA barcodes in a test data set
to a species in the reference data set of DNA barcodes. This function will produce an assignment probability
together with plots of the posterior probabilities of belonging to any of the species in the reference data set. These plots can be used to determine if a test barcode comes
from a species not contained in the reference data set.
}
\usage{
bdoc(traindata, testdata, delta = 9.7e-08, epsilon = 0.2, priors = "equal", stoppingrule = TRUE, impute = 1, plot.file = "pdf")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{traindata}{ Contains a training dataset of type data.frame with the first column reserved for an ID (possibly genus), the second column reserved for the actual species name, and the remainder of the columns containing the nucleotide sequence of the DNA barcode.}
\item{testdata}{ Contains a test data set of type data.frame of the DNA barcodes to be classified. Note, column 1 should contain the first nucleotide, position 2 the second, and so on.}
\item{delta}{Scalar value between 0 and 0.1 used to adjust the conditional probabilities.}
\item{epsilon}{ Scalar value between 0 and 1 used to adjust the posterior probability calculations.}
\item{priors}{The prior probabilities to be used. This can be a vector of probabilities for each species in the reference data set (should sum to 1) or any of the following options: "equal" - to use prior probabilities all equal to 1/s if s is the number of species in the reference data set; "data" - to use prior probabilities equal to the prevalence of each species in the reference data set; "dir" - to use unequal, arbitrary probabilities generated from a Dirichlet(1,1,...,1) distribution.}
\item{stoppingrule}{Logical. By default stoppingrule=TRUE, which will terminate the sequential calculation when the posterior probability for a species in the reference data set equals 1. If set to FALSE, the calculation continues until the end of the barcode is reached.}
\item{impute}{Imputation method. If impute=1, the proportional allocation method will be used. If impute=2, the majority rule imputation will be performed.}
\item{plot.file}{ Type of posterior probability plot to be saved to the current directory. By default, plot.file="pdf", which will save a PDF of the posterior probability plot(s). Other options include: "jpg" - save a JPEG file of the plot(s); "png" - save a PNG file of the plot(s); "wmf" - save a WMF file of the plot(s); "ps" - save a PS file of the plot(s).}
}
\details{
The object "traindata" should be of type data.frame and contain the species-level identification in the second column. This column should be named "species" in order for the function to construct the correct conditional probabilities. The object "testdata" should be of type data.frame with only the barcodes of the DNA sequences to be classified. All the rest of the options have default values that are strongly recommended. Plots of the posterior probabilities for each of the barcodes in the test data set are constructed and saved with format plot.file to the current R directory. See example below.
}
\value{
\item{k }{The total number of barcodes in the test data set.}
\item{totaltime }{The total time used for all of the barcodes in the test data set.}
\item{imp }{The time used to impute the missing values.}
\item{like }{The time used construct and adjust the conditional probabilities.}
\item{class }{The time used to compute the posteriors and make the species level assignment.}
\item{delta }{The value used to adjust the conditional probabilities.}
\item{species.class }{A matrix of the species assignment as well as the probability of assignment for each barcode in the test data set.}
\item{priors }{A vector of the initial prior probabilities.}
\item{posteriors }{A list containing: 1. the species-level assignment for each barcode in the test data set; 2. the matrix of posterior probabilities at each position for each barcode in the test data set. See the example below.}
\item{Posterior Probability Plots }{Posterior probability plots are constructed and saved in the format of plot.file to the current R directory named "seq1", "seq2", and so on.}
}
\references{ Hebert, P., A. Cywinska, S. Ball, and J. deWaard (2003). Biological identifications
through DNA barcodes. Proc. R. Soc. Lond. (B) 270, 313-322. }
\author{ Michael Anderson and Suzanne Dubnicka }
\seealso{ \code{\link{data.frame}} }
\examples{
data(battraindata1)
data(battestdata1)
traindata<-battraindata1
#battraindata1 contains the genus (column 1) and species (column 2)
#barcode information for 758 bats representing 96 unique species.
#The length of each barcode is 659 nucleotides long.
testdata<-battestdata1
#battestdata1 contains the genus (column 1) and species (column 2)
#barcode information for 82 bats that were held out of battraindata1.
#The length of each barcode is 659 nucleotides long and to classify,
#the first two columns need to be removed as these will usually not
#be known.
#A quick view of how bdoc performs using the first 10 nucleotide
#positions of the DNA barcodes. NOTE: This is just for demonstrating
#bdoc usage in a shorter time frame than the entire data set would
#require. See the commented example below for bdoc performance on all
#nucleotide positions.
result<-bdoc(traindata[,1:12],testdata[,-c(1:2,13:661)]) #after this executes, plots of type
#plot.file names "seq1", "seq2",
#and so on can be found in the
#folder identified by getwd().
#Performance of bdoc on the training and test data sets using all nucleotides can be seen using the following.
#result<-bdoc(traindata,testdata[,-c(1:2)]) #after this executes, plots of type
#plot.file names "seq1", "seq2",
#and so on can be found in the
#folder identified by getwd().
#result$priors #displays the priors used for classification
result$species.class #gives the matrix of species assignments
#and probabilities.
#result$posteriors[[1]]$post #gives the matrix of posterior probabilities
#at each position for barcode 1. Change
#posteriors[[1]] to posteriors[[2]] for the
#posteriors for barcode 2, etc.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ classif }
% __ONLY ONE__ keyword per line
|
context("rsurv.R unit tests")
library("data.table")
# Replication of exponential distribution --------------------------------------
test_that("rsurv() produces correct output for exponential distribution" , {
set.seed(10)
N_REP <- 10000
rates <- c(.25, .5, 2)
N <- length(rates)
MAX_T <- 50
sc <- example_survival_curves(n = N, rates = rates,
times = seq(0, MAX_T, 1/52))
sim <- data.table(
id = rep(1:N, times = N_REP),
time = pmin(rsurv(sc, n_rep = N_REP), MAX_T)
)
sim_summary <- sim[, .(mean = mean(time)),
by = "id"]
expect_equal(
sim_summary$mean,
1/rates,
tolerance = .03
)
})
|
/tests/testthat/test-rsurv.R
|
no_license
|
PeterJemley/rsurv
|
R
| false
| false
| 686
|
r
|
context("rsurv.R unit tests")
library("data.table")
# Replication of exponential distribution --------------------------------------
test_that("rsurv() produces correct output for exponential distribution" , {
set.seed(10)
N_REP <- 10000
rates <- c(.25, .5, 2)
N <- length(rates)
MAX_T <- 50
sc <- example_survival_curves(n = N, rates = rates,
times = seq(0, MAX_T, 1/52))
sim <- data.table(
id = rep(1:N, times = N_REP),
time = pmin(rsurv(sc, n_rep = N_REP), MAX_T)
)
sim_summary <- sim[, .(mean = mean(time)),
by = "id"]
expect_equal(
sim_summary$mean,
1/rates,
tolerance = .03
)
})
|
\name{rdistaz}
\alias{rdistaz}
\title{Distance and Azimuth from two points }
\description{
Calculate distance, Azimuth and Back-Azimuth from two points on Globe.
}
\usage{
rdistaz(olat, olon, tlat, tlon)
}
\arguments{
\item{olat}{origin latitude, degrees }
\item{olon}{origin longitude, degrees }
\item{tlat}{target latitude, degrees }
\item{tlon}{target longitude, degrees }
}
\value{
List:
\item{del}{Delta, angle in degrees}
\item{az}{Azimuth, angle in degrees}
\item{baz}{back Azimuth, (az+180) in degrees}
\item{dist}{distance in km}
\item{err}{0 or 1, error flag. 0=error, 1=no error, see details}
}
\details{
Program is set up for one origin (olat, olon) pair and many
target (tlat, tlon) pairs given as vectors.
If multiple olat and olon are given, the program returns a list
of outputs for each.
If olat or any tlat is greater than 90 or less than -90 NA is returned
and error flag is 0.
If any tlat and tlon is equal to olat and olon, the points are
coincident.
In that case the distances are set to zero, but the az and baz are NA,
and the error flag is set to 0.
}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\seealso{along.great, getgreatarc}
\examples{
#### one point
d <- rdistaz(12, 23, -32, -65)
d
#### many random target points
org <- c(80.222, -100.940)
targ <- cbind(runif(10, 10, 50), runif(10, 20, 100))
rdistaz(org[1], org[2], targ[,1], targ[,2])
############ if origin and target are identical
##### the distance is zero, but the az and baz are not defined
rdistaz(80.222, -100.940, 80.222, -100.940)
######################## set one of the targets equal to the origin
targ[7,1] <- org[1]
targ[7,2] <- org[2]
rdistaz(org[1], org[2], targ[,1], targ[,2])
#### put in erroneous latitude data
targ[3,1] <- -91.3
rdistaz(org[1], org[2], targ[,1], targ[,2])
}
\keyword{misc}
|
/man/rdistaz.Rd
|
no_license
|
Lluis76/RSEIS
|
R
| false
| false
| 1,886
|
rd
|
\name{rdistaz}
\alias{rdistaz}
\title{Distance and Azimuth from two points }
\description{
Calculate distance, Azimuth and Back-Azimuth from two points on Globe.
}
\usage{
rdistaz(olat, olon, tlat, tlon)
}
\arguments{
\item{olat}{origin latitude, degrees }
\item{olon}{origin longitude, degrees }
\item{tlat}{target latitude, degrees }
\item{tlon}{target longitude, degrees }
}
\value{
List:
\item{del}{Delta, angle in degrees}
\item{az}{Azimuth, angle in degrees}
\item{baz}{back Azimuth, (az+180) in degrees}
\item{dist}{distance in km}
\item{err}{0 or 1, error flag. 0=error, 1=no error, see details}
}
\details{
Program is set up for one origin (olat, olon) pair and many
target (tlat, tlon) pairs given as vectors.
If multiple olat and olon are given, the program returns a list
of outputs for each.
If olat or any tlat is greater than 90 or less than -90 NA is returned
and error flag is 0.
If any tlat and tlon is equal to olat and olon, the points are
coincident.
In that case the distances are set to zero, but the az and baz are NA,
and the error flag is set to 0.
}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\seealso{along.great, getgreatarc}
\examples{
#### one point
d <- rdistaz(12, 23, -32, -65)
d
#### many random target points
org <- c(80.222, -100.940)
targ <- cbind(runif(10, 10, 50), runif(10, 20, 100))
rdistaz(org[1], org[2], targ[,1], targ[,2])
############ if origin and target are identical
##### the distance is zero, but the az and baz are not defined
rdistaz(80.222, -100.940, 80.222, -100.940)
######################## set one of the targets equal to the origin
targ[7,1] <- org[1]
targ[7,2] <- org[2]
rdistaz(org[1], org[2], targ[,1], targ[,2])
#### put in erroneous latitude data
targ[3,1] <- -91.3
rdistaz(org[1], org[2], targ[,1], targ[,2])
}
\keyword{misc}
|
library(aws.s3)
library(jsonlite)
library(curl)
library(glue)
## Need to have AWS credentials set in .Renviron already!
## Compute the 95% confidence interval for the median using
## a remote server
# 95% confidence interval for the median of `x`, computed by a remote
# plumber server that reads the staged data from S3.
#
# Args:
#   x: data whose median confidence interval is wanted.
#   N: number of replicates requested from the server.
# Returns: the server's answer, parsed from JSON.
# Side effects: uploads `x` to the "confint" S3 bucket under key "xdata".
# Requires AWS credentials already set in .Renviron.
median_CI <- function(x, N = 1000) {
  bucket <- "confint"
  ## Fail fast if the target bucket is not visible to these credentials
  if (!(bucket %in% bucketlist()$Bucket)) {
    stop("'", bucket, "' bucket not available")
  }
  message("using '", bucket, "' bucket")
  ## Stage the data on S3 under a fixed key for the server to pick up
  key <- "xdata"
  if (!s3saveRDS(x, key, bucket)) {
    stop("problem saving data to S3")
  }
  ## Ask the remote plumber endpoint for the interval
  request_url <- glue("http://67.205.166.80:8000/confint?",
                      "key={key}&bucket={bucket}&N={N}")
  con <- curl(request_url)
  message("connecting to server")
  tryCatch({
    ans <- readLines(con, 1, warn = FALSE)
  }, finally = {
    ## Close server connection even if the read fails
    close(con)
  })
  ## Convert answer from JSON and return
  fromJSON(ans)
}
|
/confmedian_client.R
|
no_license
|
rdpeng/plumberdemo
|
R
| false
| false
| 1,336
|
r
|
library(aws.s3)
library(jsonlite)
library(curl)
library(glue)
## Need to have AWS credentials set in .Renviron already!
## Compute the 95% confidence interval for the median using
## a remote server
# Duplicate dataset row of median_CI: 95% confidence interval for the
# median of `x`, computed by a remote plumber server via S3 hand-off.
#
# Args:
#   x: data whose median confidence interval is wanted.
#   N: number of replicates requested from the server.
# Returns: the server's answer, parsed from JSON.
# Side effects: uploads `x` to the "confint" S3 bucket under key "xdata".
# Requires AWS credentials already set in .Renviron.
median_CI <- function(x, N = 1000) {
  bucket <- "confint"
  ## Fail fast if the target bucket is not visible to these credentials
  if (!(bucket %in% bucketlist()$Bucket)) {
    stop("'", bucket, "' bucket not available")
  }
  message("using '", bucket, "' bucket")
  ## Stage the data on S3 under a fixed key for the server to pick up
  key <- "xdata"
  if (!s3saveRDS(x, key, bucket)) {
    stop("problem saving data to S3")
  }
  ## Ask the remote plumber endpoint for the interval
  request_url <- glue("http://67.205.166.80:8000/confint?",
                      "key={key}&bucket={bucket}&N={N}")
  con <- curl(request_url)
  message("connecting to server")
  tryCatch({
    ans <- readLines(con, 1, warn = FALSE)
  }, finally = {
    ## Close server connection even if the read fails
    close(con)
  })
  ## Convert answer from JSON and return
  fromJSON(ans)
}
|
# NOTE(review): rm(list=ls()) and an absolute setwd() make this script
# machine-specific; consider an RStudio project / relative paths instead.
rm(list=ls())
setwd("/Users/ivor.williams/Documents/CRED/CRCP/NCRMP/Report Card Workshop/Fish Indicators 2017")
# SET UP ------------------------------------------------------------------
library(gdata) # needed for drop_levels()
library(reshape) # reshape library includes the cast() function used below
# Team helper functions (SiteNumLeadingZeros, CalcMeanSHMeanSHDiff, ...)
source("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/lib/fish_team_functions.R")
source("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/lib/Islandwide Mean&Variance Functions.R")
# Per-site mean species richness: for each SITEVISITID/METHOD, the number
# of species recorded with COUNT > 0 in a rep, averaged over REPs.
#
# Differs from the standard Calc_Site_Species_Richness in that species
# rows with zero counts (left in the data file so every site has records)
# are NOT counted.
#
# Args:
#   x: data frame with columns SITEVISITID, METHOD, REP, SPECIES, COUNT.
# Returns: data frame with columns SITEVISITID, METHOD, SPECIESRICHNESS.
Modified_Site_Species_Richness <- function(x) {
  # total count per species per rep
  per_species <- aggregate(x$COUNT, by = x[, c("SITEVISITID", "METHOD", "REP", "SPECIES")], sum)
  # presence/absence: any positive total counts as one species.
  # (The original flagged only totals > 1, which happened to work for
  # integer counts but missed fractional totals in (0, 1) and left
  # negative values untouched.)
  per_species$x <- as.numeric(per_species$x > 0)
  # number of species present in each rep
  per_rep <- aggregate(per_species$x, by = per_species[, c("SITEVISITID", "METHOD", "REP")], sum)
  # mean richness across reps for each site visit / method
  out <- aggregate(per_rep$x, by = per_rep[, c("SITEVISITID", "METHOD")], mean)
  dimnames(out)[[2]] <- c("SITEVISITID", "METHOD", "SPECIESRICHNESS")
  return(out)
} # end Modified_Site_Species_Richness
sm<-read.csv("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/data/SITE MASTER2016.csv")
sm$SITE<-SiteNumLeadingZeros(sm$SITE)
sectors<-read.csv("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/data/Sectors-Strata-Areas2016.csv", stringsAsFactors=FALSE)
# FISH REA WORKINGS ----------------------------------------------------------------
load("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/data/ALL_REA_FISH_RAW.rdata")
x<-df
# HOUSEKEEPING ------------------------------------------------------------
# clean up the data to only fields we currently use
DATA_COLS<-c("SITEVISITID", "METHOD", "DATE_", "OBS_YEAR", "SITE", "REEF_ZONE", "DEPTH_BIN", "ISLAND", "LATITUDE", "LONGITUDE", "REGION" , "REGION_NAME", "SECTOR", "SPECIAL_AREA", "EXCLUDE_FLAG",
"REP", "REPLICATEID", "DIVER", "HABITAT_CODE", "DEPTH",
"HARD_CORAL", "MA", "TA", "CCA", "SAND", "SOFT_CORAL", "CLAM" , "SPONGE", "CORALLIMORPH", "CYANO", "TUNICATE", "ZOANTHID" , "OTHER", "COMPLEXITY", "TRAINING_YN", "VISIBILITY",
"SPECIES", "COUNT", "SIZE_", "OBS_TYPE",
"SUBSTRATE_HEIGHT_0", "SUBSTRATE_HEIGHT_20", "SUBSTRATE_HEIGHT_50", "SUBSTRATE_HEIGHT_100", "SUBSTRATE_HEIGHT_150", "MAX_HEIGHT",
"RANK", "SCIENTIFIC_NAME", "TAXONNAME", "COMMONNAME", "GENUS", "FAMILY", "COMMONFAMILYALL", "LMAX", "LW_A", "LW_B", "LENGTH_CONVERSION_FACTOR", "TROPHIC", "TROPHIC_MONREP")
head(x[,DATA_COLS])
x<-x[,DATA_COLS]
# by default, remove sites with EXCLUDE_FLAG set to TRUE
x[is.na(x$TRAINING_YN),]$TRAINING_YN<-FALSE # Training flag of NA is equivalent to a FALSE .. as none of the older data was 'training data'
x<-subset(x, x$TRAINING_YN==FALSE)
x<-subset(x, x$EXCLUDE_FLAG==0, drop=TRUE)
x<-subset(x, x$OBS_TYPE %in% c("U","I","N", "F","T"))  # keep only standard nSPC observation types
#x<-subset(x, x$REGION=="SAMOA")
x<-subset(x, x$REGION != "CT")
x<-subset(x, x$METHOD %in% c("nSPC"))
x<-subset(x, x$OBS_YEAR >2009)  # analysis window: 2010 onwards
x<-subset(x, x$REEF_ZONE %in% c("Forereef", "Protected Slope"))
x<-subset(x, x$ISLAND!="South Bank")
#x<-subset(x, x$OBS_YEAR != 2016)
x$SITE<-SiteNumLeadingZeros(x$SITE)  # same SITE normalization as the site master, so the merge below works
x<-droplevels(x)
#add SEC_NAME to x
# this would be better if SECTOR field in database was up to date properly .. rather than merge with the site_Sectors spreadsheet
x<-merge(x, sm[,c("SITE", "SEC_NAME", "ANALYSIS_SEC", "ANALYSIS_STRATA", "ANALYSIS_SCHEME")], by="SITE", all.x=TRUE)
#for ones that are missing SEC_NAME, set it to ISLAND
no_secs<-is.na(x$SEC_NAME)
tmp<-as.character(x$SEC_NAME)
tmp[no_secs]<-as.character(x[no_secs,]$ISLAND)
x$SEC_NAME<-tmp
table(x$SEC_NAME)
############################################################################################
# remove the component SUBSTRATE_HEIGHT fields
# CalcMeanSHMeanSHDiff (sourced helper) collapses the five height-bin columns into
# a mean substrate height and a mean height-difference per record
sh_out<-CalcMeanSHMeanSHDiff(x)
x$MEAN_SH<-sh_out[[1]]
x$MEAN_SH_DIFF<-sh_out[[2]]
x<-x[, setdiff(names(x),c("SUBSTRATE_HEIGHT_0", "SUBSTRATE_HEIGHT_20", "SUBSTRATE_HEIGHT_50", "SUBSTRATE_HEIGHT_100", "SUBSTRATE_HEIGHT_150"))]
############################################################################################
x<-droplevels(x)
#######################
## CLEAN UP NAs #######
#######################
# Patch known missing life-history values, then map NA -> "UNKNOWN" in the factor
# columns.  A factor cannot hold a value outside its level set, so each factor first
# gets an "UNKNOWN" level appended before the NA assignment.
x[is.na(x$LMAX),]$SPECIES  # print species still missing LMAX, for review
x[is.na(x$LMAX) & x$SPECIES=="GYMI",]$LMAX<-45
x[is.na(x$TROPHIC_MONREP) & x$SPECIES=="ABNO",]$TROPHIC_MONREP<-"PLANKTIVORE"
tmp.lev<-levels(x$HABITAT_CODE); head(tmp.lev)
levels(x$HABITAT_CODE)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$SCIENTIFIC_NAME); head(tmp.lev)
levels(x$SCIENTIFIC_NAME)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$COMMONNAME); head(tmp.lev)
levels(x$COMMONNAME)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$GENUS); head(tmp.lev)
levels(x$GENUS)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$FAMILY); head(tmp.lev)
levels(x$FAMILY)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$COMMONFAMILYALL); head(tmp.lev)
levels(x$COMMONFAMILYALL)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$TROPHIC_MONREP); head(tmp.lev)
levels(x$TROPHIC_MONREP)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(x$RANK); head(tmp.lev)
levels(x$RANK)<-c(tmp.lev, "UNKNOWN")
x[is.na(x$HABITAT_CODE),"HABITAT_CODE"]<-"UNKNOWN"
x[is.na(x$SCIENTIFIC_NAME),"SCIENTIFIC_NAME"]<-"UNKNOWN"
x[is.na(x$COMMONNAME),"COMMONNAME"]<-"UNKNOWN"
x[is.na(x$GENUS),"GENUS"]<-"UNKNOWN"
x[is.na(x$FAMILY),"FAMILY"]<-"UNKNOWN"
x[is.na(x$COMMONFAMILYALL),"COMMONFAMILYALL"]<-"UNKNOWN"
x[is.na(x$TROPHIC_MONREP),"TROPHIC_MONREP"]<-"UNKNOWN"
x[is.na(x$RANK),"RANK"]<-"UNKNOWN"
# numeric NAs become zeros so downstream sums/aggregations do not propagate NA
x[is.na(x$COUNT),]$COUNT<-0
x[is.na(x$SIZE_),]$SIZE_<-0
#fixing unknown lat/long from a sites surveyed by Val Brown in Guam in 2015. These values are probably close
# .. putting this in here so that we do not have NAs in the LAT and LONG .. but we do not want to save these to the actual master data file
x[x$SITE=="GUA-01310",]$LATITUDE<-13.24173
x[x$SITE=="GUA-01310",]$LONGITUDE<-144.70428
wd<-droplevels(x)  # 'wd' = working data from here on
wd$ANALYSIS_YEAR<-wd$OBS_YEAR
wd$ANALYSIS_STRATA<-paste(wd$REEF_ZONE, wd$DEPTH_BIN, sep="")  # strata = reef zone x depth bin
#island_table<-Aggregate_InputTable(wd, c("REGION","ISLAND"))
# pool minor benthic cover categories into a single OTHER_BENTHIC column
OTHER_BENTHIC<-c("CLAM", "CORALLIMORPH", "ZOANTHID", "TUNICATE", "SPONGE", "OTHER", "CYANO", "TA")
wd$OTHER_BENTHIC<-rowSums(wd[,OTHER_BENTHIC],na.rm=T)
SURVEY_SITE_DATA<-c("DEPTH", "HARD_CORAL", "SAND", "MA", "CCA", "MEAN_SH")
#NOTE THAT BENTHOS DOES NOT ALWAYS SUM TO 100% .. I THINK BECAUSE OF ERRORS IN THE ORIGINAL DATA ENTERED INTO THE DATABASE. NEW CODE BELOW IS AN ATTEMPT TO FIX THAT
# Go through all surveys checking for situation where some reps have NAs in a particular BENTHIC_FIELDS, but other records have non-zeros - in that situation, we were recording a field but one other diver left it blank - those should be zeros not NAs
# this is something that should really be fixed in the database rather than here (as its an error at time of data entry)
#### BELOW code does the job, but should be cleaned up and put in a function
### IDW- COULD GREATLY SPEED THIS UP BY DOING IT FOR A REGION AND YEAR .. AND LIMIT TO ONLY nSPC
# i.e make the checking for some data and some NAs at the levels of a full survey round .. and also use indexes into the wd structure, rather than create temp dfs (tmp_data)
BENTHIC_FIELDS<-c("HARD_CORAL", "MA", "TA", "CYANO", "CCA", "SAND", "OTHER")
UNIQUE_ROUND<-c("REGION", "OBS_YEAR", "METHOD")
round_table<-Aggregate_InputTable(wd, UNIQUE_ROUND)  # one row per survey round (region x year x method)
# countBD = how many benthic fields have data in each record; 0 means no benthic data at all
wd$countBD<-apply(wd[,BENTHIC_FIELDS], 1, function(xx) length(which(!is.na(xx)))) #IDW 10-22-2013 checking for situation where there is NO benthic data at all
# For each survey round: if a benthic field has BOTH values and NAs within the round,
# the NAs were data-entry omissions, so convert them to 0.  Fields that are entirely
# NA for a round (never recorded) are left as NA.
for(i in 1:dim(round_table)[1])
{
if(round_table[i,"METHOD"]=="nSPC")
{
tmp_data<-wd[wd$OBS_YEAR==round_table[i,"OBS_YEAR"] & wd$METHOD==round_table[i,"METHOD"] & wd$REGION==round_table[i,"REGION"],]
#go through BENTHIC_FIELDS, checking whether there are some NAs and some data values
for(j in 1:length(BENTHIC_FIELDS))
{
## IF there are both non NAs and NAs
if(length(tmp_data[!is.na(tmp_data[,BENTHIC_FIELDS[j]]),BENTHIC_FIELDS[j]]) > 0
& length(tmp_data[is.na(tmp_data[,BENTHIC_FIELDS[j]]),BENTHIC_FIELDS[j]]) > 0)
{
#set all NAs of that field to 0
tmp_data[is.na(tmp_data[,BENTHIC_FIELDS[j]]),BENTHIC_FIELDS[j]]<-0
#now rewrite the benthic fields with NAs converted to zeros
# (relies on the same logical filter returning rows in the same order as tmp_data)
wd[wd$OBS_YEAR==round_table[i,"OBS_YEAR"] & wd$METHOD==round_table[i,"METHOD"] & wd$REGION==round_table[i,"REGION"],BENTHIC_FIELDS[j]]<-tmp_data[,BENTHIC_FIELDS[j]]
}
}
}
}
# now reset zeros to NAs for all records where there was NO benthic data at all
wd[wd$countBD==0,BENTHIC_FIELDS]<-NA
#generate Lm values (length at first maturity, estimated from LMAX)
#Fish Species Table
# RAYFINNED FISHES: log10(Lm) = -0.1189 + 0.9157 * log10(Lmax) (Binohlan and Froese J Appl Ichthyology 2009)
# ELASMOBRANCHS: log10(Lm) = -0.1246 + 0.9924 * log10(Lmax)
# [Carcharhinidae, Dasyatidae, Ginglymostomatidae, Myliobatidae]
#######
wd$Lm<-10^(-0.1189+(0.9157*log10(wd$LMAX)))
ELASMO<-c("Carcharhinidae", "Dasyatidae", "Ginglymostomatidae", "Myliobatidae")
# elasmobranchs use their own regression coefficients, overwriting the ray-finned estimate
wd[wd$FAMILY %in% ELASMO,]$Lm<-10^(-0.1246+(0.9924*log10(wd[wd$FAMILY %in% ELASMO,]$LMAX)))
wd<-droplevels(wd)
WD_SAVE<-wd  # snapshot: later metric sections each restart from this copy
# WORKING WITH POOLING READY DATA FROM HERE ON -------------------------------------
#base information about the survey - field names should match those in input file (obviously!)
UNIQUE_SURVEY<-c("SITEVISITID","METHOD")
UNIQUE_REP<-c(UNIQUE_SURVEY, "REP")
UNIQUE_COUNT<-c(UNIQUE_REP, "REPLICATEID")
#get base survey info, calculate average depth+complexity+so on
SURVEY_INFO<-c("OBS_YEAR", "REGION", "REGION_NAME", "ISLAND", "SITE", "DATE_", "REEF_ZONE", "DEPTH_BIN", "LATITUDE", "LONGITUDE", "SEC_NAME", "ANALYSIS_SEC", "ANALYSIS_YEAR", "ANALYSIS_STRATA", "EXCLUDE_FLAG", "SITEVISITID", "METHOD")
survey_table<-Aggregate_InputTable(wd, SURVEY_INFO)  # one row per survey
#write.csv(survey_table, file="tmpSamoaSites.csv")
survey_est_benthos<-Calc_Site_nSurveysArea(wd, UNIQUE_SURVEY, UNIQUE_REP, UNIQUE_COUNT, SURVEY_SITE_DATA) #Calc_Site_nSurveysArea deals better with situations where one REP has benthic data and other doesnt.
surveys<-merge(survey_table, survey_est_benthos, by=UNIQUE_SURVEY)
write.csv(surveys, file="tmpSurveys2010_16.csv")
#Pull all species information into a separate df, for later use ..
FISH_SPECIES_FIELDS<-c("SPECIES","TAXONNAME", "FAMILY", "COMMONFAMILYALL", "TROPHIC", "TROPHIC_MONREP", "LW_A", "LW_B", "LMAX", "LENGTH_CONVERSION_FACTOR", "Lm")
species_table<-Aggregate_InputTable(wd, FISH_SPECIES_FIELDS)  # one row per species
write.csv(species_table, file="tmpSpeciesTable.csv")
# GENERATE SUMMARY METRICS --------------------------------------------------
## MEAN SIZE OF TARGET SPECIES # MUST BE 15cm OR LARGER AND AT LEAST 40% of LMAX ##########################################
wd<-WD_SAVE
MINSIZE_PROP_CUT_OFF<-0.3   # fish smaller than 30% of LMAX are excluded (recruits add noise)
BIO_RANK_CUT_OFF<-25
MIN_TL<-15                  # minimum total length (cm) for the mean-size metric
SPATIAL_BASE<-c("REGION","ISLAND")
#head(bio)
TARGET_FAMILIES<-c("Acanthuridae", "Mullidae", "Scaridae", "Holocentridae") #Siganidae? Priacanthidae? Carangidae? Serranidae? Lutjanidae? Leethrinidae?
#TARGET_FAMILIES<-c("Scaridae") #FOR PURPOSES OF GENERATING MANAGEABLE SPECIES FOR DEMONSTRATION
tgt_species<-unique(wd[wd$FAMILY %in% TARGET_FAMILIES & wd$LMAX > 30,]$SPECIES) # Perhaps ALSO use species that are only present at more than X% of sites in The region? BEST TO HAVE have defined list for each region
#LIMIT THIS TO TOP SPECIES .. drop records of species not in top ranked speices from bio sampling data
#bio<-bio[!bio$RANK>BIO_RANK_CUT_OFF,]
wd[!wd$SPECIES %in% tgt_species,]$COUNT<-0  # zero out non-target species rather than dropping rows
#remove fishes that are very small (do not want to penalize location for having large recruitment!, also recruitment variability will add noise)
wd[wd$SIZE_ < wd$LMAX*MINSIZE_PROP_CUT_OFF,]$COUNT<-0
wd<-wd[wd$COUNT>0,]
wd<-droplevels(wd)
#Calculate portion of fishes at a location that are 'mature' - JUST FOR INTEREST!
# NOTE(review): MCOUNT is zeroed for fish LARGER than Lm, so PMAT below is the
# proportion of fish at or below Lm (i.e. apparently immature) -- confirm whether
# the comparison or the 'mature' label is the intended one.
wd$MCOUNT<-wd$COUNT
wd[wd$SIZE_ > wd$Lm,]$MCOUNT<-0
tmp<-aggregate(wd[,c("COUNT","MCOUNT")],by=wd[,c(SPATIAL_BASE,"SPECIES")],FUN=sum)
tmp$PMAT<-round(tmp$MCOUNT/tmp$COUNT,2);tmp
cast(tmp, SPECIES ~ ISLAND, value="PMAT", sum, fill=NA)
#Drop fishes below a min size and Calculate mean size per species and locations of remaining observations
wd[wd$SIZE_ < MIN_TL,]$COUNT<-0
#see how many we have
#tmp<-droplevels(wd[wd$COUNT>0,])
#tmp<-aggregate(tmp$COUNT, by=tmp[,c("SPECIES","TAXONNAME"),],FUN=sum)
#names(tmp)<-c("SPECIES","TAXONNAME","CREDobs")
#tmp<-merge(tmp, bio, by="TAXONNAME",all.y=T); tmp
#compare mean size in obs with mean size in biosamples
#tmp2<-wd[wd$ISLAND=="Tutuila",]
#tmp2$COUNTLEN<-tmp2$COUNT*tmp2$SIZE_
#tmp3<-aggregate(tmp2[,c("COUNT", "COUNTLEN")], by=tmp2[,c("TAXONNAME","SPECIES", "ISLAND")],FUN=sum)
#tmp3<-tmp3[tmp3$COUNT>0,]
#tmp3$MEANSIZE<-tmp3$COUNTLEN/tmp3$COUNT
#tmp3
#tmp4<-merge(tmp3,tmp,by="TAXONNAME")'tmp4
Calc_MeanSize<-function(x, spatial_base=c("ISLAND", "REP_CARD_UNIT", "SEC_NAME", "ANALYSIS_STRATA"), min_obs=1){
  # Count-weighted mean observed size per species within each spatial unit.
  #   x            : records carrying COUNT, SIZE_ plus the spatial_base and SPECIES columns
  #   spatial_base : grouping columns that define a spatial unit
  #   min_obs      : minimum total COUNT needed to retain a species/unit cell
  # Returns one row per (spatial unit, species) with a MEAN_SIZE column.
  group_cols<-c(spatial_base, "SPECIES")
  x$CS<-x$COUNT*x$SIZE_  # size contribution weighted by number of fish
  totals<-aggregate(x[,c("COUNT", "CS")], by=x[,group_cols], FUN=sum)
  totals<-totals[totals$COUNT >= min_obs,]  # discard poorly-sampled cells
  totals$MEAN_SIZE<-totals$CS/totals$COUNT
  return(totals[,c(group_cols, "MEAN_SIZE")])
} # end Calc_MeanSize
# Mean size per island/species, expressed as a proportion of Lm (length at maturity)
ms<-Calc_MeanSize(wd[wd$COUNT>0,],spatial_base=SPATIAL_BASE, min_obs=1); head(ms);dim(ms)
ms<-merge(ms, species_table[,c("SPECIES","TAXONNAME","Lm")],by="SPECIES", all.x=T)
#ms<-merge(ms, bio[,c("TAXONNAME","RANK")],by="TAXONNAME", all.x=T)
ms$SZ_LM<-ms$MEAN_SIZE/ms$Lm  # >1 means average fish is above estimated maturity length
tmp<-cast(ms, SPECIES + Lm ~ ISLAND, value="SZ_LM", sum, fill=NA)
head(tmp)
write.csv(tmp,file="tmpMeanSizeWORKINGS.csv")
ave.ms<-aggregate(ms$SZ_LM, by=ms[,c(SPATIAL_BASE)],FUN=mean)  # unweighted mean across species
write.csv(ave.ms, file="AggMeanSizeAsPropofLM.csv")
ave.ms3<-ave.ms
ave.ms3[,"x"]<-ave.ms3[,"x"]^3  # cubing approximates a biomass-scaled version of the size ratio
write.csv(ave.ms3, file="RCAggMeanSizeAsPropofLMCubed.csv")
##INSTANTANEOUS BIOMASS####################################################################################################
wd<-WD_SAVE
## CALCULATE INSTANTANEOUS BIOMASS MINUS SHARKS AND JACKS
wd[!wd$OBS_TYPE %in% c("I"),]$COUNT<-0  # instantaneous counts only
SHARKS_JACKS<-c("Carangidae", "Carcharhinidae", "Ginglymostomatidae", "Sphyrnidae")
wd[wd$FAMILY %in% SHARKS_JACKS,]$COUNT<-0  # excluded: mobile species inflate site-level biomass
r1<-Calc_Site_Bio(wd, "TROPHIC_MONREP"); tmp.cols<-dimnames(r1)[[2]][3:dim(r1)[2]]  # biomass per trophic group per site
r1$TotINSTFishNoSJ<-rowSums(r1[,tmp.cols])
# species richness uses a fresh copy -- richness counts all obs types U/I/N
wd_rich<-WD_SAVE
wd_rich[!wd_rich$OBS_TYPE %in% c("U", "I", "N") ,]$COUNT<-0
#remove gobies and blennies and sp. species (assuming they are mostly juveniles of species that are already counted)
#r2X<-Modified_Site_Species_Richness(wd_rich)
wd_rich[wd_rich$FAMILY %in% c("Blenniidae", "Gobiidae"),]$COUNT<-0
unique(wd_rich$RANK)
#wd_rich[!wd_rich$RANK %in% c("Species", "Subspecies"),]$COUNT<-0 # not doing this for us, as we have lots of species that are clearly unique, just not identified to species level, so sp. data are probably not species taht are otherwise present in the cylinder
r2<-Modified_Site_Species_Richness(wd_rich)
wsd<-merge(surveys, r1, by=UNIQUE_SURVEY)  # wsd = working site data (one row per survey)
wsd<-merge(wsd, r2, by=UNIQUE_SURVEY)
data.cols<-c(tmp.cols, "TotINSTFishNoSJ", "SPECIESRICHNESS", SURVEY_SITE_DATA)
write.csv(wsd, file="tmp NCRMP working site data.csv")
####################################################################################################################################################################
#
# POOL UP
#
####################################################################################################################################################################
## Nearly always DOING THIS ONLY WITH nSPC data ####
wsd<-subset(wsd, wsd$METHOD=="nSPC")
wsd<-droplevels(wsd)
## check which ISLANDS differ between sectors and working data..
setdiff(unique(sectors$SEC_NAME), unique(wsd$SEC_NAME))
setdiff(unique(wsd$ANALYSIS_SEC), unique(sectors$SEC_NAME))
setdiff(unique(wsd$SEC_NAME), unique(sectors$SEC_NAME))
#FOREREEF ONLY AND GREATER THAN 2012
wsd<-wsd[wsd$REEF_ZONE %in% c("Forereef", "Protected Slope"),]
#wsd<-wsd[wsd$OBS_YEAR>2012,]
head(wsd)
# DETERMINE THE BASIC STRATIFICATION WITHIN SECTORS - DEFAULT IS REEF_ZONE AND DEPTH_BIN, BUT THIS CODE ALLOWS PSSIBILITY OF CHOOSING ANOTHER
sectors$ANALYSIS_STRATA<-paste(sectors$REEF_ZONE, sectors$DEPTH_BIN, sep='')
#generate table to be able to relate ANALYSIS_SEC to REP_CARD_UNIT (as we have one-off reporting units in this case)
#rcu<-aggregate(wsd$METHOD, by=wsd[,c("ISLAND", "REP_CARD_UNIT", "ANALYSIS_SEC")], FUN=length)
#.... Make Sarigan-Guguan-Alamagan be a single 'ISLAND'
# there MUST already be appropraite records in the sectors table for the new 'ISLAND' name, in this case will be "AGS"
levels(wsd$ISLAND)<-c(levels(wsd$ISLAND), "AGS")  # add the level first; factors reject unseen values
wsd[wsd$ISLAND %in% c("Sarigan", "Guguan", "Alamagan"),"ISLAND"]<-"AGS"
sectors[sectors$ISLAND %in% c("Sarigan", "Guguan", "Alamagan"),"ISLAND"]<-"AGS"
wsd<-droplevels(wsd)
# snapshots: the per-scheme loop below restarts from these each iteration
WSD_SAVED<-wsd
SECTORS_SAVED<-sectors
SPATIAL_POOLING_BASE<-c("REGION","ISLAND", "ANALYSIS_SEC", "ANALYSIS_STRATA", "REEF_ZONE")
POOLING_LEVEL<-c(SPATIAL_POOLING_BASE)
CURRENT_SCHEMES<-c("RAMP_BASIC", "MARI2014", "AS_SANCTUARY") #IGNORING YEAR = SO GOING TO THE FINEST SCALE IN EACH REGION
# For each sector scheme: restrict wsd to the surveys that scheme covers, compute
# per-stratum means/variances, then pool up to sector and island level (area-weighted)
# and write the results out per scheme.
for(i in 1:length(CURRENT_SCHEMES)){
wsd<-WSD_SAVED
sectors<-SECTORS_SAVED
wsd$ANALYSIS_SEC<-as.character(wsd$SEC_NAME)
CURRENT_SCHEME<-CURRENT_SCHEMES[i]
sectors$ANALYSIS_SEC<-sectors[,CURRENT_SCHEME]  # scheme column maps base sectors -> analysis sectors
# DETERMINE THE BASIC STRATIFICATION WITHIN SECTORS - DEFAULT IS REEF_ZONE AND DEPTH_BIN, BUT THIS CODE ALLOWS PSSIBILITY OF CHOOSING ANOTHER
sectors$ANALYSIS_STRATA<-paste(sectors$REEF_ZONE, sectors$DEPTH_BIN, sep='')
#now deal with those missing sectors - either rename ANALYSIS_SEC OR remove
if(CURRENT_SCHEME=="RAMP_BASIC") {
# wsd[wsd$ANALYSIS_SEC %in% c("PATI_PT_MPA", "ACHANG_MPA", "TUMON_BAY_MPA", "PITI_BOMB_MPA", "GUAM_MP_MINUS_ACHANG"),]$ANALYSIS_SEC<-"GUAM_MP"
# #in this case removing 2014 ACHANG_MPA sites (The shorebased ones) and changing ANALYSIS_SEC for all other GUAM MPA sectors to the RAMP base one "GUAM_MP", also remove SAMOA 2015 sites, they will run in AS_SANCTUARY 2015 and Tutuila 2010&012
# wsd<-wsd[!(wsd$ANALYSIS_SEC == "ACHANG_MPA" & wsd$ANALYSIS_YEAR==2014),]
wsd<-wsd[wsd$ISLAND != "Guam",]
wsd<-wsd[!wsd$REGION == "SAMOA",]
}
if(CURRENT_SCHEME=="MARI2014") {
wsd[wsd$ANALYSIS_SEC %in% c("PATI_PT_MPA", "TUMON_BAY_MPA", "PITI_BOMB_MPA"),]$ANALYSIS_SEC<-"GUAM_MP_MINUS_ACHANG"
wsd<-wsd[wsd$ISLAND %in% "Guam",]
}
if(CURRENT_SCHEME=="AS_SANCTUARY") {
wsd<-wsd[wsd$REGION == "SAMOA",]
wsd[wsd$ISLAND=="Tau",]$ANALYSIS_SEC<-"Tau" # Pooling Tau together HERE - I Think that there is no difference in management (its not actually closed) IF Tau IS IMPORTANT CHECK CHECK CHECK
} #in this case remove everything that isnt SAMOA surveyed in 2015
##DETERMINE WHICH SITES HAVE ANALYSIS STRATA THAT ARE NOT IN THIS
analysis_secs<-unique(wsd$ANALYSIS_SEC)
missing_secs<-unique(analysis_secs[!analysis_secs %in% unique(sectors$ANALYSIS_SEC)])
if(length(missing_secs)>0) {
cat("ANALYSIS SECTORS missing from this scheme:", missing_secs)
}
tmp<-aggregate(wsd[,"TotINSTFishNoSJ"],by=wsd[,c("REGION", "ISLAND", "ANALYSIS_SEC")], sum, na.rm=FALSE)
tmp[tmp$ANALYSIS_SEC %in% missing_secs,]
### CHECK REPLICATION WITHIN STRATA
tmp<-aggregate(wsd[,"METHOD"], by=wsd[,c(POOLING_LEVEL ,"SITE")], length)
tmp<-aggregate(tmp[,"x"], by=tmp[,c(POOLING_LEVEL)], length)  # number of sites per stratum
tmp<-merge(sectors, tmp[,c("ANALYSIS_SEC", "ANALYSIS_STRATA","x")],by=c("ANALYSIS_SEC", "ANALYSIS_STRATA"),all.y=TRUE)
names(tmp)[names(tmp)=="x"]<-"n_sites"
a<-cast(tmp, REGION + ISLAND + ANALYSIS_SEC ~ ANALYSIS_STRATA, value="n_sites", sum, fill=NA)
a
#clean up the sectors table so pool all sub sectors within a scheme into a total for this scheme's sectors
sectors<-aggregate(sectors[,"AREA_HA"], by=sectors[,c(SPATIAL_POOLING_BASE)], sum)
names(sectors)[names(sectors)=="x"]<-"AREA_HA"
#################################################################################################################################
############################################# NOW DO THE CALCAULTION OF WINHIN-STRATA AND POOLED UP DATA VALUES #################
#################################################################################################################################
ADDITIONAL_POOLING_BY<-c("METHOD") # additional fields that we want to break data at, but which do not relate to physical areas (eg survey year or method)
#generate within strata means and vars
POOLING_LEVEL<-c(SPATIAL_POOLING_BASE, ADDITIONAL_POOLING_BY)
data.per.strata<-Calc_PerStrata(wsd, data.cols, POOLING_LEVEL)
write.csv(data.per.strata,file=paste(CURRENT_SCHEME, "tmp strata data.csv", sep=""))
#save(data.per.strata, file=paste(CURRENT_SCHEME, "strata_data.rdata", sep=""))
###### REMOVE STRATA with N=1 (cannot pool those up)
# Mean/SampleVar/SampleSE are filtered in parallel; all carry the same N column
data.per.strata$Mean<-data.per.strata$Mean[data.per.strata$Mean$N>1,]
data.per.strata$SampleVar<-data.per.strata$SampleVar[data.per.strata$SampleVar$N>1,]
data.per.strata$SampleSE<-data.per.strata$SampleSE[data.per.strata$SampleSE$N>1,]
# e.g. SAVE BY ISLAND AND REEF ZONE PER YEAR
AGGREGATION_LEVEL<-c("REGION","ISLAND","ANALYSIS_SEC") # Spatial Level to agggregate output data to (eg per REGION or per (REGION, ISLAND) etc...
dp<-Calc_Pooled(data.per.strata$Mean, data.per.strata$SampleVar, data.cols, AGGREGATION_LEVEL, ADDITIONAL_POOLING_BY, SPATIAL_POOLING_BASE, sectors)
write.csv(dp,file=paste(CURRENT_SCHEME, "data_pooled_SEC.csv", sep=""))
save(dp, file=paste(CURRENT_SCHEME, "data_pooled_SEC.rdata", sep=""))
# e.g. SAVE BY ISLAND PER YEAR
AGGREGATION_LEVEL<-c("REGION","ISLAND") # Spatial Level to agggregate output data to (eg per REGION or per (REGION, ISLAND) etc...
dp<-Calc_Pooled(data.per.strata$Mean, data.per.strata$SampleVar, data.cols, AGGREGATION_LEVEL, ADDITIONAL_POOLING_BY, SPATIAL_POOLING_BASE, sectors)
write.csv(dp,file=paste(CURRENT_SCHEME, "data_pooled_is.csv", sep=""))
save(dp, file=paste(CURRENT_SCHEME, "data_pooled_is.rdata", sep=""))
}
#LOAD THE data per SCHEME
# Reload the three per-scheme island-level outputs and stack them into one combined
# Mean + PooledSE pair (each load replaces 'dp')
load("RAMP_BASICdata_pooled_is.rdata")
x<-dp
load("MARI2014data_pooled_is.rdata")
g<-dp
load("AS_SANCTUARYdata_pooled_is.rdata")
as<-dp
X<-x$Mean
G<-g$Mean
AS<-as$Mean
Mean<-rbind(X, G, AS)
X<-x$PooledSE
G<-g$PooledSE
AS<-as$PooledSE
PooledSE<-rbind(X, G, AS)
dp<-list(Mean, PooledSE)
names(dp)<-list("Mean", "PooledSE")
# NOTE(review): write.csv on a two-element list column-binds the Mean and PooledSE
# frames side by side -- verify that layout is what downstream consumers expect
write.csv(dp, file="CREP2010_16 RC Data.csv")
save(dp, file="CREP2010_16 RC Data.RData")
#################################################################### TOW WORKUP ###########################################################################
# IDW - NEED TO ADD IN SOME CHECKING OF THE MISSING DEPTHS ... AND MAYBE FILTERING AND SO ON ... CAN LIFT CODE FROM REPORT CARD I THINK
##save(df, file="ALL_TOW_FISH_RAW.rdata")
#write.table(df, file = "ALL_TOW_FISH_RAW.csv", sep = ",", col.names = NA, qmethod = "double")
load(file="/Users/ivor.williams/Documents/CRED/Fish Team/Base R/Base Data Files/ALL_TOW_FISH_RAW.rdata")  # loads 'df'
# FISH TOW WORKINGS -------------------------------------------------------
wtd<-df
#wtd<-subset(wtd, wtd$REGION=="SAMOA", drop=TRUE)
wtd<-subset(wtd, wtd$OBS_YEAR > 2009, drop=TRUE)
wtd<-droplevels(wtd)
# per-record biomass from length-weight conversion: W = a * (L * conversion)^b, times COUNT
wtd$biomass_g<-wtd$LW_A*wtd$COUNT*((wtd$SIZE*wtd$LENGTH_CONVERSION_FACTOR)^wtd$LW_B)
## drop any rows which have NOSC and MISS in the species field, these are tows which were aborted part way through
## remove these so that the tow length is corrected....
# BUG FIX: the original removed rows indexed by 'nosc' twice (second removal used
# stale indices into the already-shrunk frame) and never removed the MISS rows.
# Also guard against empty index vectors: in R, x[-integer(0),] selects ZERO rows,
# which would silently wipe out the whole data frame.
nosc<-which(wtd$SPECIES == "NOSC")
if(length(nosc) > 0) wtd<-wtd[-nosc,]
miss<-which(wtd$SPECIES == "MISS")
if(length(miss) > 0) wtd<-wtd[-miss,]
#wtd<-merge(wtd, tow_ns[,c("DIVEID", "RCU")], by="DIVEID", all.x=T)
length(unique(wtd$DIVEID))
# zero-fill numeric NAs so the segment aggregation below does not propagate NA
wtd[is.na(wtd$COUNT),]$COUNT<-0
wtd[is.na(wtd$DEPTH),]$DEPTH<-0
wtd[is.na(wtd$SIZE_),]$SIZE_<-0
wtd[is.na(wtd$CENTROIDLAT),]$CENTROIDLAT<-0
wtd[is.na(wtd$CENTROIDLON),]$CENTROIDLON<-0
# Map factor NAs to "UNKNOWN" (add the level first, factors reject unseen values)
tmp.lev<-levels(wtd$REEF_ZONE); head(tmp.lev)
levels(wtd$REEF_ZONE)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(wtd$FAMILY); head(tmp.lev)
levels(wtd$FAMILY)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(wtd$TAXONNAME); head(tmp.lev)
levels(wtd$TAXONNAME)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(wtd$TROPHIC_MONREP); head(tmp.lev)
levels(wtd$TROPHIC_MONREP)<-c(tmp.lev, "UNKNOWN")
wtd[is.na(wtd$REEF_ZONE),"REEF_ZONE"]<-"UNKNOWN"
wtd[is.na(wtd$TAXONNAME),"TAXONNAME"]<-"UNKNOWN"
wtd[is.na(wtd$FAMILY),"FAMILY"]<-"UNKNOWN"
wtd[is.na(wtd$TROPHIC_MONREP),"TROPHIC_MONREP"]<-"UNKNOWN"
wtd$REP_CARD_UNIT<-wtd$ISLAND  # report-card unit defaults to island
#levels(wtd$REP_CARD_UNIT)<-c(levels(wtd$REP_CARD_UNIT), "TUT_N", "TUT_S")
#wtd[wtd$ISLAND=="Tutuila",]$REP_CARD_UNIT<-wtd[wtd$ISLAND=="Tutuila",]$RCU
#summarize tow information (length, depth, lat-long, date)
# first do that by segment
TOW_DATA<-c("REGION", "ISLAND", "REP_CARD_UNIT", "CENTROIDLAT", "CENTROIDLON", "DATE_", "DEPTH", "STARTLOCALTIME", "STRATA", "PROJECTEDLENGTH", "DIVEID")
SEGMENT_ID2<-c( "DIVEID", "SEGMENT")
SEGMENT_INFO<-c("REGION", "ISLAND", "REP_CARD_UNIT", "DATE_", "OBS_YEAR")
SEGMENT_INFO_TO_SUM<-c("PROJECTEDLENGTH")
SEGMENT_INFO_TO_AVE<-c("CENTROIDLAT", "CENTROIDLON", "DEPTH")
SEGMENT_INFO_TO_MIN<-c("STARTLOCALTIME")
SEGMENT_INFO_TO_MODE<-c("REEF_ZONE")
SEGMENT_FIELDS<-c(SEGMENT_INFO, SEGMENT_INFO_TO_SUM, SEGMENT_INFO_TO_AVE, SEGMENT_INFO_TO_MODE, SEGMENT_ID2)
DIVE_INFO<-c("DIVEID", SEGMENT_INFO)
WTD_SAVE<-wtd
MIN_SIZE<-100  # cm: only sharks >= 1 m total length are retained below
#only sharks > 1m
SHARKS<-c("Carcharhinidae", "Ginglymostomatidae")
# zero out SIZE_/COUNT/biomass for everything that is not a large shark
# (order matters: SIZE_ is zeroed first, so the < MIN_SIZE test also catches those rows)
wtd[is.na(wtd$SIZE_) | !wtd$FAMILY %in% SHARKS,]$SIZE_<-0
wtd[wtd$SIZE_< MIN_SIZE | !wtd$FAMILY %in% SHARKS,]$COUNT<-0
wtd[wtd$SIZE_< MIN_SIZE | !wtd$FAMILY %in% SHARKS,]$biomass_g<-0
levels(wtd$FAMILY)<-c(levels(wtd$FAMILY), "OTHER")
wtd[wtd$SIZE_< MIN_SIZE | !wtd$FAMILY %in% SHARKS,]$FAMILY<-"OTHER"
wtd<-droplevels(wtd)
sum(wtd[!is.na(wtd$COUNT),]$COUNT)
length(unique(wtd$DIVEID))
#clean up the data file ## adel comment: this creates 14 warnings.... ### return to this, extract numeric only columns
##- invalid for factors with NA entries
#wtd[is.na(wtd$COUNT),]$COUNT<-0
#wtd[is.na(wtd$biomass_g),]$biomass_g<-0
#wtd[is.na(wtd$BIOMASS),]$BIOMASS<-0
#wtd[is.na(wtd$BIOMASS_G_M2),]$BIOMASS_G_M2<-0
segment.info<-aggregate(wtd$COUNT, by=wtd[,SEGMENT_FIELDS], sum, na.rm=F)## aggregate sums total count of all fishes per record, using field_list
segment.info<-segment.info[,SEGMENT_FIELDS] # drop the count - was just using that to generate a summary table
length(unique(segment.info$DIVEID))
setdiff(wtd$DIVEID,segment.info$DIVEID)
#sum up to total length etc.. for the dive ID
#set depth, and centroid lat-long field to NaN if zero ...
# (zeros were NA placeholders above; NaN keeps them out of the means via na.rm=TRUE)
segment.info[segment.info$DEPTH==0,"DEPTH"]<-NaN
segment.info[segment.info$CENTROIDLAT==0,"CENTROIDLAT"]<-NaN
segment.info[segment.info$CENTROIDLON==0,"CENTROIDLON"]<-NaN
sum.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_SUM],by=segment.info[,DIVE_INFO], sum, na.rm=TRUE);
dimnames(sum.segments)[[2]]<-c(DIVE_INFO, SEGMENT_INFO_TO_SUM)
ave.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_AVE],by=segment.info[,DIVE_INFO], mean, na.rm=TRUE)
med.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_AVE],by=segment.info[,DIVE_INFO], median, na.rm=TRUE)
mode.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_MODE],by=segment.info[,DIVE_INFO], Mode)
dimnames(mode.segments)[[2]]<-c(DIVE_INFO, SEGMENT_INFO_TO_MODE)
tt<-merge(ave.segments, mode.segments[,c("DIVEID",SEGMENT_INFO_TO_MODE)], by="DIVEID")
dive.info<-merge(tt, sum.segments[,c("DIVEID",SEGMENT_INFO_TO_SUM)], by="DIVEID")  # one row per tow
dim(dive.info)
write.csv(dive.info, file="tmp Tows.csv")
############################################################
### Now sum abundance and biomass data per species per dive,
### and convert to gm2 and abund m2
############################################################
#Pull all species information into a separate df, for possible later use ..
FISH_SPECIES_FIELDS<-c("SPECIES","FAMILY", "TAXONNAME", "TROPHIC_MONREP")
t.species.table<-aggregate(wtd$COUNT,by=wtd[,FISH_SPECIES_FIELDS], sum, na.rm=FALSE)
sum.abund.bio<-aggregate(wtd[,c("COUNT", "biomass_g")],by=wtd[,c("DIVEID", "FAMILY")], sum, na.rm=TRUE)
dim(sum.abund.bio)
t.fish.data<-merge(sum.abund.bio, dive.info[,c("DIVEID","PROJECTEDLENGTH")], by="DIVEID")
# tow swath assumed 10 m wide: area (m2) = 10 * projected tow length (m)
t.fish.data$BIOGM2<-t.fish.data$biomass_g / (10*t.fish.data$PROJECTEDLENGTH)
t.fish.data$ABUN2<-t.fish.data$COUNT / (10*t.fish.data$PROJECTEDLENGTH)
dim(t.fish.data)
## add consumer group to tow data, filter to forereef ONLY, add depth to give option to later filter by depth range .. then pool up by island & year and save SE
# add consumer group (and family, inc ase it is useful later) to t.fish.data
#x.fish.data<-merge(t.fish.data, t.species.table[, FISH_SPECIES_FIELDS], by="SPECIES")
# add data about the tow (island, zone, year, depth)
x.fish.data<-merge(t.fish.data, dive.info[, c("DIVEID", "REGION", "ISLAND", "REP_CARD_UNIT", "REEF_ZONE", "OBS_YEAR", "DEPTH")], by="DIVEID")
dim(x.fish.data)
write.csv(x.fish.data, file="TMPtowData.csv")
#filter out forereef tows only...!!!!!
#x.fish.data<-subset(x.fish.data, x.fish.data$REEF_ZONE=="Forereef", drop=TRUE)
t.fish.data<-(x.fish.data)
xx<-aggregate(t.fish.data$ABUN2, by=t.fish.data[,c("DIVEID", "REGION", "ISLAND", "REP_CARD_UNIT", "OBS_YEAR", "REEF_ZONE", "FAMILY")], sum, na.rm=TRUE)
dimnames(xx)[[2]]<-c("DIVEID", "REGION", "ISLAND", "REP_CARD_UNIT", "YEAR", "STRATA", "FAMILY", "ABUN2")
#now format this more or less as a crosstab, with field of interest as column variable
t.fish.abund<-cast(xx, DIVEID + REGION + ISLAND + REP_CARD_UNIT + YEAR + STRATA ~ FAMILY, value="ABUN2", fill=0)
# NOTE(review): levels(xx$FAMILY) is NULL if FAMILY came through as character rather
# than factor, which would break this rowSums -- confirm FAMILY is a factor here
t.fish.abund$TotSharkAbund<-rowSums(t.fish.abund[,levels(xx$FAMILY)])
SHARK_COLS<-c("Carcharhinidae", "TotSharkAbund")
dim(t.fish.abund)
#aggregate - average per island/strata/year
t.fish.island.mean<-aggregate(t.fish.abund[,SHARK_COLS],by=t.fish.abund[,c("REGION", "ISLAND", "REP_CARD_UNIT", "STRATA", "YEAR")], mean, na.rm=TRUE)
t.fish.island.n<-aggregate(t.fish.abund[,SHARK_COLS],by=t.fish.abund[,c("REGION", "ISLAND", "REP_CARD_UNIT", "STRATA", "YEAR")], length)
t.fish.island.var<-aggregate(t.fish.abund[,SHARK_COLS],by=t.fish.abund[,c("REGION", "ISLAND", "REP_CARD_UNIT", "STRATA", "YEAR")], var, na.rm=TRUE)
t.fish.island.se<-sqrt(t.fish.island.var[,SHARK_COLS])/sqrt(t.fish.island.n$TotSharkAbund)  # SE = sd / sqrt(n)
# add the N to the mean and se dfs before writing them
t.fish.island.mean$n<-t.fish.island.se$n<-t.fish.island.n$TotSharkAbund
write.csv(t.fish.island.mean, file="TMP tow_fish_shark abund100.csv")
write.csv(t.fish.island.se, file="tow_fish_shark abund_se.csv")
###################################################################
# using only 2010 onwards .. pool across any multiple years of surveys .. weighting each year's data equally
######## this is rough - but works for now! #############
island.data<-t.fish.island.mean
island.data<-subset(island.data, island.data$STRATA %in% c("Forereef", "Protected Slope"), drop=TRUE)
island.data<-subset(island.data, island.data$YEAR>2009, drop=TRUE)
island.data<-droplevels(island.data)
idw<-aggregate(island.data[,SHARK_COLS],by=island.data[,c("REGION","ISLAND", "REP_CARD_UNIT")], mean, na.rm=TRUE)
#convert abund in m2 to Ha
idw[,SHARK_COLS]<-idw[,SHARK_COLS]*10000
write.csv(idw, file="RCtow fish 2010on forereef equallyweighted.csv")
## GENERATE COUNTS PER REP FROM THE BASE WORKING DATA ####################################################################################################
wd<-WD_SAVE
## species-by-replicate count crosstab for all standard observation types
wd[!wd$OBS_TYPE %in% c("I", "U", "N"),]$COUNT<-0
wd<-droplevels(wd)
tmp<-cast(wd, OBS_YEAR + ISLAND + REP_CARD_UNIT + ANALYSIS_STRATA + LATITUDE + LONGITUDE + SITE + REP + REPLICATEID + DIVER ~ SPECIES, value="COUNT", sum, fill=0); head(tmp)
write.csv(tmp, file="tmp AS Counts data.csv")
|
/munge_RepCard/W17 RepCard Fish Data.R
|
no_license
|
kaylynmccoy/fish-paste
|
R
| false
| false
| 31,942
|
r
|
# NCRMP report-card fish indicator workup (2017).
# Relies on helper functions (SiteNumLeadingZeros, Aggregate_InputTable, Calc_Site_Bio,
# Calc_PerStrata, Calc_Pooled, Mode, ...) defined in the two sourced files below.
rm(list=ls())
setwd("/Users/ivor.williams/Documents/CRED/CRCP/NCRMP/Report Card Workshop/Fish Indicators 2017")
# SET UP ------------------------------------------------------------------
library(gdata)             # needed for drop_levels()
library(reshape)           # reshape library includes the cast() function used below
source("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/lib/fish_team_functions.R")
source("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/lib/Islandwide Mean&Variance Functions.R")
Modified_Site_Species_Richness<-function(x){
  # Species richness per survey: the number of species with COUNT > 0, averaged
  # across the survey's REPs.  (A standard richness calc would count zero-count
  # records too; those rows may exist purely so every site has data records.)
  #   x : records carrying SITEVISITID, METHOD, REP, SPECIES, COUNT
  # Returns a data frame with SITEVISITID, METHOD, SPECIESRICHNESS.
  per_species<-aggregate(x$COUNT,by=x[,c("SITEVISITID", "METHOD", "REP", "SPECIES")], sum)
  per_species$x[per_species$x>1]<-1  # presence/absence: any positive total scores 1
  per_rep<-aggregate(per_species$x,by=per_species[,c("SITEVISITID", "METHOD", "REP")], sum)
  out<-aggregate(per_rep$x,by=per_rep[,c("SITEVISITID", "METHOD")], mean)
  dimnames(out)[[2]]<-c("SITEVISITID", "METHOD", "SPECIESRICHNESS")
  return(out)
} # end Modified_Site_Species_Richness
# Load site master (site -> sector/strata assignments) and sector-area lookup tables
sm<-read.csv("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/data/SITE MASTER2016.csv")
sm$SITE<-SiteNumLeadingZeros(sm$SITE)  # normalize SITE ids (zero-padded) so later merges match
sectors<-read.csv("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/data/Sectors-Strata-Areas2016.csv", stringsAsFactors=FALSE)
# FISH REA WORKINGS ----------------------------------------------------------------
load("/Users/ivor.williams/Documents/CRED/Fish Team/FishPaste/fish-paste/data/ALL_REA_FISH_RAW.rdata")  # loads data frame 'df'
x<-df
# HOUSEKEEPING ------------------------------------------------------------
# clean up the data to only fields we currently use
DATA_COLS<-c("SITEVISITID", "METHOD", "DATE_", "OBS_YEAR", "SITE", "REEF_ZONE", "DEPTH_BIN", "ISLAND", "LATITUDE", "LONGITUDE", "REGION" , "REGION_NAME", "SECTOR", "SPECIAL_AREA", "EXCLUDE_FLAG",
"REP", "REPLICATEID", "DIVER", "HABITAT_CODE", "DEPTH",
"HARD_CORAL", "MA", "TA", "CCA", "SAND", "SOFT_CORAL", "CLAM" , "SPONGE", "CORALLIMORPH", "CYANO", "TUNICATE", "ZOANTHID" , "OTHER", "COMPLEXITY", "TRAINING_YN", "VISIBILITY",
"SPECIES", "COUNT", "SIZE_", "OBS_TYPE",
"SUBSTRATE_HEIGHT_0", "SUBSTRATE_HEIGHT_20", "SUBSTRATE_HEIGHT_50", "SUBSTRATE_HEIGHT_100", "SUBSTRATE_HEIGHT_150", "MAX_HEIGHT",
"RANK", "SCIENTIFIC_NAME", "TAXONNAME", "COMMONNAME", "GENUS", "FAMILY", "COMMONFAMILYALL", "LMAX", "LW_A", "LW_B", "LENGTH_CONVERSION_FACTOR", "TROPHIC", "TROPHIC_MONREP")
head(x[,DATA_COLS])
x<-x[,DATA_COLS]
# by default, remove sites with EXCLUDE_FLAG set to TRUE
x[is.na(x$TRAINING_YN),]$TRAINING_YN<-FALSE # Training flag of NA is equivalent to a FALSE .. as none of the older data was 'training data'
x<-subset(x, x$TRAINING_YN==FALSE)
x<-subset(x, x$EXCLUDE_FLAG==0, drop=TRUE)
x<-subset(x, x$OBS_TYPE %in% c("U","I","N", "F","T"))  # keep only standard nSPC observation types
#x<-subset(x, x$REGION=="SAMOA")
x<-subset(x, x$REGION != "CT")
x<-subset(x, x$METHOD %in% c("nSPC"))
x<-subset(x, x$OBS_YEAR >2009)  # analysis window: 2010 onwards
x<-subset(x, x$REEF_ZONE %in% c("Forereef", "Protected Slope"))
x<-subset(x, x$ISLAND!="South Bank")
#x<-subset(x, x$OBS_YEAR != 2016)
x$SITE<-SiteNumLeadingZeros(x$SITE)  # same SITE normalization as the site master, so the merge below works
x<-droplevels(x)
#add SEC_NAME to x
# this would be better if SECTOR field in database was up to date properly .. rather than merge with the site_Sectors spreadsheet
x<-merge(x, sm[,c("SITE", "SEC_NAME", "ANALYSIS_SEC", "ANALYSIS_STRATA", "ANALYSIS_SCHEME")], by="SITE", all.x=TRUE)
#for ones that are missing SEC_NAME, set it to ISLAND
no_secs<-is.na(x$SEC_NAME)
tmp<-as.character(x$SEC_NAME)
tmp[no_secs]<-as.character(x[no_secs,]$ISLAND)
x$SEC_NAME<-tmp
table(x$SEC_NAME)
############################################################################################
# remove the component SUBSTRATE_HEIGHT fields
sh_out<-CalcMeanSHMeanSHDiff(x)
x$MEAN_SH<-sh_out[[1]]
x$MEAN_SH_DIFF<-sh_out[[2]]
x<-x[, setdiff(names(x),c("SUBSTRATE_HEIGHT_0", "SUBSTRATE_HEIGHT_20", "SUBSTRATE_HEIGHT_50", "SUBSTRATE_HEIGHT_100", "SUBSTRATE_HEIGHT_150"))]
############################################################################################
x<-droplevels(x)
#######################
## CLEAN UP NAs #######
#######################
# List species that still lack an LMAX so missing life-history data is visible in the console
x[is.na(x$LMAX),]$SPECIES
# Patch known gaps in the species life-history table
x[is.na(x$LMAX) & x$SPECIES=="GYMI",]$LMAX<-45
x[is.na(x$TROPHIC_MONREP) & x$SPECIES=="ABNO",]$TROPHIC_MONREP<-"PLANKTIVORE"
# For each categorical field, add an explicit "UNKNOWN" level and use it to replace NAs.
# (Replaces eight copy-pasted three-line stanzas with a single loop; behavior is identical:
# the level is appended and NA entries become "UNKNOWN".)
UNKNOWN_FACTOR_COLS<-c("HABITAT_CODE", "SCIENTIFIC_NAME", "COMMONNAME", "GENUS",
"FAMILY", "COMMONFAMILYALL", "TROPHIC_MONREP", "RANK")
for (fc in UNKNOWN_FACTOR_COLS) {
levels(x[[fc]])<-c(levels(x[[fc]]), "UNKNOWN")
x[is.na(x[[fc]]), fc]<-"UNKNOWN"
}
# Numeric observation fields: treat missing counts/sizes as zero
x$COUNT[is.na(x$COUNT)]<-0
x$SIZE_[is.na(x$SIZE_)]<-0
#fixing unknown lat/long from sites surveyed by Val Brown in Guam in 2015. These values are probably close
# .. putting this in here so that we do not have NAs in the LAT and LONG .. but we do not want to save these to the actual master data file
x[x$SITE=="GUA-01310",]$LATITUDE<-13.24173
x[x$SITE=="GUA-01310",]$LONGITUDE<-144.70428
wd<-droplevels(x)
# working data `wd`: one row per fish record, with analysis year/strata attached
wd$ANALYSIS_YEAR<-wd$OBS_YEAR
wd$ANALYSIS_STRATA<-paste(wd$REEF_ZONE, wd$DEPTH_BIN, sep="")
#island_table<-Aggregate_InputTable(wd, c("REGION","ISLAND"))
# lump the minor benthic cover categories into a single OTHER_BENTHIC total
OTHER_BENTHIC<-c("CLAM", "CORALLIMORPH", "ZOANTHID", "TUNICATE", "SPONGE", "OTHER", "CYANO", "TA")
wd$OTHER_BENTHIC<-rowSums(wd[,OTHER_BENTHIC],na.rm=T)
SURVEY_SITE_DATA<-c("DEPTH", "HARD_CORAL", "SAND", "MA", "CCA", "MEAN_SH")
#NOTE THAT BENTHOS DOES NOT ALWAYS SUM TO 100% .. I THINK BECAUSE OF ERRORS IN THE ORIGINAL DATA ENTERED INTO THE DATABASE. NEW CODE BELOW IS AN ATTEMPT TO FIX THAT
# Go through all surveys checking for situation where some reps have NAs in a particular BENTHIC_FIELDS, but other records have non-zeros - in that situation, we were recording a field but one other diver left it blank - those should be zeros not NAs
# this is something that should really be fixed in the database rather than here (as its an error at time of data entry)
#### BELOW code does the job, but should be cleaned up and put in a function
### IDW- COULD GREATLY SPEED THIS UP BY DOING IT FOR A REGION AND YEAR .. AND LIMIT TO ONLY nSPC
# i.e make the checking for some data and some NAs at the levels of a full survey round .. and also use indexes into the wd structure, rather than create temp dfs (tmp_data)
BENTHIC_FIELDS<-c("HARD_CORAL", "MA", "TA", "CYANO", "CCA", "SAND", "OTHER")
UNIQUE_ROUND<-c("REGION", "OBS_YEAR", "METHOD")
round_table<-Aggregate_InputTable(wd, UNIQUE_ROUND)
wd$countBD<-apply(wd[,BENTHIC_FIELDS], 1, function(xx) length(which(!is.na(xx)))) #IDW 10-22-2013 checking for situation where there is NO benthic data at all
# For each survey round (region x year x method), convert NAs to 0 in any benthic field
# that has at least one recorded value in that round (diver left the field blank).
for(i in 1:dim(round_table)[1])
{
if(round_table[i,"METHOD"]=="nSPC")
{
tmp_data<-wd[wd$OBS_YEAR==round_table[i,"OBS_YEAR"] & wd$METHOD==round_table[i,"METHOD"] & wd$REGION==round_table[i,"REGION"],]
#go through BENTHIC_FIELDS, checking whether there are some NAs and some data values
for(j in 1:length(BENTHIC_FIELDS))
{
## IF there are both non NAs and NAs
if(length(tmp_data[!is.na(tmp_data[,BENTHIC_FIELDS[j]]),BENTHIC_FIELDS[j]]) > 0
& length(tmp_data[is.na(tmp_data[,BENTHIC_FIELDS[j]]),BENTHIC_FIELDS[j]]) > 0)
{
#set all NAs of that field to 0
tmp_data[is.na(tmp_data[,BENTHIC_FIELDS[j]]),BENTHIC_FIELDS[j]]<-0
#now rewrite the benthic fields with NAs converted to zeros
wd[wd$OBS_YEAR==round_table[i,"OBS_YEAR"] & wd$METHOD==round_table[i,"METHOD"] & wd$REGION==round_table[i,"REGION"],BENTHIC_FIELDS[j]]<-tmp_data[,BENTHIC_FIELDS[j]]
}
}
}
}
# now reset zeros to NAs for all records where there was NO benthic data at all
wd[wd$countBD==0,BENTHIC_FIELDS]<-NA
#generate Lm (length at maturity) values from LMAX
#Fish Species Table
# RAYFINNED FISHES: log10(Lm) = -0.1189 + 0.9157 * log10(Lmax) (Binohlan and Froese J Appl Ichthyology 2009)
# ELASMOBRANCHS: log10(Lm) = -0.1246 + 0.9924 * log10(Lmax)
# [Carcharhinidae, Dasyatidae, Ginglymostomatidae, Myliobatidae]
#######
wd$Lm<-10^(-0.1189+(0.9157*log10(wd$LMAX)))
ELASMO<-c("Carcharhinidae", "Dasyatidae", "Ginglymostomatidae", "Myliobatidae")
wd[wd$FAMILY %in% ELASMO,]$Lm<-10^(-0.1246+(0.9924*log10(wd[wd$FAMILY %in% ELASMO,]$LMAX)))
wd<-droplevels(wd)
# snapshot of the cleaned working data; later sections restore from this before re-filtering
WD_SAVE<-wd
# WORKING WITH POOLING READY DATA FROM HERE ON -------------------------------------
#base information about the survey - field names should match those in input file (obviously!)
UNIQUE_SURVEY<-c("SITEVISITID","METHOD")
UNIQUE_REP<-c(UNIQUE_SURVEY, "REP")
UNIQUE_COUNT<-c(UNIQUE_REP, "REPLICATEID")
#get base survey info, calculate average depth+complexity+so on
SURVEY_INFO<-c("OBS_YEAR", "REGION", "REGION_NAME", "ISLAND", "SITE", "DATE_", "REEF_ZONE", "DEPTH_BIN", "LATITUDE", "LONGITUDE", "SEC_NAME", "ANALYSIS_SEC", "ANALYSIS_YEAR", "ANALYSIS_STRATA", "EXCLUDE_FLAG", "SITEVISITID", "METHOD")
survey_table<-Aggregate_InputTable(wd, SURVEY_INFO)
#write.csv(survey_table, file="tmpSamoaSites.csv")
survey_est_benthos<-Calc_Site_nSurveysArea(wd, UNIQUE_SURVEY, UNIQUE_REP, UNIQUE_COUNT, SURVEY_SITE_DATA) #Calc_Site_nSurveysArea deals better with situations where one REP has benthic data and other doesnt.
surveys<-merge(survey_table, survey_est_benthos, by=UNIQUE_SURVEY)
write.csv(surveys, file="tmpSurveys2010_16.csv")
#Pull all species information into a separate df, for later use ..
FISH_SPECIES_FIELDS<-c("SPECIES","TAXONNAME", "FAMILY", "COMMONFAMILYALL", "TROPHIC", "TROPHIC_MONREP", "LW_A", "LW_B", "LMAX", "LENGTH_CONVERSION_FACTOR", "Lm")
species_table<-Aggregate_InputTable(wd, FISH_SPECIES_FIELDS)
write.csv(species_table, file="tmpSpeciesTable.csv")
# GENERATE SUMMARY METRICS --------------------------------------------------
## MEAN SIZE OF TARGET SPECIES # MUST BE 15cm OR LARGER AND AT LEAST 40% of LMAX ##########################################
# NOTE(review): the header says "AT LEAST 40% of LMAX" but MINSIZE_PROP_CUT_OFF below is 0.3 (30%) - confirm which is intended
wd<-WD_SAVE
MINSIZE_PROP_CUT_OFF<-0.3
BIO_RANK_CUT_OFF<-25
MIN_TL<-15
SPATIAL_BASE<-c("REGION","ISLAND")
#head(bio)
TARGET_FAMILIES<-c("Acanthuridae", "Mullidae", "Scaridae", "Holocentridae") #Siganidae? Priacanthidae? Carangidae? Serranidae? Lutjanidae? Leethrinidae?
#TARGET_FAMILIES<-c("Scaridae") #FOR PURPOSES OF GENERATING MANAGEABLE SPECIES FOR DEMONSTRATION
tgt_species<-unique(wd[wd$FAMILY %in% TARGET_FAMILIES & wd$LMAX > 30,]$SPECIES) # Perhaps ALSO use species that are only present at more than X% of sites in The region? BEST TO HAVE have defined list for each region
#LIMIT THIS TO TOP SPECIES .. drop records of species not in top ranked speices from bio sampling data
#bio<-bio[!bio$RANK>BIO_RANK_CUT_OFF,]
wd[!wd$SPECIES %in% tgt_species,]$COUNT<-0
#remove fishes that are very small (do not want to penalize location for having large recruitment!, also recruitment variability will add noise)
wd[wd$SIZE_ < wd$LMAX*MINSIZE_PROP_CUT_OFF,]$COUNT<-0
wd<-wd[wd$COUNT>0,]
wd<-droplevels(wd)
#Calculate portion of fishes at a location that are 'mature' - JUST FOR INTEREST!
wd$MCOUNT<-wd$COUNT
# NOTE(review): this zeroes MCOUNT for fish LARGER than Lm, so PMAT below is the
# proportion of fish at or below Lm (i.e. apparently immature) despite the 'mature'
# label - confirm whether the comparison should be `<` instead of `>`
wd[wd$SIZE_ > wd$Lm,]$MCOUNT<-0
tmp<-aggregate(wd[,c("COUNT","MCOUNT")],by=wd[,c(SPATIAL_BASE,"SPECIES")],FUN=sum)
tmp$PMAT<-round(tmp$MCOUNT/tmp$COUNT,2);tmp
cast(tmp, SPECIES ~ ISLAND, value="PMAT", sum, fill=NA)
#Drop fishes below a min size and Calculate mean size per species and locations of remaining observations
wd[wd$SIZE_ < MIN_TL,]$COUNT<-0
#see how many we have
#tmp<-droplevels(wd[wd$COUNT>0,])
#tmp<-aggregate(tmp$COUNT, by=tmp[,c("SPECIES","TAXONNAME"),],FUN=sum)
#names(tmp)<-c("SPECIES","TAXONNAME","CREDobs")
#tmp<-merge(tmp, bio, by="TAXONNAME",all.y=T); tmp
#compare mean size in obs with mean size in biosamples
#tmp2<-wd[wd$ISLAND=="Tutuila",]
#tmp2$COUNTLEN<-tmp2$COUNT*tmp2$SIZE_
#tmp3<-aggregate(tmp2[,c("COUNT", "COUNTLEN")], by=tmp2[,c("TAXONNAME","SPECIES", "ISLAND")],FUN=sum)
#tmp3<-tmp3[tmp3$COUNT>0,]
#tmp3$MEANSIZE<-tmp3$COUNTLEN/tmp3$COUNT
#tmp3
#tmp4<-merge(tmp3,tmp,by="TAXONNAME")'tmp4
# Compute the count-weighted mean size per species within each spatial grouping.
#
# x            data frame with COUNT and SIZE_ columns plus all grouping columns
# spatial_base character vector of grouping column names (SPECIES is always appended)
# min_obs      drop groups whose summed COUNT is below this threshold
#
# Returns a data frame with the grouping columns plus MEAN_SIZE
# (sum(COUNT * SIZE_) / sum(COUNT) within each group).
Calc_MeanSize<-function(x, spatial_base=c("ISLAND", "REP_CARD_UNIT", "SEC_NAME", "ANALYSIS_STRATA"), min_obs=1){
  grouping_cols <- c(spatial_base, "SPECIES")
  # count-weighted size per record, summed within groups below
  x$CS <- x$COUNT * x$SIZE_
  pooled <- aggregate(x[, c("COUNT", "CS")], by = x[, grouping_cols], FUN = sum)
  # keep only groups with enough observations
  pooled <- pooled[pooled$COUNT >= min_obs, ]
  pooled$MEAN_SIZE <- pooled$CS / pooled$COUNT
  pooled[, c(grouping_cols, "MEAN_SIZE")]
} # end Calc_MeanSize
# mean size per species per (REGION, ISLAND), expressed as a proportion of length-at-maturity
ms<-Calc_MeanSize(wd[wd$COUNT>0,],spatial_base=SPATIAL_BASE, min_obs=1); head(ms);dim(ms)
ms<-merge(ms, species_table[,c("SPECIES","TAXONNAME","Lm")],by="SPECIES", all.x=T)
#ms<-merge(ms, bio[,c("TAXONNAME","RANK")],by="TAXONNAME", all.x=T)
ms$SZ_LM<-ms$MEAN_SIZE/ms$Lm
tmp<-cast(ms, SPECIES + Lm ~ ISLAND, value="SZ_LM", sum, fill=NA)
head(tmp)
write.csv(tmp,file="tmpMeanSizeWORKINGS.csv")
# average SZ_LM across species per island; cubed version approximates a biomass-like index
ave.ms<-aggregate(ms$SZ_LM, by=ms[,c(SPATIAL_BASE)],FUN=mean)
write.csv(ave.ms, file="AggMeanSizeAsPropofLM.csv")
ave.ms3<-ave.ms
ave.ms3[,"x"]<-ave.ms3[,"x"]^3
write.csv(ave.ms3, file="RCAggMeanSizeAsPropofLMCubed.csv")
##INSTANTANEOUS BIOMASS####################################################################################################
wd<-WD_SAVE
## CALCULATE INSTANTANEOUS BIOMASS MINUS SHARKS AND JACKS
# keep only instantaneous ("I") observations, then zero out sharks and jacks
wd[!wd$OBS_TYPE %in% c("I"),]$COUNT<-0
SHARKS_JACKS<-c("Carangidae", "Carcharhinidae", "Ginglymostomatidae", "Sphyrnidae")
wd[wd$FAMILY %in% SHARKS_JACKS,]$COUNT<-0
# Calc_Site_Bio (fish-paste helper) returns per-site biomass by trophic group; data columns start at column 3
r1<-Calc_Site_Bio(wd, "TROPHIC_MONREP"); tmp.cols<-dimnames(r1)[[2]][3:dim(r1)[2]]
r1$TotINSTFishNoSJ<-rowSums(r1[,tmp.cols])
# species richness from the full count types (U/I/N)
wd_rich<-WD_SAVE
wd_rich[!wd_rich$OBS_TYPE %in% c("U", "I", "N") ,]$COUNT<-0
#remove gobies and blennies and sp. species (assuming they are mostly juveniles of species that are already counted)
#r2X<-Modified_Site_Species_Richness(wd_rich)
wd_rich[wd_rich$FAMILY %in% c("Blenniidae", "Gobiidae"),]$COUNT<-0
unique(wd_rich$RANK)
#wd_rich[!wd_rich$RANK %in% c("Species", "Subspecies"),]$COUNT<-0 # not doing this for us, as we have lots of species that are clearly unique, just not identified to species level, so sp. data are probably not species taht are otherwise present in the cylinder
r2<-Modified_Site_Species_Richness(wd_rich)
# working site data: survey info + benthos + biomass + richness, one row per survey
wsd<-merge(surveys, r1, by=UNIQUE_SURVEY)
wsd<-merge(wsd, r2, by=UNIQUE_SURVEY)
data.cols<-c(tmp.cols, "TotINSTFishNoSJ", "SPECIESRICHNESS", SURVEY_SITE_DATA)
write.csv(wsd, file="tmp NCRMP working site data.csv")
####################################################################################################################################################################
#
# POOL UP
#
####################################################################################################################################################################
## Nearly always DOING THIS ONLY WITH nSPC data ####
wsd<-subset(wsd, wsd$METHOD=="nSPC")
wsd<-droplevels(wsd)
## check which ISLANDS differ between sectors and working data..
setdiff(unique(sectors$SEC_NAME), unique(wsd$SEC_NAME))
setdiff(unique(wsd$ANALYSIS_SEC), unique(sectors$SEC_NAME))
setdiff(unique(wsd$SEC_NAME), unique(sectors$SEC_NAME))
#FOREREEF ONLY AND GREATER THAN 2012
wsd<-wsd[wsd$REEF_ZONE %in% c("Forereef", "Protected Slope"),]
#wsd<-wsd[wsd$OBS_YEAR>2012,]
head(wsd)
# DETERMINE THE BASIC STRATIFICATION WITHIN SECTORS - DEFAULT IS REEF_ZONE AND DEPTH_BIN, BUT THIS CODE ALLOWS POSSIBILITY OF CHOOSING ANOTHER
sectors$ANALYSIS_STRATA<-paste(sectors$REEF_ZONE, sectors$DEPTH_BIN, sep='')
#generate table to be able to relate ANALYSIS_SEC to REP_CARD_UNIT (as we have one-off reporting units in this case)
#rcu<-aggregate(wsd$METHOD, by=wsd[,c("ISLAND", "REP_CARD_UNIT", "ANALYSIS_SEC")], FUN=length)
#.... Make Sarigan-Guguan-Alamagan be a single 'ISLAND'
# there MUST already be appropriate records in the sectors table for the new 'ISLAND' name, in this case will be "AGS"
levels(wsd$ISLAND)<-c(levels(wsd$ISLAND), "AGS")
wsd[wsd$ISLAND %in% c("Sarigan", "Guguan", "Alamagan"),"ISLAND"]<-"AGS"
sectors[sectors$ISLAND %in% c("Sarigan", "Guguan", "Alamagan"),"ISLAND"]<-"AGS"
wsd<-droplevels(wsd)
# snapshots restored at the top of each scheme iteration below
WSD_SAVED<-wsd
SECTORS_SAVED<-sectors
SPATIAL_POOLING_BASE<-c("REGION","ISLAND", "ANALYSIS_SEC", "ANALYSIS_STRATA", "REEF_ZONE")
POOLING_LEVEL<-c(SPATIAL_POOLING_BASE)
# Loop over the sector-pooling schemes; each iteration restores the saved working data,
# relabels/filters sectors for that scheme, pools to strata then up to sector and island,
# and writes per-scheme csv/rdata outputs (merged after the loop).
CURRENT_SCHEMES<-c("RAMP_BASIC", "MARI2014", "AS_SANCTUARY") #IGNORING YEAR = SO GOING TO THE FINEST SCALE IN EACH REGION
for(i in 1:length(CURRENT_SCHEMES)){
wsd<-WSD_SAVED
sectors<-SECTORS_SAVED
wsd$ANALYSIS_SEC<-as.character(wsd$SEC_NAME)
CURRENT_SCHEME<-CURRENT_SCHEMES[i]
sectors$ANALYSIS_SEC<-sectors[,CURRENT_SCHEME]
# DETERMINE THE BASIC STRATIFICATION WITHIN SECTORS - DEFAULT IS REEF_ZONE AND DEPTH_BIN, BUT THIS CODE ALLOWS POSSIBILITY OF CHOOSING ANOTHER
sectors$ANALYSIS_STRATA<-paste(sectors$REEF_ZONE, sectors$DEPTH_BIN, sep='')
#now deal with those missing sectors - either rename ANALYSIS_SEC OR remove
if(CURRENT_SCHEME=="RAMP_BASIC") {
# wsd[wsd$ANALYSIS_SEC %in% c("PATI_PT_MPA", "ACHANG_MPA", "TUMON_BAY_MPA", "PITI_BOMB_MPA", "GUAM_MP_MINUS_ACHANG"),]$ANALYSIS_SEC<-"GUAM_MP"
# #in this case removing 2014 ACHANG_MPA sites (The shorebased ones) and changing ANALYSIS_SEC for all other GUAM MPA sectors to the RAMP base one "GUAM_MP", also remove SAMOA 2015 sites, they will run in AS_SANCTUARY 2015 and Tutuila 2010&012
# wsd<-wsd[!(wsd$ANALYSIS_SEC == "ACHANG_MPA" & wsd$ANALYSIS_YEAR==2014),]
wsd<-wsd[wsd$ISLAND != "Guam",]
wsd<-wsd[!wsd$REGION == "SAMOA",]
}
if(CURRENT_SCHEME=="MARI2014") {
wsd[wsd$ANALYSIS_SEC %in% c("PATI_PT_MPA", "TUMON_BAY_MPA", "PITI_BOMB_MPA"),]$ANALYSIS_SEC<-"GUAM_MP_MINUS_ACHANG"
wsd<-wsd[wsd$ISLAND %in% "Guam",]
}
if(CURRENT_SCHEME=="AS_SANCTUARY") {
wsd<-wsd[wsd$REGION == "SAMOA",]
wsd[wsd$ISLAND=="Tau",]$ANALYSIS_SEC<-"Tau" # Pooling Tau together HERE - I Think that there is no difference in management (its not actually closed) IF Tau IS IMPORTANT CHECK CHECK CHECK
} #in this case remove everything that isnt SAMOA surveyed in 2015
##DETERMINE WHICH SITES HAVE ANALYSIS STRATA THAT ARE NOT IN THIS
analysis_secs<-unique(wsd$ANALYSIS_SEC)
missing_secs<-unique(analysis_secs[!analysis_secs %in% unique(sectors$ANALYSIS_SEC)])
if(length(missing_secs)>0) {
cat("ANALYSIS SECTORS missing from this scheme:", missing_secs)
}
tmp<-aggregate(wsd[,"TotINSTFishNoSJ"],by=wsd[,c("REGION", "ISLAND", "ANALYSIS_SEC")], sum, na.rm=FALSE)
tmp[tmp$ANALYSIS_SEC %in% missing_secs,]
### CHECK REPLICATION WITHIN STRATA
# count sites per stratum, then attach to the sectors table to view replication per scheme
tmp<-aggregate(wsd[,"METHOD"], by=wsd[,c(POOLING_LEVEL ,"SITE")], length)
tmp<-aggregate(tmp[,"x"], by=tmp[,c(POOLING_LEVEL)], length)
tmp<-merge(sectors, tmp[,c("ANALYSIS_SEC", "ANALYSIS_STRATA","x")],by=c("ANALYSIS_SEC", "ANALYSIS_STRATA"),all.y=TRUE)
names(tmp)[names(tmp)=="x"]<-"n_sites"
a<-cast(tmp, REGION + ISLAND + ANALYSIS_SEC ~ ANALYSIS_STRATA, value="n_sites", sum, fill=NA)
a
#clean up the sectors table so pool all sub sectors within a scheme into a total for this scheme's sectors
sectors<-aggregate(sectors[,"AREA_HA"], by=sectors[,c(SPATIAL_POOLING_BASE)], sum)
names(sectors)[names(sectors)=="x"]<-"AREA_HA"
#################################################################################################################################
############################################# NOW DO THE CALCULATION OF WITHIN-STRATA AND POOLED UP DATA VALUES #################
#################################################################################################################################
ADDITIONAL_POOLING_BY<-c("METHOD") # additional fields that we want to break data at, but which do not relate to physical areas (eg survey year or method)
#generate within strata means and vars
POOLING_LEVEL<-c(SPATIAL_POOLING_BASE, ADDITIONAL_POOLING_BY)
data.per.strata<-Calc_PerStrata(wsd, data.cols, POOLING_LEVEL)
write.csv(data.per.strata,file=paste(CURRENT_SCHEME, "tmp strata data.csv", sep=""))
#save(data.per.strata, file=paste(CURRENT_SCHEME, "strata_data.rdata", sep=""))
###### REMOVE STRATA with N=1 (cannot pool those up)
data.per.strata$Mean<-data.per.strata$Mean[data.per.strata$Mean$N>1,]
data.per.strata$SampleVar<-data.per.strata$SampleVar[data.per.strata$SampleVar$N>1,]
data.per.strata$SampleSE<-data.per.strata$SampleSE[data.per.strata$SampleSE$N>1,]
# e.g. SAVE BY ISLAND AND REEF ZONE PER YEAR
AGGREGATION_LEVEL<-c("REGION","ISLAND","ANALYSIS_SEC") # Spatial Level to agggregate output data to (eg per REGION or per (REGION, ISLAND) etc...
dp<-Calc_Pooled(data.per.strata$Mean, data.per.strata$SampleVar, data.cols, AGGREGATION_LEVEL, ADDITIONAL_POOLING_BY, SPATIAL_POOLING_BASE, sectors)
write.csv(dp,file=paste(CURRENT_SCHEME, "data_pooled_SEC.csv", sep=""))
save(dp, file=paste(CURRENT_SCHEME, "data_pooled_SEC.rdata", sep=""))
# e.g. SAVE BY ISLAND PER YEAR
AGGREGATION_LEVEL<-c("REGION","ISLAND") # Spatial Level to agggregate output data to (eg per REGION or per (REGION, ISLAND) etc...
dp<-Calc_Pooled(data.per.strata$Mean, data.per.strata$SampleVar, data.cols, AGGREGATION_LEVEL, ADDITIONAL_POOLING_BY, SPATIAL_POOLING_BASE, sectors)
write.csv(dp,file=paste(CURRENT_SCHEME, "data_pooled_is.csv", sep=""))
save(dp, file=paste(CURRENT_SCHEME, "data_pooled_is.rdata", sep=""))
}
#LOAD THE data per SCHEME and stitch the three schemes' island-level outputs into one
load("RAMP_BASICdata_pooled_is.rdata")
x<-dp
load("MARI2014data_pooled_is.rdata")
g<-dp
load("AS_SANCTUARYdata_pooled_is.rdata")
as<-dp
X<-x$Mean
G<-g$Mean
AS<-as$Mean
Mean<-rbind(X, G, AS)
X<-x$PooledSE
G<-g$PooledSE
AS<-as$PooledSE
PooledSE<-rbind(X, G, AS)
dp<-list(Mean, PooledSE)
names(dp)<-list("Mean", "PooledSE")
write.csv(dp, file="CREP2010_16 RC Data.csv")
save(dp, file="CREP2010_16 RC Data.RData")
#################################################################### TOW WORKUP ###########################################################################
# IDW - NEED TO ADD IN SOME CHECKING OF THE MISSING DEPTHS ... AND MAYBE FILTERING AND SO ON ... CAN LIFT CODE FROM REPORT CARD I THINK
##save(df, file="ALL_TOW_FISH_RAW.rdata")
#write.table(df, file = "ALL_TOW_FISH_RAW.csv", sep = ",", col.names = NA, qmethod = "double")
# Load the raw towed-diver fish survey data; the .rdata file provides `df`
load(file="/Users/ivor.williams/Documents/CRED/Fish Team/Base R/Base Data Files/ALL_TOW_FISH_RAW.rdata")
# FISH TOW WORKINGS -------------------------------------------------------
wtd<-df
#wtd<-subset(wtd, wtd$REGION=="SAMOA", drop=TRUE)
wtd<-subset(wtd, wtd$OBS_YEAR > 2009, drop=TRUE)
wtd<-droplevels(wtd)
# biomass in grams from the length-weight relationship: W = a * (L * conversion)^b, scaled by COUNT
wtd$biomass_g<-wtd$LW_A*wtd$COUNT*((wtd$SIZE*wtd$LENGTH_CONVERSION_FACTOR)^wtd$LW_B)
## drop any rows which have NOSC and MISS in the species field, these are tows which were aborted part way through
## remove these so that the tow length is corrected....
# BUG FIX: the original code removed `wtd[-nosc,]` twice - the second removal reused
# `nosc` instead of `miss`, so MISS rows were never dropped and an arbitrary set of
# already-reindexed rows was removed instead. Negative indexing with an empty which()
# result would also have dropped ALL rows. Filtering with %in% fixes both problems.
wtd<-wtd[!wtd$SPECIES %in% c("NOSC", "MISS"),]
#wtd<-merge(wtd, tow_ns[,c("DIVEID", "RCU")], by="DIVEID", all.x=T)
length(unique(wtd$DIVEID))
# Fill missing numerics with 0 (DEPTH/CENTROID zeros are converted back to NaN after aggregation below)
wtd[is.na(wtd$COUNT),]$COUNT<-0
wtd[is.na(wtd$DEPTH),]$DEPTH<-0
wtd[is.na(wtd$SIZE_),]$SIZE_<-0
wtd[is.na(wtd$CENTROIDLAT),]$CENTROIDLAT<-0
wtd[is.na(wtd$CENTROIDLON),]$CENTROIDLON<-0
# add an "UNKNOWN" level to categorical fields and use it for NAs
tmp.lev<-levels(wtd$REEF_ZONE); head(tmp.lev)
levels(wtd$REEF_ZONE)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(wtd$FAMILY); head(tmp.lev)
levels(wtd$FAMILY)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(wtd$TAXONNAME); head(tmp.lev)
levels(wtd$TAXONNAME)<-c(tmp.lev, "UNKNOWN")
tmp.lev<-levels(wtd$TROPHIC_MONREP); head(tmp.lev)
levels(wtd$TROPHIC_MONREP)<-c(tmp.lev, "UNKNOWN")
wtd[is.na(wtd$REEF_ZONE),"REEF_ZONE"]<-"UNKNOWN"
wtd[is.na(wtd$TAXONNAME),"TAXONNAME"]<-"UNKNOWN"
wtd[is.na(wtd$FAMILY),"FAMILY"]<-"UNKNOWN"
wtd[is.na(wtd$TROPHIC_MONREP),"TROPHIC_MONREP"]<-"UNKNOWN"
wtd$REP_CARD_UNIT<-wtd$ISLAND
#levels(wtd$REP_CARD_UNIT)<-c(levels(wtd$REP_CARD_UNIT), "TUT_N", "TUT_S")
#wtd[wtd$ISLAND=="Tutuila",]$REP_CARD_UNIT<-wtd[wtd$ISLAND=="Tutuila",]$RCU
#summarize tow information (length, depth, lat-long, date)
# first do that by segment
TOW_DATA<-c("REGION", "ISLAND", "REP_CARD_UNIT", "CENTROIDLAT", "CENTROIDLON", "DATE_", "DEPTH", "STARTLOCALTIME", "STRATA", "PROJECTEDLENGTH", "DIVEID")
SEGMENT_ID2<-c( "DIVEID", "SEGMENT")
SEGMENT_INFO<-c("REGION", "ISLAND", "REP_CARD_UNIT", "DATE_", "OBS_YEAR")
SEGMENT_INFO_TO_SUM<-c("PROJECTEDLENGTH")
SEGMENT_INFO_TO_AVE<-c("CENTROIDLAT", "CENTROIDLON", "DEPTH")
SEGMENT_INFO_TO_MIN<-c("STARTLOCALTIME")
SEGMENT_INFO_TO_MODE<-c("REEF_ZONE")
SEGMENT_FIELDS<-c(SEGMENT_INFO, SEGMENT_INFO_TO_SUM, SEGMENT_INFO_TO_AVE, SEGMENT_INFO_TO_MODE, SEGMENT_ID2)
DIVE_INFO<-c("DIVEID", SEGMENT_INFO)
WTD_SAVE<-wtd
# Keep only sharks of at least MIN_SIZE cm; everything else is zeroed and its FAMILY relabelled "OTHER"
MIN_SIZE<-100
#only sharks > 1m
SHARKS<-c("Carcharhinidae", "Ginglymostomatidae")
wtd[is.na(wtd$SIZE_) | !wtd$FAMILY %in% SHARKS,]$SIZE_<-0
wtd[wtd$SIZE_< MIN_SIZE | !wtd$FAMILY %in% SHARKS,]$COUNT<-0
wtd[wtd$SIZE_< MIN_SIZE | !wtd$FAMILY %in% SHARKS,]$biomass_g<-0
levels(wtd$FAMILY)<-c(levels(wtd$FAMILY), "OTHER")
wtd[wtd$SIZE_< MIN_SIZE | !wtd$FAMILY %in% SHARKS,]$FAMILY<-"OTHER"
wtd<-droplevels(wtd)
sum(wtd[!is.na(wtd$COUNT),]$COUNT)
length(unique(wtd$DIVEID))
#clean up the data file ## adel comment: this creates 14 warnings.... ### return to this, extract numeric only columns
##- invalid for factors with NA entries
#wtd[is.na(wtd$COUNT),]$COUNT<-0
#wtd[is.na(wtd$biomass_g),]$biomass_g<-0
#wtd[is.na(wtd$BIOMASS),]$BIOMASS<-0
#wtd[is.na(wtd$BIOMASS_G_M2),]$BIOMASS_G_M2<-0
# one row per (dive, segment); the count column is discarded immediately - the aggregate only builds the key table
segment.info<-aggregate(wtd$COUNT, by=wtd[,SEGMENT_FIELDS], sum, na.rm=F)## aggregate sums total count of all fishes per record, using field_list
segment.info<-segment.info[,SEGMENT_FIELDS] # drop the count - was just using that to generate a summary table
length(unique(segment.info$DIVEID))
setdiff(wtd$DIVEID,segment.info$DIVEID)
#sum up to total length etc.. for the dive ID
#set depth, and centroid lat-long field to NaN if zero (these zeros were NA placeholders set above)
segment.info[segment.info$DEPTH==0,"DEPTH"]<-NaN
segment.info[segment.info$CENTROIDLAT==0,"CENTROIDLAT"]<-NaN
segment.info[segment.info$CENTROIDLON==0,"CENTROIDLON"]<-NaN
sum.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_SUM],by=segment.info[,DIVE_INFO], sum, na.rm=TRUE);
dimnames(sum.segments)[[2]]<-c(DIVE_INFO, SEGMENT_INFO_TO_SUM)
ave.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_AVE],by=segment.info[,DIVE_INFO], mean, na.rm=TRUE)
med.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_AVE],by=segment.info[,DIVE_INFO], median, na.rm=TRUE)
# Mode is a fish-paste helper (most frequent value) - used for REEF_ZONE per dive
mode.segments<-aggregate(segment.info[,SEGMENT_INFO_TO_MODE],by=segment.info[,DIVE_INFO], Mode)
dimnames(mode.segments)[[2]]<-c(DIVE_INFO, SEGMENT_INFO_TO_MODE)
tt<-merge(ave.segments, mode.segments[,c("DIVEID",SEGMENT_INFO_TO_MODE)], by="DIVEID")
dive.info<-merge(tt, sum.segments[,c("DIVEID",SEGMENT_INFO_TO_SUM)], by="DIVEID")
dim(dive.info)
write.csv(dive.info, file="tmp Tows.csv")
############################################################
### Now sum abundance and biomass data per species per dive,
### and convert to gm2 and abund m2
############################################################
#Pull all species information into a separate df, for possible later use ..
FISH_SPECIES_FIELDS<-c("SPECIES","FAMILY", "TAXONNAME", "TROPHIC_MONREP")
t.species.table<-aggregate(wtd$COUNT,by=wtd[,FISH_SPECIES_FIELDS], sum, na.rm=FALSE)
sum.abund.bio<-aggregate(wtd[,c("COUNT", "biomass_g")],by=wtd[,c("DIVEID", "FAMILY")], sum, na.rm=TRUE)
dim(sum.abund.bio)
# tow swath is assumed 10 m wide, so area (m2) = 10 * PROJECTEDLENGTH - TODO confirm
t.fish.data<-merge(sum.abund.bio, dive.info[,c("DIVEID","PROJECTEDLENGTH")], by="DIVEID")
t.fish.data$BIOGM2<-t.fish.data$biomass_g / (10*t.fish.data$PROJECTEDLENGTH)
t.fish.data$ABUN2<-t.fish.data$COUNT / (10*t.fish.data$PROJECTEDLENGTH)
dim(t.fish.data)
## add consumer group to tow data, filter to forereef ONLY, add depth to give option to later filter by depth range .. then pool up by island & year and save SE
# add consumer group (and family, in case it is useful later) to t.fish.data
#x.fish.data<-merge(t.fish.data, t.species.table[, FISH_SPECIES_FIELDS], by="SPECIES")
# add data about the tow (island, zone, year, depth)
x.fish.data<-merge(t.fish.data, dive.info[, c("DIVEID", "REGION", "ISLAND", "REP_CARD_UNIT", "REEF_ZONE", "OBS_YEAR", "DEPTH")], by="DIVEID")
dim(x.fish.data)
write.csv(x.fish.data, file="TMPtowData.csv")
#filter out forereef tows only...!!!!!
#x.fish.data<-subset(x.fish.data, x.fish.data$REEF_ZONE=="Forereef", drop=TRUE)
t.fish.data<-(x.fish.data)
xx<-aggregate(t.fish.data$ABUN2, by=t.fish.data[,c("DIVEID", "REGION", "ISLAND", "REP_CARD_UNIT", "OBS_YEAR", "REEF_ZONE", "FAMILY")], sum, na.rm=TRUE)
dimnames(xx)[[2]]<-c("DIVEID", "REGION", "ISLAND", "REP_CARD_UNIT", "YEAR", "STRATA", "FAMILY", "ABUN2")
#now format this more or less as a crosstab, with field of interest as column variable
# NOTE(review): cast() is from the reshape (v1) package, not reshape2 (which provides dcast) - confirm which package is attached
t.fish.abund<-cast(xx, DIVEID + REGION + ISLAND + REP_CARD_UNIT + YEAR + STRATA ~ FAMILY, value="ABUN2", fill=0)
t.fish.abund$TotSharkAbund<-rowSums(t.fish.abund[,levels(xx$FAMILY)])
SHARK_COLS<-c("Carcharhinidae", "TotSharkAbund")
dim(t.fish.abund)
#aggregate - average per island/strata/year (mean, n, var -> SE)
t.fish.island.mean<-aggregate(t.fish.abund[,SHARK_COLS],by=t.fish.abund[,c("REGION", "ISLAND", "REP_CARD_UNIT", "STRATA", "YEAR")], mean, na.rm=TRUE)
t.fish.island.n<-aggregate(t.fish.abund[,SHARK_COLS],by=t.fish.abund[,c("REGION", "ISLAND", "REP_CARD_UNIT", "STRATA", "YEAR")], length)
t.fish.island.var<-aggregate(t.fish.abund[,SHARK_COLS],by=t.fish.abund[,c("REGION", "ISLAND", "REP_CARD_UNIT", "STRATA", "YEAR")], var, na.rm=TRUE)
t.fish.island.se<-sqrt(t.fish.island.var[,SHARK_COLS])/sqrt(t.fish.island.n$TotSharkAbund)
# add the N to the mean and se dfs before writing them
t.fish.island.mean$n<-t.fish.island.se$n<-t.fish.island.n$TotSharkAbund
write.csv(t.fish.island.mean, file="TMP tow_fish_shark abund100.csv")
write.csv(t.fish.island.se, file="tow_fish_shark abund_se.csv")
###################################################################
# using only 2010 onwards .. pool across any multiple years of surveys .. weighting each year's data equally
######## this is rough - but works for now! #############
island.data<-t.fish.island.mean
island.data<-subset(island.data, island.data$STRATA %in% c("Forereef", "Protected Slope"), drop=TRUE)
island.data<-subset(island.data, island.data$YEAR>2009, drop=TRUE)
island.data<-droplevels(island.data)
idw<-aggregate(island.data[,SHARK_COLS],by=island.data[,c("REGION","ISLAND", "REP_CARD_UNIT")], mean, na.rm=TRUE)
#convert abund in m2 to Ha
idw[,SHARK_COLS]<-idw[,SHARK_COLS]*10000
write.csv(idw, file="RCtow fish 2010on forereef equallyweighted.csv")
## GENERATE COUNTS PER REP FROM THE BASE WORKING DATA ####################################################################################################
wd<-WD_SAVE
## per-rep species counts from the REA working data (U/I/N observation types only)
wd[!wd$OBS_TYPE %in% c("I", "U", "N"),]$COUNT<-0
wd<-droplevels(wd)
# NOTE(review): REP_CARD_UNIT was created on the tow data (wtd), not on wd - confirm wd carries this column
tmp<-cast(wd, OBS_YEAR + ISLAND + REP_CARD_UNIT + ANALYSIS_STRATA + LATITUDE + LONGITUDE + SITE + REP + REPLICATEID + DIVER ~ SPECIES, value="COUNT", sum, fill=0); head(tmp)
write.csv(tmp, file="tmp AS Counts data.csv")
|
library(e1071)
m1 <- matrix( c(
0, 0, 0, 1, 1, 2, 1, 2, 3, 2, 3, 3, 0, 1,2,3,
0, 1, 2, 3,
1, 2, 3, 2, 3, 3, 0, 0, 0, 1, 1, 2, 4, 4,4,4, 0,
1, 2, 3,
1, 1, 1, 1, 1, 1, -1,-1, -1,-1,-1,-1, 1 ,1,1,1, 1,
1,-1,-1
), ncol = 3 )
Y = m1[,3]
X = m1[,1:2]
df = data.frame( X , Y )
par(mfcol=c(4,2))
for( cost in c( 1e-3 ,1e-2 ,1e-1, 1e0, 1e+1, 1e+2 ,1e+3)) {
#cost <- 1
model.svm <- svm( Y ~ . , data = df , type = "C-classification" , kernel =
"linear", cost = cost,
scale =FALSE )
#print(model.svm$SV)
plot(x=0,ylim=c(0,5), xlim=c(0,3),main= paste( "cost: ",cost, "#SV: ",
nrow(model.svm$SV) ))
points(m1[m1[,3]>0,1], m1[m1[,3]>0,2], pch=3, col="green")
points(m1[m1[,3]<0,1], m1[m1[,3]<0,2], pch=4, col="blue")
points(model.svm$SV[,1],model.svm$SV[,2], pch=18 , col = "red")
|
/lab7/lab1_svm5.R
|
no_license
|
jj960708/Spring2020_DataAnalytics
|
R
| false
| false
| 994
|
r
|
library(e1071)
m1 <- matrix( c(
0, 0, 0, 1, 1, 2, 1, 2, 3, 2, 3, 3, 0, 1,2,3,
0, 1, 2, 3,
1, 2, 3, 2, 3, 3, 0, 0, 0, 1, 1, 2, 4, 4,4,4, 0,
1, 2, 3,
1, 1, 1, 1, 1, 1, -1,-1, -1,-1,-1,-1, 1 ,1,1,1, 1,
1,-1,-1
), ncol = 3 )
Y = m1[,3]
X = m1[,1:2]
df = data.frame( X , Y )
par(mfcol=c(4,2))
for( cost in c( 1e-3 ,1e-2 ,1e-1, 1e0, 1e+1, 1e+2 ,1e+3)) {
#cost <- 1
model.svm <- svm( Y ~ . , data = df , type = "C-classification" , kernel =
"linear", cost = cost,
scale =FALSE )
#print(model.svm$SV)
plot(x=0,ylim=c(0,5), xlim=c(0,3),main= paste( "cost: ",cost, "#SV: ",
nrow(model.svm$SV) ))
points(m1[m1[,3]>0,1], m1[m1[,3]>0,2], pch=3, col="green")
points(m1[m1[,3]<0,1], m1[m1[,3]<0,2], pch=4, col="blue")
points(model.svm$SV[,1],model.svm$SV[,2], pch=18 , col = "red")
|
## Put comments here that give an overall description of what your
## functions do
## This function creates a special "matrix" object that can cache its inverse.
## Build a special "matrix" wrapper: a list of closures sharing a cached inverse.
## The cache is invalidated (reset to NULL) whenever a new matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # replace the stored matrix and clear any stale cached inverse
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # accessors for the matrix and its cached inverse
  get <- function() x
  setinverse <- function(inv) cached_inverse <<- inv
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" made by makeCacheMatrix.
## A previously computed inverse is reused from the cache; otherwise it is
## computed with solve() and stored for next time. `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # cache hit: announce and return early
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # cache miss: compute, store, and return the fresh inverse
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
na-da/ProgrammingAssignment2
|
R
| false
| false
| 951
|
r
|
## Put comments here that give an overall description of what your
## functions do
## This function creates a special "matrix" object that can cache its inverse.
## It returns a list of four closures (set/get/setinverse/getinverse) that all
## share the matrix `x` and the cached inverse `i` via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
# storing a new matrix invalidates any previously cached inverse
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inv) i <<- inv
getinverse <- function() i
# the returned list is the public interface of the cache object
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
# cache hit: return the stored inverse without recomputing
if(!is.null(i)) {
message("getting cached data")
return(i)
}
# cache miss: compute the inverse, store it, and return it
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
## Imports
library(here)
library(fs)
library(furrr)
library(future)
library(tidyverse)
# utils.R supplies helpers used below (multi_id_replace, return_exp_grp) - confirm
source(here("code", "utils.R"))
## Get data
user_studies <- readRDS(here("data_working", "list_of_users_and_studies.RDS"))
user_df <- readRDS("./data_working/user_data_with_demographics.RDS")
all_groupings <- dir_ls("./data_stripped", recurse = TRUE,
type = "file", regexp = "groupings_summary.RDS")
## Now for accelerometer, let's get all the grouping files, summarize them
## to number of groupings per day, and then combine that into one df.
## Skipped entirely if the output file already exists (simple caching).
if (!file.exists("./data_working/accel_groupings.RDS")) {
accel_files <- all_groupings[grepl("accel", all_groupings)]
# NOTE(review): plan("multiprocess") is deprecated in recent {future} releases
# in favor of multisession/multicore - confirm the installed version accepts it
future::plan("multiprocess")
# read every per-user accelerometer grouping file in parallel, normalize user ids,
# then count groupings per user-day
accel_groupings <- furrr::future_map_dfr(
.x = accel_files,
.f = ~ readRDS(.x) %>%
mutate(uid = str_replace_all(user_id, multi_id_replace))
) %>%
group_by(study_name, uid, year, month, day) %>%
summarize(daily_groupings = n()) %>%
ungroup()
## Now we need the expected number of groupings (based on study settings)
# return_exp_grp presumably yields expected groupings per hour, hence * 24 - TODO confirm
accel_groupings <- accel_groupings %>%
rowwise() %>%
mutate(exp_grps = return_exp_grp("accelerometer",
uid, user_studies) * 24) %>%
ungroup()
## We want to model rate of missingness so substract from expected and in
## some rare cases, we can actually get more groupings than expected so
## truncate those at 0. (E.g., GPS pings or games that use accelerometer).
accel_groupings <- accel_groupings %>%
mutate(missing_grps = exp_grps - daily_groupings,
missing_grps = ifelse(missing_grps < 0, 0, missing_grps))
## Join with demographics
accel_groupings <- accel_groupings %>%
left_join(
user_df %>%
select(uid = user_id_merged, female,
age, race, education, iphone) %>%
filter(uid %in% accel_groupings$uid) %>%
distinct()
)
## Save
saveRDS(accel_groupings, "./data_working/accel_groupings.RDS")
}
|
/code/03_create_accel_working_df.R
|
no_license
|
mkiang/beiwe_missing_data
|
R
| false
| false
| 2,155
|
r
|
## Build the accelerometer working dataframe: daily grouping counts per user,
## expected counts from study settings, missingness, and demographics.
## Imports
library(here)
library(fs)
library(furrr)
library(future)
library(tidyverse)
source(here("code", "utils.R"))
## Get data
user_studies <- readRDS(here("data_working", "list_of_users_and_studies.RDS"))
user_df <- readRDS("./data_working/user_data_with_demographics.RDS")
## All per-user grouping summary files produced by earlier pipeline steps
all_groupings <- dir_ls("./data_stripped", recurse = TRUE,
                        type = "file", regexp = "groupings_summary.RDS")
## Now for accelerometer, let's get all the grouping files, summarize them
## to number of groupings per day, and then combine that into one df.
if (!file.exists("./data_working/accel_groupings.RDS")) {
  accel_files <- all_groupings[grepl("accel", all_groupings)]
  ## Fix: plan("multiprocess") was deprecated and later removed from the
  ## future package; "multisession" is the documented replacement.
  future::plan("multisession")
  accel_groupings <- furrr::future_map_dfr(
    .x = accel_files,
    .f = ~ readRDS(.x) %>%
      mutate(uid = str_replace_all(user_id, multi_id_replace))
  ) %>%
    group_by(study_name, uid, year, month, day) %>%
    summarize(daily_groupings = n()) %>%
    ungroup()
  ## Now we need the expected number of groupings (based on study settings)
  accel_groupings <- accel_groupings %>%
    rowwise() %>%
    mutate(exp_grps = return_exp_grp("accelerometer",
                                     uid, user_studies) * 24) %>%
    ungroup()
  ## We want to model rate of missingness so subtract from expected and in
  ## some rare cases, we can actually get more groupings than expected so
  ## truncate those at 0. (E.g., GPS pings or games that use accelerometer.)
  accel_groupings <- accel_groupings %>%
    mutate(missing_grps = exp_grps - daily_groupings,
           missing_grps = ifelse(missing_grps < 0, 0, missing_grps))
  ## Join with demographics; the key is made explicit instead of relying on
  ## the implicit natural-join column detection.
  accel_groupings <- accel_groupings %>%
    left_join(
      user_df %>%
        select(uid = user_id_merged, female,
               age, race, education, iphone) %>%
        filter(uid %in% accel_groupings$uid) %>%
        distinct(),
      by = "uid"
    )
  ## Save
  saveRDS(accel_groupings, "./data_working/accel_groupings.RDS")
}
|
\name{replace_dollar}
\alias{$<-}
\alias{$<-,SpatExtent-method}
\alias{$<-,SpatVector-method}
\alias{$<-,SpatRaster-method}
\title{Replace with $<-}
\description{
Replace a layer of a SpatRaster, or an attribute variable of a SpatVector
}
\usage{
\S4method{$}{SpatRaster}(x, name) <- value
\S4method{$}{SpatVector}(x, name)<-value
\S4method{$}{SpatExtent}(x, name) <- value
}
\arguments{
\item{x}{SpatRaster, SpatVector or SpatExtent}
\item{name}{character. If \code{x} is a SpatRaster: layer name. If \code{x} is a SpatVector: variable name. If \code{x} is a SpatExtent: "xmin", "xmax", "ymin" or "ymax"}
\item{value}{if \code{x} is a SpatRaster, a SpatRaster for which this is \code{TRUE}: \code{nlyr(value) == length(i)}; if \code{x} is a SpatVector, a vector of new values; if \code{x} is a SpatExtent, a single number}
}
\value{
Same as \code{x}
}
\seealso{
\code{ \link{[[<-}, \link{[<-}, \link{$}}
}
\examples{
f <- system.file("ex/lux.shp", package="terra")
v <- vect(f)
v$ID_1 <- LETTERS[1:12]
v$new <- sample(12)
values(v)
}
\keyword{ spatial }
|
/man/replace_dollar.Rd
|
no_license
|
cran/terra
|
R
| false
| false
| 1,122
|
rd
|
\name{replace_dollar}
\alias{$<-}
\alias{$<-,SpatExtent-method}
\alias{$<-,SpatVector-method}
\alias{$<-,SpatRaster-method}
\title{Replace with $<-}
\description{
Replace a layer of a SpatRaster, or an attribute variable of a SpatVector
}
\usage{
\S4method{$}{SpatRaster}(x, name) <- value
\S4method{$}{SpatVector}(x, name)<-value
\S4method{$}{SpatExtent}(x, name) <- value
}
\arguments{
\item{x}{SpatRaster, SpatVector or SpatExtent}
\item{name}{character. If \code{x} is a SpatRaster: layer name. If \code{x} is a SpatVector: variable name. If \code{x} is a SpatExtent: "xmin", "xmax", "ymin" or "ymax"}
\item{value}{if \code{x} is a SpatRaster, a SpatRaster for which this is \code{TRUE}: \code{nlyr(value) == length(i)}; if \code{x} is a SpatVector, a vector of new values; if \code{x} is a SpatExtent, a single number}
}
\value{
Same as \code{x}
}
\seealso{
\code{ \link{[[<-}, \link{[<-}, \link{$}}
}
\examples{
f <- system.file("ex/lux.shp", package="terra")
v <- vect(f)
v$ID_1 <- LETTERS[1:12]
v$new <- sample(12)
values(v)
}
\keyword{ spatial }
|
# wp stands for web play
#
# A global object that manages the state of play, possibly for several
# human players. A specific app object holds a reference to a wp and a
# player field.
# Accessor: the gtree game object of the current web play.
wp.game = function(wp=get_wp()) {
  current.play <- wp$play
  current.play$game
}
# Return the current stage object of the web play, or NULL when the play
# has not started yet (stage.num == 0) or is already past the last stage.
wp.stage = function(wp=get_wp()) {
  stage.num = wp$stage.num
  # use scalar || (not vectorized |) in a scalar if-condition
  if (stage.num < 1 || stage.num > length(wp$vg$stages)) return(NULL)
  wp$vg$stages[[stage.num]]
}
# Directory holding the page Rmd files: wp$pages.dir when set, otherwise the
# game's default pages directory. NOTE(review): arguments are promises, so the
# get.pages.dir() fallback is presumably only evaluated when wp$pages.dir is
# NULL -- confirm against the first.non.null implementation.
wp.pages.dir = function(wp=get_wp()) {
first.non.null(wp$pages.dir,get.pages.dir(wp.game(wp)))
}
# Map the current stage number to the name of the page to display:
# "start-page" before the first stage, "end-page" after the last one,
# otherwise the name of the active stage.
wp.page.name = function(wp=get_wp()) {
  n = wp$stage.num
  if (n == 0) return("start-page")
  if (n > length(wp$vg$stages)) return("end-page")
  wp.stage(wp)$name
}
# Get rmd file of currently shown page.
# Resolution order:
#   1. <pages.dir>/<page.name>.Rmd if it exists (a hand-written page);
#   2. otherwise <page.name>.auto.Rmd, generating it first via
#      make.page.rmd() when make.auto is TRUE; if copy.auto is TRUE the
#      auto file is copied to the plain .Rmd name and that copy is
#      returned (so it can be customized later);
#   3. NULL when no page file exists and none could be generated.
wp.page.file = function(wp=get_wp(), copy.auto=FALSE, make.auto=TRUE) {
page.name = wp.page.name(wp)
page.dir = wp.pages.dir(wp)
file = file.path(page.dir, paste0(page.name,".Rmd"))
if (file.exists(file)) return(file)
auto.file = file.path(page.dir, paste0(page.name,".auto.Rmd"))
# generate the auto page on demand
if (!file.exists(auto.file) & make.auto) {
make.page.rmd(wp.game(wp),page.name = page.name, stage=wp.stage(wp))
}
if (file.exists(auto.file)) {
if (copy.auto) {
file.copy(auto.file, file)
return(file)
}
return(auto.file)
}
return(NULL)
}
#' Create a new web play object
#'
#' @param game A gtree game generated with \code{\link[gtree]{new_game}}.
#' @param bots A list with one bots for every player. Also add a bot for the human player. You can call \code{\link{make_bots}} to conveniently create the bots.
#' @param human index of the player that is played by a human in the first play of the web app.
#' @param human_draw_method Method how the index of the human player is determined by default if a new play is started. The default \code{"cycle"} lets the human cycle through all players. \code{"random"} picks a random player, and \code{"fixed"} keeps the current player.
#' @param wpUI the id of the \code{uiOutput} element in the app ui where the web play will be shown.
#' @param verbose shall information about state of play be printed to the standard output?
#' @param pages.dir the directory in which the Rmd files for the stage pages can be found. By default \code{"./pages"}.
#' @param custom A list of custom parameters that will be passed to handlers.
#' @param pre.page.handler a function that is called before a page is shown to a human. It should return a list of values that can be accessed in the whiskers {{ }} of the page Rmd file.
#' @param post.page.handler a function that is called after a human made input in a stage. Can for example be used to update a population play summary. (See the KuhnPoker example)
#' @param finish.handler is called when the final results page of a play is left. The default handler simply starts a new play.
#' @param page.ui.fun optionally a function that returns for each page a shiny tag that will be shown. If NULL (default) we specify the page ui via Rmd files in the pages subfolder.
#' @family Web Play
new_wp = function(game,bots,human=draw_human_pos(human_draw_method=human_draw_method,numPlayers=game$vg$params$numPlayers, human=0),human_draw_method = c("cycle","random","fixed")[1], wpUI="wpUI", verbose=FALSE, pages.dir = file.path(getwd(),"pages"),custom=list(), pre.page.handler = NULL,post.page.handler = NULL, finish.handler = wp.default.finish.handler, comp.pages = as.environment(list()), page.ui.fun=NULL, ...) {
restore.point("new.wp")
# Initialize the underlying play object (nature / bot / human moves).
play = new_play(game,bots, human)
# Per-stage timing slots, NA until the corresponding stage has been played.
stage_secs = rep(NA_real_, length(game$vg$stages))
names(stage_secs) = names(game$vg$stages)
# The web play state is a mutable environment (not a list) so that handlers
# and helper functions can update it in place. stage.num == 0 means the
# start page, before the first stage.
wp = as.environment(list(play=play, vg=game$vg, stage.num=0, human=human,human_draw_method=human_draw_method, wpUI=wpUI, num.stages = length(game$vg$stages), verbose=verbose, pages.dir = pages.dir,custom=custom, pre.page.handler = pre.page.handler, post.page.handler=post.page.handler, finish.handler=finish.handler, stage_secs=stage_secs,comp.pages=comp.pages, page.ui.fun=page.ui.fun,...))
wp
}
# Determine the index of the human player for the next play, according to
# the draw method: "cycle" advances to the next seat (wrapping around),
# "random" picks a uniformly random seat, "fixed" keeps the current one
# (coerced to at least 1). Unknown methods leave `human` unchanged.
draw_human_pos = function(wp=NULL, human_draw_method=wp$human_draw_method, numPlayers=wp$vg$params$numPlayers, human = wp$human) {
  restore.point("draw_human_pos")
  if (human_draw_method == "random") {
    return(sample.int(numPlayers, 1))
  }
  if (human_draw_method == "cycle") {
    nxt = human + 1
    return(if (nxt > numPlayers) 1 else nxt)
  }
  if (human_draw_method == "fixed" && human < 1) {
    return(1)
  }
  human
}
#' Reset a web play to the start of a new play
#'
#' If you immediately want to start the new play. Call
#' \code{\link{wp_continue}} afterwards.
#'
#' @param wp A web play object
#' @param bots You can provide new bots. By default the current bots are used again.
#' @param human You can define a new index of the human player. By default the current human is used.
#' @family Web Play
wp_reset = function(wp=get_wp(), bots=wp$play$bots, human=draw_human_pos(wp)) {
restore.point("wp_reset")
# NOTE(review): the bots/human defaults lazily read the OLD wp$play; they
# are presumably forced by the new_play() call below, before wp$play is
# overwritten -- keep this statement first.
wp$play = new_play(wp$play$game,bots, human)
wp$stage.num = 0  # 0 = start page, before the first stage
wp$human = human
wp$bots = bots
wp$stage_secs[] = NA  # clear per-stage timings but keep the stage names
invisible(wp)
}
#' Copy a web play object
#'
#' Returns a fresh environment holding the same fields as \code{wp}, so that
#' later mutation of one copy does not affect the other.
#' @family Web Play
wp_copy = function(wp) {
  fields = as.list(wp)
  as.environment(fields)
}
# Is called by wp_continue_play.
# Play all moves of nature and bots of a web play up until the human player
# sees again a stage (or all stages are finished).
#
# @param wp the web play object
# @family Web Play
wp_play_until_human = function(wp) {
  restore.point("wp.play.until.human")
  play = wp$play
  game = wp.game(wp)
  # Advance automatic stages while no fresh human stage is pending and there
  # are stages left. Scalar && / || replace the vectorized & / | : the
  # while-condition must be a single logical value.
  while (
    ((play$is.human.stage == FALSE) ||
       (play$human.stage.finished >= play$auto.stage.finished)) &&
    (play$auto.stage.finished < wp$num.stages)
  ) {
    if (wp$verbose) {
      num = play$auto.stage.finished+1
      cat("\nPlay auto stage ",num, " ", wp$vg$stages[[num]]$name,"\n")
    }
    play = play_stage_auto(play)
    if (wp$verbose) {
      li = c(list(.cond=play$.last.condition,.player=paste0(play$.last.player, collapse=", "),is.human = play$is.human.stage), play$hist)
      print(as.data.frame(li, row.names=FALSE))
      cat("\n")
    }
  }
  wp$play = play
  # A human stage that has been reached but not yet finished: wait for input.
  if (play$is.human.stage && (play$auto.stage.finished > play$human.stage.finished)) {
    wp$stage.num = play$auto.stage.finished
    if (wp$verbose) {
      cat("\nWait for human input in stage", wp$stage.num, wp$vg$stages[[wp$stage.num]]$name)
    }
  } else {
    # Otherwise the play is over; point past the last stage (end page).
    wp$stage.num = wp$num.stages +1
    if (wp$verbose) {
      cat("\nAll stages finished.")
    }
  }
  return(invisible(wp))
}
#' Sets the state of a web play to a play object
#'
#' Can for example be used to continue with a human after bots played some
#' earlier rounds. If \code{human} differs from the play's current human,
#' the seat is reassigned and the remaining players become bots.
#' @family Web Play
wp_set_to_play = function(wp, play, human=play$human) {
  # Reassign the human seat if the caller picked a different player.
  if (human != play$human) {
    play$human = human
    play$bot.player = setdiff(play$game$players, human)
  }
  wp$play = play
  wp$human = play$human
  # Resume at the stage that still needs human input.
  finished.auto = play$auto.stage.finished
  finished.human = play$human.stage.finished
  wp$stage.num = if (finished.auto > finished.human) finished.auto else finished.human + 1
  invisible(wp)
}
|
/R/wp.R
|
no_license
|
skranz/gtreeWebPlay
|
R
| false
| false
| 7,073
|
r
|
# wp stands for web play
#
# A global object that manages the state of play, possibly for several
# human players. A specific app object holds a reference to a wp and a
# player field.
# Accessor: the gtree game object of the current web play.
wp.game = function(wp=get_wp()) {
  current.play <- wp$play
  current.play$game
}
# Return the current stage object of the web play, or NULL when the play
# has not started yet (stage.num == 0) or is already past the last stage.
wp.stage = function(wp=get_wp()) {
  stage.num = wp$stage.num
  # use scalar || (not vectorized |) in a scalar if-condition
  if (stage.num < 1 || stage.num > length(wp$vg$stages)) return(NULL)
  wp$vg$stages[[stage.num]]
}
# Directory holding the page Rmd files: wp$pages.dir when set, otherwise the
# game's default pages directory. NOTE(review): arguments are promises, so the
# get.pages.dir() fallback is presumably only evaluated when wp$pages.dir is
# NULL -- confirm against the first.non.null implementation.
wp.pages.dir = function(wp=get_wp()) {
first.non.null(wp$pages.dir,get.pages.dir(wp.game(wp)))
}
# Map the current stage number to the name of the page to display:
# "start-page" before the first stage, "end-page" after the last one,
# otherwise the name of the active stage.
wp.page.name = function(wp=get_wp()) {
  n = wp$stage.num
  if (n == 0) return("start-page")
  if (n > length(wp$vg$stages)) return("end-page")
  wp.stage(wp)$name
}
# Get rmd file of currently shown page.
# Resolution order:
#   1. <pages.dir>/<page.name>.Rmd if it exists (a hand-written page);
#   2. otherwise <page.name>.auto.Rmd, generating it first via
#      make.page.rmd() when make.auto is TRUE; if copy.auto is TRUE the
#      auto file is copied to the plain .Rmd name and that copy is
#      returned (so it can be customized later);
#   3. NULL when no page file exists and none could be generated.
wp.page.file = function(wp=get_wp(), copy.auto=FALSE, make.auto=TRUE) {
page.name = wp.page.name(wp)
page.dir = wp.pages.dir(wp)
file = file.path(page.dir, paste0(page.name,".Rmd"))
if (file.exists(file)) return(file)
auto.file = file.path(page.dir, paste0(page.name,".auto.Rmd"))
# generate the auto page on demand
if (!file.exists(auto.file) & make.auto) {
make.page.rmd(wp.game(wp),page.name = page.name, stage=wp.stage(wp))
}
if (file.exists(auto.file)) {
if (copy.auto) {
file.copy(auto.file, file)
return(file)
}
return(auto.file)
}
return(NULL)
}
#' Create a new web play object
#'
#' @param game A gtree game generated with \code{\link[gtree]{new_game}}.
#' @param bots A list with one bots for every player. Also add a bot for the human player. You can call \code{\link{make_bots}} to conveniently create the bots.
#' @param human index of the player that is played by a human in the first play of the web app.
#' @param human_draw_method Method how the index of the human player is determined by default if a new play is started. The default \code{"cycle"} lets the human cycle through all players. \code{"random"} picks a random player, and \code{"fixed"} keeps the current player.
#' @param wpUI the id of the \code{uiOutput} element in the app ui where the web play will be shown.
#' @param verbose shall information about state of play be printed to the standard output?
#' @param pages.dir the directory in which the Rmd files for the stage pages can be found. By default \code{"./pages"}.
#' @param custom A list of custom parameters that will be passed to handlers.
#' @param pre.page.handler a function that is called before a page is shown to a human. It should return a list of values that can be accessed in the whiskers {{ }} of the page Rmd file.
#' @param post.page.handler a function that is called after a human made input in a stage. Can for example be used to update a population play summary. (See the KuhnPoker example)
#' @param finish.handler is called when the final results page of a play is left. The default handler simply starts a new play.
#' @param page.ui.fun optionally a function that returns for each page a shiny tag that will be shown. If NULL (default) we specify the page ui via Rmd files in the pages subfolder.
#' @family Web Play
new_wp = function(game,bots,human=draw_human_pos(human_draw_method=human_draw_method,numPlayers=game$vg$params$numPlayers, human=0),human_draw_method = c("cycle","random","fixed")[1], wpUI="wpUI", verbose=FALSE, pages.dir = file.path(getwd(),"pages"),custom=list(), pre.page.handler = NULL,post.page.handler = NULL, finish.handler = wp.default.finish.handler, comp.pages = as.environment(list()), page.ui.fun=NULL, ...) {
restore.point("new.wp")
# Initialize the underlying play object (nature / bot / human moves).
play = new_play(game,bots, human)
# Per-stage timing slots, NA until the corresponding stage has been played.
stage_secs = rep(NA_real_, length(game$vg$stages))
names(stage_secs) = names(game$vg$stages)
# The web play state is a mutable environment (not a list) so that handlers
# and helper functions can update it in place. stage.num == 0 means the
# start page, before the first stage.
wp = as.environment(list(play=play, vg=game$vg, stage.num=0, human=human,human_draw_method=human_draw_method, wpUI=wpUI, num.stages = length(game$vg$stages), verbose=verbose, pages.dir = pages.dir,custom=custom, pre.page.handler = pre.page.handler, post.page.handler=post.page.handler, finish.handler=finish.handler, stage_secs=stage_secs,comp.pages=comp.pages, page.ui.fun=page.ui.fun,...))
wp
}
# Determine the index of the human player for the next play, according to
# the draw method: "cycle" advances to the next seat (wrapping around),
# "random" picks a uniformly random seat, "fixed" keeps the current one
# (coerced to at least 1). Unknown methods leave `human` unchanged.
draw_human_pos = function(wp=NULL, human_draw_method=wp$human_draw_method, numPlayers=wp$vg$params$numPlayers, human = wp$human) {
  restore.point("draw_human_pos")
  if (human_draw_method == "random") {
    return(sample.int(numPlayers, 1))
  }
  if (human_draw_method == "cycle") {
    nxt = human + 1
    return(if (nxt > numPlayers) 1 else nxt)
  }
  if (human_draw_method == "fixed" && human < 1) {
    return(1)
  }
  human
}
#' Reset a web play to the start of a new play
#'
#' If you immediately want to start the new play. Call
#' \code{\link{wp_continue}} afterwards.
#'
#' @param wp A web play object
#' @param bots You can provide new bots. By default the current bots are used again.
#' @param human You can define a new index of the human player. By default the current human is used.
#' @family Web Play
wp_reset = function(wp=get_wp(), bots=wp$play$bots, human=draw_human_pos(wp)) {
restore.point("wp_reset")
# NOTE(review): the bots/human defaults lazily read the OLD wp$play; they
# are presumably forced by the new_play() call below, before wp$play is
# overwritten -- keep this statement first.
wp$play = new_play(wp$play$game,bots, human)
wp$stage.num = 0  # 0 = start page, before the first stage
wp$human = human
wp$bots = bots
wp$stage_secs[] = NA  # clear per-stage timings but keep the stage names
invisible(wp)
}
#' Copy a web play object
#'
#' Returns a fresh environment holding the same fields as \code{wp}, so that
#' later mutation of one copy does not affect the other.
#' @family Web Play
wp_copy = function(wp) {
  fields = as.list(wp)
  as.environment(fields)
}
# Is called by wp_continue_play.
# Play all moves of nature and bots of a web play up until the human player
# sees again a stage (or all stages are finished).
#
# @param wp the web play object
# @family Web Play
wp_play_until_human = function(wp) {
  restore.point("wp.play.until.human")
  play = wp$play
  game = wp.game(wp)
  # Advance automatic stages while no fresh human stage is pending and there
  # are stages left. Scalar && / || replace the vectorized & / | : the
  # while-condition must be a single logical value.
  while (
    ((play$is.human.stage == FALSE) ||
       (play$human.stage.finished >= play$auto.stage.finished)) &&
    (play$auto.stage.finished < wp$num.stages)
  ) {
    if (wp$verbose) {
      num = play$auto.stage.finished+1
      cat("\nPlay auto stage ",num, " ", wp$vg$stages[[num]]$name,"\n")
    }
    play = play_stage_auto(play)
    if (wp$verbose) {
      li = c(list(.cond=play$.last.condition,.player=paste0(play$.last.player, collapse=", "),is.human = play$is.human.stage), play$hist)
      print(as.data.frame(li, row.names=FALSE))
      cat("\n")
    }
  }
  wp$play = play
  # A human stage that has been reached but not yet finished: wait for input.
  if (play$is.human.stage && (play$auto.stage.finished > play$human.stage.finished)) {
    wp$stage.num = play$auto.stage.finished
    if (wp$verbose) {
      cat("\nWait for human input in stage", wp$stage.num, wp$vg$stages[[wp$stage.num]]$name)
    }
  } else {
    # Otherwise the play is over; point past the last stage (end page).
    wp$stage.num = wp$num.stages +1
    if (wp$verbose) {
      cat("\nAll stages finished.")
    }
  }
  return(invisible(wp))
}
#' Sets the state of a web play to a play object
#'
#' Can for example be used to continue with a human after bots played some
#' earlier rounds. If \code{human} differs from the play's current human,
#' the seat is reassigned and the remaining players become bots.
#' @family Web Play
wp_set_to_play = function(wp, play, human=play$human) {
  # Reassign the human seat if the caller picked a different player.
  if (human != play$human) {
    play$human = human
    play$bot.player = setdiff(play$game$players, human)
  }
  wp$play = play
  wp$human = play$human
  # Resume at the stage that still needs human input.
  finished.auto = play$auto.stage.finished
  finished.human = play$human.stage.finished
  wp$stage.num = if (finished.auto > finished.human) finished.auto else finished.human + 1
  invisible(wp)
}
|
## Draws one age per occurrence record from the min/max error interval of its
## date, returning `n` such data instances in a list (06/September/2018,
## Anderson Aires Eduardo).
##
## x         data.frame of occurrence records
## col_names names of the mean, min and max age columns, in that order
##           (they should contain the substrings "min" / "max")
## tempRes   temporal resolution of the environmental data (years, default 1000)
## n         number of data instances to create (default 2*nrow(x))
dataInstance = function(x, col_names, tempRes=1000, n=2*nrow(x)){
  ## input validation (error messages kept in Portuguese, as before);
  ## inherits()/is.*() are more robust than class(x) != "..." comparisons
  ## and also accept e.g. integer input for the numeric arguments
  if (!inherits(x, "data.frame")){
    stop("O dataset de entrada precisa ser um objeto da classe 'data.frame'.")
  }
  if (!is.character(col_names)){
    stop("O nome da coluna do nome das especies precisa ser um objeto da classe 'character', contendo os nomes das colunas com a idade media, idades minima e maxima dos resgistros.")
  }
  if (!is.numeric(tempRes)){
    stop("A resolucao temporal (tempRes) precisa ser um objeto da classe 'numeric'. Esse dado refere-se a resolucao temporal dos dados ambintais (o default é de 1000 anos).")
  }
  if (!is.numeric(n)){
    stop("O numeri de iteracoes (n) precisa ser um objeto da classe 'numeric'. Esse dado refere-se ao numero de instancias de dados a serem criadas.")
  }
  ## local variables
  ptsAgeRaw = x
  output = vector("list", n)  # preallocate instead of growing the list
  ## locate the min/max columns once -- they do not change between iterations
  inxMin = agrep(pattern='min', x=col_names, max.distance=1)
  inxMax = agrep(pattern='max', x=col_names, max.distance=1)
  ## guard also against zero or multiple fuzzy matches (the original test
  ## could produce a zero-length if-condition)
  if (length(inxMin) != 1 || length(inxMax) != 1 || inxMin == inxMax){
    stop("Falha na identificacao das colunas de idade media, minima e maxima. Nomeie essas colunas preferencialmente como 'mean', 'min' e 'max'.")
  }
  minmaxCols = col_names[c(inxMin, inxMax)]
  ## columns of x that are NOT age columns. Fixed: the original compared the
  ## two name vectors element-wise with `!=`, which recycles and selects the
  ## wrong columns; %in% implements the intended set-membership test.
  otherCols = !(names(ptsAgeRaw) %in% col_names)
  for (i in seq_len(n)){
    ## coerce age columns to numeric (turns text such as "pleistocene" into NA)
    ptsAge = apply(ptsAgeRaw[,col_names], 1, as.numeric)
    ptsAge = data.frame(t(ptsAge))
    ## fill NAs (positions 1/2/3 assume col_names is ordered mean, min, max):
    ## missing mean <- midpoint of interval; missing min/max <- the mean
    ## (i.e. a highly precise date)
    ptsAgeMeanNA = apply( ptsAge, 1, function(x) ifelse(is.na(x[1]), mean(x[2:3]), x[1]) )
    ptsAgeMinNA = apply( ptsAge, 1, function(x) ifelse(is.na(x[2]) & is.na(x[3]), x[1], x[2]) )
    ptsAgeMaxNA = apply( ptsAge, 1, function(x) ifelse(is.na(x[2]) & is.na(x[3]), x[1], x[3]) )
    ptsAge = data.frame(cbind(ptsAgeMeanNA,ptsAgeMinNA,ptsAgeMaxNA))
    ptsAge = data.frame(ptsAgeRaw[, otherCols, drop=FALSE], ptsAge)
    ## NOTE(review): relabeling assumes the non-age columns precede the age
    ## columns in x -- confirm for datasets ordered differently
    names(ptsAge) = names(ptsAgeRaw)
    ## draw one age inside the [min, max] interval and discretize to tempRes
    ptsAge$age = apply( ptsAge[,minmaxCols], 1, function(x) runif(1, min=x[1], max=x[2]) )
    ptsAge$age = round(ptsAge$age/tempRes)  # integer ages matching folder names
    ptsAge = ptsAge[,-match(col_names, names(ptsAge)) ]
    ## store this instance
    output[[i]] = ptsAge
  }
  return(output)
}
|
/dataInstance.R
|
no_license
|
AndersonEduardo/R-Scripts
|
R
| false
| false
| 3,044
|
r
|
## Draws one age per occurrence record from the min/max error interval of its
## date, returning `n` such data instances in a list (06/September/2018,
## Anderson Aires Eduardo).
##
## x         data.frame of occurrence records
## col_names names of the mean, min and max age columns, in that order
##           (they should contain the substrings "min" / "max")
## tempRes   temporal resolution of the environmental data (years, default 1000)
## n         number of data instances to create (default 2*nrow(x))
dataInstance = function(x, col_names, tempRes=1000, n=2*nrow(x)){
  ## input validation (error messages kept in Portuguese, as before);
  ## inherits()/is.*() are more robust than class(x) != "..." comparisons
  ## and also accept e.g. integer input for the numeric arguments
  if (!inherits(x, "data.frame")){
    stop("O dataset de entrada precisa ser um objeto da classe 'data.frame'.")
  }
  if (!is.character(col_names)){
    stop("O nome da coluna do nome das especies precisa ser um objeto da classe 'character', contendo os nomes das colunas com a idade media, idades minima e maxima dos resgistros.")
  }
  if (!is.numeric(tempRes)){
    stop("A resolucao temporal (tempRes) precisa ser um objeto da classe 'numeric'. Esse dado refere-se a resolucao temporal dos dados ambintais (o default é de 1000 anos).")
  }
  if (!is.numeric(n)){
    stop("O numeri de iteracoes (n) precisa ser um objeto da classe 'numeric'. Esse dado refere-se ao numero de instancias de dados a serem criadas.")
  }
  ## local variables
  ptsAgeRaw = x
  output = vector("list", n)  # preallocate instead of growing the list
  ## locate the min/max columns once -- they do not change between iterations
  inxMin = agrep(pattern='min', x=col_names, max.distance=1)
  inxMax = agrep(pattern='max', x=col_names, max.distance=1)
  ## guard also against zero or multiple fuzzy matches (the original test
  ## could produce a zero-length if-condition)
  if (length(inxMin) != 1 || length(inxMax) != 1 || inxMin == inxMax){
    stop("Falha na identificacao das colunas de idade media, minima e maxima. Nomeie essas colunas preferencialmente como 'mean', 'min' e 'max'.")
  }
  minmaxCols = col_names[c(inxMin, inxMax)]
  ## columns of x that are NOT age columns. Fixed: the original compared the
  ## two name vectors element-wise with `!=`, which recycles and selects the
  ## wrong columns; %in% implements the intended set-membership test.
  otherCols = !(names(ptsAgeRaw) %in% col_names)
  for (i in seq_len(n)){
    ## coerce age columns to numeric (turns text such as "pleistocene" into NA)
    ptsAge = apply(ptsAgeRaw[,col_names], 1, as.numeric)
    ptsAge = data.frame(t(ptsAge))
    ## fill NAs (positions 1/2/3 assume col_names is ordered mean, min, max):
    ## missing mean <- midpoint of interval; missing min/max <- the mean
    ## (i.e. a highly precise date)
    ptsAgeMeanNA = apply( ptsAge, 1, function(x) ifelse(is.na(x[1]), mean(x[2:3]), x[1]) )
    ptsAgeMinNA = apply( ptsAge, 1, function(x) ifelse(is.na(x[2]) & is.na(x[3]), x[1], x[2]) )
    ptsAgeMaxNA = apply( ptsAge, 1, function(x) ifelse(is.na(x[2]) & is.na(x[3]), x[1], x[3]) )
    ptsAge = data.frame(cbind(ptsAgeMeanNA,ptsAgeMinNA,ptsAgeMaxNA))
    ptsAge = data.frame(ptsAgeRaw[, otherCols, drop=FALSE], ptsAge)
    ## NOTE(review): relabeling assumes the non-age columns precede the age
    ## columns in x -- confirm for datasets ordered differently
    names(ptsAge) = names(ptsAgeRaw)
    ## draw one age inside the [min, max] interval and discretize to tempRes
    ptsAge$age = apply( ptsAge[,minmaxCols], 1, function(x) runif(1, min=x[1], max=x[2]) )
    ptsAge$age = round(ptsAge$age/tempRes)  # integer ages matching folder names
    ptsAge = ptsAge[,-match(col_names, names(ptsAge)) ]
    ## store this instance
    output[[i]] = ptsAge
  }
  return(output)
}
|
## Creating a special "matrix" object that caches its inverse
## and then computing the inverse of that special "matrix" object
## Creating a special matrix object that caches its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Fixed: R's null constant is NULL; lowercase `null` is an undefined name
  # and made every call to this function error.
  invert <- NULL
  set <- function(y) {
    x <<- y
    invert <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  # Fixed typo: was `set_innverse`, which left `set_inverse` undefined and
  # made the list() construction below fail.
  set_inverse <- function(inverse) invert <<- inverse
  get_inverse <- function() invert
  list(set = set, get = get, set_inverse = set_inverse, get_inverse = get_inverse)
}
## Calculating the inverse of the special matrix created above, reusing a
## cached inverse when one is available.
cacheSolve <- function(x, ...) {
  # Fixed: the cached value was read into `inverse` but then tested and
  # returned as `invert` (an undefined name); use one variable throughout.
  invert <- x$get_inverse()
  if (!is.null(invert)) {
    message("getting cached data")
    return(invert)
  }
  matrix <- x$get()
  invert <- solve(matrix, ...)
  x$set_inverse(invert)
  invert
}
|
/cachematrix.R
|
no_license
|
mt86/Cache-Matrix
|
R
| false
| false
| 881
|
r
|
## Creating a special "matrix" object that caches its inverse
## and then computing the inverse of that special "matrix" object
## Creating a special matrix object that caches its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Fixed: R's null constant is NULL; lowercase `null` is an undefined name
  # and made every call to this function error.
  invert <- NULL
  set <- function(y) {
    x <<- y
    invert <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  # Fixed typo: was `set_innverse`, which left `set_inverse` undefined and
  # made the list() construction below fail.
  set_inverse <- function(inverse) invert <<- inverse
  get_inverse <- function() invert
  list(set = set, get = get, set_inverse = set_inverse, get_inverse = get_inverse)
}
## Calculating the inverse of the special matrix created above, reusing a
## cached inverse when one is available.
cacheSolve <- function(x, ...) {
  # Fixed: the cached value was read into `inverse` but then tested and
  # returned as `invert` (an undefined name); use one variable throughout.
  invert <- x$get_inverse()
  if (!is.null(invert)) {
    message("getting cached data")
    return(invert)
  }
  matrix <- x$get()
  invert <- solve(matrix, ...)
  x$set_inverse(invert)
  invert
}
|
## Scatter-plot experiments on the midwest dataset.
## Fix: the script calls ggplot()/aes()/geom_* but never attached ggplot2.
library(ggplot2)
midwest <- read.csv("http://goo.gl/G1K41K")
options(scipen = 999)  # avoid scientific notation on axis labels
# used to initialize the ggplot
# aes specifies x and y axes
# geom_point gives a scatter plot
# geom_smooth gives a line, in this case a linear model
g <- ggplot(data = midwest, aes(x = area, y = poptotal)) + geom_point() +
  geom_smooth(method = "lm", se = FALSE)
plot(g)
# can be used to view the best fit line after removing outliers
# (xlim/ylim DROP points outside the limits, so the line is refitted)
g1 <- g + xlim(c(0, 0.1)) + ylim(c(0, 1000000))
plot(g1)
# original line can be preserved using coord_cartesian (zooms without dropping data)
g2 <- g + coord_cartesian(xlim = c(0, 0.1), ylim = c(0, 1000000))
plot(g2)
g1 <- g + labs(title = "Midwest Data", x = "Area", y = "Total Population",
               subtitle = "From midwest dataset", caption = "Midwest demographics")
plot(g1)
# ALL IN ONE
ggplot(midwest, aes(x = area, y = poptotal)) + geom_point(col = "steelblue") +
  geom_smooth(method = "lm", se = FALSE, col = "firebrick") +
  coord_cartesian(xlim = c(0, 0.1), ylim = c(0, 1000000)) +
  labs(title = "Midwest data", x = "Area", y = "Population",
       subtitle = "from midwest data", caption = "MIdwest demographics")
# color based on a different column: give the column name inside aes()
gg <- ggplot(midwest, aes(x = area, y = poptotal)) + geom_point(aes(col = state)) +
  geom_smooth(method = "lm", se = FALSE, col = "firebrick") +
  coord_cartesian(xlim = c(0, 0.1), ylim = c(0, 1000000)) +
  labs(title = "Midwest data", x = "Area", y = "Population",
       subtitle = "from midwest data", caption = "MIdwest demographics")
# change color palette (fixed: this line accidentally also reversed the x
# axis, duplicating the dedicated "reverse x axis" statement below)
gg + scale_color_brewer(palette = "Set1")
# ALL color palettes can be seen with this package
library(RColorBrewer)
head(brewer.pal.info, 10)
# reverse x axis
gg + scale_color_brewer(palette = "Set1") + scale_x_reverse()
# customizing the y-axis breaks and labels
gg + scale_color_brewer(palette = "Set1") +
  scale_y_continuous(breaks = seq(0, 1000000, 200000),
                     labels = function(x) paste0(x / 1000, "K"))
|
/ggplot_experiments.R
|
no_license
|
mahtuog/R
|
R
| false
| false
| 1,924
|
r
|
## Scatter-plot experiments on the midwest dataset.
## Fix: the script calls ggplot()/aes()/geom_* but never attached ggplot2.
library(ggplot2)
midwest <- read.csv("http://goo.gl/G1K41K")
options(scipen = 999)  # avoid scientific notation on axis labels
# used to initialize the ggplot
# aes specifies x and y axes
# geom_point gives a scatter plot
# geom_smooth gives a line, in this case a linear model
g <- ggplot(data = midwest, aes(x = area, y = poptotal)) + geom_point() +
  geom_smooth(method = "lm", se = FALSE)
plot(g)
# can be used to view the best fit line after removing outliers
# (xlim/ylim DROP points outside the limits, so the line is refitted)
g1 <- g + xlim(c(0, 0.1)) + ylim(c(0, 1000000))
plot(g1)
# original line can be preserved using coord_cartesian (zooms without dropping data)
g2 <- g + coord_cartesian(xlim = c(0, 0.1), ylim = c(0, 1000000))
plot(g2)
g1 <- g + labs(title = "Midwest Data", x = "Area", y = "Total Population",
               subtitle = "From midwest dataset", caption = "Midwest demographics")
plot(g1)
# ALL IN ONE
ggplot(midwest, aes(x = area, y = poptotal)) + geom_point(col = "steelblue") +
  geom_smooth(method = "lm", se = FALSE, col = "firebrick") +
  coord_cartesian(xlim = c(0, 0.1), ylim = c(0, 1000000)) +
  labs(title = "Midwest data", x = "Area", y = "Population",
       subtitle = "from midwest data", caption = "MIdwest demographics")
# color based on a different column: give the column name inside aes()
gg <- ggplot(midwest, aes(x = area, y = poptotal)) + geom_point(aes(col = state)) +
  geom_smooth(method = "lm", se = FALSE, col = "firebrick") +
  coord_cartesian(xlim = c(0, 0.1), ylim = c(0, 1000000)) +
  labs(title = "Midwest data", x = "Area", y = "Population",
       subtitle = "from midwest data", caption = "MIdwest demographics")
# change color palette (fixed: this line accidentally also reversed the x
# axis, duplicating the dedicated "reverse x axis" statement below)
gg + scale_color_brewer(palette = "Set1")
# ALL color palettes can be seen with this package
library(RColorBrewer)
head(brewer.pal.info, 10)
# reverse x axis
gg + scale_color_brewer(palette = "Set1") + scale_x_reverse()
# customizing the y-axis breaks and labels
gg + scale_color_brewer(palette = "Set1") +
  scale_y_continuous(breaks = seq(0, 1000000, 200000),
                     labels = function(x) paste0(x / 1000, "K"))
|
#' Download shape files of micro region as sf objects
#'
#' Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
#'
#' @param year Year of the data (defaults to 2010)
#' @param code_micro 5-digit code of a micro region. If the two-digit code or a two-letter uppercase abbreviation of
#' a state is passed, (e.g. 33 or "RJ") the function will load all micro regions of that state. If code_micro="all",
#' all micro regions of the country are loaded.
#' @param simplified Logic TRUE or FALSE, indicating whether the function returns the 'original' dataset with high resolution or a dataset with 'simplified' borders (Defaults to TRUE)
#' @param showProgress Logical. Defaults to (TRUE) display progress bar
#'
#' @export
#' @family general area functions
#' @examples \donttest{
#'
#' library(geobr)
#'
#' # Read an specific micro region a given year
#' micro <- read_micro_region(code_micro=11008, year=2018)
#'
#' # Read micro regions of a state at a given year
#' micro <- read_micro_region(code_micro=12, year=2017)
#' micro <- read_micro_region(code_micro="AM", year=2000)
#'
#'# Read all micro regions at a given year
#' micro <- read_micro_region(code_micro="all", year=2010)
#' }
#'
#'
read_micro_region <- function(code_micro="all", year=2010, simplified=TRUE, showProgress=TRUE){

  # Get metadata with data url addresses for the requested year/resolution
  temp_meta <- select_metadata(geography="micro_region", year=year, simplified=simplified)

  # if code_micro=="all", download and return the entire country
  if (code_micro == "all") {
    message("Loading data for the whole country. This might take a few minutes.\n")

    # list paths of files to download
    file_url <- as.character(temp_meta$download_path)

    # download files
    temp_sf <- download_gpkg(file_url, progress_bar = showProgress)
    return(temp_sf)
  }

  # Otherwise the first two characters must match a state code or a state
  # abbreviation. Use scalar `&&` (not elementwise `&`) in the scalar `if`.
  if (!(substr(x = code_micro, 1, 2) %in% temp_meta$code) && !(substr(x = code_micro, 1, 2) %in% temp_meta$code_abrev)) {
    stop("Error: Invalid Value to argument code_micro.")
  }

  # list paths of files to download: numeric inputs are matched against the
  # numeric state code, character inputs against the state abbreviation
  if (is.numeric(code_micro)){ file_url <- as.character(subset(temp_meta, code==substr(code_micro, 1, 2))$download_path) }
  if (is.character(code_micro)){ file_url <- as.character(subset(temp_meta, code_abrev==substr(code_micro, 1, 2))$download_path) }

  # download the state-level file
  # (named `temp_sf` rather than `sf` to avoid masking the sf package name)
  temp_sf <- download_gpkg(file_url, progress_bar = showProgress)

  # a two-character input selects all micro regions of that state
  if (nchar(code_micro) == 2) {
    return(temp_sf)
  }

  # a full 5-digit code selects a single micro region
  if (code_micro %in% temp_sf$code_micro) {
    x <- code_micro
    temp_sf <- subset(temp_sf, code_micro==x)
    return(temp_sf)
  }

  stop("Error: Invalid Value to argument code_micro. There was no micro region with this code in this year")
}
|
/r-package/R/read_micro_region.R
|
no_license
|
limanalytics/geobr
|
R
| false
| false
| 2,759
|
r
|
#' Download shape files of micro region as sf objects
#'
#' Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
#'
#' @param year Year of the data (defaults to 2010)
#' @param code_micro 5-digit code of a micro region. If the two-digit code or a two-letter uppercase abbreviation of
#' a state is passed, (e.g. 33 or "RJ") the function will load all micro regions of that state. If code_micro="all",
#' all micro regions of the country are loaded.
#' @param simplified Logic TRUE or FALSE, indicating whether the function returns the 'original' dataset with high resolution or a dataset with 'simplified' borders (Defaults to TRUE)
#' @param showProgress Logical. Defaults to (TRUE) display progress bar
#'
#' @export
#' @family general area functions
#' @examples \donttest{
#'
#' library(geobr)
#'
#' # Read an specific micro region a given year
#' micro <- read_micro_region(code_micro=11008, year=2018)
#'
#' # Read micro regions of a state at a given year
#' micro <- read_micro_region(code_micro=12, year=2017)
#' micro <- read_micro_region(code_micro="AM", year=2000)
#'
#'# Read all micro regions at a given year
#' micro <- read_micro_region(code_micro="all", year=2010)
#' }
#'
#'
read_micro_region <- function(code_micro="all", year=2010, simplified=TRUE, showProgress=TRUE){

  # Look up the download URL metadata for this geography/year/resolution
  metadata <- select_metadata(geography="micro_region", year=year, simplified=simplified)

  # Whole-country request: fetch every file listed in the metadata
  if (code_micro == "all") {
    message("Loading data for the whole country. This might take a few minutes.\n")
    country_urls <- as.character(metadata$download_path)
    country_sf <- download_gpkg(country_urls, progress_bar = showProgress)
    return(country_sf)
  }

  # The leading two characters must identify a state (code or abbreviation)
  prefix <- substr(x = code_micro, 1, 2)
  if (!(prefix %in% metadata$code) & !(prefix %in% metadata$code_abrev)) {
    stop("Error: Invalid Value to argument code_micro.")
  }

  # Numeric inputs match the numeric state code, character inputs the abbreviation
  if (is.numeric(code_micro)) { state_url <- as.character(subset(metadata, code == prefix)$download_path) }
  if (is.character(code_micro)) { state_url <- as.character(subset(metadata, code_abrev == prefix)$download_path) }

  # Fetch the state-level geopackage
  state_sf <- download_gpkg(state_url, progress_bar = showProgress)

  # Two-character input: the caller wants every micro region of the state
  if (nchar(code_micro) == 2) {
    return(state_sf)
  }

  # Full 5-digit input: keep only the matching micro region
  if (code_micro %in% state_sf$code_micro) {
    x <- code_micro
    return(subset(state_sf, code_micro == x))
  }

  stop("Error: Invalid Value to argument code_micro. There was no micro region with this code in this year")
}
|
## Read data: semicolon-separated, "?" encodes missing values in this dataset.
## Spell out TRUE/FALSE rather than the reassignable T/F shortcuts.
data <- read.table("household_power_consumption.txt", sep=";", header=TRUE, stringsAsFactors=FALSE, na.strings="?")
data$Date <- as.Date(data$Date, "%d/%m/%Y") ## Convert Date from "character" to "Date"
## Keep only the two target days (1-2 Feb 2007); subset() evaluates column
## names inside the data frame, so the redundant data$ prefix is dropped, and
## comparing Date against Date (not character) via %in% is explicit.
myData <- subset(data, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
## Ensure global active power is numeric (read.table already parses it given
## na.strings="?", but the defensive coercion is kept)
myData$Global_active_power <- as.numeric(myData$Global_active_power)
## Create histogram plot and write it to a 480x480 PNG
png(filename = "plot1.png", width = 480, height = 480)
hist(myData$Global_active_power, col='red', main="Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.off()
|
/plot1.R
|
no_license
|
jing-olemiss/ExData_Plotting1
|
R
| false
| false
| 698
|
r
|
## Load the household power data; fields are semicolon-separated and "?"
## marks a missing reading.
raw <- read.table("household_power_consumption.txt", sep=";", header=TRUE, stringsAsFactors=FALSE, na.strings="?")

## Parse the Date column (stored as dd/mm/yyyy text) into Date objects
raw$Date <- as.Date(raw$Date, "%d/%m/%Y")

## Restrict to the two days of interest: 2007-02-01 and 2007-02-02
twoDays <- subset(raw, Date == "2007-02-01" | Date == "2007-02-02")

## Coerce the target column to numeric
twoDays$Global_active_power <- as.numeric(twoDays$Global_active_power)

## Render the histogram into a 480x480 PNG file
png(filename = "plot1.png", width = 480, height = 480)
hist(twoDays$Global_active_power, col='red', main="Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.off()
|
# https://google.github.io/CausalImpact/CausalImpact.html
# Estimate the causal effect of an event on GBP/USD, using EUR/USD as the
# control series, following the CausalImpact tutorial linked above.
library(CausalImpact)
setwd("set_directory")  # placeholder -- replace with the folder holding the CSVs
eurusd<-read.csv("eurusd.csv")  # assumes a `Value` column -- TODO confirm file layout
gbpusd<-read.csv("gbpusd.csv")  # assumes a `Value` column -- TODO confirm file layout
set.seed(1)  # model fitting involves simulation; fix the seed for reproducibility
# Build the input matrix: response (y = GBP/USD) must be the FIRST column,
# covariates (x1 = EUR/USD) after it, per the CausalImpact input convention.
x1 <- eurusd$Value
y <- gbpusd$Value
data <- cbind(y, x1)
# Quick sanity checks on the assembled matrix
dim(data)
head(data)
matplot(data, type = "l")  # both series overlaid as lines
# Row indices (not dates): observations 1-230 are pre-intervention,
# 231-249 post-intervention -- presumably trading days; verify against the CSVs.
pre.period <- c(1, 230)
post.period <- c(231, 249)
impact <- CausalImpact(data, pre.period, post.period)
plot(impact)  # default panel plot of observed vs counterfactual
summary(impact)  # numeric summary of the estimated effect
summary(impact, "report")  # plain-English narrative report
impact$summary  # summary table as a data frame, for programmatic use
|
/CausalImpact_currencies.R
|
no_license
|
dranatom120/economics-time-series
|
R
| false
| false
| 464
|
r
|
# https://google.github.io/CausalImpact/CausalImpact.html
# Causal-impact analysis of GBP/USD with EUR/USD as the control series,
# replicating the package tutorial at the URL above.
library(CausalImpact)
setwd("set_directory")  # placeholder path -- set to the directory containing the CSVs
eurusd<-read.csv("eurusd.csv")  # expected to contain a `Value` column -- TODO verify
gbpusd<-read.csv("gbpusd.csv")  # expected to contain a `Value` column -- TODO verify
set.seed(1)  # pin the RNG so the stochastic model fit is reproducible
# CausalImpact treats the first column of `data` as the response, so the
# GBP/USD series (y) goes first and the EUR/USD covariate (x1) second.
x1 <- eurusd$Value
y <- gbpusd$Value
data <- cbind(y, x1)
# Inspect the matrix before modeling
dim(data)
head(data)
matplot(data, type = "l")  # overlay both series as lines
# Pre/post windows expressed as row indices: rows 1-230 before the
# intervention, rows 231-249 after -- confirm these cutoffs against the data.
pre.period <- c(1, 230)
post.period <- c(231, 249)
impact <- CausalImpact(data, pre.period, post.period)
plot(impact)  # three-panel diagnostic plot
summary(impact)  # tabular effect estimates
summary(impact, "report")  # auto-generated prose report
impact$summary  # underlying summary data frame
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.