library(shiny)
library(plotly)
df <- read.csv("arsondeathsbyyear.csv")
fluidPage(
  titlePanel("Arson deaths by selected years"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("years",
                  "Select a range of years",
                  min = 2010,
                  max = 2015,
                  value = c(2010, 2010),
                  sep = "")
    ),
    mainPanel(
      plotlyOutput("linechart")
    )
  )
) | /ui.R | no_license | emmargbr/MA415-Final-Project | R | false | false | 444 | r |
Delta<-list(
diagonal=200,
radius=100,
homedHeight=300,
xstop=0,
ystop=0,
zstop=0,
xadj=0,
yadj=0,
zadj=0)
Delta$towerX<-c(-Delta$radius*cos((30+Delta$xadj)*pi/180),Delta$radius*cos((30-Delta$yadj)*pi/180),-Delta$radius*sin((Delta$zadj)*pi/180))
Delta$towerY<-c(-Delta$radius*sin((30+Delta$xadj)*pi/180),-Delta$radius*sin((30-Delta$yadj)*pi/180),Delta$radius*cos((Delta$zadj)*pi/180))
Delta$Xbc<-Delta$towerX[3]-Delta$towerX[2]
Delta$Xca<-Delta$towerX[1]-Delta$towerX[3]
Delta$Xab<-Delta$towerX[2]-Delta$towerX[1]
Delta$Ybc<-Delta$towerY[3]-Delta$towerY[2]
Delta$Yca<-Delta$towerY[1]-Delta$towerY[3]
Delta$Yab<-Delta$towerY[2]-Delta$towerY[1]
Delta$coreFa<-Delta$towerX[1]^2 + Delta$towerY[1]^2
Delta$coreFb<-Delta$towerX[2]^2 + Delta$towerY[2]^2
Delta$coreFc<-Delta$towerX[3]^2 + Delta$towerY[3]^2
Delta$Q<-2*(Delta$Xca*Delta$Yab-Delta$Xab*Delta$Yca)
inverseTransform<-function(Ha,Hb,Hc) {
Fa<-Delta$coreFa + Ha^2
Fb<-Delta$coreFb + Hb^2
Fc<-Delta$coreFc + Hc^2
P<-Delta$Xbc*Fa + Delta$Xca*Fb + Delta$Xab*Fc
S<-Delta$Ybc*Fa + Delta$Yca*Fb + Delta$Yab*Fc
R<-2*(Delta$Xbc*Ha + Delta$Xca*Hb + Delta$Xab*Hc)
U<-2*(Delta$Ybc*Ha + Delta$Yca*Hb + Delta$Yab*Hc)
A<- U^2 + R^2 + Delta$Q^2
minusHalfB<- S*U + P*R + Ha*Delta$Q^2 + Delta$towerX[1]*U*Delta$Q-Delta$towerY[1]*R*Delta$Q
C<-(S + Delta$towerX[1]*Delta$Q)^2 + (P-Delta$towerY[1]*Delta$Q)^2 + (Ha^2-Delta$diagonal^2)*Delta$Q^2
result<-(minusHalfB-sqrt(minusHalfB^2-A*C))/A
return(result)
}
# inverseTransform() must be defined before the next line runs; the original
# script called it before the definition, which fails when sourced top to bottom.
Delta$homedCarriageHeight<-Delta$homedHeight+Delta$diagonal-inverseTransform(Delta$diagonal,Delta$diagonal,Delta$diagonal)
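# Illustrative sanity check (not in the original script). With the default
# geometry (xadj = yadj = zadj = 0) and equal carriage heights h, the effector
# sits at x = y = 0 by symmetry, so the trilateration must reduce to
# z = h - sqrt(diagonal^2 - radius^2). The tolerance below is an arbitrary choice.
h.test<-300
z.expected<-h.test-sqrt(Delta$diagonal^2-Delta$radius^2)
z.computed<-inverseTransform(h.test,h.test,h.test)
if(abs(z.expected-z.computed)>1e-6) {
warning("inverseTransform() disagrees with the symmetric-height identity")
}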
#########################################################################################
##
## Perform calibration of a linear delta robot
##
#########################################################################################
dDiagonalRod<-200 ## Diagonal rod 200 mm
dRadius<-100 ## Projected radius downward from diagonal rod
towerXAngle<-210				## Degrees of tower X
towerYAngle<-330				## Degrees of tower Y
towerZAngle<-90					## Degrees of tower Z
# Note: this helper reads dTower1x/dTower1y etc. from the global environment;
# in the original script those were only created inside .calc() below, so the
# tower coordinates must be defined at the top level before calling it directly.
cartesianToDelta<-function(X,Y,Z) {
dX<-sqrt(dDiagonalRod^2 - (dTower1x-X)^2 - (dTower1y-Y)^2) + Z
dY<-sqrt(dDiagonalRod^2 - (dTower2x-X)^2 - (dTower2y-Y)^2) + Z
dZ<-sqrt(dDiagonalRod^2 - (dTower3x-X)^2 - (dTower3y-Y)^2) + Z
return(c(dX,dY,dZ))
}
# Solves the forward (delta-to-Cartesian) trilateration; despite the name it
# returns only the Cartesian z coordinate of the effector.
deltaToCartesian<-function(dX,tower) {
dY<-dX[2]
dZ<-dX[3]
dX<-dX[1]
Fa<-tower[1]^2+tower[2]^2+dX^2
Fb<-tower[3]^2+tower[4]^2+dY^2
Fc<-tower[5]^2+tower[6]^2+dZ^2
P<-(tower[5]-tower[3])*Fa + (tower[1]-tower[5])*Fb + (tower[3]-tower[1])*Fc
S<-(tower[6]-tower[4])*Fa + (tower[2]-tower[6])*Fb + (tower[4]-tower[2])*Fc
R<-2*((tower[5]-tower[3])*dX + (tower[1]-tower[5])*dY + (tower[3]-tower[1])*dZ)
U<-2*((tower[6]-tower[4])*dX + (tower[2]-tower[6])*dY + (tower[4]-tower[2])*dZ)
# Q mirrors Delta$Q above: 2*(Xca*Yab - Xab*Yca), with Xca = tower[1]-tower[5].
# The original used (tower[1]-tower[3]) in the first term, which mixes the
# x-coordinates of towers A and B.
Q<-2*((tower[1]-tower[5])*(tower[4]-tower[2])-(tower[3]-tower[1])*(tower[2]-tower[6]))
A<- U^2 + R^2 + Q^2
minusHalfB<- S*U + P*R + dX*Q^2 + tower[1]*U*Q-tower[2]*R*Q
C<-(S + tower[1]*Q)^2 + (P-tower[2]*Q)^2 + (dX^2-dDiagonalRod^2)*Q^2
result<-(minusHalfB-sqrt(minusHalfB^2-A*C))/A
return(result)
}
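# Illustrative round-trip check (not in the original script). Build the nominal
# tower coordinates (the same formulas used inside .calc() below), convert an
# arbitrary Cartesian point to carriage heights, and confirm that
# deltaToCartesian() recovers its z coordinate.
dTower1x<-cos(towerXAngle*pi/180)*dRadius
dTower1y<-sin(towerXAngle*pi/180)*dRadius
dTower2x<-cos(towerYAngle*pi/180)*dRadius
dTower2y<-sin(towerYAngle*pi/180)*dRadius
dTower3x<-cos(towerZAngle*pi/180)*dRadius
dTower3y<-sin(towerZAngle*pi/180)*dRadius
heights<-cartesianToDelta(10,-5,0)
z.roundtrip<-deltaToCartesian(heights,tower=c(dTower1x,dTower1y,dTower2x,dTower2y,dTower3x,dTower3y))
abs(z.roundtrip)<1e-6 # should be TRUE if the two transforms are consistent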
corr.A<-1
corr.B<-10
end.A<-5
end.B<-0
end.C<--3
corr.radius<-10
slope.x<-0
slope.y<-0
error<-matrix(0,nrow=20,ncol=3)
for(i in 1:nrow(error)) {
error[i,]<-c(runif(1,-50,50),runif(1,-50,50),0)
error[i,3]<-error[i,1]*0.1+error[i,2]*0.1
}
.calc<-function(values) {
out<-numeric(nrow(error))
corr.A<-values[1]
corr.B<-values[2]
end.A<-values[3]
end.B<-values[4]
end.C<-values[5]
corr.radius<-values[6]
slope.x<-values[7]
slope.y<-values[8]
dTower1x<-(cos((towerXAngle)*pi/180)*(dRadius))
dTower1y<-(sin((towerXAngle)*pi/180)*(dRadius))
dTower2x<-(cos((towerYAngle)*pi/180)*(dRadius))
dTower2y<-(sin((towerYAngle)*pi/180)*(dRadius))
dTower3x<-(cos(towerZAngle*pi/180)*(dRadius))
dTower3y<-(sin(towerZAngle*pi/180)*(dRadius))
dTower1x2<-(cos((towerXAngle+corr.A)*pi/180)*(dRadius+corr.radius))
dTower1y2<-(sin((towerXAngle+corr.A)*pi/180)*(dRadius+corr.radius))
dTower2x2<-(cos((towerYAngle+corr.B)*pi/180)*(dRadius+corr.radius))
dTower2y2<-(sin((towerYAngle+corr.B)*pi/180)*(dRadius+corr.radius))
dTower3x2<-(cos(towerZAngle*pi/180)*(dRadius+corr.radius))
dTower3y2<-(sin(towerZAngle*pi/180)*(dRadius+corr.radius))
for(i in 1:nrow(error)) {
dX<-sqrt(dDiagonalRod^2 - (dTower1x-error[i,1])^2 - (dTower1y-error[i,2])^2)+end.A
dY<-sqrt(dDiagonalRod^2 - (dTower2x-error[i,1])^2 - (dTower2y-error[i,2])^2)+end.B
dZ<-sqrt(dDiagonalRod^2 - (dTower3x-error[i,1])^2 - (dTower3y-error[i,2])^2)+end.C
out[i]<-(error[i,3]+error[i,1]*slope.x+error[i,2]*slope.y-deltaToCartesian(c(dX,dY,dZ),tower=c(dTower1x2,dTower1y2,dTower2x2,dTower2y2,dTower3x2,dTower3y2)))^2
}
return(sum(out))
}
# optim() expects a numeric vector of starting values, so use c() rather than list().
calculation<-optim(par=c(corr.A,corr.B,end.A,end.B,end.C,corr.radius,slope.x,slope.y),.calc,method="BFGS")
calculation
| /Delta.Calibration.R | no_license | CoreyGiles/deltaKinematics | R | false | false | 4,972 | r |
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78941295492229e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615765358-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_equations.R
\name{generate_equations}
\alias{generate_equations}
\title{Turn a model into a set of differential equations displayed as LaTeX/HTML object}
\usage{
generate_equations(mbmodel)
}
\arguments{
\item{mbmodel}{modelbuilder model structure, either as list object or file name}
}
\value{
The function returns equations as an html object
}
\description{
The model needs to adhere to the structure specified by the modelbuilder package.
Models built using the modelbuilder package automatically have the right structure;
a user can also build a model list structure themselves, following the specifications.
If the user provides a file name, this file needs to contain an object called 'model'
that holds a valid modelbuilder model structure.
}
\details{
This function takes as input a model and produces output that displays ODE equations
}
\author{
Andreas Handel
}
| /man/generate_equations.Rd | no_license | ahgroup/modelbuilder | R | false | true | 956 | rd |
## Put comments here that give an overall description of what your
## functions do
# The following functions are useful to cache the inverse of a matrix.
install.packages("swirl")
## Write a short comment describing this function
# this function set the value of the matrix
# get the value of the matrix
# set the value of inverse of the matrix
# get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## Write a short comment describing this function
#The following function returns the inverse of the matrix. It first checks if
# the inverse has already been computed. If so, it gets the result and skips the
# computation. If not, it computes the inverse, sets the value in the cache via
# setinverse function.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data.")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
}
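## Example usage (illustrative; not part of the original file):
## > m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## > cacheSolve(m)   # computes, caches, and returns the inverse
## > cacheSolve(m)   # prints "getting cache data." and returns the cached inverse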
| /cachematrix.R | no_license | ganeshnbs07/ProgrammingAssignment2 | R | false | false | 1,155 | r |
data <- read.csv("datasets/world-happiness-report-2017.csv")
str(data)
summary(data)
set.seed(10)
source('Utility.r')
#Handle NA values
which(is.na(data))
#Impute NAs in Health.Life.Expectancy
shapiro.test(data$Health.Life.Expectancy)
#Not normally distributed, so impute with the median
data$Health.Life.Expectancy[is.na(data$Health.Life.Expectancy)] <- median(data$Health.Life.Expectancy,na.rm=T)
#Impute NAs in Generosity
shapiro.test(data$Generosity)
#Not normally distributed, so impute with the median
data$Generosity[is.na(data$Generosity)] <- median(data$Generosity,na.rm = T)
#Re-check for NA values
which(is.na(data))
#Remove outliers
apply(data[,-1],2,function(x) boxplot.stats(x)$out)
#Family
boxplot.stats(data$Family)$out
quantilesFamily <- as.vector(quantile(data$Family,probs=seq(from=0,to=0.1,by=0.025)))
limitFamily <- quantilesFamily[2]
data$Family[data$Family<limitFamily] <- limitFamily
#Trust Government
boxplot.stats(data$Trust.Government.Corruption)$out
quantilesGov <- as.vector(quantile(data$Trust.Government.Corruption,probs=seq(from=0.9,to=1,by=0.025)))
quantilesGov
limitGov <- quantilesGov[4]
data$Trust.Government.Corruption[data$Trust.Government.Corruption>limitGov] <- limitGov
#Normalization (min-max scaling)
normalized <- as.data.frame(apply(data[,-1],2,function(x) (x-min(x))/(max(x)-min(x))))
summary(normalized)
#Determine the optimal K
side <- data.frame()
for(k in 2:8) {
model <- kmeans(normalized,k,iter.max = 20,nstart = 1000)
side <- rbind(side,c(k,model$tot.withinss,model$betweenss/model$totss))
}
names(side)<-c('K','TotalWithinSS','Ratio')
side
compute.difference(side$TotalWithinSS)
library(ggplot2)
ggplot(side,mapping = aes(x=K,y=TotalWithinSS))+geom_line()
optimalK <- 3
#Build the model
model <- kmeans(normalized,optimalK,iter.max = 20,nstart = 1000)
model
#Notes on the model
#Three clusters, of sizes 59, 31 and 65
#Tot SS - sum of squared distances between the mean of the whole data set and each individual observation
#Between SS - sum of squared distances between the mean of the whole data set and each cluster centre
#Within SS - sum of squared distances between each cluster centre and each observation in that cluster
summary.stats(normalized,model$cluster,3)
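#Illustrative addition (not in the original script): visualize the clusters on
#the first two normalized variables; the choice of columns is arbitrary
plot.df <- data.frame(x=normalized[[1]], y=normalized[[2]], cluster=factor(model$cluster))
ggplot(plot.df, aes(x=x, y=y, color=cluster)) + geom_point()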
| /Algorithms/Kmeans-WorldHappiness.R | no_license | djokole/ML-R | R | false | false | 2,191 | r |
## ----warning=FALSE, message=FALSE---------------------------------------------
library(choroplethr)
library(choroplethrMaps)
library(R6)
# Create the class, inheriting from the base Choropleth object
CountryChoropleth = R6Class("CountryChoropleth",
inherit = choroplethr::Choropleth,
public = list(
# Initialize with a world map
initialize = function(user.df)
{
data(country.map, package="choroplethrMaps")
super$initialize(country.map, user.df)
}
)
)
# Create some sample data and then render it
data(country.regions, package="choroplethrMaps")
df = data.frame(region=country.regions$region, value=sample(1:nrow(country.regions)))
c = CountryChoropleth$new(df)
c$render()
## -----------------------------------------------------------------------------
country_choropleth
| /inst/doc/f-creating-your-own-maps.R | no_license | cran/choroplethr | R | false | false | 819 | r |
# This script estimates the model. The modules are as follows:
# 1. Segment households based on initial income level and subset data;
# 2. Prepare estimation data;
# 3. Estimate the conditional allocation model at different initial values;
# 4. Simulate inclusive values;
# 5. Estimate the upper-level expenditure model.
cat("This program begins to run at", as.character(Sys.time()), ".\n")
library(ggplot2)
library(reshape2)
library(Rcpp)
library(RcppArmadillo)
library(maxLik)
library(evd)
library(data.table)
library(doParallel)
library(foreach)
library(plm)
library(chebpol)
library(nloptr)
library(mgcv)
options(error = quote({dump.frames(to.file = TRUE)}))
args <- commandArgs(trailingOnly = TRUE)
print(args)
if(length(args)>0){
for(i in 1:length(args)){
eval(parse(text=args[[i]]))
}
}
# setwd("~/Documents/Research/Store switching/processed data")
# plot.wd <- '~/Desktop'
# sourceCpp(paste("../Exercise/Multiple_discrete_continuous_model/", model_name, ".cpp", sep=""))
# source("../Exercise/Multiple_discrete_continuous_model/0_Allocation_function.R")
setwd("/home/brgordon/ccv103/Exercise/run")
# setwd("/kellogg/users/marketing/2661703/Exercise/run")
# setwd("/sscc/home/c/ccv103/Exercise/run")
model_name <- "MDCEV_share"
run_id <- 3
# seg_id <- 1
make_plot <- TRUE
plot.wd <- paste(getwd(), "/estrun_",run_id, sep="")
ww <- 10
ar <- .6
ver.date <- "2016-02-16"
load(paste("estrun_",run_id,"/MDCEV_est_seg",seg_id,"_", ver.date,".rdata",sep=""))
sourceCpp(paste(model_name, ".cpp", sep=""))
source("0_Allocation_function.R")
source("ctrfact_sim_functions_v2.r")
rm(list = c("gamfit","model_name", "tmpdat"))
#######################
# Robust otpimization #
#######################
MDCEV_wrapper <- function(param){
MDCEV_ll_fnC(param, nx, shr, y, s1_index, price, X_list, beta0_base)$ll
}
# Continue maximization with SANN, using the best coef as starting values
pct <- proc.time()
sol.sann <- maxLik(MDCEV_wrapper, start=coef(sol), method="SANN", fixed=myfix)
use.time <- proc.time() - pct
cat("The estimation with SANN finishes with", use.time[3]/60, "min.\n")
print(summary(sol.sann)); cat("\n")
cat("The difference of coefficient estiamtes between sol and SANN =", max(abs(coef(sol) - coef(sol.sann))), ".\n")
# Check log likelihood concavity around the estimates
# Returns a signed power of ten on the order of magnitude of x; it is used to
# set the step size of the coefficient grid below. Note that sig.x is a
# character string, so sig.x < 1 relies on R coercing 1 to "1" for the comparison.
FindOrder <-function(x){
	sig.x <- format(signif(abs(x), 0), scientific = FALSE)
	if(sig.x < 1){
		x1 <- gsub("(.*)(\\.)|([0]*$)","", sig.x)
		d <- -nchar(x1)
	}else{
		d <- nchar(as.character(trunc(as.numeric(sig.x)))) - 1
	}
	return(sign(x)*10^d)
}
my.coef <- coef(sol.sann)
ggtmp <- data.frame()
my.grid <- -10:10
if(length(myfix) > 0){
coef.grid <- sapply(my.coef[-myfix], function(x) FindOrder(x) * my.grid) + rep(1, length(my.grid)) %*% t(my.coef[-myfix])
}else{
coef.grid <- sapply(my.coef, function(x) FindOrder(x) * my.grid) + rep(1, length(my.grid)) %*% t(my.coef)
}
for(i in 1:ncol(coef.grid)){
for(j in 1:nrow(coef.grid)){
tmp <- my.coef
tmp[colnames(coef.grid)[i]] <- coef.grid[j,i]
ll <- sum(MDCEV_wrapper(tmp) )
ggtmp <- rbind(ggtmp, data.frame(Coef = colnames(coef.grid)[i], value = coef.grid[j,i], ll = ll))
}
}
ggtmp$est <- my.coef[as.character(ggtmp$Coef)]
ggtmp$est.ind <- ifelse(ggtmp$value == ggtmp$est, 1, 0)
if(make_plot){
pdf(paste(plot.wd, "/graph_init_ll_seg", seg_id, ".pdf",sep=""), width = 8, height = 8)
print(ggplot(ggtmp, aes(value, ll)) + geom_point(aes(color = factor(est.ind))) +
geom_line() +
scale_color_manual(values = c("black", "red")) +
facet_wrap(~Coef, scales = "free") +
guides(color = FALSE)
)
dev.off()
}
# Save results
rm(list=c("hh_exp", "Allocation_constr_fn","Allocation_fn","Allocation_nlop_fn","incl_value_fn","i","MDCEV_ll_fnC",
"MDCEV_LogLike_fnC","MDCEV_wrapper","tmp","tmp1","tmp2","param_assign",
"use.time", "pct", "uP_fn","uPGrad_fn", "make_plot", "ord","panelist","tmpidx","tmpn","cl",
"tmp_sol1", "GMM_fn", "M_fn", "init", "mycore", "param_assignR", "ggtmp", "ggtmp1",
"mysplfun", "mytrimfun", "expFOC_fn", "exp_fn", "solveExp_fn",
"simExp_fn", "SimWrapper_fn", "SimOmega_fn", "cheb.1d.basis", "cheb.basis", "chebfun", "omega_parallel",
"lastFuncGrad", "lastFuncParam", "args"))
save.image(paste("estrun_",run_id,"/MDCEV_bottom_test_seg",seg_id, "_", Sys.Date(),".rdata",sep=""))
cat("This program is done. ")
| /to be deleted/share_allocation/Estimation test/2_mdcev_share_bot_findinits.r | no_license | Superet/Expenditure | R | false | false | 4,355 | r |
data<-read.csv('bankdata.csv')
shinyUI(pageWithSidebar(
headerPanel('Clustering Age/Income/Number Of Children'),
sidebarPanel(
selectInput('xcol', 'X Variable', names(data)),
selectInput('ycol', 'Y Variable', names(data),
selected=names(data)[[2]]),
numericInput('clusters', 'Cluster count', 3,
min = 1, max = 9),
h5("Data Reference:"),
tags$a("WEKA",
href="http://facweb.cs.depaul.edu/mobasher/classes/ect584/WEKA/k-means.html")
),
mainPanel(
plotOutput('plot1')
)
))
| /ui.R | no_license | briantcs/developingdataproduct | R | false | false | 557 | r |
library(seqMeta)
### Name: burdenMeta
### Title: Meta analyze burden tests from multiple studies
### Aliases: burdenMeta
### ** Examples
###load example data for two studies:
### see ?seqMetaExample
data(seqMetaExample)
####run on each cohort:
cohort1 <- prepScores(Z=Z1, y~1, SNPInfo=SNPInfo, data=pheno1)
cohort2 <- prepScores(Z=Z2, y~1, SNPInfo=SNPInfo, data=pheno2)
#### combine results:
out <- burdenMeta(cohort1, cohort2, SNPInfo = SNPInfo, mafRange=c(0,.01))
head(out)
## Not run:
##D ##### Compare with analysis on full data set:
##D bigZ <- matrix(NA,2*n,nrow(SNPInfo))
##D colnames(bigZ) <- SNPInfo$Name
##D
##D for(gene in unique(SNPInfo$gene)) {
##D snp.names <- SNPInfo$Name[SNPInfo$gene == gene]
##D bigZ[1:n,SNPInfo$gene == gene][ , snp.names %in% colnames(Z1)] <-
##D Z1[ , na.omit(match(snp.names,colnames(Z1)))]
##D bigZ[(n+1):(2*n),SNPInfo$gene == gene][ , snp.names %in% colnames(Z2)] <-
##D Z2[ , na.omit(match(snp.names,colnames(Z2)))]
##D }
##D
##D pheno <- rbind(pheno1[, c("y","sex","bmi")],pheno2[,c("y","sex","bmi")])
##D burden.p <- c(by(SNPInfo$Name, SNPInfo$gene, function(snp.names) {
##D   inds <- match(snp.names,colnames(bigZ)); burden <- rowSums(bigZ[,na.omit(inds)],na.rm=TRUE)
##D mod <- lm(y~burden + gl(2,nrow(pheno1)),data=pheno)
##D summary(mod)$coef[2,4]
##D }))
##D
##D head(cbind(out$p,burden.p))
##D
##D #will be slightly different:
##D plot(y=out$p,x=burden.p, ylab = "burden meta p-values", xlab = "complete data p-values")
## End(Not run)
| /data/genthat_extracted_code/seqMeta/examples/burdenMeta.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,563 | r |
library(shiny)
library(dplyr)
library(leaflet)
library(ggplot2)
library(plotly)
library(lubridate)
library(scales)
library(shinyWidgets)
library(shinythemes)
library(shinyjs)
library(shinycssloaders)
library(shinymanager)
# This will log out the user after a period of inactivity
inactivity <- "function idleTimer() {
var t = setTimeout(logout, 120000);
window.onmousemove = resetTimer; // catches mouse movements
window.onmousedown = resetTimer; // catches mouse button presses
window.onclick = resetTimer; // catches mouse clicks
window.onscroll = resetTimer; // catches scrolling
window.onkeypress = resetTimer; //catches keyboard actions
function logout() {
window.close(); //close the window
}
function resetTimer() {
clearTimeout(t);
t = setTimeout(logout, 120000); // time is in milliseconds (1000 is 1 second)
}
}
idleTimer();"
#This is where the logins go, probably worth hiding this section on the Github.
credentials <- data.frame(
  user = c("1", "gather"),
  password = c("1", "sanitation"),
  stringsAsFactors = FALSE)
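# Note (illustrative; not in the original file): shinymanager validates these
# credentials on the server side, typically via something like
#   server <- function(input, output, session) {
#     res_auth <- secure_server(check_credentials = check_credentials(credentials))
#     ...
#   }
# secure_app() below only wraps the UI in the login screen.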
#setwd("C:/Users/someg/dev/gather/git/Sanitation_Risk_App/Shiny/Sanitaion_Risk_App/Sanitation_Risk_App/Shiny/")
# User interface ----
ui <- secure_app(head_auth = tags$script(inactivity),
tagList(tags$head(tags$script(type="text/javascript", src = "code.js")),
navbarPage(title = "Sanitation Data Platform", id = "nav", theme = "style.css",
tabPanel('Home', value = -1,
fluidRow( class = "updateTitle",
column(4, "Sanitation Data Platform: Geospatial Visualisations for three Sanitation Risk Indices for Antananarivo, Madagascar", div(style = "height:30px;"), offset = 4)
),
fluidRow(class = "updateArea",
column(4, uiOutput(outputId = 'home'), offset = 4)
)),
#this is in the www folder
tabPanel("Data Sources", value = 5,
tags$iframe(class = 'leaflet-container', style="height:400px; width:100%; scrolling=yes", src="Datasets_sources.pdf")),
tabPanel("SRI1", value = 0,
leafletOutput(outputId = "SRI1", height = 700) %>% withSpinner(type = 4),
absolutePanel(id = "waste", class = "panel panel-default", fixed = TRUE,
draggable = F, top =140, left = "auto", right = 50, bottom = 20,
width = 400, height = 600,
style = "overflow-y: scroll;",
#Stats on the side
h1("Risk"),
br(),
p(id = "mainText", "It considers household density to be of equal importance to all the other indicators combined. This is to account for the additional risk that high household density poses in areas where sanitation facilities are poor (Hathi, et al. 2017)."),
p(id = "mainText", "This index predicts a high level of uncontained faecal waste in the southwest, with another hotspot in the northwest of the study area. The northeast and east are generally predicted to have a low level of risk. This index generates values with few areas that have a significantly higher or lower risk than the others. "),
p(id= "mainText", "SRI1=(Flood risk+Road density+Terrain movement+Households sharing toilet+Main drinking water source+Toilet type+ Toilet location+Population density+Children aged under 5 per household+Rent+Tax+Open defecation)*Household density"),
downloadButton("Download", style="display: block; margin: 0 auto; width: 230px;color: black;"))
),
tabPanel("SRI2", value = 1,
leafletOutput(outputId = "SRI2", height = 700) %>% withSpinner(type = 4),
absolutePanel(id = "waste", class = "panel panel-default", fixed = TRUE,
draggable = F, top =140, left = "auto", right = 50, bottom = "auto",
width = 400, height = 600,
style = "overflow-y: scroll;",
#Stats on the side
h1("Risk"),
br(),
p(id = "mainText", "We see a much greater spread in areas that are predicted to have lower rates of uncontained faecal matter in the environment. Areas of high risk are largely located in the northeast and west of the study area, whereas the southeast is predicted to be at lower risk."),
p(id = "mainText", "Similar to SRI1, this index is limited by the assumption that all of the indicators are of equal importance to one another, limiting its accuracy. To improve this, we need to be able to weight each indicator according to its importance. "),
p(id = "mainText", "SRI2= Average(Flood risk+Road density+Terrain movemen+Open defecation+ Households sharing toilet+Main drinking water source+Toilet type+Population density+Household density+Toilet location+ Children aged under 5 per household+Rent+Tax)"))
), tabPanel("SRI3", value = 2,
leafletOutput(outputId = "SRI3", height = 700) %>% withSpinner(type = 4),
absolutePanel(id = "waste", class = "panel panel-default", fixed = TRUE,
draggable = F, top =140, left = "auto", right = 50, bottom = "auto",
width = 400, height = 600,
style = "overflow-y: scroll;",
#Stats on the side
h1("Risk"),
br(),
p(id = "mainText", "For SRI3, we utilised the AHP method to develop weights for each indicator according to how important it was in predicting the level of uncontained faecal waste in the environment. It is clear from the weightings that the experts we consulted viewed environmental indicators - flood risk and terrain movement - as the key indicators that predict the level of uncontained faecal waste in the environment."),
p(id = "mainText", "This index shows hotspots in the northeast and northwest, and areas of low risk in the east. It correlates closely to flood risk dataset, as this was the indicator that was most influential by the experts and therefore has the highest weighting. This index gives the most even spread of values across all risk values. "),
p(id = "mainText", "SRI3 = (Flood risk*0.17 + Terrain Movement*0.17 + Road density*0.12 + Household density*0.10 + Population density*0.10 + Rent*0.08 + Tax*0.07 + Main drinking water source*0.04 + Children aged under 5 per household*0.04 + Open Defecation*0.04 + Households sharing toilet*0.03 + Toilet type*0.02 + Toilet location*0.02)"))
),
# Adding a download bar
tabPanel("Download Data", value = 5,
sidebarPanel(
sidebarPanel(
selectInput("dataset", "Choose a dataset:",
choices = c("Final Risk", "CUA5 Roads", "CUA5 Outline")),
# Button
downloadButton("downloadData", "Download")
))),
useShinyjs()
)))
# When ready to deploy, it will then ask if you want to update the original app domain. Click yes.
# it will upload here >> https://sanitationrisk.shinyapps.io/shiny
# library(rsconnect)
# rsconnect::setAccountInfo(name='sanitation-hub', token='82EA28FA57A3CF8359DEAC9326DA0DDE', secret='UXNQCdSIUZB6Wfv7HhhiWf4Nqf+MEJ894mPJWC2s')
# rsconnect::deployApp('C:\\Users\\someg\\dev\\gather\\git\\Sanitation_Risk_App\\Shiny\\Sanitaion_Risk_App\\Sanitation_Risk_App\\Shiny', account = 'sanitation-hub')
| /ui.R | no_license | blueriver212/SanitationRisk_App | R | false | false | 8,310 | r |
## Frances Ning / frances.ning@gmail.com
## https://github.com/fjning/
## cachematrix.R: Takes in a matrix and caches the value. It also calculates
## the inverse of a matrix and avoids re-evaluating the same matrix by
## checking and using the cached inverse matrix.
##  Example To run:
##  > m <- matrix(c(-1,-2,1,1),2,2)
##  > mc <- makeCacheMatrix(m)
##  > mc$get()
##  > inverse_m <- cacheSolve(mc)
##  > inverse_m            ## evaluates the inverse of matrix 'm' for the 1st time
##  > inverse_m <- cacheSolve(mc)
##  > inverse_m            ## returns the cached inverse matrix
##  (The original example wrapped the matrix with makeCacheMatrix() twice,
##  which would make cacheSolve() fail on a list rather than a matrix.)
## makeCacheMatrix: This function creates a special "matrix" object that
## can cache its inverse.
makeCacheMatrix <- function(data_matrix = matrix()) {
stored_inverse <- NULL
set <- function(y) {
data_matrix <<- y
stored_inverse <<- NULL
}
## Create function get and assign a matrix to it
get <- function() {
return(data_matrix)
}
setInverse <- function(replacement_matrix) {
stored_inverse <<- replacement_matrix
}
getInverse <- function() {
stored_inverse
}
## Lists out the values of the functions in the makeCacheMatrix frame
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: This function calculates the inverse of a matrix but checks
## if it has been evaluated before. If so, it returns the cached inverse
## matrix. If not, it saves it for future calls.
cacheSolve <- function(made_matrix, ...) {
## Goes to the made_matrix environment and assigns the stored_inverse
## value from that environment to this local_inverse variable
local_inverse<-made_matrix$getInverse()
## If the made_matrix environment has been evaluated before, the function
## prints out the value of the stored_inverse (cache inverse matrix)
    if(!is.null(local_inverse)) {
        message("getting cached data")
        ## return() is required here; without it the function falls through
        ## and recomputes the inverse, defeating the cache.
        return(local_inverse)
    }
## If this made_matrix environment has never been evaluated before,
## assign the made_matrix to the local_matrix variable
local_matrix <- made_matrix$get()
local_inverse <- solve(local_matrix, ...)
## Assign the calculated local_inverse to the made_matrix environment
made_matrix$setInverse(local_inverse)
local_inverse
}
#########################################################################
## In this example we introduce the <<- operator which can be used
## to assign a value to an object in an environment that is different
## from the current environment. Below are two functions that are used
## to create a special object that stores a numeric vector and cache's
## its mean.
## The first function, makeVector creates a special "vector", which is really a list containing a function to
## set the value of the vector
## get the value of the vector
## set the value of the mean
## get the value of the mean
makeVector <- function(data_vector = numeric()) {
## Initialize variable so function doesn't hang the 1st time
stored_mean <- NULL
set <- function(y) {
data_vector <<- y
stored_mean <<- NULL
}
## Create func "get" in the makeVector parent and assign a vector to it
get <- function() {
return (data_vector)
}
setmean <- function(replacement_mean) {
stored_mean <<- replacement_mean
}
getmean <- function() {
return(stored_mean)
}
## Lists out the values of the functions in the makeVector frame
list(set = set, get = get,
setmean = setmean,
getmean = getmean)
}
## This function calculates the mean of the special "vector" created
## with the above function. However, it first checks to see if the
## mean has already been calculated. If so, it gets the mean from the
## cache and skips the computation. Otherwise, it calculates the mean
## of the data and sets the value of the mean in the cache via the
## setmean function.
cachemean <- function(made_vector, ...) {
## Goes to the made_vector environment and assigns the stored_mean
## value from that environment to this local_mean
local_mean <- made_vector$getmean()
## If the made_vector environment has been evaluated before, the function
## prints out the value of the stored_mean (cache mean)
if(!is.null(local_mean)) {
message("getting cached data")
return(local_mean)
}
## If this made_vector environment has never been evaluated before,
## assign the made_vector to the local_data variable
local_data <- made_vector$get()
local_mean <- mean(local_data, ...)
## Assign the calculated local_mean to the made_vector environment
made_vector$setmean(local_mean)
return(local_mean)
}
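##  Example usage of the vector version (illustrative, mirroring the matrix
##  example at the top of this file):
##  > v <- makeVector(c(1, 2, 3, 4))
##  > cachemean(v)   # computes and caches the mean (2.5)
##  > cachemean(v)   # prints "getting cached data" and returns the cached mean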
## https://github.com/fjning/
## cachematrix.R: Takes in a matrix and caches the value. It also calculates
## the inverse of a matrix and avoids re-evaluating the same matrix by
## checking and using the cached inverse matrix.
## Example To run:
## > m<-matrix(c(-1,-2,1,1),2,2)
## > m<-makeCacheMatrix(m)
## > mc<-makeCacheMatrix(m)
## > mc$get()
## > inverse_m <- cacheSolve(mc)
## > inverse_m ## evaluates the inverse of matrix 'm' for 1st time
## > inverse_m <- cacheSolve(mc)
## > inverse_m ## returns the cached inverse matrix
## makeCacheMatrix: This function creates a special "matrix" object that
## can cache its inverse.
makeCacheMatrix <- function(data_matrix = matrix()) {
stored_inverse <- NULL
set <- function(y) {
data_matrix <<- y
stored_inverse <<- NULL
}
## Create function get and assign a matrix to it
get <- function() {
return(data_matrix)
}
setInverse <- function(replacement_matrix) {
stored_inverse <<- replacement_matrix
}
getInverse <- function() {
stored_inverse
}
## Lists out the values of the functions in the makeCacheMatrix frame
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: This function calculates the inverse of a matrix but checks
## if it has been evaluated before. If so, it returns the cached inverse
## matrix. If not, it saves it for future calls.
cacheSolve <- function(made_matrix, ...) {
## Goes to the made_matrix environment and assigns the stored_inverse
## value from that environment to this local_inverse variable
local_inverse<-made_matrix$getInverse()
## If the made_matrix environment has been evaluated before, the function
## prints out the value of the stored_inverse (cache inverse matrix)
if(!is.null(local_inverse)) {
message("getting cache data")
local_inverse
}
## If this made_matrix environment has never been evaluated before,
## assign the made_matrix to the local_matrix variable
local_matrix <- made_matrix$get()
local_inverse <- solve(local_matrix, ...)
## Assign the calculated local_inverse to the made_matrix environment
made_matrix$setInverse(local_inverse)
local_inverse
}
#########################################################################
## In this example we introduce the <<- operator which can be used
## to assign a value to an object in an environment that is different
## from the current environment. Below are two functions that are used
## to create a special object that stores a numeric vector and cache's
## its mean.
## The first function, makeVector creates a special "vector", which is really a list containing a function to
## set the value of the vector
## get the value of the vector
## set the value of the mean
## get the value of the mean
makeVector <- function(data_vector = numeric()) {
## Initialize variable so function doesn't hang the 1st time
stored_mean <- NULL
set <- function(y) {
data_vector <<- y
stored_mean <<- NULL
}
## Create func "get" in the makeVector parent and assign a vector to it
get <- function() {
return (data_vector)
}
setmean <- function(replacement_mean) {
stored_mean <<- replacement_mean
}
getmean <- function() {
return(stored_mean)
}
## Lists out the values of the functions in the makeVector frame
list(set = set, get = get,
setmean = setmean,
getmean = getmean)
}
## This function calculates the mean of the special "vector" created
## with the above function. However, it first checks to see if the
## mean has already been calculated. If so, it gets the mean from the
## cache and skips the computation. Otherwise, it calculates the mean
## of the data and sets the value of the mean in the cache via the
## setmean function.
cachemean <- function(made_vector, ...) {
## Goes to the made_vector environment and assigns the stored_mean
## value from that environment to this local_mean
local_mean <- made_vector$getmean()
## If the made_vector environment has been evaluated before, the function
## prints out the value of the stored_mean (cache mean)
if(!is.null(local_mean)) {
message("getting cached data")
return(local_mean)
}
## If this made_vector environment has never been evaluated before,
## assign the made_vector to the local_data variable
local_data <- made_vector$get()
local_mean <- mean(local_data, ...)
## Assign the calculated local_mean to the made_vector environment
made_vector$setmean(local_mean)
return(local_mean)
} |
library("DESeq2")
dir <- "/home/caithaug/genome_analysis_course/Genome-analysis/analyses/08_differential_expression/htseq_output/"
setwd(dir)
# Loads data from HTSEQ Count
sampleFiles <- grep("_counts.txt",list.files(dir),value=TRUE)
sampleNames=sub('_counts.txt','',sampleFiles)
sampleCondition=sub('1','',sampleNames)
sampleTable=data.frame(sampleName=sampleNames, fileName=sampleFiles, condition=sampleCondition)
ddsHTSeq=DESeqDataSetFromHTSeqCount(sampleTable=sampleTable,directory=dir,design=~condition)
dds <- DESeq(ddsHTSeq)
res <- results(dds)
# Extract the significantly differentially expressed genes
resOrdered<- res[order(res$padj),]
resSig <- subset(resOrdered, padj<0.05)
resSigOrdered <- subset(resSig[order(resSig$log2FoldChange),], log2FoldChange>0)
write.csv(res, file = "/home/caithaug/genome_analysis_course/Genome-analysis/analyses/08_differential_expression/deseq_output/deseq_res.csv")
write.csv(resSig, file = "/home/caithaug/genome_analysis_course/Genome-analysis/analyses/08_differential_expression/deseq_output/deseq_resSig.csv")
# write.csv(resOrdered, file = "/home/caithaug/genome_analysis_course/Genome-analysis/analyses/08_differential_expression/deseq_output/deseq_resSig.csv")
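# Optional sanity checks (added sketch, not part of the original pipeline):
# summary() tabulates up/down-regulated genes at the default alpha, and
# plotMA() gives a quick visual of the log2 fold changes.
summary(res)
plotMA(res, ylim = c(-5, 5))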
 | /code/deseq2_run.r | no_license | caithaughey/Genome-analysis | R | false | false | 1,214 | r |
library(randomForest)
# use 2017/18 data to train
# input:
# null
# output:
# random forest model
gamePredTrain <- function(mplot=FALSE) {
# load 3 chars team id
team_list <- read.csv("..\\data\\TeamList.csv", stringsAsFactors=F)
# print(team_list)
# load 2017/18 regular season team stats per game
team_stats_per_game_1718 <- read.csv("..\\data\\TeamStatsPerGame1718.csv", stringsAsFactors=F)
team_stats_per_game_1718 <- team_stats_per_game_1718[, c("Team", "FG", "FGA", "FGP", "X3P", "X3PA", "X3PP", "X2P", "X2PA", "X2PP", "FT", "FTA", "FTP", "ORB", "DRB", "TRB", "AST", "STL", "BLK", "TOV", "PF", "PTS")]
# print(team_stats_per_game_1718)
# build dataset according to team game log
# format: own avg stats, opp avg stats, W/L
game_data = data.frame()
for (i in 1:30) {
team_id <- team_list[,1][i]
team_data <- subset(team_stats_per_game_1718, Team==team_id, select=c("FG", "FGA", "FGP", "X3P", "X3PA", "X3PP", "X2P", "X2PA", "X2PP", "FT", "FTA", "FTP", "ORB", "DRB", "TRB", "AST", "STL", "BLK", "TOV", "PF", "PTS"))
team_log <- read.csv(paste("..\\data\\TeamGameLog1718\\", team_id, ".csv", sep=""), stringsAsFactors=F)
for (j in 1:82) {
opp_team_id <- team_log[,"Opp"][j]
opp_team_data <- subset(team_stats_per_game_1718, Team==opp_team_id, select=c("FG", "FGA", "FGP", "X3P", "X3PA", "X3PP", "X2P", "X2PA", "X2PP", "FT", "FTA", "FTP", "ORB", "DRB", "TRB", "AST", "STL", "BLK", "TOV", "PF", "PTS"))
game_result <- team_log[,"W.L"][j]
new_game_data <- data.frame(team_data[1:21], opp_team_data[1:21], game_result)
game_data <- rbind(game_data, new_game_data)
}
}
# print(game_data)
# train model (W/L is converted to a factor so randomForest does classification)
game_data$game_result <- as.factor(game_data$game_result)
game_model <- randomForest(game_result~., data=game_data)
# print(importance(game_model))
# print(game_model)
if (mplot==TRUE) {
plot(game_model)
}
return(game_model)
}
# predict game result with given model and data
# input:
# model: random forest model
# data: data.frame(Team, OppTeam, TeamData[1:21], OppTeamData[1:21])
# output:
# predicted game result: c("Team", "OppTeam", "gamePred")
gamePred <- function(model, data) {
# predict game result by random forest model
game_pred <- predict(model, data)
game_pred_name <- data[, c("Team", "OppTeam")]
game_pred <- data.frame(game_pred_name, game_pred)
#print(game_pred)
return(game_pred)
}
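# Hedged usage sketch (commented out; "GSW"/"CLE" and the *_stats data frames
# are illustrative assumptions -- build them from TeamStatsPerGame1718.csv as above):
# model <- gamePredTrain(mplot=FALSE)
# matchup <- data.frame(Team="GSW", OppTeam="CLE", gsw_stats[1:21], cle_stats[1:21])
# gamePred(model, matchup)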
 | /04-more-data/NBAProject-v05062018/src/GamePred_rf.R | no_license | nicolaswilde/csx415-assignments | R | false | false | 2,479 | r |
# Sept, 2017
# Daniel Nevo
rm(list=ls())
library(foreach)
library(doParallel)
library(doRNG)
setwd("/home/dn84/ChangePointBin/Sims/Raux")
source('IterSimSingle.R')
setwd("/home/dn84/ChangePointBin/Sims")
# True Parameter Values
mu = 0.2 # Exponential censoring parameter (mean=10)
lambda = 0.1 # Scale for the time-to-event Gompertz distribution
alpha = 0.25 # Shape for the time-to-event Gompertz distribution
pexp.rates <- c(0.025, 0.3, 0.1, 0.025)
pexp.ts <- c(0, 2, 3, 4)
BS = 200 # Bootstrap replications for variance estimation
beta0 <- log(1)
n.points <- 10
n.sample <- 1000
n.sim <- 1000
cl <- makeCluster(12,outfile="")
registerDoParallel(cl)
# Load prior seeds used for set.seed, and pick the one corresponding to beta0 and n.points
my.seeds <- read.table("Raux/seeds.simple.pexp.txt",header = T, check.names = F)
seed <- my.seeds[which(as.numeric(rownames(my.seeds))==n.points), which(as.numeric(colnames(my.seeds))==exp(beta0))]
set.seed(seed)
Results <- foreach(i = 1:n.sim,.packages = c("ICcalib"),
.combine = "rbind") %dorng% {
cat("i = ", i, "\n")
IterSimSinglePexp(n.sample = n.sample, mu = mu, lambda = lambda, alpha = alpha,
beta = beta0, n.points = n.points, BS = BS, pexp.rates = pexp.rates, pexp.ts = pexp.ts)
}
stopCluster(cl)
save.image(paste0("CPBSinglePexpBeta",exp(beta0),"W",n.points,"N",n.sample,".RData"))
write.table(Results,paste0("CPBSinglePexpBeta",exp(beta0),"W",n.points,"N",n.sample,".txt"))
sessionInfo()
 | /Simulations/CalibrationWithNoCovariates/Scripts/CPBsinglepexpbeta1w10n1000.R | no_license | daniel258/ICcalibReproduce | R | false | false | 1,561 | r |
#working directory setting
setwd("C:/dev_R")
# get the current working directory
getwd()
a=10
b=20
c=a+b
c
a=10;b=20
c=a+b;c
a/b;
a%/%b # integer division (quotient)
a%%b # remainder (modulo)
# assignment
a=100
a<-200
300->a
a==b
a!=b
!a
a=FALSE
b=10
# & always evaluates both sides
a&(b<-b+1)>20; b
# && short-circuits: if the left side is FALSE, the right side is not evaluated
b=10
a&&(b<-b+1)>20; b
# OR is TRUE if either side is TRUE.
# if the left side is TRUE, does the right side still get evaluated?
b=10
TRUE| (b<-b+1)>20; b # yes, | evaluates it
b=10
TRUE|| (b<-b+1)>20; b # no, || skips it
# data types
a = 10
b = "알까기"
c= FALSE
d= 1+2i
# NaN: a value that cannot be computed mathematically (Not a Number)
e=NaN
f=NULL
#NA:Not Available
g=NA
a;b;c;d;e;f;g;
# shows the data type as a character string
mode(a);mode(b);mode(c);mode(d)
# is.* functions return TRUE or FALSE
# is.numeric: is the value numeric?
is.numeric(a);is.numeric(b)
# is.na: is it NA?  is.null: is it NULL?
is.na(g);is.null(f)
# a vector that mixes types is coerced to a single type by precedence
a=c(1,"2",3,4,TRUE);a
# as.numeric: coerce to numeric
a=as.numeric("100")
is.numeric(a)
a=as.numeric("AAA")
is.numeric(a) # TRUE: the type is still numeric (the value is NA)
is.numeric(NA)
mode(a)
is.na(a)
a
# vectors
# the c() function is short for "combine"
# the most common way to create a vector
# creates a vector from values with no particular pattern
var1=c(10,2,30,44,5)
var2=c("apple","banana","orange")
var3=c(TRUE,FALSE,TRUE,FALSE)
var4=c(10,"green",TRUE,3.14,1+2i)
var5=c(var1,var2,var3,var4)
var5 # character has the highest coercion precedence
# colon operator
# var1=start:end, increases by 1
# numeric only
var1=1:5; var1
var2=5:1;var2
var4=-3:3; var4
#seq():sequence
# can build vectors that step by something other than 1
# numeric only
var1=seq(1,10,3); var1
var1=seq(from=1,to=10,by=3);var1
# sequence(n)
var2=sequence(10); var2 # integers from 1 to 10
# several ways to express 1 to 3
c(1,2,3)
seq(1,3,1)
sequence(3)
# rep is short for "replicate"
# can create vectors of various types
# times: copies the whole vector the given number of times
rep(c("a","b"),times=3)
# each: repeats each element the given number of times
rep(c("a","b"),each=3)
var1=c(10,20,"30",40,50)
mode(var1)
is.character(var1)
var1
# how many elements does the vector have?
length(var1)
names(var1) # NULL, because no names have been assigned yet
# assigning names (named indexing)
names(var1)=c("A","B","C","D","E")
names(var1)
var1
var1[3]
var1[3:5]
var1[c(1,5)]
var1[seq(3)] #seq(3)=>1,2,3
# vector arithmetic
v1=1:3 #1,2,3
v2=4:6 #4,5,6
v1+v2
# arithmetic when the vector lengths differ (recycling)
v1=c(1,2,3,8,9)
v1
v2=4:6
v1+v2
# a factor is a kind of vector that holds a limited set of possible values
# categorical data (used to analyze data by group)
# 1) nominal: gender, nationality... no ordering
# 2) ordinal: income rank, grades... ordered
gender=c("M","F","F","M","M","M") # entered as character
gender=factor(gender) # convert to factor
# nominal
# levels: the possible values; a character vector naming the groups
# labels: a character vector of labels for the levels
gender2=factor(gender,levels=c("M","F"),labels=c("남","여"),ordered=FALSE)
# ordinal
size=c("대","소","대","중","중","대")
fsize=factor(size,levels=c("소","중","대"),labels=c("소","중","대"),ordered=TRUE)
gender3=factor(gender,levels=c("M","F"),labels=c("남","여"),ordered=TRUE)
sort(gender2)
sort(gender3)
sort(fsize)
is.ordered(fsize)
is.ordered(gender2)
mode(gender)
typeof(gender)
is.character(gender)
is.factor(gender)
levels(gender)
levels(gender2) # the group names
levels(gender3)
# matrices
v1=1:5
v2=6:10
# combine by rows
v3=rbind(v1,v2);v3
# combine by columns
v4=cbind(v1,v2);v4
# matrix() fills column-first by default
v5=matrix(1:10,nrow=5,ncol=2);v5
# byrow=TRUE fills row-first
v6=matrix(1:10,nrow=5,ncol=2,byrow=TRUE);v6
# 1-dimensional
var1=array(1:10,dim=10);var1
# dim: the desired dimensions
# 2-dimensional (2 rows x 5 columns)
var2=array(1:10,dim=c(2,5));var2
# 3-dimensional (2 rows x 3 columns x 4 slices)
var3=array(1:10,dim=c(2,3,4));var3
#vector
v1=1:5
#array
v2=array(1:6,dim=c(2,3))
#matrix
v22=matrix(1:6,nrow=2,ncol=3)
#factor
v3=factor(c('m','f','m'))
#list
v4=list(v1,v2,v22,v3);v4
# typeof: inspect the type
typeof(v4[4])
typeof(v4[[4]])
# get the 4th element
v4[[4]][1]
v4[[3]][2,3]
aa=matrix(1:6,nrow=2,ncol=3);aa
aa[2,2]
x=c(1,2,3,4,5)
result=list(mul=x*2,div=x/2,root=sqrt(x))
result
result$div
a=10
a<-'aaa';a
a<-"aaa";a
a+2 # error: cannot add a number to a character string
# times (repeat count)
rep(a,times=2)
x=c(1,2,3,4,5)
mean(x)
max(x)
min(x)
a=c("Hello","R","Python")
a[0]+","+a[1]+","+a[2] #불가
paste(a,collapse=" ")
paste0(a,collapse = " ")
#package
# PL/SQL: procedures + functions
# R: functions + data
x=c("a","a","a","b","b","c")
install.packages("ggplot2")
library(ggplot2)
# frequency distribution
qplot(x)
data("iris")
typeof(iris)
typeof(mpg)
str(mpg)
mpg$hwy
data=mpg
# hwy: highway fuel economy
qplot(data=mpg,x=hwy)
# cty: city fuel economy
qplot(data=mpg,x=cty)
# drv: drivetrain type
qplot(data=mpg,x=drv,y=cty,geom="line")
qplot(data = mpg, x = drv, y = hwy, geom = "boxplot")
qplot(data = mpg, x = drv, y = hwy, geom = "boxplot", colour = drv)
#4,f,r
mpg$drv
?qplot
#1
score<-c(80,60,70,50,90)
score
#2,3
average<-mean(score)
average
# #ctrl+shift+c --> shortcut to comment/uncomment the whole selection
 | /2020-01-16.R | no_license | ksm4319/sumin_rep | R | false | false | 5,264 | r |
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{rpart.rules.push}
\alias{rpart.rules.push}
\title{Writes rule tables required to process rpart rules in SQL to an open RODBC connection.}
\usage{
rpart.rules.push(object, connection, rulePrefix = NULL, tablePrefix = NULL)
}
\arguments{
\item{object}{an rpart object}
\item{connection}{an open RODBC connection}
\item{rulePrefix}{A character string to prepend to each rule name to allow for multiple rule sets}
\item{tablePrefix}{A character string to prepend to each table name to allow for multiple rule sets}
}
\description{
This function handles the process of pushing tabular versions of
\pkg{rpart} rules to an RODBC connection. The entire process of generation
and writing is completed with a single call, with all necessary subcalls
handled within this function.
}
\details{
Once the tables have been pushed to the database, unpivoted source data can
be processed using the rpart model with SQL code similar to the following:
\preformatted{
WITH SOURCE AS
(
SELECT
ID,
TYPE,
VALUE
FROM DATA
UNPIVOT
(
VALUE FOR TYPE IN (FIELD1, FIELD2, FIELD3)
)UNPVT
),
MATCHES AS
(
SELECT
ID
,Subrule
,Variable
,SR.Value
,Less
,Greater
FROM
SOURCE S
LEFT JOIN SUBRULES SR
ON
TYPE = VARIABLE
AND (
S.value = SR.value
OR S.value < SR.Less
OR S.value > SR.Greater
)
),
MATCHED_SUBRULES
AS (
SELECT
Subrule
,ID
FROM
MATCHES M
GROUP BY
Subrule
,ID
),
MATCHED_RULES
AS (
SELECT
R.[Rule]
,MS.*
FROM
RULES AS R
LEFT JOIN MATCHED_SUBRULES MS
ON R.SUBRULE=MS.SUBRULE AND Leaf='TRUE'
)
,
COUNTS AS
(
SELECT
[RULE]
,ID
,MATCH_COUNT=COUNT(DISTINCT SUBRULE)
,NEEDED_COUNT=(SELECT COUNT(DISTINCT SUBRULE) FROM RULES R WHERE R.[RULE]=MR.[RULE])
FROM
MATCHED_RULES MR
GROUP BY
[RULE]
,ID
)
SELECT
RULE
,ID
FROM COUNTS
WHERE
MATCH_COUNT=NEEDED_COUNT
}
The frame is also passed to the database which allows extracting the estimates generated by the rpart model.
}
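\examples{
\dontrun{
## Hedged sketch (added): the DSN name and prefixes are illustrative assumptions.
library(rpart)
library(RODBC)
fit <- rpart(Kyphosis ~ Age + Number + Start, data = kyphosis)
ch <- odbcConnect("myDSN")  # assumes an ODBC data source named "myDSN"
rpart.rules.push(fit, ch, rulePrefix = "KYPH_", tablePrefix = "KYPH_")
odbcClose(ch)
}
}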
 | /man/rpart.rules.push.Rd | no_license | vladimir-lv/rpart.utils | R | false | false | 2,253 | rd |
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810536108214e+146, 1.99281994802077e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
 | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613106054-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r |
source('predict_systematic.R')
ifs_basic <- load_all_ifs()
jsr <- business_environment
my_ifs <- ml_prep(jsr,ifs_basic)
xval(my_ifs,lm_wrap) # 0.288
pred_scatter(my_ifs,lm_wrap,filter_year=2012:2016)
country_trend(my_ifs,'Kenya',lm_wrap)
forecast_plot(jsr,ifs_basic,'Kenya',lm_wrap) # keeps going up-up-up
# TODO: try knn -- don't remember how to do this
# svm_tune <- tune(svm,value ~ .,
# data=select(lib_dem_ifs,-country,-varstr),
# ranges=list(epsilon=0.1*(1:10),
# cost=2^(0:9)))
# plot(svm_tune)
# print(svm_tune)
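# Hedged sketch for the knn TODO above (untested; assumes the same columns as
# the svm_tune call). caret::train() can tune k by cross-validation:
# knn_fit <- caret::train(value ~ ., data=select(my_ifs,-country,-varstr),
#                         method='knn', tuneLength=10,
#                         trControl=caret::trainControl(method='cv', number=5))
# print(knn_fit)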
# this step is much more time-consuming for variables like this with lots of historical data
xval(my_ifs,svm_wrap) # 0.136
pred_scatter(my_ifs,svm_wrap,filter_year=2012:2016)
# because this splits into train and test now, results are sort of stochastic
# TODO: make plot use open circles instead of filled ones
# TODO: need to figure out how to pass arbitrary parameters into pred_scatter() and on to wrapper
country_trend(my_ifs,'Kenya',svm_wrap)
forecast_plot(jsr,ifs_basic,'Kenya',svm_wrap) # peaks, then comes back down...
xval(my_ifs,xgb_wrap) # 0.131 -- maybe better than SVM
pred_scatter(my_ifs,xgb_wrap,filter_year=2012:2016)
country_trend(my_ifs,'Kenya',xgb_wrap)
forecast_plot(jsr,ifs_basic,'Kenya',xgb_wrap) # not much net change
###############################################################################
# Rough plot for Kenya report
###############################################################################
country_ <- 'Kenya'
fore <- jsr_forecast(jsr,ifs_basic,xgb_wrap)
# TODO: probably a more elegant way to do this
c_fore <- fore %>% filter(country==country_) %>% select(year,value,country)
umic_fore <- fore %>%
filter(country %in% umic) %>%
group_by(year) %>%
summarize(value=mean(value)) %>%
mutate(country='UMIC')
lmic_fore <- fore %>%
filter(country %in% lmic) %>%
group_by(year) %>%
summarize(value=mean(value)) %>%
mutate(country='LMIC')
low_fore <- fore %>%
filter(country %in% low_income) %>%
group_by(year) %>%
summarize(value=mean(value)) %>%
mutate(country='Low-income')
cutoff <- max(jsr$year)+0.5
all_fore <- rbind(c_fore,umic_fore,lmic_fore,low_fore) %>%
mutate(country=fct_reorder(country,value,.desc=TRUE,.fun=median))
ggplot(all_fore,aes(x=year,y=value,group=country,color=country)) +
geom_line(size=1) +
theme_USAID + colors_USAID +
geom_vline(xintercept=cutoff,linetype=2,color='black') +
annotate('text',x=cutoff,y=68,hjust=-0.2,label='Forecast') +
annotate('text',x=cutoff,y=68,hjust=1.2,label='History') +
xlab('Year') + ylab('Score') +
labs(title='Business Environment Index',
caption='History Source: Legatum Institute')
# With xgb, Kenya comes quickly back down to UMIC average level and tracks it on a slow increase
# with svm, everyone smoothly peaks and comes back down
 | /predict_business_environment.R | no_license | ccjolley/JSR | R | false | false | 2,876 | r |
## TODO:
## multiple DVH/constraint plots with -> custom renderPlotList(), plotListOutput()
## footer
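## A possible server-side sketch for the plot-list TODO above (untested
## assumption -- renderUI() + lapply() stands in for the missing
## renderPlotList()/plotListOutput() helpers):
## output$plotList <- renderUI({
##     lapply(seq_len(nPlots), function(i) plotOutput(paste0("dvhPlot", i)))
## })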
source("helper.R")
shinyUI(fluidPage(
#theme="bootstrap.css",
titlePanel("Analyze dose-volume histograms using DVHmetrics"),
sidebarLayout(
sidebarPanel(width=3,
conditionalPanel(condition="input.task == 'DVH data'",
radioButtons("DVHin", label=h4("Enter data"),
list("Use built-in data"=1,
"Upload DVH file"=2)),
conditionalPanel(condition="input.DVHin == '2'",
h5("Upload DVH file: "),
radioButtons("DVHtype", "DVH file format:",
list("Eclipse"=1, "CadPlan"=2, "MasterPlan"=3,
"Pinnacle3"=4, "Monaco"=5, "HiArt"=6,
"RayStation"=7, "ProSoma"=8, "PRIMO"=9)),
fileInput("DVHupload", "Select DVH file:", multiple=TRUE),
# radioButtons("fileenc", "File encoding:",
# list("Default"=1, "UTF-8"=2, "UTF-8-BOM"=3)),
radioButtons("DVHplanInfo", "Information encoded in plan:",
list("None"=1, "Prescribed dose"=2)),
checkboxGroupInput("DVHreadOpts", label=NULL,
choices=c("Add to existing data"="DVHadd",
"Use Course for ID"="DVHcourse",
# "Struct volume from DVH"="volume_from_dvh",
"Uncertainty plans"="uncertainty"))),
actionButton("applyData", "Apply"),
radioButtons("DVHverbose", "",
list("Short info on DVHs"=1,
"Detailed info on DVHs"=2))
),
conditionalPanel(condition="input.task == 'Metrics'",
h4("Define metrics"),
textInput("metrInput", "Metric(s):", value=c("DMEAN, D1cc, V10%")),
#tags$textarea(id="defMetricsMult", rows=2, cols=10, ""),
#actionButton('clearText_button','Clear metrics'),
#radioButtons("metrInterp", label=h5("DVH interpolation"),
# list("Linear"=1,
# "Monotone spline"=2,
# "Local polynomial"=3)),
checkboxInput("metrEUDparam", "Show EUD params ...", FALSE),
conditionalPanel(condition="input.metrEUDparam == true",
textInput("metrEUDa", h5("exponent a"), value=""),
textInput("metrEUDfd", h5("fraction dose"), value=""),
textInput("metrEUDab", h5("alpha/beta ratio"), value="")
),
checkboxInput("metrNTCPparam", "Show (N)TCP params ...", FALSE),
conditionalPanel(condition="input.metrNTCPparam == true",
radioButtons("metrNTCPtype", h5("(N)TCP Model"),
list("Probit (Lyman KB)"=1,
"Logit (Niemierko)"=2,
"Probit (Kaellman)"=3)),
textInput("metrNTCPtd50", h5("T(C)D50"), value=""),
textInput("metrNTCPn", h5("n (=1 / EUD-a)"), value=""),
conditionalPanel(condition="input.metrNTCPtype == '1'",
textInput("metrNTCPm", h5("Lyman m"), value="")
),
conditionalPanel(condition="input.metrNTCPtype != '1'",
textInput("metrNTCPgamma50", h5("Logit/Poisson gamma50"), value="")
)
),
uiOutput("metrSelPat"),
actionButton("metrSelPatAll", label="(De)Select All"),
uiOutput("metrSelStruct"),
actionButton("metrSelStructAll", label="(De)Select All"),
selectizeInput("metrSortBy", label=h5("Sort output table by:"),
choices=c("Value"=1,
"Structure"=2,
"Metric"=3,
"Patient ID"=4),
multiple=TRUE)#,
#options=c(placeholder='Click to select variables'))
),
conditionalPanel(condition="input.task == 'Show DVH'",
h4("Plot options"),
radioButtons("plotByPat", label=h5("Plot by patient or by structure"),
list("By patient"=1,
"By structure"=2)),
uiOutput("plotSelPat"),
actionButton("plotSelPatAll", label="(De)Select All"),
uiOutput("plotSelStruct"),
actionButton("plotSelStructAll", label="(De)Select All"),
radioButtons("plotPlotVol", label=h5("Plot relative/absolute volume"),
list("Relative volume"=1,
"Absolute volume"=2)),
radioButtons("plotType", label=h5("DVH type"),
list("Cumulative"=1,
"Differential"=2)),
checkboxInput("plotMSD", "Show M + SD areas", FALSE),
sliderInput("plotThreshVol", label=h5("Threshold volume"),
min=0, max=100, value=1)
),
conditionalPanel(condition="input.task == 'Check constraints'",
radioButtons("constrIn", label=h4("Define constraints"),
list("Use built-in constraints"=1,
"Upload constraints"=2,
"Paste constraints"=3)),
conditionalPanel(condition="input.constrIn == '2'",
h5("Upload constraints: "),
fileInput("constrUpload", "Select constraint file:", multiple=FALSE),
radioButtons("constrDec", "Decimal separator:",
list("."=1, ","=2)),
radioButtons("constrSep", "Column separator:",
list("\\t (tab)"=1, "' ' (space)"=2, ", (comma)"=3, "; (semicolon)"=4),
selected=2)),
conditionalPanel(condition="input.constrIn == '3'",
h5("Paste constraints:"),
tags$textarea(id="constrPaste", rows=4, cols=10, ""),
radioButtons("constrPasteDec", "Decimal separator:",
list("."=1, ","=2)),
radioButtons("constrPasteSep", "Column separator:",
list("\\t (tab)"=1, "' ' (space)"=2, ", (comma)"=3, "; (semicolon)"=4),
selected=2)),
actionButton("applyConstraints", "Apply"),
#radioButtons("constrInterp", label=h5("DVH interpolation"),
# list("Linear"=1,
# "Monotone spline"=2,
# "Local polynomial"=3)),
checkboxInput("constrEUDparam", "Show EUD params ...", FALSE),
conditionalPanel(condition="input.constrEUDparam == true",
textInput("constrEUDa", h5("exponent a"), value=""),
textInput("constrEUDfd", h5("fraction dose"), value=""),
textInput("constrEUDab", h5("alpha/beta ratio"), value="")
),
checkboxInput("constrNTCPparam", "Show (N)TCP params ...", FALSE),
conditionalPanel(condition="input.constrNTCPparam == true",
radioButtons("constrNTCPtype", h5("(N)TCP Model"),
list("Probit (Lyman KB)"=1,
"Logit (Niemierko)"=2,
"Probit (Kaellman)"=3)),
textInput("constrNTCPtd50", h5("T(C)D50"), value=""),
textInput("constrNTCPn", h5("n (=1 / EUD-a)"), value=""),
conditionalPanel(condition="input.constrNTCPtype == '1'",
textInput("constrNTCPm", h5("Lyman m"), value="")
),
conditionalPanel(condition="input.constrNTCPtype != '1'",
textInput("constrNTCPgamma50", h5("Logit/Poisson gamma50"), value="")
)
),
h4("Output table options"),
checkboxInput("constrSemSign", "Semantic negative sign", TRUE),
selectizeInput("constrOut", label=h5("Select table columns"),
choices=constrOut, multiple=TRUE,
selected=c("1", "2", "3", "4", "5", "7", "9", "10", "11"),
width="100%"),
selectizeInput("constrSortBy", label=h5("Sort output table by:"),
choices=c("Compliance"=1,
"Distance"=2,
"Delta volume"=3,
"Delta dose"=4,
"Observed"=5,
"Constraint"=6,
"Patient ID"=7,
"Structure"=8),
multiple=TRUE)#,
#options=c(placeholder='Click to select variables'))
),
conditionalPanel(condition="input.task == 'Show constraints'",
h4("Plot options"),
radioButtons("constrByPat", label=h5("Plot by patient or by structure"),
list("By patient"=1,
"By structure"=2)),
radioButtons("constrPlotVol", label=h5("Plot relative/absolute volume"),
list("Relative volume"=1,
"Absolute volume"=2)),
sliderInput("constrThreshVol", label=h5("Threshold volume"),
min=0, max=100, value=1)
),
conditionalPanel(condition="input.task == 'BED/EQD2'",
h4("Conversion type"),
selectInput("BEDtype", label=NULL,
choices=list("BED"=1,
"EQD2"=2,
"Isoeffective dose"=3),
selected=1),
conditionalPanel(condition="(input.BEDtype == '1') || (input.BEDtype == '2')",
h4("Input"),
textInput("BED_BED_D", h5("Total Dose"),
value=c("50")),
textInput("BED_BED_FD", h5("Fractional Dose"),
value=c("1.5 2 2.5")),
textInput("BED_BED_FN", h5("Number of fractions"),
value=""),
textInput("BED_BED_AB", h5("alpha/beta ratio"),
value="2")
),
conditionalPanel(condition="input.BEDtype == '3'",
h4("Input"),
textInput("BED_IED_D1", h5("Total Dose 1"),
value=c("50")),
textInput("BED_IED_D2", h5("Total Dose 2"),
value=""),
textInput("BED_IED_FD1", h5("Fractional Dose 1"),
value=c("1.5 2 2.5")),
textInput("BED_IED_FD2", h5("Fractional Dose 2"),
value=c("2")),
textInput("BED_IED_FN1", h5("Number of fractions 1"),
value=""),
textInput("BED_IED_FN2", h5("Number of fractions 2"),
value=""),
textInput("BED_IED_AB", h5("alpha/beta ratio"),
value="2")
)
),
conditionalPanel(condition="input.task == 'About'",
h4("Background info")
)
),
mainPanel(
tabsetPanel(
tabPanel("DVH data",
h6("Information from imported DVH file(s)"),
verbatimTextOutput("DVHinfo")
),
tabPanel("Metrics",
h6("Calculate metrics"),
DT::dataTableOutput("metrics"),
downloadButton("saveMetrics", "Save as text file"),
inputPanel(
radioButtons("saveMetrDec", "Decimal separator:",
list("."=1, ","=2)),
radioButtons("saveMetrSep", "Column separator:",
list("\\t (tab)"=1, "' ' (space)"=2, ", (comma)"=3, "; (semicolon)"=4)))
),
tabPanel("Show DVH",
h6("Show cumulative DVH diagrams"),
downloadButton("saveDVHPDF", "Save as PDF"),
downloadButton("saveDVHJPG", "Save as JPEGs (zipped to one file)"),
#plotOutput("DVHplotOrg"),
uiOutput("DVHplot"),
downloadButton("saveDVHMSD", "Save DVH Mean + SD as text file"),
inputPanel(
radioButtons("saveDVHDec", "Decimal separator:",
list("."=1, ","=2)),
radioButtons("saveDVHSep", "Column separator:",
list("\\t (tab)"=1, "' ' (space)"=2, ", (comma)"=3, "; (semicolon)"=4)))
),
tabPanel("Check constraints",
h6("Check constraints"),
DT::dataTableOutput("constraints"),
downloadButton("saveConstrTxt", "Save as text file"),
inputPanel(
radioButtons("saveConstrDec", "Decimal separator:",
list("."=1, ","=2)),
radioButtons("saveConstrSep", "Column separator:",
list("\\t (tab)"=1, "' ' (space)"=2, ", (comma)"=3, "; (semicolon)"=4)))
),
tabPanel("Show constraints",
h6("Show constraints in DVH diagrams"),
downloadButton("saveConstrPDF", "Save as PDF"),
downloadButton("saveConstrJPG", "Save as JPEGs (zipped to one file)"),
#plotOutput("constraintPlotOrg"),
uiOutput("constraintPlot")
),
tabPanel("BED/EQD2",
h6("BED / EQD2 / Isoeffective dose calculation"),
conditionalPanel(condition="input.BEDtype == '1'",
h5("BED")
),
conditionalPanel(condition="input.BEDtype == '2'",
h5("EQD2")
),
conditionalPanel(condition="input.BEDtype == '3'",
h5("Isoeffective dose")
),
verbatimTextOutput("BED")
),
tabPanel("About",
includeHTML("ABOUT.html")
),
id="task"
)
)
)
))
 | /inst/DVHshiny_legacy/ui.R | no_license | cran/DVHmetrics | R | false | false | 16,644 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualisation_and_clustering_functions.R
\name{runCONCLUS}
\alias{runCONCLUS}
\title{Run CONCLUS in one click}
\usage{
runCONCLUS(sceObject, dataDirectory, experimentName,
colorPalette = "default", statePalette = "default",
clusteringMethod = "ward.D2", epsilon = c(1.3, 1.4, 1.5),
minPoints = c(3, 4), k = 0, PCs = c(4, 6, 8, 10, 20, 40, 50),
perplexities = c(30, 40), randomSeed = 42, deepSplit = 4,
preClustered = F, orderClusters = FALSE, cores = 14,
plotPDFcellSim = TRUE, deleteOutliers = TRUE,
tSNEalreadyGenerated = FALSE, tSNEresExp = "")
}
\arguments{
\item{sceObject}{a SingleCellExperiment object with your data.}
\item{dataDirectory}{CONCLUS will create this directory if it doesn't exist and store there all output files.}
\item{experimentName}{most of output file names of CONCLUS are hardcoded.
experimentName will stay at the beginning of each output file name to
distinguish different runs easily.}
\item{colorPalette}{a vector of colors for clusters.}
\item{statePalette}{a vector of colors for states.}
\item{clusteringMethod}{a clustering methods passed to hclust() function.}
\item{epsilon}{a parameter of fpc::dbscan() function.}
\item{minPoints}{a parameter of fpc::dbscan() function.}
\item{k}{preferred number of clusters. Alternative to deepSplit. A parameter of cutree() function.}
\item{PCs}{a vector of first principal components.
For example, to take ranges 1:5 and 1:10 write c(5, 10).}
\item{perplexities}{a vector of perplexity for t-SNE.}
\item{randomSeed}{random seed for reproducibility.}
\item{deepSplit}{intuitive level of clustering depth. Options are 1, 2, 3, 4.}
\item{preClustered}{if TRUE, it will not change the column clusters after the run.
However, it will anyway run DBSCAN to calculate similarity matrices.}
\item{orderClusters}{can be either FALSE (default) of "name".
If "name", clusters in the similarity matrix of cells will be ordered by name.}
\item{cores}{maximum number of jobs that CONCLUS can run in parallel.}
\item{plotPDFcellSim}{if FALSE, the similarity matrix of cells will be saved in png format.
FALSE is recommended for count matrices with more than 2500 cells due to large pdf file size.}
\item{deleteOutliers}{whether cells which were often defined as outliers by dbscan must be deleted.
It will require recalculating of the similarity matrix of cells. Default is FALSE.
Usually those cells form a separate "outlier" cluster and can be easier distinguished and deleted later
if necessary.}
\item{tSNEalreadyGenerated}{if you have already run CONCLUS once and have t-SNE coordinates saved,
you can set TRUE to run the function faster, since it will skip generating the t-SNE coordinates and use the stored ones.
Option TRUE requires t-SNE coordinates to be located in your 'dataDirectory/tsnes' directory.}
\item{tSNEresExp}{experimentName of t-SNE coordinates which you want to use.
This argument allows copying and pasting t-SNE coordinates between different CONCLUS runs without renaming the files.}
}
\value{
A SingleCellExperiment object.
}
\description{
This function performs core CONCLUS workflow. It generates PCA and t-SNE coordinates,
runs DBSCAN, calculates similarity matrices of cells and clusters, assigns cells to clusters,
searches for positive markers for each cluster. The function saves plots and tables into dataDirectory.
}
\keyword{CONCLUS}
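\examples{
\dontrun{
## Hedged sketch (added): the directory and parameter values are illustrative.
sceObject <- runCONCLUS(sceObject, dataDirectory = "conclus_results",
                        experimentName = "test_run", k = 10, cores = 2)
}
}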
 | /man/runCONCLUS.Rd | no_license | PolinaPavlovich/CONCLUS | R | false | true | 3,442 | rd |
library(mgpd)
### Name: d123GpsiAlog
### Title: internal
### Aliases: d123GpsiAlog
### Keywords: internal
### ** Examples
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (t1, t2, t3, alpha = 2, A1 = 0, A2 = 0, B1 = 2, B2 = 2,
...)
eval({
t1 <- t1
t2 <- t2
t3 <- t3
alpha <- alpha
A1 <- A1
A2 <- A2
B1 <- B1
B2 <- B2
.d123GpsiAlog
})
| /data/genthat_extracted_code/mgpd/examples/d123GpsiAlog.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 524 | r |
#' @include utilities.R utilities_two_sample_test.R
NULL
#'T-test
#'
#'
#'@description Provides a pipe-friendly framework to perform one- and two-sample
#' t-tests. Read more: \href{https://www.datanovia.com/en/lessons/t-test-in-r/}{T-test in R}.
#'@inheritParams stats::t.test
#'@param data a data.frame containing the variables in the formula.
#'@param formula a formula of the form \code{x ~ group} where \code{x} is a
#' numeric variable giving the data values and \code{group} is a factor with
#' one or multiple levels giving the corresponding groups. For example,
#' \code{formula = TP53 ~ cancer_group}.
#'@param paired a logical indicating whether you want a paired test.
#'@param ref.group a character string specifying the reference group. If
#' specified, for a given grouping variable, each of the group levels will be
#' compared to the reference group (i.e. control group).
#'
#' If \code{ref.group = "all"}, pairwise two sample tests are performed for
#' comparing each grouping variable levels against all (i.e. basemean).
#'@param mu a number specifying an optional parameter used to form the null hypothesis.
#'@param comparisons A list of length-2 vectors specifying the groups of
#' interest to be compared. For example to compare groups "A" vs "B" and "B" vs
#' "C", the argument is as follow: \code{comparisons = list(c("A", "B"), c("B",
#' "C"))}
#'@param p.adjust.method method to adjust p values for multiple comparisons.
#' Used when pairwise comparisons are performed. Allowed values include "holm",
#' "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none". If you don't
#' want to adjust the p value (not recommended), use p.adjust.method = "none".
#'@param pool.sd logical value used in the function \code{pairwise_t_test()}.
#' Switch to allow/disallow the use of a pooled SD.
#'
#' The \code{pool.sd = TRUE} (default) calculates a common SD for all groups
#' and uses that for all comparisons (this can be useful if some groups are
#' small). This method does not actually call t.test, so extra arguments are
#' ignored. Pooling does not generalize to paired tests so pool.sd and paired
#' cannot both be TRUE.
#'
#' If \code{pool.sd = FALSE} the standard two sample t-test is applied to all
#' possible pairs of groups. This method calls the \code{t.test()}, so extra
#' arguments, such as \code{var.equal} are accepted.
#'
#'@param detailed logical value. Default is FALSE. If TRUE, a detailed result is
#' shown.
#'@param ... other arguments to be passed to the function
#' \code{\link[stats]{t.test}}.
#'
#'@details
#'
#'- If a list of comparisons is specified, the result of the pairwise tests is
#'filtered to keep only the comparisons of interest. The p-value is adjusted
#'after filtering.
#'
#'- For a grouped data, if pairwise test is performed, then the p-values are
#'adjusted for each group level independently.
#'
#'@return a data frame with some of the following columns: \itemize{ \item
#' \code{.y.}: the y variable used in the test. \item \code{group1,group2}: the
#' compared groups in the pairwise tests. \item \code{n,n1,n2}: Sample counts.
#' \item \code{statistic}: Test statistic used to compute the p-value. \item
#' \code{df}: degrees of freedom. \item \code{p}: p-value. \item \code{p.adj}:
#' the adjusted p-value. \item \code{method}: the statistical test used to
#' compare groups. \item \code{p.signif, p.adj.signif}: the significance level
#' of p-values and adjusted p-values, respectively. \item \code{estimate}:
#' estimate of the effect size. It corresponds to the estimated mean or
#' difference in means depending on whether it was a one-sample test or a
#' two-sample test. \item \code{estimate1, estimate2}: show the mean values of
#' the two groups, respectively, for independent samples t-tests. \item
#' \code{alternative}: a character string describing the alternative
#' hypothesis. \item \code{conf.low,conf.high}: Lower and upper bound on a
#' confidence interval. }
#'
#' The \strong{returned object has an attribute called args}, which is a list
#' holding the test arguments.
#' @examples
#' # Load data
#' #:::::::::::::::::::::::::::::::::::::::
#' data("ToothGrowth")
#' df <- ToothGrowth
#'
#' # One-sample test
#' #:::::::::::::::::::::::::::::::::::::::::
#' df %>% t_test(len ~ 1, mu = 0)
#'
#'
#' # Two-samples unpaired test
#' #:::::::::::::::::::::::::::::::::::::::::
#' df %>% t_test(len ~ supp)
#'
#' # Two-samples paired test
#' #:::::::::::::::::::::::::::::::::::::::::
#' df %>% t_test (len ~ supp, paired = TRUE)
#'
#' # Compare supp levels after grouping the data by "dose"
#' #::::::::::::::::::::::::::::::::::::::::
#' df %>%
#' group_by(dose) %>%
#' t_test(data =., len ~ supp) %>%
#' adjust_pvalue(method = "bonferroni") %>%
#' add_significance("p.adj")
#'
#' # pairwise comparisons
#' #::::::::::::::::::::::::::::::::::::::::
#' # As dose contains more than two levels ==>
#' # pairwise test is automatically performed.
#' df %>% t_test(len ~ dose)
#'
#' # Comparison against reference group
#' #::::::::::::::::::::::::::::::::::::::::
#' # each level is compared to the ref group
#' df %>% t_test(len ~ dose, ref.group = "0.5")
#'
#' # Comparison against all
#' #::::::::::::::::::::::::::::::::::::::::
#' df %>% t_test(len ~ dose, ref.group = "all")
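#'
#' # Restricting to comparisons of interest
#' #::::::::::::::::::::::::::::::::::::::::
#' # (sketch added for illustration) pairwise tests are filtered to the
#' # listed pairs and p-values are adjusted after filtering
#' df %>% t_test(len ~ dose, comparisons = list(c("0.5", "1"), c("1", "2")))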
#'
#'@describeIn t_test t test
#'@export
t_test <- function(
data, formula, comparisons = NULL, ref.group = NULL,
p.adjust.method = "holm",
paired = FALSE, var.equal = FALSE, alternative = "two.sided",
mu = 0, conf.level = 0.95, detailed = FALSE
)
{
env <- as.list(environment())
args <- env %>%
.add_item(method = "t_test")
params <- env %>%
remove_null_items() %>%
add_item(method = "t.test")
outcome <- get_formula_left_hand_side(formula)
group <- get_formula_right_hand_side(formula)
number.of.groups <- guess_number_of_groups(data, group)
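  # with more than two groups and ref.group = "all", stack a copy of the data
  # labelled "all" so each group level can be compared against the base mean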
if(number.of.groups > 2 & !is.null(ref.group)){
if(ref.group %in% c("all", ".all.")){
params$data <- create_data_with_all_ref_group(data, outcome, group)
params$ref.group <- "all"
}
}
test.func <- two_sample_test
if(number.of.groups > 2)
test.func <- pairwise_two_sample_test
do.call(test.func, params) %>%
set_attrs(args = args) %>%
add_class(c("rstatix_test", "t_test"))
}
#'@describeIn t_test performs pairwise two sample t-test. Wrapper around the R
#' base function \code{\link[stats]{pairwise.t.test}}.
#'@export
pairwise_t_test <- function(
data, formula, comparisons = NULL, ref.group = NULL,
p.adjust.method = "holm", paired = FALSE, pool.sd = !paired,
detailed = FALSE, ...) {
args <- c(as.list(environment()), list(...)) %>%
.add_item(method = "t_test")
if(paired) pool.sd <- FALSE
if(pool.sd){
res <- pairwise_t_test_psd(
data, formula, comparisons = comparisons, ref.group = ref.group,
p.adjust.method = p.adjust.method, detailed = detailed, ...
)
}
else{
res <- pairwise_two_sample_test(
data, formula, method = "t.test",
comparisons = comparisons, ref.group = ref.group,
p.adjust.method = p.adjust.method, paired = paired,
detailed = detailed, ...
)
}
res %>%
set_attrs(args = args) %>%
add_class(c("rstatix_test", "t_test"))
}
pairwise_t_test_psd <- function(
data, formula, comparisons = NULL, ref.group = NULL,
p.adjust.method = "holm", alternative = "two.sided",
detailed = FALSE
)
{
. <- NULL
if(is_grouped_df(data)){
results <- data %>%
doo(pairwise_t_test_psd, formula, comparisons,
ref.group, p.adjust.method, alternative = alternative,
detailed = detailed)
return(results)
}
outcome <- get_formula_left_hand_side(formula)
group <- get_formula_right_hand_side(formula)
# Convert group into factor if this is not already the case
data <- data %>% .as_factor(group, ref.group = ref.group)
outcome.values <- data %>% pull(!!outcome)
group.values <- data %>% pull(!!group)
group.size <- data %>% get_group_size(group)
# Compute pairwise t-test
group1 <- group2 <- p.value <- NULL
results <- stats::pairwise.t.test(
outcome.values, group.values,
p.adjust.method = "none", pool.sd = TRUE,
alternative = alternative
) %>%
tidy() %>%
select(group2, group1, p.value)
colnames(results) <- c("group1", "group2", "p")
n1 <- group.size[results$group1]
n2 <- group.size[results$group2]
results <- results %>%
mutate(method = "T-test") %>%
add_column(.y. = outcome, .before = 1) %>%
add_column(n1 = n1, n2 = n2, .after = "group2")
# If ref.group specified, keep only comparisons against reference
if(!is.null(ref.group)){
results <- results %>%
filter(group1 == ref.group)
}
# If a comparison list is provided, extract the comparisons of interest
if(!is.null(comparisons)){
results <- comparisons %>%
purrr::map_dfr(~ results %>% filter(group1 %in% .x & group2 %in% .x) )
}
p <- p.adj <- NULL
results <- results %>%
adjust_pvalue(method = p.adjust.method) %>%
add_significance("p") %>%
add_significance("p.adj") %>%
mutate(
p = signif(p, digits = 3),
p.adj = signif(p.adj, digits = 3)
)
if(!detailed) results <- remove_details(results, method = "t.test")
results
}
| /R/t_test.R | no_license | kassambara/rstatix | R | false | false | 9,301 | r |
library(shiny)
library(ggplot2)
shinyServer(function(input, output) {
output$downloadPDF <-
downloadHandler(
filename = function() {
if(input$format=="pdf"){
extension <- ".pdf"
}else{
extension <- ".html"
}
paste("Zufriedenheit_", paste(as.character(input$kategorie)), extension, sep = "")
},
content = function(file) {
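        # knit2pdf() renders the report into the app's working directory;
        # the finished document is then copied to `file`, the temporary
        # path that Shiny supplies for the download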
# get file
inFile <- input$data_zufriedenheit
# hand over config
studiengang_id <- paste(as.character(input$kategorie))
datum <- paste("", format(input$date2, format = "%d.%m.%Y"))
# generate PDF
library(knitr)
knit2pdf("zufriedenheitsanalyse.rnw", encoding = "UTF-8")
# copy pdf/html to 'file'
if(input$format=="pdf"){
file.copy("zufriedenheitsanalyse.pdf", file)
}else{
file.copy("zufriedenheitsanalyse.html", file)
}
# delete generated files
file.remove(
"zufriedenheitsanalyse.pdf",
"zufriedenheitsanalyse.tex",
"zufriedenheitsanalyse.aux",
"zufriedenheitsanalyse.log",
"zufriedenheitsanalyse.toc",
"zufriedenheitsanalyse.html"
)
# delete folder with plots
unlink("figure", recursive = TRUE)
},
contentType = "application/pdf"
)
})
| /server.R | no_license | weibelzahl/Zufriedenheitsanalyse | R | false | false | 1,406 | r |
testlist <- list(A = structure(c(2.86289070918112e-309, 1.41479939360447e-303, 7.83605492824235e-107, 1.22433025349421e-250, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 6L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613104092-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 383 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{draw.palette}
\alias{draw.palette}
\title{draw.palette}
\usage{
draw.palette(col, border = "light gray", ...)
}
\arguments{
\item{col}{Palette vector}
\item{border}{Border used for plotting}
\item{...}{Additional params forwarded to plot.}
}
\value{
Nothing. Draws a palette.
}
\description{
Utility function that draws a palette.
}
\examples{
draw.palette(unlist( rwth.colorpalette() ))
}
| /man/draw.palette.Rd | permissive | Sumidu/ccscourse | R | false | true | 484 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svnfunctions.R
\name{saveManifest}
\alias{saveManifest}
\title{Save manifest to SVN}
\usage{
saveManifest()
}
\description{
Save manifest to SVN
}
| /man/saveManifest.Rd | permissive | piguy314159265/AlteryxRhelper | R | false | true | 226 | rd |
library(dplyr)
library(data.table)
## use read.table to read data and remove "" as NA
getCleanedData <- function(file_name){
con <- file(file_name,"r")
d <- read.table(con, sep = "", header = F, na.strings = "", stringsAsFactor = F)
close(con)
d
}
## 1.Merges the training and the test sets to create one data set.
## get test & traing data from files in sequence of y, subject X
test <- getCleanedData("./UCI HAR Dataset/test/y_test.txt") %>%
cbind(getCleanedData("./UCI HAR Dataset/test/subject_test.txt")) %>%
cbind(getCleanedData("./UCI HAR Dataset/test/X_test.txt"))
train <-getCleanedData("./UCI HAR Dataset/train/y_train.txt") %>%
cbind(getCleanedData("./UCI HAR Dataset/train/subject_train.txt")) %>%
cbind(getCleanedData("./UCI HAR Dataset/train/X_train.txt"))
all_data <- rbind(test,train)
##2.Extracts only the measurements on the mean and standard deviation for each measurement.
features <- getCleanedData("./UCI HAR Dataset/features.txt") ## get features
v1 <- grepl("mean|std", tolower(features$V2)) ## extract column names containing mean or std
v2 <- c(TRUE, TRUE) ## keep column 1 & 2
v <- c(v2,v1) ##all columns, TRUE to keep
selected_data <- all_data[,v]
## 3.Uses descriptive activity names to name the activities in the data set
## get activity_labels (1. WALKING 2 WALKING_UPSTAIRS...6)
activity_labels <- getCleanedData("./UCI HAR Dataset/activity_labels.txt")%>%
rename("activity" = V1)
## merge activity names (assign 2 table the same column name for merge)
selected_data <- rename(selected_data,"activity" = V1)
merged_data <- merge(activity_labels,selected_data,by.x="activity", by.y="activity")
## 4.Appropriately labels the data set with descriptive variable names.
## assign column names for the first 3 columns
colnames(merged_data)[1:3] <- c("activity", "activity_name", "subject")
## assign the rest from features extracted columns
extract_colnames <- features[,2][v1]
for(i in 1: length(extract_colnames)){
colnames(merged_data)[i+3] <- as.character(extract_colnames[i])
}
## 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
new_data <- aggregate(merged_data[, 4:ncol(merged_data)],
by = list(activity = merged_data$activity,subject = merged_data$subject), mean)
write.table(new_data,"week4_output.txt", sep = " ", col.names = FALSE)
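## (illustrative sketch) to inspect the exported file: no header row was
## written (col.names = FALSE) and the first column holds row names
tidy_check <- read.table("week4_output.txt", sep = " ", header = FALSE)
str(tidy_check)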
| /run_analysis.r | no_license | yschangd/lesson3_week4_programming | R | false | false | 2,456 | r |
# install.packages("ggplot2")
library(ggplot2)
library(gapminder)
dt = gapminder
continent = gapminder$continent
ggplot(dt, aes(x=lifeExp, y=gdpPercap, color=continent)) + geom_point()
| /Quiz/Quiz 4-2.R | no_license | YuCheng21/nkust-data-science | R | false | false | 188 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str.R
\name{str_paste}
\alias{str_paste}
\title{Equivalent to base paste but with an extra na.rm parameter}
\usage{
str_paste(..., sep = " ", collapse = NULL, na.rm = TRUE)
}
\arguments{
\item{...}{one or more R objects, to be converted to character vectors.}
\item{sep}{a character string to separate the terms.}
\item{collapse}{an optional character string to separate the results.}
\item{na.rm}{logical. Should missing values (including NaN) be removed?}
}
\value{
A character vector of the concatenated values.
}
\description{
Found on : \url{http://stackoverflow.com/questions/13673894/suppress-nas-in-paste}
}
\examples{
paste("chaine1", NA_character_, "chaine2")
caractr::str_paste("chaine1", NA_character_, "chaine2")
}
| /man/str_paste.Rd | permissive | stephLH/caractr | R | false | true | 810 | rd |
# read the 25 per-motif analysis tables and combine them into one data frame
motif_files <- sprintf("dsech_analysis_motif_%d.tsv", 1:25)
dsech_all_motifs <- do.call(rbind, lapply(motif_files, read.table, header = TRUE))
dsech_all_motifs$adj_p <- p.adjust(dsech_all_motifs$p_value, method = "BH") # BH correction pooled across all motifs
write.table(dsech_all_motifs,"dsech_all_motifs.tsv",row.names = FALSE,sep = "\t")
| /Supplemental_figures/FigS26/dsech_overlap/moti_all_analysis.R | no_license | LarracuenteLab/simulans_clade_satDNA_evolution | R | false | false | 2,905 | r |
/cachematrix.R | no_license | Ilia-Milanov/ProgrammingAssignment2 | R | false | false | 1,148 | r | ||
#Pairs Trading - Strategy
setwd("~/Quant Finance")
library(quantmod)
library(PerformanceAnalytics)
library(tseries)
library(zoo)
library(dplyr)
library(knitr)
start_date = "2012-01-01"
end_date = "2020-01-01"
title <- c("Coca-Cola vs Pepsi")
stock_list <- c("KO","PEP")
getSymbols(stock_list, src = "yahoo", from = start_date, to=end_date)
#update the list of stocks
stock_pair <- list(a = (Cl(KO)), b = (Cl(PEP)), name= title, hedge_ratio= 0.677)
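# (illustrative sketch) hedge_ratio above is hard-coded; a common way to derive
# such a value is a zero-intercept OLS regression of one close series on the
# other, shown only as an example, not necessarily how 0.677 was obtained
fit <- lm(as.numeric(Cl(KO)) ~ as.numeric(Cl(PEP)) + 0)
estimated_ratio <- unname(coef(fit)[1])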
#Strategy: if the spread closes above the upper band, go short;
#if it closes below the lower band, go long
#Step 1: Generate the spread
spread <- stock_pair$a - stock_pair$hedge_ratio*stock_pair$b
spread <- na.omit(merge(spread, BBands(spread)))
names(spread) <- c('close', 'dn', 'mavg', 'up', 'pctb')
# short when spread close is above up
spread$sig[spread$close > spread$up] <- -1
# long when spread close is below dn
spread$sig[spread$close < spread$dn] <- 1
# Flat where spread close crossed the mavg
spread$sig[(diff(sign(spread$close - spread$mavg)) != 0)] <- 0
spread$sig[1] <- 0 # flat on the first day
spread$sig[nrow(spread)] <- 0 # flat on the last day
spread$sig <- na.locf(spread$sig) # wherever sig is NA, copy previous value to next row
spread$sig <- Lag(spread$sig) # shift signals one bar so trades act on the prior day's signal (avoids look-ahead)
spread$sig[1] <- 0 # flat on the first day
table(spread$sig)
dev.new()
par(mfrow=c(1,1))
plot(spread$mavg, main= "KO vs PEP spread with bollinger bands",
col="red", ylab="Spread", type='l')
lines(spread$up, col="purple")
lines(spread$dn, col="purple")
lines(spread$close, col="green")
#Calculate spread daily ret
a_returns <- dailyReturn(stock_pair$a)
b_returns <- dailyReturn(stock_pair$b)
ab_daily_returns <- a_returns - stock_pair$hedge_ratio*b_returns
#create a table with your returns
Returns <- na.omit(spread$sig) * ab_daily_returns
#plot the benchmark of the stock with the performance of your strategy
charts.PerformanceSummary(cbind(dailyReturn(spread$close),Returns))
#create a calendar table of returns
kable(table.CalendarReturns(Returns), caption = "Calendar Returns")
| /SA - Pairs Trading - Trading Strategy.R | permissive | hithere34114/Quant-Finance | R | false | false | 2,038 | r |
# 01-kmeans-app
library(shiny)
setwd("/Users/wangqi/Desktop/Github/dashboard-Rshiny/")
df <- read.csv("sample_data.csv")
ui <- fluidPage(
headerPanel('Dashborad with R-shiny'),
selectInput('selectedK', 'K Variable', unique(df$K)),
sidebarPanel(
radioButtons('ycol', 'Y Variable', choices = c("Y", "Z"),
selected = "Y")
),
mainPanel(
plotOutput('plot1')
)
)
server <- function(input, output) {
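  # selectedData() is a cached reactive: it re-runs only when input$selectedK
  # or input$ycol changes, and its result is shared by dependent outputs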
selectedData <- reactive({
df[df$K == input$selectedK, c("X", input$ycol)]
})
output$plot1 <- renderPlot({
plot(selectedData(), main = input$ycol)
})
}
shinyApp(ui = ui, server = server)
| /app.R | no_license | wangqi12240205/JHH-risk-prediction | R | false | false | 651 | r |
### packages ####
library(dplyr)
library(ggplot2)
library(paleoTS)
library(stats)
library(moments)
library(pgirmess)
library(vegan)
#library(plotly)
#### Data basis - import data set from Excel ####
setwd("C:/Users/Jule/Documents/Uni/MA")
tidyCL<-read.csv("tortoises_tidy.csv", sep=";", header=TRUE)
## change to:
# setwd("C:/Users/Jule/Documents/Uni/MA/PublicationTortoises")
# tidyCL<-read.csv("tortoises_tidy_pub.csv", sep=";", header=TRUE)
# C:\Users\Jule\Documents\Uni\MA\PublicationTortoises
##### prepare for analysis (fix column names in .csv file after converting table from Excel) ####
colnames(tidyCL)[6] <- "MAmin"
colnames(tidyCL)[7] <- "Mamax"
colnames(tidyCL)[17] <- "CL"
colnames(tidyCL)[18] <- "PL"
colnames(tidyCL)[21] <- "estimated"
tidyCL <- tidyCL %>%
mutate(Age= ((MAmin)+(as.numeric(Mamax)))/2) #%>%
# filter(estimated=="m" | estimated=="mo"| estimated=="mf") #%>%
# filter(!is.na(CL))
####### import extant data ####
extant <- read.csv("MFN_testudinidae.csv", sep=";", header=TRUE, dec=".", na.strings = "NA", stringsAsFactors=FALSE) # file: MFN_testudinidae.csv
colnames(extant)[5] <- "SCL"
colnames(extant)[11] <- "PL"
colnames(extant)[12] <- "PLmid"
Extant <- extant %>%
mutate(CL = SCL * 10, PL=PL*10, PLmid=PLmid*10)
#### estimating CL from PL ####
#table shows CL/PL ratios for all fossil taxa that have both measurements available
CLPLtidy <- tidyCL %>%
filter(!is.na(CL) & !is.na(PL)) %>%
dplyr::select(Taxon, CL, PL, size, Age, Island, Continent, Genus) %>%
mutate(ratio=CL/PL) %>%
group_by(Taxon) %>% #to show ratios per Taxon, leave out to get a total ratio
dplyr::summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
#table shows CL/PL ratios for all extant taxa that have both measurements available
CLPLextant <- Extant %>%
dplyr::filter(!is.na(CL) & !is.na(PL)) %>%
dplyr::select(Taxon=Species, CL, PL, PLmid, Island, Continent, Genus) %>%
mutate(ratio=CL/PL, ratioMid=CL/PLmid) #%>%
# group_by(Taxon) %>% #to show ratios per Taxon, leave out to get a total ratio
# summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
#kruskal.test(CLPLextant$meanRatio, CLPLextant$Taxon)
#KruskalMC <- data.frame(kruskalmc(CLPLextant$ratio, CLPLextant$Taxon))
Ratio <- CLPLextant %>%
dplyr::summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
RatioSpecies <- CLPLextant %>%
group_by(Taxon) %>% #to show ratios per Taxon, leave out to get a total ratio
dplyr::summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
fossil <- tidyCL %>%
mutate(Taxon=as.character(Taxon), Island=as.character(Island), Continent=as.character(Continent), estimated=as.character(estimated), Genus=as.character(Genus)) %>%
dplyr::select(Taxon, CL, PL, size, estimated, Age, Island, Continent, Genus)
modern <- Extant %>%
dplyr::select(Taxon=Species, CL, PL, estimated, Age, Island, Continent, Genus)
All <- bind_rows(modern, fossil)
testRatio <- All %>% #tidyCL
dplyr::select(Taxon, CL, PL, size, estimated, Age, Island, Continent, Genus) %>%
mutate(extraCL = PL*Ratio$meanRatio) %>%
dplyr::select(Taxon, CL, extraCL, PL, size, estimated, Age, Island, Continent, Genus)
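# (illustrative sketch) before filling gaps, check how well the ratio-based
# estimate tracks measured CL on specimens that have both values
with(testRatio[!is.na(testRatio$CL) & !is.na(testRatio$extraCL), ],
     cor(CL, extraCL))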
testRatio$CL[is.na(testRatio$CL)] <- testRatio$extraCL[is.na(testRatio$CL)]
##### Time bins (stratigraphic stages) ######
# Bin data, smaller bins
Miocene <- testRatio %>% #testRatio or tidyCL
filter(Age < 23.000)
# PleiPlio$bin <- cut(PleiPlio$Age, c(0, 0.000001, 0.0117, 0.126, 0.781, 2.588, 3.6, 5.332, 11.608))
# EpochBins <- as.vector(c("Modern", "Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Upper Pliocene", "Lower Pliocene", "Upper Miocene"))
# MeanBins <- as.vector(c((0+0.000001)/2, (0+0.0117)/2, (0.0117+0.126)/2, (0.126+0.781)/2, (0.781+2.588)/2,(2.588+3.6)/2, (3.6+5.332)/2, (5.332+11.608)/2))
# new cuts: 1.806, 4.466, 7.424, 9.516, 13.789, 18
Miocene$bin <- cut(Miocene$Age, c(0, 0.0117, 0.126, 0.781, 1.806, 2.588, 3.6,
5.332, 7.246, 11.608, 13.82, 15.97, 23.03))
EpochBins <- as.vector(c("Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Gelasian", "Piacencian", "Zanclean", "Messinian","Tortonian", "Serravallian","Langhian",
"Burdigalian/Aquitanian"))
Stages <- as.vector(c("Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Lower Pleistocene", "Upper Pliocene", "Lower Pliocene", "Upper Miocene","Upper Miocene",
"Middle Miocene","Middle Miocene", "Lower Miocene"))
MeanBins <- as.vector(c((0+0.0117)/2, (0.0117+0.126)/2, (0.126+0.781)/2, (0.781+1.806)/2, (1.806+2.588)/2,(2.588+3.6)/2, (3.6+5.332)/2, (5.332+7.246)/2, (7.246+11.608)/2, (11.608+13.82)/2, (13.82+15.97)/2, (15.97+23.03)/2))
#1.806, 4.466, 7.424, 9.516, 13.789, 18
BinsMio <- Miocene %>%
select(bin) %>%
group_by(bin) %>%
dplyr::summarise(nIndividuals=n())
BinsSpeciesMio <- Miocene %>%
select(bin, Taxon) %>%
group_by(bin, Taxon) %>%
dplyr::summarise(nSpecies=n()) %>%
dplyr::summarise(nSpecies=n())
BinsGeneraMio <- Miocene %>%
select(bin, Genus) %>%
group_by(bin, Genus) %>%
dplyr::summarise(nGenera=n()) %>%
dplyr::summarise(nGenera=n())
bin <- as.vector(unique(BinsMio$bin))
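# note: EpochBins, Stages and MeanBins are paired with `bin` purely by
# position, so their order must match the age-ordered cut() breaks above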
BINSMio <- data.frame(bin, EpochBins, Stages, MeanBins) %>%
merge(BinsMio) %>%
merge(BinsSpeciesMio) %>%
merge(BinsGeneraMio) %>%
arrange(MeanBins)
BINSMio$EpochBins = factor(BINSMio$EpochBins,
levels= c("Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Gelasian", "Piacencian", "Zanclean", "Messinian","Tortonian", "Serravallian","Langhian",
"Burdigalian/Aquitanian" ))
#BINS <- read.table("timebins.txt", sep="\t", header=TRUE)
#kable(Bins, caption="Time bins with corresponding sample sizes (individuals)")
#kable(BINSMio, caption="Smaller time bins with age range, epoch name, mean age and corresponding sample sizes (on individual, species and genus level)")
PleiPlioCL <- Miocene %>%
merge(BINSMio) %>%
filter(!is.na(CL))
# SUM <- PleiPlioCL %>%
# group_by(MeanBins) %>%
# summarise(n=n())
#na <- PleiPlioCL$EpochBins[!complete.cases(PleiPlioCL$EpochBins)]
#### Data overview - Scatterplot CL~Time ####
Continent <- as.vector(unique(PleiPlioCL$Continent))
# collapse sub-regions to continents, paired by position with the vector above
Con <- as.vector(c("Asia", "Africa", "Europe", "America", "America", "America", "Europe"))
Continents <- data.frame(Continent, Con)
scatter <- testRatio %>%
filter(!is.na(CL)) %>%
filter(Age < 23.000) %>%
merge(Continents) %>%
mutate(Insularity=ifelse(Island=="y","insular", "continental")) %>%
select(CL, Age, Insularity, Con, estimated) %>%
ggplot(aes(-Age, CL))+ # for time to start at the left: -Age
geom_vline(xintercept = c(0, -0.0117, -0.126, -0.781, -1.806, -2.588, -3.6,
-5.332, -7.246, -11.608, -13.82, -15.97, -23.03))+
geom_jitter() + #aes(shape=Insularity, colour= Con)
theme_classic() +
# geom_vline(xintercept= c(0, 0.0117, 0.126, 0.781, 2.588, 3.6, 5.332, 11.608, 15.97, 23.03)) +
geom_vline(xintercept= -20.44, linetype="dashed") +
scale_color_manual(values=c("#000000", "#E69F00", "#FF00FF", "#009E73", "#56B4E9","#F0E442", "#0072B2", "#D55E00", "#CC79A7") , name="Continents")+
# scale_shape_manual(values= c(16, 17), name="Insularity", breaks=c("continental", "insular")) +
xlab("Time [mya]") + ylab("Carapace length [mm]")#+ #+ coord_trans(x="log2") +
#scale_x_log10()
# labs(shape="Insularity", colour="Continents")
#
scatter
# fig.cap="Scatterplot of carapace length over time, indicating insular (triangle) and continental (circles)
# nd colour indicating continents. Lines indicate stratigraphic stages which were used as time bins, the dashed
# line is the border between the two stages of the Lower Miocene, which were consideres as one time bin.
#ggsave("OvervieData.png", height=5, width=8, units='in', dpi=800)
#length(scatter$CL)
#### Maps - fossil occurences of testudinidae #####
CL <- tidyCL %>%
# filter(Age < 11.000) %>%
dplyr::select(Locality, Genus, Taxon, Latitude, Longitude, Country, Age) %>%
group_by(Locality) %>%
mutate(count= n())
length(unique(CL$Locality))
FossilOccurrences<-read.csv("tortoises_occurrences.csv", sep=";", header=TRUE)
FossOcc <- FossilOccurrences %>%
mutate(Age= (as.numeric(as.character(MA.min)) + as.numeric(as.character(Ma.max)))/2) %>%
select(Locality, Country, Latitude, Longitude, Age, Genus, Taxon, Clavailability) %>%
# merge(tidyCL) %>%
group_by(Locality) %>%
mutate(count= n(), Longitude=as.numeric(as.character(Longitude)))
Occurrences <- FossilOccurrences %>%
mutate(Age= (as.numeric(as.character(MA.min)) + as.numeric(as.character(Ma.max)))/2) %>%
select(Locality, Country, Latitude, Longitude, Age, Genus, Taxon, Author) %>%#, comment, Clavailability
arrange(Age)
#kable(Occurrences, row.names = TRUE, caption="fossil occurrences")
#na <- FossOcc[!complete.cases(FossOcc),]
# OccMap <- merge(Map, FossOcc, by="Locality", incomparables=TRUE) %>%
# distinct()
# unique(CL$Locality) #wie viele localities
#length(unique(FossOcc$Locality[which(FossOcc$Clavailability == "yes")]))
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
cbbPalette <- c("#000000", "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
OwnPalette <- c("#000000", "#000000", "#56B4E9") #alternativ: c("#000000","#000000", "#E69F00", "#56B4E9")#
mapOc <- FossOcc %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(data=FossOcc, alpha=1/4,(aes(as.numeric(Longitude), Latitude, colour=FossOcc$Clavailability))) + # alpha= Transparency
geom_point(data=CL, alpha=1/2,(aes(Longitude, Latitude, colour="yes"))) +
# geom_point(data=pdbd, alpha=1/6,(aes(Longitude, Latitude, colour="pdbd"))) +
theme(legend.position="none") +
scale_colour_manual(values=OwnPalette) +#cbbPalette +
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90))
#unique(FossOcc$CL)
mapOc
# fig.cap="Map displaying all fossil occurrences of testudinids, with color indicating whether relevant
# literature was available (black if not) and if it was, whether body size data was available or not
# (yes and no, respectively).
#ggsave("MapOccurrences.png", height=5, width=8, units='in', dpi=800)
# require(plotly)
# ggplotly(mapOc)
# length(CL$Locality)
# length(unique(CL$Locality)) #wie viele localities
# length(unique(FossOcc$Locality[which(FossOcc$Clavailability == "yes")]))
##### compare with PDBD data! #####
#pbdb_testudinidae.csv
setwd("C:/Users/Jule/Documents/Uni/MA/PublicationTortoises")
PDBD<-read.csv("pbdb_testudinidae.csv", sep=",", header=TRUE, skip=15)
names(PDBD)
pdbd <- PDBD %>%
select(collection_no, identified_name, accepted_name, early_interval, late_interval, max_ma, min_ma,
reference_no, Longitude=lng, Latitude=lat, cc, state, county, Locality=geogcomments) %>%
mutate(Age= (as.numeric(as.character(min_ma)) + as.numeric(as.character(max_ma)))/2)
length(unique(pdbd$Locality)) # geogcomments = Locality in my table, I guess
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
#cbbPalette <- c("#000000", "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#OwnPalette <- c("#000000", "#000000", "#56B4E9") #alternativ: c("#000000", "#E69F00", "#56B4E9")
mapPDBD <- pdbd %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(data=pdbd, alpha=1/4,(aes(as.numeric(Longitude), Latitude, colour="#000000"))) +
# geom_point(data=CL, alpha=1/2,(aes(Longitude, Latitude, colour="yes"))) +
theme(legend.position="none") +
# scale_colour_manual(values=OwnPalette) +#cbbPalette +
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90))
#unique(FossOcc$CL)
mapPDBD
##### check sample size (?) for Miocene!#####
CLpdbdCom <- tidyCL %>%
filter(Age < 23.000 & Age > 5.330) %>%
dplyr::select(Locality, Genus, Taxon, Latitude, Longitude, Country, Age) %>%
group_by(Locality) %>%
mutate(count= n())
FossOccPdbdCom <- FossilOccurrences %>%
mutate(Age= (as.numeric(as.character(MA.min)) + as.numeric(as.character(Ma.max)))/2) %>%
filter(Age < 23.000 & Age > 5.330) %>%
select(Locality, Country, Latitude, Longitude, Age, Genus, Taxon, Clavailability) %>%
# merge(tidyCL) %>%
group_by(Locality) %>%
mutate(count= n(), Longitude=as.numeric(as.character(Longitude)))
Pdbd <- pdbd %>%
filter(Age < 23.000 & Age > 5.330) %>%
group_by(Locality) %>%
mutate(count= n())
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
cbbPalette <- c("#000000", "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
OwnPalette <- c("#000000","#000000", "#E69F00", "#56B4E9")#c("#000000", "#000000", "#56B4E9") #alternativ:
mapOcPDBDMiocene <- FossOccPdbdCom %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(data=FossOccPdbdCom, alpha=1/4,(aes(as.numeric(Longitude), Latitude, colour="FossOcc"))) + # alpha= Transparency
geom_point(data=CLpdbdCom, alpha=1/2,(aes(Longitude, Latitude, colour="CL"))) +
geom_point(data=Pdbd, alpha=1/6,(aes(Longitude, Latitude, colour="PDBD"))) +
# theme(legend.position="none") +
scale_colour_manual(values=c("#000000", "#E69F00", "#56B4E9")
#,labels=c("FossOcc", "CL", "PDBD")
) +#cbbPalette +
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90))
#unique(FossOcc$CL)
mapOcPDBDMiocene
#ggsave("MapMioceneComparison.png", height=5, width=8, units='in', dpi=800)
# require(plotly)
# ggplotly(mapOcPDBDMiocene)
##### PDBD data #####
pdbd
unique(pdbd$accepted_name)
length(unique(pdbd$accepted_name))
unique(pdbd$identified_name)
length(unique(pdbd$identified_name))
PDBDMiocene <- pdbd %>%
filter(Age < 23.000 & Age > 5.330)
speciesPDBDMio <- as.vector(unique(PDBDMiocene$identified_name))
length(speciesPDBDMio)
CLMiocene <- tidyCL %>%
filter(Age < 23.000 & Age > 5.330)
speciesCLMiocene <- as.vector(unique(CLMiocene$Taxon))
length(speciesCLMiocene)
compareSpeciesMio <- setdiff(speciesPDBDMio, speciesCLMiocene)
write.table(compareSpeciesMio, file="PDBDSpeciesMioNeeded.txt", sep="\t", row.names = FALSE)
# do the same for the periods younger than the Miocene, just to be sure
PDBDQ <- pdbd %>%
filter(Age < 5.330)
speciesPDBDQ <- as.vector(unique(PDBDQ$identified_name))
length(speciesPDBDQ)
CLQ <- tidyCL %>%
filter(Age < 5.330)
speciesCLQ <- as.vector(unique(CLQ$Taxon))
length(speciesCLQ)
compareSpeciesQ <- setdiff(speciesPDBDQ, speciesCLQ)
write.table(compareSpeciesQ, file="PDBDSpeciesHolPleiPlio_justToCheck.txt", sep="\t", row.names = FALSE)
#### Map - Body size of Testudinidae ####
tidyCL$bin <- cut(tidyCL$Age, c(0, 0.0117, 0.126, 0.781, 1.806, 2.588, 3.6,
5.332, 7.246, 11.608, 13.82, 15.97, 20.44, 23.03)
)
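# NB: unlike the earlier BINSMio cut, this cut keeps 20.44 Ma (the Burdigalian/Aquitanian boundary) as an extra break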
# c(0, 0.000001, 0.0117, 0.126, 0.781, 2.588, 3.6, 5.332, 11.608, 15.97, 23.03,50.000))
MapCL <- tidyCL %>%
merge(BINSMio) %>%
# filter(Age < 23.000) %>%
# filter(Latitude != "-") %>%
dplyr::select(Genus, Taxon, Latitude, Longitude, Country, CL, PL, MeanBins, size) %>%
group_by(Latitude) %>%
mutate(count= n())
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
mapCL <- MapCL %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
  geom_point(aes(Longitude, Latitude, size=count, colour=MeanBins)) + #colour=size,
scale_colour_gradientn(name="Age [mya]", colors=c("orange", "red", "purple", "blue", "green", "yellow"))+
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90)) +
scale_size_continuous(name="n")
#values=c("#000000", "#E69F00", "#FF00FF", "#009E73", "#56B4E9","#F0E442", "#0072B2", "#D55E00", "#CC79A7")
mapCL
# fig.cap="Map displaying all localities for which body size data for testudinids was available in the
# literature. Size of points denotes sample size, color denotes approximate age.
#ggsave("MapBodysize.png", height=5, width=8, units='in', dpi=800)
#### Get overview over sample sizes #####
OverviewSpecies <- PleiPlioCL %>%
filter(EpochBins != "Modern") %>%
group_by(EpochBins, Taxon) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#kable(OverviewSpecies, caption="Overview over fossil species per time bin, with sample size and mean CL.")
OverviewSpeciesFossil <- PleiPlioCL %>%
filter(EpochBins != "Modern") %>%
group_by(Taxon) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#write.table(OverviewSpecies,file="OverviewSpeciesSampleSize.txt", sep="\t", row.names = FALSE)
#kable(OverviewSpeciesFossil, caption="General overview over fossil species, with sample size and mean CL")
OverviewGeneraTime <- PleiPlioCL %>%
group_by(EpochBins, Genus) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#kable(OverviewGeneraTime, caption="Overview over genera (modern and fossil) per time bin, with sample sizes and mean CL.")
OverviewGenera <- PleiPlioCL %>%
group_by(Genus) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#kable(OverviewGenera, caption="General overview over genera, with sample sizes and mean CL.")
# plot(tidyCL$CL~tidyCL$Latitude)
# plot(tidyCL$CL~tidyCL$Longitude)
##### Sampling Accumulation Curves #####
# SACSpecies
#Species Accumulation Curve
veganSL <- tidyCL %>%
dplyr::select(Locality, Taxon) %>%
group_by(Locality, Taxon) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Taxon, n, fill=0)
#par(mfrow = c(2, 1))
veganSL=veganSL[,-1]
veganspSL=specaccum(veganSL,method="rarefaction", permutations=1000)
plot(veganspSL,xlab="No. of Localities",ylab="Species Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2 )
veganSR <- tidyCL %>%
dplyr::select(Reference, Taxon) %>%
group_by(Reference, Taxon) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Taxon, n, fill=0)
veganSR=veganSR[,-1]
veganspSR=specaccum(veganSR,method="rarefaction", permutations=1000)
#jpeg('SACSpecies.jpg')
plot(veganspSR,xlab="No. of References",ylab="Species Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#dev.off()
#fig.cap="Species Accumulation Curve of fossil species per Locality and reference
#SACGenera
veganGL <- tidyCL %>%
dplyr::select(Locality, Genus) %>%
group_by(Locality, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
#par(mfrow = c(2, 1))
veganGL=veganGL[,-1]
veganspGL=specaccum(veganGL,method="rarefaction", permutations=1000)
plot(veganspGL,xlab="No. of Localities",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2, main="Fossil genera, CL, per Locality" )
# --> appendix!
veganGR <- tidyCL %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
#library(vegan)
veganGR=veganGR[,-1]
veganspGR=specaccum(veganGR,method="rarefaction", permutations=1000)
#jpeg('SACGenera.jpg')
plot(veganspGR,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#dev.off()
#fig.cap="Sampling Accumulation Curve of fossil genera per reference
#SACGEurasia
#Species Accumulation Curve with Genera, Eurasia
overviewVegan <- PleiPlioCL %>%
merge(Continents) %>%
filter(EpochBins != "Modern") %>%
group_by( Con) %>% #Continent,
# filter(Con=="Europe" | Con=="Asia" )%>%
filter(!is.na(CL)) %>%
dplyr::summarise(meanCL=mean(CL), sdCL= sd(CL), n=n(), meanAge=mean(Age))
veganEA <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Europe" | Con=="Asia") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganEA=veganEA[,-1]
veganspEA=specaccum(veganEA,method="rarefaction", permutations=1000)
plot(veganspEA,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Eurasia
#SACGEurope
#Species Accumulation Curve with Genera, Europe
veganEu <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Europe") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganEu=veganEu[,-1]
veganspEu=specaccum(veganEu,method="rarefaction", permutations=1000)
plot(veganspEu,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Europe
#SACGAfrica
#Species Accumulation Curve with Genera, Africa
veganAf <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Africa") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganAf=veganAf[,-1]
veganspAf=specaccum(veganAf,method="rarefaction", permutations=1000)
plot(veganspAf,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Africa
#SACGAmerica
#Species Accumulation Curve with Genera, America
veganAm <- tidyCL %>%
merge(Continents) %>%
filter(Con=="America") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganAm=veganAm[,-1]
veganspAm=specaccum(veganAm,method="rarefaction", permutations=1000)
plot(veganspAm,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, America
#SACGNAmerica
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, N-America"}
#Species Accumulation Curve with Genera, N-America
veganNA <- tidyCL %>%
merge(Continents) %>%
filter(Continent=="N-America") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganNA=veganNA[,-1]
veganspNA=specaccum(veganNA,method="rarefaction", permutations=1000)
plot(veganspNA,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, N-America
#SACGSAmerica
#Species Accumulation Curve with Genera, S-America
veganSA <- tidyCL %>%
merge(Continents) %>%
filter(Continent=="S-America") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganSA=veganSA[,-1]
veganspSA=specaccum(veganSA,method="rarefaction", permutations=1000)
plot(veganspSA,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, S-America
#SACGAsia
#Species Accumulation Curve with Genera, Asia
veganAs <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Asia") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganAs=veganAs[,-1]
veganspAs=specaccum(veganAs,method="rarefaction", permutations=1000)
plot(veganspAs,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Asia
#SAC fossil occurrences
SACFossOc <- FossilOccurrences %>%
dplyr::select(Locality, Genus) %>% #, Taxon
group_by(Locality, Genus) %>% #, Taxon
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0) #Taxon
#par(mfrow = c(2, 1))
SACFossOc=SACFossOc[,-1]
SACFossOcSL=specaccum(SACFossOc,method="rarefaction", permutations=1000)
plot(SACFossOcSL,xlab="No. of Localities",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2 )
##### Histograms #####
#HistAll
#Histograms of body size data, all
HistCL <- PleiPlioCL %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
#geom_histogram( col="black", fill="gray") +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
#facet_wrap(~EpochBins, scales="free_x")+ #binwidth=10,
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
#HistCLModern
HistCL
#fig.cap="Distribution of body size data, log-transformed, all data.
#ggsave("HistAll.png", height=5, width=8, units='in', dpi=800)
StatsAll <- PleiPlioCL %>%
filter(!is.na(CL)) %>%
# group_by(Island) %>%
dplyr::summarise(nCL=length(CL), #range=range(CL),
min=min(CL), max=max(CL),
var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
#normalDistribution
qqnorm(PleiPlioCL$CL); qqline(PleiPlioCL$CL, col=2)
qqnorm(log10(PleiPlioCL$CL)); qqline(log10(PleiPlioCL$CL), col=2)
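# Formal complement to the Q-Q plots (a sketch; note shapiro.test() accepts only
# 3-5000 observations, so subsample first if the data ever exceed that):
shapiro.test(PleiPlioCL$CL)
shapiro.test(log10(PleiPlioCL$CL))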
## per time bin
#HistBins
#Histograms of body size data, per time bin
HistCLBins <- PleiPlioCL %>%
# merge(BINS) %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
arrange(MeanBins) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~EpochBins, scales="free_x")+ #binwidth=10,
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLBins
#fig.cap="Distribution of body size data per time bin, logtransformed.
StatsBins <- PleiPlioCL %>%
filter(!is.na(CL)) %>%
group_by(EpochBins) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## modern vs. fossil
#HistFosMo
#Histograms of body size data, modern vs. fossil
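# NB: the Epoch/EPOCH vectors below are matched to the bins purely by position,
# so they must follow exactly the order in which unique(PleiPlioCL$EpochBins) returns them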
Epoch <- as.vector(c("Modern", "Pleistocene", "Pleistocene", "Pleistocene", "Pleistocene",
"Pliocene", "Pliocene",
"Miocene", "Miocene", "Miocene", "Miocene", "Miocene"))
EPOCH <- as.vector(c("Modern", "Fossil", "Fossil", "Fossil"
, "Fossil", "Fossil", "Fossil", "Fossil", "Fossil", "Fossil"
, "Fossil", "Fossil"))
EpochBins <- as.vector(unique(PleiPlioCL$EpochBins))
Epochs <- data.frame(Epoch,EpochBins, EPOCH)
Epochs$EPOCH = factor(Epochs$EPOCH,levels=c("Modern", "Fossil"))
HistCLFossil <- PleiPlioCL %>%
merge(Epochs) %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~EPOCH)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLFossil
#ggsave("HistModernFossil.png", height=5, width=8, units='in', dpi=800)
StatsFossil <- PleiPlioCL %>%
merge(Epochs) %>%
filter(!is.na(CL)) %>%
group_by(EPOCH) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## modern vs. fossil, continental vs. insular
#Histograms of body size data, modern vs. fossil, continental vs. insular
HistCLFossilIsland <- PleiPlioCL %>%
merge(Epochs) %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
mutate(Insularity=ifelse(Island=="y","insular", "continental")) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_grid(EPOCH~Insularity)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLFossilIsland
ggsave("HistModernFossilInsularContinental.png", height=5, width=8, units='in', dpi=800)
#fig.cap="Distribution of body size data modern vs. fossil, continental vs. insular logtransformed.
StatsFossilIsland <- PleiPlioCL %>%
merge(Epochs) %>%
filter(!is.na(CL)) %>%
group_by(EPOCH, Island) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## continental vs. insular
#HistCI
#Histograms of body size data, continental vs. insular
HistCLIsland <- PleiPlioCL %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
mutate(Insularity=ifelse(Island=="y","insular", "continental")) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~Insularity)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
#HistCLModern
HistCLIsland
#fig.cap="Distribution of body site data of continental (n) and insular(y) species, logtransformed.
#ggsave("HistInsularContinental.png", height=5, width=8, units='in', dpi=800)
StatsIsland <- PleiPlioCL %>%
filter(!is.na(CL)) %>%
group_by(Island) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## continents
##"HistCon", echo=FALSE,
#Histograms of body size data, split by continents
Continent <- as.vector(unique(PleiPlioCL$Continent))
Con <- as.vector(c("Asia", "Africa", "Europe", "America", "America", "America", "Europe"))#
Continents <- data.frame(Continent, Con)
HistCLContinents <- PleiPlioCL %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
merge(Continents) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~Con)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLContinents
#fig.cap="Distribution of body site data per continent, logtransformed.
StatsContinents <- PleiPlioCL %>%
merge(Continents) %>%
filter(!is.na(CL)) %>%
group_by(Con) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
#### Descriptive statistics ####
#Table: descriptive statistics of the CL distribution
Stats <- bind_rows(StatsAll, StatsBins, StatsFossil, StatsIsland,
StatsFossilIsland, StatsContinents) %>%
select(-EpochBins, -Island, -Con, -EPOCH)
Stats$Variable <- c("all", "Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Gelasian", "Piacencian", "Zanclean", "Messinian","Tortonian", "Serravallian","Langhian",
"Burdigalian/Aquitanian", "Modern","Fossil", "continental",
"insular","modern-con", "modern-ins", "fossil-con", "fossil-ins",
"Africa", "America", "Asia", "Europe")
#kable(Stats, caption="General statistics of body size data: all, per time bin, insular and continental, per continent (all referring to CL: min, max, variance, mean, logmean, median, logmedian, skewness, logskewness, kurosis, logkurtosis")
####### Boxplots #####
## genera per time bins
#BPGBins
IndGenera <- PleiPlioCL %>%
group_by(EpochBins, Genus) %>%
filter(!is.na(CL)) %>%
# filter(Island == "y") %>%
dplyr::summarise(GenusMean=mean(CL), GenusSD=sd(CL), n=n())
IndGenera[is.na(IndGenera)] <-0
# give sample sizes
give.n <- function(x){
return(c(y = 2500, label = length(x)))
}
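# used below via stat_summary(fun.data = give.n, geom = "text") to print each box's sample size at y = 2500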
BinGenera <- PleiPlioCL %>%
group_by(EpochBins, Genus) %>%
filter(!is.na(CL)) %>%
# filter(Island == "y") %>%
dplyr::summarise(GenusMean=mean(CL), GenusSD=sd(CL), n=n()) %>%
# summarise(TimeBinMean=mean(GenusMean), TimeBinSD=sd(GenusMean), n=n()) %>%
ungroup() %>%#, CL=CL
ggplot(aes(EpochBins, GenusMean)) + #, colour=Genus
theme_classic() +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1)) +
stat_boxplot(geom ='errorbar', width = 0.2) +
geom_boxplot() +
geom_pointrange(data=IndGenera, position="jitter", aes(x=EpochBins, y=GenusMean, colour=EpochBins, ymin = GenusMean-GenusSD, ymax = GenusMean+GenusSD)) +
stat_summary(fun.data = give.n, geom = "text") +
theme(legend.background = element_rect(colour = 'black'),
panel.border = element_rect(colour = "black", fill=NA)) +
scale_colour_manual(values=c("#999999", "#969696", "#999999", "#999999", "#999999", "#969696",
"#999999", "#999999", "#999999", "#969696", "#999999",
"#999999", "#969696", "#999999", "#999999", "#999999",
"#999999", "#999999")) +
theme(legend.position="none") + ylab("Carapace length [mm]") + xlab("Stratigraphic Stages")+
annotate("text", x=0.7, y=2500,label= "n=")
BinGenera
#fig.cap="Boxplots of mean CL per time bin, including mean and sd CL for each genus (as pointrange).
#ggsave("BoxplotBins.png", height=5, width=8, units='in', dpi=800)
#### Generic level ####
# Multiple comparison after Kruskal-Wallis Test
names(IndGenera)
library(pgirmess)
kruskalmc(IndGenera$GenusMean,IndGenera$EpochBins)
MCK <- data.frame(kruskalmc(IndGenera$GenusMean,IndGenera$EpochBins))
# resp<-c(0.44,0.44,0.54,0.32,0.21,0.28,0.7,0.77,0.48,0.64,0.71,0.75,0.8,0.76,0.34,0.80,0.73,0.8)
# categ<-as.factor(rep(c("A","B","C"),times=1,each=6))
# kruskalmc(resp, categ)
####
names(PleiPlioCL)
kruskalmc(PleiPlioCL$CL,PleiPlioCL$EpochBins)
MCKW <- data.frame(kruskalmc(PleiPlioCL$CL,PleiPlioCL$EpochBins))
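# Sketch: list only the significant pairwise comparisons; this assumes the pgirmess
# "mc" object layout, where $dif.com carries a logical `difference` column.
mcCL <- kruskalmc(PleiPlioCL$CL, PleiPlioCL$EpochBins)
mcCL$dif.com[mcCL$dif.com$difference, ]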
########## paleoTS analysis #########
## all (continental and insular)
### genera (all)
#paleoTSAll
#paleoTS plot with genus mean, including island species
GenusMean <- PleiPlioCL %>%
filter(EpochBins != "Modern") %>%
filter(!is.na(CL)) %>%
group_by(EpochBins, Genus) %>%
# filter(CL < 999 & Island =="y") %>%
dplyr::summarise(meanCL = mean(CL), n=n()) %>%
# filter(EpochBins != "Aquitanian") %>%
# ungroup() %>%
# group_by(EpochBins) %>%
dplyr::summarise(mm = mean(meanCL), nn=n(), vv=var(meanCL)) %>%
merge(BINSMio) %>%
select(mm, nn, vv, tt=MeanBins)
#na <- PleiPlioCL[!complete.cases(PleiPlioCL),]
GenusMean[is.na(GenusMean)] <- 0
GenusModern <- PleiPlioCL %>%
filter(EpochBins == "Modern") %>%
filter(!is.na(CL)) %>%
# filter(CL < 999& Island =="y") %>%
group_by(Genus) %>%
dplyr::summarise(meanCL=mean(CL), sdCL=sd(CL), n=n(), Age=mean(Age), EpochBins=unique(EpochBins), MeanBins=unique(MeanBins)) #%>%
# dplyr::summarise(mm=mean(meanCL), nn=n(), vv=var(meanCL), tt=mean(MeanBins))
sumTort <- read.csv("tortoises_summary.csv", sep=";", header=TRUE)
#colnames(sumTort)[1] <- "Taxon"
colnames(sumTort)[7] <- "meanCL"
colnames(sumTort)[8] <- "sdCL"
#sumTort$EpochBins <- "Modern"
#sumTort$MeanBins <- 0.0000005
sumTortoises <- sumTort %>%
mutate(Genus= as.character(Genus)) %>%
mutate(MeanBins=(Mamin+Mamax)/2) %>%
select(Genus, MeanBins, meanCL, sdCL, n, Island) %>% #n,
bind_rows(GenusModern)
SumTort <- sumTortoises %>%
  group_by(Genus) %>%
  # filter(meanCL < 999 & Island =="y") %>%
  mutate(tt=MeanBins, vv=sdCL^2, nn=n, mm=meanCL) %>%
  dplyr::select(mm, nn, vv, tt, Genus) %>%
  mutate(nx = nn*mm) %>%                # sample-size weighted sums of the source means
  mutate(mmall=sum(nx)/sum(nn)) %>%     # pooled mean per genus across sources
  mutate(d=mm-mmall) %>%                # deviation of each source mean from the pooled mean
  mutate(nsd=(vv+d^2)*nn) %>%           # pooled variance: sum(n_i*(var_i + d_i^2))/sum(n_i)
  mutate(varall=sum(nsd, na.rm=TRUE)/sum(nn), n=sum(nn)) %>%
  dplyr::select(mm=mmall, vv=varall, nn=n, tt, Genus) %>%
  unique() %>%
  dplyr::select(CL=mm, n=nn, var=vv, tt, Genus)
#write.table(SumTort,file="SumTortModernGenus.txt", sep="\t", row.names = FALSE)
#kable(SumTort, caption="Overview over body size means per time bin on genus level.")
#boxplot(SumTort$CL, caption="Body size distribution in time bin 'modern'.")
modernMeanGenus <- SumTort %>%
ungroup() %>%
select(CL, n, tt) %>%
filter( !is.na(CL)) %>%
#group_by(tt) %>%
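  # tt = 5.85e-03 Ma = (0 + 0.0117)/2, the midpoint of the modern time bin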
dplyr::summarise(mm = mean(CL), vv=var(CL), nn=n(), tt=5.85e-03)
# up to here: all modern taxa combined (the summary table and the MFN_testudinidae data)
GenusPaleo <- modernMeanGenus %>%
bind_rows(GenusMean)%>%
arrange(tt) %>%
select(tt,nn,mm,vv) %>%
filter(nn!=0)
GenusPaleo$vv[is.na(GenusPaleo$vv)] <- 0
#kable(GenusPaleo,caption="paleoTS object, all data")
paleoGen <-as.paleoTS(GenusPaleo$mm, GenusPaleo$vv, GenusPaleo$nn, GenusPaleo$tt, MM = NULL, genpars = NULL, reset.time=TRUE)
paleoGen$tt = -paleoGen$tt
#jpeg('paleoTSAll.jpg', width=800, height=500)
plot(paleoGen)
#fig.cap="paleoTS plot with genus mean, all
# plot(GenusPaleo$tt, GenusPaleo$mm, type="b", xlab="Time", ylab="Trait Mean")
# arrows(GenusPaleo$tt,GenusPaleo$mm-GenusPaleo$vv,GenusPaleo$tt,GenusPaleo$mm+GenusPaleo$vv, code=3, length=0.02, angle = 90)
abline(h=mean(GenusPaleo$mm), lty=5)
points(x=c(-2.59, -5.33), y=c(245, 245), pch=17)
#dev.off()
#### Model-fitting, genera ####
PaleoGenFit <- fit3models(paleoGen, silent=FALSE, method="AD", pool=FALSE)
#________________________________________________________________________________________________
####### Try Juan's paleoTS code ####
paleo_data <- paleoGen #from Juan
plot(paleo_data)
ou <- opt.GRW(paleo_data, pool = TRUE, cl = list(fnscale = -1), meth = "L-BFGS-B", hess = FALSE) #from Juan
bm <- opt.URW(paleo_data, pool = TRUE, cl = list(fnscale=-1), meth = "L-BFGS-B", hess = FALSE) #from Juan
st <- opt.Stasis(paleo_data, pool = TRUE, cl = list(fnscale=-1), meth = "L-BFGS-B", hess = FALSE) #from Juan
#pun <- fitGpunc(paleo_data,oshare=F,method= "AD" , pool=F) ## not working!! #from Juan
#compareModels(bm, ou,st, silent = FALSE)#pun, #from Juan
cat(ou$value, bm$value, st$value, "\n")
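# Minimal working version of the commented MuMIn idea further below, using the
# akaike.wts() helper shipped with paleoTS (assumes ou/bm/st fitted above carry $AICc):
aicc <- c(GRW = ou$AICc, URW = bm$AICc, Stasis = st$AICc)
round(akaike.wts(aicc), 3)  # relative support for each of the three models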
#documentation:
## generate data for a directional sequence
## y <- sim.GRW(ns=30, ms=1, vs=1)
# plot(y)
# m.rw<- opt.GRW(y)
# m.rwu<- opt.URW(y)
# m.sta<- opt.Stasis(y)
## print log-likelihoods; easier to use function fit3models()
## cat(m.rw$value, m.rwu$value, m.sta$value, "\n")
####NOT WORKING YET
# library(MuMIn)
# aic.w <- Weights(c(st$AICc,pun$AICc))
# cbind(c("stasis", "shift"), aic.w)
# aic.w[2]/aic.w[1]
# shift.time <- paleo_data$tt[pun$parameters[5]]
#
#
#
# par(mfrow=c(2,2))
# plot(paleo_data, modelFit= bm, pch=19, lwd=1.5, ylim=NULL, las=1)
# plot(paleo_data, modelFit= ou, pch=19, lwd=1.5, ylim=NULL, las=1)
# plot(paleo_data, modelFit= st, pch=19, lwd=1.5, ylim=NULL, las=1)
# plot(paleo_data, modelFit=pun, pch=19, lwd=1.5, ylim=NULL, las=1)
| /FossilTortoises.R | no_license | TurtleJules/FossilTortoisesPublication | R | false | false | 42,084 | r | ### packages ####
library(dplyr)
library(ggplot2)
library(paleoTS)
library(stats)
library(moments)
library(pgirmess)
library(vegan)
#library(plotly)
#### Data basis - import data set from Excel ####
setwd("C:/Users/Jule/Documents/Uni/MA")
tidyCL<-read.csv("tortoises_tidy.csv", sep=";", header=TRUE)
## change to:
# setwd("C:/Users/Jule/Documents/Uni/MA/PublicationTortoises")
# tidyCL<-read.csv("tortoises_tidy_pub.csv", sep=";", header=TRUE)
# C:\Users\Jule\Documents\Uni\MA\PublicationTortoises
#####prepare for analysis (fix column names in .csv-file after converting table from excel####
colnames(tidyCL)[6] <- "MAmin"
colnames(tidyCL)[7] <- "Mamax"
colnames(tidyCL)[17] <- "CL"
colnames(tidyCL)[18] <- "PL"
colnames(tidyCL)[21] <- "estimated"
tidyCL <- tidyCL %>%
mutate(Age= ((MAmin)+(as.numeric(Mamax)))/2) #%>%
# filter(estimated=="m" | estimated=="mo"| estimated=="mf") #%>%
# filter(!is.na(CL))
####### import extant data ####
extant <- read.csv("MFN_testudinidae.csv", sep=";", header=TRUE, dec=".", na.strings = "NA", stringsAsFactors=FALSE) # file: MFN_testudinidae.csv
colnames(extant)[5] <- "SCL"
colnames(extant)[11] <- "PL"
colnames(extant)[12] <- "PLmid"
Extant <- extant %>%
mutate(CL = SCL * 10, PL=PL*10, PLmid=PLmid*10)
#### estimating CL from PL ####
#table shows CL/PL ratios for all fossil taxa that have both measurements available
CLPLtidy <- tidyCL %>%
filter(!is.na(CL) & !is.na(PL)) %>%
dplyr::select(Taxon, CL, PL, size, Age, Island, Continent, Genus) %>%
mutate(ratio=CL/PL) %>%
group_by(Taxon) %>% #to show ratios per Taxon, leave out to get a total ratio
dplyr::summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
#table shows CL/PL ratios for all extant taxa that have both measurements available
CLPLextant <- Extant %>%
dplyr::filter(!is.na(CL) & !is.na(PL)) %>%
dplyr::select(Taxon=Species, CL, PL, PLmid, Island, Continent, Genus) %>%
mutate(ratio=CL/PL, ratioMid=CL/PLmid) #%>%
# group_by(Taxon) %>% #to show ratios per Taxon, leave out to get a total ratio
# summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
#kruskal.test(CLPLextant$meanRatio, CLPLextant$Taxon)
#KruskalMC <- data.frame(kruskalmc(CLPLextant$ratio, CLPLextant$Taxon))
Ratio <- CLPLextant %>%
dplyr::summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
RatioSpecies <- CLPLextant %>%
group_by(Taxon) %>% #to show ratios per Taxon, leave out to get a total ratio
dplyr::summarise(meanRatio=round(mean(ratio),2), sdRatio=round(sd(ratio),2), n=n(), min=min(ratio), max=max(ratio))
fossil <- tidyCL %>%
mutate(Taxon=as.character(Taxon), Island=as.character(Island), Continent=as.character(Continent), estimated=as.character(estimated), Genus=as.character(Genus)) %>%
dplyr::select(Taxon, CL, PL, size, estimated, Age, Island, Continent, Genus)
modern <- Extant %>%
dplyr::select(Taxon=Species, CL, PL, estimated, Age, Island, Continent, Genus)
All <- bind_rows(modern, fossil)
testRatio <- All %>% #tidyCL
dplyr::select(Taxon, CL, PL, size, estimated, Age, Island, Continent, Genus) %>%
mutate(extraCL = PL*Ratio$meanRatio) %>%
dplyr::select(Taxon, CL, extraCL, PL, size, estimated, Age, Island, Continent, Genus)
testRatio$CL[is.na(testRatio$CL)] <- testRatio$extraCL[is.na(testRatio$CL)]
##### Time bins (stratigraphic stages) ######
# Bin data, smaller bins
Miocene <- testRatio %>% #testRatio or tidyCL
filter(Age < 23.000)
# PleiPlio$bin <- cut(PleiPlio$Age, c(0, 0.000001, 0.0117, 0.126, 0.781, 2.588, 3.6, 5.332, 11.608))
# EpochBins <- as.vector(c("Modern", "Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Upper Pliocene", "Lower Pliocene", "Upper Miocene"))
# MeanBins <- as.vector(c((0+0.000001)/2, (0+0.0117)/2, (0.0117+0.126)/2, (0.126+0.781)/2, (0.781+2.588)/2,(2.588+3.6)/2, (3.6+5.332)/2, (5.332+11.608)/2))
# new cuts: 1.806, 4.466, 7.424, 9.516, 13.789, 18
Miocene$bin <- cut(Miocene$Age, c(0, 0.0117, 0.126, 0.781, 1.806, 2.588, 3.6,
5.332, 7.246, 11.608, 13.82, 15.97, 23.03))
EpochBins <- as.vector(c("Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Gelasian", "Piacencian", "Zanclean", "Messinian","Tortonian", "Serravallian","Langhian",
"Burdigalian/Aquitanian"))
Stages <- as.vector(c("Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Lower Pleistocene", "Upper Pliocene", "Lower Pliocene", "Upper Miocene","Upper Miocene",
"Middle Miocene","Middle Miocene", "Lower Miocene"))
MeanBins <- as.vector(c((0+0.0117)/2, (0.0117+0.126)/2, (0.126+0.781)/2, (0.781+1.806)/2, (1.806+2.588)/2,(2.588+3.6)/2, (3.6+5.332)/2, (5.332+7.246)/2, (7.246+11.608)/2, (11.608+13.82)/2, (13.82+15.97)/2, (15.97+23.03)/2))
#1.806, 4.466, 7.424, 9.516, 13.789, 18
BinsMio <- Miocene %>%
select(bin) %>%
group_by(bin) %>%
dplyr::summarise(nIndividuals=n())
BinsSpeciesMio <- Miocene %>%
select(bin, Taxon) %>%
group_by(bin, Taxon) %>%
dplyr::summarise(nSpecies=n()) %>%
dplyr::summarise(nSpecies=n())
BinsGeneraMio <- Miocene %>%
select(bin, Genus) %>%
group_by(bin, Genus) %>%
dplyr::summarise(nGenera=n()) %>%
dplyr::summarise(nGenera=n())
bin <- as.vector(unique(BinsMio$bin))
BINSMio <- data.frame(bin, EpochBins, Stages, MeanBins) %>%
merge(BinsMio) %>%
merge(BinsSpeciesMio) %>%
merge(BinsGeneraMio) %>%
arrange(MeanBins)
BINSMio$EpochBins = factor(BINSMio$EpochBins,
levels= c("Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Gelasian", "Piacencian", "Zanclean", "Messinian","Tortonian", "Serravallian","Langhian",
"Burdigalian/Aquitanian" ))
#BINS <- read.table("timebins.txt", sep="\t", header=TRUE)
#kable(Bins, caption="Time bins with corresponding sample sizes (individuals)")
#kable(BINSMio, caption="Smaller time bins with age range, epoch name, mean age and corresponding sample sizes (on individual, species and genus level)")
PleiPlioCL <- Miocene %>%
merge(BINSMio) %>%
filter(!is.na(CL))
# SUM <- PleiPlioCL %>%
# group_by(MeanBins) %>%
# summarise(n=n())
#na <- PleiPlioCL$EpochBins[!complete.cases(PleiPlioCL$EpochBins)]
#### Data overview - Scatterplot CL~Time ####
Continent <- as.vector(unique(PleiPlioCL$Continent))
Con <- as.vector(c("Asia", "Africa", "Europe", "America", "America", "America", "Europe"))#
Continents <- data.frame(Continent, Con)
scatter <- testRatio %>%
filter(!is.na(CL)) %>%
filter(Age < 23.000) %>%
merge(Continents) %>%
mutate(Insularity=ifelse(Island=="y","insular", "continental")) %>%
select(CL, Age, Insularity, Con, estimated) %>%
ggplot(aes(-Age, CL))+ # for time to start at the left: -Age
geom_vline(xintercept = c(0, -0.0117, -0.126, -0.781, -1.806, -2.588, -3.6,
-5.332, -7.246, -11.608, -13.82, -15.97, -23.03))+
geom_jitter() + #aes(shape=Insularity, colour= Con)
theme_classic() +
# geom_vline(xintercept= c(0, 0.0117, 0.126, 0.781, 2.588, 3.6, 5.332, 11.608, 15.97, 23.03)) +
geom_vline(xintercept= -20.44, linetype="dashed") +
scale_color_manual(values=c("#000000", "#E69F00", "#FF00FF", "#009E73", "#56B4E9","#F0E442", "#0072B2", "#D55E00", "#CC79A7") , name="Continents")+
# scale_shape_manual(values= c(16, 17), name="Insularity", breaks=c("continental", "insular")) +
xlab("Time [mya]") + ylab("Carapace length [mm]")#+ #+ coord_trans(x="log2") +
#scale_x_log10()
# labs(shape="Insularity", colour="Continents")
#
scatter
# fig.cap="Scatterplot of carapace length over time, indicating insular (triangle) and continental (circles)
# nd colour indicating continents. Lines indicate stratigraphic stages which were used as time bins, the dashed
# line is the border between the two stages of the Lower Miocene, which were consideres as one time bin.
#ggsave("OvervieData.png", height=5, width=8, units='in', dpi=800)
#length(scatter$CL)
#### Maps - fossil occurences of testudinidae #####
CL <- tidyCL %>%
# filter(Age < 11.000) %>%
dplyr::select(Locality, Genus, Taxon, Latitude, Longitude, Country, Age) %>%
group_by(Locality) %>%
mutate(count= n())
length(unique(CL$Locality))
FossilOccurrences<-read.csv("tortoises_occurrences.csv", sep=";", header=TRUE)
FossOcc <- FossilOccurrences %>%
mutate(Age= (as.numeric(as.character(MA.min)) + as.numeric(as.character(Ma.max)))/2) %>%
select(Locality, Country, Latitude, Longitude, Age, Genus, Taxon, Clavailability) %>%
# merge(tidyCL) %>%
group_by(Locality) %>%
mutate(count= n(), Longitude=as.numeric(as.character(Longitude)))
Occurrences <- FossilOccurrences %>%
mutate(Age= (as.numeric(as.character(MA.min)) + as.numeric(as.character(Ma.max)))/2) %>%
select(Locality, Country, Latitude, Longitude, Age, Genus, Taxon, Author) %>%#, comment, Clavailability
arrange(Age)
#kable(Occurrences, row.names = TRUE, caption="fossil occurrences")
#na <- FossOcc[!complete.cases(FossOcc),]
# OccMap <- merge(Map, FossOcc, by="Locality", incomparables=TRUE) %>%
# distinct()
# unique(CL$Locality) #wie viele localities
#length(unique(FossOcc$Locality[which(FossOcc$Clavailability == "yes")]))
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
cbbPalette <- c("#000000", "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
OwnPalette <- c("#000000", "#000000", "#56B4E9") #alternativ: c("#000000","#000000", "#E69F00", "#56B4E9")#
mapOc <- FossOcc %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(data=FossOcc, alpha=1/4,(aes(as.numeric(Longitude), Latitude, colour=FossOcc$Clavailability))) + # alpha= Transparency
geom_point(data=CL, alpha=1/2,(aes(Longitude, Latitude, colour="yes"))) +
# geom_point(data=pdbd, alpha=1/6,(aes(Longitude, Latitude, colour="pdbd"))) +
theme(legend.position="none") +
scale_colour_manual(values=OwnPalette) +#cbbPalette +
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90))
#unique(FossOcc$CL)
mapOc
# fig.cap="Map displaying all fossil occurrences of testudinids, with color indicating whether relevant
# literature was available (black if not) and if it was, whether body size data was available or not
# (yes and no, respectively).
#ggsave("MapOccurrences.png", height=5, width=8, units='in', dpi=800)
# require(plotly)
# ggplotly(mapOc)
# length(CL$Locality)
# length(unique(CL$Locality)) #wie viele localities
# length(unique(FossOcc$Locality[which(FossOcc$Clavailability == "yes")]))
##### compare with PDBD data! #####
#pbdb_testudinidae.csv
setwd("C:/Users/Jule/Documents/Uni/MA/PublicationTortoises")
PDBD<-read.csv("pbdb_testudinidae.csv", sep=",", header=TRUE, skip=15)
names(PDBD)
pdbd <- PDBD %>%
select(collection_no, identified_name, accepted_name, early_interval, late_interval, max_ma, min_ma,
reference_no, Longitude=lng, Latitude=lat, cc, state, county, Locality=geogcomments) %>%
mutate(Age= (as.numeric(as.character(min_ma)) + as.numeric(as.character(max_ma)))/2)
length(unique(pdbd$Locality)) # geogcomments = Locality in my table, I guess
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
#cbbPalette <- c("#000000", "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#OwnPalette <- c("#000000", "#000000", "#56B4E9") #alternativ: c("#000000", "#E69F00", "#56B4E9")
mapPDBD <- pdbd %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(data=pdbd, alpha=1/4,(aes(as.numeric(Longitude), Latitude, colour="#000000"))) +
# geom_point(data=CL, alpha=1/2,(aes(Longitude, Latitude, colour="yes"))) +
theme(legend.position="none") +
# scale_colour_manual(values=OwnPalette) +#cbbPalette +
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90))
#unique(FossOcc$CL)
mapPDBD
##### check sample size (?) for Miocene!#####
CLpdbdCom <- tidyCL %>%
filter(Age < 23.000 & Age > 5.330) %>%
dplyr::select(Locality, Genus, Taxon, Latitude, Longitude, Country, Age) %>%
group_by(Locality) %>%
mutate(count= n())
FossOccPdbdCom <- FossilOccurrences %>%
mutate(Age= (as.numeric(as.character(MA.min)) + as.numeric(as.character(Ma.max)))/2) %>%
filter(Age < 23.000 & Age > 5.330) %>%
select(Locality, Country, Latitude, Longitude, Age, Genus, Taxon, Clavailability) %>%
# merge(tidyCL) %>%
group_by(Locality) %>%
mutate(count= n(), Longitude=as.numeric(as.character(Longitude)))
Pdbd <- pdbd %>%
filter(Age < 23.000 & Age > 5.330) %>%
group_by(Locality) %>%
mutate(count= n())
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
cbbPalette <- c("#000000", "#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
OwnPalette <- c("#000000","#000000", "#E69F00", "#56B4E9")#c("#000000", "#000000", "#56B4E9") #alternativ:
mapOcPDBDMiocene <- FossOccPdbdCom %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(data=FossOccPdbdCom, alpha=1/4,(aes(as.numeric(Longitude), Latitude, colour="FossOcc"))) + # alpha= Transparency
geom_point(data=CLpdbdCom, alpha=1/2,(aes(Longitude, Latitude, colour="CL"))) +
geom_point(data=Pdbd, alpha=1/6,(aes(Longitude, Latitude, colour="PDBD"))) +
# theme(legend.position="none") +
scale_colour_manual(values=c("#000000", "#E69F00", "#56B4E9")
#,labels=c("FossOcc", "CL", "PDBD")
) +#cbbPalette +
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90))
#unique(FossOcc$CL)
mapOcPDBDMiocene
#ggsave("MapMioceneComparison.png", height=5, width=8, units='in', dpi=800)
# require(plotly)
# ggplotly(mapOcPDBDMiocene)
##### PDBD data #####
pdbd
unique(pdbd$accepted_name)
length(unique(pdbd$accepted_name))
unique(pdbd$identified_name)
length(unique(pdbd$identified_name))
PDBDMiocene <- pdbd %>%
filter(Age < 23.000 & Age > 5.330)
speciesPDBDMio <- as.vector(unique(PDBDMiocene$identified_name))
length(speciesPDBDMio)
CLMiocene <- tidyCL %>%
filter(Age < 23.000 & Age > 5.330)
speciesCLMiocene <- as.vector(unique(CLMiocene$Taxon))
length(speciesCLMiocene)
compareSpeciesMio <- setdiff(speciesPDBDMio, speciesCLMiocene)
write.table(compareSpeciesMio, file="PDBDSpeciesMioNeeded.txt", sep="\t", row.names = FALSE)
# do the same for period younger than miocene, just to be sure
PDBDQ <- pdbd %>%
filter(Age < 5.330)
speciesPDBDQ <- as.vector(unique(PDBDQ$identified_name))
length(speciesPDBDQ)
CLQ <- tidyCL %>%
filter(Age < 5.330)
speciesCLQ <- as.vector(unique(CLQ$Taxon))
length(speciesCLQ)
compareSpeciesQ <- setdiff(speciesPDBDQ, speciesCLQ)
write.table(compareSpeciesQ, file="PDBDSpeciesHolPleiPlio_justToCheck.txt", sep="\t", row.names = FALSE)
#### Map - Body size of testudinidae ####
tidyCL$bin <- cut(tidyCL$Age, c(0, 0.0117, 0.126, 0.781, 1.806, 2.588, 3.6,
5.332, 7.246, 11.608, 13.82, 15.97, 20.44, 23.03)
)
# c(0, 0.000001, 0.0117, 0.126, 0.781, 2.588, 3.6, 5.332, 11.608, 15.97, 23.03,50.000))
MapCL <- tidyCL %>%
merge(BINSMio) %>%
# filter(Age < 23.000) %>%
# filter(Latitude != "-") %>%
dplyr::select(Genus, Taxon, Latitude, Longitude, Country, CL, PL, MeanBins, size) %>%
group_by(Latitude) %>%
mutate(count= n())
mapWorld <- borders("world", colour="azure3", fill="azure3") # create a layer of borders, run line at the beginning (before loading plotly)
mapCL <- MapCL %>%
ggplot(aes(Longitude, Latitude)) +# borders("world", ylim = c(-60, 90)) +
mapWorld +
theme_classic() +
geom_point(aes(Longitude, Latitude,size=count, colour=MeanBins)) + #colour=size,
scale_colour_gradientn(name="Age [mya]", colors=c("orange", "red", "purple", "blue", "green", "yellow"))+
scale_x_continuous(breaks=c(-180, -100, 0, 100, 180)) +
scale_y_continuous(breaks=c(-90, -50, 0, 50, 90)) +
scale_size_continuous(name="n")
#values=c("#000000", "#E69F00", "#FF00FF", "#009E73", "#56B4E9","#F0E442", "#0072B2", "#D55E00", "#CC79A7")
mapCL
# fig.cap="Map displaying all localities for which body size data for testudinids was available in the
# literature. Size of points denotes sample size, color denotes approximate age.
#ggsave("MapBodysize.png", height=5, width=8, units='in', dpi=800)
#### Get overview over sample sizes #####
OverviewSpecies <- PleiPlioCL %>%
filter(EpochBins != "Modern") %>%
group_by(EpochBins, Taxon) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#kable(OverviewSpecies, caption="Overview over fossil species per time bin, with sample size and mean CL.")
OverviewSpeciesFossil <- PleiPlioCL %>%
filter(EpochBins != "Modern") %>%
group_by(Taxon) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#write.table(OverviewSpecies,file="OverviewSpeciesSampleSize.txt", sep="\t", row.names = FALSE)
#kable(OverviewSpeciesFossil, caption="General overview over fossil species, with sample size and mean CL")
OverviewGeneraTime <- PleiPlioCL %>%
group_by(EpochBins, Genus) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#kable(OverviewGeneraTime, caption="Overview over genera (modern and fossil) per time bin, with sample sizes and mean CL.")
OverviewGenera <- PleiPlioCL %>%
group_by(Genus) %>%
dplyr::summarise(n=n(), meanCL = mean(CL))
#kable(OverviewGenera, caption="General overview over genera, with sample sizes and mean CL.")
# plot(tidyCL$CL~tidyCL$Latitude)
# plot(tidyCL$CL~tidyCL$Longitude)
##### Sampling Accumulation Curves #####
# SACSpecies
#Species Accumulation Curve
veganSL <- tidyCL %>%
dplyr::select(Locality, Taxon) %>%
group_by(Locality, Taxon) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Taxon, n, fill=0)
#par(mfrow = c(2, 1))
veganSL=veganSL[,-1]
veganspSL=specaccum(veganSL,method="rarefaction", permutations=1000)
plot(veganspSL,xlab="No. of Localities",ylab="Species Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2 )
veganSR <- tidyCL %>%
dplyr::select(Reference, Taxon) %>%
group_by(Reference, Taxon) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Taxon, n, fill=0)
veganSR=veganSR[,-1]
veganspSR=specaccum(veganSR,method="rarefaction", permutations=1000)
#jpeg('SACSpecies.jpg')
plot(veganspSR,xlab="No. of References",ylab="Species Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#dev.off()
#fig.cap="Species Accumulation Curve of fossil species per Locality and reference
#SACGenera
veganGL <- tidyCL %>%
dplyr::select(Locality, Genus) %>%
group_by(Locality, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
#par(mfrow = c(2, 1))
veganGL=veganGL[,-1]
veganspGL=specaccum(veganGL,method="rarefaction", permutations=1000)
plot(veganspGL,xlab="No. of Localities",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2, main="Fossil genera, CL, per Locality" )
# --> appendix!
veganGR <- tidyCL %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
#library(vegan)
veganGR=veganGR[,-1]
veganspGR=specaccum(veganGR,method="rarefaction", permutations=1000)
#jpeg('SACGenera.jpg')
plot(veganspGR,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#dev.off()
#fig.cap="Sampling Accumulation Curve of fossil genera per reference
#SACGEurasia
#Species Accumulation Curve with Genera, Eurasia
overviewVegan <- PleiPlioCL %>%
merge(Continents) %>%
filter(EpochBins != "Modern") %>%
group_by( Con) %>% #Continent,
# filter(Con=="Europe" | Con=="Asia" )%>%
filter(!is.na(CL)) %>%
dplyr::summarise(meanCL=mean(CL), sdCL= sd(CL), n=n(), meanAge=mean(Age))
veganEA <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Europe" | Con=="Asia") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganEA=veganEA[,-1]
veganspEA=specaccum(veganEA,method="rarefaction", permutations=1000)
plot(veganspEA,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Eurasia
#SACGEurope
#Species Accumulation Curve with Genera, Europe
veganEu <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Europe") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganEu=veganEu[,-1]
veganspEu=specaccum(veganEu,method="rarefaction", permutations=1000)
plot(veganspEu,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Europe
#SACGAfrica
#Species Accumulation Curve with Genera, Africa
veganAf <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Africa") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganAf=veganAf[,-1]
veganspAf=specaccum(veganAf,method="rarefaction", permutations=1000)
plot(veganspAf,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Africa
#SACGAmerica
#Species Accumulation Curve with Genera, America
veganAm <- tidyCL %>%
merge(Continents) %>%
filter(Con=="America") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganAm=veganAm[,-1]
veganspAm=specaccum(veganAm,method="rarefaction", permutations=1000)
plot(veganspAm,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, America
#SACGNAmerica
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, N-America"}
#Species Accumulation Curve with Genera, N-America
veganNA <- tidyCL %>%
merge(Continents) %>%
filter(Continent=="N-America") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganNA=veganNA[,-1]
veganspNA=specaccum(veganNA,method="rarefaction", permutations=1000)
plot(veganspNA,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, N-America
#SACGSAmerica
#Species Accumulation Curve with Genera, S-America
veganSA <- tidyCL %>%
merge(Continents) %>%
filter(Continent=="S-America") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganSA=veganSA[,-1]
veganspSA=specaccum(veganSA,method="rarefaction", permutations=1000)
plot(veganspSA,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, S-America
#SACGAsia
#Species Accumulation Curve with Genera, Asia
veganAs <- tidyCL %>%
merge(Continents) %>%
filter(Con=="Asia") %>%
dplyr::select(Reference, Genus) %>%
group_by(Reference, Genus) %>%
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0)
veganAs=veganAs[,-1]
veganspAs=specaccum(veganAs,method="rarefaction", permutations=1000)
plot(veganspAs,xlab="No. of References",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2)
#fig.cap="Sampling Accumulation Curve of fossil genera per reference, Asia
#SAC fossil occurences
SACFossOc <- FossilOccurrences %>%
dplyr::select(Locality, Genus) %>% #, Taxon
group_by(Locality, Genus) %>% #, Taxon
dplyr::summarise(n=n()) %>%
tidyr::spread(Genus, n, fill=0) #Taxon
#par(mfrow = c(2, 1))
SACFossOc=SACFossOc[,-1]
SACFossOcSL=specaccum(SACFossOc,method="rarefaction", permutations=1000)
plot(SACFossOcSL,xlab="No. of Localities",ylab="Genera Richness", xvar="individuals", ci.type="line", ci.lty=2, ci.col="grey", col="deepskyblue4", lwd=2 )
#####Histograms#####
#HistAll
#Histograms of body size data, all
HistCL <- PleiPlioCL %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
#geom_histogram( col="black", fill="gray") +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
#facet_wrap(~EpochBins, scales="free_x")+ #binwidth=10,
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
#HistCLModern
HistCL
##fig.cap="Distribution of body size data, logtransformed, all data.
#ggsave("HistAll.png", height=5, width=8, units='in', dpi=800)
StatsAll <- PleiPlioCL %>%
filter(!is.na(CL)) %>%
# group_by(Island) %>%
dplyr::summarise(nCL=length(CL), #range=range(CL),
min=min(CL), max=max(CL),
var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
#normalDistribution
qqnorm(PleiPlioCL$CL); qqline(PleiPlioCL$CL, col=2)
qqnorm(log10(PleiPlioCL$CL)); qqline(log10(PleiPlioCL$CL), col=2)
## per time bin
#HistBins
#Histograms of body size data, per time bin
HistCLBins <- PleiPlioCL %>%
# merge(BINS) %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
arrange(MeanBins) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~EpochBins, scales="free_x")+ #binwidth=10,
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLBins
#fig.cap="Distribution of body size data per time bin, logtransformed.
StatsBins <- PleiPlioCL %>%
filter(!is.na(CL)) %>%
group_by(EpochBins) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## modern vs. fossil
#HistFosMo
#Histograms of body size data, modern vs. fossil
Epoch <- as.vector(c("Modern", "Pleistocene", "Pleistocene", "Pleistocene", "Pleistocene",
"Pliocene", "Pliocene",
"Miocene", "Miocene", "Miocene", "Miocene", "Miocene"))
EPOCH <- as.vector(c("Modern", "Fossil", "Fossil", "Fossil"
, "Fossil", "Fossil", "Fossil", "Fossil", "Fossil", "Fossil"
, "Fossil", "Fossil"))
EpochBins <- as.vector(unique(PleiPlioCL$EpochBins))
Epochs <- data.frame(Epoch,EpochBins, EPOCH)
Epochs$EPOCH = factor(Epochs$EPOCH,levels=c("Modern", "Fossil"))
HistCLFossil <- PleiPlioCL %>%
merge(Epochs) %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~EPOCH)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLFossil
#ggsave("HistModernFossil.png", height=5, width=8, units='in', dpi=800)
StatsFossil <- PleiPlioCL %>%
merge(Epochs) %>%
filter(!is.na(CL)) %>%
group_by(EPOCH) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## modern vs. fossil, continental vs. insular
#Histograms of body size data, modern vs. fossil, continental vs. insular
HistCLFossilIsland <- PleiPlioCL %>%
merge(Epochs) %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
mutate(Insularity=ifelse(Island=="y","insular", "continental")) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_grid(EPOCH~Insularity)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLFossilIsland
ggsave("HistModernFossilInsularContinental.png", height=5, width=8, units='in', dpi=800)
#fig.cap="Distribution of body size data modern vs. fossil, continental vs. insular logtransformed.
StatsFossilIsland <- PleiPlioCL %>%
merge(Epochs) %>%
filter(!is.na(CL)) %>%
group_by(EPOCH, Island) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## continental vs. insular
##HistCI", echo=FALSE,
#Histograms of body size data, continental vs. insular
HistCLIsland <- PleiPlioCL %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
mutate(Insularity=ifelse(Island=="y","insular", "continental")) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~Insularity)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
#HistCLModern
HistCLIsland
#fig.cap="Distribution of body site data of continental (n) and insular(y) species, logtransformed.
#ggsave("HistInsularContinental.png", height=5, width=8, units='in', dpi=800)
StatsIsland <- PleiPlioCL %>%
filter(!is.na(CL)) %>%
group_by(Island) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
## continents
##"HistCon", echo=FALSE,
#Histograms of body size data, split by continents
Continent <- as.vector(unique(PleiPlioCL$Continent))
Con <- as.vector(c("Asia", "Africa", "Europe", "America", "America", "America", "Europe"))#
Continents <- data.frame(Continent, Con)
HistCLContinents <- PleiPlioCL %>%
filter( !is.na(CL)) %>%
mutate(CL=log(CL)) %>%
merge(Continents) %>%
# filter(EpochBins != "Modern") %>%
ggplot(aes(CL)) +
geom_histogram( aes(y=..density..),col="black", fill="gray")+
#geom_density() +
geom_density(alpha=.2, fill="darkslategray1") +
facet_wrap(~Con)+ #binwidth=10,, scales="free_x"
theme_classic()+
#theme_gray() +
theme(panel.border = element_rect(colour = "black", fill=NA))+
xlab("Log Carapace length [mm]")
HistCLContinents
#fig.cap="Distribution of body size data per continent, log-transformed."
StatsContinents <- PleiPlioCL %>%
merge(Continents) %>%
filter(!is.na(CL)) %>%
group_by(Con) %>%
dplyr::summarise(nCL=length(CL), min=min(CL), max=max(CL), var=var(CL), mean=round(mean(CL),1), logm=round(mean(log10(CL)),1),
med=round(median(CL),1), logmed=round(median(log10(CL)),1), #CLmode=mode(CL), CLlogmode=mode(log10(CL)),
skew=round(skewness(CL),2), logsk=round(skewness(log10(CL)),2),
kurt=round(kurtosis(CL),2), logku=round(kurtosis(log10(CL)),2) ) %>%
unique()
#### Descriptive statistics ####
#Table Stats Distribution CL", echo=FALSE}
Stats <- bind_rows(StatsAll, StatsBins, StatsFossil, StatsIsland,
StatsFossilIsland, StatsContinents) %>%
select(-EpochBins, -Island, -Con, -EPOCH)
Stats$Variable <- c("all", "Modern", "Upper Pleistocene", "Middle Pleistocene", "Lower Pleistocene", "Gelasian", "Piacencian", "Zanclean", "Messinian","Tortonian", "Serravallian","Langhian",
"Burdigalian/Aquitanian", "Modern","Fossil", "continental",
"insular","modern-con", "modern-ins", "fossil-con", "fossil-ins",
"Africa", "America", "Asia", "Europe")
#kable(Stats, caption="General statistics of body size data: all, per time bin, insular and continental, per continent (all referring to CL: min, max, variance, mean, logmean, median, logmedian, skewness, logskewness, kurtosis, logkurtosis)")
####### Boxplots #####
## genera per time bins
#BPGBins", echo=FALSE, warning=FALSE,
IndGenera <- PleiPlioCL %>%
group_by(EpochBins, Genus) %>%
filter(!is.na(CL)) %>%
# filter(Island == "y") %>%
dplyr::summarise(GenusMean=mean(CL), GenusSD=sd(CL), n=n())
IndGenera[is.na(IndGenera)] <- 0
# give sample sizes
give.n <- function(x){
return(c(y = 2500, label = length(x)))
}
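# give.n() is passed to stat_summary(geom = "text") below so that each box is
# annotated with its group sample size at a fixed height (y = 2500)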
BinGenera <- PleiPlioCL %>%
group_by(EpochBins, Genus) %>%
filter(!is.na(CL)) %>%
# filter(Island == "y") %>%
dplyr::summarise(GenusMean=mean(CL), GenusSD=sd(CL), n=n()) %>%
# summarise(TimeBinMean=mean(GenusMean), TimeBinSD=sd(GenusMean), n=n()) %>%
ungroup() %>%#, CL=CL
ggplot(aes(EpochBins, GenusMean)) + #, colour=Genus
theme_classic() +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1)) +
stat_boxplot(geom ='errorbar', width = 0.2) +
geom_boxplot() +
geom_pointrange(data=IndGenera, position="jitter", aes(x=EpochBins, y=GenusMean, colour=EpochBins, ymin = GenusMean-GenusSD, ymax = GenusMean+GenusSD)) +
stat_summary(fun.data = give.n, geom = "text") +
theme(legend.background = element_rect(colour = 'black'),
panel.border = element_rect(colour = "black", fill=NA)) +
scale_colour_manual(values=c("#999999", "#969696", "#999999", "#999999", "#999999", "#969696",
"#999999", "#999999", "#999999", "#969696", "#999999",
"#999999", "#969696", "#999999", "#999999", "#999999",
"#999999", "#999999")) +
theme(legend.position="none") + ylab("Carapace length [mm]") + xlab("Stratigraphic Stages")+
annotate("text", x=0.7, y=2500,label= "n=")
BinGenera
#fig.cap="Boxplots of mean CL per time bin, including mean and sd CL for each genus (as pointrange)."
#ggsave("BoxplotBins.png", height=5, width=8, units='in', dpi=800)
####generic level
# Multiple comparison after Kruskal-Wallis Test
names(IndGenera)
library(pgirmess)
kruskalmc(IndGenera$GenusMean,IndGenera$EpochBins)
MCK <- data.frame(kruskalmc(IndGenera$GenusMean,IndGenera$EpochBins))
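# kruskalmc() returns, for each pair of time bins, the observed and critical
# rank-mean differences plus a significance flag (assuming the usual pgirmess
# output format); wrapping it in data.frame() flattens that list into a table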
# resp<-c(0.44,0.44,0.54,0.32,0.21,0.28,0.7,0.77,0.48,0.64,0.71,0.75,0.8,0.76,0.34,0.80,0.73,0.8)
# categ<-as.factor(rep(c("A","B","C"),times=1,each=6))
# kruskalmc(resp, categ)
####
names(PleiPlioCL)
kruskalmc(PleiPlioCL$CL,PleiPlioCL$EpochBins)
MCKW <- data.frame(kruskalmc(PleiPlioCL$CL,PleiPlioCL$EpochBins))
########## paleoTS analysis #########
## all (continental and insular)
### genera (all)
##paleoTSAll", echo=FALSE, include=FALSE,
#paleoTS plot with genus mean, including island species
GenusMean <- PleiPlioCL %>%
filter(EpochBins != "Modern") %>%
filter(!is.na(CL)) %>%
group_by(EpochBins, Genus) %>%
# filter(CL < 999 & Island =="y") %>%
dplyr::summarise(meanCL = mean(CL), n=n()) %>%
# filter(EpochBins != "Aquitanian") %>%
# ungroup() %>%
# group_by(EpochBins) %>%
dplyr::summarise(mm = mean(meanCL), nn=n(), vv=var(meanCL)) %>%
merge(BINSMio) %>%
select(mm, nn, vv, tt=MeanBins)
#na <- PleiPlioCL[!complete.cases(PleiPlioCL),]
GenusMean[is.na(GenusMean)] <- 0
GenusModern <- PleiPlioCL %>%
filter(EpochBins == "Modern") %>%
filter(!is.na(CL)) %>%
# filter(CL < 999& Island =="y") %>%
group_by(Genus) %>%
dplyr::summarise(meanCL=mean(CL), sdCL=sd(CL), n=n(), Age=mean(Age), EpochBins=unique(EpochBins), MeanBins=unique(MeanBins)) #%>%
# dplyr::summarise(mm=mean(meanCL), nn=n(), vv=var(meanCL), tt=mean(MeanBins))
sumTort <- read.csv("tortoises_summary.csv", sep=";", header=TRUE)
#colnames(sumTort)[1] <- "Taxon"
colnames(sumTort)[7] <- "meanCL"
colnames(sumTort)[8] <- "sdCL"
#sumTort$EpochBins <- "Modern"
#sumTort$MeanBins <- 0.0000005
sumTortoises <- sumTort %>%
mutate(Genus= as.character(Genus)) %>%
mutate(MeanBins=(Mamin+Mamax)/2) %>%
select(Genus, MeanBins, meanCL, sdCL, n, Island) %>% #n,
bind_rows(GenusModern)
SumTort <- sumTortoises %>%
group_by(Genus) %>%
# filter(meanCL < 999 & Island =="y") %>%
mutate(tt=MeanBins, vv=sdCL^2, nn=n, mm=meanCL) %>%
dplyr::select(mm, nn, vv, tt, Genus) %>%
mutate(nx = nn*mm) %>%
mutate(mmall=sum(nx)/sum(nn)) %>%
  mutate(d = mm-mmall) %>%
  mutate(nsd = ((vv + d^2)*nn)) %>%  # pooled variance: each genus weighted by its n
mutate(varall=sum(nsd)/sum(nn), n=sum(nn)) %>%
dplyr::select(mm=mmall, vv=varall, nn=n, tt, Genus) %>%
unique() %>%
dplyr::select(CL=mm, n=nn, var=vv, tt, Genus)
#write.table(SumTort,file="SumTortModernGenus.txt", sep="\t", row.names = FALSE)
#kable(SumTort, caption="Overview over body size means per time bin on genus level.")
#boxplot(SumTort$CL, caption="Body size distribution in time bin 'modern'.")
modernMeanGenus <- SumTort %>%
ungroup() %>%
select(CL, n, tt) %>%
filter( !is.na(CL)) %>%
#group_by(tt) %>%
dplyr::summarise(mm = mean(CL), vv=var(CL), nn=n(), tt=5.85e-03)
#up to here all modern taxa are combined (summarised and MFN_testudinidae)
GenusPaleo <- modernMeanGenus %>%
bind_rows(GenusMean)%>%
arrange(tt) %>%
select(tt,nn,mm,vv) %>%
filter(nn!=0)
GenusPaleo$vv[is.na(GenusPaleo$vv)] <- 0
#kable(GenusPaleo,caption="paleoTS object, all data")
paleoGen <-as.paleoTS(GenusPaleo$mm, GenusPaleo$vv, GenusPaleo$nn, GenusPaleo$tt, MM = NULL, genpars = NULL, reset.time=TRUE)
paleoGen$tt = -paleoGen$tt
#jpeg('paleoTSAll.jpg', width=800, height=500)
plot(paleoGen)
#fig.cap="paleoTS plot with genus mean, all
# plot(GenusPaleo$tt, GenusPaleo$mm, type="b", xlab="Time", ylab="Trait Mean")
# arrows(GenusPaleo$tt,GenusPaleo$mm-GenusPaleo$vv,GenusPaleo$tt,GenusPaleo$mm+GenusPaleo$vv, code=3, length=0.02, angle = 90)
abline(h=mean(GenusPaleo$mm), lty=5)
points(x=c(-2.59, -5.33), y=c(245, 245), pch=17)
#dev.off()
####Model-fitting, genera ######
PaleoGenFit <- (fit3models(paleoGen, silent=FALSE, method="AD", pool=FALSE))
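## a minimal sketch (assuming the paleoTS API, where fitted models carry an
## $AICc element and akaike.wts() is exported) to pull out Akaike weights:
# fits <- list(GRW = opt.GRW(paleoGen), URW = opt.URW(paleoGen),
#              Stasis = opt.Stasis(paleoGen))
# akaike.wts(sapply(fits, function(f) f$AICc))  # weights sum to 1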
#________________________________________________________________________________________________
####### try Juan's paleoTS code ####
paleo_data <- paleoGen #from Juan
plot(paleo_data)
ou <- opt.GRW(paleo_data, pool = TRUE, cl = list(fnscale = -1), meth = "L-BFGS-B", hess = FALSE) #from Juan
bm <- opt.URW(paleo_data, pool = TRUE, cl = list(fnscale=-1), meth = "L-BFGS-B", hess = FALSE) #from Juan
st <- opt.Stasis(paleo_data, pool = TRUE, cl = list(fnscale=-1), meth = "L-BFGS-B", hess = FALSE) #from Juan
#pun <- fitGpunc(paleo_data,oshare=F,method= "AD" , pool=F) ## not working!! #from Juan
#compareModels(bm, ou,st, silent = FALSE)#pun, #from Juan
cat(ou$value, bm$value, st$value, "\n")
#documentation:
## generate data for a directional sequence
## y <- sim.GRW(ns=30, ms=1, vs=1)
# plot(y)
# m.rw<- opt.GRW(y)
# m.rwu<- opt.URW(y)
# m.sta<- opt.Stasis(y)
## print log-likelihoods; easier to use function fit3models()
## cat(m.rw$value, m.rwu$value, m.sta$value, "\n")
####NOT WORKING YET
# library(MuMIn)
# aic.w <- Weights(c(st$AICc,pun$AICc))
# cbind(c("stasis", "shift"), aic.w)
# aic.w[2]/aic.w[1]
# shift.time <- paleo_data$tt[pun$parameters[5]]
#
#
#
# par(mfrow=c(2,2))
# plot(paleo_data, modelFit= bm, pch=19, lwd=1.5, ylim=NULL, las=1)
# plot(paleo_data, modelFit= ou, pch=19, lwd=1.5, ylim=NULL, las=1)
# plot(paleo_data, modelFit= st, pch=19, lwd=1.5, ylim=NULL, las=1)
# plot(paleo_data, modelFit=pun, pch=19, lwd=1.5, ylim=NULL, las=1)
|
# tcosFile <- system.file("settings", "TcosOfInterest.csv", package = "AHAsAcutePancreatitis")
# tcos <- read.csv(tcosFile, stringsAsFactors = FALSE)
# studyFolder <- "S:/Studies/EPI534"
# databases <- c("optum", "mdcr", "ccae")
# reportFolder = file.path(studyFolder, "report")
#
# cleanResults <- function(results) {
# #fix a typo
# results[results$eventType == "First EVer Event", ]$eventType <-
# "First Ever Event"
# #create additional filter variables
# results$noCana <-
# with(results, ifelse(grepl("no cana", comparatorName), TRUE, FALSE))
# results$noCensor <-
# with(results, ifelse(grepl("no censoring", comparatorName), TRUE, FALSE))
# #simplify naming
# results[results$timeAtRisk == "Per Protocol Zero Day (no censor at switch)", ]$timeAtRisk <-
# "On Treatment (0 Day)"
# results[results$timeAtRisk == "On Treatment", ]$timeAtRisk <-
# "On Treatment (30 Day)"
# results[results$timeAtRisk == "On Treatment (no censor at switch)", ]$timeAtRisk <-
# "On Treatment (30 Day)"
# results[results$timeAtRisk == "Per Protocol Sixty Day (no censor at switch)", ]$timeAtRisk <-
# "On Treatment (60 Day)"
# results[results$timeAtRisk == "Per Protocol Zero Day", ]$timeAtRisk <-
# "On Treatment (0 Day)"
# results[results$timeAtRisk == "Per Protocol Sixty Day", ]$timeAtRisk <-
# "On Treatment (60 Day)"
# return(results)
# }
#
# loadResultsHois <- function(outputFolder) {
# shinyDataFolder <- file.path(outputFolder, "results", "shinyData")
# file <-
# list.files(shinyDataFolder,
# pattern = "resultsHois_.*.rds",
# full.names = TRUE)
# x <- readRDS(file)
# return(x)
# }
#
# for (database in databases) {
# outputFolder <- file.path(studyFolder, database)
# results <- loadResultsHois(outputFolder)
# results <- cleanResults(results)
#
# # limit to primary analysis per specification
# primary <- results[
# results$analysisId==2 &
# results$timeAtRisk=="On Treatment (30 Day)" &
# !is.na(results$p) &
#     results$canaRestricted == TRUE &
#     results$noCensor == FALSE, ]
#
#     primary$hochbergP <- p.adjust(primary$p, method = "hochberg")
#     final <- primary[c("comparatorDrug", "rr", "ci95lb", "ci95ub", "p", "hochbergP")]
#     write.table(final, file.path(reportFolder, paste0("hochbergResult_", database, ".csv")), row.names = FALSE, sep = ",")
# }
#
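# A note on the adjustment above: p.adjust(method = "hochberg") is a step-up
# procedure controlling the family-wise error rate across the comparator drugs.
# Worked toy example:
#   p.adjust(c(0.01, 0.02, 0.04), method = "hochberg")  # -> 0.03 0.04 0.04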
| /Sglt2iAcutePancreatitis/R/PerformMultiplicityAdjustment.R | permissive | OHDSI/StudyProtocols | R | false | false | 2,388 | r |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/positions.R
\name{cluster_dimer_end}
\alias{cluster_dimer_end}
\title{cluster_dimer_end}
\usage{
cluster_dimer_end(d = a, dihedral = 0, a = 0.035, b = 0.012,
rescale = TRUE)
}
\arguments{
\item{d}{end-to-end distance}
\item{dihedral}{dihedral angle}
\item{a}{semi axis}
\item{b}{semi axis}
\item{rescale}{logical, rescale the z coordinates so that d is the center-to-center distance}
}
\value{
list with r, sizes, angles
}
\description{
cluster_dimer_end
}
\details{
Cluster with two nanorods:
the first rod lies along x at (0, 0, -d/2),
the second at (0, 0, d/2).
}
\author{
baptiste Auguie
}
\seealso{
Other user_level cluster: \code{\link{cluster_chain}};
\code{\link{cluster_dimer}}; \code{\link{cluster_helix}};
\code{\link{cluster_shell}}; \code{\link{equal_angles}};
\code{\link{equal_sizes}}; \code{\link{helix}}
}
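\examples{
## a minimal sketch; the argument values are illustrative assumptions only
cl <- cluster_dimer_end(d = 0.1, dihedral = pi/4)
str(cl)  # list with r, sizes, angles
}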
| /man/cluster_dimer_end.Rd | no_license | dalerxli/cdos | R | false | false | 912 | rd |
#sort CSV based on columns
library(tidyverse)
iDir <- "C:/Users/John Mutua/Dropbox/raw_data"
oDir <- "C:/Users/John Mutua/Dropbox"
dirLS <- list.dirs(paste0(iDir, "/"), recursive = FALSE, full.names = FALSE)
for (dir in dirLS){
fLS = list.files(paste0(iDir, "/", dir), pattern="*.csv", full.names=TRUE)
for (f in fLS){
f_name <- gsub(paste0(".*", dir, "/"), "", f)
f_name <- gsub(".csv.*", "", f_name)
    data <- read_csv(f)  # read the file picked up by the loop
    data <- select(data, -c(X))  # drop the stray row-index column "X"
    #data <- select(data, SITE_ID, LONG, LAT, CL_VARIABLE, LAYER_NAME, YEAR, MONTH, DAY, VALUE)
    data <- select(data, site_id, LON, LAT, YEAR, MM, DD, DOY, YYYYMMDD, RH2M, T2M_MAX)
    data <- data %>%
      arrange(site_id, YEAR, MM, DD, LON, LAT)
    stationLS <- unique(data$site_id)
    data_split <- subset(data, site_id %in% stationLS[81:169])  # keep stations 81-169
    write_csv(data_split, paste0(oDir, "/", f_name, "_81-169", ".csv"))
}
}
| /sort_splitCSV.R | no_license | sonthuybacha/misc_R | R | false | false | 1,073 | r |
library(biomaRt)
library(tidyverse)
library(KEGGREST)
library(pathview)
library(topGO)
BMOutput <- read_csv("BioMarTHumanWEntrez.csv")
CTLVsMutDECell <- read_csv("CtlVsMutDEXCellNeuron.csv") %>%
rename_at(.vars = vars(ends_with(".Join")),
.funs = funs(sub("[.]Join$", "", .))) %>%
left_join(BMOutput, by="external_gene_name")
CTLVsC9DECell <- read_csv("CtlVsC9DEXCellNeuron.csv") %>%
rename_at(.vars = vars(ends_with(".Join")),
.funs = funs(sub("[.]Join$", "", .))) %>%
left_join(BMOutput, by="external_gene_name")
CTLVsTAUDECell <- read_csv("CtlVsTAUDEXCellNeuron.csv") %>%
rename_at(.vars = vars(ends_with(".Join")),
.funs = funs(sub("[.]Join$", "", .))) %>%
left_join(BMOutput, by="external_gene_name")
C9VsTAUDECell <- read_csv("C9VsTAUDEXCellNeuron.csv") %>%
rename_at(.vars = vars(ends_with(".Join")),
.funs = funs(sub("[.]Join$", "", .))) %>%
left_join(BMOutput, by="external_gene_name")
#TopGo
KFMat <- function(DEDF){
tmp <- DEDF %>% drop_na("entrezgene_id")
tmp <- filter(tmp, padj < 0.05)
geneList <- tmp$pvalue
names(geneList) <- tmp$entrezgene_id
selection <- function(allScore){ return(allScore < 0.05)}
GOdata <- new("topGOdata",
ontology = "MF",
allGenes = geneList,
geneSel=selection,
annot = annFUN.org, mapping="org.Hs.eg.db" )
resultKS <- runTest(GOdata, algorithm = "weight01", statistic = "ks")
tabKS <- GenTable(GOdata, raw.p.value = resultKS, topNodes = length(resultKS@score), numChar = 120)
#par(cex = 0.3)
print(showSigOfNodes(GOdata, score(resultKS), firstSigNodes = 2, useInfo = "def"))
return(tabKS)
}
FisherMat <- function(DEDF){
tmp <- DEDF %>% drop_na("entrezgene_id")
geneList <- tmp$pvalue
names(geneList) <- tmp$entrezgene_id
selection <- function(allScore){ return(allScore < 0.05)}
GOdata <- new("topGOdata",
ontology = "MF",
allGenes = geneList,
geneSel=selection,
annot = annFUN.org, mapping="org.Hs.eg.db" )
resultFisher <- runTest(GOdata, algorithm = "elim", statistic = "fisher")
tabFisher <- GenTable(GOdata, raw.p.value = resultFisher, topNodes = length(resultFisher@score),
numChar = 120)
print(showSigOfNodes(GOdata, score(resultFisher), firstSigNodes = 2, useInfo = "def"))
return(tabFisher)
}
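## note: KFMat first restricts to genes with padj < 0.05 and scores them with
## the Kolmogorov-Smirnov test under the "weight01" algorithm, whereas
## FisherMat keeps all genes, dichotomises at p < 0.05 and applies Fisher's
## exact test under "elim"; the two enrichments need not agree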
svg("CTLVsMutKSXCell.svg")
CTLVsMutKS <- KFMat(CTLVsMutDECell)
dev.off()
svg("CTLVsMutFisherXCell.svg")
CTLVsMutFisher <- FisherMat(CTLVsMutDECell)
dev.off()
svg("CTLVsC9KSKSXCell.svg")
CTLVsC9KS <- KFMat(CTLVsC9DECell)
dev.off()
svg("CTLVsC9FisherXCell.svg")
CTLVsC9Fisher <- FisherMat(CTLVsC9DECell)
dev.off()
svg("CTLVsTAUKSXCell.svg")
CTLVsTAUKS <- KFMat(CTLVsTAUDECell)
dev.off()
svg("CTLVsTAUFisherXCell.svg")
CTLVsTAUFisher <- FisherMat(CTLVsTAUDECell)
dev.off()
svg("C9VsTAUKSXCell.svg")
C9VsTAUKS <- KFMat(C9VsTAUDECell)
dev.off()
svg("C9VsTAUFisherXCell.svg")
C9VsTAUFisher <- FisherMat(C9VsTAUDECell)
dev.off()
MUTC9KS <- full_join(CTLVsMutKS, CTLVsC9KS, by=c("GO.ID", "Term"), suffix=c(".CTLVsMUT", ".CTLVsC9"))
TAUVsC9xTAUKS <- full_join(CTLVsTAUKS, C9VsTAUKS, by=c("GO.ID", "Term"), suffix=c(".CTLVsTAU", ".C9VsTAU"))
AllKS <- full_join(MUTC9KS, TAUVsC9xTAUKS, by=c("GO.ID", "Term"))
MUTC9Fisher <- full_join(CTLVsMutFisher, CTLVsC9Fisher, by=c("GO.ID", "Term"), suffix=c(".CTLVsMUT", ".CTLVsC9"))
TAUVsC9xTAUFisher <- full_join(CTLVsTAUFisher, C9VsTAUFisher, by=c("GO.ID", "Term"), suffix=c(".CTLVsTAU", ".C9VsTAU"))
AllFisher <- full_join(MUTC9Fisher, TAUVsC9xTAUFisher, by=c("GO.ID", "Term"))
write_csv(AllKS, "AllGOTermsKSXCell.csv")
write_csv(AllKS, "AllGOTermsFisherXCell.csv")
write_csv(CTLVsMutKS, "CTLVsMutGOKSXCell.csv")
write_csv(CTLVsMutFisher, "CTLVsMutGOFisherXCell.csv")
write_csv(CTLVsC9KS, "CTLVsC9GOKSXCell.csv")
write_csv(CTLVsC9Fisher, "CTLVsC9GOFisherXCell.csv")
write_csv(CTLVsTAUKS, "CTLVsTAUGOKSXCell.csv")
write_csv(CTLVsTAUFisher, "CTLVsTAUGOFisherXCell.csv")
write_csv(C9VsTAUKS, "C9VsTAUGOKSXCell.csv")
write_csv(C9VsTAUFisher, "C9VsTAUGOFisherXCell.csv") | /FTDBrain/12.GOTermXCell.R | no_license | SethMagnusJarvis/PhDFigureCreation | R | false | false | 4,157 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{environ}
\alias{environ}
\title{Parameters associated with environmental effects and stochasticity.}
\format{A list of length 9:
\describe{
\item{harvest_surv}{
Numeric vector of length 1 for aphid survival rate at harvesting.
}
\item{disp_aphid}{
Numeric vector of length 1 for dispersal rates between fields for aphids,
adult wasps.
}
\item{disp_wasp}{
Numeric vector of length 1 for dispersal rates between fields for aphids,
adult wasps.
}
\item{pred_rate}{
Numeric vector of length 1 for predation rate for aphids and non-adult wasps.
}
\item{cycle_length}{
Numeric vector of length 1 for time between harvests (typical for alfalfa).
}
\item{disp_start}{
List of length 2, each item of which contains a 1-length numeric vector
indicating the day at which aphids begin dispersing for 20°C (\code{lowT})
and 27°C (\code{highT}).
It's assumed that only adults are dispersing, but the day at which
this occurs depends on how quickly the aphids are developing.
}
\item{sigma_x}{
Numeric vector of length 1, indicating environmental std dev for aphids.
}
\item{sigma_y}{
Numeric vector of length 1, indicating environmental std dev for wasps.
}
\item{rho}{
Numeric vector of length 1, indicating environmental correlation among instars.
}
}}
\source{
\url{http://doi.wiley.com/10.1890/13-1933.1}
}
\usage{
environ
}
\description{
Parameters associated with environmental effects and stochasticity.
}
\keyword{datasets}
| /man/environ.Rd | no_license | lucasnell/sappp | R | false | true | 1,642 | rd |
## Author : Simon Moulds
## Date : August 2017
## This script loads all the data required by write_gams_input.R, saving
## the various objects as a single RData file.
library(magrittr)
library(dplyr)
library(tidyr)
library(raster)
library(rgdal)
## Subnational inventory data
x = readRDS("data/apy_indiastat_combined_data_qc.rds")
## administrative area map (use this to define cells included in analysis)
adm2 = readOGR("data-raw/india_adm2_2001/data", layer="g2008_2_India")
template = raster(nrows=360, ncols=360, xmn=68, xmx=98, ymn=6, ymx=36)
india_map = rasterize(adm2, template, "ADM0_CODE")
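## grow the rasterised mask outward with a 7x7 focal mean (na.rm=TRUE fills
## the NA fringe) so that coastal cells covered by the other gridded inputs
## are not dropped from the analysis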
india_map = focal(india_map, w=matrix(data=1, nrow=7, ncol=7), fun=mean, na.rm=TRUE, pad=TRUE)
## suitability maps
get_gaez_suit_data = function(crop, path, ...) {
input_levels = c("h_schi","i_scii","h_schr","i_scir","l_sclr")
out = list()
count = 0
for (i in 1:length(input_levels)) {
level = input_levels[i]
dir = paste0("res03crav6190", sub("_", "", level), crop, "_package")
fn = paste0("res03_crav6190", level, "_", crop, ".tif")
if (file.exists(file.path(path, dir, fn))) {
count = count + 1
r = raster(file.path(path, dir, fn))
out[[level]] = r
}
}
if (count == 0) {
stop("no files available for the supplied crop")
}
out
}
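## the GAEZ level codes above are assumed to follow the v3 naming convention:
## h/i/l = high/intermediate/low input level, with "schi"/"scii" the irrigated
## suitability classes and "schr"/"scir"/"sclr" the rain-fed ones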
get_gaez_pot_yield_data = function(crop, path, ...) {
irri_input_levels = c("h","i")
rain_input_levels = c("h","i","l")
out = list()
count = 0
for (i in 1:length(irri_input_levels)) {
level = irri_input_levels[i]
dir = paste0("res02crav6190", level, crop, "000ayld_package")
fn = paste0("res02_crav6190", level, "_", crop, "000a_yld.tif")
if (file.exists(file.path(path, dir, fn))) {
count = count + 1
r = raster(file.path(path, dir, fn))
nm = paste0(level, "_irri")
out[[nm]] = r
}
}
for (i in 1:length(rain_input_levels)) {
level = rain_input_levels[i]
dir = paste0("res02crav6190", level, crop, "150byld_package")
fn = paste0("res02_crav6190", level, "_", crop, "150b_yld.tif")
if (file.exists(file.path(path, dir, fn))) {
count = count + 1
r = raster(file.path(path, dir, fn))
nm = paste0(level, "_rain")
out[[nm]] = r
}
}
if (count == 0) {
stop("no files available for the supplied crop")
}
out
}
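## the "000a" and "150b" suffixes are assumed to distinguish the irrigated and
## rain-fed yield runs in the downloaded GAEZ packages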
gaez_path = "data/GAEZ"
india_ext = extent(68,98,6,36)
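## each crop's five input-level layers are collapsed to two bands with
## stackApply(indices=c(1,1,2,2,2), fun=max): layers 1-2 (irrigated) and
## layers 3-5 (rain-fed) are reduced to their per-pixel maximum, i.e. the
## best attainable class/yield under each water supply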
barley_suit = get_gaez_suit_data("brl", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
barley_pot_yield = get_gaez_pot_yield_data("brly", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
banana_suit = get_gaez_suit_data("ban", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
banana_pot_yield = get_gaez_pot_yield_data("bana", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
## no irrigation suit maps, so use rainfed suit instead
cassava_suit = get_gaez_suit_data("csv", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,1), fun=max) %>% `[[`(c(1,1)) %>% setNames(c("irri_suit","rain_suit"))
cassava_pot_yield = get_gaez_pot_yield_data("casv", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
chickpea_suit = get_gaez_suit_data("chk", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
chickpea_pot_yield = get_gaez_pot_yield_data("chck", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
coconut_suit = get_gaez_suit_data("con", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
coconut_pot_yield = get_gaez_pot_yield_data("cocn", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
coffee_suit = get_gaez_suit_data("cof", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
coffee_pot_yield = get_gaez_pot_yield_data("coff", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
cotton_suit = get_gaez_suit_data("cot", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
cotton_pot_yield = get_gaez_pot_yield_data("cott", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
cowpea_suit = get_gaez_suit_data("cow", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
cowpea_pot_yield = get_gaez_pot_yield_data("cowp", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
finger_millet_suit = get_gaez_suit_data("fml", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
finger_millet_pot_yield = get_gaez_pot_yield_data("fmlt", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
groundnut_suit = get_gaez_suit_data("grd", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
groundnut_pot_yield = get_gaez_pot_yield_data("grnd", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
lentil_suit = get_gaez_suit_data("chk", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
lentil_pot_yield = get_gaez_pot_yield_data("chck", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
maize_suit = get_gaez_suit_data("mze", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
maize_pot_yield = get_gaez_pot_yield_data("maiz", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
other_cereals_suit = get_gaez_suit_data("oat", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
other_cereals_pot_yield = get_gaez_pot_yield_data("oats", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
other_fibre_crops_suit = get_gaez_suit_data("flx", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
other_fibre_crops_pot_yield = get_gaez_pot_yield_data("flax", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
other_oil_crops_suit = get_gaez_suit_data("olv", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
other_oil_crops_pot_yield = get_gaez_pot_yield_data("oliv", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
other_pulses_suit = get_gaez_suit_data("chk", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
other_pulses_pot_yield = get_gaez_pot_yield_data("chck", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
pearl_millet_suit = get_gaez_suit_data("pml", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
pearl_millet_pot_yield = get_gaez_pot_yield_data("pmlt", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
pigeonpea_suit = get_gaez_suit_data("pig", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
pigeonpea_pot_yield = get_gaez_pot_yield_data("pigp", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
potato_suit = get_gaez_suit_data("wpo", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
potato_pot_yield = get_gaez_pot_yield_data("wpot", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
rapeseed_suit = get_gaez_suit_data("rsd", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
rapeseed_pot_yield = get_gaez_pot_yield_data("rape", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
rest_of_crops_suit = get_gaez_suit_data("mze", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
rest_of_crops_pot_yield = get_gaez_pot_yield_data("maiz", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
## rcw - irrigated, rcd - rainfed
rice_suit_w = get_gaez_suit_data("rcw", gaez_path) %>% `[`(1:2) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1), fun=max) %>% setNames(c("irri_suit"))
rice_suit_d = get_gaez_suit_data("rcd", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,1), fun=max) %>% setNames(c("rain_suit"))
rice_suit = stack(list(rice_suit_w, rice_suit_d))
rice_pot_yield_w = get_gaez_pot_yield_data("ricw", gaez_path) %>% `[`(1:2) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1), fun=max) %>% setNames(c("irri_potyield"))
rice_pot_yield_d = get_gaez_pot_yield_data("ricd", gaez_path) %>% `[`(3:5) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,1), fun=max) %>% setNames(c("rain_potyield"))
rice_pot_yield = stack(list(rice_pot_yield_w, rice_pot_yield_d))
sesameseed_suit = get_gaez_suit_data("rsd", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
sesameseed_pot_yield = get_gaez_pot_yield_data("rape", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
sorghum_suit = get_gaez_suit_data("srg", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
sorghum_pot_yield = get_gaez_pot_yield_data("sorg", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
soybean_suit = get_gaez_suit_data("soy", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
soybean_pot_yield = get_gaez_pot_yield_data("soyb", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
sugarcane_suit = get_gaez_suit_data("suc", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
sugarcane_pot_yield = get_gaez_pot_yield_data("sugc", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
sunflower_suit = get_gaez_suit_data("sfl", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
sunflower_pot_yield = get_gaez_pot_yield_data("sunf", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
sweet_potato_suit = get_gaez_suit_data("spo", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
sweet_potato_pot_yield = get_gaez_pot_yield_data("spot", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
teas_suit = get_gaez_suit_data("tea", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
teas_pot_yield = get_gaez_pot_yield_data("teas", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
temperate_fruit_suit = get_gaez_suit_data("mze", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
temperate_fruit_pot_yield = get_gaez_pot_yield_data("maiz", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
tobacco_suit = get_gaez_suit_data("tob", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
tobacco_pot_yield = get_gaez_pot_yield_data("toba", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
tropical_fruit_suit = get_gaez_suit_data("ban", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
tropical_fruit_pot_yield = get_gaez_pot_yield_data("bana", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
vegetables_suit = get_gaez_suit_data("oni", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
vegetables_pot_yield = get_gaez_pot_yield_data("onio", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
wheat_suit = get_gaez_suit_data("whe", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
wheat_pot_yield = get_gaez_pot_yield_data("whea", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
## as for cassava, there are no suitability maps for irrigated yams, so
## use rainfed instead
yams_suit = get_gaez_suit_data("yam", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,1), fun=max) %>% `[[`(c(1,1)) %>% setNames(c("irri_suit","rain_suit"))
yams_pot_yield = get_gaez_pot_yield_data("rape", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
gaez_suit = list(banana = banana_suit,
barley = barley_suit,
cassava = cassava_suit,
chickpea = chickpea_suit,
coconut = coconut_suit,
coffee = coffee_suit,
cotton = cotton_suit,
cowpea = cowpea_suit,
finger_millet = finger_millet_suit,
groundnut = groundnut_suit,
lentil = lentil_suit,
maize = maize_suit,
other_cereals = other_cereals_suit,
other_fibre_crops = other_fibre_crops_suit,
other_oil_crops = other_oil_crops_suit,
other_pulses = other_pulses_suit,
pearl_millet = pearl_millet_suit,
pigeonpea = pigeonpea_suit,
potato = potato_suit,
rapeseed = rapeseed_suit,
rest_of_crops = rest_of_crops_suit,
rice = rice_suit,
sesameseed = sesameseed_suit,
sorghum = sorghum_suit,
soybean = soybean_suit,
sugarcane = sugarcane_suit,
sunflower = sunflower_suit,
sweet_potato = sweet_potato_suit,
tea = teas_suit,
temperate_fruit = temperate_fruit_suit,
tobacco = tobacco_suit,
tropical_fruit = tropical_fruit_suit,
vegetables = vegetables_suit,
wheat = wheat_suit,
yams = yams_suit)
gaez_pot_yield = list(banana = banana_pot_yield,
barley = barley_pot_yield,
cassava = cassava_pot_yield,
chickpea = chickpea_pot_yield,
coconut = coconut_pot_yield,
coffee = coffee_pot_yield,
cotton = cotton_pot_yield,
cowpea = cowpea_pot_yield,
finger_millet = finger_millet_pot_yield,
groundnut = groundnut_pot_yield,
lentil = lentil_pot_yield,
maize = maize_pot_yield,
other_cereals = other_cereals_pot_yield,
other_fibre_crops = other_fibre_crops_pot_yield,
other_oil_crops = other_oil_crops_pot_yield,
other_pulses = other_pulses_pot_yield,
pearl_millet = pearl_millet_pot_yield,
pigeonpea = pigeonpea_pot_yield,
potato = potato_pot_yield,
rapeseed = rapeseed_pot_yield,
rest_of_crops = rest_of_crops_pot_yield,
rice = rice_pot_yield,
sesameseed = sesameseed_pot_yield,
sorghum = sorghum_pot_yield,
soybean = soybean_pot_yield,
sugarcane = sugarcane_pot_yield,
sunflower = sunflower_pot_yield,
sweet_potato = sweet_potato_pot_yield,
tea = teas_pot_yield,
temperate_fruit = temperate_fruit_pot_yield,
tobacco = tobacco_pot_yield,
tropical_fruit = tropical_fruit_pot_yield,
vegetables = vegetables_pot_yield,
wheat = wheat_pot_yield,
yams = yams_pot_yield)
## market access
r = raster("data/market-influence/mkt_access_5m/w001001.adf") %>% crop(india_ext)
## interpolate, because the raw map is missing data for some coastal
## regions that other input maps include
## IDW doesn't work, for some reason, so gap-fill with an iterative focal mean instead
## library(gstat)
## spdf = as(r, "SpatialPointsDataFrame") %>% setNames("mkt_access")
## gs = gstat(formula=mkt_access~1, data=spdf)
## idw = interpolate(r, gs)
## idw[!is.na(r)] = r[!is.na(r)]
## idw[is.na(india_map)] = NA
## india_mkt_access = idw
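## the loop below repeatedly smooths with a 5x5 focal mean (na.rm=TRUE spreads
## values into NA cells) until every cell inside the India mask has a value,
## then restores the original values where present and re-masks to India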
nn = r
pass = 1
repeat {
nn = focal(nn, w=matrix(data=1, nrow=5, ncol=5), fun=mean, na.rm=TRUE, pad=TRUE)
if (!any(is.na(nn[india_map]))) {
break
}
pass = pass + 1
}
nn[!is.na(r)] = r[!is.na(r)]
nn[is.na(india_map)] = NA
india_mkt_access = nn
## crop fraction
india_cropland_frac = raster("data/iiasa-ifpri-cropland-map/iiasa_ifpri_cropland_map_5m.tif") / 100
india_cropland_area = india_cropland_frac * area(india_cropland_frac) * 100 ## km2 -> hectare
## irrigated fraction
india_irri_frac = list.files("data/SCIENTIFIC_DATA_IRRIGATION_MAP_2000_2015", "^[0-9]{4}-[0-9]{4}_5m.tif$", full.names=TRUE) %>% sort %>% stack
india_irri_area = india_irri_frac * area(india_irri_frac) * 100 ## km2 -> hectare
india_irri_area[is.na(india_irri_area)] = 0
## GRIPC
proj = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
gripc_irri_area = raster("data-raw/GRIPC/GRIPC_irrigated_area.asc") %>% `projection<-`(proj)
gripc_rain_area = raster("data-raw/GRIPC/GRIPC_rainfed_area.asc") %>% `projection<-`(proj)
gripc_pady_area = raster("data-raw/GRIPC/GRIPC_paddy_area.asc") %>% `projection<-`(proj)
print("Saving objects...")
save(x, gaez_suit, gaez_pot_yield, india_mkt_access, india_cropland_area, india_irri_area, gripc_irri_area, gripc_rain_area, gripc_pady_area, file="data/input_data.RData")
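## downstream scripts (e.g. write_gams_input.R) can then restore everything
## with: load("data/input_data.RData")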
| /code/load_input_data.R | no_license | nemochina2008/india_irrigated_area_2005 | R | false | false | 21,060 | r |
teas_pot_yield = get_gaez_pot_yield_data("teas", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
temperate_fruit_suit = get_gaez_suit_data("mze", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
temperate_fruit_pot_yield = get_gaez_pot_yield_data("maiz", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
tobacco_suit = get_gaez_suit_data("tob", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
tobacco_pot_yield = get_gaez_pot_yield_data("toba", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
tropical_fruit_suit = get_gaez_suit_data("ban", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
tropical_fruit_pot_yield = get_gaez_pot_yield_data("bana", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
vegetables_suit = get_gaez_suit_data("oni", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
vegetables_pot_yield = get_gaez_pot_yield_data("onio", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
wheat_suit = get_gaez_suit_data("whe", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_suit","rain_suit"))
wheat_pot_yield = get_gaez_pot_yield_data("whea", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
## as for cassava, there are no suitability maps for irrigated yams, so
## use rainfed instead
yams_suit = get_gaez_suit_data("yam", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,1), fun=max) %>% `[[`(c(1,1)) %>% setNames(c("irri_suit","rain_suit"))
yams_pot_yield = get_gaez_pot_yield_data("rape", gaez_path) %>% stack %>% crop(india_ext) %>% stackApply(indices=c(1,1,2,2,2), fun=max) %>% setNames(c("irri_potyield","rain_potyield"))
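## Every per-crop call above repeats the same pattern: load, stack, crop to
## India, collapse the five input layers to irrigated/rainfed maxima, rename.
## A small helper could express it once -- a sketch only (it assumes the
## 5-layer layout and the get_gaez_*_data() signatures used above) and is
## not wired into the calls above:
load_gaez = function(code, getter, names_out) {
  getter(code, gaez_path) %>%
    stack %>%
    crop(india_ext) %>%
    stackApply(indices=c(1,1,2,2,2), fun=max) %>%
    setNames(names_out)
}
## e.g.: wheat_suit = load_gaez("whe", get_gaez_suit_data, c("irri_suit","rain_suit"))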
gaez_suit = list(banana = banana_suit,
barley = barley_suit,
cassava = cassava_suit,
chickpea = chickpea_suit,
coconut = coconut_suit,
coffee = coffee_suit,
cotton = cotton_suit,
cowpea = cowpea_suit,
finger_millet = finger_millet_suit,
groundnut = groundnut_suit,
lentil = lentil_suit,
maize = maize_suit,
other_cereals = other_cereals_suit,
other_fibre_crops = other_fibre_crops_suit,
other_oil_crops = other_oil_crops_suit,
other_pulses = other_pulses_suit,
pearl_millet = pearl_millet_suit,
pigeonpea = pigeonpea_suit,
potato = potato_suit,
rapeseed = rapeseed_suit,
rest_of_crops = rest_of_crops_suit,
rice = rice_suit,
sesameseed = sesameseed_suit,
sorghum = sorghum_suit,
soybean = soybean_suit,
sugarcane = sugarcane_suit,
sunflower = sunflower_suit,
sweet_potato = sweet_potato_suit,
tea = teas_suit,
temperate_fruit = temperate_fruit_suit,
tobacco = tobacco_suit,
tropical_fruit = tropical_fruit_suit,
vegetables = vegetables_suit,
wheat = wheat_suit,
yams = yams_suit)
gaez_pot_yield = list(banana = banana_pot_yield,
barley = barley_pot_yield,
cassava = cassava_pot_yield,
chickpea = chickpea_pot_yield,
coconut = coconut_pot_yield,
coffee = coffee_pot_yield,
cotton = cotton_pot_yield,
cowpea = cowpea_pot_yield,
finger_millet = finger_millet_pot_yield,
groundnut = groundnut_pot_yield,
lentil = lentil_pot_yield,
maize = maize_pot_yield,
other_cereals = other_cereals_pot_yield,
other_fibre_crops = other_fibre_crops_pot_yield,
other_oil_crops = other_oil_crops_pot_yield,
other_pulses = other_pulses_pot_yield,
pearl_millet = pearl_millet_pot_yield,
pigeonpea = pigeonpea_pot_yield,
potato = potato_pot_yield,
rapeseed = rapeseed_pot_yield,
rest_of_crops = rest_of_crops_pot_yield,
rice = rice_pot_yield,
sesameseed = sesameseed_pot_yield,
sorghum = sorghum_pot_yield,
soybean = soybean_pot_yield,
sugarcane = sugarcane_pot_yield,
sunflower = sunflower_pot_yield,
sweet_potato = sweet_potato_pot_yield,
tea = teas_pot_yield,
temperate_fruit = temperate_fruit_pot_yield,
tobacco = tobacco_pot_yield,
tropical_fruit = tropical_fruit_pot_yield,
vegetables = vegetables_pot_yield,
wheat = wheat_pot_yield,
yams = yams_pot_yield)
## market access
r = raster("data/market-influence/mkt_access_5m/w001001.adf") %>% crop(india_ext)
## interpolate, because the raw map is missing data for some coastal
## regions that other input maps include
## IDW doesn't work, for some reason, so fill the gaps with repeated
## 5x5 focal-mean passes (a nearest-neighbour-style dilation) instead
## library(gstat)
## spdf = as(r, "SpatialPointsDataFrame") %>% setNames("mkt_access")
## gs = gstat(formula=mkt_access~1, data=spdf)
## idw = interpolate(r, gs)
## idw[!is.na(r)] = r[!is.na(r)]
## idw[is.na(india_map)] = NA
## india_mkt_access = idw
nn = r
pass = 1
repeat {
nn = focal(nn, w=matrix(data=1, nrow=5, ncol=5), fun=mean, na.rm=TRUE, pad=TRUE)
if (!any(is.na(nn[india_map]))) {
break
}
pass = pass + 1
}
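## each 5x5 focal pass fills NA cells up to 2 cells away from existing data,
## so the repeat loop converges once every cell of india_map has a value;
## `pass` just counts how many dilation passes that took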
nn[!is.na(r)] = r[!is.na(r)]
nn[is.na(india_map)] = NA
india_mkt_access = nn
## crop fraction
india_cropland_frac = raster("data/iiasa-ifpri-cropland-map/iiasa_ifpri_cropland_map_5m.tif") / 100
india_cropland_area = india_cropland_frac * area(india_cropland_frac) * 100 ## km2 -> hectare
## irrigated fraction
india_irri_frac = list.files("data/SCIENTIFIC_DATA_IRRIGATION_MAP_2000_2015", "^[0-9]{4}-[0-9]{4}_5m.tif$", full.names=TRUE) %>% sort %>% stack
india_irri_area = india_irri_frac * area(india_irri_frac) * 100 ## km2 -> hectare
india_irri_area[is.na(india_irri_area)] = 0
## GRIPC
proj = CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
gripc_irri_area = raster("data-raw/GRIPC/GRIPC_irrigated_area.asc") %>% `projection<-`(proj)
gripc_rain_area = raster("data-raw/GRIPC/GRIPC_rainfed_area.asc") %>% `projection<-`(proj)
gripc_pady_area = raster("data-raw/GRIPC/GRIPC_paddy_area.asc") %>% `projection<-`(proj)
print("Saving objects...")
save(x, gaez_suit, gaez_pot_yield, india_mkt_access, india_cropland_area, india_irri_area, gripc_irri_area, gripc_rain_area, gripc_pady_area, file="data/input_data.RData")
# Example preprocessing script.
library(plyr)  # assumed: ddply() comes from plyr (ProjectTemplate normally autoloads it)
first.letter.counts<-ddply(letters, c('FirstLetter'), nrow)
second.letter.counts<-ddply(letters, c('SecondLetter'), nrow) | /letters/munge/01-A.R | no_license | pblimic/R-Projects | R | false | false | 153 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reactable.R
\docType{package}
\name{reactable-package}
\alias{reactable-package}
\alias{_PACKAGE}
\title{reactable: Interactive Data Tables Based on 'React Table'}
\description{
Interactive data tables for R, based on the 'React Table'
JavaScript library. Provides an HTML widget that can be used in 'R Markdown'
documents and 'Shiny' applications, or viewed from an R console.
}
\details{
See the \href{https://glin.github.io/reactable/}{online documentation} for
examples and an extensive usage guide.
}
\seealso{
Useful links:
\itemize{
\item \url{https://glin.github.io/reactable/}
\item \url{https://github.com/glin/reactable}
\item Report bugs at \url{https://github.com/glin/reactable/issues}
}
}
\author{
\strong{Maintainer}: Greg Lin \email{glin@glin.io}
Other contributors:
\itemize{
\item Tanner Linsley (React Table library) [contributor, copyright holder]
\item Emotion team and other contributors (Emotion library) [contributor, copyright holder]
}
}
\keyword{internal}
| /man/reactable-package.Rd | permissive | dgyurko/reactable | R | false | true | 1,084 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/detect_referencing_error.R
\name{correct_referencing_error}
\alias{correct_referencing_error}
\title{Correct Referencing Errors}
\usage{
correct_referencing_error(observed_chemical_shifts,
computed_chemical_shifts, ratio_mean_sd = 3, threshold = -2,
verbose = FALSE)
}
\arguments{
\item{observed_chemical_shifts}{observed chemical shift dataframe. Should contain fields: resname, resid, nucleus, expCS}
\item{computed_chemical_shifts}{computed chemical shift dataframe. Should contain fields: resname, resid, nucleus, predCS}
\item{ratio_mean_sd}{ratio of mean to sd of the error.}
\item{threshold}{cutoff to use for identifying systematic errors.}
\item{verbose}{if TRUE, print progress log from MCMCpack. Default is FALSE}
}
\description{
This function allows the user to detect potential referencing errors by comparing observed and computed chemical shifts.
}
\examples{
correct_referencing_error(observed_chemical_shifts, computed_chemical_shifts)
}
| /man/correct_referencing_error.Rd | permissive | atfrank/nmR | R | false | true | 1,034 | rd |
#p2t2m.c 2017/09/30 change to c> mysql > r-plot > php
#install.packages("showtext")
library(showtext)
library(magrittr)
library(RMySQL)
conn <- dbConnect(MySQL(),host="192.168.2.60", dbname = "powermonitor", username="rpi", password="12345678")
sqldf = dbGetQuery(conn,"select * from powermonitor.power where timestamp > (now() - INTERVAL 72 HOUR) LIMIT 4320;")#3days
daysqldf = dbGetQuery(conn,"select * from powermonitor.power where DATE(timestamp) = CURDATE() and hour(timestamp) BETWEEN '05' AND '20';")#today05~20
ghost_w1=dbGetQuery(conn,"select round(avg(w1),0) from powermonitor.power where hour(timestamp)=hour(now()) and minute(timestamp)=minute(now()) group by date_format(timestamp,'%H:%i');")#group w1 by minute
ghost_hs1=dbGetQuery(conn,"select date_format(timestamp,'%H:%i') as t,avg(w1) as w1 from powermonitor.power group by date_format(timestamp,'%H:%i');")#group w1 by minute
sqldaykwh1=dbGetQuery(conn,"select from_days(to_days(timestamp)) as date,max(wh1) as max, min(wh1) as min ,max(wh1) - min(wh1) as daywh from powermonitor.power group by from_days(to_days(timestamp));")#everyday point
ghost_w2=dbGetQuery(conn,"select round(avg(w2),0) from powermonitor.power where hour(timestamp)=hour(now()) and minute(timestamp)=minute(now()) group by date_format(timestamp,'%H:%i');")#group w1 by minute
ghost_hs2=dbGetQuery(conn,"select date_format(timestamp,'%H:%i') as t,avg(w2) as w2 from powermonitor.power group by date_format(timestamp,'%H:%i');")#group w1 by minute
sqldaykwh2=dbGetQuery(conn,"select from_days(to_days(timestamp)) as date,max(wh2) as max, min(wh2) as min ,max(wh2) - min(wh2) as daywh from powermonitor.power group by from_days(to_days(timestamp));")#everyday point
dbDisconnect(conn)
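## (a more defensive pattern would be on.exit(dbDisconnect(conn)) right after
## dbConnect(), so the connection is released even if a query above fails)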
showtext_auto(enable = TRUE)
font_add("cwTeXYen","cwyen.ttf")
sqldf=as.data.frame(sqldf,stringsAsFactors=FALSE)
sqldf$timestamp%<>%strptime(., "%Y-%m-%d %H:%M:%S")
daysqldf=as.data.frame(daysqldf,stringsAsFactors=FALSE)
daysqldf$timestamp%<>%strptime(., "%Y-%m-%d %H:%M:%S")
ghost_hs1%<>%as.data.frame(.,stringsAsFactors=FALSE)
ghost_hs1$t%<>%strptime(., "%H:%M")
sqldaykwh1%<>%as.data.frame(.,stringsAsFactors=FALSE)
sqldaykwh1$date%<>%as.Date(.,origin="1970-01-01")
ghost_hs2%<>%as.data.frame(.,stringsAsFactors=FALSE)
ghost_hs2$t%<>%strptime(., "%H:%M")
sqldaykwh2%<>%as.data.frame(.,stringsAsFactors=FALSE)
sqldaykwh2$date%<>%as.Date(.,origin="1970-01-01")
msg=paste(
paste("現在版本",tail(sqldf$timestamp,1)),
paste("現在時間", Sys.time()),
paste("p1即時功率", tail(sqldf$w1,1),"w/",tail(sqldf$a1*sqldf$v1,1),"w/",ghost_w1,"w1"),
paste("p2即時功率", tail(sqldf$w2,1),"w/",tail(sqldf$a2*sqldf$v2,1),"w/",ghost_w2,"w2"),
paste("現在t1溫度", tail(sqldf$temp1,1),"度"),
paste("現在t2溫度", tail(sqldf$temp2,1),"度"),
paste("p1現在功率因數", tail(replace(sqldf$w1/(sqldf$a1*sqldf$v1), is.na(sqldf$w1/(sqldf$a1*sqldf$v1)), 0),1),"%"),
paste("p2現在功率因數", tail(replace(sqldf$w2/(sqldf$a2*sqldf$v2), is.na(sqldf$w2/(sqldf$a2*sqldf$v2)), 0),1),"%"),
paste("p1平均功率因數", mean(sqldf$w1/(sqldf$a1*sqldf$v1),na.rm = T),"%"),
paste("p2平均功率因數", mean(sqldf$w2/(sqldf$a2*sqldf$v2),na.rm = T),"%"),
paste("p1今天耗電量", tail(sqldaykwh1$daywh/1000,1),"度"),
paste("p2今天耗電量", tail(sqldaykwh2$daywh/1000,1),"度"),
paste("系統已運作", length(sqldaykwh1$date),"天"),
paste("p1累積耗電量", max(sqldf$wh1)/1000,"度"),
paste("p2累積耗電量", max(sqldf$wh2)/1000,"度"),
paste("p1平均每日耗電", max(sqldf$wh1)/(1000*length(sqldaykwh1$date)),"度"),
paste("p2平均每日耗電", max(sqldf$wh2)/(1000*length(sqldaykwh2$date)),"度"),
sep="\r\n")
write.table(msg,"/home/pi/powermonitor/R/msg.csv",row.names = FALSE)
#write.table(msg,"~/msg.csv",row.names = FALSE)
png("/home/pi/powermonitor/R/p1v.png",width = 1200, height = 400)
plot(sqldf$timestamp,sqldf$v1,type="l",main="p1電壓",xlab="Time",ylab="電壓(V)",family ="cwTeXYen" )
abline(h=mean(sqldf$v1),col="blue")
dev.off()
png("/home/pi/powermonitor/R/p2v.png",width = 1200, height = 400)
plot(sqldf$timestamp,sqldf$v2,type="l",main="p2電壓",xlab="Time",ylab="電壓(V)")
abline(h=mean(sqldf$v2),col="blue")
dev.off()
#plot(sqldf$timestamp,sqldf$a1,type="l",main="電流",xlab="Time",ylab="電流(A)")
png("/home/pi/powermonitor/R/p1wd.png",width = 1200, height = 400)
plot(daysqldf$timestamp,daysqldf$w1,type="l",main="p1今日有功功率",xlab="Time",ylab="有功功率(w)")
dev.off()
png("/home/pi/powermonitor/R/p2wd.png",width = 1200, height = 400)
plot(daysqldf$timestamp,daysqldf$w2,type="l",main="p2今日有功功率",xlab="Time",ylab="有功功率(w)")
dev.off()
#plot(daysqldf$timestamp,daysqldf$temp1,type="l",main="t1今日溫度",xlab="Time",ylab="溫度")
#plot(sqldf$timestamp,sqldf$v1*sqldf$a1,type="l",main="視在功率",xlab="Time",ylab="視在功率(V*A)")
png("/home/pi/powermonitor/R/p1w.png",width = 1200, height = 400)
plot(sqldf$timestamp,sqldf$w1,type="l",main="p1有功功率",xlab="Time",ylab="有功功率(W)")
dev.off()
png("/home/pi/powermonitor/R/p2w.png",width = 1200, height = 400)
plot(sqldf$timestamp,sqldf$w2,type="l",main="p2有功功率",xlab="Time",ylab="有功功率(W)")
dev.off()
png("/home/pi/powermonitor/R/t1.png",width = 1200, height = 400)
plot(sqldf$timestamp,sqldf$temp1,type="l",main="p1現在設備溫度",xlab="Time",ylab="溫度")
dev.off()
png("/home/pi/powermonitor/R/t2.png",width = 1200, height = 400)
plot(sqldf$timestamp,sqldf$temp2,type="l",main="p2現在設備溫度",xlab="Time",ylab="溫度")
dev.off()
#plot(sqldf$timestamp,sqldf$temp1,type="l",ylim=c(min(sqldf$temp2)*0.98,max(sqldf$temp1)*1.02),main="混合溫度",xlab="Time",ylab="溫度")
#par(new = T)
#plot(sqldf$timestamp,sqldf$temp2,type="l",ylim=c(min(sqldf$temp2)*0.98,max(sqldf$temp1)*1.02), axes=F, xlab=NA, ylab=NA, cex=1.2,col="green")
#plot(sqldf$timestamp,sqldf$wh1/1000,type="l",main="累計千瓦時",xlab="Time",ylab="度(KWH)")
#plot(sqldf$timestamp,sqldf$w1/(sqldf$a1*sqldf$v1),type="l",main="功率因數",xlab="Time",ylab="功率因數(%)")
#abline(h=c(0.7,0.9,1))
#abline(h=mean(sqldf$w1/(sqldf$a1*sqldf$v1),na.rm = T),col="blue")
#par(mar = c(5,5,2,5))
#plot(sqldf$timestamp,sqldf$wh1/1000,type="l",main="功率與累計產量",xlab="Time",ylab="度(KWH)")
#par(new = T)
#plot(sqldf$times,sqldf$v1*sqldf$a1,type="l", axes=F, xlab=NA, ylab=NA, cex=1.2,col="green")
#axis(side = 4)
#mtext(side = 4, line = 3, 'W=A*V')
png("/home/pi/powermonitor/R/p1kwh.png",width = 1200, height = 400)
plot(as.Date(sqldaykwh1$date,origin="1970/01/01"),sqldaykwh1$daywh/1000,main="p1每日用電度數",xlab="Time",ylab="度(KWH)",ylim=c(0,max(sqldaykwh1$daywh/1000,na.rm = T)+0.2))
abline(h=mean(sqldaykwh1$daywh/1000,na.rm = T))
stdevs <- mean(sqldaykwh1$daywh/1000,na.rm = T) + c(-1, +1) * sd(sqldaykwh1$daywh/1000,na.rm = T)
abline(h=stdevs, lty=2, lwd=4, col="blue")
dev.off()
png("/home/pi/powermonitor/R/p2kwh.png",width = 1200, height = 400)
plot(as.Date(sqldaykwh2$date,origin="1970/01/01"),sqldaykwh2$daywh/1000,main="p2每日用電度數",xlab="Time",ylab="度(KWH)",ylim=c(0,max(sqldaykwh2$daywh/1000,na.rm = T)+0.2))
abline(h=mean(sqldaykwh2$daywh/1000,na.rm = T))
stdevs <- mean(sqldaykwh2$daywh/1000,na.rm = T) + c(-1, +1) * sd(sqldaykwh2$daywh/1000,na.rm = T)
abline(h=stdevs, lty=2, lwd=4, col="blue")
dev.off()
png("/home/pi/powermonitor/R/p1d.png",width = 1200, height = 400)
plot(ghost_hs1,type="l",col="blue")
dev.off()
png("/home/pi/powermonitor/R/p2d.png",width = 1200, height = 400)
plot(ghost_hs2,type="l",col="blue")
dev.off()
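## The png()/plot()/dev.off() triplets above all share one shape; a wrapper
## like this would cut the repetition (a sketch -- defined here but not used
## by the calls above; the arguments mirror the existing calls):
save_plot <- function(file, expr, width=1200, height=400) {
  png(file, width=width, height=height)
  on.exit(dev.off())  # close the device even if the plot call errors
  eval.parent(substitute(expr))
}
## e.g.: save_plot("/home/pi/powermonitor/R/p1v.png",
##                 plot(sqldf$timestamp, sqldf$v1, type="l"))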
| /R/plot.r | no_license | andy12345638/powerread | R | false | false | 7,587 | r |
#' This class is a generic container for RTIGER analysis
#'
#' @slot matobs Nested lists. The first level is a list of samples; for each sample there are 5 matrices that contain the allele counts for each position.
#' @slot params A list with the parameters after training.
#' @slot info List with phenotypic data of the samples.
#' @slot Viterbi List of chromosomes with the Viterbi path per sample.
#' @slot Probabilities Computed probabilities for the EM algorithm.
#' @slot num.iter Number of iterations needed to stop the EM algorithm.
#' @rdname RTIGERDataSet
#' @exportClass RTIGER
.RTIGER = setClass("RTIGER",
representation = representation(
# Grangesobs = "list",
matobs = "list",
params = "list",
# FilteringThreshold = "list",
info = "list",
# Model = "list",
Viterbi = "list",
Probabilities = "list",
num.iter = "numeric"
))
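## Illustrative construction (a sketch; real objects are normally filled in
## by the package's fitting routines, and every slot value below is a
## placeholder, not a meaningful default):
## empty_obj = .RTIGER(matobs = list(), params = list(rigidity = NA_real_),
##                     info = list(sample_nr = 0, part_nr = 0,
##                                 part_names = character(0)),
##                     Viterbi = list(), Probabilities = list(),
##                     num.iter = 0)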
#' Prints description of RTIGER object
#'
#' @keywords internal
#' @noRd
#'
setMethod(f = "show", signature = c("RTIGER"), function(object) {
cat("An object of class \"", class(object), "\" \n", sep = "")
cat(" Number of samples: ", object@info$sample_nr, "\n", sep = "")
cat(" Number of chromosomes per sample: ", object@info$part_nr, "\n",
sep = "")
cat(" Chromosome names: ", paste(as.character(object@info$part_names), collapse = " "), "\n", sep = "")
cat(" Rigidity value: ", object@params$rigidity, "\n", sep = "")
# cat(" Number of observations per chromosome: ", paste(as.character(object@info$part_lengths), collapse = " "), "\n", sep = "")
})
| /R/RTIGER-Class.R | no_license | rfael0cm/RTIGER | R | false | false | 1,745 | r |
## Plot4.R
## Reading in the full data set and changing the date format
all_data <- read.csv("household_power_consumption.txt", header=TRUE, sep =";", na.strings="?")
all_data$Date <- as.Date(all_data$Date, format="%d/%m/%Y")
## Creating the subset of data containg the dates 2/1/2007 and 2/2/2007
data <- subset(all_data, (Date == "2007-02-01" | Date == "2007-02-02"))
## Creating a new column for dateTime
data$dateTime <- as.POSIXct(paste(data$Date,data$Time))
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(dateTime, Global_active_power, type="l",
xlab="", ylab="Global Active Power (kilowatts)")
plot(dateTime, Voltage, type="l", xlab="datetime")
plot(dateTime, Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
lines(dateTime, Sub_metering_2, col="Red")
lines(dateTime, Sub_metering_3, col="Blue")
legend("topright", lty=1, col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(dateTime, Global_reactive_power, type="l", xlab="datetime")
})
## Creating the PNG file
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() | /plot4.R | no_license | cjenkins31/ExData_Plotting1 | R | false | false | 1,267 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getResults.R
\name{getm}
\alias{getm}
\title{Get gene length}
\usage{
getm(tree, phydat, spe1, spe2)
}
\arguments{
\item{tree}{A phylogenetic tree}
\item{phydat}{An object of class phydat}
\item{spe1}{The name of species 1}
\item{spe2}{The name of species 2}
}
\value{
The length of each gene.
}
\description{
Get the length of two genes. (Genes must be of equal length.)
}
| /man/getm.Rd | permissive | dinaIssakova/rgenesconverged | R | false | true | 455 | rd |
retrieve_flow <- function(gage = NULL, start.date = "1950-10-30",
end.date = Sys.Date(), service.type = "iv",
site.table) {
if (service.type == "iv") {
e.date <- end.date + lubridate::days(1)
} else {
e.date <- end.date
}
gage.df <- dataRetrieval::readNWISdata(service = service.type,
site = gage,
startDate = start.date,
#startDate = "2016-01-01",
                                         endDate = e.date, # use the iv-adjusted end date computed above
asDateTime = FALSE,
#tz = "America/New_York",
# Dischrage Code.
parameterCd = "00060")
#--------------------------------------------------------------------------
if (nrow(gage.df) == 0) {
na.df <- data.frame(agency = NA,
site = NA,
date_time = NA,
wuql_code = NA,
timezone = NA,
flow = NA)
return(na.df)
}
#--------------------------------------------------------------------------
name.string <- site.table %>%
filter(site_no == gage) %>%
pull(code)
#--------------------------------------------------------------------------
names(gage.df)[4:5] <- c("discharge_cfs", "qual_code")
#--------------------------------------------------------------------------
if (service.type == "iv") {
gage.df <- gage.df %>%
dplyr::mutate(dateTime = as.POSIXct(dateTime, format = "%Y-%m-%dT%H:%M"),
dateTime = as.POSIXct(round(dateTime, units = "hours")),
# Subtract 5 hours to convert UTC to EST.
dateTime = dateTime - lubridate::hours(5),
tz_cd = "EST") %>%
dplyr::filter(dateTime >= start.date,
dateTime <= end.date)
}
#--------------------------------------------------------------------------
final.df <- gage.df %>%
rename(agency = agency_cd,
site = site_no,
timezone = tz_cd) %>%
group_by(agency, site, dateTime) %>%
summarize(flow = mean(discharge_cfs)) %>%
    ungroup() %>%
mutate(site = name.string) %>%
rename(date_time = dateTime)
#----------------------------------------------------------------------------
return(final.df)
}
#------------------------------------------------------------------------------
isolate_retrieve_flow <- function(gages = NULL, start.date = "1950-10-30",
end.date = Sys.Date(), service.type = "iv",
site.table){
shiny::isolate(retrieve_flow(gages, start.date,
end.date, service.type, site.table))
}
#------------------------------------------------------------------------------
pull_flow <- function(gages = NULL, start.date = "1950-10-30",
end.date = Sys.Date(), service.type = "iv",
shiny = FALSE, n.cores = NULL){
if (!service.type %in% c("iv", "dv")) stop("service.type must be 'iv' (instantaneous) or 'dv' (daily values).")
# file.dir <- file.path("www/potomac_gages.csv")
file.dir <- file.path("data/potomac_gages.csv")
site.df <- data.table::fread(file.dir, data.table = FALSE,
colClasses = list(character = c("site_no"))) %>%
mutate(site_no = paste0("0", site_no))
#----------------------------------------------------------------------------
if(!is.null(gages)) site.df <- dplyr::filter(site.df, code %in% gages)
#----------------------------------------------------------------------------
site.vec <- site.df$site_no
#----------------------------------------------------------------------------
library(parallel)
if (is.null(n.cores)) n.cores <- detectCores() - 1
cl <- makeCluster(n.cores)
on.exit(stopCluster(cl))
clusterExport(cl = cl,
varlist = c("site.df", "site.vec", "retrieve_flow", "isolate_retrieve_flow"),
envir = environment())
clusterEvalQ(cl, c(library(dplyr), library(dataRetrieval)))
#----------------------------------------------------------------------------
# This loop sequences through each site and imports new data.
# The last input date in the COOP database table is used as the startDate input
# for the readNWISdata function. Any duplicated rows are then removed and the
# new table overwrites the old table in the COOP database.
# This method should ensure that no data is excluded from the table because
# any disruption in the daily import schedule will allow the script to pick up
# from the last import date.
if (shiny == FALSE) {
flow.list <- parLapply(cl, site.vec, function(site.i) {
retrieve_flow(site.i, start.date, end.date, service.type, site.df)
})
} else {
flow.list <- parLapply(cl, site.vec, function(site.i) {
isolate_retrieve_flow(site.i, start.date, end.date, service.type, site.df)
})
}
#------------------------------------------------------------------------------
final.df <- bind_rows(flow.list)
#------------------------------------------------------------------------------
return(final.df)
}
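#------------------------------------------------------------------------------
## Example call (illustrative; gage codes must match the `code` column of
## data/potomac_gages.csv, which is not shown here):
## flow.df <- pull_flow(gages = NULL,        # NULL = all gages in the table
##                      start.date = "2019-01-01",
##                      end.date = Sys.Date(),
##                      service.type = "dv", # daily values
##                      n.cores = 2)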
| /functions/pull_flow_func.R | no_license | InterstateCommissionPotomacRiverBasin/pull_flow | R | false | false | 5,376 | r |
#!/usr/bin/Rscript
options(scipen=100)
startTime <- Sys.time()
### !!! UPDATE 24.07.19: STOUFFER ONE-SIDED
################ USE THE FOLLOWING FILES FROM PREVIOUS STEPS
# - script3: all_meanLogFC_TAD.Rdata
# - script8: all_obs_ratioDown.Rdata
# - script9sameNbr: emp_pval_meanLogFC.Rdata
# - script10sameNbr: emp_pval_meanCorr.Rdata
################################################################################
################ OUTPUT
# - emp_pval_combined.Rdata + tables
################################################################################
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2")
script0_name <- "0_prepGeneData"
script1_name <- "1_runGeneDE"
#script8_name <- "8c10000_runAllDown"
#script9_name <- "910000_runEmpPvalMeanTADLogFC"
script8_name <- "8cOnlyRatioDownFastSave_runAllDown"
script9sameNbr_name <- "9sameNbrDouble_runEmpPvalMeanTADLogFC"
#cat("!!! WARNING: USE 10000 PERMUTATIONS DATA !!!\n")
script10sameNbr_name <- "10sameNbr_runEmpPvalMeanTADCorr"
script_name <- "11sameNbrSameNbrDouble_runEmpPvalCombined"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
source("main_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
pipLogFile <- paste0(pipOutFold, "/", format(Sys.time(), "%Y%d%m%H%M%S"),"_", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
twoTailsStouffer <- FALSE
# stouffer(c(emp_pval_intraCorr[x], emp_pval_logFC[x]), two.tails = twoTailsStouffer)))
# ADDED 16.11.2018 to check using other files
txt <- paste0("inputDataType\t=\t", inputDataType, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("gene2tadDT_file\t=\t", gene2tadDT_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("TADpos_file\t=\t", TADpos_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("settingF\t=\t", settingF, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("twoTailsStouffer\t=\t", as.character(twoTailsStouffer), "\n")
printAndLog(txt, pipLogFile)
################################****************************************************************************************
####################################################### PREPARE INPUT
################################****************************************************************************************
# load emp. p-val logFC
emp_pval_logFC <- eval(parse(text = load(file.path(pipOutFold, script9sameNbr_name, "emp_pval_meanLogFC.Rdata"))))
# load emp. p-val intraTAD corr
emp_pval_intraCorr <- eval(parse(text = load(file.path(pipOutFold, script10sameNbr_name, "emp_pval_meanCorr.Rdata"))))
intersectRegions <- sort(intersect(names(emp_pval_logFC), names(emp_pval_intraCorr)))
txt <- paste0(toupper(script_name), "> Take regions in common between permutations and observed data \n")
printAndLog(txt, pipLogFile)
### filter TAD only regions
if(useTADonly) {
if( length(grep("_TAD", names(emp_pval_logFC))) > 0 ) {
txt <- paste0(toupper(script_name), "> !!! WARNING: empirical p-val logFC data contain non-TAD regions as well !!!\n")
printAndLog(txt, pipLogFile)
}
if( length(grep("_TAD", names(emp_pval_intraCorr))) > 0 ) {
txt <- paste0(toupper(script_name), "> !!! WARNING: empirical p-val intraCorr data contain non-TAD regions as well !!!\n")
printAndLog(txt, pipLogFile)
}
initLen <- length(intersectRegions)
intersectRegions <- intersectRegions[grep("_TAD", intersectRegions)]
txt <- paste0(toupper(script_name), "> Take only TAD regions: ", length(intersectRegions), "/", initLen, "\n")
printAndLog(txt, pipLogFile)
}
initLen <- length(emp_pval_logFC)
emp_pval_logFC <- emp_pval_logFC[intersectRegions]
txt <- paste0(toupper(script_name), "> ... -> emp. p-val logFC: ", length(emp_pval_logFC), "/", initLen, "\n")
printAndLog(txt, pipLogFile)
initLen <- length(emp_pval_intraCorr)
emp_pval_intraCorr <- emp_pval_intraCorr[intersectRegions]
txt <- paste0(toupper(script_name), "> ... -> emp. p-val intraCorr: ", length(emp_pval_intraCorr), "/", initLen, "\n")
printAndLog(txt, pipLogFile)
stopifnot(!any(is.na(emp_pval_logFC)))
stopifnot(!any(is.na(emp_pval_intraCorr)))
stopifnot(all(names(emp_pval_logFC) == names(emp_pval_intraCorr)))
################################****************************************************************************************
####################################################### CALCULATE EMP. PVAL INTRA-TAD CORR & WRITE OUTPUT
################################****************************************************************************************
# COMBINE EMPIRICAL P-VALUES
emp_pval_combined <- unlist(sapply(seq_along(intersectRegions), function(x)
stouffer(c(emp_pval_intraCorr[x], emp_pval_logFC[x]), two.tails = twoTailsStouffer)))
names(emp_pval_combined) <- intersectRegions
stopifnot(length(emp_pval_combined) == length(intersectRegions))
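# For reference, a one-sided Stouffer combination of k p-values is
# z_i = qnorm(1 - p_i), p_comb = 1 - pnorm(sum(z_i) / sqrt(k)). A sketch
# (the stouffer() actually used here comes from TAD_DE_utils.R, sourced
# above, and may differ in detail, e.g. in its two.tails handling):
# stouffer_one_sided <- function(p) {
#   z <- qnorm(p, lower.tail = FALSE)              # p-values -> z-scores
#   pnorm(sum(z) / sqrt(length(z)), lower.tail = FALSE)
# }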
save(emp_pval_combined, file= paste0(curr_outFold, "/emp_pval_combined.Rdata"))
cat(paste0("... written: ", curr_outFold, "/emp_pval_combined.Rdata", "\n"))
# NOT DONE IN THIS VERSION !
##***** build and save table
## TAD | meanFC | ratioDown | emp. p-val combined | genes list comma separated
## load emp. p-val logFC
#gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
#gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
#pipeline_geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
#DE_table <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))))
#entrezDT <- read.delim(entrezDT_file, header=T, stringsAsFactors = F)
#entrezDT$entrezID <- as.character(entrezDT$entrezID)
#DE_table <- DE_table[DE_table$genes %in% names(pipeline_geneList),]
#stopifnot(nrow(DE_table) > 0)
#stopifnot(all(DE_table$genes %in% names(pipeline_geneList)))
#DE_entrez <- as.character(unlist(sapply(DE_table$genes, function(x) pipeline_geneList[x])))
#gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% DE_entrez,]
#stopifnot(all(DE_entrez %in% entrezDT$entrezID))
#################################****************************************************************************************
######################################################## BUILD TABLES & WRITE OUTPUT
#################################****************************************************************************************
#obs_logFC <- eval(parse(text = load(paste0(pipOutFold, "/", script3_name, "/all_meanLogFC_TAD.Rdata"))))
#obs_ratioDown <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/all_obs_ratioDown.Rdata"))))
#interReg <- sort(Reduce(intersect, list(names(obs_logFC), names(obs_ratioDown), names(emp_pval_combined))))
#txt <- paste0(toupper(script_name), "> Number of TADs in logFC: ", length(obs_logFC), "\n")
#printAndLog(txt, pipLogFile)
#txt <- paste0(toupper(script_name), "> Number of TADs in ratioDown: ", length(obs_ratioDown), "\n")
#printAndLog(txt, pipLogFile)
#txt <- paste0(toupper(script_name), "> Number of TADs in emp. p-val combined: ", length(emp_pval_combined), "\n")
#printAndLog(txt, pipLogFile)
#txt <- paste0(toupper(script_name), "> Number of TADs in the intersect: ", length(interReg), "\n")
#printAndLog(txt, pipLogFile)
#interReg_empPval <- emp_pval_combined[names(emp_pval_combined) %in% interReg]
#interReg_empPval_sort <- sort(interReg_empPval)
#pvalDT <- foreach(i_reg = 1:length(interReg_empPval_sort), .combine = 'rbind') %do% {
# reg <- names(interReg_empPval_sort)[i_reg]
# reg_genes_entrez <- gene2tadDT$entrezID[gene2tadDT$region == reg]
# reg_genes_symbol <- unlist(sapply(reg_genes_entrez, function(x) entrezDT$symbol[entrezDT$entrezID == x]))
# reg_genes_symbol_list <- paste0(reg_genes_symbol, collapse = ",")
# data.frame(rank_pval = i_reg,
# TAD = reg,
# meanLogFC = obs_logFC[reg],
# ratioDown = obs_ratioDown[reg],
# emp_pval_comb = emp_pval_combined[reg],
# TAD_genes = reg_genes_symbol_list
# )
#}
#write.table(pvalDT, file = paste0(curr_outFold, "/TAD_ratioDown_logFC_empPvalComb.txt"), col.names =T , row.names = F, quote=F, sep="\t")
#cat(paste0("... written: ", paste0(curr_outFold, "/TAD_ratioDown_logFC_empPvalComb.txt"), "\n"))
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
#txt <- paste0("!!! WARNING: USE 10000 PERMUTATIONS DATA !!!\n")
#printAndLog(txt, pipLogFile)
#cat(paste0("*** DONE: ", script_name, "\n"))
| /11sameNbrSameNbrDouble_runEmpPvalCombined.R | no_license | marzuf/TAD_DE_pipeline_v2 | R | false | false | 10,026 | r |
options(scipen=100)
startTime <- Sys.time()
### !!! UPDATE 24.07.19: STOUFFER ONE-SIDED
################ USE THE FOLLOWING FILES FROM PREVIOUS STEPS
# - script3: all_meanLogFC_TAD.Rdata
# - script8: all_obs_ratioDown.Rdata
# - script9sameNbr: emp_pval_meanLogFC.Rdata
# - script10sameNbr: emp_pval_meanCorr.Rdata
################################################################################
################ OUTPUT
# - emp_pval_combined.Rdata + tables
################################################################################
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1)
settingF <- args[1]
stopifnot(file.exists(settingF))
pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2")
script0_name <- "0_prepGeneData"
script1_name <- "1_runGeneDE"
#script8_name <- "8c10000_runAllDown"
#script9_name <- "910000_runEmpPvalMeanTADLogFC"
script8_name <- "8cOnlyRatioDownFastSave_runAllDown"
script9sameNbr_name <- "9sameNbrDouble_runEmpPvalMeanTADLogFC"
#cat("!!! WARNING: USE 10000 PERMUTATIONS DATA !!!\n")
script10sameNbr_name <- "10sameNbr_runEmpPvalMeanTADCorr"
script_name <- "11sameNbrSameNbrDouble_runEmpPvalCombined"
stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
cat(paste0("> START ", script_name, "\n"))
source("main_settings.R")
source(settingF)
source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE)) # error bar
# create the directories
curr_outFold <- paste0(pipOutFold, "/", script_name)
system(paste0("mkdir -p ", curr_outFold))
pipLogFile <- paste0(pipOutFold, "/", format(Sys.time(), "%Y%d%m%H%M%S"),"_", script_name, "_logFile.txt")
system(paste0("rm -f ", pipLogFile))
twoTailsStouffer <- FALSE
# stouffer(c(emp_pval_intraCorr[x], emp_pval_logFC[x]), two.tails = twoTailsStouffer)))
# ADDED 16.11.2018 to check using other files
txt <- paste0("inputDataType\t=\t", inputDataType, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("gene2tadDT_file\t=\t", gene2tadDT_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("TADpos_file\t=\t", TADpos_file, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("settingF\t=\t", settingF, "\n")
printAndLog(txt, pipLogFile)
txt <- paste0("twoTailsStouffer\t=\t", as.character(twoTailsStouffer), "\n")
printAndLog(txt, pipLogFile)
################################****************************************************************************************
####################################################### PREPARE INPUT
################################****************************************************************************************
# load emp. p-val logFC
emp_pval_logFC <- eval(parse(text = load(file.path(pipOutFold, script9sameNbr_name, "emp_pval_meanLogFC.Rdata"))))
# load emp. p-val intraTAD corr
emp_pval_intraCorr <- eval(parse(text = load(file.path(pipOutFold, script10sameNbr_name, "emp_pval_meanCorr.Rdata"))))
intersectRegions <- sort(intersect(names(emp_pval_logFC), names(emp_pval_intraCorr)))
txt <- paste0(toupper(script_name), "> Take regions in common between permutations and observed data \n")
printAndLog(txt, pipLogFile)
### filter TAD only regions
if(useTADonly) {
if( length(grep("_TAD", names(emp_pval_logFC))) > 0 ) {
txt <- paste0(toupper(script_name), "> !!! WARNING: empirical p-val logFC data contain non-TAD regions as well !!!\n")
printAndLog(txt, pipLogFile)
}
if( length(grep("_TAD", names(emp_pval_intraCorr))) > 0 ) {
txt <- paste0(toupper(script_name), "> !!! WARNING: empirical p-val intraCorr data contain non-TAD regions as well !!!\n")
printAndLog(txt, pipLogFile)
}
initLen <- length(intersectRegions)
intersectRegions <- intersectRegions[grep("_TAD", intersectRegions)]
txt <- paste0(toupper(script_name), "> Take only TAD regions: ", length(intersectRegions), "/", initLen, "\n")
printAndLog(txt, pipLogFile)
}
initLen <- length(emp_pval_logFC)
emp_pval_logFC <- emp_pval_logFC[intersectRegions]
txt <- paste0(toupper(script_name), "> ... -> emp. p-val logFC: ", length(emp_pval_logFC), "/", initLen, "\n")
printAndLog(txt, pipLogFile)
initLen <- length(emp_pval_intraCorr)
emp_pval_intraCorr <- emp_pval_intraCorr[intersectRegions]
txt <- paste0(toupper(script_name), "> ... -> emp. p-val intraCorr: ", length(emp_pval_intraCorr), "/", initLen, "\n")
printAndLog(txt, pipLogFile)
stopifnot(!any(is.na(emp_pval_logFC)))
stopifnot(!any(is.na(emp_pval_intraCorr)))
stopifnot(all(names(emp_pval_logFC) == names(emp_pval_intraCorr)))
################################****************************************************************************************
####################################################### CALCULATE EMP. PVAL INTRA-TAD CORR & WRITE OUTPUT
################################****************************************************************************************
# COMBINE EMPIRICAL P-VALUES
emp_pval_combined <- unlist(sapply(seq_along(intersectRegions), function(x)
stouffer(c(emp_pval_intraCorr[x], emp_pval_logFC[x]), two.tails = twoTailsStouffer)))
names(emp_pval_combined) <- intersectRegions
stopifnot(length(emp_pval_combined) == length(intersectRegions))
save(emp_pval_combined, file= paste0(curr_outFold, "/emp_pval_combined.Rdata"))
cat(paste0("... written: ", curr_outFold, "/emp_pval_combined.Rdata", "\n"))
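# Illustrative sketch only: stouffer() comes from TAD_DE_utils.R (sourced
# above); an unweighted one-sided Stouffer combination of k p-values would
# look roughly like this (hypothetical helper, not the pipeline's own code):
# stouffer_sketch <- function(pvals) {
#   z <- qnorm(pvals, lower.tail = FALSE)  # small p-values -> large z-scores
#   pnorm(sum(z) / sqrt(length(z)), lower.tail = FALSE)
# }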
# NOT DONE IN THIS VERSION !
##***** build and save table
## TAD | meanFC | ratioDown | emp. p-val combined | genes list comma separated
## load emp. p-val logFC
#gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
#gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
#pipeline_geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
#DE_table <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))))
#entrezDT <- read.delim(entrezDT_file, header=T, stringsAsFactors = F)
#entrezDT$entrezID <- as.character(entrezDT$entrezID)
#DE_table <- DE_table[DE_table$genes %in% names(pipeline_geneList),]
#stopifnot(nrow(DE_table) > 0)
#stopifnot(all(DE_table$genes %in% names(pipeline_geneList)))
#DE_entrez <- as.character(unlist(sapply(DE_table$genes, function(x) pipeline_geneList[x])))
#gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% DE_entrez,]
#stopifnot(all(DE_entrez %in% entrezDT$entrezID))
#################################****************************************************************************************
######################################################## BUILD TABLES & WRITE OUTPUT
#################################****************************************************************************************
#obs_logFC <- eval(parse(text = load(paste0(pipOutFold, "/", script3_name, "/all_meanLogFC_TAD.Rdata"))))
#obs_ratioDown <- eval(parse(text = load(paste0(pipOutFold, "/", script8_name, "/all_obs_ratioDown.Rdata"))))
#interReg <- sort(Reduce(intersect, list(names(obs_logFC), names(obs_ratioDown), names(emp_pval_combined))))
#txt <- paste0(toupper(script_name), "> Number of TADs in logFC: ", length(obs_logFC), "\n")
#printAndLog(txt, pipLogFile)
#txt <- paste0(toupper(script_name), "> Number of TADs in ratioDown: ", length(obs_ratioDown), "\n")
#printAndLog(txt, pipLogFile)
#txt <- paste0(toupper(script_name), "> Number of TADs in emp. p-val combined: ", length(emp_pval_combined), "\n")
#printAndLog(txt, pipLogFile)
#txt <- paste0(toupper(script_name), "> Number of TADs in the intersect: ", length(interReg), "\n")
#printAndLog(txt, pipLogFile)
#interReg_empPval <- emp_pval_combined[names(emp_pval_combined) %in% interReg]
#interReg_empPval_sort <- sort(interReg_empPval)
#pvalDT <- foreach(i_reg = 1:length(interReg_empPval_sort), .combine = 'rbind') %do% {
# reg <- names(interReg_empPval_sort)[i_reg]
# reg_genes_entrez <- gene2tadDT$entrezID[gene2tadDT$region == reg]
# reg_genes_symbol <- unlist(sapply(reg_genes_entrez, function(x) entrezDT$symbol[entrezDT$entrezID == x]))
# reg_genes_symbol_list <- paste0(reg_genes_symbol, collapse = ",")
# data.frame(rank_pval = i_reg,
# TAD = reg,
# meanLogFC = obs_logFC[reg],
# ratioDown = obs_ratioDown[reg],
# emp_pval_comb = emp_pval_combined[reg],
# TAD_genes = reg_genes_symbol_list
# )
#}
#write.table(pvalDT, file = paste0(curr_outFold, "/TAD_ratioDown_logFC_empPvalComb.txt"), col.names =T , row.names = F, quote=F, sep="\t")
#cat(paste0("... written: ", paste0(curr_outFold, "/TAD_ratioDown_logFC_empPvalComb.txt"), "\n"))
txt <- paste0(startTime, "\n", Sys.time(), "\n")
printAndLog(txt, pipLogFile)
cat(paste0("*** DONE: ", script_name, "\n"))
#txt <- paste0("!!! WARNING: USE 10000 PERMUTATIONS DATA !!!\n")
#printAndLog(txt, pipLogFile)
#cat(paste0("*** DONE: ", script_name, "\n"))
context("setup_codingclub_session")
library('mockery')
testthat::test_that("check class and format of the input param", {
expect_error(setup_codingclub_session(session_date = 123456),
"session_date must be a string representing a date (YYYYMMDD)",
fixed = TRUE)
expect_error(setup_codingclub_session(session_date = TRUE),
"session_date must be a string representing a date (YYYYMMDD)",
fixed = TRUE)
expect_error(setup_codingclub_session(session_date = "a string"),
"session_date must be a string representing a date (YYYYMMDD)",
fixed = TRUE)
expect_error(setup_codingclub_session(session_date = "2020-04-02"),
"session_date must be a string representing a date (YYYYMMDD)",
fixed = TRUE)
})
testthat::test_that("Check no download starts if user doesn't want", {
# Take a snapshot of the root directory
snapshot <- fileSnapshot("./", md5sum = TRUE)
# To do this, we mock Sys.Date and readline
# 20200528 is a valid coding club session date
# anything except "Y" or "y" would abort download
scs_20200528_n <- setup_codingclub_session
stub(scs_20200528_n, 'Sys.Date', as.Date('2020-05-28'))
stub(scs_20200528_n, 'readline', "n")
# Check that no files or directories were modified (all mtime change flags are FALSE)
expect_true(all(changedFiles(snapshot)$changes[,"mtime"] == FALSE))
})
testthat::test_that("Nothing is changed in directory if no session exists", {
scs_20200802_y <- setup_codingclub_session
stub(scs_20200802_y, 'Sys.Date', as.Date('2020-08-02'))
stub(scs_20200802_y, 'readline', "y")
# Take a snapshot of the root directory
snapshot <- fileSnapshot("./", md5sum = TRUE)
# 20200802 is an invalid coding club session date, 2 warnings returned
warnings_scs_20200802_y <- capture_warnings(scs_20200802_y())
expect_true(length(warnings_scs_20200802_y) == 2)
expect_match(warnings_scs_20200802_y,
"No src files found for session 20200802. Is the date correct?",
all = FALSE,
fixed = TRUE
)
expect_match(warnings_scs_20200802_y,
"No data files found for session 20200802. Is the date correct?",
all = FALSE
)
# Check that no files or directories were modified (all mtime change flags are FALSE)
expect_true(all(changedFiles(snapshot)$changes[,"mtime"] == FALSE))
})
| /tests/testthat/test-setup_codingclub_session.R | permissive | kdmulligan/inborutils | R | false | false | 2,363 | r |
#' @title as.Date.POSIXlt
#'
#' @description Another base function swapped out as a prank: with probability
#' 0.5 it returns a random rap line instead of the converted date.
#'
#' @param params Object passed on to \code{base::as.Date.POSIXlt}.
#'
#' @export
as.Date.POSIXlt<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
  if(runif(1, 0, 1) < 0.5){
    rapek
  } else {
    base::as.Date.POSIXlt(params)
  }
}
| /R/as.Date.POSIXlt.R | no_license | granatb/RapeR | R | false | false | 685 | r |
\name{CTSgetR}
\alias{CTSgetR}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
R interface for the Chemical Translation System
}
\description{
Tools to translate identifiers between > 200 of the most common biological databases including: Chemical Name, InChIKey, PubChem CID, ChemSpider, BioCyc, ChEBI, CAS, HMDB, KEGG and LipidMAPS.
}
\usage{
CTSgetR(id,from,to,parallel,async,limit.values,server)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{id}{
A vector of metabolite identifiers or names to translate.
}
\item{from}{
Database name describing \code{id}. See \code{\link{CTS.options}}.
}
\item{to}{
Database name to translate \code{id} to. See \code{\link{CTS.options}}.
}
\item{parallel}{
Parse results in parallel (defaults to \code{FALSE}).
}
\item{async}{
Carry out web query asynchronously (defaults to \code{FALSE}).
}
\item{limit.values}{
Return a single value or a comma separated string of multiple values (defaults to \code{FALSE}).
}
\item{server}{
Web address for the Chemical Translation System web services (defaults to http://cts.fiehnlab.ucdavis.edu/service/convert).
}
}
\details{
One or many metabolite identifiers (\code{id}) of a single database type (\code{from}) are translated to a single database identifier (\code{to}). For one to many translations see \code{\link[CTSgetR]{multi.CTSgetR}}.
}
\value{
A 2 column data frame containing from and to translated values. Values not found in the database are returned as "error".
}
\references{
http://cts.fiehnlab.ucdavis.edu/
}
\author{
Dmitry Grapov
}
\note{
http://cts.fiehnlab.ucdavis.edu/
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{multi.CTSgetR}}, \code{\link{CTS.options}}
}
\examples{
#translate between KEGG and PubChem CIDs
id<-c("C15973","C00026","C05381","C15972","C00091","C00042","C05379","C00311","C00036","C00024","C00149","C00417","C00158","C00022","C05125","C16254","C00122","C16255","C00074")
from<-"KEGG"
to<-"PubChem CID"
CTSgetR(id,from,to)
#asynchronous query of CTS, for large queries
#use \code{async.limit} to not overwhelm the server
CTSgetR(id,from,to,async=TRUE,async.limit=100)
#translate from InchiKey to chemical name and allow multiple return values
id <- c("QNAYBMKLOCPYGJ-REOHCLBHSA-N")
from <- "InChIKey"
to <- "Chemical Name"
CTSgetR(id,from,to,limit.values=FALSE)
}
| /man/CTS.getR.Rd | no_license | huipan1973/CTSgetR | R | false | true | 2,402 | rd |
#setwd("/Applications/XAMPP/xamppfiles/htdocs/Softwarepraktikum/jquery/upload/30-03-17_04-16-01/input")
args <- commandArgs(TRUE)
N <- args[1]  # not used below
P <- args[2]  # input directory with the CEL files
O <- args[3]  # not used below
setwd(P)
library(Biobase)
library(BiocGenerics)
library(BiocInstaller)
library(cluster)
library(genefilter)
library(affy)
library(affycomp)
library(affydata)
library(affyio)
library(simpleaffy)
library(sm)
library(BH)
library(bioDist)
library(MVA)
library(qcc)
#library(made4)
library(ade4)
library(NMF)
library(hugene20sttranscriptcluster.db)
Data <- ReadAffy()
# MAS5 normalisation; expression values are log2-transformed below
mas5 <- mas5(Data)
mas <- log2(exprs(mas5))
setwd("..")
setwd("output")
png(filename="masheatpearson.png")
aheatmap(cor(mas, method = "pearson")) # with legend
title(main="Heatmap Pearson")
dev.off()
| /myr7mas.R | no_license | juliustem/softwarepraktikum | R | false | false | 747 | r |
# Base data for all data sets --------------------------------------------------
library(S4Vectors)
df_Base <- DataFrame(
BiocVersion = "3.13",
SourceVersion = NA,
Coordinate_1_based = TRUE,
Species = "Homo sapiens",
TaxonomyId = "9606",
SourceVersion = Sys.time(),
Genome = NA,
SourceUrl = "https://dx.doi.org/10.1038/ncomms7342",
DataProvider = NA,
Maintainer = "Leo Lahti <leo.lahti@iki.fi>"
)
df <- rbind(
cbind(df_Base,
DataFrame(Title = "O'Keefe diet swap microbiome counts",
Description = paste0("Count matrix for the O'Keefe diet swap microbiome dataset"),
SourceType = "CSV",
RDataClass = "matrix",
DispatchClass = "Rds",
RDataPath = "microbiomeDataSets/okeefe-ds/counts.rds",
Tags = NA)),
cbind(df_Base,
DataFrame(Title = "O'Keefe diet swap microbiome row data",
Description = paste0("Row data for the O'Keefe diet swap microbiome dataset"),
SourceType = "CSV",
RDataClass = "DFrame",
DispatchClass = "Rds",
RDataPath = "microbiomeDataSets/okeefe-ds/rowdata.rds",
Tags = NA)),
cbind(df_Base,
DataFrame(Title = "O'Keefe diet swap sample data",
Description = paste0("Sample data for the O'Keefe diet swap microbiome dataset"),
SourceType = "CSV",
RDataClass = "DFrame",
DispatchClass = "Rds",
RDataPath = "microbiomeDataSets/okeefe-ds/coldata.rds",
Tags = NA))
)
df$Tags <- paste(df$Tags[!is.na(df$Tags)],"Microbiome",collapse = ":",sep="")
write.csv(df, file = "inst/extdata/metadata-okeefe-ds.csv", row.names = FALSE)
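# Quick sanity check (illustrative; assumes the script is run from the package root):
# meta <- read.csv("inst/extdata/metadata-okeefe-ds.csv")
# stopifnot(nrow(meta) == 3)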
| /inst/scripts/make-okeefe-ds-metadata.R | no_license | microsud/microbiomeDataSets | R | false | false | 1,875 | r |
# Two Monte Carlo estimators of I = integral_0^Inf x^2 sin(pi*x) exp(-x/2) dx.
# fz: integrand after the substitution y = 1/(x + 1), i.e. x = 1/y - 1, so that
# I = integral_0^1 (1/y - 1)^2 sin(pi*(1/y - 1)) exp(-(1/y - 1)/2) * (1/y^2) dy;
# the negative sign of dx/dy is absorbed by flipping the integration limits.
fz = function(y) {(1/y-1)^2*sin(pi*(1/y-1))*exp(-(1/y-1)/2)*(1/y^2)}
# f: I = E[f(X)] for X ~ Exp(rate = 1/2), since the density (1/2)exp(-x/2)
# cancels the factor 2 in f.
f = function(x) {2*x^2*sin(pi*x)}
temp1 = vector('numeric', 1000)
temp2 = vector('numeric', 1000)
for (i in 1:1000) {
  u = runif(10000)
  x = -2*log(u)     # Exp(1/2) draws
  y = -2*log(1-u)   # antithetic partners of x
  temp2[i] = (mean(f(x)) + mean(f(y)))/2  # antithetic-variates estimator
  uz = runif(10000)
  temp1[i] = mean(fz(uz))                 # plain uniform sampling on (0,1)
}
var(temp1)
var(temp2)
mean(temp1)
mean(temp2)
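# Numerical cross-check of the common target (illustrative): truncating at 50,
# where exp(-x/2) makes the tail negligible.
integrate(function(x) x^2 * sin(pi * x) * exp(-x / 2), 0, 50)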
| /A1/problem5.R | permissive | h225yang/Computational-Inference | R | false | false | 405 | r |
mytranspose <- function(x) {
  # NULL input: nothing to transpose
  if(is.null(x)){
    return(NULL)
  }
  z <- as.matrix(x)
  # degenerate input with zero rows
  if(nrow(z) == 0){
    return(0)
  }
  # allocate the result and copy element by element
  y <- matrix(1, nrow = ncol(z), ncol = nrow(z))
  for(i in 1:nrow(z)) {
    for(j in 1:ncol(z)) {
      y[j,i] <- z[i,j]
    }
  }
  return(y)
}
| /mytranspose.R | no_license | Emotee/transpose | R | false | false | 305 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme.R
\name{scale_fill_fsu}
\alias{scale_fill_fsu}
\title{Function to provide color blind-friendly fills}
\usage{
scale_fill_fsu(..., palette = "fsu_dark")
}
\arguments{
\item{palette}{Name of color palette to use (currently, either "cb_grey" or "cb_black")}
}
\description{
Function to provide color blind-friendly fills
}
| /man/scale_fill_fsu.Rd | no_license | ameliabedelia/amelia | R | false | true | 404 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme.R
\name{scale_fill_fsu}
\alias{scale_fill_fsu}
\title{Function to provide color blind-friendly fills}
\usage{
scale_fill_fsu(..., palette = "fsu_dark")
}
\arguments{
\item{palette}{Name of color palette to use (currently, either "cb_grey" or "cb_black")}
}
\description{
Function to provide color blind-friendly fills
}
|
# TODO
# * Get pM and pF for mup_rbya - ideally this should be returned by muppet in
# resultsbyyearandage
# * mup_rbx - RETRO
# * Question what to do about ssbW vs sW, the latter is what sam returns
# for SSB weights.
# check if one can not do something like this, or better still include
# this type of thing in a xxxx.sh file within the package
# system("nohup ./muppet -nox -ind icecod.dat.opt -mcmc 500000 -mcscale -mcsave 1000 &")
# #' @title muppet_rbya
# #'
# #' @description reads muppet year-and-age input and output
# #'
# #' @param path A directory path
# #' @param scale A scaler (default 1)
# #' @param fleets Name of fleets, e.g. if only fall survey tuning, use fleets = "2".
# If missing (default) then use sequential numbers.
# #' @param assyear Assessment year, if missing (default) will use the year after the last
# #' catch at age input
# #' @param run Name of the run, if missing (default) will use the directory name
# #' @param wide A boolean indicating if the returned table is wide (default TRUE). If FALSE variables are
# #' returned within column 'var' and values in column 'val' (not active)
# #'
# #' @return A tibble
mup_rbya <- function(path, scale = 1, fleets, assyear, run, wide = TRUE) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(!file.exists(file.path(path, "resultsbyyearandage"))) {
stop(paste0("File: '", file.path(path, "resultsbyyearandage"), "' does not exist"))
}
if(missing(run)) run <- basename(path)
# rbya ---
rbya <-
readr::read_tsv(file.path(path, "resultsbyyearandage"),
na = c("-1", "0"),
show_col_types = FALSE) %>%
dplyr::rename(oC = ObsCno,
pC = CalcCno,
rC = CatchDiff)
# check length of fleets vs nfleets
if(!missing(fleets)) {
nfleets <- (ncol(rbya) - 13) / 3
if(nfleets != length(fleets)) {
stop(paste0("Named fleets (", paste(fleets, collapse = " ") , ") not the same as number of fleets (", nfleets, ")" ))
}
}
if(missing(fleets)) {
nfleets <- (ncol(rbya) - 13) / 3
fleets <- as.character(1:nfleets)
}
txty <- paste(c("pU","oU", "rU"),c(matrix(fleets,3,length(fleets),byrow=T)),sep="")
names(rbya)[14:ncol(rbya)] <- txty
if(missing(assyear)) {
assyear <- rbya %>% dplyr::filter(!is.na(oC)) %>% dplyr::pull(year) %>% max()
assyear <- assyear + 1
}
#if (ncol(rbya) != 19) {
# rbya$pU2 <- NA
# rbya$oU2 <- NA
# rbya$rU2 <- NA
#}
rbya <-
rbya %>%
dplyr::select(year, age,
n = N,
f = F,
oC,
pC,
rC,
cW = CatchWeights,
ssbW = SSBWeights,
sW = StockWeights,
mat = StockMaturity,
m = M,
z = Z,
dplyr::everything()
) %>%
dplyr::mutate(oC = oC / scale,
pC = pC / scale,
cW = cW / scale,
sW = sW / scale,
ssbW = ssbW / scale,
n = n / scale,
run = run,
model = "mup",
assyear = assyear,
yc = year - age)
# if(!wide) {
# rbya <-
# rbya %>%
# dplyr::select(-c(pC, rC, oU1, oU2, pU1, pU2, rU1, rU2)) %>%
# tidyr::gather(var, val, -c(year, age, run, model, assyear, yc)) %>%
# dplyr::select(year, age, var, val, dplyr::everything())
# }
return(rbya)
}
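# Illustrative call (the run directory "run01" is hypothetical and must contain
# a muppet 'resultsbyyearandage' file):
# rbya <- mup_rbya("run01", scale = 1000, fleets = c("1", "2"))
# rbya %>% dplyr::filter(age == 3) %>% dplyr::select(year, n, f, oC, pC)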
mup_opr <- function(path, scale = 1, fleets, assyear, run, log = TRUE) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
d <-
mup_rbya(path, scale = scale, fleets = fleets, assyear = assyear, run = run) %>%
dplyr::rename(.run = run)
lh <- function(x, var, what) {
x %>%
dplyr::select(year, age, dplyr::starts_with(c(what)), .run, assyear) %>%
tidyr::gather(fleet, {{ var }}, -c(year, age, .run, assyear)) %>%
dplyr::mutate(fleet = stringr::str_sub(fleet, 2),
fleet = ifelse(fleet == "C", "catch", fleet))
}
d <-
dplyr::full_join(d %>% lh(o, "o"),
d %>% lh(p, "p"),
by = c("year", "age", ".run", "assyear", "fleet")) %>%
dplyr::full_join(d %>% lh(r, "r"),
by = c("year", "age", ".run", "assyear", "fleet"))
d2 <-
mup_rby(path, scale = scale, fleets = fleets, assyear = assyear, run = run) %>%
dplyr::rename(.run = run)
lh2 <- function(x, var, what) {
x %>%
dplyr::select(year, dplyr::starts_with(c(what)), .run, assyear) %>%
tidyr::gather(fleet, {{ var }}, -c(year, .run, assyear)) %>%
dplyr::mutate(fleet = stringr::str_sub(fleet, 2),
fleet = ifelse(fleet == "Y", "catch", fleet))
}
d2 <-
dplyr::full_join(d2 %>% lh2(o, "o"),
d2 %>% lh2(p, "p"),
by = c("year", ".run", "assyear", "fleet")) %>%
dplyr::mutate(r = log(o/p))
d <- dplyr::bind_rows(d, d2)
if(log) {
d <-
d %>%
dplyr::mutate(o = log(o),
p = log(p))
}
d <- d %>% dplyr::rename(run = .run)
return(d)
}
mup_rby <- function(path, scale = 1, fleets, assyear, run) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(!file.exists(file.path(path, "resultsbyyear"))) {
stop(paste0("File: '", file.path(path, "resultsbyyear"), "' does not exist"))
}
if(missing(run)) run <- basename(path)
rby <- readr::read_tsv(file.path(path, "resultsbyyear"),
na = c("-1", "0"),
show_col_types = FALSE) %>%
janitor::remove_empty(which = "cols")
# check length of fleets vs nfleets
if(!missing(fleets)) {
nfleets <- (ncol(rby) - 14) / 2
if(nfleets != length(fleets)) {
stop(paste0("Named fleets (", paste(fleets, collapse = " ") , ") not the same as number of fleets (", nfleets, ")" ))
}
}
if(missing(fleets)) {
nfleets <- (ncol(rby) - 14) / 2
fleets <- as.character(1:nfleets)
}
txty <- paste(c("pU","oU"),c(matrix(fleets,2,length(fleets),byrow=T)),sep="")
names(rby)[15:ncol(rby)] <- txty
rby <-
rby %>%
dplyr::rename(r = Recruitment,
bio = RefBio2,
ssb = Spawningstock,
fbar = RefF,
pY = CalcCatchIn1000tons,
oY = CatchIn1000tons,
#oU1 = ObsSurveyBiomass1,
#pU1 = CalcSurveyBiomass1,
#oU2 = ObsSurveyBiomass2,
#pU2 = CalcSurveyBiomass2,
bio1 = RefBio1,
bio2 = CbioR,
eggp = Eggproduction) %>%
dplyr::mutate(y = ifelse(is.na(oY), pY, oY),
hr = y/bio,
hr2 = (1/3 * y + 3/4 * dplyr::lead(y)) / bio,
r = r / scale) %>%
dplyr::select(year:fbar, hr, pY, oY, dplyr::everything()) %>%
dplyr::select(-y)
if(missing(assyear)) {
assyear <- rby %>% dplyr::filter(!is.na(oY)) %>% dplyr::pull(year) %>% max()
assyear <- assyear + 1
}
rby <-
rby %>%
dplyr::mutate(run = run,
model = "mup",
assyear = assyear)
return(rby)
}
# note: assyear not extractable by default
mup_rba <- function(path, fleets, run) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(!file.exists(file.path(path, "resultsbyage"))) {
stop(paste0("File: '", file.path(path, "resultsbyage"), "' does not exist"))
}
rba <-
readr::read_tsv(file.path(path, "resultsbyage"),
na = c("-1", "0"),
show_col_types = FALSE)
# check length of fleets vs nfleets
if(!missing(fleets)) {
nfleets <- (ncol(rba) - 4) / 3
if(nfleets != length(fleets)) {
stop(paste0("Named fleets (", paste(fleets, collapse = " ") , ") not the same as number of fleets (", nfleets, ")" ))
}
}
if(missing(fleets)) {
nfleets <- (ncol(rba) - 4) / 3
fleets <- as.character(1:nfleets)
}
txty <- paste(c("sigmaU", "qU", "pU"),c(matrix(fleets,3, length(fleets),byrow=T)),sep="")
names(rba)[5:ncol(rba)] <- txty
if(missing(run)) run <- basename(path)
rba <-
rba %>%
dplyr::mutate(run = run,
model = "mup")
return(rba)
}
mup_std <- function(path) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(file.exists(paste0(path, "/muppet.par"))) {
d <-
utils::read.table(paste0(path, "/muppet.std"), header = TRUE) %>%
tibble::as_tibble()
return(d)
}
# NOTE: function returns NULL if muppet.par does not exist
}
mup_par <- function(path, run) {
  if(file.exists(paste0(path, "/muppet.par"))) {
    fil <- paste0(path, "/muppet.par")
  } else {
    stop(paste0("File: '", file.path(path, "muppet.par"), "' does not exist"))
  }
x <- readr::read_lines(fil)
head <- x[1]
head <- stringr::str_split(head, " ")[[1]]
head <- head[c(6, 11, 17)] %>% stringr::str_trim() %>% as.numeric()
names(head) <- c("npar", "objective", "max_gradient")
idx <- grep("#", x)
N <- length(idx)
res <- list()
for(i in 1:N) {
if(i == 1) {
res[[i]] <- head
} else {
if(i < N) {
i1 <- idx[i] + 1
i2 <- idx[i + 1] - 1
res[[i]] <-
x[i1:i2] %>%
stringr::str_trim() %>%
stringr::str_split(" ", simplify = TRUE)
nr <- nrow(res[[i]])
if(nr > 1) {
res[[i]] <-
res[[i]] %>%
as.numeric() %>%
matrix(nrow = nr)
} else {
res[[i]] <-
res[[i]] %>%
as.numeric()
}
} else {
res[[i]] <- x[N] %>% as.numeric()
}
}
}
names(res) <-
c("obj", x[idx[-1]]) %>%
stringr::str_replace_all("#", "") %>%
stringr::str_replace(":", "") %>%
stringr::str_replace("\\[", "") %>%
stringr::str_replace("\\]", "") %>%
stringr::str_trim()
return(res)
}
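# Illustrative call (hypothetical run directory containing a 'muppet.par' file):
# p <- mup_par("run01")
# p$obj  # named vector: npar, objective, max_gradient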
#' Reads ADMB hst files
#'
#' Reads output of ADMB MCMC report containing estimated distributions. The .hst
#' report contains information about the MCMC analysis: the sample sizes
#' (specied with the -mcmc command-line option), the step size scaling
#' factor, the step sizes, and information about the posterior probability
#' distribution (e.g., the mean, standard deviation, and lower and upper
#' bounds). For each simulated parameter, a range of values (with step sizes
#' reported in the "step sizes" section of the .hst file) and their simulated
#' posterior probabilities is reported. Plotting the first column
#' (parameter values) on the x-axis and the second column (simulated
#' probabilities) on the y-axis can be a convenient way to make a visualization
#' of the posterior probability distribution.
#'
#' @param path Name of the \emph{directory} that contains the result.
#' @param txt The parameters to extract
#' @param startyear Assessment start year
#' @param names A character vector of length two replacing the default column
#' names \emph{c('value','prob')} (currently not applied by the function body)
#' @param negative.allowed Flag, default is FALSE
#'
#' @return A tibble with columns \emph{year}, \emph{value} and \emph{prob},
#' one row per simulated parameter value.
mup_hst <- function (path, txt, startyear = 1955, names, negative.allowed = FALSE) {
file <- paste(path, "muppet.hst", sep = "/")
tmpskra <- tempfile("bayes")
on.exit(unlink(tmpskra))
tmp <- scan(file, what = character(), sep = "\n", quiet = TRUE)
tmp1 <- matrix(tmp, length(tmp), 1)
utils::write.table(tmp1, file = tmpskra, sep = "", col.names = F,
row.names = F, quote = F)
i <- grep(txt, tmp)
j <- grep("#", tmp)
j <- j[j > i[length(i)]]
if (length(j) > 0) {
j <- j[1] - 1
} else {
j <- length(tmp)
}
i1 <- i[1:(length(i))] + 1
i2 <- c(i[2:length(i)] - 1, j)
if (length(i) == 1) i2 <- j
Result <- list()
  for (i in 1:length(i1)) {
    #print(i)
    x <- getlineswin(tmpskra, i1[i], i2[i])
    names(x) <- c('value','prob')
    # drop negative parameter values unless explicitly allowed
    if (!negative.allowed) x <- x[x$value >= 0, ]
    Result[[i]] <- x
    #Result[[i]] <- calcprofile(Result[[i]], negative.allowed = negative.allowed)
  }
if (!missing(startyear)) {
names(Result) <- paste((startyear:(startyear + length(Result) - 1)),
sep = "")
#attributes(Result)$years <- startyear:(startyear + length(Result) - 1)
}
# if (!missing(names)) names(Result) <- names
# if (length(Result) == 1) Result <- Result[[1]]
Result <-
dplyr::bind_rows(Result, .id = "year") %>%
tibble::as_tibble() %>%
dplyr::mutate(year = as.integer(year))
return(Result)
}
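# Illustrative call (hypothetical run directory containing a 'muppet.hst' file):
# hst <- mup_hst("run01", txt = "SSB", startyear = 1955)
# with(subset(hst, year == 1990), plot(value, prob, type = "l"))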
getlineswin <- function (file, line1, line2) {
tmpskra <- tempfile("bayes")
on.exit(unlink(tmpskra))
#if (missing(nlines)) nlines <- length(count.fields(file, sep = "\n"))
x <- scan(file, sep = "\t", what = character(), quiet = TRUE)
x <- matrix(x, length(x), 1)
x <- x[line1:line2, ]
utils::write.table(x, file = tmpskra, sep = "\n", col.names = F,
row.names = F, quote = F)
return(utils::read.table(tmpskra))
}
#' @title read muppet output
#'
#' @description reads muppet results
#'
#' @param path A directory path
#' @param scale A scaler (default 1)
#' @param fleets Name of fleets, passed on to the underlying readers; if missing (default) sequential numbers are used.
#' @param assyear Assessment year, if missing (default) will use the year after the last
#' catch at age input
#' @param run Name of the run, if missing (default) will use the directory name
#' @param wide A boolean indicating if the returned table is wide (default TRUE). If FALSE variables are
#' returned within column 'var' and values in column 'val'.
#'
#' @return A tibble
#'
#' @export
#'
mup_rbx <- function(path, scale = 1, fleets, assyear, run, wide = TRUE) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(missing(run)) run <- basename(path)
if(missing(scale)) scale <- 1
list(rby = mup_rby(path, scale, fleets = fleets, assyear = assyear, run = run),
rbya = mup_rbya(path, scale, fleets = fleets, assyear = assyear, run, wide),
rba = mup_rba(path, fleets = fleets, run = run),
opr = mup_opr(path, scale, assyear = assyear, run = run),
std = mup_std(path),
par = mup_par(path))
}
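# Illustrative call (hypothetical run directory; fleet names follow the retro
# example below):
# rbx <- mup_rbx("run01", scale = 1000, fleets = c("3", "1", "2", "4"))
# rbx$rby %>% dplyr::select(year, ssb, fbar, hr)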
# HOSKI ------------------------------------------------------------------------
# Here is an example of the new read_separ (read_separ1) for now,
# how it is used and what it returns.
# res <- read_separ1(".",".",fleets=c("3","1","2","4"),assYear=year+1)
#
# Fleet 1 is always the March survey, 2 is the autumn survey, 3 is the March survey
# for ages 1-2 and 4 the autumn survey for ages 1-2; that is because of this logq1 business.
#
# We always keep these names, so in an autumn-survey-only run there is only oU2 and pU2,
# not oU1 and pU1.
#
# If you want this otherwise, the default is to number the fleets in increasing order.
# read_separ1 <- function (path, run, rName = NA, mName = NA, calcSurBio = F,
# ggFactor = T, Scale = 1000, assYear = NA, retroY = NA,fleets)
# {
# if(file.exists("muppet.par")){ # get AIC could look for as.numeric=T
# dat <- scan("muppet.par",what=character(),sep=" ",quiet=T)[1:12]
# aicinfo <- as.numeric(c(dat[6],dat[11]))
# names(aicinfo) <- c("npar","objective")
#
# dat <- scan("muppet.par",what=character(),sep="\n",quiet=T)
# i <- grep("# surveybiopow",dat)
# surveybiopow <- as.numeric(dat[i+1])
# }
#
#
#
# if(missing(fleets)){ # to have some default
# # Test number of columns
# if (is.na(retroY))
# rby <- read.table(paste(path, run, "resultsbyyear", sep = "/"),
# header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY))
# rby <- read.table(paste(paste(path, run, "resultsbyyear",
# sep = "/"), retroY, sep = ""),
# header = T, na.strings = c("-1", "0"))
# nfleets <- (ncol(rby)-14)/2
# fleets <- as.character(1:nfleets)
# }
# txty <- paste(c("oU","pU"),c(matrix(fleets,2,length(fleets),byrow=T)),sep="")
# cnRby <- c("year", "r", "n3", "n6", "bioF", "bio", "bio1",
# "ssb", "ssb2", "fbar", "hr", "oY", "pY",txty,"run", "model")
# txtya <- paste(c("oU","pU","rU"),c(matrix(fleets,3,length(fleets),byrow=T)),sep="")
# cnRbya <- c("year", "age", "oC", "cW", "sW", "ssbW", "mat",
# "n", "z", "f", "m", "pC", "rC",txtya)
# txta<- paste(c("cvU","qU","pU"),c(matrix(fleets,3,length(fleets),byrow=T)),sep="")
#
# cnRba <- c("age", "sel", "pSel", "sigma", txta, "run", "model")
# if (is.na(retroY))
# rby <- read.table(paste(path, run, "resultsbyyear", sep = "/"),
# header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY)) {
# rby <- read.table(paste(paste(path, run, "resultsbyyear",
# sep = "/"), retroY, sep = ""), header = T, na.strings = c("-1",
# "0"))
# }
# n <- nrow(rby)
# if (ncol(rby) != 18) {
# rby$pU2 <- rep(NA, n)
# rby$oU2 <- rep(NA, n)
# }
# names(rby) <- c("year", "fbar", "pY", "oY", "ssb", "ssb2",
# "bioF", "bio1", "bio", "preR", "r", "n1", "n3", "n6",
# txty)
# if (ggFactor)
# rby$r <- rby$r * exp(-0.4)
# rby$hr <- ifelse(!is.na(rby$oY), rby$oY, rby$pY)/rby$bio
# rby$run <- rName
# rby$model <- mName
# rby <- rby[, cnRby]
# rby$r <- rby$r/Scale
# rby$n3 <- rby$n3/Scale
# rby$n6 <- rby$n6/Scale
# if (is.na(retroY))
# rbya <- read.table(paste(path, run, "resultsbyyearandage",
# sep = "/"), header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY)) {
# rbya <- read.table(paste(paste(path, run, "resultsbyyearandage",
# sep = "/"), retroY, sep = ""), header = T, na.strings = c("-1",
# "0"))
# }
# n <- nrow(rby)
# names(rbya) <- c("year", "age", "n", "z", "sW", "m", "f",
# "pC", "cW", "ssbW", "mat", "oC", "rC",txtya)
# if (ggFactor)
# rbya$n <- ifelse(rbya$age %in% 1, rbya$n * exp(-0.4),
# rbya$n)
# if (ggFactor)
# rbya$n <- ifelse(rbya$age %in% 2, rbya$n * exp(-0.2),
# rbya$n)
# rbya <- rbya[, cnRbya]
# rbya$run <- rName
# rbya$model <- mName
# rbya$oC <- rbya$oC/Scale
# rbya$cW <- rbya$cW/Scale
# rbya$sW <- rbya$sW/Scale
# rbya$n <- rbya$n/Scale
# rbya$pC <- rbya$pC/Scale
# if (is.na(retroY))
# rba <- read.table(paste(path, run, "resultsbyage", sep = "/"),
# header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY)) {
# rba <- read.table(paste(paste(path, run, "resultsbyage",
# sep = "/"), retroY, sep = ""), header = T, na.strings = c("-1",
# "0"))
# }
# n <- nrow(rba)
# names(rba) <- c("age", "sel", "pSel", "sigma", txta)
# rba$run <- rName
# rba$model <- mName
# rba <- rba[, cnRba]
# if (!is.na(retroY)) {
# print(retroY)
# rby$assYear <- as.numeric(retroY) + 1
# rbya$assYear <- as.numeric(retroY) + 1
# rba$assYear <- as.numeric(retroY) + 1
# }
# else {
# rby$assYear <- assYear
# rbya$assYear <- assYear
# rba$assYear <- assYear
# }
# if(exists("surveybiopow")){
# names(surveybiopow) <- fleets
# return(list(rby = rby, rbya = rbya, rba = rba,aicinfo=aicinfo,surveybiopow=surveybiopow))
# }
# else
# return(list(rby = rby, rbya = rbya, rba = rba)) # no muppet.par
# }
#
# # retro example ----------------------------------------------------------------
# source("readSepar.r")
# library(stringr)
# library(tidyverse)
#
# Changepinfile <- function(file="muppet.par",txt = c("# lnRecr:","# lnEffort:"),outputfile="muppet.pin") {
# dat <- scan(file,what=character(),sep="\n",quiet=TRUE)
# for(k in 1:length(txt)){
# j <- grep(txt[k],dat)
# if(length(j) > 0) {
# k1 <- unlist(str_locate_all(dat[j+1]," "))
# dat[j+1] <- substring(dat[j+1],1,k1[length(k1)]-1)
# }
# }
# write.table(dat,file=outputfile,row.names=FALSE,col.names=FALSE,sep="\n",quote=F)
# }
#
# Replace <- function(txt,parameter,pattern){
# if(!missing(parameter)){
# i <- grep(pattern,txt)
# if(!any(i)){
# print(paste(" ",pattern," ","does not exist"))
# break()
# }
# txt[i] <- paste(as.character(parameter),"\t",pattern)
# }
# return(txt)
# }
# rby <- rbya <- rba <- aicinfo <- surveybiopow <- list()
# PIN <- TRUE
#
# inputfile <- "icecod.dat.opt.final"
# for(year in c(2019:2001)){
# print(year)
# assyear <- year+1
# txt <- readLines(inputfile)
# txt <- Replace(txt,year,'# Last opt year')
# txt <- Replace(txt,min(c(year+2,2019)),'# Last data year')
#
# if(PIN && (year != 2019))Changepinfile("muppet.par",txt = c("# lnRecr:","# lnEffort:"),outputfile="muppet.pin")
# write.table(txt,file="icecod.dat.opt",sep="\n",row.names=F,quote=F,col.names=F)
# system("muppet -nox -ind icecod.dat.opt > /dev/null")
# res <- read_separ1(".",".",fleets=c("3","1","2","4"),assYear=year+1)
# rby[[as.character(assyear)]] <- res$rby
# rbya[[as.character(assyear)]] <- res$rbya
# rba[[as.character(assyear)]] <- res$rba
# aicinfo[[as.character(assyear)]] <- res$aicinfo
# surveybiopow[[as.character(assyear)]] <- res$surveybiopow
#
#
# # Those mv commands are not really needed, but they at least remove the files.
# system(paste("mv resultsbyyear tmpresults/resultsbyyear",year,sep=""))
# system(paste("mv resultsbyyearandage tmpresults/resultsbyyearandage",year,sep=""))
# system(paste("mv resultsbyage tmpresults/resultsbyage",year,sep=""))
# }
#
# rby <- bind_rows(rby)
# rbya <- bind_rows(rbya)
# rba <- bind_rows(rba)
# aicinfo <- bind_rows(aicinfo)
# aicinfo$assYear <- unique(rby$assYear)
# surveybiopow <- bind_rows(surveybiopow)
# surveybiopow$assYear <- unique(rby$assYear)
#
# save(list=c("rby","rbya","rba","aicinfo","surveybiopow"),file="retro.rdata")
#
| /R/muppet.R | no_license | einarhjorleifsson/fishvice | R | false | false | 22,398 | r | # TODO
# * Get pM and pF for mup_rbya - ideally this should be returned by muppet in
# resultsbyyearandage
# * mup_rbx - RETRO
# * Question what to do about ssbW vs sW, the latter is what sam returns
# for SSB weights.
# check if one can not do something like this, or better still include
# this type of thing in a xxxx.sh file within the package
# system("nohup ./muppet -nox -ind icecod.dat.opt -mcmc 500000 -mcscale -mcsave 1000 &")
# #' @title muppet_rbya
# #'
# #' @description reads muppet year-and-age input and output
# #'
# #' @param path A directory path
# #' @param scale A scaler (default 1)
# #' @param fleets Name of fleets, e.g. if only fall survey tuning, use fleets = "2".
# If missing (default) then use sequential numbers.
# #' @param assyear Assessment year, if missing (default) will use the year after the last
# #' catch at age input
# #' @param run Name of the run, if missing (default) will use the directory name
# #' @param wide A boolean indicating if returned table wide (default TRUE). If FALSE variable are
# #' return within column 'var' and values in column 'val' (not active)
# #'
# #' @return A tibble
mup_rbya <- function(path, scale = 1, fleets, assyear, run, wide = TRUE) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(!file.exists(file.path(path, "resultsbyyearandage"))) {
stop(paste0("File: '", file.path(path, "resultsbyyearandage"), "' does not exist"))
}
if(missing(run)) run <- basename(path)
# rbya ---
rbya <-
readr::read_tsv(file.path(path, "resultsbyyearandage"),
na = c("-1", "0"),
show_col_types = FALSE) %>%
dplyr::rename(oC = ObsCno,
pC = CalcCno,
rC = CatchDiff)
# check length of fleets vs nfleets
if(!missing(fleets)) {
nfleets <- (ncol(rbya) - 13) / 3
if(nfleets != length(fleets)) {
stop(paste0("Named fleets (", paste(fleets, collapse = " ") , ") not the same as number of fleets (", nfleets, ")" ))
}
}
if(missing(fleets)) {
nfleets <- (ncol(rbya) - 13) / 3
fleets <- as.character(1:nfleets)
}
txty <- paste(c("pU","oU", "rU"),c(matrix(fleets,3,length(fleets),byrow=T)),sep="")
names(rbya)[14:ncol(rbya)] <- txty
if(missing(assyear)) {
assyear <- rbya %>% dplyr::filter(!is.na(oC)) %>% dplyr::pull(year) %>% max()
assyear <- assyear + 1
}
#if (ncol(rbya) != 19) {
# rbya$pU2 <- NA
# rbya$oU2 <- NA
# rbya$rU2 <- NA
#}
rbya <-
rbya %>%
dplyr::select(year, age,
n = N,
f = F,
oC,
pC,
rC,
cW = CatchWeights,
ssbW = SSBWeights,
sW = StockWeights,
mat = StockMaturity,
m = M,
z = Z,
dplyr::everything()
) %>%
dplyr::mutate(oC = oC / scale,
pC = pC / scale,
cW = cW / scale,
sW = sW / scale,
ssbW = ssbW / scale,
n = n / scale,
run = run,
model = "mup",
assyear = assyear,
yc = year - age)
# if(!wide) {
# rbya <-
# rbya %>%
# dplyr::select(-c(pC, rC, oU1, oU2, pU1, pU2, rU1, rU2)) %>%
# tidyr::gather(var, val, -c(year, age, run, model, assyear, yc)) %>%
# dplyr::select(year, age, var, val, dplyr::everything())
# }
return(rbya)
}
mup_opr <- function(path, scale = 1, fleets, assyear, run, log = TRUE) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
d <-
mup_rbya(path, scale = scale, fleets = fleets, assyear = assyear, run = run) %>%
dplyr::rename(.run = run)
lh <- function(x, var, what) {
x %>%
dplyr::select(year, age, dplyr::starts_with(c(what)), .run, assyear) %>%
tidyr::gather(fleet, {{ var }}, -c(year, age, .run, assyear)) %>%
dplyr::mutate(fleet = stringr::str_sub(fleet, 2),
fleet = ifelse(fleet == "C", "catch", fleet))
}
d <-
dplyr::full_join(d %>% lh(o, "o"),
d %>% lh(p, "p"),
by = c("year", "age", ".run", "assyear", "fleet")) %>%
dplyr::full_join(d %>% lh(r, "r"),
by = c("year", "age", ".run", "assyear", "fleet"))
d2 <-
mup_rby(path, scale = scale, fleets = fleets, assyear = assyear, run = run) %>%
dplyr::rename(.run = run)
lh2 <- function(x, var, what) {
x %>%
dplyr::select(year, dplyr::starts_with(c(what)), .run, assyear) %>%
tidyr::gather(fleet, {{ var }}, -c(year, .run, assyear)) %>%
dplyr::mutate(fleet = stringr::str_sub(fleet, 2),
fleet = ifelse(fleet == "Y", "catch", fleet))
}
d2 <-
dplyr::full_join(d2 %>% lh2(o, "o"),
d2 %>% lh2(p, "p"),
by = c("year", ".run", "assyear", "fleet")) %>%
dplyr::mutate(r = log(o/p))
d <- dplyr::bind_rows(d, d2)
if(log) {
d <-
d %>%
dplyr::mutate(o = log(o),
p = log(p))
}
d <- d %>% dplyr::rename(run = .run)
return(d)
}
mup_rby <- function(path, scale = 1, fleets, assyear, run) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(!file.exists(file.path(path, "resultsbyyear"))) {
stop(paste0("File: '", file.path(path, "resultsbyyear"), "' does not exist"))
}
if(missing(run)) run <- basename(path)
rby <- readr::read_tsv(file.path(path, "resultsbyyear"),
na = c("-1", "0"),
show_col_types = FALSE) %>%
janitor::remove_empty(which = "cols")
# check length of fleets vs nfleets
if(!missing(fleets)) {
nfleets <- (ncol(rby) - 14) / 2
if(nfleets != length(fleets)) {
stop(paste0("Named fleets (", paste(fleets, collapse = " ") , ") not the same as number of fleets (", nfleets, ")" ))
}
}
if(missing(fleets)) {
nfleets <- (ncol(rby) - 14) / 2
fleets <- as.character(1:nfleets)
}
txty <- paste(c("pU","oU"),c(matrix(fleets,2,length(fleets),byrow=T)),sep="")
names(rby)[15:ncol(rby)] <- txty
rby <-
rby %>%
dplyr::rename(r = Recruitment,
bio = RefBio2,
ssb = Spawningstock,
fbar = RefF,
pY = CalcCatchIn1000tons,
oY = CatchIn1000tons,
#oU1 = ObsSurveyBiomass1,
#pU1 = CalcSurveyBiomass1,
#oU2 = ObsSurveyBiomass2,
#pU2 = CalcSurveyBiomass2,
bio1 = RefBio1,
bio2 = CbioR,
eggp = Eggproduction) %>%
dplyr::mutate(y = ifelse(is.na(oY), pY, oY),
hr = y/bio,
hr2 = (1/3 * y + 3/4 * dplyr::lead(y)) / bio,
r = r / scale) %>%
dplyr::select(year:fbar, hr, pY, oY, dplyr::everything()) %>%
dplyr::select(-y)
if(missing(assyear)) {
assyear <- rby %>% dplyr::filter(!is.na(oY)) %>% dplyr::pull(year) %>% max()
assyear <- assyear + 1
}
rby <-
rby %>%
dplyr::mutate(run = run,
model = "mup",
assyear = assyear)
return(rby)
}
# note: assyear not extractable by default
mup_rba <- function(path, fleets, run) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(!file.exists(file.path(path, "resultsbyage"))) {
stop(paste0("File: '", file.path(path, "resultsbyage"), "' does not exist"))
}
rba <-
readr::read_tsv(file.path(path, "resultsbyage"),
na = c("-1", "0"),
show_col_types = FALSE)
# check length of fleets vs nfleets
if(!missing(fleets)) {
nfleets <- (ncol(rba) - 4) / 3
if(nfleets != length(fleets)) {
stop(paste0("Named fleets (", paste(fleets, collapse = " ") , ") not the same as number of fleets (", nfleets, ")" ))
}
}
if(missing(fleets)) {
nfleets <- (ncol(rba) - 4) / 3
fleets <- as.character(1:nfleets)
}
txty <- paste(c("sigmaU", "qU", "pU"),c(matrix(fleets,3, length(fleets),byrow=T)),sep="")
names(rba)[5:ncol(rba)] <- txty
if(missing(run)) run <- basename(path)
rba <-
rba %>%
dplyr::mutate(run = run,
model = "mup")
return(rba)
}
mup_std <- function(path) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(file.exists(paste0(path, "/muppet.par"))) {
d <-
utils::read.table(paste0(path, "/muppet.std"), header = TRUE) %>%
tibble::as_tibble()
return(d)
}
# NOTE: function returns NULL if muppet.par does not exist
}
mup_par <- function(path, run) {
if(file.exists(paste0(path, "/muppet.par"))) {
fil <- paste0(path, "/muppet.par")
} else {
}
x <- readr::read_lines(fil)
head <- x[1]
head <- stringr::str_split(head, " ")[[1]]
head <- head[c(6, 11, 17)] %>% stringr::str_trim() %>% as.numeric()
names(head) <- c("npar", "objective", "max_gradient")
idx <- grep("#", x)
N <- length(idx)
res <- list()
for(i in 1:N) {
if(i == 1) {
res[[i]] <- head
} else {
if(i < N) {
i1 <- idx[i] + 1
i2 <- idx[i + 1] - 1
res[[i]] <-
x[i1:i2] %>%
stringr::str_trim() %>%
stringr::str_split(" ", simplify = TRUE)
nr <- nrow(res[[i]])
if(nr > 1) {
res[[i]] <-
res[[i]] %>%
as.numeric() %>%
matrix(nrow = nr)
} else {
res[[i]] <-
res[[i]] %>%
as.numeric()
}
} else {
res[[i]] <- x[N] %>% as.numeric()
}
}
}
names(res) <-
c("obj", x[idx[-1]]) %>%
stringr::str_replace_all("#", "") %>%
stringr::str_replace(":", "") %>%
stringr::str_replace("\\[", "") %>%
stringr::str_replace("\\]", "") %>%
stringr::str_trim()
return(res)
}
#' Reads ADMB hst files
#'
#' Reads output of ADMB MCMC report containing estimated distributions. The .hst
#' report contains information about the MCMC analysis: the sample sizes
#' (specied with the -mcmc command-line option), the step size scaling
#' factor, the step sizes, and information about the posterior probability
#' distribution (e.g., the mean, standard deviation, and lower and upper
#' bounds). For each simulated parameter, a range of values (with step sizes
#' reported in the "step sizes" section of the .hst file) and their simulated
#' posterior probabilities is reported. Plotting the first column
#' (parameter values) on the x-axis and the second column (simulated
#' probabilities) on the y-axis can be a convenient way to make a visualization
#' of the posterior probability distribution.
#'
#' @param path Name of the \emph{directory} that contains the result.
#' @param txt The parameters to extract
#' @param startyear Assessment start year
#' @param names A character vector of length two replacing the default names
#' \emph{c('value','prop')}
#' @param negative.allowed Flag, default is FALSE
#'
#' @return A list, each component being a dataframe with two columns. The
#' default names are \emph{c('value','prop')} which can be replace by specifying
#' \emph{names}.
mup_hst <- function (path, txt, startyear = 1955, names, negative.allowed = FALSE) {
file <- paste(path, "muppet.hst", sep = "/")
tmpskra <- tempfile("bayes")
on.exit(unlink(tmpskra))
tmp <- scan(file, what = character(), sep = "\n", quiet = TRUE)
tmp1 <- matrix(tmp, length(tmp), 1)
utils::write.table(tmp1, file = tmpskra, sep = "", col.names = F,
row.names = F, quote = F)
i <- grep(txt, tmp)
j <- grep("#", tmp)
j <- j[j > i[length(i)]]
if (length(j) > 0) {
j <- j[1] - 1
} else {
j <- length(tmp)
}
i1 <- i[1:(length(i))] + 1
i2 <- c(i[2:length(i)] - 1, j)
if (length(i) == 1) i2 <- j
Result <- list()
for (i in 1:length(i1)) {
#print(i)
x <- getlineswin(tmpskra, i1[i], i2[i])
names(x) <- c('value','prop')
if (!negative.allowed) x <- x[x$value >= 0, ]
Result[[i]] <- getlineswin(tmpskra, i1[i], i2[i])
names(Result[[i]]) <- c('value','prob')
#Result[[i]] <- calcprofile(Result[[i]], negative.allowed = negative.allowed)
}
if (!missing(startyear)) {
names(Result) <- paste((startyear:(startyear + length(Result) - 1)),
sep = "")
#attributes(Result)$years <- startyear:(startyear + length(Result) - 1)
}
# if (!missing(names)) names(Result) <- names
# if (length(Result) == 1) Result <- Result[[1]]
Result <-
dplyr::bind_rows(Result, .id = "year") %>%
tibble::as_tibble() %>%
dplyr::mutate(year = as.integer(year))
return(Result)
}
getlineswin <- function (file, line1, line2) {
tmpskra <- tempfile("bayes")
on.exit(unlink(tmpskra))
#if (missing(nlines)) nlines <- length(count.fields(file, sep = "\n"))
x <- scan(file, sep = "\t", what = character(), quiet = TRUE)
x <- matrix(x, length(x), 1)
x <- x[line1:line2, ]
utils::write.table(x, file = tmpskra, sep = "\n", col.names = F,
row.names = F, quote = F)
return(utils::read.table(tmpskra))
}
#' @title read muppet output
#'
#' @description reads muppet results
#'
#' @param path A directory path
#' @param scale A scaler (default 1)
#' @param fleets xxx
#' @param assyear Assessment year, if missing (default) will use the year after the last
#' catch at age input
#' @param run Name of the run, if missing (default) will use the directory name
#' @param wide A boolean indicating if returned table wide (default TRUE). If FALSE variable are
#' return within column 'var' and values in column 'val'.
#'
#' @return A tibble
#'
#' @export
#'
mup_rbx <- function(path, scale = 1, fleets, assyear, run, wide = TRUE) {
if(!dir.exists(path)) {
stop(paste0("File path: '", path, "' does not exist"))
}
if(missing(run)) run <- basename(path)
if(missing(scale)) scale <- 1
list(rby = mup_rby(path, scale, fleets = fleets, assyear = assyear, run = run),
rbya = mup_rbya(path, scale, fleets = fleets, assyear = assyear, run, wide),
rba = mup_rba(path, fleets = fleets, run = run),
opr = mup_opr(path, scale, assyear = assyear, run = run),
std = mup_std(path),
par = mup_par(path))
}
# HOSKI ------------------------------------------------------------------------
# Hér sérðu dæmi um nýja read_separ (read_separ1) í bili
# notkun á því og útkomu.
# res <- read_separ1(".",".",fleets=c("3","1","2","4"),assYear=year+1)
#
# Floti 1 er alltaf mars survey , 2 er haustrall 3 er marssurvey 1-2 og 4 haustrall12 það er út af þessu logq1 dæmi
#
# Við höldum alltaf þessum nöfnum þ.a í haustrallkeyrslu er bara oU2 og pU2 ekki oU1 og pU1.
#
# Ef vilja hafa þetta öðruvísi er defaultið að númera flotanna í vaxandi röð.
# read_separ1 <- function (path, run, rName = NA, mName = NA, calcSurBio = F,
# ggFactor = T, Scale = 1000, assYear = NA, retroY = NA,fleets)
# {
# if(file.exists("muppet.par")){ # get AIC could look for as.numeric=T
# dat <- scan("muppet.par",what=character(),sep=" ",quiet=T)[1:12]
# aicinfo <- as.numeric(c(dat[6],dat[11]))
# names(aicinfo) <- c("npar","objective")
#
# dat <- scan("muppet.par",what=character(),sep="\n",quiet=T)
# i <- grep("# surveybiopow",dat)
# surveybiopow <- as.numeric(dat[i+1])
# }
#
#
#
# if(missing(fleets)){ # to have some default
# # Test number of columns
# if (is.na(retroY))
# rby <- read.table(paste(path, run, "resultsbyyear", sep = "/"),
# header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY))
# rby <- read.table(paste(paste(path, run, "resultsbyyear",
# sep = "/"), retroY, sep = ""),
# header = T, na.strings = c("-1", "0"))
# nfleets <- (ncol(rby)-14)/2
# fleets <- as.character(1:nfleets)
# }
# txty <- paste(c("oU","pU"),c(matrix(fleets,2,length(fleets),byrow=T)),sep="")
# cnRby <- c("year", "r", "n3", "n6", "bioF", "bio", "bio1",
# "ssb", "ssb2", "fbar", "hr", "oY", "pY",txty,"run", "model")
# txtya <- paste(c("oU","pU","rU"),c(matrix(fleets,3,length(fleets),byrow=T)),sep="")
# cnRbya <- c("year", "age", "oC", "cW", "sW", "ssbW", "mat",
# "n", "z", "f", "m", "pC", "rC",txtya)
# txta<- paste(c("cvU","qU","pU"),c(matrix(fleets,3,length(fleets),byrow=T)),sep="")
#
# cnRba <- c("age", "sel", "pSel", "sigma", txta, "run", "model")
# if (is.na(retroY))
# rby <- read.table(paste(path, run, "resultsbyyear", sep = "/"),
# header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY)) {
# rby <- read.table(paste(paste(path, run, "resultsbyyear",
# sep = "/"), retroY, sep = ""), header = T, na.strings = c("-1",
# "0"))
# }
# n <- nrow(rby)
# if (ncol(rby) != 18) {
# rby$pU2 <- rep(NA, n)
# rby$oU2 <- rep(NA, n)
# }
# names(rby) <- c("year", "fbar", "pY", "oY", "ssb", "ssb2",
# "bioF", "bio1", "bio", "preR", "r", "n1", "n3", "n6",
# txty)
# if (ggFactor)
# rby$r <- rby$r * exp(-0.4)
# rby$hr <- ifelse(!is.na(rby$oY), rby$oY, rby$pY)/rby$bio
# rby$run <- rName
# rby$model <- mName
# rby <- rby[, cnRby]
# rby$r <- rby$r/Scale
# rby$n3 <- rby$n3/Scale
# rby$n6 <- rby$n6/Scale
# if (is.na(retroY))
# rbya <- read.table(paste(path, run, "resultsbyyearandage",
# sep = "/"), header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY)) {
# rbya <- read.table(paste(paste(path, run, "resultsbyyearandage",
# sep = "/"), retroY, sep = ""), header = T, na.strings = c("-1",
# "0"))
# }
# n <- nrow(rby)
# names(rbya) <- c("year", "age", "n", "z", "sW", "m", "f",
# "pC", "cW", "ssbW", "mat", "oC", "rC",txtya)
# if (ggFactor)
# rbya$n <- ifelse(rbya$age %in% 1, rbya$n * exp(-0.4),
# rbya$n)
# if (ggFactor)
# rbya$n <- ifelse(rbya$age %in% 2, rbya$n * exp(-0.2),
# rbya$n)
# rbya <- rbya[, cnRbya]
# rbya$run <- rName
# rbya$model <- mName
# rbya$oC <- rbya$oC/Scale
# rbya$cW <- rbya$cW/Scale
# rbya$sW <- rbya$sW/Scale
# rbya$n <- rbya$n/Scale
# rbya$pC <- rbya$pC/Scale
# if (is.na(retroY))
# rba <- read.table(paste(path, run, "resultsbyage", sep = "/"),
# header = T, na.strings = c("-1", "0"))
# if (!is.na(retroY)) {
# rba <- read.table(paste(paste(path, run, "resultsbyage",
# sep = "/"), retroY, sep = ""), header = T, na.strings = c("-1",
# "0"))
# }
# n <- nrow(rba)
# names(rba) <- c("age", "sel", "pSel", "sigma", txta)
# rba$run <- rName
# rba$model <- mName
# rba <- rba[, cnRba]
# if (!is.na(retroY)) {
# print(retroY)
# rby$assYear <- as.numeric(retroY) + 1
# rbya$assYear <- as.numeric(retroY) + 1
# rba$assYear <- as.numeric(retroY) + 1
# }
# else {
# rby$assYear <- assYear
# rbya$assYear <- assYear
# rba$assYear <- assYear
# }
# if(exists("surveybiopow")){
# names(surveybiopow) <- fleets
# return(list(rby = rby, rbya = rbya, rba = rba,aicinfo=aicinfo,surveybiopow=surveybiopow))
# }
# else
# return(list(rby = rby, rbya = rbya, rba = rba)) # no muppet.par
# }
#
# # retro example ----------------------------------------------------------------
# source("readSepar.r")
# library(stringr)
# library(tidyverse)
#
# Changepinfile <- function(file="muppet.par",txt = c("# lnRecr:","# lnEffort:"),outputfile="muppet.pin") {
# dat <- scan(file,what=character(),sep="\n",quiet=TRUE)
# for(k in 1:length(txt)){
# j <- grep(txt[k],dat)
# if(length(j) > 0) {
# k1 <- unlist(str_locate_all(dat[j+1]," "))
# dat[j+1] <- substring(dat[j+1],1,k1[length(k1)]-1)
# }
# }
# write.table(dat,file=outputfile,row.names=FALSE,col.names=FALSE,sep="\n",quote=F)
# }
#
# Replace <- function(txt,parameter,pattern){
# if(!missing(parameter)){
# i <- grep(pattern,txt)
# if(!any(i)){
# print(paste(" ",pattern," ","does not exist"))
#       stop()  # was break(), which is invalid outside a loop
# }
# txt[i] <- paste(as.character(parameter),"\t",pattern)
# }
# return(txt)
# }
# rby <- rbya <- rba <- aicinfo <- surveybiopow <- list()
# PIN <- TRUE
#
# inputfile <- "icecod.dat.opt.final"
# for(year in c(2019:2001)){
# print(year)
# assyear <- year+1
# txt <- readLines(inputfile)
# txt <- Replace(txt,year,'# Last opt year')
# txt <- Replace(txt,min(c(year+2,2019)),'# Last data year')
#
# if(PIN && (year != 2019))Changepinfile("muppet.par",txt = c("# lnRecr:","# lnEffort:"),outputfile="muppet.pin")
# write.table(txt,file="icecod.dat.opt",sep="\n",row.names=F,quote=F,col.names=F)
# system("muppet -nox -ind icecod.dat.opt > /dev/null")
# res <- read_separ1(".",".",fleets=c("3","1","2","4"),assYear=year+1)
# rby[[as.character(assyear)]] <- res$rby
# rbya[[as.character(assyear)]] <- res$rbya
# rba[[as.character(assyear)]] <- res$rba
# aicinfo[[as.character(assyear)]] <- res$aicinfo
# surveybiopow[[as.character(assyear)]] <- res$surveybiopow
#
#
#   # These mv calls are not strictly needed, but we run them to at least move the result files out of the way.
# system(paste("mv resultsbyyear tmpresults/resultsbyyear",year,sep=""))
# system(paste("mv resultsbyyearandage tmpresults/resultsbyyearandage",year,sep=""))
# system(paste("mv resultsbyage tmpresults/resultsbyage",year,sep=""))
# }
#
# rby <- bind_rows(rby)
# rbya <- bind_rows(rbya)
# rba <- bind_rows(rba)
# aicinfo <- bind_rows(aicinfo)
# aicinfo$assYear <- unique(rby$assYear)
# surveybiopow <- bind_rows(surveybiopow)
# surveybiopow$assYear <- unique(rby$assYear)
#
# save(list=c("rby","rbya","rba","aicinfo","surveybiopow"),file="retro.rdata")
#
|
#reading data
raw.movies <- readr::read_csv("data/tmdb_5000_movies.csv")
clean.movies <- raw.movies[raw.movies$revenue != 0,]
#remove variables
rm(raw.movies)
#top grossing films
other.top <- clean.movies[order(clean.movies$revenue, decreasing = T),c(2, 7, 13)]
other.top <- cbind(spot=1:100, other.top[1:100,])
View(other.top)
#top grossing fantasy films
other.top.fantasy <- other.top[grepl("Fantasy", other.top$genres),]
#percentage of top films
dim(other.top.fantasy)[1] / dim(other.top)[1] * 100
#percentage of total revenue
sum(other.top.fantasy$revenue) / sum(other.top$revenue) * 100
#remove variables
rm(other.top.fantasy, other.top)
#descriptives before removing outliers
source('Descriptives.R')
Descriptives(clean.movies$revenue)
Descriptives(clean.movies$revenue[grepl("Fantasy", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Romance", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Horror", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Action", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Comedy", clean.movies$genres)])
#removing outliers
source('Outliers.R')
#descriptives after removing outliers
Descriptives(clean.movies$revenue)
Descriptives(clean.movies$revenue[grepl("Fantasy", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Romance", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Horror", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Action", clean.movies$genres)])
Descriptives(clean.movies$revenue[grepl("Comedy", clean.movies$genres)])
#remove functions
rm(A, CV, IQR, IQRCV, IQRKurtosis, IQRSkewness, Ku, Mode, Pearson, Q, Skewness, Descriptives)
#extracting season
yq <- zoo::as.yearqtr(zoo::as.yearmon(clean.movies$release_date, "%Y-%m-%d") + 1/12)
clean.movies$release_season <- factor(format(yq, "%q"), levels = 1:4, labels = c("winter", "spring", "summer", "fall"))
rm(yq)
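#why the "+ 1/12": shifting each date forward one month makes Dec-Feb land in quarter 1
#("winter"), Mar-May in quarter 2, and so on. Illustrative check:
#zoo::as.yearqtr(zoo::as.yearmon("2012-12-15", "%Y-%m-%d") + 1/12)  # -> "2013 Q1"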
#extracting release month
clean.movies$release_month <- substr(clean.movies$release_date, 6, 7)
#shortening release date to year
clean.movies$release_date <- substr(clean.movies$release_date, 1, 4)
names(clean.movies)[12] <- 'release_year'
#generating plots
source('Plots.R')
#create class intervals
source('ClassIntervals.R')
sub(".*\"name\": \"(.*)\".*", "\\1", clean.movies$genres, perl=TRUE)
 | /movies.R | no_license | PGStatistics/BivariateAnalysis | R | false | false | 2,347 | r |
summary(mtcars)
plot(mpg ~ hp, data = mtcars)
#attach() so we can refer to the mtcars columns without typing mtcars$ every time
attach(mtcars)
#covariance calculation
covaricanca <- cov(mpg,hp)
#correlation
correlacao <- cor(mpg,hp)
#display the covariance and correlation values
sprintf("Covariance:%f Correlation:%f",covaricanca,correlacao)
#histogram of mpg
hist(mpg)
#first rows of the data frame
head(mtcars)
#column names
colnames(mtcars)
#multiple correlations
correlacoes <- cor(mtcars[,1:4])
#test for normality
shapiro.test(mpg)$p.value
#test for equality of variances
fligner.test(mpg ~ hp, data = mtcars)
#fitting the linear regression
modelo.linear <- lm(mpg ~ hp, data = mtcars)
sd(mpg)
summary(modelo.linear)
coefficients(modelo.linear)
modelo.linear$coefficients
#plot mpg (y) as a function of hp (x)
plot(mpg ~ hp)
#draw the fitted regression line
abline(modelo.linear, col = "red")
#plot the mpg values fitted by the model
points(modelo.linear$fitted.values~ hp, col = "red", data = mtcars)
#fitted values of the model
fitted.values(modelo.linear)
modelo.linear$fitted.values
#coefficient of determination
summary(modelo.linear)$r.squared
#adjusted coefficient of determination
summary(modelo.linear)$adj.r.squared
#hypothesis test on the residuals: since they do not follow a normal distribution, this model should not be used
shapiro.test(modelo.linear$residuals)
hist(modelo.linear$residuals,freq = F)
#new hp values for the prediction
novos_hp <- seq(from = 50 , to = 550 , by = 50)
#prediction
predicao <- predict(modelo.linear,data.frame(hp=novos_hp) )
#plot of the predicted values
plot(predicao ~ novos_hp, col = "red", pch = 15,ylab = "Fuel economy (mpg)", xlab = "New hp values", main = "Prediction")
#observed data
points(mpg~hp)
#a horizontal line at 0
abline(h=0)
#compute the mean.
media.mpg <- mean(mtcars$mpg)
#fit the model with a single parameter: the intercept only
modelo.const <- lm(mpg ~ 1, data = mtcars )
summary(modelo.const)
#visualize on the plot
#plot of the predicted values
plot(predicao ~ novos_hp, col = "red", pch = 15, ylab = "Fuel economy (mpg)", xlab = "New hp values", main = "Prediction")
#observed data
points(mpg~hp)
#mean line
abline(modelo.const, col = "green")
#analysis of variance between the models
anova(modelo.const, modelo.linear,test = "Chisq")
#Akaike comparison of the models: pick the one with the smaller AIC value
AIC(modelo.const, modelo.linear)
#neither of the models built so far proved really effective,
#since their accuracy is only around 60%
#so let's look into building a multivariate linear regression model
boxplot(mpg ~ cyl, ylab="Fuel economy (mpg)", xlab="Cylinders", data = mtcars)
#another plot
plot(mpg ~ hp , pch =cyl, col = cyl, data = mtcars)
legend(300,32,legend = c("4","6","8"), pch = c(4,6,8), col = c("blue","pink","grey"), title = "Cylinders")
#checking the correlation
multi.cor <- cor(mtcars[,c("mpg","hp","cyl")], method = "pearson")
#the correlation matrix
multi.cor
#install.packages("corrplot")
#one way to display the correlation matrix
library(corrplot)
corrplot.mixed(multi.cor, upper = "ellipse")
#multivariate linear regression
modelo.multi <- lm(mpg ~ hp + cyl, data = mtcars)
#here is our regression; we will improve it later. For now we leave it as is.
summary(modelo.multi)
#adjusted R^2 of the simple linear model
summary(modelo.linear)$adj.r.squared
#adjusted R^2 of the multivariate model
summary(modelo.multi)$adj.r.squared
#plain coefficient of determination
summary(modelo.linear)$r.squared
#just to show that the plain R^2 always increases with more predictors, which is why we should use the adjusted one
summary(modelo.multi)$r.squared
coefficients(modelo.multi)
#plot of the multivariate model
plot(mpg ~ hp , pch =cyl, col = cyl, data = mtcars)
points(modelo.multi$fitted.values ~ hp, col = "red", pch = 18, data = mtcars)
fun.reta <- function(x, cyl){
  modelo.multi$coefficients[1]+
    modelo.multi$coefficients["cyl"]*cyl +
    modelo.multi$coefficients["hp"]*x
}
curve(expr = fun.reta(x,4) , from = 0, to = 500, add = T, col = 4)
curve(expr = fun.reta(x,6) , from = 0, to = 500, add = T, col = 6)
curve(expr = fun.reta(x,8) , from = 0, to = 500, add = T, col = 8)
legend(300,32,legend = c("4","6","8"), pch = c(4,6,8), col = c("blue","pink","grey"), title = "Cylinders")
#confidence interval
intervalo_confianca <- confint.default(modelo.multi, level = 0.95)
cbind(intervalo_confianca,modelo.multi$coefficients)
#comparing the models with anova
anova(modelo.const, modelo.linear, modelo.multi,test = "Chisq")
#FEATURE SELECTION
#install.packages("car")
library(car)
scatterplotMatrix(mtcars[ , c("mpg" , "cyl" , "disp","hp" , "wt" ,"qsec")], lwd = 3)
#list the columns to pick the ones of interest
colnames(mtcars)
#choose a significance level
#multivariate linear model
modelo.multi2.0 <- lm(mpg ~ cyl + disp + hp + wt + qsec, data = mtcars)
#inspect how important each variable is in the model
summary(modelo.multi2.0)
#helper that suggests which variable to drop in backward selection
variavel_menor_backward <- function(modelo, dados, coef = 0.975){
  #modelo <- modelo.multi2
  #summary(modelo)
  #coef <- 0.975
  #dados<- mtcars
  #get the coefficient table
  modelo.coef <- summary(modelo)$coefficients;modelo.coef
  if(nrow(summary(modelo)$coefficients) == 2 ){
    return(sprintf("Only one variable left, nothing to remove"))
  }
  #degrees of freedom
  gl<-nrow(dados) - length(coefficients(modelo));gl
  #compute the t quantile
  quantile <- qt(coef,gl);quantile
  modelo.coef <- modelo.coef[-1,];modelo.coef
  #menor_valor <- min (abs(summary(modelo)$coefficients[, "t value"])) ; menor_valor
  #smallest absolute t value among the predictors
  menor_valor <- min (abs(modelo.coef[, "t value"])); menor_valor
  #name of the predictor with that smallest t value
  nome <- names(which(abs(modelo.coef[,"t value"]) == menor_valor))
  if (quantile > menor_valor) {
    return(sprintf("Remove %s, t %f < quantile: %f", nome,menor_valor,quantile))
  }
  #the original returned the same "remove" message on both branches; this branch means the variable stays
  return(sprintf("Keep %s, t %f >= quantile: %f", nome,menor_valor,quantile))
}
variavel_menor_backward(modelo.multi2.0, mtcars)
#new formula without qsec
modelo.multi2.1 <- lm(mpg ~ cyl + disp + hp + wt , data = mtcars)
summary(modelo.multi2.1)
#new formula without disp
modelo.multi2.2 <- lm(mpg ~ cyl + hp + wt , data = mtcars)
summary(modelo.multi2.2)
#checking whether more variables can be removed
variavel_menor_backward(modelo.multi2.2, mtcars)
#new formula without hp
modelo.multi2.3 <- lm(mpg ~ cyl + wt , data = mtcars)
summary(modelo.multi2.3)
#new formula without cyl
modelo.multi2.4 <- lm(mpg ~ wt , data = mtcars)
summary(modelo.multi2.4)
variavel_menor_backward(modelo.multi2.4, mtcars)
#covariance matrices of the coefficients
vcov(modelo.multi2.0)
vcov(modelo.multi2.3)
#analyzing the difference between the models
anova(modelo.multi2.0, modelo.multi2.1, modelo.multi2.2, modelo.multi2.3, modelo.multi2.4,test = "Chisq")
AIC(modelo.multi2.0, modelo.multi2.1, modelo.multi2.2, modelo.multi2.3)#choose the one with the smallest AIC
#STUDYING THE BEST MODEL
library(hnp)
layout(matrix(c(1,2,3,4),2,2))
#half-normal probability plots of the models
hnp(modelo.multi2.0, xlab = 'N(0,1)', ylab = 'Residuals', main = modelo.multi2.0$call$formula)
#residual analysis
hnp(modelo.multi2.1, xlab = 'N(0,1)', ylab = 'Residuals', main = modelo.multi2.1$call$formula)
#residual analysis
hnp(modelo.multi2.2, xlab = 'N(0,1)', ylab = 'Residuals', main = modelo.multi2.2$call$formula)
#residual analysis
hnp(modelo.multi2.3, xlab = 'N(0,1)', ylab = 'Residuals', main = modelo.multi2.3$call$formula)
#comparing AIC against LOG-LIKELIHOOD
analise <- data.frame(
  modelos = c('2.0', '2.1', '2.2', '2.3'),
  aic = c(AIC(modelo.multi2.0), AIC(modelo.multi2.1),AIC(modelo.multi2.2),AIC(modelo.multi2.3)),
  verossimilhança = c(logLik(modelo.multi2.0), logLik(modelo.multi2.1),logLik(modelo.multi2.2),logLik(modelo.multi2.3))
)
#library that helps with feature selection
library(MASS)
modelo.multi.both <- step(modelo.multi2.0, direction = "both")#adds and removes features, checking how useful each is to the model
modelo.multi.forward <- step(modelo.multi2.0, direction = "forward")#forward selection only adds features
#leverage points: observations that strongly influence the fit
sort(influence(modelo.multi2.3)$hat)
#the leverages sum to the number of fitted parameters
sum(influence(modelo.multi2.3)$hat)
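#illustrative check (not in the original script): the sum above equals the number
#of fitted parameters -- here 3 (intercept + cyl + wt)
length(coefficients(modelo.multi2.3))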
#plots of the selected model
layout(matrix(c(1,2,3,4),2,2))
X11()
plot(modelo.multi2.3)
#GENERALIZED LINEAR MODELS - GLM
## BINOMIAL ##
#analysis of killed insects
#building the data frame
insetos <- data.frame(
  dose = c(0.0, 2.6, 3.8, 5.1, 7.7, 10.2),
  total = c(49, 50, 48, 46, 49, 50),
  mortos = c(0, 6, 16, 24, 42, 44))
#our response variable
insetos$proporcao <- NULL
insetos$proporcao <- insetos$mortos / insetos$total
#insetos <- cbind(insetos, proporcao)
#checking
head(insetos)
#look at the behaviour of our Y
hist(insetos$proporcao, freq = F)
#binomial-like distribution between 0 and 1
# Plotting the response variable against dose
X11()
plot(proporcao ~ dose, data = insetos , xlim = c(0,12), ylim = c(0,1), main = "Insects Killed (%) x Dose")
#building the model
# Fitting the binomial regression
modelo.logit <- glm(proporcao ~ dose, family = binomial(link = "logit"), data = insetos)
# Using another link function: probit
modelo.probit <- glm(proporcao ~ dose, family = binomial(link = "probit"), data = insetos)
#the beta coefficients
coef(modelo.logit)
#new doses at which to estimate the proportion of killed insects
new.doses <- seq(0,12, by=0.1)
#estimating the values
new.prob.logit <- predict(modelo.logit,data.frame(dose=new.doses))
new.prob.probit <- predict(modelo.probit,data.frame(dose=new.doses), type = "response")
#inverse link function (inverse logit)
mu <- function(t) {exp(t)/(1+exp(t))}
#looking at the plots
X11()
plot(proporcao ~ dose, data = insetos , xlim = c(0,12), ylim = c(0,1), main = "Insects Killed (%) x Dose")
lines(new.doses, mu(new.prob.logit), col = "blue")
points(insetos$dose, modelo.logit$fitted.values, pch = "*", col = "blue")
lines(new.doses, new.prob.probit, col = "green")
legend(0,1, legend = c("observed","logit","probit"), col = c("black","blue","green"), pch=c("o","*","line") )
#comparing the models
anova(modelo.logit, modelo.probit , test = "Chisq")
exp(modelo.logit$coefficients[2])
exp(confint.default(modelo.logit))
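#illustrative check (not in the original script): predicted kill probability at a
#hypothetical dose of 5 units via the inverse logit; equivalent to
#predict(modelo.logit, data.frame(dose = 5), type = "response")
mu(sum(coef(modelo.logit) * c(1, 5)))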
## POISSON ##
bacterias <- c(175, 108,95,82,71,50,49,31,28,17,16,11)
tempo <- 1:12
plot(bacterias ~ tempo , xlim = c(0,20) , ylim= c(0,180), type = "h", ylab = "Number of bacteria", xlab = "Time unit")
modelo <- glm(bacterias ~ tempo, family = poisson)
x <- seq(0, 20) # note: the original seq(0:20) actually produced 1:21
preditos <- predict(modelo, data.frame(tempo = x))
f<- function(l){exp(l)}
points(x , f(preditos), col = "red")
lines(x , f(preditos), col = "red", lty = 2)
preditos <- predict(modelo, data.frame(tempo = x), type = "response", interval = "prediction")
summary(modelo)
int.confianca <- exp(confint.default(modelo))
int.confianca
## GAMMA ##
#straight to the model
modelo.g.mpg <- glm(mpg ~ hp ,family = Gamma, data = mtcars)
#multivariate linear predictor
modelo.g.mpg.2 <- glm(mpg ~ hp + cyl,family = Gamma, data = mtcars)
#the inverse Gaussian: another distribution for skewed data
modelo.ig.mpg <- glm(mpg ~ hp + cyl,family = inverse.gaussian, data = mtcars)
#prediction values from 1 to 1000
n.hp <- 1:1000
#on to the plots
X11()
plot(mpg ~ hp , data = mtcars, ylim = c(-5,30), xlim=c(0,1000))
lines(n.hp,predict(modelo.linear, newdata = data.frame(hp = n.hp), type = "response"), col = "green")
#points(mtcars$hp,modelo.g.mpg$fitted.values, col = "red", pch = 14)
abline(h = 0)
lines(n.hp,predict(modelo.g.mpg, newdata = data.frame(hp = n.hp), type = "response"), col = "red")
lines(n.hp,predict(modelo.g.mpg.2, newdata = data.frame(hp = n.hp, cyl = 4), type = "response"), col = "blue")
lines(n.hp,predict(modelo.g.mpg.2, newdata = data.frame(hp = n.hp, cyl = 6), type = "response"), col = "blue")
lines(n.hp,predict(modelo.g.mpg.2, newdata = data.frame(hp = n.hp, cyl = 8), type = "response"), col = "blue")
#best model for the problem
analise <- data.frame(
  colnames = c("linear", "gamma 1", "gamma 2", "I. gaussian"),
  aic = c(AIC( modelo.linear),AIC(modelo.g.mpg),AIC(modelo.g.mpg.2),AIC(modelo.ig.mpg)),
  verossimilhança = c(logLik(modelo.linear),logLik(modelo.g.mpg),logLik(modelo.g.mpg.2),logLik(modelo.ig.mpg))
)
anova(modelo.g.mpg,modelo.g.mpg.2,modelo.ig.mpg ,modelo.linear, test = "Chisq")
layout(matrix(c(1,2,3,4),c(2,2)))
hnp(modelo.g.mpg.2, xlab = 'N(0,1)', ylab = 'Residuals')
plot(modelo.g.mpg.2)
 | /estudos_regressao_aula_danilo.R | no_license | LexGalante/R | R | false | false | 12,620 | r |
library(susieR)
library(data.table)
library(rprojroot)
library(dplyr)
library(Matrix)
library(tidyr)
F = is_rstudio_project$make_fix_file() # note: F now masks base R's FALSE in this session
R = as.matrix(fread(F("gwas/grid/genotypes/tau100/ss500/train/ldmats/ld_chr1_5000_5100.ld.gz")))
npd = nearPD(R)
R2 = as.matrix(npd$mat)
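# susie_rss expects a positive-(semi)definite correlation matrix; nearPD above projects
# the raw LD estimate, which can be noisy or rank-deficient, to the nearest PD matrix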
gwas = fread(F("gwas/grid/genotypes/tau100/ss500/train/gwas_results/fixed_effects/ge/gwas_gridt100_train.ge.1.pcs0.smooth.glm.linear.gz"))
colnames(gwas)[1]="CHROM"
gwas = gwas[CHROM==1]
gwas = gwas[POS>=5e6 & POS<5.1e6]
causal = fread(F("gwas/grid/genotypes/tau100/ss500/train/genos_gridt100_l1e7_ss750_m0.05_chr1_20.rmdup.train.all.thinned_100kb.effects"))
colnames(causal) = c("rep","SNP","allele","esize")
causal = causal%>%
filter(rep==1)%>%
separate(SNP, sep="_",
into=c("chrom","position","ref","alt"),
remove = FALSE)
causal$chrom = as.numeric(causal$chrom)
causal$position = as.numeric(causal$position)
causal.red = causal%>%
filter(chrom==1 & position>=5e6 & position<5.1e6)
gwas = gwas%>%
mutate(b = case_when(ID%in%causal.red$SNP ~ 1,
TRUE ~ 0))
fitted_rss = susie_rss(gwas$T_STAT,
R2, L=10, check_z = FALSE)
summary(fitted_rss)$cs
susie_plot(fitted_rss, y="PIP", b=gwas$b)
ggplot(gwas,aes(POS,-log10(P)))+
geom_point()+
theme_minimal()+
geom_point(data=gwas%>%filter(b==1),
aes(POS,-log10(P)),
color="red")
#now with rare PC correction
gwas.re = fread(F("gwas/grid/genotypes/tau100/ss500/train/gwas_results/fixed_effects/ge/gwas_gridt100_train.ge.1.cmre.smooth.glm.linear.gz"))
colnames(gwas.re)[1]="CHROM"
gwas.re = gwas.re[CHROM==1]
gwas.re = gwas.re[POS<=1e5]
gwas.re = gwas.re%>%
mutate(b = case_when(ID%in%causal.red$SNP ~ 1,
TRUE ~ 0))
fitted_rss.re = susie_rss(gwas.re$T_STAT,
R2, L=1, check_z = FALSE)
summary(fitted_rss.re)$cs
susie_plot(fitted_rss.re, y="PIP", b=gwas$b)
############## let's try with common
R_cm = as.matrix(fread(F("gwas/grid/genotypes/tau100/ss500/train/genotypes/ldmats/ldmat_chr1_1_100kb_common.ld.gz")))
npd_cm = nearPD(R_cm)
R2_cm = as.matrix(npd_cm$mat)
ids_cm = fread(F("gwas/grid/genotypes/tau100/ss500/train/genotypes/ldmats/genos_chr1_1_100kb_common.bim"))
colnames(ids_cm) = c("rep","SNP","cm","position","ref","alt")
gwas_cm = gwas%>%
filter(ID %in% ids_cm$SNP)
fitted_rss_cm = susie_rss(gwas_cm$T_STAT,
R2_cm, L=1, check_z = FALSE)
summary(fitted_rss_cm)$cs
susie_plot(fitted_rss_cm, y="PIP", b=gwas_cm$b)
################## correction with rare PCs
gwas.re = gwas.re%>%
filter(ID %in% ids_cm$SNP)
fitted_rss.re = susie_rss(gwas.re$T_STAT,
R2_cm, L=1, check_z = FALSE)
summary(fitted_rss.re)$cs
susie_plot(fitted_rss.re, y="PIP", b=gwas_cm$b)
| /code/fine_mapping/susie.R | no_license | Arslan-Zaidi/popstructure | R | false | false | 2,821 | r | library(susieR)
library(data.table)
library(rprojroot)
library(dplyr)
library(Matrix)
library(tidyr)
F = is_rstudio_project$make_fix_file()
R = as.matrix(fread(F("gwas/grid/genotypes/tau100/ss500/train/ldmats/ld_chr1_5000_5100.ld.gz")))
npd = nearPD(R)
R2 = as.matrix(npd$mat)
gwas = fread(F("gwas/grid/genotypes/tau100/ss500/train/gwas_results/fixed_effects/ge/gwas_gridt100_train.ge.1.pcs0.smooth.glm.linear.gz"))
colnames(gwas)[1]="CHROM"
gwas = gwas[CHROM==1]
gwas = gwas[POS>=5e6 & POS<5.1e6]
causal = fread(F("gwas/grid/genotypes/tau100/ss500/train/genos_gridt100_l1e7_ss750_m0.05_chr1_20.rmdup.train.all.thinned_100kb.effects"))
colnames(causal) = c("rep","SNP","allele","esize")
causal = causal%>%
filter(rep==1)%>%
separate(SNP, sep="_",
into=c("chrom","position","ref","alt"),
remove = FALSE)
causal$chrom = as.numeric(causal$chrom)
causal$position = as.numeric(causal$position)
causal.red = causal%>%
filter(chrom==1 & position>=5e6 & position<5.1e6)
gwas = gwas%>%
mutate(b = case_when(ID%in%causal.red$SNP ~ 1,
TRUE ~ 0))
fitted_rss = susie_rss(gwas$T_STAT,
R2, L=10, check_z = FALSE)
summary(fitted_rss)$cs
susie_plot(fitted_rss, y="PIP", b=gwas$b)
ggplot(gwas,aes(POS,-log10(P)))+
geom_point()+
theme_minimal()+
geom_point(data=gwas%>%filter(b==1),
aes(POS,-log10(P)),
color="red")
#now with rare PC correction
gwas.re = fread(F("gwas/grid/genotypes/tau100/ss500/train/gwas_results/fixed_effects/ge/gwas_gridt100_train.ge.1.cmre.smooth.glm.linear.gz"))
colnames(gwas.re)[1]="CHROM"
gwas.re = gwas.re[CHROM==1]
gwas.re = gwas.re[POS<=1e5]
gwas.re = gwas.re%>%
mutate(b = case_when(ID%in%causal.red$SNP ~ 1,
TRUE ~ 0))
fitted_rss.re = susie_rss(gwas.re$T_STAT,
R2, L=1, check_z = FALSE)
summary(fitted_rss.re)$cs
susie_plot(fitted_rss.re, y="PIP", b=gwas$b)
############## let's try with common
R_cm = as.matrix(fread(F("gwas/grid/genotypes/tau100/ss500/train/genotypes/ldmats/ldmat_chr1_1_100kb_common.ld.gz")))
npd_cm = nearPD(R_cm)
R2_cm = as.matrix(npd_cm$mat)
ids_cm = fread(F("gwas/grid/genotypes/tau100/ss500/train/genotypes/ldmats/genos_chr1_1_100kb_common.bim"))
colnames(ids_cm) = c("rep","SNP","cm","position","ref","alt")
gwas_cm = gwas%>%
filter(ID %in% ids_cm$SNP)
fitted_rss_cm = susie_rss(gwas_cm$T_STAT,
R2_cm, L=1, check_z = FALSE)
summary(fitted_rss_cm)$cs
susie_plot(fitted_rss_cm, y="PIP", b=gwas_cm$b)
################## correction with rare PCs
gwas.re = gwas.re%>%
filter(ID %in% ids_cm$SNP)
fitted_rss.re = susie_rss(gwas.re$T_STAT,
R2_cm, L=1, check_z = FALSE)
summary(fitted_rss.re)$cs
susie_plot(fitted_rss.re, y="PIP", b=gwas_cm$b)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SEERreadr-package.R
\docType{package}
\name{SEERreadr-package}
\alias{SEERreadr}
\alias{SEERreadr-package}
\title{SEERreadr: Read SEER Fixed Width Data Files}
\description{
Functions to read and process the fixed width data
files from SEER. Includes functions to recode values based on the
April 2018 data dictionary.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/GerkeLab/SEERreadr}
\item Report bugs at \url{https://github.com/GerkeLab/SEERreadr/issues}
}
}
\author{
\strong{Maintainer}: Garrick Aden-Buie \email{Garrick.Aden-Buie@moffitt.org}
Authors:
\itemize{
\item Vincent Major \email{vincentmajor@nyumc.org}
}
}
\keyword{internal}
 | /man/SEERreadr-package.Rd | permissive | GerkeLab/SEERreadr | R | false | true | 754 | rd |
% Please edit documentation in R/SEERreadr-package.R
\docType{package}
\name{SEERreadr-package}
\alias{SEERreadr}
\alias{SEERreadr-package}
\title{SEERreadr: Read SEER Fixed Width Data Files}
\description{
Functions to read and process the fixed width data
files from SEER. Includes functions to recode values based on the
April 2018 data dictionary.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/GerkeLab/SEERreadr}
\item Report bugs at \url{https://github.com/GerkeLab/SEERreadr/issues}
}
}
\author{
\strong{Maintainer}: Garrick Aden-Buie \email{Garrick.Aden-Buie@moffitt.org}
Authors:
\itemize{
\item Vincent Major \email{vincentmajor@nyumc.org}
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecate.R
\name{kimisc-deprecated}
\alias{kimisc-deprecated}
\title{Deprecated functions}
\description{
The "See also" section contains the deprecated functions in this package.
}
\seealso{
Other deprecated functions: \code{\link{coalesce.na-deprecated}},
\code{\link{df_to_list-deprecated}},
\code{\link{hms.to.seconds-deprecated}},
\code{\link{list_to_df-deprecated}},
\code{\link{nc-deprecated}},
\code{\link{nlist-deprecated}},
\code{\link{ofactor-deprecated}},
\code{\link{sample.rows-deprecated}},
\code{\link{seconds.to.hms-deprecated}},
\code{\link{thisfile-deprecated}},
\code{\link{tll-deprecated}},
\code{\link{vswitch-deprecated}}
}
 | /man/kimisc-deprecated.Rd | no_license | krlmlr/kimisc | R | false | true | 747 | rd |
##########################################################################################
# Script to run CellCnn analysis: run CellCnn, calculate summary statistics, calculate
# statistical test, generate heatmap, save output files
#
# Anti-PD-1 melanoma skin cancer data set (collaboration with Carsten Krieg and Malgorzata
# Nowicka, UZH)
#
# - panel: "panel2.xlsx"
# - data: batches "data 23" and "data 29", baseline, Non-Responders vs. Responders
#
# Note: run from command line with 'Rscript <filename>.R'
#
# Lukas Weber, June 2017
##########################################################################################
# this file: *baseline*, cytokine expressing memory *CD4* T cells
library(flowCore)
library(readxl)
library(dplyr)
library(limma)
library(lme4)
library(multcomp)
library(pheatmap)
library(RColorBrewer)
########
# inputs
########
dataset <- "panel2_CD4_Tmem_cells_base_combined"
fn_metadata_23 <- "../../data/PD-1 project/CK_metadata/metadata_23_02.xlsx"
fn_metadata_29 <- "../../data/PD-1 project/CK_metadata/metadata_29_02.xlsx"
path_23 <- "../../data/PD-1 project/CK_2016-06-23_02_CD4_merging2_Tmem_merging2_CD69/010_cleanfcs"
path_29 <- "../../data/PD-1 project/CK_2016-06-29_02_CD4_merging_Tmem_merging3_CD69/010_cleanfcs"
fn_panel <- "../../data/PD-1 project/CK_panels/panel2CD4_23_cytokines.xlsx" # identical for 'data 23' and 'data 29'
###############
# load metadata
###############
# load metadata spreadsheets for each data set ("data 23" and "data 29")
metadata_23 <- read_excel(fn_metadata_23)
metadata_29 <- read_excel(fn_metadata_29)
#View(metadata_23)
#View(metadata_29)
ix_keep <- 6:15
# paths
paths <- c(rep(path_23, length(ix_keep)), rep(path_29, length(ix_keep)))
# filenames
files <- c(metadata_23$filename[ix_keep], metadata_29$filename[ix_keep])
# vector of condition IDs
condition <- gsub("^base_", "", c(metadata_23$condition[ix_keep], metadata_29$condition[ix_keep]))
condition
# vector of sample IDs
samples <- gsub("^base_", "", c(metadata_23$shortname[ix_keep], metadata_29$shortname[ix_keep]))
samples
# vector of batch (data set) IDs
batch <- c(rep("23", length(ix_keep)), rep("29", length(ix_keep)))
batch
# check
data.frame(paths, files, condition, samples, batch)
##################
# load data into R
##################
# load data from .fcs files
fn <- paste(paths, files, sep = "/")
fn
data <- lapply(fn, read.FCS, transformation = FALSE, truncate_max_range = FALSE)
# align column names
# - remove "SampleID" and "beadDist" (columns 45 and 59) from data29
ix_remove_samples <- c(rep(FALSE, sum(batch == "23")), rep(TRUE, sum(batch == "29")))
ix_remove_cols <- rep(list(c(45, 59)), sum(batch == "29"))
data[ix_remove_samples] <- mapply(function(d, ix) {
d[, -ix]
}, data[ix_remove_samples], ix_remove_cols)
# check column names
check_cols <- lapply(data, function(d) pData(parameters(d))$name)
all(sapply(check_cols, function(ch) all(ch == check_cols[[1]])))
# load panel details from .xlsx spreadsheet
panel <- read_excel(fn_panel)
panel
################
# transform data
################
# this data set: using cytokines only (for CellCnn analysis)
# cytokine marker columns (to tranform and use for CellCnn analysis)
marker_cols <- as.logical(panel$transform)
marker_cols
panel[marker_cols, ]
# match columns using metal names (since .fcs columns are not in same order as in panels spreadsheet)
marker_metals <- panel[marker_cols, ]$fcs_colname
marker_names <- panel[marker_cols, ]$Antigen
markers_ix <- match(marker_metals, pData(parameters(data[[1]]))$name)
# check
all(panel[marker_cols, ]$fcs_colname == unname(colnames(exprs(data[[1]]))[markers_ix]))
# apply 'asinh' transform with cofactor = 5
cofactor <- 5
data <- lapply(data, function(d) {
e <- exprs(d)
e[, markers_ix] <- asinh(e[, markers_ix] / cofactor)
colnames(e)[markers_ix] <- marker_names
e
})
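# note (illustrative): asinh(x / 5) is roughly linear near zero and logarithmic for
# large values, e.g. asinh(0.5 / 5) ~= 0.0998 while asinh(100 / 5) ~= 3.69 ~ log(2 * 100 / 5)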
###########################
# investigate batch effects
###########################
# consider each data set ("23" and "29") to be a batch
# note: CellCnn cannot accept covariates to deal with batch effects
# summarize data for MDS plot: median marker expression per sample
n_cells <- sapply(data, nrow)
n_cells
smp <- rep(samples, n_cells)
smp <- factor(smp, levels = samples)
# marker columns only
data_MDS <- lapply(data, function(e) {
e_markers <- e[, markers_ix]
e_markers
})
data_MDS <- do.call("rbind", data_MDS)
data_MDS <- data.frame(data_MDS, sample = smp)
data_MDS %>%
group_by(sample) %>%
summarize_all(median) ->
df_meds
# rearrange data frame for MDS plot
df_plot <- t(df_meds[, -1])
colnames(df_plot) <- df_meds$sample
# MDS plot: color by condition and batch
cnd_bch <- paste(condition, batch, sep = "_")
cnd_bch <- factor(cnd_bch, levels = unique(cnd_bch))
cnd_bch
pdf(paste0("../../plots/", dataset, "/MDS_plot_condition_batch.pdf"), width = 7.5, height = 7.5)
pal <- c("deepskyblue1", "blue", "orange", "red")
cols_cnd_bch <- as.character(factor(cnd_bch, labels = pal))
plotMDS(df_plot, top = 2000, col = cols_cnd_bch,
main = "MDS plot: \ncondition (NR vs. R) and \nbatch (data base_23 vs. data base_29)")
legend("bottomright", pch = 16,
legend = c("Non responder (NR), data base_23", "Responder (R), data base_23",
"Non responder (NR), data base_29", "Responder (R), data base_29"),
col = pal)
dev.off()
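# note: plotMDS here works on the per-sample median profiles, so point proximity
# reflects similar overall marker expression; with far fewer than 2000 markers,
# the top = 2000 argument effectively uses all of them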
###############################
# export transformed data files
###############################
for (i in 1:length(data)) {
filename <- paste0("../../data_transformed/", dataset, "/",
gsub("\\.fcs$", "_transf.fcs", files[i]))
write.FCS(flowFrame(data[[i]]), filename)
}
###########################################################################
# generate .csv files with input arguments for CellCnn (in required format)
###########################################################################
files_transf <- gsub("\\.fcs$", "_transf.fcs", files)
# create data frame of sample names and conditions (for CellCnn input .csv file)
label <- as.numeric(as.factor(condition)) - 1
label
df_samples <- data.frame(fcs_filename = files_transf, label = label)
df_samples
# re-arrange alphabetically (otherwise CellCnn reads input files in incorrect order)
df_samples <- df_samples[order(df_samples$fcs_filename), ]
df_samples
# create data frame of column names (markers) (for CellCnn input .csv file)
df_markers <- t(data.frame(marker_names))
df_markers
# save as .csv files
write.csv(df_samples, paste0("../../inputs/", dataset, "/input_samples.csv"),
quote = FALSE, row.names = FALSE)
# need to use 'write.table' to allow removing column names
write.table(df_markers, paste0("../../inputs/", dataset, "/input_markers.csv"),
sep = ",", quote = FALSE, row.names = FALSE, col.names = FALSE)
###############################
# run CellCnn from command line
###############################
# for installation instructions and examples see: https://github.com/eiriniar/CellCnn
DIR_CellCnn <- "../../../../../CyTOF/differential/CellCnn/CellCnn/"
# run main analysis
cmd <- paste("python", paste0(DIR_CellCnn, "cellCnn/run_analysis.py"),
paste0("-f ../../inputs/", dataset, "/input_samples.csv"),
paste0("-m ../../inputs/", dataset, "/input_markers.csv"),
paste0("-i ../../data_transformed/", dataset, "/"),
paste0("-o ../../out_CellCnn/", dataset, "/"),
"--export_csv --group_a NR --group_b R")
runtime_main <- system.time(
system(cmd)
)
runtime_main
sink(paste0("../../runtime/", dataset, "/runtime_main.txt"))
runtime_main
sink()
# export selected cells
cmd <- paste("python", paste0(DIR_CellCnn, "cellCnn/run_analysis.py"),
paste0("-f ../../inputs/", dataset, "/input_samples.csv"),
paste0("-m ../../inputs/", dataset, "/input_markers.csv"),
paste0("-i ../../data_transformed/", dataset, "/"),
paste0("-o ../../out_CellCnn/", dataset, "/"),
"--plot",
"--group_a NR --group_b R",
"--filter_response_thres 0.3 --load_results --export_selected_cells")
runtime_select <- system.time(
system(cmd)
)
runtime_select
sink(paste0("../../runtime/", dataset, "/runtime_select.txt"))
runtime_select
sink()
#########################################################
# analysis: load results for selected filters/populations
#########################################################
files_sel <- gsub("_transf\\.fcs$", "_transf_selected_cells.csv", files_transf)
fn_sel <- paste0("../../out_CellCnn/", dataset, "/selected_cells/", files_sel)
# check files are in same order as previously
data.frame(fn_sel, samples)
res_sel <- lapply(fn_sel, read.csv)
#######################################################################
# analysis: calculate summary statistics for selected filter/population
#######################################################################
# note: top filter only (if multiple)
n_cells <- sapply(res_sel, nrow)
n_cells
n_selected <- sapply(res_sel, function(d) sum(d[, 2])) # top filter only (if multiple)
n_selected
# create data frame of results
res <- data.frame(
sample = factor(samples),
condition = factor(condition),
n_cells = n_cells,
n_selected = n_selected,
proportion = n_selected / n_cells,
prop_pct = round(n_selected / n_cells * 100, 2)
)
res
# results per group
sem <- function(x) sd(x) / sqrt(length(x))
res %>%
group_by(condition) %>%
summarize(mean(prop_pct),
sd(prop_pct),
sem(prop_pct),
median(prop_pct),
mad(prop_pct)) ->
res_cond
res_cond
# save results
sink(paste0("../../results/", dataset, "/summary_statistics_selected_filter_by_sample.txt"))
res
sink()
sink(paste0("../../results/", dataset, "/summary_statistics_selected_filter_by_group.txt"))
t(res_cond)
sink()
##########################################################################
# analysis: statistical tests using generalized linear mixed models (GLMM)
##########################################################################
# note: top filter only (if multiple)
# define model formula
# (using observation-level random effects model to account for overdispersion, i.e. random
# intercept term for each sample; see Harrison (2015): https://peerj.com/articles/1114/)
formula_glmer <- proportion ~ condition + (1 | sample)
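# note: with the binomial family and weights = n_cells, modelling 'proportion' is
# equivalent to modelling n_selected successes out of n_cells trials; the (1 | sample)
# term gives each sample its own intercept, absorbing extra-binomial variation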
# fit model
fit <- glmer(formula_glmer, weights = n_cells, family = binomial, data = res)
fit
summary(fit)
# hypothesis test and p-value
contrast <- as.matrix(t(data.frame(conditionR = c(0, 1))))
contrast
hyp <- glht(fit, linfct = contrast)
hyp
summary(hyp)
p_val <- summary(hyp)$test$pvalues
p_val
# save results
sink(paste0("../../results/", dataset, "/mixed_model_test_results.txt"))
summary(hyp)
sink()
#####################################################################################
# analysis: heatmap of marker expression for selected filter/population vs. all cells
#####################################################################################
# note: top filter only (if multiple)
# indices of cells in selected filter/population
ix_sel <- lapply(res_sel, function(r) {
which(r[, 2] == 1)
})
ix_sel
# load transformed data files and subset cells
fn_transf <- paste0("../../data_transformed/", dataset, "/", files_transf)
data <- lapply(fn_transf, function(f) {
exprs(read.FCS(f, transformation = FALSE, truncate_max_range = FALSE))
})
data_sel <- mapply(function(f, ix) {
d <- exprs(read.FCS(f, transformation = FALSE, truncate_max_range = FALSE))
d_sel <- d[ix, , drop = FALSE]
}, fn_transf, ix_sel)
# marker columns only
data_markers <- lapply(data, function(d) {
d[, markers_ix, drop = FALSE]
})
data_sel_markers <- lapply(data_sel, function(d) {
d[, markers_ix, drop = FALSE]
})
# check
t(sapply(data_markers, colnames))
t(sapply(data_sel_markers, colnames))
# calculate median marker expression (pooled cells)
meds <- apply(do.call(rbind, data_markers), 2, median)
meds_sel <- apply(do.call(rbind, data_sel_markers), 2, median)
meds
meds_sel
# # alternatively: calculate median marker expression by sample (i.e. samples equally weighted)
# meds <- t(sapply(data_markers, function(d) {
# apply(d, 2, median)
# }))
#
# meds_sel <- t(sapply(data_sel_markers, function(d) {
# apply(d, 2, median)
# }))
#
# rownames(meds) <- samples
# rownames(meds_sel) <- samples
#
# meds
# meds_sel
#
#
# # calculate overall medians (median of sample medians; i.e. we are weighting samples equally)
# meds_group <- apply(meds, 2, median)
# meds_sel_group <- apply(meds_sel, 2, median)
#
# meds_group
# meds_sel_group
# meds_plot <- rbind(meds_group, meds_sel_group)
# rownames(meds_plot) <- c("all cells", "selected population")
# create heatmap
meds_plot <- rbind(meds, meds_sel)
rownames(meds_plot) <- c("all cells", "selected")
pheatmap(meds_plot,
color = colorRampPalette(brewer.pal(7, "YlGnBu"))(100),
cluster_rows = FALSE, cluster_cols = FALSE,
main = "Median (transformed) marker expression: all cells vs. selected population",
filename = paste0("../../plots/", dataset, "/heatmap_marker_expression.pdf"),
width = 12, height = 2.5)
###################
# copy output files
###################
SAVE_DIR <- paste0("../../files_for_Carsten/", dataset)
system(paste0("cp ../../results/", dataset, "/mixed_model_test_results.txt", " ", SAVE_DIR))
system(paste0("cp ../../results/", dataset, "/summary_statistics_selected_filter_by_group.txt", " ", SAVE_DIR))
system(paste0("cp ../../plots/", dataset, "/heatmap_marker_expression.pdf", " ", SAVE_DIR))
system(paste0("cp ../../plots/", dataset, "/MDS_plot_condition_batch.pdf", " ", SAVE_DIR))
system(paste0("cp ../../out_CellCnn/", dataset, "/plots/selected_population_boxplot_filter*", " ", SAVE_DIR))
system(paste0("cp ../../out_CellCnn/", dataset, "/plots/selected_population_distribution_filter*", " ", SAVE_DIR))
system(paste0("cp -r ../../out_CellCnn/", dataset, "/selected_cells", " ", SAVE_DIR))
system("rm Rplots.pdf")
| /panel2/analysis_CellCnn_PD1_panel2_CD4_Tmem_cells_base_combined.R | permissive | zky17715002/PD1_analysis_CellCnn | R | false | false | 14,184 | r | ##########################################################################################
countByOverlaps <- function(features, reads, ignore.strand, inter.feature) {
    ## Note: this counting mode does not work in parallel
countOverlaps(features, reads, ignore.strand=ignore.strand)
}
#' Perform overlap queries between reads and genomic features by bins
#'
#' summarizeOverlapsByBins extends
#' \link[GenomicAlignments:summarizeOverlaps-methods]{summarizeOverlaps} by
#' splitting each feature into bins of a fixed window size and step before
#' running the overlap queries. For each feature it returns the per-bin counts
#' summarized by signalSummaryFUN.
#'
#'
#' @param targetRegions A \link[GenomicRanges:GRanges-class]{GRanges} object of
#' genomic regions of interest.
#' @param reads A \link[GenomicRanges:GRanges-class]{GRanges},
#' \link[GenomicRanges:GRangesList-class]{GRangesList}
#' \link[GenomicAlignments:GAlignments-class]{GAlignments},
#' \link[GenomicAlignments:GAlignmentsList-class]{GAlignmentsList},
#' \link[GenomicAlignments:GAlignmentPairs-class]{GAlignmentPairs} or
#' \link[Rsamtools:BamFile-class]{BamFileList} object that represents the data
#' to be counted by
#' \code{\link[GenomicAlignments:summarizeOverlaps-methods]{summarizeOverlaps}}.
#' @param windowSize Size of windows
#' @param step Step of windows
#' @param signalSummaryFUN A function applied to the per-bin counts within each
#' feature; it must return a single numeric value (e.g. max or mean).
#' @param mode mode can be one of the pre-defined count methods; see
#' \link[GenomicAlignments:summarizeOverlaps-methods]{summarizeOverlaps}.
#' The default is countByOverlaps, an alias of countOverlaps(features, reads,
#' ignore.strand=ignore.strand).
#' @param ... Additional arguments passed to
#' \code{\link[GenomicAlignments:summarizeOverlaps-methods]{summarizeOverlaps}}.
#' @return A
#' \link[SummarizedExperiment:RangedSummarizedExperiment-class]{RangedSummarizedExperiment}
#' object. The assays slot holds the counts, rowRanges holds the annotation
#' from features.
#' @author Jianhong Ou
#' @keywords misc
#' @export
#' @importFrom GenomicAlignments summarizeOverlaps
#' @importFrom SummarizedExperiment colData SummarizedExperiment
#' @importFrom S4Vectors SimpleList aggregate
#' @examples
#'
#' fls <- list.files(system.file("extdata", package="GenomicAlignments"),
#' recursive=TRUE, pattern="*bam$", full=TRUE)
#' names(fls) <- basename(fls)
#' genes <- GRanges(
#' seqnames = c(rep("chr2L", 4), rep("chr2R", 5), rep("chr3L", 2)),
#' ranges = IRanges(c(1000, 3000, 4000, 7000, 2000, 3000, 3600,
#' 4000, 7500, 5000, 5400),
#' width=c(rep(500, 3), 600, 900, 500, 300, 900,
#' 300, 500, 500),
#' names=letters[1:11]))
#' se <- summarizeOverlapsByBins(genes, fls, windowSize=50, step=10)
#'
summarizeOverlapsByBins <- function(targetRegions, reads,
windowSize=50, step=10,
signalSummaryFUN=max,
mode=countByOverlaps, ...){
stopifnot(is(targetRegions, "GRanges"))
stopifnot(is.function(signalSummaryFUN))
stopifnot(length(reads)>1)
if(length(names(targetRegions))==0 ||
any(duplicated(names(targetRegions)))){
stop("duplicated or null targetRegions names.")
}
checkFun <- signalSummaryFUN(1:10)
if(length(checkFun)!=1){
stop("the output of signalSummaryFUN must be a vector with length 1")
}
if(!inherits(checkFun, c("numeric", "integer"))){
stop("the output of signalSummaryFUN must be a numeric.")
}
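  ## e.g. signalSummaryFUN = max (the default) and mean both return a single
  ## numeric per feature, so they pass the two checks above;
  ## function(x) unname(quantile(x, 0.9)) would also qualify.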
  ## split targetRegions into bins of width windowSize at the given step
  ## (the commented-out block below is the old manual implementation,
  ## superseded by the tileGRanges() call further down)
# if(any(width(targetRegions)<windowSize) | any(width(targetRegions)<step)){
# warning("Some of targetRegions are smaller than windowSize or step.",
# "They will be removed.")
# }
# targetRegions <- targetRegions[width(targetRegions)>=windowSize]
# targetRegions <- targetRegions[width(targetRegions)>=step]
# tileTargetRanges <- tile(x=ranges(targetRegions), width=step)
# nt <- elementNROWS(tileTargetRanges)
# tileTargetRanges.end <- rep(end(targetRegions), nt)
# tileTargetRanges <- unlist(tileTargetRanges)
# width(tileTargetRanges) <- windowSize
# tileTargetRegions <- GRanges(rep(seqnames(targetRegions), nt),
# tileTargetRanges,
# rep(strand(targetRegions), nt),
# oid=rep(1:length(targetRegions), nt))
# tileTargetRegions <- tileTargetRegions[end(tileTargetRanges) <=
# tileTargetRanges.end]
tileTargetRegions <- tileGRanges(targetRegions, windowSize,
step, keepPartialWindow=FALSE)
se <- summarizeOverlaps(features=tileTargetRegions, reads=reads,
mode=mode, ...)
cnts <- aggregate(x=assay(se),
by=list(oid_USED_BY_SE_OU=tileTargetRegions$oid),
FUN=signalSummaryFUN, drop=FALSE)
se.rowRanges <- targetRegions[cnts$oid_USED_BY_SE_OU]
rownames(cnts) <- names(se.rowRanges)
cnts$oid_USED_BY_SE_OU <- NULL
cnts <- as.matrix(cnts)
SummarizedExperiment(assays=SimpleList(counts=cnts),
rowRanges=se.rowRanges,
colData=colData(se))
}
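## A minimal usage sketch for the returned object ('se' from the roxygen example
## above; the standard SummarizedExperiment accessors apply):
## assay(se)      # features x samples matrix of summarized bin counts
## rowRanges(se)  # annotation of the original targetRegions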
| /R/summarizeOverlapsByBins.R | no_license | jianhong/ChIPpeakAnno | R | false | false | 5387 | r |
#
# If you are going to use results produced by the scripts please do cite the
# SRMService R package by providing the following URL
# www.github.com/protViz/SRMService
# by W.E. Wolski, J. Grossmann, C. Panse
#
library(limma)
library(SRMService)
### Protein groups file
packagedir <- path.package("SRMService")
####! set the path to the proteinGroups.txt file.
proteinGroupsFile <-
file.path(packagedir, "samples/proteinGroups/proteinGroups.txt")
###
protein <- readr::read_tsv(proteinGroupsFile)
colnames(protein) <- make.names(colnames(protein))
tmp <- cumsum(rev(table(protein$Peptides)))
barplot(tmp[(length(tmp) - 5):length(tmp)], ylim = c(0, length(protein$Peptides)),
        xlab = 'nr of proteins with at least # peptides')
##
rawF <-
  gsub("Intensity\\.", "", grep("Intensity\\.", colnames(protein), value = TRUE))
condition <- quantable::split2table(rawF)[, 3]
# Raw.file <- c("23bb","23bcd","23ddd","","","")
# condition <- c("baseline","baseline","w1","w1")
annotation <- data.frame(
  Raw.file = rawF,
  Condition = condition,
  BioReplicate = paste0("X", seq_along(condition)),
  Run = seq_along(condition),
  IsotopeLabelType = rep("L", length(condition)),
  stringsAsFactors = FALSE
)
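# Optional sanity checks before fitting (hedged: they assume raw file names
# should be unique; uncomment to use):
# stopifnot(!any(duplicated(annotation$Raw.file)))
# table(annotation$Condition)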
###################################
### Configuration section
resultdir <- "output"
dir.create(resultdir)
#fix(annotation)
Experimentname = ""
nrNas = sum(!is.na(annotation$Condition)) - 1
nrNas = 5
nrPeptides = 2
reference = unique(annotation$Condition)[1]
reference = "WT"
qvalueThreshold = 0.05
qfoldchange = 1
write.table(annotation, file = file.path(resultdir, "annotationused.txt"))
####### END of user configuration ##
# source("R/Grp2Analysis.R")
grp2 <- Grp2Analysis(
  annotation,
  Experimentname,   # use the configured name above rather than the literal string
  maxNA = nrNas,
  nrPeptides = nrPeptides,
  reference = reference,
  numberOfProteinClusters = 20
)
grp2$getDesignMatrix()
grp2$setMQProteinGroups(protein)
grp2$setQValueThresholds(qvalue = qvalueThreshold, qfoldchange = qfoldchange)
mqQuantMatrixGRP2 <- grp2
head(mqQuantMatrixGRP2$getModPValuesCI())
usethis::use_data(mqQuantMatrixGRP2, overwrite = TRUE)
#readr::write_tsv(grp2$getResultTable(), path=file.path(resultdir,"pValues.csv"))
## REMOVE TO RENDER
# rmarkdown::render("vignettes/Grp2AnalysisHeatmap3.Rmd",bookdown::pdf_document2(), params=list(grp = grp2))
# rmarkdown::render("vignettes/Grp2Analysis.Rmd",bookdown::pdf_document2(), params=list(grp = grp2))
| /inst/RunScripts/Run_MQ_QuantTwoGroupAnalysis.R | no_license | protViz/SRMService | R | false | false | 2470 | r |
setClass(
Class = "SamplingPatternCentroids",
contains = "SamplingPatternPurposive"
)
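## A small introspection sketch (assumes the methods package, as usual for S4 code):
## extends("SamplingPatternCentroids")    # shows the inheritance chain
## getSlots("SamplingPatternCentroids")   # slots inherited from the parent class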
| /R/class_SamplingPatternCentroids.R | no_license | cran/spcosa | R | false | false | 98 | r |
Class = "SamplingPatternCentroids",
contains = "SamplingPatternPurposive"
)
|
library(ggplot2)
library(ggthemes)
library(MASS)
# ==========================================================================================
# Advanced Grammar of Graphics --------------------------------------------
# ==========================================================================================
# Using either qplot or ggplot typically depends on the quality of the graphics that you are
# seeking. qplot is great for quick data exploration; however, ggplot offers far more
# choice in the refinement of your visualisations. It is also important to note the slight
# variations in syntax between the two functions. Here we focus on ggplot, as at time of writing,
# qplot is not available for R version 4.0.2:
gg_static <- ggplot(data = mtcars, mapping = aes(x = hp)) +
ggtitle("Horsepower") +
labs(x = "HP")
mtcars_mm <- data.frame(mm = c(mean(mtcars$hp), median(mtcars$hp)),
stats = factor(c("mean", "median")))
gg_lines <- geom_vline(mapping = aes(xintercept = mm, linetype = stats),
show.legend = TRUE, data = mtcars_mm)
gg_static +
geom_histogram(colour = "black", fill = "white",
breaks = seq(0, 400, 25), closed = "right") +
gg_lines +
scale_linetype_manual(values = c(2, 3)) +
labs(linetype = "")
# ==========================================================================================
# Smoothing and Shading ---------------------------------------------------
# ==========================================================================================
# Adding LOESS Trends -----------------------------------------------------
# When you're looking at raw data, it's sometimes difficult to get an overall impression of
# trends without fitting a parametric model, which means making assumptions about the nature of
# of these trends. This is where nonparametric smoothing comes in - you can use certain methods
# to determine how your data appear to behave without fitting a specific model. However, the
# trade-off is that you are not provided with any specific numeric details of the
# relationships between response and predictor variables (since you're not estimating the
# coefficients such as slopes or intercepts) and you lose any reliable ability to extrapolate.
# Locally weighted scatter plot smoothing (LOESS) is a nonparametric smoothing technique that
# produces the smoothed trend by using regression methods on localised subsets of the data,
# step-by-step over the entire range of the explanatory variable. Using the MASS package, let us
# first create a new data object with any missing values deleted to avoid NA warnings.
surv <- na.omit(survey[, c("Sex", "Wr.Hnd", "Height")])
# then we can plot:
ggplot(surv, aes(x = Wr.Hnd, y = Height)) +
geom_point(aes(col = Sex, shape = Sex)) +
geom_smooth(method = "loess")
# producing the same plot in base R takes significantly more code and therefore time.
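# For comparison, a rough base-R equivalent (hedged: a single overall loess() fit
# with its default span, rather than ggplot's per-call smoother settings):
plot(surv$Wr.Hnd, surv$Height, col = c("red", "blue")[surv$Sex],
     pch = c(16, 17)[surv$Sex], xlab = "Wr.Hnd", ylab = "Height")
lo <- loess(Height ~ Wr.Hnd, data = surv)
ord <- order(surv$Wr.Hnd)
lines(surv$Wr.Hnd[ord], fitted(lo)[ord])
legend("topleft", legend = levels(surv$Sex), col = c("red", "blue"),
       pch = c(16, 17))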
# Constructing Smooth Density Estimates -----------------------------------
# Kernel density estimation (KDE) is a method for producing a smooth estimate of a probability
# density function, based on observed data. Briefly, KDE involves assigning a scaled
# probability function (the kernel) to each observation in a data set and summing them all to
# give an impression of the distribution of the data set as a whole. It's basically a
# sophisticated version of a histogram.
# Let's illustrate:
ggplot(data = airquality, aes(x = Temp)) +
geom_density()
# Doing this in base R is relatively easy. But suppose you want to visualise the density of
# estimates for temprature separately according to the month of observation:
air <- airquality
# We also have to recode Month from a numeric vector to a factor:
air$Month <- factor(air$Month,
labels = c("May", "June", "July", "August", "September"))
# Then:
ggplot(data = air, aes(x = Temp, fill = Month)) +
geom_density(alpha = .4) +
ggtitle("Monthly temperature probability densities") +
labs(x = "Temp (F)", y = "Kernel estimate") +
theme_hc()
# ==========================================================================================
# Multiple Plots and Variable-Mapped Facets -------------------------------
# ==========================================================================================
# A quick way to organise multiple plots in a single image is to use the gridExtra package.
plot_1 <- ggplot(air, aes(x = 1:nrow(air), y = Temp)) +
geom_line(aes(col = Month)) +
geom_point(aes(col = Month, size = Wind)) +
geom_smooth(method = "loess", col = "black") +
labs(x = "Time (days)", y = "Temp (F)")
plot_2 <- ggplot(air, aes(x = Solar.R, fill = Month)) +
geom_density(alpha = .4) +
labs(x = expression(paste("Solar radiation (", ring(A),")")),
y = "Kernel estimate")
plot_3 <- ggplot(air, aes(x = Wind, y = Temp, colour = Month)) +
geom_point(aes(size = Ozone)) +
geom_smooth(method = "lm", level = .9, fullrange = FALSE, alpha = .2) +
labs(x = "Wind speed (MPH)", y = expression(paste("Temp (C", degree, ")")))
gridExtra::grid.arrange(plot_1, plot_2, plot_3)
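# grid.arrange() also accepts a layout matrix for unequal panel sizes, e.g.
# (hedged, purely illustrative):
# gridExtra::grid.arrange(plot_1, plot_2, plot_3,
#                         layout_matrix = rbind(c(1, 1), c(2, 3)))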
# Facets Mapped to a Categorical Variable ---------------------------------
# Often, when exploring a data set, you'll want to split a plot into several panels, one per
# level of a categorical variable. This behaviour is referred to as faceting.
plot_facet <- ggplot(data = air, aes(x = Temp, fill = Month)) +
geom_density(alpha = .4) +
ggtitle("Monthly temp probability densities") +
labs(x = "Temp (F)", y = "Kernal estimate")
# Note: ~ can be interpreted as saying 'by':
plot_facet + facet_wrap(~ Month)
plot_facet + facet_wrap(~ Month, scales = "free")
plot_facet + facet_wrap(~ Month, nrow = 1)
# Remember facet_grid(), which facets on two variables (rows ~ columns); see the sketch below.
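# A quick facet_grid() sketch on the same air data (hedged: WindClass is a
# variable created here purely for illustration):
air$WindClass <- cut(air$Wind, breaks = c(0, 8, 12, Inf),
                     labels = c("calm", "breezy", "windy"))
ggplot(air, aes(x = Temp)) +
  geom_density(fill = "grey80") +
  facet_grid(WindClass ~ Month)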
# ==========================================================================================
# Interactive Tools in ggvis ----------------------------------------------
# ==========================================================================================
# The ggvis package allows you to design flexible statistical plots that the end user can
# interact with.
library(ggvis)
surv <- na.omit(survey[, c("Sex", "Wr.Hnd", "Height", "Smoke", "Exer")])
surv %>% ggvis(x = ~Height) %>%
layer_histograms(width = input_slider(1, 15, label = "Binwidth:"), fill:="gray")
# A more interesting visualisation:
filler <- input_radiobuttons(c("Sex" = "Sex", "Smoking status" = "Smoke",
"Exercise frequency" = "Exer"), map = as.name,
label = "Colour points by...")
sizer <- input_slider(10, 300, label = "Point size:")
opacity <- input_slider(0.1, 1, label = "Opacity:")
surv %>%
ggvis(x = ~Wr.Hnd, y = ~Height, fill = filler,
size := sizer, opacity := opacity) %>%
layer_points() %>%
add_axis("x", title = "Handspan") %>%
add_legend("fill", title = "")
# End file ---------------------------------------------------------------- | /Chapter_24.R | no_license | jSoboil/BookofR | R | false | false | 6946 | r |
#Harper
require("minpack.lm")
data<-read.csv("../../Data/converted harper 1961.csv",header =F)
# in this dataset 'nil' becomes 0 and 'trace' or 'n.d.' becomes NA;
# where a range is given, its midpoint is used
data<-as.data.frame(data)
viability<-as.vector(as.matrix((data[4:10,])))
formatted_data<-as.data.frame(matrix(nrow=length(viability),ncol = 9))
formatted_data[,5]<-viability
formatted_data[,4]<-rep(c(1/60/60/24,1/12/24,1/2/24,1/24,4/24,6/24,23/24),length(viability)/7) # sampling times in days: 1 s, 5 min, 30 min, 1 h, 4 h, 6 h, 23 h
formatted_data[,6]<-rep(1:11,each=7)
formatted_data[,1]<-rep(as.vector(as.matrix((data[1,]))),each=7)
formatted_data[,2]<-rep(as.vector(as.matrix((data[2,]))),each=7)
formatted_data[,3]<-rep(as.vector(as.matrix((data[3,]))),each=7)
colnames(formatted_data)<-c("Temp","Hum","Rep","Time","Viability","Experiment","b1","v0","AH")
est0=6.11             # saturation vapour pressure at 0 degC (hPa)
lrv= 2256000/461.15   # latent heat of vaporisation / gas constant for water vapour (K)
# Clausius-Clapeyron approximation: absolute-humidity proxy from temperature and relative humidity
formatted_data$AH<-est0*exp(lrv*((1/273.15)-(1/(formatted_data$Temp+273.15))))*formatted_data$Hum/100
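# Worked check of the conversion (hedged: illustrative numbers only): at 20 degC
# and 50% RH, est0*exp(lrv*(1/273.15 - 1/293.15))*0.5 is about 10.4 hPa, i.e. half
# of the ~20.7 hPa saturation vapour pressure this constant-L approximation gives
# (tabulated values are nearer 23.4 hPa).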
times<-seq(0,1,length.out = 100)
par(mfrow=c(3,4))
# fit an exponential viability decay V(t) = v0 * exp(-b * t) to each experiment
for (i in (unique(formatted_data$Experiment))){
data_subset<-formatted_data[which(formatted_data[,6]==i),]
model<-nlsLM(Viability~v0*exp(-b*Time),data=data_subset,start=list(v0=100,b=0),lower = c(v0=0 ,b=0),upper = c(v0=100 ,b=5000))
formatted_data[which(formatted_data[,6]==i),7]<-summary(model)$parameters[2,1]
formatted_data[which(formatted_data[,6]==i),8]<-summary(model)$parameters[1,1]
plot(times,summary(model)$parameters[1,1]*exp(-times*summary(model)$parameters[2,1]),"s",ylim=c(0,100))
points(data_subset$Time,data_subset$Viability,col="blue",cex=1)
}
shrunk_data<-formatted_data[seq(1,77,by=7),c(1,2,3,4,5,7,8,9)]
par(mfrow=c(2,3))
plot(shrunk_data$Temp,shrunk_data$b1)
plot(shrunk_data$Temp,shrunk_data$v0)
plot(shrunk_data$Hum,shrunk_data$b1)
plot(shrunk_data$Hum,shrunk_data$v0)
plot(shrunk_data$AH,shrunk_data$b1)
plot(shrunk_data$AH,shrunk_data$v0)
##TEMPERATURE
# expand rows so that each replicate contributes one observation to the fits below
shrunk_data_repeats<-as.data.frame(matrix(NA,ncol=ncol(shrunk_data),nrow = sum(shrunk_data$Rep)))
colnames(shrunk_data_repeats)=colnames(shrunk_data)
index<-0
for (i in 1:nrow(shrunk_data)) {
shrunk_data_repeats[(index+1):(index+shrunk_data$Rep[i]) , ] <-shrunk_data[i,]
index<-index+shrunk_data$Rep[i]
}
Temps<-seq(-10,40,length.out = 1000)
Temp_Model<-nlsLM(b1~b0*exp(Temp*g),data=shrunk_data_repeats,start = list(g=0,b0=0))
g<-summary(Temp_Model)$coefficients[1,1]
b0<-summary(Temp_Model)$coefficients[2,1]
plot(Temps,b0*exp(g*Temps),"s")
points(shrunk_data_repeats$Temp,shrunk_data_repeats$b1)
# result: b responds to temperature as b0 * exp(g * Temp)
b0
#9.079
g
#0.085
# q = -b, so q0 = -b0
#HUMIDITY
Hums<-seq(0,100,length.out = 1000)
Hum_Model<-nlsLM(b1~b0*exp(Hum*g),data=shrunk_data_repeats,start = list(g=0,b0=0))
g<-summary(Hum_Model)$coefficients[1,1]
b0<-summary(Hum_Model)$coefficients[2,1]
plot(Hums,b0*exp(g*Hums),"s")
points(shrunk_data_repeats$Hum,shrunk_data_repeats$b1)
# result: b responds to relative humidity as b0 * exp(g * Hum)
b0
#21.98
g
#0.0209
###AH
AH<-seq(min(formatted_data$AH),max(formatted_data$AH),length.out = 1000)
AH_Model<-nlsLM(b1~b0*exp(AH*g),data=shrunk_data_repeats,start = list(g=0,b0=0.000000))
# refit with non-zero starting values (the zero starts above can fail to converge)
AH_Model<-nlsLM(b1~b0*exp(AH*g),data=shrunk_data_repeats,start = list(g=0.1,b0=0.0000001))
g<-summary(AH_Model)$coefficients[1,1]
b0<-summary(AH_Model)$coefficients[2,1]
g
#0.06
b0
#30.162
#plot(shrunk_data_repeats$AH,shrunk_data_repeats$q1)
plot(AH,b0*exp(g*AH),"s")
points(shrunk_data_repeats$AH,shrunk_data_repeats$b1)
| /Code/datamanipulation/findingb.R | no_license | rbak54/Climate_Virus | R | false | false | 3564 | r |
require("minpack.lm")
data<-read.csv("../../Data/converted harper 1961.csv",header =F)
# in this dataset nil becomes 0, trace or n.d becomes NA,and the midpoint of the range is used
#where there is a range I have used the midpoint
data<-as.data.frame(data)
viability<-as.vector(as.matrix((data[4:10,])))
formatted_data<-as.data.frame(matrix(nrow=length(viability),ncol = 9))
formatted_data[,5]<-viability
formatted_data[,4]<-rep(c(1/60/60/24,1/12/24,1/2/24,1/24,4/24,6/24,23/24),length(viability)/7)
formatted_data[,6]<-rep(1:11,each=7)
formatted_data[,1]<-rep(as.vector(as.matrix((data[1,]))),each=7)
formatted_data[,2]<-rep(as.vector(as.matrix((data[2,]))),each=7)
formatted_data[,3]<-rep(as.vector(as.matrix((data[3,]))),each=7)
colnames(formatted_data)<-c("Temp","Hum","Rep","Time","Viability","Experiment","b1","v0","AH")
est0=6.11
lrv= 2256000/461.15
formatted_data$AH<-est0*exp(lrv*((1/273.15)-(1/(formatted_data$Temp+273.15))))*formatted_data$Hum/100
times<-seq(0,1,length.out = 100)
par(mfrow=c(3,4))
for (i in (unique(formatted_data$Experiment))){
data_subset<-formatted_data[which(formatted_data[,6]==i),]
model<-nlsLM(Viability~v0*exp(-b*Time),data=data_subset,start=list(v0=100,b=0),lower = c(v0=0 ,b=0),upper = c(v0=100 ,b=5000))
formatted_data[which(formatted_data[,6]==i),7]<-summary(model)$parameters[2,1]
formatted_data[which(formatted_data[,6]==i),8]<-summary(model)$parameters[1,1]
plot(times,summary(model)$parameters[1,1]*exp(-times*summary(model)$parameters[2,1]),"s",ylim=c(0,100))
points(data_subset$Time,data_subset$Viability,col="blue",cex=1)
}
shrunk_data<-formatted_data[seq(1,77,by=7),c(1,2,3,4,5,7,8,9)]
par(mfrow=c(2,3))
plot(shrunk_data$Temp,shrunk_data$b1)
plot(shrunk_data$Temp,shrunk_data$v0)
plot(shrunk_data$Hum,shrunk_data$b1)
plot(shrunk_data$Hum,shrunk_data$v0)
plot(shrunk_data$AH,shrunk_data$b1)
plot(shrunk_data$AH,shrunk_data$v0)
##TEMPERATURE
shrunk_data_repeats<-as.data.frame(matrix(NA,ncol=ncol(shrunk_data),nrow = sum(shrunk_data$Rep)))
colnames(shrunk_data_repeats)=colnames(shrunk_data)
index<-0
for (i in 1:nrow(shrunk_data)) {
shrunk_data_repeats[(index+1):(index+shrunk_data$Rep[i]) , ] <-shrunk_data[i,]
index<-index+shrunk_data$Rep[i]
}
Temps<-seq(-10,40,length.out = 1000)
Temp_Model<-nlsLM(b1~b0*exp(Temp*g),data=shrunk_data_repeats,start = list(g=0,b0=0))
g<-summary(Temp_Model)$coefficients[1,1]
b0<-summary(Temp_Model)$coefficients[2,1]
plot(Temps,b0*exp(g*Temps),"s")
points(shrunk_data_repeats$Temp,shrunk_data_repeats$b1)
#result of all this is that equation for how b responds to temp is
#(b0 * exp (g * Temps))
b0
#9.079
g
#0.085
#q=-b so q0=-b0
#HUMIDITY
Hums<-seq(0,100,length.out = 1000)
Hum_Model<-nlsLM(b1~b0*exp(Hum*g),data=shrunk_data_repeats,start = list(g=0,b0=0))
g<-summary(Hum_Model)$coefficients[1,1]
b0<-summary(Hum_Model)$coefficients[2,1]
plot(Hums,b0*exp(g*Hums),"s")
points(shrunk_data_repeats$Hum,shrunk_data_repeats$b1)
#result of all this is that equation for how b responds to temp is
#(b0 * exp (g * Temps))
b0
#21.98
g
#0.0209
###AH
AH<-seq(min(formatted_data$AH),max(formatted_data$AH),length.out = 1000)
AH_Model<-nlsLM(b1~b0*exp(AH*g),data=shrunk_data_repeats,start = list(g=0,b0=0.000000))
AH_Model<-nlsLM(b1~b0*exp(AH*g),data=shrunk_data_repeats,start = list(g=0.1,b0=0.0000001))
g<-summary(AH_Model)$coefficients[1,1]
b0<-summary(AH_Model)$coefficients[2,1]
g
#0.06
b0
#30.162
#plot(shrunk_data_repeats$AH,shrunk_data_repeats$q1)
plot(AH,b0*exp(g*AH),"s")
points(shrunk_data_repeats$AH,shrunk_data_repeats$b1)
|
####################
# TODO:
# - check species against the taxon table before inserting
# - merge subturf taxa
####################
#for loop
import_data <- function(file, con, merge_dictionary){
#browser()
print(file)
chkft <- c("pleuro","acro", "liver", "lichen", "litter" ,"soil", "rock", "totalVascular", "totalBryophytes", "totalLichen", "vegetationHeight", "mossHeight")
f <- readLines(file, warn = FALSE) %>%
if_else(condition = substring(., 1, 1) == "\"", true = {
gsub(pattern = "^\"", replacement = "", x = .) %>% #replace starting quote
gsub(pattern = "\"\"", replacement = "\"", x = .) %>% #replace double quotes
gsub(pattern = "\"\"$", replacement = "\"", x = .) %>% #replace double quotes that was part of a triple at end
#gsub(pattern = "(?<!\\)),\"$", replacement = ",", x = ., perl = TRUE) #-ve look behind fixes Veskre comment problem
gsub(pattern = ",\"$", replacement = ",", x = .) #-ve remove end quote
}, false = .) %>%
paste(collapse = "\n")
dat <- read.delim(text = f, sep = ";", dec = ",", stringsAsFactors = FALSE)#sep = ";", dec = ","
if(ncol(dat) > 10){
if(any(sapply(dat[, chkft], class) == "character"))
dat <- read.delim(text = f, sep = ";", dec = ".", stringsAsFactors = FALSE)#sep = ";", dec = "."
}else{
dat <- read.delim(text = f, sep = ",", dec = ".", stringsAsFactors = FALSE)
}
names(dat) <- make.names(names(dat))
dat <- dat %>%
filter(!is.na(turfID), turfID != "", turfID != "turfID") %>%
mutate(
turfID = trimws(turfID),
comment = as.character(comment),
year = as.integer(year)
)
head(dat)
names(dat)
Encoding(dat$comment) <- "latin1"
#remove numeric suffix on duplicates
names(dat) <- gsub("_\\d$", "", names(dat))
#extract turf data####
#fix typos in turfID
dat <- dat %>%
mutate(turfID = recode(turfID,
"515 TT4 247" = "515 TT4 274",
"277 TTC" = "286 TTC",
"192 TT4 29" = "192 TT4 299"))
turf <- dat %>%
select(turfID, matches("treat$"), one_of(c("originPlotID", "destinationPlotID"))) %>%
distinct() %>%
mutate_all(trimws)
turf
names(turf)
alreadyIn <- dbGetQuery(con,"select turfID from turfs")
newTurfs <- turf %>% anti_join(alreadyIn) #find which turfs IDs are not already in database
if(nrow(newTurfs) > 0) {
message("adding ", paste(newTurfs$turfID, collapse = " "), " new turfs" )
dbPadWriteTable(con, "turfs", newTurfs, row.names = FALSE, append = TRUE)
}
message("done turfs")
#subTurf env ####
subturfEnv <- dat %>%
filter(Measure != "Cover") %>%
select(turfID, subTurf = subPlot, year, pleuro, acro, liver, lichen, litter, soil, rock, comment) %>%
mutate(subTurf = as.integer(subTurf))
if(!is.null(dat$missing)){
bad = dat$missing[dat$Measure != "Cover"]
bad[is.na(bad)] <- ""
subturfEnv <- subturfEnv %>% mutate(bad = bad)
} else{
subturfEnv <- subturfEnv %>% mutate(bad = "")
}
subturfEnv
dbPadWriteTable(con, "subTurfEnvironment", subturfEnv, row.names = FALSE, append = TRUE)
nrow(subturfEnv)
#TurfEnv ####
turfEnv <- dat %>%
filter(Measure == "Cover") %>%
select(turfID, year, pleuro, acro, liver, lichen, litter, soil, rock, totalVascular, totalBryophytes, totalLichen, vegetationHeight, mossHeight, comment, recorder, date)
if(mode(turfEnv$mossHeight) == "character"){
turfEnv <- turfEnv %>%
mutate(mossHeight = gsub(",", ".", mossHeight),
mossHeight = as.numeric(mossHeight))
}
if(any(nchar(as.character(turfEnv$comment)) > 255, na.rm = TRUE)) {
stop ("more than 255 characters in a comment field in turfEnv")
}
dbPadWriteTable(con, "turfEnvironment", turfEnv, row.names = FALSE, append = TRUE)
nrow(turfEnv)
#TurfCommunity ####
spp <- dat %>%
filter(Measure == "Cover") %>%
select(turfID, year, (which(names(dat) == "recorder") + 1):(which(names(dat) == "pleuro") - 1)) %>%
gather(key = species, value = cover, -turfID, -year) %>%
filter(!is.na(cover), cover != 0) %>% #remove absent taxa
mutate(
cf = grepl("cf", cover, ignore.case = TRUE),
cover = gsub("cf", "", cover, ignore.case = TRUE) #move any CF to new column
)
#oddity search
spp %>% filter(is.na(as.numeric(cover))) %>% count(cover)
#merge synonyms
spp <- spp %>%
left_join(merge_dictionary, by = c("species" = "oldID")) %>%
mutate(newID = coalesce(newID, species)) %>%
select(turfID, year, species = newID, cover, cf) %>%
group_by(year, turfID, species) %>%
mutate(cover = as.numeric(cover)) %>%
summarise(cover = sum(cover)) %>% #aggregate taxa
filter(cover > 0) %>%
ungroup()
  # check for new taxa: every species must already be in the taxon table
spp_list <- dbGetQuery(conn = con, statement = "select species from taxon")
spp %>% anti_join(spp_list) %>% verify(nrow(.) == 0)
#inject
  initNrowTurfCommunity <- dbGetQuery(con, "select count(*) as n from turfCommunity")$n # take the count itself, not a 1x1 data frame, so stopifnot() below gets a logical
  dbPadWriteTable(con, "turfCommunity", spp)
  finalNrowTurfCommunity <- dbGetQuery(con, "select count(*) as n from turfCommunity")$n
  stopifnot(nrow(spp) == finalNrowTurfCommunity - initNrowTurfCommunity)
#subTurfCommunity ####
message("subturfcommunity")
subspp <- dat %>%
filter(Measure != "Cover") %>%
select(turfID, year, subTurf = subPlot, (which(names(dat) == "recorder") + 1):(which(names(dat) == "pleuro") -1)) %>%
mutate(subTurf = as.integer(subTurf)) %>%
gather(key = species, value = presence, -turfID, -subTurf, -year) %>%
filter(!is.na(presence), presence != 0, presence != "") #remove absent taxa
# #oddity search
subspp %>% count(presence)
# #merge synonyms
subspp <- subspp %>%
left_join(merge_dictionary, by = c("species" = "oldID")) %>%
mutate(newID = coalesce(newID, species)) %>%
select(turfID, subTurf, year, species = newID, presence) %>%
group_by(year, turfID, subTurf, species) %>%
summarise(presence = paste0(presence, collapse = "")) %>% #aggregate taxa
ungroup()
  # check for new taxa: every species must already be in the taxon table
subspp %>% anti_join(spp_list) %>% verify(nrow(.) == 0)
# subspp[subspp == 0] <- NA
# subsppX <- lapply(unique(mergedNames), function(sppname){
# species <- subspp[, names(subspp) == sppname, drop = FALSE]
# if (ncol(species) == 1) {
# return(species)
# } else {
# apply (species, 1, function(r) {
# occurence <- which(!is.na(r))
# if(length(occurence) == 0) return(NA)
# if(length(occurence) == 1) return(r[occurence])
# else {
# warning(paste("more than one species observation in same subplot!"))
# write.csv(data.frame(filename = n, species = sppname, occurence = r[occurence]), file = "cooccurence_log.csv", append = TRUE)
# return(r[occurence][1])
# }
# })
# }
# })
#
#
# subsppX <- setNames(as.data.frame(subsppX), unique(mergedNames))
# subspp <- cbind(subspp[, 1:3], subsppX)
  # Euphrasia-type annuals rule: an adult implies a juvenile stage and a juvenile implies a
  # seedling stage, i.e. adults = adults + juveniles + seedlings, j = j + s, s = s
seedlingSp <- c("Euph.fri", "Eup.fri","Eup.sp","Eup.str","Euph.fri","Euph.sp", "Euph.str","Euph.str.1", "Euph.wet", "Poa.ann","Thlaspi..arv","Com.ten","Gen.ten", "Rhi.min", "Cap.bur", "Mel.pra","Mel.sp","Mel.syl","Noc.cae","Ste.med","Thl.arv","Ver.arv")
subspp <- subspp %>%
mutate(
cf = grepl("cf", presence, ignore.case = TRUE),
fertile = grepl("F",presence, ignore.case = FALSE),
dominant = grepl("D",presence, ignore.case = TRUE),
vegetative = grepl("V",presence, ignore.case = TRUE),
seedling_1 = grepl("S",presence, ignore.case = TRUE),
seedling_n = stringr::str_extract(presence, pattern = "(?<=Sx)\\d+|\\d+(?=xS)"),
seedling_n = as.integer(seedling_n),
seedlings = case_when(
!is.na(seedling_n) ~ seedling_n,
seedling_1 ~ 1L,
TRUE ~ 0L
),
juvenile_1 = grepl("J",presence, ignore.case = TRUE),
juvenile_n = stringr::str_extract(presence, pattern = "(?<=Jx)\\d+|\\d+(?=xJ)"),
juvenile_n = as.integer(juvenile_n),
juvenile = case_when(
!is.na(juvenile_n) ~ juvenile_n,
juvenile_1 ~ 1L,
TRUE ~ 0L
),
adult = fertile|dominant|vegetative|grepl("1", presence) ) %>%
select(-c(seedling_1, seedling_n, juvenile_1, juvenile_n)) %>%
#########more annuals?
mutate(
juvenile = if_else(species %in% seedlingSp & juvenile == 0L & adult, 1L, juvenile),
seedlings = if_else(species %in% seedlingSp & seedlings == 0L & juvenile > 0L, juvenile, seedlings)
) %>%
mutate_at(vars(cf, fertile, dominant, vegetative, adult), as.integer)
#inject
  initNrowSubTurfCommunity <- dbGetQuery(con, "select count(*) as n from subturfCommunity")$n # count itself, not a 1x1 data frame
  dbPadWriteTable(con, "subturfCommunity", subspp)
  finalNrowSubTurfCommunity <- dbGetQuery(con, "select count(*) as n from subturfCommunity")$n
  stopifnot(nrow(subspp) == finalNrowSubTurfCommunity - initNrowSubTurfCommunity)
############### Vigdis seedling problem #only for 2011 data #############################
#
# id_seedling <- subspp %>%
# filter(year == 2011, species != "seedling") %>%
# group_by(turfID) %>%
# summarise(n_seedlings = sum(seedling))
#
# uid_seedlings <- subspp %>%
# filter(year == 2011, species == "seedling") %>%
# full_join(id_seedlings, by = turfID) %>%
# left_join(dat %>%
# filter(year == 2011, measure == "cover") %>%
# select(turfID, recorder))
#
#
# if(dat$year[1] == 2011 & FALSE){ #disable seedling error <---- FIX THIS!!!
# seed <- dat[dat$TTtreat != "" & dat$Measure != "Cover", c("turfID","subPlot", "year", "seedlings", "recorder")] #get data.frame of seedlings N1
# seed$subPlot <- as.integer(as.character(seed$subPlot))
# seed$turfID <- factor(seed$turfID)
# seedsum <- dbGetQuery(con, paste("select * from [number identified seedlings by subplot] where siteID='",dat$DestinationSite[1], "' and Year=2011", sep="")) #sqlQuery database for number of seedlings per subplot N2
# seed <- seed[order(seed$turfID, seed$subPlot),]
#
# head(seed)
# head(seedsum)
#
# seed <- seed[!paste(seed$turf, seed$subPlot) %in% setdiff(paste(seed$turf, seed$subPlot), paste(seedsum$turf, seedsum$subTurf)),]# then remove any missing rows as they have no species
#
# seed$seedlings[is.na(seed$seedlings)] <- 0
#
# seed$seedlings2 <- seed$seedlings
# seed$seedlings2[seed$recorder == "W"]<-seed$seedlings[seed$recorder == "W"]-seedsum$SumOfseedlings[seed$recorder == "W"]#for VV /W subplots n seedlings N1 = N1 - N2
#
# data.frame(seed$recorder, seed$seedlings, seedsum$SumOfseedlings, seed$seedlings2)
#
# #insert N1 into subTurfCommunity as unident seedling
#
# seed <- seed[seed$seedlings2 > 0,]
# seed <- data.frame(turfID = seed$turfID, year = seed$year, subTurf = seed$subPlot, species = "seed.unid", seedlings = seed$seedlings2, juvenile = 0,adult = 0,fertile = 0,vegetative = 0,dominant = 0, cf = 1)
# dbWriteTable(con, "subTurfCommunity", seed, row.names=FALSE, append = TRUE)
# }
# ######################### vigdis seedling problem fixed
}
| /inst/uploadDataSource/importcommunityNY_test_16.des.2013.r | no_license | jslynn33/seedclimComm | R | false | false | 11,681 | r | ####################
#todo############
#check species in taxon table before inserting
#merge subturf taxa
####################
#for loop
import_data <- function(file, con, merge_dictionary){
#browser()
print(file)
chkft <- c("pleuro","acro", "liver", "lichen", "litter" ,"soil", "rock", "totalVascular", "totalBryophytes", "totalLichen", "vegetationHeight", "mossHeight")
f <- readLines(file, warn = FALSE) %>%
if_else(condition = substring(., 1, 1) == "\"", true = {
gsub(pattern = "^\"", replacement = "", x = .) %>% #replace starting quote
gsub(pattern = "\"\"", replacement = "\"", x = .) %>% #replace double quotes
gsub(pattern = "\"\"$", replacement = "\"", x = .) %>% #replace double quotes that was part of a triple at end
#gsub(pattern = "(?<!\\)),\"$", replacement = ",", x = ., perl = TRUE) #-ve look behind fixes Veskre comment problem
gsub(pattern = ",\"$", replacement = ",", x = .) #-ve remove end quote
}, false = .) %>%
paste(collapse = "\n")
dat <- read.delim(text = f, sep = ";", dec = ",", stringsAsFactors = FALSE)#sep = ";", dec = ","
if(ncol(dat) > 10){
if(any(sapply(dat[, chkft], class) == "character"))
dat <- read.delim(text = f, sep = ";", dec = ".", stringsAsFactors = FALSE)#sep = ";", dec = "."
}else{
dat <- read.delim(text = f, sep = ",", dec = ".", stringsAsFactors = FALSE)
}
names(dat) <- make.names(names(dat))
dat <- dat %>%
filter(!is.na(turfID), turfID != "", turfID != "turfID") %>%
mutate(
turfID = trimws(turfID),
comment = as.character(comment),
year = as.integer(year)
)
head(dat)
names(dat)
Encoding(dat$comment) <- "latin1"
#remove numeric suffix on duplicates
names(dat) <- gsub("_\\d$", "", names(dat))
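# Quick check of the suffix stripping (hypothetical duplicated names):
# gsub("_\\d$", "", c("Agr.cap", "Agr.cap_1"))  #> "Agr.cap" "Agr.cap"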
#extract turf data####
#fix typos in turfID
dat <- dat %>%
mutate(turfID = recode(turfID,
"515 TT4 247" = "515 TT4 274",
"277 TTC" = "286 TTC",
"192 TT4 29" = "192 TT4 299"))
turf <- dat %>%
select(turfID, matches("treat$"), one_of(c("originPlotID", "destinationPlotID"))) %>%
distinct() %>%
mutate_all(trimws)
turf
names(turf)
alreadyIn <- dbGetQuery(con,"select turfID from turfs")
newTurfs <- turf %>% anti_join(alreadyIn) #find which turfs IDs are not already in database
if(nrow(newTurfs) > 0) {
message("adding ", paste(newTurfs$turfID, collapse = " "), " new turfs" )
dbPadWriteTable(con, "turfs", newTurfs, row.names = FALSE, append = TRUE)
}
message("done turfs")
#subTurf env ####
subturfEnv <- dat %>%
filter(Measure != "Cover") %>%
select(turfID, subTurf = subPlot, year, pleuro, acro, liver, lichen, litter, soil, rock, comment) %>%
mutate(subTurf = as.integer(subTurf))
if(!is.null(dat$missing)){
bad = dat$missing[dat$Measure != "Cover"]
bad[is.na(bad)] <- ""
subturfEnv <- subturfEnv %>% mutate(bad = bad)
} else{
subturfEnv <- subturfEnv %>% mutate(bad = "")
}
subturfEnv
dbPadWriteTable(con, "subTurfEnvironment", subturfEnv, row.names = FALSE, append = TRUE)
nrow(subturfEnv)
#TurfEnv ####
turfEnv <- dat %>%
filter(Measure == "Cover") %>%
select(turfID, year, pleuro, acro, liver, lichen, litter, soil, rock, totalVascular, totalBryophytes, totalLichen, vegetationHeight, mossHeight, comment, recorder, date)
if(mode(turfEnv$mossHeight) == "character"){
turfEnv <- turfEnv %>%
mutate(mossHeight = gsub(",", ".", mossHeight),
mossHeight = as.numeric(mossHeight))
}
if(any(nchar(as.character(turfEnv$comment)) > 255, na.rm = TRUE)) {
stop ("more than 255 characters in a comment field in turfEnv")
}
dbPadWriteTable(con, "turfEnvironment", turfEnv, row.names = FALSE, append = TRUE)
nrow(turfEnv)
#TurfCommunity ####
spp <- dat %>%
filter(Measure == "Cover") %>%
select(turfID, year, (which(names(dat) == "recorder") + 1):(which(names(dat) == "pleuro") - 1)) %>%
gather(key = species, value = cover, -turfID, -year) %>%
filter(!is.na(cover), cover != 0) %>% #remove absent taxa
mutate(
cf = grepl("cf", cover, ignore.case = TRUE),
cover = gsub("cf", "", cover, ignore.case = TRUE) #move any CF to new column
)
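# e.g. a cover entry of "cf4" (hypothetical value) yields cf = TRUE, cover = "4":
# grepl("cf", "cf4", ignore.case = TRUE)     #> TRUE
# gsub("cf", "", "cf4", ignore.case = TRUE)  #> "4"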
#oddity search
spp %>% filter(is.na(as.numeric(cover))) %>% count(cover)
#merge synonyms
spp <- spp %>%
left_join(merge_dictionary, by = c("species" = "oldID")) %>%
mutate(newID = coalesce(newID, species)) %>%
select(turfID, year, species = newID, cover, cf) %>%
group_by(year, turfID, species) %>%
mutate(cover = as.numeric(cover)) %>%
summarise(cover = sum(cover)) %>% #aggregate taxa
filter(cover > 0) %>%
ungroup()
#check_new_taxa
spp_list <- dbGetQuery(conn = con, statement = "select species from taxon")
spp %>% anti_join(spp_list) %>% verify(nrow(.) == 0)
#inject
initNrowTurfCommunity <- dbGetQuery(con, "select count(*) as n from turfCommunity")
dbPadWriteTable(con, "turfCommunity", spp)
finalNrowTurfCommunity <- dbGetQuery(con, "select count(*) as n from turfCommunity")
stopifnot(nrow(spp) == finalNrowTurfCommunity$n - initNrowTurfCommunity$n)
#subTurfCommunity ####
message("subturfcommunity")
subspp <- dat %>%
filter(Measure != "Cover") %>%
select(turfID, year, subTurf = subPlot, (which(names(dat) == "recorder") + 1):(which(names(dat) == "pleuro") -1)) %>%
mutate(subTurf = as.integer(subTurf)) %>%
gather(key = species, value = presence, -turfID, -subTurf, -year) %>%
filter(!is.na(presence), presence != 0, presence != "") #remove absent taxa
# #oddity search
subspp %>% count(presence)
# #merge synonyms
subspp <- subspp %>%
left_join(merge_dictionary, by = c("species" = "oldID")) %>%
mutate(newID = coalesce(newID, species)) %>%
select(turfID, subTurf, year, species = newID, presence) %>%
group_by(year, turfID, subTurf, species) %>%
summarise(presence = paste0(presence, collapse = "")) %>% #aggregate taxa
ungroup()
#check_new_taxa
subspp %>% anti_join(spp_list) %>% verify(nrow(.) == 0)
# subspp[subspp == 0] <- NA
# subsppX <- lapply(unique(mergedNames), function(sppname){
# species <- subspp[, names(subspp) == sppname, drop = FALSE]
# if (ncol(species) == 1) {
# return(species)
# } else {
# apply (species, 1, function(r) {
# occurence <- which(!is.na(r))
# if(length(occurence) == 0) return(NA)
# if(length(occurence) == 1) return(r[occurence])
# else {
# warning(paste("more than one species observation in same subplot!"))
# write.csv(data.frame(filename = n, species = sppname, occurence = r[occurence]), file = "cooccurence_log.csv", append = TRUE)
# return(r[occurence][1])
# }
# })
# }
# })
#
#
# subsppX <- setNames(as.data.frame(subsppX), unique(mergedNames))
# subspp <- cbind(subspp[, 1:3], subsppX)
#euphrasia rule adults=adults+juvenile+seedling, j=j+s, s=s
seedlingSp <- c("Euph.fri", "Eup.fri","Eup.sp","Eup.str","Euph.fri","Euph.sp", "Euph.str","Euph.str.1", "Euph.wet", "Poa.ann","Thlaspi..arv","Com.ten","Gen.ten", "Rhi.min", "Cap.bur", "Mel.pra","Mel.sp","Mel.syl","Noc.cae","Ste.med","Thl.arv","Ver.arv")
subspp <- subspp %>%
mutate(
cf = grepl("cf", presence, ignore.case = TRUE),
fertile = grepl("F",presence, ignore.case = FALSE),
dominant = grepl("D",presence, ignore.case = TRUE),
vegetative = grepl("V",presence, ignore.case = TRUE),
seedling_1 = grepl("S",presence, ignore.case = TRUE),
seedling_n = stringr::str_extract(presence, pattern = "(?<=Sx)\\d+|\\d+(?=xS)"),
seedling_n = as.integer(seedling_n),
seedlings = case_when(
!is.na(seedling_n) ~ seedling_n,
seedling_1 ~ 1L,
TRUE ~ 0L
),
juvenile_1 = grepl("J",presence, ignore.case = TRUE),
juvenile_n = stringr::str_extract(presence, pattern = "(?<=Jx)\\d+|\\d+(?=xJ)"),
juvenile_n = as.integer(juvenile_n),
juvenile = case_when(
!is.na(juvenile_n) ~ juvenile_n,
juvenile_1 ~ 1L,
TRUE ~ 0L
),
adult = fertile|dominant|vegetative|grepl("1", presence) ) %>%
select(-c(seedling_1, seedling_n, juvenile_1, juvenile_n)) %>%
#########more annuals?
mutate(
juvenile = if_else(species %in% seedlingSp & juvenile == 0L & adult, 1L, juvenile),
seedlings = if_else(species %in% seedlingSp & seedlings == 0L & juvenile > 0L, juvenile, seedlings)
) %>%
mutate_at(vars(cf, fertile, dominant, vegetative, adult), as.integer)
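# Quick illustration of the count-extraction regex used above (hypothetical
# presence codes): digits are pulled out of either "Sx<n>" or "<n>xS" entries.
# stringr::str_extract(c("Sx12", "3xS", "S"), "(?<=Sx)\\d+|\\d+(?=xS)")
# #> "12" "3"  NA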
#inject
initNrowSubTurfCommunity <- dbGetQuery(con, "select count(*) as n from subturfCommunity")
dbPadWriteTable(con, "subturfCommunity", subspp)
finalNrowSubTurfCommunity <- dbGetQuery(con, "select count(*) as n from subturfCommunity")
stopifnot(nrow(subspp) == finalNrowSubTurfCommunity$n - initNrowSubTurfCommunity$n)
############### Vigdis seedling problem #only for 2011 data #############################
#
# id_seedlings <- subspp %>%
#   filter(year == 2011, species != "seedling") %>%
#   group_by(turfID) %>%
#   summarise(n_seedlings = sum(seedlings))
#
# uid_seedlings <- subspp %>%
#   filter(year == 2011, species == "seedling") %>%
#   full_join(id_seedlings, by = "turfID") %>%
#   left_join(dat %>%
#               filter(year == 2011, Measure == "Cover") %>%
#               select(turfID, recorder))
#
#
# if(dat$year[1] == 2011 & FALSE){ #disable seedling error <---- FIX THIS!!!
# seed <- dat[dat$TTtreat != "" & dat$Measure != "Cover", c("turfID","subPlot", "year", "seedlings", "recorder")] #get data.frame of seedlings N1
# seed$subPlot <- as.integer(as.character(seed$subPlot))
# seed$turfID <- factor(seed$turfID)
# seedsum <- dbGetQuery(con, paste("select * from [number identified seedlings by subplot] where siteID='",dat$DestinationSite[1], "' and Year=2011", sep="")) #sqlQuery database for number of seedlings per subplot N2
# seed <- seed[order(seed$turfID, seed$subPlot),]
#
# head(seed)
# head(seedsum)
#
# seed <- seed[!paste(seed$turf, seed$subPlot) %in% setdiff(paste(seed$turf, seed$subPlot), paste(seedsum$turf, seedsum$subTurf)),]# then remove any missing rows as they have no species
#
# seed$seedlings[is.na(seed$seedlings)] <- 0
#
# seed$seedlings2 <- seed$seedlings
# seed$seedlings2[seed$recorder == "W"]<-seed$seedlings[seed$recorder == "W"]-seedsum$SumOfseedlings[seed$recorder == "W"]#for VV /W subplots n seedlings N1 = N1 - N2
#
# data.frame(seed$recorder, seed$seedlings, seedsum$SumOfseedlings, seed$seedlings2)
#
# #insert N1 into subTurfCommunity as unident seedling
#
# seed <- seed[seed$seedlings2 > 0,]
# seed <- data.frame(turfID = seed$turfID, year = seed$year, subTurf = seed$subPlot, species = "seed.unid", seedlings = seed$seedlings2, juvenile = 0,adult = 0,fertile = 0,vegetative = 0,dominant = 0, cf = 1)
# dbWriteTable(con, "subTurfCommunity", seed, row.names=FALSE, append = TRUE)
# }
# ######################### vigdis seedling problem fixed
}
|
context("Add daily volume")
# Data inputs
test_that("dataframe can be provided and using different column names",{
skip_on_cran()
skip_on_travis()
flowdata <- tidyhydat::hy_daily_flows("08NM116") %>%
dplyr::rename(Flows = Value)
flowdata <- add_daily_volume(flowdata, values = Flows)
expect_true(all(c("Flows") %in% colnames(flowdata)) &
nrow(flowdata) >= 1)
})
test_that("station_number can be provided",{
skip_on_cran()
skip_on_travis()
stns <- "08NM003"
stations_data <- add_daily_volume(station_number = stns)
expect_true(stns %in% unique(stations_data$STATION_NUMBER) &
nrow(stations_data) >= 1)
})
test_that("multiple station_numbers can be provided",{
skip_on_cran()
skip_on_travis()
stns <- c("08NM003","08NM116")
stations_data <- add_daily_volume(station_number = stns)
expect_true(all(unique(stations_data$STATION_NUMBER) %in% stns) &
nrow(stations_data) >= 1)
})
# Function results
test_that("add_daily_volume actually adds a column called Volume_m3",{
skip_on_cran()
skip_on_travis()
stns <- "08NM003"
data_col <- add_daily_volume(station_number = stns)
expect_true("Volume_m3" %in% names(data_col))
})
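# A sketch of one more check that could live here (assumes fasstr derives
# Volume_m3 as mean daily discharge in m3/s times 86400 seconds per day):
test_that("Volume_m3 equals discharge times 86400 seconds",{
  skip_on_cran()
  skip_on_travis()
  data_col <- add_daily_volume(station_number = "08NM003")
  expect_equal(data_col$Volume_m3, data_col$Value * 86400)
})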
| /tests/testthat/test-add_daily_volume.R | permissive | Rain3498/fasstr | R | false | false | 1,218 | r | context("Add daily volume")
# Data inputs
test_that("dataframe can be provided and using different column names",{
skip_on_cran()
skip_on_travis()
flowdata <- tidyhydat::hy_daily_flows("08NM116") %>%
dplyr::rename(Flows = Value)
flowdata <- add_daily_volume(flowdata, values = Flows)
expect_true(all(c("Flows") %in% colnames(flowdata)) &
nrow(flowdata) >= 1)
})
test_that("station_number can be provided",{
skip_on_cran()
skip_on_travis()
stns <- "08NM003"
stations_data <- add_daily_volume(station_number = stns)
expect_true(stns %in% unique(stations_data$STATION_NUMBER) &
nrow(stations_data) >= 1)
})
test_that("multiple station_numbers can be provided",{
skip_on_cran()
skip_on_travis()
stns <- c("08NM003","08NM116")
stations_data <- add_daily_volume(station_number = stns)
expect_true(all(unique(stations_data$STATION_NUMBER) %in% stns) &
nrow(stations_data) >= 1)
})
# Function results
test_that("add_daily_volume actually adds a column called Volume_m3",{
skip_on_cran()
skip_on_travis()
stns <- "08NM003"
data_col <- add_daily_volume(station_number = stns)
expect_true("Volume_m3" %in% names(data_col))
})
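# A sketch of one more check that could live here (assumes fasstr derives
# Volume_m3 as mean daily discharge in m3/s times 86400 seconds per day):
test_that("Volume_m3 equals discharge times 86400 seconds",{
  skip_on_cran()
  skip_on_travis()
  data_col <- add_daily_volume(station_number = "08NM003")
  expect_equal(data_col$Volume_m3, data_col$Value * 86400)
})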
|
\name{VIMiQ5_685}
\alias{VIMiQ5_685}
\docType{data}
\title{
Amplification Reaction Using the Bio-Rad iQ5
}
\description{
Data set of an amplification reaction using the Bio-Rad iQ5 thermocycler. The
samples of Vimentin were amplified in the iQ5 as replicates according to
Roediger et al. (2013). The quantification was performed during the elongation
step (68.5 degrees Celsius).
}
\usage{data(VIMiQ5_685)}
\format{
A data frame with 40 observations on the following 97 variables. The first
column ("Cycles") contains the number of cycles and consecutive columns
contain the replicates ("A01" to "H12").
}
\source{
Stefan Roediger, Claudia Deutschmann (BTU Cottbus - Senftenberg)
}
\references{
A Highly Versatile Microscope Imaging Technology Platform for the Multiplex
Real-Time Detection of Biomolecules and Autoimmune Antibodies. S. Roediger,
P. Schierack, A. Boehm, J. Nitschke, I. Berger, U. Froemmel, C. Schmidt,
M. Ruhland, I. Schimke, D. Roggenbuck, W. Lehmann and C. Schroeder.
\emph{Advances in Biochemical Bioengineering/Biotechnology}. 133:33--74, 2013.
}
\examples{
T595 <- rowMeans(VIMiQ5_595[, 2:ncol(VIMiQ5_595)])
T685 <- rowMeans(VIMiQ5_685[, 2:ncol(VIMiQ5_685)])
plot(1:length(T595), T595, main = "Fluorescence at different
temperatures\nQuantification in iQ5 (Bio-Rad)", xlab = "Cycle",
ylab = "Cycle dependent fluorescence", pch = 15, type = "b")
lines(1:length(T685), T685, pch = 19, type = "b", col = 2)
legend(1, 10000, c("Annealing (59.5 deg C)", "Elongation (68.5 deg C)"),
pch = c(15, 19), col = c(1,2))
}
\keyword{datasets}
\keyword{iQ5}
\keyword{Elongation}
| /man/VIMiQ5_685.Rd | no_license | PCRuniversum/chipPCR | R | false | false | 1,628 | rd | \name{VIMiQ5_685}
\alias{VIMiQ5_685}
\docType{data}
\title{
Amplification Reaction Using the Bio-Rad iQ5
}
\description{
Data set of an amplification reaction using the Bio-Rad iQ5 thermocycler. The
samples of Vimentin were amplified in the iQ5 as replicates according to
Roediger et al. (2013). The quantification was performed during the elongation
step (68.5 degrees Celsius).
}
\usage{data(VIMiQ5_685)}
\format{
A data frame with 40 observations on the following 97 variables. The first
column ("Cycles") contains the number of cycles and consecutive columns
contain the replicates ("A01" to "H12").
}
\source{
Stefan Roediger, Claudia Deutschmann (BTU Cottbus - Senftenberg)
}
\references{
A Highly Versatile Microscope Imaging Technology Platform for the Multiplex
Real-Time Detection of Biomolecules and Autoimmune Antibodies. S. Roediger,
P. Schierack, A. Boehm, J. Nitschke, I. Berger, U. Froemmel, C. Schmidt,
M. Ruhland, I. Schimke, D. Roggenbuck, W. Lehmann and C. Schroeder.
\emph{Advances in Biochemical Bioengineering/Biotechnology}. 133:33--74, 2013.
}
\examples{
T595 <- rowMeans(VIMiQ5_595[, 2:ncol(VIMiQ5_595)])
T685 <- rowMeans(VIMiQ5_685[, 2:ncol(VIMiQ5_685)])
plot(1:length(T595), T595, main = "Fluorescence at different
temperatures\nQuantification in iQ5 (Bio-Rad)", xlab = "Cycle",
ylab = "Cycle dependent fluorescence", pch = 15, type = "b")
lines(1:length(T685), T685, pch = 19, type = "b", col = 2)
legend(1, 10000, c("Annealing (59.5 deg C)", "Elongation (68.5 deg C)"),
pch = c(15, 19), col = c(1,2))
}
\keyword{datasets}
\keyword{iQ5}
\keyword{Elongation}
|
library(photobiologyFilters)
### Name: etola
### Title: PE films supplied by Etola Oy
### Aliases: etola
### Keywords: datasets
### ** Examples
etola
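# A small follow-up sketch (assumes etola is a named collection of filter
# spectra, as is usual in photobiologyFilters): list the member spectra.
names(etola)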
| /data/genthat_extracted_code/photobiologyFilters/examples/etola.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 158 | r | library(photobiologyFilters)
### Name: etola
### Title: PE films supplied by Etola Oy
### Aliases: etola
### Keywords: datasets
### ** Examples
etola
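# A small follow-up sketch (assumes etola is a named collection of filter
# spectra, as is usual in photobiologyFilters): list the member spectra.
names(etola)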
|
rankhospital <- function(state, outcome, num = "best") {
## Read outcome data
raw_data <- read.csv("outcome-of-care-measures.csv", colClasses="character")
## check that state and outcome are valid
## column 7: state
validStates = unique(raw_data[,7])
if (state %in% validStates == FALSE) {
stop("invalid state")
}
if (outcome %in% c("heart attack", "heart failure", "pneumonia") == FALSE) {
stop("invalid outcome")
}
    ## Return hospital name in that state with the given 'num'
    ## column 11: 30-day death rates from heart attack
    ## column 17: 30-day death rates from heart failure
    ## column 23: 30-day death rates from pneumonia
if (outcome == "heart attack") {
col = 11
} else if (outcome == "heart failure") {
col = 17
} else if (outcome == "pneumonia") {
col = 23
}
## don't forget to set class type for death rate to numeric before subsetting
## otherwise, there will be NA death rate
raw_data[,col] <- as.numeric(raw_data[,col])
df <- subset(raw_data, raw_data[, 7] == state & !is.na(raw_data[, col]))
if (num == "best") {
df <- subset(df, df[,col] == min(df[,col]))
result = df[,2]
} else if (num == "worst") {
df <- subset(df, df[,col] == max(df[,col]))
result = df[,2]
    } else if (!is.na(suppressWarnings(as.numeric(num)))) {
        n = as.numeric(num)
        ## rank is checked against the number of hospitals in this state
        ## with data for this outcome, not all hospitals nationwide
        if (n > nrow(df)) {
            result <- NA
        } else {
            result = df[order(df[,col], df[,2]), 2][n]
        }
    } else {
        stop("num needs to be 'best', 'worst', or a number")
}
result
} | /ProgrammingAssignment3/rankhospital.R | no_license | frankpoon/R-Programming | R | false | false | 1,730 | r | rankhospital <- function(state, outcome, num = "best") {
## Read outcome data
raw_data <- read.csv("outcome-of-care-measures.csv", colClasses="character")
## check that state and outcome are valid
## column 7: state
validStates = unique(raw_data[,7])
if (state %in% validStates == FALSE) {
stop("invalid state")
}
if (outcome %in% c("heart attack", "heart failure", "pneumonia") == FALSE) {
stop("invalid outcome")
}
    ## Return hospital name in that state with the given 'num'
    ## column 11: 30-day death rates from heart attack
    ## column 17: 30-day death rates from heart failure
    ## column 23: 30-day death rates from pneumonia
if (outcome == "heart attack") {
col = 11
} else if (outcome == "heart failure") {
col = 17
} else if (outcome == "pneumonia") {
col = 23
}
## don't forget to set class type for death rate to numeric before subsetting
## otherwise, there will be NA death rate
raw_data[,col] <- as.numeric(raw_data[,col])
df <- subset(raw_data, raw_data[, 7] == state & !is.na(raw_data[, col]))
if (num == "best") {
df <- subset(df, df[,col] == min(df[,col]))
result = df[,2]
} else if (num == "worst") {
df <- subset(df, df[,col] == max(df[,col]))
result = df[,2]
    } else if (!is.na(suppressWarnings(as.numeric(num)))) {
        n = as.numeric(num)
        ## rank is checked against the number of hospitals in this state
        ## with data for this outcome, not all hospitals nationwide
        if (n > nrow(df)) {
            result <- NA
        } else {
            result = df[order(df[,col], df[,2]), 2][n]
        }
    } else {
        stop("num needs to be 'best', 'worst', or a number")
}
result
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extraction.R
\name{file_name}
\alias{file_name}
\title{Concatenate a date string & suffix}
\usage{
file_name(suffix, date_str)
}
\arguments{
\item{suffix}{a character string.}
\item{date_str}{a character string representing the date.}
}
\description{
Concatenate a date string & suffix
}
| /man/file_name.Rd | no_license | clement-lee/hybridProcess | R | false | true | 367 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extraction.R
\name{file_name}
\alias{file_name}
\title{Concatenate a date string & suffix}
\usage{
file_name(suffix, date_str)
}
\arguments{
\item{suffix}{a character string.}
\item{date_str}{a character string representing the date.}
}
\description{
Concatenate a date string & suffix
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/silnice.R
\name{silnice}
\alias{silnice}
\title{Road Network}
\source{
Mapový podklad – Data200, 2021 © Český úřad zeměměřický a katastrální. \url{https://www.cuzk.cz}
}
\usage{
silnice()
}
\value{
\code{sf} data frame with 59.594 rows of 3 variables + geometry:
\describe{
\item{TRIDA}{Class of the road: highway = dálnice, speedway = rychlostní silnice, 1st class road = silnice I. třídy, 2nd class road = silnice II. třídy, 3rd class road = silnice III. třídy, other road = neevidovaná silnice}
\item{CISLO_SILNICE}{Local road code}
\item{MEZINARODNI_OZNACENI}{International road code}
}
}
\description{
Function returning data frame of roads of the Czech Republic as \code{sf} lines. It has no obligatory parameters.
}
\details{
Due to package size constraints the data are stored externally (and a working internet connection is required to use the package).
The data is current to December 2020. Downloaded size is 6 MB.
}
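\examples{
\dontrun{
# A minimal usage sketch (network access required, as noted in Details):
silnice_data <- silnice()
plot(silnice_data["TRIDA"])
}
}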
| /man/silnice.Rd | permissive | jlacko/RCzechia | R | false | true | 1,032 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/silnice.R
\name{silnice}
\alias{silnice}
\title{Road Network}
\source{
Mapový podklad – Data200, 2021 © Český úřad zeměměřický a katastrální. \url{https://www.cuzk.cz}
}
\usage{
silnice()
}
\value{
\code{sf} data frame with 59.594 rows of 3 variables + geometry:
\describe{
\item{TRIDA}{Class of the road: highway = dálnice, speedway = rychlostní silnice, 1st class road = silnice I. třídy, 2nd class road = silnice II. třídy, 3rd class road = silnice III. třídy, other road = neevidovaná silnice}
\item{CISLO_SILNICE}{Local road code}
\item{MEZINARODNI_OZNACENI}{International road code}
}
}
\description{
Function returning data frame of roads of the Czech Republic as \code{sf} lines. It has no obligatory parameters.
}
\details{
Due to package size constraints the data are stored externally (and a working internet connection is required to use the package).
The data is current to December 2020. Downloaded size is 6 MB.
}
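\examples{
\dontrun{
# A minimal usage sketch (network access required, as noted in Details):
silnice_data <- silnice()
plot(silnice_data["TRIDA"])
}
}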
|
# <Load Office of Public Management Data and visualize the result.>
# Copyright (C) <2018>  <Matthew C Morriss>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#This script loads all of the employment data from cabinet-level agencies.
setwd('/Users/matthew/Documents/GitHub/Trump_Administration_departures/TAD')
library(dplyr)
library(ggplot2)
library(readr)
library(reshape2)
library(DataCombine)
#################################### Load Data ####################################
# Load data from the office of personnel management
EmploymentData <- read.csv('OPM_Data.csv')
#calculate fraction of year
yearFrac = EmploymentData$Year + (EmploymentData$Month/12)
EmploymentData <- cbind(EmploymentData, yearFrac)
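# Quick check of the fractional-year encoding (hypothetical value):
# 2017 + 9/12  #> 2017.75, i.e. September 2017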
#################################### How many people work for government ###################################
all <- aggregate(EmploymentData$United.States, by=list(Category=EmploymentData$yearFrac), FUN=sum)
a <- ggplot(all, aes(x = all$Category, y = all$x))+
  geom_line()+
  xlim(2004, 2018)
a
##################################### State Dept. ####################################
#of state department employees vs year
stateDept = filter(EmploymentData, Employment == "ST-DEPARTMENT OF STATE")
p <- ggplot(stateDept, aes(x = stateDept$yearFrac, y = stateDept$Location...All
                           ) )+
  geom_line()+
  # geom_line(data = stateDept, aes(x = stateDept$yearFrac, y = stateDept$Foreign.Countries))+
geom_vline(xintercept = 2017)+
labs(x = 'Year',y = 'Number of Employees')+
labs(title = "Number of State Department Employees")+
theme_classic()+
theme(axis.text.x = element_text(angle = 45,hjust = 1))+
scale_x_continuous(
breaks =seq(2006, 2018, 2),
limits = c(2006, 2018))+
ylim(10000, 14000)
p
#################################### Heatmap final ####################################
EmploymentData <- EmploymentData[!(EmploymentData$Employment == "Cabinet Level Agencies"),]
EData <- filter(EmploymentData,Month == 9)
EData <- filter(EData, Year >= 2006)
# EData <- filter(EData, EmploymentData$Employment != "AG-DEPARTMENT OF AGRICULTURE" &
#                   EmploymentData$Employment!= "IN-DEPARTMENT OF THE INTERIOR" )
# #Test of how well the data is concurrent
# EData <- EData[order(EData$yearFrac),]
EData$Employment <- factor(EData$Employment)
# barplot(prop.table(table(EData$Employment)))
# Now calculate the percent change, year over year, for each cabinet department
ED <- PercChange(EData,Var = "United.States",
type = "percent",
GroupVar = "Employment",
slideBy = -1,
TimeVar = "yearFrac",
NewVar = "PerChange")
ggplot(ED, aes(x = ED$Year,
y = ED$Employment,
# color = ED$PerChange,
fill = ED$PerChange)) +
  geom_tile(colour="white",
            width=.9, height=.9) +
  geom_vline(xintercept = 2016.5)+
  coord_fixed()+
  scale_fill_gradient2(low = "darkred", mid = "white", high = "darkgreen")+
  labs(x = "Year", y = "Cabinet Department", fill = "Percent Change")+
  ggtitle("% Change in Employees of Cabinet Level Agencies")+
  theme_classic()+
  xlim(2007, 2017)+
  scale_x_continuous(
    breaks =seq(2006, 2018, 1),
    limits = c(2006, 2018))+
  scale_y_discrete(limits = rev(levels(ED$Employment)))+
  theme(axis.text.x = element_text(angle = 45,hjust = 1))
# coord_flip()
ggsave("Percent_change_per_Agency.eps", device=cairo_ps)
| /Departures.R | no_license | morrismc/Trump-Administration-Departures | R | false | false | 8,727 | r | # <Load Office of Public Management Data and visualize the result.>
# Copyright (C) <2018>  <Matthew C Morriss>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#This script loads all of the employment data from cabinet-level agencies.
setwd('/Users/matthew/Documents/GitHub/Trump_Administration_departures/TAD')
library(dplyr)
library(ggplot2)
library(readr)
library(reshape2)
library(DataCombine)
#################################### Load Data ####################################
# Load data from the office of personnel management
EmploymentData <- read.csv('OPM_Data.csv')
#calculate fraction of year
yearFrac = EmploymentData$Year + (EmploymentData$Month/12)
EmploymentData <- cbind(EmploymentData, yearFrac)
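# Quick check of the fractional-year encoding (hypothetical value):
# 2017 + 9/12  #> 2017.75, i.e. September 2017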
#################################### How many people work for government ###################################
all <- aggregate(EmploymentData$United.States, by=list(Category=EmploymentData$yearFrac), FUN=sum)
a <- ggplot(all, aes(x = all$Category, y = all$x))+
  geom_line()+
  xlim(2004, 2018)
a
##################################### State Dept. ####################################
#of state department employees vs year
stateDept = filter(EmploymentData, Employment == "ST-DEPARTMENT OF STATE")
p <- ggplot(stateDept, aes(x = stateDept$yearFrac, y = stateDept$Location...All
                           ) )+
  geom_line()+
  # geom_line(data = stateDept, aes(x = stateDept$yearFrac, y = stateDept$Foreign.Countries))+
geom_vline(xintercept = 2017)+
labs(x = 'Year',y = 'Number of Employees')+
labs(title = "Number of State Department Employees")+
theme_classic()+
theme(axis.text.x = element_text(angle = 45,hjust = 1))+
scale_x_continuous(
breaks =seq(2006, 2018, 2),
limits = c(2006, 2018))+
ylim(10000, 14000)
p
#################################### Heatmap final ####################################
EmploymentData <- EmploymentData[!(EmploymentData$Employment == "Cabinet Level Agencies"),]
EData <- filter(EmploymentData,Month == 9)
EData <- filter(EData, Year >= 2006)
# EData <- filter(EData, EmploymentData$Employment != "AG-DEPARTMENT OF AGRICULTURE" &
#                   EmploymentData$Employment!= "IN-DEPARTMENT OF THE INTERIOR" )
# #Test of how well the data is concurrent
# EData <- EData[order(EData$yearFrac),]
EData$Employment <- factor(EData$Employment)
# barplot(prop.table(table(EData$Employment)))
# Now calculate the percent change, year over year, for each cabinet department
ED <- PercChange(EData,Var = "United.States",
type = "percent",
GroupVar = "Employment",
slideBy = -1,
TimeVar = "yearFrac",
NewVar = "PerChange")
ggplot(ED, aes(x = ED$Year,
y = ED$Employment,
# color = ED$PerChange,
fill = ED$PerChange)) +
  geom_tile(colour="white",
            width=.9, height=.9) +
  geom_vline(xintercept = 2016.5)+
  coord_fixed()+
  scale_fill_gradient2(low = "darkred", mid = "white", high = "darkgreen")+
  labs(x = "Year", y = "Cabinet Department", fill = "Percent Change")+
  ggtitle("% Change in Employees of Cabinet Level Agencies")+
  theme_classic()+
  xlim(2007, 2017)+
  scale_x_continuous(
    breaks =seq(2006, 2018, 1),
    limits = c(2006, 2018))+
  scale_y_discrete(limits = rev(levels(ED$Employment)))+
  theme(axis.text.x = element_text(angle = 45,hjust = 1))
# coord_flip()
ggsave("Percent_change_per_Agency.eps", device=cairo_ps)
|
#author - prashant bhat
# decision tree classification
# import the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# feature scaling the dataset
dataset[, -3] = scale(dataset[, -3])
# split the dataset into test set and training set
library('caTools')
# sample.split stratifies on the outcome vector, not the whole data frame
split = sample.split(dataset$Purchased, SplitRatio = 3/4)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
library(rpart)
classifier = rpart(formula = Purchased ~ .,
data = training_set,
method = 'class')
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3], type = 'class')
# confusion matrix
cm = table(test_set[, 3], y_pred)
# Note :
# Do not forget to factorize your dependent variable, since the algorithm does not recognize this inherently.
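# Optional: a quick accuracy estimate from the confusion matrix (a sketch;
# correct predictions on the diagonal over all test observations):
accuracy = sum(diag(cm)) / sum(cm)
accuracy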
| /Decision_Tree_Classification/Decision_Tree_Classification.R | no_license | gabhilash436/Machine-Learning-Playground | R | false | false | 989 | r | #author - prashant bhat
# decision tree classification
# import the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# feature scaling the dataset
dataset[, -3] = scale(dataset[, -3])
# split the dataset into test set and training set
library('caTools')
# sample.split stratifies on the outcome vector, not the whole data frame
split = sample.split(dataset$Purchased, SplitRatio = 3/4)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
library(rpart)
classifier = rpart(formula = Purchased ~ .,
data = training_set,
method = 'class')
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3], type = 'class')
# confusion matrix
cm = table(test_set[, 3], y_pred)
# Note :
# Do not forget to factorize your dependent variable, since the algorithm does not recognize this inherently.
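# Optional: a quick accuracy estimate from the confusion matrix (a sketch;
# correct predictions on the diagonal over all test observations):
accuracy = sum(diag(cm)) / sum(cm)
accuracy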
|
# First clean your memory
rm(list=ls()) # this can help
#reading data
# setwd (yourdirectory)
data<- read.table("household_power_consumption.txt", sep=";", header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subsetting
sub<- subset(data, (data$Date == "1/2/2007" | data$Date== "2/2/2007"))
# Changing the class
sub$Date <- as.Date(sub$Date, format = "%d/%m/%Y")
# Creating a new column in dataset named "DateTime":
sub$DateTime <- as.POSIXct(paste(sub$Date, sub$Time))
# plot3
png("plot3.png", width = 480, height = 480)
plot(sub$DateTime, sub$Sub_metering_1, type="l", ylab= "Energy sub metering", xlab="")
lines(sub$DateTime, sub$Sub_metering_2, type="l", col="red")
lines(sub$DateTime, sub$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
dev.off() #Don't forget this!
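# Optional sanity check (a sketch): the subset should cover exactly the
# two requested dates
# length(unique(sub$Date))  #> 2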
| /plot3.r | no_license | arturochian/ExData_Plotting1 | R | false | false | 919 | r | # First clean your memory
rm(list=ls()) # this can help
#reading data
# setwd (yourdirectory)
data<- read.table("household_power_consumption.txt", sep=";", header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subsetting
sub<- subset(data, (data$Date == "1/2/2007" | data$Date== "2/2/2007"))
# Changing the class
sub$Date <- as.Date(sub$Date, format = "%d/%m/%Y")
# Creating a new column in dataset named "DateTime":
sub$DateTime <- as.POSIXct(paste(sub$Date, sub$Time))
# plot3
png("plot3.png", width = 480, height = 480)
plot(sub$DateTime, sub$Sub_metering_1, type="l", ylab= "Energy sub metering", xlab="")
lines(sub$DateTime, sub$Sub_metering_2, type="l", col="red")
lines(sub$DateTime, sub$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
dev.off() #Don't forget this!
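# Optional sanity check (a sketch): the subset should cover exactly the
# two requested dates
# length(unique(sub$Date))  #> 2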
|
# Vector Calculus
# computations are performed element-wise
earnings <- c(50, 100, 30)
earnings * 3
earnings / 10
earnings - 20
earnings + 100
earnings ^ 2
# Element-wise
earnings <- c(50, 100, 30)
expenses <- c(30, 40, 80)
earnings - expenses
earnings + c(10,20,30)
earnings * c(1, 2, 3)
earnings / c(1, 2, 3)
bank <- earnings - expenses
bank
sum(bank)
earnings > expenses
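# Recycling: a shorter vector is reused element-wise; a partial recycle
# (lengths 3 and 2 here) also triggers a warning
earnings + 10
earnings * c(1, 2)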
# DataCamp
# Summing and subtracting vectors
A_vector <- c(1, 2, 3)
B_vector <- c(4, 5, 6)
# Take the sum of A_vector and B_vector: total_vector
total_vector <- A_vector + B_vector
# Print total_vector
total_vector
# Calculate the difference between A_vector and B_vector: diff_vector
diff_vector <- A_vector - B_vector
# Print diff_vector
diff_vector
# Calculate your earnings
# Casino winnings from Monday to Friday
poker_vector <- c(140, -50, 20, -120, 240)
roulette_vector <- c(-24, -50, 100, -350, 10)
days_vector <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
names(poker_vector) <- days_vector
names(roulette_vector) <- days_vector
# Calculate your daily earnings: total_daily
total_daily <- poker_vector + roulette_vector
# Calculate total winnings: sum()
# Total winnings with poker: total_poker
total_poker <- sum(poker_vector)
# Total winnings with roulette: total_roulette
total_roulette <- sum(roulette_vector)
# Total winnings overall: total_week
total_week <- sum(total_poker, total_roulette)
# Print total_week
total_week
# Comparing total winnings
# Calculate poker_better
poker_better <- poker_vector > roulette_vector
# Calculate total_poker and total_roulette, as before
total_poker <- sum(poker_vector)
total_roulette <- sum(roulette_vector)
total_week <- sum(total_poker, total_roulette)
# Calculate choose_poker
choose_poker <- total_poker > total_roulette
# Print choose_poker
choose_poker
# First steps in rational gambling
| /R Code Examples/IntroductionToR/2.Vectors/VectorCalculus.R | no_license | analystfreakabhi/DataScience_guide | R | false | false | 1,863 | r | # Vector Calculus
# computations are performed element-wise
earnings <- c(50, 100, 30)
earnings * 3
earnings / 10
earnings -20
earnings + 100
earnings ^ 2
# Element-wise
earnings <- c(50, 100, 30)
expenses <- c(30, 40, 80)
earnings - expenses
earnings + c(10,20,30)
earnings * c(1, 2, 3)
earnings / c(1, 2, 3)
bank <- earnings - expenses
bank
sum(bank)
earnings > expenses
# DataCamp
# Summing and subtracting vectors
A_vector <- c(1, 2, 3)
B_vector <- c(4, 5, 6)
# Take the sum of A_vector and B_vector: total_vector
total_vector <- A_vector + B_vector
# Print total_vector
total_vector
# Calculate the difference between A_vector and B_vector: diff_vector
diff_vector <- A_vector - B_vector
# Print diff_vector
diff_vector
# Calculate your earnings
# Casino winnings from Monday to Friday
poker_vector <- c(140, -50, 20, -120, 240)
roulette_vector <- c(-24, -50, 100, -350, 10)
days_vector <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
names(poker_vector) <- days_vector
names(roulette_vector) <- days_vector
# Calculate your daily earnings: total_daily
total_daily <- poker_vector + roulette_vector
# Calculate total winnings: sum()
# Total winnings with poker: total_poker
total_poker <- sum(poker_vector)
# Total winnings with roulette: total_roulette
total_roulette <- sum(roulette_vector)
# Total winnings overall: total_week
total_week <- sum(total_poker, total_roulette)
# Print total_week
total_week
# Comparing total winnings
# Calculate poker_better
poker_better <- poker_vector > roulette_vector
# Calculate total_poker and total_roulette, as before
total_poker <- sum(poker_vector)
total_roulette <- sum(roulette_vector)
total_week <- sum(total_poker, total_roulette)
# Calculate choose_poker
choose_poker <- total_poker > total_roulette
# Print choose_poker
choose_poker
# First steps in rational gambling
|
## Progression Free Survival
# Set-up environment
rm(list = ls())
load("~/R.Config.Rdata")
setwd(paste0(master.location, "/TBI-LAB - Project - COAD SIDRA-LUMC/NGS_Data"))
source(paste0(toolbox.path, "/R scripts/ipak.function.R"))
required.packages = c("survival", "plyr", "raster", "texreg", "stringr", "coin", "survminer")
ipak(required.packages)
# Set Parameters
Surv.cutoff.years = 20 # SET cut-off
Group.of.interest = "ICR_HML" # "tumour_anatomic_site" or "ICR_cluster_k3"
stage = ""
CMS = ""
exclude = c("Conpair_lower_90_percent", "non-epithelial") #"Conpair_lower_90_percent"
# c("Conpair_lower_90_percent", "non-epithelial")
# Create folders and log file
dir.create("./Figures/Trimmed_p",showWarnings = FALSE)
dir.create("./Figures/Trimmed_p/Kaplan Meier Plots", showWarnings = FALSE)
dir.create("./Logfiles/Kaplan Meier Plots", showWarnings = FALSE)
dir.create("./Analysis/Trimmed_p", showWarnings = FALSE)
dir.create("./Analysis/Trimmed_p/Survival Analysis", showWarnings = FALSE)
# Read in the clinical data file
load("./Processed_Data/Survival Data/JSREP_clinical_data.Rdata")
load("./Analysis/Trimmed_p/016_CMS_Classification/Rfcms.Rdata")
excluded_df = read.csv("./Overview Sample Stocks/Meta_data/Excluded_patients.csv", stringsAsFactors = FALSE)
excluded_df$Patient_ID = str_pad(excluded_df$Patient_ID, 3, pad = "0")
# Add ICR as a variable and assign ICR cluster according to table cluster assignment
load("./Analysis/Trimmed_p/ICR Consensus Clustering/JSREP_ICR_cluster_assignment_k2-6.Rdata")
load("./Processed_Data/RNASeq/Trimmed_p/Normalized/JSREP.clean.dataset.EDAseq.QN.HPC.Rdata")
load("./Processed_Data/WES/MANTIS/MANTIS.Rdata")
table_cluster_assignment$Patient_ID = row.names(table_cluster_assignment)
table_cluster_assignment$Patient_ID = gsub("T_P", "", table_cluster_assignment$Patient_ID)
Merged_dataset = merge(clinical_data, table_cluster_assignment, by = "Patient_ID")
Merged_dataset[, Group.of.interest] = factor(Merged_dataset[, Group.of.interest], levels = c("ICR Low", "ICR Medium", "ICR High"))
Merged_dataset$CMS = Rfcms$RF.predictedCMS[match(Merged_dataset$Patient_ID, substring(rownames(Rfcms), 1, 3))]
MANTIS = MANTIS[which(MANTIS$Tissue == "T"),]
Merged_dataset$MSI = MANTIS$MSI[match(Merged_dataset$Patient_ID, MANTIS$Patient_ID)]
if(CMS == ""){}else{
Merged_dataset = Merged_dataset[which(Merged_dataset$CMS == CMS),]
}
if(all(exclude == "")){
}else{
Merged_dataset = Merged_dataset[-which(Merged_dataset$Patient_ID %in% excluded_df$Patient_ID[which(excluded_df$Reason.excluded %in% exclude)]),]
}
# time / event object creation
Y = Surv.cutoff.years * 365
TS.Alive = Merged_dataset[Merged_dataset$DFS.Status == "Disease Free", c("DFS.Status", "DFS.Time", Group.of.interest, "ICRscore", "ajcc_pathologic_tumor_stage",
"CMS", "age_at_initial_pathologic_diagnosis", "MSI")]
colnames(TS.Alive) = c("Status","Time", Group.of.interest, "ICRscore", "pathologic_stage", "CMS", "Age", "MSI")
TS.Alive$Time = as.numeric(as.character(TS.Alive$Time))
TS.Alive$Time[TS.Alive$Time > Y] = Y
TS.Dead = Merged_dataset[Merged_dataset$DFS.Status == "Event", c("DFS.Status", "DFS.Time", Group.of.interest, "ICRscore", "ajcc_pathologic_tumor_stage",
"CMS", "age_at_initial_pathologic_diagnosis", "MSI")]
colnames(TS.Dead) = c("Status","Time", Group.of.interest, "ICRscore", "pathologic_stage", "CMS", "Age", "MSI")
TS.Dead$Time = as.numeric(as.character(TS.Dead$Time))
TS.Dead$Status[which(TS.Dead$Time> Y)] = "Disease Free"
TS.Dead$Time[TS.Dead$Time > Y] = Y
TS.Surv = rbind (TS.Dead,TS.Alive)
TS.Surv$Time = as.numeric(as.character(TS.Surv$Time))
TS.Surv$Status <- TS.Surv$Status == "Event"
TS.Surv = subset(TS.Surv,TS.Surv$Time > 1) # remove patients with less than 1 day of follow-up time
if(stage != ""){
TS.Surv = TS.Surv[which(TS.Surv$pathologic_stage == stage),]
}
# survival curve
msurv = Surv(TS.Surv$Time/30.4, TS.Surv$Status) # calculate the number of months
mfit = survfit(msurv~TS.Surv[,Group.of.interest],conf.type = "log-log")
# Calculations (Needs manual adaptation!)
mdiff = survdiff(eval(mfit$call$formula), data = eval(mfit$call$data))
pval = pchisq(mdiff$chisq,length(mdiff$n) - 1,lower.tail = FALSE)
pvaltxt = ifelse(pval < 0.0001,"p < 0.0001",paste("p =", signif(pval, 3)))
Cal.Surv = TS.Surv
#Cal.Surv[,Group.of.interest] = as.factor(Cal.Surv[,Group.of.interest])
#Cal.Surv[,Group.of.interest] = relevel(Cal.Surv[,Group.of.interest], "ICR3")
Cal.Surv[,Group.of.interest] = as.factor(Cal.Surv[,Group.of.interest])
mHR = coxph(formula = msurv ~ Cal.Surv[,Group.of.interest],data = Cal.Surv, subset = Cal.Surv[, Group.of.interest] %in% c("ICR High", "ICR Medium", "ICR Low"))
mHR.extract = extract.coxph(mHR, include.aic = TRUE,
include.rsquared = TRUE, include.maxrs=TRUE,
include.events = TRUE, include.nobs = TRUE,
include.missings = TRUE, include.zph = TRUE)
HRtxt = paste("Hazard-ratio =", signif(exp(mHR.extract@coef),3),"for",names(mHR$coefficients))
beta = coef(mHR)
se = sqrt(diag(mHR$var))
p = 1 - pchisq((beta/se)^2, 1)
CI = confint(mHR)
CI = round(exp(CI),2)
summary(mHR)
logrank_test(Surv(Time, Status) ~ get(Group.of.interest), data = TS.Surv[which(TS.Surv[, Group.of.interest] %in% c("ICR High", "ICR Medium", "ICR Low")),])
# plot
TS.Surv[,Group.of.interest] = factor(TS.Surv[,Group.of.interest], levels = c("ICR High", "ICR Medium", "ICR Low"))
msurv = Surv(TS.Surv$Time/30.4, TS.Surv$Status)
mfit = survfit(msurv~TS.Surv[,Group.of.interest],conf.type = "log-log")
ggsurv_plot = ggsurvplot(mfit,
xlim = c(0, 90),
break.time.by = 30,
data = TS.Surv,
censor = TRUE,
risk.table = TRUE,
tables.y.text.col = TRUE,
tables.y.text = FALSE,
tables.height = 0.3,
tables.theme = theme_cleantable(),
#tables.col = "strata",
risk.table.pos = "out",
legend = "none",
ylab = "",
xlab = "Time in months",
#fontsize = 4.5,
font.main = 12,
font.x = 12,
font.y = 12,
font.tickslab = 12,
font.caption = 12,
font.legend = 12,
censor.shape = 3,
censor.size = 1.5,
#pval = TRUE,
palette = c("red", "green", "blue")
)
dir.create("./Figures/Trimmed_p/005_ggsurv_plots", showWarnings = FALSE)
pdf(paste0("./Figures/Trimmed_p/005_ggsurv_plots/Fig1d_", stage, "_", CMS, "_",
str_c(exclude, collapse = "_"), "_DFS_Survival_", Group.of.interest,"_Surv_cutoff_years_",Surv.cutoff.years,".pdf"),
height=4,width=4.8, onefile = FALSE, family = "ArialMT") # set filename
print(ggsurv_plot)
dev.off()
| /RNASeq/005_Fig1d_PFS_Kaplan_Meier_ICR.R | no_license | Sidra-TBI-FCO/AC-ICAM-NM | R | false | false | 7,339 | r |
## Progression Free Survival
# Set-up environment
rm(list = ls())
load("~/R.Config.Rdata")
setwd(paste0(master.location, "/TBI-LAB - Project - COAD SIDRA-LUMC/NGS_Data"))
source(paste0(toolbox.path, "/R scripts/ipak.function.R"))
required.packages = c("survival", "plyr", "raster", "texreg", "stringr", "coin", "survminer")
ipak(required.packages)
# Set Parameters
Surv.cutoff.years = 20 # SET cut-off
Group.of.interest = "ICR_HML" # "tumour_anatomic_site" or "ICR_cluster_k3"
stage = ""
CMS = ""
exclude = c("Conpair_lower_90_percent", "non-epithelial") #"Conpair_lower_90_percent"
# c("Conpair_lower_90_percent", "non-epithelial")
# Create folders and log file
dir.create("./Figures/Trimmed_p",showWarnings = FALSE)
dir.create("./Figures/Trimmed_p/Kaplan Meier Plots", showWarnings = FALSE)
dir.create("./Logfiles/Kaplan Meier Plots", showWarnings = FALSE)
dir.create("./Analysis/Trimmed_p", showWarnings = FALSE)
dir.create("./Analysis/Trimmed_p/Survival Analysis", showWarnings = FALSE)
# Read in the clinical data file
load("./Processed_Data/Survival Data/JSREP_clinical_data.Rdata")
load("./Analysis/Trimmed_p/016_CMS_Classification/Rfcms.Rdata")
excluded_df = read.csv("./Overview Sample Stocks/Meta_data/Excluded_patients.csv", stringsAsFactors = FALSE)
excluded_df$Patient_ID = str_pad(excluded_df$Patient_ID, 3, pad = "0")
# Add ICR as a variable and assign ICR cluster according to table cluster assignment
load("./Analysis/Trimmed_p/ICR Consensus Clustering/JSREP_ICR_cluster_assignment_k2-6.Rdata")
load("./Processed_Data/RNASeq/Trimmed_p/Normalized/JSREP.clean.dataset.EDAseq.QN.HPC.Rdata")
load("./Processed_Data/WES/MANTIS/MANTIS.Rdata")
table_cluster_assignment$Patient_ID = row.names(table_cluster_assignment)
table_cluster_assignment$Patient_ID = gsub("T_P", "", table_cluster_assignment$Patient_ID)
Merged_dataset = merge(clinical_data, table_cluster_assignment, by = "Patient_ID")
Merged_dataset[, Group.of.interest] = factor(Merged_dataset[, Group.of.interest], levels = c("ICR Low", "ICR Medium", "ICR High"))
Merged_dataset$CMS = Rfcms$RF.predictedCMS[match(Merged_dataset$Patient_ID, substring(rownames(Rfcms), 1, 3))]
MANTIS = MANTIS[which(MANTIS$Tissue == "T"),]
Merged_dataset$MSI = MANTIS$MSI[match(Merged_dataset$Patient_ID, MANTIS$Patient_ID)]
if(CMS == ""){}else{
Merged_dataset = Merged_dataset[which(Merged_dataset$CMS == CMS),]
}
if(all(exclude == "")){
}else{
Merged_dataset = Merged_dataset[-which(Merged_dataset$Patient_ID %in% excluded_df$Patient_ID[which(excluded_df$Reason.excluded %in% exclude)]),]
}
# time / event object creation
Y = Surv.cutoff.years * 365
TS.Alive = Merged_dataset[Merged_dataset$DFS.Status == "Disease Free", c("DFS.Status", "DFS.Time", Group.of.interest, "ICRscore", "ajcc_pathologic_tumor_stage",
"CMS", "age_at_initial_pathologic_diagnosis", "MSI")]
colnames(TS.Alive) = c("Status","Time", Group.of.interest, "ICRscore", "pathologic_stage", "CMS", "Age", "MSI")
TS.Alive$Time = as.numeric(as.character(TS.Alive$Time))
TS.Alive$Time[TS.Alive$Time > Y] = Y
TS.Dead = Merged_dataset[Merged_dataset$DFS.Status == "Event", c("DFS.Status", "DFS.Time", Group.of.interest, "ICRscore", "ajcc_pathologic_tumor_stage",
"CMS", "age_at_initial_pathologic_diagnosis", "MSI")]
colnames(TS.Dead) = c("Status","Time", Group.of.interest, "ICRscore", "pathologic_stage", "CMS", "Age", "MSI")
TS.Dead$Time = as.numeric(as.character(TS.Dead$Time))
TS.Dead$Status[which(TS.Dead$Time> Y)] = "Disease Free"
TS.Dead$Time[TS.Dead$Time > Y] = Y
TS.Surv = rbind (TS.Dead,TS.Alive)
TS.Surv$Time = as.numeric(as.character(TS.Surv$Time))
TS.Surv$Status <- TS.Surv$Status == "Event"
TS.Surv = subset(TS.Surv,TS.Surv$Time > 1)                                        # remove patients with less than 1 day of follow-up time
if(stage != ""){
TS.Surv = TS.Surv[which(TS.Surv$pathologic_stage == stage),]
}
# survival curve
msurv = Surv(TS.Surv$Time/30.4, TS.Surv$Status) # calculate the number of months
mfit = survfit(msurv~TS.Surv[,Group.of.interest],conf.type = "log-log")
# Calculations (Needs manual adaptation!)
mdiff = survdiff(eval(mfit$call$formula), data = eval(mfit$call$data))
pval = pchisq(mdiff$chisq,length(mdiff$n) - 1,lower.tail = FALSE)
pvaltxt = ifelse(pval < 0.0001,"p < 0.0001",paste("p =", signif(pval, 3)))
Cal.Surv = TS.Surv
#Cal.Surv[,Group.of.interest] = as.factor(Cal.Surv[,Group.of.interest])
#Cal.Surv[,Group.of.interest] = relevel(Cal.Surv[,Group.of.interest], "ICR3")
Cal.Surv[,Group.of.interest] = as.factor(Cal.Surv[,Group.of.interest])
mHR = coxph(formula = msurv ~ Cal.Surv[,Group.of.interest],data = Cal.Surv, subset = Cal.Surv[, Group.of.interest] %in% c("ICR High", "ICR Medium", "ICR Low"))
mHR.extract = extract.coxph(mHR, include.aic = TRUE,
include.rsquared = TRUE, include.maxrs=TRUE,
include.events = TRUE, include.nobs = TRUE,
include.missings = TRUE, include.zph = TRUE)
HRtxt = paste("Hazard-ratio =", signif(exp(mHR.extract@coef),3),"for",names(mHR$coefficients))
beta = coef(mHR)
se = sqrt(diag(mHR$var))
p = 1 - pchisq((beta/se)^2, 1)
CI = confint(mHR)
CI = round(exp(CI),2)
summary(mHR)
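# Optional: collect the Cox fit into one small table for reporting (a sketch;
# the object name HR_table is illustrative and not part of the original script)
HR_table = data.frame(term = names(beta),
                      HR = signif(exp(beta), 3),
                      CI_lower = CI[, 1],
                      CI_upper = CI[, 2],
                      p_value = signif(p, 3))
HR_table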
logrank_test(Surv(Time, Status) ~ get(Group.of.interest), data = TS.Surv[which(TS.Surv[, Group.of.interest] %in% c("ICR High", "ICR Medium", "ICR Low")),])
# plot
TS.Surv[,Group.of.interest] = factor(TS.Surv[,Group.of.interest], levels = c("ICR High", "ICR Medium", "ICR Low"))
msurv = Surv(TS.Surv$Time/30.4, TS.Surv$Status)
mfit = survfit(msurv~TS.Surv[,Group.of.interest],conf.type = "log-log")
ggsurv_plot = ggsurvplot(mfit,
xlim = c(0, 90),
break.time.by = 30,
data = TS.Surv,
censor = TRUE,
risk.table = TRUE,
tables.y.text.col = TRUE,
tables.y.text = FALSE,
tables.height = 0.3,
tables.theme = theme_cleantable(),
#tables.col = "strata",
risk.table.pos = "out",
legend = "none",
ylab = "",
xlab = "Time in months",
#fontsize = 4.5,
font.main = 12,
font.x = 12,
font.y = 12,
font.tickslab = 12,
font.caption = 12,
font.legend = 12,
censor.shape = 3,
censor.size = 1.5,
#pval = TRUE,
palette = c("red", "green", "blue")
)
dir.create("./Figures/Trimmed_p/005_ggsurv_plots", showWarnings = FALSE)
pdf(paste0("./Figures/Trimmed_p/005_ggsurv_plots/Fig1d_", stage, "_", CMS, "_",
str_c(exclude, collapse = "_"), "_DFS_Survival_", Group.of.interest,"_Surv_cutoff_years_",Surv.cutoff.years,".pdf"),
height=4,width=4.8, onefile = FALSE, family = "ArialMT") # set filename
print(ggsurv_plot)
dev.off()
################################################################################
################################################################################
#### server.R ####
####    this code will take care of all data manipulation                   ####
################################################################################
################################################################################
# Packages used by the handlers below: triangle (rtriangle), googleVis
# (gvisHistogram/renderGvis) and DT; the firstNames, lastNames and places
# data frames are expected to be loaded elsewhere (e.g. in global.R).
library(triangle)
library(googleVis)
library(DT)
options(shiny.maxRequestSize=100*1024^8) # effectively no upload size limit
randDayTime = function(dayStart, dayEnd, hourStart, hourEnd, size){
daySeq = seq.Date(as.Date(dayStart),as.Date(dayEnd),by="day")
daySelect = sample(daySeq,size,replace=TRUE)
hourSelect = sample(hourStart:hourEnd,size,replace=TRUE)
minSelect = sample(0:59,size,replace=TRUE)
secSelect = sample(0:59,size,replace=TRUE)
as.POSIXlt(paste(daySelect, " ", hourSelect,
":", minSelect, ":", secSelect, sep=""))
}
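# Example (illustrative): three random timestamps on January 2020 days,
# between 09:00 and 17:59:
#   randDayTime("2020-01-01", "2020-01-31", 9, 17, 3)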
newDataFrame = data.frame(0)
function(input, output) {
#create the dataset
createDataFrame = reactive({
validate(
need(input$noRows != "", "Please enter no of rows and generate a column")
)
if(input$datatype == "numeric"){
if(input$distribution == "normal"){
dataFrame = data.frame(rnorm(input$noRows,
mean = input$normalmean, sd = input$normalsd))
}
else if(input$distribution == "uniform"){
dataFrame = data.frame(runif(input$noRows,
min = input$uniformmin, max = input$uniformmax))
}
else if(input$distribution == "triangular"){
dataFrame = data.frame(rtriangle(input$noRows,
input$triangularmin, input$triangularmax))
}
else if(input$distribution == "exponential"){
dataFrame = data.frame(rexp(input$noRows, rate = input$exponentialrate))
}
else if(input$distribution == "random"){
dataFrame = data.frame(sample(input$randommin:input$randommax,
input$noRows, replace = TRUE))
}
else if(input$distribution == "sequential"){
dataFrame = data.frame(input$sequentialmin:
(input$sequentialmin+input$noRows-1))
}
}
else if(input$datatype == "character"){
if(input$characterType == "fName"){
dataFrame = data.frame(firstNames[sample(1:nrow(firstNames), input$noRows, replace = TRUE ),])
}
else if(input$characterType == "lName"){
dataFrame = data.frame(lastNames[sample(1:nrow(lastNames), input$noRows, replace = TRUE ),])
}
else if(input$characterType == "place"){
dataFrame = data.frame(places[sample(1:nrow(places), input$noRows, replace = TRUE ),])
}
else if(input$characterType == "factor"){
factorValues = data.frame(strsplit(input$factors, ','))
dataFrame = data.frame(factorValues[sample(1:nrow(factorValues), input$noRows, replace = TRUE ),])
}
}
else if(input$datatype == "date"){
if(input$dateType == "oDate"){
noOfDays = input$eDate - input$sDate
dataFrame = data.frame(input$sDate + sample(0:noOfDays,
input$noRows, replace = TRUE))
}
else if(input$dateType == "dateTime"){
dataFrame = data.frame(randDayTime(input$sDate, input$eDate,
input$sHour, input$eHour, input$noRows))
}
else if(input$dateType == "timeStamp"){
dataFrame = randDayTime(input$sDate, input$eDate,
input$sHour, input$eHour, input$noRows)
dataFrame = data.frame(as.numeric(as.POSIXct(dataFrame)))
}
}
names(dataFrame) = input$colName
return(dataFrame)
})
finalDataFrame = reactive({
input$goButton
dataFrame = isolate(createDataFrame())
name = names(newDataFrame)
name = gsub('[[:digit:]]+', '', name)
newName = names(dataFrame)
occurance = length(subset(name, name == newName))
if(occurance > 0)
{
names(dataFrame) = paste(newName, occurance, sep = "")
}
if(nrow(newDataFrame) == 1 && ncol(newDataFrame) == 1){
newDataFrame <<- dataFrame
}else
{
newDataFrame <<- cbind(newDataFrame, dataFrame)
}
newDataFrame
})
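  # Note: each goButton click appends the newly generated column to the
  # global newDataFrame via <<-, so the table grows one column per click.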
deleteDataFrame = reactive({
if(input$delete){
if(ncol(newDataFrame) <= 2)
{isolate(newDataFrame <<- data.frame(0))}
else {isolate(newDataFrame <<- newDataFrame[,!names(newDataFrame) %in% input$delCol])}
}
})
finalDataFrame1 = reactive({
finalDataFrame()
deleteDataFrame()
newDataFrame
})
output$opTable = DT::renderDataTable(finalDataFrame1(),
rownames = FALSE,
options = list(scrollX = TRUE)
)
#function to download the file
output$downloadData = downloadHandler(
filename = function() {paste(input$fileName, '.csv', sep='')},
content = function(file) {write.csv(finalDataFrame(), file, row.names = FALSE)
})
#function to render the drop down for delete
output$toCol <- renderUI({
df <-finalDataFrame1()
if (is.null(df)) return(NULL)
items=names(df)
names(items)=items
selectInput("delCol", "Select Column to Delete:", items, selected = "", multiple=FALSE)
})
#function to render the visualization
output$viz <- renderGvis({
dataFrame = createDataFrame()
if(input$datatype == "numeric"){
histogram = gvisHistogram(dataFrame, option=list(title = names(dataFrame), height = 115))
return(histogram)
}
else if(input$datatype == "character")
{
myData = data.frame(table(dataFrame))
myData1 = data.frame(myData$Freq)
tbl = gvisHistogram(myData1, option=list(title = names(dataFrame), height = 115))
return(tbl)
}
else if(input$datatype == "date")
{
myData = data.frame(table(dataFrame))
myData1 = data.frame(myData$Freq)
tbl2 = gvisHistogram(myData1, option=list(title = names(dataFrame), height = 115))
return(tbl2)
}
})
} | /dataGeneration1/server.R | no_license | balajisubudhi/projects | R | false | false | 6,144 | r |
library(Epi)
### Name: cutLexis
### Title: Cut follow-up at a specified date for each person.
### Aliases: cutLexis countLexis
### Keywords: survival
### ** Examples
# A small artificial example
xx <- Lexis( entry=list(age=c(17,24,33,29),per=c(1920,1933,1930,1929)),
duration=c(23,57,12,15), exit.status=c(1,2,1,2) )
xx
cut <- c(33,47,29,50)
cutLexis(xx, cut, new.state=3, precursor=1)
cutLexis(xx, cut, new.state=3, precursor=2)
cutLexis(xx, cut, new.state=3, precursor=1:2)
# The same as the last example
cutLexis(xx, cut, new.state=3)
# The same example with a factor status variable
yy <- Lexis(entry = list(age=c(17,24,33,29),per=c(1920,1933,1930,1929)),
duration = c(23,57,12,15),
entry.status = factor(rep("alpha",4),
levels=c("alpha","beta","gamma")),
exit.status = factor(c("alpha","beta","alpha","beta"),
levels=c("alpha","beta","gamma")))
cutLexis(yy,c(33,47,29,50),precursor="alpha",new.state="gamma")
cutLexis(yy,c(33,47,29,50),precursor=c("alpha","beta"),new.state="aleph")
## Using a dataframe as cut argument
rl <- data.frame( lex.id=1:3, cut=c(19,53,26), timescale="age", new.state=3 )
rl
cutLexis( xx, rl )
cutLexis( xx, rl, precursor=1 )
cutLexis( xx, rl, precursor=0:2 )
## It is immaterial in what order splitting and cutting is done
xs <- splitLexis( xx, breaks=seq(0,100,10), time.scale="age" )
xs
xsC <- cutLexis(xs, rl, precursor=0 )
xC <- cutLexis( xx, rl, pre=0 )
xC
xCs <- splitLexis( xC, breaks=seq(0,100,10), time.scale="age" )
xCs
str(xCs)
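# (comparing xsC and xCs above confirms that split-then-cut and
#  cut-then-split give the same Lexis object)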
| /data/genthat_extracted_code/Epi/examples/cutLexis.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,556 | r |
|
## ===============================================
## The Box Plot Rule workflow
## ===============================================
BPrule.wf <- function(form,train,test,...) {
require(dplyr, quietly=TRUE)
ms <- as.matrix(filter(train,Insp != 'fraud') %>%
group_by(Prod) %>%
summarise(median=median(Uprice),iqr=IQR(Uprice)) %>%
select(median,iqr))
rownames(ms) <- levels(train$Prod)
ms[which(ms[,'iqr']==0),'iqr'] <- ms[which(ms[,'iqr']==0),'median']
ORscore <- abs(test$Uprice-ms[test$Prod,'median']) /
ms[test$Prod,'iqr']
rankOrder <- order(ORscore,decreasing=T)
res <- list(testSet=test,rankOrder=rankOrder,
probs=matrix(c(ORscore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
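## myBPrule.wf: a variant of the rule above that uses mean/SD in place of
## median/IQR (the ms column names are left unchanged so the remaining
## code stays identical)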
myBPrule.wf <- function(form,train,test,...) {
require(dplyr, quietly=TRUE)
ms <- as.matrix(filter(train,Insp != 'fraud') %>%
group_by(Prod) %>%
summarise(median=mean(Uprice),iqr=sd(Uprice)) %>%
select(median,iqr))
rownames(ms) <- levels(train$Prod)
ms[which(ms[,'iqr']==0),'iqr'] <- ms[which(ms[,'iqr']==0),'median']
ORscore <- abs(test$Uprice-ms[test$Prod,'median']) /
ms[test$Prod,'iqr']
rankOrder <- order(ORscore,decreasing=T)
res <- list(testSet=test,rankOrder=rankOrder,
probs=matrix(c(ORscore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
## ===============================================
## The LOF workflow
## ===============================================
LOF.wf <- function(form, train, test, k, ...) {
require(DMwR2, quietly=TRUE)
ntr <- nrow(train)
all <- as.data.frame(rbind(train,test))
N <- nrow(all)
ups <- split(all$Uprice,all$Prod)
r <- list(length=ups)
for(u in seq(along=ups))
r[[u]] <- if (NROW(ups[[u]]) > 3)
lofactor(ups[[u]],min(k,NROW(ups[[u]]) %/% 2))
else if (NROW(ups[[u]])) rep(0,NROW(ups[[u]]))
else NULL
all$lof <- vector(length=N)
split(all$lof,all$Prod) <- r
all$lof[which(!(is.infinite(all$lof) | is.nan(all$lof)))] <-
SoftMax(all$lof[which(!(is.infinite(all$lof) | is.nan(all$lof)))])
res <- list(testSet=test,
rankOrder=order(all[(ntr+1):N,'lof'],decreasing=T),
probs=as.matrix(cbind(all[(ntr+1):N,'lof'],
ifelse(test$Insp=='fraud',1,0))))
res
}
## ===============================================
## The NB with Smote workflow
## ===============================================
NBsm.wf <- function(form,train,test,C.perc="balance",dist="HEOM",...) {
require(e1071,quietly=TRUE)
require(UBL,quietly=TRUE)
sup <- which(train$Insp != 'unkn')
data <- as.data.frame(train[sup,c('ID','Prod','Uprice','Insp')])
data$Insp <- factor(data$Insp,levels=c('ok','fraud'))
newData <- SmoteClassif(Insp ~ .,data,C.perc=C.perc,dist=dist,...)
model <- naiveBayes(Insp ~ .,newData)
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')],type='raw')
rankOrder <- order(preds[,'fraud'],decreasing=T)
rankScore <- preds[,'fraud']
res <- list(testSet=test,
rankOrder=rankOrder,
probs=as.matrix(cbind(rankScore,
ifelse(test$Insp=='fraud',1,0))))
res
}
## ===============================================
## The AdaBoosting workflow
## ===============================================
ab.wf <- function(form,train,test,ntrees=100,...) {
require(RWeka,quietly=TRUE)
sup <- which(train$Insp != 'unkn')
data <- as.data.frame(train[sup,c('ID','Prod','Uprice','Insp')])
data$Insp <- factor(data$Insp,levels=c('ok','fraud'))
model <- AdaBoostM1(Insp ~ .,data,
control=Weka_control(I=ntrees))
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')],
type='probability')
rankOrder <- order(preds[,"fraud"],decreasing=TRUE)
rankScore <- preds[,"fraud"]
res <- list(testSet=test,
rankOrder=rankOrder,
probs=as.matrix(cbind(rankScore,
ifelse(test$Insp=='fraud',1,0))))
res
}
## ===============================================
## The AdaBoosting with Self-training workflow
## ===============================================
pred.ada <- function(m,d) {
p <- predict(m,d,type='probability')
data.frame(cl=colnames(p)[apply(p,1,which.max)],
p=apply(p,1,max)
)
}
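## pred.ada reshapes AdaBoostM1's probability matrix into the
## (class, confidence) data frame that DMwR2::SelfTrain expects
## from its `pred` hook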
ab.st.wf <- function(form,train,test,ntrees=100,...) {
require(RWeka,quietly=TRUE)
require(DMwR2,quietly=TRUE)
train <- as.data.frame(train[,c('ID','Prod','Uprice','Insp')])
train[which(train$Insp == 'unkn'),'Insp'] <- NA
train$Insp <- factor(train$Insp,levels=c('ok','fraud'))
model <- SelfTrain(form,train,
learner='AdaBoostM1',
learner.pars=list(control=Weka_control(I=ntrees)),
pred='pred.ada')
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')],
type='probability')
rankOrder <- order(preds[,'fraud'],decreasing=T)
rankScore <- preds[,"fraud"]
res <- list(testSet=test,
rankOrder=rankOrder,
probs=as.matrix(cbind(rankScore,
ifelse(test$Insp=='fraud',1,0))))
res
}
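## For reference, these workflows are meant to be plugged into the
## performanceEstimation package; a sketch (the `sales` task, metric
## names and Holdout settings below are assumptions, not part of this file):
##   library(performanceEstimation)
##   res <- performanceEstimation(
##            PredTask(Insp ~ ., sales),
##            Workflow("BPrule.wf"),
##            EstimationTask(metrics = c("Prec", "Rec"),
##                           method = Holdout(nReps = 3, hldSz = 0.3)))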
| /_InClass/FraudCaseStudy/workflowsCode.R | no_license | sbrayton/DMR | R | false | false | 5,404 | r |
|
#
# Descriptive statistics for a matrix or data frame
#
describe <- function(data, by, detailed = FALSE, ...)
{
data_name <- deparse(substitute(data))
if(!is.data.frame(data))
data <- as.data.frame(data)
vars_name <- colnames(data)
if(!missing(by))
{
by_name <- deparse(substitute(by))
#
if(!is.null(data[[by_name]]))
{
by <- as.factor(data[[by_name]])
} else
if(exists(by_name))
{
by <- as.factor(by)
} else
{
stop(by_name, "not available in data.frame", data_name,
"or object not found")
}
#
x <- split(data[setdiff(vars_name, by_name)], by)
out <- vector("list", length = nlevels(by))
# browser()
for(i in seq(nlevels(by)))
{
out[[i]] <- describe(x[[i]], detailed = detailed)
names(out[[i]]$describe) <- setdiff(vars_name, by_name)
}
names(out) <- levels(by)
out$by <- by_name
class(out) <- c("describe")
return(out)
}
nvar <- length(vars_name)
obj <- vector(mode="list", length=nvar)
names(obj) <- if(nvar > 1) vars_name else data_name
type <- rep(NA, nvar)
opt.warn <- options("warn") # save default warning option
options(warn=-1) # and suppress warnings
#
skewness <- function(x) mean((x - mean(x))^3)/sd(x)^3
kurtosis <- function(x) mean((x - mean(x))^4)/sd(x)^4 - 3
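  # (this is excess kurtosis: a normal distribution scores 0 rather than 3)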
#
for(j in seq(nvar))
{
x <- data[,j]
if(is.factor(x) | typeof(x) == "character" | typeof(x) == "logical")
{
type[j] <- "factor"
if(detailed)
{
out <- summary(as.factor(x))
out <- cbind("Freq." = out, "Percent" = out/sum(out)*100,
"Cum.Percent" = cumsum(out/sum(out)*100))
} else
{
out <- summary(as.factor(x))
}
obj[[j]] <- out
}
else if(any(class(x) == "POSIXt"))
{
type[j] <- "numeric"
out <- summary(x)
obj[[j]] <- out
}
else
{
type[j] <- "numeric"
n.miss <- sum(is.na(x))
x <- na.omit(x)
if(detailed)
{
out <- c(length(x), n.miss, mean(x), sd(x), fivenum(x),
skewness(x), kurtosis(x))
names(out) <- c("Obs", "NAs", "Mean", "StdDev",
"Min", "Q1", "Median", "Q3", "Max",
"Skewness", "Kurtosis")
} else
{
out <- c(length(x), mean(x), sd(x), min(x), median(x), max(x))
names(out) <- c("Obs", "Mean", "StdDev", "Min", "Median", "Max")
}
obj[[j]] <- out
}
}
obj <- list(name = data_name, describe = obj, type = type)
class(obj) <- "describe"
options(warn=opt.warn$warn)
return(obj)
}
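# Example usage (illustrative):
#   describe(iris)                # summaries for every column
#   describe(iris, by = Species)  # the same summaries split by species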
print.describe <- function(x, digits = getOption("digits") - 3, ...)
{
if(!is.null(x$by))
{
by <- which(sapply(x, class) == "describe")
for(i in by)
{
cat(cli::rule(left = paste(x$by, "=", names(x)[i]),
width = getOption("width")), "\n")
print(x[[i]])
if(i < length(by)) cat("\n")
}
return(invisible())
}
descr <- x$describe
isNum <- (x$type == "numeric")
if(sum(isNum) > 0)
{
out1 <- do.call("rbind",descr[isNum])
print(out1, digits = digits)
}
if(sum(!isNum) > 0)
{
out2 <- descr[!isNum]
for(j in seq(out2))
{
if(is.vector(out2[[j]]))
{
out2[[j]] <- do.call("rbind", out2[j])
cat("\n")
print(out2[[j]], digits = digits)
} else
{
names(dimnames(out2[[j]])) <- c(names(out2)[j], "")
print(out2[[j]], digits = digits)
}
}
}
invisible()
}
| /R/describe.R | no_license | luca-scr/qcc | R | false | false | 3,592 | r |
|
\name{rbftrain}
\alias{rbftrain}
\title{RBF neural network}
\description{A simple RBF neural network suitable for approximation.}
\usage{ rbftrain(inp,neurons,out,weight=c(),dist=c(),alfa=0.2,it=40,err=0,
sigma=NaN,online=TRUE,permute=TRUE,visual=TRUE, ...)
}
\arguments{
\item{inp}{a matrix that contains one input data in each row.}
\item{neurons}{the number of neurons in the hidden layer.}
\item{out}{a matrix that contains one output data in each row.}
\item{weight}{the starting weights of the network.}
\item{dist}{the starting distortions of the network.}
\item{alfa}{the learning-rate parameter of the back-propagation algorithm.}
\item{it}{the maximum number of training iterations.}
 \item{err}{the average error at the training points; if the average error
 ever falls below this value, the algorithm stops.}
\item{sigma}{the width of the Gauss functions.}
 \item{online}{if TRUE the algorithm operates in the sequential mode of back-propagation;
 if FALSE it operates in the batch mode.}
\item{permute}{if TRUE the algorithm will use a random
permutation of the input data in each epoch.}
\item{visual}{a logical value, that switches on/off the graphical user interface.}
\item{\dots}{currently not used...}
}
\value{
list with 4 argument
\item{weight}{the weights of the network.}
\item{dist}{the distortion of the network.}
 \item{neurons}{a numeric vector whose length equals the number of layers in the network;
 the ith layer contains neurons[i] neurons.}
\item{sigma}{the width of the Gauss functions.}
}
\details{
The function creates an RBF neural network on the basis of the
function parameters. After creating the network, the function trains
it with the back-propagation algorithm using the inp and out arguments. These two arguments must have the same number of rows, otherwise the
function stops with an error message.
If you supply the weight or dist argument, those values will not be chosen at random.
This is useful if you want to retrain your network; in that case supply both arguments at the same time.
The function works with normalized Gauss functions, whose width parameter is the sigma argument. If you want to set the widths yourself, this argument
should be a matrix with as many rows as there are neurons in the first layer and as many columns as neurons in the second layer.
If the sigma argument is NaN, the width of each Gauss function will be half of the distance between the two nearest training samples, times 1.1. If the sigma argument is a single number,
then every sigma value will be that number.
The function has a graphical user interface that can be switched
on and off with the visual argument. If the graphical user interface is on, the function can
show the result of the approximation in a co-ordinate system, provided the approximated function has a single input.
The result of the function is the parameter set of the trained RBF neural network. Use the rbf function for information recall.
}
\seealso{ `rbf' for recalling; `mlp' and `mlptrain' for classification.}
\examples{
x<-t(matrix(-5:10*24,1,16));
y<-t(matrix(sin(pi/180*(-5:10*24)),1,16));
neurons<-8;
\dontrun{
data<-rbftrain(x,neurons,y,sigma=NaN)
rbf(x,data$weight,data$dist,data$neurons,data$sigma)
}
}
\keyword{neural}
| /man/rbftrain.Rd | no_license | albiondervishi1/neural | R | false | false | 3,479 | rd |
|
library(tidyverse)
library(igraph)
project_directory <- rprojroot::find_root(
criterion = rprojroot::has_dir(".git")
)
day <- 8
part <- 1
input <- read_lines(
paste0(
project_directory
, "/"
, "inputs/day"
, day
, ".txt"
)
)
test_input <- read_lines(
paste0(
project_directory
, "/"
, "test_inputs/day"
, day
, "_part"
, part
, ".txt"
)
)
input_clean <- input %>%
tibble(instruction = .) %>%
separate(instruction, into = c("instruction_type", "value"), sep = " ") %>%
mutate(
value = as.numeric(value)
, id = 1:n()
, accumulator_increment = ifelse(
instruction_type == "acc"
, value
, 0
)
, id_increment = ifelse(
instruction_type == "jmp"
, value
, 1
)
, next_id = id + id_increment
)
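# Op semantics: "acc" adds `value` to the accumulator and steps ahead by 1,
# "jmp" moves by `value`, and "nop" steps ahead by 1 -- hence the two
# increment columns built above.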
input_clean %>%
select(
id
, next_id
) %>%
graph_from_data_frame() %>%
plot()
accumulator <- 0
current_id <- 1
id_list <- c(current_id)
while (length(unique(id_list)) == length(id_list)) {
next_instruction <- input_clean %>%
filter(id == current_id)
next_id <- next_instruction$next_id
accumulator <- accumulator + next_instruction$accumulator_increment
id_list <- c(id_list, next_id)
current_id <- next_id
}
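# The loop exits on the first instruction id seen twice; at that point the
# accumulator holds the part-1 answer.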
answer <- accumulator
write_lines(
answer
, paste0(
project_directory
, "/"
, "answers/day"
, day
, "_part"
, part
, ".txt"
)
)
| /solutions/day8_part1.r | no_license | johnchower/advent_of_code_2020 | R | false | false | 1,375 | r |
|
library(oaxaca)
setwd("~/phd/research/Bertarifa4/data")
data <- read.csv("tarifa2015_b2.csv")
tarifred <- subset(data, fforma==1 & atip==4)
# factors
tarifred$iskveg9 <- as.factor(tarifred$iskveg9)
tarifred$kra <- as.factor(tarifred$kra)
tarifred$ag1 <- as.factor(tarifred$ag1)
tarifred$kol <- as.factor(tarifred$kol)
tarifred$ara <- as.factor(tarifred$ara)
tarifred$kshreg <- as.factor(tarifred$kshreg)
tarifred$nem <- as.factor(tarifred$nem)
tarifred$ujbel <- tarifred$ujbel
tarifred$ttip <- tarifred$ttip
# dummies
tarifred$iskveg9_07 <- 0
tarifred$iskveg9_07[tarifred$iskveg9==1] <- 1
# oaxaca
results <- oaxaca(formula = lnker ~
+ iskveg9
+ letszam_bv1
+ kra
+ kor
+ ag1
+ exp
+ kol
+ ara
+ szolgho
+ kshreg
+ ttip
+ ujbel
- 1
| nem
, data = tarifred, R = 10)
# group.weight indicates which group is the benchmark
# 0 = group A
# 1 = group B
plot(results
, decomposition = "twofold"
, group.weight = -1)
| /codes/z_other/oax_02.R | no_license | tackyolgi/bertarifa | R | false | false | 1,299 | r |
|
library(shiny)
ui <- fluidPage(
fluidRow(
column(5, tags$div(class = "header", checked = NA,
tags$h4("Click the map below to query that county's\naverage weekly income by job class.")))
),
fluidRow(
column(5, plotOutput("map_plot", click = "click_plot")),
column(7, plotOutput("line_plot"))
)
)
| /ui.r | no_license | jmt2080ad/bls_shiny | R | false | false | 364 | r |
|
# Creating two radio buttons
create_radio_buttons <- function() {
window <- gtkWindow("toplevel", show = F)
box <- gtkVBoxNew(TRUE, 2)
## Create a radio button with a GtkEntry widget
radio1 <- gtkRadioButton()
entry <- gtkEntry()
radio1$add(entry)
## Create a radio button with a label
radio2 <- gtkRadioButtonNewWithLabelFromWidget(radio1,
"I'm the second radio button.")
## Pack them into a box, then show all the widgets
box$packStart(radio1, TRUE, TRUE, 2)
box$packStart(radio2, TRUE, TRUE, 2)
window$add(box)
window$showAll()
}
| /RGtk2/inst/examples/GtkRadioButton.R | no_license | lawremi/RGtk2 | R | false | false | 622 | r |
|
# Run with:
#
# Rscript --vanilla SimpleGaussian.R input sigma output
#
library(SimpleITK)
args <- commandArgs( TRUE )
myreader <- ImageFileReader()
myreader <- ImageFileReader_SetFilename( myreader, args[[1]] )
myimage <- ImageFileReader_Execute( myreader )
myfilter <- RecursiveGaussianImageFilter()
myfilter <- RecursiveGaussianImageFilter_SetSigma( myfilter, as.numeric(args[[2]]) )
smoothedimage <- RecursiveGaussianImageFilter_Execute( myfilter, myimage )
mywriter <- ImageFileWriter()
mywriter <- ImageFileWriter_SetFilename( mywriter, args[[3]] )
mywriter <- ImageFileWriter_Execute( mywriter, smoothedimage )
| /Examples/SimpleGaussian.R | permissive | josephsnyder/SimpleITK | R | false | false | 620 | r |
|
rm(list=ls())
### Initialize data sets to work with
# Webscrape Statistics from Baskballreference
library(rvest)
thelink <- xml2::read_html("https://www.basketball-reference.com/leagues/NBA_2021_per_game.html")
nba <- as.data.frame(thelink %>%html_nodes("table")%>%.[[1]] %>%html_table())
# Remove repeated header row
unn <- subset(nba, nba$Rk != 'Rk')
# Remove duplicated player records (duplicated if player traded, keep only 'Total' record)
nba2 <- unn[!duplicated(unn$Player),]
# Convert regressor variables to integer values
i <- c(6:30)
nba2[ , i] <- apply(nba2[ , i], 2, function(x) as.numeric(as.character(x)))
# Filter out players not having played >= [pct] of total games played
pct <- .7 #define percentage of games player must have played in as compared to max player
nba3 <- subset(nba2,nba2$G >= max(nba2$G)*pct)
# Remove attempts and attempts made columns (as they are simple divisors and dividends of percentage columns)
#nba4 <- nba3[-c(1,2,4:8,9,10,12,13,15,16,19,20)]
#summary(nba4)
################### CSV SAVED ON APRIL 8th 2021 ####################
#write.csv(nba3, "nba3.csv")
nba3 <- read.csv('~/Desktop/STAT6440/6440-Project/nba3.csv')
#nba3 <- read.csv('C:/Users/super/Documents/MSA/Sp21/MSA 6440/Final Project/nba3.csv')
colnames(nba3)[11] <- 'FG%'
colnames(nba3)[12] <- '3P'
colnames(nba3)[13] <- '3PA'
colnames(nba3)[14] <- '3P%'
colnames(nba3)[15] <- '2P'
colnames(nba3)[16] <- '2PA'
colnames(nba3)[17] <- '2P%'
colnames(nba3)[18] <- 'eF%'
colnames(nba3)[21] <- 'FT%'
nba4 <- nba3[-c(1,2,4:8,9,10,12,13,15,16,19,20)]
#nba4 <- na.omit(nba3) #remove records with 'na's
nba3$`3P%`[is.na(nba3$`3P%`)]<-0
nba4$`3P%`[is.na(nba4$`3P%`)]<-0
summary(nba4)
# Construct position specific dataframes and assign values to specific positions
table(nba4$Pos)
nba3[nba3$Pos =='C-PF',] # Kelly Olynyk
nba3[nba3$Pos =='PG-SG',] # James Harden
nba3[nba3$Pos =='SF-SG',] # Sviatoslav Mykhailiuk
nba3[nba3$Pos =='SG-PG',] # Delon Wright
nba3[nba3$Pos =='SG-SF',] # Hamidou Diallo & Norman Powell
################# KNN TO DEFINE PLAYERS #################
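# Plan: for each dual-position label, fit kNN on the single-position players
# (70:30 split, k tuned by Cohen's kappa on the test set), then classify the
# ambiguous player with the tuned k.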
#### Olynyk: C OR PF?? ####
source("~/Desktop/STAT6440/W2/myfunctions.R")
#source("C:/Users/super/Documents/MSA/Sp21/MSA 6440/Data/myfunctions.R")
knn <- subset(nba4,nba4$Pos =='C' | nba4$Pos =='PF')
knn$Pos <- as.factor(knn$Pos)
RNGkind (sample.kind = "Rounding")
set.seed(0)
### create 70:30 partition
p2 <- partition.2(knn, 0.7)
training.data <- p2$data.train
test.data <- p2$data.test
### Rescale the data
training.scaled <- scale(training.data[,-1], center = TRUE, scale = TRUE)
training.scaled.wY <- cbind(training.scaled, training.data[,1])
training.scaled.attr <- attributes(training.scaled)
test.scaled <- scale(test.data[,-1],
center = training.scaled.attr$`scaled:center`,
scale = training.scaled.attr$`scaled:scale`)
### fit k-nn model for k = 1, ..., 60
library(FNN)
library(caret)
K <- 60
kappa <- rep(0, K)
for (kk in 1:K){
Knn <- knn(train = training.scaled, test = test.scaled,
cl = training.data[,1], k = kk)
c <- confusionMatrix(as.factor(Knn), as.factor(test.data[,1]),
positive = "C")
kappa[kk] <- c$overall["Kappa"]
cat("K", kk, "Kappa", kappa[kk], "\n")
}
# create a plot for k vs. kappa
plot(c(1:K), kappa, xlab = "k", ylab = "Kappa", type = "l", col = "#FC4E07")
max.k <- which.max(kappa)
max.k #1 is too low, use 13, next highest, instead
max.k <- 13
### Fit kNN model on a single new observation with k=[max.k]
x <- nba4[(nba4$Pos=="C-PF"),]
x0 <- x[-1]
x0.scaled <- scale(x0, center = training.scaled.attr$`scaled:center`, scale = training.scaled.attr$`scaled:scale`)
Knn <- knn(train = training.scaled, test = x0.scaled,
cl = training.data[,1], k = max.k)
Knn
## labels of nearest neighbors
Knn.attr <- attributes(Knn)
training.data[Knn.attr$nn.index,1]
summary(training.data[Knn.attr$nn.index,1]) #Label as PF
#### HARDIN: PG OR SG?? ####
knn <- subset(nba4,nba4$Pos =='PG' | nba4$Pos =='SG')
knn$Pos <- as.factor(knn$Pos)
RNGkind (sample.kind = "Rounding")
set.seed(0)
### create 70:30 partition
p2 <- partition.2(knn, 0.7)
training.data <- p2$data.train
test.data <- p2$data.test
### Rescale the data
training.scaled <- scale(training.data[,-1], center = TRUE, scale = TRUE)
training.scaled.wY <- cbind(training.scaled, training.data[,1])
training.scaled.attr <- attributes(training.scaled)
test.scaled <- scale(test.data[,-1],
center = training.scaled.attr$`scaled:center`,
scale = training.scaled.attr$`scaled:scale`)
### fit k-nn model for k = 1, ..., 60
K <- 60
kappa <- rep(0, K)
for (kk in 1:K){
Knn <- knn(train = training.scaled, test = test.scaled,
cl = training.data[,1], k = kk)
c <- confusionMatrix(as.factor(Knn), as.factor(test.data[,1]),
positive = "PG")
kappa[kk] <- c$overall["Kappa"]
cat("K", kk, "Kappa", kappa[kk], "\n")
}
# create a plot for k vs. kappa
plot(c(1:K), kappa, xlab = "k", ylab = "Kappa", type = "l", col = "#FC4E07")
max.k <- which.max(kappa)
max.k
### Fit kNN model on a single new observation with k=[max.k]
x <- nba4[(nba4$Pos=="PG-SG"),]
x0 <- x[-1]
x0.scaled <- scale(x0, center = training.scaled.attr$`scaled:center`, scale = training.scaled.attr$`scaled:scale`)
Knn <- knn(train = training.scaled, test = x0.scaled,
cl = training.data[,1], k = max.k)
Knn
## labels of nearest neighbors
Knn.attr <- attributes(Knn)
training.data[Knn.attr$nn.index,1]
summary(training.data[Knn.attr$nn.index,1]) #label as PG
#### Mykhailiuk : SF OR SG?? ####
knn <- subset(nba4,nba4$Pos =='SF' | nba4$Pos =='SG')
knn$Pos <- as.factor(knn$Pos)
RNGkind (sample.kind = "Rounding")
set.seed(0)
### create 70:30 partition
p2 <- partition.2(knn, 0.7)
training.data <- p2$data.train
test.data <- p2$data.test
### Rescale the data
training.scaled <- scale(training.data[,-1], center = TRUE, scale = TRUE)
training.scaled.wY <- cbind(training.scaled, training.data[,1])
training.scaled.attr <- attributes(training.scaled)
test.scaled <- scale(test.data[,-1],
center = training.scaled.attr$`scaled:center`,
scale = training.scaled.attr$`scaled:scale`)
### fit k-nn model for k = 1, ..., 60
K <- 60
kappa <- rep(0, K)
for (kk in 1:K){
Knn <- knn(train = training.scaled, test = test.scaled,
cl = training.data[,1], k = kk)
c <- confusionMatrix(as.factor(Knn), as.factor(test.data[,1]),
positive = "SF")
kappa[kk] <- c$overall["Kappa"]
cat("K", kk, "Kappa", kappa[kk], "\n")
}
# create a plot for k vs. kappa
plot(c(1:K), kappa, xlab = "k", ylab = "Kappa", type = "l", col = "#FC4E07")
max.k <- which.max(kappa)
max.k
### Fit kNN model on a single new observation with k=[max.k]
x <- nba4[(nba4$Pos=="SF-SG"),]
x0 <- x[-1]
x0.scaled <- scale(x0, center = training.scaled.attr$`scaled:center`, scale = training.scaled.attr$`scaled:scale`)
Knn <- knn(train = training.scaled, test = x0.scaled,
cl = training.data[,1], k = max.k)
Knn
## labels of nearest neighbors
Knn.attr <- attributes(Knn)
training.data[Knn.attr$nn.index,1]
summary(training.data[Knn.attr$nn.index,1]) #label as SG
#### Wright: SG OR PG?? ####
knn <- subset(nba4,nba4$Pos =='SG' | nba4$Pos =='PG')
knn$Pos <- as.factor(knn$Pos)
RNGkind (sample.kind = "Rounding")
set.seed(0)
### create 70:30 partition
p2 <- partition.2(knn, 0.7)
training.data <- p2$data.train
test.data <- p2$data.test
### Rescale the data
training.scaled <- scale(training.data[,-1], center = TRUE, scale = TRUE)
training.scaled.wY <- cbind(training.scaled, training.data[,1])
training.scaled.attr <- attributes(training.scaled)
test.scaled <- scale(test.data[,-1],
center = training.scaled.attr$`scaled:center`,
scale = training.scaled.attr$`scaled:scale`)
### fit k-nn model for k = 1, ..., 60
K <- 60
kappa <- rep(0, K)
for (kk in 1:K){
Knn <- knn(train = training.scaled, test = test.scaled,
cl = training.data[,1], k = kk)
c <- confusionMatrix(as.factor(Knn), as.factor(test.data[,1]),
positive = "SG")
kappa[kk] <- c$overall["Kappa"]
cat("K", kk, "Kappa", kappa[kk], "\n")
}
# create a plot for k vs. kappa
plot(c(1:K), kappa, xlab = "k", ylab = "Kappa", type = "l", col = "#FC4E07")
max.k <- which.max(kappa)
max.k
### Fit kNN model on a single new observation with k=[max.k]
x <- nba4[(nba4$Pos=="SG-PG"),]
x0 <- x[-1]
x0.scaled <- scale(x0, center = training.scaled.attr$`scaled:center`, scale = training.scaled.attr$`scaled:scale`)
Knn <- knn(train = training.scaled, test = x0.scaled,
cl = training.data[,1], k = max.k)
Knn
## labels of nearest neighbors
Knn.attr <- attributes(Knn)
training.data[Knn.attr$nn.index,1]
summary(training.data[Knn.attr$nn.index,1]) #label as SG
#### Diallo & Powell: SG OR SF?? ####
knn <- subset(nba4,nba4$Pos =='SG' | nba4$Pos =='SF')
knn$Pos <- as.factor(knn$Pos)
RNGkind (sample.kind = "Rounding")
set.seed(0)
### create 70:30 partition
p2 <- partition.2(knn, 0.7)
training.data <- p2$data.train
test.data <- p2$data.test
### Rescale the data
training.scaled <- scale(training.data[,-1], center = TRUE, scale = TRUE)
training.scaled.wY <- cbind(training.scaled, training.data[,1])
training.scaled.attr <- attributes(training.scaled)
test.scaled <- scale(test.data[,-1],
center = training.scaled.attr$`scaled:center`,
scale = training.scaled.attr$`scaled:scale`)
### fit k-nn model for k = 1, ..., 60
K <- 60
kappa <- rep(0, K)
for (kk in 1:K){
Knn <- knn(train = training.scaled, test = test.scaled,
cl = training.data[,1], k = kk)
c <- confusionMatrix(as.factor(Knn), as.factor(test.data[,1]),
positive = "SG")
kappa[kk] <- c$overall["Kappa"]
cat("K", kk, "Kappa", kappa[kk], "\n")
}
# create a plot for k vs. kappa
plot(c(1:K), kappa, xlab = "k", ylab = "Kappa", type = "l", col = "#FC4E07")
max.k <- which.max(kappa)
max.k
### Fit kNN model on a single new observation with k=[max.k]
x <- nba4[(nba4$Pos=="SG-SF"),]
x0 <- x[1,][-1] #Diallo
x1 <- x[2,][-1] #Powell
x0.scaled <- scale(x0, center = training.scaled.attr$`scaled:center`, scale = training.scaled.attr$`scaled:scale`)
x1.scaled <- scale(x1, center = training.scaled.attr$`scaled:center`, scale = training.scaled.attr$`scaled:scale`)
Knn0 <- knn(train = training.scaled, test = x0.scaled,
cl = training.data[,1], k = max.k)
Knn0
Knn1 <- knn(train = training.scaled, test = x1.scaled,
cl = training.data[,1], k = max.k)
Knn1
## labels of nearest neighbors
Knn.attr <- attributes(Knn0)
training.data[Knn.attr$nn.index,1]
summary(training.data[Knn.attr$nn.index,1])
table(knn$Pos) #label Diallo as SG MAJORITY RULE
Knn.attr <- attributes(Knn1)
training.data[Knn.attr$nn.index,1]
summary(training.data[Knn.attr$nn.index,1]) #label Powell as SG
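# The five blocks above repeat one workflow; a helper along these lines
# (a sketch -- the function name and default k are illustrative, and it skips
# the train/test split used above to tune k) classifies a dual-position
# label in a single call:
classify_pos <- function(dat, pos_pair, dual_lbl, k = 13) {
  knn.dat <- subset(dat, Pos %in% pos_pair)
  knn.dat$Pos <- factor(knn.dat$Pos)              # drop unused levels
  train.x <- scale(knn.dat[, -1])                 # standardize predictors
  ctr <- attr(train.x, "scaled:center")
  scl <- attr(train.x, "scaled:scale")
  x0 <- scale(dat[dat$Pos == dual_lbl, -1], center = ctr, scale = scl)
  FNN::knn(train = train.x, test = x0, cl = knn.dat$Pos, k = k)
}
# e.g. classify_pos(nba4, c("C","PF"), "C-PF") mirrors the first block above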
#################### POP OUT OF KNN CLASSIFICIATION ###################
#define Olynyk as KNN result, viz. "PF"
nba4$Pos[nba4$Pos=="C-PF"] <- "PF"
nba3$Pos[nba3$Pos=="C-PF"] <- "PF"
#define Hardin as KNN result, viz. "PG"
nba4$Pos[nba4$Pos=="PG-SG"] <- "PG"
nba3$Pos[nba3$Pos=="PG-SG"] <- "PG"
#define Mykhailiuk as KNN result, viz. "SG"
nba4$Pos[nba4$Pos=="SF-SG"] <- "SG"
nba3$Pos[nba3$Pos=="SF-SG"] <- "SG"
#define Write as KNN result, viz. "SG"
nba4$Pos[nba4$Pos=="SG-PG"] <- "SG"
nba3$Pos[nba3$Pos=="SG-PG"] <- "SG"
#define Powell as KNN result, viz. "SG"
nba4$Pos[nba4$Pos=="SG-SF"] <- "SG"
nba3$Pos[nba3$Pos=="SG-SF"] <- "SG"
Dat <- nba4
Dat2 <- nba3[-c(1,4:8,9,10,12,13,15,16,19,20)]
Dat$Pos <- as.factor(Dat$Pos)
table(nba4$Pos) #good
################### CSV SAVED ON APRIL 11th 2021 ####################
#write.csv(Dat2, "NBAper.csv")
## Create training and test data ##
#source("C:/Users/super/Documents/MSA/Sp21/MSA 6440/Data/myfunctions.R")
RNGkind (sample.kind = "Rounding")
set.seed(0) ## set seed so that you get same partition each time
p2 <- partition.2(Dat, 0.7) ## creating 70:30 partition
training.data <- p2$data.train
test.data <- p2$data.test
nba.nolbl <- training.data[,-1]
nba.scaled <- scale(nba.nolbl, center = TRUE, scale = TRUE)
# https://predictivehacks.com/how-to-determine-the-number-of-clusters-of-k-means-in-r/
library(factoextra)
library(NbClust)
library(mclust)
set.seed(123)
fviz_nbclust(nba.scaled, kmeans, method = "wss") +
geom_vline(xintercept = 2, linetype = 2)+
labs(subtitle = "Elbow method")
fviz_nbclust(nba.scaled, kmeans, method = "silhouette")+
labs(subtitle = "Silhouette method")
fviz_nbclust(nba.scaled, kmeans, nstart = 25, method = "gap_stat", nboot = 500)+
  labs(subtitle = "Gap statistic method") # none of these charts gives a clear choice for the number of clusters
############# Clustering ##################
#### Mclust #####
library(mclust)
RNGkind (sample.kind = "Rounding")
set.seed(0)
mc.list <- list()
bic <- NULL
for (k in 1:10){
mc <- Mclust(Dat[-1], k)
bic <- c(bic, mc$bic)
  mc.list[[k]] <- mc  # keep the fitted k-cluster model
}
plot(seq(1,10,1), bic, type = "l", xlab = "Number of clusters")
plot(bic[1:10], type = "l", xlab = "Number of clusters")
maxbic <- which.max(bic[1:10])
maxbic
mc3 <- Mclust(Dat[-1], maxbic)
table(mc3$classification, Dat$Pos)
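# Row proportions make the cluster/position mix easier to read (optional):
round(prop.table(table(mc3$classification, Dat$Pos), margin = 1), 2)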
lblper <- c('FG%','3P%','2P%','eFG%','FT%')
lbloff <- c('ORB','AST','PTS')
lbldef <- c('BLK','STL','DRB')
lblsp <- c('TOV','PF')
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,5), c(0,1), type = "n", xlab = "", ylab = "Est. Mean",
xaxt='n', main = "Clustering using Mclust: Percentages")
axis(side = 1, at=seq(1,5,1), las=2, labels = lblper)
xseq <- seq(1,5,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, mc3$parameters$mean[1:5,k], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,3), c(0,20), type = "n", xlab = "", ylab = "Est. Mean",
xaxt='n', main = "Clustering using Mclust: Offense")
axis(side = 1, at=seq(1,3,1), las=2, labels = lbloff)
xseq <- seq(1,3,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, mc3$parameters$mean[c(6,9,14),k], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,3), c(0,5), type = "n", xlab = "", ylab = "Est. Mean",
xaxt='n', main = "Clustering using Mclust: Defense")
axis(side = 1, at=seq(1,3,1), las=2, labels = lbldef)
xseq <- seq(1,3,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, mc3$parameters$mean[c(11,10,7),k], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,2), c(0,3), type = "n", xlab = "", ylab = "Est. Mean",
xaxt='n', main = "Clustering using Mclust: Special")
axis(side = 1, at=seq(1,2,1), las=2, labels = lblsp)
xseq <- seq(1,2,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, mc3$parameters$mean[c(12,13),k], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
# Mclust Clustering Plots
library(tidyr)
library(ggplot2)
dat.mc <- as.data.frame(t(mc3$parameters$mean))
dat.mc$clusters <- row.names(dat.mc)
# Percentages Clusters
dat.mc1 <- dat.mc[c(1:5,15)]
dat.g <- gather(dat.mc1, stats, mc3mean, -clusters)
mc.1 <- ggplot(dat.g, aes(stats, mc3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Mclust: Percentages")
mc.1
# Offense Clusters
dat.mc1 <- dat.mc[c(6,9,14,15)]
dat.g <- gather(dat.mc1, stats, mc3mean, -clusters)
mc.2 <- ggplot(dat.g, aes(stats, mc3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Mclust: Offense")
mc.2
# Defense Clusters
dat.mc1 <- dat.mc[c(11,10,7,15)]
dat.g <- gather(dat.mc1, stats, mc3mean, -clusters)
mc.3 <- ggplot(dat.g, aes(stats, mc3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Mclust: Defense")
mc.3
# Special Clusters
dat.mc1 <- dat.mc[c(12,13,15)]
dat.g <- gather(dat.mc1, stats, mc3mean, -clusters)
mc.4 <- ggplot(dat.g, aes(stats, mc3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Mclust: Special")
mc.4
#################
#### Kmeans #####
#################
RNGkind (sample.kind = "Rounding")
set.seed(1)
### Rescale the data
nba.scaled <- scale(Dat[-1], center = TRUE, scale = TRUE)
nba.scaled.attr <- attributes(nba.scaled)
km.list <- list()
wss <- NULL
for (k in 1:10){
tmp.wss <- NULL
## Running kmeans 10 times for each k to get the best result
for (itr in 1:10){
set.seed(itr)
km <- kmeans(nba.scaled, k)
tmp.wss <- c(tmp.wss, km$tot.withinss)
}
select.seed <- which.min(tmp.wss)
set.seed(select.seed)
km <- kmeans(nba.scaled, k)
wss <- c(wss, km$tot.withinss)
  km.list[[k]] <- km  # keep the best k-cluster fit
}
plot(seq(1,10,1), wss, type = "l", xlab = "Number of clusters")
km3 <- kmeans(nba.scaled, 3) #to match nicely with mclust choose 3, can try 4, too
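# Alternative to the manual seed search above: kmeans() can restart itself,
# e.g. kmeans(nba.scaled, 3, nstart = 25), keeping the lowest tot.withinss fit.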
table(mc3$classification, km3$cluster)
# Manually match the labels, as much as possible,
# to the mclust classification for easier comparison
km3id <- ifelse(km3$cluster == 3, 2,
ifelse(km3$cluster == 2, 3,1))
table(km3id, Dat$Pos)
table(mc3$classification, km3id)
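# Optional: quantify mclust/kmeans agreement with the adjusted Rand index
# (mclust is already loaded; 1 means identical partitions).
adjustedRandIndex(mc3$classification, km3id)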
# Convert the kmeans centroids to the original scale for plotting
km3.centers.rescaled <- km3$centers
for (j in 1:14){
km3.centers.rescaled[,j] <- nba.scaled.attr$`scaled:center`[j] +
km3$centers[,j]*nba.scaled.attr$`scaled:scale`[j]
}
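# Equivalent vectorized alternative to the loop above (same result, no loop):
# km3.centers.rescaled <- sweep(sweep(km3$centers, 2, nba.scaled.attr$`scaled:scale`, `*`),
#                               2, nba.scaled.attr$`scaled:center`, `+`)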
# Now change the indices to approximately match
# mclust classification for easier comparison
km3.centers.rescaled.tmp <- km3.centers.rescaled
km3.centers.rescaled[1,] <- km3.centers.rescaled.tmp[1,]
km3.centers.rescaled[2,] <- km3.centers.rescaled.tmp[3,]
km3.centers.rescaled[3,] <- km3.centers.rescaled.tmp[2,]
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,5), c(0,1), type = "n", xlab = "", ylab = "Kmeans centroids",
xaxt='n', main = "Clustering using Kmeans: Percentages")
axis(side = 1, at=seq(1,5,1), las=2, labels = lblper)
xseq <- seq(1,5,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, km3.centers.rescaled[k,1:5], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,3), c(0,6), type = "n", xlab = "", ylab = "Kmeans centroids",
xaxt='n', main = "Clustering using Kmeans: Offense")
axis(side = 1, at=seq(1,3,1), las=2, labels = lbloff)
xseq <- seq(1,3,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, km3.centers.rescaled[k,c(6,9,14)], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,3), c(0,6), type = "n", xlab = "", ylab = "Kmeans centroids",
xaxt='n', main = "Clustering using Kmeans: Defense")
axis(side = 1, at=seq(1,3,1), las=2, labels = lbldef)
xseq <- seq(1,3,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, km3.centers.rescaled[k,c(11,10,7)], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
par(oma=c(6,2,1,1), mar = c(4,4,2,1) + 0.1)
plot(c(0,2), c(0,3), type = "n", xlab = "", ylab = "Kmeans centroids",
     xaxt='n', main = "Clustering using Kmeans: Special")
axis(side = 1, at=seq(1,2,1), las=2, labels = lblsp)
xseq <- seq(1,2,1)
col.seq <- c("#00AFBB","#E7B800","#FC4E07")
for (k in 1:3){
lines(xseq, km3.centers.rescaled[k,c(12,13)], col=col.seq[k], lty=2)
}
legend("topleft", col = col.seq, lty = 2,
legend = c("Cluster 1", "Cluster 2", "Cluster 3"))
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/uni_mclust.pdf")
# Kmeans Clustering Plots
library(tidyr)
library(ggplot2)
dat.km <- as.data.frame(km3.centers.rescaled)
dat.km$clusters <- row.names(dat.km)
# Percentages Clusters
dat.km1 <- dat.km[c(1:5,15)]
dat.g <- gather(dat.km1, stats, km3mean, -clusters)
km.1 <- ggplot(dat.g, aes(stats, km3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Kmeans: Percentages")
km.1
# Offense Clusters
dat.km1 <- dat.km[c(6,9,14,15)]
dat.g <- gather(dat.km1, stats, km3mean, -clusters)
km.2 <- ggplot(dat.g, aes(stats, km3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Kmeans: Offense")
km.2
# Defense Clusters
dat.km1 <- dat.km[c(11,10,7,15)]
dat.g <- gather(dat.km1, stats, km3mean, -clusters)
km.3 <- ggplot(dat.g, aes(stats, km3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Kmeans: Defense")
km.3
# Special Clusters
dat.km1 <- dat.km[c(12,13,15)]
dat.g <- gather(dat.km1, stats, km3mean, -clusters)
km.4 <- ggplot(dat.g, aes(stats, km3mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Kmeans: Special")
km.4
#### Hierarchical #####
### Create distance matrix
d <- dist(nba.scaled, method = "euclidean")
hc.complete <- hclust(d, method="ward.D2")
plot(hc.complete) # display dendrogram
hc.complete.id <- cutree(hc.complete, k=3) # cut tree into 3 clusters
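table(hc.complete.id)  # cluster sizes (quick sanity check)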
rect.hclust(hc.complete, k = 3, border = "red")
#dev.copy2pdf(file = "E:/Data mining/Lecture Notes/plots/dend1.pdf")
nba.hier <- Dat[-1]
nba.hier$clusters <- hc.complete.id  # plain vector, so subset() and colMeans() below behave
temp1 <- subset(nba.hier,nba.hier$clusters =='1')
temp2 <- subset(nba.hier,nba.hier$clusters =='2')
temp3 <- subset(nba.hier,nba.hier$clusters =='3')
a <- colMeans(temp1)
b <- colMeans(temp2)
c <- colMeans(temp3)
dat.hier <- rbind(a,b,c)
dat.hier <- as.data.frame(dat.hier)
dat.hier$clusters <- as.character(dat.hier$clusters)
# Hierarchical Clustering Plots
library(tidyr)
library(ggplot2)
# Percentages Clusters
dat.hier1 <- dat.hier[c(1:5,15)]
dat.g <- gather(dat.hier1, stats, hier4mean, -clusters)
hier.1 <- ggplot(dat.g, aes(stats, hier4mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Hierarchical: Percentages")
hier.1
# Offense Clusters
dat.hier1 <- dat.hier[c(6,9,14,15)]
dat.g <- gather(dat.hier1, stats, hier4mean, -clusters)
hier.2 <- ggplot(dat.g, aes(stats, hier4mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Hierarchical: Offense")
hier.2
# Defense Clusters
dat.hier1 <- dat.hier[c(11,10,7,15)]
dat.g <- gather(dat.hier1, stats, hier4mean, -clusters)
hier.3 <- ggplot(dat.g, aes(stats, hier4mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Hierarchical: Defense")
hier.3
# Special Clusters
dat.hier1 <- dat.hier[c(12,13,15)]
dat.g <- gather(dat.hier1, stats, hier4mean, -clusters)
hier.4 <- ggplot(dat.g, aes(stats, hier4mean)) +
geom_bar(aes(fill = clusters), stat = "identity", position = "dodge") +
ggtitle("Clustering using Hierarchical: Special")
hier.4
# Table analysis
t <- table(km3id, hc.complete.id)
colnames(t) <- c("H 1", "H 2", "H 3")
rownames(t) <- c("KM 1", "KM 2", "KM 3")
t2 <- table(mc3$classification, hc.complete.id)
colnames(t2) <- c("H 1", "H 2", "H 3")
rownames(t2) <- c("MC 1", "MC 2", "MC 3")
table(mc3$classification, km3id)
t
t2
nba.clust <- cbind(Dat2, mc3$classification, km3id,hc.complete.id)
colnames(nba.clust)[17] <- 'MCid'
colnames(nba.clust)[18] <- 'KMid'
colnames(nba.clust)[19] <- 'Hierid'
nba.clust$MCid <- as.factor(nba.clust$MCid)
nba.clust$KMid <- as.factor(nba.clust$KMid)
nba.clust$Hierid <- as.factor(nba.clust$Hierid)
nba.clust
################### CSV SAVED ON APRIL 9th 2021 ####################
#write.csv(nba.clust, "nba.clust3.csv")
# Compare Mcluster, Kmeans, and Hierarchical Clusters
library(gridExtra)
grid.arrange(mc.1, km.1, hier.1, nrow=1, ncol=3)
grid.arrange(mc.2, km.2, hier.2, nrow=1, ncol=3)
grid.arrange(mc.3, km.3, hier.3, nrow=1, ncol=3)
grid.arrange(mc.4, km.4, hier.4, nrow=1, ncol=3)
# Clusters and Positions
nbatoday <- nba.clust[,c(1,2,17,18,19)]
dat.g <- gather(nbatoday[-1], type, cluster,-Pos)
today <- ggplot(dat.g[-1], aes(type,fill=cluster)) +
geom_bar(position = "dodge") +
ggtitle("2020 NBA Cluster Distributions")
posclus.mc <- ggplot(nbatoday[-1], aes(Pos)) +
geom_bar(aes(fill = MCid), stat = "count", position = "dodge") +
ggtitle("2020 NBA Position-Cluster Relation Mclust")
posclus.km <- ggplot(nbatoday[-1], aes(Pos)) +
geom_bar(aes(fill = KMid), stat = "count", position = "dodge") +
ggtitle("2020 NBA Position-Cluster Relation Kmeans")
posclus.h <- ggplot(nbatoday[-1], aes(Pos)) +
geom_bar(aes(fill = Hierid), stat = "count", position = "dodge") +
ggtitle("2020 NBA Position-Cluster Relation Hierarchical")
posclus <- ggplot(dat.g, aes(Pos)) +
geom_bar(aes(fill = cluster), stat = "count", position = "dodge") +
ggtitle("2020 NBA Position-Cluster Relation Concatenated")
today
posclus.mc
posclus.km
posclus.h
posclus
# Pairs plot view clusters
nba.nolbl <- nba.clust[,-c(1,2,17,18,19)]
nba.lbl.mc <- nba.clust[,17]
nba.lbl.km <- nba.clust[,18]
nba.lbl.h <- nba.clust[,19]
n <- nrow(nba.nolbl)
p <- ncol(nba.nolbl)
col.seq <- c("red", "darkgreen", "navy")
species.col.mc <- col.seq[as.numeric(nba.lbl.mc)]
species.col.km <- col.seq[as.numeric(nba.lbl.km)]
species.col.h <- col.seq[as.numeric(nba.lbl.h)]
# Mclust
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[1:5], col = species.col.mc,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Model Based Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.mc)),
fill = unique(species.col.mc))
par(xpd = NA)
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[c(6,9,14,12)], col = species.col.mc,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Model Based Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.mc)),
fill = unique(species.col.mc))
par(xpd = NA)
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[c(11,10,7,13)], col = species.col.mc,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Model Based Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.mc)),
fill = unique(species.col.mc))
par(xpd = NA)
# Kmeans
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[1:5], col = species.col.km,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Kmeans Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.km)),
fill = unique(species.col.km))
par(xpd = NA)
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[c(6,9,14,12)], col = species.col.km,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Kmeans Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.km)),
fill = unique(species.col.km))
par(xpd = NA)
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[c(11,10,7,13)], col = species.col.km,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Kmeans Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.km)),
fill = unique(species.col.km))
par(xpd = NA)
# Hierarchical
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[1:5], col = species.col.h,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Hierarchical Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.h)),
fill = unique(species.col.h))
par(xpd = NA)
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[c(6,9,14,12)], col = species.col.h,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Hierarchical Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.h)),
fill = unique(species.col.h))
par(xpd = NA)
par(oma=c(2,2,4,1), mar = c(4,4,4,1) + 0.1)
pairs(nba.nolbl[c(11,10,7,13)], col = species.col.h,
lower.panel = NULL, cex.labels=2, pch=19, cex = 1.2)
mtext(text= "Hierarchical Cluster",side=3,line=3, font=2,
cex = 1.25,outer=TRUE)
legend(x = 0.005, y = 0.2, cex = 1.2, bty = "n",
legend = as.character(levels(nba.lbl.h)),
fill = unique(species.col.h))
par(xpd = NA)
# Interpret each cluster using NBA background knowledge (e.g. height, strength, weight)
MCid1 <- nba.clust[(nba.clust$MCid=="1"),]
MCid2 <- nba.clust[(nba.clust$MCid=="2"),]
MCid3 <- nba.clust[(nba.clust$MCid=="3"),]
MCid1
MCid2
MCid3
KMid1 <- nba.clust[(nba.clust$KMid=="1"),]
KMid2 <- nba.clust[(nba.clust$KMid=="2"),]
KMid3 <- nba.clust[(nba.clust$KMid=="3"),]
KMid1
KMid2
KMid3
Hid1 <- nba.clust[(nba.clust$Hierid=="1"),]
Hid2 <- nba.clust[(nba.clust$Hierid=="2"),]
Hid3 <- nba.clust[(nba.clust$Hierid=="3"),]
Hid1
Hid2
Hid3
| /FINAL 1.R | no_license | justinpeter96/unsupervised-1 | R | false | false | 30,515 | r |
|
#############################################################
## Internal validation sampling strategies for exposure
## measurement error correction
## Simulation study
##
## Sampling strategy: Random
## nested loop plots
## lindanab4@gmail.com - 20200506
#############################################################
##############################
# 0 - Load libraries ----
##############################
source(file = "./rcode/visualisation/nested_loop_plot.R")
summary <- readRDS(file = "./results/summaries/summary.Rds")
summary <- summary[order(-linear, R_squared, skewness),]
use_methods <- c("complete_case", "reg_cal",
"efficient_reg_cal", "inadm_reg_cal")
##############################
# 1 - Percentage bias ----
##############################
png(paste0("./results/figures", "/perc_bias_random.png"),
width = 4, height = 4, units = 'in', res = 100)
create_nlp_method(summary = summary,
stats = "perc_bias",
ref = 0,
limits = c(-5, 10),
xlab = "2 x 4 x 3 = 24 ordered scenarios",
ylab = "percentage bias",
use_size_valdata = 0.4,
use_sampling_strat = "random",
use_methods = use_methods)
dev.off()
##############################
# 2 - MSE ----
##############################
png(paste0("./results/figures", "/mse_random.png"),
width = 4, height = 4, units = 'in', res = 100)
create_nlp_method(summary = summary,
stats = "mse",
ref = 0,
limits = c(-0.01, 0.05),
xlab = "2 x 4 x 3 = 24 ordered scenarios",
ylab = "mean squared error",
use_size_valdata = 0.4,
use_sampling_strat = "random",
use_methods = use_methods)
dev.off()
##############################
# 3 - coverage ----
##############################
png(paste0("./results/figures", "/cover_random.png"),
width = 4, height = 4, units = 'in', res = 100)
create_nlp_method(summary = summary,
stats = "cover",
ref = 0.95,
limits = c(0.85, 1),
xlab = "2 x 4 x 3 = 24 ordered scenarios",
ylab = "coverage",
use_size_valdata = 0.4,
use_sampling_strat = "random",
use_methods = use_methods)
dev.off()
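##############################
# 4 - Note ----
##############################
# The three blocks above differ only in stats/ref/limits/ylab; a compact
# alternative is sketched here (not run; assumes the same create_nlp_method
# interface and output folder):
# specs <- list(
#   list(stats = "perc_bias", ref = 0,    limits = c(-5, 10),      ylab = "percentage bias"),
#   list(stats = "mse",       ref = 0,    limits = c(-0.01, 0.05), ylab = "mean squared error"),
#   list(stats = "cover",     ref = 0.95, limits = c(0.85, 1),     ylab = "coverage"))
# for (s in specs) {
#   png(paste0("./results/figures/", s$stats, "_random.png"),
#       width = 4, height = 4, units = 'in', res = 100)
#   create_nlp_method(summary = summary, stats = s$stats, ref = s$ref,
#                     limits = s$limits, xlab = "2 x 4 x 3 = 24 ordered scenarios",
#                     ylab = s$ylab, use_size_valdata = 0.4,
#                     use_sampling_strat = "random", use_methods = use_methods)
#   dev.off()
# }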
| /rcode/visualisation/nlp_random.R | permissive | xanthematthijssen/me_neo | R | false | false | 2,432 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kgeur.r
\name{kgeur}
\alias{kgeur}
\title{Return kg and euro column index of eflalo dataset}
\usage{
kgeur(x)
}
\arguments{
\item{x}{Colnames of eflalo dataset (or any other dataset with column names
as LE_KG_ and LE_EURO_)}
}
\description{
Returns the index of the columns with kg and euro information from a given
eflalo dataset
}
\examples{
data(eflalo)
kgeur(colnames(eflalo))
}
\references{
EU Lot 2 project
}
\author{
Niels T. Hintzen
}
| /vmstools/man/kgeur.Rd | no_license | nielshintzen/vmstools | R | false | true | 523 | rd |
fviz_gda_var(mca_res, axes = c(1,2), title = "Learning Analytics — e:t:p:M@Math") +
geom_hline(yintercept = 0, color = "grey", linetype="solid") +
geom_vline(xintercept = 0, color = "grey", linetype="solid") +
theme(panel.grid = element_line(colour = "grey", size = 1.5))
ggsave("Kategorien.pdf", width = 11, height = 8)
fviz_gda_var(mca_res, axes = c(2,3), group = group, group_names = group_names, title = "Learning Analytics — e:t:p:M@Math")
# Notes as a structuring factor
fviz_gda_quali_ellipses(mca_res, datensatz_synthesise_users_gda, "visit_event_action_notes", impute = FALSE)
fviz_gda_quali_supvar(mca_res, datensatz_synthesise_users_gda, "visit_event_action_notes", impute = FALSE)
fviz_gda_quali_ellipses(mca_res, datensatz_synthesise_users_gda, "average_visit_hour", impute = FALSE)
fviz_gda_quali_supvar(mca_res, datensatz_synthesise_users_gda, "average_visit_hour", impute = FALSE)
fviz_gda_quali_ellipses(mca_res, datensatz_synthesise_users_gda, "average_visit_devices", impute = FALSE)
fviz_gda_quali_supvar(mca_res, datensatz_synthesise_users_gda, "average_visit_devices", impute = FALSE)
# HCPC
hcpc_res <- HCPC(mca_res, nb.clust = 3, graph = FALSE)
fviz_gda_quali_ellipses(mca_res, hcpc_res$data.clust, "clust", title = "Learning Analytics — e:t:p:M@Math — Cluster")
ggsave("Cluster.pdf", width = 8, height = 3)
# 3D
get_colors <- function(groups, group.col = palette()){
groups <- as.factor(groups)
ngrps <- length(levels(groups))
if(ngrps > length(group.col))
group.col <- rep(group.col, ngrps)
color <- group.col[as.numeric(groups)]
names(color) <- as.vector(groups)
return(color)
}
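# Usage sketch (assumes RColorBrewer is loaded, as for brewer.pal() below):
# get_colors(hcpc_res$data.clust$clust, brewer.pal(3, "Set1"))
# returns one colour per observation, named by its cluster label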
x <- mca_res$ind$coord[,1]
y <- mca_res$ind$coord[,2]
z <- mca_res$ind$coord[,3]
groups <- hcpc_res$data.clust$clust
levs <- levels(groups)
group_col <- brewer.pal(n = length(levs), name="Set1")
# Open RGL device
open3d(windowRect = c(0, 0, 1000, 1000))
# Scatter plot
spheres3d(x, y, z, r = 0.03, color = get_colors(groups, group_col))
# Axes
lines3d(c(-1, 1), c(0, 0), c(0, 0), color = "black")
lines3d(c(0, 0), c(-1,1), c(0, 0), color = "black")
lines3d(c(0, 0), c(0, 0), c(-1,1), color = "black")
# Container
bbox3d(color=c("#333333","black"), emission="#999999", specular="#333333",
xlen=0, ylen=0, zlen=0, shininess=5, alpha=0.5)
# Ellipsis
for (i in 1:length(levs)) {
group <- levs[i]
selected <- groups == group
xx <- x[selected]; yy <- y[selected]; zz <- z[selected]
ellips <- ellipse3d(cov(cbind(xx,yy,zz)),
centre=c(mean(xx), mean(yy), mean(zz)), level = 0.86)
shade3d(ellips, col = group_col[i], alpha = 0.2, lit = FALSE)
wire3d(ellips, col = group_col[i], alpha = 0.1, lit = FALSE)
# # show group labels
# texts3d(mean(xx),mean(yy), mean(zz), text = group,
# col= group_col[i], cex = 2)
}
aspect3d(1,1,1)
# Save a snapshot image
rgl.viewpoint(theta = 35, phi = 15, zoom = 0.75)
rgl.snapshot("plot_3d.png")
movie3d(spin3d(axis = c(0.5, 0.5, 0.5)), duration = 15, dir = getwd())
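# Note: movie3d() assembles the animation externally; it typically requires
# ImageMagick (or the magick package) to be available on the system.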
# Save to the repository
rmFromLocalRepo(names(asearch("name:mca_res")), removeData = TRUE, removeMiniature = TRUE)
asave(mca_res, archiveSessionInfo = FALSE)
# PCA Tests
datensatz_synthesise_users_pca <- datensatz_synthesise_users %>%
mutate_at(vars(total_visits, total_actions, total_events), funs(as.numeric))
impute_pca <- imputePCA(datensatz_synthesise_users_pca[,-c(5,8,9)] %>% data.frame)
res_pca <- PCA(impute_pca$completeObs, graph = FALSE)
res_pca <- PCA(datensatz_synthesise_users_pca, quali.sup = c(5,8,9), graph = FALSE)
plot(res_pca)
hcpc_pca <- HCPC(res_pca, nb.clust = 3, graph = FALSE)
plot(hcpc_pca, choice="map", t.level="none", centers.plot=T)
fviz_pca_biplot(res_pca)
fviz_pca_ind(res_pca, habillage = as.factor(datensatz_synthesise_users_pca$average_visit_devices))
fviz_pca_var(res_pca, repel = TRUE)
| /playground.R | no_license | inventionate/learning-analytics | R | false | false | 3,850 | r |
fviz_gda_var(mca_res, axes = c(1,2), title = "Learning Analytics — e:t:p:M@Math") +
geom_hline(yintercept = 0, color = "grey", linetype="solid") +
geom_vline(xintercept = 0, color = "grey", linetype="solid") +
theme(panel.grid = element_line(colour = "grey", size = 1.5))
ggsave("Kategorien.pdf", width = 11, height = 8)
fviz_gda_var(mca_res, axes = c(2,3), group = group, group_names = group_names, title = "Learning Analytics — e:t:p:M@Math")
# Notizen als strukturierender Faktor
fviz_gda_quali_ellipses(mca_res, datensatz_synthesise_users_gda, "visit_event_action_notes", impute = FALSE)
fviz_gda_quali_supvar(mca_res, datensatz_synthesise_users_gda, "visit_event_action_notes", impute = FALSE)
fviz_gda_quali_ellipses(mca_res, datensatz_synthesise_users_gda, "average_visit_hour", impute = FALSE)
fviz_gda_quali_supvar(mca_res, datensatz_synthesise_users_gda, "average_visit_hour", impute = FALSE)
fviz_gda_quali_ellipses(mca_res, datensatz_synthesise_users_gda, "average_visit_devices", impute = FALSE)
fviz_gda_quali_supvar(mca_res, datensatz_synthesise_users_gda, "average_visit_devices", impute = FALSE)
# HCPC
hcpc_res <- HCPC(mca_res, nb.clust = 3, graph = FALSE)
fviz_gda_quali_ellipses(mca_res, hcpc_res$data.clust, "clust", title = "Learning Analytics — e:t:p:M@Math — Cluster")
ggsave("Cluster.pdf", width = 8, height = 3)
# 3D
get_colors <- function(groups, group.col = palette()){
groups <- as.factor(groups)
ngrps <- length(levels(groups))
if(ngrps > length(group.col))
group.col <- rep(group.col, ngrps)
color <- group.col[as.numeric(groups)]
names(color) <- as.vector(groups)
return(color)
}
x <- mca_res$ind$coord[,1]
y <- mca_res$ind$coord[,2]
z <- mca_res$ind$coord[,3]
groups <- hcpc_res$data.clust$clust
levs <- levels(groups)
group_col <- brewer.pal(n = length(levs), name="Set1")
# Open RGL device
open3d(windowRect = c(0, 0, 1000, 1000))
# Scatter plot
spheres3d(x, y, z, r = 0.03, color = get_colors(groups, group_col))
# Axes
lines3d(c(-1, 1), c(0, 0), c(0, 0), color = "black")
lines3d(c(0, 0), c(-1,1), c(0, 0), color = "black")
lines3d(c(0, 0), c(0, 0), c(-1,1), color = "black")
# Container
bbox3d(color=c("#333333","black"), emission="#999999", specular="#333333",
xlen=0, ylen=0, zlen=0, shininess=5, alpha=0.5)
# Ellipsis
for (i in 1:length(levs)) {
group <- levs[i]
selected <- groups == group
xx <- x[selected]; yy <- y[selected]; zz <- z[selected]
ellips <- ellipse3d(cov(cbind(xx,yy,zz)),
centre=c(mean(xx), mean(yy), mean(zz)), level = 0.86)
shade3d(ellips, col = group_col[i], alpha = 0.2, lit = FALSE)
wire3d(ellips, col = group_col[i], alpha = 0.1, lit = FALSE)
# # show group labels
# texts3d(mean(xx),mean(yy), mean(zz), text = group,
# col= group_col[i], cex = 2)
}
aspect3d(1,1,1)
# Speichern eines Bildes
rgl.viewpoint(theta = 35, phi = 15, zoom = 0.75)
rgl.snapshot("plot_3d.png")
movie3d(spin3d(axis = c(0.5, 0.5, 0.5)), duration = 15, dir = getwd())
# In Repository speichern
rmFromLocalRepo(names(asearch("name:mca_res")), removeData = TRUE, removeMiniature = TRUE)
asave(mca_res, archiveSessionInfo = FALSE)
# PCA Tests
datensatz_synthesise_users_pca <- datensatz_synthesise_users %>%
mutate_at(vars(total_visits, total_actions, total_events), funs(as.numeric))
impute_pca <- imputePCA(datensatz_synthesise_users_pca[,-c(5,8,9)] %>% data.frame)
res_pca <- PCA(impute_pca$completeObs, graph = FALSE)
res_pca <- PCA(datensatz_synthesise_users_pca, quali.sup = c(5,8,9), graph = FALSE)
plot(res_pca)
hcpc_pca <- HCPC(res_pca, nb.clust = 3, graph = FALSE)
plot(hcpc_pca, choice="map", t.level="none", centers.plot=T)
fviz_pca_biplot(res_pca)
fviz_pca_ind(res_pca, habillage = as.factor(datensatz_synthesise_users_pca$average_visit_devices))
fviz_pca_var(res_pca, repel = TRUE)
|
library(shiny)
# Define UI for an application that plots the US Consumer Price Index
# (`dat1` is assumed to be loaded beforehand, e.g. in global.R)
shinyUI(fluidPage(
# Application title
titlePanel("US Consumer Price Index"),
dateRangeInput("dateRange",
label = 'Date range input:',
start = min(dat1[,1]), end = max(dat1[,1]),
format = "dd/mm/yyyy"
),
# Sidebar with data-type and region selectors
sidebarLayout(
sidebarPanel(
selectInput("dataset", "Choose Data Type:",
choices = c("Raw", "Trend")),
checkboxGroupInput("level", "Geographic Level:",
choices = unique(dat1$Level),
selected = c("US","Northeast", "Southwest", "West", "South", "Midwest")),
helpText("Data BLS CPI - Not Seasonally Adjusted")
),
# Show the CPI time-series plot
mainPanel(
plotOutput("distPlot")
)
)
))
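# A minimal server sketch that would pair with this UI (illustrative only, and
# left commented out because it belongs in a separate server.R; the value
# column name `CPI` and the filtering logic are assumptions):
# shinyServer(function(input, output) {
#   output$distPlot <- renderPlot({
#     d <- dat1[dat1[[1]] >= input$dateRange[1] &
#               dat1[[1]] <= input$dateRange[2] &
#               dat1$Level %in% input$level, ]
#     plot(d[[1]], d$CPI, type = "l", xlab = "Date", ylab = "CPI")
#   })
# })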
| /ui.R | no_license | Lou-isV/dataproducts | R | false | false | 987 | r |
selectorNames <- c(selectorNames, "selectCollectionIDC")
improvedDensityClusteringPanel <- tabPanel("Density Clustering",
titlePanel("Density Clustering"),
sidebarLayout(
sidebarPanel(selectInput("selectCollectionIDC",
label = p("Target collection"),
choices = collectionOptions,
selected = 1),
tags$hr(),
textInput("epsIDC", label = p("Eps"), value = ""),
tags$hr(),
actionButton("startAlgorithmIDC", label = "Start"),
tags$hr(),
textInput("newConteinerIDC", label = p("New Container:"), value = ""),
actionButton("sendToNewContainerIDC", label = "Save Results")),
mainPanel(
verbatimTextOutput("idbsText"),
                   tabPanel('improvedDensityClustering', DT::dataTableOutput('idbsTable')))))
| /RSolution/WebApplication/NavbarMenus/Calculations/TabPannels/improvedDensityClustering.R | no_license | ArtemYur/RSolution | R | false | false | 865 | r |
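# Illustrative server-side counterpart for the panel above (an assumption, not
# the project's real handler, hence commented out; dbscan::dbscan stands in
# for the "improved" algorithm and `selectedData()` is a hypothetical reactive):
# observeEvent(input$startAlgorithmIDC, {
#   res <- dbscan::dbscan(selectedData(), eps = as.numeric(input$epsIDC))
#   output$idbsTable <- DT::renderDataTable(
#     data.frame(selectedData(), cluster = res$cluster))
# })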
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setfunctions.R
\name{\%union\%}
\alias{\%union\%}
\title{Union}
\usage{
x \%union\% y
}
\arguments{
\item{x}{A vector}
\item{y}{A vector}
}
\value{
All unique elements in the combined set (x,y).
}
\description{
Union
}
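% Illustrative usage, assuming the operator behaves as documented above; a
% plausible definition in R/setfunctions.R would be unique(c(x, y)).
\examples{
x <- c(1, 2, 2, 3)
y <- c(3, 4)
x \%union\% y  # 1 2 3 4
}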
| /man/grapes-union-grapes.Rd | no_license | davidmacro/declarativeR | R | false | true | 298 | rd |
library(qtl)
library(snowfall)
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo permutations for two phenotypes (columns 21:22)
set.seed(5)
system.time(
scantwo.perm.imp.10.5 <-
scantwo(LG.f2.after.crossover,pheno.col=21:22,method="hk",n.perm=10,n.cluster = 16)
)
# label the permutation results with the scanned phenotype names; subsetting to
# columns 21:22 matches the pheno.col used above (the unsubsetted original
# would error, since the names vector is longer than the object)
names(scantwo.perm.imp.10.5) <- colnames(LG.f2.after.crossover$pheno)[21:22]
sfStop()
# save output
save(scantwo.perm.imp.10.5, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.10.5.Rdata")
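# Illustrative follow-up (not in the original script; assumes the scantwoperm
# object structure is intact): derive penalized-LOD penalties for use with
# stepwiseqtl(). With only 10 permutations this is a smoke test rather than a
# usable threshold estimate.
print(calc.penalties(scantwo.perm.imp.10.5, alpha = 0.05))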
| /F2/scantwo/scantwo_perm_10.5.R | no_license | leejimmy93/KIAT_cabernet | R | false | false | 750 | r |