blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e1f74cdd307c6ccd87e538db29462fa6eca8df7 | 417b44d377cc158e86bfd65830da272169672512 | /Assignment 2/Xiaocheng-Song/cachematrix.R | d96d301d7a493a41c1bf755870434abab3c61aec | [] | no_license | hanxu-ust/R-Programming-Assignments | a6cdc7a0bdcdffbde324d1c2e613fd31af5955f1 | 4b7db24572ec8fda359ca31e51df30977297e58e | refs/heads/master | 2022-08-11T13:34:41.594744 | 2014-07-14T01:54:37 | 2014-07-14T01:54:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,149 | r | cachematrix.R | # This .R file contains the code that improves the efficiency of matrix inversion
# by caching the inverse of a matrix rather than compute it repeatly.
# This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Wraps a matrix together with a cached copy of its inverse.
  # Returns a list of accessors: set/get for the matrix itself and
  # setinversion/getinversion for the cached inverse.
  cachedInverse <- NULL
  set <- function(y) {
    # Replacing the stored matrix invalidates any cached inverse.
    x <<- y
    cachedInverse <<- NULL
  }
  get <- function() x
  setinversion <- function(inversion) cachedInverse <<- inversion
  getinversion <- function() cachedInverse
  list(set = set,
       get = get,
       setinversion = setinversion,
       getinversion = getinversion)
}
# This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', reusing the cached result
  ## held by the makeCacheMatrix object whenever one is available.
  cached <- x$getinversion()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and memoise the result.
    fresh <- solve(x$get())
    x$setinversion(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
f58b3e4afee920637532e1f80d888e1ea5038d3f | 495f786e2661b3e9e2174210ab0567dfc91b4629 | /carTools.R | afde7b6f0ac08c10e002f678dd3333ad49459133 | [] | no_license | jlisic/simCrop | c1dd543f766e9f092eec0f5a321ec796f67c64a8 | 8b45f6c954168e499bb9a14368287e9f73a9c15b | refs/heads/master | 2016-09-06T13:04:48.101659 | 2014-01-01T18:19:07 | 2014-01-01T18:19:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,229 | r | carTools.R | ########################################## carTools.R ###############################################
# A set of tools to simulate CAR models
########################################## LIBRARY ###############################################
library(msm) # for rtnorm
library(mvtnorm)
library(Matrix)
########################################## INCLUDES ###############################################
source('mh.R')
source('simCrop.R')
########################################## FUNCTION ###############################################
## a function to check if the value for rho ensures that I-rho *W is positive definite
# if a value of rho is not provided the allowable range to ensure positive definiteness is
# provided
carTools.checkRho <- function(W, rho) {
  # Checks whether (I - rho W) is positive definite for a CAR model.
  #
  # W:   square spatial weight matrix.
  # rho: optional spatial dependence parameter; either a scalar or a
  #      matrix (in which case rho %*% W is used).
  #
  # Returns TRUE/FALSE when rho is supplied. When rho is missing, returns
  # the sorted interval of scalar rho values (from the reciprocals of the
  # extreme eigenvalues of W) that keep (I - rho * W) positive definite.
  #
  # Changes vs. the original: TRUE/FALSE instead of the reassignable T/F,
  # and the duplicated return branches collapsed into a single all() test.
  if (!missing(rho)) {
    precision <- if (!is.null(nrow(rho))) {
      diag(nrow(W)) - rho %*% W   # matrix-valued rho
    } else {
      diag(nrow(W)) - rho * W     # scalar rho
    }
    # Positive definite <=> every eigenvalue strictly positive.
    return(all(eigen(precision)$values > 0))
  }
  # rho missing: admissible scalar range from the eigenvalues of W.
  sort(1 / range(eigen(W)$values))
}
## side effect addition e.g. ++
## Compound "add in place" operator, e.g. `x %+=% 2` is x <- x + 2.
## substitute() captures the unevaluated assignment expression and
## eval.parent() performs it in the caller's environment.
## (Assignments below switched from `=` to the idiomatic `<-`.)
`%+=%` <- function(e1, e2) eval.parent(substitute(e1 <- e1 + e2))
# Quick demonstration: x becomes 3.
x <- 1
x %+=% 2 ; x
## function that simulates a sample of size m, using a transition matrix theta
simSample <- function(m, theta) {
  # Simulates m steps of a two-state Markov chain with transition matrix
  # theta (rows = current state, columns = next state), starting from
  # state 1.
  #
  # Returns a length-m vector of 0/1 codes, where 1 codes the second
  # state (soybeans in the original comment) and 0 the first.
  #
  # Changes vs. the original: `rotate` is preallocated instead of grown
  # inside the loop, and seq_len(m) avoids the 1:0 footgun when m == 0.
  currentState <- c(1, 0)
  rotate <- numeric(m)
  for (i in seq_len(m)) {
    currentProb <- currentState %*% theta
    # Draw the next state; 1 - P(state 1) is the probability of state 2.
    trial <- rbinom(1, size = 1, prob = 1 - currentProb[1])
    currentState <- c(0, 0)
    currentState[trial + 1] <- 1
    rotate[i] <- which(currentState == 1) - 1
  }
  return(rotate)
}
## probit Gibbs function ##
## Probit Gibbs sampler (Albert & Chib 1993 data augmentation),
## non-spatial version.
##
## y:      0/1 response vector.
## X:      design matrix, one row per observation.
## Beta:   starting value for the coefficient.
## Beta0:  prior mean for Beta.
## Sigma0: prior standard deviation for Beta (prior variance Sigma0^2).
## iter:   number of Gibbs iterations.
##
## Returns the vector of sampled Beta values, one per iteration.
## NOTE(review): B is formed with the elementwise reciprocal `^-1` and the
## final draw uses rnorm(1, ...), so this implementation only supports a
## single coefficient (p = 1) -- confirm before using with a wider X.
## NOTE(review): B is passed to rnorm() as the sd argument; if B is meant
## to be a variance, sqrt(B) may be intended -- confirm.
carTools.probitGibbs <- function(y,X,Beta,Beta0,Sigma0,iter) {
#init some values
m <- length(y)
Beta.save <- c()
z <- 1:m
# the usual X'X
XX <- t(X) %*% X
# inverse of the prior variance
B.star.inv <- solve(Sigma0^2)
# posterior variance of Beta (scalar arithmetic; see NOTE above)
B <- (B.star.inv + XX )^-1
# the mcmc loop
for(i in 1:iter) {
# generate truncated-normal deviates for the latent variables:
# positive when y = 1, negative when y = 0 (rtnorm is from package msm)
for( j in 1:m) {
if( y[j] == 1) {
z[j] <- rtnorm( 1, mean=X[j,] %*% Beta, sd=1 , lower=0)
} else {
z[j] <- rtnorm( 1, mean=X[j,] %*% Beta, sd=1 , upper=0)
}
}
# this is the Albert / Chib "Beta squiggle" (the posterior mean of Beta)
Beta.post <- B %*% (B.star.inv %*% Beta0 + t(X) %*% z)
# generate deviates for beta/mu
Beta.save[i] <- rnorm(1,Beta.post, B )
Beta <- Beta.save[i]
}
return(Beta.save)
}
carTools.probitGibbsSpatial <- function( a, Beta.init, lambda.init, beta0, Sigma0, iter ) {
  # Wrapper that runs the spatial probit Gibbs sampler once per crop type.
  #
  # a:           simCrop object whose $cropType has a 'myObjects' column
  #              plus one column of observed states per year.
  # Beta.init:   list of initial Beta values, one per crop type.
  # lambda.init: list of initial spatial parameters, one per crop type.
  # beta0:       list of prior means for Beta, one per crop type.
  # Sigma0:      list of prior standard deviations, one per crop type.
  # iter:        number of MCMC iterations.
  #
  # Returns a list with one probitGibbsSpatial() result per crop type.
  #
  # Bug fix: the sampler call below previously read `Beta0[[i]]`, an
  # undefined name (the parameter is `beta0`); it now uses the parameter.
  myObjects <- a$cropType[,'myObjects']
  myObjects.sort <- sort( myObjects, index.return=TRUE)$ix
  # Observed states, reordered to match the row order of the rook
  # distance matrix (which is sorted by object id).
  Y <- a$cropType[,-1]
  Y <- Y[myObjects.sort,]
  # Intercept-only design matrix: one row per field and transition year.
  X <- matrix(1,nrow=nrow(Y)*(ncol(Y) - 1),ncol=1)
  W <- simCrop.createRookDist(a)
  result <- list()
  for( i in seq_along(Beta.init) ) {
    # Recode the observations relative to crop type i before sampling.
    # NOTE(review): this indexing is kept exactly as originally written;
    # verify the intended recoding against the cropType layout.
    Y.change <- Y
    Y.change[,-1][!(Y.change==i)[,-ncol(Y.change)]] <- 0
    Y.change <- Y.change[,-1] - 1
    Y.change <- matrix(Y.change,ncol=1)
    result[[i]] <- probitGibbsSpatial(
      Y.change,
      X,
      W,
      Beta.init[[i]],
      lambda.init[[i]],
      beta0[[i]],
      Sigma0[[i]],
      iter)
    print( sprintf("Finished Running %d",i) )
  }
  return( result)
}
## probit Gibbs function ##
# Y vector of categorical responses in row major form, repeating for each year, length = (number of years) x fieldSize
# X matrix of covariates in row major form, repeating for each year, length = (number of years) x fieldSize
# W matrix in row major form of spatial neighborhoods, dim is fieldSize x fieldSize
# fieldSize, number of observations in a given year
#
## Spatial probit Gibbs sampler with a CAR error structure.
##
## Y: stacked response vector in row-major form (one block of nrow(W) rows
##    per year); coded 1 / 0, any other value is treated as unobserved.
## X: stacked covariate matrix matching Y.
## W: spatial neighbourhood matrix (fieldSize x fieldSize).
## Beta.init, lambda.init: starting values for the chain.
## Beta0, Sigma0: prior mean and standard deviation for Beta.
## iter: number of MCMC iterations.
##
## Returns list(Beta = ..., lambda = ...) with one draw per iteration.
probitGibbsSpatial <- function(Y,X,W,Beta.init,lambda.init,Beta0,Sigma0,iter) {
# set initial conditions
Beta <- Beta.init
lambda <- lambda.init
#init some values
m <- nrow(Y) # number of observations
K <- m / nrow(W) # number of years stacked in Y
p <- ncol(X) # number of covariates (currently unused below)
Beta.save <- c()
lambda.save <- c()
# Block-diagonal spatial weight matrix: one W block per year.
W.big <- kronecker(diag(K),W)
# Admissible range for the spatial parameter (cf. carTools.checkRho).
lambda.range <- sort( 1/range(eigen(W)$values) )
Z <- matrix(0,nrow=m,ncol=1)
# inverse of the prior variance
B.star.inv <- solve(Sigma0^2)
# the mcmc loop
for(i in 1:iter) {
print( sprintf("Lambda Update %d ", i) )
last.time <- proc.time()
# CAR precision matrix for the current lambda.
Sigma.inv <- diag(m) - lambda * W.big
XX <- t(X) %*% Sigma.inv %*% X
# NOTE(review): `^-1` is an elementwise reciprocal, so this is only a
# correct posterior variance when Beta is scalar (p = 1) -- confirm.
B <- (B.star.inv + XX )^-1
print( proc.time() - last.time)
print( sprintf("Z Generation %d ", i) )
last.time <- proc.time()
means <- X %*% Beta + lambda * W.big %*% (Z - X %*% Beta )
# generate deviates for the latent variables: truncated positive when
# Y = 1, truncated negative when Y = 0, unconstrained otherwise
for( j in 1:m) {
if( Y[j] == 1) {
Z[j] <- rtnorm( 1, mean=means[j], sd=1 , lower=0)
} else if( Y[j] == 0) {
Z[j] <- rtnorm( 1, mean=means[j], sd=1 , upper=0)
} else {
Z[j] <- rnorm( 1, mean=means[j], sd=1)
}
# Componentwise Gibbs: the conditional means are recomputed in full
# after every single-component update.
means <- X %*% Beta + lambda * W.big %*% (Z - X %*% Beta )
}
print( proc.time() - last.time)
if( T ) {
#XZ <- t(X) %*% (Z - lambda * W.big %*% (Z - X %*% Beta) )
print( sprintf("Beta Generation %d ", i) )
last.time <- proc.time()
XZ <- t(X) %*% Sigma.inv %*% Z
# this is the Albert / Chib "Beta squiggle" (posterior mean)
Beta.post.location <- B %*% (B.star.inv %*% Beta0 + XZ )
# generate deviates for beta/mu
# NOTE(review): B is passed as rnorm's sd argument; if B is a variance,
# sqrt(B) may be intended -- confirm.
Beta.save[i] <- rnorm(1,Beta.post.location, B )
Beta <- Beta.save[i]
print( proc.time() - last.time)
}
# generate lambda deviate via Metropolis-Hastings (mh.lambda from mh.R)
if( T ) {
print( sprintf("Rho Generation %d ", i) )
lambda.save[i] <- mh.lambda(Z - X %*% Beta,W.big,0,1,100,lambda.range )
lambda <- lambda.save[i]
print( proc.time() - last.time)
}
}
return( list( Beta = Beta.save, lambda = lambda.save) )
}
## Draw deviates from a CAR model by repeated componentwise Gibbs sweeps.
##
## rho:  spatial dependence parameter (scalar; a matrix is accepted by the
##       chol branch below, but the sweep itself multiplies with scalar rho).
## W:    spatial weight matrix.
## X:    covariate matrix or vector (vectors are coerced to one column).
## Beta: coefficient vector (coerced to a one-column matrix).
## m:    number of full sweeps over the n components.
##
## Returns an n x 1 matrix of deviates.
carTools.deviates <- function( rho, W, X, Beta,m) {
n <- nrow(X)
if( is.null(n) ) n <- length(X)
# NOTE(review): L is computed but never used afterwards -- it looks like
# a leftover from an exact Cholesky-based sampler. Also the matrix-rho
# branch here is not mirrored in the sweep below, which assumes scalar
# rho; confirm intended behaviour for matrix-valued rho.
if( !is.null(nrow(rho)) ) {
L <- chol(diag(n) - rho %*% W)
} else {
L <- chol(diag(n) - rho * W )
}
if(is.null(nrow(Beta))) Beta <- matrix(Beta,ncol=1)
if(is.null(nrow(X))) X <- matrix(X,ncol=1)
Y <- matrix(0,nrow=n,ncol=1)
# m Gibbs sweeps; each component is drawn conditional on all the others.
# NOTE(review): the conditional mean uses `- rho *`; the usual CAR
# conditional uses a `+ rho *` term -- verify the sign convention.
for( j in 1: m ) {
for( i in 1:n) {
Y[i] <- X[i,] %*% Beta - rho * W[i,-i] %*%( Y[-i] - X[-i,] %*% Beta ) + rnorm(1)
}
}
return( Y )
}
carTools.generateCropTypes <- function(a, p, rho, X, Beta) {
  # Append one simulated year of crop types (coded 1 or 2) to a$cropType.
  #
  # a:    simCrop object (map, cropType and, optionally, neighbors).
  # p:    optional transition probabilities; when supplied the simulation
  #       is delegated to simCrop.generateCropTypes() with no spatial model.
  # rho:  list of spatial dependence parameters, one per current state.
  # X:    optional covariate matrix (defaults to an intercept column).
  # Beta: optional list of coefficient vectors matching rho.
  if( !missing(p) ) {
    # Bug fix: this previously called simCrop.generateCropTypes(a.neighbors, p),
    # but `a.neighbors` is not defined anywhere; the simCrop object `a` is
    # the intended argument.
    return( simCrop.generateCropTypes(a,p) )
  }
  if( is.null(a$neighbors) ) {
    # figure out which QQS are neighbors
    print("No neighbors exist, assuming rook (shared edge)")
    a <- simCrop.getNeighbors(a)
  }
  # create the rook-adjacency distance matrix for a
  W <- simCrop.createRookDist(a)
  # W is sorted by object, so we need to sort our input by object
  myObjects <- a$map[,'object']
  myObjects.sortIndex <- sort( myObjects, index.return=TRUE)
  myObjects.sort <- myObjects.sortIndex$x
  myObjects.sortIndex <- myObjects.sortIndex$ix
  # this gives us a way to un-sort the result
  myObjects.unsortIndex <- sort(myObjects.sortIndex, index.return=TRUE)$ix
  # handle missing X and Beta: intercept-only design, zero coefficients
  n <- length(myObjects)
  if( missing(X) ) {
    X <- matrix(1,nrow=n,ncol=1)
    Beta <- list()
    for(i in seq_along(rho) ) Beta[[i]] <- rep(0,times=length(rho))
  }
  X.sort <- X[ myObjects.sortIndex,]
  Y <- matrix(1,nrow=n,ncol=1)
  for( i in seq_along(rho) ) {
    # Fields currently in state i receive fresh CAR deviates.
    # NOTE(review): Y is regenerated wholesale on every pass and then
    # partially overwritten through the unsort index; verify this mixing
    # of sorted/unsorted orderings is the intended behaviour.
    priorState <- a$cropType[,'x'] == i
    if( sum(priorState) != 0 ) {
      Y <- carTools.deviates( rho[[i]], W, X.sort, Beta[[i]],10 )
      Y[priorState] <- (Y[myObjects.unsortIndex])[priorState]
    }
  }
  # Positive deviates become state 2, non-positive become state 1.
  a$cropType <- cbind( a$cropType, 1 + (Y>0) )
  return(a)
}
|
86f4a0f62eed695b07d9febbebd3fc0f0d9a1b64 | 58a7deca4d7fba2d98adb6fc392057627e08a3e6 | /Main.R | 769c1c20a8d6bb84b426a38119dcdbc842683ee8 | [] | no_license | priyankaghule/LegalNLP | 0d97fa501f756e7ed1be31e7a14ec18ad5953250 | 1a0e6fd0b0a2e358de67225872c25b52e4146c84 | refs/heads/master | 2021-05-16T11:53:27.688526 | 2017-09-28T11:13:35 | 2017-09-28T11:13:35 | 105,138,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,618 | r | Main.R | source("NLPFunctions.R")
library(XML)
# 1. Get the text: convert the PDF judgement into plain text.
pdfToText <- pdf_text("Docs/ordjud.pdf")
# 1.2 Preprocess the text into sentences, then split the first sentence
# (the header block) into individual words.
sentence <- preprocessText(pdfToText)
words <- getSplitWords(sentence[1])
# 2. Get Court Name
# NOTE(review): which() returns integer(0) rather than NULL when there is
# no match, so !is.null(matches) is always TRUE; length(matches) > 0 is
# probably what was intended.
matches <- which(words=="bombay")
if(!is.null(matches)) {
courtName = 'bombay'
}
## 3.1 Extract PETITIONER and RESPONDENT
# The chunk vectors are POS-tag patterns used to locate the party names
# in the tagged header words.
print(tagPOS(words)$POStagged)
petitionerChunk <- c('NN', 'NNS','VBN')
respondantChunk <- c('JJ','JJ','NN','NNS','VBN')
parties <- getPartiesGroup(words,
petitionerChunk,
respondantChunk
)
# parties layout (from the assignments below): [1] petitioner type,
# [2] respondent type, [[3]] a vector with the two party names.
petitionerType <- as.String(parties[1])
respondentType <- as.String(parties[2])
petitionerName <- as.String(parties[[3]][1])
respondentName <- as.String(parties[[3]][2])
#Get Counsel Group
## FOR PETITIONER
# Each advNChunk is a POS-tag pattern; getCounselGroup() chunks advocate
# names out of the tagged counsel paragraph using these patterns.
petitionerCounselPara <- getCounselParaWithTag(words,'petitioner')
print(tagPOS(petitionerCounselPara)$POStagged)
adv1Chunk <- c('NN','NN','NN')
adv2Chunk <- c('JJ','JJ','NNS')
adv3Chunk <- c(',','NN','NNS')
adv4Chunk <- c('JJ','NN','NN')
adv5Chunk <- c('NNS','VBG','NNS')
petitionerAdvs <- list(adv1Chunk,adv2Chunk,adv3Chunk,adv4Chunk,adv5Chunk)
petitionerAdvocates <- getCounselGroup(petitionerCounselPara,petitionerAdvs)
## FOR RESPONDENT
# Same approach, with patterns tuned for the respondent's counsel text.
respondentCounselPara <- getCounselParaWithTag(words,'respondent')
print(tagPOS(respondentCounselPara)$POStagged)
adv1Chunk <- c('NN','NN','NN')
adv2Chunk <- c('JJ','JJ','NN')
adv3Chunk <- c('RB','JJ')
respondentAdvs <- list(adv1Chunk,adv2Chunk,adv3Chunk)
respondentAdvocates <- getCounselGroup(respondentCounselPara,respondentAdvs)
## Get Coram Details (judge name and position)
coramDetails <- getCoramGroup(words)
## Get Dates Detail
# dateDetail layout (per the assignments below): [1] respondent date type,
# [2] petitioner date type, [3:6] respondent date/month/year/day,
# [7:10] petitioner date/month/year/day.
dateDetail <- getDateDetail(words)
print(dateDetail)
rType <- dateDetail[1]
pType <- dateDetail[2]
rDate<- dateDetail[3]
rMonth <- dateDetail[4]
rYear <- dateDetail[5]
rDay <- dateDetail[6]
pDate <- dateDetail[7]
pMonth <- dateDetail[8]
pYear <- dateDetail[9]
pDay <- dateDetail[10]
#Get judgement detail
# Re-split the full sentence vector (not just the header) for this step.
words <- getSplitWords(sentence)
judgementDetail <- getJudgementDetails(words)
jTitle <- judgementDetail[1]
citeTitle <- judgementDetail[2]
citation <- judgementDetail[3]
actTitle <- judgementDetail[4]
secTitle <- judgementDetail[5]
actId <- judgementDetail[6]
secId <- judgementDetail[7]
##CREATING XML FILE
# Build an XML document mirroring the extracted fields (court, parties,
# counsel, coram, dates, judgement) and write it to ordjud.xml.
xmlCase = newXMLNode('Case')
#Insert Court Details
xmlCourt = newXMLNode('Court', parent = xmlCase)
xmlCourtName = newXMLNode('CourtName', courtName, parent = xmlCourt)
#Insert Parties group
xmlPartiesGroup = newXMLNode('PartiesGroup', parent = xmlCase)
xmlParties = newXMLNode('Parties', parent = xmlPartiesGroup)
xmlPetitionerGroup = newXMLNode('PetitionerGroup', parent= xmlParties)
xmlPetitioner = newXMLNode('Petitioner', petitionerName, attrs = c(Type=petitionerType), parent = xmlPetitionerGroup)
xmlRespondentGroup = newXMLNode('RespondentGroup', parent = xmlParties)
xmlRespondent = newXMLNode('Respondent', respondentName , attrs = c(Type=respondentType), parent = xmlRespondentGroup)
#Insert Counsel group: one CounselName node per extracted advocate
xmlCounselGroup = newXMLNode('CounselGroup', parent = xmlCase)
xmlforPetitioner = newXMLNode('forPetitioner', parent = xmlCounselGroup)
for (adv in petitionerAdvocates) {
xmlCounselName = newXMLNode('CounselName', adv , parent = xmlforPetitioner)
}
xmlforRespondent = newXMLNode('forRespondent', parent = xmlCounselGroup)
for (adv in respondentAdvocates) {
xmlCounselName = newXMLNode('CounselName', adv, parent = xmlforRespondent)
}
#Insert Coram group
xmlCoramGroup = newXMLNode('CoramGroup', parent = xmlCase)
xmlJudge = newXMLNode('Judge', coramDetails[1] ,attrs = c(Position = coramDetails[2]), parent = xmlCoramGroup)
#Insert Date: one node for the petitioner date, one for the respondent
# date (the xmlDate variable is simply reused for the second node)
xmlDate = newXMLNode('Date', pDay, attrs = c(Month = pMonth, Date = pDate, Year = pYear, Type = pType) , parent = xmlCase)
xmlDate = newXMLNode('Date', rDay, attrs = c(Month = rMonth, Date = rDate, Year = rYear, Type = rType) , parent = xmlCase)
#Insert Judgement Group: citation, act reference and section reference
xmlJudgementGroup = newXMLNode('JudgementGroup', attrs = c(Title=jTitle), parent = xmlCase)
xmlPara = newXMLNode('Para', parent = xmlJudgementGroup)
xmlCite = newXMLNode('Cite', parent = xmlPara)
xmlCiteTitle = newXMLNode('Title', citeTitle, parent = xmlCite)
xmlCitation = newXMLNode('Citation', citation ,parent = xmlCite)
xmlAct = newXMLNode('Act', parent = xmlPara)
xmlActTitle = newXMLNode('Title' , actTitle,attrs = c(id = actId), parent = xmlAct)
xmlSec = newXMLNode('SecRef', parent = xmlPara)
xmlSecTitle = newXMLNode('Title' , secTitle,attrs = c(id = secId), parent = xmlSec)
# Serialise the finished document to disk.
saveXML(xmlCase, file="ordjud.xml")
|
c27934973b75f44ef120af205bbe2e65c2eaf72d | db371233a68bafec28889de24dd7a847b0847121 | /cachematrix.R | d9d5c0eb2415d2cc01b9c1bbc24f073439b98d0a | [] | no_license | vvazquezcardenas/ProgrammingAssignment2 | be2875c32110594620656a6e7c3a2c165a3db456 | e8a3c64c86c80c59c8ab58ba440293ab5f91c60e | refs/heads/master | 2021-01-18T03:51:59.678376 | 2016-01-24T16:24:42 | 2016-01-24T16:24:42 | 50,271,336 | 0 | 0 | null | 2016-01-24T04:00:47 | 2016-01-24T04:00:43 | null | UTF-8 | R | false | false | 1,396 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
##Function “makeCacheMatrix” creates a special matrix object that can cache its inverse.
##makeCacheMatrix contains 4 functions: set, get, setInverse, getInverse.
##get is a function that returns the vector x stored in the main function.
##set is a function that changes the vector stored in the main function.
##setInverse and getInverse are functions that will help us get and set the inverse with the cache function,
##begins inv equals null in order that cache function can know if there is a cache inv or not.
makeCacheMatrix <- function(x = matrix()) {
  # Constructor for a matrix wrapper able to memoise its inverse.
  # Exposes set/get (the matrix) and setInverse/getInverse (the cache).
  cached <- NULL
  set <- function(newMatrix) {
    x <<- newMatrix
    cached <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() x
  setInverse <- function(solve) cached <<- solve
  getInverse <- function() cached
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function manipulates the values of the inverse of matrix, first try to get the valur of inv and
##already exists then just get and returnit but if its null calculate it with the solve function
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the special "matrix" 'x'
  ## created by makeCacheMatrix(). If the inverse has already been cached
  ## it is returned directly; otherwise it is computed with solve(),
  ## stored on 'x', and returned. Extra arguments in ... are forwarded to
  ## solve().
  ##
  ## Bug fix: the accessors exposed by makeCacheMatrix() are named
  ## getInverse/setInverse, not getinv/setinv; the old names resolved to
  ## NULL, so calling them failed at runtime.
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setInverse(inv)
  inv
}
|
49788d7bbcdeb4237abdbcb7ace03cabf72d9708 | c49dcdeaf949f17f4db52cb123fabb1dc2951997 | /codigo/0-sintaxis-elemental.R | 73b22ac5805992aa3c37f816ce61f89c75826ef4 | [] | no_license | rcontreraso/intro-R-CCSS | 3b69b3276d562d76cf42d79687dc245aa93ada88 | 82f5d1e2bd3e06757d4bac8f59a3b81a34816f71 | refs/heads/master | 2022-11-16T06:17:38.434015 | 2020-07-17T21:00:46 | 2020-07-17T21:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 828 | r | 0-sintaxis-elemental.R | # INTRODUCCIÓN A R PARA CIENCIAS SOCIALES
# ESTACIÓN LASTARRIA - JULY 2020
# INSTRUCTOR: FELIPE RUIZ
# The hash sign introduces comments.
# The "run" button or ctrl+enter executes the line the cursor is on.
X <- 5 # basic assignment --> appears in the environment pane
X # show its contents
X <- 1455 # the value can be overwritten
X
Y <- "Hola" # values can also be character strings
Y
# Vector = a linear chain of data values
# c = the concatenate function
sexo <- c(1,2,2,2,2,1,1,1,2,2,1)
# Simple frequency table
table(sexo)
# To see what type of vector this is
class(sexo)
# Here we chain character strings together
GSE <- c("ABC1", "C2", "E", "E", "ABC1", "E", "D", "ABC1", "C2", "E", "E")
# Frequencies
table(GSE)
# Vector type
class(GSE)
# Manual creation of a data frame
datos <- data.frame(GSE, sexo)
# View the data frame
View(datos)
|
4a99cbb856a574065c4663cbd6de300d737332f6 | 63ea46174c3632dee435d1d740ef6ee1ba6f612a | /R/MS_STX.R | aa56e6b44496403809d53db56a564a56eaf610fa | [] | no_license | ZhuLeZi/leplant | 17844e1d591d0f8a01de43571e5e921bdfa09133 | 31cfa55929503ac84b5c7f3f604702d84131cb45 | refs/heads/master | 2022-01-17T08:30:08.986475 | 2019-06-24T13:22:08 | 2019-06-24T13:22:08 | 168,328,516 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | MS_STX.R | MS_STX<-function(sp){
# Body of MS_STX(sp): summarise species counts by water-ecology type (stx)
# via SHX(), then build one Chinese clause per type and join them into a
# single sentence terminated with "。".
mt<-SHX(sp,by='stx',out='table')
# One clause per type: "<type> plants number <count>, <percent>% of all species".
mt$ms<-paste(mt$水分生态类型,"植物有",mt$种数,"种,占总物种数的",mt$百分比,"%",sep="")
# Join clauses with ";" and append the full-stop (magrittr pipe).
jg<-paste(mt$ms,collapse = ";")%>%paste(.,"。",sep = "")
jg
}
|
c1e49a20f97a2122ef8f08d48d98247cdc0e6a75 | cf7315014b033ca6eb334eeb79595dab09a7b9ee | /man/plot_exposure.Rd | 842811dc5d3802839adf63eb2edecb0c39ee3244 | [] | no_license | yoshi-ra/purexposure | cbcb97a323b90365ba5115dc2846240ef456e94d | 0183ba40b0df4fb03fb4303a788bc1304587e30d | refs/heads/master | 2023-08-23T02:07:31.140582 | 2021-10-21T04:16:44 | 2021-10-21T04:16:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,605 | rd | plot_exposure.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/05-plot.R
\name{plot_exposure}
\alias{plot_exposure}
\title{Plot exposure to applied pesticides at a location.}
\usage{
plot_exposure(
exposure_list,
color_by = "amount",
buffer_or_county = "county",
percentile = c(0.25, 0.5, 0.75),
fill = "viridis",
alpha = 0.7,
pls_labels = FALSE,
pls_labels_size = 4
)
}
\arguments{
\item{exposure_list}{A list returned from \code{calculate_exposure}.}
\item{color_by}{Either "amount" (the default) or "percentile". Specifies
whether you would like application amounts to be colored according to
amount, resulting in a gradient legend, or by the percentile that they fall
into for the given data set and date range. You can specify percentile
cutpoints with the \code{percentile} argument.}
\item{buffer_or_county}{Either "county" (the default) or "buffer". Specifies
whether you would like colors to be scaled according to the limits
of application within the buffer, or in the county for the same time period,
chemicals, and method of application.}
\item{percentile}{A numeric vector in (0, 1) specifying percentile cutpoints
if \code{color_by = "percentile"}. The default is \code{c(0.25, 0.5, 0.75)},
which results in four categories: < 25th percentile, >= 25th to < 50th,
>= 50th to < 75th, and >= 75th.}
\item{fill}{A palette from the colormap package. The default is
"viridis". To see colormap palette options, visit
\url{https://bhaskarvk.github.io/colormap/} or run
\code{colormap::colormaps}.}
\item{alpha}{A number in [0,1] specifying the transparency of fill colors.
Numbers closer to 0 will result in more transparency. The default is 0.7.}
\item{pls_labels}{TRUE / FALSE for whether you would like sections or townships
to be labeled with their PLS ID. The default is \code{FALSE}.}
\item{pls_labels_size}{A number specifying the size of PLS labels. The default
is 4.}
}
\value{
A list with the following elements:
\describe{
\item{maps}{A list of plots. One plot for each exposure value returned in
the \code{exposure} element of the \code{calculate_exposure} list.}
\item{pls_data}{A list of data frames with 12 columns: \code{pls}, giving
the PLS ID, \code{percent}, the % intersection of that PLS unit with the
buffer, \code{kg}, the amount of kg of pesticides applied in that PLS unit
for the relevant time period, chemicals, and application method,
\code{kg_intersection}, \code{kg} multiplied by \code{percent} (this is the
value that is plotted), \code{start_date}, \code{end_date}, \code{chemicals},
\code{aerial_ground}, which give the time period, chemicals, and application
method for each plot/exposure estimate, \code{none_recorded}, \code{location},
\code{radius} (m), and \code{area} (m^2).}
\item{cutoff_values}{A list of data frames with two columns: \code{percentile} and
\code{kg} giving the cutoff values for each percentile. Only returned if
\code{color_by = "percentile"}.}
}
}
\description{
\code{plot_exposure} returns a plot of pesticide application in the PLS units
intersected by a buffer for each combination of time period, applied active
ingredients, and application method relevant for the exposure values returned
from \code{calculate_exposure}.
}
\examples{
library(magrittr)
\donttest{
fresno_list <- readRDS(system.file("extdata", "exposure_ex.rds",
package = "purexposure")) \%>\% plot_exposure()}
\donttest{
tulare_list <- pull_clean_pur(2010, "tulare") \%>\%
calculate_exposure(location = "-119.3473, 36.2077", radius = 3500) \%>\%
plot_exposure()
names(tulare_list)
tulare_list$maps
tulare_list$pls_data
tulare_list$exposure
# return one plot, pls_data data frame, exposure row, and cutoff_values
# data frame for each exposure combination
dalton_list <- pull_clean_pur(2000, "modoc") \%>\%
calculate_exposure(location = "-121.4182, 41.9370",
radius = 4000,
time_period = "6 months",
aerial_ground = TRUE) \%>\%
plot_exposure(fill = "plasma")
do.call("rbind", dalton_list$exposure)
# one map for each exposure value (unique combination of chemicals,
# dates, and aerial/ground application)
dalton_list$maps[[1]]
dalton_list$maps[[2]]
dalton_list$maps[[3]]
dalton_list$maps[[4]]
dalton_list$maps[[5]]
dalton_list$maps[[6]]
# exposure to a particular active ingredient
# plot percentile categories instead of amounts
chemical_df <- rbind(find_chemical_codes(2009, c("metam-sodium"))) \%>\%
dplyr::rename(chemical_class = chemical)
santa_maria <- pull_clean_pur(2008:2010, "santa barbara",
chemicals = chemical_df$chemname,
sum_application = TRUE,
sum = "chemical_class",
chemical_class = chemical_df) \%>\%
calculate_exposure(location = "-119.6122, 34.90635",
radius = 3000,
time_period = "1 year",
chemicals = "chemical_class") \%>\%
plot_exposure(color_by = "percentile")
do.call("rbind", santa_maria$exposure)
santa_maria$maps[[1]]
santa_maria$maps[[2]]
santa_maria$maps[[3]]
# scale colors based on buffer or county
clotho <- pull_clean_pur(1996, "fresno") \%>\%
dplyr::filter(chemname == "SULFUR") \%>\%
calculate_exposure(location = "-119.6082, 36.7212",
radius = 1500)
plot_exposure(clotho, "amount", buffer_or_county = "county", pls_labels = TRUE)$maps
plot_exposure(clotho, "amount", buffer_or_county = "buffer", pls_labels = TRUE)$maps
}
}
|
c23ae0a3d6ecb5651946917a1c9e84c7f2e94092 | 9dcfe093f403265baceeb60711ab2b8d948a149a | /FStests/false_discovery_rate_package.R | 42d48f27ca3f1fec67cd656c48c9a80d40142a83 | [] | no_license | ruthlorenz/stat_tests_correlated_climdata | c326adf115816d73cace13a7a6330cd09936c9c2 | 9297a3b12d994c0058f6e65611edb2cac207620c | refs/heads/master | 2021-01-10T04:34:00.992103 | 2016-02-24T13:26:25 | 2016-02-24T13:26:25 | 51,578,030 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,287 | r | false_discovery_rate_package.R | # function to check field significance with False Discovery Rate
# Wilks 2006, J. of Applied Met. and Clim.
# p_val = P-values at every grid point, h_val = 0 or 1 at every grid point,
# depending on significance level
# K = total number of local t-test, nlon*nlat if all grid points included,
# otherwise sum(!is.na(data[,,1]))
fdrTest <- function(p_val, siglev=0.05, ...){
dims_p <- dim(p_val)
nlon<-dims_p[1]
nlat<-dims_p[2]
if (!is.na(dims_p[3])){
ntim<-dims_p[3]
} else {
ntim<-1
tmp<-p_val
p_val<-array(NA,dim=c(nlon,nlat,ntim))
p_val[,,1]<-tmp
}
h_val<-array(NA,dim=c(nlon,nlat,ntim))
for (t in 1:ntim){
for (lat in 1:nlat){
for (lon in 1:nlon){
if (is.na(p_val[lon,lat,t])){
h_val[lon,lat,t]<-NA
} else if (p_val[lon,lat,t] < siglev){
h_val[lon,lat,t]<-1
} else {
h_val[lon,lat,t]<-0
}
}
}
}
K<-sum(!is.na(p_val[,,1]))
fdr<-array(0,dim=c(nlon,nlat,ntim))
sig_FDR<-array(0,dim=c(nlon,nlat,ntim))
p<-array(NA,dim=c(nlon*nlat))
#put all p-values in 1D vector
prob_1D<-(c(p_val))
# sort vector increasing
p_sort<-sort(prob_1D, decreasing = FALSE)
# reject those local tests for which max[p(k)<=(siglev^(k/K)]
for (k in 1:K){
if (p_sort[k]<=(siglev*(k/K))){
p[k]<-p_sort[k]
} else {
p[k]<-0.0
}
}
p_fdr<-max(p,na.rm=T)
fdr[which(p_val<=p_fdr)] <- 1
sig_FDR[which(fdr==1 & h_val==1)] <- 1
sig_pts<-array(NA,dim=c(ntim))
for (j in 1:ntim){
sig_pts[j]<-(sum(sig_FDR[,,j],na.rm=T))
}
method <- paste("False Discovery Rate for field significance")
rval <- list(h.value=sig_FDR, p.value=p_val, field.sig = siglev,
nr.sigpt=sig_pts, total.test=K, method=method, call=match.call())
class(rval) <- "fdrFS"
return(rval)
}
summary.fdrFS <- function(object, ...){
cat("\n")
msg <- paste("Results for FDR-test",sep='' )
print(msg)
cat("Field significance level: ",object$field.sig,"\n")
cat("Number of significant points: ", object$nr.sigpt, "\n")
cat("Total number of tests: ", object$total.test, "\n")
invisible()
} #end of 'summary.fdrFS'
print.fdrFS <- function(x, ...) {
cat("Call:\n")
print(x$call)
cat("\nNumber of Significant points:",x$nr.sigpt,"out of ", x$total.test," tests.\n")
} |
889e7b38a8aebbf898e7e021a666dd8cce7a3f11 | c77912aed1bf388876c8a973857be340d76466fb | /apps/dashboard/ui.R | ad6f0f759305e234a600e9ecb267b3993c92f9e4 | [] | no_license | global-trade-alert/ricardo | 12f09219694e84466008726c76d3add6c11797be | 6382834205e0655603171ec65438aee51e0a2f11 | refs/heads/master | 2023-03-23T02:09:17.363763 | 2021-03-17T15:32:02 | 2021-03-17T15:32:02 | 254,600,436 | 0 | 0 | null | 2021-02-03T11:01:58 | 2020-04-10T09:52:03 | R | UTF-8 | R | false | false | 353 | r | ui.R | # Module UI function
dashboardui <- function(id) {
  # Shiny module UI: renders the dashboard panel grid inside the
  # namespace derived from the supplied module id.
  ns <- NS(id)
  # Build the nested container divs from the inside out.
  panel_grid <- tags$div(class = "panel-grid", uiOutput(ns("appPanelsOutput")))
  dashboard <- tags$div(class = "dashboard pad-container", panel_grid)
  tagList(tags$div(class = "removeui", dashboard))
}
|
3c21d664bba7a9c8c94fe7e7113f618132164ad6 | e81fb6deb4465df65a96be76218f08bb65f2abab | /R/cov.remover.R | 52f0e46b75cb1f18d3f319fec5e50ec2183ba484 | [] | no_license | akivab2/Analyze4CPackage | 2f62edfc79592d4be88b7cfff560c85ccd3fc1c0 | a232d4fb7b027d330eb48530db724fbe63300c08 | refs/heads/master | 2021-03-24T12:35:28.346212 | 2018-07-09T16:39:22 | 2018-07-09T16:39:22 | 95,444,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 930 | r | cov.remover.R | #' @export
cov.remover <- function(data, remove)
{
  # Randomly zeroes out a fraction `remove` of the positive RE (restriction
  # enzyme) sites in `data`, favouring sites with fewer reads.
  #
  # data:   data frame whose 3rd column holds read counts per RE site.
  # remove: fraction (0-1) of positive sites to set to zero.
  #
  # Returns `data` with the sampled read counts replaced by 0.
  pos <- which(data[, 3] > 0)        # the positive RE sites
  data_pos <- data[pos, ]
  # Weight = 1 minus each site's share of total reads, so sites with fewer
  # reads get a higher chance of being chosen for removal.
  probs <- 1 - (data_pos[, 3] / sum(data_pos[, 3]))
  # Number of positive sites that will be zeroed out.
  n <- round(nrow(data_pos) * remove)
  # Bug fix: sample row indices directly. The original sampled the id
  # column and looked the rows back up with match(), which (a) triggers
  # sample()'s 1:x behaviour when only one candidate id remains and
  # (b) always hits the first row when ids are duplicated.
  inds <- sample(seq_len(nrow(data_pos)), n, replace = FALSE, prob = probs)
  data_pos[inds, 3] <- 0
  # Write the updated read counts back into the full table.
  data[pos, 3] <- data_pos[, 3]
  return(data)
}
|
ab6ffefbc53cafe98453168fc5399079237bf7a4 | e8bd387242529cdc6484013f91e2671c4da69036 | /man/EquityPtf.Rd | 90919083fd79e310ff9d5df619c99c389f5c20b1 | [] | no_license | jianboli/RBacktesting | 82e333add7a2e95ef96e86ae5ff64741e0daa1fb | f258be48c08a962b660337c861cef706563d1411 | refs/heads/master | 2016-09-06T10:03:50.862755 | 2015-10-23T19:27:51 | 2015-10-23T19:27:51 | 25,362,815 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 354 | rd | EquityPtf.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/EquityPtf.R
\name{EquityPtf}
\alias{EquityPtf}
\title{EquityPtf Class (S3) Constructor: this is a portfolio of equities}
\usage{
EquityPtf()
}
\value{
Generate empty EquityPtf object
}
\description{
EquityPtf Class (S3) Constructor: this is a portfolio of equities
}
|
1839db7e30d34fbe5c4eaa2ef0272246ae6cd8f9 | c5e562151f1725747a233647570b758066f12165 | /pop2.R | 20bc96903a6e046e9daa02f5be8042ef5e048804 | [] | no_license | kmcguire2/chem160module13 | ba515ed1b29fa773be7c83465c6bb5da1a53e0d8 | b8857a91078bfd844158cd87c685e4a8ed8f9369 | refs/heads/main | 2023-01-10T00:40:22.192413 | 2020-10-31T19:56:21 | 2020-10-31T19:56:21 | 308,961,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 330 | r | pop2.R | #Logistic model, Mathiusian model up to a carrying capacity
pop <- 100     # initial population
r <- 0.05      # growth rate
K <- 1000      # carrying capacity
n.steps <- 150 # number of time steps to simulate
# Preallocate the trajectory instead of growing it inside the loop.
pop.hist <- numeric(n.steps)
# Discrete-time logistic growth: Malthusian growth damped by (1 - pop/K).
for (i in seq_len(n.steps)) {
  pop.hist[i] <- pop
  delta.pop <- r * pop * (1 - pop / K)  # change in population this step
  pop <- pop + delta.pop
}
plot(pop.hist)
9215498b856755eed214d749a99082193574689a | 2cdb135e648cbf6f78a5b55b424f8bdb13f5c140 | /scripts/Sortingattempts 7-27-16.R | b81c3977a7d1824b73e62f752b3fac6c04625f3e | [] | no_license | Fairlane100/SOTL-project | 89c234d5078ea1b9d110791cd688aafb99f9d150 | 1ea832f0e39b96836c2e916a16ffa43bc11a574c | refs/heads/master | 2021-01-20T19:42:02.219274 | 2016-12-01T20:15:57 | 2016-12-01T20:15:57 | 46,145,850 | 0 | 0 | null | 2016-09-16T18:46:47 | 2015-11-13T20:35:12 | C# | UTF-8 | R | false | false | 534 | r | Sortingattempts 7-27-16.R | #sorting corpus based on word frequencies
# Requires `word.freqs` (and `termfreqs`, inspected below) to already be
# loaded in the session.
# NOTE(review): `termfreqs` is not created anywhere in this script;
# confirm it is loaded alongside word.freqs.
str(word.freqs)
str(termfreqs)
# Two descending sorts of the word-frequency table: by term frequency and
# by document frequency, each written to its own CSV.
# (Bug fix: the trailing empty argument in both write.csv() calls has been
# removed; an empty argument passed through ... is an error in R.)
sorted.wordfreqs.termfreq <- word.freqs[order(-word.freqs$term.freq), ]
write.csv(sorted.wordfreqs.termfreq, file = "sorted.wordfreqs.termfreq.csv")
sorted.wordfreqs.docfreq <- word.freqs[order(-word.freqs$doc.freq), ]
write.csv(sorted.wordfreqs.docfreq, file = "sorted.wordfreqs.docfreq.csv")
# both these csv files are available in GitHub-->SOTL project folder
|
dccf3af9dbd819ee19b678d57982e0dfa8e52f61 | c19c05198442c22aace62cd1c8177adb6533da6d | /temperatureAnalysis.R | e604f262eba33c86cef593ca132f1a4ffb5014f2 | [] | no_license | pinskylab/NJNC-Larval-Analysis | 17e9f1411a52aed0d8cdae8420b95f32b140b258 | 23d76306d877babffa8ae3a2f8efbdcffc1b15fd | refs/heads/master | 2020-03-19T02:26:22.658104 | 2019-02-20T20:14:24 | 2019-02-20T20:14:24 | 135,628,617 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,569 | r | temperatureAnalysis.R | setwd("~/Desktop/Analysis/TemperatureAnalysis")
library(ncdf4)
library(raster) #manipulation
library(rgdal) #geospatial analysis
library(ncdf4.helpers)
library(abind)
library(ggplot2)
library(dplyr)
library(gridExtra)
#This is the data from the daily time-series (NOAA 1/4° x 1/4° resolution)
NJNCtemps1 <- nc_open("1989_2001.nc")
NJNCtemps2 <- nc_open("2002_2012.nc")
sst.array1 <- ncvar_get(NJNCtemps1,"sst")
sst.array2 <- ncvar_get(NJNCtemps2,"sst")
sst.arrays <- abind(sst.array1,sst.array2)
#use this command to swap rows and columns
sst.arrays <- aperm(sst.arrays, c(2,1,3))
lat <- ncvar_get(NJNCtemps1,"lat")
lon <- ncvar_get(NJNCtemps1,"lon")
lon <- lon-360
#Name rows and columns of
colnames(sst.arrays) <- c(lon)
rownames(sst.arrays) <- c(lat)
#load in shelf data as a raster
shelf <- raster("shelfData.nc", crs=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"))
#stack entire temperature array as a raster brick
temperatureBrick <- brick(sst.arrays, xmn=min(lon), xmx=max(lon), ymn=min(lat), ymx=max(lat),
crs=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"))
#flip the brick to make highest latitude at the top
temperatureBrick <- flip(temperatureBrick,direction = 'y')
#make an extent (same as temp data) and crop shelf data to it
temp.object <- extent(-81.625,-71.875,30.875,40.625)
shelf <- crop(shelf,temp.object)
#resample shelf data to temperature data (so they have matching resolutions)
interpShelf<- resample(shelf,temperatureBrick,method = 'bilinear')
#convert all shelf data that is greater than 0m or less than 200m to NA
interpShelf[interpShelf < (-200)]<-NA
interpShelf[interpShelf >= 0]<-NA
#mask the temperature data by these bounds on the shelf data (removes all temperatures not on shelf)
temperatureBrick <- mask(temperatureBrick,interpShelf)
#Subsetting the data
# Convert the masked brick to a data frame: columns are x, y, then one
# column per daily layer (8766 days, 1989-2012 -> 8768 columns total).
temperature.df <- as.data.frame(temperatureBrick,xy=TRUE)
# Split grid cells into a northern (NJ) and southern (NC) region by row.
# NOTE(review): row 841 appears in both subsets (1:841 and 841:1600) and is
# therefore counted in both regional means -- confirm whether 842:1600 was
# intended for the southern block.
njTemp.df <- temperature.df[1:841,]
ncTemp.df <- temperature.df[841:1600,]
#create an array of dates from '89 to 2012
dates <- seq(as.Date("1989-01-01"), as.Date("2012-12-31"), by=1)
# Label the daily layer columns with their dates.
# Fix: the original assigned colnames on an undefined object `njtempObject`;
# temperature.df is the 8768-column object these names fit. (Cosmetic only:
# later code indexes the date columns positionally as [3:8768].)
colnames(temperature.df) <- c("x","y",as.character(dates))
dates.df <- as.data.frame(do.call(rbind,strsplit(as.character(dates),'-',fixed = TRUE))) #splits up dates by '-' markings
njTemp.df <- matrix(colMeans(njTemp.df[3:8768],na.rm = TRUE),nrow=1,ncol=8766) #creates a row array of average daily temps for north location
njTemp.df <- t(njTemp.df) #transpose into column array
njTemp.df<- cbind(dates.df,njTemp.df) #add the dates as 3 more columns (year, month, day)
colnames(njTemp.df)<-c("year","month","day","north")#rename the columns
#complete the above operations on the south temps as well
ncTemp.df <- matrix(colMeans(ncTemp.df[3:8768],na.rm = TRUE),nrow=1,ncol=8766)
ncTemp.df <- t(ncTemp.df)
#combine north df with south df
allTemps.df <- cbind(njTemp.df,ncTemp.df)
colnames(allTemps.df)<-c("year","month","day","north","south")#just so southern temps say "south"
#save all daily temps (north/south columns plus a Date column)
dailyTemps <- allTemps.df[,4:5]
dailyTemps <- transform(dailyTemps,Date = dates)
save(dailyTemps,file = "~/Desktop/Analysis/with 50 extras/dailyTemps.RData")
# Re-attach the split-out year/month/day strings for grouped summaries
dailyTemps <- transform(dailyTemps,
                        year = dates.df[,1],
                        month = dates.df[,2],
                        day = dates.df[,3])
#create data frames of monthly and annual means for north and south
# (column names north.SST / south.SST and the object names are relied on
# by the plotting code below, so they are kept exactly as-is)
northYears.df <- dailyTemps %>%
  group_by(year) %>%
  summarise(north.SST = mean(north))
northMonths.df <- dailyTemps %>%
  group_by(year,month) %>%
  summarise(north.SST = mean(north))
southYears.df <- dailyTemps %>%
  group_by(year) %>%
  summarise(south.SST = mean(south))
southMonths.df <- dailyTemps %>%
  group_by(year,month) %>%
  summarise(south.SST = mean(south))
#combine the north and south temperatures into a single data frame (only did annual here)
nsYears.df <- cbind(as.data.frame(northYears.df),as.data.frame(southYears.df[,2]))
save(nsYears.df,file=("~/Desktop/Analysis/with 50 extras/nsYearsdf.RData"))
#START HERE
###########
# Reload the precomputed daily and annual temperature data frames
load("~/Desktop/Analysis/with 50 extras/dailyTemps.RData")
load("~/Desktop/Analysis/with 50 extras/nsYearsdf.RData")
#plot north and south shelf temperatures
years <- 1989:2012 #so x-scale works in ggplot
annualPlot <- ggplot(nsYears.df)+
  #geom_line(aes(years,north.SST,colour = "NORTH"))+
  #geom_line(aes(years,south.SST,colour = "SOUTH"))+
  geom_point(aes(years,south.SST,colour = "SOUTH"))+
  geom_point(aes(years,north.SST,colour = "NORTH"))+
  # Fix: `method` is an argument of geom_smooth(), not an aesthetic; inside
  # aes() it was silently ignored (with a warning).
  geom_smooth(aes(x = years,y = north.SST),method = 'auto')+
  geom_smooth(aes(x = years,y = south.SST,colour = "SOUTH"),method = 'auto')+
  labs(title="Annual Shelf Temperatures",x="Years",y="Temperature (°C)")+
  scale_colour_manual("Location",values = c("steelblue3","tomato"))+
  scale_x_continuous(breaks = seq(min(years), max(years), by = 2))+
  scale_y_continuous(limits = c(4,26),breaks = seq(min(4),max(26),by=2))+
  theme_bw() +
  theme(axis.text.x = element_text(angle=45),
        axis.text.y = element_text(angle=45))
#jpeg(file="~/Desktop/Analysis/TemperatureAnalysis/Plots/AnnualShelfTemperatures.jpeg", width = 10, height = 7.5, res = 300, units = "in")
#annualPlot
#dev.off()
#averaging the data by month (use aggregate to sort by month and year)
monthlyAverages <- as.data.frame(aggregate(dailyTemps[,1:2], list(dailyTemps$month,dailyTemps$year),mean,na.rm=TRUE))
colnames(monthlyAverages)<- c("Month","Year","North","South")
#monthCount <- c(1:288)
monthlyAverages <- transform(monthlyAverages,monthCount = c(1:288)) # 24 years x 12 months
# Fix: the original `transform(monthlyAverages, Season = )` was left
# unfinished and errors at run time. Label Jan-Apr "Winter" and Sep-Dec
# "Fall", matching the seasonal subsets used further below.
monthlyAverages <- transform(monthlyAverages,
                             Season = ifelse(as.character(Month) %in% c("01","02","03","04"), "Winter",
                                             ifelse(as.character(Month) %in% c("09","10","11","12"), "Fall", NA)))
# Fix: continuation `+` must end the previous line; the original put
# `+ ggtitle(...)` on a fresh line, so the titles were never attached.
monthlyTempPlot <- ggplot(monthlyAverages, aes(monthCount,North)) +
  geom_line() +
  geom_line(aes(color = Year)) +
  ggtitle("Monthly Northern Temperatures") +
  xlab("Month Count") +
  ylab("Temperature (°C)")
#jpeg(file="~/Desktop/Analysis/TemperatureAnalysis/Plots/MonthlyTempPlot.jpeg", width = 10, height = 7.5, res = 300, units = "in")
#monthlyTempPlot
#dev.off()
# --- Seasonal subsets and per-year seasonal means ---
# Winter = Jan-Apr, Fall = Sep-Dec (months stored as zero-padded strings).
winter <- monthlyAverages[monthlyAverages$Month %in% c("01", "02", "03", "04"), ]
fall   <- monthlyAverages[monthlyAverages$Month %in% c("09", "10", "11", "12"), ]
# Helper: annual mean SST for one region within one season, tagged with its
# Location and Season so the four pieces can be stacked below.
seasonal_annual_means <- function(season_df, temp_col, location, season_name) {
  annual <- as.data.frame(
    season_df %>%
      group_by(Year) %>%
      summarise(SST = mean(.data[[temp_col]]))
  )
  transform(annual, Location = location, Season = season_name)
}
annualWinterN <- seasonal_annual_means(winter, "North", "North", "Winter")
annualFallN   <- seasonal_annual_means(fall,   "North", "North", "Fall")
annualWinterS <- seasonal_annual_means(winter, "South", "South", "Winter")
annualFallS   <- seasonal_annual_means(fall,   "South", "South", "Fall")
# One data frame per season (north + south stacked), ordered by year
fallTemps <- rbind(annualFallN, annualFallS)
fallTemps <- fallTemps[order(fallTemps$Year), ]
winterTemps <- rbind(annualWinterN, annualWinterS)
winterTemps <- winterTemps[order(winterTemps$Year), ]
# Master data frame with both seasons and both locations, ordered by year
seasonalAverages <- rbind(annualWinterN, annualFallN, annualWinterS, annualFallS)
seasonalAverages <- seasonalAverages[order(seasonalAverages$Year), ]
#Plot all data (smooth) -> lm for only winter south temps(jan-april) and all north temps
# NOTE(review): dailyTemps$Season is never created in this script; it is
# assumed to be present in the dailyTemps loaded from the .RData above --
# confirm the saved object carries a Season column.
# NOTE(review): when this file is source()'d (rather than run line-by-line),
# a bare top-level ggplot call is not auto-printed, so the jpeg would be
# blank; wrap the plot in print() for non-interactive use.
jpeg(file="~/Desktop/Analysis/AFS/lmTemperatureTrends.jpeg", width = 10, height = 7.5, res = 300, units = "in")
ggplot()+
 # geom_smooth(data = dailyTemps[as.numeric(dailyTemps$month)==01 | as.numeric(dailyTemps$month)==02 | as.numeric(dailyTemps$month)==03 | as.numeric(dailyTemps$month)==04,],aes(x=Date,y=north,color="North",linetype = "Winter"),method = 'lm')+
 # geom_smooth(data = dailyTemps[as.numeric(dailyTemps$month)==09 | as.numeric(dailyTemps$month)==10 | as.numeric(dailyTemps$month)==11 | as.numeric(dailyTemps$month)==12,],aes(x=Date,y=north,color="North",linetype = "Fall"),method = 'lm')+
 # geom_smooth(data = dailyTemps[as.numeric(dailyTemps$month)==01 | as.numeric(dailyTemps$month)==02 | as.numeric(dailyTemps$month)==03 | as.numeric(dailyTemps$month)==04,],aes(x=Date,y=south,color="South",linetype = "Winter"),method = 'lm')+
  geom_smooth(data = dailyTemps[which(dailyTemps$Season == "Winter"),],aes(x=Date,y=south,color="South",linetype="Winter"),method = 'lm')+
  geom_smooth(data = dailyTemps[which(dailyTemps$Season == "Winter"),],aes(x=Date,y=north,color="North",linetype="Winter"),method = 'lm')+
  geom_smooth(data = dailyTemps[which(dailyTemps$Season == "Fall"),],aes(x=Date,y=north,color="North",linetype="Fall"),method = 'lm') +
  scale_linetype_manual("Season", values = c("Winter" = 1, "Fall" = 6))+
  scale_y_continuous(limits = c(0,31))+
  #scale_linetype_discrete("Season", labels = c("winter","fall"))+
  scale_color_manual("Location", values = c("blue","red"))+
  ylab("Temperature (°C)")+
  theme_bw()+
  guides(lty = guide_legend(override.aes = list(col = 'black')))+
  theme(axis.title.x = element_text(size = 15),
        axis.text.x = element_text(size = 13),
        axis.title.y = element_text(size = 15),
        axis.text.y = element_text(size=13),
        legend.text = element_text(size = 13),
        legend.title = element_text(size=15))
dev.off()
#===========================================================
#Determine Hot vs. Cold Years
#===========================================================
# Mean SST for one location, optionally restricted to a single season.
mean_sst <- function(location, season = NULL) {
  keep <- seasonalAverages$Location == location
  if (!is.null(season)) {
    keep <- keep & seasonalAverages$Season == season
  }
  mean(seasonalAverages$SST[which(keep)])
}
# Long-term means per location x season, and per location overall
nFallMean   <- mean_sst("North", "Fall")
nWinterMean <- mean_sst("North", "Winter")
sFallMean   <- mean_sst("South", "Fall")
sWinterMean <- mean_sst("South", "Winter")
nMean <- mean_sst("North")
sMean <- mean_sst("South")
# Reference lines for the seasonal and annual plots below
hLineMeans <- data.frame(Location = c("North", "South"),
                         FallMeans = c(nFallMean, sFallMean),
                         WinterMeans = c(nWinterMean, sWinterMean))
hLineAnnual <- data.frame(Location = c("North", "South"),
                          Mean = c(nMean, sMean))
#++++++++++++++++++++
#FUNCTIONS
#++++++++++++++++++++
# Categorize each Season x Location year as "Hot" or "Cold" relative to the
# long-term mean of its Season/Location group, and record the anomaly
# (SST minus the group mean). Replaces the original 8-branch row loop with
# vectorized group means via ave().
# NOTE(review): the original loop left HorC unset when SST exactly equalled
# the group mean; here a zero anomaly is labelled "Hot" (exact ties are
# vanishingly unlikely with floating-point means).
groupMeanSST <- ave(seasonalAverages$SST,
                    seasonalAverages$Season,
                    seasonalAverages$Location,
                    FUN = mean)
seasonalAverages$anomaly <- seasonalAverages$SST - groupMeanSST
seasonalAverages$HorC <- ifelse(seasonalAverages$anomaly < 0, "Cold", "Hot")
# Build the plotting label "<HorC>; <Season>" (e.g. "Cold; Fall") for each
# row. Replaces the original 4-branch row loop with a single vectorized
# paste() producing identical strings.
seasonalAverages$group <- paste(seasonalAverages$HorC, seasonalAverages$Season, sep = "; ")
# --- Per-individual anomalies (vectorized replacements for the row loops) ---
# NOTE(review): updatedallSL is created outside this script; it is assumed
# to carry Location ("North"/"South"), Season, northAVG and southAVG columns.
# The 30-day average for each individual's own region:
regionalAVG <- ifelse(updatedallSL$Location == "North",
                      updatedallSL$northAVG,
                      updatedallSL$southAVG)
# Anomaly relative to the all-year regional mean (nMean / sMean computed
# above from seasonalAverages).
regionalMean <- ifelse(updatedallSL$Location == "North", nMean, sMean)
updatedallSL$anomaly <- regionalAVG - regionalMean
# Hot/cold classification from the sign of the anomaly.
# NOTE(review): the original loop left an exactly-zero anomaly unclassified
# (and errored on NA); here zero is labelled "Hot" and NA propagates.
updatedallSL$HorC <- ifelse(updatedallSL$anomaly < 0, "Cold", "Hot")
# Seasonal anomaly: same idea, but relative to the Season x Location mean.
# The "Location-Season" label doubles as the plotting group.
updatedallSL$group <- paste(updatedallSL$Location, updatedallSL$Season, sep = "-")
seasonMeans <- c("North-Fall"   = nFallMean,
                 "North-Winter" = nWinterMean,
                 "South-Fall"   = sFallMean,
                 "South-Winter" = sWinterMean)
updatedallSL$seasonalAnomaly <- regionalAVG - unname(seasonMeans[updatedallSL$group])
updatedallSL$HorC_S <- ifelse(updatedallSL$seasonalAnomaly < 0, "Cold", "Hot")
#++++++++++++++++++++++
#Plotting anomaly stuff
#++++++++++++++++++++++
#Create a faceted plot to show seasonal differences between north and south
seasonalPlot <- ggplot(seasonalAverages,aes(as.numeric(levels(Year))[Year],SST,color=as.factor(group),shape=as.factor(group)))+
  geom_point()+
  geom_hline(data = hLineMeans, aes(yintercept = WinterMeans),alpha=0.4,linetype="longdash")+
  geom_hline(data = hLineMeans, aes(yintercept = FallMeans),alpha=0.4,linetype="longdash")+
  labs(title="Annual Shelf SST by Season & Location",x="Years",y="Temperature (°C)")+
  scale_colour_manual(name = "Hot or Cold Year by Season",
                      values = c("blue","blue","red","red"),
                      labels = c("Cold, Fall","Cold, Winter","Hot, Fall","Hot, Winter"))+
  scale_shape_manual(name = "Hot or Cold Year by Season",
                     values = c(19,17,19,17),
                     labels = c("Cold, Fall","Cold, Winter","Hot, Fall","Hot, Winter"))+
  #scale_fill_manual("Seasonal Values",values = c("blue","red"))+
  scale_y_continuous(limits = c(4,26),breaks = seq(min(4),max(26),by=2))+
  scale_x_continuous(breaks = c(seq(1989,2012,by = 2)))+
  facet_grid(Location ~ .)+
  theme_bw() +
  theme(axis.text.x = element_text(angle=45),
        axis.text.y = element_text(angle=45),
        strip.background =element_rect(fill="gray80"),
        legend.box.background = element_rect(colour = "black"))
#save file as jpeg
# jpeg(file="~/Desktop/Analysis/TemperatureAnalysis/Plots/individualSeasonalAnomalies2.jpeg", width = 10, height = 7.5, res = 300, units = "in")
# grid.draw(new_plot)
# dev.off()
#I think the issue here is that when I use annual averages, all of the fall individuals are going to be "Hot"
# and all of the winter individuals will be "Cold"
# NOTE(review): date_format() comes from the scales package, which is never
# attached in this script; add library(scales) or use scales::date_format.
indivAnomaly <- ggplot(updatedallSL,aes(x=Date,y=anomaly,color=as.factor(HorC),alpha=anomaly))+
  geom_point(aes(alpha = abs(anomaly)))+ #trying to show strength of anomaly with opacity
  geom_hline(aes(yintercept = 0),linetype="longdash")+
  scale_color_manual(name = "Hot or Cold Individuals",
                     values = c("blue","red"))+
  scale_x_date(date_breaks = ("2 years"),labels = date_format("%Y"),limits = as.Date(c('1989-01-01','2012-12-31')))+
  scale_alpha_continuous(limits = c(0,10),range = c(0.4,1),guide = 'none')+
  scale_y_continuous(limits = c(-10,10))+
  facet_grid(Location ~ .)+
  theme_bw()+
  theme(axis.text.x = element_text(angle=45),
        axis.text.y = element_text(angle=45),
        strip.background =element_rect(fill="gray80"),
        legend.box.background = element_rect(colour = "black"))
#This plot is a fix from the last one --> it takes season into account when calculating anomaly
#Want to get blank panel in there for "South - Fall"
#Want to have the strips of the north panel one color and the strips of the south panels another color
indvSeasonAnom <- ggplot(updatedallSL, aes(x=Date,y=seasonalAnomaly,fill=as.factor(HorC_S)))+
  geom_hline(aes(yintercept = 0),linetype="longdash",alpha=0.4)+
  geom_point(pch = 21,color="white")+
  geom_count(show.legend = T,pch=21,color="black")+
  scale_fill_manual(name = "Hot or Cold Individuals",
                    values = c("blue","red"))+
  guides(fill = guide_legend(override.aes = list(pch = 21,color="black",size=2)))+
  scale_x_date(date_breaks = ("2 years"),labels = date_format("%Y"),limits = as.Date(c('1989-01-01','2012-12-31')))+
  labs(title = "Individual Ingress Temperature Anomalies",y="Seasonal Anomaly")+
  scale_y_continuous(limits = c(-8,8))+
  facet_grid(Location+Season ~ .)+
  theme_bw()+
  theme(axis.text.x = element_text(angle=45,vjust = 0.7),
        legend.box.background = element_rect(colour = "black"))
# #NOT USING RIGHT NOW BUT ADDS DUMMY ROW TO DF FOR EMPTY SOUTH-FALL PANEL
# #####
# dummyRow <- updatedallSL[1,]
# dummyRow[1,] <- NA
# dummyRow[1,c(2,5)] <- c("South","Fall")
# updatedallSLdummy <- rbind(updatedallSL,dummyRow)
#Hacking the facet panels
# Recolor facet strips by reaching into the gtable of the rendered plot.
# NOTE(review): the hard-coded grob indices [[13]]..[[15]] depend on the
# ggplot2 version and plot structure -- re-check them after any upgrade.
# NOTE(review): grid.draw() is from the grid package; make sure grid is
# attached (library(grid)) before running this section.
anomalyGrob <- ggplotGrob(indvSeasonAnom)
print(anomalyGrob) # inspect the gtable layout to locate the strip grobs
anomalyGrob$grobs[[13]]$grobs[[1]]$children[[1]]$gp$fill <- "tomato"
anomalyGrob$grobs[[14]]$grobs[[1]]$children[[1]]$gp$fill <- "steelblue3"
anomalyGrob$grobs[[15]]$grobs[[1]]$children[[1]]$gp$fill <- "steelblue3"
anomalyGrob$grobs[[13]]$grobs[[2]]$children[[1]]$gp$fill <- "grey85"
anomalyGrob$grobs[[14]]$grobs[[2]]$children[[1]]$gp$fill <- "grey85"
anomalyGrob$grobs[[13]]$grobs[[2]]$children[[1]]$gp$col <- "steelblue3"
anomalyGrob$grobs[[14]]$grobs[[2]]$children[[1]]$gp$col <- "steelblue3"
anomalyGrob$grobs[[15]]$grobs[[2]]$children[[1]]$gp$col <- "tomato"
anomalyGrob$grobs[[13]]$grobs[[2]]$children[[1]]$gp$lwd <- 5
anomalyGrob$grobs[[14]]$grobs[[2]]$children[[1]]$gp$lwd <- 5
anomalyGrob$grobs[[15]]$grobs[[2]]$children[[1]]$gp$lwd <- 5
grid.draw(anomalyGrob)
#WORK IN PROGRESS
# from: https://stackoverflow.com/questions/24169675/multiple-colors-in-a-facet-strip-background
#Too hacky, worked it out above
# NOTE(review): this whole section is an abandoned alternative to the grob
# edit above; it overlays recolored strips from a dummy plot onto the real
# one. Kept for reference only.
updatedallSL$facet_fill_color <- c("a","b")[updatedallSL$Location]
## Create main plot
dummy <- indvSeasonAnom
dummy$layers <- NULL
dummy <- dummy + geom_rect(data=updatedallSL, xmin=-Inf, ymin=-Inf, xmax=Inf, ymax=Inf,
                           aes(fill = facet_fill_color))+scale_fill_manual(values = c("steelblue3","tomato"))
library(gtable)
g1 <- ggplotGrob(indvSeasonAnom)
g2 <- ggplotGrob(dummy)
# Keep only the gtable cells selected by `matches` (panels/strips below)
gtable_select <- function (x, ...)
{
  matches <- c(...)
  x$layout <- x$layout[matches, , drop = FALSE]
  x$grobs <- x$grobs[matches]
  x
}
panels <- grepl(pattern="panel", g2$layout$name)
strips <- grepl(pattern="strip-right", g2$layout$name)
g2$grobs[strips] <- replicate(sum(strips), nullGrob(), simplify = FALSE)
g2$layout$l[panels] <- g2$layout$l[panels] + 1
g2$layout$r[panels] <- g2$layout$r[panels] + 2
new_strips <- gtable_select(g2, panels | strips)
grid.newpage()
grid.draw(new_strips)
# Stack one gtable's grobs on top of another's (old strips are covered,
# not removed)
gtable_stack <- function(g1, g2){
  g1$grobs <- c(g1$grobs, g2$grobs)
  g1$layout <- rbind(g1$layout, g2$layout)
  g1
}
## ideally you'd remove the old strips, for now they're just covered
new_plot <- gtable_stack(g1, new_strips)
grid.newpage()
grid.draw(new_plot)
|
170e4fa98e7c9ce2383246480e055463aa89d63d | 0cf8414c923f5b7d611502211915118ed19b3697 | /lib/train_final.R | 3f0c06db000fd9e81521034aed06f3871d7af3f1 | [] | no_license | xiuruoyan/Dogs-Fried-Chicken-or-Blueberry-Muffins- | 5228954a095f1ff815fc836f54cbf3184ede2b41 | 0bfd382bd46aac8dd3fdd8daf132ef2f1f00cf2d | refs/heads/master | 2020-04-17T15:02:28.353551 | 2019-01-20T20:26:00 | 2019-01-20T20:26:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,268 | r | train_final.R | #########################################################
### Train a classification model with training images ###
#########################################################
### Group 7
### ADS Project 3 Spring 2018
train <- function(dat_train, label_train, params=NULL,
run.gbm = F, run.svm.lin = F,
run.svm.rbf = F, run.xgboost = F,
run.rf = F, run.lg = F, run.adaboost = F){
# Train with GBM(Baseline) model
if(!require("gbm")){
install.packages("gbm")
}
library("gbm")
gbm <- NULL
if(run.gbm){
train_df <- data.frame(label = label_train[,3]-1, dat_train[,-1])
gbm <- gbm(label~ ., data = train_df, interaction.depth = 1,
distribution="multinomial", shrinkage = 0.1, n.trees = params[1])
return(gbm)
}
# Train with linear SVM model
if(!require("e1071")){
install.packages("e1071")
}
library("e1071")
svm.lin <- NULL
if(run.svm.lin){
svm.lin <- svm(dat_train[,-1], label_train[,-1], cost = params[1],
scale = F, kernel = "linear")
return(svm.lin)
}
# Train with RBF Kernel SVM model
svm.rbf <- NULL
if(run.svm.rbf){
svm.rbf <- svm(dat_train[,-1], label_train[,-1], cost = params[1], gamma = params[2],
scale = F, kernel = "radial")
return(svm.rbf)
}
# Train with XGBoost model
xgboost <- NULL
if(run.xgboost){
xgboost <- xgboostfit(dat_train, label_train, params)
return(xgboost)
}
# Train with Random Forest model
if(!require("randomForest")){
install.packages("randomForest")
}
library("randomForest")
rf <- NULL
if(run.rf){
rf <- randomForest(dat_train[,-1], label_train[,3], ntree = params)
return(rf)
}
# Train with AdaBoost model
if(!require("adabag")){
install.packages("adabag")
}
library("adabag")
ada <- NULL
if(run.ada){
ada <- adaboost(dat_train[,-1], label_train[,3], ntree = params)
return(ada)
}
# Train with logistic regression model
if(!require("nnet")){
install.packages("nnet")
}
library("nnet")
lg <- NULL
if(run.lg){
lg <- logistic(dat_train[,-1], label_train[,3], MaxNWts = 20000, maxit = maxit)
return(lg)
}
} |
02380f2d8036159fd090377aabd671242e132583 | 7dbddae9e805bfa528d282c2174e3d7b75b43dbb | /man/sigma.blblm.Rd | 154623737c9aa9dc7babf116fa286bf8dca09c16 | [
"MIT"
] | permissive | ggsmith842/blblm | 945f64f6a9434fa149a32ea70793100ada08da6e | 7d9386f8e91d2a7b1480bd4d790b41d74707ea87 | refs/heads/master | 2022-10-17T17:23:31.830568 | 2020-06-08T19:38:42 | 2020-06-08T19:38:42 | 267,986,434 | 0 | 0 | null | 2020-05-30T01:16:59 | 2020-05-30T01:16:59 | null | UTF-8 | R | false | true | 460 | rd | sigma.blblm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{sigma.blblm}
\alias{sigma.blblm}
\title{calculates sigma from the blblm}
\usage{
\method{sigma}{blblm}(object, confidence = FALSE, level = 0.95, ...)
}
\arguments{
\item{object}{a blblm result}
\item{confidence}{creates a confidence interval}
\item{level}{the level of confidence}
\item{...}{additional parameters}
}
\description{
calculates sigma from the blblm
}
|
d651904f9019c1aca9ba35ac6cca771383b4ba43 | 51b0a4aa4fe3b603f897724be6294169f0561557 | /R/extent.R | 95b1baf719e33fb499693341baf69cb98dca41ca | [] | no_license | saraorofino/climate | 06053015f01699901fe89a939877aa2d9fafb68c | 9018ee28d1f219e6665b553773c495e03d9cf48c | refs/heads/master | 2021-03-05T04:40:35.630480 | 2020-03-16T02:53:55 | 2020-03-16T02:53:55 | 246,095,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,177 | r | extent.R | #' Describe rate of change in sea ice extent between two years
#'
#' Calculate rate of change in monthly sea ice extent in the Northern Hemisphere
#' @param year1 the earlier year of interest, four digit format (i.e. 1985)
#' @param year2 the later year of interest, four digit format (i.e. 1990)
#' @param ext data frame with at least two columns: year, extent (in millions of square kilometers)
#' @param showplot optional to display the sea ice extent for all the years in the ext data frame, default showplot=F
#' @return a list with the following items
#' \describe{
#' \item{rate}{The rate of change in sea ice extent from year1 to year2}
#' \item{plt}{A plot of sea ice extent over all the years in the ext data frame, null if showplot=F}
#' }
#'
#' @examples
#' Generate some input data:
#' input <- data.frame(year = seq(2000,2019,1), extents = runif(20,3,7))
#' extent(year1 = 2005, year2 = 2010, ext = input, showplot=TRUE)
#'
#' @references
#' Example data downloaded from [ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/monthly/data/]
#' Metadata for package data: [https://nsidc.org/data/G02135/versions/3]
extent <- function(year1, year2, ext, showplot = FALSE){
#Get the value of sea ice extent for the first input year
sub1 <- subset(ext, year == year1)
ext1 <- sub1$extent
#Get the value of sea ice extent for the second input year
sub2 <- subset(ext, year == year2)
ext2 <- sub2$extent
#calculate the rate of change (change in sea ice extent / change in time)
rate = (ext2 - ext1) / (year2 - year1)
#Optional plot of sea ice extent over the years:
if(showplot == TRUE){
#packages needed for graph
library(tidyverse)
library(RColorBrewer)
p = ggplot(ext, aes(x = year, y = extent)) +
geom_point(color = "dodgerblue4") +
geom_line(color = "dodgerblue4") +
scale_x_continuous(expand = c(0.005,0.005)) +
scale_y_continuous(expand = c(0.05,0.05)) +
labs(x = "Year",
y = expression(Sea~Ice~Extent~(km^2))) +
theme_light()
}
if(showplot == FALSE){
p=NULL #no plot if showplot = false
}
return(list(ext1 = ext1, ext2 = ext2, rate = rate, plt = p))
}
|
e1d1ae9239250d42dcf33a8c23494a6934bb142d | 5a8f1569a8101e49749305939805a60144cca82b | /man/write.tree.string.Rd | 58ce5b1c93d0582ec7d79a46c08785a5acaf4b16 | [] | no_license | cran/phybase | 306986ccdc165a62ac56b190d9884f68712b1264 | d8ea06b72e176a07c0164e7e9f7ea66966b9f467 | refs/heads/master | 2016-09-10T00:59:33.002810 | 2008-03-25T00:00:00 | 2008-03-25T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 960 | rd | write.tree.string.Rd | \name{write.tree.string}
\alias{write.tree.string}
\title{ Write a tree file }
\description{
The function writes tree strings to a file in NEXUS or PHYLIP format.
}
\usage{
write.tree.string(X, format = "Nexus", file = "", name = "")
}
\arguments{
\item{X}{ a vector of tree strings }
\item{format}{ tree file format }
\item{file}{ the file name }
\item{name}{ the species names }
}
\details{
If name is provided, the function will use name as the species names in the translation block in the NEXUS tree file. Otherwise, the species names will be extracted from the tree strings.}
\value{
The function returns a tree file in the format of NEXUS or PHYLIP.
}
\author{ Liang Liu \email{lliu@oeb.harvard.edu} }
\keyword{IO}
\references{
Felsenstein, J. The Newick tree format. \url{http://evolution.genetics.washington.edu/phylip/newicktree.html}
}
\seealso{ \code{\link{write.subtree}}, \code{\link{read.tree.string}} }
|
0bcbe854708f97acdf9d2f7b370b0e53212ce870 | 9952bbc11691a7c0c505bba4a4513610d8a542ad | /man/prox_multilevel_ridge_nuc.Rd | 5a8e3e804185e6154d428de4ae53f9001506c803 | [] | no_license | benjilu/balancer | 9a6cbc24f4a4f87d2c9053791f0ce6befab09bd4 | 8af3cd9151c3d5156b6ee44b1252f86e16d489d9 | refs/heads/master | 2022-12-03T02:55:10.339926 | 2020-08-08T05:10:48 | 2020-08-08T05:10:48 | 274,989,853 | 0 | 0 | null | 2020-06-25T18:39:35 | 2020-06-25T18:39:34 | null | UTF-8 | R | false | true | 624 | rd | prox_multilevel_ridge_nuc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{prox_multilevel_ridge_nuc}
\alias{prox_multilevel_ridge_nuc}
\title{Squared L2 Prox for global parameters, nuclear norm prox for local parameters}
\usage{
prox_multilevel_ridge_nuc(x, lam, opts)
}
\arguments{
\item{x}{Input matrix (contains global and local parameters}
\item{lam}{Prox scaling factor}
\item{opts}{List of options (opts["alpha"] holds the ratio between global and local balance}
}
\value{
L2 squared prox values
}
\description{
Squared L2 Prox for global parameters, nuclear norm prox for local parameters
}
|
ad838e8fc3c1362c026ff37d83d118ae3d655b26 | 46133a4c161f7153b4087aa67aa529621cfa2e3b | /analyses/DNR_SRM_20170902/2017-09-06-NMDS-for-Technical-Replication.R | 701027da9916e4405de84bdeecf685efcc09cc4c | [] | no_license | RobertsLab/project-oyster-oa | 8d245ba45ebdce9eb4b4914febd65406e2a06de7 | d0ec0ed733414b7d5e1e2eb489fa75c03bc3335a | refs/heads/master | 2023-06-26T12:59:03.306502 | 2023-06-18T22:09:35 | 2023-06-18T22:09:35 | 68,737,231 | 2 | 4 | null | 2017-04-28T21:04:28 | 2016-09-20T17:38:13 | Jupyter Notebook | UTF-8 | R | false | false | 13,410 | r | 2017-09-06-NMDS-for-Technical-Replication.R | #In this script, I'll use an NMDS plot to see if my technical replicates are similar.
#### IMPORT DATA ####
SRMAreas <- read.csv("2017-09-12-Gigas-SRM-ReplicatesOnly-PostDilutionCurve-NoPivot-RevisedSettings-Report.csv", na.strings = "#N/A") #Specify Skyline's special way of designating N/A values
head(SRMAreas) #Confirm import
tail(SRMAreas) #Confirm import
#### CREATE A MASTER DATAFRAME ####
#I want to merge my Skyline data with sample names, sites, and eelgrass condition to create a master dataframe will all possible information
sequenceFile <- read.csv("2017-07-28-SRM-Samples-Sequence-File.csv", na.strings = "N/A") # Import sequence file
head(sequenceFile) #Confirm import
sequenceFile <- sequenceFile[,c(2,3,8)] #Keep the Replicate.Name, Comment and TIC columns
names(sequenceFile) <- c("Replicate.Name", "Sample.Number", "TIC")
head(sequenceFile) #Confirm change
masterSRMData <- merge(x = SRMAreas, y = sequenceFile, by = "Replicate.Name") #Merge the sample names and replicate names to use for analysis.
head(masterSRMData) #Confirm merge
tail(masterSRMData) #Confirm merge
biologicalReplicates <- read.csv("2017-09-06-Biological-Replicate-Information.csv", na.strings = "N/A", fileEncoding="UTF-8-BOM") #Import site and eelgrass condition information (i.e. biological replicate information), using specific file encoding information
head(biologicalReplicates) #Confirm import
tail(biologicalReplicates) #Confirm import
masterSRMDataBiologicalReplicates <- merge(x = masterSRMData, y = biologicalReplicates, by = "Sample.Number") #Add biological replicate information to master list.
head(masterSRMDataBiologicalReplicates) #Confirm change
#write.csv(x = masterSRMDataBiologicalReplicates, file = "2017-09-07-Master-SRM-Data-BiologicalReplicates-NoBlanks-NoPivot.csv") #Write out master dataframe
#### SUBSET DATA FOR NMDS PLOT ####
#For the NMDS, I want only the protein/peptide/transition information and peak area
SRMDataNMDS <- masterSRMDataBiologicalReplicates #Duplicate master list into a new dataframe
head(SRMDataNMDS) #Confirm copy
tail(SRMDataNMDS) #Confirm copy
SRMDataNMDS <- SRMDataNMDS[,-c(2, 5, 7, 10, 11)] #Remove extraneous columns: Replicate.Name, Transition, Peptide.Retention.Time, Site, Eelgrass
head(SRMDataNMDS) #Confirm column removal
SRMDataNMDS <- SRMDataNMDS[! SRMDataNMDS$Protein.Name %in% "PRTC peptides", ] #Remove PRTC peptide data
head(SRMDataNMDS) #Confirm removal
transform(SRMDataNMDS, Area = as.numeric(Area)) #Make sure Area is recognized as a numeric variable
is.numeric(SRMDataNMDS$Area) #Confirm change
transform(SRMDataNMDS, TIC = as.numeric(TIC)) #Make sure TIC is recognized as a numeric variable
is.numeric(SRMDataNMDS$TIC) #Confirm change
#### MAKE NMDS WITHOUT NORMALIZING ####
#The goal is to have the row names of my new dataframe be Protein/Peptides/Transitions, with the column names as the sample number
SRMDataNonNormalizedNMDS <- SRMDataNMDS #Create a duplicate dataframe
SRMDataNonNormalizedNMDS <- SRMDataNMDS[,-6] #Remove TIC column
head(SRMDataNonNormalizedNMDS) #Confirm creation
#My first step is to change my dataframe from long to wide (i.e. cast it)
library(reshape2) #Instal package to pivot table
SRMDataNMDSNonNormalizedPivoted <- dcast(SRMDataNonNormalizedNMDS, Protein.Name + Peptide.Sequence + Fragment.Ion ~ Sample.Number) #Cast table! Protein/Peptides/Transitions remain as columns with Sample Number as column headers. Normalized.Area used as value column by default.
head(SRMDataNMDSNonNormalizedPivoted) #Confirm cast.
SRMDataNMDSNonNormalizedPivoted$RowNames <- paste(SRMDataNMDSNonNormalizedPivoted$Protein.Name, SRMDataNMDSNonNormalizedPivoted$Peptide.Sequence, SRMDataNMDSNonNormalizedPivoted$Fragment.Ion) #Merge Protein, Peptide and Transition information into one column
head(SRMDataNMDSNonNormalizedPivoted) #Confirm column merge
SRMDataNMDSNonNormalizedPivoted <- SRMDataNMDSNonNormalizedPivoted[,-c(1:3)] #Remove unmerged columns
head(SRMDataNMDSNonNormalizedPivoted) #Confirm column removal
#write.csv(SRMDataNMDSNonNormalizedPivoted, file = "2017-09-07-SRM-Data-NMDS-Pivoted.csv") #Wrote out as .csv to make future analyses easier.
#Now I can make an NMDS plot
#Load the source file for the biostats package
source("biostats.R") #Either load the source R script or copy paste
install.packages("vegan") #Install vegan package
library(vegan)
SRMDataNMDSNonNormalizedPivotedCorrected <- SRMDataNMDSNonNormalizedPivoted #Duplicate dataframe
SRMDataNMDSNonNormalizedPivotedCorrected[is.na(SRMDataNMDSNonNormalizedPivotedCorrected)] <- 0 #Replace NAs with 0s
head(SRMDataNMDSNonNormalizedPivotedCorrected) #Confirm there are no NAs
area.protID <- SRMDataNMDSNonNormalizedPivotedCorrected[-93] #Save all area data as a new dataframe
rownames(area.protID) <- SRMDataNMDSNonNormalizedPivotedCorrected[,93] #Make sure last column of protein names is recognized as row names instead of values
head(area.protID) #Confirm changes
area.t <- t(area.protID) #Transpose the file so that rows and columns are switched
head(area.t) #Confirm transposition
area.tra <- (area.t+1) #Add 1 to all values before transforming
area.tra <- data.trans(area.tra, method = 'log', plot = FALSE) #log(x+1) transformation
proc.nmds.nonnorm.euclidean <- metaMDS(area.t, distance = 'euclidean', k = 2, trymax = 10000, autotransform = FALSE) #Make MDS dissimilarity matrix using euclidean distance. Julian confirmed that I should use euclidean distances, and not bray-curtis
#stressplot(proc.nmds.nonnorm.euclidean) #Make Shepard plot
#ordiplot(proc.nmds.nonnorm.euclidean) #Plot basic NMDS
#vec.proc.nmds.nonnorm.euclidean <- envfit(proc.nmds.nonnorm.euclidean$points, area.t, perm = 1000) #Calculate loadings
ordiplot(proc.nmds.nonnorm.euclidean, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
#plot(vec.proc.nmds.euclidean, p.max=.01, col='blue') #Plot eigenvectors
#proc.nmds.nonnorm.euclidean.log <- metaMDS(area.tra, distance = 'euclidean', k = 2, trymax = 10000, autotransform = FALSE) #Make MDS dissimilarity matrix using euclidean distance
#stressplot(proc.nmds.nonnorm.euclidean.log) #Make Shepard plot
#ordiplot(proc.nmds.nonnorm.euclidean.log) #Plot basic NMDS
#ordiplot(proc.nmds.nonnorm.euclidean.log, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
#proc.nmds.nonnorm.euclidean.autotransform <- metaMDS(area.t, distance = 'euclidean', k = 2, trymax = 10000, autotransform = TRUE) #Make MDS dissimilarity matrix using euclidean distance and autotransformation
#stressplot(proc.nmds.nonnorm.euclidean.autotransform) #Make Shepard plot
#ordiplot(proc.nmds.nonnorm.euclidean.autotransform) #Plot basic NMDS
#ordiplot(proc.nmds.nonnorm.euclidean.autotransform, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
#jpeg(filename = "2017-09-11-NMDS-TechnicalReplication-NonNormalized.jpeg", width = 1000, height = 1000)
#ordiplot(proc.nmds.nonnorm.euclidean, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
#dev.off()
#Only the euclidean non-transformed version gives me an NMDS plot, but there looks like there's variation that should be accounted for by normalizing.
#### NORMALIZE BY TIC VALUES ####
SRMNormalizedDataNMDS <- SRMDataNMDS #Duplicate dataframe
SRMNormalizedDataNMDS$Normalized.Area <- SRMNormalizedDataNMDS$Area/SRMDataNMDS$TIC #Divide areas by corresponding TIC values
head(SRMNormalizedDataNMDS) #Confirm division
SRMNormalizedDataNMDS <- SRMNormalizedDataNMDS[,-c(5,6)] #Remove nonnormalized area and TIC columns
head(SRMNormalizedDataNMDS) #Confirm column removal
#### REFORMAT DATAFRAME FOR NMDS ####
#The goal is to have the row names of my new dataframe be Protein/Peptides/Transitions, with the column names as the sample number
#My first step is to change my dataframe from long to wide (i.e. cast it)
library(reshape2) #Instal package to pivot table
SRMDataNMDSPivoted <- dcast(SRMNormalizedDataNMDS, Protein.Name + Peptide.Sequence + Fragment.Ion ~ Sample.Number) #Cast table! Protein/Peptides/Transitions remain as columns with Sample Number as column headers. Normalized.Area used as value column by default.
head(SRMDataNMDSPivoted) #Confirm cast.
SRMDataNMDSPivoted$RowNames <- paste(SRMDataNMDSPivoted$Protein.Name, SRMDataNMDSPivoted$Peptide.Sequence, SRMDataNMDSPivoted$Fragment.Ion) #Merge Protein, Peptide and Transition information into one column
head(SRMDataNMDSPivoted) #Confirm column merge
SRMDataNMDSPivoted <- SRMDataNMDSPivoted[,-c(1:3)] #Remove unmerged columns
head(SRMDataNMDSPivoted) #Confirm column removal
#write.csv(SRMDataNMDSPivoted, file = "2017-09-11-SRM-Data-Normalized-NMDS-Pivoted.csv") #Wrote out as .csv to make future analyses easier.
#### NMDS PLOT ####
#Load the source file for the biostats package
source("biostats.R") #Either load the source R script or copy paste.
install.packages("vegan") #Install vegan package
library(vegan)
SRMDataNMDSPivotedCorrected <- SRMDataNMDSPivoted #Duplicate dataframe
SRMDataNMDSPivotedCorrected[is.na(SRMDataNMDSPivotedCorrected)] <- 0 #Replace NAs with 0s
head(SRMDataNMDSPivotedCorrected) #Confirm there are no NAs
area.protID2 <- SRMDataNMDSPivotedCorrected[-93] #Save all area data as a new dataframe
rownames(area.protID2) <- SRMDataNMDSPivotedCorrected[,93] #Make sure last column of protein names is recognized as row names instead of values
head(area.protID2) #Confirm changes
area2.t <- t(area.protID2) #Transpose the file so that rows and columns are switched
head(area2.t) #Confirm transposition
area2.tra <- (area2.t+1) #Add 1 to all values before transforming
area2.tra <- data.trans(area2.tra, method = 'log', plot = FALSE) #log(x+1) transformation
proc.nmds.euclidean <- metaMDS(area2.t, distance = 'euclidean', k = 2, trymax = 10000, autotransform = FALSE) #Make MDS dissimilarity matrix using euclidean distance. Julian confirmed that I should use euclidean distances, and not bray-curtis
stressplot(proc.nmds.euclidean) #Make Shepard plot
ordiplot(proc.nmds.euclidean) #Plot basic NMDS
vec.proc.nmds.euclidean <- envfit(proc.nmds.euclidean$points, area2.t, perm = 1000) #Calculate loadings
ordiplot(proc.nmds.euclidean, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
plot(vec.proc.nmds.euclidean, p.max=.01, col='blue') #Plot eigenvectors
proc.nmds.euclidean.log <- metaMDS(area2.tra, distance = 'euclidean', k = 2, trymax = 10000, autotransform = FALSE) #Make MDS dissimilarity matrix using euclidean distance
#stressplot(proc.nmds.euclidean.log) #Make Shepard plot
#ordiplot(proc.nmds.euclidean.log) #Plot basic NMDS
ordiplot(proc.nmds.euclidean.log, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
proc.nmds.euclidean.autotransform <- metaMDS(area2.t, distance = 'euclidean', k = 2, trymax = 10000, autotransform = TRUE) #Make MDS dissimilarity matrix using euclidean distance and autotransformation
#stressplot(proc.nmds.euclidean.autotransform) #Make Shepard plot
#ordiplot(proc.nmds.euclidean.autotransform) #Plot basic NMDS
ordiplot(proc.nmds.euclidean.autotransform, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
#jpeg(filename = "2017-09-08-NMDS-TechnicalReplication-Normalized.jpeg", width = 1000, height = 1000)
#ordiplot(proc.nmds.euclidean, choices = c(1,2), type = "text", display = "sites") #Plot refined NMDS displaying only samples with their names
#dev.off()
#### CALCULATE DISTANCES BETWEEN TECHNICAL REPLICATE ORDINATIONS ####
NMDSCoordinates <- proc.nmds.euclidean$points #Save NMDS coordinates of each point in a new dataframe
head(NMDSCoordinates) #Confirm dataframe creation
nSamples <- length(NMDSCoordinates)/2 #Calculate the number of samples
sampleDistances <- vector(length = nSamples) #Create an empty vector to store distance values
for(i in 1:nSamples) { #For rows in NMDSCoordinates
sampleDistances[i] <- sqrt((NMDSCoordinates[i,1]-NMDSCoordinates[i,2])^2 + (NMDSCoordinates[i+1,1]-NMDSCoordinates[i+1,2])^2) #Calculate distance between ordinations
print(sampleDistances[i]) #Print the distance value
}
sampleDistances #Confirm vector creation. This vector has all consecutive pairs, including those that are not paris of technical replicates. I need to retain just the odd numbered rows.
technicalReplicates <- rownames(NMDSCoordinates) #Save rownames as a new vector
technicalReplicates #Confirm vector creation
technicalReplicateDistances <- data.frame(Sample = technicalReplicates[seq(from = 1, to = nSamples, by = 2)],
Distance = sampleDistances[seq(from = 1, to = nSamples, by = 2)]) #Create a new dataframe with just odd numbered row distances (technical replicate pairs)
head(technicalReplicateDistances) #Confirm dataframe creation
tail(technicalReplicateDistances) #Confirm dataframe creation
#### PLOT DISTANCES BETWEEN TECHNICAL REPLICATE ORDINATIONS ####
#jpeg(filename = "2017-09-08-NMDS-TechnicalReplication-Ordination-Distances.jpeg", width = 1000, height = 1000)
plot(x = technicalReplicateDistances$Sample, y = technicalReplicateDistances$Distance, type = "line", xlab = "Sample", ylab = "Distance between Ordinations")
#dev.off() |
6559ca132806366ad0fd7706f88ccd8592e3e28f | c5252a2e4fb25661599f0c514460166eac7af645 | /src/tutorial_bnlearn.R | bee50c3799e57d0cfc24c557cb8684f8d087e70e | [] | no_license | top-on/bayesian-nets | ce74d4a8df65970d40fee5235e76454f76a0dad3 | 49fd6e0091823c360713c0b1f9a9fab897449bf0 | refs/heads/master | 2021-10-08T08:35:54.189469 | 2018-12-09T21:39:34 | 2018-12-09T21:39:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 825 | r | tutorial_bnlearn.R | # TURORIAL ON BAYESIAN STATISTICS WITH R
# source: https://www.r-bloggers.com/bayesian-network-in-r-introduction/
library(bnlearn)
library(dplyr)
data(coronary)
# greedy learner of network structure
bn_df <- data.frame(coronary)
res <- hc(bn_df)
plot(res)
res2 <- mmhc(bn_df)
plot(res2)
# remove selected edge
res$arcs <- res$arcs[-which((res$arcs[,'from'] == "M..Work"
& res$arcs[,'to'] == "Family")),]
plot(res)
# fit model, given graph and data
fittedbn <- bn.fit(res, data = bn_df)
print(fittedbn$Proteins)
# query network:
cpquery(fittedbn, event = (Proteins == "<3"), evidence = (Smoking == "no"))
cpquery(fittedbn, event = (Proteins == "<3"),
evidence = (Smoking == "no" & Pressure == ">140"))
cpquery(fittedbn, event = (Pressure == ">140"), evidence = (Proteins == "<3"))
|
50e70cea9a1ede1c57cadf3106746c13363bca65 | eeb7441d2acd69b0db318b4d0ec9458675b0f418 | /ch3.R | 1e5d9cac599cfc8af0d864fa095e9c513ef98c93 | [] | no_license | yakneens/pagdur | 01d5adff619ddb2b3784dd6286c31ce0c4e5d0db | 58ed05d3415374120eed00fee1a007f0dab4e7a4 | refs/heads/master | 2022-02-09T12:28:41.647702 | 2016-04-28T14:40:03 | 2016-04-28T14:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,063 | r | ch3.R | library(RSQLite)
con=dbConnect(dbDriver("SQLite"), "SNPsmall")
animids = dbGetQuery(con, "select distinct animal from snps")
animids = as.vector(animids$animal)
hold = dbGetQuery(con, paste("select * from snps where animal = '", animids[1], "'", sep=""))
snpids = as.vector(dbGetQuery(con, "select distinct name from snpmap")[,1])
dbDisconnect(con)
first_snp = dbGetQuery(con, paste("select * from snps where snp='",snpids[1], "'",sep=""))
first_snp$allele1 = factor(first_snp$allele1)
first_snp$allele2 = factor(first_snp$allele2)
snp = data.frame(first_snp, genotype=factor(paste(first_snp$allele1, first_snp$allele2, sep=""),
levels=c("AA","AB", "BB")))
plot(snp$x, snp$y, col=snp$genotype, pch=as.numeric(snp$genotype),
xlab="x", ylab="y",
main=snp$snp[1], cex.main=0.9)
legend("bottomleft",paste(levels(snp$genotype), " (",summary(snp$genotype),")",sep=""),
col= 1:length(levels(snp$genotype)),
pch= 1:length(levels(snp$genotype)),
cex=0.7)
alleles=factor(c(as.character(snp$allele1), as.character(snp$allele2)), levels=c("A","B"))
summary(alleles) / sum(summary(alleles)) * 100
obs = summary(factor(snp$genotype))
hwal = summary(factor(c(as.character(snp$allele1), as.character(snp$allele2))))
hwal = hwal / sum(hwal)
exp = c(hwal[1]^2, 2 * hwal[1] * hwal[2], hwal[2]^2) * sum(obs)
names(exp) = c("AA","AB","BB")
xtot = sum((abs(obs-exp) - c(0.5, 1, 0.5))^2 / exp)
pval = 1 - pchisq(xtot, 1)
print(pval)
sumslides = matrix(NA, 83, 4)
rownames(sumslides) = animids
colnames(sumslides) = c ("-/-", "A/A", "A/B", "B/B")
numgeno = matrix(9, 54977, 83)
for (i in 1:83){
hold = dbGetQuery(con, paste("select * from snps where animal='",animids[i],"'",sep=""))
hold = data.frame(hold, genotype = factor(paste(hold$allele1,hold$allele2,sep=""), levels = c("--", "AA", "AB", "BB")))
hold = hold[order(hold$snp),]
sumslides[i, ] = summary(hold$genotype)
temp = hold$genotype
levels(temp) = c(9,0,1,2)
numgeno[,i] = as.numeric(as.character(temp))
# change to 9 genotypes under GC score cutoff
numgeno[which(hold$gcscore<0.6),i] = 9
}
rownames(numgeno) = hold$snp
colnames(numgeno) = animids
samplehetero=sumslides[,3] / (sumslides[,2] + sumslides[,3] + sumslides[,4])
up = mean(samplehetero) + 3 * sd(samplehetero)
down = mean(samplehetero) - 3 * sd(samplehetero)
hsout = length(which(samplehetero > up))
hsout = hsout + length(which(samplehetero < down))
plot(sort(samplehetero),1:83,col="blue",cex.main=0.9,
cex.axis=0.8,cex.lab=0.8,
ylab="sample",xlab="heterozygosity",
main=paste("Sample heterozygosity\nmean:",
round(mean(samplehetero),3)," sd:",
round(sd(samplehetero),3)),
sub=paste("mean: black line ",3,
"SD: red line number of outliers:",hsout),
cex.sub=0.8)
abline(v=mean(samplehetero))
abline(v=mean(samplehetero)-3*sd(samplehetero),col="red")
abline(v=mean(samplehetero)+3*sd(samplehetero),col="red")
animcor = cor(numgeno)
library("gplots")
hmcol = greenred(256)
heatmap(animcor,col = hmcol,symm = T,labRow = " ",labCol = " ", trace="none")
genotypes=read.table("SNPxSample.txt",
header=T,sep="\t",na.strings = "9",
colClasses = "factor")
dim(genotypes)
for (i in 1:length(genotypes[1,]))
levels(genotypes[,i])=c("AA","AB","BB",NA)
indexsnp = apply(genotypes, 1, function(x) length(which(is.na(x) == T)))
indexsnp = which(indexsnp == length(genotypes[1,]))
indexsample = apply(genotypes, 2, function(x) length(which(is.na(x) == T)))
indexsample = which(indexsample == length(genotypes[,1]))
genotypes = genotypes[-indexsnp,]
weight=rnorm(83,mean=50,sd=10)
plot(density(weight),col="blue",main="Density plot of weights")
abline(v=mean(weight),col="red")
lines(density(rnorm(83000,mean=50,sd=10)),col="green",lty=2)
singlesnp = function(trait, snp){
if (length(levels(snp)) > 1) lm(trait~snp)
else NA
}
results = apply(genotypes, 1, function(x) singlesnp(weight, factor(t(x))))
pvalfunc = function(model){
if(class(model)=="lm") anova(model)[[5]][1]
else NA
}
pvals = lapply(results, function(x) pvalfunc(x))
names(results) = row.names(genotypes)
pvals = data.frame(snp = row.names(genotypes), pvalue=unlist(pvals))
index=sort(pvals$pvalue,index.return=T)[[2]][1:5]
estimates=NULL
for (i in 1:5){
estimates = rbind(estimates, coefficients(summary(results[[index[i]]])))
}
estimates = cbind(rep(c("AA mean","AB dev","BB dev"), 5), estimates, rep(names(results)[index], each=3))
estimates = data.frame(estimates)
names(estimates) = c("genotype","effect","stderror","t-value","p-value","snp")
for (i in 2:5){
estimates[,i] = signif(as.numeric(as.character(estimates[,i])), 2)
}
print(estimates)
map=dbGetQuery(con, paste("select * from snpmap",sep=""))
merged=merge(pvals,map,by.x=1,by.y=1)
plot(merged$position[which(merged$chromosome==1)],
-log(merged$pvalue[which(merged$chromosome==1)]),
xlab="map position",ylab="-log odds",
col="blue",pch=20,main="Chromosome 1")
abline(h=-log(0.01),col="red")
length(which(pvals$pvalue<0.01))
length(which(pvals$pvalue<0.01/length(pvals$pvalue)))
sort(pvals$pvalue)[1:5]
|
8b6c2a6293b2520dd3db938055ef480a2fb5e03e | 79956e33b152bbdf55e631b31eeb375f268976b6 | /global.R | ba5f63d6a58474396dcf34a2540047d4305a3cd8 | [] | no_license | sakthivelan89/RShinyTwitter | 834544e6f1d5bcaeab652f8186684f97021ce890 | 9ed54e42ce4e33c752d8a216072ebf1e81f1bb32 | refs/heads/master | 2020-07-22T01:20:16.020519 | 2017-06-14T14:49:05 | 2017-06-14T14:49:05 | 94,339,936 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 169 | r | global.R | library(shiny)
library(xtable)
if (!require(twitteR)) {
stop("This app requires the twitteR package. To install it, run 'install.packages(\"twitteR\")'.\n")
}
|
b022c5c82f4466a501b9021eccade80baeff8188 | b1c0f88b081a05d267e9570f264fda772546d879 | /lib/Optimization/Algorithms/MetaHeuristics/GeneticAlgorithm/JSSOperators/HeuristicChromosome.R | 7c03e09737f0153e7fe739acf810727c88f8a5a5 | [] | no_license | pedabreu/OptimizationFramework | 71adf7f46025069b08b8187ca144038a2c464d6b | d04fc81eebdfd9e2cceb6d4df98d522161ba7ebb | refs/heads/master | 2020-12-31T07:19:00.219609 | 2017-03-29T00:07:20 | 2017-03-29T00:07:20 | 86,566,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 939 | r | HeuristicChromosome.R | setConstructorS3("HeuristicChromosome",function()
{
extend(Chromosome(),"HeuristicChromosome")
})
setMethodS3("calculateFitness","HeuristicChromosome", function(this,instance,...) {
## sched <- Schedule()
## sched <- sched$setInstance(instance)
heuristicPL <-this$genes
sched <- heuristicPL$generateSchedule(instance)
##sched$GT(heuristicPL)
fit <- sched$makespan()
this$fitness <- fit
fit
})
setMethodS3("generateRandom","HeuristicChromosome", function(this,
instance,...) {
## nrjobs <- instance$nrJobs()
## gene <- array(1:nrjobs,dim=c(nrjobs,instance$nrMachines()))
## apply(gene,2,sample)
gene <- this$genes # PartialPriorityList()
randomGeneration(gene,instance)
this$genes <- gene
})
#
# setMethodS3("print","HeuristicChromosome", function(this,...) {
#
# print("Chromosome:\n")
#
#
# })
|
5c47c23b52636358d03865a8024b8a0bb60514ef | 5a23bf3c360eba7ab72e4f2584a1dccbb8eb6243 | /Figure_3.R | 65c0cdb0c40ac838342ac699a21e501d9ae7554d | [] | no_license | QYD720/Colonic-microbiota-is-associated-with-inflammation-and-host-epigenomic-alterations-in-ibd | 24b215ea27523ae3fe92c7c7982bbb0195367687 | 218c3286f0c8e3e08977916ca981d5c86f5c62c1 | refs/heads/master | 2022-04-09T14:34:49.955390 | 2020-02-19T15:09:50 | 2020-02-19T15:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,226 | r | Figure_3.R | library("ggplot2")
library("plyr")
library("ape")
library("ade4")
library("made4")
source("heatplot_generic.R")
library("reshape2")
library("gridExtra")
library("phyloseq")
library(dendextend)
library(phyloseq)
library(xlsx)
## GGplot Colors
gg_color_hue <- function(n) {
hues = seq(15, 375, length = n + 1)
hcl(h = hues, l = 65, c = 100)[1:n]
}
## 27 samples for mRNA
mRNA = c("sCD04a","sCD04i","sCD18a","sCD18i","sCD24a","sCD24i","sCD42a","sCD42i",
"sCD55a","sCD55i","sHT50.1","sHT50.2","sHT53.1","sHT53.2","sUC03a","sUC03i",
"sUC101a.2","sUC101i.1","sUC101i.2","sUC17a.T1","sUC17i.T1","sUC68a","sUC68i",
"sUC75a","sUC75i","sUC93a","sUC93i")
## Reading in tables
mapping = readRDS("SIRG_DADA2_mapping.RDS")
raw_cnts = readRDS("SIRG_DADA2_nc.nh.counts.RDS")
cnts_filt10 = raw_cnts[apply(raw_cnts>0,1,sum)>=round(ncol(raw_cnts)*0.05),]
tree = read.tree("SIRG_biopsy_DADA2.final.nohuman.phylo.tree")
mothur = read.table("SIRG_biopsy_DADA2.mothur.txt",header=TRUE,sep="\t",row.names = 1)
spingo = read.table("SIRG_biopsy_DADA2.final.spingo.7.txt",header=TRUE,sep="\t",row.names=1)
## Relative Abundance tables
cnts_filt10_prop = prop.table(as.matrix(cnts_filt10),2)
cnts_filt10_prop = cnts_filt10_prop * 100
### RSVs present in at least 10% of samples
#### Unifrac distances in PhyloSeq
OTU = otu_table(cnts_filt10_prop,taxa_are_rows =TRUE)
map_data = sample_data(mapping) #For PhyloSeq
physeq = phyloseq(OTU,map_data,tree)
braycurtis_dist10 = phyloseq::distance(physeq,method="bray")
braycurtis_pc10_prop = pcoa(braycurtis_dist10)
#### Preparing Hierarchical clustering ####
fac = as.factor(mapping$Description2)
########################
##### Bray Curtis ######
########################
par(oma=c(3,3,3,5))
#pdf(file="Figure_3_raw_v2.pdf",paper="A4r",width=11,height=8.5,onefile=TRUE,useDingbats=FALSE)
BC_OTUtree = heatplot_editd(cnts_filt10_prop,dist_in = braycurtis_dist10,scale='none',
method='ward.D2',
classvec=fac,
classvecCol = c("#1e8bc3","#1e8bc3","#C30000","#ffb90f","#ffb90f"),
returnSampleTree = TRUE)
library(dendextend)
BC_OTUtree_order = order.dendrogram(BC_OTUtree)
BC_OTUtree_samples = rownames(mapping)[BC_OTUtree_order]
BC_tree_clusters = cutree(BC_OTUtree,k=10) #Cut for 10 clusters, number determined with DynanmicTreeCut R package
BC_clusters = data.frame(clusters=BC_tree_clusters)
BC_clusters$clusters = factor(BC_clusters$clusters,
levels=c("1","2","3","4","5","6","7","8","9","10"))
cluster_cols = c("#F8766D","#00BFC4","#A3A500","#E76BF3","#8B4513","#D89000","#00B0F6","#FF62BC","#00BF7D","#9590FF")
heatplot_editd(cnts_filt10_prop,dist_in = braycurtis_dist10,scale='none',
method='ward.D2',
classvec=BC_clusters$clusters,
classvecCol = cluster_cols,
main="BrayCurtis Heatplot at OTU ward D2 with clusters")
######################### Family Counts ##################################
treeorder = labels(BC_OTUtree)
family = data.frame(mothur[rownames(raw_cnts),"Family"],raw_cnts)
rownames(family) = NULL
colnames(family)[1] = "Family"
family_con = ddply(family,"Family",numcolwise(sum))
rownames(family_con) = family_con[,1]
family_con = family_con[,-1]
family_con_prop = prop.table(as.matrix(family_con),2)
family_con_prop = family_con_prop * 100
tobesummed = rownames(family_con_prop)[apply(family_con_prop,1,mean)<1]
family_prop_tree = data.frame(as.character(rownames(family_con_prop)),family_con_prop)
colnames(family_prop_tree)[1] = "Family"
family_prop_tree$Family = as.character(family_prop_tree$Family)
tobesummed = tobesummed[-9]
family_prop_tree[tobesummed,"Family"] = "Other"
family_con_prop_tree_summed = ddply(family_prop_tree,"Family",numcolwise(sum))
rownames(family_con_prop_tree_summed) = family_con_prop_tree_summed$Family
family_con_prop_tree_summed = family_con_prop_tree_summed[,treeorder]
######################### Phylum Counts ##################################
phylum = data.frame(mothur[rownames(raw_cnts),"Phylum"],raw_cnts)
rownames(phylum) = NULL
colnames(phylum)[1] = "Phylum"
phylum_con = ddply(phylum,"Phylum",numcolwise(sum))
rownames(phylum_con) = phylum_con[,1]
phylum_con = phylum_con[,-1]
phylum_con_prop = prop.table(as.matrix(phylum_con),2)
phylum_con_prop = phylum_con_prop * 100
sort(apply(phylum_con_prop,1,sum),decreasing=TRUE) #Which phylym is highest
################### Reorder family table ###############################
taxa_lu = unique(mothur[,c("Family","Phylum")])
taxa_lu = taxa_lu[!taxa_lu$Family=="unclassified",]
rownames(taxa_lu) = taxa_lu$Family
phyla_order = as.character(taxa_lu[rownames(family_con_prop_tree_summed),"Phylum"])
phyla_order_sub = gsub("Firmicutes",1,phyla_order)
phyla_order_sub = gsub("Bacteroidetes",2,phyla_order_sub)
phyla_order_sub = gsub("Proteobacteria",3,phyla_order_sub)
phyla_order_sub = gsub("Actinobacteria",4,phyla_order_sub)
phyla_order_sub[7] = "5" #Replace NA from indexing with Other with position
phyla_order_sub[12] = "6" #Replace NA from indexing with Other with position
as.numeric(phyla_order_sub)
family_phyla = data.frame(phyla_order_sub,family_con_prop_tree_summed)
family_phyla = family_phyla[order(family_phyla$phyla_order),]
family_phyla2 = family_phyla[,-1]
apply(family_phyla2,1,sum) #Manually specify index based on what families you want in what order
family_phyla3 = family_phyla2[c(3,4,2,1,5,7,6,8,9,10,11,12),]
family_phyla3["Family"] = rownames(family_phyla3)
data.df = melt(family_phyla3,id.vars = "Family")
data.df$Family = factor(data.df$Family,levels=rev(rownames(family_phyla3)))
ggplot(data.df, aes(x = variable, y = value, fill = Family)) + geom_bar(stat = "identity",width=1) +
scale_fill_manual(values = rev(c("red2","darkred","firebrick1","tomato2",
"blue1","darkslategray2","dodgerblue3",
"forestgreen","darkolivegreen3","goldenrod",
"grey","black")))+theme_classic()
|
ddb0bd511a298c281ae84716b6d25c019774fe80 | 8750116c64b8d9c6c0f4fba163f0a1fd31fa56c9 | /R/helpers.R | 920ac76007d3ed90044e02398b491c5c31bc8639 | [
"BSD-2-Clause"
] | permissive | statunizaga/isotree | ead2e9b97a21ff50cbac9b4df1af777093c577a4 | d4dca4e77f69a5f9018d0a13cdfa1ad29dae1f82 | refs/heads/master | 2023-06-13T23:25:53.690208 | 2021-07-17T15:00:25 | 2021-07-17T15:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,688 | r | helpers.R | check.pos.int <- function(var, name) {
if (NROW(var) != 1 || var < 1) {
stop(paste0("'", name, "' must be a positive integer."))
}
}
check.str.option <- function(option, name, allowed) {
if (NROW(option) != 1 || !(option %in% allowed)) {
stop(paste0("'", name, "' must be one of '", paste(allowed, collapse = "', '"), "'."))
}
}
check.is.prob <- function(prob, name) {
if (NROW(prob) != 1 || prob < 0 || prob > 1) {
stop(paste0("'", name, "' must be a number between zero and one."))
}
}
check.is.bool <- function(var, name) {
if (NROW(var) != 1) stop(paste0("'", name, "' must be logical (boolean)."))
}
check.nthreads <- function(nthreads) {
if (NROW(nthreads) != 1) stop("'nthreads' must be one of 'auto' or a positive integer.")
if (is.null(nthreads)) {
nthreads <- 1
} else if (is.na(nthreads)) {
nthreads <- 1
} else if (nthreads == "auto") {
nthreads <- parallel::detectCores()
} else if (nthreads < 1) {
nthreads <- parallel::detectCores()
}
return(as.integer(nthreads))
}
check.categ.cols <- function(categ_cols) {
if (is.null(categ_cols) || !NROW(categ_cols))
return(NULL)
categ_cols <- as.integer(categ_cols)
if (anyNA(categ_cols))
stop("'categ_cols' cannot contain missing values.")
if (any(categ_cols < 1))
stop("'categ_cols' contains invalid column indices.")
if (any(duplicated(categ_cols)))
stop("'categ_cols' contains duplicted entries.")
categ_cols <- sort.int(categ_cols)
return(categ_cols)
}
check.is.1d <- function(var, name) {
if (NCOL(var) > 1) {
stop(paste0("'", name, "' must be a 1-d numeric vector."))
}
}
cast.df.alike <- function(df) {
if (inherits(df, c("data.table", "tibble")))
df <- as.data.frame(df)
return(df)
}
get.types.dmat <- function() {
return(c("matrix"))
}
get.types.spmat <- function(allow_csr = FALSE, allow_csc = TRUE, allow_vec = FALSE) {
outp <- character()
if (allow_csc) outp <- c(outp, "dgCMatrix", "matrix.csc")
if (allow_csr) outp <- c(outp, "dgRMatrix", "matrix.csr")
if (allow_vec && allow_csr) outp <- c(outp, "dsparseVector")
return(outp)
}
cast.df.col.to.num <- function(cl) {
if (inherits(cl, "factor"))
cl <- as.character(cl)
return(as.numeric(cl))
}
## Validate a fitting dataset and flatten it into the plain-vector layout that
## the compiled backend consumes.
##
## df             : data.frame, dense matrix, or CSC sparse matrix
##                  (Matrix::dgCMatrix or SparseM::matrix.csc).
## sample_weights : optional per-row weights (length == nrow(df)).
## column_weights : optional per-column weights (length == ncol(df)).
## recode_categ   : for data.frames, re-factor categorical columns so level
##                  codes are compacted to the observed levels.
## categ_cols     : column indices to treat as categorical when 'df' is a
##                  matrix type; ignored (with a warning) for data.frames.
##
## Returns a list holding numeric data (X_num, or Xc/Xc_ind/Xc_indptr for
## sparse inputs), integer-coded categoricals (X_cat, NA -> -1L), level
## metadata, dimensions, and the coerced weights.
process.data <- function(df, sample_weights = NULL, column_weights = NULL, recode_categ = TRUE, categ_cols = NULL) {
    ## canonicalize near-equivalent input classes (package helper)
    df <- cast.df.alike(df)
    dmatrix_types <- get.types.dmat()
    spmatrix_types <- get.types.spmat()
    supported_dtypes <- c("data.frame", dmatrix_types, spmatrix_types)
    if (!NROW(intersect(class(df), supported_dtypes)))
        stop(paste0("Invalid input data. Supported types are: ", paste(supported_dtypes, collapse = ", ")))
    if (NROW(df) < 2L) stop("Input data must have at least 2 rows.")
    ## coerce and shape-check the optional weights
    if (!is.null(sample_weights)) sample_weights <- as.numeric(sample_weights)
    if (!is.null(column_weights)) column_weights <- as.numeric(column_weights)
    if (NROW(sample_weights) && NROW(df) != NROW(sample_weights))
        stop(sprintf("'sample_weights' has different number of rows than df (%d vs. %d).",
                     NROW(df), NROW(sample_weights)))
    if (NROW(column_weights) && NCOL(df) != NROW(column_weights))
        stop(sprintf("'column_weights' has different dimension than number of columns in df (%d vs. %d).",
                     NCOL(df), NROW(column_weights)))
    ## data.frames carry their own column types, so explicit 'categ_cols'
    ## only applies to matrix-like inputs
    if (!is.null(categ_cols) && ("data.frame" %in% class(df))) {
        warning("'categ_cols' is ignored when passing a data.frame as input.")
        categ_cols <- NULL
    }
    if (ncol(df) < 1L)
        stop("'df' has no columns.")
    ## output skeleton; only the fields relevant to the input type get filled
    outp <- list(X_num = numeric(),
                 X_cat = integer(),
                 ncat = integer(),
                 cols_num = c(),
                 cols_cat = c(),
                 cat_levs = c(),
                 Xc = numeric(),
                 Xc_ind = integer(),
                 Xc_indptr = integer(),
                 nrows = as.integer(NROW(df)),
                 ncols_num = as.integer(NCOL(df)),
                 ncols_cat = as.integer(0L),
                 categ_cols = NULL,
                 categ_max = integer(),
                 sample_weights = unname(as.numeric(sample_weights)),
                 column_weights = unname(as.numeric(column_weights))
                 )
    avoid_sparse_sort <- FALSE
    ## ---- user-designated categorical columns (matrix inputs only) ----
    if (NROW(categ_cols)) {
        cols_num <- setdiff(1L:ncol(df), categ_cols)
        if (inherits(df, c("data.frame", "matrix", "dgCMatrix"))) {
            X_cat <- df[, categ_cols, drop=FALSE]
            df <- df[, cols_num, drop=FALSE]
        } else if (inherits(df, "matrix.csc")) {
            ## SparseM path: shift indices to 0-based, sort within columns,
            ## then split categorical vs. numeric columns via the C++ helpers
            nrows <- nrow(df)
            df@ja <- df@ja - 1L
            df@ia <- df@ia - 1L
            ## deep copy so the in-place sort below cannot mutate the
            ## caller's object
            df@ra <- deepcopy_vector(df@ra)
            avoid_sparse_sort <- TRUE
            call_sort_csc_indices(df@ra, df@ja, df@ia)
            X_cat <- call_take_cols_by_index_csc(df@ra,
                                                 df@ja,
                                                 df@ia,
                                                 categ_cols - 1L,
                                                 TRUE, nrows)
            X_cat <- X_cat[["X_cat"]]
            df_new <- call_take_cols_by_index_csc(df@ra,
                                                  df@ja,
                                                  df@ia,
                                                  cols_num - 1L,
                                                  FALSE, nrows)
            ## back to SparseM's 1-based indexing
            df@ra <- df_new[["Xc"]]
            df@ja <- df_new[["Xc_ind"]] + 1L
            df@ia <- df_new[["Xc_indptr"]] + 1L
            df@dimension <- as.integer(c(nrows, NROW(cols_num)))
        } else {
            X_cat <- df[, categ_cols]
            df <- df[, cols_num]
        }
        ncols_cat <- ncol(X_cat)
        ## per-column maximum category code (assumes non-negative codes)
        categ_max <- as.integer(unname(apply(X_cat, 2, max, na.rm=TRUE)))
        if (inherits(X_cat, "sparseMatrix"))
            X_cat <- as.matrix(X_cat)
        X_cat <- as.integer(X_cat)
        ## -1L is the backend's missing-value marker for categoricals
        if (anyNA(X_cat))
            X_cat[is.na(X_cat)] <- -1L
        outp$X_cat <- X_cat
        outp$categ_cols <- categ_cols
        outp$categ_max <- categ_max
        outp$ncat <- categ_max + 1L
        outp$cols_num <- cols_num
        outp$ncols_num <- ncol(df)
        outp$ncols_cat <- ncols_cat
        ## no numeric columns left -> nothing more to process
        if (!ncol(df))
            return(outp)
    }
    ### Dense matrix
    if ( any(class(df) %in% dmatrix_types) ) {
        ## column-major flattening
        outp$X_num <- unname(as.numeric(df))
        outp$ncols_num <- ncol(df)
        return(outp)
    }
    ### Sparse matrix
    if ( any(class(df) %in% spmatrix_types) ) {
        if (inherits(df, "dgCMatrix")) {
            ### From package 'Matrix' (indices already 0-based)
            if (!NROW(df@x))
                stop("'df' has no non-zero entries.")
            outp$Xc <- df@x
            outp$Xc_ind <- df@i
            outp$Xc_indptr <- df@p
        } else {
            ### From package 'SparseM' (1-based, shifted to 0-based here)
            if (!NROW(df@ra))
                stop("'df' has no non-zero entries.")
            outp$Xc <- df@ra
            outp$Xc_ind <- df@ja - 1L
            outp$Xc_indptr <- df@ia - 1L
        }
        if (!avoid_sparse_sort) {
            ## the sort mutates its arguments in place, hence the deep copy
            ## when the slot is shared with the caller's object
            if (!inherits(df, "dgCMatrix"))
                outp$Xc <- deepcopy_vector(outp$Xc)
            call_sort_csc_indices(outp$Xc, outp$Xc_ind, outp$Xc_indptr)
        }
        outp$ncols_num <- ncol(df)
        return(outp)
    }
    ### Data Frame
    if ( "data.frame" %in% class(df) ) {
        dtypes_num <- c("numeric", "integer", "Date", "POSIXct")
        dtypes_cat <- c("character", "factor", "logical")
        supported_col_types <- c(dtypes_num, dtypes_cat)
        df_coltypes <- Reduce(c, sapply(df, class))
        if (any(!(df_coltypes %in% c(supported_col_types, "POSIXt")))) {
            stop(paste0("Input data contains unsupported column types. Supported types are ",
                        paste(supported_col_types, collapse = ", "), " - got the following: ",
                        paste(unique(df_coltypes[!(df_coltypes %in% supported_col_types)]), collapse = ", ")))
        }
        ## numeric-like columns -> flattened numeric matrix
        if (any(df_coltypes %in% dtypes_num)) {
            is_num <- unname(as.logical(sapply(df, function(x) any(class(x) %in% dtypes_num))))
            outp$cols_num <- names(df)[is_num]
            outp$ncols_num <- as.integer(sum(is_num))
            outp$X_num <- unname(as.numeric(as.matrix(as.data.frame(lapply(df[, is_num, drop = FALSE], cast.df.col.to.num)))))
        } else { outp$ncols_num <- as.integer(0) }
        ## categorical-like columns -> 0-based integer codes, NA -> -1L
        if (any(df_coltypes %in% dtypes_cat)) {
            if (any("ordered" %in% df_coltypes))
                warning("Data contains ordered factors. These are treated as unordered.")
            is_cat <- unname(as.logical(sapply(df, function(x) any(class(x) %in% dtypes_cat))))
            outp$cols_cat <- names(df)[is_cat]
            outp$ncols_cat <- as.integer(sum(is_cat))
            if (recode_categ) {
                outp$X_cat <- as.data.frame(lapply(df[, is_cat, drop = FALSE], factor))
            } else {
                ## keep existing factor codings; only convert non-factors
                outp$X_cat <- as.data.frame(lapply(df[, is_cat, drop = FALSE],
                                                   function(x) if("factor" %in% class(x)) x else factor(x)))
            }
            outp$cat_levs <- lapply(outp$X_cat, levels)
            outp$ncat <- sapply(outp$cat_levs, NROW)
            outp$X_cat <- as.data.frame(lapply(outp$X_cat, function(x) ifelse(is.na(x), -1L, as.integer(x) - 1L)))
            outp$X_cat <- unname(as.integer(as.matrix(outp$X_cat)))
        }
        ## reorder column weights to match the numeric-first,
        ## categorical-second layout used internally
        if (NROW(outp$cols_num) && NROW(outp$cols_cat) && NROW(outp$column_weights)) {
            outp$column_weights <- c(outp$column_weights[names(df) %in% outp$cols_num],
                                     outp$column_weights[names(df) %in% outp$cols_cat])
        }
        return(outp)
    }
    ## unreachable if the supported-type check above is exhaustive
    stop("Unexpected error.")
}
## Validate a prediction-time dataset against the metadata captured at fit
## time and flatten it into the vectors the compiled backend consumes.
##
## df            : data.frame, dense matrix, sparse matrix (CSC and/or CSR as
##                 permitted by 'allow_csc'/'allow_csr'), or one row given as
##                 a 'dsparseVector'.
## metadata      : the fitted model's $metadata list (column names/counts,
##                 category levels, designated categorical columns).
## allow_csr/allow_csc : which sparse formats this prediction type accepts.
## enforce_shape : if TRUE, also trim extra trailing columns from dense
##                 inputs (CSR inputs are always trimmed).
##
## Returns a list with X_num/X_cat (dense), Xc/Xc_ind/Xc_indptr (CSC),
## Xr/Xr_ind/Xr_indptr (CSR), and nrows.
process.data.new <- function(df, metadata, allow_csr = FALSE, allow_csc = TRUE, enforce_shape = FALSE) {
    if (!NROW(df)) stop("'df' contains zero rows.")
    if (inherits(df, "sparseVector") && !inherits(df, "dsparseVector"))
        stop("Sparse vectors only allowed as 'dsparseVector' class.")
    ## input must carry at least as many columns as the model was fit to
    if (!inherits(df, "sparseVector")) {
        if ( NCOL(df) < (metadata$ncols_num + metadata$ncols_cat) )
            stop(sprintf("Input data contains fewer columns than expected (%d vs. %d)",
                         NCOL(df), (metadata$ncols_num + metadata$ncols_cat)))
    } else {
        if (df@length < (metadata$ncols_num + metadata$ncols_cat))
            stop(sprintf("Input data contains different columns than expected (%d vs. %d)",
                         df@length, (metadata$ncols_num + metadata$ncols_cat)))
    }
    df <- cast.df.alike(df)
    if (metadata$ncols_cat > 0L && !NROW(metadata$categ_cols) && !inherits(df, "data.frame"))
        stop("Model was fit to data.frame with categorical data, must pass a data.frame with new data.")
    dmatrix_types <- get.types.dmat()
    spmatrix_types <- get.types.spmat(allow_csr = allow_csr, allow_csc = allow_csc, TRUE)
    supported_dtypes <- c("data.frame", dmatrix_types, spmatrix_types)
    if (!NROW(intersect(class(df), supported_dtypes)))
        stop(paste0("Invalid input data. Supported types are: ", paste(supported_dtypes, collapse = ", ")))
    if (!allow_csr && inherits(df, c("RsparseMatrix", "matrix.csr")))
        stop("CSR matrix not supported for this prediction type. Try converting to CSC.")
    if (!allow_csc && inherits(df, c("CsparseMatrix", "matrix.csc")))
        stop("CSC matrix not supported for this prediction type. Try converting to CSR.")
    ## output skeleton; only the fields relevant to the input type get filled
    outp <- list(
        X_num = numeric(),
        X_cat = integer(),
        nrows = as.integer(NROW(df)),
        Xc = numeric(),
        Xc_ind = integer(),
        Xc_indptr = integer(),
        Xr = numeric(),
        Xr_ind = integer(),
        Xr_indptr = integer()
    )
    avoid_sparse_sort <- FALSE
    if (!NROW(metadata$categ_cols)) {
        ## No designated categorical columns: when the input has extra
        ## trailing columns, slice it down to the first 'ncols_num' ones
        if (((!NROW(metadata$cols_num) && !NROW(metadata$cols_cat)) || !inherits(df, "data.frame")) &&
            ( (inherits(df, "sparseVector") && df@length > metadata$ncols_num)  ||
              (!inherits(df, "sparseVector") && (ncol(df) > metadata$ncols_num)))
            && (enforce_shape || inherits(df, c("RsparseMatrix", "matrix.csr")))
            ) {
            if (inherits(df, c("matrix", "CsparseMatrix")) ||
                (!NROW(metadata$cols_num) && inherits(df, "data.frame"))) {
                df <- df[, 1L:metadata$ncols_num, drop=FALSE]
            } else if (inherits(df, "sparseVector")) {
                df <- df[1L:metadata$ncols_num]
            } else if (inherits(df, "RsparseMatrix")) {
                nrows <- nrow(df)
                avoid_sparse_sort <- TRUE
                call_sort_csc_indices(df@x, df@j, df@p)
                df_new <- call_take_cols_by_slice_csr(
                    df@x,
                    df@j,
                    df@p,
                    metadata$ncols_num,
                    FALSE
                )
                df@x <- df_new[["Xr"]]
                df@j <- df_new[["Xr_ind"]]
                df@p <- df_new[["Xr_indptr"]]
                df@Dim <- as.integer(c(nrows, metadata$ncols_num))
            } else if (inherits(df, "matrix.csr")) {
                ## FIX: 'nrows' was used below (when resetting @dimension)
                ## without ever being assigned in this branch, unlike every
                ## sibling branch, which raised "object 'nrows' not found"
                ## at runtime for SparseM CSR inputs with extra columns.
                nrows <- nrow(df)
                avoid_sparse_sort <- TRUE
                df@ja <- df@ja - 1L
                df@ia <- df@ia - 1L
                df@ra <- deepcopy_vector(df@ra)
                call_sort_csc_indices(df@ra, df@ja, df@ia)
                df_new <- call_take_cols_by_slice_csr(
                    df@ra,
                    df@ja,
                    df@ia,
                    metadata$ncols_num,
                    FALSE
                )
                df@ra <- df_new[["Xr"]]
                df@ja <- df_new[["Xr_ind"]] + 1L
                df@ia <- df_new[["Xr_indptr"]] + 1L
                df@dimension <- as.integer(c(nrows, metadata$ncols_num))
            } else if (inherits(df, "matrix.csc")) {
                ## NOTE(review): unlike the other SparseM branches, @ja is
                ## not shifted to 0-based before this call and the returned
                ## 'Xc_ind' is stored back without adding 1L -- confirm the
                ## C++ slice helper keeps row indices in the caller's base.
                df@ia <- df@ia - 1L
                nrows <- nrow(df)
                df_new <- call_take_cols_by_slice_csc(
                    df@ra,
                    df@ja,
                    df@ia,
                    metadata$ncols_num,
                    FALSE, nrows
                )
                df@ra <- df_new[["Xc"]]
                df@ja <- df_new[["Xc_ind"]]
                df@ia <- df_new[["Xc_indptr"]] + 1L
                df@dimension <- as.integer(c(nrows, metadata$ncols_num))
            } else if (!inherits(df, "data.frame")) {
                df <- df[, 1L:metadata$ncols_num]
            }
        }
    } else { ### has metadata$categ_cols
        ## Split the designated categorical columns out of the input,
        ## leaving 'df' with only the numeric columns
        if (!inherits(df, "sparseVector")) {
            nrows <- nrow(df)
            if (inherits(df, c("matrix", "data.frame", "dgCMatrix"))) {
                X_cat <- df[, metadata$categ_cols, drop=FALSE]
                df <- df[, metadata$cols_num, drop=FALSE]
            } else if (inherits(df, "dgRMatrix")) {
                avoid_sparse_sort <- TRUE
                call_sort_csc_indices(df@x, df@j, df@p)
                X_cat <- call_take_cols_by_index_csr(df@x,
                                                     df@j,
                                                     df@p,
                                                     metadata$categ_cols - 1L,
                                                     TRUE)
                X_cat <- X_cat[["X_cat"]]
                df_new <- call_take_cols_by_index_csr(df@x,
                                                      df@j,
                                                      df@p,
                                                      metadata$cols_num - 1L,
                                                      FALSE)
                df@x <- df_new[["Xr"]]
                df@j <- df_new[["Xr_ind"]]
                df@p <- df_new[["Xr_indptr"]]
                df@Dim <- as.integer(c(nrows, NROW(metadata$cols_num)))
            } else if (inherits(df, "matrix.csc")) {
                ## SparseM CSC: 0-based shift, deep copy + sort, then split
                avoid_sparse_sort <- TRUE
                df@ja <- df@ja - 1L
                df@ia <- df@ia - 1L
                df@ra <- deepcopy_vector(df@ra)
                call_sort_csc_indices(df@ra, df@ja, df@ia)
                X_cat <- call_take_cols_by_index_csc(df@ra,
                                                     df@ja,
                                                     df@ia,
                                                     metadata$categ_cols - 1L,
                                                     TRUE, nrows)
                X_cat <- X_cat[["X_cat"]]
                df_new <- call_take_cols_by_index_csc(df@ra,
                                                      df@ja,
                                                      df@ia,
                                                      metadata$cols_num - 1L,
                                                      FALSE, nrows)
                df@ra <- df_new[["Xc"]]
                df@ja <- df_new[["Xc_ind"]] + 1L
                df@ia <- df_new[["Xc_indptr"]] + 1L
                df@dimension <- as.integer(c(nrows, NROW(metadata$cols_num)))
            } else if (inherits(df, "matrix.csr")) {
                ## SparseM CSR: same treatment, row-major helpers
                avoid_sparse_sort <- TRUE
                df@ja <- df@ja - 1L
                df@ia <- df@ia - 1L
                df@ra <- deepcopy_vector(df@ra)
                call_sort_csc_indices(df@ra, df@ja, df@ia)
                X_cat <- call_take_cols_by_index_csr(df@ra,
                                                     df@ja,
                                                     df@ia,
                                                     metadata$categ_cols - 1L,
                                                     TRUE)
                X_cat <- X_cat[["X_cat"]]
                df_new <- call_take_cols_by_index_csr(df@ra,
                                                      df@ja,
                                                      df@ia,
                                                      metadata$cols_num - 1L,
                                                      FALSE)
                df@ra <- df_new[["Xr"]]
                df@ja <- df_new[["Xr_ind"]] + 1L
                df@ia <- df_new[["Xr_indptr"]] + 1L
                df@dimension <- as.integer(c(nrows, NROW(metadata$cols_num)))
            } else {
                X_cat <- df[, metadata$categ_cols]
                df <- df[, metadata$cols_num]
            }
        } else { ### sparseVector (single row)
            X_cat <- matrix(df[metadata$categ_cols], nrow=1L)
            nrows <- 1L
            df <- df[metadata$cols_num]
        }
        ## codes unseen at fit time (above the per-column max) -> missing
        X_cat[sweep(X_cat, 2, metadata$categ_max, ">")] <- -1L
        if (!inherits(X_cat, "matrix"))
            X_cat <- as.matrix(X_cat)
        X_cat <- as.integer(X_cat)
        if (anyNA(X_cat))
            X_cat[is.na(X_cat)] <- -1L
        outp$X_cat <- X_cat
        outp$nrows <- nrows
    }
    ## a data.frame whose columns were selected by position (no stored names)
    ## is converted straight to a numeric matrix
    if (inherits(df, "data.frame") &&
        (NROW(metadata$categ_cols) ||
         (!NROW(metadata$cols_num) && !NROW(metadata$cols_cat)))
        ) {
        df <- as.data.frame(lapply(df, cast.df.col.to.num))
        df <- as.matrix(df)
    }
    if (inherits(df, "data.frame")) {
        ## name-based matching against the columns seen at fit time
        if (NROW(setdiff(c(metadata$cols_num, metadata$cols_cat), names(df)))) {
            missing_cols <- setdiff(c(metadata$cols_num, metadata$cols_cat), names(df))
            stop(paste0(sprintf("Input data is missing %d columns - head: ", NROW(missing_cols)),
                        paste(head(missing_cols, 3), collapse = ", ")))
        }
        if (!NROW(metadata$cols_num) && !NROW(metadata$cols_cat)) {
            if (NCOL(df) != metadata$ncols_num)
                stop(sprintf("Input data has %d columns, but model was fit to data with %d columns.",
                             NCOL(df), (metadata$ncols_num + metadata$ncols_cat)))
            outp$X_num <- unname(as.numeric(as.matrix(as.data.frame(lapply(df, cast.df.col.to.num)))))
        } else {
            if (metadata$ncols_num > 0L) {
                outp$X_num <- unname(as.numeric(as.matrix(as.data.frame(lapply(df[, metadata$cols_num, drop = FALSE], cast.df.col.to.num)))))
            }
            if (metadata$ncols_cat > 0L) {
                ## re-encode against the training levels; unseen levels / NA
                ## become the backend's -1L missing marker
                outp$X_cat <- df[, metadata$cols_cat, drop = FALSE]
                outp$X_cat <- as.data.frame(mapply(function(cl, levs) factor(cl, levs),
                                                   outp$X_cat, metadata$cat_levs,
                                                   SIMPLIFY = FALSE, USE.NAMES = FALSE))
                outp$X_cat <- as.data.frame(lapply(outp$X_cat, function(x) ifelse(is.na(x), -1L, as.integer(x) - 1L)))
                outp$X_cat <- unname(as.integer(as.matrix(outp$X_cat)))
            }
        }
    } else if (inherits(df, "dsparseVector")) {
        if (allow_csr) {
            ## emit as a one-row CSR matrix (indices must be sorted)
            df@x <- df@x[order(df@i)]
            df@i <- df@i[order(df@i)]
            outp$Xr <- as.numeric(df@x)
            outp$Xr_ind <- as.integer(df@i - 1L)
            outp$Xr_indptr <- as.integer(c(0L, NROW(df@x)))
        } else {
            outp$X_num <- as.numeric(df)
        }
        outp$nrows <- 1L
    } else {
        ## a bare numeric vector is treated as a single row
        if ("numeric" %in% class(df) && is.null(dim(df)))
            df <- matrix(df, nrow = 1)
        if (NCOL(df) < metadata$ncols_num)
            stop(sprintf("Input data has %d numeric columns, but model was fit to data with %d numeric columns.",
                         NCOL(df), metadata$ncols_num))
        if (!any(class(df) %in% spmatrix_types)) {
            outp$X_num <- as.numeric(df)
        } else {
            if (inherits(df, "dgCMatrix")) {
                ### From package 'Matrix' (0-based)
                outp$Xc <- df@x
                outp$Xc_ind <- df@i
                outp$Xc_indptr <- df@p
                if (!avoid_sparse_sort)
                    call_sort_csc_indices(outp$Xc, outp$Xc_ind, outp$Xc_indptr)
            } else if (inherits(df, "dgRMatrix")) {
                ### From package 'Matrix' (0-based)
                outp$Xr <- df@x
                outp$Xr_ind <- df@j
                outp$Xr_indptr <- df@p
                if (!avoid_sparse_sort)
                    call_sort_csc_indices(outp$Xr, outp$Xr_ind, outp$Xr_indptr)
            } else if (inherits(df, "matrix.csc")) {
                ### From package 'SparseM' (1-based -> 0-based)
                outp$Xc <- df@ra
                outp$Xc_ind <- df@ja - 1L
                outp$Xc_indptr <- df@ia - 1L
                if (!avoid_sparse_sort) {
                    ## sort mutates in place; copy to protect the caller
                    outp$Xc <- deepcopy_vector(outp$Xc)
                    call_sort_csc_indices(outp$Xc, outp$Xc_ind, outp$Xc_indptr)
                }
            } else if (inherits(df, "matrix.csr")) {
                ### From package 'SparseM' (1-based -> 0-based)
                outp$Xr <- df@ra
                outp$Xr_ind <- df@ja - 1L
                outp$Xr_indptr <- df@ia - 1L
                if (!avoid_sparse_sort) {
                    outp$Xr <- deepcopy_vector(outp$Xr)
                    call_sort_csc_indices(outp$Xr, outp$Xr_ind, outp$Xr_indptr)
                }
            } else {
                stop("Invalid input type.")
            }
        }
    }
    return(outp)
}
## Write the backend's imputed values back into an object shaped like the
## original input.
##
## imputed_num / imputed_cat : imputations returned by the backend; negative
##                             categorical codes mean "not imputed" and are
##                             mapped to NA.
## df    : the object that was passed for imputation (any supported type).
## model : fitted isolation_forest object (column-layout metadata).
## pdata : the processed-data list produced from 'df' (index/pointer vectors).
##
## Returns an object of the same class as 'df' with imputed values filled in.
reconstruct.from.imp <- function(imputed_num, imputed_cat, df, model, pdata) {
    if (NROW(imputed_cat))
        imputed_cat[imputed_cat < 0L] <- NA_integer_
    if (inherits(df, "RsparseMatrix")) {
        outp <- df
        if (!NROW(model$metadata$categ_cols) && ncol(df) == model$metadata$ncols_num) {
            ## all columns numeric and none sliced off -> direct replacement
            outp@x <- imputed_num
        } else if (!NROW(model$metadata$categ_cols)) {
            outp@x <- deepcopy_vector(outp@x)
            call_reconstruct_csr_sliced(
                outp@x, outp@p,
                imputed_num, pdata$Xr_indptr,
                nrow(df)
            )
        } else {
            outp@x <- deepcopy_vector(outp@x)
            call_reconstruct_csr_with_categ(
                outp@x, outp@j, outp@p,
                imputed_num, pdata$Xr_ind, pdata$Xr_indptr,
                imputed_cat,
                model$metadata$cols_num-1L, model$metadata$categ_cols-1L,
                nrow(df), ncol(df)
            )
        }
        return(outp)
    } else if (inherits(df, "CsparseMatrix")) {
        outp <- df
        if (!NROW(model$metadata$categ_cols)) {
            outp@x <- imputed_num
        } else {
            outp[, model$metadata$categ_cols] <- matrix(imputed_cat, nrow=nrow(df))
            copy_csc_cols_by_index(
                outp@x,
                outp@p,
                imputed_num,
                pdata$Xc_indptr,
                model$metadata$cols_num - 1L
            )
        }
        return(outp)
    } else if (inherits(df, "matrix.csr")) {
        outp <- df
        if (!NROW(model$metadata$categ_cols) && ncol(df) == model$metadata$ncols_num) {
            outp@ra <- imputed_num
        } else if (!NROW(model$metadata$categ_cols)) {
            outp@ra <- deepcopy_vector(outp@ra)
            call_reconstruct_csr_sliced(
                outp@ra, outp@ia-1L,
                imputed_num, pdata$Xr_indptr,
                nrow(df)
            )
        } else {
            outp@ra <- deepcopy_vector(outp@ra)
            call_reconstruct_csr_with_categ(
                outp@ra, outp@ja-1L, outp@ia-1L,
                imputed_num, pdata$Xr_ind, pdata$Xr_indptr,
                imputed_cat,
                model$metadata$cols_num-1L, model$metadata$categ_cols-1L,
                nrow(df), ncol(df)
            )
        }
        return(outp)
    } else if (inherits(df, "matrix.csc")) {
        outp <- df
        if (!NROW(model$metadata$categ_cols)) {
            outp@ra <- imputed_num
        } else {
            ## rebuild the CSC structure with categorical columns assigned,
            ## then copy the numeric imputations column by column
            df_new <- assign_csc_cols(
                pdata$Xc,
                pdata$Xc_ind,
                pdata$Xc_indptr,
                imputed_cat,
                model$metadata$categ_cols - 1L,
                model$metadata$cols_num - 1L,
                nrow(df)
            )
            copy_csc_cols_by_index(
                df_new$Xc,
                df_new$Xc_indptr,
                imputed_num,
                pdata$Xc_indptr,
                model$metadata$cols_num - 1L
            )
            outp@ra <- df_new$Xc
            outp@ja <- df_new$Xc_ind + 1L
            outp@ia <- df_new$Xc_indptr + 1L
            outp@dimension <- as.integer(c(nrow(df), length(df_new$Xc_indptr)-1L))
        }
        return(outp)
    } else if (inherits(df, "sparseVector")) {
        if (!NROW(model$metadata$categ_cols) && df@length == model$metadata$ncols_num) {
            df@x <- imputed_num
        } else if (!NROW(model$metadata$categ_cols)) {
            df@x[1L:NROW(imputed_num)] <- imputed_num
        } else {
            df[model$metadata$cols_num] <- imputed_num
            df[model$metadata$categ_cols] <- imputed_cat
        }
        ## FIX: this branch previously fell through without returning 'df',
        ## so the function's value was the RHS of the last assignment (a
        ## bare numeric/integer vector) instead of the reconstructed
        ## sparse vector.
        return(df)
    } else if (!inherits(df, "data.frame")) {
        ## dense matrix
        if (!NROW(model$metadata$categ_cols) && (ncol(df) == model$metadata$ncols_num)) {
            return(matrix(imputed_num, nrow = NROW(df)))
        } else if (!NROW(model$metadata$categ_cols)) {
            df[, 1L:model$metadata$ncols_num] <- matrix(imputed_num, nrow = NROW(df))
            return(df)
        } else {
            df[, model$metadata$categ_cols] <- matrix(imputed_cat, nrow = NROW(df))
            if (model$metadata$ncols_num)
                df[, model$metadata$cols_num] <- matrix(imputed_num, nrow = NROW(df))
            return(df)
        }
    } else {
        ## data.frame: rebuild numeric and categorical sub-frames, then
        ## assign them back into the original columns
        df_num <- as.data.frame(matrix(imputed_num, nrow = NROW(df)))
        df_cat <- as.data.frame(matrix(imputed_cat, nrow = NROW(df)))
        if (!NROW(model$metadata$categ_cols)) {
            ## NOTE(review): factor(x, labels = levs) requires every level
            ## code to occur in 'x' (labels must match the number of
            ## observed levels) -- confirm this holds for imputed outputs.
            df_cat <- as.data.frame(mapply(function(x, levs) factor(x, labels = levs),
                                           df_cat + 1L, model$metadata$cat_levs,
                                           SIMPLIFY = FALSE))
        }
        if (NROW(model$metadata$categ_cols)) {
            df[, model$metadata$categ_cols] <- df_cat
            if (model$metadata$ncols_num)
                df[, model$metadata$cols_num] <- df_num
        } else if (!NROW(model$metadata$cols_num)) {
            df[, 1L:model$metadata$ncols_num] <- df_num
        } else {
            if (model$metadata$ncols_num)
                df[, model$metadata$cols_num] <- df_num
            if (model$metadata$ncols_cat)
                df[, model$metadata$cols_cat] <- df_cat
        }
        return(df)
    }
}
## Serialize a fitted model's metadata and hyperparameters into a plain,
## nested list (suitable for writing to JSON and re-importing with
## take.metadata). Fields marked "stored in c++" are duplicated in the
## compiled model object.
export.metadata <- function(model) {
    ## Two-level character vectors holding "TRUE"/"FALSE" are collapsed
    ## back to logical so round-tripping preserves logical columns.
    force.to.bool <- function(v) {
        if (NROW(v) == 2 && ("TRUE" %in% v) && ("FALSE" %in% v))
            v <- as.logical(v)
        return(v)
    }

    data_info <- list(
        ncols_numeric = model$metadata$ncols_num, ## stored in c++
        ncols_categ = model$metadata$ncols_cat,   ## stored in c++
        cols_numeric = as.list(model$metadata$cols_num),
        cols_categ = as.list(model$metadata$cols_cat),
        cat_levels = unname(as.list(model$metadata$cat_levs)),
        categ_cols = model$metadata$categ_cols,
        categ_max = model$metadata$categ_max
    )
    if (NROW(data_info$cat_levels))
        data_info$cat_levels <- lapply(data_info$cat_levels, force.to.bool)

    model_info <- list(
        ndim = model$params$ndim,
        nthreads = model$nthreads,
        build_imputer = model$params$build_imputer
    )

    ## Hyperparameters are copied field-by-field from model$params
    ## ('random_seed' lives on the model object itself and is patched in
    ## below in a way that keeps a NULL entry instead of dropping it).
    param_fields <- c(
        "sample_size", "ntrees", "ntry", "max_depth", "ncols_per_tree",
        "prob_pick_avg_gain", "prob_pick_pooled_gain",
        "prob_split_avg_gain", "prob_split_pooled_gain",
        "min_gain", "missing_action", "new_categ_action",
        "categ_split_type", "coefs", "depth_imp", "weigh_imp_rows",
        "min_imp_obs", "random_seed", "all_perm", "coef_by_prop",
        "weights_as_sample_prob", "sample_with_replacement",
        "penalize_range", "weigh_by_kurtosis", "assume_full_distr"
    )
    params <- setNames(lapply(param_fields, function(nm) model$params[[nm]]),
                       param_fields)
    params["random_seed"] <- list(model$random_seed)

    list(data_info = data_info, model_info = model_info, params = params)
}
## Rebuild an 'isolation_forest' model skeleton from metadata exported by
## export.metadata. The compiled-side handles in $cpp_obj are left NULL and
## must be restored separately by the caller.
take.metadata <- function(metadata) {
    this <- list(
        params = list(
            sample_size = metadata$params$sample_size, ntrees = metadata$params$ntrees, ndim = metadata$model_info$ndim,
            ntry = metadata$params$ntry, max_depth = metadata$params$max_depth,
            ncols_per_tree = metadata$params$ncols_per_tree,
            prob_pick_avg_gain = metadata$params$prob_pick_avg_gain,
            prob_pick_pooled_gain = metadata$params$prob_pick_pooled_gain,
            prob_split_avg_gain = metadata$params$prob_split_avg_gain,
            prob_split_pooled_gain = metadata$params$prob_split_pooled_gain,
            min_gain = metadata$params$min_gain, missing_action = metadata$params$missing_action,
            new_categ_action = metadata$params$new_categ_action,
            categ_split_type = metadata$params$categ_split_type,
            all_perm = metadata$params$all_perm, coef_by_prop = metadata$params$coef_by_prop,
            weights_as_sample_prob = metadata$params$weights_as_sample_prob,
            sample_with_replacement = metadata$params$sample_with_replacement,
            penalize_range = metadata$params$penalize_range,
            weigh_by_kurtosis = metadata$params$weigh_by_kurtosis,
            coefs = metadata$params$coefs, assume_full_distr = metadata$params$assume_full_distr,
            build_imputer = metadata$model_info$build_imputer, min_imp_obs = metadata$params$min_imp_obs,
            depth_imp = metadata$params$depth_imp, weigh_imp_rows = metadata$params$weigh_imp_rows
        ),
        metadata = list(
            ncols_num = metadata$data_info$ncols_numeric,
            ncols_cat = metadata$data_info$ncols_categ,
            cols_num = unlist(metadata$data_info$cols_numeric),
            cols_cat = unlist(metadata$data_info$cols_categ),
            cat_levs = metadata$data_info$cat_levels,
            categ_cols = metadata$data_info$categ_cols,
            categ_max = metadata$data_info$categ_max
        ),
        random_seed = metadata$params$random_seed,
        nthreads = metadata$model_info$nthreads,
        cpp_obj = list(
            ptr = NULL,
            serialized = NULL,
            imp_ptr = NULL,
            imp_ser = NULL
        )
    )
    ## FIX: this previously accessed 'this$metadata$cat_levels', a field
    ## that does not exist (the list element is named 'cat_levs', and '$'
    ## partial matching cannot match a longer name), so the category-level
    ## list was never named after its columns.
    if (NROW(this$metadata$cat_levs))
        names(this$metadata$cat_levs) <- this$metadata$cols_cat
    ## drop the explicit-categorical fields entirely when unused
    if (!NROW(this$metadata$categ_cols)) {
        this$metadata$categ_cols <- NULL
        this$metadata$categ_max <- NULL
    }
    class(this) <- "isolation_forest"
    return(this)
}
|
f6e5ae634df8663a5a54442f440d004bf08fcfdf | 8d895fac78ba3472f622cccdc14a14877e312e07 | /GHCN/sept24.r | de8fb2c099e55c860136d2ebfcf4dc9288594275 | [] | no_license | ibrahim85/Cohen-McCreight | 4373cf29502b50bde2e5a8c33f60a0853f5a3ef7 | 578c8dc1f71f29f5c517cc913cdeed7270e71372 | refs/heads/master | 2021-01-18T05:19:19.804226 | 2015-04-06T21:26:28 | 2015-04-06T21:26:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,793 | r | sept24.r | require(reshape2)
## Monthly power-supply-position analysis for CGS stations.
## Expects 'Stationwise.rsav' and 'CGSmeta.rsav' in the working directory.
require(plyr)
load("Stationwise.rsav")
load("CGSmeta.rsav")
str(Stationwise)
## station codes load as factors; downstream string surgery needs characters
Stationwise$Stn_code <- as.character(Stationwise$Stn_code)
# ## fix DADRI
# whDadri <- which(Stationwise$Stn_code=='DADRI')
# Stationwise$Stn_code[whDadri] <-
# paste(Stationwise$Stn_code[whDadri],
# Stationwise$Plant_type[whDadri],sep='-')
#
# ## this is a sneak subset trick for later,
# ## will stuff values into here from the add and avg vars.
# dadri2Sub <- subset(Stationwise, Stn_code=='DADRI-II')
#
# whDadri2 <- which(Stationwise$Stn_code=='DADRI-II')
# Stationwise$Stn_code[whDadri2] <-
# paste('DADRI',Stationwise$Plant_type[whDadri2],sep='-')
#
# whDadriTPS <- which(Stationwise$Stn_code=='DADRI-TPS')
# dadriTpsSub <- subset(Stationwise, Stn_code=='DADRI-TPS')
#
# ## columns to add
# colsAdd <- c("MW", "Chandigarh", "Delhi", "HP", "Haryana",
# "JK", "Punjab", "Rajasthan", "Uttarakhand",
# "UP", "NR", "Year",'Mon')
# dts <- melt(dadriTpsSub[,colsAdd], id=c("Year",'Mon'))
# dts$uniqueMon <- paste(dts$Year,dts$Mon,sep='-')
# dtsAdd <- ddply(dts, .(uniqueMon, variable), summarize,
# sum=sum(value))
# addDf <- dcast(dtsAdd, uniqueMon ~ variable )
# addDf$Year <- laply(strsplit(addDf$uniqueMon,'-'),'[',1)
# addDf$Mon <- laply(strsplit(addDf$uniqueMon,'-'),'[',2)
# addDf <- addDf[sort(13*as.numeric(addDf$Year)+
# as.numeric(addDf$Mon), index.return=TRUE,
# dec=FALSE)$ix,]
#
# colsAvg <- c("RateOfSale", "PAFM", "Year",'Mon')
# dts <- melt(dadriTpsSub[,colsAvg], id=c("Year",'Mon'))
# dts$uniqueMon <- paste(dts$Year,dts$Mon,sep='-')
# dtsAvg <- ddply(dts, .(uniqueMon, variable), summarize,
# mean=mean(value))
# avgDf <- dcast(dtsAvg, uniqueMon ~ variable )
# avgDf$Year <- laply(strsplit(avgDf$uniqueMon,'-'),'[',1)
# avgDf$Mon <- laply(strsplit(avgDf$uniqueMon,'-'),'[',2)
# avgDf <- avgDf[sort(13*as.numeric(avgDf$Year)+
# as.numeric(avgDf$Mon), index.return=TRUE,
# dec=FALSE)$ix,]
# str(dadri2Sub)
#
# dadri2Sub[,c('Year','Mon')]==addDf[,c('Year','Mon')]
# dadri2Sub[,c('Year','Mon')]==avgDf[,c('Year','Mon')]
#
# dadri2Sub[,colsAvg] <- avgDf[,-1]
# dadri2Sub[,colsAdd] <- addDf[,-1]
# dadri2Sub$Stn_code <- 'DADRI-TPS'
#
# Stationwise <- subset(Stationwise, Stn_code!='DADRI-TPS')
# Stationwise2 <- rbind(Stationwise,dadri2Sub)
# save(Stationwise2, file='Stationwise2.rsav')
#
##'#####################################################################
## look at data from beneficiaries' perspective
## Reshape the station-wise table so each row is one (station, month,
## beneficiary) allocation; per-state allocation columns become the melted
## measure, everything else stays as an identifier.
idCols <- c('Stn_name', 'Stn_code', 'Fuel', 'Fueltype',
            'POSIXct', 'Date', 'Metric', 'Mon', 'Year',
            'RateOfSale', 'PAFM')
stateCols <- c('Chandigarh', 'Delhi', 'HP', 'Haryana', 'JK',
               'Punjab', 'Rajasthan', 'Uttarakhand', 'UP')
beneDf <- Stationwise[, c(idCols[1:9], stateCols, idCols[10:11])]
beneDfMelt <- melt(beneDf, id = idCols,
                   variable.name = "beneficiary",
                   value.name = "stnAlloc")
beneDfMelt$uniqueMon <- format(beneDfMelt$POSIXct, '%Y-%m')
##'##################################################################
## sum beneficiaries' monthly PSP (power supply position)
## over all stations
## PSP = total allocation per (month, beneficiary); 'sale' is the
## allocation-weighted revenue, so AvgRate below is the weighted mean rate.
beneMonPsp <- ddply( beneDfMelt, .(uniqueMon, beneficiary),
                    summarize, PSP=sum(stnAlloc) )
beneMonSale <- ddply( beneDfMelt, .(uniqueMon, beneficiary),
                    summarize, sale=sum(RateOfSale*stnAlloc) )
## NOTE(review): copying 'sale' across positionally assumes both ddply
## results share the same row order -- holds since both use identical
## grouping keys on the same data.
beneMonPsp$sale <- beneMonSale$sale
beneMonPsp$AvgRate <- beneMonPsp$sale/beneMonPsp$PSP
## alphabetical factor levels give stable legend/facet ordering
beneMonPsp$beneficiary <-
    factor(beneMonPsp$beneficiary,
           levels=sort(as.character(unique(beneMonPsp$beneficiary))) )
## mid-month timestamp so monthly values plot on a continuous time axis
beneMonPsp$POSIXct <-
    as.POSIXct(paste(beneMonPsp$uniqueMon,'15',sep='-'),'%Y-%m-%d')
ggplot(beneMonPsp, aes(x=POSIXct,y=PSP,color=beneficiary)) +
    geom_line()+ geom_point() +
    facet_wrap(~beneficiary, scales='free')
## analyze contribs
## Station-wise allocations to a single beneficiary (Delhi), Jul-Sep 2011
indivContribs <-
    subset(beneDfMelt, beneficiary=='Delhi' & uniqueMon %in% c('2011-07','2011-08','2011-09'))
ggplot( indivContribs, aes(x=POSIXct, y=stnAlloc, color=Stn_code) ) +
    geom_point() + geom_line() + facet_wrap(~Stn_code)
##'##################################################################
## sum beneficiaries' monthly PSP (power supply position)
## over all stations *by type*
## FIX: the grouping column was written as 'fuel', which does not exist in
## beneDfMelt (its columns are 'Fuel' and 'Fueltype'), so ddply() failed
## with "object 'fuel' not found". 'Fuel' is used here.
## NOTE(review): given the "*by type*" comment, confirm whether 'Fueltype'
## was the intended grouping instead.
beneMonPsp <- ddply( beneDfMelt, .(uniqueMon, beneficiary, Fuel),
                    summarize, PSP=sum(stnAlloc) )
## mid-month timestamp for plotting monthly values on a continuous axis
beneMonPsp$POSIXct <-
    as.POSIXct(paste(beneMonPsp$uniqueMon,'15',sep='-'),'%Y-%m-%d')
ggplot(beneMonPsp, aes(x=POSIXct,y=PSP,color=beneficiary)) +
    geom_line()+ geom_point() +
    facet_wrap(~beneficiary, scales='free')
|
8461391ce249f6f6fdd2880fa0c202c82d38b4b3 | f5cbf4947775bb092220711eb5bd07befb34dd50 | /man/ScatterBoxplot.Rd | adcdd9bffecc89064c5b91861b3647446698edf4 | [] | no_license | stork119/SysBioSigTheme | b85fc96e1255a23d9aed0afe2f141cf140fb6314 | 0b541d31bde078ee5512dbb752f9049167e790ff | refs/heads/master | 2022-02-17T08:07:42.664487 | 2019-09-09T10:08:52 | 2019-09-09T10:08:52 | 188,425,237 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,122 | rd | ScatterBoxplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphics_plots_ScatterBoxplot.R
\name{ScatterBoxplot}
\alias{ScatterBoxplot}
\title{ScatterBoxplot}
\usage{
ScatterBoxplot(data, x_, y_, point.size = 0.5, point.alpha = 0.1,
point.scale = 0.25, point.color = NULL, ...)
}
\arguments{
\item{x_}{- x axis}
\item{y_}{- y axis}
\item{...}{Arguments passed on to \code{rescaleDensitiesValues}
\describe{
\item{density.rescale.fun}{parameter, that defines a function used for rescaling signals in plots.
There are three built-in functions, that can be chosen:
(1) \code{'numeric'},
(2) \code{logarithmic} - with base defined in \code{density.rescale.fun.args} - default: \code{e = exp(1)}.
Function must be defined as a lambda construct \code{function(x, ...){...}}.}
\item{density.rescale.fun.args}{list of the arguments to defaults \code{density.rescale.fun}}
}}
\item{colors.limits}{- c(minimal color value, maximal color value)}
\item{facet.rows}{- column name used to create facet rows}
\item{facet.cols}{- column name used to create facet cols}
}
\description{
ScatterBoxplot
}
|
e04e657a7ef2467fe50257f6883f029e680fa808 | 20506b0b407a133682e8c731e08f4530dcbf2c06 | /data-raw/robas_2_data.R | 819baea7a9b4a86fdb6aa40a264d4348d5f98f5a | [
"MIT"
] | permissive | ROBAS-UCLA/ROBAS.2 | df50542603318b2144e1e3b4cd8addaaa12b44c3 | 99af6935ab2432b1a8e388287842cf7820e98d8b | refs/heads/master | 2023-04-07T11:29:07.466782 | 2022-05-24T02:13:40 | 2022-05-24T02:13:40 | 439,480,289 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 168 | r | robas_2_data.R | library(readr)
library(dplyr)
robas_2_data = read.csv("inst/extdata/robas_2_data.csv", row.names = 1) |> as_tibble()
usethis::use_data(robas_2_data, overwrite = TRUE)
|
c34d4a8d21c6d077aa61523ec5436bb15741b9d5 | 8218c5e8a362673882278bbf58e42ebcd937dc86 | /rfp6/S01_code.R | 69e565a23364434b36ec1821dd97b7b0db7134ee | [] | no_license | Sjan1/r-for-proteomics-tsl | 276fb78c84d82f89f1e5f17080c19c04bd2fa196 | 24a8e08ca3e00f8e73089755aaa00694d5d83a01 | refs/heads/master | 2021-05-04T13:58:42.792222 | 2020-05-11T18:40:21 | 2020-05-11T18:40:21 | 120,327,192 | 1 | 2 | null | 2018-02-05T16:00:33 | 2018-02-05T16:00:33 | null | UTF-8 | R | false | false | 5,975 | r | S01_code.R | source("S00-env.R")
library("rtslprot")
library("dplyr")
library("msmsTests")
## READ THE EXPERIMENT TABLE that describes:
## (i) samples
## (ii) measurements
## (iii) searches
## (iv) experimental factors
mzid <- "msgf"
mzid <- "mascot"
mzid <- "mascot_fdr1pc"
exp <- readSampleExperimentTable("SampleExperimentTable.csv",
mzid = mzid)
## APPLY FILTER if necessary
exp <- exp %>%
filter(cleavage == "tryp") %>%
select(-category) %>%
dropConstantVariables()
## DEFINE UNIQUE SAMPLES
## lower in hierarchy are only fractions (of samples) that will be combined
## all this have to be described in the experiment table
fcols <- c("phenotype", "treatment", "biorep")
etab <- experimentHierarchy(exp, fcols)
etab
## READ INTO MSnSets
## This chunk reads each entry of the experimental design etab
## and creates the corresponding MSnSet from the mzid files.
## The MSnSets are returned as a list of MSnSets.
msnl <- apply(etab, 1, function(.etab) {
filenames <- exp %>%
filter(biorep == .etab[["biorep"]],
phenotype == .etab[["phenotype"]],
treatment == .etab[["treatment"]]) %>%
select(name)
mzid_files <- file.path(mzid, paste(filenames[[1]], "mzid",
sep = "."))
## make MSnSet (choose peptide or PSMs - two functions in rtslprot)
e <- rtslprot:::make_pep_MSnSet(mzid_files,
fdr = 0.05,
level = "peptide",
fcol = "pepSeq")
e@processingData@files <- mzid_files
sampleNames(e) <- paste(.etab, collapse = "_")
e <- updateFvarLabels(e, sampleNames(e))
if (validObject(e))
return(e)
})
## COMBINE MSnSets IN ONE
## Convert the list of MSnSets to a single MSnSet where
## each element of the list becomes a columns (sample)
## and update the experimental design
e <- MSnbase::combine(msnl[[1]], msnl[[2]])
for (i in 3:length(msnl)) {
e <- MSnbase::combine(e, msnl[[i]])
}
rownames(etab) <- sampleNames(e)
pData(e) <- etab
## DEAL WITH NAs ON PEPIDE LEVEL
e <- impute(e, method = "zero")
## keep the original
saveRDS(e,"e.rds")
e0 <- e
saveRDS(e0,"e0.rds")
## save others
saveRDS(e,"e_msgf.rds")
saveRDS(e,"e_mascot.rds")
saveRDS(e,"e_mascot_fdr1pc.rds")
## open if needed
e <- readRDS("e0.Rds")
## 20-03-2019: COMBINE PEPTIDES INTO PROTEINS
## Make vector with all accessions matching every peptide
## Append the vector to the feature data
## Step through all the features (rows and columns)
## and copy accessions when found.
load("e.rda") #MSnSet with peptides from the script code.R
e <- readRDS("e.rds")
e <- readRDS("e_mascot.rds")
e <- readRDS("e_msgf.rds")
e <- readRDS("e_mascot_fdr1pc.rds")
## concatenate all accessions - NEW
i <- grep("accession\\.", fvarLabels(e)) # e is a peptide-level MSnSet
k <- apply(fData(e)[, i], 1,
function(x) unique(na.omit(as.character(x))))
fData(e)$nprots <- lengths(k)
#fData(e)$accession <- sapply(k, paste, collapse = ";") # Laurent's suggestion
fData(e)$accession <- sapply(k, paste) # But when not collapsed, we then easily make a list
l <- as.list(fData(e)$accession) # the list is nedded for combineFeatures
#save modified MSnSet
saveRDS(e,"e.rds")
eprot_m <- combineFeatures(e, groupBy = l,
fun = "sum",redundancy.handler = "multiple")
eprot_u <- combineFeatures(e, groupBy = fData(e)$accession,
fun = "sum", redundancy.handler = "unique")
## save results as RDS
saveRDS(eprot,"eprot.rds")
saveRDS(eprot0,"eprot0.rds")
saveRDS(eprot,"eprot_msgf.rds")
saveRDS(eprot,"eprot_mascot.rds")
saveRDS(eprot,"eprot_mascot_fdr1pc.rds")
saveRDS(eprot,"eprot_m.rds")
saveRDS(eprot0,"eprot_u.rds")
## read the original
eprot <- readRDS("eprot.rds")
eprot <- readRDS("eprot0.rds")
eprot <- readRDS("eprot_msgf.rds")
eprot <- readRDS("eprot_mascot.rds")
eprot <- readRDS("eprot_mascot_fdr1pc.rds")
eprot <- readRDS("eprot_m")
eprot <- readRDS("eprot_u")
## statistical tests
## null_TR__alt_PH+TR
null.f <- "y~treatment"
alt.f <- "y~phenotype+treatment"
## Can variance be explained by phenotype?
####null_1__alt_PH
null.f <- "y~1"
alt.f <- "y~phenotype"
## is treatment doing anything?
## null_PH+TR__alt_PH*TR
null.f <- "y~phenotype+treatment"
alt.f <- "y~phenotype*treatment"
#e.notTreatedSamples <- e[e$treatment=="H",]
e <- rtslprot:::msms_edgeR_test(e,
null.f = null.f,
alt.f = alt.f,
fnm = "phenotype",
test_name = "null_TR__alt_PH+TR")
e <- rtslprot:::msms_edgeR_test(e,
null.f = null.f,
alt.f = alt.f,
fnm = "phenotype",
test_name = "null_1__alt_PH")
e <- rtslprot:::msms_edgeR_test(e,
null.f = null.f,
alt.f = alt.f,
fnm = "phenotype",
test_name = "null_PH+TR__alt_PH*TR")
## null_TR__alt_PH+TR
plot(fData(e)$`LogFC_null_TR__alt_PH+TR`,-log10(fData(e)$`p.value_null_TR__alt_PH+TR`))
hist(fData(e)$`p.value_null_TR__alt_PH+TR`)
hist(fData(e)$`adj.p.values_null_TR__alt_PH+TR`)
## null_1__alt_PH
plot(fData(e)$`LogFC_null_1__alt_PH`,-log10(fData(e)$`p.value_null_1__alt_PH`))
hist(fData(e)$`p.value_null_1__alt_PH`)
hist(fData(e)$`adj.p.values_null_1__alt_PH`)
## null_PH+TR__alt_PH*TR
plot(fData(e)$`LogFC_null_PH+TR__alt_PH*TR`,-log10(fData(e)$`p.value_null_PH+TR__alt_PH*TR`))
hist(fData(e)$`p.value_null_PH+TR__alt_PH*TR`)
hist(fData(e)$`adj.p.values_null_PH+TR__alt_PH*TR`)
length(unique(fData(e)$`p.value_null_TR__alt_PH+TR`))
length(unique(fData(e)$`p.value_null_1__alt_PH`))
length(unique(fData(e)$`p.value_null_PH+TR__alt_PH*TR`))
saveRDS(e,"e.rds")
##
head(exprs(e))
head(fData(e))[,1:3]
pData(e)
|
d73d5fbafe80a3c9cf563c67ef1458e8242d1f23 | a00b6cdccf670efc7be5a9f7f2dfa6072551f22c | /man/sortNetwork.Rd | a50e2c6e2a4913487afb6fa2cf968e77a79e10ea | [] | no_license | uc-bd2k/SigNetA | 5f20a069f8b4ac053a0d2f6d1bba1964df06444a | 7dee256c6fb404ac6e50e478673c002ba0cc2dbe | refs/heads/master | 2021-01-03T13:01:28.720095 | 2019-05-28T08:51:11 | 2019-05-28T08:51:11 | 124,279,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 404 | rd | sortNetwork.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sortNetwork.R
\name{sortNetwork}
\alias{sortNetwork}
\title{Sort Network Function}
\usage{
sortNetwork(x)
}
\arguments{
\item{x}{A signature file whose top genes are sorted.}
}
\description{
This function allows you to sort top 100 genes from a signature
}
\examples{
sortNetwork() will sort network for any signature file
}
\keyword{sortNetwork}
|
a721e7ad328c37933bb39eda1e440cfe49854eac | 39099fc3d58b960454cf5c576493ab60449064c8 | /man/condor.plot.communities.Rd | 4f4e4618dd1f93724feeab5bc994bfcf8f1cb2d1 | [
"MIT"
] | permissive | ayoung01/condor | 5ecc7dbeddacea87bd0b23ce4d9ba8922134da1e | 0ad46102f6fd85341e11537a106da99efde4dc71 | refs/heads/master | 2020-12-26T03:56:05.135723 | 2019-07-05T18:20:12 | 2019-07-05T18:20:12 | 43,075,151 | 0 | 0 | null | 2015-09-24T15:22:12 | 2015-09-24T15:22:12 | null | UTF-8 | R | false | true | 1,632 | rd | condor.plot.communities.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/condor.plot.communities.R
\name{condor.plot.communities}
\alias{condor.plot.communities}
\title{Plot adjacency matrix with links grouped and colored by community}
\usage{
condor.plot.communities(condor.object, color_list, point.size = 0.01,
xlab = "SNP", ylab = "Gene")
}
\arguments{
\item{condor.object}{output of either \code{\link{condor.cluster}} or
\code{\link{condor.modularity.max}}}
\item{color_list}{vector of colors accepted by \code{col} inside the
\code{\link[graphics]{plot}} function. There must be as many colors as
communities.}
\item{point.size}{passed to \code{cex} in the
\code{\link[graphics]{plot}}}
\item{xlab}{x axis label}
\item{ylab}{y axis label}
}
\value{
produces a \code{\link[graphics]{plot}} output.
}
\description{
This function will generate the network link 'heatmap' with colored dots
representing within-community links and black dots between-community
links
}
\note{
For the condor paper \url{http://arxiv.org/abs/1509.02816}, I used
35 colors from the "Tarnish" palette with "hard" clustering
}
\examples{
r = c(1,1,1,2,2,2,3,3,3,4,4);
b = c(1,2,3,1,2,4,2,3,4,3,4);
reds <- c("Alice","Sue","Janine","Mary")
blues <- c("Bob","John","Ed","Hank")
elist <- data.frame(red=reds[r],blue=blues[b])
condor.object <- create.condor.object(elist)
condor.object <- condor.cluster(condor.object)
condor.plot.communities(condor.object,
color_list=c("darkgreen","darkorange"),point.size=2,
xlab="Women",ylab="Men")
}
\references{
\url{http://tools.medialab.sciences-po.fr/iwanthue/} is
a nice color generator
}
|
# Sanitize a character vector for safe inclusion in DiagrammeR output.
#
# `regExReplacements` is a list of c(pattern, replacement) pairs that the
# package-level sanitizer() helper applies in order.  The defaults replace
# double/single quotes with backticks and backslashes with forward slashes.
sanitize_for_DiagrammeR <- function(x,
                                    regExReplacements = list(c("\\\"", "`"),
                                                             c("\\'", "`"),
                                                             c("\\\\", "/"))) {
  sanitizer(x, regExReplacements)
}
|
320a43c0f92223d5af6ead6a9fc6221c3bb023e7 | 8e44344e387cc42b4243b0b1f42fb21f5525c0f6 | /maskTrackll.R | 0ae6331ef56d027f94847599990ca7c0c9c301c9 | [] | no_license | snjy9182/Kernel-Density-Clustering-Mask | b78014b545fa1fab690e7d5ba990feef9acd67c9 | fa9fe2f7fe13db8613d0508a2d11b02bfd48bb6f | refs/heads/master | 2021-01-01T15:58:23.706212 | 2017-07-24T21:29:55 | 2017-07-24T21:29:55 | 97,748,895 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,100 | r | maskTrackll.R | #### maskTrackll.R
#### Wu Lab, Johns Hopkins University
#### Author: Sun Jay Yoo
#### Date: July 21, 2017
## maskTrackll-methods
##
##
###############################################################################
##' @name maskTrackll
##' @aliases maskTrackll
##' @title maskTrackll
##' @rdname maskTrackll-methods
##' @docType methods
##'
##' @description mask track lists and lists of track lists using kernel density clusters
##' @usage
##' maskTrackll(trackll, automatic = F)
##'
##' maskTrackl(track.list, automatic = F)
##'
##' plotPoints(trackll)
##'
##' plotLines(trackll)
##'
##' @param trackll An uncensored/unfiltered list of track lists.
##' @param automatic Find p automatically using a model (not recommended)
##' @param track.list A single uncensored/filtered track list.
##' @details
##'
##' When maskTrackll() is called by default with automatic = F, it will repeatedly ask the user for the kernel density probability (p)
##' and the number of smallest clusters to eliminate. The kernel density probability (p) is a factor that determines how dense the cluster contours are.
##' Low p creates smaller and/or fewer clusters and vice versa. Adjust p accordingly, but if there are still small extra clusters made in undesired
##' areas, raise the number of smallest clusters to eliminate accordingly (note: sometimes noise clusters are too small to see).
##' Use maskTrackl() to apply this to only one track list.
##'
##' Use plotPoints and plotLines to plot lists of track lists into separate scatter/line plots.
##' Use .plotPoints and .plotLines for a single track list. These track lists can be plotted at any point in analysis.
##'
##' EXTRA:
##'
##' The general method for creating a masked track list from a single track list begins by
##' first calculating its kernel density using kernelDensity(),
##' creating the mask using this value createMask() (while adjusting the kernel density probability [p] as needed),
##' then generating the masked track list using applyMask. The reason why these three steps are separated in the source code is
##' to allow for quick repeated adjustments to the kernel density probability (p), as the other two steps can take more time.
##'
##' The value for the kernel density probability (p) is automatically calculated by default using
##' a regression model estimating the approximate desired probability using the track list's average track length
##' (derived from a specific data set).Thus, it is highly recommended to use uncensored/unfiltered track lists.
##'
##' If one had to apply this function to a large number of typically unvariable track lists, a regression model can be made and integrated into the source code.
##'
##' @examples
##'
##' #Default call for masking a list of track lists.
##' masked.trackll <- maskTrackl(trackll)
##'
##' #Default call for masking a track list
##' masked.trackl <- maskTrackl(trackl)
##' @export maskTrackl
##' @export maskTrackll
##' @export plotPoints
##' @export plotLines
##' @export .plotPoints
##' @export .plotLines
##' @importFrom dplyr bind_rows
##' @importFrom MASS kde2d
##' @importFrom sp point.in.polygon
###############################################################################
#library(dplyr) #bind_rows, MASS::kde2d
#library(sp) #point.in.polygon
#### kernelDensity ####
# Estimate the 2D kernel density of all coordinates in a track list.
# Returns the MASS::kde2d object, evaluated on a 200x200 grid spanning
# the fixed 0-128 x 0-128 imaging field.
kernelDensity = function (track.list){
    # Pool the coordinates of every track into one data frame;
    # columns 1 and 2 hold x and y
    merged <- mergeTracks(track.list)
    MASS::kde2d(merged[[1]], merged[[2]], n = 200, lims = c(c(0, 128), c(0, 128)))
}
#### createMask ####
# Build a cluster mask from a kernel density estimate and (optionally) plot it.
#
# Arguments:
#   track.list     uncensored/unfiltered track list (list of data frames)
#   kernel.density output of kernelDensity() for the same track list
#   p              kernel density probability controlling how tight the
#                  cluster contours are; if NULL it is estimated from the
#                  mean track length via a regression fitted on a reference
#                  data set
#   eliminate      number of smallest clusters to drop (0 = keep all)
#   plot           if TRUE, plot the masked points plus the density contour
#
# Returns a factor with one element per merged coordinate row: 0 = outside
# all cluster polygons, >= 1 = number of polygons containing the point.
createMask = function (track.list, kernel.density, p = NULL, eliminate = NULL, plot = T){
    #Store all merged track coordinate points into a dataframe
    df <- mergeTracks(track.list)
    if (is.null(p)){
        # Regression-based estimate from the average track length
        p = -0.1207484 + 0.3468734*(nrow(df)/length(track.list))
    }
    if (is.null(eliminate)){
        eliminate = 0
    }
    if (p <= 0 || p >= 1){
        # FIX: previously only cat()ed an error message and carried on into
        # contour math that is meaningless outside (0, 1); fail fast instead.
        stop("Need valid probability (p) in (0, 1), or automatic calculation is not valid.")
    }
    # Find the density level whose enclosed probability mass equals 1 - p
    prob <- c(p)
    dx <- diff(kernel.density$x[1:2])
    dy <- diff(kernel.density$y[1:2])
    sz <- sort(kernel.density$z)
    c1 <- cumsum(sz) * dx * dy
    contour.levels <- sapply(prob, function(x){
        approx(c1, sz, xout = 1 - x)$y
    })
    #Create the contour polygons at that density level
    ls <- contourLines(kernel.density, level=contour.levels)
    #Keep only the largest clusters: repeatedly drop the contour with the
    #fewest vertices until `eliminate` clusters have been removed
    if (eliminate > 0){
        num.clusters = length(ls) - eliminate
        while (length(ls) > num.clusters){
            noise = 0
            min.size = Inf
            for (i in 1:length(ls)){
                if(length(ls[[i]][[2]]) < min.size){
                    noise = i
                    min.size = length(ls[[i]][[2]])
                }
            }
            ls[[noise]] <- NULL
        }
    }
    #For each polygon, flag which coordinate points fall inside it
    cluster <- list()
    for (i in 1:length(ls)){
        cluster[[i]] <- point.in.polygon(df[[1]], df[[2]], ls[[i]]$x, ls[[i]]$y)
    }
    #Mask value per point = sum of cluster memberships (0 = outside all)
    df$region <- factor(Reduce("+", cluster))
    #Plot with mask and contour
    if(plot){
        title = paste(getTrackFileName(track.list),"Mask with Kernel Density Probability (p) of", round(p, digits = 3), sep = " ")
        plot(df[[2]] ~ df[[1]], col=region, data=df, xlim = c(0, 128), ylim = c(0, 128), xlab = "x", ylab = "y", main = title, cex = .1)
        contour(kernel.density, levels=contour.levels, labels=prob, add=T)
    }
    cat("\n", getTrackFileName(track.list), "masked at a kernel density probability of", round(p, digits = 3), "\n")
    return(df$region)
}
#### applymask ####
# Filter a track list down to the tracks that touch the cluster mask.
# `mask` holds one value per coordinate row, in track order; a track is kept
# when at least one of its points has mask value 1 (inside exactly one
# cluster polygon).  Assumes every track has at least one row and that
# length(mask) equals the total number of rows.  Track names are preserved.
applyMask = function(track.list, mask){
    # Which track does each coordinate row belong to?
    rows.per.track <- vapply(track.list, nrow, integer(1))
    row.track <- factor(rep(seq_along(track.list), times = rows.per.track),
                        levels = seq_along(track.list))
    # A track survives if any of its points lies inside the mask
    in.mask <- mask == 1
    keep <- vapply(split(in.mask, row.track), any, logical(1))
    return (track.list[keep])
}
#### mergeTracks ####
# Stack every track of a track list into one data frame (dplyr::bind_rows),
# tagging rows with a "Trajectory" id.  Tracks with three columns carry only
# x/y/z; otherwise the Frame column is kept as well.
mergeTracks = function(track.list){
    wanted <- if (length(track.list[[1]]) == 3) {
        c("x", "y", "z")
    } else {
        c("x", "y", "z", "Frame")
    }
    bind_rows(track.list, .id = "Trajectory")[, wanted]
}
#### maskTrackl ###
# Mask a single track list using kernel-density clusters.
#
# With automatic = F (default) the user is prompted interactively to tune the
# kernel density probability (p) and the number of smallest clusters to drop,
# re-drawing the mask after every adjustment; with automatic = T, p is
# estimated by createMask() from the data.  The density itself is computed
# only once -- only the mask is recomputed per iteration.
# Returns the masked track list produced by applyMask().
# NOTE(review): readline() returns "" in non-interactive sessions, which maps
# to "not done" here -- this function is intended for interactive use only.
maskTrackl = function (track.list, automatic = F){
    cat("\n Mask for", getTrackFileName(track.list), "...\n")
    kd <- kernelDensity(track.list);
    if (automatic){
        # p = NULL lets createMask() estimate p from the average track length
        mask <- createMask(track.list, kd, p = NULL);
    } else {
        eliminate = 0;
        p = NULL;
        done = FALSE;
        mask <- createMask(track.list, kd, p = p);
        # Interactive loop: show the mask, then ask whether to accept it or
        # retry with a new p / cluster-elimination count
        while (!done){
            cat("\n")
            done = as.integer(readline(prompt="Done (1 = YES; 0 = NO)?: "))
            if (is.na(done)){
                # Non-numeric answer: treat as "not done" and continue tuning
                cat("\nIncorrect input, set to default = 0.\n")
                done = 0;
            }
            done = as.logical(done)
            if (done){
                break;
            }
            p = as.numeric(readline(prompt="New kernel density probability (p): "))
            if (is.na(p)){
                # Non-numeric p: fall back to automatic estimation in createMask()
                cat("\nIncorrect input, set to default.\n")
                p = NULL;
            }
            eliminate = as.integer(readline(prompt="Number of smallest clusters to elimnate (recommended 0, unless last resort): "))
            if (is.na(eliminate)){
                cat("\nIncorrect input, set to default = 0.\n")
                eliminate = 0;
            }
            mask <- createMask(track.list, kd, p = p, eliminate = eliminate);
        }
    }
    masked.track.list <- applyMask(track.list, mask);
    cat("\n Mask created for", getTrackFileName(track.list), "\n")
    return(masked.track.list)
}
#### maskTrackll ####
# Apply maskTrackl() to every track list in a list of track lists,
# preserving names.  Interactive per track list unless automatic = T.
maskTrackll = function (trackll, automatic = F){
    masked.trackll <- lapply(trackll, maskTrackl, automatic = automatic)
    names(masked.trackll) <- names(trackll)
    cat("\nAll tracks lists masked.\n")
    return(masked.trackll)
}
#### plotPoints ####
# Draw one scatter plot (via .plotPoints) for each track list given.
plotPoints = function(trackll){
    for (track.list in trackll){
        .plotPoints(track.list)
    }
}
# Scatter-plot every coordinate of one track list on the 0-128 field,
# titled with the track list's file name.
.plotPoints = function(track.list){
    merged <- mergeTracks(track.list)
    plot(merged[[1]], merged[[2]],
         xlim = c(0, 128), ylim = c(0, 128),
         xlab = "x", ylab = "y",
         main = getTrackFileName(track.list), cex = .1)
}
#### plotLines ####
# Draw one line plot (via .plotLines) for each track list given.
plotLines = function(trackll){
    for (track.list in trackll){
        .plotLines(track.list)
    }
}
# Line-plot one track list: the first track initializes the axes, the
# remaining tracks are overlaid with lines().
.plotLines = function(track.list){
    plot(track.list[[1]][[1]], track.list[[1]][[2]], type = "l", xlim = c(0, 128), ylim = c(0, 128), main = getTrackFileName(track.list))
    # FIX: guard the single-track case -- 2:length(track.list) evaluates to
    # c(2, 1) when there is only one track and indexes past the end
    if (length(track.list) > 1) {
        for(i in 2:length(track.list)){
            lines(track.list[[i]][[1]], track.list[[i]][[2]])
        }
    }
}
|
57012ee092f67b8186c4e5ff3fdf9344f732e119 | d96d7a5cf83350a650fbc94bc71ffebdb745c876 | /R/ISLR.r | 415e1369803a2602ae66f41a91742b2ea94fc583 | [] | no_license | ssh352/random_snippets | 26e3eed7c11a46ddf1c88bb6668f57ab90a75263 | e973eeb55a6de98026024a84091601b6a08c73ae | refs/heads/master | 2021-06-01T01:57:24.514710 | 2015-12-25T03:53:41 | 2015-12-25T03:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,280 | r | ISLR.r | # regression
# Linear regression lab (ISLR ch. 3) on the Boston housing data.
library(MASS)
library(ISLR)
fix(Boston)   # opens the interactive data editor
names(Boston)
# FIX: the bare lstat/medv/rm references below need Boston on the search path
attach(Boston)
lm.fit=lm(medv~lstat, data=Boston)
summary(lm.fit)
names(lm.fit)
confint (lm.fit)
# confidence intervals for the mean response at lstat = 5, 10, 15
predict(lm.fit, data.frame(lstat=(c(5,10 ,15))), interval="confidence")
plot(lstat, medv)
abline(lm.fit)
# standard diagnostic plots
par(mfrow=c(2,2))
plot(lm.fit)
plot(predict(lm.fit), residuals(lm.fit))
plot(predict(lm.fit), rstudent(lm.fit))
# leverage statistics
plot(hatvalues(lm.fit))
which.max(hatvalues(lm.fit))
# multiple regression
lm.fit=lm(medv~lstat+age, data=Boston )
summary(lm.fit)
lm.fit=lm(medv~.,data=Boston)
summary(lm.fit)
?summary.lm
summary(lm.fit)$r.sq
summary(lm.fit)$sigma
# variance inflation factors (collinearity check)
library(car)
vif(lm.fit)
lm.fit1=lm(medv~.-age ,data=Boston )
summary(lm.fit1)
# interaction
summary(lm(medv~lstat*age ,data=Boston))
# nonlinear transform
lm.fit2=lm(medv~lstat+I(lstat^2))
summary(lm.fit2)
lm.fit=lm(medv~lstat)
anova(lm.fit ,lm.fit2)
par(mfrow=c(2,2))
plot(lm.fit2)
lm.fit5=lm(medv~poly(lstat ,5))
summary(lm.fit5)
summary(lm(medv~log(rm),data=Boston))
# qualitative predictors (Carseats data)
attach(Carseats)
names(Carseats)
lm.fit=lm(Sales~. + Income:Advertising + Price:Age, data=Carseats )
summary(lm.fit)
# dummy variable
contrasts(ShelveLoc)
# logistic regression lab (ISLR ch. 4) on the Smarket data
library(ISLR)
# FIX: the bare Volume/Direction/Year references below need Smarket attached
attach(Smarket)
names(Smarket)
dim(Smarket)
# correlations among predictors (column 9 is the qualitative Direction)
cor(Smarket[,-9])
plot(Volume)
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Smarket, family=binomial)
summary(glm.fit)
summary(glm.fit)$coef
# fitted probabilities that the market goes Up
glm.probs=predict(glm.fit, type="response")
glm.probs[1:10]
contrasts(Direction)
# threshold at 0.5 to get class predictions (1250 = nrow(Smarket))
glm.pred=rep("Down",1250)
glm.pred[glm.probs >.5]="Up"
# confusion matrix
table(glm.pred, Direction)
# out of sample prediction
train=(Year<2005)
Smarket.2005=Smarket[!train,]
dim(Smarket.2005)
Direction.2005=Direction[!train]
glm.fit=glm(Direction~Lag1+Lag2+Lag3+Lag4+Lag5+Volume, data=Smarket, family=binomial, subset=train)
glm.probs=predict(glm.fit, Smarket.2005, type="response")
# 252 = number of 2005 (held-out) trading days
glm.pred=rep("Down",252)
glm.pred[glm.probs>.5]="Up"
table(glm.pred, Direction.2005)
mean(glm.pred==Direction.2005)
# principal components analysis (ISLR ch. 10 lab, USArrests data)
states=row.names(USArrests)
states
names(USArrests)
# column means and variances differ wildly, so scaling below matters
apply(USArrests , 2, mean)
apply(USArrests , 2, var)
pr.out=prcomp(USArrests, scale=TRUE)
names(pr.out)
pr.out$center
pr.out$scale
# loadings matrix
pr.out$rotation
dim(pr.out$x)
biplot (pr.out , scale =0)
# flip the signs for a nicer orientation (PCA is unique only up to sign)
pr.out$rotation=-pr.out$rotation
pr.out$x=-pr.out$x
biplot (pr.out , scale =0)
pr.out$sdev
# variance explained per component = squared standard deviations
pr.var=pr.out$sdev ^2
pr.var
# how much of variance is explained by each factor
pve=pr.var/sum(pr.var)
pve
plot(pve, xlab="Principal Component", ylab="Proportion of Variance Explained", ylim=c(0,1), type='b')
plot(cumsum(pve), xlab="Principal Component ", ylab="Cumulative Proportion of Variance Explained ", ylim=c(0,1),type='b')
# k-means and hierarchical clustering lab (ISLR ch. 10)
set.seed(2)
# simulated data: two groups of 25 points, shifted apart
x=matrix(rnorm (50*2), ncol=2)
x[1:25,1]=x[1:25,1]+3
x[1:25,2]=x[1:25,2]-4
km.out=kmeans (x,2, nstart =20)
km.out$cluster
plot(x, col=(km.out$cluster+1), main="K-Means Clustering Results with K=2", xlab="", ylab="", pch=20, cex=2)
set.seed(4)
km.out=kmeans(x,3, nstart=20)
km.out
# hierarchical clustering
hc.complete=hclust(dist(x), method="complete")
hc.average =hclust(dist(x), method ="average")
hc.single=hclust(dist(x), method ="single")
par(mfrow=c(1,3))
plot(hc.complete, main="Complete Linkage", xlab="", sub="", cex=.9)
plot(hc.average, main="Average Linkage", xlab="", sub="", cex=.9)
plot(hc.single, main="Single Linkage", xlab="", sub="", cex=.9)
cutree(hc.complete, 2)
cutree(hc.average, 2)
cutree(hc.single, 2)
cutree(hc.single, 4)
xsc=scale(x)
# FIX: method was "complete " (trailing space), which hclust rejects
# because the method name is matched with pmatch()
plot(hclust(dist(xsc), method ="complete"), main=" Hierarchical Clustering with Scaled Features ")
x=matrix(rnorm (30*3), ncol=3)
# correlation-based distance between observations
dd=as.dist(1-cor(t(x)))
plot(hclust(dd, method ="complete"), main=" Complete Linkage with Correlation -Based Distance ", xlab="", sub ="")
library(ISLR)
nci.labs=NCI60$labs
nci.data=NCI60$data
dim(nci.data)
nci.labs[1:4]
table(nci.labs)
pr.out=prcomp(nci.data , scale=TRUE)
# FIX: removed leftover console "+ " continuation prompts that made this
# function definition unparseable when the script is sourced.
# Cols maps each distinct label to a distinct rainbow color.
Cols=function (vec){
    cols=rainbow (length(unique(vec)))
    return(cols[as.numeric (as.factor(vec))])
}
# decision trees lab (ISLR ch. 8) on the Carseats data
library(tree)
library(ISLR)
attach(Carseats)
# Binarize Sales into High = Yes/No.
# FIX: tree() needs a factor response; since R 4.0 data.frame() no longer
# converts the character vector to a factor automatically.
High = as.factor(ifelse(Sales <=8, "No", "Yes"))
Carseats = data.frame(Carseats, High)
# classification tree predicting High from everything except Sales itself
tree.carseats = tree(High~.-Sales, Carseats)
summary(tree.carseats)
plot(tree.carseats)
text(tree.carseats, pretty=0)
tree.carseats
|
c8638fe1be36c62b54c45003ea4e92c7df4b78e8 | eac571af0e203a6f0f98a859140559db9d500fd3 | /pca_assets.R | 9ec385abb45d1ece75af4c1448c29402a2c3f1f5 | [] | no_license | tardelr/censo-analysis | 8d2633f0cb8b2f6306f9f43a5298eaa2edd37cfb | 7aba39ef5f0b46ed96b120c8f55f12c527230da0 | refs/heads/master | 2020-09-06T09:54:03.144622 | 2019-11-10T21:14:38 | 2019-11-10T21:14:38 | 220,392,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,055 | r | pca_assets.R | library(dplyr)
library(tidyr)
# NOTE(review): plyr is loaded after dplyr elsewhere in this project; its
# functions mask several dplyr verbs -- keep the load order in mind.
library(plyr)
library(ggplot2)
library(factoextra)
# Root folder holding the raw censo asset extracts (user-specific path)
datasets.path <- '~/Desktop/datasets/censo_assets'
# --- 2000 census: asset ownership variables v0214..v0223 ---
# areap is read as character so leading zeros in area codes are preserved
assets2000 <- read.csv( file.path( datasets.path, 'sp00.csv' ), colClasses = c( areap='character' ) )
# Keep only the Sao Paulo municipality (v0103 == 3550308) and drop
# incomplete rows
assets2000 <- assets2000 %>%
  subset( v0103 == 3550308 ) %>%
  select( "areap", "v0214", "v0215", "v0216", "v0217", "v0218", "v0219", "v0220", "v0221", "v0222", "v0223" ) %>%
  na.omit()
# assets2000[is.na(assets2000)] <- -1
# Mean asset ownership per area
agg.assets <- aggregate(assets2000[,-1], by=list(areap=assets2000$areap), FUN=mean)
values.only <- agg.assets[,-1]
# PCA on the standardized asset means; the first component is used as a
# socioeconomic-status (SES) score below
res.pca <- prcomp(~ ., data = values.only, scale = TRUE, na.action = na.omit )
fviz_eig(res.pca)
fviz_pca_var(res.pca,
             col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE     # Avoid text overlapping
             )
# First-component loadings and scores
# NOTE(review): PCA signs are arbitrary; the SES score's direction is not
# guaranteed to mean "higher = richer" -- verify against the loadings.
factors2000 <- res.pca$rotation[,1]
ses2000 <- res.pca$x[,1]
hist(ses2000)
ses.area.2000 <- cbind(agg.assets, ses2000)
write.csv(ses.area.2000, '~/Desktop/datasets/ses_score/ses2000.csv')
# Repeating analysis to 2010
# (2010 file keys areas by v0011 and municipality by v0002 == 50308)
assets2010 <- read.csv( file.path( datasets.path, 'sp10.csv'), colClasses = c( v0011 ='character' ))
assets2010 <- assets2010 %>%
  subset( v0002 == 50308) %>%
  select("v0011", "v0214", "v0215", "v0216", "v0217", "v0218", "v0219", "v0220", "v0221", "v0222") %>%
  na.omit()
# assets2010[is.na(assets2010)] <- -1
agg.assets.2010 <- aggregate(assets2010[,-1], by=list(areap=assets2010$v0011), FUN=mean)
values.only.2010 <- agg.assets.2010[,-1]
res.pca <- prcomp(~ ., data = values.only.2010, scale = T )
fviz_eig(res.pca)
fviz_pca_var(res.pca,
             col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE # Avoid text overlapping
             )
factors2010 <- res.pca$rotation[,1]
ses2010 <- res.pca$x[,1]
hist(ses2010)
ses.area.2010 <- cbind(agg.assets.2010, ses2010)
write.csv(ses.area.2010, '~/Desktop/datasets/ses_score/ses2010.csv')
#
|
9ecba26f0a2bd531213edef8cefda8c4c280ef00 | 85d34e7ab8ba5d409ff8f6136c91de786647b168 | /analysis/preprocessing_exp2.R | d09a1bbcdcabc94c4050cefc9e7f771585b0bd6d | [] | no_license | thegricean/RE_production | 1b7aa6029c7810e857ad1d6a1c3bfb8ac2a79ba5 | 435419e7c68dd58fc1082602365a1a05e587aafd | refs/heads/master | 2022-12-04T23:57:38.193570 | 2021-05-06T22:54:42 | 2021-05-06T22:54:42 | 126,399,878 | 3 | 6 | null | 2022-11-21T23:58:33 | 2018-03-22T21:59:27 | HTML | UTF-8 | R | false | false | 11,450 | r | preprocessing_exp2.R | this.dir <- dirname(rstudioapi::getSourceEditorContext()$path)
# Working directory = location of this script (this.dir is set above via
# rstudioapi, so this only works inside RStudio)
setwd(this.dir)
library(tidyverse)
library(here)
source(here("analysis","helper_scripts","helpers.R"))
# Read raw data
d = read.table(file=here("data","raw","production_exp2","rawdata_exp2.csv"),sep="\t", header=T, quote="")
nrow(d)
head(d)
# unique(d$gameid): 65 pairs
# Exclude data and participants that cannot be used for analysis
# Exclude NA referential expressions
d = droplevels(d[!(is.na(d$refExp)),])
# Exclude pairs because listeners didn't respond
d = droplevels(d[!(d$gameid == "0960-e" | d$gameid == "6911-b" | d$gameid == "1866-f" | d$gameid == "6581-5"), ])
# Exclude participants that were not paid because they didn't finish it
d = droplevels(d[!(d$gameid == "6444-b" | d$gameid == "4924-4"), ])
# Exclude tabu players
d = droplevels(d[!(d$gameid == "1544-1" | d$gameid == "4885-8" | d$gameid == "8360-7" | d$gameid == "4624-5" | d$gameid == "5738-a" | d$gameid == "8931-5" | d$gameid == "8116-a" | d$gameid == "6180-c" | d$gameid == "1638-6" | d$gameid == "6836-b"), ])
# unique(d$gameid): 47 pairs
# Exclude trials with distractor choices
d = droplevels(d[d$targetStatusClickedObj == "target",])
# Exclude unnecessary columns
production = d[,c("gameid","context","nameClickedObj","alt1Name","alt2Name","refExp","clickedColor","clickedType")]
# Correct for change in labelling (we realized what we considered pink was mostly considered purple)
production$clickedColor = ifelse(as.character(production$clickedColor) == 'pink', 'purple', as.character(production$clickedColor))
production$nameClickedObj = gsub("pink", "purple", production$nameClickedObj)
production$alt1Name = gsub("pink", "purple", production$alt1Name)
production$alt2Name = gsub("pink", "purple", production$alt2Name)
# Get typicalities from typicality "type" study
typ = read.csv(file=here("data","raw","norming_exp2","typicality_exp2_type.csv"))
# Keep only rows where the rated utterance is the item's own type label
typ = typ[as.character(typ$Item) == as.character(typ$Utterance),]
# Index typicalities by "color item" so they can be looked up per trial
row.names(typ) = paste(typ$Color,typ$Item)
production$NormedTypicality = typ[paste(production$clickedColor,production$clickedType),]$Typicality
# production$binaryTypicality = as.factor(ifelse(production$NormedTypicality > .5, "typical", "atypical"))
# Clean responses to make them easier to categorize
# Each gsub collapses observed misspellings/abbreviations of one item name
# into its canonical form (note the leading space in each replacement).
production$CleanedResponse = gsub("(^| )([bB]ananna|[Bb]annna|[Bb]anna|[Bb]annana|[Bb]anan|[Bb]ananaa|ban|bana|banada|nana|bannan|babanana|B)($| )"," banana",as.character(production$refExp))
production$CleanedResponse = gsub("(^| )([Cc]arot|[Cc]arrrot|[Cc]arrott|car|carrpt|carrote|carr)($| )"," carrot",as.character(production$CleanedResponse))
production$CleanedResponse = gsub("(^| )([Pp]earr|pea)$"," pear",as.character(production$CleanedResponse))
production$CleanedResponse = gsub("(^| )([Tt]omaot|tokm|tmatoe|tamato|toato|tom|[Tt]omatoe|tomamt|tomtato|toamoat|mato|totomato|tomatop)($| )"," tomato",as.character(production$CleanedResponse))
production$CleanedResponse = gsub("(^| )([Aa]ppe|appple|APPLE|appl|app|apale|aple|ap)($| )"," apple",as.character(production$CleanedResponse))
production$CleanedResponse = gsub("(^| )([Pp]eper|pepp|peppre|pep|bell|jalapeno|jalpaeno|eppper|jalpaeno?)($| )"," pepper",as.character(production$CleanedResponse))
production$CleanedResponse = gsub("(^| )([Aa]vacado|gauc|avodado|avacdo|[Aa]vacadfo|avo|avacoda|avo|advocado|avavcado|avacodo|guacamole|gaucamole|guacolome|advacado|avacado,|avacado\\\\)($| )"," avocado",as.character(production$CleanedResponse))
# Categorize responses
# Was a color mentioned?
# we also include here "lighter", "dark", "brighter"
# NOTE(review): this runs on the raw refExp (not CleanedResponse) and includes
# loose alternatives like "re$" -- broad by design, may over-match.
production$ColorMentioned = ifelse(grepl("green|purple|white|black|brown|purple|violet|yellow|gold|orange|prange|silver|blue|blu|pink|red|purlpe|pruple|puyrple|purplke|yllow|grean|dark|purp|yel|gree|gfeen|bllack|blakc|grey|neon|gray|blck|blu|blac|lavender|ornage|pinkish|lighter|brighter|re$|^or ", production$refExp, ignore.case = TRUE), T, F)
# Was a type mentioned?
production$ItemMentioned = ifelse(grepl("apple|banana|carrot|tomato|pear|pepper|avocado|jalpaeno?", production$CleanedResponse, ignore.case = TRUE), T, F)
# Was a category mentioned? (later in "other")
production$CatMentioned = ifelse(grepl("fruit|fru7t|veg|veggi|veggie|vegetable", production$CleanedResponse, ignore.case = TRUE), T, F)
# Was a negation included? (later in "other")
# NOTE(review): "not" also matches inside longer words (e.g. "another")
production$NegationMentioned = ifelse(grepl("not|isnt|arent|isn't|aren't|non", production$CleanedResponse, ignore.case = TRUE), T, F)
# Were more abstract color modifiers used? (later in "other")
production$ColorModifierMentioned = ifelse(grepl("normal|abnormal|healthy|dying|natural|regular|funky|rotten|noraml|norm", production$CleanedResponse, ignore.case = TRUE), T, F)
# Were descriptions included? (later in "other")
production$DescriptionMentioned = ifelse(grepl("like|round|sauce|long|rough|grass|doc|bunnies|bunny|same|stem|inside|ground|with|smile|monkey|sphere|board", production$CleanedResponse, ignore.case = TRUE), T, F)
# Only differentiate between color mention, type mention and expressions including other utterances
production$Other = ifelse(production$CatMentioned | production$NegationMentioned | production$ColorModifierMentioned | production$DescriptionMentioned, T, F)
# Summarize utterance types
# Pure color / pure type / color+type, with anything "other" taking priority
production$UtteranceType = as.factor(
  ifelse(production$ItemMentioned & production$ColorMentioned & !production$Other, "color_and_type",
         ifelse(production$ColorMentioned & !production$ItemMentioned & !production$Other, "color",
                ifelse(production$ItemMentioned & !production$ColorMentioned & !production$Other, "type",
                       "OTHER"))))
# Indicator columns for the three main utterance types
production$Color = ifelse(production$UtteranceType == "color",1,0)
production$ColorAndType = ifelse(production$UtteranceType == "color_and_type",1,0)
production$Type = ifelse(production$UtteranceType == "type",1,0)
# Add correct distractor names
# Read lexicon which decodes the type of a color competitor for a certain target
dists = read.csv(here("analysis","helper_scripts","distractors_exp2.csv"))
# Index the lexicon by target name for direct row lookup below
row.names(dists) = dists$target
# Was there a color competitor in distractor one or two?
# (color competitors are encoded with a "distractor_" placeholder name)
production$dDist1 = grepl("distractor_",production$alt1Name)
production$dDist2 = grepl("distractor_",production$alt2Name)
# If so, replace it by the distractor object found in the lexicon
production$Dist1 = as.character(production$alt1Name)
production$Dist2 = as.character(production$alt2Name)
production$Dist1 = ifelse(production$dDist1, as.character(dists[production$nameClickedObj,]$distractor), production$Dist1)
production$Dist2 = ifelse(production$dDist2, as.character(dists[production$nameClickedObj,]$distractor), production$Dist2)
# Make distractor's color and type easily accessible
# (names are "type_color", so element 1 = type, element 2 = color)
production$Dist1Color = sapply(strsplit(as.character(production$Dist1),"_"), "[", 2)
production$Dist1Type = sapply(strsplit(as.character(production$Dist1),"_"), "[", 1)
production$Dist2Color = sapply(strsplit(as.character(production$Dist2),"_"), "[", 2)
production$Dist2Type = sapply(strsplit(as.character(production$Dist2),"_"), "[", 1)
# BDA data preparation
# Create utterances for bda
# The BDA response is the color label, the type label, or "color_type";
# anything else stays "other" and is dropped below.
production$UttforBDA = "other"
production[production$Color == 1,]$UttforBDA = as.character(production[production$Color == 1,]$clickedColor)
production[production$Type == 1,]$UttforBDA = as.character(production[production$Type == 1,]$clickedType)
production[production$ColorAndType == 1,]$UttforBDA = paste(as.character(production[production$ColorAndType == 1,]$clickedColor),as.character(production[production$ColorAndType == 1,]$clickedType),sep="_")
production$Target = paste(production$clickedColor, production$clickedType, sep="_")
# Write unique conditions for bda
bda_df = droplevels(production[production$UttforBDA != "other",])
# Distractors are sorted alphabetically to detect identical contexts for unique_conditions
bda_df$DistractorCombo = as.factor(ifelse(as.character(bda_df$Dist1) < as.character(bda_df$Dist2), paste(bda_df$Dist1, bda_df$Dist2), paste(bda_df$Dist2, bda_df$Dist1)))
bda_df$BDADist1 = sapply(strsplit(as.character(bda_df$DistractorCombo)," "), "[", 1)
bda_df$BDADist2 = sapply(strsplit(as.character(bda_df$DistractorCombo)," "), "[", 2)
# Split each "type_color" distractor back into its color and item parts
bda_df$d1_color = sapply(strsplit(as.character(bda_df$BDADist1),"_"), "[", 2)
bda_df$d1_item = sapply(strsplit(as.character(bda_df$BDADist1),"_"), "[", 1)
bda_df$d2_color = sapply(strsplit(as.character(bda_df$BDADist2),"_"), "[", 2)
bda_df$d2_item = sapply(strsplit(as.character(bda_df$BDADist2),"_"), "[", 1)
# Rename to the column names the BDA model expects
bda_df <- bda_df %>%
  rename(t_color = clickedColor,
         t_item = clickedType,
         response = UttforBDA,
         condition = context)
# Write Bayesian data analysis files (data and unique conditions)
write.table(unique(bda_df[,c("condition","t_color","t_item","d1_color","d1_item","d2_color","d2_item")]),file=here("models","bdaInput","typicality","unique_conditions.csv"),sep=",",row.names=F,quote=F)
write.table(bda_df[,c("condition","t_color","t_item","d1_color","d1_item","d2_color","d2_item","response")],file=here("models","bdaInput","typicality","bda_data.csv"),sep=",",row.names=F,quote=F)
# Construct meanings json for BDA
# Builds, per utterance, a JSON object mapping "color_item" objects to their
# typicality, then wraps everything in one top-level JSON object.
typicality_data = read.table(file=here("data","typicality_exp2.csv"),sep=",", header=T) %>%
  unite(object, Color, Item) %>%
  mutate(utterance = ifelse(UtteranceType =='color-and-type',
                            str_replace(Utterance," ","_"),
                            as.character(Utterance))) %>%
  rename(typicality = Typicality) %>%
  rowwise() %>%
  mutate(output = paste0('\"', object, '\" : ', typicality)) %>%
  group_by(utterance) %>%
  summarize(output = paste(output, collapse = ",\n ")) %>%
  mutate(output = paste0('{\n ', output, '}')) %>%
  summarize(output = paste0('\"', utterance, '\" : ', output, collapse = ',\n ')) %>%
  mutate(output = paste0('{\n ', output, '}'))
write_file(typicality_data$output,
           path=here("models","refModule","json","typicality-meanings.json"))
# Write file for regression analysis and visualization
# Note: Dist1/Dist2 are rebuilt here as "color_type" (color first), unlike
# the "type_color" raw names used above.
production$Dist1 = paste(production$Dist1Color, production$Dist1Type, sep="_")
production$Dist2 = paste(production$Dist2Color, production$Dist2Type, sep="_")
production$Item = production$clickedType
production$TargetColor = production$clickedColor
preproc_file = production[,c("gameid","context","NormedTypicality","UtteranceType","Color","ColorAndType","Type","ColorMentioned","ItemMentioned","Other","Item","TargetColor","Target","Dist1","Dist2","refExp","UttforBDA")]
write.table(preproc_file,file=here("data","data_exp2.csv"),sep="\t",row.names=F,quote=F)
##########################################################
## Supplementary preprocessing: analyze MTurk meta-data ##
##########################################################
# Turker comments
# FIX: use a here()-anchored path like the rest of this script; the previous
# "../data/..." path only worked relative to the script's own directory.
comments = read.table(file=here("data","raw","production_exp2","mturk_data_exp2.csv"),sep=",", header=T, quote="")
unique(comments$comments)
# Partner rating
ggplot(comments, aes(ratePartner)) +
  geom_histogram(stat='count')
# Did they think their partner was a human?
ggplot(comments, aes(thinksHuman)) +
  geom_histogram(stat='count')
prop.table(table(comments$thinksHuman))
table(comments$thinksHuman)
# Native language
ggplot(comments, aes(nativeEnglish)) +
  geom_histogram(stat='count')
# Total length of experiment (recorded in milliseconds)
ggplot(comments, aes(totalLength)) +
  geom_histogram()
# Convert milliseconds to minutes
comments$lengthInMinutes = (comments$totalLength/1000)/60
summary(comments$lengthInMinutes)
|
e6901f920fa622551ef769f9f0bfaabfe36d3587 | cc4f45030d429982d7f1f060a64a12b21bfb557d | /R/construct-hospital-hhi.R | f293c8e5e6c4996f18dad0f3a70e967f4bbb16bc | [
"MIT"
] | permissive | anobel/health-care-markets | ec96238a6ed587253ab2a45b49541e5b06a77519 | 6fdbd9f6a15f8b05a01b8b5e162b239c614b294d | refs/heads/master | 2020-04-21T05:33:49.869901 | 2019-02-05T15:54:30 | 2019-02-05T15:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,450 | r | construct-hospital-hhi.R | #' ---
#' output: github_document
#' ---
# The objective of this file is to make comparisons of Herfindahl-Hirschman
# Indexes (HHIs) across alternative geographic market definitions.
# To Do:
# [] TK NOTE THE COUNTY to RATING AREA MAPPING WILL MISS LA COUNTY AS WELL AS ALL COUNTIES IN NE, AK, AND MA.
#    Need to use geographic mapping code to assign those rating areas to counties.
# Load project helpers. The sourced files provide (presumably) package loading
# and paths (manifest.R), plot themes, shared objects (e.g., `states`), and the
# estimate_hhi()/estimate_market_share()/get_market_from_xy() helpers used below.
suppressWarnings(suppressMessages(source(here::here("/R/manifest.R"))))
source(here("R/map-theme.R"))
source(here("R/shared-objects.R"))
source(here("R/get-market-from-x-y-coordinates.R"))
source(here("R/estimate_hhi.R"))
# Load Shapefiles for each candidate market geography, all reprojected to
# WGS84 (EPSG:4326) so the point-in-polygon lookups below share one CRS.
sf_hrr <- read_sf(here("output/tidy-mapping-files/hrr/01_hrr-shape-file.shp")) %>%
  st_transform(crs = 4326)
sf_cz <- read_sf(here("output/tidy-mapping-files/commuting-zone/01_commuting-zone-shape-file.shp")) %>%
  st_transform(crs = 4326)
sf_ra <- read_sf(here("output/tidy-mapping-files/rating-area/01_rating-area-shape-file.shp")) %>%
  st_transform(crs = 4326) %>%
  mutate(rating_area = ratng_r)  # expose the truncated shapefile column under a readable name
sf_state <- read_sf(here("output/tidy-mapping-files/state/01_state-shape-file.shp")) %>%
  st_transform(crs = 4326)
sf_county <- read_sf(here("output/tidy-mapping-files/county/01_county-shape-file.shp")) %>%
  st_transform(crs = 4326)
# Map General Acute Care Hospitals to Markets (cached to RDS; this block is
# only rebuilt when the output file is missing).
if (!file.exists(here("output/market-comparisons/01_aha-markets-2017.rds"))) {
  # Get latitude and longitude of general acute care hospitals in 2017 AHA
  # survey (serv==10 restricts the service type; mstate %in% states keeps the
  # states defined in the shared objects).
  aha <- data.table::fread(here("../../../box/Research-AHA_Data/data/aha/annual/raw/2017/FY2017 ASDB/COMMA/ASPUB17.CSV")) %>%
    janitor::clean_names() %>%
    filter(mstate %in% states) %>%
    # Hospitals in a system share a "SYS_"-prefixed id; independents keep their own id.
    mutate(system_id = ifelse(!is.na(sysid),paste0("SYS_",sysid),id)) %>%
    filter(serv==10) %>%
    select(mname, id, mcrnum , latitude = lat, longitude = long, hrrnum = hrrcode, hsanum = hsacode, admtot, system_id, mloczip, sysname,fips_code=fcounty) %>%
    mutate(prvnumgrp = mcrnum) %>%
    mutate(hosp_zip_code = str_sub(mloczip,1,5))  # first 5 digits of the location ZIP
  # Assign each hospital to its marketplace rating area. Note I have to do it
  # this way as a handful of hospitals do not map within a rating area (oddly).
  aha_rating_area <-
    aha %>%
    get_market_from_xy(df = .,
                       x = longitude,
                       y = latitude,
                       sf = sf_ra,
                       market_id = rating_area)
  # Flatten the lookup result into an id -> rating_area data frame.
  df_aha_rating_area <-
    aha_rating_area %>%
    set_names(aha$id) %>%
    unlist() %>%
    data.frame() %>%
    rownames_to_column() %>%
    set_names(c("id","rating_area"))
  # Assign each AHA hospital to its commuting zone (same flatten pattern).
  aha_cz <-
    aha %>%
    get_market_from_xy(df = .,
                       x = longitude,
                       y = latitude,
                       sf = sf_cz,
                       market_id = cz_id)
  df_cz <-
    aha_cz %>%
    set_names(aha$id) %>%
    unlist() %>%
    data.frame() %>%
    rownames_to_column() %>%
    set_names(c("id","cz_id"))
  # One row per hospital with all market assignments attached; cache to disk.
  aha_markets <-
    aha %>%
    left_join(df_aha_rating_area,"id") %>%
    left_join(df_cz,"id")
  write_rds(aha_markets,path=here("output/market-comparisons/01_aha-markets-2017.rds"))
}
# Hospital Distances: Average Distance Traveled to Hospital
# NOTE(review): on a fresh run the if-branch only sources the builder script;
# it is assumed that script leaves df_hosp_zip_dist defined — confirm,
# otherwise the left_join below fails.
if (!file.exists(here("output/market-comparisons/01_zip-hospital-distances.rds"))) {
  source(here("R/calculate-zip-hospital-distances.R"))
} else {
  df_hosp_zip_dist <- read_rds(here("output/market-comparisons/01_zip-hospital-distances.rds"))
}
# Load the hospital-zip service file constructed in "R/read-and-tidy-cms-hospital-service-areas.R")
df_hosp_zip <- read_rds(here("output/hospital-county-patient-data/2017/hospital-zip-patient-data.rds")) %>%
  left_join(df_hosp_zip_dist,"prvnumgrp")
# We use an alternative patient count number based on the total_cases variable from the hospital-zip file
# in our exploration of Simpson's paradox below. This ensures that the aggregate market HHIs are based
# on the same underlying patient count measure (i.e., not admission totals from AHA)
df_ffs_cases <- df_hosp_zip %>%
  select(prvnumgrp,total_cases) %>%
  group_by(prvnumgrp) %>%
  summarise(ffs_total_cases = sum(total_cases, na.rm=TRUE))
# inner_join drops hospitals without any case counts in the hospital-zip file.
aha_markets <- read_rds(here("output/market-comparisons/01_aha-markets-2017.rds")) %>%
  inner_join(df_ffs_cases,"prvnumgrp")
# Construct Market-Level HHI Measures.
# Each hhi_* object has one row per market with two HHIs: one weighted by
# ffs_total_cases (the hospital-zip case counts) and one weighted by AHA
# total admissions (admtot).
hhi_rating_area <-
  aha_markets %>%
  estimate_hhi(id = system_id,
               weight = ffs_total_cases,
               market = rating_area) %>%
  rename(hhi_rating_area = hhi,
         total_weight_rating_area = total_weight) %>%
  left_join(
    aha_markets %>%
      estimate_hhi(id = system_id,
                   weight = admtot,
                   market = rating_area) %>%
      rename(hhi_rating_area_admtot = hhi,
             total_weight_rating_area_admtot = total_weight) ,
    "rating_area"
  )
hhi_hrr <-
  aha_markets %>%
  estimate_hhi(id = system_id,
               weight = ffs_total_cases,
               market = hrrnum) %>%
  rename(hhi_hrr = hhi,
         total_weight_hrr= total_weight) %>%
  left_join(
    aha_markets %>%
      estimate_hhi(id = system_id,
                   weight = admtot,
                   market = hrrnum) %>%
      rename(hhi_hrr_admtot = hhi,
             total_weight_hrr_admtot = total_weight) ,
    "hrrnum"
  )
# System-level market shares within each HRR; a system's display name falls
# back to a member hospital name (mname) when sysname is blank.
ms_hrr <-
  aha_markets %>%
  estimate_market_share(id = system_id,
                        weight = ffs_total_cases,
                        market = hrrnum) %>%
  arrange(hrrnum,desc(market_share))%>%
  left_join(aha_markets %>% select(system_id,sysname) %>% unique(), "system_id") %>%
  left_join(aha_markets %>% filter(sysname=="") %>% select(system_id,mname),"system_id") %>%
  mutate(sysname = ifelse(sysname=="",NA,sysname)) %>%
  mutate(name = coalesce(sysname,mname)) %>%
  select(hrrnum,name,market_share, hhi, everything())
ms_hrr %>% write_rds(path = here("output/market-comparisons/01_2017_hrr-market-shares.rds"))
# Same two-weight HHI construction at the commuting-zone level.
hhi_cz <-
  aha_markets %>%
  estimate_hhi(id = system_id,
               weight = ffs_total_cases,
               market = cz_id) %>%
  rename(hhi_cz = hhi,
         total_weight_cz = total_weight) %>%
  left_join(
    aha_markets %>%
      estimate_hhi(id = system_id,
                   weight = admtot,
                   market = cz_id) %>%
      rename(hhi_cz_admtot = hhi,
             total_weight_cz_admtot = total_weight) ,
    "cz_id"
  )
ms_cz <-
  aha_markets %>%
  estimate_market_share(id = system_id,
                        weight = ffs_total_cases,
                        market = cz_id) %>%
  arrange(cz_id,desc(market_share))%>%
  left_join(aha_markets %>% select(system_id,sysname) %>% unique(), "system_id") %>%
  left_join(aha_markets %>% filter(sysname=="") %>% select(system_id,mname),"system_id") %>%
  mutate(sysname = ifelse(sysname=="",NA,sysname)) %>%
  mutate(name = coalesce(sysname,mname)) %>%
  select(cz_id,name,market_share, hhi, everything())
ms_cz %>% write_rds(path = here("output/market-comparisons/01_2017_commuting-zone-market-shares.rds"))
## ZIP and Hospital-Level HHIs
# HHI computed within each patient ZIP code, weighting systems by the cases
# drawn from that ZIP.
zip_hhi <-
  df_hosp_zip %>%
  inner_join(aha_markets,"prvnumgrp") %>%
  estimate_hhi(id = system_id,
               weight = total_cases,
               market = zip_code) %>%
  rename(hhi_zip = hhi)
zip_market_shares <-
  df_hosp_zip %>%
  inner_join(aha_markets,"prvnumgrp") %>%
  estimate_market_share(id = system_id,
                        weight = total_cases,
                        market = zip_code) %>%
  arrange(zip_code,desc(market_share))%>%
  left_join(aha_markets %>% select(system_id,sysname) %>% unique(), "system_id") %>%
  left_join(aha_markets %>% filter(sysname=="") %>% select(system_id,mname),"system_id") %>%
  mutate(sysname = ifelse(sysname=="",NA,sysname)) %>%
  mutate(name = coalesce(sysname,mname)) %>%
  select(zip_code,name,market_share, hhi, everything()) %>%
  filter(zip_code!="00000")  # drop the all-zero placeholder ZIP
zip_market_shares %>%
  write_rds(path = here("output/market-comparisons/01_2017_ZIP-market-shares.rds"))
# Link Market-Level HHI Measures to "COMMON UNIT" (i.e., county) to facilitate comparison
# Crosswalk from county to HRR
county_to_hrr <- read_csv(here("public-data/shape-files/dartmouth-hrr-hsa-pcsa/county-to-hrr-hsa.csv")) %>%
  janitor::clean_names() %>%
  filter(row_number()!=1) %>%  # drop the embedded second header row
  mutate_at(vars(hrr,pop10,afact,afact2), as.numeric) %>%
  rename(fips_code = county) %>%
  # Roll up to HRR level
  select(fips_code,hrr,afact) %>%
  group_by(fips_code,hrr) %>%
  summarise(afact = sum(afact, na.rm=TRUE)) %>%
  arrange(fips_code,desc(afact)) %>%
  group_by(fips_code) %>%
  # Select the HRR with the largest county area in it.
  filter(row_number()==1) %>%
  ungroup() %>%
  select(fips_code,hrrnum = hrr, hrr_afact = afact)
# Crosswalk from county to commuting zone.
county_to_cz <- data.table::fread(here("public-data/shape-files/commuting-zones/counties10-zqvz0r.csv")) %>%
  janitor::clean_names() %>%
  rename(fips_code = fips) %>%
  group_by(out10) %>%
  # NOTE(review): commuting_zone_population_2010 is computed here but then
  # discarded by the select() below.
  mutate(commuting_zone_population_2010 = sum(pop10, na.rm=TRUE)) %>%
  mutate(fips_code = str_pad(paste0(fips_code),width = 5, pad="0")) %>%  # zero-pad FIPS to 5 chars
  select(fips_code,
         cz_id = out10)
# Crosswalk from county to rating area
# !!!!!! TK NOTE THIS WILL MISS LA COUNTY AS WELL AS ALL COUNTIES IN NE, AK, AND MA.
# Need to use geographic mapping code to assign those rating areas to counties.
county_to_rating_area <-
  read_rds(here("output/geographic-crosswalks/01_rating-areas_counties_2019.rds")) %>%
  data.frame() %>%
  unique() %>%
  select(fips_code,rating_area)
# Need to aggregate ZIP level data up to county
zip_to_county <- read_csv(here("public-data/zcta-to-fips-county/zcta-to-fips-county.csv")) %>%
  janitor::clean_names() %>%
  filter(row_number() !=1) %>%  # drop the embedded second header row
  mutate(fips_code = county) %>%
  select(zip_code = zcta5, fips_code,afact) %>%
  mutate(afact = as.numeric(paste0(afact)))
zip_to_hrr <- read_csv(here("public-data/shape-files/dartmouth-hrr-hsa-pcsa/zcta-to-hrr-hsa.csv")) %>%
  janitor::clean_names() %>%
  filter(row_number() !=1) %>%
  select(zip_code = zcta5, hrrnum = hrr, afact) %>%
  mutate_at(vars(hrrnum,afact),function(x) as.numeric(paste0(x)))
# "Patient flow" HHIs: average the ZIP-level HHIs up to each geography,
# weighting each ZIP by afact * total_weight.
zip_hhi_aggregated_to_county <-
  zip_hhi %>%
  inner_join(zip_to_county,"zip_code") %>%
  mutate(weight = afact * total_weight) %>%
  group_by(fips_code) %>%
  summarise(hhi_zip = weighted.mean(hhi_zip,weight,na.rm=TRUE))
zip_hhi_aggregated_to_hrr <-
  zip_hhi %>%
  inner_join(zip_to_hrr,"zip_code") %>%
  mutate(weight = afact * total_weight) %>%
  group_by(hrrnum) %>%
  summarise(hhi_hrr_zip = weighted.mean(hhi_zip,weight,na.rm=TRUE))
zip_hhi_aggregated_to_cz <-
  zip_hhi %>%
  inner_join(zip_to_county,"zip_code") %>%
  mutate(weight = afact * total_weight) %>%
  inner_join(county_to_cz,"fips_code") %>%
  group_by(cz_id) %>%
  summarise(hhi_zip_cz = weighted.mean(hhi_zip,weight,na.rm=TRUE))
# County-level comparison table: each county with its market ids and every
# market definition's HHI columns side by side.
df_county <-
  county_to_cz %>%
  full_join(county_to_hrr,"fips_code") %>%
  full_join(county_to_rating_area,"fips_code") %>%
  left_join(hhi_cz ,"cz_id") %>%
  left_join(hhi_rating_area ,"rating_area") %>%
  left_join(hhi_hrr ,"hrrnum") %>%
  left_join(zip_hhi_aggregated_to_county,"fips_code") %>%
  select(fips_code,hrrnum,cz_id,rating_area,contains("hhi"))
df_county %>% write_rds(here("output/market-comparisons/01_market-comparisons-county.rds"))
# Look at Alternative Definitions at the commuting zone level.
hhi_cz_final <-
  hhi_cz %>%
  left_join(zip_hhi_aggregated_to_cz,"cz_id")
hhi_cz_final %>%
  write_rds(here("output/market-comparisons/01_HHI_genacute_cz.rds"))
hhi_hrr_final <-
  hhi_hrr %>%
  left_join(zip_hhi_aggregated_to_hrr,"hrrnum")
hhi_hrr_final %>%
  write_rds(here("output/market-comparisons/01_HHI_genacute_hrr.rds"))
####################
#### Construct Maps
####################
# Four-state comparison maps of geographic-location vs patient-flow HHIs.
states_to_map <- c("KY","TN","VA","NC")
#
# sf_hrr %>%
#   left_join(hhi_hrr,"hrrnum") %>%
#   filter(hrrstate %in% states_to_map) %>%
#   ggplot() +
#   geom_sf(aes(fill = hhi_hrr)) +
#   scale_fill_gradient2(low = scales::muted("blue"),mid = "white",high = scales::muted("red"),midpoint = 2500,limits = c(0,10000)) +
#   theme(legend.position = "bottom") +
#   geom_sf(data = sf_state %>% filter(stusps %in% states_to_map), alpha = 0,lwd=.7,colour = "black") +
#   coord_sf(datum=NA) +
#   remove_all_axes +
#   ggtitle("Hospital Referral Regions") +
#   ggthemes::theme_tufte(base_family = "Gill Sans")
# ggsave( filename = here("figs/01_HHI_hrr.png"),dpi = 300, scale =1)
#
#
# sf_cz %>%
#   left_join(hhi_cz,"cz_id") %>%
#   filter(state_01 %in% states_to_map | state_02 %in% states_to_map | state_03 %in% states_to_map) %>%
#   ggplot() +
#   geom_sf(aes(fill = hhi_cz)) +
#   scale_fill_gradient2(low = scales::muted("blue"),mid = "white",high = scales::muted("red"),midpoint = 2500,limits = c(0,10000)) +
#   #theme(legend.position = "bottom") +
#   geom_sf(data = sf_state %>% filter(stusps %in% states_to_map), alpha = 0,lwd=.7,colour = "black") +
#   coord_sf(datum=NA) +
#   remove_all_axes +
#   ggtitle("Commuting Zones") +
#   ggthemes::theme_tufte(base_family = "Gill Sans")
# ggsave(filename = here("figs/01_HHI_commuting-zones.png"),dpi = 300, scale =1)
# ZIP LEVEL MEASURES
# p1/p2: commuting zones, geographic-location vs patient-flow HHI, on a
# diverging fill centered at 2500 with a common 0-10000 scale.
p1 = sf_cz %>%
  left_join(hhi_cz,"cz_id") %>%
  filter(state_01 %in% states_to_map | state_02 %in% states_to_map | state_03 %in% states_to_map) %>%
  ggplot() +
  geom_sf(aes(fill = hhi_cz)) +
  scale_fill_gradient2(low = scales::muted("blue"),mid = "white",high = scales::muted("red"),midpoint = 2500,limits = c(0,10000)) +
  #theme(legend.position = "bottom") +
  geom_sf(data = sf_state %>% filter(stusps %in% states_to_map), alpha = 0,lwd=.7,colour = "black") +
  coord_sf(datum=NA) +
  remove_all_axes +
  ggtitle("Commuting Zones\n(Geographic Location Method)") +
  ggthemes::theme_tufte(base_family = "Gill Sans")
p2 = sf_cz %>%
  left_join(zip_hhi_aggregated_to_cz ,"cz_id") %>%
  filter(state_01 %in% states_to_map | state_02 %in% states_to_map | state_03 %in% states_to_map) %>%
  ggplot() +
  geom_sf(aes(fill = hhi_zip_cz)) +
  scale_fill_gradient2(low = scales::muted("blue"),mid = "white",high = scales::muted("red"),midpoint = 2500,limits = c(0,10000)) +
  #theme(legend.position = "bottom") +
  geom_sf(data = sf_state %>% filter(stusps %in% states_to_map), alpha = 0,lwd=.7,colour = "black") +
  coord_sf(datum=NA) +
  remove_all_axes +
  ggtitle("Commuting Zones\n(Patient Flow Method)") +
  ggthemes::theme_tufte(base_family = "Gill Sans")
p1 + p2 + plot_layout(ncol=1)  # patchwork-style composition
ggsave(filename = here("figs/01_HHI_commuting-zones.png"),dpi = 300, scale =1,width = 6, height=12)
# Same pair of maps for hospital referral regions.
p1_hrr = sf_hrr %>%
  left_join(hhi_hrr,"hrrnum") %>%
  filter(hrrstate %in% states_to_map) %>%
  ggplot() +
  geom_sf(aes(fill = hhi_hrr)) +
  scale_fill_gradient2(low = scales::muted("blue"),mid = "white",high = scales::muted("red"),midpoint = 2500,limits = c(0,10000)) +
  #theme(legend.position = "bottom") +
  geom_sf(data = sf_state %>% filter(stusps %in% states_to_map), alpha = 0,lwd=.7,colour = "black") +
  coord_sf(datum=NA) +
  remove_all_axes +
  ggtitle("Hospital Referral Region\n(Geographic Location Method)") +
  ggthemes::theme_tufte(base_family = "Gill Sans")
p2_hrr = sf_hrr %>%
  left_join(hhi_hrr_final,"hrrnum") %>%
  filter(hrrstate %in% states_to_map) %>%
  ggplot() +
  geom_sf(aes(fill = hhi_hrr_zip))+
  scale_fill_gradient2(low = scales::muted("blue"),mid = "white",high = scales::muted("red"),midpoint = 2500,limits = c(0,10000)) +
  #theme(legend.position = "bottom") +
  geom_sf(data = sf_state %>% filter(stusps %in% states_to_map), alpha = 0,lwd=.7,colour = "black") +
  coord_sf(datum=NA) +
  remove_all_axes +
  ggtitle("Hospital Referral Region\n(Patient Flow Method)") +
  ggthemes::theme_tufte(base_family = "Gill Sans")
p1_hrr + p2_hrr + plot_layout(ncol=1)
ggsave(filename = here("figs/01_HHI_hrr.png"),dpi = 300, scale =1,width = 6, height=12)
# 2x2 panel combining both geographies and both methods.
p1 + p1_hrr + p2 + p2_hrr + plot_layout(ncol=2,nrow=2)
ggsave(filename = here("figs/01_HHI_geo-location-vs-pop-flow.png"),dpi = 300, scale =1,width = 12, height=12)
|
7bceda6f763cef21b6ccfb00596db9ebed083df9 | 01268eb0bf732962393a588fa474c97ee026230a | /man/validate_table_names.Rd | b6a69ffdbdd66176f69e44712c8c8def2eed6d9e | [
"CC0-1.0"
] | permissive | karlbenedict/ecocomDP | 6466fcfed490f138873d7275807b083341852a4a | 0dcd2b1d00ebcf5c0a63ff88ed2589bfffe59e66 | refs/heads/master | 2020-03-25T16:23:30.026001 | 2018-07-16T20:55:23 | 2018-07-16T20:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,403 | rd | validate_table_names.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_validation_criteria.R,
% R/validate_table_names.R
\name{validate_table_names}
\alias{load_validation_criteria}
\alias{validate_table_names}
\title{Load validation criteria}
\usage{
load_validation_criteria(data.path)
validate_table_names(data.path, criteria)
}
\arguments{
\item{data.path}{A character string specifying the path to the directory containing L1
tables.}
\item{criteria}{A data frame of the validation criteria located in
/inst/validation_criteria.txt.}
}
\value{
A validation report printed in the RStudio console window.
}
\description{
Load criteria against which user supplied ecocomDP tables are validated.
This function ensures that your ecocomDP (L1) tables follow the
table naming convention (i.e. \emph{studyName_ecocomDPTableName.ext},
e.g. \emph{gleon_chloride_observation.csv}).
}
\details{
This function loads and parses validation criteria from
\emph{validation_criteria.txt} located in the \emph{inst} directory of
the \code{ecocomDP} package.
The full suite of L1 validation checks are performed by the
\code{validate_ecocomDP} function. The sequence of checks performed in
\code{validate_ecocomDP} are not random; rather, some checks are dependent
upon others.
}
|
13b287422990d4a1b8024b394df2145a7bf79b84 | 74923b9335356d7ddea1264932bad0d4851181a9 | /R/tagtools/man/sens_struct.Rd | 2b9b9ea96620d0502e21b85dc72da12dc1be77f5 | [] | no_license | FlukeAndFeather/TagTools | 29e491898266bf82edf436674beb7d82f78724c2 | a6f3fd4eab0ddaef79cc9f1718f2801b30c67971 | refs/heads/master | 2020-08-04T03:07:00.111174 | 2019-07-17T18:37:20 | 2019-07-17T18:37:20 | 211,981,790 | 1 | 0 | null | 2019-10-01T00:15:49 | 2019-10-01T00:15:49 | null | UTF-8 | R | false | true | 2,165 | rd | sens_struct.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sens_struct.R
\name{sens_struct}
\alias{sens_struct}
\title{Generate a sensor structure from a sensor data vector or matrix.}
\usage{
sens_struct(data, fs = NULL, T = NULL, depid, type, unit = NULL,
frame = NULL, name = NULL, start_offset = 0,
start_offset_units = "second")
}
\arguments{
\item{data}{sensor data vector or matrix}
\item{fs}{(optional) sensor data sampling rate in Hz}
\item{T}{(optional) is the time in seconds of each measurement in data for irregularly sampled data. The time reference (i.e., the 0 time) should be with respect to the start time of the deployment.}
\item{depid}{string that provides a unique identifier for this tag deployment}
\item{type}{is a string containing the first few letters of the sensor type,
e.g., acc for acceleration. These will be matched to the list of
sensor names in the sensor_names.csv file. If more than one sensor
matches type, a warning will be given. type can be in upper or lower case.}
\item{unit}{(optional) units in which data are sampled. Default determined by matching \code{type} with defaults in sensor_names.csv}
\item{frame}{(optional) frame of reference for data axes, for example 'animal' or 'tag'. Default determined by matching \code{type} with defaults in sensor_names.csv.}
\item{name}{(optional) "full name" to assign to the variable. Default determined by matching \code{type} to defaults in sensor_names.csv.}
\item{start_offset}{(optional) offset in start time for this sensor relative to start of tag recording. Defaults to 0.}
\item{start_offset_units}{(optional) units of start_offset. default is 'second'.}
}
\value{
A sensor list with field \code{data} containing the data and with metadata fields pre-populated from the sensor_names.csv file. Change these manually as needed (or specify the relevant inputs to \code{sens_struct}) to the correct values.
}
\description{
Generate a sensor structure from a sensor data vector or matrix.
}
\examples{
\dontrun{
#example will only work if data matrix Aw is in your workspace.
#A <- sens_struct(data=Aw,fs=fs,depid='md13_134a', type='acc')}
}
|
40b76fd00092101f4fb7ac884b58b0fb910ffe1b | 3592992affcd4c2a766464be007060e5fc7e7eee | /SeedBankInfo.R | 40187c22c6c03f5588b327fa64db18a4e3e509c5 | [] | no_license | mathgenome/SeedBankInfo | 56ecf629b0b47113a3a9d92d132ffd52842e872e | f166a656bfc26fd705d8db64eef609626a1f2a68 | refs/heads/master | 2020-07-06T10:17:21.447909 | 2018-02-16T23:33:05 | 2018-02-16T23:33:05 | 74,046,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,470 | r | SeedBankInfo.R | #Project:
#Accession rareness, allele specificity and core set selection with an information theory approach: application to wheat marker data
#Theory is in publication process
########
#Functions to calculate entropy
MyLog2p<-function(x){if(x==0) 0 else x*log(x,2)} #Defining x logx
entropy<-function(x){-sum(sapply(x,MyLog2p))} #x is a vector of probabilities
#######
# Basic function of accession rareness and allele specificity.
# NAs allowed.
#
# Arguments:
#   x      - matrix/data.frame of allele frequencies: alleles in rows,
#            populations (accessions) in columns. Expected to be a data.frame,
#            since names(x) below supplies the population labels.
#   nlocus - number of loci spanning those alleles (normalizes rareness).
#
# Returns a list with:
#   specificity - per-allele specificity (negative scaled entropy of the
#                 allele's distribution across accessions)
#   rareness    - per-accession rareness (specificity-weighted allele sum / nlocus)
#   table       - data frame pairing each population name with its rareness
rare<-function(x,nlocus){
nalelos<-length(x[,1]);   # number of alleles (rows)
nvar<-length(x[1,]);      # number of populations (columns)
#Calculate p.i: mean frequency of each allele across populations (NAs dropped)
a<-NULL;length(a)<-nalelos;for (i in 1:nalelos){a[i]<-mean(as.numeric(as.vector(x[i,])),na.rm=TRUE)};
p.i<-a;
#Calculate specificity: 0 when the allele is absent or undefined; otherwise the
#negative entropy of x[i,]/p.i[i] over the non-NA populations, divided by the
#number of non-NA observations
a<-NULL;length(a)<-nalelos;for (i in 1:nalelos){if(p.i[i]==0||is.na(p.i[i])==TRUE){a[i]<-0}else{m.vec<-x[i,][!is.na(x[i,])];a[i]<--entropy(as.numeric(as.vector(m.vec/p.i[i])))/length(m.vec)}};
specificity<-a
#Calculate rareness: specificity-weighted sum of each population's alleles
a<-NULL;length(a)<-nvar;for(j in 1:nvar){a[j]<-sum(specificity*x[,j],na.rm=TRUE)};
rareness<-a/nlocus #Divide by nlocus because the sum above accumulates
#specificities across all loci
rarenessTab<-data.frame("pop"=names(x),"rareness"=rareness);
result<-list(specificity=specificity,rareness=rareness,table=rarenessTab);
return(result)
}
# ^ ^
# O O
# ( )
# A A
#Humberto Reyes
|
9f1c885bdfbad5ccac057045ccd539dd17d91bac | b67b1035dd4123756e6436dc34919bc90937fcbe | /R/15_estatistica_1/script118_medidas_de_centralidade.R | b221241f8e585313f11419e3bdeee87e110cde14 | [] | no_license | frclasso/Formacao_Cientista_de_Dados_Python_R | d6695b5dcf3d0dffcf4e2e4c66b7d3d37b1eabcb | 6786004827c52b5e2e48495944839ac69076e119 | refs/heads/main | 2023-08-12T17:01:34.498259 | 2021-10-12T17:59:38 | 2021-10-12T17:59:38 | 383,260,177 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 384 | r | script118_medidas_de_centralidade.R | # Medidas de centralidade e variabilidade
# Players' salaries (demo of measures of centrality and variability)
jogadores= c(40000,18000, 12000, 250000,30000,140000,300000, 40000,800000)
# Mean
mean(jogadores)
# Median
median(jogadores)
# Quartiles (0%, 25%, 50%, 75%, 100%)
quartis= quantile(jogadores)
quartis
# Access a single quartile via its index:
# index 4 is the 75% quantile, i.e. the third quartile
quartis[4]
# Standard deviation
sd(jogadores)
# Summary (min, quartiles, mean, max)
summary(jogadores)
1a85892e08cfba2f83add7a3a89947e86177ce89 | ac0c4a863fbd1c0f395c1f84a19a2efb214c284e | /R/ggSmoothScatter.R | 888ce13db7cac9576f664731cb41452ee495786e | [] | no_license | bedapub/ribiosPlot | 6e9b8630ec0fae86046ac9f8c2653d9b439b76ac | 54b1e1a47bc5501ae0d514cf98912d524c7b3782 | refs/heads/master | 2022-11-24T18:55:00.961522 | 2022-11-09T08:03:56 | 2022-11-09T08:03:56 | 253,518,540 | 1 | 1 | null | 2021-04-07T08:02:21 | 2020-04-06T14:15:55 | R | UTF-8 | R | false | false | 2,270 | r | ggSmoothScatter.R | #' Mimicking the graphics::smoothScatter behaviour for GGally::ggpairs
#'
#' @param data Data to be visualized, normally not directly set by the user
#' @param mapping Data mapping, normally not directly set by the user
#' @param colours Colours to be used
#' @param xlim NULL or a vector of two numbers
#' @param ylim NULL or a vector of two numbers
#' @param ... Other parameters passed to stat_density_2d
#'
#' @importFrom ggplot2 ggplot stat_density_2d scale_fill_gradientn geom_vline
#' @importFrom ggplot2 geom_hline aes scale_x_continuous scale_y_continuous stat
#' @note So far the outliers are not plotted, to be done later
ggSmoothScatter <- function(data, mapping,
colours=colorRampPalette(c("white",blues9[5:9],
"black"))(256),
xlim=NULL, ylim=NULL, ...){
p <- ggplot(data = data, mapping = mapping) +
stat_density_2d(aes(fill=stat(density)^0.25, alpha=1),
geom="tile", contour = FALSE, ...) +
scale_fill_gradientn(colours=colours)
if(!is.null(xlim))
p <- p + scale_x_continuous(limits = xlim)
if(!is.null(ylim))
p <- p + scale_y_continuous(limits = ylim)
p
}
#' Mimicking the graphics::smoothScatter behaviour for GGally::ggpairs, with
#' aux lines
#'
#' Same density panel as \code{ggSmoothScatter}, with reference lines added:
#' grey axes through the origin and a red identity (y = x) line.
#'
#' @param data Data to be visualized, normally not directly set by the user
#' @param mapping Data mapping, normally not directly set by the user
#' @param colours Colours to be used
#' @param xlim NULL or a vector of two numbers
#' @param ylim NULL or a vector of two numbers
#' @param ... Other parameters passed to stat_density_2d
ggSmoothScatterWithAux <- function(data, mapping,
                                   colours=colorRampPalette(c("white",blues9[5:9],
                                                              "black"))(256),
                                   xlim=NULL, ylim=NULL, ...) {
  base_plot <- ggSmoothScatter(data=data, mapping=mapping, colours=colours,
                               xlim=xlim, ylim=ylim, ...)
  # Overlay the auxiliary reference lines on the density panel.
  base_plot +
    geom_vline(xintercept=0, col="#999999") +
    geom_hline(yintercept=0, col="#999999") +
    geom_abline(slope=1, intercept=0, col="red")
}
|
59cdee251f9234f974c21271090e2246a04cecfb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ggiraph/examples/dsvg_view.Rd.R | 10656a46782e098ebc1014d047ab2a7f111077a3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | dsvg_view.Rd.R | library(ggiraph)
### Name: dsvg_view
### Title: Run plotting code and view svg in RStudio Viewer or web broswer.
### Aliases: dsvg_view
### ** Examples
## No test:
dsvg_view(plot(1:10))
dsvg_view(hist(rnorm(100)))
## End(No test)
|
b6012a2cf41aa072c211d5af326bde6dd3f419d5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sfsmisc/examples/KSd.Rd.R | 8f6e238c9049f87181526317df7aa73da2846874 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 352 | r | KSd.Rd.R | library(sfsmisc)
### Name: KSd
### Title: Approximate Critical Values for Kolmogorov-Smirnov's D
### Aliases: KSd
### Keywords: distribution
### ** Examples
KSd(90)
KSd(1:9)# now works
op <- par(mfrow=c(2,1))
plot(KSd, 10, 150)# nice
abline(v = c(75,85), col = "gray")
plot(KSd, 79, 81, n = 1001)# *very* tiny discontinuity at 80
par(op)
|
f659472c60b9c1577070a057aca0421282b1c999 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MSnbase/examples/averageMSnSet.Rd.R | 1dcee2943f28362a7e8bfcfa1b25ddd342a021be | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 913 | r | averageMSnSet.Rd.R | library(MSnbase)
### Name: averageMSnSet
### Title: Generate an average 'MSnSet'
### Aliases: averageMSnSet
### ** Examples
library("pRolocdata")
## 3 replicates from Tan et al. 2009
data(tan2009r1)
data(tan2009r2)
data(tan2009r3)
x <- MSnSetList(list(tan2009r1, tan2009r2, tan2009r3))
avg <- averageMSnSet(x)
dim(avg)
head(exprs(avg))
head(fData(avg)$nNA)
head(fData(avg)$disp)
## using the standard deviation as measure of dispersion
avg2 <-averageMSnSet(x, disp = sd)
head(fData(avg2)$disp)
## keep only complete observations, i.e proteins
## that had 0 missing values for all samples
sel <- apply(fData(avg)$nNA, 1 , function(x) all(x == 0))
avg <- avg[sel, ]
disp <- rowMax(fData(avg)$disp)
library("pRoloc")
setStockcol(paste0(getStockcol(), "AA"))
plot2D(avg, cex = 7.7 * disp)
title(main = paste("Dispersion: non-parametric CV",
paste(round(range(disp), 3), collapse = " - ")))
|
a1bb99000589aded76e97b1b441860dafdf9aa51 | 5a345571b8855ed9c95c2a42021f6b9125817830 | /R/src/packages_reshape2.R | e3afcc143c5f64d29e1da29b8e9bbbdbd0e95525 | [] | no_license | HaYongdae/TIL | 6eb9db589b5425bec0ff5afad9c5c003b954dd63 | 175368f4930a3339b9cc823f43ef7d12da8c108b | refs/heads/master | 2021-06-17T11:46:45.485585 | 2021-03-23T08:16:06 | 2021-03-23T08:16:06 | 188,196,967 | 0 | 0 | null | 2019-08-30T04:21:18 | 2019-05-23T08:52:37 | null | UTF-8 | R | false | false | 2,684 | r | packages_reshape2.R | # DataFrame 적용 방법
# Template for applying melt() to a data frame (placeholders, not runnable R —
# kept as a comment so the script parses and sources cleanly):
#   melting_DF <- melt(data, id.var = "id column", measure.vars = "columns to stack")
# Example:
head(airquality)
#   ozone solar.r wind temp month day
# 1    41     190  7.4   67     5   1
# 2    36     118  8.0   72     5   2
# 3    12     149 12.6   74     5   3
# 4    18     313 11.5   62     5   4
# 5    NA      NA 14.3   56     5   5
# 6    28      NA 14.9   66     5   6
install.packages("reshape2")
library(reshape2)
names(airquality) <- tolower(names(airquality))
# Keep month/wind as id variables and stack only the ozone column
melt_test2 <- melt(airquality, id.vars = c("month","wind"), measure.vars = "ozone")
head(melt_test2)
#   month wind variable value
# 1     5  7.4    ozone    41
# 2     5  8.0    ozone    36
# 3     5 12.6    ozone    12
# 4     5 11.5    ozone    18
# 5     5 14.3    ozone    NA
# 6     5 14.9    ozone    28
# Templates for casting a molten data frame back to wide format
# (FIX: placeholders commented out — the original acast template used
# space-separated placeholder words and did not even parse):
#   dcast(data, id columns ~ variable column)
#   acast(data, y axis ~ x axis ~ variable column, aggregation function)
# Undoing a melt, step by step
# ---------------- melt step
install.packages("reshape2")
library(reshape2)
names(airquality) <- tolower(names(airquality))
aq_melt <- melt(airquality, id.vars = c("month","wind"), na.rm = TRUE)
# FIX: this previously printed head(melt_test2) — the object from the earlier
# example — instead of the object just created.
head(aq_melt)
#----------------------------#
# ------------------- dcast step
aq_dcast <- dcast(aq_melt, month + day ~ variable)
head(aq_dcast)
#   month day ozone solar.r wind temp
# 1     5   1    41     190  7.4   67
# 2     5   2    36     118  8.0   72
# 3     5   3    12     149 12.6   74
# 4     5   4    18     313 11.5   62
# 5     5   5    NA      NA 14.3   56
# 6     5   6    28      NA 14.9   66
#----------------------------#
# -------------------- acast step
# y axis = day, x axis = month: returns one table per level of 'variable'
acast(aq_melt, day ~ month ~ variable)
# With an aggregation function: day values are collapsed within each month
acast(aq_melt, month ~ variable, mean)
#      ozone  solar.r      wind     temp
# 5 23.61538 181.2963 11.622581 65.54839
# 6 29.44444 190.1667 10.266667 79.10000
# 7 59.11538 216.4839  8.941935 83.90323
# 8 59.96154 171.8571  8.793548 83.96774
# 9 31.44828 167.4333 10.180000 76.90000
acast(aq_melt, month ~ variable, sum)
#   ozone solar.r  wind temp
# 5   614    4895 360.3 2032
# 6   265    5705 308.0 2373
# 7  1537    6711 277.2 2601
# 8  1559    4812 272.6 2603
# 9   912    5023 305.4 2307
# ------------------------------#
###################################################################
69d01d521149f17a8badd47f2f22ba8626f37f67 | d43f78178d466325732bc88914d307bda7b03eb8 | /Business.R | 16435a0f1c8b8bb1b57fe98f270005b3e24a04c1 | [] | no_license | gabrielpaesb/R-Finance | 5c3a9769b9e4f60fb42a700dde0ee1c2593b9498 | ff143324e959acc6d2934c49b3eda293c4fe960c | refs/heads/master | 2022-11-28T21:04:51.805399 | 2020-08-06T02:19:25 | 2020-08-06T02:19:25 | 285,451,103 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,244 | r | Business.R | 2+2
help(mean)
example(mean)
help(package = "ggplot2")
install.packages(c("stringsr","lubridate"))
library(stringsr)
library(dplyr)
library(lubridate)
aula_r <- 2
aula_r
"XYZ"==toupper("xyz")
hoje<- Sys.Date()
hoje
amanha<- hoje+1; print(amanha)
log(10)
log(10,10)
# Create the first example vectors
idade <- c(19, 21, 24, 31, 33, 45, 50)         # ages (numeric)
nomes <- c("Joao", "Maria", "Jose", "Maria2")  # names (character)
# fix: a stray free-text note sat on the next line in the original file;
# it was not valid R and broke sourcing, so it is preserved as a comment:
#   "Vou precisar apresentar um trabalho se vc quiser pode usar o computador"
idade <- c(19, 21, 24, 31, 33, 45, 50)
sexo <- c("M", "M", "F", "F", "M", "F", "M")
aluno_econ <- c(TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE)
# class() of each vector: "numeric", "character", "logical"
classes <- c(class(idade), class(sexo), class(aluno_econ))
classes
# Coercion: mixing numbers and strings in c() yields a character vector
v1 <- c(1, 2, 3)
v2 <- c("a", "b", "c")
v3 <- c(v1, v2)
v3
# Basic descriptive statistics on a numeric vector
x <- c(0,1,1,2,3,5,8,13,21,34)
mean(x)
median(x)
var(x)
sd(x)
summary(x)
length(x)
min(x)
max(x)
sum(x)
2 * x
# NA handling: aggregates return NA unless na.rm = TRUE is given
w <- c(21, 34, NA, 17, 25)
mean(w)
mean(w, na.rm = TRUE)
sd(w, na.rm = T)
class(NA)
# Sorting, ascending and descending
idade <- c(22, 21, 37, 26, 40, 33, 27)
idade
idade_cres <- sort(idade)
idade_cres
idade_decres <- sort(idade, decreasing = TRUE)
idade_decres
# Sequences, built-in letter vectors, and repetition
x <- 1:7
x
y <- seq(from = 1, to = 10, by = 2)
y
id <- LETTERS[1:5]
id
id <- letters[1:5]
id
j <- rep(2, times = 5)
j
receita <- c(43.92, 45.40, 48.27, 51.87, 56.52)
receita
# A small example data frame of firms (revenue, cost, SME flag)
firmas_pb <- data.frame(
cod_firma = c(14532, 72475, 63896, 52749, 52290),
nome_firma = c("Firma A", "Firma B", "Firma C", "Firma D", "Firma E"),
receita = c(18.01, 21.41, 18.18, 100.69, 50.15),
custo = c(12.34, 11.23, 9.67, 77.21, 23.67),
pme = c(1, 0, 1, 0, 0)
)
firmas_pb
# Working directory and file import.
# NOTE(review): the absolute Windows path makes this script non-portable.
getwd()
setwd("C:/Users/gabri/Documents/R/R Business/minicurso-r-master/Aula_1")
dir()
Estoque<- read.table("estoque.txt", sep = "",dec = ".", encoding = "UTF-8", header= T )
################################################################################################
# tidyr / tibble basics
install.packages("tidyr")
install.packages("dplyr")
install.packages("tibble")
install.packages("devtools")
devtools::install_github("abjur/abjData")
library(tidyr)
library(dplyr)
library(tibble)
library(abjData)

# Using gather(): wide -> long (one row per country-year)
tabela_1 <- tibble(pais = c("Afeganistão", "Brasil", "China"),
                   "1999" = c(745, 37737, 212258),
                   "2000" = c(2666, 80488, 213766))
tabela_1
tidy_1 <- gather(tabela_1, key = "ano", value = "casos", "1999", "2000")
tidy_1

# Using spread(): long -> wide.
# fix: the country name is spelled consistently ("Afeganistão"); the
# original mixed "Afeganistcao" and "Afeganiscao", which spread() treats
# as two different countries and therefore produces extra rows.
tabela_2 <- tibble(pais = c("Afeganistão", "Afeganistão",
                            "Afeganistão", "Afeganistão",
                            "Brasil", "Brasil",
                            "Brasil", "Brasil"),
                   ano = c(1999, 1999, 2000, 2000,
                           1999, 1999, 2000, 2000),
                   tipo = c("casos", "populacao","casos", "populacao",
                            "casos", "populacao","casos", "populacao"),
                   contagem = c(745, 19987071, 2666, 20595360,
                                37737, 172006362, 80488, 174504898))
tabela_2
tidy_2 <- spread(tabela_2, key = "tipo", value = "contagem")
tidy_2

# Load the client / contract data used in the join examples below
getwd()
setwd("C:/Users/gabri/Documents/R/R Business/minicurso-r-master/Aula_2/Dados")
cliente<- read.csv("C:/Users/gabri/Documents/R/R Business/minicurso-r-master/Aula_2/Dados/cliente.csv")
contratos<- read.csv("C:/Users/gabri/Documents/R/R Business/minicurso-r-master/Aula_2/Dados/contrato.csv")
cliente
# NOTE(review): key = "homem" looks suspicious -- the key column of a
# gather() is usually the new category column (e.g. "genero") and the
# value the measurement; verify against the cliente.csv schema.
tidy_3 <- gather(cliente, key= "homem", value= "genero" , "mulher","homem")
# Employees table: id, name, age, state (uf), and role id (id.cargo)
id.emp <- 1:11
# fix: 'PatrÃcia' was UTF-8 "Patrícia" mangled through a Latin-1 round trip
nome.emp <- c('Renato', 'Miguel', 'Paulo', 'Patrícia', 'Inês', 'Saulo', 'Diego', 'Maria', 'Jose',
              'Julia', 'Tiago')
idade <- c(30, 31, 29, 30, 25, 30, 30, 35, 24, 31, 29)
uf <- c('MG', 'DF', 'CE', 'DF', 'DF', 'DF', 'RJ', 'SP', 'RS', 'SC', 'BA')
id.cargo <- c(4, 4, 4, 4, 5, 4, 6, 3, 1, 2, 8)
Empregados <- data.frame(id.emp, nome.emp, idade, uf, id.cargo)
Empregados
# Cargos (roles) lookup table: ids 1..7 with role name and salary
id.cargo <- 1:7
nome.cargo <- c('Técnico', 'Assistente', 'Consultor', 'Analista', 'Auditor',
'Gerente', 'Gestor')
salario <- c(7000, 4000, 15000, 11000, 10000, 13000, 20000)
Cargos <- data.frame(id.cargo, nome.cargo, salario)
Cargos
# inner_join keeps only rows whose id.cargo appears in BOTH tables.
# NOTE(review): Empregados contains id.cargo = 8, which is absent from
# Cargos (ids 1..7), so that employee is silently dropped -- confirm
# this is the intended demonstration.
inner_join(Empregados, Cargos, by = "id.cargo")
# Patients table: id, abbreviated name, and three exam results
id.paciente <- 1:9
nome.abr <- c("A.A.M", "S.S.G.F", "T.I.A", "L.O.S.M", "Y.Q.W", "F.A", "T.B.N",
"J.J.L", "M.S.S")
exame.a <- c(3.8, 3.8, 3.9, 4.0, 4.4, 3.8, 3.7, 3.6, 4.0)
exame.b <- c(109.98, 109.90, 109.89, 109.99, 110.01, 109.95, 109.98, 109.93,
110.00)
exame.c <- c(0, 1, 1, 0, 1, 1, 0, 0, 1)
Pacientes <- data.frame(id.paciente, nome.abr, exame.a, exame.b, exame.c)
Pacientes
# Treatment-control table; note it reuses (overwrites) id.paciente
id.paciente <- c(1, 4, 5, 7, 8, 11, 15, 25)
tp.remedio <- c("A", "B", "A", "B", "A", "A", "B", "B")
Controle <- data.frame(id.paciente, tp.remedio)
Controle
# left_join keeps every patient; unmatched ones get NA in tp.remedio
left_join(Pacientes, Controle, by = "id.paciente")
# Join the client and contract tables loaded earlier in this script
rotatividade<- left_join(cliente,contratos, by='cliente_id')
# The pipe: x %>% f() %>% g() is the same as g(f(x))
x <- c(1:10)
sqrt(sum(x))
x %>% sum() %>% sqrt()
# pnud_min comes from the abjData package loaded above
pnud_min %>% select(ano, muni, uf, gini, pop) %>%
filter(uf == "PB")
|
fb1757a81fd5e51544b24c257885e82b8dce96bf | bfedefc238eb8525235af99bcf007c8488b9ba11 | /src/utils/file.utils.R | 0c8deaea1c33e3ea76f0298029b2a98d9f9a1784 | [] | no_license | nvthao-math/rainfall-v3 | a94358ade5404708c4dd7d01f5dcc70f47ecf52c | d17fe7c0f441b7c20b1c999536865d4d3e244589 | refs/heads/master | 2020-03-19T05:42:22.901822 | 2018-06-04T01:25:31 | 2018-06-04T01:25:31 | 135,955,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 424 | r | file.utils.R | # write line to file safe mode
# Write `line` to `file.path`, creating the parent directory first if needed.
#
# Args:
#   line:      character vector to write (one element per output line).
#   file.path: destination path; missing parent directories are created
#              recursively.
#   is.append: logical; TRUE appends to an existing file, FALSE overwrites.
#
# Fix vs. the original: the parent folder was derived by strsplit() on "/",
# which broke for bare filenames (parts[1:(length - 1)] becomes parts[1:0])
# and for Windows "\\" separators. dirname() handles every case.
file.safewrite <- function(line, file.path, is.append){
  folder <- dirname(file.path)
  if (!dir.exists(folder)) {
    # recursive = TRUE creates the whole parent chain; showWarnings = FALSE
    # silences the benign warning if another process created it first
    dir.create(folder, showWarnings = FALSE, recursive = TRUE)
  }
  write(line, file = file.path, append = is.append)
}
41675adcfdb1d43d6e57b7a5390d221bc9e07ce6 | 8a29dbd1eda2a23c7a4f96b6c8fa143347ae3710 | /tests/testthat/test-baad-data.R | 46ab34d0444d9e3afa8dcc12edfb212985b87ef7 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | traitecoevo/baad.data | cc71539344edbae3b7fd0b28229d1b04811101c3 | b81e1e981bb4ce6e020b3f0f0e161c8284968b5f | refs/heads/master | 2021-06-28T18:08:35.390843 | 2016-08-23T23:12:54 | 2016-08-23T23:12:54 | 35,264,277 | 5 | 3 | NOASSERTION | 2020-11-04T16:27:15 | 2015-05-08T07:12:18 | R | UTF-8 | R | false | false | 1,783 | r | test-baad-data.R | context("baad.data")
test_that("versions", {
v <- baad_data_versions(FALSE)
expect_true("1.0.0" %in% v)
expect_equal(match("1.0.0", v), 4)
curr <- baad_data_version_current(FALSE)
expect_true(numeric_version(curr) >= numeric_version("1.0.0"))
expect_true(curr %in% v)
})
test_that("ecology version", {
path <- tempfile()
d <- baad_data("1.0.0", path)
# Note, we are just checking the data component here,
# because the whole `data` object was behaving differently
# on different platforms, due to slightly different behaviours
# of bibtex package.
# In addition, we are taking has of data after calling `as.character(unlist`
# because appveyor gives different hash when NAs are present, even if
# other tests show contents as all.equal
# See https://github.com/traitecoevo/baad.data/issues/6
expect_equal(storr:::hash_object(as.character(unlist(d[["data"]]))),
"8a333e041a8c436d8d04e683dd6c2545")
expect_is(d, "list")
expect_is(d$data, "data.frame")
expect_true(file.exists(path))
expect_identical(baad_data("1.0.0", path), d)
expect_true("1.0.0" %in% baad_data_versions(TRUE, path))
v <- baad_data_versions(FALSE, path)
res <- list()
for (i in v) {
res[[i]] <- baad_data(i, path)
expect_is(res[[i]]$data, "data.frame")
expect_is(res[[i]]$dictionary, "data.frame")
expect_is(res[[i]]$bib, "bibentry")
}
expect_equal(baad_data_versions(TRUE, path),
baad_data_versions(FALSE, path))
baad_data_del("1.0.0", path)
expect_false("1.0.0" %in% baad_data_versions(TRUE, path))
baad_data_del(NULL, path)
expect_false(file.exists(path))
d <- baad_data("1.0.0")
path <- file.path(getwd(), "baad_1.0.0.rds")
saveRDS(d, path)
expect_true(file.exists(path))
})
|
dd7793530d4cc728bdede889faf1f88702fc16ad | c666e0234b33f0d34088b2b3486f58bbeb9988be | /global.R | 1743a6faac4ac15739734ec76ac07f748b498851 | [] | no_license | joebragg/DataProducts | ca3844a4c8ded2e36bb04c4b6a78ab7d1675e346 | 4f0d679245892634ea0d0eedb79a746ddd9f178c | refs/heads/master | 2020-05-19T13:29:16.382321 | 2015-02-21T03:25:41 | 2015-02-21T03:25:41 | 30,756,381 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,170 | r | global.R | library(shiny)
library(ggplot2)
# Download and unzip the EPA fuel-economy data once; skipped when the
# extracted CSV is already present under data/
if(!file.exists("data/vehicles.csv")){
download.file("http://www.fueleconomy.gov/feg/epadata/vehicles.csv.zip",
destfile="data/vehicles.csv.zip")
unzip("data/vehicles.csv.zip",exdir="data")
}
# Keep 23 columns of interest: identity, drivetrain, MPG (regular and
# alternative fuel), running costs, CO2, and EV charge times
vehicle.data<-read.csv("data/vehicles.csv")[,c("year","make","model","VClass",
"cylinders","displ","trany","drive","evMotor","fuelType1","fuelType2",
"city08","highway08","comb08","cityA08","highwayA08","combA08",
"fuelCost08","fuelCostA08","youSaveSpend","co2TailpipeGpm",
"charge120","charge240")]
# Human-readable names, in the same order as the selection above
names(vehicle.data)<-c("Year","Make","Model","Vehicle_Class",
"Cylinders","Displacement","Transmission","Drive","evMotor","Fuel_Type","Alt_Fuel_Type",
"City_MPG","Highway_MPG","Combined_MPG","City_Alt_MPG","Highway_Alt_MPG","Comb_Alt_MPG",
"Fuel_Cost","Alt_Fuel_Cost","You_Save_Spend","CO2_Tailpipe_GPM",
"Charge_Time_120","Charge_Time_240")
b43db5a1442a31983e540f1be889ce37e2a3a1a0 | 3c2c46d70fda776fe6b361b235be15fceb570e1c | /InProgress/Boundedness /RandomGuessing.R | b3a498a1732072623a45065dcf46d826ab03d79a | [] | no_license | brctzcn/BDA2014 | 3f26e867ee08d5b2d1b44df5b125db226affdf7e | 0f41dc7c5d46e0f4ad77734fa09c249223d93a0f | refs/heads/master | 2021-06-07T03:00:14.588099 | 2015-06-17T16:03:06 | 2015-06-17T16:03:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 617 | r | RandomGuessing.R |
# Monte-Carlo simulation: two people independently guess every item of a
# multiple-choice form (3 answer options per item). Estimate the
# probability that they agree on at least `k` answers.
#
# Args:
#   n_items: number of questions on the form.
#   k:       minimum number of matching answers.
#   n_sims:  number of simulated respondent pairs.
#   n_opts:  answer options per question (default 3, as in the original
#            floor(runif(., 1, 4)) draws).
# Returns: estimated P(number of matches >= k).
guess_match_prob <- function(n_items, k, n_sims = 10000, n_opts = 3) {
  answers_a <- matrix(sample.int(n_opts, n_items * n_sims, replace = TRUE), nrow = n_items)
  answers_b <- matrix(sample.int(n_opts, n_items * n_sims, replace = TRUE), nrow = n_items)
  matches <- colSums(answers_a == answers_b)
  # Direct tail proportion. The original sorted the match counts and used
  # (10000 - which(sorted == k)[1]) / 10000, which is off by one and
  # returns NA whenever no simulation produced exactly k matches.
  mean(matches >= k)
}

# 21-item form: P(at least 7) and P(at least 8) matching answers
guess_match_prob(21, 7)
guess_match_prob(21, 8)

# 15-item form: P(at least 6) and P(at least 7) matching answers
guess_match_prob(15, 6)
guess_match_prob(15, 7)
|
cefc7b1113fc53d1359fae6661713b558afb8d4e | 049c8b4125b4e0a787161d0c28b1d7999351bd5b | /cachematrix.R | c1ecdf08d65be0ae39d6ff0e09470c658523c9d0 | [] | no_license | speedsterfinn/ProgrammingAssignment2 | c2e55620d5d97715dd63f88fc87a44eca6449d92 | 3e3bc64af26d3bdcbc7680b97e323ed22ec7340e | refs/heads/master | 2021-01-24T00:53:27.706786 | 2018-02-24T22:46:57 | 2018-02-24T22:46:57 | 122,783,434 | 0 | 0 | null | 2018-02-24T21:58:40 | 2018-02-24T21:58:40 | null | UTF-8 | R | false | false | 844 | r | cachematrix.R | ## These two functions allow the user to cache the
## inverse of an invertible matrix.
## Function makeCacheMatrix returns an object
## that will hold an invertible matrix, mat, and
## its inverse, inv.
## Creates a special "matrix" object that can cache its inverse.
## Returns a list of four accessors:
##   set(m)          -- replace the stored matrix and clear the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet set)
## Fix: setinverse() previously assigned with `<-`, which only created a
## variable local to the setter call, so the inverse was never actually
## cached; `<<-` updates `inv` in the enclosing environment.
makeCacheMatrix <- function(M = matrix()) {
  inv <- NULL
  set <- function(m) {
    M <<- m
    inv <<- NULL  # invalidate the cached inverse of the old matrix
  }
  get <- function() M
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Function cacheSolve accepts a makeCacheMatrix
## object as input and returns the inverse of
## the matrix stored in makeCacheMatrix
## Return the inverse of the special "matrix" object `M` created by
## makeCacheMatrix(): reuse the cached inverse when one is available,
## otherwise compute it with solve() and store it in the cache.
cacheSolve <- function(M, ...) {
  cached <- M$getinverse()
  if (is.null(cached)) {
    ## cache miss: compute the inverse and remember it for next time
    cached <- solve(M$get())
    M$setinverse(cached)
  }
  cached
}
|
40fbf67b97caa9aae0df5880ad43177981e24b6b | 777c57e70fd5de57b4ac46d4a869fb2e9f96ba39 | /Delousing-initiate.R | 0b00d8a5c414b3a5a5268ed9e79e869bf7b07df9 | [] | no_license | adamaki/DelouseR | 6bced3d85455210e3c709409a89e501ce4af90a2 | b6e53e0177d5d9d6e56b37f32bef03dca016a702 | refs/heads/master | 2021-10-28T09:01:47.126145 | 2021-10-25T09:59:07 | 2021-10-25T09:59:07 | 203,556,671 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,128 | r | Delousing-initiate.R | # Cleaner fish tank delousing
# Adam Brooker 29th August 2019
library(ggplot2)
library(plyr)
library(dplyr)
library(reshape2)
library(data.table)
library(stringr)
library(colorspace)
#library(grImport)
#library(grImport2)
library(devtools)
library(cowplot)
#library(Cairo)
library(magick)
library(colorRamps)
library(forcats)
library(RColorBrewer)
library(tidyr)
library(viridis)
# FUNCTIONS----------------------------------------------------------------------------------------------------
# function for mapping lice numbers to salmon image
# Map mean lice counts onto body regions of a salmon outline image and
# build a composite figure (annotated salmon + colour-bar legend).
#
# Args:
#   locations: data frame with one row per body region, in the fixed order
#              head, dorsal front/mid/rear, flank front/mid/rear,
#              ventral front/mid/rear, tail. Requires columns `location`,
#              `lice.m` (mean), `lice.sd` (sd) and, when pvalues = T,
#              `lice.sig` (significance labels).
#   maxlice:   upper limit of the colour scale (lice per region).
#   msdvalues: if T, annotate each region with "mean±sd".
#   pvalues:   if T, append the significance label to each annotation.
#   leg:       legend title.
#   img.path:  path to the salmon outline bitmap. New argument; the default
#              is the previously hard-coded path, so existing calls are
#              unchanged.
#
# Side effects (unchanged from the original): assigns `salplot`, `salfig`
# and `liceleg` into the global environment.
#
# Fixes vs. the original: `locations$lilce.sig` (region 2) was a typo for
# `lice.sig` and errored whenever msdvalues = T and pvalues = T; the two
# near-identical annotation branches are collapsed into one loop; the
# image path is parameterised; dead commented-out code removed.
map.lice <- function(locations, maxlice, msdvalues = T, pvalues = T, leg = 'No. lice',
                     img.path = '/Users/adambrooker/Dropbox/1-IoA/Projects/SAIC Lumpfish/Delousing Trials/SalmonOutline.bmp'){

  # bin each regional mean into 0.1-lice colour classes (bin 1 = [-0.1, 0])
  locations$bins <- as.numeric(cut(locations$lice.m, breaks = seq(-0.1, maxlice, 0.1), labels = seq(1, (maxlice+0.1)*10, 1)))
  licepal <- rev(heat.colors((maxlice+0.1)*10, alpha = 0.5))

  # flood-fill each body region with its bin colour; the "+x+y" points are
  # seed pixels inside each region of the outline bitmap
  salcol <- image_read(img.path)
  salcol <- if(locations$bins[[1]] > 0) {image_fill(salcol, licepal[[locations$bins[[1]]]], point = '+156+227', fuzz = 40)} else {salcol} #head
  salcol <- if(locations$bins[[2]] > 0) {image_fill(salcol, licepal[[locations$bins[[2]]]], point = '+299+124', fuzz = 40)} else {salcol} # front dorsal
  salcol <- if(locations$bins[[3]] > 0) {image_fill(salcol, licepal[[locations$bins[[3]]]], point = '+630+116', fuzz = 40)} else {salcol} # mid dorsal
  salcol <- if(locations$bins[[3]] > 0) {image_fill(salcol, licepal[[locations$bins[[3]]]], point = '+538+68', fuzz = 40)} else {salcol} # mid dorsal (fin)
  salcol <- if(locations$bins[[4]] > 0) {image_fill(salcol, licepal[[locations$bins[[4]]]], point = '+934+154', fuzz = 40)} else {salcol} # rear dorsal
  salcol <- if(locations$bins[[4]] > 0) {image_fill(salcol, licepal[[locations$bins[[4]]]], point = '+913+124', fuzz = 40)} else {salcol} # rear dorsal (fin)
  salcol <- if(locations$bins[[5]] > 0) {image_fill(salcol, licepal[[locations$bins[[5]]]], point = '+314+207', fuzz = 40)} else {salcol} # front flank
  salcol <- if(locations$bins[[6]] > 0) {image_fill(salcol, licepal[[locations$bins[[6]]]], point = '+570+217', fuzz = 40)} else {salcol} # mid flank
  salcol <- if(locations$bins[[7]] > 0) {image_fill(salcol, licepal[[locations$bins[[7]]]], point = '+941+194', fuzz = 40)} else {salcol} # rear flank
  salcol <- if(locations$bins[[8]] > 0) {image_fill(salcol, licepal[[locations$bins[[8]]]], point = '+343+314', fuzz = 40)} else {salcol} # front ventral
  salcol <- if(locations$bins[[9]] > 0) {image_fill(salcol, licepal[[locations$bins[[9]]]], point = '+680+308', fuzz = 40)} else {salcol} # mid ventral
  salcol <- if(locations$bins[[10]] > 0) {image_fill(salcol, licepal[[locations$bins[[10]]]], point = '+904+250', fuzz = 40)} else {salcol} # rear ventral
  salcol <- if(locations$bins[[10]] > 0) {image_fill(salcol, licepal[[locations$bins[[10]]]], point = '+887+293', fuzz = 40)} else {salcol} # rear ventral (fin)
  salcol <- if(locations$bins[[11]] > 0) {image_fill(salcol, licepal[[locations$bins[[11]]]], point = '+1147+205', fuzz = 40)} else {salcol} # tail

  if(msdvalues == T){
    # "mean±sd" label per region; each element is formatted separately so
    # values are not padded to a common width (as vectorised format() would)
    fmt1 <- function(v) vapply(v, function(z) format(round(z, 1), nsmall = 1), character(1))
    labs <- paste0(fmt1(locations$lice.m), '\u00B1', fmt1(locations$lice.sd))
    if(pvalues == T){ labs <- paste0(labs, locations$lice.sig) }
    # label rotation (degrees), following the body outline
    degs <- c(0, -13, 6, 7.5, 0, 0, 0, 0, -8, -9, 0)
    # label anchor points; the longer labels (with significance letters)
    # are nudged left at the head, front-ventral and tail positions
    locs <- if(pvalues == T){
      c('+136+215', '+278+121', '+626+105', '+906+139', '+278+189', '+626+189',
        '+906+189', '+255+279', '+626+306', '+906+236', '+1110+189')
    } else {
      c('+155+215', '+278+121', '+626+105', '+906+139', '+278+189', '+626+189',
        '+906+189', '+270+279', '+626+306', '+906+236', '+1116+189')
    }
    for(i in seq_along(labs)){
      salcol <- image_annotate(salcol, labs[[i]], size = 20, color = 'black',
                               degrees = degs[[i]], location = locs[[i]])
    }
  }

  salplot <- ggdraw() +
    draw_image(salcol)

  # bar chart used only to harvest a horizontal colour-bar legend
  liceplot <- ggplot(data = locations, aes(x = location, y = lice.m, fill = lice.m)) +
    geom_bar(stat = 'identity') +
    labs(fill = leg) +
    guides(fill = guide_colourbar(title.position = 'bottom', title.hjust = 0.5, frame.colour = 'black')) +
    theme(legend.position = 'bottom', legend.justification = 'centre', legend.key.width = unit(2, 'cm')) +
    scale_fill_gradientn(colors = licepal, limits = c(0, maxlice), breaks = seq(0, floor(maxlice), 1))

  liceleg <- get_legend(liceplot)

  # exported to the global environment, as in the original implementation
  salplot <<- salplot
  salfig <<- plot_grid(salplot, liceleg, nrow = 2, ncol = 1, rel_heights = c(0.8, 0.2))
  liceleg <<- liceleg
}
# function to calculate fish headings from positions (outputs vector of fish headings)
# Compute a compass-style heading (degrees clockwise from "up") for every
# step of every fish track in `df`, from successive positions.
#
# Args:
#   df:     data frame with columns `ID`, `fish.rx`, `fish.ry` (image
#           coordinates; origin at the top-left, hence the y-sign flip).
#   thresh: minimum movement for a heading to be recorded; smaller steps
#           yield NA.
# Output: assigns `headvec` into the global environment via `<<-`; one NA
# is inserted at the start of each fish's track, since the first position
# has no preceding point.
#
# NOTE(review): the threshold is applied to the step *distance*
# (sqrt(dx^2 + dy^2)) for diagonal moves, but to the raw coordinate
# difference for axis-aligned moves -- confirm this asymmetry is intended.
heading.func <- function(df, thresh){
IDS <- unique(df$ID)
headvec <- numeric()
for (j in 1:length(IDS)){
heading <- numeric()
# per-step coordinate differences for this fish's track
diffx <- diff(df$fish.rx[df$ID == IDS[[j]]])
#diffx <- diff(df$fish.rx[1:10])
diffy <- diff(df$fish.ry[df$ID == IDS[[j]]])
#diffy <- diff(df$fish.ry[1:10])
diffy <- diffy * -1 # switch sign to account for origin in top left and not bottom left of image
for (i in 1:length(diffx)){
if(diffx[[i]] != 0 & diffy[[i]] != 0){
#if(atan(diffy[[i]]/diffx[[i]]) > thresh | atan(diffy[[i]]/diffx[[i]]) < -thresh){
if(sqrt(diffx[[i]]^2+diffy[[i]]^2) > thresh | sqrt(diffx[[i]]^2+diffy[[i]]^2) < -thresh){
# quadrant-by-quadrant conversion of (dx, dy) into 0-360 degrees
if(diffx[[i]] > 0 & diffy[[i]] > 0) {
heading <- c(heading, round((atan(diffx[[i]]/diffy[[i]]))*180/pi, 2))
} else {
if(diffx[[i]] > 0 & diffy[[i]] < 0) {
heading <- c(heading, round(90+((atan((diffy[[i]]*-1)/diffx[[i]]))*180/pi), 2))
} else {
if(diffx[[i]] < 0 & diffy[[i]] < 0) {
heading <- c(heading, round(270-((atan((diffy[[i]]*-1)/(diffx[[i]]*-1)))*180/pi), 2))
} else {
if(diffx[[i]] < 0 & diffy[[i]] > 0){
heading <- c(heading, round(270+((atan(diffy[[i]]/(diffx[[i]]*-1)))*180/pi), 2))
}
}
}
}
} else { heading <- c(heading, NA) }
} else {
# axis-aligned moves map directly onto the four cardinal headings
if(diffx[[i]] == 0 & diffy[[i]] > thresh) {
heading <- c(heading, 0)
} else {
if(diffx[[i]] > thresh & diffy[[i]] == 0) {
heading <- c(heading, 90)
} else {
if(diffx[[i]] == 0 & diffy[[i]] < -thresh) {
heading <- c(heading, 180)
} else {
if(diffx[[i]] < -thresh & diffy[[i]] == 0) {
heading <- c(heading, 270)
} else {
heading <- c(heading, NA)
}
}
}
}
}
}
# leading NA: the first fix of each track has no heading
headvec <- c(headvec, NA, heading)
}
headvec <<- headvec
}
# Regression equation and R2---------------------------------------------
# Build a plotmath label "y = a + b.x, r^2 = ..." for a simple linear fit.
#
# Args:
#   df: data frame containing numeric columns `x` and `y`.
# Returns: a length-1 character string of the deparsed plotmath
#   expression, suitable for geom_text(parse = TRUE).
lm_eqn <- function(df) {
  fit <- lm(y ~ x, df)
  coefs <- unname(coef(fit))
  label <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
                      list(
                        a  = format(coefs[1], digits = 2),
                        b  = format(coefs[2], digits = 2),
                        r2 = format(summary(fit)$r.squared, digits = 3)
                      ))
  as.character(as.expression(label))
}
# new stat smooth function to show equation and r2 on plot
# Layer constructor for StatSmoothFunc (defined below). Arguments mirror
# ggplot2::stat_smooth(); the stat fits the smoothing model and emits a
# single (x, y, label) row whose label is a plotmath string with the
# fit's r-squared (see StatSmoothFunc$compute_group). Extra arguments:
#   xpos, ypos: coordinates for the label; if NULL the stat places it
#               near min(x) / max(y) of the data.
# Adapted from the widely circulated stat_smooth_func gist.
stat_smooth_func2 <- function(mapping = NULL, data = NULL,
geom = "smooth", position = "identity",
...,
method = "auto",
formula = y ~ x,
se = TRUE,
n = 80,
span = 0.75,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE,
xpos = NULL,
ypos = NULL) {
# everything is forwarded to ggplot2::layer(); model-related settings
# travel in `params` and reach StatSmoothFunc$compute_group
layer(
data = data,
mapping = mapping,
stat = StatSmoothFunc,
geom = geom,
position = position,
show.legend = show.legend,
inherit.aes = inherit.aes,
params = list(
method = method,
formula = formula,
se = se,
n = n,
fullrange = fullrange,
level = level,
na.rm = na.rm,
method.args = method.args,
span = span,
xpos = xpos,
ypos = ypos,
...
)
)
}
StatSmoothFunc <- ggproto("StatSmooth", Stat,
setup_params = function(data, params) {
# Figure out what type of smoothing to do: loess for small datasets,
# gam with a cubic regression basis for large data
# This is based on the size of the _largest_ group.
if (identical(params$method, "auto")) {
max_group <- max(table(data$group))
if (max_group < 1000) {
params$method <- "loess"
} else {
params$method <- "gam"
params$formula <- y ~ s(x, bs = "cs")
}
}
if (identical(params$method, "gam")) {
params$method <- mgcv::gam
}
params
},
compute_group = function(data, scales, method = "auto", formula = y~x,
se = TRUE, n = 80, span = 0.75, fullrange = FALSE,
xseq = NULL, level = 0.95, method.args = list(),
na.rm = FALSE, xpos=NULL, ypos=NULL) {
if (length(unique(data$x)) < 2) {
# Not enough data to perform fit
return(data.frame())
}
if (is.null(data$weight)) data$weight <- 1
if (is.null(xseq)) {
if (is.integer(data$x)) {
if (fullrange) {
xseq <- scales$x$dimension()
} else {
xseq <- sort(unique(data$x))
}
} else {
if (fullrange) {
range <- scales$x$dimension()
} else {
range <- range(data$x, na.rm = TRUE)
}
xseq <- seq(range[1], range[2], length.out = n)
}
}
# Special case span because it's the most commonly used model argument
if (identical(method, "loess")) {
method.args$span <- span
}
if (is.character(method)) method <- match.fun(method)
base.args <- list(quote(formula), data = quote(data), weights = quote(weight))
model <- do.call(method, c(base.args, method.args))
m = model
#eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
eq <- substitute(~~italic(r)^2~"="~r2,
list(#a = format(coef(m)[1], digits = 3),
#b = format(coef(m)[2], digits = 3),
r2 = format(summary(m)$r.squared, digits = 3)))
func_string = as.character(as.expression(eq))
if(is.null(xpos)) xpos = min(data$x)*0.9
if(is.null(ypos)) ypos = max(data$y)*0.9
data.frame(x=xpos, y=ypos, label=func_string)
},
required_aes = c("x", "y")
)
|
48597739f10fa20ece16ca0c690f051ab7de2946 | 0f8e7a7c47440422243ae3afbf0eb26799a3db1e | /run_analysis.R | 1ddd8dd0279b5ffc8e70822b88c33e6ef2175163 | [] | no_license | angra311/GCDP | 0b3ad76a4922f27b4e08e92ca102896a1e0b7a39 | 42478f5772603f5b45b9cf020d095aee73300009 | refs/heads/master | 2021-01-23T17:19:22.208220 | 2015-08-14T23:50:22 | 2015-08-14T23:50:22 | 40,625,794 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,194 | r | run_analysis.R | ## Read the feature names from file
## Merge the UCI HAR train/test sets, keep the mean/std features, attach
## descriptive activity names, and build a tidy summary (mean of each
## feature per Activity x Subject). Expects the "UCI HAR Dataset" folder
## in the working directory.
feature_names <- read.table ("UCI HAR Dataset/features.txt", as.is=TRUE, 
row.names = 1)
## make.names() turns e.g. "tBodyAcc-mean()-X" into "tBodyAcc.mean...X",
## so the patterns below can anchor on "mean." / "std." -- this also
## excludes "meanFreq" features, which have no dot right after "mean"
feature_names[,1] <- make.names(feature_names[,1])
## extract the indices of the ones we want to keep
mi <- grep ("mean\\.",feature_names[,1],ignore.case=TRUE)
si <- grep ("std\\.",feature_names[,1],ignore.case=TRUE)
desired_features<-feature_names[c(mi,si),1]
## Load training data
## First subjects
subjects_train <- read.table ("UCI HAR Dataset/train/subject_train.txt", 
col.names = "Subject")
## Then the measurements, keep only the ones we want.
x_train <- read.table ("UCI HAR Dataset/train/X_train.txt", header=FALSE, 
col.names = feature_names[,1])
x_train <- x_train[,desired_features]
activity_labels <- read.table ("UCI HAR Dataset/activity_labels.txt", row.names = 1)
## finally the observed activities - replace the number with the descriptive name
y_train <- read.table ("UCI HAR Dataset/train/Y_train.txt",header=FALSE, 
col.names = "Activity")
y_train$Activity<-activity_labels[y_train$Activity,1]
xy_train <- cbind(x_train, subjects_train, y_train)
## Load test data
## First subjects
subjects_test <- read.table ("UCI HAR Dataset/test/subject_test.txt", col.names = "Subject")
## Then the measurements, keep only the ones we want.
x_test <- read.table ("UCI HAR Dataset/test/X_test.txt", header=FALSE, 
col.names = feature_names[,1])
x_test <- x_test[,desired_features]
## finally the observed activities - replace the number with the descriptive name
y_test <- read.table ("UCI HAR Dataset/test/Y_test.txt",header=FALSE, 
col.names = "Activity")
y_test$Activity<-activity_labels[y_test$Activity,1]
xy_test <- cbind(x_test, subjects_test, y_test)
## Merge train and test into one dataset
merged_data <- rbind (xy_train, xy_test)
## Then extract summaries by Activity and Subject for the other measurements
## NOTE(review): the name `summary` masks base::summary in this session
summary <-aggregate (. ~ Activity+Subject, data=merged_data, mean)
## clean up
rm (merged_data, xy_test, xy_train, x_test, x_train, y_test, y_train, subjects_test, 
subjects_train)
|
73b8733a49ed95233fcc8af8b44791fa241214cc | f534c8c8712dc39b27c296e0f00994f49100d692 | /xdate_spp_site_subset.R | c535ae98103c44a0bf3ef54a6045b5d329973fc4 | [] | no_license | jam2767/dissertation_code | 9082068966875951fa5174a598243a3d912f9ee7 | d71c16f35a03211c5d30da4da714bb2bb1654fd0 | refs/heads/master | 2021-01-18T08:42:58.920570 | 2017-07-16T03:55:51 | 2017-07-16T03:55:51 | 53,062,316 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,682 | r | xdate_spp_site_subset.R | #subsetting data by species and reordering folders for xdating
# Cross-dating preparation: for each site, read all Tucson-format
# ring-width files, match them against the adult inventory
# (matchInventoryRings), and write the combined table to a per-site CSV.
# Read_Tuscon() and matchInventoryRings() are presumably defined
# elsewhere in this project (not visible here) -- TODO confirm.
# NOTE(review): the per-site chunks below are identical apart from the
# site letter and path; a site lookup table plus a loop (or lapply)
# would remove the duplication.
#crossdate species at each site individually using cofecha
#create species folders within sites
#identify missing trees from census data and tucson file matches
#match tucson files and adult data for each site
#Read all inventory data
trees.xdate <- read.csv("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Adult_Field_Data_JAM_MCD.csv")
#Harvard Forest
rings.xdate.h <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Harvard (H)/Tucson/")
combined.xdate.h <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.h)
write.csv(x=combined.xdate.h,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/H_Treerings.csv")
#Baskett
rings.xdate.m <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Baskett (M)/Tucson/")
combined.xdate.m <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.m)
write.csv(x=combined.xdate.m,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/M_Treerings.csv")
#Smithsonian
rings.xdate.s <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Smithsonian (S)/Tucson/")
combined.xdate.s <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.s)
write.csv(x=combined.xdate.s,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/S_Treerings.csv")
#Bartlett
rings.xdate.b <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Bartlett (B)/Tucson/")
combined.xdate.b <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.b)
write.csv(x=combined.xdate.b,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/B_Treerings.csv")
#UNDERC
rings.xdate.w <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/UNDERC (W)/Tucson/")
combined.xdate.w <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.w)
write.csv(x=combined.xdate.w,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/W_Treerings.csv")
#VRO
rings.xdate.v <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Vermillion (V)/Tucson/")
combined.xdate.v <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.v)
write.csv(x=combined.xdate.v,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/V_Treerings.csv")
#Oak Ridge
rings.xdate.t <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/ORNL (T)/Tucson/")
combined.xdate.t <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.t)
write.csv(x=combined.xdate.t,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/T_Treerings.csv")
##need to convert files to Tucscon before I can xdate
#Duke Forest
rings.xdate.d <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Duke (D) 2013/Tucson/")
combined.xdate.d <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.d)
write.csv(x=combined.xdate.d,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/D_Treerings.csv")
##need to convert files to Tucscon before I can xdate
#Coweeta
rings.xdate.c <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Coweeta (C)/Tucson/")
combined.xdate.c <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.c)
write.csv(x=combined.xdate.c,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/C_Treerings.csv")
#Ordway Swisher
rings.xdate.f <- Read_Tuscon("/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/Windendro_text/Ordway Swisher (F)/Tucson/")
combined.xdate.f <- matchInventoryRings(trees=trees.xdate,rings=rings.xdate.f)
write.csv(x=combined.xdate.f,file="/Users/josh/Dropbox/Dissertation/CH1_Treerings/Data/F_Treerings.csv")
07ab80d8b9fa5b7569529cd2cf3351dfa7f85204 | e5fcf1aeff9cbfab3f19a862c7d80145ce68dbe4 | /man/kfolds2CVinfos_glm.Rd | f39f0f05e4b054c6201484eebdee5344bf235478 | [] | no_license | fbertran/plsRglm | 7d7294101829065f4d1672d26af42b09b577464a | 058296cd0c1e1488265b87573d524a61f538809b | refs/heads/master | 2023-04-08T02:43:19.921763 | 2023-03-14T22:28:43 | 2023-03-14T22:28:43 | 18,454,150 | 16 | 6 | null | 2021-03-14T15:41:24 | 2014-04-04T22:09:56 | R | UTF-8 | R | false | true | 3,294 | rd | kfolds2CVinfos_glm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kfolds2CVinfos_glm.R
\name{kfolds2CVinfos_glm}
\alias{kfolds2CVinfos_glm}
\title{Extracts and computes information criteria and fit statistics for k-fold
cross validated partial least squares glm models}
\usage{
kfolds2CVinfos_glm(pls_kfolds, MClassed = FALSE, verbose = TRUE)
}
\arguments{
\item{pls_kfolds}{an object computed using \code{\link{cv.plsRglm}}}
\item{MClassed}{should number of miss classed be computed ?}
\item{verbose}{should infos be displayed ?}
}
\value{
\item{list}{table of fit statistics for first group partition}
\item{list()}{\dots{}} \item{list}{table of fit statistics for last group
partition}
}
\description{
This function extracts and computes information criteria and fit statistics
for k-fold cross validated partial least squares glm models for both formula
or classic specifications of the model.
}
\details{
The Mclassed option should only be set to \code{TRUE} if the response is
binary.
}
\note{
Use \code{\link{summary}} and \code{\link{cv.plsRglm}} instead.
}
\examples{
\donttest{
data(Cornell)
summary(cv.plsRglm(Y~.,data=Cornell,
nt=6,K=12,NK=1,keepfolds=FALSE,keepdataY=TRUE,modele="pls",verbose=FALSE),MClassed=TRUE)
data(aze_compl)
summary(cv.plsR(y~.,data=aze_compl,nt=10,K=8,modele="pls",verbose=FALSE),
MClassed=TRUE,verbose=FALSE)
summary(cv.plsRglm(y~.,data=aze_compl,nt=10,K=8,modele="pls",verbose=FALSE),
MClassed=TRUE,verbose=FALSE)
summary(cv.plsRglm(y~.,data=aze_compl,nt=10,K=8,
modele="pls-glm-family",
family=gaussian(),verbose=FALSE),
MClassed=TRUE,verbose=FALSE)
summary(cv.plsRglm(y~.,data=aze_compl,nt=10,K=8,
modele="pls-glm-logistic",
verbose=FALSE),MClassed=TRUE,verbose=FALSE)
summary(cv.plsRglm(y~.,data=aze_compl,nt=10,K=8,
modele="pls-glm-family",
family=binomial(),verbose=FALSE),
MClassed=TRUE,verbose=FALSE)
if(require(chemometrics)){
data(hyptis)
hyptis
yhyptis <- factor(hyptis$Group,ordered=TRUE)
Xhyptis <- as.data.frame(hyptis[,c(1:6)])
options(contrasts = c("contr.treatment", "contr.poly"))
modpls2 <- plsRglm(yhyptis,Xhyptis,6,modele="pls-glm-polr")
modpls2$Coeffsmodel_vals
modpls2$InfCrit
modpls2$Coeffs
modpls2$std.coeffs
table(yhyptis,predict(modpls2$FinalModel,type="class"))
modpls3 <- PLS_glm(yhyptis[-c(1,2,3)],Xhyptis[-c(1,2,3),],3,modele="pls-glm-polr",
dataPredictY=Xhyptis[c(1,2,3),],verbose=FALSE)
summary(cv.plsRglm(factor(Group,ordered=TRUE)~.,data=hyptis[,-c(7,8)],nt=4,K=10,
random=TRUE,modele="pls-glm-polr",keepcoeffs=TRUE,verbose=FALSE),
MClassed=TRUE,verbose=FALSE)
}
}
}
\references{
Nicolas Meyer, Myriam Maumy-Bertrand et
Frédéric Bertrand (2010). Comparing the linear and the
logistic PLS regression with qualitative predictors: application to
allelotyping data. \emph{Journal de la Societe Francaise de Statistique},
151(2), pages 1-18.
\url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/47}
}
\seealso{
\code{\link{kfolds2coeff}}, \code{\link{kfolds2Pressind}},
\code{\link{kfolds2Press}}, \code{\link{kfolds2Mclassedind}} and
\code{\link{kfolds2Mclassed}} to extract and transforms results from k-fold
cross-validation.
}
\author{
Frédéric Bertrand\cr
\email{frederic.bertrand@utt.fr}\cr
\url{https://fbertran.github.io/homepage/}
}
\keyword{models}
\keyword{regression}
|
dc49be7356110c71793c0f6bcd97d4df4e3fef5f | 5e605fdb3bd68987f776b0121f950a7aee1ccbb9 | /man/fit.nb.glm.1u.Rd | 9e6bceff5f67d70b9303cc245646533e8c188b52 | [] | no_license | diystat/NBPSeq | f150da798677a3c27dc27cee916f960f66af149d | 358f095f4846476c7c9ffe720b502899ea18affb | refs/heads/master | 2021-01-01T16:19:01.230766 | 2014-05-18T00:19:07 | 2014-05-18T00:19:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,580 | rd | fit.nb.glm.1u.Rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{fit.nb.glm.1u}
\alias{fit.nb.glm.1u}
\title{Fit a single negative binomial (NB) log-linear regression model with a common unknown dispersion parameter}
\usage{
fit.nb.glm.1u(y, s, x, phi = NA, beta0 = rep(NA, dim(x)[2]),
kappa = 1/phi, info.kappa = TRUE, ...)
}
\arguments{
\item{y}{a n-vector of NB counts.}
\item{s}{a n-vector of library sizes.}
\item{x}{a n by p design matrix.}
\item{phi}{a scalar, the NB dispersion parameter.}
\item{beta0}{a p-vector specifying the known and unknown
components of beta, the regression coefficients. NA values
indicate unknown components and non-NA values specify the values
of the known components. The default is that all components of
beta are unknown.}
\item{kappa}{a scalar, the size/shape parameter. \code{kappa}
will be set to \code{1/phi} if \code{phi} is not \code{NA} and
will be estimated if both \code{phi} and \code{kappa} are NA.}
\item{info.kappa}{}
\item{...}{additional parameters to \code{\link{irls.nb.1}}}
}
\value{
a list
\item{mu}{an n-vector, estimated means (MLE).}
\item{beta}{an p-vector, estimated regression coefficients (MLE).}
\item{iter}{number of iterations performed in the IRLS algorithm.}
\item{zero}{logical, whether any of the estimated \code{mu} is close to zero.}
\item{kappa}{a scalar, the size parameter}
\item{phi}{a scalar, 1/kappa, the dispersion parameter}
\item{l}{log likelihood of the fitted model.}
\item{D}{a p-vector, the score vector}
\item{j}{a p-by-p matrix, observed information matrix}
}
\description{
Fit a single negative binomial (NB) log-linear regression model with a common unknown dispersion parameter.
}
\details{
Find the MLE of the dispersion parameter and the regression
coefficients in a NB regression model.
Under the NB regression model, the components of y follow a NB
distribution with means mu = s exp(x' beta) and a common
dispersion parameter phi.
}
\note{
When the dispersion is known, the user should specify only one of
\code{phi} or \code{kappa}. Whenever \code{phi} is specified
(non-NA), \code{kappa} will be set to 1/\code{phi}.
The observed information matrix, j, will be computed for all
parameters---kappa and all components of beta (including known
components). It will be computed at the estimated values of (phi,
beta) or (kappa, beta), which can be unconstrained or constrained
MLEs depending on how the arguments \code{phi} (or \code{kappa})
and \code{beta} are specified.
TODO: allow computing the information matrix using phi or log(kappa) as
parameter
}
|
26df858bdaebde887e724211dc1e3cc22e2cd1a1 | f5faeeb7362ac4a97a4a4d61d8320ff9079e2dd0 | /modules/benchmark/R/load.csv.R | 6d86799bde9090a2b108706ea49fd07fd47bd9e8 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Viskari/pecan | 0b1793d2994afac9f312369880862a2d3a408dc3 | 7489a97ef4d3deaf5b62a10a86a5ae14f859aaa8 | refs/heads/master | 2021-01-17T21:16:08.800080 | 2016-10-03T20:23:28 | 2016-10-03T20:23:28 | 28,244,459 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 964 | r | load.csv.R | ##' @name load.csv
##' @title load.csv
##' @export
##'
##' @description Read a benchmarking CSV file, honouring the header/skip
##'   conventions recorded in the format record.
##'
##' @param data.path character; path to the CSV file to read
##' @param format list with elements \code{header} (0 = no header row,
##'   1 = header on the first row, >1 = header row number), \code{skip}
##'   (number of lines to skip before reading) and \code{na.strings}
##'   (strings interpreted as NA)
##' @param site list; site metadata (currently unused here, kept for
##'   signature compatibility with the other load.* functions)
##' @param vars optional character vector of column names to keep
##'
##' @return a data.frame, restricted to \code{vars} when supplied
##'
##' @author Betsy Cowdery
load.csv <- function(data.path, format, site, vars = NULL){
  if (format$header %in% c(0, 1)){
    # Header flag maps directly onto read.csv's header argument.
    dat <- read.csv(data.path, skip = format$skip, na.strings = format$na.strings, as.is=TRUE,
                    check.names = FALSE, header = as.logical(format$header))
  }else if (format$header > 1){
    # Header sits on row `format$header`: read with a header, then drop the
    # leading rows before the real data.  The original code referenced an
    # undefined `header` variable here (`dat[-c(1:header-1),]`), which made
    # this branch fail with an error.
    dat <- read.csv(data.path, skip = format$skip, na.strings = format$na.strings, as.is=TRUE,
                    check.names = FALSE, header = TRUE)
    dat <- dat[-seq_len(format$header - 1), , drop = FALSE]
  }else{
    # Unknown header specification: fall back to read.csv defaults.
    dat <- read.csv(data.path, skip = format$skip, na.strings = format$na.strings, as.is=TRUE,
                    check.names = FALSE)
  }
  if(!is.null(vars)){
    return(dplyr::select(dat, dplyr::one_of(vars)))
  }else{
    return(dat)
  }
}
ad32fc4e05e33c97cef4e40961bd42b8c2f4154e | 2253c85e1c90b54df4b69ad40b6ce9b207c76415 | /R/shortcode.R | 2d8426665d4a344cccf64d00182140463517d435 | [
"MIT"
] | permissive | djnavarro/hugodown | a204e1709df31ac1dae81f895bf3e89191f93e39 | 168a361518f5450e498d0fa9e34eea93f0aa677d | refs/heads/master | 2023-07-02T11:17:59.870024 | 2021-07-04T23:50:13 | 2021-07-04T23:50:13 | 270,511,218 | 0 | 0 | NOASSERTION | 2020-07-10T05:29:44 | 2020-06-08T03:15:26 | R | UTF-8 | R | false | false | 4,112 | r | shortcode.R | #' Generate a hugo shortcode
#'
#' @description
#' Build a hugo shortcode string, wrapped in the pandoc raw-attribute markup
#' that keeps it intact when the R markdown document is rendered.
#'
#' You rarely want to call this directly; instead wrap it in a function named
#' after the shortcode, as [embed_gist()] and friends do.
#'
#' @param .name Name of the shortcode
#' @param ... Arguments to the shortcode, supplied by name or position as the
#'   shortcode requires. Character values are quoted with single quotes by
#'   default; wrap an argument in `I()` to suppress the quoting.
#' @param .contents Contents of the shortcode, for paired shortcodes.
#' @param .output Does the shortcode produce html or markdown? Controls
#'   whether the `<>` or `%` delimiters are used.
#' @param .inline Is the shortcode used inline or in its own paragraph?
#'   Controls whether it is wrapped in an inline or block
#'   [raw attribute](https://pandoc.org/MANUAL.html#extension-raw_attribute).
#' @export
#' @examples
#' pkg <- function(name) {
#'   shortcode("pkg", name, .inline = TRUE)
#' }
#' pkg("hugodown")
shortcode <- function(.name, ..., .contents = NULL, .output = c("html", "md"), .inline = FALSE) {
  .output <- arg_match(.output)
  # Hugo uses {{< >}} around html-producing shortcodes and {{% %}} around
  # markdown-producing ones.
  fences <- if (.output == "html") c("{{< ", " >}}") else c("{{% ", " %}}")
  delimit <- function(inner) paste0(fences[[1]], inner, fences[[2]])
  invocation <- paste0(c(.name, shortcode_args(...)), collapse = " ")
  out <- delimit(invocation)
  if (!is.null(.contents)) {
    # Paired shortcode: opening tag, contents, then a closing "/name" tag.
    out <- paste0(out, .contents, delimit(paste0("/", .name)))
  }
  # NOTE(review): the pandoc raw attribute is always `{=html}`, even when
  # .output = "md" -- this mirrors the original behaviour.
  if (.inline) {
    paste0("`", out, "`{=html}")
  } else {
    paste0("```{=html}\n", out, "\n```\n")
  }
}
# Render `...` as the single "name=value name2=value2" argument string of a
# shortcode call.  NULL arguments are dropped, character values are
# single-quoted unless protected with I(), and unnamed arguments are emitted
# as bare values.  Returns NULL when no arguments remain.
shortcode_args <- function(...) {
  supplied <- list2(...)
  keep <- !vapply(supplied, is.null, logical(1))
  supplied <- supplied[keep]
  if (length(supplied) == 0L) {
    return(NULL)
  }
  render_one <- function(value) {
    quote_it <- is.character(value) && !inherits(value, "AsIs")
    if (quote_it) encodeString(value, quote = "'") else format(value)
  }
  rendered <- vapply(supplied, render_one, character(1))
  arg_names <- names2(supplied)
  joiner <- ifelse(arg_names == "", "", "=")
  paste0(arg_names, joiner, rendered, collapse = " ")
}
#' Generate hugo shortcodes to embed various types of media
#'
#' @description
#' These wrappers generate the
#' [hugo shortcodes](https://gohugo.io/content-management/shortcodes/) used
#' to embed various types of media in your pages. Use them from inline R
#' code like:
#'
#' ```
#' This tweet announced the release of hugo 0.24:
#'
#' `r embed_tweet("877500564405444608")`
#' ```
#'
#' @param username GitHub user name.
#' @param id A string giving the object id. You'll usually find this by
#'   inspecting the URL:
#'   * gist: `https://gist.github.com/spf13/7896402` -> `7896402`
#'   * instagram: `https://www.instagram.com/p/BWNjjyYFxVx/` -> `BWNjjyYFxVx`
#'   * twitter: `https://twitter.com/spf13/status/877500564405444608` -> `877500564405444608`
#'   * vimeo: `https://vimeo.com/channels/staffpicks/146022717` -> `146022717`
#'   * youtube: `https://www.youtube.com/watch?v=w7Ft2ymGmfc` -> `w7Ft2ymGmfc`
#' @param filename Optionally, pick a single file from a multi-file gist.
#' @export
embed_gist <- function(username, id, filename = NULL) {
  shortcode("gist", username, id, filename)
}
#' @param caption Should the Instagram caption be displayed?
#' @export
#' @rdname embed_gist
embed_instagram <- function(id, caption = TRUE) {
  stopifnot(is.character(id))
  # Hugo's instagram shortcode takes an optional bare `hidecaption` flag;
  # a NULL value is dropped from the generated call by shortcode_args().
  flag <- if (caption) NULL else I("hidecaption")
  shortcode("instagram", I(id), flag)
}
#' @export
#' @rdname embed_gist
embed_tweet <- function(id) {
  # The status id must be supplied as a string; I() keeps it unquoted in the
  # generated shortcode (see the `...` docs on shortcode()).
  stopifnot(is.character(id))
  shortcode("tweet", I(id))
}
#' @export
#' @rdname embed_gist
embed_vimeo <- function(id) {
  # The video id must be supplied as a string; I() keeps it unquoted in the
  # generated shortcode (see the `...` docs on shortcode()).
  stopifnot(is.character(id))
  shortcode("vimeo", I(id))
}
#' @param autoplay Automatically play youtube video?
#' @export
#' @rdname embed_gist
embed_youtube <- function(id, autoplay = FALSE) {
  stopifnot(is.character(id))
  # Emit the autoplay argument only when requested: a NULL value is dropped
  # by shortcode_args().  The id is now passed identically in both cases
  # (unquoted via I(), matching embed_tweet()/embed_vimeo()); the original
  # quoted it in one branch and not the other.
  shortcode("youtube", id = I(id), autoplay = if (autoplay) "true")
}
|
eee63866fa0ca2b1010b4a00d3b1bbeec57cb99d | 3873c2e97422f403482a440b7501f031c5e7ab28 | /AnalisarSelicIsoladamente.R | 9f98e662a3b78d9ec45656b09762b6cc434378e0 | [] | no_license | EderCamposRibeiro/TCC_BigData | 2996a9f83e8a8f6d31d0ea5f351f090058481916 | 46b5d1d1df3c4a30fbceb008c8d8c48ab7c1c9da | refs/heads/master | 2021-01-09T18:19:09.005571 | 2020-02-22T20:45:48 | 2020-02-22T20:45:48 | 242,404,913 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 3,453 | r | AnalisarSelicIsoladamente.R | library(forecast)
library(ggplot2)
library(seasonal)
library(seasonalview)
library(urca)
# Exploratory time-series analysis of the Brazilian Selic interest rate,
# taken in isolation (monthly data, March 1999 - December 2019).
# Read the file with the monthly Selic rate data (03/1999 - 01/2020).
# file.choose() makes this script interactive: the user picks the CSV file.
arq = read.csv(file.choose(), header = T, sep = ";", dec = ",")
print(arq)
# Turn the data into a time-series object
selic = ts(arq$meta_selic,start = c(1999,3), end=c(2019,12), frequency=12)
plot(selic)
#-------------------------------------------------------------------------------------
autoplot(selic, ylab = "Percentual da taxa Selic", xlab = "Tempo", main = "Taxa Selic 1999/2019")
# Visually, the series does not appear to be seasonal, shows some variation
# and seems to have a downward trend
#-------------------------------------------------------------------------------------
# Split the device so the histogram and the boxplot are shown side by side
split.screen( c(1,2))
screen(1)
hist(selic, xlab = "Percentual", ylab = "Frequência", main = "Histograma Selic")
screen(2)
boxplot(selic, ylab = "Mediana", main = "Boxplot Selic")
close.screen(all=T)
summary(selic)
# Histogram shows how the data are distributed:
# most of the time the rate was between 10% and 16.75%.
# The boxplot shows how the rate is distributed:
# as seen in the histogram, the median is around 12.75%, and a rate above
# 26% (around 26/27%) is already considered high (an outlier)
#Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
#4.50   10.00   12.75   13.54   16.75   42.00
#-------------------------------------------------------------------------------------
# Decompose the series to better understand how the Selic behaves
dec = decompose(selic)
autoplot(dec, main = "Decomposição Taxa Selic 1999/2019", xlab="Tempo")
# There is a seasonal pattern; from the plot it is quite regular.
# The trend varies strongly, but falls over the period.
#-------------------------------------------------------------------------------------
# So far we have seen that the average Selic rate is about 13% per year;
# rates above 25% are the exception, not the rule;
# there is a recurring seasonal pattern;
# and over the analysed period the rate trends downwards (good news).
#-------------------------------------------------------------------------------------
# Let's look at the trend more carefully:
autoplot(dec$trend)
# The rate has been falling over the years, but around mid-2013 it rose
# sharply, coinciding with the start of the economic crisis. In 2016 a
# steep decline began.
autoplot(window(dec$trend, start=c(2016,01)))
# This downward trend in the Selic started in early 2016, stabilised in the
# second half of 2018 and fell again in 2019, reaching 4.5% per year, an
# unprecedented rate for the country.
#-------------------------------------------------------------------------------------
# Let's examine the seasonality more carefully:
# seasonal plot of 1999-2020 (each year is a different colour)
ggseasonplot(selic)
# This confirms a very regular seasonal pattern.
# Only two years, 1999 and 2003, behave non-linearly.
# In the other years the rates differ, but stay fairly stable within the year.
# The lines also show a decline across the years.
# Look at the rate between 1999 and 2003:
ggseasonplot(window(selic, start=c(1999,01), end = c(2003, 12)))
# Confirms the previous findings, just with cleaner data
|
1f131c9d2a14aca5d24cc93a0980d2d10a7d9e42 | 8ab442dab9bc83efc145f8ed756f071eaaaaa041 | /run_model_opti.R | 9381795b217e3abdff95791bef3b0bc36a8343aa | [
"MIT"
] | permissive | sidorowicz-aleksandra/GrapeReception | a2d71d481ad32e0a62309ee54e5544358390bd9b | 4889ff28938efee2f18d69824ab6cd71a79a590d | refs/heads/master | 2020-05-15T19:06:45.940499 | 2020-02-11T14:59:14 | 2020-02-11T14:59:14 | 182,447,222 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,613 | r | run_model_opti.R | source("functions_model_opti.R")
library(xlsx)
# Loop bounds of run_model's main simulation loop
# (for (i in 2:(T_horizon + T_expand))).  NOTE(review): the exact meaning of
# these constants is defined by functions_model_opti.R -- confirm there.
T_horizon <-32
T_expand <- 2
#---- RUN MODEL FUNCTION ----
# Simulate the reception plan over the whole horizon and report its value.
#
# Queue      : data.frame of trucks with (at least) columns Variety, Load,
#              Id and arrival period t; column 3 is the position in line.
# Prob_TR    : NOTE(review): never referenced inside this function body --
#              confirm whether it can be dropped from the signature.
# V_TS_all_n : path to the value workbook passed to DECISIONS() (presses 1-4).
# V_TB_all_n : path to the value workbook passed to DECISIONS() (presses 5-6).
#
# Relies on InitiatePresses(), DECISIONS(), f_S() and f_B() from
# functions_model_opti.R, and on the globals T_horizon / T_expand.
# Returns (and prints) a two-column summary data.frame.
run_model<- function(Queue,Prob_TR, V_TS_all_n, V_TB_all_n){
  # Initiation of presses ----
  Presses <- InitiatePresses()
  # Initialize profit and costs ----
  V <- 0        # value accrued from completely filled presses
  C_degr <- 0   # value lost to grape degradation (trucks waiting too long)
  C_wasted <- 0 # value lost to trucks discarded after waiting even longer
  max_V <- sum(Queue$Variety*Queue$Load)  # upper bound: all load sold at full variety value
  ##################################################################################
  # Algorithm
  START<-Sys.time()
  for(i in 2:(T_horizon+T_expand))
  {
    #Trucks waiting times check
    if (any(i-Queue$t==5)){
      Queue_degr <- subset(Queue,i-Queue$t==5)
      # Degradation cost is the value difference between the original variety
      # and the downgraded variety 1.
      C_degr <- C_degr + sum((Queue_degr$Variety-1)*Queue_degr$Load)
      Queue[i-Queue$t==5,1] <- 1 # change variety to 1 if truck waits 2 hours
    }
    if (any(i-Queue$t==9)){
      Queue_wasted <- subset(Queue,i-Queue$t==9)
      C_wasted <- C_wasted + sum(Queue_wasted$Variety*Queue_wasted$Load)
      Queue <- subset(Queue,i-Queue$t!=9) # delete the truck if it waits 4 hours
    }
    # Candidate decisions for period i: DEC_v = variety per press,
    # DEC_l = load per press, one row per maximizing strategy.
    DEC<-DECISIONS(Presses,Queue,i,V_TS_all_n,V_TB_all_n)
    DEC_v <- DEC[1][[1]]
    DEC_l <- DEC[2][[1]]
    #print('*')
    if (any(DEC_l>0)){
      Queue_t <- subset(Queue,Queue$t<=i-1) # up to time t
      #choose randomly maximizing strategy
      d <- sample(1:nrow(DEC_v),1)
      d_v <- DEC_v[d,]
      d_l <- DEC_l[d,]
      v_uniq <- unique(as.numeric(d_v))
      v_uniq <- subset(v_uniq,v_uniq>0)
      # Presses with no assigned variety this period (updated idle below)
      presses_to_optimize <- which(d_v==0)
      for (k in 1:length(v_uniq)){ # for each variety
        d_l_v <- d_l*(d_v==v_uniq[k])
        # Trucks of this variety are served first-come-first-served.
        Queue_v <- subset(Queue_t,Queue_t$Variety == v_uniq[k])
        Queue_v <- Queue_v[order(Queue_v$t),]
        if (nrow(Queue_v)>0) rownames(Queue_v) <- 1:nrow(Queue_v)
        d_l_v_new <-d_l_v*0
        again = 1
        # Repeat while some press still needs more load than the front truck
        # carries (the "not enough" branch schedules another pass).
        while (again==1){
          if (any(d_l_v_new!=0)) d_l_v=d_l_v_new
          again = 0
          for (kl in which(d_l_v>0)){
            if(d_l[kl]==Queue_v$Load[1]){ #exact match
              d_l_v_new[kl] <- 0
              d_l[kl] <- 0
              # update press (presses 1-4 via f_S, presses 5-6 via f_B)
              if (kl<5){ Presses[kl,-4] <- f_S(i,Presses[kl,-4],Queue_v[1,1:2])
              }else Presses[kl,-4] <- f_B(i,Presses[kl,-4],Queue_v[1,1:2])
              # update queue: truck fully served, remove it everywhere
              Queue_t <- Queue_t[-which(Queue_t$Id==Queue_v$Id[1]),]
              Queue <- Queue[-which(Queue$t==Queue_v$t[1] & Queue$Id==Queue_v$Id[1]),]
              Queue_v <- Queue_v[-1,]
              if (nrow(Queue_v)>0) rownames(Queue_v)<- 1:nrow(Queue_v)
            }else if(d_l[kl]<Queue_v$Load[1]){ #too much
              # update press
              if (kl<5){ Presses[kl,-4] <- f_S(i,Presses[kl,-4],as.data.frame(c(Queue_v[1,1],d_l[kl])))
              }else Presses[kl,-4] <- f_B(i,Presses[kl,-4],as.data.frame(c(Queue_v[1,1],d_l[kl])))
              # update queue: truck partially unloaded, reduce its remaining load
              Queue_t$Load[which(Queue_t$Id==Queue_v$Id[1])] <-as.numeric(Queue_v[1,2])-as.numeric(d_l[kl])
              Queue$Load[which(Queue$t==Queue_v$t[1] & Queue$Id==Queue_v$Id[1])] <- as.numeric(Queue_v[1,2])-as.numeric(d_l[kl])
              Queue_v[1,2] <- as.numeric(Queue_v[1,2])-as.numeric(d_l[kl])
              d_l_v_new[kl] <- 0
              d_l[kl] <- 0
            }else{ #not enough
              again = 1
              # Remember how much load this press still needs after consuming
              # the whole front truck.
              d_l_v_new[kl] <- d_l[kl] - Queue_v[1,2]
              d_l[kl] <- d_l[kl] - Queue_v[1,2]
              # update press
              if (kl<5){ Presses[kl,-4] <- f_S(i,Presses[kl,-4],Queue_v[1,1:2])
              }else Presses[kl,-4] <- f_B(i,Presses[kl,-4],Queue_v[1,1:2])
              # update queue
              Queue_t <- Queue_t[-which(Queue_t$Id==Queue_v$Id[1]),]
              Queue <- Queue[-which(Queue$t==Queue_v$t[1] & Queue$Id==Queue_v$Id[1]),]
              Queue_v <- Queue_v[-1,]
              if (nrow(Queue_v)>0)rownames(Queue_v)<- 1:nrow(Queue_v)
            }
          }
        }
      }
      # update main queue indexes
      if (nrow(subset(Queue,Queue$t<=i))>0){
        Queue[Queue$t<=i,3]<-c(1:nrow(subset(Queue,t<=i)))
        rownames(Queue) <- 1:nrow(Queue)}
      # Presses without an assignment this period still evolve (idle load 0):
      if (length(presses_to_optimize)>0){
        for (pr in presses_to_optimize){
          if (pr<5){ Presses[pr,-4] <- f_S(i,Presses[pr,-4],as.data.frame(t(c(0,0))))
          }else Presses[pr,-4] <- f_B(i,Presses[pr,-4],as.data.frame(t(c(0,0))))
        }
      }
      # increase value function: a press contributes Variety * capacity when
      # it was filled to capacity (25 for presses 1-4, 50 for presses 5-6)
      # and started this very period.
      V1<-25*Presses$Variety[1]*ifelse(Presses$Load[1]==25 & Presses$Start_time[1]==i,1,0)
      V2<-25*Presses$Variety[2]*ifelse(Presses$Load[2]==25 & Presses$Start_time[2]==i,1,0)
      V3<-25*Presses$Variety[3]*ifelse(Presses$Load[3]==25 & Presses$Start_time[3]==i,1,0)
      V4<-25*Presses$Variety[4]*ifelse(Presses$Load[4]==25 & Presses$Start_time[4]==i,1,0)
      V5<-50*Presses$Variety[5]*ifelse(Presses$Load[5]==50 & Presses$Start_time[5]==i,1,0)
      V6<-50*Presses$Variety[6]*ifelse(Presses$Load[6]==50 & Presses$Start_time[6]==i,1,0)
      V<-V+V1+V2+V3+V4+V5+V6
    }else{
      # No press takes any load this period: just refresh the state.
      # Queue update
      if (nrow(subset(Queue,Queue$t<=i))>0){
        Queue[Queue$t<=i,3]<-c(1:nrow(subset(Queue,t<=i)))
        rownames(Queue) <- 1:nrow(Queue)}
      # Presses update
      Presses[1,-4] <- f_S(i,Presses[1,-4],as.data.frame(t(c(0,0))))
      Presses[2,-4] <- f_S(i,Presses[2,-4],as.data.frame(t(c(0,0))))
      Presses[3,-4] <- f_S(i,Presses[3,-4],as.data.frame(t(c(0,0))))
      Presses[4,-4] <- f_S(i,Presses[4,-4],as.data.frame(t(c(0,0))))
      Presses[5,-4] <- f_B(i,Presses[5,-4],as.data.frame(t(c(0,0))))
      Presses[6,-4] <- f_B(i,Presses[6,-4],as.data.frame(t(c(0,0))))
    }
  }
  # Presses that never started still hold unsold load; their value is counted
  # as obtained in the summary below.
  Presses_not_filled <- subset(Presses,Presses$Start_time==0)
  # NOTE(review): "Evalutaion" typo is preserved -- it is a runtime string.
  result <- rbind(c('Evalutaion time:',Sys.time()-START),
                  c('Maximal profit:',max_V),
                  c('Obtained:',V+sum(Presses_not_filled$Variety*Presses_not_filled$Load)),
                  c('Degradation loss:',C_degr),
                  c('Waste loss:',C_wasted),
                  c('Remain loss:',ifelse(nrow(Queue)>0,sum(Queue$Variety*Queue$Load),0)))
  result <- as.data.frame(result)
  colnames(result) <- c('Measure','Result')
  print(result)
  return(result)
}
#---- SIMULATION ----
# Run the model once per scenario prefix; each scenario's input files live
# under data/Poisson/ and share the prefix, and each summary table is written
# to Results_10_<prefix>.xlsx.
file_names <- c('R_R_')
for (f in file_names){
  print(f)
  file_name = f
  Queue <- read.csv(paste0('data/Poisson/',file_name,'Queue.csv'))
  Prob_TR <- read.csv(paste0('data/Poisson/',file_name,'Prob_TR.csv'))
  # The value workbooks are passed to run_model by path, not loaded here.
  V_TS_all_n <- paste0('data/Poisson/',file_name,'V_TS_all.xlsx')
  V_TB_all_n <- paste0('data/Poisson/',file_name,'V_TB_all.xlsx')
  txt <- run_model(Queue,Prob_TR, V_TS_all_n, V_TB_all_n)
  write.xlsx(txt,paste0('Results_10_',f,'.xlsx'),row.names =FALSE)
}
|
6b51374ee18f56b9e4352d3b5343ec706ce40041 | c7428f526a66ad30c5d4a723908026279e0e6f94 | /server.r | 3f0bfa8815bda0d71618d51ebcd193544e456cb2 | [] | no_license | carly-levi/Scraping_and_Interactive_Visualization | 69d842948f362f0ed1eb2f07a234219de6602502 | dd627c5becbe102959febfc2d2ee98fa3cc23530 | refs/heads/master | 2021-01-19T05:41:42.180186 | 2017-04-06T15:01:55 | 2017-04-06T15:01:55 | 87,442,224 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 514 | r | server.r | library(ggplot2)
library(dplyr)
# Shiny server: renders a histogram of the user-selected variable for the
# selected season.  `blue_devils` must be defined elsewhere (e.g. global.R).
#
# The original version did not parse: the server function's body was not
# wrapped in braces, the switch() labels ("14-15", ...) were unquoted (a
# syntax error), and the plot title referenced `graphname` while the
# variable was named `graph_name`.
shinyServer(
  function(input, output, session) {
    output$graph <- renderPlot({
      # Every branch maps the selected season code to itself, so the season
      # label doubles as the plot title.
      graph_name <- switch(input$year,
                           "14-15" = "14-15",
                           "13-14" = "13-14",
                           "12-13" = "12-13",
                           "11-12" = "11-12",
                           "10-11" = "10-11")
      # input$var holds a column *name*, so resolve it with .data[[ ]] rather
      # than mapping the literal string as a constant.
      # NOTE(review): the original also mapped y = input$var2, but
      # geom_histogram() takes a single positional aesthetic and would error
      # at run time -- confirm whether a different geom was intended.
      ggplot(data = blue_devils, aes(x = .data[[input$var]])) +
        geom_histogram() +
        ggtitle(graph_name)
    })
  }
)
31aaae8cf13c280590955a27b33a8d5c2bc5a41f | 0032cae2e393d94cd1d621dcf7b45ce917a68f26 | /My Elo Tennis.R | fd03ae98edebef359cdb4c6a4d2bb95f764051b9 | [] | no_license | Reinaldodos/Tennis | abc4a50eb929c4448e8539f49ff45cd4f9a87f1d | ccf0d582657966592c34868decd22eaad9286725 | refs/heads/master | 2020-04-17T12:24:51.770317 | 2020-01-21T10:02:04 | 2020-01-21T10:02:04 | 166,578,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,141 | r | My Elo Tennis.R | library(data.table)
require(tidyverse)  # NOTE(review): prefer library(); require() only warns on failure
library(PlayerRatings)
library(RSelenium)
# Start a local Selenium server plus a Chrome browser session.
# The unused driver options are kept (commented) for easy switching.
rD <-
  rsDriver(
    # port = 4567L,
    browser = "chrome",
    version = "latest",
    # chromever = "latest",
    # geckover = "latest",
    # iedrver = NULL,
    # phantomver = "2.1.1",
    verbose = TRUE,
    check = TRUE)
# Handle to the remote browser client, used by the rest of the script
remDr <- rD[["client"]]
url <- "https://mon-espace-tennis.fft.fr/"
remDr$navigate(url)
# Log in to the account.  Credentials are read from environment variables
# (FFT_USERNAME / FFT_PASSWORD): the original script committed a plaintext
# username and password to version control, which is a credential leak.
remDr$findElement("css", "#edit-name")$sendKeysToElement(list(Sys.getenv("FFT_USERNAME")))
remDr$findElement("css", "#edit-pass")$sendKeysToElement(list(Sys.getenv("FFT_PASSWORD")))
remDr$findElement("css", "#edit-submit")$clickElement()
# Scrape one player's match-history ("palmarès") page and fold the results
# into the crawl's global state.
#
# URL : address of the player's palmarès page on mon-espace-tennis.fft.fr.
#
# Side effects (via <<-):
#   * JOUEURS gains the opponent links found on the page (crawl frontier),
#   * FICHIER gains this page's match rows (result accumulator).
# Also reads the globals remDr (Selenium driver), DONE (already-visited
# players) and Joueur (the player currently being scraped).
#
# NOTE(review): readHTMLTable()/htmlParse() come from the XML package, which
# is not loaded by the library() calls above -- confirm it is attached.
PALMARES_ANNEE = function(URL)
{
  remDr$navigate(URL)
  # Retrieve the result tables from the rendered page
  doc <- readHTMLTable(
    htmlParse(remDr$getPageSource()[[1]]),
    header = TRUE, as.data.frame = TRUE, stringsAsFactors = FALSE
  )
  # Table 2 holds the matches; keep only rows with Wo == "Non"
  # (presumably: not decided by walkover)
  palmares = data.table(doc[[2]])
  palmares = palmares[Wo == "Non"]
  # Drop matches against opponents that were already crawled
  palmares = subset(palmares,!(palmares$Adversaire %in% DONE$V2))
  palmares = cbind(Joueur$V2[1], palmares)
  # Collect the opponent profile links to extend the crawl frontier
  toto = remDr$findElements("xpath","//td//a")
  urls = rbindlist(lapply(toto, function(x)
    x$getElementAttribute(attrName = "href")))
  adversaires = cbind(urls, palmares$Adversaire)
  JOUEURS <<- rbind(JOUEURS, adversaires)
  FICHIER <<- rbind(FICHIER, palmares)
}
# Breadth-first crawl of players starting from a seed profile, then a
# Glicko rating computation over all collected matches.
DONE = NULL      # players already visited (URL, name)
FICHIER = NULL   # accumulated match records
# Seed player (profile URL, display name)
Joueur = data.table("https://mon-espace-tennis.fft.fr/palmares/1065827", "DOS SANTOS Reinaldo")
# Joueur = data.table("https://mon-espace-tennis.fft.fr/palmares/998110?millesime=2015", "DOS SANTOS Antonio")
JOUEURS = Joueur
# Crawl until the queue is empty or enough matches have been collected
while (nrow(JOUEURS) > 0)
{
  Joueur = JOUEURS[1]
  # Strip any "?millesime=..." query string so URLs dedupe cleanly
  Joueur$V1[1] = strsplit(Joueur$V1[1], "?", fixed = TRUE)[[1]][1]
  # Best-effort scrape; failures (missing page, layout change) are ignored
  try(PALMARES_ANNEE(Joueur$V1[1]), silent = TRUE)
  # try(PALMARES_ANNEE(paste(Joueur$V1[1], "?millesime=2015", sep = "")), silent = TRUE)
  # try(PALMARES_ANNEE(paste(Joueur$V1[1], "?millesime=2014", sep = "")), silent = TRUE)
  # try(PALMARES_ANNEE(paste(Joueur$V1[1], "?millesime=2013", sep = "")), silent = TRUE)
  DONE = unique(rbind(DONE, Joueur))
  # Normalise queued URLs the same way, then drop players already visited.
  # NOTE(review): unlist() here flattens ALL pieces of each split, not just
  # the part before "?" — verify queued URLs never contain a query string.
  JOUEURS$V1 = unlist(lapply(JOUEURS$V1, function(x) strsplit(x, "?", fixed = TRUE)))
  JOUEURS = unique(subset(JOUEURS,!(JOUEURS$V1 %in% DONE$V1)))
  # Progress: number of matches so far and current player name
  print(c(nrow(FICHIER), Joueur$V2), quote = FALSE)
  # Stop once the match table is large enough
  if (nrow(FICHIER) > 1000) {break}
}
FICHIER = unique(FICHIER)
# FICHIER = readRDS("Dropbox/Carto & Stats/R/Tennis/FICHIER")
# Parse match dates (dd/mm/yyyy) and clean up empty rows
Dates = strptime(FICHIER$Date, format = "%d/%m/%Y")
FICHIER = cbind(FICHIER, Dates)
FICHIER = FICHIER[V1 != ""]
FICHIER = FICHIER[Adversaire != ""]
FICHIER = FICHIER[order(Dates)]
saveRDS(object = FICHIER, file = "Dropbox/Carto & Stats/R/Tennis/FICHIER")
# Build the (time, player, opponent, score) table expected by PlayerRatings:
# score 1 = win ("V."), 0 = loss ("D."); time is in whole weeks since the
# earliest match
results = subset(FICHIER, select = c("Dates", "V1", "Adversaire", "V/D"))
results$`V/D` = gsub(pattern = "V.", replacement = 1, x = results$`V/D`)
results$`V/D` = gsub(pattern = "D.", replacement = 0, x = results$`V/D`)
results$Dates = as.integer((FICHIER$Dates - min(FICHIER$Dates))/7/24/3600)
results$`V/D` = as.numeric(results$`V/D`)
# Ranking = fide(x = results, init = 1400, sort = TRUE)
# Glicko ratings, initialised at 1500 with deviation 350
Ranking = glicko(x = results, init = c(1500, 350), sort = TRUE)
Classement = data.table(Ranking$ratings)
# Keep only players with more than 3 rated games
Top = Classement[Games > 3]
|
c7856db84c2bbd3960c6352f2527ae8ae3d78868 | 1022c0957e291e6c545e2db89715a74d7c1a509d | /tbsim_app/man/plotBactSplit.Rd | b12b7485b146fc30bebbc927d8cbd912245a0f9b | [] | no_license | saviclab/TBsim | ace67165891836c8c11af2c80868131914bdfc95 | d30f62d809e4d807a88f585a306e251b63213892 | refs/heads/master | 2021-07-11T19:06:27.401669 | 2020-08-18T22:41:05 | 2020-08-18T22:41:05 | 192,394,874 | 2 | 1 | null | 2020-01-22T16:59:57 | 2019-06-17T18:02:43 | R | UTF-8 | R | false | true | 264 | rd | plotBactSplit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plotBactSplit}
\alias{plotBactSplit}
\title{Plot Bacterial load split by compartment}
\usage{
plotBactSplit(res)
}
\description{
Plot Bacterial load split by compartment
}
|
139e8db22d9f6b0333e1c610c10e8e1762cc7ad6 | f5ebce9efe0b955e0fe556a790e6e108b4864cfc | /man/get_user_mode.Rd | 6eae81ec9f58f6129dad457100beaf5784a97e8f | [] | no_license | gidonc/durhamevp | 6da21bbc5cf4470ef7f58d637a105dbed8aa81b2 | 42c62034c61d76a66ad7fc6d5c7d25d7d5db5ef2 | refs/heads/master | 2022-05-03T11:30:02.920215 | 2022-04-02T20:15:07 | 2022-04-02T20:15:07 | 134,703,200 | 0 | 0 | null | 2018-07-26T13:22:23 | 2018-05-24T11:04:32 | R | UTF-8 | R | false | true | 417 | rd | get_user_mode.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/database_interactivity.R
\name{get_user_mode}
\alias{get_user_mode}
\title{Returns table of current user modes}
\usage{
get_user_mode(user_id = "all")
}
\arguments{
\item{user_id}{Id of the user whose mode to check or a vector of user ids. The default ("all") selects all users.}
}
\description{
Returns the table of current user modes.
}
|
eeb50983581f36a1d978804ca727c23657c45320 | 125af6bd1dedd45089b3d5c95e0a4691e1fe715b | /R/hysplit_dispersion_plot.R | c9d90222bf17e96e853b92e9367bb349f329cafe | [
"MIT"
] | permissive | envhyf/SplitR | b068a2997d20831281a6f210801cc6b0b2a40265 | 386c65067c6ad62bcec442fabe7f93c81826f495 | refs/heads/master | 2021-01-18T10:30:53.412897 | 2015-01-24T10:03:04 | 2015-01-24T10:03:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,500 | r | hysplit_dispersion_plot.R | #' Plot HYSPLIT dispersion model output onto maps
#' @description The function plots hourly outputs of dispersed particles onto maps.
#' @param hours specification of hours of dispersion data to plot.
#' @param dispersion_df optionally specify a data frame with dispersion data as an input.
#' @param df_folder_path absolute path of the folder containing a dispersion data frame.
#' @param map_type selection provider of base maps for plotting. Choices are 'osm' (Open Street Map) and 'stamen' (Stamen Maps).
#' @param map_output_name a partial identifier prepended to the output map files.
#' @param path_output_files a full path for a location that the dispersion output files were written.
#' @export hysplit_dispersion_plot
#' @examples
#' \dontrun{
#' # Make a set of hourly plots from a dispersion data frame
#' hysplit_dispersion_plot(hours = "all",
#' dispersion_df = disp.df,
#' map_output_name = "new map",
#' path_output_files = "~/Documents/SplitR/Output/Plots/")
#'}
hysplit_dispersion_plot <- function(hours = 'all',
                                    dispersion_df = NULL,
                                    df_folder_path = NULL,
                                    map_type = "stamen",
                                    map_output_name = NULL,
                                    path_output_files = NULL){
  
  # Obtain the appropriate dispersion data frame; if a value is supplied to
  # 'dispersion_df', validate that it looks like a dispersion output object
  # (expected columns and column classes) before using it
  if (!is.null(dispersion_df)){
    valid_names <- all(c("particle_no", "lon", "lat", "height",
                         "hour", "hour_start", "particle_id") %in%
                         names(dispersion_df))
    valid_classes <- all(is.integer(dispersion_df[,1]),
                         is.numeric(dispersion_df[,2]),
                         is.numeric(dispersion_df[,3]),
                         is.numeric(dispersion_df[,4]),
                         is.numeric(dispersion_df[,5]))
    if (!valid_names | !valid_classes){
      stop("The supplied data frame is not a valid dispersion df object.")
    }
  }
  
  # Otherwise, read 'dispersion.csv' from the supplied folder path, building
  # a platform-appropriate absolute path
  if (is.null(dispersion_df) & !is.null(df_folder_path)){
    if (.Platform$OS.type == "unix"){
      csv_absolute_path <- gsub("//", "/", paste(df_folder_path, "/dispersion.csv", sep = ''))
    }
    if (.Platform$OS.type == "windows"){
      if (grepl("\\\\", df_folder_path)) df_folder_path <- gsub("\\\\", "", df_folder_path)
      csv_absolute_path <- paste(df_folder_path, "\\dispersion.csv", sep = '')
    }
    dispersion_df <- read.csv(csv_absolute_path,
                              header = TRUE, stringsAsFactors = FALSE)
  }
  
  # If 'hours' is left at the default "all", plot every hour up to the last
  # hour present in the data. identical() is used because 'hours' may be a
  # numeric vector, and `hours == 'all'` on a vector is an error
  if (identical(hours, 'all')){
    last_hour <- max(dispersion_df$hour)
    hours <- 1:last_hour
  }
  
  # If a vector of hours was given, keep only those hours actually present
  # in the dispersion data frame
  if (is.vector(hours)){
    hours_dispersion_df <- unique(dispersion_df$hour)
    hours <- hours[which(hours %in% hours_dispersion_df)]
  }
  
  # Determine the extent of particle dispersion
  bbox_data <- make_bbox(lon = dispersion_df$lon, lat = dispersion_df$lat)
  
  # 'bounding_box' returns a square lat/lon bounding box defined by a
  # center point and a distance away in kilometers (or miles).
  # BUG FIX: the original unconditionally redefined 'ang_rad' as the km
  # conversion after the if/else, silently ignoring 'in.miles'; the stray
  # redefinition has been removed (the only call here uses in.miles = FALSE,
  # so behavior at this call site is unchanged)
  bounding_box <- function(lat, lon, dist, in.miles = TRUE){
    if (in.miles){
      ang_rad <- function(miles) miles/3958.756
    } else {
      ang_rad <- function(dist_km) dist_km/1000
    }
    `%+/-%` <- function(x, margin){x + c(-1, +1) * margin}
    deg2rad <- function(x) x/(180/pi)
    rad2deg <- function(x) x*(180/pi)
    lat_range <- function(latr, r) rad2deg(latr %+/-% r)
    lon_range <- function(lonr, dlon) rad2deg(lonr %+/-% dlon)
    r <- ang_rad(dist)
    latr <- deg2rad(lat)
    lonr <- deg2rad(lon)
    dlon <- asin(sin(r)/cos(latr))
    m <- matrix(c(lon_range(lonr = lonr, dlon = dlon),
                  lat_range(latr = latr, r = r)), nrow = 2, byrow = TRUE)
    dimnames(m) <- list(c("lng", "lat"), c("min", "max"))
    m
  }
  
  # Grow the map bounding box outward in 0.2 km steps until its extents are
  # greater than the data extents
  for (i in seq(from = 0.2, to = 1000, by = 0.2)){
    bbox_map <- bounding_box(lon = (bbox_data[[1]] + bbox_data[[3]])/2,
                             lat = (bbox_data[[2]] + bbox_data[[4]])/2,
                             i, in.miles = FALSE)
    if (bbox_map[1] <= bbox_data[[1]] &
        bbox_map[2] >= bbox_data[[3]] &
        bbox_map[3] <= bbox_data[[2]] &
        bbox_map[4] >= bbox_data[[4]]){
      break
    }
  }
  
  # If chosen, download a Stamen 'toner' style base map covering the bounds
  if (map_type == "stamen"){
    map <- get_map(location = bbox_map,
                   maptype = "toner",
                   source = "stamen")
  }
  
  # If chosen, download an Open Street Map 'terrain' style base map
  if (map_type == "osm"){
    map <- get_map(location = bbox_map,
                   maptype = "terrain",
                   source = "osm")
  }
  
  # Produce one map per requested hour; 'i' is the hour value itself
  for (i in hours){
    # Subset the particle positions for this hour
    dispersion_df_hour <- subset(dispersion_df, hour == i)
    df_xyh <- data.frame(hour_x = dispersion_df_hour$lon,
                         hour_y = dispersion_df_hour$lat,
                         hour_h = dispersion_df_hour$height)
    
    # Build the plot: points colored/sized by particle height plus a loess
    # trend. The invalid `stat = "smooth"` entry was removed from aes()
    # ('stat' is not an aesthetic); duplicated theme() arguments deduplicated
    gg <- ggmap(ggmap = map, extent = "device") +
      geom_point(data = df_xyh, aes(x = hour_x, y = hour_y, colour = hour_h,
                                    size = hour_h, alpha = 0.5)) +
      scale_colour_gradient(low = "green", high = "darkred", trans = "sqrt",
                            limits = c(0, 5000)) +
      geom_smooth(data = df_xyh, aes(x = hour_x, y = hour_y),
                  method = "loess") +
      theme(legend.position = "none",
            axis.line = element_blank(), axis.ticks = element_blank(),
            axis.title.x = element_blank(), axis.title.y = element_blank(),
            axis.text.x = element_blank(), axis.text.y = element_blank())
    
    # Save to PDF. 'plot = gg' is passed explicitly (the object was never
    # printed, so relying on last_plot() is unsafe), and 'i' is used directly
    # in the file name — the original 'hours[i]' misindexed whenever a custom
    # vector of hours was supplied (i iterates hour VALUES, not positions)
    if (is.null(map_output_name)){
      ggsave(filename = paste("dispersion-map-h", i, ".pdf", sep = ''),
             plot = gg,
             device = pdf,
             path = paste0(path_output_files),
             width = 8, height = 8)
    } else {
      ggsave(filename = paste(map_output_name, "-dispersion-map-h", i, ".pdf", sep = ''),
             plot = gg,
             device = pdf,
             path = paste0(path_output_files),
             width = 8, height = 8)
    }
  }
}
|
3ee69c10a0e5abba8479f208ab01686c33292ab3 | 57e1744a36c84af152a2ac4aff6889222138790d | /VM_PLS/plsregress.R | 79773ca7c94d2b4a00c6a656f6bf022044e64d33 | [] | no_license | JamesShieh0510/bpm | 0852fc0d40b7c09bfd4d4bd0483af21e922f16de | 98c9d50e767fc95e6f0693433c2dc403df958fb7 | refs/heads/master | 2020-03-19T17:15:45.470972 | 2018-06-10T12:04:25 | 2018-06-10T12:04:25 | 136,750,693 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,003 | r | plsregress.R | plsregress <- function(X, Y, ncomp){
  # Body of plsregress(X, Y, ncomp): SIMPLS-style partial least squares
  # regression (a port of MATLAB's plsregress). X is the n x dx predictor
  # matrix, Y the response matrix, ncomp the number of latent components.
  # Side effect: the regression coefficient matrix (intercept row first) is
  # saved as 'W' to "plsregress.rda" in the working directory.
  #X = read.table("C:/Users/Horace/Desktop/BPM_R/VM_PLS/XF.txt", header = FALSE)
  #Y = read.table("C:/Users/Horace/Desktop/BPM_R/VM_PLS/YF.txt", header = FALSE)
  #ncomp = 3
  n = nrow(X)
  dx = ncol(X)
  # Cap the number of components at the rank bound min(n-1, dx)
  maxncomp = min(n-1,dx)
  if (ncomp > maxncomp){
    ncomp = maxncomp
  }
  # Center X and Y column-wise (element-by-element loops mirror the
  # original MATLAB code)
  meanX = colMeans(X)
  meanY = colMeans(Y)
  X0 = matrix(NaN, n ,dx)
  Y0 = matrix(NaN, nrow(Y), ncol(Y))
  for (i in 1:n){
    for (j in 1:dx){
      X0[i,j] = X[i,j] - meanX[j]
    }
  }
  for (i in 1:nrow(Y)){
    for (j in 1:ncol(Y)){
      Y0[i,j] = Y[i,j] - meanY[j]
    }
  }
  dy = ncol(Y0)
  Xloadings = matrix(0, dx, ncomp)
  Yloadings = matrix(0, dy, ncomp)
  Weights = matrix(0, dx, ncomp)
  V = matrix(0, dx, ncomp)
  # Cross-covariance matrix, deflated once per extracted component
  Cov = t(X0) %*% Y0
  for (i in 1:ncomp){
    # Leading singular triplet of the current (deflated) covariance
    svd = svd(Cov)
    si = svd$d[1]
    ri = svd$u[,1]
    ci = svd$v[,1]
    ri = as.matrix(ri, ncol=1)
    ci = as.matrix(ci, ncol=1)
    # Score vector for this component, normalised to unit length
    ti = X0 %*% ri
    normti = norm(ti, '2')
    ti = ti / normti # ti'*ti == 1
    Xloadings[,i] = t(X0) %*% ti
    qi = (si * ci) / normti # = Y0'*ti
    Yloadings[,i] = qi
    Weights[,i] = ri / normti
    # Orthogonalise the new X-loading against the basis V; the k in 1:2 pass
    # appears to be a repeated Gram-Schmidt sweep for numerical stability —
    # TODO(review): confirm against the MATLAB reference implementation
    vi = matrix(Xloadings[,i])
    if (i >= 2){
      for (k in 1:2){
        for (j in 1:(i-1)){
          vj = matrix(V[,j])
          vi = vi - as.matrix(outer((t(vj) %*% vi) , vj, '*'))
        }
      }
    }
    vi = vi / norm(vi,'2')
    V[,i] = vi
    # Deflate the covariance with the new basis vector, then again with the
    # full basis so far
    Cov = Cov - vi %*% (t(vi) %*% Cov)
    Vi = V[,(1:i)]
    Cov = Cov - Vi %*% (t(Vi) %*% Cov)
  }
  # Coefficients: Weights * Yloadings', with the intercept row prepended
  beta = Weights %*% t(Yloadings)
  beta = rbind(meanY - (meanX %*% beta), beta)
  W = beta
  save(W, file = "plsregress.rda")
}
d49f7fece8e0fbe4895be04a28c8483304123ac1 | 55c59b150b49de2123191bbd9e62cc5baed5c52b | /man/pc.sel.Rd | aa9ee5ca0b345258385cd9a9bfed560995537e04 | [] | no_license | RfastOfficial/Rfast2 | b45c43d627571f5f1c5bbf293454d92643853146 | 23c639c345d526ac05ce8b1613d9671975a8402b | refs/heads/master | 2023-08-08T01:15:34.684148 | 2023-07-21T10:48:12 | 2023-07-21T10:48:12 | 213,210,384 | 27 | 4 | null | 2023-01-20T11:23:56 | 2019-10-06T17:14:37 | C++ | UTF-8 | R | false | false | 1,709 | rd | pc.sel.Rd | \name{Variable selection using the PC-simple algorithm}
\alias{pc.sel}
\title{Variable selection using the PC-simple algorithm
}
\description{
Variable selection using the PC-simple algorithm.
}
\usage{
pc.sel(y, x, ystand = TRUE, xstand = TRUE, alpha = 0.05)
}
\arguments{
\item{y}{
A numerical vector with continuous data.
}
\item{x}{
A matrix with numerical data; the independent variables, of which some will probably be selected.
}
\item{ystand}{
If this is TRUE the response variable is centered. The mean is subtracted from every value.
}
\item{xstand}{
If this is TRUE the independent variables are standardised.
}
\item{alpha}{
The significance level.
}
}
\details{
Variable selection for continuous data only is performed using the PC-simple algorithm
(Buhlmann, Kalisch and Maathuis, 2010). The PC algorithm used to infer the skeleton of a Bayesian
Network has been adopted in the context of variable selection. In other words, the PC algorithm
is used for a single node.
}
\value{
A list including:
\item{vars}{
A vector with the selected variables.
}
\item{n.tests}{
The number of tests performed.
}
\item{runtime}{
The runtime of the algorithm.
}
}
\references{
Buhlmann P., Kalisch M. and Maathuis M. H. (2010). Variable selection in high-dimensional linear models:
partially faithful distributions and the PC-simple algorithm. Biometrika, 97(2): 261-278.
\url{ https://arxiv.org/pdf/0906.3204.pdf }
}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{pc.skel}, \link{omp}
}
}
\examples{
y <- rnorm(100)
x <- matrix( rnorm(100 * 50), ncol = 50)
a <- pc.sel(y, x)
}
|
8aff69051d40aa274f8dfb9b7051df164b37c965 | 5eb6f3b5c61391ec1a8662c06f2199fb532bcc6f | /R/ropenaire-package.R | 23d28bba3db4fc9a791fec18b029aeba5bcd1751 | [
"MIT"
] | permissive | njahn82/ropenaire | 9e45c4405bee3693c535d7deb231259cf7db4a36 | af0ea1564595229fa0f7dda06b6e063fe8313837 | refs/heads/master | 2020-05-17T00:51:58.767820 | 2018-05-16T11:06:37 | 2018-05-16T11:06:37 | 38,446,170 | 7 | 3 | null | 2018-05-14T17:35:46 | 2015-07-02T17:12:25 | R | UTF-8 | R | false | false | 397 | r | ropenaire-package.R | #' **Client for the OpenAIRE API**
#'
#' @importFrom readr read_csv read_tsv
#' @importFrom xml2 read_xml
#' @importFrom jsonlite fromJSON
#' @importFrom crul HttpClient
#' @importFrom tibble as_tibble
#' @name ropenaire-package
#' @aliases ropenaire
#' @docType package
#' @author Najko Jahn \email{najko.jahn@@uni-bielefeld.de}
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
NULL
|
14d5a3db7f8a04e915ba2a064d31006b8dd5a48a | 8f9a7f8a781e030178239e0153387f75ae1582f9 | /man/gs_append.Rd | 7f7ebb8a14038ddb60e735741052903b1b605f53 | [] | no_license | meerapatelmd/gUnit | fefe35524a76779a5fd24586d73f05f177b2d1ab | cbfe7309ceee4a7f30a4519f9fbffa910ec0ea65 | refs/heads/master | 2023-01-21T03:38:09.108929 | 2020-11-29T09:25:36 | 2020-11-29T09:25:36 | 211,777,544 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 564 | rd | gs_append.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gs_append.R
\name{gs_append}
\alias{gs_append}
\title{Adds a dataframe to a Google Sheet tab by doing a dplyr::bind_row on the data downloaded as a tmp csv and completely rewriting the appended data to a new tab and deleting the old tab}
\usage{
gs_append(dataframe, gsheet_name, tab)
}
\description{
Adds a dataframe to a Google Sheet tab by doing a dplyr::bind_row on the data downloaded as a tmp csv and completely rewriting the appended data to a new tab and deleting the old tab
}
|
2c2c2b46672fe02061074c3db797b4d39b968c95 | 3a2a577056e53a6455c02256df5015ee0053904b | /man/wm_records_name.Rd | 97de3ff70fc0bd8630a430ad3aea55111c8fa1df | [
"MIT"
] | permissive | cran/worrms | 5f6b0d1b3f53504121da7f91ea5828d056e16ceb | fa3382eb254ef5e5cfa08e26d6e10a1db8ed0286 | refs/heads/master | 2023-07-07T06:28:32.608109 | 2023-06-20T13:00:02 | 2023-06-20T13:00:02 | 101,341,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,210 | rd | wm_records_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wm_records_name.R
\name{wm_records_name}
\alias{wm_records_name}
\title{Get records by single name, optional fuzzy matching}
\usage{
wm_records_name(name, fuzzy = TRUE, marine_only = TRUE, offset = 1, ...)
}
\arguments{
\item{name}{(character) a taxonomic name, required.}
\item{fuzzy}{(logical) fuzzy search. default: \code{TRUE}}
\item{marine_only}{(logical) marine only or not. default: \code{TRUE}}
\item{offset}{(integer) record to start at. default: 1}
\item{...}{named curl options. see \code{curl::curl_options}}
}
\value{
A tibble/data.frame
}
\description{
Get records by single name, optional fuzzy matching
}
\note{
there is no underscore method like other functions in this package
as there is already a plural version: \code{\link[=wm_records_names]{wm_records_names()}}
}
\examples{
\dontrun{
wm_records_name(name = 'Leucophaeus')
wm_records_name(name = 'Leucophaeus', fuzzy = FALSE)
wm_records_name(name = 'Leucophaeus', marine_only = FALSE)
wm_records_name(name = 'Platanista', marine_only = FALSE)
wm_records_name(name = 'Platanista', marine_only = FALSE, offset = 5)
}
}
|
3561f7ce128c5a84df7fbd849b3723b0d020fb80 | af742a4b76c1b4f69b806d09f8808cdb4eb5ee0c | /global.R | 7cb924bf700cff543a19f97e25551bdcacebd3c2 | [
"MIT"
] | permissive | wangjs/BibeR | 774b18daacbcb8a85b49ec22b968efcd265b77db | 197d62b952b8d3613ccdc2d19053d9296d468b7d | refs/heads/master | 2020-03-10T22:35:20.267249 | 2017-01-24T04:24:36 | 2017-01-24T04:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,635 | r | global.R | library(shiny)
library(gsubfn) # strapllyc to get time cited from citation
library(ggplot2)
library(DT)
#library(d3heatmap)
#library(igraph)
library(countrycode)
library(rworldmap)
library(networkD3)
library(dplyr) # filter function
library(pheatmap)
# a new parser to parse plain txt
require(stringr) # str_extract()
require(tm) # tolower() or toupper()
# function
# Extract a single bibliographic field from one Web of Science plain-text
# record.
#
# x:    character scalar holding one full WoS record (one chunk of the
#       export after splitting on "\n\n")
# item: which field to extract (e.g. 'author', 'title', 'cited'); several
#       branches accept aliases
#
# Returns the field upper-cased (character), or numeric for the
# count/year/page fields; returns NULL invisibly for an unknown 'item'.
parser = function(x, item) {
  #
  if (item %in% c('author')) {
    author = str_extract(x, "(?<=\nAU )[a-zA-Z]+, [a-zA-Z]+(\n [a-zA-Z]+, [a-zA-Z]+)*")
    author = gsub("\n ", ";", author)
    author = toupper(author)
    return(author)
  }
  if (item %in% c('c_author', 'corresponding author', 'correspond')) {
    c_author = str_extract(x, "(?<=\nRP )[a-zA-Z]+, [a-zA-Z]+(\n [a-zA-Z]+, [a-zA-Z]+)*")
    c_author = gsub("\n ", ";", c_author)
    c_author = toupper(c_author)
    return(c_author)
  }
  if (item %in% c('c_author address', 'corresponding author address', 'correspond address')) {
    c_author_address = str_extract(x, "(?<=\nRP ).*(\n .*)*")
    c_author_address = gsub("\n ", ";", c_author_address)
    c_author_address = toupper(c_author_address)
    return(c_author_address)
  }
  if (item %in% c('title')) {
    title = str_extract(x, "(?<=\nTI ).*(\n .*)*")
    title = gsub("\n ", " ", title)
    title = toupper(title)
    return(title)
  }
  if (item %in% c('journal')) {
    journal = str_extract(x, "(?<=\nSO ).*(\n .*)*")
    journal = gsub("\n ", " ", journal)
    journal = toupper(journal)
    return(journal)
  }
  if (item %in% c('language')) {
    language = str_extract(x, "(?<=\nLA ).*(\n .*)*")
    language = gsub("\n ", "", language)
    language = toupper(language)
    return(language)
  }
  if (item %in% c('type')) {
    type = str_extract(x, "(?<=\nDT ).*(\n .*)*")
    type = gsub("\n ", " ", type)
    type = toupper(type)
    return(type)
  }
  if (item %in% c('keyword')) {
    keyword = str_extract(x, "(?<=\nDE ).*(\n .*)*")
    keyword = gsub("\n ", " ", keyword)
    keyword = toupper(keyword)
    return(keyword)
  }
  if (item %in% c('keyword plus')) {
    # BUG FIX: Keywords Plus live under the WoS "ID" field tag; the original
    # pattern re-read the "DE" (author keywords) tag by copy-paste mistake,
    # making 'keyword plus' identical to 'keyword'
    keyword.plus = str_extract(x, "(?<=\nID ).*(\n .*)*")
    keyword.plus = gsub("\n ", " ", keyword.plus)
    keyword.plus = toupper(keyword.plus)
    return(keyword.plus)
  }
  if (item %in% c('abstract')) {
    abstract = str_extract(x, "(?<=\nAB ).*(\n .*)*")
    abstract = gsub("\n ", " ", abstract)
    abstract = toupper(abstract)
    return(abstract)
  }
  if (item %in% c('address')) {
    # Addresses keep a "\r" separator between institutions so downstream
    # code (parse_all) can split them again
    address = str_extract(x, "(?<=\nC1 ).*(\n .*)*")
    address = gsub("\n ", "\r", address)
    address = toupper(address)
    return(address)
  }
  if (item %in% c('cited reference','reference')) {
    ref = str_extract(x, "(?<=\nNR )[0-9]*")
    ref = as.numeric(ref)
    return(ref)
  }
  if (item %in% c('times cited','cited')) {
    cited = str_extract(x, "(?<=\nTC )[0-9]*")
    cited = as.numeric(cited)
    return(cited)
  }
  if (item %in% c('year','date')) {
    year = str_extract(x, "(?<=\nPY )[0-9]*")
    year = as.numeric(year)
    return(year)
  }
  if (item %in% c('page','page number')) {
    page = str_extract(x, "(?<=\nPG )[0-9]*")
    page = as.numeric(page)
    return(page)
  }
  if (item %in% c('research domain', 'subject', 'domain')) {
    domain = str_extract(x, "(?<=\nSC ).*(\n .*)*")
    domain = gsub("\n ", " ", domain)
    domain = toupper(domain)
    return(domain)
  }
}
# function
# Parse an entire Web of Science plain-text export into one data frame.
#
# file_path: path to the WoS export file; records are separated by blank
#            lines ("\n\n").
#
# Returns a data.frame with one row per record (authors, title, journal,
# keywords, abstract, countries, institutions, citation counts, year, ...)
# plus a logical 'inter_cooper' flag marking international co-authorship.
parse_all = function(file_path) {
  require(stringr) # str_extract()
  require(tm) # tolower() or toupper()
  # Read the whole file as one string and split it into individual records
  file = readChar(file_path, file.info(file_path)$size)
  split.txt = unlist(strsplit(file, "\n\n"))
  split.txt = split.txt[-length(split.txt)] # remove the last one which is a null entry
  # Extract each field record-by-record via parser()
  author = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'author')))
  c_author = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'c_author')))
  c_author_address = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'c_author address')))
  title = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'title')))
  journal = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'journal')))
  language = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'language')))
  type = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'type')))
  keyword = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'keyword')))
  keyword.plus = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'keyword plus')))
  abstract = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'abstract')))
  address = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'address')))
  ref = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'reference')))
  cited = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'cited')))
  year = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'year')))
  page = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'page')))
  domain = unlist(lapply(split.txt, function(x)
    parser(x = x, item = 'domain')))
  # Normalise keyword separators ("; " -> ";"); NA records stay NA
  keyword = unlist(lapply(keyword, function(x){
    if (!is.na(x)) {
      keyword = gsub("; ", ";", x)
      return(keyword)
    }else{return(NA)}
  }))
  # Derive a ";"-separated list of unique countries from the author
  # addresses; the last comma-delimited token of each address is taken as
  # the country, with ad-hoc cleanup for US state abbreviations
  country = unlist(lapply(address, function(address) {
    if (!is.na(address)) {
      c = strsplit(address, "\r")[[1]]
      c.output = unlist(lapply(c, function(x) {
        c.input = strsplit(x, ",")[[1]]
        c.input = c.input[length(c.input)]
        c.input = removeNumbers(c.input)
        c.input = gsub("\\.", "", c.input)
        c.input = trimws(c.input) # remove the first and last white space, require R 3.2.0
        c.input = gsub("[A-Z]{2} USA", "USA", c.input)
        c.input = gsub("[A-Z] USA", "USA", c.input)
        c.input = gsub("[A-Z]USA", "USA", c.input)
        c.input_tmp = str_extract(c.input, "(?<=^ )[A-Z]*")
        if(!is.na(c.input_tmp)) c.input = c.input_tmp
        if (nchar(c.input) == 2)
          c.input = 'USA'
        return(c.input)
      }))
      c.output = paste(unique(c.output), collapse = ";")
      return(c.output)
    }else{
      NA
    }
  }))
  # Derive "institution! country" pairs from the author addresses (the "!"
  # separator is swapped for "," at the end of this function)
  institution = unlist(lapply(address, function(address) {
    if (!is.na(address)) {
      a = strsplit(address, "\r")[[1]]
      a.output = unlist(lapply(a, function(x) {
        a.input = gsub("\\[.*\\]", "", x)
        a.input = trimws(a.input)
        a.input = strsplit(a.input, ",")[[1]][1]
        # get the country of the institution
        c.input = strsplit(x, ",")[[1]]
        c.input = c.input[length(c.input)]
        c.input = removeNumbers(c.input)
        c.input = gsub("\\.", "", c.input)
        c.input = trimws(c.input) # remove the first and last white space, require R 3.2.0
        c.input = gsub("[A-Z]{2} USA", "USA", c.input)
        c.input = gsub("[A-Z] USA", "USA", c.input)
        c.input = gsub("[A-Z]USA", "USA", c.input)
        c.input_tmp = str_extract(c.input, "(?<=^ )[A-Z]*")
        if(!is.na(c.input_tmp)) c.input = c.input_tmp
        if (nchar(c.input) == 2)
          c.input = 'USA'
        a.input = paste(a.input, c.input, sep = "! ")
        return(a.input)
      }))
      a.output = paste(unique(a.output), collapse = ";")
      return(a.output)
    }else{
      NA
    }
  }))
  # Country of the corresponding author (same last-token heuristic)
  c_author_country = unlist(lapply(c_author_address, function(address) {
    if (!is.na(address)) {
      cac.input = strsplit(address, ",")[[1]]
      cac.input = cac.input[length(cac.input)]
      cac.input = removeNumbers(cac.input)
      cac.input = gsub("\\.", "", cac.input)
      cac.input = trimws(cac.input) # remove the first and last white space, require R 3.2.0
      cac.input = gsub("[A-Z]{2} USA", "USA", cac.input)
      cac.input = gsub("[A-Z] USA", "USA", cac.input)
      cac.input = gsub("[A-Z]USA", "USA", cac.input)
      cac.input_tmp = str_extract(cac.input, "(?<=^ )[A-Z]*")
      if(!is.na(cac.input_tmp)) cac.input = cac.input_tmp
      if (nchar(cac.input) == 2)
        cac.input = 'USA'
      return(cac.input)
    }else{
      NA
    }
  }))
  # Corresponding author's institution: the 3rd comma-delimited token of the
  # RP address. NOTE(review): this index looks fragile — verify against the
  # actual RP line layout ("NAME, N (reprint author), INSTITUTION, ...")
  c_author_institution = unlist(lapply(c_author_address, function(address) {
    if (!is.na(address)) {
      a.input = gsub("\\[.*\\]", "", address)
      a.input = trimws(a.input)
      a.input = strsplit(a.input, ",")[[1]][3]
      return(a.input)
    }else{
      NA
    }
  }))
  c_author_institution = paste(c_author_institution, c_author_country, sep = ", ")
  # Assemble one row per record
  df = data.frame(
    author = as.character(author),
    c.author = as.character(c_author),
    c.author.country = as.character(c_author_country),
    title = as.character(title),
    journal = as.character(journal),
    language = as.character(language),
    type = as.character(type),
    keyword = as.character(keyword),
    keyword.plus = as.character(keyword.plus),
    abstract = as.character(abstract),
    country = as.character(country),
    institution = as.character(institution),
    c.author.institution = as.character(c_author_institution),
    ref = as.character(ref),
    cited = as.character(cited),
    year = year,
    page = page,
    domain = as.character(domain)
  )
  # if country is NA, but when c_author country is available, define country as c_author country
  df$country = as.character(df$country)
  df[which(is.na(df$country)), "country"] = "not" # "not" is a random string to let to match, which is odd
  df[which(df$country == "not"), "country"] = as.character(df$c.author.country[which(df$country == "not")])
  # other case like when corresponding author did not appear in author address, need to add it into the author country
  a = as.character(df$c.author.country)
  b = as.character(df$country)
  n = length(a)
  tmp = NULL
  for(i in 1:n){
    tmp = c(tmp, grepl(a[i],b[i]))
  }
  df[which(tmp == 0), ]$country = paste(df[which(tmp == 0), ]$country,
                                        df[which(tmp == 0), ]$c.author.country,
                                        sep = ";")
  rm(tmp)
  rm(a)
  rm(b)
  rm(i)
  # end of this part
  # Flag international cooperation: TRUE when more than one country appears
  # (";"-separated), FALSE for a single country, NA when unknown
  df$inter_cooper = unlist(lapply(df$country, function(x) {
    if (is.na(x))
      return(NA)
    if (grepl(';', x))
      return(T)
    if (!grepl(';', x))
      return(F)
  }))
  #df$c.author.institution = gsub("!", ",", df$c.author.institution)
  # Restore the "," that was temporarily encoded as "!" inside institutions
  df$institution = gsub("!", ",", df$institution)
  return(df)
}
# Build an HTML button-styled hyperlink that searches Google Scholar for
# the given value (used as a clickable cell in the DT table).
createLink = function(val) {
  paste0('<a href="https://scholar.google.com/scholar?q=', val,
         '" target="_blank" class="btn btn-primary">Link</a>')
}
|
cb382aafabbfc7098ddef1827be8200b0cc3cf68 | e3b617e3c82c650f08a005141f09e62d8d283897 | /README.rd | f680277194a8c349343709f84ac5ec7306e367a3 | [] | no_license | myokoym/rabbit-slide-myokoym-sinatrasapporo-20150919 | 4438c27e7f96f490a053002e37f87ef44eed61f7 | 188d7926e77f5c7c76106ee7169190f638bbacf8 | refs/heads/master | 2021-03-12T23:33:31.270214 | 2015-09-18T18:27:44 | 2015-09-18T18:27:44 | 42,400,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 435 | rd | README.rd | = Sinatra w/ Bower
Sinatraアプリのフロントエンド用パッケージ(JavaScriptやCSSなど)の
管理を楽にするためにBowerやアセットパイプラインを使う話。
== 作者向け
=== 表示
rake
=== 公開
rake publish
== 閲覧者向け
=== インストール
gem install rabbit-slide-myokoym-sinatrasapporo-20150919
=== 表示
rabbit rabbit-slide-myokoym-sinatrasapporo-20150919.gem
|
021d4fc47b601de6cc2e7cbea4bd7134b9c7ed27 | a600cc438eb46c15f9bc7715827d6a5feac74684 | /man/essSurv.Rd | c282aa1f8ca92927e6f5ff26932fba970c9cee74 | [] | no_license | github-js/ess | 8bc0d7d3561f90163cf50b375314deef05d1ce46 | 4d96e63e7bd536e0af26fda48d4af1047f7fc42d | refs/heads/master | 2021-09-04T09:45:10.617072 | 2018-01-17T19:16:36 | 2018-01-17T19:16:36 | 114,037,261 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,397 | rd | essSurv.Rd | \name{essSurv}
\alias{essSurv}
\title{
Calculating ESS for time-to-event outcome
}
\description{
Calculates ESS for right-censored time-to-event outcome
}
\usage{
essSurv(shapeParam,scaleParam,m,nsim)
}
\arguments{
\item{shapeParam}{Shape parameter of the inverse gamma prior}
\item{scaleParam}{Scale parameter of the inverse gamma prior}
\item{m}{A positive integer specified as an maximum value in which ESS is searched.}
\item{nsim}{umber of simulations for numerical approximation}
}
\value{
\item{ess}{Prior effective sample size}
}
\references{
Morita, S., Thall, P. F., and Muller, P. (2010). Evaluating the impact of prior assumptions in Bayesian biostatistics. Stat Biosci, 2, 1-17.
Morita, S., Thall, P. F., and Muller, P. (2008). Determining the effective sample size of a parametric prior. Biometrics, 64, 595-602.
Thall, P. F., Wooten, L. H., Tannir, N. M. (2005). Monitoring event times in early phase clinical trials: some practical issues. Clinical Trials, 2, 467-478.
}
\author{
Jaejoon Song <jjsong2@mdanderson.org>, Satoshi Morita <smorita@kuhp.kyoto-u.ac.jp >
}
\examples{
## Revisiting Example 5 in Morita et al. (2010, Stat Biosci).
## This is a inverse gamma-exponential model
## with an inverse gamma prior specified as IG(5.348,30.161)
## we can compute the ESS as the following
essSurv(shapeParam=5.348,scaleParam=30.161,m=7,nsim=1000)
}
}
|
bf134dbf0aad47307eab3ceca67bd2170394137a | eacd12b7b1b2893d18848100b8244123c7468a41 | /man/random_rna.Rd | 39aba4fa11d10bfc1b2028d766647e9e713cb1f9 | [] | no_license | rforbiodatascience21/2021_group_14_rpackage | 20696e27447b6002adceb89cb7e71dcae22f0c1b | 7da213080d7b5c13c539ef6bd754dc509a1bea58 | refs/heads/main | 2023-03-25T16:52:28.777955 | 2021-03-22T10:41:02 | 2021-03-22T10:41:02 | 350,264,000 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 264 | rd | random_rna.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_rna.R
\name{random_rna}
\alias{random_rna}
\title{Create a random RNA sequence}
\usage{
random_rna(l)
}
\value{
random RNA sequence
}
\description{
Create a random RNA sequence
}
|
93a0708d149255e605c5abc3db8610dc1e183e80 | b1cfb0ebaad846c04424aaa122ec712c7a22ac08 | /daCosta_and_Benucci_etal_2021_SwithgrassMicrobiome/PRJ_SwGreenhouse_Rcode.R | a5654784b255a182091bf234924e18ca2d763c1d | [
"MIT"
] | permissive | Gian77/Scientific-Papers-R-Code | ce3091fc06045c01539f1680bf0409274e62345d | ae6970047bf7fde2d9795b836a3522e597713d61 | refs/heads/master | 2023-03-18T23:28:50.037605 | 2023-03-13T15:12:28 | 2023-03-13T15:12:28 | 189,509,215 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 180,224 | r | PRJ_SwGreenhouse_Rcode.R | # Project: GLBRC greenhouse Pedro
# Authors: Chou Ming-Yi, Benucci GMN, Beschoren da Costa P, Bonito G (preliminary order)
# Institution: Michigan State University
# WORKING ENVIRONMENT SETUP --------------------------------------------------------------------------------
# Interactive session setup: disable scientific notation, raise the print
# limit, and fix the RNG seed for reproducibility.
options(scipen = 999)
options(max.print = 100000000)
#rm(list = ls(all=TRUE)) # removes all variables in the global environment so you start fresh
Sys.time() # prints out the time and date you ran the code
set.seed(1977) #to make reproducible results
detach(package:phyloseq, unload = TRUE) #to reorder packages
search() #to search into the environment
#rm(list= ls()[!(ls() %in% c('keepThis','andThis'))]) #remove all but ...
# BUG FIX: base R's function is sessionInfo(); `session.info()` does not exist
sessionInfo() #to get session information
# Citation resources
citation()
version$version.string
devtools::session_info()
print(citation("MASS"), style = "text")
library(purrr)
c("vegan", "phyloseq", "ape") %>%
  map(citation) %>%
  print(style = "text")
# NECESSARY PACKAGES ---------------------------------------------------------------------------------------
library(phyloseq)
library(Biostrings)
library(ape)
library(tidyverse)
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggpubr)
library(magrittr)
library(decontam)
library(stringr)
library(purrr)
library(vegan)
library(jtools)
library(interactions)
library(multcompView)
library(multcomp)
library(sjPlot)
library(car)
library(caret)
library(grid)
library(gridExtra)
library(Boruta)
library(randomForest)
library(rfUtilities)
library(agricolae)
library(ggfortify)
library(rcompanion)
# Palettes -------------------------------------------------------------------------------------------------
# Colour palettes: one per genotype level (6) and one per soil level (4).
Pal_geno <- c("#F2AA91","#F6511D","#FFB400","#00A6ED","#7FB800","#0D2C54")
Pal_soil <- c("#F6511D","#FFB400","#00A6ED","#7FB800")
# Quick visual check of the genotype palette.
pie(rep(1, length(Pal_geno)), labels = sprintf("%d (%s)",
    seq_along(Pal_geno), Pal_geno), col = Pal_geno)
# LOAD ALL LIBRARIES ---------------------------------------------------------------------------------------
# Fungi ----------------------------------------------------------------------------------------------------
# ITS (fungal) inputs: UPARSE OTU table, sample mapping file, and
# representative OTU sequences (FASTA).  Paths are machine-local.
Lib_all_OTU_ITS <-
  read.delim(
    "/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Joint_Fun_otu_tables/FOTU_table_ITS_UPARSE.txt",
    row.names = 1
  )
Lib_all_mapping_ITS <-
  read.delim(
    "/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Joint_Fun_otu_tables/Lib_all_Fun_mapping.txt",
    row.names = 1
  )
All_otus_ITS <-
  readDNAStringSet(
    "/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Joint_Fun_otu_tables/FOTUs.fasta",
    format = "fasta",
    seek.first.rec = TRUE,
    use.names = TRUE
  )
# Inspect the sample names present in the ITS mapping file.
levels(factor(Lib_all_mapping_ITS$Sample_name))
# Prokaryotes ----------------------------------------------------------------------------------------------
# 16S (prokaryote) inputs: UPARSE OTU table, sample mapping file, and
# representative OTU sequences (FASTA).
Lib_all_OTU_16S <-
  read.delim(
    "/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Joint_Bacterial_otu_tables/otu_table_16S_UPARSE.txt",
    row.names = 1
  )
Lib_all_mapping_16S <-
  read.delim(
    "/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Joint_Bacterial_otu_tables/Lib_all_mapping.txt",
    row.names = 1
  )
All_otus_16S <-
  readDNAStringSet(
    "/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Joint_Bacterial_otu_tables/otus.fasta",
    format = "fasta",
    seek.first.rec = TRUE,
    use.names = TRUE
  )
# Inspect the sample names present in the 16S mapping file.
levels(as.factor(Lib_all_mapping_16S$Sample_name))
# ***********************************************************************************************-----------
# ADJUST MAPPING FILE --------------------------------------------------------------------------------------
# Classify each sample in a sequencing mapping table as "Control", "Mock",
# or "Sample" (new column `Control_mock_sample`) and compute the summed raw
# band concentration (new column `AmpliconCon`).
#
# Improvements over the original:
#  * missing band concentrations are zeroed with a direct vectorized
#    assignment instead of dplyr::if_else();
#  * dplyr::mutate() dropped the data.frame row names, which forced a
#    save/restore workaround -- base `$<-` assignment keeps them intact,
#    so the workaround (and the dplyr dependency) is gone.
#
# @param Lib_mapping data.frame with columns Sample_name,
#        band_1_RAW_concentration and band_2_RAW_concentration.
# @return `Lib_mapping` with Control_mock_sample and AmpliconCon appended.
classify_mock_control_sample <- function(Lib_mapping) {
  control_names <- c("Control1", "Control2", "Control3", "Control4",
                     "Control5", "Control6",
                     "Control_neg_noseq", "Control_noseq")
  mock_names <- c("Mock1", "Mock2", "Mock_resuspendBar", "Mock_1ul",
                  "Mock2_176_SpikeIn", "Mock2_376_Line6_G3_SpikeIn",
                  "Mock2_MMPRNT-1953_SpikeIn")
  # Treat missing band concentrations as 0 ng/ul.
  Lib_mapping$band_1_RAW_concentration[is.na(Lib_mapping$band_1_RAW_concentration)] <- 0
  Lib_mapping$band_2_RAW_concentration[is.na(Lib_mapping$band_2_RAW_concentration)] <- 0
  # Anything not in the control or mock lists is a biological sample.
  Lib_mapping$Control_mock_sample <-
    ifelse(Lib_mapping$Sample_name %in% control_names, "Control",
           ifelse(Lib_mapping$Sample_name %in% mock_names, "Mock", "Sample"))
  Lib_mapping$AmpliconCon <-
    Lib_mapping$band_1_RAW_concentration + Lib_mapping$band_2_RAW_concentration
  Lib_mapping
}
Lib_all_mapping_ITS_new <-
  classify_mock_control_sample(Lib_all_mapping_ITS)
Lib_all_mapping_16S_new <-
  classify_mock_control_sample(Lib_all_mapping_16S)
head(Lib_all_mapping_ITS_new)
# NOTE(review): Control_mock_sample is selected twice here -- the second
# occurrence was probably meant to be AmpliconCon; verify intent.
Lib_all_mapping_ITS_new %>%
  select(Sample_name, Control_mock_sample, Control_mock_sample)
# Adjust PCR concentration ---------------------------------------------------------------------------------
# Keep the DNA concentration only of bands between 300 and 600 bp (the
# window enforced by ModConc() below) as amplicon_concentration; bands
# outside this window are treated as non-target / primer dimer.
# Derive per-sample amplicon size/concentration from two QIAxcel band calls.
# Bands outside [min_size, max_size] bp are treated as primer dimer /
# non-target and zeroed; the larger remaining value per sample becomes
# `amplicon_size` / `amplicon_concentration` (note: the two maxima are taken
# independently, as in the original).  Values of 0 are bumped to 1, the
# approximate detection limit.  Prints head() of the working table as a
# side effect (kept from the original).
#
# Improvements over the original:
#  * the band-size window (300-600 bp) is now a pair of defaulted
#    parameters, so callers are unaffected but the window is tunable;
#  * the fragile chained replacement `df[cond, ]$col <- x` is replaced by
#    the standard `df$col[cond] <- x`;
#  * the two new columns are appended by NAME instead of positions
#    `map2[, c(5, 6)]`;
#  * the unused `map_rownames` local is removed.
#
# @param map data.frame with band_1_size, band_2_size,
#        band_1_RAW_concentration, band_2_RAW_concentration columns.
# @param min_size,max_size inclusive bounds (bp) for a true amplicon band.
# @return `map` with amplicon_size and amplicon_concentration appended.
ModConc <- function(map, min_size = 300, max_size = 600) {
  bands <- map[c("band_1_size", "band_2_size",
                 "band_1_RAW_concentration", "band_2_RAW_concentration")]
  bands[is.na(bands)] <- 0 # NA band calls behave as absent bands
  for (i in 1:2) {
    size_col <- paste0("band_", i, "_size")
    conc_col <- paste0("band_", i, "_RAW_concentration")
    off_target <- bands[[size_col]] < min_size | bands[[size_col]] > max_size
    # Zero both size and concentration of off-target (dimer) bands so they
    # cannot win the pmax() below.
    bands[[conc_col]][off_target] <- 0
    bands[[size_col]][off_target] <- 0
  }
  out <- bands
  out$amplicon_size <-
    pmax(bands$band_1_size, bands$band_2_size)
  out$amplicon_concentration <-
    pmax(bands$band_1_RAW_concentration, bands$band_2_RAW_concentration)
  # Empty values -> 1, the approximate detection limit.
  out$amplicon_size[out$amplicon_size == 0] <- 1
  out$amplicon_concentration[out$amplicon_concentration == 0] <- 1
  print(head(out))
  cbind(map, out[c("amplicon_size", "amplicon_concentration")])
}
Lib_all_mapping_ITS_mod <-
  ModConc(Lib_all_mapping_ITS_new)
head(Lib_all_mapping_ITS_mod)
Lib_all_mapping_16S_mod <-
  ModConc(Lib_all_mapping_16S_new)
head(Lib_all_mapping_16S_mod)
# GENERATE PHYLOSEQ FILES ----------------------------------------------------------------------------------
# generates fungal phyloseq object (OTU table + mapping + sequences)
Lib_all_ITS <-
  phyloseq(
    otu_table(Lib_all_OTU_ITS, taxa_are_rows = TRUE),
    sample_data(Lib_all_mapping_ITS_mod),
    All_otus_ITS)
Lib_all_ITS
head(sample_data(Lib_all_ITS))
table(Lib_all_ITS@sam_data$Control_mock_sample)
# generates prokaryote phyloseq object
Lib_all_16S <-
  phyloseq(
    otu_table(Lib_all_OTU_16S, taxa_are_rows = TRUE),
    sample_data(Lib_all_mapping_16S_mod),
    All_otus_16S)
Lib_all_16S
head(sample_data(Lib_all_16S))
table(Lib_all_16S@sam_data$Control_mock_sample)
# EXTRACT PROJECT PHYLOSEQ OBJECTS -------------------------------------------------------------------------
# >>> GREENHOUSE STUDY -------------------------------------------------------------------------------------
# Fungi: keep Greenhouse-project samples from libraries 6 and 8, then drop
# OTUs with zero reads in the retained samples.
# NOTE(review): the ITS mapping uses "Library_N" while 16S uses "Library_n".
greenhouse_ITS <- subset_samples(Lib_all_ITS, Project_name%in%c("Greenhouse") &
                                   Library_N%in%c("Lib_6", "Lib_8"))
otu_table(greenhouse_ITS) <-
  otu_table(greenhouse_ITS)[which(rowSums(otu_table(greenhouse_ITS)) > 0),]
greenhouse_ITS
count(as.data.frame(as.matrix(sample_data(greenhouse_ITS))),
      Project_name, Library_N, DNA_plate, Control_mock_sample)
head(greenhouse_ITS@sam_data)
table(greenhouse_ITS@sam_data$Control_mock_sample)
# Extracting controls and mock samples (run under the "Quality" project)
greenhouse_ITS_contr_mock <-
  subset_samples(Lib_all_ITS,
                 DNA_plate%in%c("PLATE_3_ROOT","PLATE_4_ROOT","PLATE_6_ROOT","PLATE_11_SOIL") &
                   Project_name%in%c("Quality"))
otu_table(greenhouse_ITS_contr_mock) <-
  otu_table(greenhouse_ITS_contr_mock)[which(rowSums(otu_table(greenhouse_ITS_contr_mock)) > 0),]
greenhouse_ITS_contr_mock
count(as.data.frame(as.matrix(sample_data(greenhouse_ITS_contr_mock))),
      Project_name, Library_N, DNA_plate, Control_mock_sample)
greenhouse_ITS_contr_mock@sam_data
# Bacteria: same extraction for libraries 2 and 4.
greenhouse_16S <- subset_samples(Lib_all_16S, Project_name%in%c("Greenhouse") &
                                   Library_n%in%c("Lib_2", "Lib_4"))
otu_table(greenhouse_16S) <-
  otu_table(greenhouse_16S)[which(rowSums(otu_table(greenhouse_16S)) > 0),]
greenhouse_16S
count(as.data.frame(as.matrix(sample_data(greenhouse_16S))),
      Project_name, Library_n, DNA_plate, Control_mock_sample)
head(greenhouse_16S@sam_data)
# Extracting controls and mock samples
greenhouse_16S_contr_mock <-
  subset_samples(Lib_all_16S,
                 DNA_plate%in%c("PLATE_3_ROOT","PLATE_4_ROOT","PLATE_6_ROOT","PLATE_11_SOIL") &
                   Project_name%in%c("Quality"))
otu_table(greenhouse_16S_contr_mock) <-
  otu_table(greenhouse_16S_contr_mock)[which(rowSums(otu_table(greenhouse_16S_contr_mock)) > 0),]
greenhouse_16S_contr_mock
count(as.data.frame(as.matrix(sample_data(greenhouse_16S_contr_mock))),
      Project_name, Library_n, DNA_plate, Control_mock_sample)
greenhouse_16S_contr_mock@sam_data
# Remove re-do samples with lowest read number -------------------------------------------------------------
head(greenhouse_ITS@sam_data)
greenhouse_ITS@sam_data$Description
duplicated(greenhouse_ITS@sam_data$Description)
table(duplicated(greenhouse_ITS@sam_data$Description))
head(greenhouse_16S@sam_data)
greenhouse_16S@sam_data$Description
duplicated(greenhouse_16S@sam_data$Description)
# Extracting samples with the highest number of reads
# For samples that share the same Description (re-sequenced "re-do"
# samples), return the sample ID of the replicate with the LOWEST read
# count within each duplicated Description.  The caller drops the returned
# IDs, keeping the highest-read replicate.  Only one ID per Description is
# returned per call, so the function is applied repeatedly when a sample
# was sequenced more than twice (see the calls below).
#
# @param physeq a phyloseq object whose sample_data has a Description column.
# @return character vector of sample IDs to remove.
RemoveDuplicates <- function(physeq){
  print("Duplicated samples")
  print(table(duplicated(physeq@sam_data$Description)))
  # Restrict to samples whose Description occurs more than once.
  # NOTE(review): subset_samples() uses non-standard evaluation; calling it
  # inside a function can be fragile across phyloseq versions -- confirm.
  physeq <-
    subset_samples(physeq, Description%in%physeq@sam_data[
      duplicated(physeq@sam_data$Description), ]$Description)
  # Per-sample read totals for the duplicated samples.
  df <-
    data.frame(sample_sums(physeq), row.names = row.names(physeq@sam_data))
  colnames(df) <- "ReadNo"
  df$SampleID <- row.names(physeq@sam_data)
  df$Description <- physeq@sam_data$Description
  df <- df[order(df$Description),]
  df %T>% print()
  # top_n(1, -abs(ReadNo)) keeps the row with the SMALLEST read count per
  # Description group (ties would all be kept).
  res <-
    df %>%
    group_by(Description) %>%
    top_n(1, -abs(ReadNo)) %>%
    as.data.frame()
  res <- res$SampleID
  return(res)
}
RemoveDuplicates(greenhouse_ITS)
# Drop the lowest-read replicate of each duplicated ITS sample.
greenhouse_ITS_clean <-
  subset_samples(
    greenhouse_ITS,
    !sample_names(greenhouse_ITS) %in% RemoveDuplicates(greenhouse_ITS))
duplicated(greenhouse_ITS_clean@sam_data$Description)
RemoveDuplicates(greenhouse_16S)
greenhouse_16S_clean <-
  subset_samples(
    greenhouse_16S,
    !sample_names(greenhouse_16S) %in% RemoveDuplicates(greenhouse_16S))
duplicated(greenhouse_16S_clean@sam_data$Description)
# since there are samples replicated multiple times the step is run 3 times
# (RemoveDuplicates removes one replicate per Description per pass)
greenhouse_16S_clean <-
  subset_samples(
    greenhouse_16S_clean,
    !sample_names(greenhouse_16S_clean) %in% RemoveDuplicates(greenhouse_16S_clean))
duplicated(greenhouse_16S_clean@sam_data$Description)
greenhouse_16S_clean <-
  subset_samples(
    greenhouse_16S_clean,
    !sample_names(greenhouse_16S_clean) %in% RemoveDuplicates(greenhouse_16S_clean))
duplicated(greenhouse_16S_clean@sam_data$Description)
# SEPARATE MOCK from SAMPLES -------------------------------------------------------------------------------
greenhouse_ITS_contr <-
  subset_samples(greenhouse_ITS_contr_mock, Control_mock_sample%in%c("Control"))
greenhouse_ITS_contr
greenhouse_ITS_contr@sam_data
count(as.data.frame(as.matrix(sample_data(greenhouse_ITS_contr))),
      Project_name, Library_N, DNA_plate, Control_mock_sample)
greenhouse_ITS_mock <-
  subset_samples(greenhouse_ITS_contr_mock, Control_mock_sample%in%c("Mock"))
greenhouse_ITS_mock
greenhouse_ITS_mock@sam_data
count(as.data.frame(as.matrix(sample_data(greenhouse_ITS_mock))),
      Project_name, Library_N, DNA_plate, Control_mock_sample)
greenhouse_16S_contr <-
  subset_samples(greenhouse_16S_contr_mock, Control_mock_sample%in%c("Control"))
greenhouse_16S_contr
greenhouse_16S_contr@sam_data
count(as.data.frame(as.matrix(sample_data(greenhouse_16S_contr))),
      Project_name, Library_n, DNA_plate, Control_mock_sample)
greenhouse_16S_mock <-
  subset_samples(greenhouse_16S_contr_mock, Control_mock_sample%in%c("Mock"))
greenhouse_16S_mock
greenhouse_16S_mock@sam_data
count(as.data.frame(as.matrix(sample_data(greenhouse_16S_mock))),
      Project_name, Library_n, DNA_plate, Control_mock_sample)
# GENERATE PHYLOSEQ OBJECTS FOR GREENHOUSE -----------------------------------------------------------------
# Merge the cleaned biological samples with the negative controls (the
# controls are needed downstream by decontam).
physeq_fungi_gs <-
  merge_phyloseq(greenhouse_ITS_clean, greenhouse_ITS_contr)
physeq_fungi_gs
count(as.data.frame(as.matrix(sample_data(physeq_fungi_gs))),
      Project_name, Library_N, DNA_plate, Control_mock_sample)
physeq_fungi_gs@sam_data
physeq_bact_gs <-
  merge_phyloseq(greenhouse_16S_clean, greenhouse_16S_contr)
physeq_bact_gs
count(as.data.frame(as.matrix(sample_data(physeq_bact_gs))),
      Project_name, Library_n, DNA_plate, Control_mock_sample)
physeq_bact_gs@sam_data
# PCR BAND ANALYSES ----------------------------------------------------------------------------------------
# GEN BAND plot --------------------------------------------------------------------------------------------
# Scatter plot of QIAxcel band size (bp) vs. band concentration (ng/ul),
# coloured by sample type (Control / Mock / Sample).  Both band calls per
# sample are stacked into long format before plotting.
#
# @param Lib_mapping mapping data.frame with band_[12]_size,
#        band_[12]_RAW_concentration and Control_mock_sample columns.
# @return a ggplot object (add labs()/lims() at the call site).
quiaxcel_band_plot<-function(Lib_mapping){
  # true bands must be between 300 and 550 bp; primer dimer concentration is less than 15 ng/ul
  Band_size_and_cocentration1 <- Lib_mapping%>%
    select(band_1_size, band_1_RAW_concentration, Control_mock_sample)%>%
    rename(band_size=band_1_size, band_concentration=band_1_RAW_concentration)
  Band_size_and_cocentration2 <- Lib_mapping%>%
    select(band_2_size, band_2_RAW_concentration, Control_mock_sample)%>%
    rename(band_size=band_2_size, band_concentration=band_2_RAW_concentration)
  # generate a long format (one row per band call)
  Band_size_and_cocentration <-
    bind_rows(Band_size_and_cocentration1, Band_size_and_cocentration2)
  # NOTE(review): assigning NA over NA cells is a no-op; kept as-is.
  Band_size_and_cocentration[is.na(Band_size_and_cocentration)] <- NA
  head(Band_size_and_cocentration) %T>% print()
  plot_band <-
    ggplot(Band_size_and_cocentration,
           aes(x=band_concentration, y=band_size, color=Control_mock_sample))+
    geom_point() +
    theme_classic() +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8),
          legend.position="right") +
    guides(color = guide_legend(title = "Sample type", ncol = 1))
  return(plot_band)
}
# View plot of band DNA concentration per band size, for ITS and 16S.
quiaxcel_band_plot(Lib_all_mapping_ITS_new) +
  labs(title = "PCR band concentration/size ITS",
       x= "Concentration (ng/ul)",
       y="Size (bp)")
quiaxcel_band_plot(Lib_all_mapping_16S_new) +
  labs(title = "PCR band concentration/size 16S",
       x= "Concentration (ng/ul)",
       y="Size (bp)")
# Plotting size histograms ---------------------------------------------------------------------------------
# There are some NA values that will prevent those sizes from being plotted.
# Histogram of band-1 sizes (bp) with a fixed bin width.
#
# @param df mapping data.frame with a band_1_size column.
# @param width histogram bin width in bp.
# @return a ggplot object (add labs()/xlim() at the call site).
PlotHist <- function(df, width){
  ggplot(df, aes(x=band_1_size)) +
    geom_histogram(binwidth=width, colour="grey80", fill="grey80") +
    #geom_vline(aes(xintercept=mean(band_1_size, na.rm=TRUE)),
    #           color="red", linetype="dashed", size=0.8) +
    theme_classic() +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8),
          legend.position="none") -> plot_dist
  return(plot_dist)
}
PlotHist(Lib_all_mapping_ITS_new, 1) +
  labs(title="Distribution of band sizes", x="band size (bp)", y="Sample number")
PlotHist(Lib_all_mapping_16S_new, 1) +
  xlim(0, 1000) +
  labs(title="Distribution of band sizes", x="band size (bp)", y="Sample number")
# *** Band plots -------------------------------------------------------------------------------------------
# Composite figure: band scatter plots (A, B) over size histograms (C, D).
ggarrange(
  ggarrange(
    quiaxcel_band_plot(Lib_all_mapping_ITS_new) +
      ylim(0, 2500) +
      labs(title = "PCR band ITS",
           x= "Concentration (ng/ul)",
           y="Size (bp)"),
    quiaxcel_band_plot(Lib_all_mapping_16S_new) +
      ylim(0, 2500) +
      labs(title = "PCR band 16S",
           x= "Concentration (ng/ul)",
           y="Size (bp)"),
    labels = c("A","B"),
    align = "hv",
    ncol =2,
    nrow = 1,
    common.legend = TRUE,
    legend=c("right")),
  ggarrange(
    PlotHist(Lib_all_mapping_ITS_new, 1) +
      labs(title="", x="band size (bp)", y="Sample number"),
    PlotHist(Lib_all_mapping_16S_new, 1) +
      xlim(0, 1000) +
      labs(title="", x="band size (bp)", y="Sample number"),
    labels = c("C","D"),
    align = "hv",
    ncol = 2,
    nrow = 1,
    legend = c("right")),
  ncol = 1,
  nrow = 2) -> band_plot
band_plot
# DECONTAMINATIONS -----------------------------------------------------------------------------------------
# INSPECTING LIBRARY SIZES ---------------------------------------------------------------------------------
# Add the three bookkeeping columns used by decontam and the depth plots:
# LibrarySize (reads/sample), Index (plot x-axis) and is.neg (negative
# control flag).
sample_data(physeq_fungi_gs)$LibrarySize <-
  sample_sums(physeq_fungi_gs)
sample_data(physeq_fungi_gs)$Index <-
  seq(nrow(sample_data(physeq_fungi_gs)))
sample_data(physeq_fungi_gs)$is.neg <-
  sample_data(physeq_fungi_gs)$Control_mock_sample == "Control"
head(sample_data(physeq_fungi_gs))
sample_data(physeq_bact_gs)$LibrarySize <-
  sample_sums(physeq_bact_gs)
sample_data(physeq_bact_gs)$Index <-
  seq(nrow(sample_data(physeq_bact_gs)))
sample_data(physeq_bact_gs)$is.neg <-
  sample_data(physeq_bact_gs)$Control_mock_sample == "Control"
head(sample_data(physeq_bact_gs))
write.csv(physeq_fungi_gs@sam_data, "mapping_good_ITS.csv")
write.csv(physeq_bact_gs@sam_data, "mapping_good_16s.csv")
# Plotting sample depth ------------------------------------------------------------------------------------
# Rank plot of per-sample read depth, coloured by negative-control status.
# Samples are ordered by LibrarySize and re-indexed before plotting.
#
# BUG FIX: the original selected the bookkeeping columns positionally with
# df[, c(26:28)], which silently breaks if the mapping file gains or loses
# a column; they are now selected by name (the names the plot code below
# actually uses: LibrarySize, Index, is.neg).
#
# @param physeq a phyloseq object whose sample_data carries LibrarySize,
#        Index and is.neg (added in the "INSPECTING LIBRARY SIZES" step).
# @return a ggplot object (add labs() at the call site).
PlotDepth <- function(physeq){
  # as(., "matrix") coerces everything to character; numeric columns are
  # converted back below.
  df <-
    as(sample_data(physeq), "matrix")
  df <-
    as.data.frame(df)[, c("LibrarySize", "Index", "is.neg")]
  # reconvert to numeric
  df$LibrarySize <- as.numeric(as.character(df$LibrarySize))
  df$Index <- as.numeric(as.character(df$Index))
  # order by depth and re-index so Index is the depth rank
  df <- df[order(df$LibrarySize), ]
  df$Index <- seq(nrow(df))
  # inspect
  str(df) %T>% print()
  head(df) %T>% print()
  ggplot(data=df, aes(x=Index, y=LibrarySize, color=is.neg)) +
    geom_point(alpha =0.7) +
    theme_classic() +
    scale_colour_manual("Negative control", values = c("grey", "red")) +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8)) -> plot_dist
  return(plot_dist)
}
PlotDepth(physeq_fungi_gs) +
  labs(title="Fungi",
       subtitle = "Samples read depth",
       x="Sample index",
       y="Read number")
PlotDepth(physeq_bact_gs) +
  labs(title="Prokaryotes",
       subtitle = "Samples read depth",
       x="Sample index",
       y="Read number")
# DECONTAMINATION ------------------------------------------------------------------------------------------
head(physeq_fungi_gs@sam_data)
physeq_fungi_gs@sam_data$DNA_extraction_yield # do not know why it has many NA
physeq_fungi_gs@sam_data[is.na(physeq_fungi_gs@sam_data$DNA_extraction_yield),]
physeq_fungi_gs@sam_data$AmpliconCon
# decontam errors with "conc must be positive numeric", so a tiny pseudo-
# count is added to the zero concentrations before isContaminant().
physeq_fungi_gs@sam_data$AmpliconCon <- physeq_fungi_gs@sam_data$AmpliconCon + 0.000001
# Combined frequency + prevalence test per DNA plate, merged with Fisher's
# method; thresholds 0.1 (frequency) and 0.5 (prevalence).
contam_fungi_gs <- isContaminant(physeq_fungi_gs,
                                 method="either",
                                 neg="is.neg",
                                 batch = "DNA_plate",
                                 batch.combine = "fisher",
                                 conc="AmpliconCon",
                                 threshold=c(0.1, 0.5))
table(contam_fungi_gs$contaminant)
physeq_bact_gs@sam_data$AmpliconCon <- physeq_bact_gs@sam_data$AmpliconCon + 0.000001
contam_bact_gs <- isContaminant(physeq_bact_gs,
                                method="either",
                                neg="is.neg",
                                batch = "DNA_plate",
                                batch.combine = "fisher",
                                conc="AmpliconCon",
                                threshold=c(0.1, 0.5))
table(contam_bact_gs$contaminant)
# plotting contaminant OTUs --------------------------------------------------------------------------------
# Prevalence plot of decontam results: for every OTU, its prevalence
# (number of samples where present) in negative controls vs. in true
# samples, coloured by the contaminant call.
#
# @param df the phyloseq object that was fed to isContaminant().
# @param contam the data.frame returned by isContaminant() for `df`
#        (taxa must be in the same order).
# @return a ggplot object.
PlotContam <- function(df, contam){
  # Make phyloseq object of presence-absence in negative controls and true samples
  physeq_pa <- transform_sample_counts(df, function(abund) 1*(abund>0))
  physeq_pa_neg <- subset_samples(physeq_pa, Control_mock_sample%in%c("Control"))
  physeq_pa_pos <- subset_samples(physeq_pa, Control_mock_sample%in%c("Sample"))
  # Make data.frame of prevalence in positive and negative samples
  df_contam <- data.frame(pa.pos=taxa_sums(physeq_pa_pos),
                          pa.neg=taxa_sums(physeq_pa_neg),
                          contaminant=contam$contaminant,
                          Pvalue=contam$p)
  head(df_contam) %T>% print()
  # plotting
  ggplot(data=df_contam, aes(x=pa.neg, y=pa.pos, color=contaminant)) +
    geom_point(size=0.8, alpha=0.7) +
    labs(x="Prevalence in negative controls", y="Prevalence in true samples") +
    theme_classic() +
    scale_colour_manual("Contaminant OTUs", values = c("grey", "red")) +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8))-> plot_cont
  return(plot_cont)
}
PlotContam(physeq_fungi_gs, contam_fungi_gs)
PlotContam(physeq_bact_gs, contam_bact_gs)
# Contaminant counts and percentages per marker.
table(contam_fungi_gs$contaminant)
table(contam_bact_gs$contaminant)
table(contam_fungi_gs$contaminant)/sum(table(contam_fungi_gs$contaminant))*100
table(contam_bact_gs$contaminant)/sum(table(contam_bact_gs$contaminant))*100
# *** FIGURE S1 - decontam ---------------------------------------------------------------------------------
# Composite: read-depth plots (A, B) over contaminant prevalence plots (C, D).
ggarrange(
  ggarrange(PlotDepth(physeq_fungi_gs) +
              labs(title="Fungi",
                   subtitle = "Samples read depth",
                   x="Sample index",
                   y="Read number"),
            PlotDepth(physeq_bact_gs) +
              labs(title="Prokaryotes",
                   subtitle = "Samples read depth",
                   x="Sample index",
                   y="Read number"),
            labels = c("A","B"),
            widths = c(1,1,1,1),
            align = "hv" ,
            ncol = 2,
            nrow = 1,
            common.legend = TRUE,
            legend = c("bottom")),
  ggarrange(
    PlotContam(physeq_fungi_gs, contam_fungi_gs) +
      labs(subtitle="Contaminants"),
    PlotContam(physeq_bact_gs, contam_bact_gs) +
      labs(subtitle="Contaminants"),
    widths = c(1,1),
    labels = c("C","D"),
    align = "hv" ,
    ncol = 2,
    nrow = 1,
    common.legend = TRUE,
    legend = c("bottom")),
  widths = c(1, 1.2),
  ncol = 1,
  nrow = 2) -> Fig_S1
Fig_S1
# REMOVING CONTAMINANTS ------------------------------------------------------------------------------------
# function to remove bad taxa
# Drop the OTUs listed in `badTaxa` from a phyloseq object, keeping every
# remaining taxon in its original order.
remove_taxa <- function(physeq, badTaxa) {
  keep <- taxa_names(physeq)
  keep <- keep[!keep %in% badTaxa]
  prune_taxa(keep, physeq)
}
# Remove the OTUs decontam flagged as contaminants from each marker.
physeq_fungi_gs_clean <-
  remove_taxa(physeq_fungi_gs, rownames(subset(
    contam_fungi_gs, contaminant %in% c("TRUE")
  )))
physeq_bact_gs_clean <-
  remove_taxa(physeq_bact_gs, rownames(subset(
    contam_bact_gs, contaminant %in% c("TRUE")
  )))
# MAPPING FILES FOR SRA ------------------------------------------------------------------------------------
write.csv(sample_data(physeq_fungi_gs_clean), "physeq_fungi_gs_clean.csv")
write.csv(sample_data(physeq_bact_gs_clean), "physeq_bact_gs_clean.csv")
sample_names(physeq_bact_gs_clean)
# Spot check one sample (Amp489).
as.vector(physeq_bact_gs_clean@otu_table[, c("Amp489")])
physeq_bact_gs_clean@sam_data[c("Amp489"),]
# REMOVING ALL CONTROLS ------------------------------------------------------------------------------------
# fungi: keep biological samples only, then drop now-empty OTUs
physeq_fungi_gs_clean <-
  subset_samples(physeq_fungi_gs_clean, Control_mock_sample %in% c("Sample"))
otu_table(physeq_fungi_gs_clean) <-
  otu_table(physeq_fungi_gs_clean)[which(rowSums(otu_table(physeq_fungi_gs_clean)) > 0),]
physeq_fungi_gs_clean
head(physeq_fungi_gs_clean@otu_table)
# bacteria
physeq_bact_gs_clean <-
  subset_samples(physeq_bact_gs_clean, Control_mock_sample %in% c("Sample"))
otu_table(physeq_bact_gs_clean) <-
  otu_table(physeq_bact_gs_clean)[which(rowSums(otu_table(physeq_bact_gs_clean)) > 0),]
physeq_bact_gs_clean
head(physeq_bact_gs_clean@otu_table)
# EXPORT SEQUENCES for CONSTAX -----------------------------------------------------------------------------
write.dna(refseq(physeq_fungi_gs_clean), format="fasta", file = "PRJ_Greenhouse_seq_ITS.fasta", colsep="")
write.dna(refseq(physeq_bact_gs_clean), format="fasta", file = "PRJ_Greenhouse_seq_16S.fasta", colsep="")
# Importing CONSTAX taxonomies -----------------------------------------------------------------------------
# FUNGI ----------------------------------------------------------------------------------------------------
taxonomy_ITS <-
  read.delim(
    "constax_taxonomy_ITS.txt",
    header = TRUE,
    row.names = 1,
    sep = "\t")
head(taxonomy_ITS)
taxonomy_ITS[1:100, ]
dim(taxonomy_ITS)
table(taxonomy_ITS$High_level_taxonomy)
table(taxonomy_ITS$Kingdom)
# NOTE! There are still some mock taxa among the samples. We have some cross-talk we need to quantify!
untarget_uparse_ITS <- c("Alveolata","Amoebozoa","Choanoflagellozoa","Metazoa","Cercozoa", "Protista",
                         "Rhizaria","Stramenopila", "Viridiplantae", "MockK", "Mockc","Mockk","Mockp")
apply(taxonomy_ITS, 2, function(x) which(x %in% untarget_uparse_ITS))
apply(taxonomy_ITS, 2, function(x) which(x %in% c("MockK")))
# Remove non-target taxa and non-classified taxa that did not have any hit
# when blasted against UNITE at 60% coverage and identity.
# NOTE(review): in the subset below `&` binds tighter than `|`, so the last
# condition is (High_level_taxonomy == "" AND Kingdom == "") OR-ed with the
# two membership tests -- confirm that is the intended logic.
taxonomy_ITS_bad <-
  subset(
    taxonomy_ITS,
    High_level_taxonomy %in% untarget_uparse_ITS |
      Kingdom %in% untarget_uparse_ITS |
      High_level_taxonomy %in% c("") &
      Kingdom %in% c(""))
dim(taxonomy_ITS_bad)
taxonomy_ITS_filt <-
  taxonomy_ITS[!(rownames(taxonomy_ITS) %in% rownames(taxonomy_ITS_bad)), ]
# Keep OTU IDs as an explicit first column (used later by FinalizeTaxonomy).
taxonomy_ITS_filt <-
  data.frame(OTU_ID = rownames(taxonomy_ITS_filt), taxonomy_ITS_filt)
dim(taxonomy_ITS_filt)
head(taxonomy_ITS_filt)
table(taxonomy_ITS_filt$Kingdom)
table(taxonomy_ITS_filt$High_level_taxonomy)
# looking at non-classified OTUs:
# Unclassified OTUs that hit Fungi at > 60%
dim(subset(
  taxonomy_ITS,
  High_level_taxonomy %in% c("Fungi") &
    Kingdom %in% c("")))
# Fungal OTUs classified as Fungi but with low hit coverage/identity.
# NOTE(review): due to &/| precedence this is
# (Kingdom == "Fungi" AND cover < 60) OR (id < 60) -- confirm intent.
dim(subset(
  taxonomy_ITS,
  Kingdom %in% c("Fungi") &
    HL_hit_query_cover < 60 |
    HL_hit_percent_id < 60))
# How many isolates we were able to detect with the MiSeq?
subset(taxonomy_ITS,!Isolate %in% "")
# BACTERIA -------------------------------------------------------------------------------------------------
taxonomy_16S <-
  read.delim(
    "constax_taxonomy_16S.txt",
    header = TRUE,
    row.names = 1,
    sep = "\t")
head(taxonomy_16S)
taxonomy_16S[1:100, ]
dim(taxonomy_16S)
# Drop column 8, then prepend the OTU IDs as a real column.
taxonomy_16S <-
  taxonomy_16S[-8]
taxonomy_16S <-
  data.frame(OTU_ID = rownames(taxonomy_16S), taxonomy_16S)
# cleaning taxonomy labels
# NOTE(review): str_remove_all() deletes EVERY "_1" substring, so a label
# containing e.g. "_12" would be mangled to "2" -- confirm the labels only
# ever carry a trailing "_1".
taxonomy_16S <-
  map_dfc(
    1:ncol(taxonomy_16S),
    ~ str_remove_all(taxonomy_16S[,.x],"_1")) %>%
  as.data.frame()
# re-adding the column labels lost by map_dfc()
# NOTE(review): "Isolate Isolate_percent_id" contains a stray "Isolate "
# prefix (and a space) -- probably meant to be "Isolate_percent_id".
colnames(taxonomy_16S) <-
  c("OTU_ID",
    "Kingdom",
    "Phylum",
    "Class",
    "Order",
    "Family",
    "Genus",
    "Species",
    "Isolate",
    "Isolate Isolate_percent_id",
    "Isolate_query_cover",
    "High_level_taxonomy",
    "HL_hit_percent_id",
    "HL_hit_query_cover")
# remember! all columns are now character vectors
str(taxonomy_16S)
rownames(taxonomy_16S) <- taxonomy_16S$OTU_ID
# Looking at non-target taxa
table(taxonomy_16S$High_level_taxonomy)
table(taxonomy_16S$Kingdom)
table(taxonomy_16S$Family)
table(taxonomy_16S$Isolate)
# NOTE! There are still some mock taxa among the samples. We have some cross-talk we need to quantify!
untarget_uparse <- c("Mitochondria","Chloroplast")
apply(taxonomy_16S, 2, function(x) which(x %in% untarget_uparse))
apply(taxonomy_16S, 2, function(x) which(x %in% c("Mock")))
# Remove non-target taxa (organelle reads) and non-classified taxa with no
# high-level hit.  NOTE(review): &/| precedence as for the ITS subset above.
taxonomy_16S_bad <-
  subset(
    taxonomy_16S,
    High_level_taxonomy%in%untarget_uparse |
      Kingdom%in%untarget_uparse |
      Order%in%untarget_uparse |
      Family%in%untarget_uparse |
      High_level_taxonomy%in%c("") &
      Kingdom%in%c(""))
dim(taxonomy_16S_bad)
apply(taxonomy_16S_bad, 2, function(x) which(x %in% untarget_uparse))
taxonomy_16S_filt <-
  taxonomy_16S[!(rownames(taxonomy_16S) %in% rownames(taxonomy_16S_bad)), ]
dim(taxonomy_16S_filt)
apply(taxonomy_16S_filt, 2, function(x) which(x %in% untarget_uparse))
head(taxonomy_16S_filt)
table(taxonomy_16S_filt$Kingdom)
table(taxonomy_16S_filt$High_level_taxonomy)
rownames(taxonomy_16S_filt) <- taxonomy_16S_filt$OTU_ID
# Extract Chloroplast and Mitochondria 16S reads -----------------------------------------------------------
taxonomy_16S$HL_hit_percent_id <- as.numeric(taxonomy_16S$HL_hit_percent_id)
rownames(taxonomy_16S) <- taxonomy_16S$OTU_ID
# Organelle-derived OTUs at any of the relevant ranks.
taxonomy_Chloro_Mito <-
  subset(taxonomy_16S,
         Order%in%untarget_uparse |
           Family%in%untarget_uparse |
           High_level_taxonomy%in%untarget_uparse |
           Kingdom%in%untarget_uparse)
taxonomy_Chloro_Mito
# Phyloseq object restricted to the organelle OTUs (tax_table intersection
# with the cleaned bacterial object drops everything else).
physeq_Chloro_Mito <- phyloseq(otu_table(physeq_bact_gs_clean, taxa_are_rows = TRUE),
                               sample_data(physeq_bact_gs_clean),
                               tax_table(as.matrix(taxonomy_Chloro_Mito)),
                               refseq(physeq_bact_gs_clean))
physeq_Chloro_Mito
head(physeq_Chloro_Mito@sam_data)
# Reformat Taxonomy ----------------------------------------------------------------------------------------
# Provides blank2na(), used by FinalizeTaxonomy() below.
source("../R_functions/ReformatTaxonomy.R")
# Finalize a CONSTAX taxonomy table: normalize blank cells to NA, append
# " sp." to classified genera, derive BestMatch (the most specific non-NA
# rank, falling back to OTU_ID) and a combined "OTU_ID-BestMatch" Taxonomy
# label, then strip the " sp." suffix back off BestMatch and Genus.
#
# BUG FIX: the original gsub(" sp.", "") calls treated "." as a regex
# wildcard, so any " sp" followed by ANY character was deleted;
# fixed = TRUE removes only the literal " sp." suffix added above.
#
# @param constax data.frame whose columns 1:8 are OTU_ID..Species
#        (as built for taxonomy_ITS_filt / taxonomy_16S_filt).
# @return `constax` with BestMatch and Taxonomy columns appended.
FinalizeTaxonomy <- function(constax) {
  # Drop embedded " sp " tokens from species strings.
  constax$Species <-
    gsub(" sp ", "", constax$Species)
  # blank2na() is sourced from ../R_functions/ReformatTaxonomy.R.
  constax[] <- lapply(constax, blank2na,
                      na.strings = c('', 'NA', 'na', 'N/A', 'n/a', 'NaN', 'nan'))
  # Last non-NA rank of a row = most specific classification.
  lastValue <- function(x) tail(x[!is.na(x)], 1)
  constax$Genus <- as.character(constax$Genus)
  has_genus <- !is.na(constax$Genus)
  constax$Genus[has_genus] <-
    paste(constax$Genus[has_genus], "sp.", sep = " ")
  # Columns 1:8 are OTU_ID..Species, so OTU_ID acts as a fallback label for
  # rows with no classified rank at all.
  constax$BestMatch <- apply(constax[, c(1:8)], 1, lastValue)
  constax$BestMatch <-
    gsub("_", " ", constax$BestMatch)
  constax$Taxonomy <-
    paste(constax$OTU_ID, constax$BestMatch, sep = "-")
  constax$BestMatch <-
    gsub(" sp.", "", constax$BestMatch, fixed = TRUE)
  constax$Genus <-
    gsub(" sp.", "", constax$Genus, fixed = TRUE)
  constax
}
taxonomy_ITS_cor <-
FinalizeTaxonomy(taxonomy_ITS_filt)
taxonomy_ITS_cor[1:50, ]
taxonomy_16S_cor <-
FinalizeTaxonomy(taxonomy_16S_filt)
taxonomy_16S_cor[1:50, ]
# Extract mock sequences across samples --------------------------------------------------------------------
# Fungi
head(taxonomy_ITS)
levels(taxonomy_ITS$Isolate)
# Mock OTUs are identified two ways: (1) by their Isolate assignment to one
# of the 12 mock species, (2) by the "MockK" high-level taxonomy flag.
taxonomy_ITS_mock1 <-
subset(taxonomy_ITS, Isolate %in% c("Mock_sp1", "Mock_sp2","Mock_sp3", "Mock_sp4",
"Mock_sp5", "Mock_sp6","Mock_sp7", "Mock_sp8",
"Mock_sp9", "Mock_sp10","Mock_sp11", "Mock_sp12"))
taxonomy_ITS_mock2 <-
subset(taxonomy_ITS, High_level_taxonomy %in% "MockK")
# Union of both mock OTU sets, keyed by OTU_ID (rownames lifted to a column
# so full_join can match them).
taxonomy_ITS_mock <-
full_join(
data.frame(OTU_ID = rownames(taxonomy_ITS_mock1),
taxonomy_ITS_mock1),
data.frame(OTU_ID = rownames(taxonomy_ITS_mock2),
taxonomy_ITS_mock2), by="OTU_ID")
taxonomy_ITS_mock
rownames(taxonomy_ITS_mock) <- taxonomy_ITS_mock$OTU_ID
# phyloseq object restricted to the mock taxonomy table (OTU table, sample
# data and reference sequences are reused from the cleaned fungal object).
physeq_fungi_gs_clean_mock <- phyloseq(otu_table(physeq_fungi_gs_clean, taxa_are_rows = TRUE),
sample_data(physeq_fungi_gs_clean),
tax_table(as.matrix(taxonomy_ITS_mock)),
refseq(physeq_fungi_gs_clean))
physeq_fungi_gs_clean_mock
physeq_fungi_gs_clean_mock@sam_data
physeq_fungi_gs_clean_mock@tax_table
physeq_fungi_gs_clean_mock@otu_table
# Sample Amp1369 is actually a mock sample, has to be removed.
physeq_fungi_gs_clean_mock@sam_data[rownames(physeq_fungi_gs_clean_mock@sam_data)%in%"Amp1369", ]
as.character(otu_table(subset_samples(physeq_fungi_gs_clean_mock, Sample_name%in%"305")))
# Drop the Amp1369 column from the OTU table, then drop OTUs whose total
# count fell to zero after the sample removal.
physeq_fungi_gs_clean@otu_table <-
subset(physeq_fungi_gs_clean@otu_table, select = -c(Amp1369))
physeq_fungi_gs_clean@otu_table <-
physeq_fungi_gs_clean@otu_table[which(rowSums(physeq_fungi_gs_clean@otu_table) > 0),]
# Bacteria
head(taxonomy_16S)
taxonomy_16S$Kingdom
table(taxonomy_16S$Isolate)
# Bacterial mock OTUs identified solely by Isolate assignment to the six
# known mock community strains.
taxonomy_16S_mock <-
subset(taxonomy_16S, Isolate %in% c("Mock_Bacillus_subtilis6S0","Mock_Escherichia_coli6S_8",
"Mock_Lactobacillus_fermentum6S_2","Mock_Listeria_monocytogenes6S_6",
"Mock_Pseudomonas_aeruginosa6S_4","Mock_Salmonella_enterica6S_6"))
taxonomy_16S_mock
# phyloseq object restricted to the bacterial mock taxonomy.
physeq_bact_gs_clean_mock <- phyloseq(otu_table(physeq_bact_gs_clean, taxa_are_rows = TRUE),
sample_data(physeq_bact_gs_clean),
tax_table(as.matrix(taxonomy_16S_mock)),
refseq(physeq_bact_gs_clean))
physeq_bact_gs_clean_mock
physeq_bact_gs_clean_mock@sam_data
physeq_bact_gs_clean_mock@tax_table
physeq_bact_gs_clean_mock@otu_table
# MOCK COMMUNITY ANALYSIS ----------------------------------------------------------------------------------
physeq_fungi_gs_clean_mock %>%
tax_table()
physeq_bact_gs_clean_mock %>%
tax_table()
# IMPORT PLANT AND SOIL METADATA ---------------------------------------------------------------------------
# NOTE(review): hard-coded absolute path — breaks on any other machine;
# consider a relative path or here::here().
metadata_gs <- read.csv(
"/home/gian/Documents/GREENHOSUE_glbrc_project/Data/Greenhouse_data/greenhouse_plants.csv",
header = TRUE, sep = ",")
colnames(metadata_gs)
head(metadata_gs)
dim(metadata_gs)
# Fungi: pull sample metadata out of the phyloseq object, keep sample IDs as
# a column, rename Sample_name -> Pot so it matches the greenhouse sheet key.
meta_fungi <- as(physeq_fungi_gs_clean@sam_data, "data.frame")
meta_fungi$Sample_ID <- rownames(meta_fungi)
head(meta_fungi)
dim(meta_fungi)
str(meta_fungi)
names(meta_fungi)[names(meta_fungi) == "Sample_name"] <- "Pot"
metadata_fungi_gs <-
merge(meta_fungi, metadata_gs, by="Pot")
# Restore sample IDs as rownames (merge drops them).
rownames(metadata_fungi_gs) <-
metadata_fungi_gs$Sample_ID
dim(metadata_fungi_gs)
head(metadata_fungi_gs)
# Bacteria: same merge procedure as for fungi.
meta_bact <- as(physeq_bact_gs_clean@sam_data, "data.frame")
meta_bact$Sample_ID <- rownames(meta_bact)
head(meta_bact)
dim(meta_bact)
str(meta_bact)
names(meta_bact)[names(meta_bact) == "Sample_name"] <- "Pot"
metadata_bact_gs <-
merge(meta_bact, metadata_gs, by="Pot")
rownames(metadata_bact_gs) <-
metadata_bact_gs$Sample_ID
dim(metadata_bact_gs)
head(metadata_bact_gs)
# Generating Phyloseq objetcs for GREENHOSUE STUDY ---------------------------------------------------------
# Look out for Mock OTUs first
dim(taxonomy_ITS_cor[rownames(taxonomy_ITS_cor)%in%rownames(taxonomy_ITS_mock), ])
# Final fungal object: cleaned OTU table + merged plant/soil metadata +
# corrected taxonomy + reference sequences.
physeq_fungi_new <- phyloseq(otu_table(physeq_fungi_gs_clean, taxa_are_rows = TRUE),
sample_data(metadata_fungi_gs),
tax_table(as.matrix(taxonomy_ITS_cor)),
refseq(physeq_fungi_gs_clean))
physeq_fungi_new
head(physeq_fungi_new@sam_data)
head(physeq_fungi_new@tax_table)
head(physeq_fungi_new@otu_table)
# Sample counts per Genotype x Soil_location combination.
count(as.data.frame(as.matrix(sample_data(physeq_fungi_new))),
Genotype, Soil_location)
taxonomy_16S_cor[rownames(taxonomy_16S_cor)%in%rownames(taxonomy_16S_mock), ]
physeq_bact_new <- phyloseq(otu_table(physeq_bact_gs_clean, taxa_are_rows = TRUE),
sample_data(metadata_bact_gs),
tax_table(as.matrix(taxonomy_16S_cor)),
refseq(physeq_bact_gs_clean))
physeq_bact_new
# Strip bacterial mock OTUs, then drop OTUs with zero total counts.
# NOTE(review): remove_taxa() is not a base phyloseq export — presumably a
# helper defined earlier in this file or from speedyseq; confirm.
physeq_bact_new <-
remove_taxa(physeq_bact_new, rownames(taxonomy_16S_mock))
otu_table(physeq_bact_new) <-
otu_table(physeq_bact_new)[which(rowSums(otu_table(physeq_bact_new)) > 0),]
physeq_bact_new
head(physeq_bact_new@sam_data)
head(physeq_bact_new@tax_table)
head(physeq_bact_new@otu_table)
# Match sample for bateria and fungi
setdiff(physeq_fungi_new@sam_data$Pot, physeq_bact_new@sam_data$Pot) # present in first not in second object
setdiff(physeq_bact_new@sam_data$Pot, physeq_fungi_new@sam_data$Pot) # present in second not in first object
# use same pots for fungi and bacteria
sample_names(subset_samples(physeq_bact_new, Pot%in%"305"))
# Pot 305 exists only in the bacterial data; drop it and re-prune empty OTUs.
physeq_bact_new <-
subset_samples(physeq_bact_new, Pot!="305")
physeq_bact_new@otu_table <-
physeq_bact_new@otu_table[which(rowSums(physeq_bact_new@otu_table) > 0),]
physeq_bact_new
sort(sample_sums(physeq_bact_new))
# EXPORT DATASETS ------------------------------------------------------------------------------------------
# Serialized phyloseq objects (the canonical inputs for downstream scripts).
saveRDS(physeq_fungi_new, "fungi_data.rds")
saveRDS(physeq_bact_new, "bacteria_data.rds")
# Flat-file exports (metadata / OTU table / taxonomy / FASTA) for each marker.
write.table(sample_data(physeq_fungi_new), "filtered_data/its_metadata.txt", sep = "\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.table(otu_table(physeq_fungi_new), "filtered_data/its_otu_table.txt", sep = "\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.table(tax_table(physeq_fungi_new), "filtered_data/its_taxonomy.txt", sep="\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.dna(refseq(physeq_fungi_new), format="fasta", file = "filtered_data/its_OTUs.fasta", colsep="")
write.table(sample_data(physeq_bact_new), "filtered_data/16s_metadata.txt", sep = "\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.table(otu_table(physeq_bact_new), "filtered_data/16s_otu_table.txt", sep = "\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.table(tax_table(physeq_bact_new), "filtered_data/16s_taxonomy.txt", sep="\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.dna(refseq(physeq_bact_new), format="fasta", file = "filtered_data/16s_OTUs.fasta", colsep="")
# Chloroplast and Mitocondria
write.dna(physeq_Chloro_Mito@refseq, format="fasta",
file = "PRJ_Greenhouse_Chloro_Mito_16S.fasta", colsep="")
write.table(physeq_Chloro_Mito@tax_table,
"PRJ_Greenhouse_taxonomy_Chloro_Mito_16S.txt", sep="\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
write.table(sort(taxa_sums(physeq_Chloro_Mito), decreasing = TRUE),
"PRJ_Greenhouse_abundance_Chloro_Mito_16S.txt", sep="\t",
quote=FALSE, row.names = TRUE, col.names = TRUE)
# write mock
write.dna(refseq(physeq_bact_gs_clean_mock), format="fasta", file = "Mock_OTUs_16S.fasta", colsep="")
write.dna(refseq(physeq_fungi_gs_clean_mock), format="fasta", file = "mock_OTUs_ITS.fasta", colsep="")
# ******************************************************************************************----------------
# ALPHA DIVERSITY ------------------------------------------------------------------------------------------
# Remove Soil Samples Fungi
# Keep only plant samples (Lowland/Upland ecotypes); soil-only samples have a
# different Ecotype value and are excluded. Re-prune OTUs emptied by the subset.
physeq_fungi_new <-
subset_samples(physeq_fungi_new, Ecotype%in%c("Lowland", "Upland"))
otu_table(physeq_fungi_new) <-
otu_table(physeq_fungi_new)[which(rowSums(otu_table(physeq_fungi_new)) > 0),]
physeq_fungi_new
count(as.data.frame(as.matrix(sample_data(physeq_fungi_new))),
Genotype, Soil_location)
# Remove Soil Samples bact
physeq_bact_new <-
subset_samples(physeq_bact_new, Ecotype%in%c("Lowland", "Upland"))
otu_table(physeq_bact_new) <-
otu_table(physeq_bact_new)[which(rowSums(otu_table(physeq_bact_new)) > 0),]
physeq_bact_new
count(as.data.frame(as.matrix(sample_data(physeq_bact_new))),
Genotype, Soil_location)
# RICHNESS -------------------------------------------------------------------------------------------------
# Build a per-sample alpha-diversity data frame from a phyloseq object.
#
# physeq  phyloseq object with OTU table (taxa as rows) and sample data
# var     rarefaction depth passed to vegan::rarefy(sample = var)
#
# Prints a sanity check that OTU-table and metadata sample names match, and
# a Genotype x Soil contingency table. Returns a data frame (one row per
# sample) with richness, rarefied richness, Shannon, Simpson, read totals
# and the Soil/Genotype/Ecotype/Pot metadata columns.
MakeDf <- function(physeq, var) {
  # vegan expects samples as rows, taxa as columns.
  community <- t(as.data.frame(as(physeq@otu_table, "matrix")))
  sample_meta <- as(physeq@sam_data, "data.frame")
  print("Are metadata sample identical to otu_table?")
  print(identical(rownames(community), rownames(sample_meta)))
  out <- data.frame(richness = specnumber(community, MARGIN = 1))
  out$rarefied <- rarefy(community, sample = var, se = FALSE, MARGIN = 1)
  out$shannon <- diversity(community, index = "shannon", MARGIN = 1)
  out$simpson <- diversity(community, index = "simpson", MARGIN = 1)
  out$readNo <- rowSums(community)
  out$Soil <- sample_meta$Soil_location
  out$Genotype <- sample_meta$Genotype
  out$Ecotype <- sample_meta$Ecotype
  out$Pot <- sample_meta$Pot
  print(table(out$Genotype, out$Soil))
  out
}
# Per-sample alpha diversity tables; rarefaction depths chosen per marker
# (10000 reads for ITS, 5000 for 16S).
alpha_df_fungi <-
MakeDf(physeq_fungi_new, 10000)
dim(alpha_df_fungi)
alpha_df_bact <-
MakeDf(physeq_bact_new, 5000)
dim(alpha_df_bact)
# Align the fungal table to the bacterial pot order so rows correspond.
identical(alpha_df_fungi$Pot, alpha_df_bact$Pot)
sample_order <- match(alpha_df_bact$Pot, alpha_df_fungi$Pot)
alpha_df_fungi <- alpha_df_fungi[sample_order, ]
identical(alpha_df_fungi$Pot, alpha_df_bact$Pot)
# Removing samples that have same value for richness and rarefied, see ? rarefy
alpha_df_fungi_rare <-
alpha_df_fungi[!alpha_df_fungi$richness == alpha_df_fungi$rarefied, ]
dim(alpha_df_fungi_rare)
alpha_df_bact_rare <-
alpha_df_bact[!(as.numeric(alpha_df_bact$richness) == alpha_df_bact$rarefied), ]
dim(alpha_df_bact_rare)
# Visualize the data first ---------------------------------------------------------------------------------
# Scatter plot of sequencing depth (readNo) vs the log of a chosen
# alpha-diversity index, coloured by a grouping factor, with per-group
# linear trend lines.
#
# df     data frame with a readNo column plus the columns named by
#        index and var
# index  string: name of the diversity column (plotted as log(index))
# var    string: name of the grouping column mapped to colour
# Returns a ggplot object.
ReadEffect <- function(df, index, var) {
  ggplot(df, aes(x = readNo,
                 y = log(get(index)),
                 col = get(var))) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE) +
    theme_classic() +
    xlim(0, NA) +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle = 45, size = 8, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"),
          legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8))
}
# Quick look: does sequencing depth drive observed fungal richness by soil?
ReadEffect(alpha_df_fungi, "richness", "Soil") +
labs(title = "Read No vs. Alpha index", x="Read No", y ="log(richness)") +
guides(color = guide_legend(ncol = 1, title = "Soil"))
# Plotting Richness ----------------------------------------------------------------------------------------
# Boxplot of an alpha-diversity value (column `Var`) by Genotype, coloured
# by Soil. The caller prepares `Var` (e.g. mutate(Var = log(richness)))
# before piping the data frame in. Returns a ggplot object.
PlotAlpha <- function(df) {
  ggplot(df, aes(x = Genotype, y = Var, color = Soil)) +
    geom_boxplot(outlier.shape = 1, outlier.size = 1, outlier.stroke = 1,
                 position = position_dodge(preserve = "single"),
                 alpha = 0.6, lwd = 0.5) +
    theme_classic() +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle = 45, size = 8, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"),
          legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8))
}
# NOTE(review): alpha_df_fungi_filt is only created further down (outlier
# filtering section) — this call works in an interactive session run out of
# order, but fails on a clean top-to-bottom run.
alpha_df_fungi_filt %>%
dplyr::select(Genotype, Soil, richness, shannon) %>%
mutate(Var = log(richness)) %>%
PlotAlpha()
# Interaction plot: group means of `Var` across Genotypes, one line per
# Soil. The caller supplies a pre-summarised data frame with Genotype,
# Soil and Var columns. Returns a ggplot object.
PlotInt <- function(df) {
  ggplot(df, aes(x = Genotype, y = Var, color = Soil, group = Soil)) +
    geom_line() +
    theme_classic() +
    theme(plot.title = element_text(size = 12, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle = 45, size = 8, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"),
          legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8))
}
# Genotype x Soil interaction on mean log(richness) for fungi.
alpha_df_fungi %>%
group_by(Genotype, Soil) %>%
summarise(Var = mean(log(richness))) %>%
PlotInt() + labs(title = "Interaction plot", x="Genotype", y = "Mean of log(richness)")
# Model diagnostic plots -----------------------------------------------------------------------------------
# Draw a 2x3 panel of model diagnostic plots: residuals-vs-fitted, QQ,
# scale-location, residuals-vs-leverage, residual histogram, and
# predicted-vs-actual with a red 1:1 reference line.
#
# df   data frame the model was fitted on (kept for interface
#      compatibility; not used directly in the body)
# fit  fitted model object accepted by plot.lm() (lm/glm)
# var  observed response values matching fit$fitted.values
#
# Called for its plotting side effect; returns NULL invisibly.
DiagPlots <- function(df, fit, var) {
  # FIX: the original permanently changed the global graphical parameters
  # (2x3 layout, narrow margins), which leaked into every later plot.
  # Save and restore them via on.exit().
  old_par <- par(mfrow = c(2, 3), mar = c(4, 4, 1.6, 1))
  on.exit(par(old_par), add = TRUE)
  plot(fit, which = 1)
  plot(fit, which = 2)
  plot(fit, which = 3)
  plot(fit, which = 5)
  hist(residuals(fit), breaks = 15,
       main = "Residuals distribution",
       xlab = "Residuals",
       cex.main = 1.5, font.main = 1)
  plot(fit$fitted.values, var,
       xlab = "predicted log(richness)",
       ylab = "actual log(richness)",
       main = "Predicted vs Actual")
  abline(a = 0, b = 1, col = "red", lwd = 1, lty = 1)
  invisible(NULL)
}
# Diagnostics for the full fungal richness model.
# NOTE(review): fit_fungi_rich_m4 is defined further down in the GLM
# section — forward reference; only valid when run out of order.
var = log(alpha_df_fungi$richness)
DiagPlots(alpha_df_fungi, fit_fungi_rich_m4, var)
# ggfortify-style diagnostics for the same model.
autoplot(fit_fungi_rich_m4, which = 1:6, label.size = 3) +
theme(axis.text.x = element_text(angle =45, size = 8, hjust = 1, vjust = 1))
# VISUALIZE readNo vs Richness relationship ----------------------------------------------------------------
# Plotting
# 2x2 panels: depth vs log(richness) and log(rarefied richness), split by
# Soil (A/B) and Genotype (C/D), for fungi then bacteria.
# NOTE(review): alpha_df_fungi_filt is defined later (outlier section) —
# forward reference, as above.
title1 = text_grob("Read No vs. Alpha diversity metric", size = 12, face = 2)
grid.arrange(
ggarrange(
ggarrange(ReadEffect(alpha_df_fungi, "richness", "Soil") +
labs(x="Read No", y ="log(richness)") +
guides(color = guide_legend(nrow = 1, title = "Soil")) + ylim(NA, 6),
ReadEffect(alpha_df_fungi_rare, "rarefied", "Soil") +
labs(x="Read No", y ="log(rarefied)") +
guides(color = guide_legend(nrow = 1, title = "Soil"))+ ylim(NA, 6),
labels = c("A", "B"),
common.legend = TRUE,
legend = "bottom"),
ggarrange(ReadEffect(alpha_df_fungi, "richness", "Genotype") +
labs(x="Read No", y ="log(richness)") +
guides(color = guide_legend(nrow = 1, title = "Genotype"))+ ylim(NA, 6) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo", "Blackwell", "Cave-in-rock", "Kanlow", "Shelter","Southlow")),
ReadEffect(alpha_df_fungi_rare, "rarefied", "Genotype") +
labs(x="Read No", y ="log(rarefied)") +
guides(color = guide_legend(nrow = 1, title = "Genotype"))+ ylim(NA, 6) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo", "Blackwell", "Cave-in-rock", "Kanlow", "Shelter","Southlow")),
labels = c("C","D"),
common.legend = TRUE,
legend = "bottom"),
ncol = 1, nrow = 2),
top = title1)
# Same layout for bacteria; row 188 dropped as an outlier in the raw
# richness panels.
grid.arrange(
ggarrange(
ggarrange(ReadEffect(alpha_df_bact[-188,], "richness", "Soil") + #remove 1 outlier for
labs(x="Read No", y ="log(richness)") +
guides(color = guide_legend(nrow = 1, title = "Soil"))+ ylim(NA, 7.7),
ReadEffect(alpha_df_bact_rare, "rarefied", "Soil") +
labs(x="Read No", y ="log(rarefied)") +
guides(color = guide_legend(nrow = 1, title = "Soil"))+ ylim(NA, 7.7),
labels = c("A", "B"),
common.legend = TRUE,
legend = "bottom"),
ggarrange(ReadEffect(alpha_df_bact[-188,], "richness", "Genotype") +
labs(x="Read No", y ="log(richness)") +
guides(color = guide_legend(nrow = 1, title = "Genotype"))+ ylim(NA, 7.7) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo", "Blackwell", "Cave-in-rock", "Kanlow", "Shelter","Southlow")),
ReadEffect(alpha_df_bact_rare, "rarefied", "Genotype") +
labs(x="Read No", y ="log(rarefied)") +
guides(color = guide_legend(nrow = 1, title = "Genotype"))+ ylim(NA, 7.7) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo", "Blackwell", "Cave-in-rock", "Kanlow", "Shelter","Southlow")),
labels = c("C","D"),
common.legend = TRUE,
legend = "bottom"),
ncol = 1, nrow = 2),
top = title1)
# *************************************************************************************************-------------
# ROBUSTNESS - Robust linear models rlm() MASS package
# removing autliers - robustfying data
library(party)
library(flexplot)
# NOTE(review): alpha_df_fungi_filt is only created below — these first
# exploratory calls assume an out-of-order interactive session.
hist(log(alpha_df_fungi_filt$richness), breaks = 20)
flexplot(richness ~ 1, data = alpha_df_fungi_filt) # visualize richness distrib
rf_fit <- party::cforest(richness ~ readNo + Soil + Genotype, data = alpha_df_fungi_filt)
estimates(rf_fit)
flexplot(richness ~ 1, data = alpha_df_fungi_filt)
# GLM MODELS ---------------------------------------------------------------------------------------------------
# Filtering outliers defined as a value 1.5 times of interquartile range above
# upper quartile (Q3) or below lower quartile (Q1). Simple boxplot method.
par(mfrow=c(1,3))
car::Boxplot(log(alpha_df_fungi$richness), id.method="y")
car::Boxplot(alpha_df_fungi$shannon, id.method="y")
car::Boxplot(alpha_df_fungi$readNo, id.method="y")
dev.off()
# Inspect the flagged rows, then drop the corresponding pots.
alpha_df_fungi[c(134, 171,32, 98, 30, 53), ]
alpha_df_fungi_rare[rownames(alpha_df_fungi_rare)== "Amp1411",]
alpha_df_fungi_filt <-
subset(alpha_df_fungi, !Pot%in%c("184", "109", "89", "87", "303", "198"))
par(mfrow=c(1,3))
car::Boxplot(log(alpha_df_bact$richness), id.method="y")
car::Boxplot(alpha_df_bact$readNo, id.method="y")
car::Boxplot(alpha_df_bact$shannon, id.method="y")
dev.off()
# NOTE(review): index 188 appears twice below, and pot "117" appears twice
# in the exclusion list — duplicates are harmless but look unintentional.
alpha_df_bact[c(150, 185, 115, 188, 190, 188), ]
alpha_df_bact_filt <-
subset(alpha_df_bact, !Pot%in%c("326", "93", "284", "117", "256", "117", "128"))
# 1) FUNGI RICHNESS -------------------------------------------------------------------------------------
# Nested Gamma GLMs for fungal richness: sequencing depth only, + Soil,
# + Genotype, + Genotype:Soil interaction. Compared by pseudo-R2 / AIC
# (compareGLM) and likelihood-ratio chi-square (anova).
fit_fungi_rich_m1 = glm(log(richness) ~ readNo, family = "Gamma", data=alpha_df_fungi_filt)
fit_fungi_rich_m2 = glm(log(richness)~ readNo + Soil, family = "Gamma", data=alpha_df_fungi_filt)
fit_fungi_rich_m3 = glm(log(richness) ~ readNo + Soil + Genotype, family = "Gamma",data=alpha_df_fungi_filt)
fit_fungi_rich_m4 = glm(log(richness) ~ readNo + Soil + Genotype + Genotype:Soil, family = "Gamma", data=alpha_df_fungi_filt)
compareGLM(fit_fungi_rich_m1, fit_fungi_rich_m2, fit_fungi_rich_m3, fit_fungi_rich_m4)
anova(fit_fungi_rich_m1, fit_fungi_rich_m2, fit_fungi_rich_m3, fit_fungi_rich_m4, test="Chisq")
# Type II and Type III F-tests (car::Anova) with sum-to-zero contrasts.
Anova(fit_fungi_rich_m4)
Anova(fit_fungi_rich_m3)
Anova(fit_fungi_rich_m4, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
Anova(fit_fungi_rich_m4, test.statistic=c("F"), type = 3,
contrasts = c("contr.sum","contr.poly"))
# Assumption checks: residual normality and variance homogeneity.
shapiro.test(fit_fungi_rich_m4$residuals)
leveneTest(log(richness) ~ Soil, data=alpha_df_fungi_filt)
leveneTest(log(richness) ~ Genotype, data=alpha_df_fungi_filt)
var = log(alpha_df_fungi_filt$richness)
DiagPlots(alpha_df_fungi_filt, fit_fungi_rich_m4, var)
# 2) FUNGI SHANNON --------------------------------------------------------------------------------------------
# Same nested Gamma GLM ladder for fungal Shannon diversity; m3 (no
# interaction) retained for the F-tests below.
fit_fungi_shan_m1 = glm(shannon ~ readNo, family = "Gamma",data=alpha_df_fungi_filt)
fit_fungi_shan_m2 = glm(shannon ~ readNo + Genotype, family = "Gamma",data=alpha_df_fungi_filt)
fit_fungi_shan_m3 = glm(shannon ~ readNo + Genotype + Soil, family = "Gamma",data=alpha_df_fungi_filt)
fit_fungi_shan_m4 = glm(shannon ~ readNo + Genotype + Soil + Genotype:Soil, family = "Gamma", data=alpha_df_fungi_filt)
compareGLM(fit_fungi_shan_m1, fit_fungi_shan_m2, fit_fungi_shan_m3, fit_fungi_shan_m4)
anova(fit_fungi_shan_m1, fit_fungi_shan_m2, fit_fungi_shan_m3, fit_fungi_shan_m4, test="Chisq")
Anova(fit_fungi_shan_m3)
Anova(fit_fungi_shan_m3, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
var = alpha_df_fungi_filt$shannon
DiagPlots(alpha_df_fungi_filt, fit_fungi_shan_m3, var)
shapiro.test(fit_fungi_shan_m3$residuals)
leveneTest(shannon ~ Soil, data=alpha_df_fungi_filt)
leveneTest(shannon ~ Genotype, data=alpha_df_fungi_filt)
# 3) BACTERIA RICHNESS ----------------------------------------------------------------------------------------------
# Nested Gamma GLMs for bacterial richness, same model ladder as for fungi.
fit_bact_rich_m1 = glm(log(richness) ~ readNo, family = "Gamma",data=alpha_df_bact_filt)
fit_bact_rich_m2 = glm(log(richness)~ readNo + Genotype, family = "Gamma",data=alpha_df_bact_filt)
fit_bact_rich_m3 = glm(log(richness) ~ readNo + Genotype + Soil, family = "Gamma",data=alpha_df_bact_filt)
fit_bact_rich_m4 = glm(log(richness) ~ readNo + Genotype + Soil + Genotype:Soil, family = "Gamma", data=alpha_df_bact_filt)
compareGLM(fit_bact_rich_m1, fit_bact_rich_m2, fit_bact_rich_m3, fit_bact_rich_m4)
anova(fit_bact_rich_m1, fit_bact_rich_m2, fit_bact_rich_m3, fit_bact_rich_m4, test="Chisq")
Anova(fit_bact_rich_m1)
Anova(fit_bact_rich_m3, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
# Assumption checks and diagnostics for the retained model (m3).
shapiro.test(fit_bact_rich_m3$residuals)
leveneTest(log(richness) ~ Soil, data=alpha_df_bact_filt)
leveneTest(log(richness) ~ Genotype, data=alpha_df_bact_filt)
var = log(alpha_df_bact_filt$richness)
DiagPlots(alpha_df_bact_filt, fit_bact_rich_m3, var)
# 4) BACTERIA SHANNON -----------------------------------------------------------------------------------------
# Nested Gamma GLMs for bacterial Shannon diversity; m3 retained.
fit_bact_shan_m1 = glm(shannon ~ readNo,family = "Gamma", data=alpha_df_bact_filt)
fit_bact_shan_m2 = glm(shannon ~ readNo + Genotype,family = "Gamma", data=alpha_df_bact_filt)
fit_bact_shan_m3 = glm(shannon ~ readNo + Genotype + Soil, family = "Gamma", data=alpha_df_bact_filt)
fit_bact_shan_m4 = glm(shannon ~ readNo + Genotype + Soil + Genotype:Soil, family = "Gamma",data=alpha_df_bact_filt)
compareGLM(fit_bact_shan_m1, fit_bact_shan_m2, fit_bact_shan_m3, fit_bact_shan_m4)
anova(fit_bact_shan_m1, fit_bact_shan_m2, fit_bact_shan_m3, fit_bact_shan_m4, test="Chisq")
Anova(fit_bact_shan_m3)
Anova(fit_bact_shan_m3, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
var = alpha_df_bact_filt$shannon
DiagPlots(alpha_df_bact_filt, fit_bact_shan_m3, var)
shapiro.test(fit_bact_shan_m3$residuals)
leveneTest(shannon ~ Soil, data=alpha_df_bact_filt)
leveneTest(shannon ~ Genotype, data=alpha_df_bact_filt)
# *** TABLE 1 - glm models ------------------------------------------------------------------
# Type II F-tests (sum-to-zero contrasts) for the four retained models —
# these are the values reported in Table 1.
Anova(fit_fungi_rich_m4, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
Anova(fit_fungi_shan_m3, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
Anova(fit_bact_rich_m3, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
Anova(fit_bact_shan_m3, test.statistic=c("F"), type = 2,
contrasts = c("contr.sum","contr.poly"))
# Default (likelihood-ratio) Anova for comparison.
Anova(fit_fungi_rich_m4)
Anova(fit_fungi_shan_m3)
Anova(fit_bact_rich_m3)
Anova(fit_bact_shan_m3)
# Plotting soil and genotypes -----------------------------------------------------------------
# Post-hoc Tukey tests among the three experimental treatments with partial residuals,
# after accounting for differential sequencing
# Preview of the deviance residuals used by PlotAlphaDiv below.
residuals(fit_bact_rich_m1, type = c("deviance"))
# Boxplot of a model's deviance residuals grouped by an experimental
# factor, with a red diamond at each group mean. Prints a head() preview,
# a one-way ANOVA of the residuals against the factor, and Tukey HSD
# compact letters to the console.
#
# df   sample data frame (rownames are sample IDs matching the model)
# fit  fitted glm; its deviance residuals are the plotted response
# Var  string: name of the grouping column ("Soil", "Genotype", ...)
# Returns a ggplot object.
PlotAlphaDiv <- function(df, fit, Var) {
  # Deviance residuals as a one-column frame keyed by sample ID.
  resid_df <- as.data.frame(residuals(fit, type = c("deviance")))
  colnames(resid_df) <- "residuals"
  merged <- left_join(tibble::rownames_to_column(df),
                      tibble::rownames_to_column(resid_df),
                      by = "rowname")
  print(head(merged))
  # Hyphenated genotype names (e.g. Cave-in-rock) are normalised to
  # underscores before the ANOVA/Tukey step.
  merged$Genotype <- gsub("-", "_", merged$Genotype)
  print(anova(aov(merged$residuals ~ merged[, Var])))
  tukey <- TukeyHSD(aov(merged$residuals ~ merged[, Var]))
  print(multcompLetters(extract_p(tukey[[1]])))
  ggplot(merged, aes(x = get(Var), y = residuals)) +
    geom_boxplot(outlier.shape = 1, outlier.size = 1, outlier.stroke = 1,
                 position = position_dodge(preserve = "single"),
                 alpha = 0.6, lwd = 0.5) +
    stat_summary(fun = mean, geom = "point", shape = 18, size = 1.9,
                 color = "red", fill = "red") +
    theme_classic() +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle = 45, size = 8, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"),
          legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8)) +
    labs(x = NULL)
}
# Residual box plots for the fungal richness depth-only model by Soil and
# Genotype, with Tukey letters annotated at a fixed height.
# BUG FIX: the first call referenced `fit_fungi_rich_1`, which is never
# defined — the models are named fit_fungi_rich_m1..m4 (see the GLM
# section and the Genotype call below) — so the line errored.
PlotAlphaDiv(alpha_df_fungi_filt, fit_fungi_rich_m1, "Soil") +
  stat_summary(geom = 'text', label = c("c", "a", "b", "a"),
               fun = max, aes(y = 0.04), size = 3.5, color = "black") +
  labs(title = "log(richness)", x = NULL)
PlotAlphaDiv(alpha_df_fungi_filt, fit_fungi_rich_m1, "Genotype") +
  ylim(NA, 0.04)
# **** FIGURE 1 - glm models ----------------------------------------------------------------------
# 2x2 composite: residual box plots by Soil and Genotype for fungal
# richness (A), fungal Shannon (B), bacterial richness (C) and bacterial
# Shannon (D), each against the depth-only (m1) model. Tukey letters are
# hard-coded from the PlotAlphaDiv console output.
# NOTE(review): the label key "Shlelter" below is a typo for "Shelter";
# unmatched keys are ignored by scale_x_discrete, so the axis presumably
# still shows the original level text — confirm and fix the spelling.
title5 = text_grob("Log(richness)", size = 12, face = 2)
title6 = text_grob("Shannon index", size = 12, face = 2)
ggarrange(
grid.arrange(
ggarrange(
PlotAlphaDiv(alpha_df_fungi_filt, fit_fungi_rich_m1, "Soil") +
stat_summary(geom = 'text', label = c("c", "a", "b", "a") ,
fun = max, aes(y = 0.2), size = 3.5, color = "black") +
labs(title = "Fungi"),
PlotAlphaDiv(alpha_df_fungi_filt, fit_fungi_rich_m1, "Genotype") +
ylim(NA, 0.2) +
labs(title = "Fungi", y=NULL) +
scale_x_discrete(labels=c(Alamo = "Alamo",
Blackwell = "Blackwell",
`Cave_in_rock` = "Cave-in-rock",
Kanlow ="Kanlow",
Shlelter="Shelter",
Southlow="Southlow")),
ncol = 2,
nrow = 1,
widths = c(1,1.2),
align = "h", labels = "A"),
top=title5),
grid.arrange(
ggarrange(
PlotAlphaDiv(alpha_df_fungi_filt, fit_fungi_shan_m1, "Soil") +
stat_summary(geom = 'text', label = c("c", "a", "ab", "b"),
fun = max, aes(y = 0.5), size = 3.5, color = "black") +
labs(title = "Fungi"),
PlotAlphaDiv(alpha_df_fungi_filt, fit_fungi_shan_m1, "Genotype") +
ylim(NA, 0.5) +
labs(title = "Fungi",y=NULL) +
scale_x_discrete(labels=c(Alamo = "Alamo",
Blackwell = "Blackwell",
`Cave_in_rock` = "Cave-in-rock",
Kanlow ="Kanlow",
Shlelter="Shelter",
Southlow="Southlow")),
ncol = 2,
nrow = 1,
widths = c(1,1.2),
align = "h", labels = "B"),
top=title6),
ggarrange(
PlotAlphaDiv(alpha_df_bact_filt, fit_bact_rich_m1, "Soil") +
stat_summary(geom = 'text', label = c("c", "a", "b", "b"),
fun = max, aes(y = 0.2), size = 3.5, color = "black") +
labs(title = "Bacteria") +
ylim(-0.1, 0.2),
PlotAlphaDiv(alpha_df_bact_filt, fit_bact_rich_m1, "Genotype") +
ylim(-0.1, 0.2) +
labs(title = "Bacteria",y=NULL)+
scale_x_discrete(labels=c(Alamo = "Alamo",
Blackwell = "Blackwell",
`Cave_in_rock` = "Cave-in-rock",
Kanlow ="Kanlow",
Shlelter="Shelter",
Southlow="Southlow")),
ncol = 2,
nrow = 1,
widths = c(1,1.2),
align = "h", labels = "C"),
ggarrange(
PlotAlphaDiv(alpha_df_bact_filt, fit_bact_shan_m1, "Soil") +
stat_summary(geom = 'text', label = c("a", "a", "b", "b"),
fun = max, aes(y = 0.3), size = 3.5, color = "black") +
labs(title = "Bacteria"),
PlotAlphaDiv(alpha_df_bact_filt, fit_bact_shan_m1, "Genotype") +
ylim(NA, 0.3) +
labs(title = "Bacteria",y=NULL)+
scale_x_discrete(labels=c(Alamo = "Alamo",
Blackwell = "Blackwell",
`Cave_in_rock` = "Cave-in-rock",
Kanlow ="Kanlow",
Shlelter="Shelter",
Southlow="Southlow")),
ncol = 2,
nrow = 1,
widths = c(1,1.2),
align = "h", labels = "D"),
ncol = 2,
nrow = 2)
# *** FIGURE 1 - alpha diversity ---------------------------------------------------------------------------
# 2x2 composite of raw alpha-diversity box plots: log(richness) (top row)
# and Shannon (bottom row) for fungi (left) and bacteria (right), coloured
# by soil using the shared Pal_soil palette.
title2 = text_grob("Alpha diversity", size = 12, face = 2)
grid.arrange(
ggarrange(alpha_df_fungi_filt %>%
dplyr::select(Genotype, Soil, richness, shannon) %>%
mutate(Var = log(richness)) %>%
PlotAlpha() +
scale_color_manual(values = Pal_soil) +
labs(title = "Fungi", x=NULL, y = "log(richness)"),
alpha_df_bact_filt %>%
dplyr::select(Genotype, Soil, richness, shannon) %>%
mutate(Var = log(richness)) %>%
PlotAlpha() +
scale_color_manual(values = Pal_soil) +
labs(title = "Bacteria", x=NULL, y = NULL),
alpha_df_fungi_filt %>%
dplyr::select(Genotype, Soil, richness, shannon) %>%
mutate(Var = shannon) %>%
PlotAlpha() +
scale_color_manual(values = Pal_soil) +
labs(title = NULL, x="Soil", y = "Shannon index"),
alpha_df_bact_filt %>%
dplyr::select(Genotype, Soil, richness, shannon) %>%
mutate(Var = shannon) %>%
PlotAlpha() +
scale_color_manual(values = Pal_soil) +
labs(title = NULL, x="Soil", y = NULL),
labels = c("A","","B",""),
common.legend = TRUE,
legend = "bottom",
ncol = 2,
nrow = 2),
top = title2)
# **********************************************************************------------------------------------
# BIOMASS MODELS -------------------------------------------------------------------------------------------
# *** Fungi *** --------------------------------------------------------------------------------------------
head(physeq_fungi_new@sam_data)
# OTU table with samples as rows (vegan orientation) and the taxonomy table.
otu_fungi_new <- as(physeq_fungi_new@otu_table, "matrix")
otu_fungi_new <- t(as.data.frame(otu_fungi_new))
str(otu_fungi_new)
taxa_fungi_new <- as(physeq_fungi_new@tax_table, "matrix")
taxa_fungi_new <- as.data.frame(taxa_fungi_new)
head(taxa_fungi_new)
# Plant-trait and soil-chemistry columns used as biomass predictors.
meta_fungi_new <-
as(physeq_fungi_new@sam_data, "data.frame") %>%
dplyr::select(Pot, N_Collared_Leaves_12wk, Total_leaves_12wks, Dead_leaves_12wks,
Yellow_leaves_12wks, N_tillers_12wks, Flower_12wk, Spikelets_Emerged_percent_12wks,
Anters_Emerged_percent_12wks, Stress_12wks, plant_height_12wks_cm, Stage_12wks,
n_tillers_16wks, Plant_height_16wks_cm, Root_lenght_16wks_cm,
developmental.estage_16wkE, developmental.stage_16wks_detail, Development_16wks_ERS,
Flower_16wk, nodes_LeadingTiller_16wks, aerial_part_dry_weight_16wks_grams,
AverageDryWeightPerGroup, PotMinusAGroupDryWeightAverage,
pH, P, K, Ca, Mg, OM, NO3)
str(meta_fungi_new)
# Calculating PCoA in vegan --------------------------------------------------------------------------------
# Classical MDS on Bray-Curtis distances; first two axes kept.
cmdscale(vegdist(otu_fungi_new, method = "bray"), eig=TRUE) -> pcoa_its
as.data.frame(pcoa_its$points) -> pcoa_fungi
colnames(pcoa_fungi) <- c("PCoA.1", "PCoA.2")
identical(rownames(pcoa_fungi), rownames(alpha_df_fungi_filt))
alpha_df_fungi_filt <-
left_join(tibble::rownames_to_column(alpha_df_fungi_filt), #keep this samples only
tibble::rownames_to_column(pcoa_fungi), by="rowname")
str(alpha_df_fungi_filt)
# nmds_fungi <- metaMDS(otu_fungi_new, k=2, trymax=200, autotransform = TRUE)
# stressplot(nmds_fungi)
# df_nmds_fungi <- as.data.frame(nmds_fungi$points)
# head(df_nmds_fungi)
# Calculating dispersion -----------------------------------------------------------------------------------
head(otu_fungi_new)
# Restrict the community matrix to the outlier-filtered samples and drop
# now-empty OTU columns.
otu_fungi_new_filt <-
subset(otu_fungi_new, rownames(otu_fungi_new) %in% alpha_df_fungi_filt$rowname) %>%
as.data.frame()
otu_fungi_new_filt <-
otu_fungi_new_filt[, colSums(otu_fungi_new_filt)>0]
str(otu_fungi_new_filt)
str(alpha_df_fungi_filt)
# reorder
rownames(alpha_df_fungi_filt) <- alpha_df_fungi_filt$rowname
identical(rownames(alpha_df_fungi_filt), rownames(otu_fungi_new_filt))
order_fungi <- match(rownames(otu_fungi_new_filt), rownames(alpha_df_fungi_filt))
alpha_df_fungi_filt <- alpha_df_fungi_filt[order_fungi,]
# Per-sample distance-to-centroid (betadisper) grouped by Soil and by
# Genotype — used below as dispersion predictors.
permdisp_fungi_soil <-
betadisper(vegdist(otu_fungi_new_filt, method = "bray"), alpha_df_fungi_filt$Soil)
dist_fungi_soil<-
data.frame(permdisp_fungi_soil$group, permdisp_fungi_soil$distances)
colnames(dist_fungi_soil) <- c("value", "dispSoil")
dist_fungi_soil
permdisp_fungi_genotype <-
betadisper(vegdist(otu_fungi_new_filt, method = "bray"), alpha_df_fungi_filt$Genotype)
dist_fungi_genotype<-
data.frame(permdisp_fungi_genotype$group, permdisp_fungi_genotype$distances)
colnames(dist_fungi_genotype) <- c("value", "dispGenotype")
dist_fungi_genotype
identical(rownames(dist_fungi_genotype), rownames(dist_fungi_soil))
# Assemble the master per-sample table: dispersions + alpha diversity +
# plant/soil metadata, joined on the sample rowname.
meta_fungi_merged <-
cbind(dist_fungi_genotype, dist_fungi_soil) %>%
dplyr::select(dispGenotype, dispSoil) %>%
tibble::rownames_to_column() %>%
left_join(alpha_df_fungi_filt, by="rowname") %>%
left_join(tibble::rownames_to_column(meta_fungi_new[,-1]), by="rowname")
str(meta_fungi_merged)
# transform to z-score -------------------------------------------------------------------------------------
#apply(mod_fungi_3[,2:8], 2, function(x) scale(x, center = TRUE, scale = TRUE))
# Standardize every numeric/integer column of a data frame to z-scores.
# Each numeric column `x` is replaced by a new column `x_z` appended at the
# end (the original column is dropped); non-numeric columns are untouched.
#
# df  a data.frame
# Returns df with scaled columns renamed "<name>_z".
VarStand <- function(df) {
  # names(df) is evaluated once, so mutating df inside the loop is safe.
  for (n in names(df)) {
    # FIX: the original tested `class(df[[n]]) == "numeric"`, which yields a
    # length-2 condition (an error in R >= 4.2) for multi-class columns such
    # as matrix columns (class c("matrix", "array")). inherits() is the
    # robust equivalent.
    if (inherits(df[[n]], c("numeric", "integer"))) {
      var <- paste0(n, "_z")
      # scale() returns a one-column matrix; stored as-is, like the original.
      df[[var]] <- scale(df[[n]], center = TRUE, scale = TRUE)
      df[[n]] <- NULL
    }
  }
  return(df)
}
# Community-derived predictors (diversity, ordination axes, dispersions,
# depth), keyed by Pot; VarStand() call below is a preview only (result
# not assigned here — the standardized copy is built in the join further down).
mod_fungi_1 <- meta_fungi_merged[, c("Pot",
"richness",
"shannon",
"PCoA.1",
"PCoA.2",
"dispSoil",
"dispGenotype",
"readNo")]
VarStand(mod_fungi_1)
# mod_fungi_2 <- meta_fungi_merged[, c("Pot",
#                                      "aerial_part_dry_weight_16wks_grams",
#                                      "N_Collared_Leaves_12wk",
#                                      "Total_leaves_12wks",
#                                      "Dead_leaves_12wks",
#                                      "Yellow_leaves_12wks",
#                                      "N_tillers_12wks",
#                                      "plant_height_12wks_cm",
#                                      "n_tillers_16wks",
#                                      "Plant_height_16wks_cm",
#                                      "Root_lenght_16wks_cm",
#                                      "nodes_LeadingTiller_16wks")]
# Response block: only aerial dry weight (biomass) retained.
mod_fungi_2 <- meta_fungi_merged[, c("Pot",
"aerial_part_dry_weight_16wks_grams")]
VarStand(mod_fungi_2)
# Soil chemistry predictors.
mod_fungi_3 <- meta_fungi_merged[, c("Pot",
"pH",
"P",
"K",
"Ca",
"Mg",
"OM",
"NO3")]
VarStand(mod_fungi_3)
# NOTE(review): result of this mutate_if is not assigned anywhere, so the
# line has no effect — presumably exploratory; remove or assign.
mod_fungi_3 %>% mutate_if(is.numeric, as.factor)
# Categorical design factors.
mod_fungi_4 <- meta_fungi_merged[, c("Pot",
#"Flower_12wk",
#"Stress_12wks",
#"Stage_12wks",
#"developmental.estage_16wkE",
#"developmental.stage_16wks_detail",
#"Development_16wks_ERS",
#"Flower_16wk",
"Soil",
"Genotype",
"Ecotype")]
# using chemistry variables as numeric ---------------------------------------------------------------------
# physeq_fungi_new@sam_data$SoilGen <-
# paste(physeq_fungi_new@sam_data$Soil, physeq_fungi_new@sam_data$Genotype)
# Join the four blocks by Pot: raw biomass, z-scored community predictors,
# z-scored soil chemistry, and the categorical design factors.
meta_fungi_filt_2 <-
left_join(mod_fungi_2,
VarStand(mod_fungi_1), by="Pot") %>%
left_join(VarStand(mod_fungi_3), by="Pot") %>%
left_join(mod_fungi_4, by="Pot")
# remove Pot and remove 1 sample with too many NA
meta_fungi_filt_2 <-
meta_fungi_filt_2[,-1][complete.cases(meta_fungi_filt_2[,-1])==TRUE, ]
str(meta_fungi_filt_2)
#recoding varaiables
# meta_fungi_filt_2$Stress_12wks <-
# as.factor(ifelse(meta_fungi_filt_2$Stress_12wks=="", "N", paste(meta_fungi_filt_2$Stress_12wks)))
# rename colnames
# NOTE: order must match the join order above (Biomass first, then the
# community block, chemistry block, and design factors).
colnames(meta_fungi_filt_2) <-c("Biomass",
"Richness","Shannon",
"PCoA.1","PCoA.2","Disp.Soil","Disp.Genotype",
"Read.No",
"pH","P","K","Ca","Mg","OM","NO3",
#"Stress.12wks",
"Soil","Genotype","Ecotype")
head(meta_fungi_filt_2)
# Run Boruta feature selection 100 times (the algorithm is stochastic) and
# pool the selected attributes across runs; unique() of the pooled vector
# downstream gives the stable predictor set.
sign_var <- vector(mode = "character")
# Preallocated list of Boruta results. The original `sel_attr[i] <- Boruta(...)`
# assigned a Boruta object into an undeclared non-list container (garbling its
# structure) and then queried the whole container with getSelectedAttributes();
# each run is now stored in its own list slot and queried individually.
sel_attr <- vector(mode = "list", length = 100)
for (i in seq_len(100)) {
  sel_attr[[i]] <- Boruta(Biomass ~ ., meta_fungi_filt_2, pValue = 0.05,
                          mcAdj = TRUE, maxRuns = 100, doTrace = 3)
  sign_var <- append(sign_var,
                     getSelectedAttributes(sel_attr[[i]], withTentative = TRUE))
}
sign_var
unique(sign_var)
df_fungi_RF_2 <-
meta_fungi_filt_2[,c(unique(sign_var), "Biomass")]
# try tuning the model first
round(sqrt(ncol(df_fungi_RF_2[, 1:(ncol(df_fungi_RF_2) - 1)])))
set.seed(12345)
bestmtry_fungi_Bmass_2 <-
tuneRF(
x = df_fungi_RF_2[, 1:(ncol(df_fungi_RF_2) - 1)],
y = df_fungi_RF_2$Biomass,
mtryStart = 4,
ntreeTry = 1001,
improve = 0.01,
stepFactor = 0.5,
nodesize = 1,
doBest = TRUE,
trace = TRUE,
plot = TRUE
)
RF_fungi_Bmass_2 <-
randomForest(
x = df_fungi_RF_2[, 1:(ncol(df_fungi_RF_2) - 1)],
y = df_fungi_RF_2$Biomass,
ntree = 1001,
mtry = 2,
importance = TRUE,
proximity = TRUE
)
RF_fungi_Bmass_2
plot(RF_fungi_Bmass_2)
# Assessing model significance using permitations
set.seed(110324)
perm_RF_fungi_Bmass_2 <-
rf.significance(
x = RF_fungi_Bmass_2,
xdata = df_fungi_RF_2[, 1:(ncol(df_fungi_RF_2) - 1)],
nperm = 999,
nmtry = 4,
ntree = 1001
)
perm_RF_fungi_Bmass_2 # model significant = 0.001
# 1 - Plotting error ---------------------------------------------------------------------------------------
# Plot the out-of-bag error (MSE) trajectory of a regression randomForest as
# a function of the number of trees grown.
#
# @param rf_model A randomForest regression object; only its `$mse` vector
#   (one entry per tree) is used.
# @return A ggplot object (red error line on a classic theme).
PlotError <- function(rf_model){
  # seq_along() instead of the hard-coded 1:1001 so the plot is correct for
  # any ntree setting, not only models grown with ntree = 1001.
  model_df <- data.frame(Trees = seq_along(rf_model$mse), Error = rf_model$mse)
  ggplot(data=model_df, aes(x=Trees, y=Error)) +
    labs(title = "Model Errors", y="Error", x="Tree") +
    theme_classic() +
    geom_line(color="red", size=0.8) +
    # anchor the y axis at 0 so error magnitudes are comparable across models
    ylim(0, NA) +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 7, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8)) -> error_plot
  return(error_plot)
}
PlotError(RF_fungi_Bmass) +
annotate("text", x=Inf, y = Inf,
label=paste("Mean squared error:", round(last(RF_fungi_Bmass$mse), 2)), size=2.5, vjust=1, hjust=1) +
annotate("text", x=Inf, y = Inf,
label= paste("% Var explained:", round(last(RF_fungi_Bmass$rsq*100),2)), size=2.5, vjust=3, hjust=1) +
annotate("text", x=Inf, y = Inf,
label = paste("italic(p) ==", round(perm_RF_fungi_Bmass$pValue, 4)), parse = TRUE, size=2.5, vjust=4, hjust=1)
# 2 - Plotting Line ----------------------------------------------------------------------------------------
# Scatter plot of predicted vs. observed response values of a regression
# randomForest, coloured by Soil and shaped by Genotype, with an OLS trend
# line.
#
# @param rf_model A randomForest regression object; uses `$y` (observed) and
#   `$predicted` (out-of-bag predictions).
# @param metadata Data frame carrying Soil and Genotype columns; its rownames
#   must match the model's training rows exactly (same order).
# @return A ggplot object.
PlotLine <- function(rf_model, metadata){
  df_model <- data.frame(actual=rf_model$y, pred=rf_model$predicted)
  # Guard clause instead of a trailing else-branch: fail fast when metadata
  # rows cannot be aligned with the model's training rows, otherwise the
  # Soil/Genotype aesthetics would be silently mis-assigned.
  if (!identical(rownames(metadata), rownames(df_model))) {
    stop("Error: dataframe and metadata rownames are not matching!")
  }
  df_model$Soil <- metadata$Soil
  df_model$Genotype <- metadata$Genotype
  df_model %T>% print()  # side-effect print kept for interactive inspection
  ggplot(data=df_model, aes(x=actual, y=pred)) +
    geom_point(aes(shape=Genotype, color=Soil), size=1.5, stroke=0.5) +
    geom_smooth(method = "lm", formula = "y ~ x", se = TRUE, color="black", size=0.5) +
    theme_classic() +
    scale_y_continuous(labels = scales::number_format(accuracy = 1)) +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_blank(), legend.background = element_blank(),
          legend.text = element_text(size = 8)) -> line_plot
  return(line_plot)
}
# 3 Inf -Inf Bottom Right h1,v0 1 0
PlotLine(RF_fungi_Bmass, df_fungi_RF) + theme(legend.position = c(0.1, 0.8)) +
annotate("text", x=Inf, y = -Inf,
label=paste("Mean squared error:", round(last(RF_fungi_Bmass$mse), 2)), size=2.5, vjust=-6, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label= paste("% Var explained:", round(last(RF_fungi_Bmass$rsq*100),2)), size=2.5, vjust=-4, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label = paste("italic(p) ==", round(perm_RF_fungi_Bmass$pValue, 4)), parse = TRUE, size=2.5, vjust=-1.5, hjust=1)
# 3- Plotting features -------------------------------------------------------------------------------------
# Horizontal bar plot of randomForest variable importance, sorted descending
# by the chosen importance measure.
#
# @param rf_model A randomForest regression object grown with
#   importance = TRUE. NOTE(review): assumes `$importance` has exactly two
#   columns ("%IncMSE", "IncNodePurity"), which holds for regression forests
#   with importance = TRUE — confirm if reused for classification models.
# @param var Either "IncMSE" or "IncNodePurity": the column to plot.
# @return A ggplot object (flipped coordinates, features on the y axis).
PlotFeature <- function(rf_model, var){
  imp_RF <- as.data.frame(rf_model$importance)
  imp_RF$features <- rownames(imp_RF)
  # standardize column names ("%IncMSE" is not a syntactic name in R)
  colnames(imp_RF) <- c("IncMSE","IncNodePurity","features")
  imp_RF <- arrange(imp_RF, desc(imp_RF[,var]))
  imp_RF %T>% print()  # side-effect print kept for interactive inspection
  ggplot(data=imp_RF) +
    # .data[[var]] pronoun instead of get(var): scoped to the plot data, so
    # it cannot accidentally resolve to a global variable with that name
    geom_bar(aes(x= reorder(features, -.data[[var]]),
                 y= .data[[var]]), color="grey80", fill="grey80",stat="identity") +
    coord_flip() +
    theme_classic() +
    theme(plot.margin=unit(c(7,9,7,7),"pt")) +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =45, size = 7, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 1, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8)) -> plot_importance
  return(plot_importance)
}
PlotFeature(RF_fungi_Bmass, "IncMSE" ) + labs(title = "Predictors")
PlotFeature(RF_fungi_Bmass, "IncNodePurity" ) + labs(title = "Predictors")
as.data.frame(RF_fungi_Bmass$importance)
# 4 PCA of Predictors --------------------------------------------------------------------------------------
# Test for colinearity using PCA
# PlotNeutralPCA <- function(df, Var){
# pca_plot <-
# autoplot(
# prcomp(x = df, scale= TRUE, center=TRUE), # calculates principal compenents and pltos with ggplot2
# data = Var, label = FALSE, shape = "Genotype", colour="Soil", # add metadate, labels of objects
# loadings = TRUE, loadings.colour = "black", size=1.5,
# frame = FALSE, frame.colour = "Soil", loadings.label.colour = "black",
# loadings.label = TRUE, loadings.label.size = 3, loadings.label.repel = TRUE) +
# labs(title = "PCA") +
# # scale_colour_manual(values = paletteCB4) +
# # scale_fill_manual(values = paletteCB4) +
# # scale_shape_manual(values = c(21,22,24)) +
# theme_classic() +
# theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
# plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
# axis.title = element_text(angle = 0, size = 10, face = "bold"),
# axis.text.x = element_text(angle =0, size = 7, hjust = 0.5, vjust = 1),
# axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
# legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
# legend.title = element_text(size = 10, face = "bold"),
# legend.text = element_text(size = 8))
# # guides(color = guide_legend(ncol = 2), #title.position="top"
# # fill = guide_legend(ncol= 2),
# # shape = guide_legend(ncol = 1)) +
# # theme(legend.margin = margin(0,-0.5,0,0, unit="cm")) # reduce space betweem legends
# return(pca_plot)
# }
#
#
# PlotNeutralPCA(meta_fungi_filt_2[,c(1:6, 8:15)], meta_fungi_filt_2)
#
# prcomp_fungi <-
# prcomp(meta_fungi_filt_2[c(1:6, 8:15)], center = TRUE, scale = TRUE)
#
# autoplot(prcomp_fungi, loadings = TRUE, loadings.label = TRUE)
# pca_points_fungi <-
# as_tibble(prcomp_fungi$x) %>%
# bind_cols(meta_fungi_filt_2)
# pca_points_fungi
# summary(lm(PC1 ~ NO3, pca_points_fungi))
#
# prcomp_fungi
#
# PlotNeutralPCA <- function(df){
# pca_plot <-
# ggplot(df, aes(x = PC1, y = PC2)) +
# geom_point(aes(colour = Soil, shape= Genotype)) +
# # autoplot(
# # prcomp(x = df, scale= TRUE, center=TRUE), # calculates principal compenents and pltos with ggplot2
# # data = Var, label = FALSE, shape = "Genotype", colour="Soil", # add metadate, labels of objects
# # loadings = TRUE, loadings.colour = "black", size=1.5,
# # frame = FALSE, frame.colour = "Soil", loadings.label.colour = "black",
# # loadings.label = TRUE, loadings.label.size = 3, loadings.label.repel = TRUE) +
# labs(title = "PCA") +
# # scale_colour_manual(values = paletteCB4) +
# # scale_fill_manual(values = paletteCB4) +
# # scale_shape_manual(values = c(21,22,24)) +
# theme_classic() +
# theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
# plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
# axis.title = element_text(angle = 0, size = 10, face = "bold"),
# axis.text.x = element_text(angle =0, size = 7, hjust = 0.5, vjust = 1),
# axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
# legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
# legend.title = element_text(size = 10, face = "bold"),
# legend.text = element_text(size = 8))
# # guides(color = guide_legend(ncol = 2), #title.position="top"
# # fill = guide_legend(ncol= 2),
# # shape = guide_legend(ncol = 1)) +
# # theme(legend.margin = margin(0,-0.5,0,0, unit="cm")) # reduce space betweem legends
# return(pca_plot)
# }
#
#
# pca_load_fungi <-
# as_tibble(prcomp_fungi$rotation, rownames = 'variable') %>%
# mutate(variable = dplyr::recode(variable,
# "Biomass" = "Biomass",
# "Richness" = "Richness",
# "Shannon" = "Shannon",
# "PCoA.1" = "PCoA.1",
# "PCoA.2" = "PCoA.2",
# "Disp.Soil" = "Disp.Soil",
# "Read.No" = "Read.No",
# "pH"="pH",
# "P" = expression(PO[4]^{"-"}),
# "K" = expression(K^{"+"}),
# "Ca" =expression(Ca^{"2+"}),
# "Mg" =expression(Mg^{"2+"}),
# "OM" = "OM",
# "NO3" = expression(NO[3]^{"-"})))
#
#
#
# PlotNeutralPCA(pca_points_fungi) +
# geom_segment(data = pca_load_fungi,
# aes(x = 0, y = 0,
# xend = PC1*6,
# yend = PC2*6),
# arrow = arrow(length = unit(1/2, "picas"))) +
# annotate("text", x = (pca_load_fungi$PC1*6), y = (pca_load_fungi$PC2*5.2),
# label = c(
# "Biomass",
# "Richness",
# "Shannon",
# "PCoA.1",
# "PCoA.2",
# "Disp.Soil",
# "Read.No",
# "pH",
# expression(PO[4]^{"-"}),
# expression(K^{"+"}),
# expression(Ca^{"2+"}),
# expression(Mg^{"2+"}),
# "OM"="OM",
# expression(NO[3]^{"-"})),
# size = 3.5)
# New functions
# PCA biplot of the numeric fungal community / plant / soil variables:
# points are samples coloured by Soil and shaped by Genotype, arrows are
# variable loadings with manually placed labels.
#
# @param df Data frame with the numeric predictors plus the factor columns
#   Soil, Genotype, Ecotype and numeric Disp.Genotype; those four are
#   excluded from the PCA itself but kept for plotting aesthetics.
# @return A ggplot object.
PlotNeutralPCAfungi <- function(df){
  # generate pca on the numeric columns only (centered + scaled by prcomp)
  pca_res <-
    df %>%
    select(-Soil, -Genotype, -Ecotype, -Disp.Genotype) %>%
    prcomp(x = ., center = TRUE, scale = TRUE)
  # extract points for the samples; bind_cols() relies on prcomp preserving
  # the input row order so scores line up with df
  pca_points <-
    as_tibble(pca_res$x) %>%
    bind_cols(df)
  # extract point for the loadings
  pca_load <-
    as_tibble(pca_res$rotation, rownames = 'variable')
  # % variation for each axis (row 2 of summary()$importance is
  # "Proportion of Variance")
  axis_var <-
    round(as.vector(summary(pca_res)$importance[2,])*100,1)
  pca_plot <-
    ggplot(pca_points, aes(x = PC1, y = PC2)) +
    geom_point(aes(colour = Soil, shape= Genotype)) +
    # NOTE(review): parse=TRUE is silently ignored by as.expression() here;
    # the axis label renders as the pasted string, which appears intended
    labs(title = "PCA",
         x=as.expression(paste("PC1 (",axis_var[1],"%)"), parse=TRUE),
         y=as.expression(paste("PC2 (",axis_var[2],"%)"), parse=TRUE)) +
    theme_classic() +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 7, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8))
  # overlay loading arrows; the *7.7 / *8.3 / *8 factors are manual scaling
  # chosen so arrows and labels fit this particular point cloud
  pca_plot <-
    pca_plot +
    #grids(linetype = "dashed") +
    geom_segment(data = pca_load,
                 aes(x = 0, y = 0,
                     xend = PC1*7.7,
                     yend = PC2*7.7),
                 arrow = arrow(length = unit(1/2, "picas"))) +
    # NOTE(review): this label vector is positional — it must stay in the
    # same order as the columns fed to prcomp(); if columns are added,
    # removed or reordered upstream the chemistry labels will be wrong
    annotate("text", x = (pca_load$PC1*8.3), y = (pca_load$PC2*8),
             size = 3,
             label = c(
               "Biomass",
               "Richness",
               "Shannon",
               "PCoA.1",
               "PCoA.2",
               "Disp.Soil",
               "Read.No",
               "pH",
               expression(PO[4]^{"3-"}),
               expression(K^{"+"}),
               expression(Ca^{"2+"}),
               expression(Mg^{"2+"}),
               "OM",
               expression(NO[3]^{"-"})))
  return(pca_plot)
}
PlotNeutralPCAfungi(meta_fungi_filt_2)
# Bacterial counterpart of PlotNeutralPCAfungi: PCA biplot of the numeric
# predictors (here Disp.Genotype stays in the PCA), points coloured by Soil
# and shaped by Genotype, loading arrows with manually placed labels.
#
# @param df Data frame with the numeric predictors plus the factor columns
#   Soil, Genotype, Ecotype; the factors are excluded from the PCA but kept
#   for plotting aesthetics.
# @return A ggplot object.
PlotNeutralPCAbact <- function(df){
  # PCA on the numeric columns only (centered + scaled by prcomp)
  pca_res <-
    df %>%
    select(-Soil, -Genotype, -Ecotype) %>%
    prcomp(x = ., center = TRUE, scale = TRUE)
  # sample scores joined back to the metadata (relies on prcomp preserving
  # row order)
  pca_points <-
    as_tibble(pca_res$x) %>%
    bind_cols(df)
  # variable loadings
  pca_load <-
    as_tibble(pca_res$rotation, rownames = 'variable')
  # % variance per axis (row 2 of summary()$importance)
  axis_var <-
    round(as.vector(summary(pca_res)$importance[2,])*100,1)
  pca_plot <-
    ggplot(pca_points, aes(x = PC1, y = PC2)) +
    geom_point(aes(colour = Soil, shape= Genotype)) +
    # NOTE(review): parse=TRUE is silently ignored by as.expression() here;
    # the axis label renders as the pasted string, which appears intended
    labs(title = "PCA",
         x=as.expression(paste("PC1 (",axis_var[1],"%)"), parse=TRUE),
         y=as.expression(paste("PC2 (",axis_var[2],"%)"), parse=TRUE)) +
    theme_classic() +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =0, size = 7, hjust = 0.5, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8))
  # overlay loading arrows; *7.7 / *8.6 / *8 are manual scale factors chosen
  # so arrows and labels fit this particular point cloud
  pca_plot <-
    pca_plot +
    #grids(linetype = "dashed") +
    geom_segment(data = pca_load,
                 aes(x = 0, y = 0,
                     xend = PC1*7.7,
                     yend = PC2*7.7),
                 arrow = arrow(length = unit(1/2, "picas"))) +
    # NOTE(review): positional label vector — must match the column order
    # fed to prcomp() (includes Disp.Genotype, unlike the fungal version);
    # reordering upstream columns silently mislabels the arrows
    annotate("text", x = (pca_load$PC1*8.6), y = (pca_load$PC2*8),
             size = 3,
             label = c(
               "Biomass",
               "Richness",
               "Shannon",
               "PCoA.1",
               "PCoA.2",
               "Disp.Soil",
               "Disp.Genotype",
               "Read.No",
               "pH",
               expression(PO[4]^{"3-"}),
               expression(K^{"+"}),
               expression(Ca^{"2+"}),
               expression(Mg^{"2+"}),
               "OM",
               expression(NO[3]^{"-"})))
  return(pca_plot)
}
PlotNeutralPCAbact(meta_bact_filt_2)
# Multicollinearity does not affect the accuracy of predictive models, including regression models.
# Take the attached image as an example. The features in the x and y axis are clearly correlated;
# however, you need both of them to create an accurate classifier. If you discard one of them for
# being highly correlated with the other one, the performance of your model will decrease.
#If you want to remove the collinearity, you can always use PCA to project the data into a new
# space where the 'new features' will be orthogonal to each other. You can then, train your model
# with the new features, but you will find that the performance is the same. You simply rotated
# your original decision boundary. Now, where multicollinearity becomes 'an issue' is when you want
# to 'interpret' the parameters learned by your model. In other words, you cannot say that the
# feature with the 'biggest weight' is 'the most important' when the features are correlated.
# Note that this is independent on the accuracy of the model, this is only the interpretation part,
# which in my opinion, you should not be doing anyway.
# Using chemistry data as factor ---------------------------------------------------------------------------
meta_fungi_filt <-
left_join(mod_fungi_2,
VarStand(mod_fungi_1), by="Pot") %>%
left_join(mutate_if(mod_fungi_3, is.numeric, as.factor), by="Pot") %>%
left_join(mod_fungi_4, by="Pot")
# remove Pot and remove 1 sample with too many NA
meta_fungi_filt <-
meta_fungi_filt[,-1][complete.cases(meta_fungi_filt[,-1])==TRUE, ]
str(meta_fungi_filt)
#recoding varaiables
meta_fungi_filt$Stress_12wks <-
as.factor(ifelse(meta_fungi_filt$Stress_12wks=="", "N", paste(meta_fungi_filt$Stress_12wks)))
# Run Boruta feature selection 100 times (the algorithm is stochastic) and
# pool the selected attributes across runs (chemistry treated as factors).
sign_var <- vector(mode = "character")
# Preallocated list of Boruta results. The original `sel_attr[i] <- Boruta(...)`
# assigned a Boruta object into an undeclared non-list container (garbling its
# structure) and then queried the whole container with getSelectedAttributes();
# each run is now stored in its own list slot and queried individually.
sel_attr <- vector(mode = "list", length = 100)
for (i in seq_len(100)) {
  sel_attr[[i]] <- Boruta(aerial_part_dry_weight_16wks_grams ~ ., meta_fungi_filt,
                          pValue = 0.05, mcAdj = TRUE, maxRuns = 100, doTrace = 3)
  sign_var <- append(sign_var,
                     getSelectedAttributes(sel_attr[[i]], withTentative = TRUE))
}
sign_var
unique(sign_var)
df_fungi_RF <-
meta_fungi_filt[,c(unique(sign_var), "aerial_part_dry_weight_16wks_grams")]
# try tuning the model first
round(sqrt(ncol(df_fungi_RF[, 1:(ncol(df_fungi_RF) - 1)])))
set.seed(12345)
bestmtry_fungi_Bmass <-
tuneRF(
x = df_fungi_RF[, 1:(ncol(df_fungi_RF) - 1)],
y = df_fungi_RF$aerial_part_dry_weight_16wks_grams,
mtryStart = 4,
ntreeTry = 1001,
improve = 0.01,
stepFactor = 0.5,
nodesize = 1,
doBest = TRUE,
trace = TRUE,
plot = TRUE
)
RF_fungi_Bmass <-
randomForest(
x = df_fungi_RF[, 1:(ncol(df_fungi_RF) - 1)],
y = df_fungi_RF$aerial_part_dry_weight_16wks_grams,
ntree = 1001,
mtry = 2,
importance = TRUE,
proximity = TRUE
)
RF_fungi_Bmass
plot(RF_fungi_Bmass)
# Assessing model significance using permitations
set.seed(110324)
perm_RF_fungi_Bmass <-
rf.significance(
x = RF_fungi_Bmass,
xdata = df_fungi_RF[, 1:(ncol(df_fungi_RF) - 1)],
nperm = 999,
nmtry = 1,
ntree = 1001
)
perm_RF_fungi_Bmass # model significant = 0.001
# FIGURE SXX - RF MODELS with factor
ggarrange(PlotLine(RF_fungi_Bmass, df_fungi_RF) +
theme(legend.position = c(0.15, 0.8)) +
scale_color_manual(values = Pal_soil) +
scale_shape_manual(values = c(15,16,17,18,3,8)) +
labs(title = "Random Forest ", y="Predicted\nAerial Plant Biomass", x="Observed\nAerial Plant Biomass") +
annotate("text", x=Inf, y = -Inf,
label=paste("Mean squared error:", round(last(RF_fungi_Bmass$mse), 2)), size=2.5, vjust=-6, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label= paste("% Var explained:", round(last(RF_fungi_Bmass$rsq*100),2)), size=2.5, vjust=-4, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label = paste("italic(p) ==", round(perm_RF_fungi_Bmass$pValue, 4)), parse = TRUE, size=2.5, vjust=-1.5, hjust=1),
PlotFeature(RF_fungi_Bmass, "IncMSE") +
labs(title = "Predictors", x=NULL, y="% Increase MSE "),
PlotFeature(RF_fungi_Bmass, "IncNodePurity") +
labs(title = "Predictors", x=NULL, y="Increase Node Purity"),
PlotError(RF_fungi_Bmass) +
annotate("text", x=Inf, y = Inf,
label= paste("% Explained var.:", round(last(RF_fungi_Bmass$rsq*100),2)), size=2.5, vjust=1, hjust=1) +
annotate("text", x=Inf, y = Inf,
label=paste("MSE:", round(last(RF_fungi_Bmass$mse), 2)), size=2.5, vjust=3, hjust=1) +
annotate("text", x=Inf, y = Inf,
label = paste("italic(p) ==", round(perm_RF_fungi_Bmass$pValue, 4)), parse = TRUE, size=2.5, vjust=4, hjust=1),
labels = c("A","B","C", "D"),
widths = c(1.1, 0.6, 0.6, 0.5),
align = "h",
ncol = 4,
nrow =1)
# *** Bacteria *** -----------------------------------------------------------------------------------------
head(physeq_bact_new@sam_data)
otu_bact_new <- as(physeq_bact_new@otu_table, "matrix")
otu_bact_new <- t(as.data.frame(otu_bact_new))
str(otu_bact_new)
colSums(otu_bact_new)
taxa_bact_new <- as(physeq_bact_new@tax_table, "matrix")
taxa_bact_new <- as.data.frame(taxa_bact_new)
head(taxa_bact_new)
meta_bact_new <-
as(physeq_bact_new@sam_data, "data.frame") %>%
dplyr::select(Pot, N_Collared_Leaves_12wk, Total_leaves_12wks, Dead_leaves_12wks,
Yellow_leaves_12wks, N_tillers_12wks, Flower_12wk, Spikelets_Emerged_percent_12wks,
Anters_Emerged_percent_12wks, Stress_12wks, plant_height_12wks_cm, Stage_12wks,
n_tillers_16wks, Plant_height_16wks_cm, Root_lenght_16wks_cm,
developmental.estage_16wkE, developmental.stage_16wks_detail, Development_16wks_ERS,
Flower_16wk, nodes_LeadingTiller_16wks, aerial_part_dry_weight_16wks_grams,
AverageDryWeightPerGroup, PotMinusAGroupDryWeightAverage,
pH, P, K, Ca, Mg, OM, NO3)
str(meta_bact_new)
# Calculating PCoA in vegan --------------------------------------------------------------------------------
cmdscale(vegdist(otu_bact_new, method = "bray"), eig=TRUE) -> pcoa_16s
as.data.frame(pcoa_16s$points) -> pcoa_bact
colnames(pcoa_bact) <- c("PCoA.1", "PCoA.2")
identical(rownames(pcoa_bact), rownames(alpha_df_bact_filt))
alpha_df_bact_filt <-
left_join(tibble::rownames_to_column(alpha_df_bact_filt), #keep this samples only
tibble::rownames_to_column(pcoa_bact), by="rowname")
str(alpha_df_bact_filt)
nmds_bact <- metaMDS(otu_bact_new, k=2, trymax=200, distance = "bray", autotransform = TRUE, weakties = TRUE)
stressplot(nmds_bact)
nmds_bact <- metaMDS(otu_bact_new, previous.best = nmds_bact)
df_nmds_bact <- as.data.frame(nmds_bact$points)
head(df_nmds_bact)
# Calculating dispersion
head(otu_bact_new)
otu_bact_new_filt <-
subset(otu_bact_new, rownames(otu_bact_new) %in% alpha_df_bact_filt$rowname) %>%
as.data.frame()
otu_bact_new_filt <-
otu_bact_new_filt[, colSums(otu_bact_new_filt)>0]
str(otu_bact_new_filt)
str(alpha_df_bact_filt)
# reorder
rownames(alpha_df_bact_filt) <- alpha_df_bact_filt$rowname
identical(rownames(alpha_df_bact_filt), rownames(otu_bact_new_filt))
order_bact <- match(rownames(otu_bact_new_filt), rownames(alpha_df_bact_filt))
alpha_df_bact_filt <- alpha_df_bact_filt[order_bact,]
permdisp_bact_soil <-
betadisper(vegdist(otu_bact_new_filt, method = "bray"), alpha_df_bact_filt$Soil)
dist_bact_soil<-
data.frame(permdisp_bact_soil$group, permdisp_bact_soil$distances)
colnames(dist_bact_soil) <- c("value", "dispSoil")
dist_bact_soil
permdisp_bact_genotype <-
betadisper(vegdist(otu_bact_new_filt, method = "bray"), alpha_df_bact_filt$Genotype)
dist_bact_genotype<-
data.frame(permdisp_bact_genotype$group, permdisp_bact_genotype$distances)
colnames(dist_bact_genotype) <- c("value", "dispGenotype")
dist_bact_genotype
identical(rownames(dist_bact_genotype), rownames(dist_bact_soil))
meta_bact_merged <-
cbind(dist_bact_genotype, dist_bact_soil) %>%
dplyr::select(dispGenotype, dispSoil) %>%
tibble::rownames_to_column() %>%
left_join(alpha_df_bact_filt, by="rowname") %>%
left_join(tibble::rownames_to_column(meta_bact_new[,-1]), by="rowname")
str(meta_bact_merged)
# transform to z-score -------------------------------------------------------------------------------------
mod_bact_1 <- meta_bact_merged[, c("Pot",
"richness",
"shannon",
"PCoA.1",
"PCoA.2",
"dispSoil",
"dispGenotype",
"readNo")]
VarStand(mod_bact_1)
mod_bact_2 <- meta_bact_merged[, c("Pot",
"aerial_part_dry_weight_16wks_grams")]
VarStand(mod_bact_2)
mod_bact_3 <- meta_bact_merged[, c("Pot",
"pH",
"P",
"K",
"Ca",
"Mg",
"OM",
"NO3")]
VarStand(mod_bact_3)
mod_bact_3 %>% mutate_if(is.numeric, as.factor)
mod_bact_4 <- meta_bact_merged[, c("Pot",
#"Flower_12wk",
#"Stress_12wks",
#"Stage_12wks",
#"developmental.estage_16wkE",
#"developmental.stage_16wks_detail",
#"Development_16wks_ERS",
#"Flower_16wk",
"Soil",
"Genotype",
"Ecotype")]
# RF with chemistry variables as numeric -------------------------------------------------------------------
# physeq_bact_new@sam_data$SoilGen <-
# paste(physeq_bact_new@sam_data$Soil, physeq_bact_new@sam_data$Genotype)
meta_bact_filt_2 <-
left_join(mod_bact_2,
VarStand(mod_bact_1), by="Pot") %>%
left_join(VarStand(mod_bact_3), by="Pot") %>%
left_join(mod_bact_4, by="Pot")
# remove Pot and remove 1 sample with too many NA
meta_bact_filt_2 <-
meta_bact_filt_2[,-1][complete.cases(meta_bact_filt_2[,-1])==TRUE, ]
str(meta_bact_filt_2)
#recoding varaiables
# meta_bact_filt_2$Stress_12wks <-
# as.factor(ifelse(meta_bact_filt_2$Stress_12wks=="", "N", paste(meta_bact_filt_2$Stress_12wks)))
# rename colnames
colnames(meta_bact_filt_2) <-c("Biomass",
"Richness","Shannon",
"PCoA.1","PCoA.2","Disp.Soil","Disp.Genotype",
"Read.No",
"pH","P","K","Ca","Mg","OM","NO3",
#"Stress.12wks",
"Soil","Genotype","Ecotype")
head(meta_bact_filt_2)
# Run Boruta feature selection 100 times (the algorithm is stochastic) and
# pool the selected attributes across runs (bacterial dataset).
sign_var_bact <- vector(mode = "character")
# Preallocated list of Boruta results. The original `sel_attr[i] <- Boruta(...)`
# assigned a Boruta object into an undeclared non-list container (garbling its
# structure) and then queried the whole container with getSelectedAttributes();
# each run is now stored in its own list slot and queried individually.
sel_attr <- vector(mode = "list", length = 100)
for (i in seq_len(100)) {
  sel_attr[[i]] <- Boruta(Biomass ~ ., meta_bact_filt_2, pValue = 0.05,
                          mcAdj = TRUE, maxRuns = 100, doTrace = 3)
  sign_var_bact <- append(sign_var_bact,
                          getSelectedAttributes(sel_attr[[i]], withTentative = TRUE))
}
sign_var_bact
unique(sign_var_bact)
unique(sign_var)
df_bact_RF_2 <-
meta_bact_filt_2[,c(unique(sign_var_bact), "Biomass")]
# try tuning the model first
round(sqrt(ncol(df_bact_RF_2[, 1:(ncol(df_bact_RF_2) - 1)])))
set.seed(12345)
bestmtry_bact_Bmass_2 <-
tuneRF(
x = df_bact_RF_2[, 1:(ncol(df_bact_RF_2) - 1)],
y = df_bact_RF_2$Biomass,
mtryStart = 4,
ntreeTry = 1001,
improve = 0.01,
stepFactor = 0.5,
nodesize = 1,
doBest = TRUE,
trace = TRUE,
plot = TRUE
)
RF_bact_Bmass_2 <-
randomForest(
x = df_bact_RF_2[, 1:(ncol(df_bact_RF_2) - 1)],
y = df_bact_RF_2$Biomass,
ntree = 1001,
mtry = 2,
importance = TRUE,
proximity = TRUE
)
RF_bact_Bmass_2
plot(RF_bact_Bmass_2)
# Assessing model significance using permutations
set.seed(110324)
perm_RF_bact_Bmass_2 <-
rf.significance(
x = RF_bact_Bmass_2,
xdata = df_bact_RF_2[, 1:(ncol(df_bact_RF_2) - 1)],
nperm = 999,
nmtry = 2,
ntree = 1001
)
perm_RF_bact_Bmass_2 # model significant = 0.001
# Changing tick labels of the bar plot ---------------------------------------------------------------------
# Sanity-check the predictor names present in each model before mapping them
# to pretty axis labels (the named vectors below must cover these names).
sort(rownames(RF_fungi_Bmass_2$importance))
sort(rownames(RF_bact_Bmass_2$importance))
PlotFeature(RF_fungi_Bmass_2, "IncMSE") +
  scale_x_discrete(labels=c(
    "Richness" = "Richness", "Shannon" = "Shannon",
    "PCoA.1" = "PCoA.Axis1", "PCoA.2"="PCoA.Axis2",
    "Disp.Soil"="Disp.Soil", "Read.No"="Read.No",
    "pH"="pH",
    # phosphate carries a 3- charge; "3-" also matches the labels used in
    # the Figure 3 code below (was "-")
    "P" = expression(PO[4]^{"3-"}),
    "K" = expression(K^{"+"}),
    "Ca" =expression(Ca^{"2+"}),
    "Mg" =expression(Mg^{"2+"}),
    "OM"="OM",
    "NO3" = expression(NO[3]^{"-"}) ,
    "Soil"="Soil","Genotype"="Genotype", "Ecotype"="Ecotype"))
sort(rownames(RF_bact_Bmass_2$importance))
PlotFeature(RF_bact_Bmass_2, "IncMSE") +
  scale_x_discrete(labels=c(
    "Richness"="Richness", "Shannon" = "Shannon",
    # fixed display typo: "PCaA.Axis2" -> "PCoA.Axis2"
    "PCoA.1" = "PCoA.Axis1", "PCoA.2"="PCoA.Axis2",
    "Disp.Soil" = "Disp.Soil" ,"Disp.Genotype" = "Disp.Genotype",
    "Read.No" = "Read.No", "pH"="pH",
    "P" = expression(PO[4]^{"3-"}),
    "K" = expression(K^{"+"}),
    "Ca" =expression(Ca^{"2+"}),
    "Mg" =expression(Mg^{"2+"}),
    "OM" = "OM",
    "NO3" = expression(NO[3]^{"-"}) ,
    "Soil" = "Soil", "Genotype"="Genotype", "Ecotype"="Ecotype"))
# FIGURE 3 - RF MODELS fungi -----------------------------------------------------------------------------
# adjust colnames
pca_fungi <-
PlotNeutralPCAfungi(meta_fungi_filt_2) +
scale_color_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
theme(legend.position = "none")
# modify loading thickness
pca_fungi$layers[[2]]$aes_params$size <- 0.3
pca_fungi$layers[[2]]$geom_params$arrow$length <- unit(6, units = "points")
pca_fungi
Fig_3_RF_fungi <-
ggarrange(PlotLine(RF_fungi_Bmass_2, df_fungi_RF_2) +
#theme(legend.position = c(0.35, 0.8)) +
theme(legend.position = "none") +
scale_color_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
labs(title = "Random Forest ", y="Predicted\nAerial Plant Biomass", x="Observed\nAerial Plant Biomass") +
annotate("text", x=Inf, y = -Inf, colour="black",
label=paste("Mean squared error:", round(last(RF_fungi_Bmass_2$mse), 2)), size=2.5, vjust=-6, hjust=1) +
annotate("text", x=Inf, y = -Inf, colour="black",
label= paste("% Var explained:", round(last(RF_fungi_Bmass_2$rsq*100),2)), size=2.5, vjust=-4, hjust=1) +
annotate("text", x=Inf, y = -Inf, colour="black",
label = paste("italic(p) ==", round(perm_RF_fungi_Bmass_2$pValue, 4)), parse = TRUE, size=2.5, vjust=-1.5, hjust=1),
PlotFeature(RF_fungi_Bmass_2, "IncMSE") +
scale_x_discrete(labels=c(
"Richness" = "Richness", "Shannon" = "Shannon",
"PCoA.1" = "PCoA.1", "PCoA.2"="PCoA.2",
"Disp.Soil"="Disp.Soil", "Read.No"="Read.No",
"pH"="pH",
"P" = expression(PO[4]^{"3-"}),
"K" = expression(K^{"+"}),
"Ca" =expression(Ca^{"2+"}),
"Mg" =expression(Mg^{"2+"}),
"OM"="OM",
"NO3" = expression(NO[3]^{"-"}) ,
"Soil"="Soil","Genotype"="Genotype", "Ecotype"="Ecotype")) +
labs(title = "Predictors", x=NULL, y="% Increase MSE ") +
scale_y_continuous(labels = scales::number_format(accuracy = 0.01)),
pca_fungi,
labels = c("A","B","C"),
widths = c(1, 0.65, 1),
align = "h",
ncol = 3,
nrow =1)
Fig_3_RF_fungi
# FIGURE SXX - Random Forest supplementary ---------------------------------------------------------------
Fig_SXX_fungi <-
ggarrange(
PlotFeature(RF_fungi_Bmass_2, "IncNodePurity") +
labs(title = "Predictors", x=NULL, y="Increase Node Purity"),
PlotError(RF_fungi_Bmass_2) +
annotate("text", x=Inf, y = Inf,
label= paste("% Explained var.:", round(last(RF_fungi_Bmass_2$rsq*100),2)), size=2.5, vjust=1, hjust=1) +
annotate("text", x=Inf, y = Inf,
label=paste("MSE:", round(last(RF_fungi_Bmass_2$mse), 2)), size=2.5, vjust=3, hjust=1) +
annotate("text", x=Inf, y = Inf,
label = paste("italic(p) ==", round(perm_RF_fungi_Bmass_2$pValue, 4)), parse = TRUE, size=2.5, vjust=4, hjust=1),
labels = c("A","B"),
widths = c(1, 1),
align = "h",
ncol = 2,
nrow =1)
Fig_SXX_fungi
# FIGURE 3 - RF MODELS bacteria-----------------------------------------------------------------------------
pca_bact <-
PlotNeutralPCAbact(meta_bact_filt_2) +
scale_color_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
theme(legend.position = "none")
# modify loading thickness
pca_bact$layers[[2]]$aes_params$size <- 0.3
pca_bact$layers[[2]]$geom_params$arrow$length <- unit(6, units = "points")
pca_bact
Fig_3_RF_bact <-
ggarrange(PlotLine(RF_bact_Bmass_2, df_bact_RF_2) +
theme(legend.position = "none") +
scale_color_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
labs(title = "Random Forest ", y="Predicted\nAerial Plant Biomass", x="Observed\nAerial Plant Biomass") +
annotate("text", x=Inf, y = -Inf, color="black",
label=paste("Mean squared error:", round(last(RF_bact_Bmass_2$mse), 2)), size=2.5, vjust=-6, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label= paste("% Var explained:", round(last(RF_bact_Bmass_2$rsq*100),2)), size=2.5, vjust=-4, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label = paste("italic(p) ==", round(perm_RF_bact_Bmass_2$pValue, 4)), parse = TRUE, size=2.5, vjust=-1.5, hjust=1) +
theme(legend.position = "none"),
PlotFeature(RF_bact_Bmass_2, "IncMSE") +
scale_x_discrete(labels=c(
"Richness"="Richness", "Shannon" = "Shannon",
"PCoA.1" = "PCoA.1", "PCoA.2"="PCoA.2",
"Disp.Soil" = "Disp.Soil" ,"Disp.Genotype" = "Disp.Genotype",
"Read.No" = "Read.No", "pH"="pH",
"P" = expression(PO[4]^{"3-"}),
"K" = expression(K^{"+"}),
"Ca" =expression(Ca^{"2+"}),
"Mg" =expression(Mg^{"2+"}),
"OM" = "OM",
"NO3" = expression(NO[3]^{"-"}) ,
"Soil" = "Soil", "Genotype"="Genotype", "Ecotype"="Ecotype")) +
labs(title = "Predictors", x=NULL, y="% Increase MSE ") +
scale_y_continuous(labels = scales::number_format(accuracy = 0.01)),
pca_bact,
labels = c("D","E","F"),
widths = c(1, 0.65, 1),
align = "h",
ncol = 3,
nrow =1)
Fig_3_RF_bact
# FIGURE SXX - Random Forest supplementary ---------------------------------------------------------------
# Two-panel supplementary figure for the bacterial biomass RF model:
# (A) variable importance (increase in node purity), (B) OOB error curve
# annotated with % explained variance, MSE, and the permutation p-value.
# PlotFeature/PlotError and RF_bact_Bmass_2/perm_RF_bact_Bmass_2 are defined
# earlier in this script.
Fig_SXX_bact <-
ggarrange(
PlotFeature(RF_bact_Bmass_2, "IncNodePurity") +
labs(title = "Predictors", x=NULL, y="Increase Node Purity"),
PlotError(RF_bact_Bmass_2) +
annotate("text", x=Inf, y = Inf,
label= paste("% Explained var.:", round(last(RF_bact_Bmass_2$rsq*100),2)), size=2.5, vjust=1, hjust=1) +
annotate("text", x=Inf, y = Inf,
label=paste("MSE:", round(last(RF_bact_Bmass_2$mse), 2)), size=2.5, vjust=3, hjust=1) +
annotate("text", x=Inf, y = Inf,
label = paste("italic(p) ==", round(perm_RF_bact_Bmass_2$pValue, 4)), parse = TRUE, size=2.5, vjust=4, hjust=1),
labels = c("A","B"),
widths = c(1, 1),
align = "h",
ncol = 2,
nrow =1)
Fig_SXX_bact
# *** FINAL FIGURE 3 ---------------------------------------------------------------------------------------
# extracting the shared legend for plotting; bold labels mark the two
# lowland genotypes (Alamo, Kanlow)
get_legend(
PlotLine(RF_bact_Bmass_2, df_bact_RF_2) +
theme(legend.position = c(0.2, 0.8)) +
scale_color_manual(values = Pal_soil) +
guides(color = guide_legend(nrow = 2),
shape = guide_legend(nrow = 2)) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo"=expression(bold("Alamo")),
"Blackwell",
"Cave-in-rock",
"Kanlow"=expression(bold("Kanlow")),
"Shelter",
"Southlow")) +
theme(legend.position = "bottom")) -> legend1
as_ggplot(legend1)
# panel titles reused by later figures as well
title3 = text_grob("Fungi", size = 12, face = 2)
title4 = text_grob("Bacteria", size = 12, face = 2)
# stack the fungal and bacterial RF panels with the extracted legend below
ggarrange(
grid.arrange(Fig_3_RF_fungi, top=title3),
grid.arrange(Fig_3_RF_bact, top=title4),
as_ggplot(legend1),
ncol = 1,
nrow = 3,
heights = c(1,1,0.1))
# Treating as a factor -------------------------------------------------------------------------------------
# Rebuild the bacterial model metadata: standardise continuous predictors
# (VarStand) and coerce the mod_bact_3 numeric variables to factors, joined by Pot.
meta_bact_filt <-
left_join(mod_bact_2,
VarStand(mod_bact_1), by="Pot") %>%
left_join(mutate_if(mod_bact_3, is.numeric, as.factor), by="Pot") %>%
left_join(mod_bact_4, by="Pot")
# remove Pot and remove 1 sample with too many NA
meta_bact_filt <-
meta_bact_filt[,-1][complete.cases(meta_bact_filt[,-1])==TRUE, ]
str(meta_bact_filt)
# recoding variables: an empty stress entry means "no stress" -> "N"
meta_bact_filt$Stress_12wks <-
as.factor(ifelse(meta_bact_filt$Stress_12wks=="", "N", paste(meta_bact_filt$Stress_12wks)))
# Loop to pool selected attributes over 100 Boruta runs.
# Fixes vs. the original:
#  * `sel_attr` was never initialised, so the first `sel_attr[i] <- ...` would
#    error; and a Boruta result is a list, so it must be stored with `[[i]]`
#    in a pre-allocated list.
#  * `getSelectedAttributes()` was called on the whole container rather than
#    on the current run's result.
#  * the comment promised 100 runs but `1:99` did 99; use seq_len(100).
sign_var <- vector(mode = "character")
sel_attr <- vector(mode = "list", length = 100)
for (i in seq_len(100)) {
sel_attr[[i]] <- Boruta(aerial_part_dry_weight_16wks_grams ~., meta_bact_filt, pValue = 0.05,
mcAdj = TRUE, maxRuns=100, doTrace = 3)
sign_var <- append(sign_var, getSelectedAttributes(sel_attr[[i]], withTentative = TRUE))
}
sign_var
unique(sign_var)
# Keep only the variables Boruta ever selected, plus the response.
df_bact_RF <-
meta_bact_filt[,c(unique(sign_var), "aerial_part_dry_weight_16wks_grams")]
# try tuning the model first: sqrt(p) heuristic for the starting mtry
round(sqrt(ncol(df_bact_RF[, 1:(ncol(df_bact_RF) - 1)])))
set.seed(12345)
bestmtry_bact_Bmass <-
tuneRF(
x = df_bact_RF[, 1:(ncol(df_bact_RF) - 1)],
y = df_bact_RF$aerial_part_dry_weight_16wks_grams,
mtryStart = 4,
ntreeTry = 1001,
improve = 0.01,
stepFactor = 0.5,
nodesize = 1,
doBest = TRUE,
trace = TRUE,
plot = TRUE
)
# Final regression forest; mtry = 2 picked from the tuning step above
# (NOTE(review): hard-coded -- re-check after re-running tuneRF).
RF_bact_Bmass <-
randomForest(
x = df_bact_RF[, 1:(ncol(df_bact_RF) - 1)],
y = df_bact_RF$aerial_part_dry_weight_16wks_grams,
ntree = 1001,
mtry = 2,
importance = TRUE,
proximity = TRUE
)
RF_bact_Bmass
plot(RF_bact_Bmass)
# Assessing model significance using permutations (rf.significance, rfUtilities)
set.seed(110324)
perm_RF_bact_Bmass <-
rf.significance(
x = RF_bact_Bmass,
xdata = df_bact_RF[, 1:(ncol(df_bact_RF) - 1)],
nperm = 999,
nmtry = 1,
ntree = 1001
)
perm_RF_bact_Bmass # model significant = 0.001
# *****************************************************************-----------------------------------------
# INFLUENTIAL OTUs on plant biomass ------------------------------------------------------------------------
# Fungi ----------------------------------------------------------------------------------------------------
# Build the OTU-table + biomass response data frame (rows must align by sample).
str(otu_fungi_new)
str(meta_fungi_new)
identical(rownames(otu_fungi_new), rownames(meta_fungi_new))
df_fungi_RF_otu <-
data.frame(otu_fungi_new,
biomass = meta_fungi_new$aerial_part_dry_weight_16wks_grams)
str(df_fungi_RF_otu)
head(df_fungi_RF_otu)
# Recursive feature selection (single Boruta run on all OTUs)
set.seed(110321)
rfe_fungi_biom <- Boruta(
biomass ~ .,
df_fungi_RF_otu,
pValue = 0.05,
mcAdj = TRUE,
maxRuns = ncol(df_fungi_RF_otu),
doTrace = 3
)
rfe_fungi_biom
# Get significant variables including tentatives
impfe_fungi_biom <-
getSelectedAttributes(rfe_fungi_biom, withTentative = TRUE)
impfe_fungi_biom
# subset to important features only, then re-attach the response column
df_fungi_RF_otu[, c(impfe_fungi_biom)] -> df_fungi_RF_otu_sel
identical(rownames(df_fungi_RF_otu_sel), rownames(df_fungi_RF_otu))
df_fungi_RF_otu_sel$biomass <- df_fungi_RF_otu$biomass
# select optimal mtry (sqrt(p) heuristic as the starting point)
round(sqrt(ncol(df_fungi_RF_otu_sel[, 1:(ncol(df_fungi_RF_otu_sel) - 1)])))
set.seed(110322)
bestmtry_fungi_biom <-
tuneRF(
x = df_fungi_RF_otu_sel[, 1:(ncol(df_fungi_RF_otu_sel) - 1)],
y = df_fungi_RF_otu_sel$biomass,
mtryStart = 7,
ntreeTry = 1001,
improve = 0.01,
stepFactor = 0.5,
nodesize = 1,
doBest = TRUE,
trace = TRUE,
plot = TRUE
)
bestmtry_fungi_biom
# Final fungal OTU forest (mtry = 7 from tuning above)
set.seed(110323)
RF_fungi_biom <-
randomForest(
x = df_fungi_RF_otu_sel[, 1:(ncol(df_fungi_RF_otu_sel) - 1)],
y = df_fungi_RF_otu_sel$biomass,
ntree = 1001,
mtry = 7,
importance = TRUE,
proximity = TRUE
)
RF_fungi_biom
plot(RF_fungi_biom)
# Assessing model significance using permutations
set.seed(110324)
perm_RF_fungi_biom <-
rf.significance(
x = RF_fungi_biom,
xdata = df_fungi_RF_otu_sel[, 1:(ncol(df_fungi_RF_otu_sel) - 1)],
nperm = 999,
nmtry = 1,
ntree = 1001
)
perm_RF_fungi_biom # model significant = 0.001
# Dataframe for PCA: selected OTUs + Genotype/Soil metadata, joined by sample id
df_fungi_RF_otu_sel_pca <-
left_join(tibble::rownames_to_column(df_fungi_RF_otu_sel), #keep this samples only
tibble::rownames_to_column(as(physeq_fungi_new@sam_data, "data.frame") %>%
dplyr::select(Genotype, Soil_location) %>%
rename(Soil = Soil_location)), by="rowname")
head(df_fungi_RF_otu_sel_pca)
# rename the appended biomass column by position
# (NOTE(review): position 56 assumes 54 OTUs were selected -- verify on rerun)
colnames(df_fungi_RF_otu_sel_pca)[56] <- "Biomass"
# Plotting the model... Need a different function for plotting features.
# New plotting functions -----------------------------------------------------------------------------------
# PlotOTU: bar plot of OTU-level variable importance from a randomForest fit.
#   rf_model - randomForest object trained with importance = TRUE
#   taxa     - taxonomy data.frame, rownames = OTU ids; columns 9:11 and 16 are
#              joined in (isolate-match info + Taxon label) -- NOTE(review):
#              positional column selection, confirm it matches the taxonomy table.
#   Var      - importance column to plot ("IncNodePurity" or "%IncMSE")
# Bars are coloured by whether the OTU matched a cultured isolate (glbrc yes/no).
# FIX: the original `arrange(imp_RF, desc(Var))` ordered by the literal string
# held in `Var` (a constant), i.e. a no-op; sort by the named column instead.
PlotOTU <- function(rf_model, taxa, Var){
require(tibble)
imp_RF <- as.data.frame(rf_model$importance)
imp_RF$features <- rownames(imp_RF)
imp_RF <- arrange(imp_RF, desc(.data[[Var]]))
rownames(imp_RF) <- imp_RF$features
# adding marker taxon info: align taxonomy rows with importance rows
taxa[rownames(taxa)%in%rownames(imp_RF), ] -> taxa_RF
identical(rownames(taxa_RF), rownames(imp_RF))
order_taxa <- match(rownames(taxa_RF), rownames(imp_RF))
imp_RF <- imp_RF[order_taxa,]
imp_RF$Taxonomy <- taxa_RF$Taxon
# final ordering by the requested importance measure
imp_RF <- imp_RF[order(imp_RF[,Var], decreasing = TRUE),]
imp_RF <- left_join(tibble::rownames_to_column(imp_RF),
tibble::rownames_to_column(
taxa[rownames(imp_RF), c(9:11, 16)]))
# strip OTU-id prefixes so the x-axis labels stay short
imp_RF$Taxonomy <-
gsub("FOTU_", "", imp_RF$Taxonomy)
imp_RF$Taxonomy <-
gsub("OTU_", "",imp_RF$Taxonomy)
# flag OTUs that matched a cultured isolate
imp_RF$glbrc <-
ifelse(is.na(imp_RF$Isolate), "no", "yes")
imp_RF %T>% print()
ggplot(data=imp_RF) +
geom_bar(aes(x= reorder(Taxonomy, -get(Var)),
y= get(Var),
color= glbrc, fill=glbrc), stat="identity") +
theme_classic() +
scale_y_continuous(expand = c(0, 0)) +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.y = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
axis.text.x = element_text(angle = 90, size = 7 ,hjust = 1, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_text(size = 10, face = "bold"),
legend.text = element_text(size = 8),
plot.margin=unit(c(1.5,1.5,1.5,1.5),"pt"),
legend.position = "none") -> plot_importance
return(plot_importance)
}
# Preview the fungal importance bar plots with both importance measures.
# NOTE(review): "areal" in the titles is likely a typo for "aerial"
# (cf. the axis labels used in the figures below) -- left unchanged here.
PlotOTU(RF_fungi_biom, taxa_fungi_new, "IncNodePurity") +
labs(title = "Fungal OTUs affecting areal plant biomass",
x= NULL, y= "Node Purity")
PlotOTU(RF_fungi_biom, taxa_fungi_new, "%IncMSE") +
labs(title = "Fungal OTUs affecting areal plant biomass",
x= NULL, y= "% Increase MSE")
# PlotLineBiom: observed vs. out-of-bag predicted biomass for a randomForest
# regression, points shaped by Genotype and coloured by Soil, with an OLS
# trend line. `metadata` must carry rowname, Genotype and Soil columns keyed
# by the same sample ids as the forest's training data.
PlotLineBiom <- function(rf_model, metadata){
obs_pred <- data.frame(actual = rf_model$y,
pred = rf_model$predicted)
obs_pred <- tibble::rownames_to_column(obs_pred)
sample_info <- dplyr::select(metadata, rowname, Genotype, Soil)
plot_df <- left_join(obs_pred, sample_info, by = "rowname")
print(plot_df)
line_plot <-
ggplot(data = plot_df, aes(x = actual, y = pred)) +
geom_point(aes(shape = Genotype, color = Soil), size = 1.5, stroke = 0.5) +
geom_smooth(data = plot_df, method = "lm", formula = "y ~ x", se = TRUE,
color = "black", size = 0.5) +
theme_classic() +
scale_y_continuous(labels = scales::number_format(accuracy = 1)) +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.x = element_text(angle = 0, size = 8, hjust = 0.5, vjust = 1),
axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_blank(), legend.background = element_blank(),
legend.text = element_text(size = 8)) +
guides(color = guide_legend(nrow = 2), shape = guide_legend(nrow = 2))
return(line_plot)
}
# Quick preview of the observed-vs-predicted plot with an inset legend.
PlotLineBiom(RF_fungi_biom, df_fungi_RF_otu_sel_pca) + theme(legend.position = c(0.1, 0.8))
# PlotPCAotu: PCA biplot of the RF-selected OTU abundances.
#   df  - numeric matrix/data.frame of OTU columns (OTU-id prefixes stripped
#         from the column names so loading labels stay readable)
#   Var - data.frame with the aesthetics columns (Genotype, Soil)
# Uses ggfortify::autoplot on a scaled, centred prcomp fit with loading arrows.
PlotPCAotu <- function(df, Var){
colnames(df) <-
gsub("FOTU_", "", colnames(df))
colnames(df) <-
gsub("OTU_", "", colnames(df))
pca_plot <-
autoplot(
prcomp(x = df, scale= TRUE, center=TRUE), # computes principal components and plots with ggplot2
data = Var, label = FALSE, shape = "Genotype", colour="Soil", # add metadata, labels of objects
loadings = TRUE, loadings.colour = "black", size=1.5, stroke=0.8,
max.overlaps = getOption("ggrepel.max.overlaps", default = 0),
frame = FALSE, frame.colour = "Soil", loadings.label.colour = "black",
loadings.label = TRUE, loadings.label.size = 1.5, loadings.label.repel = TRUE) +
labs(title = "PCA") +
# scale_colour_manual(values = paletteCB4) +
# scale_fill_manual(values = paletteCB4) +
# scale_shape_manual(values = c(21,22,24)) +
theme_classic() +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.x = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_text(size = 10, face = "bold"),
legend.text = element_text(size = 8))
# guides(color = guide_legend(ncol = 2), #title.position="top"
# fill = guide_legend(ncol= 2),
# shape = guide_legend(ncol = 1)) +
# theme(legend.margin = margin(0,-0.5,0,0, unit="cm")) # reduce space betweem legends
return(pca_plot)
}
# NOTE(review): column positions 2:56 (OTUs) / 57:58 (Genotype, Soil) are
# hard-coded -- re-check if the number of selected OTUs changes.
str(df_fungi_RF_otu_sel_pca)
PlotPCAotu(df_fungi_RF_otu_sel_pca[,2:56], df_fungi_RF_otu_sel_pca[, 57:58])
# filterTaxa: subset a phyloseq object to exactly the taxa listed.
# NOTE(review): despite the argument name `badTaxa`, the listed taxa are
# KEPT (pruned TO), not removed -- callers below rely on this behaviour.
filterTaxa = function(physeq, badTaxa){
all_ids <- taxa_names(physeq)
keep_ids <- all_ids[all_ids %in% badTaxa]
prune_taxa(keep_ids, physeq)
}
# Extract features' seqs for BLAST -------------------------------------------------------------------------
# Export the representative sequences of the RF-selected OTUs as FASTA.
# NOTE(review): the hard-coded `[-55]` is meant to drop the appended "biomass"
# column of df_*_RF_otu_sel (not an OTU). It assumes both selected-OTU tables
# have exactly 55 columns -- if the bacterial table has a different width this
# silently keeps a bogus NA entry; verify on rerun.
write.dna(refseq(filterTaxa(physeq_fungi_new,
rownames(taxa_fungi_new[colnames(df_fungi_RF_otu_sel), ])[-55])),
format="fasta",
colsep="",
file="fungi_plant_biomass.fasta")
write.dna(refseq(filterTaxa(physeq_bact_new,
rownames(taxa_bact_new[colnames(df_bact_RF_otu_sel), ])[-55])),
format="fasta",
colsep="",
file="bact_plant_biomass.fasta")
# Correcting taxonomy tables ------------------------------------------------------------------------------
# Manual curation of OTU labels ("<OTU id>-<taxon>") based on BLAST results of
# the RF-selected OTUs; each line rewrites one label in place across the table.
# FIX: three replacement labels carried copy-paste slips -- FOTU_126 was being
# relabelled as "FOTU_191-...", FOTU_3189 as "FOTU_258-...", and
# "OTU_6199-Bacteria" had a stray leading space.
taxa_fungi_new[colnames(df_fungi_RF_otu_sel), ][, c(1,9,16)]
taxa_fungi_new$Taxonomy <- as.character(taxa_fungi_new$Taxonomy)
taxa_fungi_new[taxa_fungi_new == "FOTU_492-Glomeraceae"] <- "FOTU_492-Rhizophagus irregularis"
taxa_fungi_new[taxa_fungi_new == "FOTU_1-Ascomycota"] <- "FOTU_1-Setophoma sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_191-Glomeraceae"] <- "FOTU_191-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_126-Chaetothyriales"] <- "FOTU_126-Knufia sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_10071-Glomeraceae"] <- "FOTU_10071-Rhizophagus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_139-Branch06"] <- "FOTU_139-Sordariomycetes"
taxa_fungi_new[taxa_fungi_new == "FOTU_2313-Glomeraceae"] <- "FOTU_2313-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_57-Hypocreales"] <- "FOTU_57-Fusarium sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_690-Fungi"] <- "FOTU_690-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_258-Glomus"] <- "FOTU_258-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_3189-Glomeraceae"] <- "FOTU_3189-Glomus cf. macrocarpum"
taxa_fungi_new[taxa_fungi_new == "FOTU_725-Glomeraceae"] <- "FOTU_725-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_1183-Glomeraceae"] <- "FOTU_1183-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_772-Glomeraceae"] <- "FOTU_772-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_967-Piriformospora sp."] <- "FOTU_967-Serendipita sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_10512-Glomeraceae"] <- "FOTU_10512-Septoglomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_10558-Glomeraceae"] <- "FOTU_10558-Glomus sp."
taxa_fungi_new[taxa_fungi_new == "FOTU_4264-Glomeraceae"] <- "FOTU_4264-Glomus sp."
taxa_bact_new[colnames(df_bact_RF_otu_sel), ][, c(1,9,16)]
taxa_bact_new$Taxonomy <- as.character(taxa_bact_new$Taxonomy)
taxa_bact_new[taxa_bact_new == "OTU_64-67-14"] <- "OTU_64-Solirubrobacter sp."
taxa_bact_new[taxa_bact_new == "OTU_516-Ffch7168 sp."] <- "OTU_516-Bacteria"
taxa_bact_new[taxa_bact_new == "OTU_306-Uncultured7 sp."] <- "OTU_306-Rhizobiales"
taxa_bact_new[taxa_bact_new == "OTU_2263-Uncultured 60"] <- "OTU_2263-Acidobacteria"
taxa_bact_new[taxa_bact_new == "OTU_754-Uncultured 76 sp."] <- "OTU_754-Gemmatimonas sp."
taxa_bact_new[taxa_bact_new == "OTU_568-Kd4-96"] <- "OTU_568-Acidobacteria"
taxa_bact_new[taxa_bact_new == "OTU_7284-Uncultured 76 sp."] <- "OTU_7284-Gemmatimonadetes"
taxa_bact_new[taxa_bact_new == "OTU_232-type III"] <- "OTU_232-Cand. Moen. glomeromycotorum"
taxa_bact_new[taxa_bact_new == "OTU_2472-Allorhizobium-neorhizobium-pararhizobium-rhizobium sp."] <- "OTU_2472-Rhizobium sp."
taxa_bact_new[taxa_bact_new == "OTU_427-67-14"] <- "OTU_427-Actinobacteria"
taxa_bact_new[taxa_bact_new == "OTU_451-Plta13"] <- "OTU_451-Xanthomonadales"
taxa_bact_new[taxa_bact_new == "OTU_4189-Uncultured51 sp."] <- "OTU_4189-Rhizobiales"
taxa_bact_new[taxa_bact_new == "OTU_796-Uncultured bacterium 2214 sp."] <- "OTU_796-Methylocystaceae"
taxa_bact_new[taxa_bact_new == "OTU_20144-D05-2"] <- "OTU_20144-Bacteria"
taxa_bact_new[taxa_bact_new == "OTU_4834-Uncultured 77 sp."] <- "OTU_4834-Myxococcales"
taxa_bact_new[taxa_bact_new == "OTU_727-Uncultured mollicutes bacterium 8 sp."] <- "OTU_727-Cand. Moen. glomeromycotorum"
taxa_bact_new[taxa_bact_new == "OTU_962-A4b"] <- "OTU_962-Bacteria"
taxa_bact_new[taxa_bact_new == "OTU_2597-Uncultured 87 sp."] <- "OTU_2597-Rhizobiales"
taxa_bact_new[taxa_bact_new == "OTU_6199-Uncultured31 sp."] <- "OTU_6199-Bacteria"
taxa_bact_new[taxa_bact_new == "OTU_760-Uncultured36 sp."] <- "OTU_760-Burkholderiales"
taxa_bact_new[taxa_bact_new == "OTU_700-Uncultured2 sp."] <- "OTU_700-Blastopirellula sp."
taxa_bact_new[taxa_bact_new == "OTU_6802-Sm2d12"] <- "OTU_6802-Bacteria"
taxa_bact_new[taxa_bact_new == "OTU_699-Uncultured 76 sp."] <- "OTU_699-Gemmatimonas sp."
# NEUTRAL MODELS ------------------------------------------------------------------------------------
# Fit Sloan's neutral community model (tyRa::fit_sncm) to each community and
# plot observed occupancy vs. the neutral expectation.
library(tyRa)
require(minpack.lm)
require(Hmisc)
require(stats4)
neutral_model_fungi <-
tyRa::fit_sncm(spp = t(otu_table(physeq_fungi_new)), pool=NULL, taxon=data.frame(tax_table(physeq_fungi_new)))
plot_sncm_fit(neutral_model_fungi, fill = NULL, title = "Model Fit") +
theme_classic()
neutral_model_bact <-
tyRa::fit_sncm(spp = t(otu_table(physeq_bact_new)), pool=NULL, taxon=data.frame(tax_table(physeq_bact_new)))
plot_sncm_fit(neutral_model_bact, fill = NULL, title = "Model Fit") +
theme_classic()
# INDICATOR SPECIES ANALYSIS ------------------------------------------------------------------
library(indicspecies)
# GetIndicators: indicator species analysis on a phyloseq object.
#   dataframe - phyloseq object (despite the name)
#   var       - name of the sample_data column defining the groups
# Runs multipatt (point-biserial "r.g", 999 permutations, duleg = TRUE),
# FDR-adjusts the p-values, keeps taxa at p <= 0.05, joins the taxonomy,
# and returns the table ordered by decreasing indicator statistic.
GetIndicators <-function(dataframe, var){
require(phyloseq); require(indicspecies); require(dplyr)
otu <- as.data.frame(otu_table(dataframe))
metadata = as(sample_data(dataframe), "data.frame")
taxa <- as.data.frame(as.matrix(tax_table(dataframe)))
multipatt <- multipatt(t(otu), metadata[,var], func = "r.g",
control=how(nperm=999), duleg=TRUE)
multipatt -> multipatt_fdr
# FDR-correct the permutation p-values before filtering
multipatt_fdr$sign$p.value <- p.adjust(multipatt_fdr$sign$p.value, "fdr")
multipatt_fdr$sign[which(
multipatt_fdr$sign$p.value <= 0.05), ] -> indicator_taxa
taxa$OTU <- rownames(taxa)
data.frame(OTU = as.factor(row.names(indicator_taxa)), indicator_taxa) %>%
dplyr::left_join(taxa, by="OTU") -> indicator_taxa
rownames(indicator_taxa) <- indicator_taxa$OTU
indicator_taxa <- arrange(indicator_taxa, desc(stat))
return(indicator_taxa)
}
# indicator value >0.5 and p-value <0.05 after fdr correction
# Root-compartment indicators of soil origin, for fungi and bacteria.
head(physeq_fungi_new@sam_data)
ind_ITS_soil <-
GetIndicators(
physeq_fungi_new %>%
subset_samples(Root_soil%in%"Root"),
"Soil_location")
head(ind_ITS_soil)
dim(ind_ITS_soil)
ind_16s_soil <-
GetIndicators(
physeq_bact_new %>%
subset_samples(Root_soil%in%"Root"),
"Soil_location")
head(ind_16s_soil)
dim(ind_16s_soil)
# RF AND INDICATORS MATCH ---------------------------------------------------------------------
# GetIndTab: compact indicator table -- recode the numeric multipatt group
# index to the soil-site name and keep OTU id, stat, p-value, IndVal columns.
# NOTE(review): the c(1,7,8,25) column positions and the 1..4 -> site mapping
# depend on the multipatt output layout and factor level order of
# Soil_location -- confirm if either changes.
GetIndTab <- function(df_ind) {
df_ind$IndVal <-
df_ind$index %>%
recode_factor(
"1" = "Hancock",
"2" = "Lake City",
"3" = "Lux Arbor",
"4" = "Rhineland")
return(df_ind[, c(1,7,8,25)])
}
# Match indicators with RF models features accuracy -------------------------------------------
ind_ITS_soil_tab <-
GetIndTab(ind_ITS_soil)
ind_16s_soil_tab <-
GetIndTab(ind_16s_soil)
# PlotOTUInd: like PlotOTU, but colours the importance bars by the indicator
# site (IndVal) and annotates each OTU with its neutral-model fit class.
#   rf_model - randomForest object (importance = TRUE)
#   taxa     - taxonomy data.frame, rownames = OTU ids
#   NeutMod  - tyRa::fit_sncm result (element 2 holds per-OTU predictions)
#   Var      - importance column to plot ("IncNodePurity" or "%IncMSE")
#   df_ind   - indicator table from GetIndTab (first column = OTU id)
#   df_name  - name under which the assembled table is saved to .GlobalEnv
# FIXES (kept consistent with PlotOTU):
#  * `arrange(imp_RF, desc(Var))` sorted by a literal string (no-op); use the
#    named column via .data[[Var]].
#  * `head(print(final_df))` printed the whole table; `print(head(final_df))`
#    prints only the intended preview.
PlotOTUInd <- function(rf_model, taxa, NeutMod, Var, df_ind, df_name){
require(tibble)
imp_RF <- as.data.frame(rf_model$importance)
imp_RF$features <- rownames(imp_RF)
imp_RF <- arrange(imp_RF, desc(.data[[Var]]))
rownames(imp_RF) <- imp_RF$features
# adding marker taxon info: align taxonomy rows with importance rows
taxa[rownames(taxa)%in%rownames(imp_RF), ] -> taxa_RF
identical(rownames(taxa_RF), rownames(imp_RF))
order_taxa <- match(rownames(taxa_RF), rownames(imp_RF))
imp_RF <- imp_RF[order_taxa,]
imp_RF$Taxonomy <- taxa_RF$Taxon
imp_RF <- imp_RF[order(imp_RF[,Var], decreasing = TRUE),]
imp_RF <- left_join(tibble::rownames_to_column(imp_RF),
tibble::rownames_to_column(
taxa[rownames(imp_RF), c(9:11, 16)]))
# strip OTU-id prefixes so the x-axis labels stay short
imp_RF$Taxonomy <-
gsub("FOTU_", "", imp_RF$Taxonomy)
imp_RF$Taxonomy <-
gsub("OTU_", "",imp_RF$Taxonomy)
imp_RF$glbrc <-
ifelse(is.na(imp_RF$Isolate), "no", "yes")
# adding neutral model results (per-OTU fit class: above/below/neutral)
df_neutral <-
as.data.frame(NeutMod[2])
df_neutral$rowname <- rownames(df_neutral)
new_df <-
df_neutral %>%
dplyr::select("predictions.fit_class", "rowname")
new_df <-
left_join(imp_RF, new_df, by= "rowname")
# adding match with indicators
colnames(df_ind)[1] <- "rowname"
final_df <-
left_join(new_df, df_ind, by= "rowname")
print(head(final_df))
# saving intermediate df to the global R environment under `df_name`
final_df %T>%
#assign(paste(df_name, Var1, Var2, sep = ""),., envir = .GlobalEnv) %>% # saving the plot
assign(df_name, ., envir = .GlobalEnv)
# plotting
ggplot(data=final_df) +
geom_bar(aes(x= reorder(Taxonomy, -get(Var)),
y= get(Var),
color= IndVal, fill=IndVal), stat="identity") +
theme_classic() +
scale_y_continuous(expand = c(0, 0)) +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.y = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
axis.text.x = element_text(angle = 90, size = 7 ,hjust = 1, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_text(size = 10, face = "bold"),
legend.text = element_text(size = 8),
plot.margin=unit(c(1.5,1.5,1.5,1.5),"pt"),
legend.position = "none") -> plot_importance
return(plot_importance)
}
# Preview the indicator-coloured importance plots for both communities.
PlotOTUInd(RF_fungi_biom, taxa_fungi_new, neutral_model_fungi,
"%IncMSE", ind_ITS_soil_tab, "rf_taxa_fungi") +
labs(title = "Most important OTUs for plant biomass",
x= NULL, y= "% Increase MSE") +
scale_colour_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil)
PlotOTUInd(RF_bact_biom, taxa_bact_new, neutral_model_bact,
"%IncMSE", ind_16s_soil_tab, "rf_taxa_bact") +
labs(title = "Most important OTUs for plant biomass",
x= NULL, y= "% Increase MSE") +
scale_colour_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil)
# *** FIGURE 4 ---------------------------------------------------------------------------------------------
# NOTE(review): the objects below are named Fig_5_* although the header says
# FIGURE 4 -- confirm the intended figure numbering.
pca_fungi_otu <-
PlotPCAotu(df_fungi_RF_otu_sel_pca[,2:56], df_fungi_RF_otu_sel_pca[, 57:58]) +
scale_color_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
theme(legend.position = "none")
# modify loading thickness by editing the built layers directly
pca_fungi_otu$layers[[2]]$aes_params$size <- 0.2
pca_fungi_otu$layers[[2]]$geom_params$arrow$length <- unit(4, units = "points")
pca_fungi_otu
# Composite: (A) observed vs predicted, (B) PCA of selected OTUs,
# (C) indicator-coloured importance bars.
Fig_5_RF_fungi_OTU <-
ggarrange(
ggarrange(
PlotLineBiom(RF_fungi_biom, df_fungi_RF_otu_sel_pca) +
# NOTE(review): the second legend.position overrides the first
theme(legend.position = "bottom") +
theme(legend.position = c(0.15, 0.8)) +
scale_color_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo",
"Blackwell",
"Cave-in-rock",
"Kanlow",
"Shelter",
"Southlow")) +
labs(title = "Random Forest ", y="Predicted\nAerial Plant Biomass", x="Observed\nAerial Plant Biomass") +
annotate("text", x=Inf, y = -Inf,
label=paste("Mean squared error:", round(last(RF_fungi_biom$mse), 2)), size=2.5, vjust=-6, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label= paste("% Var explained:", round(last(RF_fungi_biom$rsq*100),2)), size=2.5, vjust=-4, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label = paste("italic(p) ==", round(perm_RF_fungi_biom$pValue, 4)), parse = TRUE, size=2.5, vjust=-1.5, hjust=1),
pca_fungi_otu,
labels = c("A","B"),
widths = c(1, 1),
align = "h",
ncol = 2,
nrow = 1,
common.legend = TRUE,
legend = "bottom"),
PlotOTUInd(RF_fungi_biom, taxa_fungi_new, neutral_model_fungi,
"%IncMSE", ind_ITS_soil_tab, "rf_taxa_fungi") +
labs(title = "Most important OTUs for plant biomass",
x= NULL, y= "% Increase MSE") +
scale_colour_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil),
labels = c("", "C"),
heights = c(1, 1),
ncol = 1,
nrow =2)
Fig_5_RF_fungi_OTU
grid.arrange(Fig_5_RF_fungi_OTU, top=title3)
# Greyscale variant coloured by isolate match instead of indicator site.
PlotOTU(RF_fungi_biom, taxa_fungi_new, "%IncMSE") +
labs(title = "Most important OTUs for plant biomass",
x= NULL, y= "% Increase MSE") +
scale_colour_manual(values = c("grey80", "grey40")) +
scale_fill_manual(values = c("grey80", "grey40"))
# matching root isolates -----------------------------------------------------------------------------------
# Table of RF-selected fungal OTUs: isolate-match info, importance scores, and
# representative sequences, exported as CSV sorted by %IncMSE.
isolates_fungi <-
taxa_fungi_new[colnames(df_fungi_RF_otu_sel), ] %>%
dplyr::select(Family, Isolate, Isolate_percent_id, Isolate_query_cover,Taxonomy)
isolates_fungi <-
left_join(
tibble::rownames_to_column(isolates_fungi),
tibble::rownames_to_column(as.data.frame(RF_fungi_biom$importance)),
by="rowname")
isolates_fungi <-
left_join(
isolates_fungi,
data.frame(rowname = names(All_otus_ITS), sequence = paste(All_otus_ITS)),
by="rowname")
str(isolates_fungi)
write.csv(arrange(isolates_fungi, desc(`%IncMSE`)), "RF_fungi_otu.csv")
# Plot taxonomic proportions ------------------------------------------------------------
# Count of selected OTUs per taxon (label text after the last "-").
fungi_bar_plot<-
as.data.frame(
table(gsub(".*-","",isolates_fungi$Taxonomy))) %>%
ggplot(aes(Var1, Freq)) +
geom_col() +
coord_flip() +
theme_classic() +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.y = element_text(angle = 0, size = 7, hjust = 1, vjust = 0.5),
axis.text.x = element_text(angle = 0, size = 7 ,hjust = 0.5, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_text(size = 10, face = "bold"),
legend.text = element_text(size = 8),
plot.margin=unit(c(1.5,1.5,1.5,1.5),"pt")) +
labs(title = "Fungal OTUs", x="Taxon", y="Number of OTUs")
fungi_bar_plot
# Bacteria -------------------------------------------------------------------------------------------------
# Same OTU-level workflow as for fungi: join OTU table to biomass response,
# Boruta feature selection, then mtry tuning.
str(otu_bact_new)
str(meta_bact_new)
identical(rownames(otu_bact_new), rownames(meta_bact_new))
df_bact_RF_otu <-
data.frame(otu_bact_new,
biomass = meta_bact_new$aerial_part_dry_weight_16wks_grams)
str(df_bact_RF_otu)
head(df_bact_RF_otu)
# Recursive feature selection
set.seed(110321)
rfe_bact_biom <- Boruta(
biomass ~ .,
df_bact_RF_otu,
pValue = 0.05,
mcAdj = TRUE,
maxRuns = ncol(df_bact_RF_otu),
doTrace = 3
)
rfe_bact_biom
# Get significant variables including tentatives
impfe_bact_biom <-
getSelectedAttributes(rfe_bact_biom, withTentative = TRUE)
impfe_bact_biom
# subset to important features only, then re-attach the response column
df_bact_RF_otu[, c(impfe_bact_biom)] -> df_bact_RF_otu_sel
identical(rownames(df_bact_RF_otu_sel), rownames(df_bact_RF_otu))
df_bact_RF_otu_sel$biomass <- df_bact_RF_otu$biomass
# select optimal mtry (sqrt(p) heuristic as the starting point)
round(sqrt(ncol(df_bact_RF_otu_sel[, 1:(ncol(df_bact_RF_otu_sel) - 1)])))
set.seed(110322)
bestmtry_bact_biom <-
tuneRF(
x = df_bact_RF_otu_sel[, 1:(ncol(df_bact_RF_otu_sel) - 1)],
y = df_bact_RF_otu_sel$biomass,
mtryStart = 7,
ntreeTry = 1001,
improve = 0.01,
stepFactor = 0.5,
nodesize = 1,
doBest = TRUE,
trace = TRUE,
plot = TRUE
)
bestmtry_bact_biom
# Final bacterial OTU forest.
# NOTE(review): mtry = 14 is hard-coded -- presumably taken from the tuneRF
# step above; re-check after re-running the tuning.
set.seed(110323)
RF_bact_biom <-
randomForest(
x = df_bact_RF_otu_sel[, 1:(ncol(df_bact_RF_otu_sel) - 1)],
y = df_bact_RF_otu_sel$biomass,
ntree = 1001,
mtry = 14,
importance = TRUE,
proximity = TRUE
)
RF_bact_biom
plot(RF_bact_biom)
# Assessing model significance using permutations
set.seed(110324)
perm_RF_bact_biom <-
rf.significance(
x = RF_bact_biom,
xdata = df_bact_RF_otu_sel[, 1:(ncol(df_bact_RF_otu_sel) - 1)],
nperm = 999,
nmtry = 1,
ntree = 1001
)
perm_RF_bact_biom # model significant = 0.001
# Dataframe for PCA: selected OTUs + Genotype/Soil metadata, joined by sample id
df_bact_RF_otu_sel_pca <-
left_join(tibble::rownames_to_column(df_bact_RF_otu_sel), #keep this samples only
tibble::rownames_to_column(as(physeq_bact_new@sam_data, "data.frame") %>%
dplyr::select(Genotype, Soil_location) %>%
rename(Soil = Soil_location)), by="rowname")
str(df_bact_RF_otu_sel_pca)
head(df_bact_RF_otu_sel_pca)
# (duplicate preview line kept as-is)
head(df_bact_RF_otu_sel_pca)
# rename the appended biomass column by position (NOTE(review): position 54
# assumes 52 OTUs were selected -- verify on rerun)
colnames(df_bact_RF_otu_sel_pca)[54] <- "Biomass"
# Plotting the model...
# *** FIGURE 5 ---------------------------------------------------------------------------------------------
# Bacterial counterpart of the fungal composite figure:
# (A) observed vs predicted, (B) PCA of selected OTUs, (C) importance bars.
pca_bact_otu <-
PlotPCAotu(df_bact_RF_otu_sel_pca[,2:54], df_bact_RF_otu_sel_pca[, 55:56]) +
scale_color_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
theme(legend.position = "none")
# modify loading thickness by editing the built layers directly
pca_bact_otu$layers[[2]]$aes_params$size <- 0.25
pca_bact_otu$layers[[2]]$geom_params$arrow$length <- unit(4, units = "points")
pca_bact_otu
Fig_5_RF_bact_OTU <-
ggarrange(
ggarrange(
PlotLineBiom(RF_bact_biom, df_bact_RF_otu_sel_pca) +
# NOTE(review): the second legend.position overrides the first
theme(legend.position = "bottom") +
theme(legend.position = c(0.15, 0.8)) +
scale_color_manual(values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8),
labels = c("Alamo",
"Blackwell",
"Cave-in-rock",
"Kanlow",
"Shelter",
"Southlow"))+
labs(title = "Random Forest ", y="Predicted\nAerial Plant Biomass", x="Observed\nAerial Plant Biomass") +
annotate("text", x=Inf, y = -Inf,
label=paste("Mean squared error:", round(last(RF_bact_biom$mse), 2)), size=2.5, vjust=-6, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label= paste("% Var explained:", round(last(RF_bact_biom$rsq*100),2)), size=2.5, vjust=-4, hjust=1) +
annotate("text", x=Inf, y = -Inf,
label = paste("italic(p) ==", round(perm_RF_bact_biom$pValue, 4)), parse = TRUE, size=2.5, vjust=-1.5, hjust=1),
pca_bact_otu,
labels = c("A","B"),
widths = c(1, 1),
align = "h",
ncol = 2,
nrow = 1,
common.legend = TRUE,
legend = "bottom"),
PlotOTUInd(RF_bact_biom, taxa_bact_new, neutral_model_bact,
"%IncMSE", ind_16s_soil_tab, "rf_taxa_bact") +
labs(title = "Most important OTUs for plant biomass",
x= NULL, y= "% Increase MSE") +
scale_colour_manual(values = Pal_soil) +
scale_fill_manual(values = Pal_soil),
labels = c("", "C"),
heights = c(1, 1),
ncol = 1,
nrow =2)
Fig_5_RF_bact_OTU
grid.arrange(Fig_5_RF_bact_OTU, top=title4)
# Greyscale variant coloured by isolate match instead of indicator site.
PlotOTU(RF_bact_biom, taxa_bact_new, "%IncMSE") +
labs(title = "Most important OTUs for plant biomass",
x= NULL, y= "% Increase MSE") +
scale_colour_manual(values = c("grey80", "grey40")) +
scale_fill_manual(values = c("grey80", "grey40"))
# matching root isolates
# Table of RF-selected bacterial OTUs: isolate-match info, importance scores,
# and representative sequences, exported as CSV sorted by %IncMSE.
# FIX: the original selected `Isolate Isolate_percent_id` -- a garbled column
# name that does not exist (the fungal twin above uses Isolate_percent_id),
# which would make dplyr::select() error.
isolates_bact <-
taxa_bact_new[colnames(df_bact_RF_otu_sel), ] %>%
dplyr::select(Class, Isolate, Isolate_percent_id, Isolate_query_cover, Taxonomy)
isolates_bact <-
left_join(
tibble::rownames_to_column(isolates_bact),
tibble::rownames_to_column(as.data.frame(RF_bact_biom$importance)),
by="rowname")
isolates_bact <-
left_join(
isolates_bact,
data.frame(rowname = names(All_otus_16S), sequence = paste(All_otus_16S)),
by="rowname")
str(isolates_bact)
write.csv(arrange(isolates_bact, desc(`%IncMSE`)), "RF_bact_otu.csv")
# taxonomic proportions
# Count of selected bacterial OTUs per taxon (label text after the last "-").
bact_bar_plot <-
as.data.frame(
table(gsub(".*-","",isolates_bact$Taxonomy))) %>%
ggplot(aes(Var1, Freq)) +
geom_col() +
coord_flip() +
theme_classic() +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.y = element_text(angle = 0, size = 7, hjust = 1, vjust = 0.5),
axis.text.x = element_text(angle = 0, size = 7 ,hjust = 0.5, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_text(size = 10, face = "bold"),
legend.text = element_text(size = 8),
plot.margin=unit(c(1.5,1.5,1.5,1.5),"pt")) +
labs(title = "Bacterial OTUs", x="Taxon", y="Number of OTUs")
bact_bar_plot
# FIGURE SX - RF taxa barplot ------------------------------------------------------------------------------
# Side-by-side taxon counts for fungi (A) and bacteria (B).
ggarrange(
ggarrange(fungi_bar_plot,
ncol = 1,
nrow = 2,
heights = c(1, 0.3),
labels = "A"),
bact_bar_plot,
labels = c("","B"),
ncol = 2,
nrow = 1)
# **********************************************************************------------------------------------
# BETA DIVERSITY -------------------------------------------------------------------------------------------
str(meta_fungi_merged)
str(meta_bact_merged)
# match samples to those used for modeling: pot 340 raw 55, 124
# NOTE(review): `[-55, ]` / `[-124, ]` drop by ROW POSITION; the preceding
# rowname checks suggest positions and rownames coincide here -- confirm
# if the merged tables are ever reordered upstream.
meta_fungi_merged[rownames(meta_fungi_merged)==55,]
meta_fungi_merged_filt <-
meta_fungi_merged[-55, ]
str(meta_fungi_merged_filt)
meta_bact_merged[rownames(meta_bact_merged)==124,]
meta_bact_merged_filt <-
meta_bact_merged[-124, ]
str(meta_bact_merged_filt)
# spot-check two specific samples by their amplicon id
subset(meta_bact_merged, rowname%in%c("Amp489"))
subset(meta_bact_merged, rowname%in%c("Amp504"))
# Calculating fit of environmental variables ---------------------------------------------------------------
# GenerateEnvFit: fit soil-chemistry vectors onto the first two PCoA axes
# (vegan::envfit, 9999 permutations) and return the vector scores of the
# variables with permutation p <= 0.05, with a Var column of their names.
GenerateEnvFit <- function(df){
chem_vars <- c("pH", "P", "K", "Ca", "Mg", "OM", "NO3")
fit <- envfit(df[, c("PCoA.1", "PCoA.2")], df[, chem_vars], perm = 9999)
fit_scores <- as.data.frame(scores(fit, display = "vectors"))
fit_scores <- cbind(fit_scores, Var = rownames(fit_scores))
fit_scores$pvals <- fit$vectors$pvals
# keep only significant vectors
fit_scores <- fit_scores[which(fit_scores$pvals <= 0.05), ]
return(fit_scores)
}
# NOTE(review): the seed is set once before the first call only, so the
# bacterial envfit permutations are not reproducible from this seed alone.
set.seed(1)
GenerateEnvFit(meta_fungi_merged_filt) -> res_envfit_fungi
res_envfit_fungi
GenerateEnvFit(meta_bact_merged_filt) -> res_envfit_bact
res_envfit_bact
# plot ordination
# PlotPCOA: PCoA scatter (axes precomputed in `df` as PCoA.1/PCoA.2) with
# significant soil-chemistry vectors from GenerateEnvFit drawn as arrows.
# WARNING(review): the annotate() call supplies exactly 7 hard-coded labels,
# but `envfit` is already filtered to significant vectors -- if fewer than 7
# variables are significant the labels will mismatch the arrows. The arrow
# coordinates are scaled by 1.05/2 (and labels by 0.555) purely for display.
PlotPCOA <- function(df, envfit){
pcoa <-
ggplot(df,
aes(x=PCoA.1, y=PCoA.2, color = Soil, shape = Genotype)) +
geom_point(size = 1.5) +
scale_colour_manual("Soil", values = Pal_soil) +
scale_shape_manual(values = c(0,1,2,5,3,8)) +
theme_classic() +
theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
axis.title = element_text(angle = 0, size = 10, face = "bold"),
axis.text.x = element_text(angle =0, size = 7, face = "bold", hjust = 0.5, vjust = 1),
axis.text.y = element_text(angle = 0, size = 7, face = "bold",hjust = 0.5, vjust = 0.5),
legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
legend.title = element_text(size = 10, face = "bold"),
legend.text = element_text(size = 8), legend.position="bottom") +
guides(color = guide_legend(nrow = 2),shape = guide_legend(nrow = 2)) +
#grids(linetype = "dashed") +
geom_segment(data = envfit, inherit.aes = FALSE,
mapping = aes(x = 0,
xend = 1.05 * envfit$PCoA.1/2,
y = 0,
yend = 1.05 * envfit$PCoA.2/2),
color = "black",
arrow = arrow(length = unit(0.02, "npc"))) +
annotate("text", x = envfit$PCoA.1*0.555, y = envfit$PCoA.2*0.555,
size = 3,
label = c(
"pH",
expression(PO[4]^{"3-"}),
expression(K^{"+"}),
expression(Ca^{"2+"}),
expression(Mg^{"2+"}),
"OM",
expression(NO[3]^{"-"})))
# geom_text(data = envfit,inherit.aes = FALSE,
# mapping = aes(x = 1.05 * envfit[,1]/2, y = 1.05 * envfit[,2]/2,
# label = Var),
# size = 3)
return(pcoa)
}
# Build the two PCoA panels, thinning the envfit arrows via direct layer edits.
PlotPCOA(meta_fungi_merged_filt, res_envfit_fungi)
plot_pcoa_fungi <-
PlotPCOA(meta_fungi_merged_filt, res_envfit_fungi) +
labs(title = "Fungi")
plot_pcoa_fungi$layers[[2]]$aes_params$size <- 0.3
plot_pcoa_fungi$layers[[2]]$geom_params$arrow$length <- unit(6, units = "points")
plot_pcoa_fungi
plot_pcoa_bact <-
PlotPCOA(meta_bact_merged_filt, res_envfit_bact) +
labs(title = "Bacteria")
plot_pcoa_bact$layers[[2]]$aes_params$size <- 0.3
plot_pcoa_bact$layers[[2]]$geom_params$arrow$length <- unit(6, units = "points")
plot_pcoa_bact
# axes flipped for the bacterial panel so both ordinations face the same way
plot_pcoa_bact + scale_x_reverse() + scale_y_reverse()
# Final FIGURE 2 -------------------------------------------------------------------------------------------
ggarrange(
plot_pcoa_fungi,
plot_pcoa_bact + scale_x_reverse() + scale_y_reverse(),
ncol = 2,
nrow = 1,
common.legend = TRUE,
legend = "none")
## PERMANOVA -----------------------------------------------------------------------------------------------
# Bray-Curtis PERMANOVA with readNo as the first (covariate) term; each model
# is fitted twice with Genotype/Soil order swapped because adonis uses
# sequential (Type I) sums of squares.
# NOTE(review): vegan::adonis is deprecated in recent vegan releases --
# consider migrating to adonis2 when updating dependencies.
identical(rownames(alpha_df_fungi_filt), rownames(otu_fungi_new_filt))
str(otu_fungi_new_filt)
identical(rownames(alpha_df_bact_filt), rownames(otu_bact_new_filt))
str(otu_bact_new_filt)
adonis_fungi <- adonis(otu_fungi_new_filt ~ readNo + Genotype * Soil,
alpha_df_fungi_filt, method = "bray", permutations=9999)
adonis_fungi
adonis_fungi2 <- adonis(otu_fungi_new_filt ~ readNo + Soil * Genotype,
alpha_df_fungi_filt, method = "bray", permutations=9999)
adonis_fungi2
adonis_bact <- adonis(otu_bact_new_filt ~ readNo + Genotype * Soil,
alpha_df_bact_filt, method = "bray", permutations=9999)
adonis_bact
adonis_bact2 <- adonis(otu_bact_new_filt ~ readNo + Soil * Genotype,
alpha_df_bact_filt, method = "bray", permutations=9999)
adonis_bact2
# BETA DISPERSION ------------------------------------------------------------------------------------------
# Homogeneity of multivariate dispersion for the permdisp_* objects (built
# earlier with vegan::betadisper); pairwise permutation tests are
# BH-corrected and converted to compact letter displays.
anova(permdisp_fungi_soil, permutations = 9999)
data.frame(multcompLetters(p.adjust(
permutest(
permdisp_fungi_soil,
permutations = 9999,
pairwise = T
)$pairwise$observed,
method = "BH"
))['Letters']) -> pair_fungi_soil
anova(permdisp_fungi_genotype, permutations = 9999)
anova(permdisp_bact_soil, permutations = 9999)
data.frame(multcompLetters(p.adjust(
permutest(
permdisp_bact_soil,
permutations = 9999,
pairwise = T
)$pairwise$observed,
method = "BH"
))['Letters']) -> pair_bact_soil
anova(permdisp_bact_genotype, permutations = 9999)
# plot multivariate dispersion
# Plot distances-to-centroid from a vegan::betadisper object as jittered
# points over boxplots, one box per level of the metadata column `Var`.
#
# Args:
#   betadisp:  a `betadisper` object with $group and $distances components.
#   Var:       name (string) of the metadata column mapped to the x axis.
#   my_labels: character vector of significance letters drawn above the
#              boxes (pass NA to omit them).
#   metadata:  data.frame of sample metadata; rownames are sample IDs and
#              must match names(betadisp$distances). Needs `Genotype` and
#              `Soil` columns for the point aesthetics.
# Returns: a ggplot object.
PlotBetadisper <- function(betadisp, Var, my_labels, metadata){
  # y position for the letter labels: 10% above the largest distance
  labels_y <- max(betadisp$distances + 0.1 * max(betadisp$distances))
  # pair each sample's group assignment with its distance to the centroid
  df <- data.frame(betadisp$group, betadisp$distances)
  colnames(df) <- c("Fact", "distance")
  # join the sample metadata on the rownames (sample IDs)
  metadata <-
    left_join(tibble::rownames_to_column(df),
              tibble::rownames_to_column(metadata), by = "rowname")
  print(head(metadata))  # quick visual check of the join
  # `.data[[Var]]` resolves the column inside the data mask, which is more
  # robust than the previous `get(Var)` (environment-dependent lookup).
  betaplot <-
    ggplot(metadata, aes(x = .data[[Var]], y = distance)) +
    geom_jitter(aes(shape = Genotype, color = Soil), alpha = 0.8, lwd = 1) +
    geom_boxplot(outlier.colour = "black", outlier.shape = 8, outlier.size = 1,
                 alpha = 0.6, lwd = 0.4) +
    # red diamond = group mean
    stat_summary(fun = mean, geom = "point", shape = 18, size = 1.9, color = "red", fill = "red") +
    # significance letters (compact letter display) above each box
    stat_summary(geom = 'text', label = my_labels, fun = max, aes(y = labels_y), size = 3, color = "black") +
    theme_classic() +
    scale_color_manual(values = Pal_soil) +
    # bold legend text marks the two genotypes highlighted in the study
    scale_shape_manual(values = c(0,1,2,5,3,8),
                       labels = c("Alamo"=expression(bold("Alamo")),
                                  "Blackwell",
                                  "Cave-in-rock",
                                  "Kanlow"=expression(bold("Kanlow")),
                                  "Shelter",
                                  "Southlow")) +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle = 45, size = 8, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 8, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8), legend.position = "bottom") +
    guides(color = guide_legend(nrow = 2),
           shape = guide_legend(nrow = 2))
  return(betaplot)
}
# Exploratory per-factor dispersion plots (the publication panels are
# re-assembled with titles below).
PlotBetadisper(permdisp_fungi_soil, "Soil", as.character(pair_fungi_soil$Letters), alpha_df_fungi_filt)
PlotBetadisper(permdisp_bact_soil, "Soil", as.character(pair_bact_soil$Letters), alpha_df_bact_filt)
PlotBetadisper(permdisp_fungi_genotype, "Genotype", NA, alpha_df_fungi_filt) +
  # NOTE(review): each label key must match the factor-level spelling in the
  # data exactly; `Shlelter` is used here while `Shleter` appears elsewhere
  # in this script — if the key does not match, the relabel is silently
  # ignored. Confirm against levels(alpha_df_fungi_filt$Genotype).
  scale_x_discrete(labels=c(Alamo = "Alamo",
                            Blackwell = "Blackwell",
                            `Cave-in-rock` = "Cave-in-rock",
                            Kanlow ="Kanlow",
                            Shlelter="Shelter",
                            Southlow="Southlow"))
PlotBetadisper(permdisp_bact_genotype, "Genotype", NA, alpha_df_bact_filt)
# *** FIGURE 2B - Beta Dispersion --------------------------------------------------------------------------
# Four dispersion panels in one row (fungi soil/genotype, bacteria
# soil/genotype). Genotype panels are wider (widths 1.25) to fit the
# relabelled x ticks. The same `Shlelter` spelling caveat as above applies.
ggarrange(PlotBetadisper(permdisp_fungi_soil, "Soil", as.character(pair_fungi_soil$Letters), alpha_df_fungi_filt) +
            labs(title = "Soil", y ="Distance to centroid", x= NULL),
          PlotBetadisper(permdisp_fungi_genotype, "Genotype", NA, alpha_df_fungi_filt) +
            labs(title = "Genotype", y ="Distance to centroid", x= NULL)+
            scale_x_discrete(labels=c(Alamo = "Alamo",
                                      Blackwell = "Blackwell",
                                      `Cave-in-rock` = "Cave-in-rock",
                                      Kanlow ="Kanlow",
                                      Shlelter="Shelter",
                                      Southlow="Southlow")),
          PlotBetadisper(permdisp_bact_soil, "Soil", as.character(pair_bact_soil$Letters), alpha_df_bact_filt) +
            labs(title = "Soil", y ="Distance to centroid", x= NULL),
          PlotBetadisper(permdisp_bact_genotype, "Genotype", NA, alpha_df_bact_filt) +
            labs(title = "Genotype", y ="Distance to centroid", x= NULL) +
            scale_x_discrete(labels=c(Alamo = "Alamo",
                                      Blackwell = "Blackwell",
                                      `Cave-in-rock` = "Cave-in-rock",
                                      Kanlow ="Kanlow",
                                      Shlelter="Shelter",
                                      Southlow="Southlow")),
          widths = c(1, 1.25, 1, 1.25),
          labels = c("C","D","E","F"),
          align = "hv",
          ncol = 4,
          nrow = 1,
          common.legend = TRUE,
          legend = c("bottom")) -> betadisp_plot
betadisp_plot
# Calculating axis % of variation in cmdscale --------------------------------------------------------------
# Percent of total variation captured by each PCoA axis (eigenvalue over the
# eigenvalue sum), rounded to one decimal; used in the Figure 2 axis labels.
var_axes_fungi <- round(100 * pcoa_its$eig / sum(pcoa_its$eig), 1)
var_axes_bact <- round(100 * pcoa_16s$eig / sum(pcoa_16s$eig), 1)
# *** FIGURE 2 - complete ----------------------------------------------------------------------------------
# Top row: the two PCoA panels annotated with PERMANOVA R^2 values. The R^2
# strings are hard-coded from the adonis output above — update them if the
# models change. Bottom row: the dispersion panels (betadisp_plot).
# NOTE(review): `parse=TRUE` here is placed inside expression() rather than
# as an annotate() argument, so it is most likely inert — verify the labels
# render as intended.
# NOTE(review): the bacterial annotations anchor at (Inf, -Inf) with the same
# negative hjust as the fungal (-Inf, Inf) ones, which may push the text
# outside the panel — confirm placement in the rendered figure.
ggarrange(
  ggarrange(
    plot_pcoa_fungi +
      annotate("text", -Inf, Inf,
               label = expression(paste("Read No. ", italic(R) ^ 2,"= 7.7%***"),
                                  parse=TRUE), size = 2.5, hjust = -0.05, vjust = 1.1) +
      annotate("text", -Inf, Inf,
               label = expression(paste("Genotype ", italic(R) ^ 2,"= 2.8%***"),
                                  parse=TRUE), size = 2.5, hjust = -0.05, vjust = 2) +
      annotate("text", -Inf, Inf,
               label = expression(paste("Soil ", italic(R) ^ 2,"= 43.4%***"),
                                  parse=TRUE), size = 2.5, hjust = -0.06, vjust = 3.2) +
      labs(x=as.expression(paste("PCoA.1 (",var_axes_fungi[1],"%)"), parse=TRUE),
           y=as.expression(paste("PCoA.2 (",var_axes_fungi[2],"%)"), parse=TRUE)),
    plot_pcoa_bact +
      annotate("text", Inf, -Inf,
               label = expression(paste("Read No. ", italic(R) ^ 2,"= 12.9%***"),
                                  parse=TRUE), size = 2.5, hjust = -0.05, vjust = 1.1) +
      annotate("text", Inf, -Inf,
               label = expression(paste("Genotype ", italic(R) ^ 2,"= 3.4%***"),
                                  parse=TRUE), size = 2.5, hjust = -0.05, vjust = 2) +
      annotate("text", Inf, -Inf,
               label = expression(paste("Soil ", italic(R) ^ 2,"= 32.4%***"),
                                  parse=TRUE), size = 2.5, hjust = -0.06, vjust = 3.2) +
      labs(x=as.expression(paste("PCoA.1 (",var_axes_bact[1],"%)"), parse=TRUE),
           y=as.expression(paste("PCoA.2 (",var_axes_bact[2],"%)"), parse=TRUE)) +
      # axes reversed to match the fungal panel's orientation (sign of PCoA
      # axes is arbitrary)
      scale_x_reverse() +
      scale_y_reverse(),
    ncol = 2,
    nrow = 1,
    common.legend = TRUE,
    legend = "none",
    labels = c("A","B")),
  betadisp_plot,
  nrow = 2,
  heights = c(1, 1)) -> Fig2_beta_div
Fig2_beta_div
# *******************************************************************---------------------------------------
# TAXONOMIC COMPOSITION ------------------------------------------------------------------------------------
# Relative abundance (%) of each taxon after agglomerating a phyloseq object
# at the given taxonomic rank.
#
# Args:
#   physeq: a phyloseq object with an OTU table and a tax_table.
#   rank:   taxonomic rank to agglomerate at (e.g. "Phylum").
# Returns: data.frame of the agglomerated tax_table plus an `abundance`
#   column (percent of all reads), sorted by decreasing abundance.
AbundData <- function(physeq, rank){
  # normalise the various "missing" taxonomy labels to one placeholder
  tax_table(physeq)[tax_table(physeq) == ""] <- NA
  tax_table(physeq)[tax_table(physeq) == "unidentified"] <- NA
  tax_table(physeq)[is.na(tax_table(physeq))] <- "Unclassified"
  # drop the OTU_ID column before agglomeration
  tax_table(physeq) <- tax_table(physeq)[, !(colnames(tax_table(physeq)) %in% c("OTU_ID"))]
  # merge all OTUs that share the same taxonomy at the requested rank
  glommed <- tax_glom(physeq, rank)
  # each taxon's share of the total read count, as a percentage
  rel_abund <- taxa_sums(glommed) / sum(taxa_sums(glommed)) * 100
  out <- as.data.frame(as(tax_table(glommed), "matrix"))
  out$abundance <- as.vector(rel_abund)
  out[order(out$abundance, decreasing = TRUE), ]
}
# Phylum-level relative abundances for both communities, plus quick looks at
# the fungal sample metadata (printed for inspection only).
AbundData(physeq_fungi_new, "Phylum")
AbundData(physeq_bact_new, "Phylum")
str(physeq_fungi_new@sam_data)
physeq_fungi_new@sam_data
tail(physeq_fungi_new@sam_data)
table(physeq_fungi_new@sam_data$Ecotype)
#*********************************************************************************--------------------------
# Models - From Ming-Yi
# # Colinear removal + MLR + Stepwise Regression ###
# library(statsr)
# library(MASS)
# library(dplyr)
# library(ggplot2)
# library(BAS)
# library(olsrr)
# library(corpcor)
#
#
# full.model <- lm(Severity16_Raw ~ pH+OM+N+C+P+S+Ca+Mg+K+Al+Cu+Fe+Mn+Mo+Na+Zn , data = soil.sever)
# summary(full.model)
#
# step.model <- stepAIC(full.model, direction = "backward", trace = FALSE)
# summary(step.model)
#
# # remove colinear #
# variables=soil.sever[(1:18),(11:27)]
#
# # run vif_func at: https://gist.github.com/fawda123/4717702 #
# vif_func(in_frame=variables,thresh=10,trace=T)
# rerun the model #
# full.model <- lm(Severity16_Raw ~ pH+OM+K+C+P+S++Cu+Fe+Mn+Mo+Na+Zn , data = soil.sever)
# step.model <- stepAIC(full.model, direction = "backward", trace = FALSE)
# summary(step.model)
#
# data.frame(richness = c(10, 12, 23, 25, 2, 45,8, 5, 9, 10,23,44),
# Calcium = c(4,4,5,5,10,10,12,12,30,30, 23,23),
# Genotype = c("Alamo", "Alamo", "Cave-in-rock","Cave-in-rock","Kanlow","Kanlow",
# "Alamo", "Alamo", "Cave-in-rock","Cave-in-rock","Kanlow","Kanlow"),
# Soil = c("Hancock", "Hancock", "Hancock", "Hancock", "Hancock", "Hancock",
# "Lake city", "Lake city","Lake city", "Lake city","Lake city", "Lake city"))
# Additional analyses --------------------------------------------------------------------------------------
# FUNGI
# now, we separate samples as high-beta (hancock/lake city) and low beta (Lux/rhineland)
physeq_fungi_new_H <-
  subset_samples(physeq_fungi_new, Soil_location == "Hancock" | Soil_location == "Lake City")
# drop taxa that no longer occur after the sample subset
physeq_fungi_new_H <-
  prune_taxa(taxa_sums(physeq_fungi_new_H) > 0, physeq_fungi_new_H)
physeq_fungi_new_L <-
  subset_samples(physeq_fungi_new, Soil_location == "Rhineland" | Soil_location == "Lux Arbor")
physeq_fungi_new_L <-
  prune_taxa(taxa_sums(physeq_fungi_new_L) > 0, physeq_fungi_new_L)
## high diversity samples, simple model ##
# PERMANOVAs in both term orders (sequential SS), plus a genotype-only model
# with permutations restricted within soil (strata), and a PERMDISP test on
# genotype.
metadata_fungi_H <- as(sample_data(physeq_fungi_new_H), "data.frame")
adonis(phyloseq::distance(t(otu_table(physeq_fungi_new_H)), method="bray") ~ LibrarySize + Genotype * Soil_location, permutations=999, data = metadata_fungi_H)
adonis(phyloseq::distance(t(otu_table(physeq_fungi_new_H)), method="bray") ~ LibrarySize + Soil_location * Genotype, permutations=999, data = metadata_fungi_H)
adonis(phyloseq::distance(t(otu_table(physeq_fungi_new_H)), method="bray") ~ LibrarySize + Genotype,
       strata = metadata_fungi_H$Soil_location, permutations=999, data = metadata_fungi_H)
anova(
  betadisper(phyloseq::distance(t(otu_table(physeq_fungi_new_H)), method="bray"), metadata_fungi_H$Genotype),
  permutations = 999)
## low diversity soil, simple model ##
metadata_fungi_L <- as(sample_data(physeq_fungi_new_L), "data.frame")
adonis(phyloseq::distance(t(otu_table(physeq_fungi_new_L)), method="bray") ~ LibrarySize + Genotype * Soil_location, permutations=999, data = metadata_fungi_L)
adonis(phyloseq::distance(t(otu_table(physeq_fungi_new_L)), method="bray") ~ LibrarySize + Soil_location * Genotype, permutations=999, data = metadata_fungi_L)
adonis(phyloseq::distance(t(otu_table(physeq_fungi_new_L)), method="bray") ~ LibrarySize + Genotype,
       strata = metadata_fungi_L$Soil_location, permutations=999, data = metadata_fungi_L)
anova(
  betadisper(phyloseq::distance(t(otu_table(physeq_fungi_new_L)), method="bray"), metadata_fungi_L$Genotype),
  permutations = 999)
# BACTERIA
# Same high-/low-beta split and model suite as for fungi above.
# now, we separate samples as high-beta (hancock/lake city) and low beta (Lux/rhineland)
physeq_bact_new_H <-
  subset_samples(physeq_bact_new, Soil_location == "Hancock" | Soil_location == "Lake City")
physeq_bact_new_H <-
  prune_taxa(taxa_sums(physeq_bact_new_H) > 0, physeq_bact_new_H)
physeq_bact_new_L <-
  subset_samples(physeq_bact_new, Soil_location == "Rhineland" | Soil_location == "Lux Arbor")
physeq_bact_new_L <-
  prune_taxa(taxa_sums(physeq_bact_new_L) > 0, physeq_bact_new_L)
## high diversity samples, simple model ##
metadata_bact_H <- as(sample_data(physeq_bact_new_H), "data.frame")
adonis(phyloseq::distance(t(otu_table(physeq_bact_new_H)), method="bray") ~ LibrarySize + Genotype * Soil_location, permutations=999, data = metadata_bact_H)
adonis(phyloseq::distance(t(otu_table(physeq_bact_new_H)), method="bray") ~ LibrarySize + Soil_location * Genotype, permutations=999, data = metadata_bact_H)
adonis(phyloseq::distance(t(otu_table(physeq_bact_new_H)), method="bray") ~ LibrarySize + Genotype,
       strata = metadata_bact_H$Soil_location, permutations=999, data = metadata_bact_H)
anova(
  betadisper(phyloseq::distance(t(otu_table(physeq_bact_new_H)), method="bray"), metadata_bact_H$Genotype),
  permutations = 999)
## low diversity soil, simple model ##
metadata_bact_L <- as(sample_data(physeq_bact_new_L), "data.frame")
adonis(phyloseq::distance(t(otu_table(physeq_bact_new_L)), method="bray") ~ LibrarySize + Genotype * Soil_location, permutations=999, data = metadata_bact_L)
adonis(phyloseq::distance(t(otu_table(physeq_bact_new_L)), method="bray") ~ LibrarySize + Soil_location * Genotype, permutations=999, data = metadata_bact_L)
adonis(phyloseq::distance(t(otu_table(physeq_bact_new_L)), method="bray") ~ LibrarySize + Genotype,
       strata = metadata_bact_L$Soil_location, permutations=999, data = metadata_bact_L)
anova(
  betadisper(phyloseq::distance(t(otu_table(physeq_bact_new_L)), method="bray"), metadata_bact_L$Genotype),
  permutations = 999)
# Plot diversity in soils soils ----------------------------------------------------------------------------
# Extract soils
# "Control" pots are unplanted soil samples; subset them and drop taxa that
# no longer occur.
physeq_fungi_new_soil <-
  subset_samples(physeq_fungi_new, Genotype == "Control")
physeq_fungi_new_soil <-
  prune_taxa(taxa_sums(physeq_fungi_new_soil) > 0, physeq_fungi_new_soil)
physeq_fungi_new_soil
physeq_fungi_new_soil@sam_data
physeq_bact_new_soil <-
  subset_samples(physeq_bact_new, Genotype == "Control")
physeq_bact_new_soil <-
  prune_taxa(taxa_sums(physeq_bact_new_soil) > 0, physeq_bact_new_soil)
physeq_bact_new_soil
physeq_bact_new_soil@sam_data
# Create dataframes for plotting Alpha diversity in soils
# MakeDf() is defined earlier in this file; 10000 is presumably the
# rarefaction/subsampling depth — confirm against its definition.
alpha_df_fungi_soil <-
  MakeDf(physeq_fungi_new_soil, 10000)
alpha_df_fungi_soil
alpha_df_bact_soil <-
  MakeDf(physeq_bact_new_soil, 10000)
alpha_df_bact_soil
# Pairwise Wilcoxon comparisons (BH-adjusted) among the groups defined by
# `formula` (e.g. richness ~ Soil), converted into a compact letter display.
#
# Args:
#   dataframe:    per-sample alpha-diversity data.
#   formula:      response ~ grouping factor, passed to ggpubr::compare_means().
#   sample_order: order of the output rows. Defaults to the four soil sites
#                 used in this study, so existing callers are unaffected;
#                 pass another vector to reuse the function for other factors.
# Returns: data.frame with `Letters` (groups sharing a letter do not differ
#   significantly) and `sample` columns, one row per group.
CompSamplSoil <- function(dataframe, formula,
                          sample_order = c("Hancock", "Lake City", "Rhineland", "Lux Arbor")){
  # fail loudly if the dependency is missing; `require()` would only warn
  if (!requireNamespace("multcompView", quietly = TRUE)) {
    stop("Package 'multcompView' is required for CompSamplSoil()", call. = FALSE)
  }
  test_CC <- compare_means(formula, data = dataframe, method = "wilcox.test",
                           p.adjust.method = "BH")
  # keep group1, group2 and the BH-adjusted p (use column 4 for the raw p)
  test_CC <- as.data.frame(test_CC)[, c(2, 3, 5)]
  # duplicate each comparison with the groups swapped so xtabs() below
  # yields a full symmetric matrix of adjusted p-values
  test_CC2 <- data.frame(test_CC[, 2], test_CC[, 1], test_CC[, 3])
  colnames(test_CC2) <- c("group1", "group2", "p.adj")
  test_all <- rbind(test_CC, test_CC2)
  dist_CC <- as.dist(xtabs(test_all[, 3] ~ (test_all[, 2] + test_all[, 1])), diag = TRUE)
  res_CC <- data.frame(multcompView::multcompLetters(dist_CC)['Letters'])
  res_CC$sample <- rownames(res_CC)
  # order the rows for plotting
  res_CC <- res_CC %>% slice(match(sample_order, sample))
  return(res_CC)
}
# Compact letter displays for soil-wise richness differences (printed here;
# recomputed inline when the plots are assembled below).
CompSamplSoil(alpha_df_fungi_soil, formula(richness ~ Soil))
CompSamplSoil(alpha_df_bact_soil, formula(richness ~ Soil))
# Boxplot of an alpha-diversity metric per soil site.
#
# Args:
#   df:  data.frame with a `Soil` column and the metric column named by `Var`.
#   Var: name (string) of the diversity metric column to plot on the y axis.
# Returns: a ggplot object (y axis starts at 0; red diamond marks the mean).
PlotAlphaDivSoil <- function(df, Var){
  plot_div <-
    # `.data[[Var]]` resolves the column inside the data mask, which is more
    # robust than the previous `get(Var)` (environment-dependent lookup)
    ggplot(df, aes(x = Soil, y = .data[[Var]])) +
    geom_boxplot(outlier.shape = 1, outlier.size = 1, outlier.stroke = 1,
                 position = position_dodge(preserve = "single"), alpha=0.6, lwd = 0.5) +
    # red diamond = group mean
    stat_summary(fun=mean, geom="point", shape=18, size=1.9, color="red", fill="red") +
    ylim(0, NA) +
    theme_classic() +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.x = element_text(angle =45, size = 8, hjust = 1, vjust = 1),
          axis.text.y = element_text(angle = 0, size = 7, hjust = 0.5, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8)) +
    labs(x=NULL)
  return(plot_div)
}
# Soil vs root richness panels. The significance letters are placed at fixed
# heights (y = 1000 for fungi, y = 5000 for bacteria) — adjust if the
# richness ranges change.
PlotAlphaDivSoil(alpha_df_fungi_soil, "richness") +
  stat_summary(geom = 'text',
               label = CompSamplSoil(alpha_df_fungi_soil, formula(richness ~ Soil))$Letters,
               fun = max, aes(y = 1000), size = 3.5, color = "black") +
  labs(title = "Fungi Richness Soil")
# 2x2 grid: fungi (soil, roots) on top, bacteria (soil, roots) below.
ggarrange(
  PlotAlphaDivSoil(alpha_df_fungi_soil, "richness") +
    stat_summary(geom = 'text',
                 label = CompSamplSoil(alpha_df_fungi_soil, formula(richness ~ Soil))$Letters,
                 fun = max, aes(y = 1000), size = 3.5, color = "black") +
    labs(title = "Fungi soil", y="OTU richness"),
  PlotAlphaDivSoil(alpha_df_fungi, "richness") +
    stat_summary(geom = 'text',
                 label = CompSamplSoil(alpha_df_fungi, formula(richness ~ Soil))$Letters,
                 fun = max, aes(y = 1000), size = 3.5, color = "black") +
    labs(title = "Fungi roots",y="OTU richness"),
  PlotAlphaDivSoil(alpha_df_bact_soil, "richness") +
    stat_summary(geom = 'text',
                 label = CompSamplSoil(alpha_df_bact_soil, formula(richness ~ Soil))$Letters,
                 fun = max, aes(y = 5000), size = 3.5, color = "black") +
    labs(title = "Bacteria soil",y="OTU richness"),
  PlotAlphaDivSoil(alpha_df_bact, "richness") +
    stat_summary(geom = 'text',
                 label = CompSamplSoil(alpha_df_bact, formula(richness ~ Soil))$Letters,
                 fun = max, aes(y = 5000), size = 3.5, color = "black") +
    labs(title = "Bacteria roots", y="OTU richness"),
  ncol = 2,
  nrow = 2)
# NEUTRAL MODELS ------------------------------------------------------------------------------------------
# Fit Sloan's neutral community model to each community (tyRa::fit_sncm) and
# plot observed vs predicted occupancy. These packages are hard dependencies
# of this section, so load them with library(), which errors when a package
# is missing (require() would only warn and return FALSE).
library(tyRa)
library(minpack.lm)
library(Hmisc)
library(stats4)
# fit_sncm() expects samples in rows, hence the transpose of the OTU table
neutral_model_fungi <-
  tyRa::fit_sncm(spp = t(otu_table(physeq_fungi_new)), pool=NULL, taxon=data.frame(tax_table(physeq_fungi_new)))
plot_sncm_fit(neutral_model_fungi, fill = NULL, title = "Model Fit") +
  theme_classic()
neutral_model_bact <-
  tyRa::fit_sncm(spp = t(otu_table(physeq_bact_new)), pool=NULL, taxon=data.frame(tax_table(physeq_bact_new)))
plot_sncm_fit(neutral_model_bact, fill = NULL, title = "Model Fit") +
  theme_classic()
# Bar plot of random-forest variable importance per OTU, annotated with
# taxonomy, whether the OTU matches a culture-collection isolate ("glbrc"),
# and its neutral-model partition. SIDE EFFECT: the assembled data frame is
# assign()ed into the global environment under `df_name`.
#
# Args:
#   rf_model: randomForest fit; rownames of $importance are OTU IDs.
#   taxa:     taxonomy data.frame, rownames = OTU IDs. Columns 9:11 and 16
#             are joined in — presumably taxonomy ranks plus an `Isolate`
#             column; confirm against the taxonomy table's layout.
#   NeutMod:  tyRa::fit_sncm() result; element 2 carries per-OTU predictions.
#   Var:      importance column to plot (e.g. "%IncMSE").
#   df_name:  global-environment name for the merged data frame.
# Returns: a ggplot bar chart of importance, OTUs in decreasing order.
PlotOTU <- function(rf_model, taxa, NeutMod, Var, df_name){
  require(tibble)
  imp_RF <- as.data.frame(rf_model$importance)
  imp_RF$features <- rownames(imp_RF)
  # NOTE(review): `Var` is a character scalar here, so desc(Var) sorts by a
  # constant and this arrange() is a no-op; the effective ordering happens
  # below via order(imp_RF[,Var], ...). Confirm whether
  # arrange(desc(.data[[Var]])) was intended.
  imp_RF <- arrange(imp_RF, desc(Var))
  rownames(imp_RF) <- imp_RF$features
  # adding marker taxon info
  taxa[rownames(taxa)%in%rownames(imp_RF), ] -> taxa_RF
  # NOTE(review): this identical() result is computed but discarded — use
  # stopifnot() if the row orders must be guaranteed to match.
  identical(rownames(taxa_RF), rownames(imp_RF))
  order_taxa <- match(rownames(taxa_RF), rownames(imp_RF))
  imp_RF <- imp_RF[order_taxa,]
  imp_RF$Taxonomy <- taxa_RF$Taxon
  # sort by decreasing importance (this is the ordering that matters)
  imp_RF <- imp_RF[order(imp_RF[,Var], decreasing = TRUE),]
  imp_RF <- left_join(tibble::rownames_to_column(imp_RF),
                      tibble::rownames_to_column(
                        taxa[rownames(imp_RF), c(9:11, 16)]))
  # strip the OTU-ID prefixes from the display labels
  imp_RF$Taxonomy <-
    gsub("FOTU_", "", imp_RF$Taxonomy)
  imp_RF$Taxonomy <-
    gsub("OTU_", "",imp_RF$Taxonomy)
  # "yes" when the OTU has a matching isolate record
  imp_RF$glbrc <-
    ifelse(is.na(imp_RF$Isolate), "no", "yes")
  # adding neutral model results
  df_neutral <-
    as.data.frame(NeutMod[2])
  df_neutral$rowname <- rownames(df_neutral)
  new_df <-
    df_neutral %>%
    dplyr::select("predictions.fit_class", "rowname")
  new_df <-
    left_join(imp_RF, new_df, by= "rowname")
  new_df %T>% print()
  # saving intermediate df to R environemnt, pick the right name
  new_df %T>%
    print() %T>%
    #assign(paste(df_name, Var1, Var2, sep = ""),., envir = .GlobalEnv) %>% # saving the plot
    assign(df_name, ., envir = .GlobalEnv)
  # plotting
  ggplot(data=imp_RF) +
    geom_bar(aes(x= reorder(Taxonomy, -get(Var)),
                 y= get(Var),
                 color= glbrc, fill=glbrc), stat="identity") +
    theme_classic() +
    scale_y_continuous(expand = c(0, 0)) +
    theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5),
          plot.subtitle = element_text(size = 10, face = "bold", hjust = 0.5),
          axis.title = element_text(angle = 0, size = 10, face = "bold"),
          axis.text.y = element_text(angle =0, size = 8, hjust = 0.5, vjust = 1),
          axis.text.x = element_text(angle = 90, size = 7 ,hjust = 1, vjust = 0.5),
          legend.key.height = unit(0.2, "cm"), legend.key.width = unit(0.3, "cm"),
          legend.title = element_text(size = 10, face = "bold"),
          legend.text = element_text(size = 8),
          plot.margin=unit(c(1.5,1.5,1.5,1.5),"pt"),
          legend.position = "none") -> plot_importance
  return(plot_importance)
}
# Importance plots for both communities. PlotOTU() also assign()s the merged
# data frames (rf_taxa_fungi / rf_taxa_bact) into the global environment,
# which are then inspected and exported to CSV.
PlotOTU(RF_fungi_biom, taxa_fungi_new, neutral_model_fungi, "%IncMSE", "rf_taxa_fungi")
head(rf_taxa_fungi)
table(rf_taxa_fungi$predictions.fit_class)
PlotOTU(RF_bact_biom, taxa_bact_new, neutral_model_bact, "%IncMSE", "rf_taxa_bact")
head(rf_taxa_bact)
table(rf_taxa_bact$predictions.fit_class)
write.csv(rf_taxa_fungi, "rf_taxa_fungi.csv")
write.csv(rf_taxa_bact, "rf_taxa_bact.csv")
# Spearman correlations ------------------------------------------------------------------------------------
# Correlation heatmaps among the random-forest predictor variables and soil
# chemistry. NOTE(review): Spearman correlation is rank-based, so the
# centering/scaling below does not change the coefficients — harmless but
# unnecessary. The dropped column indices are positional (factor/ID columns,
# presumably) — confirm against the data frames' layouts.
library(corrplot)
head(df_bact_RF_2)
head(df_fungi_RF_2)
soil_data_fungi_norm <-
  data.frame(
    apply(df_fungi_RF_2[, -c(14,15,16)], 2, function(x) scale(x, center = TRUE, scale = TRUE))
  )
soil_data_fungi_norm
cor_fungi_soil <- cor(soil_data_fungi_norm, method = "spearman")
corrplot(cor_fungi_soil,
         method = 'number',
         type = 'lower',
         title="Spearman correlations Fungi",
         diag = FALSE,
         mar=c(0,0,1,0)) # colorful number
soil_data_bact_norm <-
  data.frame(
    apply(df_bact_RF_2[, -c(6,15,16,17)], 2, function(x) scale(x, center = TRUE, scale = TRUE))
  )
soil_data_bact_norm
cor_bact_soil <- cor(soil_data_bact_norm, method = "spearman")
corrplot(cor_bact_soil,
         method = 'number',
         type = 'upper',
         title="Spearman correlations Bacteria",
         diag = FALSE,
         mar=c(0,0,1,0)) # colorful number
# Soil chemistry (columns 37:43 of the merged metadata — confirm) plus
# plant biomass, correlated with each other.
soil_data <- meta_fungi_merged_filt[, 37:43]
soil_data$Biomass <- meta_fungi_filt$aerial_part_dry_weight_16wks_grams
soil_data
soil_data_norm <-
  data.frame(
    apply(soil_data, 2, function(x) scale(x, center = TRUE, scale = TRUE))
  )
soil_data_norm
# quick distribution checks of a few variables
hist(soil_data_norm$Biomass)
hist(soil_data_norm$P)
hist(soil_data_norm$NO3)
cor_soil <- cor(soil_data_norm, method = "spearman")
corrplot(cor_soil,
         method = 'number',
         type = 'upper',
         title="Spearman correlations",
         diag = FALSE,
         mar=c(0,0,1,0)) # colorful number
# OTU that correlate to plant biomass ----------------------------------------------------------------------
library(corrplot)
# top random-forest OTUs to correlate against plant biomass
features_fungi <- c("FOTU_227","FOTU_57","FOTU_772", "FOTU_12")
features_bact <- c("BOTU_700", "BOTU_291", "BOTU_64", "BOTU_727")
physeq_fungi_new
# helper to keep only the listed samples (defined below)
# Keep only the samples named in `goodSamples`, dropping all others.
#
# Args:
#   physeq:      a phyloseq object.
#   goodSamples: character vector of sample names to retain.
# Returns: `physeq` pruned to the requested samples (original order kept).
removeSamples <- function(physeq, goodSamples) {
  # `<-` for assignment (the original used `=`, against R style)
  allSamples <- sample_names(physeq)
  keep <- allSamples[allSamples %in% goodSamples]
  prune_samples(keep, physeq)
}
# Restrict the fungal phyloseq object to the filtered sample set, extract
# the selected feature OTUs, and correlate their counts with plant biomass.
physeq_fungi_filt <-
  removeSamples(physeq_fungi_new, meta_fungi_merged_filt$rowname)
physeq_fungi_filt
rf_features_fungi <-
  subset_taxa(physeq_fungi_filt, OTU_ID%in%features_fungi)
rf_features_fungi@tax_table
rf_features_fungi@otu_table
# samples in rows, one column per feature OTU
df_otu_cor <-
  as.data.frame(t(as(rf_features_fungi@otu_table, "matrix")))
# printed sanity check: OTU-table and sample-data rows must align
identical(rownames(df_otu_cor),rownames(rf_features_fungi@sam_data))
df_otu_cor$Biomass <- rf_features_fungi@sam_data$aerial_part_dry_weight_16wks_grams
df_otu_cor
# NOTE(review): scaling does not change Spearman (rank-based) correlations;
# this step is harmless but unnecessary.
df_otu_cor_norm <-
  data.frame(
    apply(df_otu_cor, 2, function(x) scale(x, center = TRUE, scale = TRUE))
  )
df_otu_cor_norm
cor_otu_cor_norm <- cor(df_otu_cor_norm, method = "spearman")
cor_otu_cor_norm
corrplot(cor_otu_cor_norm,
         method = 'number',
         type = 'upper',
         title="Spearman correlations",
         diag = FALSE,
         mar=c(0,0,1,0)) # colorful number
# SEQUENCING RESULTS ----------------------------------------------------------------------------------------
# Per-sample read-depth summaries for the manuscript.
# NOTE(review): the result of this first subset_samples() pipe is only
# printed, not assigned — the mean/sd below use physeq_fungi_filt instead;
# confirm that is the intended sample set.
physeq_fungi_new %>%
  subset_samples(Ecotype%in%c("Lowland", "Upland"))
mean(sample_sums(physeq_fungi_filt))
sd(sample_sums(physeq_fungi_filt))
min(sample_sums(physeq_fungi_filt))
max(sample_sums(physeq_fungi_filt))
# unplanted soil samples, fungi
mean(sample_sums(physeq_fungi_new_soil))
sd(sample_sums(physeq_fungi_new_soil))
min(sample_sums(physeq_fungi_new_soil))
max(sample_sums(physeq_fungi_new_soil))
# unplanted soil samples, bacteria
mean(sample_sums(physeq_bact_new_soil))
sd(sample_sums(physeq_bact_new_soil))
# planted (Lowland/Upland) samples, bacteria
mean(sample_sums(
  physeq_bact_new %>%
    subset_samples(Ecotype%in%c("Lowland", "Upland"))
))
sd(sample_sums(
  physeq_bact_new %>%
    subset_samples(Ecotype%in%c("Lowland", "Upland"))
))
# subsetting data for Ming-Yi -------------------------------------------------------------------------------
# Export the OTUs that appear in the random-forest importance tables,
# restricted to planted (Upland/Lowland) samples, as RDS files.
rownames(as.data.frame(RF_fungi_biom$importance))
MSE_features_fungi <-
  subset_taxa(physeq_fungi_new, OTU_ID%in%rownames(as.data.frame(RF_fungi_biom$importance)))
MSE_features_fungi
MSE_features_fungi@tax_table
rownames(as.data.frame(RF_bact_biom$importance))
MSE_features_bact <-
  subset_taxa(physeq_bact_new, OTU_ID%in%rownames(as.data.frame(RF_bact_biom$importance)))
MSE_features_bact
MSE_features_bact@tax_table
MSE_features_fungi@sam_data$Ecotype
MSE_features_fungi <-
  subset_samples(MSE_features_fungi, Ecotype%in%c("Upland", "Lowland"))
# drop all-zero taxa rows left over after the sample subset
otu_table(MSE_features_fungi) <-
  otu_table(MSE_features_fungi)[which(rowSums(otu_table(MSE_features_fungi)) > 0),]
MSE_features_bact@sam_data$Ecotype
MSE_features_bact <-
  subset_samples(MSE_features_bact, Ecotype%in%c("Upland", "Lowland"))
otu_table(MSE_features_bact) <-
  otu_table(MSE_features_bact)[which(rowSums(otu_table(MSE_features_bact)) > 0),]
# Save an object to a file
saveRDS(MSE_features_fungi, file = "MSE_features_fungi.rds")
saveRDS(MSE_features_bact, file = "MSE_features_bact.rds")
# Restore the object (round-trip check; results printed only)
readRDS(file = "MSE_features_fungi.rds")
readRDS(file = "MSE_features_bact.rds")
# LINEAR MODELS --------------------------------------------------------------------------------------------
## read data ##
fun <- readRDS("MSE_features_fungi.rds")
bac <- readRDS("MSE_features_bact.rds")
# Fungi: feature-OTU counts as predictors of aboveground biomass, followed
# by backward stepwise selection (stepAIC from MASS).
fun.fac <- t(as.data.frame(fun@otu_table))
fun.bio <- as.data.frame(fun@sam_data)
df <- as.data.frame(cbind(fun.fac, fun.bio$aerial_part_dry_weight_16wks_grams))
# The biomass vector is the last column appended by cbind(); ncol() replaces
# the previous hard-coded index (55), which would break silently if the
# feature-OTU set changed size.
colnames(df)[ncol(df)] <- "biomass"
full.model <- lm(biomass ~ . , data = df)
summary(full.model)
step.model <- stepAIC(full.model, direction = "backward", trace = FALSE)
summary(step.model)
# Bacteria: same procedure (hard-coded index 53 likewise replaced by ncol()).
bac.fac <- t(as.data.frame(bac@otu_table))
bac.bio <- as.data.frame(bac@sam_data)
df.bac <- as.data.frame(cbind(bac.fac, bac.bio$aerial_part_dry_weight_16wks_grams))
colnames(df.bac)[ncol(df.bac)] <- "biomass"
full.model.bac <- lm(biomass ~ . , data = df.bac)
summary(full.model.bac)
step.model.bac <- stepAIC(full.model.bac, direction = "backward", trace = FALSE)
summary(step.model.bac)
#### Mantel Test ####
### Correlation between microbial composition and plant overall phenotype ###
# Build the distance matrices used for the correlations.
# NOTE: this section uses objects (otus.bray.s, otus.bray.s.f, m.summer,
# m.summer.f, m.16s.chem) that are only (re)defined further down in this
# script -- run the "Mantel ITS" preparation block below first.
# Mantel 16S
library(fastDummies)
otus.bray.s # 16S microbe matrices
otus.bray.s.f # ITS microbe matrices
d.16s.pheno <- m.summer[,(44:52)] # 16S plant phenotype data
d.ITS.pheno <- m.summer.f[,(44:52)] # ITS plant phenotype data
d.16s.pheno.1 <- d.16s.pheno[,-(4:5)]
d.ITS.pheno.1 <- d.ITS.pheno[,-(4:5)]
d.16s.pheno.1 <- dummy_columns(d.16s.pheno.1)
d.ITS.pheno.1 <- dummy_columns(d.ITS.pheno.1)
d.16s.pheno.2 <- d.16s.pheno.1[,-(4:5)]
d.ITS.pheno.2 <- d.ITS.pheno.1[,-(4:5)]
m.16s.pheno <- vegdist(d.16s.pheno.2,"bray") # 16S plant phenotype matric
m.ITS.pheno <- vegdist(d.ITS.pheno.2,"bray") # ITS plant phenotype matric
set.seed(15110)
Mantel.plant.16S <- mantel(otus.bray.s, m.16s.pheno, method="spear", permutations=999, strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
Mantel.plant.ITS <- mantel(otus.bray.s.f, m.ITS.pheno , method="spear", permutations=999, strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
Mantel.Plant.chem<- mantel(m.16s.pheno,m.16s.chem , method="spear", permutations=999, strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
# Mantel ITS
otus.fac_filt <- subset(otus.fac, !Pot%in%c("326", "93", "284", "117", "256", "117", "128","340"))
otus.summer <- subset(otus.fac_filt, Genotype != "Control") # run if extra filtering
o.summer <-otus.summer[,(1:12072)]
m.summer <-otus.summer[,(12073:12135)]
otus.bray.s <- vegdist(o.summer) # 16S microbe matrices without 340
otus.fac.f_filt <- subset(otus.fac.f, !Pot%in%c("184", "109", "89", "87", "303", "198","340"))
otus.summer.f <- subset(otus.fac.f_filt, Genotype != "Control") # run if extra filtering
o.summer.f <-otus.summer.f[,(1:5837)]
m.summer.f <-otus.summer.f[,(5838:5901)]
otus.bray.s.f <- vegdist(o.summer.f) # ITS microbe matrices without 340
d.16s.chem <- m.summer[,(55:61)] # 16S soil chem data
d.ITS.chem <- m.summer.f[,(55:61)] # ITS soil chem data
m.16s.chem <- vegdist(d.16s.chem,"bray") # 16S soil chem matric
m.ITS.chem <- vegdist(d.ITS.chem,"bray") # ITS soil chem matric
set.seed(15110)
Mantel.chem.16S <- mantel(otus.bray.s, m.16s.chem, method="spear", permutations=999, strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
Mantel.chem.ITS <- mantel(otus.bray.s.f, m.ITS.chem , method="spear", permutations=999, strata = NULL, na.rm = FALSE, parallel = getOption("mc.cores"))
|
3d55e51eff94a96714e52b15686c716aa2242162 | 16206bf1a9ef3e0591b9aa5b25ec04cbbde2ce29 | /tcga/mutations.r | 20ef89d4b6620f6af5ef366b09c746cbe54bcb98 | [] | no_license | ipstone/data | 73227781f3f65dfeee7aa2c552c6488fef742859 | 0f7606d5790a340d93a6d04eb2b929340603e782 | refs/heads/master | 2022-06-10T04:02:05.207453 | 2022-01-22T18:54:36 | 2022-01-22T18:54:38 | 270,737,979 | 0 | 0 | null | 2020-06-08T16:18:17 | 2020-06-08T16:18:16 | null | UTF-8 | R | false | false | 617 | r | mutations.r | `%>%` = magrittr::`%>%`
.map_id = import('./map_id')$map_id
#' Get a data.frame listing all mutations and types
#'
#' Reads the pre-downloaded Mutect2 SNV table for a TCGA cohort from this
#' module's data directory and adds a `Sample` column derived from the
#' tumor sample barcode.
#'
#' @param cohort TCGA cohort code (e.g. "BRCA"); used to build the cached
#'   file name "TCGA-<cohort>.rds" inside the "snv_mutect2" module directory
#' @param id_type Where to cut the barcode, either "patient", "specimen", or "full"
#' @param ... NOTE(review): currently accepted but silently ignored --
#'   confirm whether extra arguments should be forwarded anywhere
#' @return A data.frame with data for all the simple mutations
mutations = function(cohort, id_type="specimen", ...) {
    # Locate the RDS cache produced by the TCGAbiolinks downloader module
    fdir = module_file("TCGAbiolinks-downloader", "snv_mutect2")
    fname = file.path(fdir, sprintf("TCGA-%s.rds", cohort))
    # Read, derive Sample from Tumor_Sample_Barcode, and move it to the front
    tibble::as_tibble(readRDS(fname)) %>%
        dplyr::mutate(Sample = .map_id(Tumor_Sample_Barcode, id_type=id_type)) %>%
        select(Sample, everything())
}
|
a9fb833bf990ea1d46ef43853fe5015698374383 | 790bd50934037b9d5c44373151f9a442774a818b | /example02.R | 77fb6b269da0e0fb59df15fce323b3317e545620 | [] | no_license | jamespaul007/iotoolsExamples | a2bfa3df6e21a3f017d7fedd4a4054d10c774f3a | 15c26841dc42573b5a96ebbc821f59191c4704cb | refs/heads/master | 2021-01-15T22:56:49.653817 | 2014-07-16T22:48:33 | 2014-07-16T22:48:33 | 22,072,342 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,168 | r | example02.R | #!/usr/bin/env Rscript
#
# Name: example02.R
# Date: 2014-07-16
# Author: Taylor Arnold <taylor@research.att.com>
# Purpose: Show a more involved example using the hmr function
# in the iotools package to merge two datasets on a common key.
# Assumes that create_data.R as been run to create the data, and
# example01.R has been run to push the data to hdfs.
# Load in the iotools package and configure the environment
# variables and set conveniance function hfs; refer to the
# comments in example01.R for help with this step.
library("iotools")
# Provide defaults for the Hadoop environment variables when they are not
# already configured in the calling environment.
if (Sys.getenv("HADOOP_PREFIX") == "") {
  Sys.setenv(HADOOP_PREFIX = "/usr/lib/hadoop")
}
# BUG FIX: the original condition was `!= ""`, which clobbered an
# already-configured streaming jar with a placeholder and left an unset one
# empty. Mirror the HADOOP_PREFIX logic above: only fill in the default
# when the variable is unset.
if (Sys.getenv("HADOOP_STREAMING_JAR") == "") {
  Sys.setenv(HADOOP_STREAMING_JAR = "your_location_here")
}
# Convenience prefix for shelling out to `hadoop fs` subcommands.
hfs = paste0(Sys.getenv("HADOOP_PREFIX"), "/bin/hadoop fs")
# In this example, we have two (simulated) datasets. The first,
# input02_hh_income_2013, is a two column csv file giving a
# household alphanumeric key, followed by the household's 2013
# income. The second file, input02_hh_income_2014, is a three
# column matrix in csv format giving a household alphanumeric
# key, followed by the household's 2014 income, and a two digit
# code giving the household's state abbreviation. Approximately
# 50% of the keys in one file matches the keys in the other file;
# also, many of the state codes are missing in the 2014 file.
# Our goal is to do a left join of the 2014 data on the 2013 data,
# joining on the alphanumeric household key. We also want to convert
# the state abbreviations to the full state names.
# Consider the additional difficulties in this example over the first:
#
# (1) Must the specify that the data is seperated by commas
# (2) Need matching keys to go to the same reducer, but can have multiple
# reducers for the job
# (3) Need to input multiple files
# (4) The different files have a different number of columns
# (5) Need to pass information mapping state abbreviations to state names
# (6) We need to be able to identify if a record came from the 2013 dataset
# or the 2014 data. Since the state abbreviations are missing sometimes,
# we cannot rely on this.
# Because the input files have similar names, it is possible to work
# around issue (3) by specifying the hinput as "data/input0[23]_hh_income_201[34].csv"
# This works well is some situations, but we will give a solution that does
# not depend on inputs with similar filenames. As a state names mapping is present
# in the base environment of R, we can also work around issue (5), but will again
# present a more generic solution.
# So, for generic solutions to these issues we will do the following:
#
# (1) Specify the map formatter as function(m) {mstrsplit(m, ",")}
# (2) Set the row names of the output appropriately
# (3) Declare an additional input file using the "hadoop.opt" option to hmr
# (4) This is not actually a problem as an input to the mappers, but will need
# to address this in the output of the mappers by making sure all of the
# output from the mappers have the same number of columns
# (5) Will send additional data to the mappers and reducers using the "aux"
# option to hmr
# (6) We will add an additional column to the output of the mappers giving an
# id the data records to indicate which input the record came from.
# The reason that (4) is not a problem for the mappers is that a single mapper
# will only ever get inputs from a single input file.
# Now, we show the code to implement these changes:
# We need to remove the output directory, if it exists:
# (the `hadoop fs -rm -r` call fails harmlessly when the directory is absent)
system(paste0(hfs, " -rm -r iotools_examples/output02"))
# We also construct the map between state abbreviation and name:
# (two-column character matrix: abbreviation, full name; shipped to every
# mapper and reducer via the `aux` argument below)
state_map = cbind(state.abb, state.name)
# Run the streaming job (note, there will be a lot of output to the console
# from this call).
r = hmr(input = hinput("iotools_examples/input/input02_hh_income_2013.csv"),
        output = hpath("iotools_examples/output02"),
        formatter = list(map = function(m) return(mstrsplit(m, ",")), # for csv file
                         reduce = function(m) mstrsplit(m, "|", "\t")),
        wait = TRUE,
        aux = list(state_map = state_map),
        reducers = 10, # probably overkill, but helps to illustrate the example
        # Mapper: a single mapper only ever sees rows from one input file,
        # so ncol() identifies which file it got. Both sources are padded /
        # translated to the same 3-column shape, keyed by the household id
        # carried in the row names.
        map = function(m) {
          if(ncol(m) == 2) { # Test if this mapper has 2013 data
            output = cbind("2013", m[,2], "") # note: extra column for missing state
            rownames(output) = m[,1]
          }
          if(ncol(m) == 3) { # Test if this mapper has 2014 data
            output = cbind("2014", m[,2:3])
            rownames(output) = m[,1]
            # Convert state abbreviations to state names
            index = match(output[,3], state_map[,1])
            output[!is.na(index),3] = state_map[index[!is.na(index)],2]
          }
          # NOTE(review): if a mapper ever received a file with a column
          # count other than 2 or 3, `output` would be undefined here --
          # confirm the inputs are fixed-width.
          return(output)
        },
        # Reducer: receives all rows that share a key; splits them by the
        # year tag added in the mapper and left-joins each 2014 record with
        # its matching 2013 income (empty string when no 2013 record exists).
        reduce = function(m) {
          mat_2013 = m[m[,1] == "2013",-1]
          mat_2014 = m[m[,1] == "2014",-1]
          # Left join mat_2014 on mat_2013
          index = match(rownames(mat_2014), rownames(mat_2013))
          mat_2014 = cbind(mat_2014, "") # make an empty column for 2013 data
          if(any(!is.na(index))) {
            mat_2014[!is.na(index),3] = mat_2013[index[!is.na(index)],1]
          }
          # Reformat the output data
          output = cbind(rownames(mat_2014), mat_2014[,c(3,1,2)])
          rownames(output) = NULL
          return(output) # format: key|income_2013|income_2014|state_name
        },
        # Second input is attached via a raw streaming option. NOTE(review):
        # the prose above calls this file "input02_hh_income_2014" -- confirm
        # the "input03" name on disk is the intended one.
        hadoop.opt="-input iotools_examples/input/input03_hh_income_2014.csv"
        )
# You can see how hadoop distributed the job by seeing the file sizes of the
# ten reducers:
system(paste0(hfs, " -du iotools_examples/output02"))
# And we can view a few rows of the join by calling hadoop fs -cat (also
# can use -tail on a particular file, but this requires making sure you
# know which reducers actually received input data, which may not be all
# of them in this small example)
system(paste0(hfs, " -cat iotools_examples/output02/part-* | head -n 30"))
|
87550a12810f8754944989296cbf20beb1de5671 | c1140c29282b6135c1c828196d7245972c018de7 | /TwoLassoCpp/R/GriffingsBrown.R | 844531c3a48791ce41670035ee199b151779fbb9 | [] | no_license | lenarcica/SimulationStackForBayesSpike | 467835d2cac63099c357ceb3f29b0e641d806965 | 3a54f517a835a094b60f9fba20efa7b760949e3f | refs/heads/master | 2020-04-15T19:01:25.806506 | 2019-01-09T18:36:48 | 2019-01-09T18:36:48 | 164,934,682 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,773 | r | GriffingsBrown.R | #############################################################################
## 2009-2011 Alan Lenarcic
##
## Implementation of this alternate estimator as requested by a suggestion
## from outside request, did not quite achieve what it was hoped to.
## Using Coordinate descent against an integrated penalty proved to require
## a bit more intuition than the Bayesian EM version used in other penalties.
## It is believed that a continuous implementation of a psy function
## would make the work on this estimator easier
##
## In any case, this estimator had multiple free parameters, and such
## the method to conduct cross validation did not have success.
## Cross validation likely requires more experience from using east estimator
## to better search relevant space for anestimator.
##
##
## Build (or load from the installed TwoLasso package's data cache) the
## lookup tables describing the NEG-prior penalty on a log-spaced grid of
## |beta| values.
##
## On the first call for a given (OnLambda, OnGammaSq) pair the tables are
## computed via MakeQuadraticPen() -- separately for |beta| <= 1 and
## |beta| > 1, which need different Psi integration grids -- merged, and
## saved under <TwoLasso>/data/ so later calls simply load() the file.
##
## Returns a list with Beta (the grid), QuadraticPen, SIntegral, SInvSum,
## SSecondInvSum and DensBeta (SIntegral normalized to a density via a
## trapezoid-style integral over the Beta grid).
##
## NOTE(review): the cached branch load()s into .GlobalEnv and then reads
## Beta/SIntegral/... as globals; callers should not rely on those names
## remaining in the global environment afterwards.
SaveDataOnce <- function(OnLambda, OnGammaSq) {
  ## Locate the installed TwoLasso package directory on the search path.
  MyDirs = unlist(searchpaths());
  if (any( substr(MyDirs, nchar(MyDirs) - nchar("TwoLasso")+1, nchar(MyDirs))
    == "TwoLasso")) {
    PathOfPackage = MyDirs[
      substr(MyDirs, nchar(MyDirs) - nchar("TwoLasso")+1, nchar(MyDirs))
      == "TwoLasso" ];
  } else {
    ## BUG FIX: PathOfPackage was previously left undefined on this path,
    ## producing an obscure "object not found" error further down.
    stop("SaveDataOnce: TwoLasso package not found on the search path.");
  }
  WholePathData = paste(PathOfPackage, "//data", sep="");
  dir.create(WholePathData, showWarnings=FALSE);
  NameFile = NameNEGRData(OnLambda, OnGammaSq);
  ListFiles = list.files(WholePathData);
  if (any(ListFiles == NameFile)) {
    ## Cache hit: load the saved tables (into the global environment).
    load( paste(WholePathData, "//", NameFile, sep=""), .GlobalEnv);
    if (!exists("DensBeta") || is.null(DensBeta)) {
      ## Older cache files lack DensBeta; rebuild it from SIntegral with a
      ## trapezoid approximation of the integral over the Beta grid.
      DensBeta = SIntegral /
        ( .5 * sum( SIntegral[2:(length(SIntegral)-1)] *
        (Beta[3:(length(SIntegral))] - Beta[1:(length(SIntegral)-2)] ) ) +
        SIntegral[1] *(Beta[2] - Beta[1]) +
        SIntegral[length(SIntegral)] * (Beta[length(SIntegral)] -
          Beta[length(SIntegral)-1])
        )
    }
    return(list(Beta=Beta, QuadraticPen = QuadraticPen,
      SIntegral=SIntegral, SInvSum = SInvSum,
      SSecondInvSum = SSecondInvSum, DensBeta = DensBeta));
  } else {
    ## Cache miss: tabulate the penalty on a log-spaced beta grid.
    AllBetaValues = .5 * exp((-400:300)/50);
    LessOneBetaValues = AllBetaValues[AllBetaValues <= 1];
    MoreOneBetaValues = AllBetaValues[AllBetaValues > 1];
    LogMaxBetaSq = log( max(AllBetaValues)^2 ) / log(10);
    ## Wide geometric Psi grid for the |beta| > 1 half of the table.
    NPsi = 49000;
    PsiValues = c((1:9)/10000,(1:999)/1000, 10^( 5 * LogMaxBetaSq * (0:NPsi) / NPsi) );
    logOutInvPsiValues = -log(min(PsiValues)) + (0:50)/10
    On2 = MakeQuadraticPen(OnLambda, OnGammaSq, VerboseInt = 25,
      Beta = MoreOneBetaValues, PsiValues = PsiValues,
      logOutInvPsiValues = logOutInvPsiValues);
    ## Denser, linear Psi grid for |beta| <= 1.
    NPsi = 50000;
    PsiValues = c((1:9)/10000,(1:999)/1000, 1+1000 * (1:NPsi) / (NPsi) );
    DoubleFactor = 2*abs( min(log(AllBetaValues)));
    logOutInvPsiValues = -log( min(PsiValues) ) +
      ( DoubleFactor + 4 + log( min(PsiValues) )) *(0:NPsi) / NPsi;
    On1 = MakeQuadraticPen(OnLambda, OnGammaSq, VerboseInt = 25,
      Beta = LessOneBetaValues, PsiValues = PsiValues,
      logOutInvPsiValues = logOutInvPsiValues );
    ## Stitch the two halves back together in increasing beta order.
    Beta = c(On1$Beta, On2$Beta);
    QuadraticPen = c(On1$QuadraticPen, On2$QuadraticPen);
    SInvSum = c(On1$SInvSum, On2$SInvSum);
    SSecondInvSum = c(On1$SSecondInvSum, On2$SSecondInvSum);
    SIntegral = c(On1$SIntegral, On2$SIntegral);
    ## Normalize SIntegral to a density (same trapezoid rule as above).
    DensBeta = SIntegral /
      ( .5 * sum( SIntegral[2:(length(SIntegral)-1)] *
      (Beta[3:(length(SIntegral))] - Beta[1:(length(SIntegral)-2)] ) ) +
      SIntegral[1] *(Beta[2] - Beta[1]) +
      SIntegral[length(SIntegral)] * (Beta[length(SIntegral)] -
        Beta[length(SIntegral)-1])
      )
    ## BUG FIX: DensBeta is now saved and returned as well. Previously this
    ## fresh-computation branch dropped it, so the first call for a pair
    ## returned a list without DensBeta while every cached call included it
    ## (and the !exists("DensBeta") guard above shows it was meant to be
    ## present in the cache file).
    save(Beta=Beta, QuadraticPen=QuadraticPen,
      SIntegral=SIntegral, SInvSum = SInvSum,
      SSecondInvSum = SSecondInvSum, DensBeta = DensBeta,
      file=paste(WholePathData, "//", NameFile, sep="") )
    return(list(Beta=Beta, QuadraticPen = QuadraticPen,
      SIntegral=SIntegral, SInvSum = SInvSum,
      SSecondInvSum = SSecondInvSum, DensBeta = DensBeta));
  }
}
## Build the cache file name for a given (lambda, gamma^2) pair, rendering
## each numeric hyperparameter through tSeq() into a file-name-safe token.
NameNEGRData <- function(OnLambda, OnGammaSq) {
  paste0("OnL", tSeq(OnLambda), "OnG", tSeq(OnGammaSq), ".Rdata")
}
## Tabulate the NEG-prior quadratic penalty and related quantities on the
## supplied Beta grid by delegating to the compiled routine
## "NEGLassoMakeTableShell".
##
## The zero-filled vectors allocated below are filled *in place* by the C
## code (a .Call side effect), so they must be allocated here with the same
## length as Beta before the call.
## NOTE(review): this relies on the C routine writing into its arguments;
## do not "simplify" by passing shared/aliased vectors.
MakeQuadraticPen <- function(OnLambda, OnGammaSq, VerboseInt = 100,
  Beta = .1 * exp((-450:450)/40), PsiValues = (1:100000) / 100,
  logOutInvPsiValues = log(100) + (0:10000)/10) {
  # Output buffers, one entry per grid point in Beta.
  QuadraticPen = Beta * 0;
  SIntegral = Beta * 0;  SSecondInvSum = Beta*0;  SInvSum = Beta*0;
  .Call("NEGLassoMakeTableShell", QuadraticPen, PsiValues, logOutInvPsiValues,
    Beta, OnLambda, OnGammaSq, SIntegral, SInvSum,
    SSecondInvSum, VerboseInt)
  # Return the filled tables together with the grids/hyperparameters used.
  return(list(QuadraticPen = QuadraticPen, Beta = Beta,
    SIntegral = SIntegral, SInvSum = SInvSum, SSecondInvSum = SSecondInvSum,
    OnLambda = OnLambda, OnGammaSq = OnGammaSq,
    PsiValues=PsiValues, logOutInvPsiValues=logOutInvPsiValues));
}
# On = MakeInvPsiOfBeta(OnLambda = .5, OnGammaSq = 1, VerboseInt= 100);
# plot( On$InvPsiValues~ On$Beta, type="l");
# plot( log(On$InvPsiValues) ~ log(On$Beta), type="l");
## Multi-start wrapper around FitNEGLasso().
##
## Strategy: compute a pseudoinverse least-squares solution MLLSBeta, then
## generate NRandomStarts perturbed starting points by adding noise lying in
## the null space of X (Z - A t(A) Z with A = svd(X)$v, so fitted values are
## unchanged), fit each with FitNEGLasso, and score every successful fit by
##   0.5 * RSS - sum(log prior density at the fitted betas)
## using the tabulated DensBeta density from SaveDataOnce().
##
## Returns list(ReturnBetas = NRandomStarts x p matrix of fits, Scores,
## AllFits = raw FitNEGLasso outputs), or NULL when neither svd(X) nor any
## MLLSBeta fallback could be computed.
##
## NOTE(review): on a freshly built penalty cache PenaltyInfo$DensBeta may
## be NULL, which would make the scoring step below error -- confirm the
## cache file contains DensBeta.
SuperNEGLasso <- function(X,Y, OnLambda = .5, OnGammaSq = 1,
  OnBeta = -1, NRandomStarts = 5,
  OnGammas = -999, InitKKs = -5,
  NumCDOConv = -999, CDOEpsilon = -999,
  NumEMSteps = 10, EMCauchy=.00001,
  TotalLoops = 100, MaxEpsilon=.00001,
  StartBeta = NULL, Verbose = 0) {
  SVDX = NULL;
  try(SVDX <- svd(X));
  if (is.null(SVDX)) {
    print("SuperNEG: Cannot work with NULL svd of X"); flush.console();
    return(NULL);
  }
  PenaltyInfo = SaveDataOnce(OnLambda, OnGammaSq);
  DensBeta = PenaltyInfo$DensBeta;
  if (Verbose > 0) {
    print("SuperNEGLasso: Starting "); flush.console();
  }
  ## Columns of V span row(X); used below to build null-space perturbations.
  A = SVDX$v;
  ## NOTE(review): library() inside a function modifies the search path;
  ## requireNamespace("corpcor") + corpcor::pseudoinverse() would be cleaner.
  library(corpcor);
  MLLSBeta <- NULL;
  try(MLLSBeta <- pseudoinverse( t(X) %*% X ) %*% t(X) %*% Y);
  if (Verbose > 0) {
    print("SuperNEGLasso: Got MLLSBeta! "); flush.console();
  }
  if (is.null(MLLSBeta)) {
    ## Fallback: seed from a plain NEG-Lasso fit when the pseudoinverse
    ## least-squares solve failed.
    try( MLLSBeta <- (FitNEGLasso(X=X, Y=Y, OnLambda=OnLambda,
      OnGammaSq= OnGammaSq, OnBeta = -1, OnGammas= OnGammas,
      InitKKs = InitKKs, NumCDOConv = NumCDOConv, CDOEpsilon = CDOEpsilon,
      NumEMSteps=NumEMSteps, EMCauchy = EMCauchy,
      TotalLoops = TotalLoops, MaxEpsilon = MaxEpsilon,
      StartBeta = StartBeta, Verbose=Verbose-1))$ReturnBetas );
    if (is.null(MLLSBeta)) {
      print("SuperNEG: MLSSbeta is Still Null \n");
      flush.console();return(NULL);
    }
  }
  MyFits = list();
  Scores = rep(0, NRandomStarts);
  ReturnBetas = matrix(0, NRandomStarts, length(X[1,]) );
  ## One NEG-Lasso fit per random start.
  for (ii in 1:NRandomStarts) {
    ## Perturbation lies in the null space of X, so X %*% TryBeta equals
    ## X %*% MLLSBeta (only the coefficients move, not the fitted values).
    Z = rnorm( length(A[,1]) );
    TryBeta = MLLSBeta + Z - A %*% t(A) %*% Z;
    RT1 = NULL;
    if (Verbose > 0) {
      print(paste("SuperNEGLasso: Fitting ",ii, sep="")); flush.console();
    }
    ## NOTE(review): `NumEMSteps` below is passed positionally; it happens
    ## to match the only remaining unmatched formal of FitNEGLasso, but
    ## naming it explicitly would be safer.
    try( RT1 <- FitNEGLasso(X=X, Y=Y, OnLambda=OnLambda, OnGammaSq=OnGammaSq,
      StartBeta = TryBeta, OnGammas=OnGammas, InitKKs=InitKKs,
      NumCDOConv=NumCDOConv, CDOEpsilon=CDOEpsilon,
      NumEMSteps, EMCauchy=EMCauchy, TotalLoops=TotalLoops, MaxEpsilon=MaxEpsilon,
      Verbose = Verbose -1) );
    if (is.null(RT1) || length(RT1) == 1) {
      print("SuperNEGLasso: Failed to get a fit at this one");
    } else {
      if (Verbose > 0) {
        print(paste("SuperNEGLasso: Succesful fit for ii = ", ii, sep=""));
        flush.console();
      }
      ## Score = 0.5 * residual sum of squares minus the log prior density
      ## of each fitted coefficient (looked up on the tabulated grid).
      MapBackReturnBetas = .MapBackBeta(PenaltyInfo$Beta, abs(RT1$ReturnBetas))
      Score = .5 * sum( (Y - X %*% RT1$ReturnBetas)^2 ) -
        sum(log(DensBeta[MapBackReturnBetas]));
      RT1$Score = Score;
      ReturnBetas[ii,] = RT1$ReturnBetas;
      Scores[ii] = Score;
      if (Verbose > 0) {
        print(paste("SuperNEGLasso: Fit was ", Score, sep=""));
        print(paste("  CurrentScores: ", paste(Scores, collapse=", "), sep=""));
        flush.console();
      }
    }
    MyFits[[ii]] = RT1;
  }
  if (Verbose > 0) {
    print("SuperNEGLasso: AllDone "); flush.console();
  }
  return(list(ReturnBetas = ReturnBetas, Scores=Scores, AllFits=MyFits));
}
## Fit the NEG (Normal-Exponential-Gamma) Lasso by EM: repeatedly solve a
## weighted Lasso (CoordinateDescent) in which each coordinate's L1 weight
## is the tabulated NEG penalty evaluated at the current |beta|.
##
## X: design matrix (n x p); Y: response vector.
## OnLambda, OnGammaSq: NEG prior hyperparameters (select the penalty table
##   built/cached by SaveDataOnce).
## OnGammas, InitKKs, NumCDOConv, CDOEpsilon, TotalLoops, MaxEpsilon:
##   passed through to CoordinateDescent (negative sentinels = defaults).
## NumEMSteps / EMCauchy: maximum EM iterations and L1 convergence tolerance.
## StartBeta: optional warm start of length p; otherwise a first CDO run
##   from zero with a constant penalty provides the starting point.
## Returns list(ReturnBetas, LinearPenalty, CDOOut, FirstCDO), or NULL on
## degenerate input.
FitNEGLasso <-function(X, Y, OnLambda = .5, OnGammaSq = 1,
  OnGammas = -999, InitKKs = -5,
  NumCDOConv = -999, CDOEpsilon = -999,
  NumEMSteps = 10, EMCauchy=.00001,
  TotalLoops = 100, MaxEpsilon=.00001,
  StartBeta = NULL, Verbose = 0) {
  if (length(X) == 1) {
    ## BUG FIX: this guard used a bare `return;`, which in R just evaluates
    ## the `return` function object and falls through instead of exiting.
    print("FitNEGLasso: X is not good"); return(NULL);
  }
  if (length(Y) == 1) {
    ## BUG FIX: the Y guard printed a warning but never returned.
    print("FitNEGLasso: Y is not Good"); return(NULL);
  }
  PenaltyInfo = SaveDataOnce(OnLambda, OnGammaSq);
  ## Constant starting penalty: table value at |beta| = 1.
  StartPenii = .MapBackBeta(PenaltyInfo$Beta, 1);
  StartPenalty = PenaltyInfo$QuadraticPen[StartPenii] * .5;
  p = length(X[1,]);
  if (Verbose > 0) {
    print("NEGLasso: Starting to run first CDO");
    flush.console();
  }
  if (Verbose > 1) {PrintFlag = Verbose -1;} else {PrintFlag = -1;}
  ## We find LaPaplace distribution with variance matching Psi
  ## Since 2/Lambda = 1/ Gamma = Psi, this is LaPlace with 1/Psi
  FirstCDO = NULL;
  if (is.null(StartBeta) || length(StartBeta) != p ) {
    ## No usable warm start: one plain Lasso pass from zero coefficients.
    FirstCDO = CoordinateDescent(xx = X, yy = Y,
      OnBeta = rep(0, length(X[1,])),
      OnGammas = rep(StartPenalty, p),
      InitKKs=InitKKs, NumCDOConv=NumCDOConv, CDOEpsilon=CDOEpsilon,
      TotalLoops=TotalLoops, MaxEpsilon=MaxEpsilon, Verbose = PrintFlag);
    OnBeta = FirstCDO$ReturnBetas;
  } else {
    OnBeta = StartBeta;
  }
  ## Coordinate-wise penalties from the NEG table at the current |beta|.
  LinBeta = PenaltyInfo$QuadraticPen[
    .MapBackBeta(PenaltyInfo$Beta,abs(OnBeta)) ]
  ## BUG FIX: initialize NewCDO and use seq_len() so NumEMSteps = 0 skips
  ## the loop instead of iterating over 1:0 = c(1, 0) and then referencing
  ## an undefined NewCDO below.
  NewCDO = NULL;
  for (jj in seq_len(NumEMSteps)) {
    NewCDO = CoordinateDescent(xx = X, yy = Y,
      OnBeta = OnBeta,
      OnGammas = LinBeta,
      InitKKs=InitKKs, NumCDOConv=NumCDOConv, CDOEpsilon=CDOEpsilon,
      TotalLoops=TotalLoops, MaxEpsilon=MaxEpsilon, Verbose = PrintFlag);
    NewBeta = NewCDO$ReturnBetas;
    if (sum(abs(NewBeta-OnBeta)) <= EMCauchy) {
      ## Cauchy convergence in L1 distance: stop early.
      OnBeta = NewBeta;
      break;
    }
    OnBeta = NewBeta;
    ## E-step: refresh the penalties at the new coefficients.
    LinBeta = PenaltyInfo$QuadraticPen[
      .MapBackBeta(PenaltyInfo$Beta,abs(OnBeta)) ];
  }
  ## Final penalties at the converged coefficients.
  LinBeta = PenaltyInfo$QuadraticPen[
    .MapBackBeta(PenaltyInfo$Beta,abs(OnBeta)) ];
  RetList = list( ReturnBetas = OnBeta, LinearPenalty = LinBeta,
    CDOOut = NewCDO, FirstCDO = FirstCDO);
  return(RetList);
}
##CoordinateDescent(xx = -1, yy = -1, XTX = -1, XTY = -1, NLen = -1,
## TotalLoops = -999, MaxEpsilon = -999, OnBeta = -1, OnGamma = -1, OnLambda = -1,
## RecordBetasFlag = FALSE, OnGammas = -999, InitKKs = -5,
## NumCDOConv = -999, CDOEpsilon = -999, WLSWeights = -1)
## Map each value in FindBeta to the index of the nearest entry of BetaList,
## assuming BetaList is an equally spaced grid on the log scale. Indices are
## clamped to [1, length(BetaList)], so out-of-range betas hit the grid ends.
.MapBackBeta <- function(BetaList, FindBeta) {
  log.grid <- log(BetaList);
  n.grid <- length(log.grid);
  ## Linear interpolation of log(FindBeta) onto the 1..n.grid index scale.
  raw.idx <- round( (log(FindBeta) - log.grid[1]) * (n.grid - 1) /
    (log.grid[n.grid] - log.grid[1]) + 1 );
  ## Clamp into the valid index range.
  pmin( pmax(raw.idx, 1), n.grid );
}
|
ac71e5f3d2979cda17993abeea01523a35789c6b | a5bdae6a6d80f6cfb4fbf4a22869cc59fe1bfaa9 | /portfolio_optim.R | f4190263ff044848a6c8e81bbea49e02a1ef51ab | [] | no_license | K-Schubert/R_Programming_For_Data_Science | c223a7a1c7320718b580c594b57c356c8a94669f | c8003dc7b6033d3c125ab78b101a3e65202d391f | refs/heads/master | 2020-06-13T10:28:08.075762 | 2019-07-05T09:26:57 | 2019-07-05T09:26:57 | 194,627,385 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,443 | r | portfolio_optim.R | install.packages('quantmod')
library(quantmod)
library(magrittr)
library(tidyr)
library(dplyr)
library(ggplot2)
# Two-asset minimum-variance portfolio example.
symbols <- c("AAPL", "GOOG")
# Pull adjusted daily prices from Yahoo (getSymbols creates AAPL and GOOG
# xts objects in the workspace as a side effect).
getSymbols(symbols, from = "2018-01-01", to = Sys.Date(), src = "yahoo", adjust = TRUE)
AAPL.ret <- dailyReturn(na.omit(AAPL$AAPL.Close))
GOOG.ret <- dailyReturn(na.omit(GOOG$GOOG.Close))
rets <- cbind(AAPL.ret$daily.returns, GOOG.ret$daily.returns)
mean.ret <- c(mean(AAPL.ret), mean(GOOG.ret))
cov.matrix <- cov(rets)
# Minimum-variance weight on asset 1 (AAPL):
#   omega* = (sigma_22 - sigma_12) / (sigma_11 + sigma_22 - 2 * sigma_12)
omega.star <- (cov.matrix[2,2] - cov.matrix[2,1])/(cov.matrix[1,1] + cov.matrix[2,2]
                                                   - 2*cov.matrix[2,1])
omega.star
# Invest 41% in AAPL and 59% in GOOG
# (NOTE: the 41/59 split reflects one historical run; actual weights depend
# on the data downloaded through Sys.Date().)
mean_investment <- omega.star*mean.ret[1] + (1 - omega.star)*mean.ret[2]
# Portfolio variance: w^2 s11 + (1-w)^2 s22 + 2 w (1-w) s12.
var_investment <- omega.star^2*cov.matrix[1,1] + (1 - omega.star)^2*cov.matrix[2,2] +
  2*omega.star*(1 - omega.star)*cov.matrix[1,2]
# Summary table: expected value / variance for each asset and the portfolio.
investment_summary <- matrix(NA, 2, 3)
dimnames(investment_summary)[[1]] <- c("Expected value", "Variance")
dimnames(investment_summary)[[2]] <- c("Apple", "Google", "Investment")
investment_summary[1, ] <- c(mean.ret, mean_investment)
investment_summary[2, ] <- c(diag(cov.matrix), var_investment)
knitr::kable(investment_summary)
# Risk/return scatter: the combined portfolio should sit left of (less
# volatile than) both single assets.
plot(sqrt(investment_summary[2,]), investment_summary[1,], col=c(1,2,3), pch=16,
     xlab='St Dev of Returns', ylab='Mean Return')
legend('topleft', c('AAPL', 'GOOG', 'Investment'), cex=0.75, pch=16, col=c(1,2,3))
grid()
|
8830315ef934997c949ef331433295ee64d99d3b | 285274eae7ac53e41fb660d5b5d147dc15042362 | /tests/testthat/test_read.bed.R | c31dca101c48800410dd9e13f011c17493b01e5d | [] | no_license | cran/chicane | ebea6d449d21ccfa9a74aa8a4c86a5ad49e77a45 | a6c7fced08336275e6c339192518e05af0954c7a | refs/heads/master | 2023-03-20T07:55:49.276385 | 2021-11-06T14:00:16 | 2021-11-06T14:00:16 | 146,753,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 972 | r | test_read.bed.R | context('read.bed');
# Path to the example baits BED file shipped with the chicane package.
baits <- system.file('extdata', '2q35.bed', package = 'chicane');
# Input validation: read.bed() must reject missing files and anything that
# is not a single character path.
test_that('Throws error on bad input', {
	# file doesn't exist
	expect_error( read.bed( tempfile() ) );
	# not a single character string
	expect_error( read.bed( as.factor(baits) ) );
	expect_error( read.bed( c(baits, baits) ) );
	});
# The zero.based flag shifts only the start coordinate: extracting the
# start from the "chr:start-end" ids, zero-based starts must be exactly
# one less than one-based starts.
test_that('Zero-based works as expected', {
	zero.based.ids <- read.bed(baits);
	one.based.ids <- read.bed(baits, zero.based = FALSE);
	zero.based.starts <- as.numeric(
		gsub('(.*):(.*)-(.*)', '\\2', zero.based.ids)
		);
	one.based.starts <- as.numeric(
		gsub('(.*):(.*)-(.*)', '\\2', one.based.ids)
		);
	expect_equal(zero.based.starts + 1, one.based.starts);
	});
# Gzipped BED files should be read transparently (no warnings or messages).
test_that('Can read gzipped files', {
	fragments <- system.file('extdata', 'GRCh38_HindIII_chr2.bed.gz', package = 'chicane');
	expect_silent( read.bed(fragments) );
	});
|
4a115cf838a974853d1b05bbbcc079338ed31f22 | 71b54ce8c696f37bc7674bdc3ff4c57f8b400342 | /components/Pflege.R | f48d61cbe6455ea4f1855da06b2ce6a42cb8353a | [] | no_license | wolass/dermiantor4 | 634b33b530a80837ae29b6c7a2e0e051083e353c | 7c217f1f086b996735ce8b09f2a037ec6463320b | refs/heads/master | 2023-08-28T13:15:08.751895 | 2021-10-08T16:28:26 | 2021-10-08T16:28:26 | 413,715,889 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 161 | r | Pflege.R | Pflege <- c("Excipial U10 Lipolotio",
"Cetaphil® Feuchtigkeitscreme",
"5% Polidocanol in Mischsalbe und Basiscreme DAC",
"") |
829d387aded989e4ab3dcbaefd1df834d6af51f7 | d1dcf5b0b158603b82dae6b9eaef1a67395e8442 | /kribb-r-script4.R | f51b2542a7730221fededa4228b6b7b94b433def | [] | no_license | greendaygh/KRIBBR2020 | c3c84cef745319634e99e9accbd75d620610f4a9 | 14ef98253f196b593df4628d8506c2e9879703dd | refs/heads/master | 2022-12-31T03:33:36.420181 | 2020-10-16T14:26:18 | 2020-10-16T14:26:18 | 303,541,581 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,131 | r | kribb-r-script4.R | ## https://greendaygh.github.io/KRIBBR2020/
library(tidyverse)
library(Biobase)
library(ALL)
library(hgu95av2.db)
data(ALL)
ex_data <- exprs(ALL)[1:30,]
ph_data <- pData(ALL)[,c("cod", "sex", "BT")]
ph_data %>% head
ph_data <- ph_data[complete.cases(ph_data),]
feature_names <- rownames(ex_data)
gene_names <- unlist(as.list(hgu95av2SYMBOL[feature_names]))
idx <- which(is.na(gene_names) | duplicated(gene_names))
ex_data <- as.data.frame(ex_data[-idx,])
rownames(ex_data) <- gene_names[-idx]
ex_data[1:3,1:3]
ex_data_mlt <- ex_data %>%
rownames_to_column(var="symbol") %>%
pivot_longer(-symbol) %>%
mutate(bt=ph_data[name,"BT"])
## indexing example
m <- matrix(sample(100), 10, 10)
rownames(m) <- LETTERS[1:10]
m["A",]
m[c("A", "C"),]
m[rep(c("A", "C"), 10),]
ex_data_mlt %>%
group_by(symbol) %>%
ggplot(aes(x=bt, y=value, group=bt)) +
geom_boxplot() +
facet_wrap(~symbol, ncol=9, scales="free") +
theme(
axis.text.x = element_text(angle = 90, size=8, hjust = 1, vjust=0.5)
)
# theme_bw()
### ----------------
ex_summary <- ex_data_mlt %>%
group_by(symbol, bt) %>%
summarise(m = mean(value))
ggplot(ex_summary, aes(x=bt, y=m)) +
geom_bar(stat="identity") +
facet_wrap(~symbol, nrow=3)
### ------------------------
ex_data_mlt <- ex_data %>%
rownames_to_column(var="symbol") %>%
pivot_longer(-symbol) %>%
mutate(sex=ph_data[name,"sex"])
## example
t.test(c(1:5), c(6:10))
z <- data.frame(y=c(1:10),
x=c(rep("a", 5), rep("b", 5)))
t.test(y~x, data=z)
z
t.test(value~sex, data=ex_data_mlt)
ex_data_mlt %>%
group_by(symbol) %>%
summarise(
tstat=t.test(value~sex)$statistic,
pval=t.test(value~sex)$p.value
)
#### test all
data(ALL)
## data
ex_data <- exprs(ALL)
ph_data <- pData(ALL)[,c("cod", "sex", "BT")]
## remove missing | duplicated genes
ph_data <- ph_data[complete.cases(ph_data),]
feature_names <- rownames(ex_data)
gene_names <- unlist(as.list(hgu95av2SYMBOL[feature_names]))
idx <- which(is.na(gene_names) | duplicated(gene_names))
ex_data <- as.data.frame(ex_data[-idx,])
rownames(ex_data) <- gene_names[-idx]
dim(ex_data)
ex_data_mlt <- ex_data %>%
rownames_to_column(var="symbol") %>%
pivot_longer(-symbol) %>%
mutate(sex=ph_data[name, "sex"]) %>%
drop_na()
## check na -- BUG FIX: this referenced an undefined object `tmp`. Coming
## right after ex_data_mlt is built, the intent is clearly to confirm the
## long table has no missing values left after drop_na().
sum(is.na(ex_data_mlt))
test_results <- ex_data_mlt %>%
group_by(symbol) %>%
summarise(
tstat=t.test(value~sex)$statistic,
pval=t.test(value~sex)$p.value
)
sig_results <- test_results %>%
filter(pval<0.01)
sel_genes <- ex_data_mlt %>%
filter(symbol %in% sig_results$symbol)
sel_genes %>%
group_by(symbol) %>%
ggplot(aes(x=sex, y=value, fill=sex)) +
geom_boxplot() +
facet_wrap(~symbol, ncol=8, scales="free")
### ANOVA
ex_data_mlt <- ex_data %>%
rownames_to_column(var="symbol") %>%
pivot_longer(-symbol) %>%
mutate(bt=ph_data[name, "BT"]) %>%
drop_na()
## anova
fit <- anova(lm(data=ex_data_mlt, formula = value~bt))
pval <- fit$`Pr(>F)`[1]
## Per-gene one-way ANOVA of expression vs. B/T stage.
## BUG FIX: the original (a) used `<-` instead of `=` inside summarise(),
## so the p-value column got a mangled name and the later
## filter(pval < 0.01) could not find it, and (b) fit lm() on the *full*
## ex_data_mlt instead of the current group's rows, giving every gene the
## same p-value. Relying on summarise()'s data masking, lm(value ~ bt)
## below sees only the current symbol's rows.
test_results <- ex_data_mlt %>%
  group_by(symbol) %>%
  summarise(
    pval = anova(lm(value ~ bt))$`Pr(>F)`[1]
  )
sig_results <- test_results %>%
filter(pval<0.01)
sel_genes <- ex_data_mlt %>%
filter(symbol %in% sig_results$symbol)
## Boxplots of the ANOVA-significant genes.
## BUG FIX: copy-paste from the t-test section -- in this ANOVA branch
## ex_data_mlt carries a `bt` column (B/T stage), not `sex`, so the plot
## must group and fill the boxplots by bt.
sel_genes %>%
  group_by(symbol) %>%
  ggplot(aes(x=bt, y=value, fill=bt)) +
  geom_boxplot() +
  facet_wrap(~symbol, ncol=8, scales="free")
## ggplot heatmap
library(tidyverse)
library(Biostrings)
refseq <- readDNAStringSet("./day4/dmpR_GESSv4.fasta.txt")
class(refseq)
pfm <- consensusMatrix(refseq)
dim(pfm)
pfm[1:10,1:5]
pfm_atgc <- t(pfm[1:4,])
load("./day4/R1_target_freq.Rdata")
ls()
target_freq %>% head
ref_freq1 <- pfm_atgc * target_freq
load("./day4/R2_target_freq.Rdata")
ref_freq2 <- pfm_atgc * target_freq
ref_freq2 %>% head
load("./day4/R3_target_freq.Rdata")
ref_freq3 <- pfm_atgc * target_freq
load("./day4/R4_target_freq.Rdata")
ref_freq4 <- pfm_atgc * target_freq
ref_freq1 %>% head(3)
ref_freq2 %>% head(3)
ref_freq3 %>% head(3)
ref_freq4 %>% head(3)
mydata <- data.frame(
r1=rowSums(ref_freq1),
r2=rowSums(ref_freq2),
r3=rowSums(ref_freq3),
r4=rowSums(ref_freq4)
)
mydata %>%
rownames_to_column(var="pos") %>%
arrange(pos) %>%
head
mydata2 <- mydata %>%
mutate(pos=nrow(.):1) %>%
arrange(pos) %>%
pivot_longer(-pos)
ggplot(mydata2, aes(x=pos, y=name, fill=value)) +
geom_tile()
## subset
mydata2 %>%
filter(pos <= 100) %>%
ggplot(aes(x=pos, y=name, fill=value)) +
geom_tile()
## scale
mydata_scaled <- mydata %>%
t %>%
scale %>%
t %>%
data.frame
## clutering
library(NbClust)
?NbClust
nc <- NbClust(mydata_scaled,
min.nc=3,
max.nc=10,
method="kmeans")
cl <- kmeans(mydata_scaled, centers=4, iter.max=10000)
table(cl$cluster)
mydata_long <- mydata %>%
mutate(pos=factor(nrow(.):1), cl=cl$cluster) %>%
#mutate(posf=factor(pos)) %>%
pivot_longer(-c(pos, cl)) %>%
arrange(cl)
mydata_long %>%
ggplot(aes(x=pos, y=name, fill=value)) +
scale_fill_viridis_c() +
geom_tile()
tiff("Figure.tiff",
width = 15,
height = 5.5,
units = 'in',
res = 300,
compression = 'lzw')
mydata_long %>%
head(100) %>%
ggplot(aes(x=pos, y=name, fill=value)) +
scale_fill_viridis_c() +
geom_tile(color="black", size=0.5) +
scale_x_discrete() +
coord_cartesian(ylim = c(0.5, 4.5), expand = FALSE, clip = "off") +
theme(panel.spacing = unit(0.5, "lines"),
plot.margin = unit(c(1,1,4,1), "lines"),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_text(size=12, angle = 60, hjust = 1, vjust=1),
axis.text.y = element_text(size=12)
)
dev.off()
mydata_long %>%
# filter(pos < 1000) %>%
ggplot(aes(x=pos, y=name, fill=value)) +
scale_fill_viridis_c() +
geom_tile(color="black", size=0.5)
## annotate
x <- seq(-5, 5, by=0.01)
x2 <- x + runif(1001)
y <- 1/(1+exp(-x))
z <- data.frame(x2, y)
ggplot(z, aes(x=x2, y=y)) +
geom_point() +
geom_smooth()
|
67954d90059c241d42372fa1dee17c551ae302a8 | ab53f25f227a5e5108a862556916f8fd748cd606 | /R/fs_download.R | 9420cf4ff22c61aa7fffc459ec2fe0f1f97b476b | [
"CC0-1.0"
] | permissive | cran/rfigshare | cf0188e3edd618810eae7343438d6601a31993d4 | 2de78690a7f7d509ef7b66aacbc64f9afa86f97d | refs/heads/master | 2022-05-12T18:50:09.881487 | 2022-05-09T18:40:02 | 2022-05-09T18:40:02 | 17,699,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,765 | r | fs_download.R | #' Get details for an article
#'
#' @author Carl Boettiger \email{cboettig@@gmail.com}
#' @importFrom utils download.file
#' @importFrom utils select.list
#' @param article_id number
#' @param urls_only logical (default TRUE) to only return the URLs to the
#' downloadable objects but do not call download.file. If FALSE, will download files
#' @param mine logical (default FALSE). Set to true to see article details for your own non-public articles
#' @param session the authentication credentials from \code{\link{fs_auth}}
#' @param show_versions logical, show what versions are available
#' @param version show a given version number
#' @param ... additional arguments to \code{\link{download.file}}
#' @seealso \code{\link{fs_auth}} \code{\link{download.file}}
#' @references \url{http://api.figshare.com} \url{https://github.com/ropensci/rfigshare}
#' @import httr
#' @export
#' @examples \dontrun{
#' url <- fs_download(90818)
#' data <- read.csv(url)
#' articles <- fs_search("SciFund")
#' ids <- fs_ids(articles)
#' fs_download(ids, urls_only=FALSE)
#' }
fs_download <-
function(article_id, urls_only = TRUE, mine=is_mine(article_id), session = fs_get_auth(),
show_versions=FALSE, version=NULL, ...) {
details <- lapply(article_id, fs_details, mine = mine, session = session,
show_versions = show_versions, version = NULL)
filenames <- unlist(sapply(details, function(output)
unlist(lapply(output$files, function(f) f$name))))
urls <- unlist(sapply(details, function(output)
unlist(lapply(output$files, function(f) f$download_url))))
if(!urls_only)
sapply(1:length(urls), function(i)
download.file(urls[i], destfile=filenames[i],
...))
urls
}
|
a4c4886fd61eabf0a19b3189122891fc3a0f7d28 | 6a99a74b47d04da0e74111b37dcac005d98e547e | /Feno.R | 68fca330c68eb662dfbaaf9773a3693710d33ab9 | [] | no_license | NtsoaBe/Anah | 265b044e3dc5314592d7361b4312e02049fcfce0 | 52c199bd1695597f9d12eaf173e47d0237048eeb | refs/heads/master | 2022-06-15T23:32:41.929305 | 2022-06-03T09:16:26 | 2022-06-03T09:16:26 | 218,938,615 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,169 | r | Feno.R | library(rvest)
library(tidyverse)
link="https://duproprio.com/fr/quebec-rive-sud/levis/maison-a-vendre?pageNumber=2"
page=read_html(link)
adresse<- page %>% html_nodes(".search-results-listings-list__item-description__address") %>% html_text()
adresse<-str_replace_all(adresse, "\n","")
metadata_aire_terrain <- page %>% html_nodes(".search-results-listings-list__item-description__characteristics") %>% html_text() %>%
str_split("\n", simplify = TRUE)
aireHabitable <- metadata_aire_terrain[,13]
aireHabitable <- parse_number(str_extract(aireHabitable, "\\d+(?:[.,\\s]\\d{0,3})?"), locale = locale(decimal_mark = ".", grouping_mark = " "))
metadata_aire_terrain <- page %>% html_nodes(".search-results-listings-list__item-description__characteristics") %>% html_text() %>%
str_split("\n", simplify = TRUE)
taille_terrain <- metadata_aire_terrain[,17]
taille_terrain <- parse_number(str_extract(taille_terrain , "\\d+(?:[.,\\s]\\d{0,3})?"), locale = locale(decimal_mark = ".", grouping_mark = " "))
prix <- page %>% html_nodes(".search-results-listings-list__item-description__price h2") %>% html_text()
prix <- extract_numeric(prix)
region <- page %>% html_nodes(".search-results-listings-list__item-description__city span") %>% html_text()
nombre_chambre<- page %>% html_nodes(".search-results-listings-list__item-description__characteristics__item:nth-child(1)") %>% html_text() %>%
str_split("\n", simplify = TRUE)
nombre_chambre<- parse_number(nombre_chambre[,3])
salle_bain<- page %>% html_nodes(".search-results-listings-list__item-description__characteristics__item:nth-child(2)") %>% html_text()%>%
str_split("\n", simplify = TRUE)
#replace_na(): replace NA value in a column x and y by 0
nbre_salleBain <- tibble(x=parse_number(salle_bain[,3]),y=parse_number(salle_bain[,4])) %>% replace_na(list(x=0,y=0)) %>%mutate(nbr=x+y)%>% select(nbr)
df <- data.frame(Prix=prix, Aire_habitable=aireHabitable, Taille_terrain=taille_terrain, Nombre_chambre=nombre_chambre, Nombre_salleBain=nbre_salleBain, Adresse=adresse, Region=region)
|
ad9bc030b93c2c5642535adcec9097872d18354a | 1afe352deb16399782ba31511f70ccb4bf584314 | /Routines_Package/Rpu2.Rcheck/00_pkg_src/Rpu2/man/resume.motif.Rd | 64fbd3a084e059557244b45741794720cd7b0877 | [] | no_license | jcrb/RPU_Doc | 8b5a5915aa0afc3d5c11d94cb22469b5e714f9a6 | cccef7acd34406a51bc7db18a43c6d6408f94d4f | refs/heads/master | 2021-01-17T09:25:11.745058 | 2016-10-03T08:34:33 | 2016-10-03T08:34:33 | 22,011,440 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 442 | rd | resume.motif.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rapport.R
\name{resume.motif}
\alias{resume.motif}
\title{analyse un vecteur de MOTIF}
\usage{
resume.motif(vx)
}
\arguments{
\item{vx}{vecteur de Char (motif)}
}
\value{
vecteur nommé: "n.na", "p.na", "n.rens", "p.rens"
}
\description{
retourne: le nombre d'éléments du vecteur (NA inclus), le nombre de NA, nombre et pourcentage de valeurs renseignées,
}
|
700024a1d14f9ae094c30ad3ed6249d59359e098 | cd5b9ad17f02006367fd52a8b06d2c36b647b12d | /test_boot.R | 79e772b2baf3c4e57673580ed571de0deed40eb5 | [] | no_license | aaamini/GMarkov-sampling | 9f404846a9ac803f2ed8072915083b2e22fc26fb | c3c6c06d4a3bd74f58900786f9ae1af3bd16084a | refs/heads/master | 2023-04-18T10:19:05.798792 | 2022-02-18T21:00:03 | 2022-02-18T21:00:03 | 434,848,180 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,089 | r | test_boot.R | library(clime)
library(glasso)
library(tidyr)
library(dplyr)
library(tibble)
library(ggplot2)
source("viz.R")
source("utils.R")
# This is assumes Gam_seq exists in the environment -- e.g. run test_rand_sampling.R
source("test_rand_sampling.R")
# This is just the vector of lambdas for computing the regularization path (glasso calls lambda rho)
# Grid of regularization parameters for the path computations (glasso calls lambda rho)
rholist <- 10^seq(-2.5, 0, len = 100)

### Many regularization paths and the average ----
n <- 50
# generate new data from Gam_seq; keep only the sample covariance of each replicate
Sigh_seq <- lapply(Gam_seq, function(Gam) gen_data_from_Gam(n, Gam)$Sigh)
# one row per (lambda, replicate, method): relative operator-norm error of the
# estimated precision matrix along the regularization path
reg_path <- do.call(
  rbind,
  lapply(seq_along(Sigh_seq[1:50]), function(rep) {
    Sigh <- Sigh_seq[[rep]]
    Gam <- Gam_seq[[rep]]
    Omegalist_glasso <- glassopath(as.matrix(Sigh), rholist, trace = 0)$wi
    clime_out <- clime(Sigh, lambda = rholist, sigma = TRUE)
    data.frame(
      lambda = rholist,
      glasso = apply(Omegalist_glasso, 3, function(G) norm(G - Gam) / norm(Gam)),
      clime = vapply(clime_out$Omegalist, function(G) norm(G - Gam) / norm(Gam), numeric(1)),
      n = n,
      rep = rep
    )
  })
)

reg_path %>%
  pivot_longer(c(glasso, clime), names_to = "method", values_to = "error") %>%
  plot_paths_and_avg(error, alpha_range = c(0.2, 1)) +
  scale_x_continuous(trans = "log10") +
  ylim(c(0.2, 1.4)) +
  inline_legend(0.2, 0.2) +
  xlab("Regularization parameter") +
  ylab("Operator norm relative error")

ggsave(sprintf("reg_path_rel_many_reps_n = %d.pdf", n), width = 5, height = 5)

### Plot a single reg_path ----
Sigh <- Sigh_seq[[1]]
Omegalist_glasso <- glassopath(as.matrix(Sigh), rholist, trace = 0)$wi
clime_out <- clime(Sigh, lambda = rholist, sigma = TRUE)
# NOTE(review): `Gam` below comes from the environment created by the sourced
# test_rand_sampling.R — confirm it corresponds to Sigh_seq[[1]]
reg_path1 <- data.frame(
  lambda = rholist,
  glasso = apply(Omegalist_glasso, 3, function(G) norm(G - Gam)),
  clime = vapply(clime_out$Omegalist, function(G) norm(G - Gam), numeric(1))
)

reg_path1 %>%
  pivot_longer(-lambda, names_to = "method", values_to = "error") %>%
  ggplot(aes(lambda, error, color = method)) +
  geom_line(size = 1.2) +
  scale_x_continuous(trans = "log10") +
  ylab("Operator norm error") +
  theme_minimal()

ggsave("reg_path_single_rep.pdf", width = 5, height = 5)

# Optimal lambda
rholist[which.min(reg_path1$glasso)]
rholist[which.min(reg_path1$clime)]

### Simple test ----
lambda <- 0.12 # roughly the optimal lambda for both methods
Gamh_glasso <- glasso(as.matrix(Sigh), lambda)$wi
Gamh_clime <- clime(Sigh, lambda = lambda, sigma = TRUE)$Omega[[1]]
Gh_glasso <- extract_graph(Gamh_glasso)
Gh_clime <- extract_graph(Gamh_clime)
image(Matrix(Gamh_glasso))
image(Matrix(Gamh_clime))
image(Gh_glasso)
image(Gh_clime)
image(G)

### Systematic test ----
# each method maps a sample covariance to an estimated graph (at the fixed lambda above)
methods <- list()
methods[["glasso"]] <- function(Sigh) {
  extract_graph( glasso(as.matrix(Sigh), lambda)$wi )
}
methods[["clime"]] <- function(Sigh) {
  extract_graph( clime(Sigh, lambda = lambda, sigma = TRUE)$Omega[[1]] )
}
n_methods <- length(methods)

# n = 50
# # generate new data from Gam_seq
# Sigh_seq = lapply(Gam_seq, function(Gam) gen_data_from_Gam(n, Gam)$Sigh)
# # apply the methods to get estimated graphs Gh[[method]][[i]]
# Gh_seq = lapply(methods, function(method) lapply(Sigh_seq, function(Sigh) method(Sigh)))
#
# hist(comp_pairwise_dist(Gh_seq$glasso))
# # comp_dist_to_G0(Gh_seq$clime[-1], Gh_seq$clime[[1]])

# pairwise normalized Hamming distances between estimated graphs, per sample size and method
runs <- expand.grid(n = c(50, 100, 200))
res <- do.call(
  rbind,
  lapply(seq_len(nrow(runs)), function(j) {
    n <- runs[j, "n"]
    # generate new data from Gam_seq
    Sigh_seq <- lapply(Gam_seq, function(Gam) gen_data_from_Gam(n, Gam)$Sigh)
    # apply the methods to get estimated graphs Gh[[method]][[i]]
    Gh_seq <- lapply(methods, function(method) lapply(Sigh_seq, function(Sigh) method(Sigh)))
    do.call(
      rbind,
      lapply(names(methods), function(method)
        data.frame(pdist = comp_pairwise_dist(Gh_seq[[method]]), n = n, method = method)
      ))
  })
)

res %>% ggplot(aes(factor(n), pdist, fill = method)) +
  geom_violin(color = NA) +
  ylab("Pairwise Normalized Hamming Dist.") +
  xlab("Sample Size") + inline_legend(0.9, 0.9)

ggsave(sprintf("var_plot_d = %d, lambda = %2.2f.pdf", d, lambda), width = 5, height = 5)
|
2ca6aa32519a080fa393c4a82db251c109273377 | 6b955291e90d4097e13c3808523e2d20b3a71398 | /man/S.value.Rd | 515dfc6080d7cc635dd89fc8b6867f5c07a13245 | [] | no_license | cran/shipunov | 640f34408ae65c59a8fa655c23d01e5e86af38bc | 8cd6acac881f048a17ddafcfc414a4894fa02f63 | refs/heads/master | 2023-03-16T23:19:10.341396 | 2023-02-05T13:42:56 | 2023-02-05T13:42:56 | 185,279,307 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,166 | rd | S.value.Rd | \name{S.value}
\alias{S.value}
\title{S-value}
\description{
\code{S.value} returns S-values, Shannon information transforms of p-values.
}
\usage{
S.value(x)
}
\arguments{
\item{x}{Either numerical vector of p-values, or list where at least one element has the name similar to "p.value".}
}
\details{
Greenland (2019) proposes that researchers "think of p-values as measuring the _compatibility_ between
hypotheses and data." From Wasserstein et al. (2019): S-values supplement a focal p-value p with its Shannon information transform
From Wasserstein et al. (2019): S-values supplement a focal p-value p with its Shannon information transform
(s-value or surprisal) s = -log2(p). This measures the amount of information supplied by the test against the tested
hypothesis (or model): rounded off, the s-value shows the number of heads in a row one would need to see when tossing
a coin to get the same amount of information against the tosses being ``fair'' (independent with ``heads'' probability
of 1/2) instead of being loaded for heads. For example, if p = 0.03, this represents -log2(0.03) = 5 bits of information
against the hypothesis (like getting 5 heads in a trial of ``fairness'' with 5 coin tosses); and if p = 0.25, this
represents only -log2(0.25) = 2 bits of information against the hypothesis (like getting 2 heads in a trial of
``fairness'' with only 2 coin tosses).
For the convenience, S.value() works directly with output of many statistical tests (see examples). If the output is a list
which has more than one component with name similar to "pvalue", only first will be used.
}
\value{
Numerical vector.
}
\references{
Wasserstein R.L., Schirm A.L., Lazar N.A. 2019. Moving to a World Beyond ``p < 0.05''.
The American Statistician. 73(S1): 1--19.
Greenland S. 2019. Valid P-Values Behave Exactly as They Should: Some Misleading Criticisms of P-Values
and Their Resolution With S-Values. The American Statistician. 73(S1): 106--114.
}
\author{Alexey Shipunov}
% \seealso{}
\examples{
S.value(0.05)
S.value(0.01)
S.value(0.1)
S.value(0.00000000001)
S.value(t.test(extra ~ group, data = sleep))
S.value(list(pvalues=c(0.01, 0.002)))
}
\keyword{htest}
|
db9aa07b6e4074cf1073e9c3b39a553489ccbc49 | 87ecd00409f3e509080a7b1471f4903b08c1ae09 | /From-the-older-website/geometric_growth.R | 3e0f2a17e4f0ec8e6fa5e9843545413d8ae2e0b5 | [] | no_license | markjuhn/UCLA-Bootcamp | 6fb7046836114b5a773af4aee618930db1dfecd0 | 2b1a5b0e2320846cd66e0bc335fce7943e9e3f21 | refs/heads/master | 2020-04-18T02:56:05.738659 | 2016-09-22T18:19:24 | 2016-09-22T18:19:24 | 67,103,533 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 702 | r | geometric_growth.R | #Geometric growth
#geometricGrowthScript.R
#A script to simulate and plot the discrete logistic model
# Setup
# none needed, since program is so simple
# Set parameter values and initioal conditions, and initialize variables for output
NO <- 25
RR <- 1.05
ttMax <- 100 #total number of timesteps to simulate
NN <- matrix(NA, nrow = 1, ncol = ttMax + 1) # initialize variable to a vector of NA values
NN[1] <- NO #set first value to initial condition
# Loop over ttMax timesteps, using the model equation to update NN
for (tt in 1:ttMax) {
NN[tt+1] <- RR*NN[tt]
}
#Plot the results
plot(1:(ttMax+1), NN, xlab = "time", ylab = "N", type = "b", col = "blue")
#3.2.1 Mini exercise
#3.2.2 Exercise
|
0973f079c8140c923441d83169224022912da018 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/grattan/examples/prohibit_vector_recycling.Rd.R | 9f29223d6e7cd58bfd34d641389269ec38d8f1e3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 549 | r | prohibit_vector_recycling.Rd.R | library(grattan)
### Name: prohibit_vector_recycling
### Title: Prohibit vector recycling
### Aliases: prohibit_vector_recycling prohibit_vector_recycling.MAXLENGTH
### prohibit_arg_recycling.MAXLENGTH
### ** Examples
## Not run:
##D # Returns nothing because they are of the same length
##D prohibit_vector_recycling(c(2, 2), c(2, 2))
##D # Returns nothing also, because the only different length is 1
##D prohibit_vector_recycling(c(2, 2), 1)
##D # Returns an error:
##D prohibit_vector_recycling(c(2, 2), 1, c(3, 3, 3))
## End(Not run)
|
58e86de414d24f0dc01a4881f0bcc069d6d39e71 | 01fee130f72e10e8976c2d5b40cfd411844d119c | /Map.R | f721924f9ab91971c38d7ec3c751bf5fe82159eb | [] | no_license | SOLV-Code/Shiny-App-for-Synoptic | 648de8818e4f0b1ae115310c62ee451fb6f1cb89 | 02de6349d47c66ec84afd9a289d9e9c947ddd6e5 | refs/heads/master | 2021-07-13T02:08:49.349805 | 2020-06-25T22:04:03 | 2020-06-25T22:04:03 | 173,374,340 | 0 | 0 | null | 2020-06-25T22:04:05 | 2019-03-01T22:07:04 | R | UTF-8 | R | false | false | 55,283 | r | Map.R | #-------------------Leaflet Map for CU and Population Selection ------------------
# state information for controls; can't use shiny inputs directly for these,
# because the inputs don't exist until modal menus are opened
mapCtrl.isVisible <- reactiveValues(CUMarkers = TRUE, CUPolygons=FALSE, PopMarkers=TRUE, Streams=TRUE)
mapCtrl.backdrop <- reactiveVal('map')
mapCtrl.CUmarkerHoverHighlights <- reactiveVal(value = c('Polygon', 'Pops'), label = 'CUmarkerHoverHighlights')
mapCtrl.CUpolyHoverHighlights <- reactiveVal(value = c('Polygon', 'Pops'), label = 'CUpolyHoverHighlights')
mapCtrl.CUstreamHoverHighlights <- reactiveVal(value = c('Marker','Pops'), label = 'CUstreamHoverHighlights')
# ---------- Spider fly (functions for pulling overlapping markers apart ) ---------------
map.getOverlapping <- function(id, type) {
if (type == 'CUs') {
df <- map.CUMarkerData()
df[df$Lat == df[df$CU_ID == id, 'Lat'] & df$Lon == df[df$CU_ID == id, 'Lon'], 'CU_ID']
}
else if (type == 'Pops') {
df <- map.popMarkerData()
df[df$Lat == df[df$Pop_UID == id, 'Lat'] & df$Lon == df[df$Pop_UID == id, 'Lon'], 'Pop_UID']
}
}
TwoPi <- 2* pi
#startAngle <- TwoPi / 12
# spread overlapping points out on a circle around the common center
# to do this 'properly' would require transforming lat-long coordinates to planar (screen) coordinates
# and then back-transforming the locations to lat-long after arranging points on the circle
# instead, use a quick and dirty correction factor to put lat and long coordinates
# on approximately the same scale
# df: marker data frame whose rows all share one location (must have Lat/Lon columns).
# Returns df with Lat/Lon displaced onto a circle and its geometry replaced by the
# "spider legs" — one line from the shared center to each displaced marker.
map.spreadOnCircle <- function(df) {
  cLat <- mean(df$Lat)
  cLon <- mean(df$Lon)
  # marker spacing scaled to the visible map extent: 1/40 of the current N-S span
  markerSep <- abs(input$CUmap_bounds$north - input$CUmap_bounds$south) / 40
  # circle big enough to keep markerSep between adjacent markers
  radius <- markerSep * (2 + nrow(df)) / TwoPi
  angleStep <- TwoPi / nrow(df)
  for (i in 1:nrow(df)) {
    angle <- i * angleStep # + startAngle
    df[i, 'Lat'] <- cLat + radius * cos(angle)
    df[i, 'Lon'] <- cLon + 1.5 * radius * sin(angle) #0.8914291  (1.5 = rough lon-vs-lat scale correction)
  }
  # replace the geometry with line segments from the center to each displaced marker
  st_geometry(df) <- st_sfc(lapply(1:nrow(df), function(r) {
    st_linestring(matrix(c(cLon, df[r, 'Lon'], cLat, df[r, 'Lat']), nrow=2), dim='XY')
  }))
  df
}
# most recently spread-out ("spiderfied") markers, kept so they can be cleared later
spiderFlyMarkers <- reactiveVal()
# ------ Functions to do with creating 'popup' information panes to be shown on mouseover ------
# some helper functions for putting together information to be shown in the popover panes
# get the list of CUs associated with the given stream segment
# Return the CUs linked to stream segment `seg`, restricted to CUs that pass
# the current filter (data.currentCUs()). Preserves order and duplicates.
map.getCUsFromStreamSeg <- function(seg) {
  onSegment <- unpack(data.Streams$CUsSelectable[data.Streams$code == seg])
  inFilter <- onSegment %in% data.currentCUs()
  onSegment[inFilter]
}
# get the list of populations associated with the given stream segment
# Return the populations linked to stream segment `seg`, restricted to populations
# that pass the current filter (data.currentPops()). Preserves order and duplicates.
map.getPopsFromStreamSeg <- function(seg) {
  onSegment <- unpack(data.Streams$PopsSelectable[data.Streams$code == seg])
  inFilter <- onSegment %in% data.currentPops()
  onSegment[inFilter]
}
# create one row with information on the given metric
# create one row with information on the given metric
# `end` and `start` are named vectors with elements 'Value' and 'Status' for the
# end and start of the comparison period; `start` may be NULL when no earlier
# year exists. The row shows the metric label, its end-of-period value, and an
# arrow encoding the relative change, all styled by the end-of-period status.
map.makeCUInfoTableRow <- function(metric, end, start) {
  style <- getMetricTextStyle(end['Status'])
  label <- GetLabel(metric)
  if (is.na(end['Value'])) value <- 'NA'
  else value <- as.character(round(as.numeric(end['Value']), 2))
  # no change arrow when there is no usable, non-zero starting value
  # (zero start would make the relative change undefined)
  if (is.null(start) || is.na(start['Value']) || start['Value'] == 'NA' || !(abs(as.numeric(start['Value'])) > 0)) change <- ''
  else change <- HTML(makeArrow(polarAngle((as.numeric(end['Value']) - as.numeric(start['Value']))/as.numeric(start['Value']))))
  tags$tr(tags$td(style=style, label),
          tags$td(style=style, value),
          tags$td(style=style, change))
}
# get together the information needed to output information for the given metric
# the pane shows arrows indicating the direction and magnitude of change, either
# over the period from change year 1 to change year 2, or over the period leading
# up to the selected year.
# Assemble start/end values (and statuses) for metric `m` of CU `CU`, then build
# the table row via map.makeCUInfoTableRow. The comparison period is
# changeyear_1..changeyear_2 when the filter is in "Change" mode, otherwise the
# nearest prior year with data up to filter$year.
map.makeCUInfoMetricRow <- function(m, CU) {
  if (filter$change == "Change") {
    endYear <- filter$changeyear_2
    startYear <- filter$changeyear_1
  } else {
    endYear <- filter$year
    if (length(data.CU.Metrics[data.CU.Metrics$CU_ID == CU, 'Year']) > 0 &&
        filter$year > min(data.CU.Metrics[data.CU.Metrics$CU_ID == CU, 'Year'])) {
      startYear <- as.numeric(filter$year) - 1
      # count back if the current startYear isn't in the dataset
      while(!(startYear %in% data.CU.Metrics[data.CU.Metrics$CU_ID == CU, 'Year'])) startYear <- startYear - 1
    } else {
      startYear <- NULL
    }
  }
  # metrics table is row-named "<CU_ID>.<Year>"
  end <- c( Value = data.CU.Metrics[paste(CU, endYear, sep="."), m],
            Status = as.character(data.CU.Metrics[paste(CU, endYear, sep="."), paste0(m, '.Status')]))
  if (is.null(startYear)) start <- NULL
  else start <- c( Value = data.CU.Metrics[paste(CU, startYear, sep="."), m],
                   Status = as.character(data.CU.Metrics[paste(CU, startYear, sep="."), paste0(m, '.Status')]))
  map.makeCUInfoTableRow(m, start = start, end = end)
}
# put together an information pane for one CU
# Build the hover info pane for a single CU: header with the CU name, a sparkline
# of its time series, and one row per metric currently selected in the filter.
# When populations are shown, a population sparkline table is appended.
map.makeCUInfoPane <- function(CU) {
  df <- data.CU.TimeSeries[data.CU.TimeSeries$CU_ID == CU, ]
  p <- tags$div(class = 'sidebar-sparkline-box',
                tags$div(class = 'sidebar-sparkline-box-header', getCUname(CU)),
                spark.makeSparklineChart(df, attribs=CUTableAttribs[['sidebar']]),
                tags$table(lapply(MapLabelMetrics[MapLabelMetrics %in% filter$metrics], map.makeCUInfoMetricRow , CU)))
  if (data.showPops()) {
    p <- tagList(p, spark.makePopSparklineTable(data.getAvailablePopsForCUs(CU), mode='sidebar', CUheader='none'))
  }
  p
}
# put together an information pane for each population, to be shown on mouse-over on the map
# put together an information pane for each population, to be shown on mouse-over on the map
# shows a time-series sparkline when available (placeholder text otherwise),
# plus the population name, its ID, and the name of the CU it belongs to
map.makePopInfoPane <- function(pop) {
  if (data.Pop.Lookup[pop, 'HasTimeSeriesData'] == 'Yes') {
    tsSparkline <- tags$div(spark.makePopSparklineTable(pop, mode='sidebar', CUheader = 'none'))
  } else {
    tsSparkline <- tags$div(style=('background-color: black; color: #b0b0b0'), '<no time series data>')
  }
  tags$div(class = 'sidebar-sparkline-box',
           tags$div(class = 'sidebar-sparkline-box-header', data.Pop.Lookup[pop, "Pop_Name"]),
           tsSparkline,
           tags$table(tags$tr(tags$td('ID: '), tags$td(data.Pop.Lookup[pop, "Pop_ID"])),
                      tags$tr(tags$td("CU:"),
                              # take the first matching lookup row for the CU name
                              tags$td(data.CU.Lookup[data.CU.Lookup$CU_ID == data.Pop.Lookup[pop, "CU_ID"], 'CU_Name'][1]))))
}
# Build the mouse-over info pane for a marker: a sparkline table when several
# items share the marker, a detailed single-item pane when there is exactly one.
# Returns NULL for an empty item list.
map.makeMarkerInfoPane <- function(items, type) {
  n <- length(items)
  if (type == 'CUs') {
    if (n > 1) spark.makeCUSparklineTable(items, mode='sidebar')
    else if (n == 1) map.makeCUInfoPane(items)
  } else if (type == 'Pops') {
    if (n > 1) spark.makePopSparklineTable(items, mode='sidebar', CUheader='labelOnly')
    else if (n == 1) map.makePopInfoPane(items)
  }
}
# put together an information pane to be shown when user moves mouse over a stream segment
# Build the info pane shown when the user hovers over a stream segment:
# a sparkline table of the CUs on the segment (or a placeholder message when
# none pass the current filter), plus — when populations are shown — a
# population sparkline table or the corresponding placeholder.
map.makeStreamInfoPane <- function(segCode) {
  p <- tags$div('no CUs on this stream segment match current filter criteria')
  CUs <- map.getCUsFromStreamSeg(segCode)
  if (length(CUs) > 0)
    p <- spark.makeCUSparklineTable(CUs)
  if(data.showPops()) {
    pops <- map.getPopsFromStreamSeg(segCode)
    if (length(pops) > 0) {
      p <- tagList(p, spark.makePopSparklineTable(pops, mode='sidebar', CUheader = 'labelOnly'))
    } else {
      # bug fix: this branch reports on populations, not CUs
      p <- tagList(p, tags$div('no populations on this stream segment match current filter criteria'))
    }
  }
  tags$div(class = 'sidebar-sparkline-box', p)
}
# ------ Map element display --------
# add markers to map
# add markers to map
# Render one circle marker per row of `df` on the given pane/group. Per-marker
# color/fill come from df's `color`/`fillColor` columns; the remaining aesthetics
# come from `styleOpts`. When spiderFly=TRUE, coincident markers are first spread
# out on a circle (with connecting "legs") so they can be hovered individually.
map.renderMarkers <- function(map, df, pane, group, styleOpts, spiderFly=FALSE) {
  if (spiderFly) {
    df <- map.spreadOnCircle(df)
    spiderFlyMarkers(df)  # remember displaced markers so map.clearSpiderFly can remove them
    map.showSpiderLegs(map, df, pane=pane, group=group)
  }
  addCircleMarkers(map, data=df, lng=~Lon, lat=~Lat,
                   layerId = ~layer,
                   label = ~lapply(label, HTML),
                   options = pathOptions(pane = pane),
                   group = group,
                   radius = styleOpts$radius,
                   opacity = styleOpts$opacity,
                   fillColor = ~fillColor,
                   stroke = styleOpts$stroke,
                   fill = styleOpts$fill,
                   fillOpacity = styleOpts$fillOpacity,
                   weight = styleOpts$weight,
                   color = ~color
  )
}
# add polygons to map
# add polygons to map
# Render the polygons in `df` on the given pane/group; per-polygon outline and
# fill colors come from df's `color`/`fillColor` columns, other aesthetics from styleOpts.
map.showPolygons <- function(map, df, pane, group, styleOpts) {
  addPolygons(map=map, data=df,
              layerId = ~layer,
              label = ~lapply(label, HTML),
              options = pathOptions(pane = pane),
              color = ~color,
              fillColor = ~fillColor,
              fillOpacity = styleOpts$fillOpacity,
              stroke=styleOpts$stroke,
              weight=styleOpts$weight,
              opacity= styleOpts$opacity,
              group=group)
}
# Render one stream segment as a polyline.
# `df` is assumed to be row-indexed by segment code (df[segCode, ] selects the
# segment's geometry) — TODO confirm against the caller's data frame.
map.showStream <- function(map, segCode, df, layer, pane, group, styleOpts) {
  addPolylines(map=map, data=df[segCode, ],
               label = ~Name,
               color = styleOpts$color,
               weight = styleOpts$weight,
               opacity = styleOpts$opacity,
               layerId = layer,
               group = group,
               options = pathOptions(pane = pane))
}
# Draw the "spider legs" — the line geometries produced by map.spreadOnCircle
# connecting each displaced marker back to the shared center point.
map.showSpiderLegs <- function(map, df, pane, group, styleOpts=SpiderLegs) {
  addPolylines(map=map, data=df,
               color = styleOpts$color,
               weight = styleOpts$weight,
               opacity = styleOpts$opacity,
               group = group,
               options = pathOptions(pane = pane))
}
# Reactive: marker data frame for the currently filtered CUs. Adds layer ids,
# display labels, the per-metric status columns from data.filtered(), and
# color/fillColor columns derived from the active color scheme (with fallback
# colors when the scheme doesn't match a column).
map.CUMarkerData <- reactive({
  df <- unique.data.frame(data.CU.Lookup.filtered()[, c('CU_ID', MapAttribs)])
  if (nrow(df) > 0) {
    df$layer <- df$CU_ID
    df$label <- unlist(lapply(df$CU_ID, getCUname))
    df.m <- data.filtered()
    df.m$CU_ID <- row.names(df.m)
    # pull in all "*.Status" columns for coloring
    metrics <- names(df.m)[grep('.Status', names(df.m))]
    df <- merge(df, df.m[ , c('CU_ID', metrics)], by=c("CU_ID"), all.x=T, all.y=F)
    if (colorCtrl.colorScheme() %in% names(df)) {
      df$color <- colorCtrl.getColor(df[ , colorCtrl.colorScheme()], override=CUMarkerStyle.normal$color)
      df$fillColor <- colorCtrl.getColor(df[ , colorCtrl.colorScheme()], override=CUMarkerStyle.normal$fillColor)
    }
    else {
      # default colors when the color scheme isn't a column of this data frame
      df$color <- rep('black', nrow(df))
      df$fillColor <- rep('#eeeeee', nrow(df))
    }
  }
  df
})
# Add markers to map that represent the CUs listed (or all CUs currently in the filter set if CUs = NULL)
# Add markers for the CUs listed (default: all CUs in the current filter set).
# `layer` optionally prefixes the layer/group ids (e.g. 'selected', 'spider');
# styleOpts$color/fillColor, when given, override the per-marker colors.
map.showCUMarkers <- function(leafletMap, CUs=data.currentCUs(), styleOpts = CUMarkerStyle.normal, layer=NULL, spiderFly=FALSE, df=NULL) {
  if (is.null(df)) df <- map.CUMarkerData()
  df <- df[df$CU_ID %in% CUs, ]
  if (nrow(df) > 0) {
    group <- 'CUMarkers'
    if(!is.null(layer)) {
      df$layer <- paste0(layer, '.', df$layer)
      group <- paste0(layer, '.', group)
    }
    if (!is.null(styleOpts$color)) df$color <- rep(styleOpts$color, nrow(df))
    if (!is.null(styleOpts$fillColor)) df$fillColor <- rep(styleOpts$fillColor, nrow(df))
    leafletMap <- map.renderMarkers(leafletMap, df, pane=group, group=group, styleOpts=styleOpts, spiderFly)
  }
  leafletMap
}
# Reactive: marker data frame for the currently filtered populations. Adds
# labels, layer ids, and color/fillColor columns derived from the active color
# scheme (with fallback colors when the scheme doesn't match a column).
map.popMarkerData <- reactive({
  df.sp <- data.Pop.Lookup.filtered()
  if (nrow(df.sp) > 0) {
    #df.sp <- data.Pop.Spatial[data.Pop.Spatial$Pop_UID %in% df$Pop_UID, ]
    #attribsToKeep <- c('Pop_UID', 'CU_ID', 'Pop_ID', 'Pop_Name', 'Species', 'Lat', 'Lon', 'FAZ', 'FWA_WATERSHED_CODE', )
    #df.sp <- df.sp[, attribsToKeep]
    df.sp$label <- unlist(lapply(df.sp$Pop_UID, getPopName))
    df.sp$layer <- df.sp$Pop_UID
    if (colorCtrl.colorScheme() %in% names(df.sp)) {
      df.sp$color <- colorCtrl.getColor(df.sp[ , colorCtrl.colorScheme()], override=PopMarkerStyle.normal$color)
      df.sp$fillColor <- colorCtrl.getColor(df.sp[ , colorCtrl.colorScheme()], override=PopMarkerStyle.normal$fillColor)
    }
    else {
      # default colors when the color scheme isn't a column of this data frame
      df.sp$color <- rep('black', nrow(df.sp))
      df.sp$fillColor <- rep('white', nrow(df.sp))
    }
  }
  df.sp
})
# add markers to map that represent the populations listed (or all populations currently in the filter set
# if Pops = NULL). Markers for pops currently selected appear highlighted
# Add markers for the populations listed (default: all populations in the
# current filter set). `layer` optionally prefixes the layer/group ids
# (e.g. 'selected', 'spider'); styleOpts$color/fillColor, when given, override
# the per-marker colors. Returns the (possibly updated) leaflet map.
map.showPopMarkers <- function(leafletMap, pops=data.currentPops(), styleOpts = PopMarkerStyle.normal, layer=NULL, spiderFly=FALSE, df=NULL) {
  if (is.null(df)) df <- map.popMarkerData()
  df <- df[df$Pop_UID %in% pops, ]
  if (nrow(df) > 0) {
    group <- 'PopMarkers'
    if(!is.null(layer)) {
      df$layer <- paste0(layer, '.', df$layer)
      group <- paste0(layer, '.', group)
    }
    if (!is.null(styleOpts$color)) df$color <- rep(styleOpts$color, nrow(df))
    if (!is.null(styleOpts$fillColor)) df$fillColor <- rep(styleOpts$fillColor, nrow(df))
    # (removed a leftover empty `if (is.null(layer) || layer != 'mouseover') { }`
    # block that had no effect)
    leafletMap <- map.renderMarkers(leafletMap, df, pane=group, group=group, styleOpts=styleOpts, spiderFly)
  }
  leafletMap
}
# Dispatcher: show markers for `items` of the given `type` ('CUs' or 'Pops')
# in the given `style` ('normal' or 'highlighted'); other arguments are passed
# through to map.showCUMarkers / map.showPopMarkers.
map.showMarkers <- function(leafletMap, items, type='CUs', style='normal', layer=NULL, spiderFly=FALSE, df=NULL) {
  if (type == 'CUs') {
    if (style == 'normal')
      map.showCUMarkers(leafletMap, CUs=items, styleOpts = CUMarkerStyle.normal, layer=layer, spiderFly=spiderFly, df=df)
    else if (style == 'highlighted')
      map.showCUMarkers(leafletMap, CUs=items, styleOpts = CUMarkerStyle.highlighted, layer=layer, spiderFly=spiderFly, df=df)
  }
  else if (type == 'Pops') {
    if (style == 'normal')
      map.showPopMarkers(leafletMap, pops=items, styleOpts = PopMarkerStyle.normal, layer=layer, spiderFly=spiderFly, df=df)
    else if (style == 'highlighted')
      map.showPopMarkers(leafletMap, pops=items, styleOpts = PopMarkerStyle.highlighted, layer=layer, spiderFly=spiderFly, df=df)
  }
}
# Reactive: spatial polygons for the currently filtered CUs, merged with the CU
# marker attributes and given the default polygon outline/fill colors.
map.CUpolyData <- reactive({
  df.poly <- data.CU.Spatial[data.CU.Spatial$CU_ID %in% data.currentCUs(), ]
  if (nrow(df.poly) > 0) {
    df.poly <- sp::merge(df.poly, map.CUMarkerData(), by=c("CU_ID"), all.x=T, all.y=F)
    if (!is.null(CUPolyStyle.normal$color)) df.poly$color <- rep(CUPolyStyle.normal$color, nrow(df.poly))
    if (!is.null(CUPolyStyle.normal$fillColor)) df.poly$fillColor <- rep(CUPolyStyle.normal$fillColor, nrow(df.poly))
  }
  df.poly
})
# Add CU boundaries for the CUs listed (or for all CUs currently in the filter if CUs = NULL)
# boundaries for CUs currently selected appear highlighted
# Add CU boundary polygons for the CUs listed (default: all CUs in the current
# filter). Polygons are rendered per species into species-specific z-panes
# ("CUPolygons.<species>") so their stacking order can be controlled.
map.showCUPolys <- function(leafletMap, CUs=data.currentCUs(), styleOpts = CUPolyStyle.normal, layer=NULL) {
  df <- map.CUpolyData()[map.CUpolyData()$CU_ID %in% CUs, ]
  if (nrow(df) > 0) {
    group <- 'CUPolygons'
    if(!is.null(layer)) {
      df$layer <- paste0(layer, '.', df$layer)
      group <- paste0(layer, '.', group)
    }
    if (!is.null(styleOpts$color)) df$color <- rep(styleOpts$color, nrow(df))
    if (!is.null(styleOpts$fillColor)) df$fillColor <- rep(styleOpts$fillColor, nrow(df))
    for (sp in unique(df$Species)) {
      leafletMap <- map.showPolygons(leafletMap, df[df$Species == sp, ],
                                     pane = paste0(group, '.', sp), group = group, styleOpts = styleOpts)
    }
  }
  leafletMap
}
# hide a group and associated selection group
# hide a group and associated selection group
# Bug fix: the original piped `map` into hideGroup AND passed it again as the
# first argument (`map %>% hideGroup(map, group)` == hideGroup(map, map, group)),
# which fails because hideGroup takes only (map, group).
map.hide <- function(map, group) {
  map %>% hideGroup(group) %>% hideGroup(paste0('selected.', group))
}
# hide all markers
# hide all markers
# hides both the normal and the 'selected.' CU/population marker groups on the map proxy
map.hideMarkers <- function() {
  hideGroup(CUmapProxy, 'CUMarkers')
  hideGroup(CUmapProxy, 'PopMarkers')
  hideGroup(CUmapProxy, 'selected.CUMarkers')
  hideGroup(CUmapProxy, 'selected.PopMarkers')
}
# unhide markers if they are supposed to be visible
# unhide markers if they are supposed to be visible
# (map.setVisibility consults the mapCtrl.isVisible state for each group)
map.unhideMarkers <- function() {
  map.setVisibility('CUMarkers')
  map.setVisibility('PopMarkers')
}
# clear spiderFly
# clear spiderFly
# remove the spread-out ("spiderfied") marker groups and restore regular marker visibility
map.clearSpiderFly <- function() {
  clearGroup(CUmapProxy, 'spider.CUMarkers')
  clearGroup(CUmapProxy, 'spider.PopMarkers')
  map.unhideMarkers()
}
# ------ Leaflet map rendering ------
# add control widgets to the map
# Attach the interactive controls to the leaflet map: a minimap, a draw toolbar
# for selection masks, and a set of easyButtons. Each easyButton's JS onClick
# pushes a random value into a shiny input (the value itself is irrelevant — it
# just changes on every click), which the server observes to open the
# corresponding modal or trigger the action.
map.addMapControls <- function(leafletMap) {
  # add a minimap that shows current map extent within the wider geographic context
  leafletMap <- addMiniMap(leafletMap,
                           tiles = providers$CartoDB.Positron,
                           zoomLevelOffset = -4,
                           toggleDisplay = TRUE,
                           minimized = TRUE)
  # add controls for drawing masks for selection (rectangles only)
  leafletMap <- leaflet.extras::addDrawToolbar(leafletMap,
                                               targetGroup = "mask",
                                               rectangleOptions = T,
                                               polylineOptions = F,
                                               markerOptions = F,
                                               circleMarkerOptions = F,
                                               editOptions = F,
                                               circleOptions = F)
  # zoom-out-to-full-extent button
  leafletMap <- addEasyButton(leafletMap,
                              easyButton(icon="fa-arrows",
                                         title="Zoom to map",
                                         onClick=JS("function(btn, map){
                                                    // Don't care about the value here. It just needs to change every time the button is clicked.
                                                    Shiny.onInputChange('leaflet_button_zoom_out', Math.random());
                                                    }")))
  # could use native layersCotrol here to show/hide layers, but it works slightly differently
  # from shiny modals in that the modal disappears on mouseout, rather than on mouse click outside modal.
  # Implement this with an easyButton and shiny modal instead to maintain consistent UI
  leafletMap <- addEasyButton(leafletMap,
                              easyButton(icon="fa-eye",
                                         title="Show/hide layers",
                                         onClick=JS("function(btn, map){
                                                    // Don't care about the value here. It just needs to change every time the button is clicked.
                                                    Shiny.onInputChange('leaflet_button_layer_visibility', Math.random());
                                                    }")))
  # color-scheme picker button
  leafletMap <- addEasyButton(leafletMap,
                              easyButton(icon="fa-paint-brush",
                                         title="Set color scheme",
                                         onClick=JS("function(btn, map){
                                                    // Don't care about the value here. It just needs to change every time the button is clicked.
                                                    Shiny.onInputChange('leaflet_button_color_scheme', Math.random());
                                                    }")))
  # snapshot (save map image) button
  leafletMap <- addEasyButton(leafletMap,
                              easyButton(icon="fa-camera",
                                         title="Save snapshot",
                                         position="topright",
                                         onClick=JS("function(btn, map){
                                                    console.log(map);
                                                    // Don't care about the value here. It just needs to change every time the button is clicked.
                                                    Shiny.onInputChange('leaflet_button_snapshot', Math.random());
                                                    }")))
  # added to visibility menu
  # leafletMap <- addLayersControl(leafletMap,
  #                                position='bottomleft',
  #                                baseGroups=c('map', 'satellite'))
  # additional settings button
  # NOTE(review): this button reuses the 'leaflet_button_snapshot' input — looks
  # like a copy-paste leftover; confirm whether it should have its own input id
  leafletMap <- addEasyButton(leafletMap,
                              easyButton(icon="fa-cog",
                                         title="Additional settings",
                                         onClick=JS("function(btn, map){
                                                    console.log(map);
                                                    // Don't care about the value here. It just needs to change every time the button is clicked.
                                                    Shiny.onInputChange('leaflet_button_snapshot', Math.random());
                                                    }")))
  leafletMap
}
# Stamp the SoS logo into the lower-right corner of the map.
# 'SoS-01.png' is used over the plain map backdrop, 'SoS-02.png' otherwise.
# NOTE (carried over from original): leafem::addLogo appears to have a bug
# that prevents the logo from showing when the map is re-rendered, and it
# does not work through a leaflet proxy.
map.addLogo <- function(map) {
  logoFile <- switch(mapCtrl.backdrop(), map = 'SoS-01.png', 'SoS-02.png')
  leafem::addLogo(map,
                  img = logoFile,
                  src = 'local',
                  position = 'bottomright',
                  offset.x = 50,
                  offset.y = 10,
                  width = 220,
                  height = 60)
}
# create the map
# Builds the complete leaflet widget: base tiles, custom z-order panes,
# the stream network, CU/population markers and polygons, current-selection
# highlights, the legend and (for the interactive variant) the easy-button
# controls.
#
# @param forPrinting TRUE -> static variant for mapshot export: a single tile
#   provider matching the current backdrop, view fitted to the currently shown
#   bounds, no controls. FALSE -> interactive variant with both backdrops
#   loaded (one hidden).
# @return A leaflet map object.
# NOTE(review): relies on app-scope objects not visible in this chunk
# (zPaneOrder, data.Streams, StreamStyle.normal, mapCtrl.*, data.currentSelection).
map.buildMap <- function(forPrinting = FALSE) {
if (forPrinting) {
# only pull in what's actually visible and match view to currently shown map
# since satellite tiles can take a while to load over a slow connection
if (mapCtrl.backdrop() == 'map')
tiles <- 'CartoDB.Positron'
else
tiles <- 'Esri.WorldImagery'
leafletMap <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>%
addProviderTiles(tiles) %>%
fitBounds(lng1 = input$CUmap_bounds$west,
lat1 = input$CUmap_bounds$south,
lng2 = input$CUmap_bounds$east,
lat2 = input$CUmap_bounds$north,
) #%>%
# setView(lng = input$CUmap_center$lng,
#         lat = input$CUmap_center$lat,
#         zoom = input$CUmap_zoom)
}
else { # building for interactive
# fractional zoomSnap/zoomDelta allow fine-grained zooming
leafletMap <- leaflet(options = leafletOptions(zoomSnap = 0.1, zoomDelta = 0.1)) %>%
addProviderTiles('CartoDB.Positron', layerId='map', group='map') %>%
addProviderTiles('Esri.WorldImagery', layerId='satellite', group='satellite')
# both tile layers are loaded; only the one matching the backdrop setting is shown
if(mapCtrl.backdrop() == 'map')
leafletMap <- hideGroup(leafletMap, 'satellite')
else
leafletMap <- hideGroup(leafletMap, 'map')
}
leafletMap <- map.addLogo(leafletMap)
# set up custom z-panes for content; need this to control the order in which map elements are layered
z <- 400 # 400 is the leaflet default overlay pane; start at pane 401 for custom panes
# CU polygons
# one pane per zPaneOrder entry, for each of the three highlight states
# (normal, selected, mouseover); panes created later sit on top
for (i in 1:length(zPaneOrder)) {
z <- z+1
leafletMap <- addMapPane(leafletMap, name = paste0("CUPolygons.", zPaneOrder[i]), zIndex = z)
}
for (i in 1:length(zPaneOrder)) {
z <- z+1
leafletMap <- addMapPane(leafletMap, name = paste0("selected.CUPolygons.", zPaneOrder[i]), zIndex = z)
}
for (i in 1:length(zPaneOrder)) {
z <- z+1
leafletMap <- addMapPane(leafletMap, name = paste0("mouseover.CUPolygons.", zPaneOrder[i]), zIndex = z)
}
# stream segments
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "streams", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "selected.streams", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "mouseover.streams", zIndex = z)
# CU markers
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "CUMarkers", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "selected.CUMarkers", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "mouseover.CUMarkers", zIndex = z)
# Pop markers
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "PopMarkers", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "selected.PopMarkers", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "mouseover.PopMarkers", zIndex = z)
# spiderfied (fanned-out overlapping) markers sit on top of everything
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "spider.CUMarkers", zIndex = z)
z <- z + 1
leafletMap <- addMapPane(leafletMap, name = "spider.PopMarkers", zIndex = z)
# add the stream segments
leafletMap <- addPolylines(leafletMap,
color=StreamStyle.normal$color,
weight=StreamStyle.normal$weight,
opacity=StreamStyle.normal$opacity,
layerId = ~code,
group = "Streams",
label = ~Name,
data = data.Streams,
options = pathOptions(pane = 'streams'))
# We only want to have the basemap drawn once here, not every time the data filter changes.
# Since the various map display functions use reactive expressions dependent on filtering,
# make sure all calls to these function are isolated here
isolate(leafletMap <- map.showCUMarkers(leafletMap))
isolate(leafletMap <- map.showCUPolys(leafletMap))
isolate(if(!data.currentSelectionEmpty('CUs')) {
CUs <- data.currentSelection[['CUs']]
leafletMap <- map.showCUMarkers(leafletMap, CUs=CUs, styleOpts = CUMarkerStyle.highlighted, layer='selected')
leafletMap <- map.showCUPolys(leafletMap, CUs=CUs, styleOpts = CUPolyStyle.highlighted, layer='selected')
})
isolate(leafletMap <- map.showPopMarkers(leafletMap))
isolate(if(!data.currentSelectionEmpty('Pops')) {
leafletMap <- map.showPopMarkers(leafletMap, pops=data.currentSelection[['Pops']], styleOpts = PopMarkerStyle.highlighted, layer='selected')
})
# hide any groups that aren't supposed to be visible
# need to isolate these to avoid re-rendering of entire map when mapCtrl.isVisible changes
for (group in c('CUMarkers', 'PopMarkers', 'CUPolygons')) {
isolate(if (!mapCtrl.isVisible[[group]]) {
leafletMap <- hideGroup(leafletMap, group)
leafletMap <- hideGroup(leafletMap, paste0('selected.', group))
})
}
isolate(if(!mapCtrl.isVisible[['Streams']])
leafletMap <- hideGroup(leafletMap, 'Streams'))
# legend reflects the current color scheme; isolated so a scheme change alone
# does not force a full re-render (the filter-change observer updates it)
isolate(colorScheme <- colorCtrl.getColors(colorCtrl.colorScheme()))
leafletMap <- addLegend(leafletMap,
position='bottomleft',
layerId = 'legend',
colors=as.character(colorScheme),
labels=names(colorScheme))
# map controls should not be shown in printed version
if (!forPrinting) leafletMap <- map.addMapControls(leafletMap)
leafletMap
}
# Render the leaflet map. The build is wrapped in try() so that a failure
# yields an empty map area instead of a red shiny error box.
# NOTE(review): failures are swallowed silently here — presumably this guards
# against transient build errors before app data is ready; confirm.
output$CUmap <- renderLeaflet({
leafletOutput <- try({map.buildMap()})
if (!inherits(leafletOutput, "try-error")) {
leafletOutput
} else {
NULL
}
})
# Map container with a loading spinner shown while the widget renders.
output$box_LeafletMap <- renderUI({shinycssloaders::withSpinner(leafletOutput("CUmap", height = 500))})
# use this to make changes to the leaflet map without re-rendering the whole thing
CUmapProxy <- leafletProxy('CUmap')
# keeping track of initial view (restored by the "zoom to map" easy button)
map.initCenter <- reactiveVal()
map.initZoom <- reactiveVal()
# input$CUmapCreated gets set by a custom event defined in customJSCode.js
# See the comment in customJSCode.js for how to get access to a widget just after
# it has been rendered into the dom, e.g., to modify/add style elements
observeEvent(input$CUmapCreated, {
# remember the initial center/zoom so the view can be restored later
map.initCenter(input$CUmap_center)
map.initZoom(input$CUmap_zoom)
# javascript custom message handler defined in customJSCode.js
# changes default titles on leaflet draw buttons
session$sendCustomMessage("fixDrawButtonTitles", 'CUmap')
})
# things to do when the Map panel is opened
observeEvent(input$UIPanels, {
if (input$UIPanels == 'Map') clearInfoPane()
})
# ------ Event handlers for EasyButtons ------
# "Zoom to map" button: restore the initial center and zoom level.
observeEvent(input$leaflet_button_zoom_out, {
CUmapProxy %>% setView(lng=map.initCenter()$lng, lat=map.initCenter()$lat, zoom=map.initZoom())
})
# "Show/hide layers" button: modal with the backdrop choice (map vs satellite)
# plus one eye-toggle per overlay group. Each control is initialized from the
# current state (mapCtrl.backdrop / mapCtrl.isVisible), so the modal always
# reflects what the map is showing.
observeEvent(input$leaflet_button_layer_visibility, {
showModal(modalDialog(
title = "Layer Visibility",
radioButtons(inputId = 'map_backdrop', label='Backdrop',
choices = c('map', 'satellite'),
selected = mapCtrl.backdrop(),
inline=TRUE),
tags$hr(),
prettyToggle(inputId= 'map_CUMarkers_visible', value = mapCtrl.isVisible[['CUMarkers']],
label_on= 'CU Markers', label_off = 'CU Markers',
icon_on = icon("eye"), icon_off = icon("eye-slash"),
status_on = "primary", status_off = "primary",
outline = TRUE, plain = TRUE),
prettyToggle(inputId= 'map_CUPolygons_visible', value = mapCtrl.isVisible[['CUPolygons']],
label_on= 'CU Boundaries', label_off = 'CU Boundaries',
icon_on = icon("eye"), icon_off = icon("eye-slash"),
status_on = "primary", status_off = "primary",
outline = TRUE, plain = TRUE),
prettyToggle(inputId= 'map_PopMarkers_visible', value = mapCtrl.isVisible[['PopMarkers']],
label_on= 'Sites', label_off = 'Sites',
icon_on = icon("eye"), icon_off = icon("eye-slash"),
status_on = "primary", status_off = "primary",
outline = TRUE, plain = TRUE),
prettyToggle(inputId= 'map_Streams_visible', value = mapCtrl.isVisible[['Streams']],
label_on= 'Streams', label_off = 'Streams',
icon_on = icon("eye"), icon_off = icon("eye-slash"),
status_on = "primary", status_off = "primary",
outline = TRUE, plain = TRUE ),
easyClose = TRUE,
footer = NULL,
size = 's'
))
})
# Sync the on-map visibility of `group` — and its 'selected.' twin layer —
# with the state recorded in mapCtrl.isVisible[[group]].
map.setVisibility <- function(group) {
  toggle <- if (mapCtrl.isVisible[[group]]) showGroup else hideGroup
  CUmapProxy %>% toggle(group) %>% toggle(paste0('selected.', group))
}
# Keep the backdrop reactive in sync with the modal's radio buttons.
observeEvent(input$map_backdrop, mapCtrl.backdrop(input$map_backdrop))
# observeEvent(mapCtrl.backdrop(), {
#   CUmapProxy %>% hideGroup('map') %>% hideGroup('satellite')
#   if (mapCtrl.backdrop() == 'map')
#     CUmapProxy %>% showGroup('map')
#   else
#     CUmapProxy %>% showGroup('satellite')
#   # map.addLogo(CUmapProxy)
# }, ignoreInit = TRUE)
# Each overlay uses the same two-step pattern: the modal toggle updates
# mapCtrl.isVisible, and a second observer pushes that state to the map.
# The indirection keeps the map correct when visibility is changed
# programmatically too (e.g. by the data.showPops() observer below).
observeEvent(input$map_CUPolygons_visible, mapCtrl.isVisible[['CUPolygons']] <- input$map_CUPolygons_visible)
observeEvent(mapCtrl.isVisible[['CUPolygons']], map.setVisibility('CUPolygons'))
observeEvent(input$map_CUMarkers_visible, mapCtrl.isVisible[['CUMarkers']] <- input$map_CUMarkers_visible)
observeEvent(mapCtrl.isVisible[['CUMarkers']], map.setVisibility('CUMarkers'))
observeEvent(input$map_PopMarkers_visible, mapCtrl.isVisible[['PopMarkers']] <- input$map_PopMarkers_visible)
observeEvent(mapCtrl.isVisible[['PopMarkers']], map.setVisibility('PopMarkers'))
observeEvent(input$map_Streams_visible, mapCtrl.isVisible[['Streams']] <- input$map_Streams_visible)
observeEvent(mapCtrl.isVisible[['Streams']], map.setVisibility('Streams'))
# Mirror site-marker visibility into the sidebar checkbox.
observeEvent(mapCtrl.isVisible[['PopMarkers']], {
updateCheckboxInput(session, 'sidebarMenu_showPops',
label = 'Show Sites',
value = mapCtrl.isVisible[['PopMarkers']])
})
# "Additional settings" modal: configure what gets highlighted when hovering
# over CU markers, CU polygons and stream segments. Defaults come from the
# corresponding mapCtrl.*HoverHighlights reactives.
observeEvent(input$leaflet_button_settings, {
showModal(modalDialog(
checkboxGroupInput(inputId= 'map_showOnCUmarkerHover', label = "On hover over CU marker: ",
choiceNames = c('Highlight CU marker',
'Highlight CU boundaries',
'Highlight sites associated with CU',
'Hide other map elements'),
choiceValues = c('Marker', 'Polygon', 'Pops', 'hideOthers'),
selected = mapCtrl.CUmarkerHoverHighlights(),
inline = FALSE, width = NULL),
checkboxGroupInput(inputId= 'map_showOnCUpolyHover', label = "On hover over CU polygons: ",
choiceNames = c('Highlight CU marker',
'Highlight CU boundaries',
'Highlight sites associated with CU',
'Hide other map elements'),
choiceValues = c('Marker', 'Polygon', 'Pops', 'hideOthers'),
selected = mapCtrl.CUpolyHoverHighlights(),
inline = FALSE, width = NULL),
checkboxGroupInput(inputId= 'map_showOnStreamHover', label = "On hover over stream segments: ",
choiceNames = c('Highlight CU markers of CUs on stream',
'Highlight CU boundaries of CUs on stream',
'Highlight sites on stream',
'Hide other map elements'),
choiceValues = c('Marker', 'Polygon', 'Pops', 'hideOthers'),
selected = mapCtrl.CUstreamHoverHighlights(),
inline = FALSE, width = NULL),
easyClose = TRUE,
footer = NULL,
size = 's'
))
})
# Persist the modal's checkbox selections into the hover-highlight reactives.
observeEvent(input$map_showOnCUmarkerHover, mapCtrl.CUmarkerHoverHighlights(input$map_showOnCUmarkerHover))
observeEvent(input$map_showOnCUpolyHover, mapCtrl.CUpolyHoverHighlights(input$map_showOnCUpolyHover))
observeEvent(input$map_showOnStreamHover, mapCtrl.CUstreamHoverHighlights(input$map_showOnStreamHover))
# When sites (populations) are toggled globally, sync the marker layer
# visibility and strip/add the 'Pops' option from all three hover-highlight
# option sets so hover behavior matches what is shown.
observeEvent(data.showPops(), {
if (!data.showPops()) { # hide pop markers
mapCtrl.isVisible[['PopMarkers']] <- FALSE
opts <- mapCtrl.CUpolyHoverHighlights()
mapCtrl.CUpolyHoverHighlights(opts[opts != 'Pops'])
opts <- mapCtrl.CUmarkerHoverHighlights()
mapCtrl.CUmarkerHoverHighlights(opts[opts != 'Pops'])
opts <- mapCtrl.CUstreamHoverHighlights()
mapCtrl.CUstreamHoverHighlights(opts[opts != 'Pops'])
}
else { # make populations visible on map
mapCtrl.isVisible[['PopMarkers']] <- TRUE
opts <- mapCtrl.CUpolyHoverHighlights()
mapCtrl.CUpolyHoverHighlights(unique(c(opts, 'Pops')))
opts <- mapCtrl.CUmarkerHoverHighlights()
mapCtrl.CUmarkerHoverHighlights(unique(c(opts, 'Pops')))
opts <- mapCtrl.CUstreamHoverHighlights()
mapCtrl.CUstreamHoverHighlights(unique(c(opts, 'Pops')))
}
})
# "Set color scheme" button: modal with a single select; the chosen value
# feeds colorCtrl.colorScheme(), which the filter-change observer below uses
# to re-color markers/polygons and the legend.
observeEvent(input$leaflet_button_color_scheme, {
showModal(modalDialog(
selectInput(inputId = 'map_colorScheme', label = 'Color by',
choices = colorCtrl.colorOpts(), selected = colorCtrl.colorScheme(), multiple = FALSE),
easyClose = TRUE,
footer = NULL,
size = 's'
))
})
observeEvent(input$map_colorScheme , {colorCtrl.colorScheme(input$map_colorScheme)})
# -------- Highlighting of map elements ------
# Draw a single stream segment in the highlighted style onto the layer /
# pane / group belonging to the given highlight type
# ('selected' or 'mouseover').
map.highlightStream <- function(segCode, type) {
  typePrefix <- paste0(type, '.')
  map.showStream(CUmapProxy, segCode, df = data.StreamsExtended,
                 layer = paste0(typePrefix, segCode),
                 pane = paste0(typePrefix, 'streams'),
                 group = paste0(typePrefix, 'Streams'),
                 styleOpts = StreamStyle.highlighted)
}
# Draw the given markers in the highlighted style on `highlightLayer`.
#
# @param markers Vector of CU or Pop ids.
# @param type 'CUs' or 'Pops' (anything else is a silent no-op, as before).
# @param highlightLayer Target highlight layer (default 'selected').
# @param df Optional marker data frame passed through to the show function.
map.highlightMarkers <- function(markers, type, highlightLayer='selected', df=NULL) {
  switch(type,
         CUs  = map.showCUMarkers(CUmapProxy, markers, styleOpts = CUMarkerStyle.highlighted,
                                  layer = highlightLayer, df = df),
         Pops = map.showPopMarkers(CUmapProxy, markers, styleOpts = PopMarkerStyle.highlighted,
                                   layer = highlightLayer, df = df))
}
# Remove marker highlights of the given type ('CUs' or 'Pops').
# With markers = NULL the whole highlight group is cleared; otherwise only
# the listed markers are removed from `highlightLayer`.
map.unhighlightMarkers <- function(markers=NULL, type, highlightLayer='selected') {
  group <- if (type == 'CUs') 'CUMarkers' else 'PopMarkers'
  if (is.null(markers)) {
    clearGroup(CUmapProxy, paste0(highlightLayer, '.', group))
  } else {
    lapply(markers, function(id) removeMarker(CUmapProxy, paste0(highlightLayer, '.', id)))
  }
}
# Draw the given CU boundary polygons in the highlighted style on
# `highlightLayer`.
map.highlightPolygons <- function(polys, highlightLayer='selected') {
  map.showCUPolys(CUmapProxy, polys, layer = highlightLayer, styleOpts = CUPolyStyle.highlighted)
}
# Remove CU polygon highlights. With polys = NULL the whole highlight group
# is cleared; otherwise only the listed polygons are removed.
map.unhighlightPolygons <- function(polys=NULL, highlightLayer='selected') {
  if (is.null(polys)) {
    clearGroup(CUmapProxy, paste0(highlightLayer, '.CUPolygons'))
  } else {
    lapply(polys, function(id) removeShape(CUmapProxy, paste0(highlightLayer, '.', id)))
  }
}
# Paint mouseover-style highlights for any combination of CU polygons,
# CU markers and population markers on their dedicated 'mouseover' layers.
# NULL arguments are skipped.
map.showMouseoverHighlights <- function(CUPolys=NULL, CUMarkers=NULL, PopMarkers=NULL) {
  if (!is.null(CUPolys)) {
    map.showCUPolys(CUmapProxy, CUs = CUPolys, layer = 'mouseover', styleOpts = CUPolyStyle.mouseover)
  }
  if (!is.null(CUMarkers)) {
    map.showCUMarkers(CUmapProxy, CUs = CUMarkers, layer = 'mouseover', styleOpts = CUMarkerStyle.mouseover)
  }
  if (!is.null(PopMarkers)) {
    map.showPopMarkers(CUmapProxy, pops = PopMarkers, layer = 'mouseover', styleOpts = PopMarkerStyle.mouseover)
  }
}
# Remove highlight overlays from the map.
# Called with no element arguments it wipes every highlight of the given
# `type` ('mouseover' by default, or 'selected'); otherwise it removes only
# the highlights belonging to the ids passed in.
map.clearHighlights <- function(CUPolys=NULL, CUMarkers=NULL, PopMarkers=NULL, Streams=NULL, type='mouseover') {
  clearEverything <- is.null(CUPolys) && is.null(CUMarkers) && is.null(PopMarkers) && is.null(Streams)
  if (clearEverything) {
    for (grp in c('CUMarkers', 'PopMarkers', 'CUPolygons', 'streams')) {
      clearGroup(CUmapProxy, paste0(type, '.', grp))
    }
  } else {
    # same removal order as before: polygon shapes, then markers, then streams
    for (id in CUPolys) CUmapProxy %>% removeShape(paste0(type, '.', id))
    for (id in c(CUMarkers, PopMarkers)) CUmapProxy %>% removeMarker(paste0(type, '.', id))
    for (id in Streams) CUmapProxy %>% removeShape(paste0(type, '.', id))
  }
}
# ------ Event handlers for Marker events ------
# Strip the layer prefixes ('mouseover.', 'selected.', 'spider.') from a
# leaflet layer id to recover the underlying CU/Pop/shape ID.
#
# @param el Character vector of leaflet layer ids, or NULL.
# @return The id(s) with all layer prefixes removed; '' when `el` is NULL.
map.getID <- function(el) {
  if (is.null(el)) return('')
  # fixed = TRUE: match the prefixes literally. Previously the '.' was an
  # unescaped regex wildcard, so ids merely resembling a prefix (e.g.
  # containing 'selectedX') could be mangled. The original NULL branch also
  # assigned `sel <- ''` and relied on the assignment's invisible value;
  # return the result explicitly instead.
  gsub('spider.', '',
       gsub('selected.', '',
            gsub('mouseover.', '', el, fixed = TRUE),
            fixed = TRUE),
       fixed = TRUE)
}
# show mouseover highlights associated with the selected CUs contained in sel on map
# elementsToHighlight is a list analogous to mapCtrl.CUmarkerHoverHighlights
# that controls what should be highlighted ('Marker', 'Pops', 'Polygon')
# NOTE(review): the 'hideOthers' choice offered in the settings modal is not
# acted on here — confirm where (or whether) it is implemented.
map.showCUMouseoverHighlights <- function(sel, elementsToHighlight = mapCtrl.CUmarkerHoverHighlights()) {
if ('Marker' %in% elementsToHighlight) # highlight the marker associated with this CU
map.showMouseoverHighlights(CUMarkers = c(sel))
if ('Pops' %in% elementsToHighlight) # show populations associated with this CU in addition to CU marker
map.showMouseoverHighlights(PopMarkers = data.getAvailablePopsForCUs(sel))
if ('Polygon' %in% elementsToHighlight) # show the boundaries associated with this CU
map.showMouseoverHighlights(CUPolys = c(sel))
}
# Convenience wrapper: mouseover highlight for population marker(s) only.
map.showPopMouseoverHighlights <- function(sel) {
map.showMouseoverHighlights(PopMarkers=c(sel))
}
# ------------- mouseover events -----------------
# things that should occur when the user moves the mouse over a marker
# Note: InfoPane display on hover is currently disabled (the assignments are
# commented out); the observer only drives the visual highlights.
observeEvent(input$CUmap_marker_mouseover,
{ # mouseover events aren't always detected, so if there are residual highlighted markers around,
# they will be on top and therefore block access to the actual marker underneath
# Get rid of any residual mouseover highlights here before continuing
# cat("Marker mouseover event observed for ", input$CUmap_marker_mouseover$id, "\n")
map.clearHighlights(type='mouseover')
sel <- map.getID(input$CUmap_marker_mouseover$id)
InfoPane <- NULL
if (sel %in% data.CUs) { # mouse is over a CU marker
map.showCUMouseoverHighlights(sel, mapCtrl.CUmarkerHoverHighlights())
#InfoPane <- map.makeCUPopup(sel)
} else if (sel %in% data.Pops) { # mouse is over a Pop marker
map.showPopMouseoverHighlights(sel)
#InfoPane <- map.makePopInfoPane(sel)
} else {
#InfoPane <- tags$div(style='padding: 5px;', paste0('unknown marker type: ', sel))
}
if (!is.null(InfoPane)) showInfoPane(InfoPane)
})
# things that should occur when the user moves the mouse away from a marker
observeEvent(input$CUmap_marker_mouseout,
{
#cat("Marker mouseout event observed for ", input$CUmap_marker_mouseover$id, "\n")
map.clearHighlights(type='mouseover')
})
# things that should occur when the user moves the mouse over a shape (i.e., a CU polygon or a stream segment)
observeEvent(input$CUmap_shape_mouseover,
{# mouseover events aren't always detected, so if there are residual highlighted shapes around,
# they will be on top and therefore block access to the shape underneath
# get rid of any residual mouseover highlights before proceeding
#cat("Shape mouseover event observed for ", input$CUmap_shape_mouseover$id, "\n")
map.clearHighlights(type='mouseover')
sel <- map.getID(input$CUmap_shape_mouseover$id)
InfoPane <- NULL
if (sel %in% data.CUs) { # user hovering over a CU polygon
map.showCUMouseoverHighlights(sel, mapCtrl.CUpolyHoverHighlights())
# InfoPane <- map.makeCUPopup(sel)
} else if (sel %in% data.Watersheds) { # user hovering over a stream segment
# special treatment of Pops here: if user wants to see populations, they should only be the ones associated with
# the stream segment, not all the ones associated with the CUs that are associated with the stream segment
elementsToHighlight <- mapCtrl.CUstreamHoverHighlights()
map.showCUMouseoverHighlights(map.getCUsFromStreamSeg(sel), elementsToHighlight[elementsToHighlight != 'Pops'])
if ('Pops' %in% elementsToHighlight)
map.showPopMouseoverHighlights(map.getPopsFromStreamSeg(sel))
# InfoPane <- map.makeStreamInfoPane(sel)
# map.highlightStream(sel, 'mouseover')
} else {
# InfoPane <- tags$div(style='padding: 5px;', paste0('unknown shape type: ', sel))
}
if (!is.null(InfoPane)) showInfoPane(InfoPane)
})
# things that should occur when the user moves the mouse away from a shape
observeEvent(input$CUmap_shape_mouseout,
{
#cat("Shape mouseout event observed for " , input$CUmap_shape_mouseout$id, '\n')
map.clearHighlights(type='mouseover')
})
# ------------- click events -----------------
# keep track of last click location. Need to do this since shiny fires a map click event not just for background clicks,
# but also for shape and marker clicks
lastMarkerClick <- reactiveValues(lat = 0, lng = 0)
# things that should occur when a marker is clicked on
# Single marker at the click location -> toggle its selection (CU clicks also
# toggle the CU's populations). Multiple overlapping markers -> "spiderfy"
# them (fan out copies on the spider layer) so the user can pick one.
observeEvent(input$CUmap_marker_click,
{
sel <- map.getID(input$CUmap_marker_click$id)
#cat("Marker click event observed for ", sel, "\n")
#print('event info')
#str(input$CUmap_marker_click)
if (sel %in% data.CUs) markerType <- 'CUs' else markerType <- 'Pops'
# clicks on already-spiderfied markers carry the 'spider.' prefix
spiderMode <- startsWith(input$CUmap_marker_click$id, 'spider')
if (spiderMode)
markersAtClickLocation <- c(sel)
else
markersAtClickLocation <- map.getOverlapping(sel, markerType)
InfoPane <- map.makeMarkerInfoPane(markersAtClickLocation, markerType)
if(length(markersAtClickLocation) == 1) { # select/unselect marker
if (markerType == 'CUs') { # select both CU and associated populations
map.addToSelection(sel, 'CUs')
# don't toggle pops here; selection of pops follows selection of CU, i.e., all on if CU is on, all off if CU is off
if (sel %in% data.currentSelection[['CUs']])
data.addToSelection(data.getAvailablePopsForCUs(sel), 'Pops', 'map')
else
# NOTE(review): this branch calls getPopsForCUs() while the add branch
# above uses data.getAvailablePopsForCUs() — confirm the asymmetry
# (remove ALL pops vs add only available ones) is intentional.
data.removeFromSelection(getPopsForCUs(sel), 'Pops', 'map')
} else if (markerType == 'Pops') {
map.addToSelection(sel, 'Pops')
}
if (spiderMode) { # highlight/unhighlight spiderfied marker to reflect selection
if (data.isSelected(sel, markerType)) {
map.showMarkers(CUmapProxy, items=sel, type=markerType, layer='spider', style='highlighted', df=spiderFlyMarkers())
}
else{
map.showMarkers(CUmapProxy, items=sel, type=markerType, layer='spider', style='normal', df=spiderFlyMarkers())
}
}
} else { # spiderfy overlapping markers, hide others
map.hideMarkers()
# show spiderFly
map.showMarkers(CUmapProxy, items=markersAtClickLocation, type=markerType, layer='spider', spiderFly=TRUE)
# if any of the markers within the spider are selected, highlight them now
highlighted <- markersAtClickLocation %in% data.currentSelection[[markerType]]
if (any(highlighted)) {
map.showMarkers(CUmapProxy, items=markersAtClickLocation[highlighted], type=markerType, layer='spider', style='highlighted', df=spiderFlyMarkers())
}
}
if (!is.null(InfoPane)) showInfoPane(InfoPane)
# remember the click coordinates so the background-click handler can tell
# this click apart from a genuine background click
lastMarkerClick$lat <- input$CUmap_marker_click$lat
lastMarkerClick$lng <- input$CUmap_marker_click$lng
})
# stream segments currently drawn with a 'selected' highlight
highlightedStreams <- reactiveVal(c())
# things that should occur when a shape (line or polygon) is clicked
# Clicking a stream segment toggles selection of all CUs and sites on it.
observeEvent(input$CUmap_shape_click,
{
map.clearSpiderFly()
sel <- map.getID(input$CUmap_shape_click$id)
#cat("shape click event observed for shape ", sel, '\n')
#print('event info')
#str(input$CUmap_shape_click)
InfoPane <- NULL
if (sel %in% data.Watersheds) { # user clicked on a stream segment
#InfoPane <- map.makeStreamInfoPane(sel)
CUs <- map.getCUsFromStreamSeg(sel)
pops <- map.getPopsFromStreamSeg(sel)
# everything on the segment already selected -> deselect all; otherwise select all
if (all(CUs %in% data.currentSelection[['CUs']]) && all(pops %in% data.currentSelection[['Pops']])) {
data.removeFromSelection(CUs, 'CUs', 'map')
data.removeFromSelection(pops, 'Pops', 'map')
} else {
data.addToSelection(CUs, 'CUs', 'map')
data.addToSelection(pops, 'Pops', 'map')
}
if (any(CUs %in% data.currentSelection[['CUs']]) || any(pops %in% data.currentSelection[['Pops']])) {
if (!(sel %in% highlightedStreams()))
highlightedStreams(c(highlightedStreams(), sel))
map.highlightStream(sel, 'selected')
}
}
if (!is.null(InfoPane)) showInfoPane(InfoPane)
lastMarkerClick$lat <- input$CUmap_shape_click$lat
lastMarkerClick$lng <- input$CUmap_shape_click$lng
})
# things that should occur when the map background is clicked
# Only treat this as a background click if the coordinates differ from the
# last marker/shape click (shiny also fires a map click for those).
observeEvent(input$CUmap_click,
{
#print('map click event observed')
#print('event info')
#str(input$CUmap_click)
if (!(lastMarkerClick$lat == input$CUmap_click$lat && lastMarkerClick$lng == input$CUmap_click$lng )) {
map.clearSpiderFly()
lastMarkerClick$lat <- input$CUmap_click$lat
lastMarkerClick$lng <- input$CUmap_click$lng
}
})
# Whenever the selection changes, drop the 'selected' highlight from stream
# segments that no longer have any selected CU (or, when sites are shown,
# any selected site) on them.
observeEvent({data.currentSelection[['CUs']]
data.currentSelection[['Pops']]}, {
stillHighlighted <- unlist(lapply(highlightedStreams(), function(s) {
CUs <- map.getCUsFromStreamSeg(s)
pops <- map.getPopsFromStreamSeg(s)
if (any(CUs %in% data.currentSelection[['CUs']]) ||
(data.showPops() && any(pops %in% data.currentSelection[['Pops']]))) {
s
}
else {
map.clearHighlights(Streams=c(s), type='selected')
NULL
}
}))
highlightedStreams(stillHighlighted)
}, ignoreNULL = FALSE)
# -------- Selection of CUs and populations ----------
# Redraw the on-map highlights for one item type ('CUs' or 'Pops'):
# clear the existing highlights, then re-highlight whatever
# data.currentSelection holds. For CUs this covers both the markers and the
# CU boundary polygons.
map.updateSelection <- function(type) {
  selection <- data.currentSelection[[type]]
  map.unhighlightMarkers(type = type)
  if (!is.null(selection)) map.highlightMarkers(selection, type)
  if (type == 'CUs') {
    map.unhighlightPolygons()
    if (!is.null(selection)) map.highlightPolygons(selection)
  }
}
# Repaint selection highlights whenever the CU / Pop selection changes
# (ignoreNULL = F so that clearing the selection also clears the highlights).
observeEvent(data.currentSelection[['CUs']], {map.updateSelection('CUs')}, ignoreNULL = F)
observeEvent(data.currentSelection[['Pops']], {map.updateSelection('Pops')}, ignoreNULL = F)
# Toggle the current selection for a set of ids originating from the map.
# If every id in `sel` is already selected, the whole set is deselected;
# otherwise the whole set is (re)selected. Empty input is a no-op.
#
# @param sel Vector of CU or Pop ids (may be empty).
# @param type 'CUs' or 'Pops'.
map.addToSelection <- function(sel, type) {
  if (length(sel) == 0) return(invisible(NULL))
  if (all(sel %in% data.currentSelection[[type]])) {
    # every id already selected -> toggle off
    data.removeFromSelection(sel, type = type, widget = "map")
  } else {
    # at least one id not yet selected -> select them all
    data.addToSelection(sel, type = type, widget = "map")
  }
}
# leaflet projection
# WGS84 lon/lat CRS (leaflet's native projection); used for both the drawn
# selection polygons and the marker coordinate points below.
proj <- CRS("+proj=longlat +datum=WGS84")
# Build a SpatialPolygons object from the nested coordinate list delivered
# by leafletDraw events (a list of (lng, lat) pairs).
map.makeSPpoly <- function(geomList, ID) {
  # flatten (lng1, lat1, lng2, lat2, ...) into an n x 2 coordinate matrix
  coordMat <- matrix(unlist(geomList), ncol = 2, byrow = TRUE)
  SpatialPolygons(c(Polygons(c(Polygon(coords = coordMat, hole = FALSE)), ID = as.character(ID))),
                  proj4string = proj)
}
# Given a SpatialPoints object and a SpatialPolygons object, identify which
# of the points lie inside any of the polygons.
#
# @param pts  SpatialPoints to test.
# @param poly SpatialPolygons to test against.
# @return Logical vector, one element per point: TRUE if the point is inside
#   one of the polygons.
map.ptsInsidePoly <- function(pts, poly) {
  # sp::over() returns, per point, the index of the containing polygon or NA
  # for points outside all polygons; collapse that to TRUE/FALSE.
  # (Replaces `sel <- ifelse(is.na(sel), FALSE, TRUE)`, which only returned
  # the result invisibly through the assignment; `!is.na()` is equivalent
  # and returns the value explicitly.)
  !is.na(over(pts, poly))
}
# Event handler to deal with selection polygon (rectangle or polygon shape drawn by user)
# Gets called when the user finishes drawing the feature
# CUs inside the drawn shape are toggled via map.addToSelection; sites are
# only considered when they are globally shown (data.showPops()).
observeEvent(input$CUmap_draw_new_feature, {
id <- input$CUmap_draw_new_feature$properties$`_leaflet_id`
geomList <- input$CUmap_draw_new_feature$geometry$coordinates[[1]]
selPoly <- map.makeSPpoly(geomList, id)
df <- unique(data.CU.Lookup.filtered()[ , c("CU_ID", "Lat", "Lon")])
pts <- SpatialPoints(data.frame(lng=df$Lon, lat=df$Lat), proj4string = proj)
CUs <- df$CU_ID[map.ptsInsidePoly(pts, selPoly)]
map.addToSelection(CUs, 'CUs')
if (data.showPops()) {
df <- data.Pop.Lookup.filtered()[ , c("Pop_UID", "Lat", "Lon")]
pts <- SpatialPoints(data.frame(lng=df$Lon, lat=df$Lat), proj4string = proj)
pops <- df$Pop_UID[map.ptsInsidePoly(pts, selPoly)]
map.addToSelection(pops, 'Pops')
}
# Remove selection polygon from map
# Note that removeShape() won't work for shapes drawn with leafletDraw
# Need to use custom js handler defined in www/customJSCode.js instead
session$sendCustomMessage("removeSelectionPolygon", list(elid="CUmap", layerid=id))
})
# ------ map updating on changes to filter or other changes to global settings -------
# add dynamic map elements when filter changes;
# don't render entire map again, since rendering of stream network takes a while
# Clears and redraws all dynamic layers (markers, polygons, selection
# highlights, legend); the stream network itself is left untouched.
observeEvent({data.CU.Lookup.filtered()
data.Pop.Lookup.filtered()
data.filtered()
colorCtrl.colorScheme()
data.showPops()}, {
CUmapProxy %>% clearGroup('CUMarkers') %>% clearGroup('CUPolygons') %>% clearGroup('PopMarkers')
CUmapProxy %>% clearGroup('selected.CUMarkers') %>% clearGroup('selected.CUPolygons') %>% clearGroup('selected.PopMarkers')
map.showCUMarkers(CUmapProxy)
map.showCUPolys(CUmapProxy)
if (!data.currentSelectionEmpty('CUs')) {
map.highlightMarkers(data.currentSelection[['CUs']], 'CUs')
map.highlightPolygons(data.currentSelection[['CUs']])
}
map.showPopMarkers(CUmapProxy)
if (!data.currentSelectionEmpty('Pops')) {
map.highlightMarkers(data.currentSelection[['Pops']], 'Pops')
}
# re-add the legend so it reflects the (possibly changed) color scheme
colorScheme <- colorCtrl.getColors(colorCtrl.colorScheme())
addLegend(CUmapProxy,
position='bottomleft',
layerId = 'legend',
colors=as.character(colorScheme),
labels=names(colorScheme))
}, ignoreInit=T)
# ------ map screenshot -------
# "Save snapshot" button: render a print-quality PNG of the current view via
# mapview::mapshot (requires PhantomJS) and offer it as a download.
observeEvent(input$leaflet_button_snapshot, {
# check for presence of PhantomJS. If it's not there, try installing it now ...
if (!webshot::is_phantomjs_installed()) webshot::install_phantomjs()
# check again ...
if (!webshot::is_phantomjs_installed()) {
showModal(modalDialog(
"Using this functionality requires PhantomJS, which doesn't appear to be installed on this system.",
"Please use your computer's screen capture capabilities instead.",
footer = NULL,
easyClose = TRUE))
}
else { # good to go
# If mapshot is run without the 'selfcontained' flag, it doesn't seem to respect the viewport
# However, running it in selfcontained mode only works when the app is run locally.
# On shinyapps.io, this creates an 'out of memory' error, so need to catch this here ...
isRunningLocal <- ifelse(session$clientData$url_hostname == "127.0.0.1", T, F)
showModal(modalDialog(
'Use the download button below to create a print-quality map without control widgets. ',
'Depending on your browser, you may experience a delay before the download starts or a save-as dialog window appears.',
'Please be patient ...',
tags$hr(),
downloadButton('mapDownload', label = "Download"),
footer = NULL,
easyClose = TRUE))
# NOTE(review): 292 px = sidebar width plus margins; keep in sync with the UI layout
width <- input$window_size[1] - 292 # deduct width of sidebar and various margins
output$mapDownload <- downloadHandler(
filename = paste0( "SoS_map_", Sys.Date(), ".png"),
content = function(file) { mapview::mapshot( x = map.buildMap(forPrinting = TRUE),
file = file,
vheight = 500,
vwidth = width,
#debug = TRUE,
selfcontained = isRunningLocal
)})
}
})
|
75f06d7aa46bca7547d6457e3337d175d9bd4f69 | de4acdac5c354f7cd15168f85e3e7b3bae4571ab | /R/utility.R | e77ebdcdc43d59ac18b48e83f4c1bebd44eb67d2 | [] | no_license | hyanworkspace/rangerts | 00f0027610ff9286f1190a83d0c843eec415730c | d0dd2806fbe2d32775414c22f1685a81ae65818c | refs/heads/master | 2021-12-15T21:02:07.729899 | 2021-12-06T17:39:06 | 2021-12-06T17:39:06 | 235,093,218 | 5 | 3 | null | 2021-12-06T17:39:07 | 2020-01-20T12:10:09 | C++ | UTF-8 | R | false | false | 2,779 | r | utility.R | # -------------------------------------------------------------------------------
# This file is part of rangerts.
#
# rangerts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# -------------------------------------------------------------------------------
# Convert integer to factor
# Map integer codes 1..K onto a factor whose K levels carry the supplied labels.
integer.to.factor <- function(x, labels) {
  lvls <- seq_along(labels)
  factor(x, levels = lvls, labels = labels)
}
# Save version of sample() for length(x) == 1
# See help(sample)
# Safe variant of sample(): avoids the base-R surprise where a length-one
# numeric x is treated as sample(1:x, ...). See help(sample).
save.sample <- function(x, ...) {
  picked <- sample.int(length(x), ...)
  x[picked]
}
# Order factor levels with PCA approach
# Reference: Coppersmith, D., Hong, S.J. & Hosking, J.R. (1999) Partitioning Nominal Attributes in Decision Trees. Data Min Knowl Discov 3:197. \url{https://doi.org/10.1023/A:1009869804967}.
# Order the levels of a nominal covariate `x` by their class-probability
# profile with respect to outcome `y`, using the first principal component of
# the weighted covariance of the per-level class probabilities.
# Reference: Coppersmith, Hong & Hosking (1999), Data Min Knowl Discov 3:197.
pca.order <- function(y, x) {
  x <- droplevels(x)
  lev <- levels(x)
  # Fewer than two levels: nothing to order.
  if (nlevels(x) < 2) {
    return(as.character(lev))
  }
  # Contingency table of covariate levels (rows) vs outcome classes (columns)
  counts <- table(x, droplevels(y))
  row.totals <- rowSums(counts)
  # Per-level class probabilities (each row divided by its total)
  class.probs <- counts / row.totals
  # Weighted covariance of the probability rows, weights = level frequencies
  weighted.cov <- cov.wt(class.probs, wt = row.totals)$cov
  # First principal component; projecting onto it yields a 1-D score per level
  first.pc <- prcomp(weighted.cov, rank. = 1)$rotation
  scores <- class.probs %*% first.pc
  as.character(lev[order(scores)])
}
# Compute median survival if available or largest quantile available in all strata if median not available.
# Fits a Kaplan-Meier model via survival::survfit() on `formula` and returns
# the stratum names ordered by a common survival quantile: the median if every
# stratum reaches it, otherwise the deepest quantile attained by all strata.
# NOTE(review): requires the `survival` package; `formula` is presumably a
# Surv(...) ~ strata formula — confirm against callers.
largest.quantile <- function(formula) {
  ## Fit survival model
  fit <- survival::survfit(formula)
  smry <- summary(fit)
  ## Use median survival if available or largest quantile available in all strata if median not available
  # max_quant = largest minimum survival probability reached by any stratum,
  # so (1 - max_quant) is the deepest quantile that *every* stratum attains.
  max_quant <- max(aggregate(smry$surv ~ smry$strata, FUN = min)[, "smry$surv"])
  quantiles <- quantile(fit, conf.int = FALSE, prob = min(0.5, 1 - max_quant))[, 1]
  # Strip the "var=" prefix from strata names like "group=A" so only the level remains
  names(quantiles) <- gsub(".+=", "", names(quantiles))
  ## Return ordered levels
  names(sort(quantiles))
}
# Convert rangerts object from version <0.11.5 (without x/y interface)
# Convert a rangerts forest from version <0.11.5 (without x/y interface).
#
# Old forests stored split-variable IDs counting the dependent column (and, for
# survival forests, the status column); shift IDs above those columns down so
# they index predictors only.
#
# Args:
#   forest  the `forest` component of a pre-0.11.5 rangerts object; must have
#           `num.trees`, `dependent.varID`, `split.varIDs` (and `status.varID`
#           for survival forests).
#   trees   integer vector of tree indices to convert; defaults to all trees.
#           (Bug fix: this argument was previously declared but ignored —
#           the loops always ran over every tree.)
#
# Returns: `forest` with adjusted `split.varIDs`.
convert.pre.xy <- function(forest, trees = seq_len(forest$num.trees)) {
  if (is.null(forest$status.varID)) {
    # Not survival: only the dependent-variable column precedes the predictors
    for (i in trees) {
      idx <- forest$split.varIDs[[i]] > forest$dependent.varID
      forest$split.varIDs[[i]][idx] <- forest$split.varIDs[[i]][idx] - 1
    }
  } else {
    # Survival: both the dependent (time) and status columns are removed, so
    # IDs above each threshold shift down by one apiece. Both masks are
    # computed from the *original* IDs before either adjustment is applied.
    for (i in trees) {
      idx1 <- forest$split.varIDs[[i]] > forest$dependent.varID
      idx2 <- forest$split.varIDs[[i]] > forest$status.varID
      forest$split.varIDs[[i]][idx1] <- forest$split.varIDs[[i]][idx1] - 1
      forest$split.varIDs[[i]][idx2] <- forest$split.varIDs[[i]][idx2] - 1
    }
  }
  return(forest)
}
|
dfd44d1a5823805a906732a317d119b9da6f2332 | 2b7696de761986e7c295da36201f06fca701f059 | /man/hs4_sitc4.Rd | 2734394551570748958a4aa930dc630906d410b8 | [] | no_license | cran/concordance | 130b5cadccfce9cc5ef98432fc2f938c75eebd93 | b8d1e592399f05941ce24a4afd96007b8dae0ec5 | refs/heads/master | 2021-05-04T11:23:30.586684 | 2020-04-24T15:10:08 | 2020-04-24T15:10:08 | 49,413,285 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 746 | rd | hs4_sitc4.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hs4_sitc4}
\alias{hs4_sitc4}
\title{HS4-SITC4 Concordance}
\format{
A data frame with 5205 rows and 8 variables:
\describe{
\item{HS4_6d}{6-digit HS4 Code}
\item{HS4_4d}{4-digit HS4 Code}
\item{HS4_2d}{2-digit HS4 Code}
\item{SITC4_5d}{5-digit SITC4 Code}
\item{SITC4_4d}{4-digit SITC4 Code}
\item{SITC4_3d}{3-digit SITC4 Code}
\item{SITC4_2d}{2-digit SITC4 Code}
\item{SITC4_1d}{1-digit SITC4 Code}
}
}
\source{
\url{https://unstats.un.org/unsd/trade/classifications/correspondence-tables.asp}
}
\usage{
hs4_sitc4
}
\description{
A dataset containing concordances between HS4 and SITC4 classification.
}
\keyword{datasets}
|
887e007b0039e9a6265fa241eeeb9a1508f02780 | 0491dccb3c67f76ff36f648b6689183d995b33c7 | /man/autoxgboost_space.Rd | 66ae365a9cf46a25d27742ced8630404d50de5c8 | [] | no_license | mb706/autoxgboost3 | 104b113f54da8eac3e31c88d0be6ab9acccd3dce | 16faaf2b7dda6300f090aefe55ded394c980a984 | refs/heads/master | 2020-08-09T19:40:16.126360 | 2020-04-08T10:32:15 | 2020-04-08T10:32:15 | 214,157,414 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,352 | rd | autoxgboost_space.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoxgboost.R
\name{autoxgboost_space}
\alias{autoxgboost_space}
\title{Get an xgboost Search Space and Learner}
\usage{
autoxgboost_space(
task,
predict.type = "response",
max.nrounds = 1e+06,
early.stopping.rounds = 10,
early.stopping.fraction = 4/5,
impact.encoding.boundary = 10,
nthread = NULL,
tune.threshold = TRUE,
emulate_exactly = TRUE
)
}
\arguments{
\item{task}{The task.}

\item{predict.type}{Predict type. One of \dQuote{prob}, \dQuote{response} (default). Only \dQuote{response} is supported for regression tasks.}

\item{max.nrounds}{Maximum number of allowed boosting iterations. Default is \code{10^6}.}

\item{early.stopping.rounds}{After how many iterations without an improvement in the boosting OOB error should training be stopped? Default is \code{10}.}

\item{early.stopping.fraction}{What fraction of the data should be used for early stopping (i.e. as a training set). This is apparently just the holdout split for optimization. Default is \code{4/5}.}

\item{impact.encoding.boundary}{Defines the threshold on how factor variables are handled. Factors with more levels than the \code{impact.encoding.boundary} get impact encoded while factor variables with less or equal levels than the \code{impact.encoding.boundary} get dummy encoded. For \code{impact.encoding.boundary = 0L}, all factor variables get impact encoded while for \code{impact.encoding.boundary = .Machine$integer.max}, all of them get dummy encoded. Default is \code{10}.}

\item{nthread}{Number of cores to use. If \code{NULL} (default), xgboost will determine internally how many cores to use.}

\item{tune.threshold}{Should thresholds be tuned? This has only an effect for classification, see \code{\link[mlr]{tuneThreshold}}. Default is \code{TRUE}. Only \code{FALSE} is supported currently; if this is \code{TRUE} an error is thrown.}

\item{emulate_exactly}{Whether to emulate autoxgboost behaviour for \code{impact.encoding.boundary}. Autoxgboost applies the boundary to the \emph{whole} task (behaviour if \code{emulate_exactly == TRUE}), while a more exact approach would be to apply it only to the training set (\code{emulate_exactly == FALSE}).}
}
\value{
\code{something}.
}
\description{
An xgboost Learner and ParamSet is created that emulates autoxgboost.
}
|
86a20a9aab54a7494d4e2fc5a062358bdae26cbb | 3affbc5578f00b4c432152dd5ddb6cc83cf96b1b | /R/query_endpoint.R | ce11711368f7fa142cb3f0a5f07cd47811129f5e | [] | no_license | INyabuto/dhis2r | 1ab080cb71f646d692fe5410466f2e6aaeb5bdaf | c973a2bc1d85c431a8c7f3fb2349fc1325e55817 | refs/heads/master | 2021-06-25T04:19:06.983910 | 2021-06-13T14:29:31 | 2021-06-13T14:29:31 | 208,319,417 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,814 | r | query_endpoint.R | #' Parse DHIS2 endpoint fields
#'
#' This function parses DHIS2 endpoint fields into a character string.
#'
#' @param ... DHIS2 query parameters.
#' @return A character string.
#'
#' @export
#' @examples
#' # select all the fields
#' api_fields("*")
#' # select only name, id, shortName and code
#' api_fields("name", "id", "shortName", "code")
#' # select fields in a collection
#' api_fields("name", organisationUnits = c("name", "id", "code"))
api_fields <- function(...) {
  fields <- list(...)
  # Named arguments denote collections (e.g. organisationUnits = c("name", "id")),
  # unnamed arguments are plain field names.
  named <- fields[is_named(fields)]
  plain <- fields[!is_named(fields)]
  # Collapse each collection to "a,b,c" and wrap it as "collection[a,b,c]"
  wrapped <- purrr::imap(purrr::map(named, commas),
                         function(.x, .y) paste0(.y, "[", .x, "]"))
  plain_part <- paste0(plain, collapse = ",")
  wrapped_part <- paste0(wrapped, collapse = ",")
  # Assemble: plain fields first, then collection fields
  if (length(wrapped) > 0 && length(plain) > 0) {
    api_field <- paste(plain_part, wrapped_part, sep = ",")
  } else if (length(wrapped) > 0) {
    api_field <- wrapped_part
  } else {
    api_field <- plain_part
  }
  # Prefix DHIS2 presets (all, owner, ...) with ":" as the API expects
  parse_presets(api_field)
}
# Prefix DHIS2 field presets (all, nameable, identifiable, persisted, owner)
# with ":" as the Web API expects (e.g. "all" -> ":all").
#
# Bug fix: the previous implementation used gsub(..., fixed = TRUE), which also
# rewrote field names that merely *contain* a preset as a substring (e.g.
# "ownerName" -> ":ownerName", "smaller" -> "sm:aller"). Word-boundary regexes
# only match whole preset tokens, while still handling presets inside
# collection brackets such as "organisationUnits[all]".
parse_presets <- function(field = NULL) {
  if (!is.null(field)) {
    to_parse <- field
    to_parse <- gsub("\\ball\\b", ":all", to_parse)
    to_parse <- gsub("\\bnameable\\b", ":nameable", to_parse)
    to_parse <- gsub("\\bidentifiable\\b", ":identifiable", to_parse)
    to_parse <- gsub("\\bpersisted\\b", ":persisted", to_parse)
    to_parse <- gsub("\\bowner\\b", ":owner", to_parse)
    to_parse
  }
}
#' Parse DHIS2 filter
#'
#' Short hand for specifying an filter parameter in an \code{\link{api_query}}.
#'
#' @param x An endpoint field.
#' @param filter A DHIS2 filter object.
#' @param obj Objects to filter.
#' @export
#' @examples
#' api_filter("name", "eq", "TZ FP A360 - Conversion rate < 20")
#' api_filter("name", "ilike", "TZ FP A360 - Conversion rate < 20")
#' @name api_filter
# Build a DHIS2 filter expression of the form "field:operator:value".
api_filter <- function(x, filter, obj) {
  sprintf("%s:%s:%s", x, filter, obj)
}
#' Parse DHIS2 order
#'
#' Short hand for specifying an order parameter in an \code{\link{api_query}}.
#'
#' @param x An endpoint field.
#' @param by Type of ordering. Use `asc` to order in ascending order, `desc` in
#' descending order. The variants `iasc` and `idesc` are case insensitive.
#' @export
#' @examples
#' # order name in ascending order
#' api_order("name", by = "asc")
#' # order name in descending order
#' api_order("name", by = "desc")
#' # example in a query to order by ascending order
#' api_query(order = api_order("name", "asc"))
#' # order data elements in descending order
#' modify_endpoint(endpoint = "dataElements", order = api_order("name", "desc"))
# Build a DHIS2 order expression of the form "field:direction".
# `by` defaults to "asc"; "iasc"/"idesc" are the case-insensitive variants.
api_order <- function(x, by = c("asc", "iasc", "desc", "idesc")) {
  direction <- match.arg(by)
  paste(x, direction, sep = ":")
}
#' Parse DHIS2 endpoint query
#'
#' This function parses DHIS2 endpoint queries into a character string.
#'
#' @param ... DHIS2 query parameters.
#' @return A character string.
#' @name api_query
#' @examples
#' api_query(fields = "name", totalPages = TRUE)
#' api_query(fields = api_fields("name", "id", organisationUnit = c("name")))
#' @export
api_query <- function(...) {
  params <- list(...)
  # Every element of ... must be named; stops otherwise.
  check_params(params)
  # No parameters at all -> no query string
  if (missing(...)) {
    return(NULL)
  }
  # Collapse multi-valued parameters to comma-separated strings, then join
  # "name=value" pairs with "&" behind a leading "?".
  collapsed <- purrr::map(params[is_named(params)], commas)
  pairs <- purrr::imap(collapsed, ~ paste0(.y, "=", .x))
  paste0("?", paste0(pairs, collapse = "&"))
}
# Stop with an informative error unless every element of `params` is named.
check_params <- function(params) {
  named <- is_named(params)
  if (!all(named)) {
    stop("All elements of `...` must be named")
  }
}
|
63434f258072fbce34923dc10a172b66331642f5 | af4ec6a193335e8bd4361de465f775f7ff3a398b | /yscripts/R_scriptsb/VoCCPrioritizr_04b_RunPrioritizr.R | 8244978010d6d419263bcc4acc76f5307f9f5787 | [] | no_license | IsaakBM/VoCC_Prioritizr_global | 6cc6ff32054546a736f17d718e830916816d5726 | eedac4db9af04a38c8feed8e72c0bf2821024c58 | refs/heads/master | 2023-04-16T19:27:01.452057 | 2022-04-04T19:27:16 | 2022-04-04T19:27:16 | 271,407,011 | 10 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,474 | r | VoCCPrioritizr_04b_RunPrioritizr.R | # This code was written by Isaac Brito-Morales (i.britomorales@uq.edu.au)
# Please do not distribute this code without permission.
# NO GUARANTEES THAT CODE IS CORRECT
# Caveat Emptor!
# Run a Marxan-style prioritisation (prioritizr + Gurobi) for every scenario
# sub-directory found under `path`, in parallel, saving each problem/solution
# pair as an .rds file in `outdir`.
#
# Args:
#   path    directory whose immediate sub-directories each hold one scenario's
#           Marxan-format inputs (pu_*.dat, spec_*.dat, bound*_*.dat, puvsp_0*.dat)
#   outdir  output directory; must end with "/" (see the paste() in saveRDS below)
#   cost    cost treatment: "calibration" zeroes the cost column, "area" sets a
#           constant cost, any other value keeps the cost read from pu_*.dat
#   blm_df  path to a CSV whose 2nd column holds one BLM value per scenario
#           (row order must match the sub-directory order of `path`)
#   sol     number of solutions requested from the gap portfolio
#
# Side effects: spawns a 5-worker cluster, solves with Gurobi, writes .rds files.
pzr_function <- function(path, outdir, cost, blm_df, sol) {
  library(raster)
  library(sf)
  library(data.table)
  library(dplyr)
  library(prioritizr)
  library(gurobi)
  library(spatstat)
  library(reldist)
  library(doParallel)
  library(foreach)
  # Reading features raster files (AquaMaps | Trajectory classes)
  # NOTE(review): paste(..., sep = "/") on a single vector is a no-op;
  # list.dirs() already returns full paths here — confirm intent.
  dir.layers <- paste(list.dirs(path = path, full.names = TRUE, recursive = FALSE), sep = "/")
  # Begin the parallel structure (fixed at 5 workers)
  ncores <- 5
  cl <- makeCluster(ncores)
  registerDoParallel(cl)
  # NOTE(review): problem_list is never used below — dead assignment?
  problem_list <- list()
  problems <- foreach(kk = 1:length(dir.layers), .packages = c("prioritizr", "gurobi", "dplyr", "reldist")) %dopar% {
    # Identifying files per directories
    # NOTE(review): the patterns use glob-style "*" but list.files() expects a
    # regex; "*pu_.*.dat$" happens to work as a regex — verify matches.
    file_pu <- paste(dir.layers[kk], list.files(path = paste(dir.layers[kk], sep = "/"), pattern = "*pu_.*.dat$"), sep = "/")
    file_features <- paste(dir.layers[kk], list.files(path = paste(dir.layers[kk], sep = "/"), pattern = "*spec_.*.dat$"), sep = "/")
    file_bound <- paste(dir.layers[kk], list.files(path = paste(dir.layers[kk], sep = "/"), pattern = "*bound.*._.*.dat$"), sep = "/")
    file_rij <- paste(dir.layers[kk], list.files(path = paste(dir.layers[kk], sep = "/"), pattern = "*puvsp_0.*.dat$"), sep = "/")
    # Reading files per directories: NA costs become 0, and status 3
    # ("locked out" in Marxan terms — TODO confirm) is reset to 0 (free)
    pu <- read.table(file_pu, sep = ",", header = TRUE) %>%
      mutate(cost = ifelse(is.na(cost), 0, cost)) %>%
      mutate(status = ifelse(status == 3, 0, status))
    # Cost overrides: "calibration" = no cost, "area" = constant cost per unit
    if(cost == "calibration") {pu$cost <- 0}
    if(cost == "area") {pu$cost <- 2619726846}
    features <- read.table(file_features, sep = ",", header = TRUE)
    bound <- read.table(file_bound, sep = ",", header = TRUE)
    rij <- read.table(file_rij, sep = ",", header = TRUE)
    # Reading BLM file (re-read inside each worker; column 2 = BLM per scenario)
    blmDF <- read.csv(blm_df)
    # Establish the Problem
    mp1 <- marxan_problem(x = pu, spec = features, puvspr = rij, bound = bound, blm = blmDF[kk, 2]) %>%
      add_gap_portfolio(number_solutions = sol, pool_gap = 0)
    # Solve the problem (Gurobi, 3 h time limit, 2 threads per worker)
    mp3_solution <- mp1 %>%
      add_gurobi_solver(gap = 0, presolve = 2, time_limit = 10800, threads = 2, first_feasible = FALSE) %>%
      solve(force = TRUE)
    # Write the object: list(problem, solution) named "<scenario>_<blm>_<sol>.rds"
    mp3_final <- list(mp1, mp3_solution)
    ns <- basename(dir.layers[kk])
    saveRDS(mp3_final, paste(outdir, paste(ns, blmDF[kk, 2], sol, sep = "_"), ".rds", sep = ""))
  }
  stopCluster(cl)
}
# HPC run (absolute /scratch paths): real cost layer ("general"), per-scenario
# BLM values from BLM_0.csv; system.time() only reports elapsed wall time.
system.time(pzr_function(path = "/scratch/user/uqibrito/Project04c/Prioritisation/PrioritizrFiles/features_10100",
                         outdir = "/scratch/user/uqibrito/Project04c/Prioritisation/PrioritizrSolutionsCost/features_10100/",
                         cost = "general",
                         blm_df = "/scratch/user/uqibrito/Project04c/Prioritisation/PrioritizrFiles/BLM_0.csv",
                         sol = 1))
# Calibration run (relative paths): cost = "calibration" zeroes the cost
# column inside pzr_function, so solutions reflect features/boundary only.
system.time(pzr_function(path = "Prioritisation/PrioritizrFiles/features_10100",
                         outdir = "Prioritisation/PrioritizrSolutionsNCost/",
                         cost = "calibration",
                         blm_df = "Prioritisation/PrioritizrFiles/BLM_0.csv",
                         sol = 1))
|
cb85a7047b7c25f2c6017dea6d88d4fb6ecd2ba9 | 5d9b5489a4d130398f2ce46524b940cfff04dd3f | /man/sv_required.Rd | 6d77e94fa45cab4447ff59dd9aaea4c54b9b90f6 | [
"MIT"
] | permissive | SamanthaToet/shinyvalidate | 7677acd3f62ba5df30dca8373b8b84a3fe842698 | 2ed1b47657c5d3ebedfca247d92a0d731b02a83d | refs/heads/master | 2023-06-07T03:57:53.286677 | 2021-06-29T16:43:19 | 2021-06-29T16:43:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,446 | rd | sv_required.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rules.R
\name{sv_required}
\alias{sv_required}
\title{Validate that the field is present}
\usage{
sv_required(message = "Required", test = input_provided)
}
\arguments{
\item{message}{The validation error message to be displayed if the test does
not pass.}
\item{test}{A single-argument function, or single-sided formula (using \code{.} to
access the value to test), that returns \code{TRUE} for success and \code{FALSE} for
failure.}
}
\value{
A function suitable for use as an
\code{\link[=InputValidator]{InputValidator$add_rule()}} rule.
}
\description{
Call \code{sv_required()} to generate a validation function that ensures an input
value is present. By default, the definition of "is present" is based on
\code{\link[=input_provided]{input_provided()}}.
}
\examples{
## Only run examples in interactive R sessions
if (interactive()) {
library(shiny)
library(shinyvalidate)
ui <- fluidPage(
textInput("name", "Name")
)
server <- function(input, output, session) {
# Validation rules are set in the server, start by
# making a new instance of an `InputValidator()`
iv <- InputValidator$new()
# Basic usage: ensure that `input$name` is present,
# and return a terse validation message if not
iv$add_rule("name", sv_required())
# Finally, `enable()` the validation rules
iv$enable()
}
shinyApp(ui, server)
}
# There are some alternatives to the above example,
# and the following snippets can serve to replace
# the `iv$add_rule(...)` statement
# (1) Providing a custom message to display
# when validation fails:
# iv$add_rule("email", sv_required("An email is required"))
# (2) Providing a `test` argument to change
# the definition of "is present"; in this
# snippet, any non-NULL value will be accepted:
# iv$add_rule("choices", sv_required(test = is.null))
}
\seealso{
The \code{\link[=sv_optional]{sv_optional()}} function, which takes a different approach to
field presence.
Other rule functions:
\code{\link{compose_rules}()},
\code{\link{sv_between}()},
\code{\link{sv_email}()},
\code{\link{sv_equal}()},
\code{\link{sv_gte}()},
\code{\link{sv_gt}()},
\code{\link{sv_in_set}()},
\code{\link{sv_integer}()},
\code{\link{sv_lte}()},
\code{\link{sv_lt}()},
\code{\link{sv_not_equal}()},
\code{\link{sv_numeric}()},
\code{\link{sv_optional}()},
\code{\link{sv_regex}()},
\code{\link{sv_url}()}
}
\concept{rule functions}
|
d5f0bb50328b0038899f16b86d896c18641e3600 | 13db5b908ee53411fca6a45c0a7d93de9cc4e3a9 | /R/combine_rttm.R | 57822d2232a5bf9181376d7f4934316f0e25e2bc | [] | no_license | LAAC-LSCP/avutils | 67cb1d7cd41bd7bafa03cdfeec677f7951bd2f66 | 3fa75f5f7447df5a5534486cc6c19428f9509dab | refs/heads/master | 2022-07-18T05:47:25.732313 | 2020-05-05T15:50:11 | 2020-05-05T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 777 | r | combine_rttm.R | #' merge rttm file sequence
#'
#' @param rttm_files character, paths to rttm files to merge
#' @param split_dur numeric, the duration of the underlying split audio
#' @param basename character, the name of the file (unmerged)
#'
#' @return a data.frame that can be written as rttm file
#' @export
#'
# Merge a sequence of rttm files produced from equal-duration audio splits,
# shifting each segment's onset (column V4) by the cumulative split offset.
#
# Fixes vs the previous version: seq_along() instead of 1:length() (safe for
# empty input), parts are collected in a preallocated list instead of growing
# via rbind() in the loop (O(n^2)), and the all-files-empty case now returns
# an empty 10-column data.frame instead of crashing on `res$V2` (which is
# invalid on the matrix the old code fell back to).
#
# Args:
#   rttm_files  character, paths to rttm files to merge (in split order)
#   split_dur   numeric, duration of each underlying split audio chunk
#   basename    character, recording name written into column V2 (optional)
#
# Returns: a data.frame (columns V1..V10) that can be written as an rttm file.
combine_rttm <- function(rttm_files, split_dur, basename = NULL) {
  parts <- vector("list", length(rttm_files))
  for (i in seq_along(rttm_files)) {
    # Empty rttm files (no detected segments) are skipped entirely
    if (length(readLines(rttm_files[i])) > 0) {
      seg <- read.table(rttm_files[i],
                        header = FALSE,
                        stringsAsFactors = FALSE)
      # V4 holds the segment onset: shift by this chunk's offset in the recording
      seg$V4 <- seg$V4 + split_dur * (i - 1)
      parts[[i]] <- seg
    }
  }
  res <- do.call(rbind, parts)  # NULL entries (empty files) are dropped
  if (is.null(res)) {
    # No file contained any segment: empty frame with the rttm column layout
    res <- as.data.frame(matrix(nrow = 0, ncol = 10))
  }
  # V2 is the recording identifier column of the rttm format
  if (!is.null(basename) && nrow(res) > 0) {
    res$V2 <- basename
  }
  res
}
|
7b17e949b8344d5bf0691d8849ef65e7045e4254 | 0f9d44b238c97d9df67a59cb9071ecc667d60abf | /R/print.summary.dbplsr.r | 30e42895f7afa3bd443a7909b433bd7f746ca91d | [] | no_license | cran/dbstats | 4ec407eb23d6eca92323a3755b7e2fe0de279aa5 | af4e78823b3e83652cf5c45127dfec1df1cc0c43 | refs/heads/master | 2022-12-10T13:26:37.539231 | 2022-12-07T14:40:06 | 2022-12-07T14:40:06 | 17,695,407 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,201 | r | print.summary.dbplsr.r | print.summary.dbplsr <- function(x,digits=4,...){
# print the call
cat("\ncall: ")
x$call[[1]]<-as.name("dbplsr")
print(x$call)
ncomp <- x$ncomp
# print the Weighted Residuals
cat(gettextf("\nWeighted Residuals using %i components: ",ncomp),"\n")
print(format(summary(as.numeric(x$residuals[[ncomp+1]])),digits=3),quote = FALSE)
if(x$method!="ncomp"){
cat(gettextf("\nOptimal number of components using method %s: ",x$method," "))
cat(x$ncomp.opt)
}
# print the appropriate statistic according to the using method
if(x$method!="ncomp"){
cat("\n",paste(x$method, "value criterion :", format(min(x$crit.value),scientific=TRUE)),"\n")
cat("\n")
}
R2 <-100*x$r.squared
adjR2 <- 100*x$adj.r.squared
gvar <- 100*(as.numeric(x$gvar)-x$gvar.iter)/as.numeric(x$gvar)
# print R-squared and adjusted R-squared
cat("\n% variance explained: \n")
if(x$method!="ncomp")
var.exp <- t(data.frame(R2=R2,adjR2=adjR2,gvar=gvar,crit=x$crit.value))
else
var.exp <- t(data.frame(R2=R2,adjR2=adjR2,gvar=gvar))
colnames(var.exp)<-names(x$residuals)[2:(ncomp+1)]
print(var.exp,digits=digits)
cat("\n")
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.