content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{bin_mean}
\alias{bin_mean}
\title{Binomial Mean}
\usage{
bin_mean(trials, prob)
}
\arguments{
\item{trials}{trials}
\item{prob}{probability}
}
\value{
binmean
}
\description{
Calculates mean of a binomial distribution of a given probability in n trials
}
\examples{
bin_mean(5, 0.5)
}
|
/binomial/man/bin_mean.Rd
|
no_license
|
stat133-sp19/hw-stat133-J-V-H
|
R
| false
| true
| 382
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{bin_mean}
\alias{bin_mean}
\title{Binomial Mean}
\usage{
bin_mean(trials, prob)
}
\arguments{
\item{trials}{trials}
\item{prob}{probability}
}
\value{
binmean
}
\description{
Calculates mean of a binomial distribution of a given probability in n trials
}
\examples{
bin_mean(5, 0.5)
}
|
## Exploratory bar charts of Titanic passenger survival (Status) broken
## down by Age, Passenger.Class, Sex, and Embarked.
## NOTE(review): the hard-coded setwd() path is machine-specific.
getwd()
setwd("/Users/Michael/Udacity/Project 5/Data Visualization/Final Project/R Sketches")
# Import ggplot2 library
library(ggplot2)
# Read data
titanic <- read.csv('titanic1.csv')
summary(titanic)
# BUG FIX: the original chained `+ ggsave(...)` onto each plot. ggsave()
# is not a ggplot layer, so the `+` errors — and its side effect would
# save the *previously* rendered plot, not the new one. Build each plot,
# then save it explicitly via the `plot` argument.
# Bar chart: Age
p_age <- ggplot(aes(x = Age, fill = Status), data = titanic) + geom_bar()
ggsave('Age.png', plot = p_age)
# Bar chart: Passenger class
p_class <- ggplot(aes(x = Passenger.Class, fill = Status), data = titanic) + geom_bar()
ggsave('Class.png', plot = p_class)
# Bar chart: Sex
p_sex <- ggplot(aes(x = Sex, fill = Status), data = titanic) + geom_bar()
ggsave('Sex.png', plot = p_sex)
# Bar chart: Embarked
p_embarked <- ggplot(aes(x = Embarked, fill = Status), data = titanic) + geom_bar()
ggsave('Embarked.png', plot = p_embarked)
|
/R Plots/plots.R
|
no_license
|
michaelstrobl90/Udacity-Project-5
|
R
| false
| false
| 651
|
r
|
## Exploratory bar charts of Titanic passenger survival (Status) broken
## down by Age, Passenger.Class, Sex, and Embarked.
## NOTE(review): the hard-coded setwd() path is machine-specific.
getwd()
setwd("/Users/Michael/Udacity/Project 5/Data Visualization/Final Project/R Sketches")
# Import ggplot2 library
library(ggplot2)
# Read data
titanic <- read.csv('titanic1.csv')
summary(titanic)
# BUG FIX: the original chained `+ ggsave(...)` onto each plot. ggsave()
# is not a ggplot layer, so the `+` errors — and its side effect would
# save the *previously* rendered plot, not the new one. Build each plot,
# then save it explicitly via the `plot` argument.
# Bar chart: Age
p_age <- ggplot(aes(x = Age, fill = Status), data = titanic) + geom_bar()
ggsave('Age.png', plot = p_age)
# Bar chart: Passenger class
p_class <- ggplot(aes(x = Passenger.Class, fill = Status), data = titanic) + geom_bar()
ggsave('Class.png', plot = p_class)
# Bar chart: Sex
p_sex <- ggplot(aes(x = Sex, fill = Status), data = titanic) + geom_bar()
ggsave('Sex.png', plot = p_sex)
# Bar chart: Embarked
p_embarked <- ggplot(aes(x = Embarked, fill = Status), data = titanic) + geom_bar()
ggsave('Embarked.png', plot = p_embarked)
|
## XGBoost tutorial on the "Outbreak_240817" animal-disease dataset:
## build an all-numeric, one-hot-encoded feature matrix, then train a
## series of increasingly tuned boosted-tree classifiers predicting
## whether an outbreak affected humans.
setwd('C:/Users/Nithin/Desktop')
# NOTE(review): install.packages() on every run is wasteful; consider
# guarding with requireNamespace() checks instead.
install.packages('xgboost')
install.packages("tidyverse")
library(xgboost)   # for xgboost
library(tidyverse) # general utility functions
install.packages('readr')
install.packages("DiagrammeR")
install.packages("stringr")
library(DiagrammeR)
library(stringr)
library('readr')

diseaseInfo <- read_csv("Outbreak_240817.csv")

# Shuffle the rows so the head/tail 70/30 split below is random.
set.seed(1234)
diseaseInfo <- diseaseInfo[sample(1:nrow(diseaseInfo)), ]
head(diseaseInfo)

# Remove all "human*" columns so the target cannot leak into the features.
diseaseInfo_humansRemoved <- diseaseInfo %>% select(-starts_with("human"))

# Target label: TRUE when humansAffected is non-NA (humans were affected).
diseaseLabels <- diseaseInfo %>%
  select(humansAffected) %>% # get the column with the # of humans affected
  is.na() %>%                # is it NA?
  magrittr::not()            # switch TRUE and FALSE
head(diseaseLabels)               # first few values of our target variable
head(diseaseInfo$humansAffected)  # first few values of the original column

# Keep only numeric predictors; drop the id and raw coordinates
# (location is already captured by the country one-hot below).
diseaseInfo_numeric <- diseaseInfo_humansRemoved %>%
  select(-Id) %>%                     # the case id shouldn't contain useful information
  select(-c(longitude, latitude)) %>% # location data is also in country data
  select_if(is.numeric)               # select remaining numeric columns
str(diseaseInfo_numeric)  # confirm everything left is numeric

# One-hot encode the country column.
head(diseaseInfo$country)
model.matrix(~country-1, head(diseaseInfo))  # preview on the first rows
region <- model.matrix(~country-1, diseaseInfo)

# Species features: a domestic/wild flag plus one-hot species names.
head(diseaseInfo$speciesDescription)
diseaseInfo_numeric$is_domestic <- str_detect(diseaseInfo$speciesDescription, "domestic")
speciesList <- diseaseInfo$speciesDescription %>%
  str_replace("[[:punct:]]", "") %>% # remove punctuation (some rows have parentheses)
  str_extract("[a-z]*$")             # extract the last word in each row
speciesList <- tibble(species = speciesList)
options(na.action = 'na.pass')  # don't drop NA rows when building the model matrix
species <- model.matrix(~species-1, speciesList)

# Assemble the final all-numeric feature matrix.
diseaseInfo_numeric <- cbind(diseaseInfo_numeric, region, species)
diseaseInfo_matrix <- data.matrix(diseaseInfo_numeric)

# 70/30 train/test split (rows were shuffled above).
numberOfTrainingSamples <- round(length(diseaseLabels) * .7)
train_data <- diseaseInfo_matrix[1:numberOfTrainingSamples, ]
train_labels <- diseaseLabels[1:numberOfTrainingSamples]
test_data <- diseaseInfo_matrix[-(1:numberOfTrainingSamples), ]
test_labels <- diseaseLabels[-(1:numberOfTrainingSamples)]

# Wrap the splits in xgboost's native DMatrix containers.
dtrain <- xgb.DMatrix(data = train_data, label = train_labels)
dtest <- xgb.DMatrix(data = test_data, label = test_labels)

# Baseline model: two boosting rounds, default tree depth.
model <- xgboost(data = dtrain,
                 nround = 2,
                 objective = "binary:logistic")
pred <- predict(model, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Same, but with shallower trees (max depth 3).
model_tuned <- xgboost(data = dtrain,
                       max.depth = 3,
                       nround = 2,
                       objective = "binary:logistic")
pred <- predict(model_tuned, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Class balance in the training labels, used to weight the positive class.
negative_cases <- sum(train_labels == FALSE)
positive_cases <- sum(train_labels == TRUE)  # typo "postive_cases" fixed

# More rounds, early stopping, and class-imbalance weighting.
model_tuned <- xgboost(data = dtrain,
                       max.depth = 3,
                       nround = 10,
                       early_stopping_rounds = 3,
                       objective = "binary:logistic",
                       scale_pos_weight = negative_cases / positive_cases)
pred <- predict(model_tuned, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Add gamma regularization on top of the previous settings.
model_tuned <- xgboost(data = dtrain,
                       max.depth = 3,
                       nround = 10,
                       early_stopping_rounds = 3,
                       objective = "binary:logistic",
                       scale_pos_weight = negative_cases / positive_cases,
                       gamma = 1)
pred <- predict(model_tuned, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Visualize the ensemble as one collapsed tree.
# BUG FIX: diseaseInfo_matrix is a matrix, so names() returns NULL;
# colnames() gives the actual feature names.
xgb.plot.multi.trees(feature_names = colnames(diseaseInfo_matrix),
                     model = model)
# convert log odds to probability
# Convert log-odds to a probability via the logistic (sigmoid) function.
# Vectorized: operates elementwise on a numeric vector of odds.
odds_to_probs <- function(odds){
  expo <- exp(odds)
  expo / (1 + expo)
}
# Probability of the leaf above countryPortugal (log-odds -0.599).
odds_to_probs(-0.599)
# Per-feature importance scores for the baseline model.
# BUG FIX: diseaseInfo_matrix is a matrix, so names() is NULL — use colnames().
importance_matrix <- xgb.importance(colnames(diseaseInfo_matrix), model = model)
# plot
xgb.plot.importance(importance_matrix)
|
/xgboost.R
|
no_license
|
nithinrajkairali/XGBoost
|
R
| false
| false
| 6,398
|
r
|
## XGBoost tutorial on the "Outbreak_240817" animal-disease dataset:
## build an all-numeric, one-hot-encoded feature matrix, then train a
## series of increasingly tuned boosted-tree classifiers predicting
## whether an outbreak affected humans.
setwd('C:/Users/Nithin/Desktop')
# NOTE(review): install.packages() on every run is wasteful; consider
# guarding with requireNamespace() checks instead.
install.packages('xgboost')
install.packages("tidyverse")
library(xgboost)   # for xgboost
library(tidyverse) # general utility functions
install.packages('readr')
install.packages("DiagrammeR")
install.packages("stringr")
library(DiagrammeR)
library(stringr)
library('readr')

diseaseInfo <- read_csv("Outbreak_240817.csv")

# Shuffle the rows so the head/tail 70/30 split below is random.
set.seed(1234)
diseaseInfo <- diseaseInfo[sample(1:nrow(diseaseInfo)), ]
head(diseaseInfo)

# Remove all "human*" columns so the target cannot leak into the features.
diseaseInfo_humansRemoved <- diseaseInfo %>% select(-starts_with("human"))

# Target label: TRUE when humansAffected is non-NA (humans were affected).
diseaseLabels <- diseaseInfo %>%
  select(humansAffected) %>% # get the column with the # of humans affected
  is.na() %>%                # is it NA?
  magrittr::not()            # switch TRUE and FALSE
head(diseaseLabels)               # first few values of our target variable
head(diseaseInfo$humansAffected)  # first few values of the original column

# Keep only numeric predictors; drop the id and raw coordinates
# (location is already captured by the country one-hot below).
diseaseInfo_numeric <- diseaseInfo_humansRemoved %>%
  select(-Id) %>%                     # the case id shouldn't contain useful information
  select(-c(longitude, latitude)) %>% # location data is also in country data
  select_if(is.numeric)               # select remaining numeric columns
str(diseaseInfo_numeric)  # confirm everything left is numeric

# One-hot encode the country column.
head(diseaseInfo$country)
model.matrix(~country-1, head(diseaseInfo))  # preview on the first rows
region <- model.matrix(~country-1, diseaseInfo)

# Species features: a domestic/wild flag plus one-hot species names.
head(diseaseInfo$speciesDescription)
diseaseInfo_numeric$is_domestic <- str_detect(diseaseInfo$speciesDescription, "domestic")
speciesList <- diseaseInfo$speciesDescription %>%
  str_replace("[[:punct:]]", "") %>% # remove punctuation (some rows have parentheses)
  str_extract("[a-z]*$")             # extract the last word in each row
speciesList <- tibble(species = speciesList)
options(na.action = 'na.pass')  # don't drop NA rows when building the model matrix
species <- model.matrix(~species-1, speciesList)

# Assemble the final all-numeric feature matrix.
diseaseInfo_numeric <- cbind(diseaseInfo_numeric, region, species)
diseaseInfo_matrix <- data.matrix(diseaseInfo_numeric)

# 70/30 train/test split (rows were shuffled above).
numberOfTrainingSamples <- round(length(diseaseLabels) * .7)
train_data <- diseaseInfo_matrix[1:numberOfTrainingSamples, ]
train_labels <- diseaseLabels[1:numberOfTrainingSamples]
test_data <- diseaseInfo_matrix[-(1:numberOfTrainingSamples), ]
test_labels <- diseaseLabels[-(1:numberOfTrainingSamples)]

# Wrap the splits in xgboost's native DMatrix containers.
dtrain <- xgb.DMatrix(data = train_data, label = train_labels)
dtest <- xgb.DMatrix(data = test_data, label = test_labels)

# Baseline model: two boosting rounds, default tree depth.
model <- xgboost(data = dtrain,
                 nround = 2,
                 objective = "binary:logistic")
pred <- predict(model, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Same, but with shallower trees (max depth 3).
model_tuned <- xgboost(data = dtrain,
                       max.depth = 3,
                       nround = 2,
                       objective = "binary:logistic")
pred <- predict(model_tuned, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Class balance in the training labels, used to weight the positive class.
negative_cases <- sum(train_labels == FALSE)
positive_cases <- sum(train_labels == TRUE)  # typo "postive_cases" fixed

# More rounds, early stopping, and class-imbalance weighting.
model_tuned <- xgboost(data = dtrain,
                       max.depth = 3,
                       nround = 10,
                       early_stopping_rounds = 3,
                       objective = "binary:logistic",
                       scale_pos_weight = negative_cases / positive_cases)
pred <- predict(model_tuned, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Add gamma regularization on top of the previous settings.
model_tuned <- xgboost(data = dtrain,
                       max.depth = 3,
                       nround = 10,
                       early_stopping_rounds = 3,
                       objective = "binary:logistic",
                       scale_pos_weight = negative_cases / positive_cases,
                       gamma = 1)
pred <- predict(model_tuned, dtest)
err <- mean(as.numeric(pred > 0.5) != test_labels)
print(paste("test-error=", err))

# Visualize the ensemble as one collapsed tree.
# BUG FIX: diseaseInfo_matrix is a matrix, so names() returns NULL;
# colnames() gives the actual feature names.
xgb.plot.multi.trees(feature_names = colnames(diseaseInfo_matrix),
                     model = model)
# convert log odds to probability
# Convert log-odds to a probability via the logistic (sigmoid) function.
# Vectorized: operates elementwise on a numeric vector of odds.
odds_to_probs <- function(odds){
  expo <- exp(odds)
  expo / (1 + expo)
}
# Probability of the leaf above countryPortugal (log-odds -0.599).
odds_to_probs(-0.599)
# Per-feature importance scores for the baseline model.
# BUG FIX: diseaseInfo_matrix is a matrix, so names() is NULL — use colnames().
importance_matrix <- xgb.importance(colnames(diseaseInfo_matrix), model = model)
# plot
xgb.plot.importance(importance_matrix)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2263
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2262
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2262
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/wmiforward/stmt1_919_920.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 686
c no.of clauses 2263
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2262
c
c QBFLIB/Basler/wmiforward/stmt1_919_920.qdimacs 686 2263 E1 [1] 0 63 622 2262 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/wmiforward/stmt1_919_920/stmt1_919_920.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 707
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2263
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2262
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2262
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/wmiforward/stmt1_919_920.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 686
c no.of clauses 2263
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2262
c
c QBFLIB/Basler/wmiforward/stmt1_919_920.qdimacs 686 2263 E1 [1] 0 63 622 2262 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bdc-package.R
\docType{data}
\name{merraclim_2.5m_bav_tk25}
\alias{merraclim_2.5m_bav_tk25}
\title{MerraClim bioclimatic data of bavaria}
\format{A \code{data.frame} with 20412 observations and 23 variables.}
\source{
This data has been obtained from: \itemize{\item \url{https://datadryad.org/stash/dataset/doi:10.5061/dryad.s2v81}}
}
\usage{
data(merraclim_2.5m_bav_tk25)
}
\description{
MerraClim bioclimatic data of Bavaria, Germany for 1980, 1990 and 2000.
}
\details{
Bioclimatic data of Bavaria, Germany for 1980, 1990 and 2000
derived from data at 2.5 minute spatial resolution and resampled onto the TK25 grid.
}
\references{
C. Vega, Greta; Pertierra, Luis R.; Olalla-Tárraga, Miguel Ángel (2017),
MERRAclim, a high-resolution global dataset of remotely sensed bioclimatic variables
for ecological modelling, Scientific Data, https://doi.org/10.1038/sdata.2017.78
}
|
/man/merraclim_2.5m_bav_tk25.Rd
|
permissive
|
cszang/bdc
|
R
| false
| true
| 959
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bdc-package.R
\docType{data}
\name{merraclim_2.5m_bav_tk25}
\alias{merraclim_2.5m_bav_tk25}
\title{MerraClim bioclimatic data of bavaria}
\format{A \code{data.frame} with 20412 observations and 23 variables.}
\source{
This data has been obtained from: \itemize{\item \url{https://datadryad.org/stash/dataset/doi:10.5061/dryad.s2v81}}
}
\usage{
data(merraclim_2.5m_bav_tk25)
}
\description{
MerraClim bioclimatic data of Bavaria, Germany for 1980, 1990 and 2000.
}
\details{
Bioclimatic data of Bavaria, Germany for 1980, 1990 and 2000
derived from data at 2.5 minute spatial resolution and resampled onto the TK25 grid.
}
\references{
C. Vega, Greta; Pertierra, Luis R.; Olalla-Tárraga, Miguel Ángel (2017),
MERRAclim, a high-resolution global dataset of remotely sensed bioclimatic variables
for ecological modelling, Scientific Data, https://doi.org/10.1038/sdata.2017.78
}
|
#' Estimate Recovery Rate
#'
#' This function provides a first estimate of the recovery rate, to facilitate
#' convergence of the main algorithm.
#' @param tTarget target time vector
#' @param Q target time-histories of the quarantined cases
#' @param R target time-histories of the recovered cases
#' @param guess initial guess parameters; elements 5-7 hold the recovery-rate (lambda) parameters and are updated in place
#' @param ftol nls.lm.control object. non-negative numeric. Default is \code{1e-6}
#' @param ptol nls.lm.control object. non-negative numeric. Default is \code{1e-6}
#' @param gtol nls.lm.control object. non-negative numeric. Default is \code{1e-6}
#' @param epsfcn nls.lm.control object. Default is \code{0.001}
#' @param factor nls.lm.control object. Default is \code{100}
#' @param maxfev nls.lm.control object. Default is \code{1000}
#' @param maxiter nls.lm.control object. Default is \code{100}
#' @param nprint nls.lm.control object. Default is \code{1}
#' @param trace set \code{TRUE} to trace iteration results
#'
#' @importFrom nlsr nlxb
#'
#' @author Selcuk Korkmaz, \email{selcukorkmaz@gmail.com}
#'
#' @return list with the updated \code{guess} vector and the selected recovery-rate function \code{lambdaFun}
#'
#' @seealso \code{\link{SEIQRDP}} \code{\link{fit_SEIQRDP}}
#'
#' @references Peng, L., Yang, W., Zhang, D., Zhuge, C., Hong, L. 2020. “Epidemic analysis of COVID-19 in China by dynamical modeling”, arXiv preprint arXiv:2002.06563.
#' @references \url{https://www.mathworks.com/matlabcentral/fileexchange/74545-generalized-seir-epidemic-model-fitting-and-computation}
getLambdaFun <- function (tTarget, Q, R, guess, ftol,
ptol, gtol, epsfcn, factor, maxfev,
maxiter, nprint, trace){
# Too few recovered cases to fit reliably: fall back to a fixed logistic form
# and leave the guess untouched.
if (max(R)<20){
lambdaFun = function(a,t) {a[1] / (1+exp(-a[2]*(t-a[3])))}
}else{
# Two candidate recovery-rate shapes: logistic growth and decaying exponential.
myFun1 = function(a,t) {a[1] / (1+exp(-a[2]*(t-a[3])))};
myFun2 = function(a,t) {a[1] + exp(-a[2]*(t+a[3]))};
# Empirical recovery rate: dR/dt (per median time step), normalised by Q.
rate = diff(as.numeric(R))/median(diff(tTarget))/as.numeric(Q)[2:length(as.numeric(Q))]
x = tTarget[2:length(tTarget)]
# Discard implausible rates (|rate| > 1) and exact zeros before fitting.
rate[abs(rate)>1 | abs(rate)==0]=NA
df = cbind.data.frame(tk = x[!is.na(rate)], z = rate[!is.na(rate)])
# NOTE(review): "lamda" spelling appears to match nlxb's control-list
# name — confirm against the nlsr documentation before changing.
ctrl = list(phi=1, lamda = 0.0001, offset = 100, laminc=10, lamdec = 4)
# Fit candidate 1: bounded logistic in time.
model1 <- nlxb(z ~ a1 / (1+exp(-a2*(tk-a3))),
start=list(a1=guess[[5]], a2=guess[[6]], a3=guess[[7]]), data=df, trace=trace,
control = ctrl, lower = c(0,0,0), upper = c(1,1,100))
coeff1 = model1$coefficients
r1 = model1$ssquares
ctrl = list(phi=1, lamda = 0.0001, offset = 100, laminc=10, lamdec = 4)
# Fit candidate 2: decaying exponential with offset.
model2 <- nlxb(z ~ a1 + exp(-a2*(tk+a3)),
start=list(a1=guess[[5]], a2=guess[[6]], a3=guess[[7]]), data=df, trace=trace,
control = ctrl, lower = c(0,0,0), upper = c(1,1,100))
coeff2 = model2$coefficients
r2 = model2$ssquares
# Prefer the logistic fit when it has the smaller residual sum of squares,
# or when the exponential fit appears to have hit its parameter bounds.
if (r1<r2 || coeff2[1]>0.99 || coeff2[2]>4.9){
lambdaGuess = coeff1
lambdaFun = myFun1
}else{
lambdaGuess = coeff2
lambdaFun = myFun2
}
# Overwrite guess elements 5-7 with the fitted recovery-rate parameters.
guess[5:7] = lambdaGuess
}
return(list(guess=guess, lambdaFun = lambdaFun))
}
|
/R/getLambdaFun.R
|
no_license
|
cran/genSEIR
|
R
| false
| false
| 3,063
|
r
|
#' Estimate Recovery Rate
#'
#' This function provides a first estimate of the recovery rate, to facilitate
#' convergence of the main algorithm.
#' @param tTarget target time vector
#' @param Q target time-histories of the quarantined cases
#' @param R target time-histories of the recovered cases
#' @param guess initial guess parameters; elements 5-7 hold the recovery-rate (lambda) parameters and are updated in place
#' @param ftol nls.lm.control object. non-negative numeric. Default is \code{1e-6}
#' @param ptol nls.lm.control object. non-negative numeric. Default is \code{1e-6}
#' @param gtol nls.lm.control object. non-negative numeric. Default is \code{1e-6}
#' @param epsfcn nls.lm.control object. Default is \code{0.001}
#' @param factor nls.lm.control object. Default is \code{100}
#' @param maxfev nls.lm.control object. Default is \code{1000}
#' @param maxiter nls.lm.control object. Default is \code{100}
#' @param nprint nls.lm.control object. Default is \code{1}
#' @param trace set \code{TRUE} to trace iteration results
#'
#' @importFrom nlsr nlxb
#'
#' @author Selcuk Korkmaz, \email{selcukorkmaz@gmail.com}
#'
#' @return list with the updated \code{guess} vector and the selected recovery-rate function \code{lambdaFun}
#'
#' @seealso \code{\link{SEIQRDP}} \code{\link{fit_SEIQRDP}}
#'
#' @references Peng, L., Yang, W., Zhang, D., Zhuge, C., Hong, L. 2020. “Epidemic analysis of COVID-19 in China by dynamical modeling”, arXiv preprint arXiv:2002.06563.
#' @references \url{https://www.mathworks.com/matlabcentral/fileexchange/74545-generalized-seir-epidemic-model-fitting-and-computation}
getLambdaFun <- function (tTarget, Q, R, guess, ftol,
ptol, gtol, epsfcn, factor, maxfev,
maxiter, nprint, trace){
# Too few recovered cases to fit reliably: fall back to a fixed logistic form
# and leave the guess untouched.
if (max(R)<20){
lambdaFun = function(a,t) {a[1] / (1+exp(-a[2]*(t-a[3])))}
}else{
# Two candidate recovery-rate shapes: logistic growth and decaying exponential.
myFun1 = function(a,t) {a[1] / (1+exp(-a[2]*(t-a[3])))};
myFun2 = function(a,t) {a[1] + exp(-a[2]*(t+a[3]))};
# Empirical recovery rate: dR/dt (per median time step), normalised by Q.
rate = diff(as.numeric(R))/median(diff(tTarget))/as.numeric(Q)[2:length(as.numeric(Q))]
x = tTarget[2:length(tTarget)]
# Discard implausible rates (|rate| > 1) and exact zeros before fitting.
rate[abs(rate)>1 | abs(rate)==0]=NA
df = cbind.data.frame(tk = x[!is.na(rate)], z = rate[!is.na(rate)])
# NOTE(review): "lamda" spelling appears to match nlxb's control-list
# name — confirm against the nlsr documentation before changing.
ctrl = list(phi=1, lamda = 0.0001, offset = 100, laminc=10, lamdec = 4)
# Fit candidate 1: bounded logistic in time.
model1 <- nlxb(z ~ a1 / (1+exp(-a2*(tk-a3))),
start=list(a1=guess[[5]], a2=guess[[6]], a3=guess[[7]]), data=df, trace=trace,
control = ctrl, lower = c(0,0,0), upper = c(1,1,100))
coeff1 = model1$coefficients
r1 = model1$ssquares
ctrl = list(phi=1, lamda = 0.0001, offset = 100, laminc=10, lamdec = 4)
# Fit candidate 2: decaying exponential with offset.
model2 <- nlxb(z ~ a1 + exp(-a2*(tk+a3)),
start=list(a1=guess[[5]], a2=guess[[6]], a3=guess[[7]]), data=df, trace=trace,
control = ctrl, lower = c(0,0,0), upper = c(1,1,100))
coeff2 = model2$coefficients
r2 = model2$ssquares
# Prefer the logistic fit when it has the smaller residual sum of squares,
# or when the exponential fit appears to have hit its parameter bounds.
if (r1<r2 || coeff2[1]>0.99 || coeff2[2]>4.9){
lambdaGuess = coeff1
lambdaFun = myFun1
}else{
lambdaGuess = coeff2
lambdaFun = myFun2
}
# Overwrite guess elements 5-7 with the fitted recovery-rate parameters.
guess[5:7] = lambdaGuess
}
return(list(guess=guess, lambdaFun = lambdaFun))
}
|
## Download and unzip the files, if they do not exist yet.
## Else, skip this step.
## Download and unzip the UCI HAR dataset archive unless already present.
## `dest` is the local path for the downloaded zip file.
getFiles <- function(dest) {
  if (!file.exists(dest)) {
    # mode = "wb" is required so the binary zip is not corrupted by
    # text-mode translation on Windows.
    download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                  destfile = dest, mode = "wb")
    unzip(dest)
  } else if (!file.exists("UCI HAR Dataset")) {
    # Archive already downloaded but not yet extracted.
    unzip(dest)
  }
  invisible(NULL)
}
## Parse filepaths for each file necessary.
## Return full paths for further use.
## Build the named vector of paths to every dataset file we need.
## Returns full relative paths named act/feat/tr_sub/tr_x/tr_y/ts_sub/ts_x/ts_y.
getFilePaths <- function() {
  base <- "UCI HAR Dataset"
  # First two entries at the top level: activity_labels.txt, features.txt.
  top_two <- list.files(base)[c(1, 2)]
  # All .txt files inside the train/ and test/ subdirectories.
  train_files <- file.path("train", list.files(file.path(base, "train"), pattern = "\\.txt$"))
  test_files <- file.path("test", list.files(file.path(base, "test"), pattern = "\\.txt$"))
  paths <- file.path(base, c(top_two, train_files, test_files))
  names(paths) <- c("act", "feat", "tr_sub", "tr_x", "tr_y",
                    "ts_sub", "ts_x", "ts_y")
  paths
}
## Helper function to allow different binds with the same call.
## Apply an arbitrary combining function to its arguments.
## Lets callers perform different binds (rbind/cbind/...) through one call.
combine <- function(..., FUN) {
  FUN(...)
}
## Loading the bulk of data from files.
## First, combining train-data and test-data to their own tables,
## then combine the rows as the whole set.
## Take relevant rows from features.txt and parse them into column names.
## Set column names for the complete dataset.
## Factorize the subject- and activity-column and switch numbers to strings
## from activity_labels.txt.
## Load, merge, and label the UCI HAR train/test data.
## `files` is the named path vector from getFilePaths(); returns one
## data.frame with subject, activity, and all mean/std feature columns.
loadData <- function(files) {
# Activity id -> name lookup table, and the full feature-name table.
activities <- read.table(files["act"])
features <- read.table(files["feat"])
# Indices of features whose names mention mean or std.
feature_vals <- grep("mean|std", features[,2])
# Train partition: measurements (mean/std columns only), labels, subjects.
train_set <- read.table(files["tr_x"])[feature_vals]
train_labels <- read.table(files["tr_y"])
train_subs <- read.table(files["tr_sub"])
train_all <- combine(train_subs, train_labels, train_set, FUN=cbind)
# Test partition, assembled the same way.
test_set <- read.table(files["ts_x"])[feature_vals]
test_labels <- read.table(files["ts_y"])
test_subs <- read.table(files["ts_sub"])
test_all <- combine(test_subs, test_labels, test_set, FUN=cbind)
# Stack train on top of test to get the complete dataset.
all <- combine(train_all, test_all, FUN=rbind)
# Clean feature names: strip -, (, ), then camel-case Mean/Std.
feature_names <- features[feature_vals, 2]
feature_names <- gsub("[-|(|)]","",feature_names)
feature_names <- gsub("mean","Mean",feature_names)
feature_names <- gsub("std","Std",feature_names)
names(all) = c("subject", "activity", feature_names)
# Factorize subject; map numeric activity ids to their text labels.
all$subject <- as.factor(all$subject)
all$activity <- factor(all$activity,
levels=activities[,1], labels=as.character(activities[,2]))
all
}
## Clean and parse the data into a tidy form with reshape2-library.
## Melt the data, using subject-activity pair as primary key.
## Take means from the melted set by calculating variable means for every
## subject-activity pair.
## Finally save the data into a txt-file.
## Reshape the combined dataset into a tidy per-subject/activity summary
## of variable means and write it to "activitymeans.txt".
finalize <- function(all) {
  # requireNamespace() + :: fails fast with a clear error; the original
  # require() only warns (returns FALSE) and would fail later, obscurely.
  if (!requireNamespace("reshape2", quietly = TRUE)) {
    stop("Package 'reshape2' is required by finalize()", call. = FALSE)
  }
  # Melt so each subject/activity pair keys every measured variable ...
  melted <- reshape2::melt(all, id = c("subject", "activity"))
  # ... then compute the mean of every variable per pair.
  casted <- reshape2::dcast(melted, subject + activity ~ variable, mean)
  write.table(casted, "activitymeans.txt",
              quote = FALSE, row.names = FALSE)
}
## High-level function to maintain the proper algorith for cleaning.
## Entry point: download the data, load and clean it, write the tidy summary.
main <- function() {
  getFiles("project.zip")
  finalize(loadData(getFilePaths()))
}
|
/Clean_activity_tracker/run_analysis.R
|
no_license
|
Ziconin/Data_Science
|
R
| false
| false
| 3,181
|
r
|
## Download and unzip the files, if they do not exist yet.
## Else, skip this step.
## Download and unzip the UCI HAR dataset archive unless already present.
## `dest` is the local path for the downloaded zip file.
getFiles <- function(dest) {
  if (!file.exists(dest)) {
    # mode = "wb" is required so the binary zip is not corrupted by
    # text-mode translation on Windows.
    download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                  destfile = dest, mode = "wb")
    unzip(dest)
  } else if (!file.exists("UCI HAR Dataset")) {
    # Archive already downloaded but not yet extracted.
    unzip(dest)
  }
  invisible(NULL)
}
## Parse filepaths for each file necessary.
## Return full paths for further use.
## Build the named vector of paths to every dataset file we need.
## Returns full relative paths named act/feat/tr_sub/tr_x/tr_y/ts_sub/ts_x/ts_y.
getFilePaths <- function() {
  base <- "UCI HAR Dataset"
  # First two entries at the top level: activity_labels.txt, features.txt.
  top_two <- list.files(base)[c(1, 2)]
  # All .txt files inside the train/ and test/ subdirectories.
  train_files <- file.path("train", list.files(file.path(base, "train"), pattern = "\\.txt$"))
  test_files <- file.path("test", list.files(file.path(base, "test"), pattern = "\\.txt$"))
  paths <- file.path(base, c(top_two, train_files, test_files))
  names(paths) <- c("act", "feat", "tr_sub", "tr_x", "tr_y",
                    "ts_sub", "ts_x", "ts_y")
  paths
}
## Helper function to allow different binds with the same call.
## Apply an arbitrary combining function to its arguments.
## Lets callers perform different binds (rbind/cbind/...) through one call.
combine <- function(..., FUN) {
  FUN(...)
}
## Loading the bulk of data from files.
## First, combining train-data and test-data to their own tables,
## then combine the rows as the whole set.
## Take relevant rows from features.txt and parse them into column names.
## Set column names for the complete dataset.
## Factorize the subject- and activity-column and switch numbers to strings
## from activity_labels.txt.
## Load, merge, and label the UCI HAR train/test data.
## `files` is the named path vector from getFilePaths(); returns one
## data.frame with subject, activity, and all mean/std feature columns.
loadData <- function(files) {
# Activity id -> name lookup table, and the full feature-name table.
activities <- read.table(files["act"])
features <- read.table(files["feat"])
# Indices of features whose names mention mean or std.
feature_vals <- grep("mean|std", features[,2])
# Train partition: measurements (mean/std columns only), labels, subjects.
train_set <- read.table(files["tr_x"])[feature_vals]
train_labels <- read.table(files["tr_y"])
train_subs <- read.table(files["tr_sub"])
train_all <- combine(train_subs, train_labels, train_set, FUN=cbind)
# Test partition, assembled the same way.
test_set <- read.table(files["ts_x"])[feature_vals]
test_labels <- read.table(files["ts_y"])
test_subs <- read.table(files["ts_sub"])
test_all <- combine(test_subs, test_labels, test_set, FUN=cbind)
# Stack train on top of test to get the complete dataset.
all <- combine(train_all, test_all, FUN=rbind)
# Clean feature names: strip -, (, ), then camel-case Mean/Std.
feature_names <- features[feature_vals, 2]
feature_names <- gsub("[-|(|)]","",feature_names)
feature_names <- gsub("mean","Mean",feature_names)
feature_names <- gsub("std","Std",feature_names)
names(all) = c("subject", "activity", feature_names)
# Factorize subject; map numeric activity ids to their text labels.
all$subject <- as.factor(all$subject)
all$activity <- factor(all$activity,
levels=activities[,1], labels=as.character(activities[,2]))
all
}
## Clean and parse the data into a tidy form with reshape2-library.
## Melt the data, using subject-activity pair as primary key.
## Take means from the melted set by calculating variable means for every
## subject-activity pair.
## Finally save the data into a txt-file.
## Reshape the combined dataset into a tidy per-subject/activity summary
## of variable means and write it to "activitymeans.txt".
finalize <- function(all) {
  # requireNamespace() + :: fails fast with a clear error; the original
  # require() only warns (returns FALSE) and would fail later, obscurely.
  if (!requireNamespace("reshape2", quietly = TRUE)) {
    stop("Package 'reshape2' is required by finalize()", call. = FALSE)
  }
  # Melt so each subject/activity pair keys every measured variable ...
  melted <- reshape2::melt(all, id = c("subject", "activity"))
  # ... then compute the mean of every variable per pair.
  casted <- reshape2::dcast(melted, subject + activity ~ variable, mean)
  write.table(casted, "activitymeans.txt",
              quote = FALSE, row.names = FALSE)
}
## High-level function to maintain the proper algorith for cleaning.
## Entry point: download the data, load and clean it, write the tidy summary.
main <- function() {
  getFiles("project.zip")
  finalize(loadData(getFilePaths()))
}
|
# Practical 4 scratch script: a single arithmetic check, followed by
# placeholder comments left over from editor experiments.
1+2
# lalalala (placeholder)
# bozenko (placeholder, a name)
# nowa Bozenka ("new Bozenka" in Polish -- placeholder)
|
/wk4/prac4_script/prac_4.R
|
no_license
|
SPadlewski/GIS_code
|
R
| false
| false
| 41
|
r
|
# Practical 4 scratch script: a single arithmetic check, followed by
# placeholder comments left over from editor experiments.
1+2
# lalalala (placeholder)
# bozenko (placeholder, a name)
# nowa Bozenka ("new Bozenka" in Polish -- placeholder)
|
# Regression tests for iglu::range_glu(): the reported glucose range must
# equal max - min of the glucose values, both for a hand-made vector and
# for the package's bundled single-subject example data.
library(iglu)
# Minimal synthetic glucose series used as a test fixture.
data_test = c(101,121,141,151,161,171,191,201,231,251)
test_that("range_glu equivalent to max - min", {
  expect_equal((range_glu(data_test)), max(data_test) - min(data_test), tolerance = 1e-04)
  expect_equal((range_glu(example_data_1_subject)), max(example_data_1_subject$gl) - min(example_data_1_subject$gl), tolerance = 1e-04)
})
|
/tests/testthat/test-range_glu.R
|
no_license
|
trippsapientae/iglu
|
R
| false
| false
| 349
|
r
|
library(iglu)
data_test = c(101,121,141,151,161,171,191,201,231,251)
test_that("range_glu equivalent to max - min", {
expect_equal((range_glu(data_test)), max(data_test) - min(data_test), tolerance = 1e-04)
expect_equal((range_glu(example_data_1_subject)), max(example_data_1_subject$gl) - min(example_data_1_subject$gl), tolerance = 1e-04)
})
|
## Functions that demonstrate the lexical scoping rules of R and how they
## are used for implementing caching functionality. The pair computes the
## inverse of a matrix at most once per matrix and caches the result.

## Create a special "matrix" object that can cache its inverse.
##
## x: the matrix to wrap (defaults to an empty 1x1 matrix).
## Returns a list of four accessor closures sharing this environment:
##   set        -> replace the stored matrix (and invalidate the cache)
##   get        -> return the stored matrix
##   setInverse -> store a computed inverse in the cache
##   getInverse -> return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
    invcachematrix <- NULL  # cached inverse; NULL until first computed
    set <- function(inmatrix) {
        x <<- inmatrix  # deep assignment: update x in the enclosing environment
        ## BUG FIX: must use <<- here as well. The original used a plain
        ## local assignment, so the stale cached inverse survived set()
        ## and cacheSolve() returned the inverse of the *previous* matrix.
        invcachematrix <<- NULL
    }
    get <- function() {
        x
    }
    setInverse <- function(inverse) {
        invcachematrix <<- inverse  # deep assignment into the shared cache
    }
    getInverse <- function() {
        invcachematrix
    }
    list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## On a cache hit the stored inverse is returned immediately; on a miss
## the inverse is computed with solve(), stored in the cache, and returned.
##
## x   : list produced by makeCacheMatrix()
## ... : extra arguments forwarded to solve() (the original declared but
##       silently dropped them, making the argument dead)
cacheSolve <- function(x, ...) {
    inverse <- x$getInverse()
    ## Cache hit: the inverse was previously calculated for this matrix.
    if (!is.null(inverse)) {
        message("Returning cached inverse")  # message(), not print(): status belongs on stderr
        return(inverse)
    }
    ## Cache miss: compute the inverse, forwarding any extra solve() args.
    inverse <- solve(x$get(), ...)
    ## Store the freshly computed inverse in the cache.
    x$setInverse(inverse)
    message("Returning calculated inverse")
    inverse
}
|
/cachematrix.R
|
no_license
|
rajnani/ProgrammingAssignment2
|
R
| false
| false
| 1,725
|
r
|
## Functions that demonstrate the lexical scoping rules of R and how they
## are used for implementing caching functionality. The pair computes the
## inverse of a matrix at most once per matrix and caches the result.

## Create a special "matrix" object that can cache its inverse.
##
## x: the matrix to wrap (defaults to an empty 1x1 matrix).
## Returns a list of four accessor closures sharing this environment:
##   set        -> replace the stored matrix (and invalidate the cache)
##   get        -> return the stored matrix
##   setInverse -> store a computed inverse in the cache
##   getInverse -> return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
    invcachematrix <- NULL  # cached inverse; NULL until first computed
    set <- function(inmatrix) {
        x <<- inmatrix  # deep assignment: update x in the enclosing environment
        ## BUG FIX: must use <<- here as well. The original used a plain
        ## local assignment, so the stale cached inverse survived set()
        ## and cacheSolve() returned the inverse of the *previous* matrix.
        invcachematrix <<- NULL
    }
    get <- function() {
        x
    }
    setInverse <- function(inverse) {
        invcachematrix <<- inverse  # deep assignment into the shared cache
    }
    getInverse <- function() {
        invcachematrix
    }
    list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## On a cache hit the stored inverse is returned immediately; on a miss
## the inverse is computed with solve(), stored in the cache, and returned.
##
## x   : list produced by makeCacheMatrix()
## ... : extra arguments forwarded to solve() (the original declared but
##       silently dropped them, making the argument dead)
cacheSolve <- function(x, ...) {
    inverse <- x$getInverse()
    ## Cache hit: the inverse was previously calculated for this matrix.
    if (!is.null(inverse)) {
        message("Returning cached inverse")  # message(), not print(): status belongs on stderr
        return(inverse)
    }
    ## Cache miss: compute the inverse, forwarding any extra solve() args.
    inverse <- solve(x$get(), ...)
    ## Store the freshly computed inverse in the cache.
    x$setInverse(inverse)
    message("Returning calculated inverse")
    inverse
}
|
## Use SIMPER analysis to identify ASVs that are contributing to incongruency between sites/species
# Paul A. O'Brien
# paul.obrien@my.jcu.edu.au

# NOTE(review): setwd() with an absolute home path makes this script
# non-portable; consider relative paths or here::here().
setwd("~/Documents/R/Ramaciotti_16S_dataset/Phylosymbiosis/")

library(vegan)

# Taxonomy table, loaded only so ASV IDs can be looked up interactively.
# NOTE(review): prefer TRUE over T for header/strip.white (T is reassignable).
taxonomy <- read.table("~/Documents/R/Ramaciotti_16S_dataset/taxonomy_silva.tsv", sep = "\t", header = T, strip.white = T) # For ASV IDs

## Coral ----
# ASV counts (rows = ASVs, columns = samples) and per-sample metadata.
asv_tablec <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Asv_tables/Filtered/coral-table-f.txt", sep = '\t', row.names = 1, header = T, strip.white = T)
meta_datac <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Metadata/coral_metadata.tsv", sep = '\t', header = T, strip.white = T)

# Transpose so rows are samples, as vegan::simper() expects.
asv_tablec <- as.data.frame(t(asv_tablec))

# SIMPER: per-ASV contribution to between-group Bray-Curtis dissimilarity,
# grouping samples by species. Assumes metadata rows align with samples.
simp.testc <- simper(asv_tablec, meta_datac$Species)
sum.simpc <- summary(simp.testc)

# Pull out the comparison you want to look at
pcyl <- sum.simpc$`P. cylindrica_R_P. cylindrica_P`
shyst <- sum.simpc$`S. hysterix_R_S. hysterix_P`

# Save the per-ASV contribution tables for the selected comparisons.
write.csv(pcyl, file = "p.cylindra_simp.csv")
write.csv(shyst, file = "s.hyst_simp.csv")

## Octocoral ----
# Same workflow for the octocoral dataset.
asv_tableo <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Asv_tables/Filtered/octocoral-table-f.txt", sep = '\t', row.names = 1, header = T, strip.white = T)
meta_datao <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Metadata/octocoral_metadata.tsv", sep = '\t', header = T, strip.white = T)

asv_tableo <- as.data.frame(t(asv_tableo))

# SIMPER by species for the octocoral samples.
simp.testo <- simper(asv_tableo, meta_datao$Species)
sum.simpo <- summary(simp.testo)

# pull out the comparison you want to look at
sinul <- sum.simpo$`Sinularia spR_Sinularia spP`
sarc <- sum.simpo$`Sarcophyton spR_Sarcophyton spP`
gorg <- sum.simpo$`I. hippurus_Pinnigorgia sp`

write.csv(sinul, file = "sin_simp.csv")
write.csv(sarc, file = "sarc.csv")
write.csv(gorg, file = "gorg.csv")
|
/R_scripts/SIMPER_analysis.R
|
no_license
|
paobrien/Phylosymbiosis-in-coral-reef-invertebrates
|
R
| false
| false
| 1,814
|
r
|
## Use SIMPER analysis to identify ASVs that are contributing to incongruency between sites/species
# Paul A. O'Brien
# paul.obrien@my.jcu.edu.au
setwd("~/Documents/R/Ramaciotti_16S_dataset/Phylosymbiosis/")
library(vegan)
taxonomy <- read.table("~/Documents/R/Ramaciotti_16S_dataset/taxonomy_silva.tsv", sep = "\t", header = T, strip.white = T) # For ASV IDs
## Coral ----
asv_tablec <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Asv_tables/Filtered/coral-table-f.txt", sep = '\t', row.names = 1, header = T, strip.white = T)
meta_datac <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Metadata/coral_metadata.tsv", sep = '\t', header = T, strip.white = T)
asv_tablec <- as.data.frame(t(asv_tablec))
#SIMPER
simp.testc <- simper(asv_tablec, meta_datac$Species)
sum.simpc <- summary(simp.testc)
# Pull out the comparison you want to look at
pcyl <- sum.simpc$`P. cylindrica_R_P. cylindrica_P`
shyst <- sum.simpc$`S. hysterix_R_S. hysterix_P`
write.csv(pcyl, file = "p.cylindra_simp.csv")
write.csv(shyst, file = "s.hyst_simp.csv")
## Octocoral ----
asv_tableo <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Asv_tables/Filtered/octocoral-table-f.txt", sep = '\t', row.names = 1, header = T, strip.white = T)
meta_datao <- read.table("~/Documents/R/Ramaciotti_16S_dataset/Metadata/octocoral_metadata.tsv", sep = '\t', header = T, strip.white = T)
asv_tableo <- as.data.frame(t(asv_tableo))
#SIMPER
simp.testo <- simper(asv_tableo, meta_datao$Species)
sum.simpo <- summary(simp.testo)
#pull out the comparison you want to look at
sinul <- sum.simpo$`Sinularia spR_Sinularia spP`
sarc <- sum.simpo$`Sarcophyton spR_Sarcophyton spP`
gorg <- sum.simpo$`I. hippurus_Pinnigorgia sp`
write.csv(sinul, file = "sin_simp.csv")
write.csv(sarc, file = "sarc.csv")
write.csv(gorg, file = "gorg.csv")
|
# This script computes 'sumcall', a vector of the expression of a particular
# gene, summed across all genomic positions, for each of the 500 bigWig
# files (one summed value per file). 'sumcall' can be used to holistically
# measure gene expression.
library(rtracklayer)
library(magrittr)
library(stringr)

# Annotated metadata for the 500 bigWig files; load() restores the
# `update_metadata` data frame into the global environment and returns
# the name(s) of the restored objects.
metadata <- load('/home/other/nkarbhar/sratissue/meta.Rda')

# Extract a coverage block (positions start:end of chromosome `chr`) from
# each bigWig file, one file per column of the returned numeric matrix.
# Function provided by Jean-Philippe Fortin. Thank you Jean-Philippe Fortin!
extract.block <- function(files, chr, start, end, verbose = FALSE){
  rl <- IRanges::RangesList(IRanges::IRanges(start = start, end = end))
  names(rl) <- chr
  rles <- lapply(files, function(xx) {
    import(xx, as = "Rle", format = "bw", selection = BigWigSelection(rl))
  })
  do.call(cbind, lapply(rles, function(xx) as.numeric(xx[[chr]][start:end])))
}

v1 <- as.vector(update_metadata$run_accession)
filenames <- scan("sra_samples.txt", what = "", sep = "\n")
path <- '/dcl01/leek/data/sraonrail/sra_batch_0_sample_size_500_align/coverage_bigwigs/'

# Sum the gene's coverage for every bigWig file.
# vapply replaces the original grow-by-c() loop, which was quadratic and
# used the unsafe 1:length(filenames) idiom (wrong for empty input).
# Input the chromosome, start position, and end position below.
sumcall <- vapply(filenames, function(f) {
  sum(extract.block(paste0(path, f), 'chr20', 61867235, 61871859))
}, numeric(1), USE.NAMES = FALSE)
|
/files for github/sumcall.R
|
no_license
|
nishika/SRA_Annotation
|
R
| false
| false
| 1,505
|
r
|
# This script computes 'sumcall', a vector of the expression of a particular
# gene, summed across all genomic positions, for each of the 500 bigWig
# files (one summed value per file). 'sumcall' can be used to holistically
# measure gene expression.
library(rtracklayer)
library(magrittr)
library(stringr)

# Annotated metadata for the 500 bigWig files; load() restores the
# `update_metadata` data frame into the global environment and returns
# the name(s) of the restored objects.
metadata <- load('/home/other/nkarbhar/sratissue/meta.Rda')

# Extract a coverage block (positions start:end of chromosome `chr`) from
# each bigWig file, one file per column of the returned numeric matrix.
# Function provided by Jean-Philippe Fortin. Thank you Jean-Philippe Fortin!
extract.block <- function(files, chr, start, end, verbose = FALSE){
  rl <- IRanges::RangesList(IRanges::IRanges(start = start, end = end))
  names(rl) <- chr
  rles <- lapply(files, function(xx) {
    import(xx, as = "Rle", format = "bw", selection = BigWigSelection(rl))
  })
  do.call(cbind, lapply(rles, function(xx) as.numeric(xx[[chr]][start:end])))
}

v1 <- as.vector(update_metadata$run_accession)
filenames <- scan("sra_samples.txt", what = "", sep = "\n")
path <- '/dcl01/leek/data/sraonrail/sra_batch_0_sample_size_500_align/coverage_bigwigs/'

# Sum the gene's coverage for every bigWig file.
# vapply replaces the original grow-by-c() loop, which was quadratic and
# used the unsafe 1:length(filenames) idiom (wrong for empty input).
# Input the chromosome, start position, and end position below.
sumcall <- vapply(filenames, function(f) {
  sum(extract.block(paste0(path, f), 'chr20', 61867235, 61871859))
}, numeric(1), USE.NAMES = FALSE)
|
# Physics and history marks for ten students.
x <- c(15,12,8,8,7,7,7,6,5,3) # physics
y <- c(10,25,17,11,13,17,20,13,9,15) # history
# Karl Pearson's correlation coefficient is cov(X, Y) / (sd(X) * sd(Y)),
# which is exactly what stats::cor() computes (Pearson is its default
# method) -- use the library function instead of hand-rolling the formula.
Karl <- cor(x, y)
cat(round(Karl, 3))
|
/Correlation and Regression Lines - A Quick Recap #1.R
|
no_license
|
golpiraelmi/HackerRank
|
R
| false
| false
| 217
|
r
|
# Physics and history marks for ten students.
x <- c(15,12,8,8,7,7,7,6,5,3) # physics
y <- c(10,25,17,11,13,17,20,13,9,15) # history
# Karl Pearson's correlation coefficient is cov(X, Y) / (sd(X) * sd(Y)),
# which is exactly what stats::cor() computes (Pearson is its default
# method) -- use the library function instead of hand-rolling the formula.
Karl <- cor(x, y)
cat(round(Karl, 3))
|
## Shut down one or all running NetLogo instances started by RNetLogo.
##
## nl.obj : name of the NetLogo reference to close; NULL means the default
##          internal instance "_nl.intern_".
## all    : if TRUE, close every instance registered in .rnetlogo$objects;
##          a GUI instance (if any) is closed last.
##
## Side effects: kills the NetLogo workspace via rJava, removes the
## reference from the .rnetlogo registry, triggers the Java garbage
## collector, and -- once the last instance is gone -- restores the
## working directory saved when the first instance was started.
NLQuit <-
function(nl.obj=NULL, all=FALSE)
{
  if (all) {
    # object names
    #objs <- names(.rnetlogo$objects)
    objs <- .rnetlogo$objects
    # handle gui obj as last, to prevent java.lang.InterruptionException
    guiobj <- NULL
    if ((!is.null(.rnetlogo$guiobj)) && (.rnetlogo$guiobj %in% objs)) {
      objs <- objs[which(!objs == .rnetlogo$guiobj)]
      guiobj <- .rnetlogo$guiobj
    }
    # Recurse once per non-GUI instance.
    invisible(
      lapply(objs, function(x) {NLQuit(nl.obj=x, all=F)})
    )
    # close gui obj
    if (!is.null(guiobj)) {
      NLQuit(nl.obj=guiobj, all=F)
    }
  } else {
    obj.name <- nl.obj
    if (is.null(obj.name))
    {
      obj.name = "_nl.intern_"
    }
    # Resolve the registered name to the actual Java reference.
    if (obj.name %in% .rnetlogo$objects) {
      nl.obj <- get(obj.name, envir=.rnetlogo)
    } else {
      stop(paste('There is no NetLogo reference stored under the name ',obj.name,".", sep=""))
    }
    # Ask NetLogo (via rJava) to tear down its workspace.
    .jcall(nl.obj, "V", "KillWorkspace")
    #print("jinit")
    #.jinit(force.init=TRUE)
    #print("detach RNetLogo")
    #detach("package:RNetLogo")
    # see http://osdir.com/ml/lang.r.rosuda.devel/2007-01/msg00052.html
    #print("doneJVM")
    #.Call("doneJVM")
    #print("detach rJava")
    #detach("package:rJava")
    # free the instance
    # doesn't work for others than .rnetlogo[['nl.intern']]
    nl.obj <- NULL
    # Drop the closed instance from the registry.
    #.rnetlogo$objects[obj.name] <- NULL
    .rnetlogo$objects <- .rnetlogo$objects[-which(.rnetlogo$objects %in% obj.name)]
    # call the garbage collector
    .jcall('java/lang/System', 'V', 'gc')
    # java error handling: surface any pending Java exception as an R error
    if (!is.null(e<-.jgetEx()))
    {
      if (.jcheck(silent=TRUE))
      {
        print(e)
        stop()
      }
    }
    # reset working directory after last NetLogo instance was closed
    if (length(.rnetlogo$objects) == 0) {
      setwd(.rnetlogo$savedworkingdir[1])
    }
  }
}
|
/R/NLQuit.R
|
no_license
|
cran/RNetLogo
|
R
| false
| false
| 1,944
|
r
|
NLQuit <-
function(nl.obj=NULL, all=FALSE)
{
if (all) {
# object names
#objs <- names(.rnetlogo$objects)
objs <- .rnetlogo$objects
# handle gui obj as last, to prevent java.lang.InterruptionException
guiobj <- NULL
if ((!is.null(.rnetlogo$guiobj)) && (.rnetlogo$guiobj %in% objs)) {
objs <- objs[which(!objs == .rnetlogo$guiobj)]
guiobj <- .rnetlogo$guiobj
}
invisible(
lapply(objs, function(x) {NLQuit(nl.obj=x, all=F)})
)
# close gui obj
if (!is.null(guiobj)) {
NLQuit(nl.obj=guiobj, all=F)
}
} else {
obj.name <- nl.obj
if (is.null(obj.name))
{
obj.name = "_nl.intern_"
}
if (obj.name %in% .rnetlogo$objects) {
nl.obj <- get(obj.name, envir=.rnetlogo)
} else {
stop(paste('There is no NetLogo reference stored under the name ',obj.name,".", sep=""))
}
.jcall(nl.obj, "V", "KillWorkspace")
#print("jinit")
#.jinit(force.init=TRUE)
#print("detach RNetLogo")
#detach("package:RNetLogo")
# see http://osdir.com/ml/lang.r.rosuda.devel/2007-01/msg00052.html
#print("doneJVM")
#.Call("doneJVM")
#print("detach rJava")
#detach("package:rJava")
# free the instance
# doesn't work for others than .rnetlogo[['nl.intern']]
nl.obj <- NULL
#.rnetlogo$objects[obj.name] <- NULL
.rnetlogo$objects <- .rnetlogo$objects[-which(.rnetlogo$objects %in% obj.name)]
# call the garbage collector
.jcall('java/lang/System', 'V', 'gc')
# java error handling
if (!is.null(e<-.jgetEx()))
{
if (.jcheck(silent=TRUE))
{
print(e)
stop()
}
}
# reset working directory after last NetLogo instance was closed
if (length(.rnetlogo$objects) == 0) {
setwd(.rnetlogo$savedworkingdir[1])
}
}
}
|
## Retrieve the call that produced an HLfit object.
## At most one of the wrapper-call attributes is present on a fit (fitme
## and corrHLfit remove the intermediate HLCorcall); fall back to the
## plain HLfit call stored in the object itself.
## NB: stats::getCall() cannot be used as a fallback here -- the default
## method getCall.default is not exported from stats, and calling
## stats::getCall() would simply dispatch back to this method, recursing
## forever.
getCall.HLfit <- function(x, ...) {
  for (call_attr in c("fitmecall", "HLCorcall", "corrHLfitcall")) {
    stored_call <- attr(x, call_attr)
    if (!is.null(stored_call)) return(stored_call)
  }
  x$call  # the plain HLfit call
}
## to get a call with the structure of the final HLCorcall in fitme or corrHLfit.
## ranFix is mandatory: Do not set a default value, so that one has to think about the correct value.
## Therefore, the original ranFix of the outer_object is replaced, unless it is explicitly set to getCall(object)$ranFix or $fixed... (in confint.HLfit)
## Parameters not in ranFix are set to the initial value of the optimization call.
##
## outer_object : fit object (see NOTE below), or call, or list of call arguments
## fixed        : fixed random-effect parameters to install in the rebuilt call
## ...          : extra named arguments spliced into the call, overriding
##                existing entries (needed to overcome promises in the call)
## NOTE(review): only the HLfit branch assigns `outer_call`; for a bare
## call / argument-list input the function would fail at
## `outer_call$verbose` -- confirm whether those inputs are actually used.
get_HLCorcall <- function(outer_object, ## accepts fit object, or call, or list of call arguments
                          fixed, ## see comments above
                          ... # anything needed to overcome promises in the call
) {
  if (inherits(outer_object,"HLfit")) {
    outer_call <- getCall(outer_object) ## gets a corrHLfit/fitme call, => eval it to get HLCor callS
    outer_call$data <- outer_object$data ## removes dependence on promise
    outer_fn <- paste(outer_call[[1L]])
    # The fixed-parameter argument has a different name in each wrapper.
    if (outer_fn=="fitme") {
      outer_call$fixed <- fixed
    } else if (outer_fn=="HLCor") {
      outer_call$ranPars <- fixed
    } else outer_call$ranFix <- fixed
  }
  # Turn on the "getCall" verbose flag so the evaluation below returns
  # the inner HLCor call instead of performing a full fit.
  verbose <- outer_call$verbose
  verbose["getCall"] <- TRUE
  outer_call$verbose <- verbose
  ## compare to update.default, commented in R language Definition.
  # Splice the unevaluated ... arguments into the call, replacing any
  # arguments that already exist and appending the rest.
  extras <- match.call(expand.dots = FALSE)$...
  if (length(extras) > 0) {
    existing <- !is.na(match(names(extras), names(outer_call)))
    dotlist <- list(...)
    for (a in names(extras)[existing]) outer_call[[a]] <- dotlist[[a]]
    if (any(!existing)) {
      outer_call <- c(as.list(outer_call), dotlist[!existing])
    }
  }
  #
  HLCorcall <- eval(as.call(outer_call)) ## calls corrHLfit and bypasses optimization to get the call from within the final HLCor
  HLCorcall[[1L]] <- quote(HLCor)
  # Reset the flag inside the processed environment so later evaluations
  # of the call perform a real fit.
  .assignWrapper(HLCorcall$processed,"verbose['getCall'] <- NA")
  return(HLCorcall)
}
## update() method for HLfit objects: rebuild the original fitting call
## with an updated formula and/or replaced arguments, then (optionally)
## re-evaluate it.
##
## object   : fitted HLfit object (must carry its creating call).
## formula. : optional formula update, merged via update.formula().
## ...      : named call arguments to replace or add.
## evaluate : if TRUE (default) evaluate the rebuilt call in the caller's
##            frame and return the new fit; otherwise return the call.
update.HLfit <- function (object, formula., ..., evaluate = TRUE) {
  if (is.null(call <- getCall(object)))
    stop("need an object with call component")
  extras <- match.call(expand.dots = FALSE)$...
  if (!missing(formula.)) {
    predictor <- formula(getCall(object)) ## formula.default gets formula from $call, not from $predictor; and we must use getCall, not $call
    # "predictor" objects carry the original formula in an attribute;
    # update that one so attributes are rebuilt consistently below.
    if (inherits(predictor,"predictor")) {
      form <- update.formula(attr(predictor,"oriFormula"),formula.) ## LOSES ALL ATTRIBUTES
    } else form <- update.formula(predictor,formula.)
    ## !!!! FR->FR does not handle etaFix$beta !!!!
    # Keep the stored offset unless the updating formula supplies one.
    if (! is.null(.findOffset(formula.))) {off <- NULL} else { off <- attr(predictor,"offsetObj")$total }
    predArgs <- list(formula=form,
                     LMatrix=NULL, ## F I X M E  argument for Predictor, to be removed ? (modif function Predictor() ?)
                     AMatrix=attr(predictor,"AMatrix"),
                     offset=off)
    ## attributes BinDenForm and oriFormula will be reconstructed:
    call$formula <- do.call("Predictor",predArgs) ## reconstructs oriFormula... otherwise we have a predictor without it...
  }
  # Splice the ... arguments into the call: replace existing entries,
  # append new ones (same scheme as update.default).
  if (length(extras)) {
    existing <- !is.na(match(names(extras), names(call))) ## which to replace and which to add to the call
    for (a in names(extras)[existing]) call[[a]] <- extras[[a]] ## replace
    if (any(!existing)) {
      call <- c(as.list(call), extras[!existing]) ## add
      call <- as.call(call)
    }
  }
  if (evaluate)
    eval(call, parent.frame())
  else call
}
#`update.HLfit` <- function(object,formula.,...) {update.HL(object=object,formula.=formula.,...)}
#`update.HLCor` <- function(object,formula.,...) {update.HL(object=object,formula.=formula.,...)}
|
/CRAN/contrib/spaMM/R/update.HL.R
|
no_license
|
PRL-PRG/dyntrace-instrumented-packages
|
R
| false
| false
| 4,284
|
r
|
## Retrieve the call that produced an HLfit object.
## At most one of the wrapper-call attributes is present on a fit (fitme
## and corrHLfit remove the intermediate HLCorcall); fall back to the
## plain HLfit call stored in the object itself.
## NB: stats::getCall() cannot be used as a fallback here -- the default
## method getCall.default is not exported from stats, and calling
## stats::getCall() would simply dispatch back to this method, recursing
## forever.
getCall.HLfit <- function(x, ...) {
  for (call_attr in c("fitmecall", "HLCorcall", "corrHLfitcall")) {
    stored_call <- attr(x, call_attr)
    if (!is.null(stored_call)) return(stored_call)
  }
  x$call  # the plain HLfit call
}
## to get a call with the structure of the final HLCorcall in fitme of corrHLfit
## ranFix is mandatory: Do not set a default value, so that one has to think about the correct value.
## Therefore, the original ranFix of the outer_object is replaced, unless it is explicitly set to getCall(object)$ranFix or $fixed... (in confint.HLfit)
## Parameters not in ranFix are set to the initial value of of the optimization call.
##
get_HLCorcall <- function(outer_object, ## accepts fit object, or call, or list of call arguments
fixed, ## see comments above
... # anything needed to overcome promises in the call
) {
if (inherits(outer_object,"HLfit")) {
outer_call <- getCall(outer_object) ## gets a corrHLfit/fitme call, => eval it to get HLCor callS
outer_call$data <- outer_object$data ## removes dependence on promise
outer_fn <- paste(outer_call[[1L]])
if (outer_fn=="fitme") {
outer_call$fixed <- fixed
} else if (outer_fn=="HLCor") {
outer_call$ranPars <- fixed
} else outer_call$ranFix <- fixed
}
verbose <- outer_call$verbose
verbose["getCall"] <- TRUE
outer_call$verbose <- verbose
## compare to update.default, commented in R language Definition.
extras <- match.call(expand.dots = FALSE)$...
if (length(extras) > 0) {
existing <- !is.na(match(names(extras), names(outer_call)))
dotlist <- list(...)
for (a in names(extras)[existing]) outer_call[[a]] <- dotlist[[a]]
if (any(!existing)) {
outer_call <- c(as.list(outer_call), dotlist[!existing])
}
}
#
HLCorcall <- eval(as.call(outer_call)) ## calls corrHLfit and bypasses optimization to get the call from within the final HLCor
HLCorcall[[1L]] <- quote(HLCor)
.assignWrapper(HLCorcall$processed,"verbose['getCall'] <- NA")
return(HLCorcall)
}
update.HLfit <- function (object, formula., ..., evaluate = TRUE) {
if (is.null(call <- getCall(object)))
stop("need an object with call component")
extras <- match.call(expand.dots = FALSE)$...
if (!missing(formula.)) {
predictor <- formula(getCall(object)) ## formula.default gets formula from $call, not from $predictor; and we must use getCall, not $call
if (inherits(predictor,"predictor")) {
form <- update.formula(attr(predictor,"oriFormula"),formula.) ## LOSES ALL ATTRIBUTES
} else form <- update.formula(predictor,formula.)
## !!!! FR->FR does not handle etaFix$beta !!!!
if (! is.null(.findOffset(formula.))) {off <- NULL} else { off <- attr(predictor,"offsetObj")$total }
predArgs <- list(formula=form,
LMatrix=NULL, ## F I X M E argument for Predictor, to be removed ? (modif function Predictor() ?)
AMatrix=attr(predictor,"AMatrix"),
offset=off)
## attributes BinDenForm and oriFormula will be reconstructed:
call$formula <- do.call("Predictor",predArgs) ## reconstructs oriFormula... otherwise we have a predictor without it...
}
if (length(extras)) {
existing <- !is.na(match(names(extras), names(call))) ## which to replace and which to add to the call
for (a in names(extras)[existing]) call[[a]] <- extras[[a]] ## replace
if (any(!existing)) {
call <- c(as.list(call), extras[!existing]) ## add
call <- as.call(call)
}
}
if (evaluate)
eval(call, parent.frame())
else call
}
#`update.HLfit` <- function(object,formula.,...) {update.HL(object=object,formula.=formula.,...)}
#`update.HLCor` <- function(object,formula.,...) {update.HL(object=object,formula.=formula.,...)}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010306251e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835645-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 2,048
|
r
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010306251e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
## Packages: install once from the console; a script should only load them.
# install.packages(c("tidyverse", "pdftools"))
library(tidyverse)
library(pdftools)

## Whitespace-delimited returns, one series per row: risk-free, S&P 500,
## Apple, Intel, Safeway.
text <- read_delim(file = "Z:/GIT/Assignment1.txt", delim = " ", col_names = FALSE,
                   col_types = cols(.default = col_number()))
text

## Transpose each row into a column vector of observations.
risk_free <- t(text[1, ])
sp500 <- t(text[2, ])
apple <- t(text[3, ])
intel <- t(text[4, ])
safeway <- t(text[5, ])

text2 <- read_delim(file = "Z:/GIT/Assignment1_2.txt", delim = " ", col_names = FALSE)

## as_tibble() replaces the long-deprecated as.tibble().
## BUG FIX: the original renamed the second column to `SP500` but the plot
## and model below referenced `sp500`, which silently resolved to the
## global matrix instead of the data column. Use one consistent name.
data <- as_tibble(cbind(risk_free, sp500, apple, intel, safeway)) %>%
  rename(risk_free = V1, sp500 = V2, apple = V3, intel = V4, safeway = V5)

data %>% ggplot(aes(sp500, apple)) + geom_point() + geom_abline()

fit_apple <- lm(sp500 ~ apple, data = data)
summary(fit_apple)
|
/Assignment1.R
|
no_license
|
albolea/econ5100
|
R
| false
| false
| 730
|
r
|
## Packages: install once from the console; a script should only load them.
# install.packages(c("tidyverse", "pdftools"))
library(tidyverse)
library(pdftools)

## Whitespace-delimited returns, one series per row: risk-free, S&P 500,
## Apple, Intel, Safeway.
text <- read_delim(file = "Z:/GIT/Assignment1.txt", delim = " ", col_names = FALSE,
                   col_types = cols(.default = col_number()))
text

## Transpose each row into a column vector of observations.
risk_free <- t(text[1, ])
sp500 <- t(text[2, ])
apple <- t(text[3, ])
intel <- t(text[4, ])
safeway <- t(text[5, ])

text2 <- read_delim(file = "Z:/GIT/Assignment1_2.txt", delim = " ", col_names = FALSE)

## as_tibble() replaces the long-deprecated as.tibble().
## BUG FIX: the original renamed the second column to `SP500` but the plot
## and model below referenced `sp500`, which silently resolved to the
## global matrix instead of the data column. Use one consistent name.
data <- as_tibble(cbind(risk_free, sp500, apple, intel, safeway)) %>%
  rename(risk_free = V1, sp500 = V2, apple = V3, intel = V4, safeway = V5)

data %>% ggplot(aes(sp500, apple)) + geom_point() + geom_abline()

fit_apple <- lm(sp500 ~ apple, data = data)
summary(fit_apple)
|
# Machine-generated fuzzing fixture (valgrind reproduction case): feed a
# fixed integer-byte vector to the internal mcga:::ByteVectorToDoubles
# converter and print the structure of the decoded result.
testlist <- list(b = c(50400896L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result)
|
/mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613101640-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 182
|
r
|
testlist <- list(b = c(50400896L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result)
|
#' Extract the resampled performance values associated with a train
#' object's best tuning parameters, returned as a one-row data frame with
#' "Train"-prefixed metric columns plus the model method.
#' @export
getTrainPerf <- function(x) {
  best <- x$bestTune
  # Strip the legacy leading "." from tuning-parameter names so they
  # match the columns of x$results.
  names(best) <- sub("^\\.", "", names(best))
  # Keep only the results row(s) that match the winning tune.
  perf <- merge(x$results, best)
  perf <- perf[, names(perf) %in% x$perfNames, drop = FALSE]
  names(perf) <- paste0("Train", names(perf))
  perf$method <- x$method
  perf
}
|
/pkg/caret/R/getTrainPerf.R
|
no_license
|
topepo/caret
|
R
| false
| false
| 317
|
r
|
#' Extract the resampled performance values associated with a train
#' object's best tuning parameters, returned as a one-row data frame with
#' "Train"-prefixed metric columns plus the model method.
#' @export
getTrainPerf <- function(x) {
  best <- x$bestTune
  # Strip the legacy leading "." from tuning-parameter names so they
  # match the columns of x$results.
  names(best) <- sub("^\\.", "", names(best))
  # Keep only the results row(s) that match the winning tune.
  perf <- merge(x$results, best)
  perf <- perf[, names(perf) %in% x$perfNames, drop = FALSE]
  names(perf) <- paste0("Train", names(perf))
  perf$method <- x$method
  perf
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{municipality}
\alias{municipality}
\title{Municipality data with keys and polygon-geoms for municipalities of Denmark}
\format{
A data frame with 39,230 rows and 7 columns:
\describe{
\item{long}{Longitude coordinates.}
\item{lat}{Latitude coordinates.}
\item{order}{Order of coordinates in geoms.}
\item{group}{Geom groups.}
\item{id}{Name of entity.}
\item{id_numeric}{Number of entity.}
\item{hole}{Indication of a geom hole.}
}
}
\source{
Statistics Denmark
}
\usage{
municipality
}
\description{
Municipality data with keys and polygon-geoms for municipalities of Denmark
}
\keyword{datasets}
|
/man/municipality.Rd
|
permissive
|
kristianSN/plotDK
|
R
| false
| true
| 727
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{municipality}
\alias{municipality}
\title{Municipality data with keys and polygon-geoms for municipalities of Denmark}
\format{
A data frame with 39,230 rows and 7 columns:
\describe{
\item{long}{Longitude coordinates.}
\item{lat}{Latitude coordinates.}
\item{order}{Order of coordinates in geoms.}
\item{group}{Geom groups.}
\item{id}{Name of entity.}
\item{id_numeric}{Number of entity.}
\item{hole}{Indication of a geom hole.}
}
}
\source{
Statistics Denmark
}
\usage{
municipality
}
\description{
Municipality data with keys and polygon-geoms for municipalities of Denmark
}
\keyword{datasets}
|
#####
# Helper file with functions that construct ROC curves for glm and penfit objects
#####

# Generic: dispatch ROC-curve construction on the class of the fitted
# model. Methods are defined below for "logistf" and "penfit" objects;
# ... is forwarded to the selected method (e.g. `test`, `thr`).
constructROC <- function(model, ...) {
  UseMethod("constructROC", model)
}
# ROC curve for a Firth-penalized logistic regression ("logistf") model.
#
# model : fitted logistf object.
# test  : if TRUE, evaluate on the global test set `asthma_test`;
#         otherwise use the model's in-sample fitted probabilities.
# thr   : vector of classification thresholds to sweep.
# Returns a data frame with columns x (false positive rate) and
# y (true positive rate), sorted by x then y.
#
# NOTE(review): relies on globals `asthma_test` and `asthma` defined
# elsewhere, and on hard-coded predictor columns c(86, 256, 43) that must
# match the model's coefficients -- fragile; confirm before reuse.
constructROC.logistf <- function(model, test = TRUE, thr = seq(from = 0, to = 1, by = 0.1)) {
  m <- length(thr)
  roc <- data.frame(x = numeric(m), y = numeric(m))
  for(i in seq_along(thr)) {
    # Compute predictions
    # if test = TRUE, then use the test set asthma_test, otherwise use the training set.
    if (test) {
      # Linear predictor = intercept column + the three model predictors,
      # converted to a probability via the logistic function.
      preds <- as.matrix(data.frame(b = 1, asthma_test[, c(86, 256, 43)])) %*% model$coefficients
      preds <- ifelse(exp(preds)/(exp(preds) + 1) >= thr[i], "case", "control")
      obs <- asthma_test$CASE
    } else {
      preds <- ifelse(model$predict >= thr[i], "case", "control")
      obs <- relevel(factor(model$data$CASE, levels = c(1,0), labels = c("case", "control")), ref = "control")
    }
    preds <- factor(preds, levels = levels(asthma$CASE))
    # Compute confusion matrix
    confMatrix <- table(obs, preds, useNA = "always")
    # compute false positive rate.
    roc[i, "x"] = confMatrix[obs = "control", preds = "case"] / sum(confMatrix[obs = "control", ])
    # compute true positive rate.
    roc[i, "y"] = confMatrix[obs = "case", preds = "case"] / sum(confMatrix[obs = "case", ])
  }
  return(dplyr::arrange(roc, x, y))
}
# ROC curve for a penalized regression ("penfit") model.
#
# model : fitted penfit object.
# test  : if TRUE, evaluate on the global test set `asthma_test`;
#         otherwise predict on the global training set `asthma_training`.
# thr   : vector of classification thresholds to sweep.
# Returns a data frame with columns x (false positive rate) and
# y (true positive rate), sorted by x then y.
#
# NOTE(review): relies on globals `asthma_test`, `asthma_training` and
# `asthma` defined elsewhere in the project; confirm before reuse.
constructROC.penfit <- function(model, test = TRUE, thr = seq(from = 0, to = 1, by = 0.1)) {
  m <- length(thr)
  roc <- data.frame(x = numeric(m), y = numeric(m))
  for(i in seq_along(thr)) {
    # Compute predictions
    # if test = TRUE, then use the test set asthma_test, otherwise use the training set.
    if (test) {
      preds <- ifelse(predict(object = model,
                              penalized = asthma_test[, -1],
                              data = asthma_test) >= thr[i],
                      "case",
                      "control")
      obs <- asthma_test$CASE
    } else {
      preds <- ifelse(predict(object = model,
                              penalized = asthma_training[, -1],
                              data = asthma_training) >= thr[i],
                      "case",
                      "control")
      obs <- asthma_training$CASE
    }
    preds <- factor(preds, levels = levels(asthma$CASE))
    # Compute confusion matrix
    confMatrix <- table(obs, preds, useNA = "always")
    # compute false positive rate.
    roc[i, "x"] = confMatrix[obs = "control", preds = "case"] / sum(confMatrix[obs = "control", ])
    # compute true positive rate.
    roc[i, "y"] = confMatrix[obs = "case", preds = "case"] / sum(confMatrix[obs = "case", ])
  }
  return(dplyr::arrange(roc, x, y))
}
# Build a function that returns a ROC plot
plotROC <- function(roc) {
  # Plot the ROC curve (blue line + points) with the dashed chance diagonal
  # for reference; returns the ggplot object.
  diagonal <- data.frame(x = c(0, 1), y = c(0, 1))
  ggplot(roc, aes(x = x, y = y)) +
    geom_line(col = "blue") +
    geom_point(col = "blue") +
    geom_line(data = diagonal, aes(x = x, y = y), linetype = 2)
}
# Build a function to assess how well a model fits the given data
# S3 generic: summarize in-sample classification performance of a fitted
# model -- confusion matrix, accuracy/sensitivity/specificity/PPV/NPV, and
# training-set ROC points. Dispatches on the class of `model`.
fitData <- function(model, ...) {
  UseMethod("fitData", model)
}
fitData.penfit <- function(model) {
  # In-sample fit statistics for a penfit model: classify the fitted
  # probabilities on the training data at a 0.5 cutoff and compare against
  # the observed classes.
  prob <- predict(model, penalized = asthma_training[, -1], data = asthma_training)
  predicted <- factor(ifelse(prob >= 0.5, "case", "control"),
                      levels = levels(asthma_training$CASE))
  observed <- asthma_training$CASE
  cm <- caret::confusionMatrix(data = predicted, reference = observed, positive = "case")
  # Accuracy plus sensitivity/specificity/PPV/NPV, and training-set ROC.
  list(table = cm$table,
       stats = c(cm$overall[1], cm$byClass[1:4]),
       roc = constructROC(model, test = FALSE))
}
fitData.logistf <- function(model) {
  # In-sample fit statistics for a logistf model: classify the fitted
  # probabilities at a 0.5 cutoff and compare against the observed classes.
  predicted <- factor(ifelse(model$predict >= 0.5, "case", "control"),
                      levels = levels(asthma$CASE))
  observed <- relevel(factor(model$data$CASE, levels = c(1,0),
                             labels = c("case", "control")), ref = "control")
  cm <- caret::confusionMatrix(data = predicted, reference = observed, positive = "case")
  # Accuracy plus sensitivity/specificity/PPV/NPV, and training-set ROC.
  list(table = cm$table,
       stats = c(cm$overall[1], cm$byClass[1:4]),
       roc = constructROC(model, test = FALSE))
}
|
/Logistic Regression/helperROCfunctions.R
|
no_license
|
thuijskens/MSc-projects
|
R
| false
| false
| 4,629
|
r
|
#####
# Helper file with functions that construct ROC curves for glm and penfit objects
#####
# Build a function that makes a ROC curve for a given model (GLM or penfit)
# S3 generic: build a data frame of ROC points (x = false positive rate,
# y = true positive rate) for a fitted model. Dispatches on the class of
# `model`; methods exist for logistf and penfit fits below.
constructROC <- function(model, ...) {
  UseMethod("constructROC", model)
}
constructROC.logistf <- function(model, test = TRUE, thr = seq(from = 0, to = 1, by = 0.1)) {
  # ROC points for a logistf fit: one (FPR, TPR) row per threshold in `thr`.
  # If test = TRUE, evaluate on the held-out asthma_test set; otherwise on
  # the training data stored in the model object.
  n_thr <- length(thr)
  roc <- data.frame(x = numeric(n_thr), y = numeric(n_thr))
  # Fitted probabilities and observed classes are the same for every
  # threshold, so compute them once outside the loop.
  if (test) {
    # Linear predictor from intercept + the three fitted predictors
    # (columns 86, 256 and 43 of asthma_test -- assumes the model was fit
    # on exactly those columns), mapped through the logistic function.
    eta <- as.matrix(data.frame(b = 1, asthma_test[, c(86, 256, 43)])) %*% model$coefficients
    prob <- exp(eta)/(exp(eta) + 1)
    obs <- asthma_test$CASE
  } else {
    prob <- model$predict
    obs <- relevel(factor(model$data$CASE, levels = c(1,0), labels = c("case", "control")), ref = "control")
  }
  for (k in seq_along(thr)) {
    lab <- factor(ifelse(prob >= thr[k], "case", "control"), levels = levels(asthma$CASE))
    # Confusion matrix (rows = observed, columns = predicted).
    cm <- table(obs, lab, useNA = "always")
    # False positive rate: controls classified as cases.
    roc[k, "x"] <- cm["control", "case"] / sum(cm["control", ])
    # True positive rate: cases classified as cases.
    roc[k, "y"] <- cm["case", "case"] / sum(cm["case", ])
  }
  dplyr::arrange(roc, x, y)
}
constructROC.penfit <- function(model, test = TRUE, thr = seq(from = 0, to = 1, by = 0.1)) {
  # ROC points for a penalized regression (penfit) fit: one (FPR, TPR) row
  # per threshold in `thr`, on the test or training asthma data.
  n_thr <- length(thr)
  roc <- data.frame(x = numeric(n_thr), y = numeric(n_thr))
  # Predicted probabilities and observed classes do not depend on the
  # threshold, so compute them once up front.
  if (test) {
    prob <- predict(object = model, penalized = asthma_test[, -1], data = asthma_test)
    obs <- asthma_test$CASE
  } else {
    prob <- predict(object = model, penalized = asthma_training[, -1], data = asthma_training)
    obs <- asthma_training$CASE
  }
  for (k in seq_along(thr)) {
    lab <- factor(ifelse(prob >= thr[k], "case", "control"), levels = levels(asthma$CASE))
    # Confusion matrix (rows = observed, columns = predicted).
    cm <- table(obs, lab, useNA = "always")
    # False positive rate: controls classified as cases.
    roc[k, "x"] <- cm["control", "case"] / sum(cm["control", ])
    # True positive rate: cases classified as cases.
    roc[k, "y"] <- cm["case", "case"] / sum(cm["case", ])
  }
  dplyr::arrange(roc, x, y)
}
# Build a function that returns a ROC plot
plotROC <- function(roc) {
  # Plot the ROC curve (blue line + points) with the dashed chance diagonal
  # for reference; returns the ggplot object.
  diagonal <- data.frame(x = c(0, 1), y = c(0, 1))
  ggplot(roc, aes(x = x, y = y)) +
    geom_line(col = "blue") +
    geom_point(col = "blue") +
    geom_line(data = diagonal, aes(x = x, y = y), linetype = 2)
}
# Build a function to assess how well a model fits the given data
# S3 generic: summarize in-sample classification performance of a fitted
# model -- confusion matrix, accuracy/sensitivity/specificity/PPV/NPV, and
# training-set ROC points. Dispatches on the class of `model`.
fitData <- function(model, ...) {
  UseMethod("fitData", model)
}
fitData.penfit <- function(model) {
  # In-sample fit statistics for a penfit model: classify the fitted
  # probabilities on the training data at a 0.5 cutoff and compare against
  # the observed classes.
  prob <- predict(model, penalized = asthma_training[, -1], data = asthma_training)
  predicted <- factor(ifelse(prob >= 0.5, "case", "control"),
                      levels = levels(asthma_training$CASE))
  observed <- asthma_training$CASE
  cm <- caret::confusionMatrix(data = predicted, reference = observed, positive = "case")
  # Accuracy plus sensitivity/specificity/PPV/NPV, and training-set ROC.
  list(table = cm$table,
       stats = c(cm$overall[1], cm$byClass[1:4]),
       roc = constructROC(model, test = FALSE))
}
fitData.logistf <- function(model) {
  # In-sample fit statistics for a logistf model: classify the fitted
  # probabilities at a 0.5 cutoff and compare against the observed classes.
  predicted <- factor(ifelse(model$predict >= 0.5, "case", "control"),
                      levels = levels(asthma$CASE))
  observed <- relevel(factor(model$data$CASE, levels = c(1,0),
                             labels = c("case", "control")), ref = "control")
  cm <- caret::confusionMatrix(data = predicted, reference = observed, positive = "case")
  # Accuracy plus sensitivity/specificity/PPV/NPV, and training-set ROC.
  list(table = cm$table,
       stats = c(cm$overall[1], cm$byClass[1:4]),
       roc = constructROC(model, test = FALSE))
}
|
read_raw_reads <- function(read_ending){
  # Locate raw read files whose names match `read_ending` under the
  # configured raw-read directories (raw_reads_dirs is defined elsewhere)
  # and return their full paths in sorted order.
  matches <- list.files(raw_reads_dirs, pattern = read_ending, full.names = TRUE)
  sort(matches)
}
|
/R/read_raw_reads.R
|
no_license
|
ryjohnson09/SSTI090
|
R
| false
| false
| 125
|
r
|
read_raw_reads <- function(read_ending){
  # Locate raw read files whose names match `read_ending` under the
  # configured raw-read directories (raw_reads_dirs is defined elsewhere)
  # and return their full paths in sorted order.
  matches <- list.files(raw_reads_dirs, pattern = read_ending, full.names = TRUE)
  sort(matches)
}
|
library(data.table)
library(dplyr)

########## Reading data from folder ###################################
featureNames <- read.table("UCI HAR Dataset/features.txt")
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE)
subjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
activityTrain <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
featuresTrain <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
subjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
activityTest <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
featuresTest <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)

############## Merging train and test into one big dataset ###################
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featuresTrain, featuresTest)
colnames(features) <- t(featureNames[2])
colnames(activity) <- "Activity"
colnames(subject) <- "Subject"

############## Extraction of mean() and std() measurements ###################
# Match the literal substrings "mean()"/"std()"; the original pattern used a
# malformed leading "*" quantifier.
selectMeanStd <- featureNames[grep("mean\\(\\)|std\\(\\)", featureNames[, 2]), ]
# Subset the feature columns BEFORE binding Activity/Subject so the two
# identifier columns are not dropped (the original subset lost them).
completeData <- cbind(features[, selectMeanStd[, 1]], activity, subject)

# Use descriptive activity names from activity_labels.txt rather than codes.
completeData$Activity <- factor(completeData$Activity,
                                levels = activityLabels[, 1],
                                labels = activityLabels[, 2])

################ Name Change - Descriptive variable names #################
names(completeData) <- gsub("Acc", "Accelerometer", names(completeData))
names(completeData) <- gsub("Gyro", "Gyroscope", names(completeData))
names(completeData) <- gsub("BodyBody", "Body", names(completeData))
names(completeData) <- gsub("Mag", "Magnitude", names(completeData))
names(completeData) <- gsub("^t", "Time", names(completeData))
names(completeData) <- gsub("^f", "Frequency", names(completeData))
names(completeData) <- gsub("tBody", "TimeBody", names(completeData))
# Note: "()" is an empty regex group, so these match "-mean"/"-std"/"-freq";
# kept from the original to preserve the resulting column names.
names(completeData) <- gsub("-mean()", "Mean", names(completeData), ignore.case = TRUE)
names(completeData) <- gsub("-std()", "STD", names(completeData), ignore.case = TRUE)
names(completeData) <- gsub("-freq()", "Frequency", names(completeData), ignore.case = TRUE)
names(completeData) <- gsub("angle", "Angle", names(completeData))
names(completeData) <- gsub("gravity", "Gravity", names(completeData))

############### Creating the tidy summary data set ################
# One row per Subject/Activity pair with the mean of every measurement.
# group_by() needs explicit grouping variables (the original grouped by
# nothing), and funs() is deprecated in favor of plain function names.
TidyData <- completeData %>%
  group_by(Subject, Activity) %>%
  summarise_all(mean)
write.table(TidyData, "./UCI HAR Dataset/SummaryData.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
Morf883/Prjoject-4
|
R
| false
| false
| 2,599
|
r
|
library(data.table)
library(dplyr)

########## Reading data from folder ###################################
featureNames <- read.table("UCI HAR Dataset/features.txt")
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE)
subjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
activityTrain <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
featuresTrain <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
subjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
activityTest <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
featuresTest <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)

############## Merging train and test into one big dataset ###################
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featuresTrain, featuresTest)
colnames(features) <- t(featureNames[2])
colnames(activity) <- "Activity"
colnames(subject) <- "Subject"

############## Extraction of mean() and std() measurements ###################
# Match the literal substrings "mean()"/"std()"; the original pattern used a
# malformed leading "*" quantifier.
selectMeanStd <- featureNames[grep("mean\\(\\)|std\\(\\)", featureNames[, 2]), ]
# Subset the feature columns BEFORE binding Activity/Subject so the two
# identifier columns are not dropped (the original subset lost them).
completeData <- cbind(features[, selectMeanStd[, 1]], activity, subject)

# Use descriptive activity names from activity_labels.txt rather than codes.
completeData$Activity <- factor(completeData$Activity,
                                levels = activityLabels[, 1],
                                labels = activityLabels[, 2])

################ Name Change - Descriptive variable names #################
names(completeData) <- gsub("Acc", "Accelerometer", names(completeData))
names(completeData) <- gsub("Gyro", "Gyroscope", names(completeData))
names(completeData) <- gsub("BodyBody", "Body", names(completeData))
names(completeData) <- gsub("Mag", "Magnitude", names(completeData))
names(completeData) <- gsub("^t", "Time", names(completeData))
names(completeData) <- gsub("^f", "Frequency", names(completeData))
names(completeData) <- gsub("tBody", "TimeBody", names(completeData))
# Note: "()" is an empty regex group, so these match "-mean"/"-std"/"-freq";
# kept from the original to preserve the resulting column names.
names(completeData) <- gsub("-mean()", "Mean", names(completeData), ignore.case = TRUE)
names(completeData) <- gsub("-std()", "STD", names(completeData), ignore.case = TRUE)
names(completeData) <- gsub("-freq()", "Frequency", names(completeData), ignore.case = TRUE)
names(completeData) <- gsub("angle", "Angle", names(completeData))
names(completeData) <- gsub("gravity", "Gravity", names(completeData))

############### Creating the tidy summary data set ################
# One row per Subject/Activity pair with the mean of every measurement.
# group_by() needs explicit grouping variables (the original grouped by
# nothing), and funs() is deprecated in favor of plain function names.
TidyData <- completeData %>%
  group_by(Subject, Activity) %>%
  summarise_all(mean)
write.table(TidyData, "./UCI HAR Dataset/SummaryData.txt", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cimp5_2041_2060_cv.R
\docType{data}
\name{cimp5_2041_2060_cv}
\alias{cimp5_2041_2060_cv}
\title{Coefficient Variation of the Mean Seafloor Climatic Data among CIMP5 Models during 2041 to 2060}
\format{
A RasterBrick object of 9 raster layers:
\describe{
\item{epc_cv_2041_to_2060}{Coefficient variation of the mean export POC flux to seafloor (\%)}
\item{o2_cv_2041_to_2060}{Coefficient variation of the mean dissolved oxygen concentration at seafloor (\%)}
\item{ph_cv_2041_to_2060}{Coefficient variation of the mean pH at seafloor (\%)}
\item{thetao_cv_2041_to_2060}{Coefficient variation of the mean potential temperature at seafloor (\%)}
\item{arag_cv_2041_to_2060}{Coefficient variation of aragonite Concentration (\%)}
\item{calc_cv_2041_to_2060}{Coefficient variation of calcite Concentration (\%)}
\item{co3_cv_2041_to_2060}{Coefficient variation of mole Concentration of Carbonate expressed as Carbon in Sea Water (\%)}
\item{co3satarag_cv_2041_to_2060}{Coefficient variation of mole Concentration of Aragonite expressed as Carbon in Sea Water at Saturation (\%)}
\item{co3satcalc_cv_2041_to_2060}{Coefficient variation of mole Concentration of Calcite expressed as Carbon in Sea Water at Saturation (\%)}
}
}
\source{
\url{https://esgf-node.llnl.gov/search/esgf-llnl/}
}
\description{
Coefficient variation of the mean projected (RCP8.5) export POC flux to seafloor, bottom dissolved oxygen concentration,
hydrogen ion concentration and temperature were averaged from the Geophysical Fluid Dynamics Laboratory’s ESM 2G
(GFDL-ESM-2G), Institut Pierre Simon Laplace’s CM6-MR (IPSL-CM5A-MR) and Max Planck Institute’s ESM-MR (MPI-ESM-MR)
within the Coupled Models Intercomparison Project Phase 5 (CIMP5).
}
\details{
Yearly mean from 2041 to 2060 were calculated for the GFDL-ESM-2G, IPSL-CM5A-MR
and MPI-ESM-MR respectively. Coefficient variation of the mean was calculated among the three models for the periods of 2041 to 2060.
The export POC flux at seafloor was computed from the
export production at 100 m (epc100) using the Martin curve (Martin et al., 1987) following the equation:
\eqn{Flux = epc100*(depth/export depth)^-0.858}. The depth uses \code{\link{etopo1}} and the export depth was set to 100 m.
All CIMP5 data were downloaded from \url{https://esgf-node.llnl.gov/search/esgf-llnl/}.
}
\keyword{datasets}
|
/man/cimp5_2041_2060_cv.Rd
|
no_license
|
chihlinwei/SCC85
|
R
| false
| true
| 2,424
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cimp5_2041_2060_cv.R
\docType{data}
\name{cimp5_2041_2060_cv}
\alias{cimp5_2041_2060_cv}
\title{Coefficient Variation of the Mean Seafloor Climatic Data among CIMP5 Models during 2041 to 2060}
\format{
A RasterBrick object of 9 raster layers:
\describe{
\item{epc_cv_2041_to_2060}{Coefficient variation of the mean export POC flux to seafloor (\%)}
\item{o2_cv_2041_to_2060}{Coefficient variation of the mean dissolved oxygen concentration at seafloor (\%)}
\item{ph_cv_2041_to_2060}{Coefficient variation of the mean pH at seafloor (\%)}
\item{thetao_cv_2041_to_2060}{Coefficient variation of the mean potential temperature at seafloor (\%)}
\item{arag_cv_2041_to_2060}{Coefficient variation of aragonite Concentration (\%)}
\item{calc_cv_2041_to_2060}{Coefficient variation of calcite Concentration (\%)}
\item{co3_cv_2041_to_2060}{Coefficient variation of mole Concentration of Carbonate expressed as Carbon in Sea Water (\%)}
\item{co3satarag_cv_2041_to_2060}{Coefficient variation of mole Concentration of Aragonite expressed as Carbon in Sea Water at Saturation (\%)}
\item{co3satcalc_cv_2041_to_2060}{Coefficient variation of mole Concentration of Calcite expressed as Carbon in Sea Water at Saturation (\%)}
}
}
\source{
\url{https://esgf-node.llnl.gov/search/esgf-llnl/}
}
\description{
Coefficient variation of the mean projected (RCP8.5) export POC flux to seafloor, bottom dissolved oxygen concentration,
hydrogen ion concentration and temperature were averaged from the Geophysical Fluid Dynamics Laboratory’s ESM 2G
(GFDL-ESM-2G), Institut Pierre Simon Laplace’s CM6-MR (IPSL-CM5A-MR) and Max Planck Institute’s ESM-MR (MPI-ESM-MR)
within the Coupled Models Intercomparison Project Phase 5 (CIMP5).
}
\details{
Yearly mean from 2041 to 2060 were calculated for the GFDL-ESM-2G, IPSL-CM5A-MR
and MPI-ESM-MR respectively. Coefficient variation of the mean was calculated among the three models for the periods of 2041 to 2060.
The export POC flux at seafloor was computed from the
export production at 100 m (epc100) using the Martin curve (Martin et al., 1987) following the equation:
\eqn{Flux = epc100*(depth/export depth)^-0.858}. The depth uses \code{\link{etopo1}} and the export depth was set to 100 m.
All CIMP5 data were downloaded from \url{https://esgf-node.llnl.gov/search/esgf-llnl/}.
}
\keyword{datasets}
|
#' write_pao
#'
#' Write occupancy detection histories and climate covariates to a .pao
#' input file for program PRESENCE.
#' @param alpha Four letter alpha code for the species of interest
#' @param sim Logical; if TRUE, read a previously simulated detection
#'   history named \code{name} instead of building one from the buffered
#'   BBS counts
#' @param name Base name of the simulated history file (used when
#'   \code{sim = TRUE})
#' @return A .pao file containing the detection histories, covariates, and summary information to input into Presence
#' @export
write_pao <- function(alpha, sim = FALSE, name = NULL){
  opts <- read.csv("inst/model_opts.csv")
  covs <- read.csv(paste0("inst/output/", alpha, "/route_clim.csv"))
  common <- code_lookup$common[code_lookup$alpha == toupper(alpha)]
  # Ten-stop models collapse the 50 stops into 5 blocks of ten.
  tot_stops <- if (opts$tenstop) 5 else 50

  # Standardized site covariates (Lat/Lon + their squares, plus scaled
  # per-stop effects). Identical for the observed and simulated paths, so
  # built once by this local helper instead of being duplicated per branch.
  build_sitecovs <- function(covs, tot_stops) {
    spp_clim <- dplyr::arrange(covs, routeID)
    spp_clim <- dplyr::rename(spp_clim, Lat = Latitude, Lon = Longitude)
    spp_clim <- dplyr::mutate(spp_clim,
                              Lat = (Lat - mean(Lat))/sd(Lat),
                              Lon = (Lon - mean(Lon))/sd(Lon))
    spp_clim <- dplyr::mutate(spp_clim, sq_Lat = Lat ^ 2, sq_Lon = Lon ^ 2)
    stop_scaled <- scale(1:tot_stops)
    for (ss in 1:tot_stops) {
      spp_clim <- cbind(spp_clim, stop_scaled[ss], stop_scaled[ss]^2)
      colnames(spp_clim)[ncol(spp_clim) - 1] <- paste0("Stop", ss)
      colnames(spp_clim)[ncol(spp_clim)] <- paste0("sq_Stop", ss)
    }
    dplyr::select(spp_clim, -routeID)
  }

  if (!sim) {
    counts <- read.csv(paste0("inst/output/", alpha, "/count_buff.csv"))
    n_seasons <- max(counts$Year) - min(counts$Year) + 1
    ### Convert count data to long format
    counts <- dplyr::select(counts, routeID, Year, grep("count|stop", names(counts)))
    if (opts$tenstop) {
      counts <- dplyr::select(counts, -grep("stoptotal", names(counts)))
    }
    counts <- tidyr::gather(counts, key = "stop", value = "n", -routeID, -Year)
    ### Add column with presence/absence data
    pres <- dplyr::mutate(counts, occ = ifelse(n > 0, 1, 0))
    pres <- dplyr::select(pres, -n)
    ### Convert back to wide w/ 1 column for each year/stop (i.e., svy)
    pres <- tidyr::unite(pres, svy, Year, stop, sep = "_")
    pres <- pres[!duplicated(pres), ]
    pres <- tidyr::spread(pres, key = svy, value = occ)
    pres <- dplyr::arrange(pres, routeID)
    det_hist <- dplyr::select(pres, -routeID)
    pname <- paste0("inst/output/", alpha, "/pres/pres_in.pao")
  } else {
    det_hist <- read.csv(paste0("inst/output/", alpha, "/pres/", name, "_hist.csv"))
    pname <- paste0("inst/output/", alpha, "/pres/", name, ".pao")
  }

  sitecovs <- build_sitecovs(covs, tot_stops)
  # Drop routes with a missing covariate from both the covariates and the
  # detection histories (rows assumed aligned by routeID order -- as in the
  # original implementation).
  nas <- which(!is.na(sitecovs[, 3]))
  sitecovs <- sitecovs[nas, ]
  det_hist <- det_hist[nas, ]
  if (sim) n_seasons <- dim(det_hist)[2] / tot_stops

  spp_pao <- RPresence::create.pao(data = det_hist,
                                   nsurveyseason = rep(tot_stops, n_seasons),
                                   unitcov = sitecovs, survcov = NULL,
                                   title = paste(common, "PRESENCE Analysis", sep = " "),
                                   paoname = pname)
  RPresence::write.pao(pao = spp_pao)
}
#' write_psi_pao
#'
#' Write first-season occupancy detection histories and climate covariates
#' to a .pao input file for program PRESENCE (initial-occupancy model).
#' @param alpha Four letter alpha code for the species of interest
#' @return A .pao file containing the first-season detection histories,
#'   covariates, and summary information to input into Presence
#' @export
write_psi_pao <- function(alpha){
  opts <- read.csv("inst/model_opts.csv")
  covs <- read.csv(paste0("inst/output/", alpha, "/route_clim.csv"))
  common <- code_lookup$common[code_lookup$alpha == toupper(alpha)]
  # Ten-stop models collapse the 50 stops into 5 blocks of ten.
  tot_stops <- if (opts$tenstop) 5 else 50
  counts <- read.csv(paste0("inst/output/", alpha, "/count_buff.csv"))
  ### Convert count data to long format
  counts <- dplyr::select(counts, routeID, Year, grep("count|stop", names(counts)))
  if (opts$tenstop) {
    counts <- dplyr::select(counts, -grep("stoptotal", names(counts)))
  }
  counts <- tidyr::gather(counts, key = "stop", value = "n", -routeID, -Year)
  ### Add column with presence/absence data
  pres <- dplyr::mutate(counts, occ = ifelse(n > 0, 1, 0))
  pres <- dplyr::select(pres, -n)
  ### Convert back to wide w/ 1 column for each year/stop (i.e., svy)
  pres <- tidyr::unite(pres, svy, Year, stop, sep = "_")
  pres <- pres[!duplicated(pres), ]
  pres <- tidyr::spread(pres, key = svy, value = occ)
  pres <- dplyr::arrange(pres, routeID)
  det_hist <- dplyr::select(pres, -routeID)
  # Keep only the first season's surveys for the initial-occupancy model.
  det_hist <- det_hist[, 1:tot_stops]
  # Standardized site covariates: Lat/Lon (+ squares) and scaled per-stop
  # effects, in routeID order to align with the detection histories.
  spp_clim <- dplyr::arrange(covs, routeID)
  spp_clim <- dplyr::rename(spp_clim, Lat = Latitude, Lon = Longitude)
  spp_clim <- dplyr::mutate(spp_clim,
                            Lat = (Lat - mean(Lat))/sd(Lat),
                            Lon = (Lon - mean(Lon))/sd(Lon))
  spp_clim <- dplyr::mutate(spp_clim, sq_Lat = Lat ^ 2, sq_Lon = Lon ^ 2)
  stop_scaled <- scale(1:tot_stops)
  for (ss in 1:tot_stops) {
    spp_clim <- cbind(spp_clim, stop_scaled[ss], stop_scaled[ss]^2)
    colnames(spp_clim)[ncol(spp_clim) - 1] <- paste0("Stop", ss)
    colnames(spp_clim)[ncol(spp_clim)] <- paste0("sq_Stop", ss)
  }
  sitecovs <- dplyr::select(spp_clim, -routeID)
  # Restrict covariates to the first season's climate columns (named by the
  # first survey year) plus the Stop/Lat/Lon terms.
  sitecovs <- dplyr::select(sitecovs, grep(paste0(min(counts$Year), "|Stop|Lon|Lat"), names(sitecovs)))
  # Drop routes with a missing covariate from covariates and histories alike.
  nas <- which(!is.na(sitecovs[, 3]))
  sitecovs <- sitecovs[nas, ]
  det_hist <- det_hist[nas, ]
  pname <- paste0("inst/output/", alpha, "/pres/pres_in_psi.pao")
  spp_pao <- RPresence::create.pao(data = det_hist, nsurveyseason = tot_stops,
                                   unitcov = as.matrix(sitecovs), survcov = NULL,
                                   title = paste(common, "PRESENCE Analysis", sep = " "),
                                   paoname = pname)
  RPresence::write.pao(pao = spp_pao)
}
|
/R/write_pao.R
|
no_license
|
SMBC-NZP/BBSclim
|
R
| false
| false
| 7,453
|
r
|
#' write_pao
#'
#' Write occupancy detection histories and climate covariates to a .pao
#' input file for program PRESENCE.
#' @param alpha Four letter alpha code for the species of interest
#' @param sim Logical; if TRUE, read a previously simulated detection
#'   history named \code{name} instead of building one from the buffered
#'   BBS counts
#' @param name Base name of the simulated history file (used when
#'   \code{sim = TRUE})
#' @return A .pao file containing the detection histories, covariates, and summary information to input into Presence
#' @export
write_pao <- function(alpha, sim = FALSE, name = NULL){
  opts <- read.csv("inst/model_opts.csv")
  covs <- read.csv(paste0("inst/output/", alpha, "/route_clim.csv"))
  common <- code_lookup$common[code_lookup$alpha == toupper(alpha)]
  # Ten-stop models collapse the 50 stops into 5 blocks of ten.
  tot_stops <- if (opts$tenstop) 5 else 50

  # Standardized site covariates (Lat/Lon + their squares, plus scaled
  # per-stop effects). Identical for the observed and simulated paths, so
  # built once by this local helper instead of being duplicated per branch.
  build_sitecovs <- function(covs, tot_stops) {
    spp_clim <- dplyr::arrange(covs, routeID)
    spp_clim <- dplyr::rename(spp_clim, Lat = Latitude, Lon = Longitude)
    spp_clim <- dplyr::mutate(spp_clim,
                              Lat = (Lat - mean(Lat))/sd(Lat),
                              Lon = (Lon - mean(Lon))/sd(Lon))
    spp_clim <- dplyr::mutate(spp_clim, sq_Lat = Lat ^ 2, sq_Lon = Lon ^ 2)
    stop_scaled <- scale(1:tot_stops)
    for (ss in 1:tot_stops) {
      spp_clim <- cbind(spp_clim, stop_scaled[ss], stop_scaled[ss]^2)
      colnames(spp_clim)[ncol(spp_clim) - 1] <- paste0("Stop", ss)
      colnames(spp_clim)[ncol(spp_clim)] <- paste0("sq_Stop", ss)
    }
    dplyr::select(spp_clim, -routeID)
  }

  if (!sim) {
    counts <- read.csv(paste0("inst/output/", alpha, "/count_buff.csv"))
    n_seasons <- max(counts$Year) - min(counts$Year) + 1
    ### Convert count data to long format
    counts <- dplyr::select(counts, routeID, Year, grep("count|stop", names(counts)))
    if (opts$tenstop) {
      counts <- dplyr::select(counts, -grep("stoptotal", names(counts)))
    }
    counts <- tidyr::gather(counts, key = "stop", value = "n", -routeID, -Year)
    ### Add column with presence/absence data
    pres <- dplyr::mutate(counts, occ = ifelse(n > 0, 1, 0))
    pres <- dplyr::select(pres, -n)
    ### Convert back to wide w/ 1 column for each year/stop (i.e., svy)
    pres <- tidyr::unite(pres, svy, Year, stop, sep = "_")
    pres <- pres[!duplicated(pres), ]
    pres <- tidyr::spread(pres, key = svy, value = occ)
    pres <- dplyr::arrange(pres, routeID)
    det_hist <- dplyr::select(pres, -routeID)
    pname <- paste0("inst/output/", alpha, "/pres/pres_in.pao")
  } else {
    det_hist <- read.csv(paste0("inst/output/", alpha, "/pres/", name, "_hist.csv"))
    pname <- paste0("inst/output/", alpha, "/pres/", name, ".pao")
  }

  sitecovs <- build_sitecovs(covs, tot_stops)
  # Drop routes with a missing covariate from both the covariates and the
  # detection histories (rows assumed aligned by routeID order -- as in the
  # original implementation).
  nas <- which(!is.na(sitecovs[, 3]))
  sitecovs <- sitecovs[nas, ]
  det_hist <- det_hist[nas, ]
  if (sim) n_seasons <- dim(det_hist)[2] / tot_stops

  spp_pao <- RPresence::create.pao(data = det_hist,
                                   nsurveyseason = rep(tot_stops, n_seasons),
                                   unitcov = sitecovs, survcov = NULL,
                                   title = paste(common, "PRESENCE Analysis", sep = " "),
                                   paoname = pname)
  RPresence::write.pao(pao = spp_pao)
}
#' write_psi_pao
#'
#' Write first-season occupancy detection histories and climate covariates
#' to a .pao input file for program PRESENCE (initial-occupancy model).
#' @param alpha Four letter alpha code for the species of interest
#' @return A .pao file containing the first-season detection histories,
#'   covariates, and summary information to input into Presence
#' @export
write_psi_pao <- function(alpha){
  opts <- read.csv("inst/model_opts.csv")
  covs <- read.csv(paste0("inst/output/", alpha, "/route_clim.csv"))
  common <- code_lookup$common[code_lookup$alpha == toupper(alpha)]
  # Ten-stop models collapse the 50 stops into 5 blocks of ten.
  tot_stops <- if (opts$tenstop) 5 else 50
  counts <- read.csv(paste0("inst/output/", alpha, "/count_buff.csv"))
  ### Convert count data to long format
  counts <- dplyr::select(counts, routeID, Year, grep("count|stop", names(counts)))
  if (opts$tenstop) {
    counts <- dplyr::select(counts, -grep("stoptotal", names(counts)))
  }
  counts <- tidyr::gather(counts, key = "stop", value = "n", -routeID, -Year)
  ### Add column with presence/absence data
  pres <- dplyr::mutate(counts, occ = ifelse(n > 0, 1, 0))
  pres <- dplyr::select(pres, -n)
  ### Convert back to wide w/ 1 column for each year/stop (i.e., svy)
  pres <- tidyr::unite(pres, svy, Year, stop, sep = "_")
  pres <- pres[!duplicated(pres), ]
  pres <- tidyr::spread(pres, key = svy, value = occ)
  pres <- dplyr::arrange(pres, routeID)
  det_hist <- dplyr::select(pres, -routeID)
  # Keep only the first season's surveys for the initial-occupancy model.
  det_hist <- det_hist[, 1:tot_stops]
  # Standardized site covariates: Lat/Lon (+ squares) and scaled per-stop
  # effects, in routeID order to align with the detection histories.
  spp_clim <- dplyr::arrange(covs, routeID)
  spp_clim <- dplyr::rename(spp_clim, Lat = Latitude, Lon = Longitude)
  spp_clim <- dplyr::mutate(spp_clim,
                            Lat = (Lat - mean(Lat))/sd(Lat),
                            Lon = (Lon - mean(Lon))/sd(Lon))
  spp_clim <- dplyr::mutate(spp_clim, sq_Lat = Lat ^ 2, sq_Lon = Lon ^ 2)
  stop_scaled <- scale(1:tot_stops)
  for (ss in 1:tot_stops) {
    spp_clim <- cbind(spp_clim, stop_scaled[ss], stop_scaled[ss]^2)
    colnames(spp_clim)[ncol(spp_clim) - 1] <- paste0("Stop", ss)
    colnames(spp_clim)[ncol(spp_clim)] <- paste0("sq_Stop", ss)
  }
  sitecovs <- dplyr::select(spp_clim, -routeID)
  # Restrict covariates to the first season's climate columns (named by the
  # first survey year) plus the Stop/Lat/Lon terms.
  sitecovs <- dplyr::select(sitecovs, grep(paste0(min(counts$Year), "|Stop|Lon|Lat"), names(sitecovs)))
  # Drop routes with a missing covariate from covariates and histories alike.
  nas <- which(!is.na(sitecovs[, 3]))
  sitecovs <- sitecovs[nas, ]
  det_hist <- det_hist[nas, ]
  pname <- paste0("inst/output/", alpha, "/pres/pres_in_psi.pao")
  spp_pao <- RPresence::create.pao(data = det_hist, nsurveyseason = tot_stops,
                                   unitcov = as.matrix(sitecovs), survcov = NULL,
                                   title = paste(common, "PRESENCE Analysis", sep = " "),
                                   paoname = pname)
  RPresence::write.pao(pao = spp_pao)
}
|
# Read the source file; "?" marks missing values and fields are ";"-separated.
d <- read.csv2("household_power_consumption.txt", na.strings = "?", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# Keep only the two days of interest (1-2 Feb 2007).
d_subset <- subset(d, Date == "1/2/2007")
d_subset <- rbind(d_subset, subset(d, Date == "2/2/2007"))
# Combine the Date and Time fields into a single datetime column.
d_subset$DateTime <- strptime(paste(d_subset$Date, d_subset$Time), "%d/%m/%Y %H:%M:%S")
# Convert the measurement to numeric (it is read in as character).
d_subset$Global_active_power <- as.numeric(d_subset$Global_active_power)
# Plot global active power over time and save it to plot2.png.
# (Fixes the axis-label typo "Global Active Poer (killowats)".)
png("plot2.png", width = 480, height = 480)
plot(d_subset$DateTime, d_subset$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
/Exploratory Data Analysis/Project 1/plot2.R
|
no_license
|
marcin1213/dataScience
|
R
| false
| false
| 646
|
r
|
# Read the source file; "?" marks missing values and fields are ";"-separated.
d <- read.csv2("household_power_consumption.txt", na.strings = "?", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# Keep only the two days of interest (1-2 Feb 2007).
d_subset <- subset(d, Date == "1/2/2007")
d_subset <- rbind(d_subset, subset(d, Date == "2/2/2007"))
# Combine the Date and Time fields into a single datetime column.
d_subset$DateTime <- strptime(paste(d_subset$Date, d_subset$Time), "%d/%m/%Y %H:%M:%S")
# Convert the measurement to numeric (it is read in as character).
d_subset$Global_active_power <- as.numeric(d_subset$Global_active_power)
# Plot global active power over time and save it to plot2.png.
# (Fixes the axis-label typo "Global Active Poer (killowats)".)
png("plot2.png", width = 480, height = 480)
plot(d_subset$DateTime, d_subset$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
context("Prune superfluous taxonomic ranks.")
test_that("Superfluous ranks get pruned.", {
  # Subsetting Easplist to genus Cyperus retains every factor level of the
  # full taxonomy; prune_levels() must never increase that count.
  cyperus_taxa <- subset(Easplist, TaxonName == "Cyperus", slot = "taxonNames")
  levels_before <- length(levels(cyperus_taxa))
  levels_after <- length(levels(prune_levels(cyperus_taxa)))
  expect_true(levels_before >= levels_after)
})
|
/tests/testthat/test-prune_levels.R
|
no_license
|
ropensci/taxlist
|
R
| false
| false
| 254
|
r
|
context("Prune superfluous taxonomic ranks.")
test_that("Superfluous ranks get pruned.", {
  # Subsetting Easplist to genus Cyperus retains every factor level of the
  # full taxonomy; prune_levels() must never increase that count.
  cyperus_taxa <- subset(Easplist, TaxonName == "Cyperus", slot = "taxonNames")
  levels_before <- length(levels(cyperus_taxa))
  levels_after <- length(levels(prune_levels(cyperus_taxa)))
  expect_true(levels_before >= levels_after)
})
|
# Problem on Outlier Treatment
#
# Strategy: outliers in each numeric column are detected with Tukey's fences
# (Q1 - 1.5*IQR and Q3 + 1.5*IQR) and replaced by winsorization, i.e. capped
# at a chosen low/high percentile of the same column.
#
# Bug fixes relative to the original version of this script:
#   * several flag expressions used a stray '!' (e.g. 'rm<!h', 'dis>!l'),
#     which negates the numeric limit instead of comparing against it;
#   * flags for 'rm' and 'medv' used c(low, high) which concatenates two
#     logical vectors instead of combining them with '|';
#   * the replacement percentiles for 'lstat' were computed from 'black';
#   * the winsorized values were assigned to attach()ed copies, so the
#     dataset itself was never updated - the data frame is modified directly
#     here (this also avoids shadowing base::rm with the 'rm' column).
library(readr)

# Importing dataset
boston_data <- read_csv("C:/Users/WIN10/Desktop/LEARNING/DS/DPP/datasets/boston_data.csv")
View(boston_data)  # inspect the raw data

# Checking for NA values
sum(is.na(boston_data))    # total number of NA values
table(is.na(boston_data))  # NA vs non-NA cell counts

# Winsorize a numeric vector using Tukey's IQR fences.
#   x     : numeric vector (one dataset column)
#   probs : length-2 vector of percentiles used as the replacement caps for
#           low and high outliers respectively
# Returns x with values below Q1 - 1.5*IQR replaced by quantile(x, probs[1])
# and values above Q3 + 1.5*IQR replaced by quantile(x, probs[2]).
winsorize <- function(x, probs) {
  stopifnot(is.numeric(x), length(probs) == 2)
  qnt   <- quantile(x, probs = c(.25, .75), na.rm = TRUE)
  caps  <- quantile(x, probs = probs, na.rm = TRUE)
  fence <- 1.5 * IQR(x, na.rm = TRUE)
  lower <- qnt[1] - fence  # lower limit: Q1 - (1.5*IQR)
  upper <- qnt[2] + fence  # upper limit: Q3 + (1.5*IQR)
  x[x < lower] <- caps[1]
  x[x > upper] <- caps[2]
  x
}

# Replacement percentiles per column (the same values the original analysis
# used for each column). Columns indus, nox, age, rad and tax showed no
# outliers in their boxplots, and 'chas' is categorical, so they are skipped.
caps_by_column <- list(
  crim    = c(.01, .98),
  zn      = c(.01, .95),
  rm      = c(.01, .95),
  dis     = c(.01, .95),
  ptratio = c(.05, .95),
  black   = c(.05, .95),
  lstat   = c(.05, .95),
  medv    = c(.05, .95)
)

for (col in names(caps_by_column)) {
  # Boxplot to identify outliers before treatment
  boxplot(boston_data[[col]], horizontal = TRUE, xlab = col, ylab = "frequency",
          main = paste(col, "vs frequency"), col = "blue", border = "red")
  boston_data[[col]] <- winsorize(boston_data[[col]], caps_by_column[[col]])
  boxplot(boston_data[[col]])  # confirm the outliers have been capped
}
# We have removed all the outliers from our dataset
|
/outlier treatment.R
|
no_license
|
priyankaankireddypalli/Data-Preprocessing
|
R
| false
| false
| 8,275
|
r
|
# Problem on Outlier Treatment
#
# Strategy: outliers in each numeric column are detected with Tukey's fences
# (Q1 - 1.5*IQR and Q3 + 1.5*IQR) and replaced by winsorization, i.e. capped
# at a chosen low/high percentile of the same column.
#
# Bug fixes relative to the original version of this script:
#   * several flag expressions used a stray '!' (e.g. 'rm<!h', 'dis>!l'),
#     which negates the numeric limit instead of comparing against it;
#   * flags for 'rm' and 'medv' used c(low, high) which concatenates two
#     logical vectors instead of combining them with '|';
#   * the replacement percentiles for 'lstat' were computed from 'black';
#   * the winsorized values were assigned to attach()ed copies, so the
#     dataset itself was never updated - the data frame is modified directly
#     here (this also avoids shadowing base::rm with the 'rm' column).
library(readr)

# Importing dataset
boston_data <- read_csv("C:/Users/WIN10/Desktop/LEARNING/DS/DPP/datasets/boston_data.csv")
View(boston_data)  # inspect the raw data

# Checking for NA values
sum(is.na(boston_data))    # total number of NA values
table(is.na(boston_data))  # NA vs non-NA cell counts

# Winsorize a numeric vector using Tukey's IQR fences.
#   x     : numeric vector (one dataset column)
#   probs : length-2 vector of percentiles used as the replacement caps for
#           low and high outliers respectively
# Returns x with values below Q1 - 1.5*IQR replaced by quantile(x, probs[1])
# and values above Q3 + 1.5*IQR replaced by quantile(x, probs[2]).
winsorize <- function(x, probs) {
  stopifnot(is.numeric(x), length(probs) == 2)
  qnt   <- quantile(x, probs = c(.25, .75), na.rm = TRUE)
  caps  <- quantile(x, probs = probs, na.rm = TRUE)
  fence <- 1.5 * IQR(x, na.rm = TRUE)
  lower <- qnt[1] - fence  # lower limit: Q1 - (1.5*IQR)
  upper <- qnt[2] + fence  # upper limit: Q3 + (1.5*IQR)
  x[x < lower] <- caps[1]
  x[x > upper] <- caps[2]
  x
}

# Replacement percentiles per column (the same values the original analysis
# used for each column). Columns indus, nox, age, rad and tax showed no
# outliers in their boxplots, and 'chas' is categorical, so they are skipped.
caps_by_column <- list(
  crim    = c(.01, .98),
  zn      = c(.01, .95),
  rm      = c(.01, .95),
  dis     = c(.01, .95),
  ptratio = c(.05, .95),
  black   = c(.05, .95),
  lstat   = c(.05, .95),
  medv    = c(.05, .95)
)

for (col in names(caps_by_column)) {
  # Boxplot to identify outliers before treatment
  boxplot(boston_data[[col]], horizontal = TRUE, xlab = col, ylab = "frequency",
          main = paste(col, "vs frequency"), col = "blue", border = "red")
  boston_data[[col]] <- winsorize(boston_data[[col]], caps_by_column[[col]])
  boxplot(boston_data[[col]])  # confirm the outliers have been capped
}
# We have removed all the outliers from our dataset
|
# Hierarchical distance-sampling analysis: data preparation.
# Builds 'mec' (detections of the focal species) and 'absent' (sampled
# transect-years with zero detections), combined into 'all_mec'.
# NOTE(review): rm(list=ls()) wipes the whole workspace; avoid in shared scripts.
rm(list=ls())
library(rjags)
library(jagsUI)
library(dplyr)
# Run model 8.2 in MECAL dataset
# Take only 2014-2017 to see what is the trend only when there is the 2 measures (AES and SG)
# - If include it before 2014, I am including years when abundance is high in places where there is
# no SG (because the measure did not exist), so I am including noise in the model
# ---- I ignore counts in each observation (cluster size)
# ---- Data ----
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data")
d <- read.csv("DataDS_ready.csv")
d <- d[which(d$Year %in% c(2014, 2015, 2016, 2017)), ]
# Information: bins, years, sites
strip.width <- 200
dist.breaks <- c(0,25,50,100,200)
int.w <- diff(dist.breaks) # width of distance categories (v)
midpt <- diff(dist.breaks)/2+dist.breaks[-5]  # midpoint of each distance bin
nG <- length(dist.breaks)-1  # number of distance bins
yrs <- c(2014, 2015, 2016, 2017)
nyrs <- length(yrs)
# To take into account transects with abundance 0
# 1. Select all transects IDs from all species observations
# 2. Join the observations of MECAL (for example) with all transects so that they remain with NA if the
# species was there but it wasnt sampled
d_tr <- d[ ,which(colnames(d) %in% c("Species", "T_Y", "Observer"))]
d_tr_all <- data.frame(T_Y = unique(d_tr$T_Y), id = NA)
# For observer variable
d_tr$Observer <- as.character(d_tr$Observer)
d_tr_all_obs <- left_join(d_tr_all, d_tr)
d_tr_all_obs <- d_tr_all_obs[ ,c(1,4)]
d_tr_all_obs <- d_tr_all_obs[which(!duplicated(d_tr_all_obs)), ] # Table with all sampled fields and which observer sampled it
d_tr_all_obs$Observer <- as.character(d_tr_all_obs$Observer)
d_tr_all_obs$T_Y <- as.character(d_tr_all_obs$T_Y)
# NOTE(review): surrounding comments say species MECAL but the filter selects
# "ALRUF" - confirm which species this run is meant to analyse.
mec <- d[which(d$Species == "ALRUF"), which(colnames(d) %in% c("Year", "Banda", "transectID", "T_Y", "Species", "Observer"))] # Select species MECAL and all years
mec <- arrange(mec, Year, transectID) #Ordered
mec_detec_transectID <- unique(mec$transectID)
mec$Observer <- as.character(mec$Observer)
absent <- anti_join(d_tr_all,mec) # Transects with 0 abundance, add to mec.
colnames(absent)[2] <- "Banda" # Format it to add the rows to mec
absent$T_Y <- as.character(absent$T_Y)
absent$Species <- "ALRUF"
absent <- left_join(absent, d_tr_all_obs)
# Parse Year and transectID out of the T_Y string. Transect IDs are either
# 4 characters (e.g. "BA12") or 5 characters (e.g. "SI102"): the 10th
# character of T_Y is numeric only for 5-character IDs, which is the test
# used below.
for (i in 1:nrow(absent)){ # Format to join absent - detections
cent <- substr(absent$T_Y[i], 10,10) # To include SI102 (cents)
cent <- as.numeric(cent)
if(is.na(cent)){
absent$Year[i] <- substr(absent$T_Y[i], 6,9)
absent$transectID[i] <- substr(absent$T_Y[i], 1,4)
} else { absent$Year[i] <- substr(absent$T_Y[i], 7,10)
absent$transectID[i] <- substr(absent$T_Y[i], 1,5)}
}
all_mec <- rbind(mec,absent) # Include transects with abundance 0
all_mec <- arrange(all_mec, Year, transectID) # Ordered
absent$count <- 0
# ---- Distance observations ----
# Build the sites x years count matrix m (NA = transect not sampled that year).
all.sites <- unique(all_mec$transectID)
# Fixed typo: the argument was spelled 'descreasing', which is not a valid
# argument of sort(), so the requested decreasing order was never applied.
# Row order is used consistently downstream via rownames, so the fix is safe.
all.sites <- sort(all.sites, decreasing = TRUE)
max.sites <- length(all.sites)
m <- matrix(NA, nrow = length(all.sites), ncol = nyrs)
rownames(m) <- all.sites
colnames(m) <- yrs
# Add counts > 0: number of detection rows per transect and year
count <- aggregate(Species ~ Year + transectID, FUN = length, data = mec)
for (i in seq_len(nrow(count))){
  m[which(rownames(m) %in% count$transectID[i]), which(colnames(m) %in% count$Year[i])] <- count$Species[i]
}
# Add absences (0) for sampled transect-years without detections
for (i in seq_len(nrow(absent))){
  m[which(rownames(m) %in% absent$transectID[i]), which(colnames(m) %in% absent$Year[i])] <- absent$count[i]
}
# Only to check: count of individuals per year
count.year <- colSums(m, na.rm = TRUE)
# ---- Co-variates ----
# Management areas (AES / SG), geographic zone and observer, all aligned to
# the row order of the count matrix m via rownames/Codi.
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data")
manag <- read.csv("management_area_500.csv")
manag <- manag[ , c(1,2,7:14)] # Select years 2014 - 2017
manag <- manag[which(manag$Codi %in% all.sites), ] # Select transects with census
# Be sure the fields are in the same order
# NOTE(review): 'order' shadows base::order within this script scope.
order <- as.data.frame(m)
order_codi <- as.vector(rownames(order))
order$Codi <- order_codi
manag <- left_join(order,manag)
# Area AES - standardised (z-score) over all site-years
area_aes <- as.matrix(manag[ ,c(7:10)])
aes_mean <- mean(area_aes)
aes_sd <- sd(area_aes)
aes_sc <- (area_aes - aes_mean) / aes_sd
# Area SG - standardised (z-score) over all site-years
area_sg <- as.matrix(manag[ ,c(11:14)])
sg_mean <- mean(area_sg)
sg_sd <- sd(area_sg)
sg_sc <- (area_sg - sg_mean) / sg_sd
# Zone (Occidental = 0; Oriental = 1), derived from the 2-letter transect prefix
zone <- order
for (i in 1:nrow(zone)){
if(substr(zone$Codi[i], 1,2) == "BA"){zone[i,1:4] <- 0}
if(substr(zone$Codi[i], 1,2) == "BM"){zone[i,1:4] <- 1}
if(substr(zone$Codi[i], 1,2) == "SI"){zone[i,1:4] <- 1}
if(substr(zone$Codi[i], 1,2) == "AF"){zone[i,1:4] <- 0}
if(substr(zone$Codi[i], 1,2) == "BE"){zone[i,1:4] <- 1}
if(substr(zone$Codi[i], 1,2) == "GR"){zone[i,1:4] <- 0}
}
zone <- zone[,-5]
# Observer: sites x years matrix of observer IDs, same layout as m
# Format
obs <- matrix(NA, nrow = max.sites, ncol = nyrs)
rownames(obs) <- all.sites
colnames(obs) <- yrs
# Add observers for fields with counts > 0
for (i in 1:nrow(mec)){
obs[which(rownames(obs) %in% mec$transectID[i]), which(colnames(obs) %in% mec$Year[i])] <- mec$Observer[i]
}
# Add observers for fields with absences (0)
for (i in 1:nrow(absent)){
obs[which(rownames(obs) %in% absent$transectID[i]), which(colnames(obs) %in% absent$Year[i])] <- absent$Observer[i]
}
# ---- Specify data in JAGS format ----
# Flatten the sites x years matrices into long vectors (column-major, i.e.
# all sites for year 1, then year 2, ...), as required by the JAGS model.
# Distance class and ind
nind <- nrow(mec)
dclass <- mec$Banda
# Get one long vector with counts per year and site
yLong <- unlist(as.data.frame(m), use.names = F)
sitesYears <- NULL
for (i in 1:nyrs){
sitesYears <- c(sitesYears,c(1:length(all.sites)))}
# Get one long vector for each site-year combination of each dclass observation
###RS: Fixed index to map dclass onto site-year combinations
# For the index, create a vector of ylong where NA are 0 (because I need the same length)
yLong_index <- yLong
yLong_index[which(is.na(yLong_index))] <- 0
n.allSiteYear <- max.sites*nyrs
siteYear.dclass <- NULL
for (i in 1:n.allSiteYear){
siteYear.dclass <- c(siteYear.dclass,rep(i, yLong_index[i]))}
# Get one vector per co-variate
area_AES <- NULL
for (i in 1:nyrs){
area_AES <- c(area_AES,aes_sc[1:length(all.sites),i])}
area_SG <- NULL
for (i in 1:nyrs){
area_SG <- c(area_SG,sg_sc[1:length(all.sites),i])}
zon <- NULL
for (i in 1:nyrs){
zon <- c(zon,zone[1:length(all.sites),i])}
ob <- NULL
for (i in 1:nyrs){
ob <- c(ob,obs[1:length(all.sites),i])}
ob <- as.numeric(factor(ob)) # JAGS doesn't accept categorical variables
## RS: = observer is a covariate so NAs are a problem because nothing
# in the model specifies how the NAs can be estimated ( never shows up
# on the left hand side of a line of code) So there are two solutions:
### 1.Estimate observer for missing observations
####### log(sigma[j,k])<-alpha[observer[j,k]] + beta*X
####### observer[j,k]~dcat(probs)
### 2. Because there is no data points where observer is NA, and because
# I am not trying to estimate sigma in every point (only abundance, and in the
# missing points of data this is estimated using the noNA and the co-variates.
# i.e., you have covariate information for the abundance component of the missing
# year-transect combinations, so you can use that to predict abundance for these missing points)
# Then, you can fill observer NAs with random IDs and it wont affect the model estimates.
# (ONLY BECAUSE THERE IS NO DATA ASSOCIATED WITH THE OBSERVER NAs)
# NOTE(review): [-4] assumes the NA produced by factor() lands in the 4th
# position of unique(ob) - fragile; confirm, or drop NAs explicitly instead.
obs_id <- unique(ob)[-4]
ob[which(is.na(ob))] <- sample(obs_id, length(which(is.na(ob))), replace = TRUE)
nobs <- length(unique(ob))
# Create one matrix for indexing year when calculating abundance per year in JAGS
allyears <- NULL
for (i in 1:nyrs){
allyears <- c(allyears,rep(yrs[i],length(all.sites)))
}
ye <- data.frame(allyears = allyears)
ye$allyears <- as.factor(ye$allyears)
# One dummy (0/1) column per year, used to sum N by year in the model
indexYears <- model.matrix(~ allyears-1, data = ye)
# ---- Compile data for JAGS model ----
data1 <- list(nyears = nyrs, max.sites = max.sites, nG = nG, siteYear.dclass = siteYear.dclass, int.w=int.w, strip.width = strip.width,
y = yLong, nind = nind, dclass = dclass, midpt = midpt, sitesYears = sitesYears, indexYears = indexYears,
area1 = area_AES, area2 = area_SG, zoneB = zon, ob = ob, nobs = nobs, db = dist.breaks)
# ---- JAGS model ----
# Writes the BUGS-language model definition to a text file. Half-normal
# detection with an observer random effect and a zone covariate on sigma;
# Poisson abundance with a transect random intercept plus zone and AES/SG
# area covariates on lambda. (The string below is written verbatim to disk,
# so it is left untouched.)
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data/Model")
cat("model{
# PRIORS
# Priors for lambda
bzB.lam ~ dnorm(0, 0.001)
ba1.lam ~ dnorm(0, 0.001)
ba2.lam ~ dnorm(0, 0.001)
mu.lam ~ dunif(-10, 10) # Random effects for lambda per site
sig.lam ~ dunif(0, 10)
tau.lam <- 1/(sig.lam*sig.lam)
# Priors for sigma
bzB.sig ~ dnorm(0, 0.001)
mu.sig ~ dunif(-10, 10) # Random effects for sigma per observer
sig.sig ~ dunif(0, 10)
tau.sig <- 1/(sig.sig*sig.sig)
#RANDOM TRANSECT LEVEL EFFECT FOR LAMBDA (doesn't change over time) # takes care of the dependence in data when you repeatedly visit the same transect
for (s in 1:max.sites){
log.lambda[s] ~ dnorm(mu.lam, tau.lam)
}
#RANDOM OBSERVER EFFECT FOR SIGMA
for (o in 1:nobs){
sig.obs[o] ~ dnorm(mu.sig, tau.sig)
}
for(i in 1:nind){
dclass[i] ~ dcat(fct[siteYear.dclass[i], 1:nG])
}
for(j in 1:length(y)){
sigma[j] <- exp(sig.obs[ob[j]] + bzB.sig*zoneB[j])
# Construct cell probabilities for nG multinomial cells (distance categories) PER SITE
for(k in 1:nG){
up[j,k]<-pnorm(db[k+1], 0, 1/sigma[j]^2) ##db are distance bin limits
low[j,k]<-pnorm(db[k], 0, 1/sigma[j]^2)
p[j,k]<- 2 * (up[j,k] - low[j,k])
pi[j,k] <- int.w[k] / strip.width
f[j,k]<- p[j,k]/f.0[j]/int.w[k] ## detection prob. in distance category k
fc[j,k]<- f[j,k] * pi[j,k] ## pi=percent area of k; drops out if constant
fct[j,k]<-fc[j,k]/pcap[j]
}
pcap[j] <- sum(fc[j, 1:nG]) # Different per site and year (sum over all bins)
f.0[j] <- 2 * dnorm(0,0, 1/sigma[j]^2) # Prob density at 0
# To set that prob.of detection at distance 0 is one, you divide by f0 in the loop up
y[j] ~ dbin(pcap[j], N[j])
N[j] ~ dpois(lambda[j])
lambda[j] <- exp(log.lambda[sitesYears[j]] + bzB.lam*zoneB[j]
+ ba1.lam*area1[j] + ba2.lam*area2[j])
}
# Derived parameters
for (i in 1:nyears){
Ntotal[i] <- sum(N*indexYears[,i])
}
}",fill=TRUE, file = "s_sigma(integral)[obs(o,j,t)_covZone(j)]_lambda[alpha(j)_covZone(j)_covArea(j,t)].txt")
# Inits
# Initial N must be >= observed count, hence y + 1.
Nst <- yLong + 1
inits <- function(){list(mu.lam = runif(1), sig.lam = 0.2, #sigma = runif(624, 0, 50), I dont need sigma because I have already priors for his hyperparameters!!!!!
N=Nst,
bzB.lam = runif(1), ba1.lam = runif(1), ba2.lam = runif(1),
mu.sig = runif(1, log(30), log(50)), sig.sig = runif(1), bzB.sig = runif(1)
###changed inits for mu.sig - don't start too small, better start too large
)}
# Params
# Parameters to monitor in the MCMC output.
params <- c("Ntotal", "N",# "sigma", "lambda", I remove it so that it doesnt save the lambdas and takes shorter. It still calculates them
"mu.lam", "sig.lam",
"bzB.lam", "ba1.lam", "ba2.lam",
"mu.sig", "sig.sig", "bzB.sig"
)
# MCMC settings
# 3 chains, 75k iterations, 2k burn-in, thinning of 2.
nc <- 3 ; ni <- 75000 ; nb <- 2000 ; nt <- 2
# With jagsUI
out <- jags(data1, inits, params, "s_sigma(integral)[obs(o,j,t)_covZone(j)]_lambda[alpha(j)_covZone(j)_covArea(j,t)].txt", n.chain = nc,
n.thin = nt, n.iter = ni, n.burnin = nb, parallel = TRUE)
print(out)
# Save the posterior summary table for the post-processing section below.
summary <- as.data.frame(as.matrix(out$summary))
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data/Results")
write.csv(summary, "8.2.Alruf500_14-17.csv")
###################################################################
# ---- Post-processing: load saved posterior summary and plot the yearly trend ----
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data/Results")
summary <- read.csv("8.2.Alruf500_14-17.csv") # Does not converge but with one rhat value = 1.25 (Zone), so not very bad
results500 <- summary[which(summary$X %in% c("Ntotal[1]", "Ntotal[2]", "Ntotal[3]", "Ntotal[4]", "mu.lam", "sig.lam", "bzB.lam", "ba1.lam", "ba2.lam")), ]
# AES NONE EFFECT NEGATIVE TREND
# SG NONE EFFECT NEGATIVE TREND
# Plot the trend of the population
# (empty plot first, then estimated N in black and raw counts in red)
plot(-100,ylim = c(0,120), xlim=c(0,8),
pch = 21, ylab = "N", xlab = " ", axes = FALSE, main = "BUOED")
axis(1, at = c(1,2,3,4), labels = yrs)
axis(2)
points(results500[1:4,2],pch = 19) # Plot results
points(count.year,pch = 19, col = "red") # Plot counts
x <- seq_along(results500[1:4,2])
low_CI <- as.numeric(results500$X2.5.[1:4])
up_CI <- as.numeric(results500$X97.5.[1:4])
arrows(x, low_CI,x, up_CI, code=3, angle=90, length=0.04)  # 95% credible intervals
# To plot the relation with the co-variates
results500_2 <- summary[5:588, ]  # site-year abundance estimates (N[j]); row range assumed from the saved summary layout - TODO confirm
plot(results500_2$mean ~ area_AES, ylab = "Abundance")
# Prediction: Fix the rest of the covariates that you dont want to see (to the mean, or zone 1 or 2)
# PREDICTION ABUNDANCE - AES
# Columns used below: 2 = posterior mean, 4 = 2.5% CI, 8 = 97.5% CI.
area_AESpred <- seq(min(area_AES), max(area_AES),length.out = 500) # Create a sequence of values, from minimum to maximun of the covariate to plot the prediction
pred <- exp(results500[which(results500$X == "mu.lam"),2]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),2]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),2]*area_AESpred +
results500[which(results500$X == "ba2.lam"),2]*mean(area_SG)) # Fixed SG area
predlci <- exp(results500[which(results500$X == "mu.lam"),4]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),4]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),4]*area_AESpred +
results500[which(results500$X == "ba2.lam"),4]*mean(area_SG)) # Fixed SG area
preduci <- exp(results500[which(results500$X == "mu.lam"),8]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),8]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),8]*area_AESpred +
results500[which(results500$X == "ba2.lam"),8]*mean(area_SG)) # Fixed SG area
plot(pred ~ area_AESpred, ylim=c(0,0.5), type="l", main = "buffer.500")
points(predlci ~ area_AESpred, pch=16, type="l",lty=2)
points(preduci ~ area_AESpred, pch=16,type="l",lty=2)
# Same prediction, zone fixed at 0 (occidental); plotted in red.
pred0 <- exp(results500[which(results500$X == "mu.lam"),2]+
results500[which(results500$X == "bzB.lam"),2]*0 + # Prediction fixed for zone 0 (occidental)
results500[which(results500$X == "ba1.lam"),2]*area_AESpred +
results500[which(results500$X == "ba2.lam"),2]*mean(area_SG)) # Fixed SG area
pred0lci <- exp(results500[which(results500$X == "mu.lam"),4]+ # PREDICTION LOW CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),4]*0 +
results500[which(results500$X == "ba1.lam"),4]*area_AESpred +
results500[which(results500$X == "ba2.lam"),4]*mean(area_SG))
pred0uci <- exp(results500[which(results500$X == "mu.lam"),8]+ # PREDICTION UP CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),8]*0 +
results500[which(results500$X == "ba1.lam"),8]*area_AESpred +
results500[which(results500$X == "ba2.lam"),8]*mean(area_SG))
points(pred0 ~ area_AESpred, pch=16, type="l", col="red")
points(pred0lci ~ area_AESpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_AESpred, pch=16,type="l",lty=2, col="red")
# Overlay the zone-0 prediction on the raw estimate scatter.
plot(results500_2$mean ~ area_AES, ylab = "Abundance")
points(pred0 ~ area_AESpred, pch=16, type="l", col="red")
points(pred0lci ~ area_AESpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_AESpred, pch=16,type="l",lty=2, col="red")
# PREDICTION ABUNDANCE - SG
# Same structure as the AES prediction above, now varying SG area and
# holding AES area at its mean. Columns: 2 = mean, 4 = 2.5%, 8 = 97.5%.
plot(results500_2$mean ~ area_SG, ylab = "Abundance")
area_SGpred <- seq(min(area_SG), max(area_SG),length.out = 500) # Create a sequence of values, from minimum to maximun of the covariate to plot the prediction
pred <- exp(results500[which(results500$X == "mu.lam"),2]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),2]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),2]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),2]*area_SGpred)
predlci <- exp(results500[which(results500$X == "mu.lam"),4]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),4]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),4]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),4]*area_SGpred)
preduci <- exp(results500[which(results500$X == "mu.lam"),8]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),8]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),8]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),8]*area_SGpred)
plot(pred ~ area_SGpred, ylim=c(0,3), type="l", main = "buffer.500")
points(predlci ~ area_SGpred, pch=16, type="l",lty=2)
points(preduci ~ area_SGpred, pch=16,type="l",lty=2)
# Zone fixed at 0 (occidental); plotted in red.
pred0 <- exp(results500[which(results500$X == "mu.lam"),2]+
results500[which(results500$X == "bzB.lam"),2]*0 + # Prediction fixed for zone 0 (occidental)
results500[which(results500$X == "ba1.lam"),2]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),2]*area_SGpred)
pred0lci <- exp(results500[which(results500$X == "mu.lam"),4]+ # PREDICTION LOW CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),4]*0 +
results500[which(results500$X == "ba1.lam"),4]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),4]*area_SGpred)
pred0uci <- exp(results500[which(results500$X == "mu.lam"),8]+ # PREDICTION UP CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),8]*0 +
results500[which(results500$X == "ba1.lam"),8]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),8]*area_SGpred)
points(pred0 ~ area_SGpred, pch=16, type="l", col="red")
points(pred0lci ~ area_SGpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_SGpred, pch=16,type="l",lty=2, col="red")
# Overlay the zone-0 prediction on the raw estimate scatter.
plot(results500_2$mean ~ area_SG, ylab = "Abundance")
points(pred0 ~ area_SGpred, pch=16, type="l", col="red")
points(pred0lci ~ area_SGpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_SGpred, pch=16,type="l",lty=2, col="red")
|
/Ch. 2-3/Ch. 3/Data/Species-specific/ALRUF/8.3. D_14-17_ALRUF500_x[dcat_area_integralBin]_sigma[obs(j,t)_covZone(j)]_lambda[alpha(j)_covZone(j)_covArea(j,t)].r
|
no_license
|
anasanz/MyScripts
|
R
| false
| false
| 19,136
|
r
|
# Hierarchical distance-sampling model (model 8.2) for ALRUF, years 2014-2017.
rm(list=ls())
library(rjags)
library(jagsUI)
library(dplyr)
# Run model 8.2 in MECAL dataset
# Take only 2014-2017 to see what is the trend only when there is the 2 measures (AES and SG)
# - If include it before 2014, I am including years when abundance is high in places where there is
# no SG (because the measure did not exist), so I am including noise in the model
# ---- I ignore counts in each observation (cluster size)
# ---- Data ----
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data")
d <- read.csv("DataDS_ready.csv")
d <- d[which(d$Year %in% c(2014, 2015, 2016, 2017)), ]  # keep only the study years
# Information: bins, years, sites
strip.width <- 200  # transect half-width (m)
dist.breaks <- c(0,25,50,100,200)  # distance-bin cut points (m)
int.w <- diff(dist.breaks) # width of distance categories (v)
midpt <- diff(dist.breaks)/2+dist.breaks[-5]  # distance-bin midpoints
nG <- length(dist.breaks)-1  # number of distance bins
yrs <- c(2014, 2015, 2016, 2017)
nyrs <- length(yrs)
# To take into account transects with abundance 0
# 1. Select all transects IDs from all species observations
# 2. Join the observations of MECAL (for example) with all transects so that they remain with NA if the
# species was there but it wasnt sampled
d_tr <- d[ ,which(colnames(d) %in% c("Species", "T_Y", "Observer"))]
d_tr_all <- data.frame(T_Y = unique(d_tr$T_Y), id = NA)  # every sampled transect-year
# For observer variable
d_tr$Observer <- as.character(d_tr$Observer)
d_tr_all_obs <- left_join(d_tr_all, d_tr)
d_tr_all_obs <- d_tr_all_obs[ ,c(1,4)]
d_tr_all_obs <- d_tr_all_obs[which(!duplicated(d_tr_all_obs)), ] # Table with all sampled fields and which observer sampled it
d_tr_all_obs$Observer <- as.character(d_tr_all_obs$Observer)
d_tr_all_obs$T_Y <- as.character(d_tr_all_obs$T_Y)
# Detections of the focal species (ALRUF), despite the "MECAL" wording below.
mec <- d[which(d$Species == "ALRUF"), which(colnames(d) %in% c("Year", "Banda", "transectID", "T_Y", "Species", "Observer"))] # Select species MECAL and all years
mec <- arrange(mec, Year, transectID) #Ordered
mec_detec_transectID <- unique(mec$transectID)
mec$Observer <- as.character(mec$Observer)
absent <- anti_join(d_tr_all,mec) # Transects with 0 abundance, add to mec.
colnames(absent)[2] <- "Banda" # Format it to add the rows to mec
absent$T_Y <- as.character(absent$T_Y)
absent$Species <- "ALRUF"
absent <- left_join(absent, d_tr_all_obs)
# Parse Year and transectID back out of the "T_Y" string; transect codes are
# 4 chars normally, 5 chars for the SI1xx transects.
for (i in 1:nrow(absent)){ # Format to join absent - detections
cent <- substr(absent$T_Y[i], 10,10) # To include SI102 (cents)
cent <- as.numeric(cent)
if(is.na(cent)){
absent$Year[i] <- substr(absent$T_Y[i], 6,9)
absent$transectID[i] <- substr(absent$T_Y[i], 1,4)
} else { absent$Year[i] <- substr(absent$T_Y[i], 7,10)
absent$transectID[i] <- substr(absent$T_Y[i], 1,5)}
}
all_mec <- rbind(mec,absent) # Include transects with abundance 0
all_mec <- arrange(all_mec, Year, transectID) # Ordered
absent$count <- 0  # explicit zero count for the absence rows (used below)
# ---- Distance observations ----
# Build the site x year count matrix m (NA = transect not sampled that year).
all.sites <- unique(all_mec$transectID)
# FIX: the argument was misspelled "descreasing", which is not an argument of
# sort(); the unmatched named argument is forwarded to sort.int() and fails
# with "unused argument". The intended call is decreasing = TRUE.
all.sites <- sort(all.sites, decreasing = TRUE)
max.sites <- length(all.sites)
m <- matrix(NA, nrow = length(all.sites), ncol = nyrs)
rownames(m) <- all.sites
colnames(m) <- yrs
# Add counts > 0 (number of detection rows per transect-year)
count <- aggregate(Species ~ Year + transectID, FUN = length, data = mec)
for (i in seq_len(nrow(count))){
m[which(rownames(m) %in% count$transectID[i]), which(colnames(m) %in% count$Year[i])] <- count$Species[i]
}
# Add absences (0)
for (i in seq_len(nrow(absent))){
m[which(rownames(m) %in% absent$transectID[i]), which(colnames(m) %in% absent$Year[i])] <- absent$count[i]
}
# Only to check: Count of individuals per year
count.year <- colSums(m, na.rm = TRUE)
# ---- Co-variates ----
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data")
manag <- read.csv("management_area_500.csv")
manag <- manag[ , c(1,2,7:14)] # Select years 2014 - 2017
manag <- manag[which(manag$Codi %in% all.sites), ] # Select transects with census
# Be sure the fields are in the same order
# NOTE(review): `order` shadows base::order here; harmless in this script but
# worth renaming if the code is reused.
order <- as.data.frame(m)
order_codi <- as.vector(rownames(order))
order$Codi <- order_codi
manag <- left_join(order,manag)
# Area AES (standardised over all site-years)
area_aes <- as.matrix(manag[ ,c(7:10)])
aes_mean <- mean(area_aes)
aes_sd <- sd(area_aes)
aes_sc <- (area_aes - aes_mean) / aes_sd
# Area SG (standardised over all site-years)
area_sg <- as.matrix(manag[ ,c(11:14)])
sg_mean <- mean(area_sg)
sg_sd <- sd(area_sg)
sg_sc <- (area_sg - sg_mean) / sg_sd
# Zone (Occidental = 0; Oriental = 1), derived from the 2-letter transect prefix
zone <- order
for (i in 1:nrow(zone)){
if(substr(zone$Codi[i], 1,2) == "BA"){zone[i,1:4] <- 0}
if(substr(zone$Codi[i], 1,2) == "BM"){zone[i,1:4] <- 1}
if(substr(zone$Codi[i], 1,2) == "SI"){zone[i,1:4] <- 1}
if(substr(zone$Codi[i], 1,2) == "AF"){zone[i,1:4] <- 0}
if(substr(zone$Codi[i], 1,2) == "BE"){zone[i,1:4] <- 1}
if(substr(zone$Codi[i], 1,2) == "GR"){zone[i,1:4] <- 0}
}
zone <- zone[,-5]  # drop the Codi helper column
# Observer
# Format: site x year matrix of observer IDs (NA = not sampled)
obs <- matrix(NA, nrow = max.sites, ncol = nyrs)
rownames(obs) <- all.sites
colnames(obs) <- yrs
# Add observers for fields with counts > 0
for (i in 1:nrow(mec)){
obs[which(rownames(obs) %in% mec$transectID[i]), which(colnames(obs) %in% mec$Year[i])] <- mec$Observer[i]
}
# Add observers for fields with absences (0)
for (i in 1:nrow(absent)){
obs[which(rownames(obs) %in% absent$transectID[i]), which(colnames(obs) %in% absent$Year[i])] <- absent$Observer[i]
}
# ---- Specify data in JAGS format ----
# Distance class and ind
nind <- nrow(mec)
dclass <- mec$Banda  # distance-bin index of each detection
# Get one long vector with counts per year and site (column-wise stacking of m)
yLong <- unlist(as.data.frame(m), use.names = F)
sitesYears <- NULL
for (i in 1:nyrs){
sitesYears <- c(sitesYears,c(1:length(all.sites)))}
# Get one long vector for each site-year combination of each dclass observation
###RS: Fixed index to map dclass onto site-year combinations
# For the index, create a vector of ylong where NA are 0 (because I need the same length)
yLong_index <- yLong
yLong_index[which(is.na(yLong_index))] <- 0
n.allSiteYear <- max.sites*nyrs
siteYear.dclass <- NULL
for (i in 1:n.allSiteYear){
siteYear.dclass <- c(siteYear.dclass,rep(i, yLong_index[i]))}
# Get one vector per co-variate (same stacking order as yLong)
area_AES <- NULL
for (i in 1:nyrs){
area_AES <- c(area_AES,aes_sc[1:length(all.sites),i])}
area_SG <- NULL
for (i in 1:nyrs){
area_SG <- c(area_SG,sg_sc[1:length(all.sites),i])}
zon <- NULL
for (i in 1:nyrs){
zon <- c(zon,zone[1:length(all.sites),i])}
# Stack the observer matrix year by year into one long vector matching the
# ordering of yLong / area_AES / area_SG / zon above.
ob <- NULL
for (i in 1:nyrs){
ob <- c(ob,obs[1:length(all.sites),i])}
ob <- as.numeric(factor(ob)) # JAGS doesn't accept categorical variables
## RS: = observer is a covariate so NAs are a problem because nothing
# in the model specifies how the NAs can be estimated ( never shows up
# on the left hand side of a line of code) So there are two solutions:
### 1.Estimate observer for missing observations
####### log(sigma[j,k])<-alpha[observer[j,k]] + beta*X
####### observer[j,k]~dcat(probs)
### 2. Because there is no data points where observer is NA, and because
# I am not trying to estimate sigma in every point (only abundance, and in the
# missing points of data this is estimated using the noNA and the co-variates.
# i.e., you have covariate information for the abundance component of the missing
# year-transect combinations, so you can use that to predict abundance for these missing points)
# Then, you can fill observer NAs with random IDs and it wont affect the model estimates.
# (ONLY BECAUSE THERE IS NO DATA ASSOCIATED WITH THE OBSERVER NAs)
# FIX: the original `unique(ob)[-4]` dropped the 4th distinct value of `ob`,
# assuming NA happened to sit in that position. Drop NA explicitly instead so
# the candidate observer IDs do not depend on the order observers first appear.
obs_id <- unique(ob[!is.na(ob)])
ob[is.na(ob)] <- sample(obs_id, sum(is.na(ob)), replace = TRUE)
nobs <- length(unique(ob))
# Create one matrix for indexing year when calculating abundance per year in JAGS
# (one dummy column per year; row order matches the stacked site-year vectors)
allyears <- NULL
for (i in 1:nyrs){
allyears <- c(allyears,rep(yrs[i],length(all.sites)))
}
ye <- data.frame(allyears = allyears)
ye$allyears <- as.factor(ye$allyears)
indexYears <- model.matrix(~ allyears-1, data = ye)  # 0/1 year-indicator matrix
# ---- Compile data for JAGS model ----
# All vectors (y, covariates, ob) share the same site-within-year stacking order.
data1 <- list(nyears = nyrs, max.sites = max.sites, nG = nG, siteYear.dclass = siteYear.dclass, int.w=int.w, strip.width = strip.width,
y = yLong, nind = nind, dclass = dclass, midpt = midpt, sitesYears = sitesYears, indexYears = indexYears,
area1 = area_AES, area2 = area_SG, zoneB = zon, ob = ob, nobs = nobs, db = dist.breaks)
# ---- JAGS model ----
# Writes the BUGS-language model definition to a text file. Half-normal
# detection with an observer random effect and a zone covariate on sigma;
# Poisson abundance with a transect random intercept plus zone and AES/SG
# area covariates on lambda. (The string below is written verbatim to disk,
# so it is left untouched.)
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data/Model")
cat("model{
# PRIORS
# Priors for lambda
bzB.lam ~ dnorm(0, 0.001)
ba1.lam ~ dnorm(0, 0.001)
ba2.lam ~ dnorm(0, 0.001)
mu.lam ~ dunif(-10, 10) # Random effects for lambda per site
sig.lam ~ dunif(0, 10)
tau.lam <- 1/(sig.lam*sig.lam)
# Priors for sigma
bzB.sig ~ dnorm(0, 0.001)
mu.sig ~ dunif(-10, 10) # Random effects for sigma per observer
sig.sig ~ dunif(0, 10)
tau.sig <- 1/(sig.sig*sig.sig)
#RANDOM TRANSECT LEVEL EFFECT FOR LAMBDA (doesn't change over time) # takes care of the dependence in data when you repeatedly visit the same transect
for (s in 1:max.sites){
log.lambda[s] ~ dnorm(mu.lam, tau.lam)
}
#RANDOM OBSERVER EFFECT FOR SIGMA
for (o in 1:nobs){
sig.obs[o] ~ dnorm(mu.sig, tau.sig)
}
for(i in 1:nind){
dclass[i] ~ dcat(fct[siteYear.dclass[i], 1:nG])
}
for(j in 1:length(y)){
sigma[j] <- exp(sig.obs[ob[j]] + bzB.sig*zoneB[j])
# Construct cell probabilities for nG multinomial cells (distance categories) PER SITE
for(k in 1:nG){
up[j,k]<-pnorm(db[k+1], 0, 1/sigma[j]^2) ##db are distance bin limits
low[j,k]<-pnorm(db[k], 0, 1/sigma[j]^2)
p[j,k]<- 2 * (up[j,k] - low[j,k])
pi[j,k] <- int.w[k] / strip.width
f[j,k]<- p[j,k]/f.0[j]/int.w[k] ## detection prob. in distance category k
fc[j,k]<- f[j,k] * pi[j,k] ## pi=percent area of k; drops out if constant
fct[j,k]<-fc[j,k]/pcap[j]
}
pcap[j] <- sum(fc[j, 1:nG]) # Different per site and year (sum over all bins)
f.0[j] <- 2 * dnorm(0,0, 1/sigma[j]^2) # Prob density at 0
# To set that prob.of detection at distance 0 is one, you divide by f0 in the loop up
y[j] ~ dbin(pcap[j], N[j])
N[j] ~ dpois(lambda[j])
lambda[j] <- exp(log.lambda[sitesYears[j]] + bzB.lam*zoneB[j]
+ ba1.lam*area1[j] + ba2.lam*area2[j])
}
# Derived parameters
for (i in 1:nyears){
Ntotal[i] <- sum(N*indexYears[,i])
}
}",fill=TRUE, file = "s_sigma(integral)[obs(o,j,t)_covZone(j)]_lambda[alpha(j)_covZone(j)_covArea(j,t)].txt")
# Inits
# Initial N must be >= observed count, hence y + 1.
Nst <- yLong + 1
inits <- function(){list(mu.lam = runif(1), sig.lam = 0.2, #sigma = runif(624, 0, 50), I dont need sigma because I have already priors for his hyperparameters!!!!!
N=Nst,
bzB.lam = runif(1), ba1.lam = runif(1), ba2.lam = runif(1),
mu.sig = runif(1, log(30), log(50)), sig.sig = runif(1), bzB.sig = runif(1)
###changed inits for mu.sig - don't start too small, better start too large
)}
# Params
# Parameters to monitor in the MCMC output.
params <- c("Ntotal", "N",# "sigma", "lambda", I remove it so that it doesnt save the lambdas and takes shorter. It still calculates them
"mu.lam", "sig.lam",
"bzB.lam", "ba1.lam", "ba2.lam",
"mu.sig", "sig.sig", "bzB.sig"
)
# MCMC settings
# 3 chains, 75k iterations, 2k burn-in, thinning of 2.
nc <- 3 ; ni <- 75000 ; nb <- 2000 ; nt <- 2
# With jagsUI
out <- jags(data1, inits, params, "s_sigma(integral)[obs(o,j,t)_covZone(j)]_lambda[alpha(j)_covZone(j)_covArea(j,t)].txt", n.chain = nc,
n.thin = nt, n.iter = ni, n.burnin = nb, parallel = TRUE)
print(out)
# Save the posterior summary table for the post-processing section below.
summary <- as.data.frame(as.matrix(out$summary))
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data/Results")
write.csv(summary, "8.2.Alruf500_14-17.csv")
###################################################################
# ---- Post-processing: load saved posterior summary and plot the yearly trend ----
setwd("C:/Users/ana.sanz/OneDrive/PhD/Second chapter/Data/Results")
summary <- read.csv("8.2.Alruf500_14-17.csv") # Does not converge but with one rhat value = 1.25 (Zone), so not very bad
results500 <- summary[which(summary$X %in% c("Ntotal[1]", "Ntotal[2]", "Ntotal[3]", "Ntotal[4]", "mu.lam", "sig.lam", "bzB.lam", "ba1.lam", "ba2.lam")), ]
# AES NONE EFFECT NEGATIVE TREND
# SG NONE EFFECT NEGATIVE TREND
# Plot the trend of the population
# (empty plot first, then estimated N in black and raw counts in red)
plot(-100,ylim = c(0,120), xlim=c(0,8),
pch = 21, ylab = "N", xlab = " ", axes = FALSE, main = "BUOED")
axis(1, at = c(1,2,3,4), labels = yrs)
axis(2)
points(results500[1:4,2],pch = 19) # Plot results
points(count.year,pch = 19, col = "red") # Plot counts
x <- seq_along(results500[1:4,2])
low_CI <- as.numeric(results500$X2.5.[1:4])
up_CI <- as.numeric(results500$X97.5.[1:4])
arrows(x, low_CI,x, up_CI, code=3, angle=90, length=0.04)  # 95% credible intervals
# To plot the relation with the co-variates
results500_2 <- summary[5:588, ]  # site-year abundance estimates (N[j]); row range assumed from the saved summary layout - TODO confirm
plot(results500_2$mean ~ area_AES, ylab = "Abundance")
# Prediction: Fix the rest of the covariates that you dont want to see (to the mean, or zone 1 or 2)
# PREDICTION ABUNDANCE - AES
# Columns used below: 2 = posterior mean, 4 = 2.5% CI, 8 = 97.5% CI.
area_AESpred <- seq(min(area_AES), max(area_AES),length.out = 500) # Create a sequence of values, from minimum to maximun of the covariate to plot the prediction
pred <- exp(results500[which(results500$X == "mu.lam"),2]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),2]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),2]*area_AESpred +
results500[which(results500$X == "ba2.lam"),2]*mean(area_SG)) # Fixed SG area
predlci <- exp(results500[which(results500$X == "mu.lam"),4]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),4]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),4]*area_AESpred +
results500[which(results500$X == "ba2.lam"),4]*mean(area_SG)) # Fixed SG area
preduci <- exp(results500[which(results500$X == "mu.lam"),8]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),8]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),8]*area_AESpred +
results500[which(results500$X == "ba2.lam"),8]*mean(area_SG)) # Fixed SG area
plot(pred ~ area_AESpred, ylim=c(0,0.5), type="l", main = "buffer.500")
points(predlci ~ area_AESpred, pch=16, type="l",lty=2)
points(preduci ~ area_AESpred, pch=16,type="l",lty=2)
# Same prediction, zone fixed at 0 (occidental); plotted in red.
pred0 <- exp(results500[which(results500$X == "mu.lam"),2]+
results500[which(results500$X == "bzB.lam"),2]*0 + # Prediction fixed for zone 0 (occidental)
results500[which(results500$X == "ba1.lam"),2]*area_AESpred +
results500[which(results500$X == "ba2.lam"),2]*mean(area_SG)) # Fixed SG area
pred0lci <- exp(results500[which(results500$X == "mu.lam"),4]+ # PREDICTION LOW CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),4]*0 +
results500[which(results500$X == "ba1.lam"),4]*area_AESpred +
results500[which(results500$X == "ba2.lam"),4]*mean(area_SG))
pred0uci <- exp(results500[which(results500$X == "mu.lam"),8]+ # PREDICTION UP CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),8]*0 +
results500[which(results500$X == "ba1.lam"),8]*area_AESpred +
results500[which(results500$X == "ba2.lam"),8]*mean(area_SG))
points(pred0 ~ area_AESpred, pch=16, type="l", col="red")
points(pred0lci ~ area_AESpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_AESpred, pch=16,type="l",lty=2, col="red")
# Overlay the zone-0 prediction on the raw estimate scatter.
plot(results500_2$mean ~ area_AES, ylab = "Abundance")
points(pred0 ~ area_AESpred, pch=16, type="l", col="red")
points(pred0lci ~ area_AESpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_AESpred, pch=16,type="l",lty=2, col="red")
# PREDICTION ABUNDANCE - SG
# Same structure as the AES prediction above, now varying SG area and
# holding AES area at its mean. Columns: 2 = mean, 4 = 2.5%, 8 = 97.5%.
plot(results500_2$mean ~ area_SG, ylab = "Abundance")
area_SGpred <- seq(min(area_SG), max(area_SG),length.out = 500) # Create a sequence of values, from minimum to maximun of the covariate to plot the prediction
pred <- exp(results500[which(results500$X == "mu.lam"),2]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),2]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),2]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),2]*area_SGpred)
predlci <- exp(results500[which(results500$X == "mu.lam"),4]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),4]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),4]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),4]*area_SGpred)
preduci <- exp(results500[which(results500$X == "mu.lam"),8]+ # Add the intercept (random effect), also fixed to the mean of the random effect
results500[which(results500$X == "bzB.lam"),8]*1 + # Prediction for fixed zone 1 (ORIENTAL)
results500[which(results500$X == "ba1.lam"),8]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),8]*area_SGpred)
plot(pred ~ area_SGpred, ylim=c(0,3), type="l", main = "buffer.500")
points(predlci ~ area_SGpred, pch=16, type="l",lty=2)
points(preduci ~ area_SGpred, pch=16,type="l",lty=2)
# Zone fixed at 0 (occidental); plotted in red.
pred0 <- exp(results500[which(results500$X == "mu.lam"),2]+
results500[which(results500$X == "bzB.lam"),2]*0 + # Prediction fixed for zone 0 (occidental)
results500[which(results500$X == "ba1.lam"),2]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),2]*area_SGpred)
pred0lci <- exp(results500[which(results500$X == "mu.lam"),4]+ # PREDICTION LOW CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),4]*0 +
results500[which(results500$X == "ba1.lam"),4]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),4]*area_SGpred)
pred0uci <- exp(results500[which(results500$X == "mu.lam"),8]+ # PREDICTION UP CI FOR OCCIDENTAL
results500[which(results500$X == "bzB.lam"),8]*0 +
results500[which(results500$X == "ba1.lam"),8]*mean(area_AES) + # Fixed AES area
results500[which(results500$X == "ba2.lam"),8]*area_SGpred)
points(pred0 ~ area_SGpred, pch=16, type="l", col="red")
points(pred0lci ~ area_SGpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_SGpred, pch=16,type="l",lty=2, col="red")
# Overlay the zone-0 prediction on the raw estimate scatter.
plot(results500_2$mean ~ area_SG, ylab = "Abundance")
points(pred0 ~ area_SGpred, pch=16, type="l", col="red")
points(pred0lci ~ area_SGpred, pch=16, type="l",lty=2, col="red")
points(pred0uci ~ area_SGpred, pch=16,type="l",lty=2, col="red")
|
################# ~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~ #################
## ##
## Data Science Capstone Project 2016 ##
## ##
## By D. C. Tee ##
## ##
## Github Repo: https://github.com/teedinchai/m10_Capstone ##
## ##
################# ~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~ #################
library(shiny)
library(tm)   # text cleaning: removeNumbers / removePunctuation / stripWhitespace
library(NLP)
#setwd("C:/Users/user/Desktop/capstone/m10_Capstone")
source("./global.R")
#load preprocess data for faster prediction application
# tdm2/tdm3 appear to hold bigram/trigram tables (columns MyBigram/MyTrigram)
# and tdm1a the top unigrams used as fallback -- confirm against global.R.
load('./Data/tdm2.RData')
load('./Data/tdm3.RData')
load('./Data/tdm1a.RData')
# Define server logic required to calculate reactive output and plot graph
# Server logic: predicts the next word from the user's text with a simple
# backoff over pre-computed n-gram tables (trigram tdm3 -> bigram tdm2 ->
# top unigrams tdm1a).
shinyServer(function(input, output) {
# Reactive returning the candidate-continuation table for the current input
# (recomputed when the Predict button counter changes).
PredictedText2 <- reactive({
if (input$Predict == 0)
return()
#clean up input text: lower-case, strip digits, punctuation and extra spaces
userinput <- as.character(input$text)
userinput <- tolower(userinput)
userinput <- removeNumbers(userinput)
userinput <- removePunctuation(userinput)
userinput <- stripWhitespace(userinput)
# FIX: the original compared the Shiny `input` object itself against '' and
# "na na"; the cleaned string `userinput` is what must be tested.
if (nchar(userinput) == 0 | userinput == '' | userinput == "na na"){
stop ('No input detected. Please key in some input text')
}
# FIX: `userinput` is always a length-1 character vector, so the original
# test length(userinput)==1 was always TRUE and the trigram branch below
# was unreachable. Count words instead.
else if(length(strsplit(userinput, split = " ")[[1]]) == 1){
# Single word: look it up as the first word of a bigram
usertext<-strsplit(userinput,split=" ")
usertext<-tail(usertext[[1]],n=1)
usertext<-paste(usertext,sep="", collapse=" ")
greptext<-tdm2$MyBigram[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm2$MyBigram)]
if (length(greptext)!=0) {
#2gram prediction
sortgreptext<-data.frame(greptext)
sortgreptext<-data.frame(tdm2=names(sortgreptext), frequency=sortgreptext)
} else {
#1gram prediction: fall back to the overall top-6 unigrams
tdm1a_5 <- tdm1a[1:6,]
}
}
# for input text more than one text/word: try 3-gram, back off to 2-gram, then 1-gram
else {
usertext<-strsplit(userinput,split=" ")
usertext<-tail(usertext[[1]],n=2)
usertext<-paste(usertext,sep="", collapse=" ")
greptext<-tdm3$MyTrigram[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm3$MyTrigram)]
if (length(greptext)!=0){
#Predict by 3-gram first
sortgreptext<-data.frame(greptext)
sortgreptext<-data.frame(tdm3=names(sortgreptext), frequency=sortgreptext)
} else {
usertext<-strsplit(userinput,split=" ")
usertext<-tail(usertext[[1]],n=1)
usertext<-paste(usertext,sep="", collapse=" ")
greptext<-tdm2$MyBigram[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm2$MyBigram)]
if (length(greptext)!=0) {
#Predict by 2-gram next
sortgreptext<-data.frame(greptext)
sortgreptext<-data.frame(tdm2=names(sortgreptext), frequency=sortgreptext)
} else {
#Predict by 1-gram last
tdm1a_5 <- tdm1a[1:6,]
}
}
}
})
#Most possible next text/word: last word of the top-ranked candidate n-gram
output$PredictedText <- renderText({
if (input$Predict == 0)
return()
a<-PredictedText2()
lastword<-as.character(a[1,2])
lastword<-tail(strsplit(lastword, split=" ")[[1]],1)
})
#Other possible text(s)/word(s): last words of the remaining candidates
output$PredictedText1 <- renderTable({
if (input$Predict == 0)
return()
ab<-PredictedText2()
ab<-as.character(ab[,2])
ab<-strsplit(ab, split=" ")
ab<-data.frame(ab)
ab<-t(ab)
ab<-data.frame(ab[-3,ncol(ab)])
names(ab)<-"other possibilities"
rownames(ab)<-NULL
head(ab,5)
})
})
|
/server.R
|
no_license
|
teedinchai/m10_Capstone
|
R
| false
| false
| 5,108
|
r
|
################# ~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~ #################
## ##
## Data Science Capstone Project 2016 ##
## ##
## By D. C. Tee ##
## ##
## Github Repo: https://github.com/teedinchai/m10_Capstone ##
## ##
################# ~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~ #################
library(shiny)
library(tm)   # text cleaning: removeNumbers / removePunctuation / stripWhitespace
library(NLP)
#setwd("C:/Users/user/Desktop/capstone/m10_Capstone")
source("./global.R")
#load preprocess data for faster prediction application
# tdm2/tdm3 appear to hold bigram/trigram tables and tdm1a the top unigrams
# used as fallback -- confirm against global.R.
load('./Data/tdm2.RData')
load('./Data/tdm3.RData')
load('./Data/tdm1a.RData')
# Define server logic required to calculate reactive output and plot graph
shinyServer(function(input, output) {
#output$PredictedText <- reactive({
PredictedText2 <- reactive({
if (input$Predict == 0)
return()
#clean up input text
userinput <- as.character(input$text)
userinput <- tolower(userinput)
userinput <- removeNumbers(userinput)
userinput <- removePunctuation(userinput)
userinput <- stripWhitespace(userinput)
if (nchar(userinput)==0 |input == ''|input == "na na"){
#top6 word if usertext=NULL
stop ('No input detected. Please key in some input text')
#tdm1a_5 <- tdm1a[1:6,]
}
#if only single text is typed in
else if(length(userinput)==1){
usertext<-strsplit(userinput,split=" ")
usertext<-tail(usertext[[1]],n=1)
usertext<-paste(usertext,sep="", collapse=" ")
greptext<-tdm2$MyBigram[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm2$MyBigram)]
# greptext<-tdm2$dimnames$Terms[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm2$dimnames$Terms)]
if (length(greptext)!=0) {
#2gram prediction
sortgreptext<-data.frame(greptext)
sortgreptext<-data.frame(tdm2=names(sortgreptext), frequency=sortgreptext)
# sortgreptext<-sort(rowSums(as.matrix(tdm2[greptext,])), decreasing = TRUE)
# sortgreptext<-data.frame(tdm2=names(sortgreptext), frequency=sortgreptext)
} else {
#1gram prediction
#top6 word if usertext not found from N-grams
tdm1a_5 <- tdm1a[1:6,]
}
}
# for input text more than one text/word
else {
usertext<-strsplit(userinput,split=" ")
usertext<-tail(usertext[[1]],n=2)
usertext<-paste(usertext,sep="", collapse=" ")
greptext<-tdm3$MyTrigram[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm3$MyTrigram)]
# greptext<-tdm3$dimnames$Terms[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm3$dimnames$Terms)]
if (length(greptext)!=0){
#Predict by 3-gram first
sortgreptext<-data.frame(greptext)
sortgreptext<-data.frame(tdm3=names(sortgreptext), frequency=sortgreptext)
# sortgreptext<-sort(rowSums(as.matrix(tdm3[greptext,])), decreasing = TRUE)
# sortgreptext<-data.frame(tdm3=names(sortgreptext), frequency=sortgreptext)
} else {
usertext<-strsplit(userinput,split=" ")
usertext<-tail(usertext[[1]],n=1)
usertext<-paste(usertext,sep="", collapse=" ")
greptext<-tdm2$MyBigram[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm2$MyBigram)]
# greptext<-tdm2$dimnames$Terms[grep(paste("^",usertext,"[[:space:]]",sep=""),tdm2$dimnames$Terms)]
if (length(greptext)!=0) {
#Predict by 2-gram next
sortgreptext<-data.frame(greptext)
sortgreptext<-data.frame(tdm2=names(sortgreptext), frequency=sortgreptext)
# sortgreptext<-sort(rowSums(as.matrix(tdm2[greptext,])), decreasing = TRUE)
# sortgreptext<-data.frame(tdm2=names(sortgreptext), frequency=sortgreptext)
} else {
#Predict by 1-gram last
tdm1a_5 <- tdm1a[1:6,]
}
}
}
})
#Most possible next text/word
output$PredictedText <- renderText({
if (input$Predict == 0)
return()
a<-PredictedText2()
# lastword<-as.character(a[1,1])
# lastword<-tail(strsplit(lastword, split=" ")[[1]],1)
lastword<-as.character(a[1,2])
lastword<-tail(strsplit(lastword, split=" ")[[1]],1)
})
#Other possible text(s)/word(s)
output$PredictedText1 <- renderTable({
if (input$Predict == 0)
return()
ab<-PredictedText2()
ab<-as.character(ab[,2])
ab<-strsplit(ab, split=" ")
ab<-data.frame(ab)
ab<-t(ab)
ab<-data.frame(ab[-3,ncol(ab)])
names(ab)<-"other possibilities"
rownames(ab)<-NULL
head(ab,5)
# ab<-as.character(ab[,1])
# ab<-strsplit(ab, split=" ")
# ab<-data.frame(ab)
# ab<-t(ab)
# ab<-data.frame(ab[-1,ncol(ab)])
# names(ab)<-"other possibilities"
# rownames(ab)<-NULL
# head(ab,5)
})
})
|
calcTripTime <- function(data, velocity) {
  # Input:  data     - trip data with a `timestamp` column
  #         velocity - numeric vector of speeds aligned with `data`
  # Output: trip time in seconds: timestamp of the last non-zero velocity
  #         minus the timestamp of the first non-zero velocity.
  #         Returns NA if the vehicle never moved.
  #
  # BUG FIX: the original while-loop scan set `indexEnd = count` (vs.
  # `count - 1` in the start scan) and then `length - indexEnd`, landing
  # two positions before the last non-zero velocity. A vectorised which()
  # is both simpler and correct, and also handles the all-zero case.
  moving <- which(velocity != 0)
  if (length(moving) == 0) {
    # No movement recorded at all.
    return(NA_real_)
  }
  indexStart <- moving[1]
  indexEnd <- moving[length(moving)]
  tripTime <- data$timestamp[indexEnd] - data$timestamp[indexStart]
  return(tripTime)
}
|
/calcTripTime.R
|
no_license
|
Lycanthrope1/tripDataAnalysis
|
R
| false
| false
| 812
|
r
|
# Computes elapsed trip time by locating the first and last "moving" samples
# in the velocity trace and differencing their timestamps.
calcTripTime <- function(data, velocity) {
  #Input: Trip Data and a velocity vector
  #
  #Output: Trip time in seconds
  #
  #Find the first positive velocity
  # NOTE(review): "positive" here really means non-zero, since the test is
  # velocity[count] == 0; negative velocities also terminate the scan.
  count = 1; checkZero = TRUE; indexStart = numeric()
  while (count < length(velocity) & checkZero) {
    checkZero = velocity[count] == 0
    count = count + 1
    indexStart = count - 1
  }
  #Find the last positive velocity
  # Scan the reversed trace for the first non-zero entry from the end.
  reverseTime = rev(velocity)
  count = 1; checkZero = TRUE; indexEnd = numeric()
  while (count < length(reverseTime) & checkZero) {
    checkZero = reverseTime[count] == 0
    count = count + 1
    # NOTE(review): asymmetric with the start scan, which uses `count - 1`.
    # Combined with `length - indexEnd` below, indexEnd appears to land two
    # positions BEFORE the last non-zero velocity -- confirm intent.
    indexEnd = count
  }
  # NOTE(review): uses length(data$speed) here but length(velocity) above;
  # presumably they are the same vector -- verify against callers.
  indexEnd = length(data$speed) - indexEnd
  #Subtract the time stamp of the final positive velocity from that
  #of the final negative velocity
  tripTime = data$timestamp[indexEnd] - data$timestamp[indexStart]
  return(tripTime)
}
|
# Script: builds Figure 11.1 for the GTA 24 report -- the cumulative number of
# policy interventions in the GTA database per quarter -- and writes both the
# underlying data (xlsx) and the chart (via gta_plot_saver).
library(gtalibrary)
library(ggplot2)
rm(list = ls())
# font_import()
loadfonts(device="postscript")
loadfonts(device="win")
setwd("C:/Users/jfrit/Desktop/Dropbox/GTA cloud")
#Settings
chapter.number=11
chapter.name="What's new"
output.path=paste("0 report production/GTA 24/tables & figures/", paste(chapter.number, chapter.name, sep=" - "),sep="")
source("0 report production/GTA 24/help files/GTA 23 cutoff and definitions.R")
### THE GTA standard colour palette
gta_colour_palette()
# Please prepare a chart showing the quarter by quarter totals of the number of policy interventions included in the GTA database from Q3 2009 to Q2 2018.
# FIGURE 13.1, total amount in database per quarter
# NOTE(review): gta_data_slicer() appears to create `master.sliced` as a side
# effect in the global environment -- confirm against gtalibrary docs.
gta_data_slicer(keep.implementation.na = T)
# FIGURE 13.1, total aggregate amount in database per quarter
agg.interventions <- master.sliced
# quarter()/year() are presumably lubridate functions loaded by gtalibrary --
# TODO(review): confirm lubridate is attached upstream.
agg.interventions$quarter <- quarter(agg.interventions$date.published)
agg.interventions$year <- year(agg.interventions$date.published)
fig10.1 <- data.frame(total =numeric(),
                      quarter=numeric(),
                      year=numeric())
# Cumulative count of unique interventions published up to each (year, quarter).
# NOTE(review): rbind-in-loop grows the data frame, but only 48 rows -- fine.
for(y in 2008:2019) {
  for (q in 1:4) {
    total = length(unique(subset(agg.interventions, year<=y-1 | (year==y & quarter <= q))$intervention.id))
    x <- data.frame(total = total,
                    quarter = q,
                    year = y)
    fig10.1 <- rbind(fig10.1, x)
    rm(x)
  }
}
# Restrict to Q2 2009 -- Q2 2019 (drop 2008, Q1 2009 and anything after Q2 2019).
fig10.1 <- subset(fig10.1, ! ((year==2019 & quarter > 2) |(year==2009 & quarter <= 1) | (year==2008)))
fig10.1$quarter.name <- paste("Q",fig10.1$quarter," - ", fig10.1$year, sep="")
fig10.1 <- fig10.1[with(fig10.1, order(year, quarter)),]
fig10.1$quarter.name <- as.factor(fig10.1$quarter.name)
row.names(fig10.1) <- NULL
# Export the chart's underlying data table.
fig10.1.xlsx <- fig10.1[,c("total","quarter.name","quarter","year")]
names(fig10.1.xlsx) <- c("Number of interventions","Quarter Name", "Quarter", "Year")
xlsx::write.xlsx(fig10.1.xlsx, file=paste(output.path,"/Table ",chapter.number,".1 - Data for Figure ",chapter.number,".1.xlsx", sep=""), row.names=F)
# Axis labels: only show every other quarter (blank out Q1 and Q3 labels).
fig10.1$quarter.name.2 <- gsub("\\s","",as.character(fig10.1$quarter.name))
fig10.1$quarter.name.2[fig10.1$quarter==1] <- ""
fig10.1$quarter.name.2[fig10.1$quarter==3] <- ""
p2 <- ggplot()+
  geom_line(data=fig10.1, aes(x=forcats::fct_inorder(quarter.name), y=total, group = 1), colour=gta_colour$blue[1], size=1)+
  geom_text(data=fig10.1[1,], aes(x=quarter.name, y=1400, label=total), colour=gta_colour$blue[1], nudge_x = 1)+
  geom_text(data=fig10.1[nrow(fig10.1),], aes(x=quarter.name, y=20100, label=total), colour=gta_colour$blue[1], nudge_x = -3.5)+
  xlab("Quarter")+
  scale_x_discrete(labels=fig10.1$quarter.name.2)+
  ylab("Number of interventions documented since GTA launch")+
  scale_y_continuous(limits=c(-100, 21000), breaks=seq(1000,21000,1000), sec.axis = sec_axis(~., name="Number of interventions documented since GTA launch", breaks=seq(1000, 21000, 1000)), expand = c(0,0))+
  gta_theme(x.bottom.angle = 90)+
  theme(axis.text.x = element_text(size=10, vjust=0.5, hjust=0),
        axis.title.y.left = element_text(size=10),
        axis.title.y.right = element_text(size=10),
        line = element_line(lineend = "round"))
p2
gta_plot_saver(plot=p2,
               path=output.path,
               name=paste("Figure ",chapter.number,".1 - Sum of intervention types from beginning to quarter", sep=""))
|
/code/11 - What's new/11 What's new.R
|
no_license
|
global-trade-alert/gta-24
|
R
| false
| false
| 3,366
|
r
|
# Script: produces the cumulative quarterly count of GTA database interventions
# (Figure 11.1 of the GTA 24 report) plus its xlsx data table.
library(gtalibrary)
library(ggplot2)
rm(list = ls())
# font_import()
loadfonts(device="postscript")
loadfonts(device="win")
setwd("C:/Users/jfrit/Desktop/Dropbox/GTA cloud")
#Settings
chapter.number=11
chapter.name="What's new"
output.path=paste("0 report production/GTA 24/tables & figures/", paste(chapter.number, chapter.name, sep=" - "),sep="")
source("0 report production/GTA 24/help files/GTA 23 cutoff and definitions.R")
### THE GTA standard colour palette
gta_colour_palette()
# Please prepare a chart showing the quarter by quarter totals of the number of policy interventions included in the GTA database from Q3 2009 to Q2 2018.
# FIGURE 13.1, total amount in database per quarter
gta_data_slicer(keep.implementation.na = T)
# FIGURE 13.1, total aggregate amount in database per quarter
# `master.sliced` is presumably created by gta_data_slicer() above -- verify.
agg.interventions <- master.sliced
agg.interventions$quarter <- quarter(agg.interventions$date.published)
agg.interventions$year <- year(agg.interventions$date.published)
fig10.1 <- data.frame(total =numeric(),
                      quarter=numeric(),
                      year=numeric())
# Build the running total of unique intervention IDs per (year, quarter).
for(y in 2008:2019) {
  for (q in 1:4) {
    total = length(unique(subset(agg.interventions, year<=y-1 | (year==y & quarter <= q))$intervention.id))
    x <- data.frame(total = total,
                    quarter = q,
                    year = y)
    fig10.1 <- rbind(fig10.1, x)
    rm(x)
  }
}
# Keep only Q2 2009 through Q2 2019.
fig10.1 <- subset(fig10.1, ! ((year==2019 & quarter > 2) |(year==2009 & quarter <= 1) | (year==2008)))
fig10.1$quarter.name <- paste("Q",fig10.1$quarter," - ", fig10.1$year, sep="")
fig10.1 <- fig10.1[with(fig10.1, order(year, quarter)),]
fig10.1$quarter.name <- as.factor(fig10.1$quarter.name)
row.names(fig10.1) <- NULL
fig10.1.xlsx <- fig10.1[,c("total","quarter.name","quarter","year")]
names(fig10.1.xlsx) <- c("Number of interventions","Quarter Name", "Quarter", "Year")
xlsx::write.xlsx(fig10.1.xlsx, file=paste(output.path,"/Table ",chapter.number,".1 - Data for Figure ",chapter.number,".1.xlsx", sep=""), row.names=F)
# Thin out x-axis labels: blank Q1 and Q3 so only every other quarter shows.
fig10.1$quarter.name.2 <- gsub("\\s","",as.character(fig10.1$quarter.name))
fig10.1$quarter.name.2[fig10.1$quarter==1] <- ""
fig10.1$quarter.name.2[fig10.1$quarter==3] <- ""
p2 <- ggplot()+
  geom_line(data=fig10.1, aes(x=forcats::fct_inorder(quarter.name), y=total, group = 1), colour=gta_colour$blue[1], size=1)+
  geom_text(data=fig10.1[1,], aes(x=quarter.name, y=1400, label=total), colour=gta_colour$blue[1], nudge_x = 1)+
  geom_text(data=fig10.1[nrow(fig10.1),], aes(x=quarter.name, y=20100, label=total), colour=gta_colour$blue[1], nudge_x = -3.5)+
  xlab("Quarter")+
  scale_x_discrete(labels=fig10.1$quarter.name.2)+
  ylab("Number of interventions documented since GTA launch")+
  scale_y_continuous(limits=c(-100, 21000), breaks=seq(1000,21000,1000), sec.axis = sec_axis(~., name="Number of interventions documented since GTA launch", breaks=seq(1000, 21000, 1000)), expand = c(0,0))+
  gta_theme(x.bottom.angle = 90)+
  theme(axis.text.x = element_text(size=10, vjust=0.5, hjust=0),
        axis.title.y.left = element_text(size=10),
        axis.title.y.right = element_text(size=10),
        line = element_line(lineend = "round"))
p2
gta_plot_saver(plot=p2,
               path=output.path,
               name=paste("Figure ",chapter.number,".1 - Sum of intervention types from beginning to quarter", sep=""))
|
# Computes and maps the change in leading tree species (conifer vs deciduous)
# between the first and last simulation years, per climate/fire scenario and
# averaged across scenarios. Side effects: writes GeoTIFFs, PNG level plots
# and a CSV summary to <pathOutputs>/vegetationPlots.
# Returns a list: per-scenario average-change rasters/percentages plus the
# all-scenario average.
# NOTE(review): relies on raster, data.table, lattice (levelplot), tictoc
# (tic/toc) and sp being attached by the caller -- confirm upstream loading.
leadingSpPlotsCaribou <- function(years = c(2011, 2100),
                                  pathData,
                                  pathOutputs,
                                  Scenarios = c("LandR_SCFM", "LandR.CS_SCFM",
                                                "LandR_fS", "LandR.CS_fS"),
                                  runs = paste0("run", 1:10),
                                  leadingPercentage = 0.50001,
                                  treeSpecies = c("Betu_Pap","Lari_Lar","Pice_Gla",
                                                  "Pice_Mar","Pinu_Ban","Popu_Tre"),
                                  treeType = NULL,
                                  flammableRTM,
                                  rasterToMatch,
                                  shpPoly = NULL,
                                  useProportionLeading = FALSE){
  Require::Require("reproducible")
  outputFolder <- checkPath(file.path(pathOutputs, "vegetationPlots"), create = TRUE)
  # ~~~ LEADING SPECIES ~~~~~~~~~~~~~~~~~~
  allScenarios <- lapply(Scenarios, function(scenario){
    tic(paste0("Calculating leading species change for ", scenario))
    allRuns <- lapply(runs, function(RUN){
      # FOR YEAR 2011
      # For each year: rebuild per-species biomass rasters from cohort data,
      # then classify each pixel as conifer (0), deciduous (1) or mixed (0.5).
      bothYears <- lapply(years, function(Y){
        coh <- bringObjectTS(path = file.path(pathData, paste(scenario, RUN, sep = "_")),
                             rastersNamePattern = c("cohortData", Y))[[1]]
        ras <- bringObjectTS(path = file.path(pathData, paste(scenario, RUN, sep = "_")),
                             rastersNamePattern = c("pixelGroupMap", Y))[[1]]
        cohortReduced <- coh[, list(sumBio = sum(B, na.rm = TRUE)), by = c("speciesCode", "pixelGroup")]
        biomassStack <- raster::stack(lapply(treeSpecies, function(tSp){
          message(paste0("Creating biomass map for ", tSp))
          r <- SpaDES.tools::rasterizeReduced(reduced = cohortReduced[speciesCode == tSp, ],
                                              fullRaster = ras,
                                              newRasterCols = "sumBio",
                                              mapcode = "pixelGroup")
          r[is.na(r[])] <- 0
          r[is.na(rasterToMatch)] <- NA
          return(r)
        }))
        names(biomassStack) <- treeSpecies
        biomassDT <- data.table(pixelID = 1:raster::ncell(biomassStack),
                                raster::getValues(biomassStack))
        biomassDT[, totalBiomass := rowSums(.SD, na.rm = TRUE), .SDcols = names(biomassDT)[names(biomassDT) != "pixelID"]]
        biomassDT <- biomassDT[totalBiomass != 0,]
        # Row-wise leading-species code via .defineLeading (see helper below).
        biomassDT[, leading := apply(.SD, 1, .defineLeading,
                                     leadingPercentage = leadingPercentage,
                                     totalCol = "totalBiomass"),
                  .SDcols = names(biomassDT)[names(biomassDT) != "pixelID"]]
        # Reclassify leading to conifer or deciduous
        if (is.null(treeType)){
          warning(paste0("treeType is null. Creating the following table. ",
                         "\nIf the species do not match, please provide treeType table ",
                         "with the correct classification in a column named 'newClass'"),
                  immediate. = TRUE)
          treeType <- structure(list(ID = c(1L, 2L, 3L, 4L, 5L, 6L, 71L, 72L, 73L,
                                            74L, 75L, 76L), landcover = c("Betu_Pap", "Lari_Lar", "Pice_Gla",
                                                                          "Pice_Mar", "Pinu_Ban", "Popu_Tre", "Mixed_Betu_Pap", "Mixed_Lari_Lar",
                                                                          "Mixed_Pice_Gla", "Mixed_Pice_Mar", "Mixed_Pinu_Ban", "Mixed_Popu_Tre"
          ), leadingType = c("deciduous", "deciduous", "conifer", "conifer",
                             "conifer", "deciduous", "mixed", "mixed", "mixed", "mixed", "mixed",
                             "mixed"), newClass = c(1, 1, 0, 0, 0, 1, 0.5, 0.5, 0.5, 0.5,
                                                    0.5, 0.5)), row.names = c(NA, -12L), class = "data.frame")
          names(treeType)[names(treeType) == "ID"] <- "leading"
          print(treeType)
        }
        biomassDT <- merge(biomassDT, treeType[, c("leading","newClass")])
        # Re-expand to the full pixel grid (dropped zero-biomass pixels -> NA).
        allPixels <- data.table(pixelID = 1:raster::ncell(biomassStack))
        biomassDTfilled <- merge(allPixels, biomassDT, all.x = TRUE, by = "pixelID")
        leadingSpeciesRaster <- raster::setValues(raster(biomassStack),
                                                  biomassDTfilled[["newClass"]])
        names(leadingSpeciesRaster) <- paste("biomassMap", scenario, RUN, Y, sep = "_")
        return(leadingSpeciesRaster)
      })
      names(bothYears) <- paste0("Year", years)
      # Change map = last year minus first year; values should stay in [-1, 1].
      rasLastYear <- bothYears[[paste0("Year", years[length(years)])]]
      rasFirstYear <- -bothYears[[paste0("Year", years[1])]]
      leadingStackChange <- calc(stack(rasLastYear, rasFirstYear),
                                 fun = sum,
                                 na.rm = TRUE)
      testthat::expect_true(all(minValue(leadingStackChange) >= -1 , maxValue(leadingStackChange) <= 1))
      leadingStackChange[is.na(rasterToMatch)] <- NA
      names(leadingStackChange) <- paste("leadingMapChange", scenario, RUN, sep = "_")
      return(leadingStackChange)
    })
    names(allRuns) <- runs
    return(allRuns)
  })
  names(allScenarios) <- Scenarios
  # DO EACH CLIMATE SCENARIO
  eachScenarioAverage <- lapply(names(allScenarios), FUN = function(eachScenario){
    rasStk <- raster::stack(allScenarios[[eachScenario]])
    biomassDiffPath <- file.path(outputFolder, paste("averageChange",
                                                     eachScenario, "Leading",
                                                     sep = "_"))
    biomassDiffPlotPath <- file.path(outputFolder, paste0(paste("averageChange",
                                                                eachScenario, "Leading",
                                                                sep = "_"), ".png"))
    # Mean change across runs, written to disk as GeoTIFF.
    climateDiffAverage <- calc(x = rasStk, fun = mean,
                               na.rm = TRUE,
                               filename = biomassDiffPath,
                               overwrite = TRUE,
                               format = "GTiff")
    averageChange <- 100*(mean(climateDiffAverage[], na.rm = TRUE))
    # Now plotting
    library(viridis)
    # pal <- RColorBrewer::brewer.pal(11, "RdYlBu")
    # pal[6] <- "#f7f4f2"
    pal <- c('#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2')
    # Symmetric colour scale around zero.
    maxV <- max(abs(round(minValue(climateDiffAverage), 1)),
                abs(round(maxValue(climateDiffAverage), 1)))
    AT <- seq(-maxV, maxV, length.out = 12)
    climateDiffAverage[is.na(flammableRTM)] <- NA
    if (!file.exists(biomassDiffPlotPath)){
      png(filename = biomassDiffPlotPath,
          width = 21, height = 29,
          units = "cm", res = 300)
      print(levelplot(climateDiffAverage,
                      sub = paste0("Proportional change in leading species under GCM ",
                                   eachScenario,
                                   "\nRed: conversion to conifer \nBlue: conversion to deciduous"),
                      margin = FALSE,
                      maxpixels = 7e6,
                      at = AT,
                      colorkey = list(
                        space = 'bottom',
                        axis.line = list(col = 'black'),
                        width = 0.75
                      ),
                      par.settings = list(
                        strip.border = list(col = 'transparent'),
                        strip.background = list(col = 'transparent'),
                        axis.line = list(col = 'transparent')),
                      scales = list(draw = FALSE),
                      col.regions = pal,
                      par.strip.text = list(cex = 0.8,
                                            lines = 1,
                                            col = "black"),
                      panel = function(...){
                        lattice::panel.levelplot.raster(...)
                        sp::sp.polygons(shpPoly, fill = 'black', lwd = 1)
                      }))
      dev.off()
    }
    toc()
    return(list(ras = climateDiffAverage, averageChangePerc = averageChange))
  })
  names(eachScenarioAverage) <- names(allScenarios)
  # DO A MEAN ONE FOR ALL SCENARIOS
  rasStk <- raster::stack(unlist(allScenarios))
  biomassDiffPath <- file.path(outputFolder, paste("averageChange",
                                                   "allScenarios", "Leading",
                                                   sep = "_"))
  biomassDiffPlotPath <- file.path(outputFolder, paste0(paste("averageChange",
                                                              "allScenarios", "Leading",
                                                              sep = "_"), ".png"))
  climateDiffAverage <- calc(x = rasStk, fun = mean,
                             na.rm = TRUE,
                             filename = biomassDiffPath,
                             overwrite = TRUE,
                             format = "GTiff")
  averageChange <- 100*(mean(climateDiffAverage[], na.rm = TRUE))
  # Now plotting
  library("viridis")
  # pal <- RColorBrewer::brewer.pal(11, "RdYlBu")
  # pal[6] <- "#f7f4f2"
  pal <- c('#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2')
  maxV <- max(abs(round(minValue(climateDiffAverage), 1)),
              abs(round(maxValue(climateDiffAverage), 1)))
  AT <- seq(-maxV, maxV, length.out = 12)
  climateDiffAverage[is.na(flammableRTM)] <- NA
  if (!file.exists(biomassDiffPlotPath)){
    png(filename = biomassDiffPlotPath,
        width = 21, height = 29,
        units = "cm", res = 300)
    print(levelplot(climateDiffAverage,
                    sub = paste0("Average proportional change in leading species under all GCMs ",
                                 "\nRed: conversion to conifer \nBlue-Purple: conversion to deciduous"),
                    margin = FALSE,
                    maxpixels = 7e6,
                    at = AT,
                    colorkey = list(
                      labels=list(cex = 1),
                      space = 'bottom',
                      axis.line = list(col = 'black'),
                      width = 0.75
                    ),
                    par.settings = list(
                      strip.border = list(col = 'transparent'),
                      strip.background = list(col = 'transparent'),
                      axis.line = list(col = 'transparent')),
                    scales = list(draw = FALSE),
                    col.regions = pal,
                    par.strip.text = list(cex = 1,
                                          lines = 1,
                                          col = "black"),
                    panel = function(...){
                      lattice::panel.levelplot.raster(...)
                      sp::sp.polygons(shpPoly, fill = 'black', lwd = 1)
                    }))
    dev.off()
  }
  toc()
  # Make the tables better
  # Summarise the per-scenario and overall percentage changes into a CSV.
  tb <- unlist(lapply(eachScenarioAverage, `[[`, "averageChangePerc"))
  finalTable <- data.table(climateScenario = c(names(tb), "allScenarios"),
                           averageChange = c(as.numeric(tb), averageChange))
  write.csv(finalTable, file = file.path(outputFolder, paste("averageChangeInLeadingSp.csv")))
  return(list(eachScenario = eachScenarioAverage,
              allScenarios = list(ras = climateDiffAverage,
                                  averageChangePerc = averageChange)))
}
# Classify one row of species biomasses (with the total assumed to be the
# LAST element of `x`). Returns the index of the species holding more than
# `leadingPercentage` of the total; if none dominates, the stand is coded as
# mixed: length(x) pasted in front of the most-abundant species' index
# (e.g. 7 columns, species 3 -> 73).
.defineLeading <- function(x, leadingPercentage = 0.8, totalCol){
  speciesVals <- x[-length(x)]
  threshold <- leadingPercentage * x[[totalCol]]
  leadingIdx <- which(speciesVals > threshold)
  if (length(leadingIdx) > 0) {
    return(leadingIdx)
  }
  # No outright leader: fall back to the mixed-stand encoding.
  dominant <- which.max(speciesVals)
  as.integer(paste0(length(x), dominant))
}
|
/posthocFunctions/leadingSpPlotsCaribou.R
|
no_license
|
tati-micheletti/NWT
|
R
| false
| false
| 12,032
|
r
|
# Maps the change in leading tree species (conifer vs deciduous) between the
# first and last simulation years, per climate/fire scenario and averaged
# across all scenarios. Writes GeoTIFFs, PNG level plots and a CSV summary to
# <pathOutputs>/vegetationPlots; returns per-scenario and overall averages.
# NOTE(review): assumes raster, data.table, lattice, tictoc and sp are
# attached by the caller -- confirm upstream loading.
leadingSpPlotsCaribou <- function(years = c(2011, 2100),
                                  pathData,
                                  pathOutputs,
                                  Scenarios = c("LandR_SCFM", "LandR.CS_SCFM",
                                                "LandR_fS", "LandR.CS_fS"),
                                  runs = paste0("run", 1:10),
                                  leadingPercentage = 0.50001,
                                  treeSpecies = c("Betu_Pap","Lari_Lar","Pice_Gla",
                                                  "Pice_Mar","Pinu_Ban","Popu_Tre"),
                                  treeType = NULL,
                                  flammableRTM,
                                  rasterToMatch,
                                  shpPoly = NULL,
                                  useProportionLeading = FALSE){
  Require::Require("reproducible")
  outputFolder <- checkPath(file.path(pathOutputs, "vegetationPlots"), create = TRUE)
  # ~~~ LEADING SPECIES ~~~~~~~~~~~~~~~~~~
  allScenarios <- lapply(Scenarios, function(scenario){
    tic(paste0("Calculating leading species change for ", scenario))
    allRuns <- lapply(runs, function(RUN){
      # FOR YEAR 2011
      # Build a per-pixel conifer/deciduous/mixed classification for each year.
      bothYears <- lapply(years, function(Y){
        coh <- bringObjectTS(path = file.path(pathData, paste(scenario, RUN, sep = "_")),
                             rastersNamePattern = c("cohortData", Y))[[1]]
        ras <- bringObjectTS(path = file.path(pathData, paste(scenario, RUN, sep = "_")),
                             rastersNamePattern = c("pixelGroupMap", Y))[[1]]
        cohortReduced <- coh[, list(sumBio = sum(B, na.rm = TRUE)), by = c("speciesCode", "pixelGroup")]
        biomassStack <- raster::stack(lapply(treeSpecies, function(tSp){
          message(paste0("Creating biomass map for ", tSp))
          r <- SpaDES.tools::rasterizeReduced(reduced = cohortReduced[speciesCode == tSp, ],
                                              fullRaster = ras,
                                              newRasterCols = "sumBio",
                                              mapcode = "pixelGroup")
          r[is.na(r[])] <- 0
          r[is.na(rasterToMatch)] <- NA
          return(r)
        }))
        names(biomassStack) <- treeSpecies
        biomassDT <- data.table(pixelID = 1:raster::ncell(biomassStack),
                                raster::getValues(biomassStack))
        biomassDT[, totalBiomass := rowSums(.SD, na.rm = TRUE), .SDcols = names(biomassDT)[names(biomassDT) != "pixelID"]]
        biomassDT <- biomassDT[totalBiomass != 0,]
        biomassDT[, leading := apply(.SD, 1, .defineLeading,
                                     leadingPercentage = leadingPercentage,
                                     totalCol = "totalBiomass"),
                  .SDcols = names(biomassDT)[names(biomassDT) != "pixelID"]]
        # Reclassify leading to conifer or deciduous
        if (is.null(treeType)){
          warning(paste0("treeType is null. Creating the following table. ",
                         "\nIf the species do not match, please provide treeType table ",
                         "with the correct classification in a column named 'newClass'"),
                  immediate. = TRUE)
          treeType <- structure(list(ID = c(1L, 2L, 3L, 4L, 5L, 6L, 71L, 72L, 73L,
                                            74L, 75L, 76L), landcover = c("Betu_Pap", "Lari_Lar", "Pice_Gla",
                                                                          "Pice_Mar", "Pinu_Ban", "Popu_Tre", "Mixed_Betu_Pap", "Mixed_Lari_Lar",
                                                                          "Mixed_Pice_Gla", "Mixed_Pice_Mar", "Mixed_Pinu_Ban", "Mixed_Popu_Tre"
          ), leadingType = c("deciduous", "deciduous", "conifer", "conifer",
                             "conifer", "deciduous", "mixed", "mixed", "mixed", "mixed", "mixed",
                             "mixed"), newClass = c(1, 1, 0, 0, 0, 1, 0.5, 0.5, 0.5, 0.5,
                                                    0.5, 0.5)), row.names = c(NA, -12L), class = "data.frame")
          names(treeType)[names(treeType) == "ID"] <- "leading"
          print(treeType)
        }
        biomassDT <- merge(biomassDT, treeType[, c("leading","newClass")])
        # Re-expand to the full pixel grid before writing raster values.
        allPixels <- data.table(pixelID = 1:raster::ncell(biomassStack))
        biomassDTfilled <- merge(allPixels, biomassDT, all.x = TRUE, by = "pixelID")
        leadingSpeciesRaster <- raster::setValues(raster(biomassStack),
                                                  biomassDTfilled[["newClass"]])
        names(leadingSpeciesRaster) <- paste("biomassMap", scenario, RUN, Y, sep = "_")
        return(leadingSpeciesRaster)
      })
      names(bothYears) <- paste0("Year", years)
      # Change map = last year minus first year; bounded in [-1, 1] by design.
      rasLastYear <- bothYears[[paste0("Year", years[length(years)])]]
      rasFirstYear <- -bothYears[[paste0("Year", years[1])]]
      leadingStackChange <- calc(stack(rasLastYear, rasFirstYear),
                                 fun = sum,
                                 na.rm = TRUE)
      testthat::expect_true(all(minValue(leadingStackChange) >= -1 , maxValue(leadingStackChange) <= 1))
      leadingStackChange[is.na(rasterToMatch)] <- NA
      names(leadingStackChange) <- paste("leadingMapChange", scenario, RUN, sep = "_")
      return(leadingStackChange)
    })
    names(allRuns) <- runs
    return(allRuns)
  })
  names(allScenarios) <- Scenarios
  # DO EACH CLIMATE SCENARIO
  eachScenarioAverage <- lapply(names(allScenarios), FUN = function(eachScenario){
    rasStk <- raster::stack(allScenarios[[eachScenario]])
    biomassDiffPath <- file.path(outputFolder, paste("averageChange",
                                                     eachScenario, "Leading",
                                                     sep = "_"))
    biomassDiffPlotPath <- file.path(outputFolder, paste0(paste("averageChange",
                                                                eachScenario, "Leading",
                                                                sep = "_"), ".png"))
    climateDiffAverage <- calc(x = rasStk, fun = mean,
                               na.rm = TRUE,
                               filename = biomassDiffPath,
                               overwrite = TRUE,
                               format = "GTiff")
    averageChange <- 100*(mean(climateDiffAverage[], na.rm = TRUE))
    # Now plotting
    library(viridis)
    # pal <- RColorBrewer::brewer.pal(11, "RdYlBu")
    # pal[6] <- "#f7f4f2"
    pal <- c('#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2')
    # Symmetric breaks around zero so the diverging palette is centred.
    maxV <- max(abs(round(minValue(climateDiffAverage), 1)),
                abs(round(maxValue(climateDiffAverage), 1)))
    AT <- seq(-maxV, maxV, length.out = 12)
    climateDiffAverage[is.na(flammableRTM)] <- NA
    if (!file.exists(biomassDiffPlotPath)){
      png(filename = biomassDiffPlotPath,
          width = 21, height = 29,
          units = "cm", res = 300)
      print(levelplot(climateDiffAverage,
                      sub = paste0("Proportional change in leading species under GCM ",
                                   eachScenario,
                                   "\nRed: conversion to conifer \nBlue: conversion to deciduous"),
                      margin = FALSE,
                      maxpixels = 7e6,
                      at = AT,
                      colorkey = list(
                        space = 'bottom',
                        axis.line = list(col = 'black'),
                        width = 0.75
                      ),
                      par.settings = list(
                        strip.border = list(col = 'transparent'),
                        strip.background = list(col = 'transparent'),
                        axis.line = list(col = 'transparent')),
                      scales = list(draw = FALSE),
                      col.regions = pal,
                      par.strip.text = list(cex = 0.8,
                                            lines = 1,
                                            col = "black"),
                      panel = function(...){
                        lattice::panel.levelplot.raster(...)
                        sp::sp.polygons(shpPoly, fill = 'black', lwd = 1)
                      }))
      dev.off()
    }
    toc()
    return(list(ras = climateDiffAverage, averageChangePerc = averageChange))
  })
  names(eachScenarioAverage) <- names(allScenarios)
  # DO A MEAN ONE FOR ALL SCENARIOS
  rasStk <- raster::stack(unlist(allScenarios))
  biomassDiffPath <- file.path(outputFolder, paste("averageChange",
                                                   "allScenarios", "Leading",
                                                   sep = "_"))
  biomassDiffPlotPath <- file.path(outputFolder, paste0(paste("averageChange",
                                                              "allScenarios", "Leading",
                                                              sep = "_"), ".png"))
  climateDiffAverage <- calc(x = rasStk, fun = mean,
                             na.rm = TRUE,
                             filename = biomassDiffPath,
                             overwrite = TRUE,
                             format = "GTiff")
  averageChange <- 100*(mean(climateDiffAverage[], na.rm = TRUE))
  # Now plotting
  library("viridis")
  # pal <- RColorBrewer::brewer.pal(11, "RdYlBu")
  # pal[6] <- "#f7f4f2"
  pal <- c('#9e0142','#d53e4f','#f46d43','#fdae61','#fee08b','#ffffbf','#e6f598','#abdda4','#66c2a5','#3288bd','#5e4fa2')
  maxV <- max(abs(round(minValue(climateDiffAverage), 1)),
              abs(round(maxValue(climateDiffAverage), 1)))
  AT <- seq(-maxV, maxV, length.out = 12)
  climateDiffAverage[is.na(flammableRTM)] <- NA
  if (!file.exists(biomassDiffPlotPath)){
    png(filename = biomassDiffPlotPath,
        width = 21, height = 29,
        units = "cm", res = 300)
    print(levelplot(climateDiffAverage,
                    sub = paste0("Average proportional change in leading species under all GCMs ",
                                 "\nRed: conversion to conifer \nBlue-Purple: conversion to deciduous"),
                    margin = FALSE,
                    maxpixels = 7e6,
                    at = AT,
                    colorkey = list(
                      labels=list(cex = 1),
                      space = 'bottom',
                      axis.line = list(col = 'black'),
                      width = 0.75
                    ),
                    par.settings = list(
                      strip.border = list(col = 'transparent'),
                      strip.background = list(col = 'transparent'),
                      axis.line = list(col = 'transparent')),
                    scales = list(draw = FALSE),
                    col.regions = pal,
                    par.strip.text = list(cex = 1,
                                          lines = 1,
                                          col = "black"),
                    panel = function(...){
                      lattice::panel.levelplot.raster(...)
                      sp::sp.polygons(shpPoly, fill = 'black', lwd = 1)
                    }))
    dev.off()
  }
  toc()
  # Make the tables better
  # Summarise per-scenario and overall percentage changes into a CSV.
  tb <- unlist(lapply(eachScenarioAverage, `[[`, "averageChangePerc"))
  finalTable <- data.table(climateScenario = c(names(tb), "allScenarios"),
                           averageChange = c(as.numeric(tb), averageChange))
  write.csv(finalTable, file = file.path(outputFolder, paste("averageChangeInLeadingSp.csv")))
  return(list(eachScenario = eachScenarioAverage,
              allScenarios = list(ras = climateDiffAverage,
                                  averageChangePerc = averageChange)))
}
# Given a row of species biomasses followed by a total column (assumed to be
# the LAST element of `x`), return the index of the species holding more than
# `leadingPercentage` of the total. If no species dominates, the stand is
# coded as mixed: length(x) pasted in front of the index of the most abundant
# species (e.g. 7 columns, dominant species 3 -> 73).
.defineLeading <- function(x, leadingPercentage = 0.8, totalCol){
  colID <- which(x[-length(x)] > (leadingPercentage*x[[totalCol]]))
  if (length(colID) == 0){
    # If we don't have a leading, we need to id conifer leading,
    # or deciduous leading
    colID1 <- which.max(x[-length(x)])
    colID <- as.integer(paste0(length(x), colID1))
  }
  return(colID)
}
|
# Script (auto-generated style): MEPS 2015 -- average number of health-care
# events per person by education level, using the survey package's design-
# based estimators. Reads local SAS transport files from C:/MEPS.
# Install and load packages
package_names  <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
# NOTE(review): installs from GitHub at run time; consider pinning a release.
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/h181.ssp');
year <- 2015
# Variable-name harmonisation for older survey years (no-ops for 2015).
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU15, VARSTR=VARSTR15)
if(year <= 1998) FYC <- FYC %>% rename(PERWT15F = WTDPER15)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Negative ages are MEPS missing codes; take the latest available age.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE15X, AGE42X, AGE31X))
FYC$ind = 1
# Education
if(year <= 1998){
  FYC <- FYC %>% mutate(EDUCYR = EDUCYR15)
}else if(year <= 2004){
  FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
# EDRECODE exists from 2012 onward; earlier years use years of education.
if(year >= 2012){
  FYC <- FYC %>%
    mutate(
      less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
      high_school = (EDRECODE == 13),
      some_college = (EDRECODE > 13))
}else{
  FYC <- FYC %>%
    mutate(
      less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
      high_school = (EDUCYR == 12),
      some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
  education = 1*less_than_hs + 2*high_school + 3*some_college,
  education = replace(education, AGELAST < 18, 9),
  education = recode_factor(education, .default = "Missing",
                            "1" = "Less than high school",
                            "2" = "High school",
                            "3" = "Some college",
                            "9" = "Inapplicable (age < 18)",
                            "0" = "Missing"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(education,ind, DUPERSID, PERWT15F, VARSTR, VARPSU)
# Load event files
RX <- read.xport('C:/MEPS/h178a.ssp')
DVT <- read.xport('C:/MEPS/h178b.ssp')
IPT <- read.xport('C:/MEPS/h178d.ssp')
ERT <- read.xport('C:/MEPS/h178e.ssp')
OPT <- read.xport('C:/MEPS/h178f.ssp')
OBV <- read.xport('C:/MEPS/h178g.ssp')
HHT <- read.xport('C:/MEPS/h178h.ssp')
# Define sub-levels for office-based and outpatient
OBV <- OBV %>%
  mutate(event_v2X = recode_factor(
    SEEDOC, .default = 'Missing', '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
  mutate(event_v2X = recode_factor(
    SEEDOC, .default = 'Missing', '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
                               keep.vars = c('SEEDOC','event_v2X'))
# PR = private insurance (PV + TRICARE); OZ = all other payment sources.
stacked_events <- stacked_events %>%
  mutate(event = data,
         PR15X = PV15X + TR15X,
         OZ15X = OF15X + SL15X + OT15X + OR15X + OU15X + WC15X + VA15X) %>%
  select(DUPERSID, event, event_v2X, SEEDOC,
         XP15X, SF15X, MR15X, MD15X, PR15X, OZ15X)
# Per-person event counts, total and by (positive) payment source.
pers_events <- stacked_events %>%
  group_by(DUPERSID) %>%
  summarise(ANY = sum(XP15X >= 0),
            EXP = sum(XP15X > 0),
            SLF = sum(SF15X > 0),
            MCR = sum(MR15X > 0),
            MCD = sum(MD15X > 0),
            PTR = sum(PR15X > 0),
            OTZ = sum(OZ15X > 0)) %>%
  ungroup
# Persons with no events get zero counts after the full join.
n_events <- full_join(pers_events,FYCsub,by='DUPERSID') %>%
  mutate_at(vars(ANY,EXP,SLF,MCR,MCD,PTR,OTZ),
            function(x) ifelse(is.na(x),0,x))
# Complex-survey design: PSU nested within stratum, person-level weights.
nEVTdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT15F,
  data = n_events,
  nest = TRUE)
svyby(~ANY, FUN=svymean, by = ~education + ind, design = nEVTdsgn)
|
/_check/test_code/use/r_codes/avgEVT_education_ind_2015.R
|
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
R
| false
| false
| 3,422
|
r
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/h181.ssp');
year <- 2015
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU15, VARSTR=VARSTR15)
if(year <= 1998) FYC <- FYC %>% rename(PERWT15F = WTDPER15)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE15X, AGE42X, AGE31X))
FYC$ind = 1
# Education
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR15)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
if(year >= 2012){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(education,ind, DUPERSID, PERWT15F, VARSTR, VARPSU)
# Load event files
RX <- read.xport('C:/MEPS/h178a.ssp')
DVT <- read.xport('C:/MEPS/h178b.ssp')
IPT <- read.xport('C:/MEPS/h178d.ssp')
ERT <- read.xport('C:/MEPS/h178e.ssp')
OPT <- read.xport('C:/MEPS/h178f.ssp')
OBV <- read.xport('C:/MEPS/h178g.ssp')
HHT <- read.xport('C:/MEPS/h178h.ssp')
# Define sub-levels for office-based and outpatient
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
stacked_events <- stacked_events %>%
mutate(event = data,
PR15X = PV15X + TR15X,
OZ15X = OF15X + SL15X + OT15X + OR15X + OU15X + WC15X + VA15X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP15X, SF15X, MR15X, MD15X, PR15X, OZ15X)
pers_events <- stacked_events %>%
group_by(DUPERSID) %>%
summarise(ANY = sum(XP15X >= 0),
EXP = sum(XP15X > 0),
SLF = sum(SF15X > 0),
MCR = sum(MR15X > 0),
MCD = sum(MD15X > 0),
PTR = sum(PR15X > 0),
OTZ = sum(OZ15X > 0)) %>%
ungroup
n_events <- full_join(pers_events,FYCsub,by='DUPERSID') %>%
mutate_at(vars(ANY,EXP,SLF,MCR,MCD,PTR,OTZ),
function(x) ifelse(is.na(x),0,x))
nEVTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT15F,
data = n_events,
nest = TRUE)
svyby(~ANY, FUN=svymean, by = ~education + ind, design = nEVTdsgn)
|
library("edgeR")
library("ggplot2")
library("dplyr")
setwd("/home/yiliao/Documents/Pepper_2021/Final/3_Hicheatmaps")
distance <- read.table("All.tsv.bed.bed", header=T)
distance
distance$Tissues <- as.factor(distance$Tissues)
mydata <-distance %>% filter(Tissues=='HL2' | Tissues=='GR1' | Tissues=='TZ1' | Tissues=='YP1') %>% filter (Distance=='Ratio')
ggplot(mydata, aes(x=Tissues, y=Frequency, fill=factor(Distance))) + geom_boxplot(outlier.shape = NA) +labs(fill = "Distance")
setwd("/home/yiliao/Documents/Pepper_2021/Final/3_Hicheatmaps/Final")
distance <- read.table("5species.bed", header=T)
distance
distance$Tissues <- as.factor(distance$Tissues)
mydata <-distance %>% filter(Tissues=='soybean' | Tissues=='SL4' | Tissues=='maize' | Tissues=='YP1' | Tissues == 'Nip') %>% filter (Distance=='Ratio')
mydata
ggplot(mydata, aes(x=Tissues, y=Frequency, fill=factor(Distance))) + geom_boxplot(outlier.shape = NA) +labs(fill = "Distance")
dev.copy2pdf(file="~/Documents/Pepper_2021/Final/3_Hicheatmaps/Final/5species.pdf")
mydata1 <-distance %>% filter(Tissues=='HL1' | Tissues=='GR1') %>% filter (Distance=='Ratio')
mydata1
wilcox.test(Frequency~Tissues, data = mydata1, paired = T)
mydata2 <-distance %>% filter(Tissues=='HL1' | Tissues=='TZ1') %>% filter (Distance=='Ratio')
mydata2
wilcox.test(Frequency~Tissues, data = mydata2, paired = T)
mydata3 <-distance %>% filter(Tissues=='YP1' | Tissues=='GR1') %>% filter (Distance=='Ratio')
mydata3
wilcox.test(Frequency~Tissues, data = mydata3)
mydata4 <-distance %>% filter(Tissues=='YP1' | Tissues=='TZ1') %>% filter (Distance=='Ratio')
mydata4
wilcox.test(Frequency~Tissues, data = mydata4)
#### For juicer HiCMap
distance <- read.table("Juicer.all.bed.bed", header=T)
distance
distance$Tissues <- as.factor(distance$Tissues)
mydata <-distance %>% filter(Tissues=='HL1' | Tissues=='GR1' | Tissues=='TZ1' | Tissues=='YP1') %>% filter (Distance=='Ratio')
ggplot(mydata, aes(x=Tissues, y=Frequency, fill=factor(Distance))) + geom_boxplot(outlier.shape = NA) +labs(fill = "Distance")
mydata <-distance %>% filter(Tissues=='HL1' | Tissues=='GR1') %>% filter (Distance=='Ratio')
mydata
wilcox.test(Frequency~Tissues, data = mydata, paired = T)
#### For juicer plot
distance1 <- read.table("GR_HL_TZ_YP.bed.bed.out", header=T)
ggplot(distance1 %>%
group_by(Tissue) %>%
mutate(weight = 1 / n()),
aes(x = Distance, fill = Tissue, colour=Tissue)) +
geom_line(aes(weight = Count), stat = 'density', position='dodge')
|
/Bin/Rscript/Figure3.r
|
no_license
|
yiliao1022/Pepper3Dgenome
|
R
| false
| false
| 2,512
|
r
|
library("edgeR")
library("ggplot2")
library("dplyr")
setwd("/home/yiliao/Documents/Pepper_2021/Final/3_Hicheatmaps")
distance <- read.table("All.tsv.bed.bed", header=T)
distance
distance$Tissues <- as.factor(distance$Tissues)
mydata <-distance %>% filter(Tissues=='HL2' | Tissues=='GR1' | Tissues=='TZ1' | Tissues=='YP1') %>% filter (Distance=='Ratio')
ggplot(mydata, aes(x=Tissues, y=Frequency, fill=factor(Distance))) + geom_boxplot(outlier.shape = NA) +labs(fill = "Distance")
setwd("/home/yiliao/Documents/Pepper_2021/Final/3_Hicheatmaps/Final")
distance <- read.table("5species.bed", header=T)
distance
distance$Tissues <- as.factor(distance$Tissues)
mydata <-distance %>% filter(Tissues=='soybean' | Tissues=='SL4' | Tissues=='maize' | Tissues=='YP1' | Tissues == 'Nip') %>% filter (Distance=='Ratio')
mydata
ggplot(mydata, aes(x=Tissues, y=Frequency, fill=factor(Distance))) + geom_boxplot(outlier.shape = NA) +labs(fill = "Distance")
dev.copy2pdf(file="~/Documents/Pepper_2021/Final/3_Hicheatmaps/Final/5species.pdf")
mydata1 <-distance %>% filter(Tissues=='HL1' | Tissues=='GR1') %>% filter (Distance=='Ratio')
mydata1
wilcox.test(Frequency~Tissues, data = mydata1, paired = T)
mydata2 <-distance %>% filter(Tissues=='HL1' | Tissues=='TZ1') %>% filter (Distance=='Ratio')
mydata2
wilcox.test(Frequency~Tissues, data = mydata2, paired = T)
mydata3 <-distance %>% filter(Tissues=='YP1' | Tissues=='GR1') %>% filter (Distance=='Ratio')
mydata3
wilcox.test(Frequency~Tissues, data = mydata3)
mydata4 <-distance %>% filter(Tissues=='YP1' | Tissues=='TZ1') %>% filter (Distance=='Ratio')
mydata4
wilcox.test(Frequency~Tissues, data = mydata4)
#### For juicer HiCMap
distance <- read.table("Juicer.all.bed.bed", header=T)
distance
distance$Tissues <- as.factor(distance$Tissues)
mydata <-distance %>% filter(Tissues=='HL1' | Tissues=='GR1' | Tissues=='TZ1' | Tissues=='YP1') %>% filter (Distance=='Ratio')
ggplot(mydata, aes(x=Tissues, y=Frequency, fill=factor(Distance))) + geom_boxplot(outlier.shape = NA) +labs(fill = "Distance")
mydata <-distance %>% filter(Tissues=='HL1' | Tissues=='GR1') %>% filter (Distance=='Ratio')
mydata
wilcox.test(Frequency~Tissues, data = mydata, paired = T)
#### For juicer plot
distance1 <- read.table("GR_HL_TZ_YP.bed.bed.out", header=T)
ggplot(distance1 %>%
group_by(Tissue) %>%
mutate(weight = 1 / n()),
aes(x = Distance, fill = Tissue, colour=Tissue)) +
geom_line(aes(weight = Count), stat = 'density', position='dodge')
|
##
## copied from combAna but here we're only going to use the
## lum data, ont the lum+mat included in combAna
## want to try and figure out why adding the mat makes it much
## worse
##
## perhaps need to parcoord the model design to check the "spacing" in some sense
source("~/local/include/libRbind/EmuRbind.R") # load the emu bindings
arch <- system("uname -s", intern=TRUE)
if(is.loaded("callEstimate") == FALSE){
libNAME <- "~/local/lib/libRBIND"
if(arch == "Linux"){
libNAME <- paste(libNAME, ".so", sep="")
} else if(arch =="Darwin"){
libNAME <- paste(libNAME, ".dylib", sep="")
} else {
buffer <- paste("error: uname -s gives ", arch , " not supported", sep="")
stop(buffer)
}
dyn.load(libNAME)
}
#library(MASS)
source("~/local/include/libRbind/emuOverDesign.R") # functions for running the emulator over the design in high dims
source("~/local/include/libRbind/implausOverDesign.R") # functions for computing the implausibility
source("~/local/include/libRbind/testEst.R") # for testing the estimation of thetas
source("fnAnalysis.R")
nsamples <- 45
## load the model data
##
## first the luminosity data
lumOutputFile <- "./wave-1/lum_fun_outputs.dat"
modelDataLum <- as.matrix(read.table(lumOutputFile))
nbinsLum <- dim(modelDataLum)[2]
nruns <- dim(modelDataLum)[1]
##
## now the metallicity data
## metOutputFile <- "./wave-1/metallicity_MV_outputs.dat"
## modelDataMet <- as.matrix(read.table(metOutputFile))
## nbinsMet <- dim(modelDataMet)[2]
if(nruns != dim(modelDataMet)[1]){
stop("nruns modelDataMet doesn't match modelDataLum")
}
## redefine nruns
nruns <- nsamples
## now this only works each time we rebuild the model
samp.index <- sample(seq(1,45), size=nsamples)
nbins <- nbinsLum
modelData.big <- modelDataLum
modelData <- modelData.big[samp.index,]
## load the design
designFile <- "./design/design_sorted_wave_1.dat"
desNames <- c("Zr", "Fescp", "Fbary")
nparams <- length(desNames)
designData.big <- as.matrix(read.table(designFile, col.names=desNames))
designData <- designData.big[samp.index,]
## load the experimental data
##
## lum
expDataFileLum <- "./lum_fun_observed.dat"
expDataLum <- as.matrix(read.table(expDataFileLum))
## the lum data errors are "poisson" ~ sqrt(\lambda), if N_obs was very big we could approximate it as a normal
## distribution with mean \lambda and sd \sqrt{\lambda}
##
## the met data are 95% confidence bounds, 95% of the data fall within mu +- 2 \sigma
## so sigma = upper-lower / 2
## where upper = mean + conf, lower = mean - conf
## so sigma = ((mean + conf ) - (mean - conf)) / 2 = conf
expData <- list(obsValue=c(expDataLum[2,]),
obsError=c(expDataLum[3,]))
rebuild <- 0
buffer <- "functional-data-lum.dat"
if(rebuild == 1 || file.exists(buffer) == FALSE){
##
## generate a functional sample from the vars in global sope
fnData <- fn.sample.gen(cov.fn=1, reg.order=1)
## now do the pca decomp
fnData <- fn.pca.gen(fnData, cutOff=0.99)
## estimate the thetas
fnData <- fn.estimate(fnData)
save(fnData, file=buffer)
} else {
load(buffer)
}
## stepData <- fn.emu.steps(fnData, 1, 3, 2, range.Min=0.0, range.Max=1.0)
## #fn.plot.steps(fnData, stepData, 2)
## impSteps <- fn.implaus.steps(fnData, stepData)
## pdf("images-lum/implaus-comb.pdf")
## fn.plot.imp.steps(fnData, impSteps, plot.joint=TRUE)
## dev.off()
## plot the pca.decomp eigenvalues
pdf("images-lum/pca-decomp.pdf")
plot(fnData$pca$t, fnData$pca.decomp$ur.h[,1], type="b", ylim=c(-1,1), xlab="obs index", ylab="scaled value")
for(i in 2:nbins){
lines(fnData$pca$t, fnData$pca.decomp$ur.h[,i], type="b", col=i)
}
title(sub="principle components, including all observables")
legend("topright", paste(1:nbins), lty=rep(1,nbins), col=1:nbins)
dev.off()
## test how stable the hyperparameters are, create them a bunch of times
#fnData <- fn.sample.gen(cov.fn=1, reg.order=1)
#fnData <- fn.pca.gen(fnData, cutOff=0.99)
gen.testThetas <- function(ntest=25){
fn.list <- vector("list", ntest)
for(i in 1:ntest){
fn.list[[i]] <- fn.estimate(fnData)
}
save(fn.list, file="fndata-thetas-test.dat")
}
|
/msu-chemtreeN-analysis/exp-analysis/wave-1/comb-obs/combAnaLum.R
|
no_license
|
scottedwardpratt/stat
|
R
| false
| false
| 4,137
|
r
|
##
## copied from combAna but here we're only going to use the
## lum data, ont the lum+mat included in combAna
## want to try and figure out why adding the mat makes it much
## worse
##
## perhaps need to parcoord the model design to check the "spacing" in some sense
source("~/local/include/libRbind/EmuRbind.R") # load the emu bindings
arch <- system("uname -s", intern=TRUE)
if(is.loaded("callEstimate") == FALSE){
libNAME <- "~/local/lib/libRBIND"
if(arch == "Linux"){
libNAME <- paste(libNAME, ".so", sep="")
} else if(arch =="Darwin"){
libNAME <- paste(libNAME, ".dylib", sep="")
} else {
buffer <- paste("error: uname -s gives ", arch , " not supported", sep="")
stop(buffer)
}
dyn.load(libNAME)
}
#library(MASS)
source("~/local/include/libRbind/emuOverDesign.R") # functions for running the emulator over the design in high dims
source("~/local/include/libRbind/implausOverDesign.R") # functions for computing the implausibility
source("~/local/include/libRbind/testEst.R") # for testing the estimation of thetas
source("fnAnalysis.R")
nsamples <- 45
## load the model data
##
## first the luminosity data
lumOutputFile <- "./wave-1/lum_fun_outputs.dat"
modelDataLum <- as.matrix(read.table(lumOutputFile))
nbinsLum <- dim(modelDataLum)[2]
nruns <- dim(modelDataLum)[1]
##
## now the metallicity data
## metOutputFile <- "./wave-1/metallicity_MV_outputs.dat"
## modelDataMet <- as.matrix(read.table(metOutputFile))
## nbinsMet <- dim(modelDataMet)[2]
if(nruns != dim(modelDataMet)[1]){
stop("nruns modelDataMet doesn't match modelDataLum")
}
## redefine nruns
nruns <- nsamples
## now this only works each time we rebuild the model
samp.index <- sample(seq(1,45), size=nsamples)
nbins <- nbinsLum
modelData.big <- modelDataLum
modelData <- modelData.big[samp.index,]
## load the design
designFile <- "./design/design_sorted_wave_1.dat"
desNames <- c("Zr", "Fescp", "Fbary")
nparams <- length(desNames)
designData.big <- as.matrix(read.table(designFile, col.names=desNames))
designData <- designData.big[samp.index,]
## load the experimental data
##
## lum
expDataFileLum <- "./lum_fun_observed.dat"
expDataLum <- as.matrix(read.table(expDataFileLum))
## the lum data errors are "poisson" ~ sqrt(\lambda), if N_obs was very big we could approximate it as a normal
## distribution with mean \lambda and sd \sqrt{\lambda}
##
## the met data are 95% confidence bounds, 95% of the data fall within mu +- 2 \sigma
## so sigma = upper-lower / 2
## where upper = mean + conf, lower = mean - conf
## so sigma = ((mean + conf ) - (mean - conf)) / 2 = conf
expData <- list(obsValue=c(expDataLum[2,]),
obsError=c(expDataLum[3,]))
rebuild <- 0
buffer <- "functional-data-lum.dat"
if(rebuild == 1 || file.exists(buffer) == FALSE){
##
## generate a functional sample from the vars in global sope
fnData <- fn.sample.gen(cov.fn=1, reg.order=1)
## now do the pca decomp
fnData <- fn.pca.gen(fnData, cutOff=0.99)
## estimate the thetas
fnData <- fn.estimate(fnData)
save(fnData, file=buffer)
} else {
load(buffer)
}
## stepData <- fn.emu.steps(fnData, 1, 3, 2, range.Min=0.0, range.Max=1.0)
## #fn.plot.steps(fnData, stepData, 2)
## impSteps <- fn.implaus.steps(fnData, stepData)
## pdf("images-lum/implaus-comb.pdf")
## fn.plot.imp.steps(fnData, impSteps, plot.joint=TRUE)
## dev.off()
## plot the pca.decomp eigenvalues
pdf("images-lum/pca-decomp.pdf")
plot(fnData$pca$t, fnData$pca.decomp$ur.h[,1], type="b", ylim=c(-1,1), xlab="obs index", ylab="scaled value")
for(i in 2:nbins){
lines(fnData$pca$t, fnData$pca.decomp$ur.h[,i], type="b", col=i)
}
title(sub="principle components, including all observables")
legend("topright", paste(1:nbins), lty=rep(1,nbins), col=1:nbins)
dev.off()
## test how stable the hyperparameters are, create them a bunch of times
#fnData <- fn.sample.gen(cov.fn=1, reg.order=1)
#fnData <- fn.pca.gen(fnData, cutOff=0.99)
gen.testThetas <- function(ntest=25){
fn.list <- vector("list", ntest)
for(i in 1:ntest){
fn.list[[i]] <- fn.estimate(fnData)
}
save(fn.list, file="fndata-thetas-test.dat")
}
|
#' @title EcoData - A collection of ecological datasets for teaching
#' @name EcoData
#' @author Florian Hartig
#' @docType package
#' @description A collection of ecological datasets for teaching
#' @details See index / vignette for details
#' @examples
#' vignette("EcoData", package="EcoData")
NULL
|
/EcoData.Rcheck/00_pkg_src/EcoData/R/EcoData.R
|
no_license
|
EttnerAndreas/ecodata
|
R
| false
| false
| 304
|
r
|
#' @title EcoData - A collection of ecological datasets for teaching
#' @name EcoData
#' @author Florian Hartig
#' @docType package
#' @description A collection of ecological datasets for teaching
#' @details See index / vignette for details
#' @examples
#' vignette("EcoData", package="EcoData")
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_summarize_years.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{fars_summarize_years}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{a vector of character or numeric years}
}
\value{
a tibble
}
\description{
for the years supplied,
return a tibble of the numbers of per-month accident occurrences for those years
}
\details{
an error will be thrown if all supplied years are invalid.
}
\examples{
\dontrun{
fars_summarize_years(c(2015, 2013, 2014))
fars_summarize_years(c(1970, 100000,200000))
}
}
|
/man/fars_summarize_years.Rd
|
no_license
|
leonardskynyrd/fars_package
|
R
| false
| true
| 617
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_summarize_years.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{fars_summarize_years}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{a vector of character or numeric years}
}
\value{
a tibble
}
\description{
for the years supplied,
return a tibble of the numbers of per-month accident occurrences for those years
}
\details{
an error will be thrown if all supplied years are invalid.
}
\examples{
\dontrun{
fars_summarize_years(c(2015, 2013, 2014))
fars_summarize_years(c(1970, 100000,200000))
}
}
|
simulate_error <-
function(samplesize, errordensity)
{
if(errordensity == "normal")
{
err = rnorm(samplesize)
}
if(errordensity == "asyclaw")
{
err = simasyclaw(samplesize)
}
if(errordensity == "asydoubleclaw")
{
err = simasydoubleclaw(samplesize)
}
if(errordensity == "bimodal")
{
err = simbimodal(samplesize)
}
if(errordensity == "claw")
{
err = simclaw(samplesize)
}
if(errordensity == "discretecomb")
{
err = simdiscretecomb(samplesize)
}
if(errordensity == "doubleclaw")
{
err = simdoubleclaw(samplesize)
}
if(errordensity == "kurtotic")
{
err = simkurtotic(samplesize)
}
if(errordensity == "outlier")
{
err = simoutlier(samplesize)
}
if(errordensity == "sepbimodal")
{
err = simsepbimodal(samplesize)
}
if(errordensity == "skewbimodal")
{
err = simskewbimodal(samplesize)
}
if(errordensity == "skewunimodal")
{
err = simskewunimodal(samplesize)
}
if(errordensity == "smoothcomb")
{
err = simsmoothcomb(samplesize)
}
if(errordensity == "strongskew")
{
err = simstrongskew(samplesize)
}
if(errordensity == "trimodal")
{
err = simtrimodal(samplesize)
}
return(err)
}
|
/bbefkr/R/simulate_error.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,375
|
r
|
simulate_error <-
function(samplesize, errordensity)
{
if(errordensity == "normal")
{
err = rnorm(samplesize)
}
if(errordensity == "asyclaw")
{
err = simasyclaw(samplesize)
}
if(errordensity == "asydoubleclaw")
{
err = simasydoubleclaw(samplesize)
}
if(errordensity == "bimodal")
{
err = simbimodal(samplesize)
}
if(errordensity == "claw")
{
err = simclaw(samplesize)
}
if(errordensity == "discretecomb")
{
err = simdiscretecomb(samplesize)
}
if(errordensity == "doubleclaw")
{
err = simdoubleclaw(samplesize)
}
if(errordensity == "kurtotic")
{
err = simkurtotic(samplesize)
}
if(errordensity == "outlier")
{
err = simoutlier(samplesize)
}
if(errordensity == "sepbimodal")
{
err = simsepbimodal(samplesize)
}
if(errordensity == "skewbimodal")
{
err = simskewbimodal(samplesize)
}
if(errordensity == "skewunimodal")
{
err = simskewunimodal(samplesize)
}
if(errordensity == "smoothcomb")
{
err = simsmoothcomb(samplesize)
}
if(errordensity == "strongskew")
{
err = simstrongskew(samplesize)
}
if(errordensity == "trimodal")
{
err = simtrimodal(samplesize)
}
return(err)
}
|
# This example shows how to use alternative/custom CDF environments
# for Affy chips (c) Leo Lahti 2009-2010
# Instructions for creating your own custom CDFs are available at
# http://masker.nci.nih.gov/ev/instructions.html
# Ready-made custom CDFs are available at
# http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/CDF_download.asp
# This BrainArray data is the BioConductor standard and installed in
# the default version. Other customCDFs by various groups available in
# the web.
# These instructions show how to use BrainArray CDFs with your data:
#Loading required package:
require(affy)
require(affydata)
data(Dilution) # 'Dilution' affybatch
# Option 1:
# Change the name of CDF environment directly to the affybatch object
# This assumes that the CDF environment with that name is available.
# The "HGU95Av2_Hs_ENTREZG" is readily available in BioConductor, as
# are all other BrainArray customCDFs for various chips.
# See http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/CDF_download.asp
Dilution@cdfName <- "HGU95Av2_Hs_ENTREZG"
eset <- rma(Dilution)
# Option 2 (not run):
# Load data (CEL files) directly from folder
# and specify CDF name here:
# eset <- justRMA(celfile.path = "put/your/path/here", cdfname = "HGU95Av2_Hs_ENTREZG")
|
/R/customCDF.R
|
no_license
|
antagomir/scripts
|
R
| false
| false
| 1,294
|
r
|
# This example shows how to use alternative/custom CDF environments
# for Affy chips (c) Leo Lahti 2009-2010
# Instructions for creating your own custom CDFs are available at
# http://masker.nci.nih.gov/ev/instructions.html
# Ready-made custom CDFs are available at
# http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/CDF_download.asp
# This BrainArray data is the BioConductor standard and installed in
# the default version. Other customCDFs by various groups available in
# the web.
# These instructions show how to use BrainArray CDFs with your data:
#Loading required package:
require(affy)
require(affydata)
data(Dilution) # 'Dilution' affybatch
# Option 1:
# Change the name of CDF environment directly to the affybatch object
# This assumes that the CDF environment with that name is available.
# The "HGU95Av2_Hs_ENTREZG" is readily available in BioConductor, as
# are all other BrainArray customCDFs for various chips.
# See http://brainarray.mbni.med.umich.edu/Brainarray/Database/CustomCDF/CDF_download.asp
Dilution@cdfName <- "HGU95Av2_Hs_ENTREZG"
eset <- rma(Dilution)
# Option 2 (not run):
# Load data (CEL files) directly from folder
# and specify CDF name here:
# eset <- justRMA(celfile.path = "put/your/path/here", cdfname = "HGU95Av2_Hs_ENTREZG")
|
#SAMPLER: PERFORMING METROPOLIS-HASTINGS SAMPLING FOR A LINEAR MODEL SPECIFIED #
#OVER POINT-REFERENCED GEOSPATIAL DATA
### LAST UPDATE: 11/08/2020; Le Bao
#' Sampling Technique Using Metropolis-Hastings
#'
#' This function performs Metropolis-Hastings sampling for a linear model specified
#' over point-referenced geospatial data. It returns MCMC iterations, with which
#' results of the geospatial linear model can be summarized.
#'
#' @param formula An object of class \code{\link[stats:formula]{formula}} (or one
#' that can be coerced to that classes). A symbolic description of the model to
#' be fitted. Alternatively, the model can be specified in \code{y} (a vector of
#' the outcome variable) and \code{X} (a matrix of explanatory variables).
#' @param coords A matrix of coordinates for all observations or a vector of variable
#' names indicating the coordinates variables in the data. Alternatively, the
#' coordinates can also be specified seperately using \code{east} and \code{north}.
#' @param data An data frame containing the variables in the model.
#' @param powered.exp This exponent, which must be greater than 0 and less than
#' or equal to 2, specifies a powered exponential correlation structure for the
#' data. One widely used specification is setting this to 1, which yields an
#' exponential correlation structure. Another common specification is setting
#' this to 2 (the default), which yields a Gaussian correlation structure.
#' @param n.iter Number of MCMC iterations (defaults to 100).
#' @param n.burnin Number of iterations that will be discarded for burnin (warmup).
#' The number of burnin should not be larger than \code{n.iter} and the default is 0.
#' @param y Alternative specification for the outcome variable that is used in the
#' kriging model. If formula is used, this argument will be suppressed.
#' @param X Alternative specification for the matrix of explanatory variables used
#' in the kriging model. Different forms of the variables such as transformations
#' and interactions also need to be specified accordingly beforehand.
#' @param east Alternative specification for the vector of eastings for all observations.
#' @param north Alternative specification for the vector of northing for all observations.
#' @param na.action A function which indicates what should happen when the data
#' contain NAs. The default is "na.fail." Another possible value is "na.omit."
#' @param spatial.share Prior for proportion of unexplained variance that is spatial
#' in nature. Must be greater than 0 and less than 1. Defaults to an even split,
#' valued at 0.5.
#' @param range.share Prior for the effective range term, as a proportion of the
#' maximum distance in the data. Users should choose the proportion of distance
#' at which they think the spatial correlation will become negligible. Must be
#' greater than 0. Values greater than 1 are permitted, but users should recognize
#' that this implies that meaningful spatial correlation would persist outside
#' of the convex hull of data. Defaults to half the maximum distance, valued at 0.5.
#' @param beta.var Prior for the variance on zero-meaned normal priors on the
#' regression coefficients. Must be greater than 0. Defaults to 10.
#' @param range.tol Tolerance term for setting the effective range. At the distance
#' where the spatial correlation drops below this term, it is judged that the
#' effective range has been met. The default value is the commonly-used 0.05.
#' Must be greater than 0 and less than 1.
#' @param b.tune Tuning parameter for candidate generation of regression coefficients
#' that must be greater than 0. A value of 1 means that draws will be based on
#' the variance-covariance matrix of coefficients from OLS. Larger steps are taken
#' for values greater than 1, and smaller steps are taken for values from 0 to 1.
#' Defaults to 1.0.
#' @param nugget.tune Tuning parameter for candidate generation of the nugget term
#' (\code{tau2}) that must be greater than 0. A value of 1 means that draws will
#' be based on the typical variance of an inverse gamma distribution. \emph{Smaller}
#' steps are taken for values \emph{greater} than 1, and \emph{larger} steps are
#' taken for \emph{decimal} values from 0 to 1. Defaults to 10.0.
#' @param psill.tune Tuning parameter for candidate generation of the partial sill
#' term (\code{sigma2}) that must be greater than 0. A value of 1 means that draws
#' will be based on the typical variance of an inverse gamma distribution.
#' \emph{Smaller} steps are taken for values \emph{greater} than 1, and \emph{larger}
#' steps are taken for \emph{decimal} values from 0 to 1. Defaults to 1.0.
#' @param distance.matrix Logical value indicates whether to save the distance matrix
#' in the output object. Saving distance matrix can save time for furthur use such as
#' in \code{update()} function but may results in larger file size. Defaults to \code{FALSE}.
#' @param progress.bar Types of progress bar. The default is "message" and will
#' report variance terms. Other possible values are "TRUE" (simple percentage)
#' and "FALSE" (suppress the progress bar).
#' @param accept.rate.warning Logical values indicating whether to show the warnings
#' when the acceptance rates are too high or too low. Defaults to \code{TRUE}.
#'
#' @return An object of class \code{krige} that includes the output MCMC matrix
#' of sampled values from the posterior distribution as well as the record of
#' function arguments, model frame, acceptance rates, and standing parameters.
#'
#' @details Analysts should use this function if they want to estimate a linear
#' regression model in which each observation can be located at points in geographic
#' space. That is, each observation is observed for a set of coordinates in eastings
#' & northings or longitude & latitude.
#'
#' Researchers must specify their model in the following manner: \code{formula}
#' should be a symbolic description of the model to be fitted; it is similar to
#' \code{R} model syntax as used in \code{lm()}. In addition, a matrix of
#' coordinates must be specified for the geospatial model in \code{coords}. \code{coords}
#' should be a matrix with two columns that specify west-east and north-south
#' coordinates, respectively (ideally eastings and northings but possibly longitude
#' and latitude). It can also be a vector of strings indicating the variables names
#' of the coordinates in the \code{data}. \code{data} should be a data frame
#' containing the variables in the model including both the formula and coordinates
#' (if only the names are provided). Alternatively, users can also specify the
#' variables using \code{y}, \code{X}, \code{east}, and \code{north} for outcome,
#' explanatory, west-east coordinates, and north-south coordinates variables,
#' respectively. This alternative specification is compatible with the one used
#' in the early version of this package.
#'
#' \code{n.iter} is the number of iterations to sample from the posterior distribution
#' using the Metropolis-Hastings algorithm. This defaults to 100 iterations, but
#' many more iterations would normally be preferred. \code{n.burnin} is set to 0
#' by default to preserve all the iterations since the kriging model usually takes
#' a relatively long time to run. Users can set a number for burnin or use \code{burnin}
#' function afterwards to discard the burnin period. The output of the function
#' prints the proportion of candidate values for the coefficients and for the
#' variance terms accepted by the Metropolis-Hastings algorithm. Particularly
#' low or high acceptance rates respectively may indicate slow mixing (requiring
#' more iterations) or a transient state (leading to nonconvergence), so additional
#' messages will print for extreme acceptance rates. Users may want to adjust the
#' tuning parameters \code{b.tune}, \code{nugget.tune}, or \code{psill.tune},
#' or perhaps the tolerance parameter \code{range.tol} if the acceptance rate
#' is too high or too low.
#'
#' The function returns a "krige" list object including the output MCMC matrix
#' of sampled values from the posterior distribution as well as the record of
#' function arguments, model frame, acceptance rates, and standing parameters.
#' Users can use the generic \code{summary} function to summarize the results or
#' extract the elements of the object for further use.
#'
#' @references
#' Jeff Gill. 2020. Measuring Constituency Ideology Using Bayesian Universal Kriging.
#' \emph{State Politics & Policy Quarterly}. \code{doi:10.1177/1532440020930197}
#'
#' @examples
#' \dontrun{
#' # Summarize example data
#' summary(ContrivedData)
#'
#' # Initial OLS model
#' contrived.ols<-lm(y~x.1+x.2,data=ContrivedData)
#' # summary(contrived.ols)
#'
#' # Set seed
#' set.seed(1241060320)
#'
#' #For simple illustration, we set to few iterations.
#' #In this case, a 10,000-iteration run converges to the true parameters.
#' #If you have considerable time and hardware, delete the # on the next line.
#' #10,000 iterations took 39 min. with 8 GB RAM & a 1.5 GHz Quad-Core processor.
#' M <- 100
#' #M<-10000
#'
#' contrived.run <- metropolis.krige(y ~ x.1 + x.2, coords = c("s.1","s.2"),
#' data = ContrivedData, n.iter = M, n.burnin=20, range.tol = 0.05)
#' # Alternatively, use burnin() after estimation
#' #contrived.run <- burnin(contrived.run, n.burnin=20)
#'
#' # Summarize the results and examine results against true coefficients
#' summary(contrived.run)
#' (TRUTH<-c(0.5,2.5,0.5,0,1,2))
#'
#' # Extract the MCMC matrix of the posterior distribution
#' contrived.run.mat <- mcmc.samples(contrived.run)
#' head(contrived.run.mat)
#'
#' # Diagnostics
#' geweke(contrived.run, early.prop=0.5)
#' heidel.welch(contrived.run)
#'
#' # Semivariogram
#' ### Semivariance
#' semivariance(contrived.run)
#' ### Plot semivariogram
#' semivariogram(contrived.run)
#' ### Alternatively, use generic plot() on a krige object
#' plot(contrived.run)
#' }
#'
#' @importFrom stats rbeta rgamma runif vcov var formula resid model.matrix model.response model.frame lm
#' @importFrom Rcpp evalCpp
#'
#' @export
metropolis.krige <- function(formula,coords,data,n.iter=100,powered.exp=2,n.burnin=0,
y,X,east,north,na.action="na.fail",spatial.share=0.5,
range.share=0.5, beta.var=10,range.tol=0.05,b.tune=1.0,
nugget.tune=10.0, psill.tune=1.0, distance.matrix=FALSE,
progress.bar="message", accept.rate.warning=TRUE){
# ERROR CHECKS
if(n.iter <= 0) stop("'n.iter' must be greater than 0.")
if(n.iter%%1 != 0) stop("'n.iter' must be an integer.")
if(powered.exp<=0 | powered.exp>2) stop("powered.exp must be greater than 0 and less than or equal to 2.")
if(spatial.share<=0 | spatial.share>=1) stop("spatial.share must be between 0 and 1.")
if(range.share<=0) stop("range.share must be greater than 0.")
if(range.tol<=0 | range.tol>=1) stop("p.range.tol must be between 0 and 1.")
if(beta.var<=0) stop("beta.var must be greater than 0.")
if(b.tune<=0) stop("b.tune must be greater than 0.")
if(nugget.tune<=0) stop("nugget.tune must be greater than 0.")
if(psill.tune<=0) stop("psill.tune must be greater than 0.")
if (n.burnin < 0) {
n.burnin <- 0; warning("The burn-in period is negative. 'n.burnin = 0' is used.")
} else if (n.burnin >= n.iter) {stop("The number of iterations is less than the burn-in period.")}
# IMPUT DATA
## Model frame
cl <- match.call()
if (missing(formula) || is.null(formula)) formula <- y ~ X - 1
if (missing(coords) || is.null(coords)) coords <- cbind(east, north)
if (missing(data) || is.null(data)) data <- model.frame(formula)
if (is.character(coords) & length(coords) == 2){
coords.names <- coords
east <- data[coords[1]]; north <- data[coords[2]]
coords <- cbind(east, north)
colnames(coords) <- coords.names
}
if (is.null(colnames(coords))) colnames(coords) <- c("east", "north")
data <- as.data.frame(cbind(model.frame(formula,data), coords))
allvar <- update(formula, paste("~ . +",paste(colnames(coords), collapse=" + ")))
data <- model.frame(formula = allvar, data = data, na.action = na.action)
coords <- unlist(colnames(coords))
# DATA
mf <- model.frame(formula = formula, na.action = na.action,
data = data)
y <- model.response(mf, type = "numeric")
X <- model.matrix(formula, data = data)
if(is.null(dimnames(X)[[2]])) {
mynames<-rep(".",dim(X)[2])
for(k in 1:dim(X)[2]) mynames[k]<-paste("x.",k,sep="")
dimnames(X)[[2]]<-mynames
}
easting <- as.vector(unlist(data[coords[1]]))
northing <- as.vector(unlist(data[coords[2]]))
#DEFINE STANDING PARAMETERS
dist.mat<-k_distmat(cbind(easting,northing))
max.distance<-max(dist.mat)
min.distance<-min(dist.mat[dist.mat>0])
init.ols <- lm(formula, data=data) # STARTING VALUES
b.cand.var<-b.tune*vcov(init.ols)
err.var<-var(resid(init.ols))
beta.A<-round(100*range.share/((-log(range.tol))^(1/powered.exp)))
beta.B<-round(100*(1-(range.share/((-log(range.tol))^(1/powered.exp)))))
attr(init.ols$terms, ".Environment") <- NULL
#CREATE OUTPUT MATRIX
mcmc.mat<-matrix(NA,nrow=n.iter,ncol=3+ncol(X))
dimnames(mcmc.mat)[[2]]<-c("tau2","phi","sigma2",dimnames(X)[[2]])
start.decay<-((-log(range.tol))^(1/powered.exp))/(max(dist.mat)*range.share)
mcmc.mat[1,]<-c((1-spatial.share)*err.var,start.decay,spatial.share*err.var,init.ols$coef)
local.Sigma<-NULL
#INITIALIZE ACCEPTANCE RATES
accepted.beta<-0
accepted.nugget<-0
accepted.decay<-0
accepted.psill<-0
#START OF MARKOV CHAIN
for (i in 1:(nrow(mcmc.mat)-1)) {
### Progress Bar
if (!progress.bar==FALSE) progBar(iter=i, total=n.iter, progress.bar=progress.bar,
nugget=mcmc.mat[i,1], decay=mcmc.mat[i,2],
partial.sill=mcmc.mat[i,3], interac = interactive())
old.beta<-mcmc.mat[i,4:ncol(mcmc.mat)]
old.var<-var(y-X%*%old.beta)
beta<-simp.mvrnorm(n=1,mu=old.beta,Sigma=b.cand.var)
local.share<-mcmc.mat[i,3]/(mcmc.mat[i,1]+mcmc.mat[i,3])
local.tau2.shape<-1+nugget.tune/(1-local.share)
local.sigma2.shape<-1+psill.tune/local.share
tau2<-1/rgamma(1,shape=local.tau2.shape,rate=nugget.tune*old.var)
sigma2<-1/rgamma(1,shape=local.sigma2.shape,rate=psill.tune*old.var)
phi<-1/(max.distance*rbeta(1,shape1=beta.A,shape2=beta.B))
if(is.null(local.Sigma)) local.Sigma<-ifelse(dist.mat>0,
mcmc.mat[i,3]*exp(-abs(mcmc.mat[i,2]*dist.mat)^powered.exp),
mcmc.mat[i,1]+mcmc.mat[i,3])
current <- krige.posterior(mcmc.mat[i,1],mcmc.mat[i,2],mcmc.mat[i,3],old.beta,
y,X,easting,northing,semivar.exp=powered.exp,
p.spatial.share=spatial.share,p.range.share=range.share,
p.range.tol=range.tol,p.beta.var=beta.var,tot.var=err.var,
local.Sigma=local.Sigma,max.distance=max.distance)
candidate.beta <- krige.posterior(mcmc.mat[i,1],mcmc.mat[i,2],mcmc.mat[i,3],
beta,y,X,easting,northing,semivar.exp=powered.exp,
p.spatial.share=spatial.share,p.range.share=range.share,
p.range.tol=range.tol,p.beta.var=beta.var,tot.var=err.var,
local.Sigma=local.Sigma,max.distance=max.distance)
candidate.nugget <- krige.posterior(tau2,mcmc.mat[i,2],mcmc.mat[i,3],old.beta,
y,X,easting,northing,semivar.exp=powered.exp,
p.spatial.share=spatial.share,p.range.share=range.share,
p.range.tol=range.tol,p.beta.var=beta.var,
tot.var=err.var,local.Sigma=NULL,
max.distance=max.distance)
candidate.decay <- krige.posterior(mcmc.mat[i,1],phi,mcmc.mat[i,3],old.beta,
y,X,easting,northing,semivar.exp=powered.exp,
p.spatial.share=spatial.share,p.range.share=range.share,
p.range.tol=range.tol,p.beta.var=beta.var,
tot.var=err.var,local.Sigma=NULL,
max.distance=max.distance)
candidate.psill <- krige.posterior(mcmc.mat[i,1],mcmc.mat[i,2],sigma2,old.beta,
y,X,easting,northing,semivar.exp=powered.exp,
p.spatial.share=spatial.share,p.range.share=range.share,
p.range.tol=range.tol,p.beta.var=beta.var,
tot.var=err.var,local.Sigma=NULL,
max.distance=max.distance)
a.beta<-exp(candidate.beta-current)
a.nugget<-exp(candidate.nugget-current)
a.decay<-exp(candidate.decay-current)
a.psill<-exp(candidate.psill-current)
if (a.beta > runif(1)) {
accepted.beta <- accepted.beta + 1
mcmc.mat[(i+1),4:ncol(mcmc.mat)] <- beta
} else mcmc.mat[(i+1),4:ncol(mcmc.mat)] <- mcmc.mat[i,4:ncol(mcmc.mat)]
if (a.nugget > runif(1)) {
accepted.nugget<-accepted.nugget + 1
mcmc.mat[(i+1),1] <- tau2
local.Sigma<-NULL
} else mcmc.mat[(i+1),1] <- mcmc.mat[i,1]
if (a.decay > runif(1)) {
accepted.decay<-accepted.decay+1
mcmc.mat[(i+1),2] <- phi
local.Sigma<-NULL
} else mcmc.mat[(i+1),2] <- mcmc.mat[i,2]
if (a.psill > runif(1)) {
accepted.psill<-accepted.psill + 1
mcmc.mat[(i+1),3] <- sigma2
local.Sigma<-NULL
} else mcmc.mat[(i+1),3] <- mcmc.mat[i,3]
if (!progress.bar==FALSE & i==n.iter-1)
progBar(iter=i+1, total=n.iter, progress.bar=progress.bar,
nugget=mcmc.mat[i,1], decay=mcmc.mat[i,2],
partial.sill=mcmc.mat[i,3], interac = interactive())
}
beta.rate<-accepted.beta/(nrow(mcmc.mat)-1)
tau2.rate<-accepted.nugget/(nrow(mcmc.mat)-1)
phi.rate<-accepted.decay/(nrow(mcmc.mat)-1)
sigma2.rate<-accepted.psill/(nrow(mcmc.mat)-1)
ar.rate <- list(beta.rate = beta.rate, tau2.rate = tau2.rate,
phi.rate = phi.rate, sigma2.rate = sigma2.rate)
if (n.burnin > 0) {mcmc.mat2 <- burnin.matrix(mcmc.mat, n.burnin = n.burnin)
} else {mcmc.mat2 <- mcmc.mat}
if (distance.matrix == FALSE) dist.mat <- NULL
standing <- list(dist.mat = dist.mat, max.distance = max.distance,
min.distance = min.distance, b.cand.var = b.cand.var,
err.var = err.var, beta.A = beta.A, beta.B = beta.B,
local.Sigma = local.Sigma, accepted.beta=accepted.beta,
accepted.nugget=accepted.nugget, accepted.decay=accepted.decay,
accepted.psill=accepted.psill, powered.exp=powered.exp)
krige.out <- list(call = cl,
formula = formula,
coords = coords,
n.iter = n.iter,
n.burnin = n.burnin,
init.ols = init.ols,
priors = list(spatial.share = spatial.share,
range.share = range.share, beta.var = beta.var,
range.tol = range.tol, b.tune = b.tune,
nugget.tune = nugget.tune, psill.tune = psill.tune),
data = data,
model.data.list = list(y = y, X = X, easting = easting,
northing = northing),
standing.parameter = standing,
acceptance.rate = ar.rate,
start = mcmc.mat[1,],
end = mcmc.mat[nrow(mcmc.mat),],
mcmc.mat = mcmc.mat2)
class(krige.out) <- "krige"
krige.out
}
|
/krige/R/metropolis.krige.R
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| false
| 20,423
|
r
|
#SAMPLER: PERFORMING METROPOLIS-HASTINGS SAMPLING FOR A LINEAR MODEL SPECIFIED #
#OVER POINT-REFERENCED GEOSPATIAL DATA
### LAST UPDATE: 11/08/2020; Le Bao
#' Sampling Technique Using Metropolis-Hastings
#'
#' This function performs Metropolis-Hastings sampling for a linear model specified
#' over point-referenced geospatial data. It returns MCMC iterations, with which
#' results of the geospatial linear model can be summarized.
#'
#' @param formula An object of class \code{\link[stats:formula]{formula}} (or one
#' that can be coerced to that classes). A symbolic description of the model to
#' be fitted. Alternatively, the model can be specified in \code{y} (a vector of
#' the outcome variable) and \code{X} (a matrix of explanatory variables).
#' @param coords A matrix of coordinates for all observations or a vector of variable
#' names indicating the coordinates variables in the data. Alternatively, the
#' coordinates can also be specified seperately using \code{east} and \code{north}.
#' @param data An data frame containing the variables in the model.
#' @param powered.exp This exponent, which must be greater than 0 and less than
#' or equal to 2, specifies a powered exponential correlation structure for the
#' data. One widely used specification is setting this to 1, which yields an
#' exponential correlation structure. Another common specification is setting
#' this to 2 (the default), which yields a Gaussian correlation structure.
#' @param n.iter Number of MCMC iterations (defaults to 100).
#' @param n.burnin Number of iterations that will be discarded for burnin (warmup).
#' The number of burnin should not be larger than \code{n.iter} and the default is 0.
#' @param y Alternative specification for the outcome variable that is used in the
#' kriging model. If formula is used, this argument will be suppressed.
#' @param X Alternative specification for the matrix of explanatory variables used
#' in the kriging model. Different forms of the variables such as transformations
#' and interactions also need to be specified accordingly beforehand.
#' @param east Alternative specification for the vector of eastings for all observations.
#' @param north Alternative specification for the vector of northing for all observations.
#' @param na.action A function which indicates what should happen when the data
#' contain NAs. The default is "na.fail." Another possible value is "na.omit."
#' @param spatial.share Prior for proportion of unexplained variance that is spatial
#' in nature. Must be greater than 0 and less than 1. Defaults to an even split,
#' valued at 0.5.
#' @param range.share Prior for the effective range term, as a proportion of the
#' maximum distance in the data. Users should choose the proportion of distance
#' at which they think the spatial correlation will become negligible. Must be
#' greater than 0. Values greater than 1 are permitted, but users should recognize
#' that this implies that meaningful spatial correlation would persist outside
#' of the convex hull of data. Defaults to half the maximum distance, valued at 0.5.
#' @param beta.var Prior for the variance on zero-meaned normal priors on the
#' regression coefficients. Must be greater than 0. Defaults to 10.
#' @param range.tol Tolerance term for setting the effective range. At the distance
#' where the spatial correlation drops below this term, it is judged that the
#' effective range has been met. The default value is the commonly-used 0.05.
#' Must be greater than 0 and less than 1.
#' @param b.tune Tuning parameter for candidate generation of regression coefficients
#' that must be greater than 0. A value of 1 means that draws will be based on
#' the variance-covariance matrix of coefficients from OLS. Larger steps are taken
#' for values greater than 1, and smaller steps are taken for values from 0 to 1.
#' Defaults to 1.0.
#' @param nugget.tune Tuning parameter for candidate generation of the nugget term
#' (\code{tau2}) that must be greater than 0. A value of 1 means that draws will
#' be based on the typical variance of an inverse gamma distribution. \emph{Smaller}
#' steps are taken for values \emph{greater} than 1, and \emph{larger} steps are
#' taken for \emph{decimal} values from 0 to 1. Defaults to 10.0.
#' @param psill.tune Tuning parameter for candidate generation of the partial sill
#' term (\code{sigma2}) that must be greater than 0. A value of 1 means that draws
#' will be based on the typical variance of an inverse gamma distribution.
#' \emph{Smaller} steps are taken for values \emph{greater} than 1, and \emph{larger}
#' steps are taken for \emph{decimal} values from 0 to 1. Defaults to 1.0.
#' @param distance.matrix Logical value indicates whether to save the distance matrix
#' in the output object. Saving the distance matrix can save time for further use such as
#' in the \code{update()} function but may result in larger file size. Defaults to \code{FALSE}.
#' @param progress.bar Types of progress bar. The default is "message" and will
#' report variance terms. Other possible values are "TRUE" (simple percentage)
#' and "FALSE" (suppress the progress bar).
#' @param accept.rate.warning Logical values indicating whether to show the warnings
#' when the acceptance rates are too high or too low. Defaults to \code{TRUE}.
#'
#' @return An object of class \code{krige} that includes the output MCMC matrix
#' of sampled values from the posterior distribution as well as the record of
#' function arguments, model frame, acceptance rates, and standing parameters.
#'
#' @details Analysts should use this function if they want to estimate a linear
#' regression model in which each observation can be located at points in geographic
#' space. That is, each observation is observed for a set of coordinates in eastings
#' & northings or longitude & latitude.
#'
#' Researchers must specify their model in the following manner: \code{formula}
#' should be a symbolic description of the model to be fitted; it is similar to
#' \code{R} model syntax as used in \code{lm()}. In addition, a matrix of
#' coordinates must be specified for the geospatial model in \code{coords}. \code{coords}
#' should be a matrix with two columns that specify west-east and north-south
#' coordinates, respectively (ideally eastings and northings but possibly longitude
#' and latitude). It can also be a vector of strings indicating the variables names
#' of the coordinates in the \code{data}. \code{data} should be a data frame
#' containing the variables in the model including both the formula and coordinates
#' (if only the names are provided). Alternatively, users can also specify the
#' variables using \code{y}, \code{X}, \code{east}, and \code{north} for outcome,
#' explanatory, west-east coordinates, and north-south coordinates variables,
#' respectively. This alternative specification is compatible with the one used
#' in the early version of this package.
#'
#' \code{n.iter} is the number of iterations to sample from the posterior distribution
#' using the Metropolis-Hastings algorithm. This defaults to 100 iterations, but
#' many more iterations would normally be preferred. \code{n.burnin} is set to 0
#' by default to preserve all the iterations since the kriging model usually takes
#' a relatively long time to run. Users can set a number for burnin or use \code{burnin}
#' function afterwards to discard the burnin period. The output of the function
#' prints the proportion of candidate values for the coefficients and for the
#' variance terms accepted by the Metropolis-Hastings algorithm. Particularly
#' low or high acceptance rates respectively may indicate slow mixing (requiring
#' more iterations) or a transient state (leading to nonconvergence), so additional
#' messages will print for extreme acceptance rates. Users may want to adjust the
#' tuning parameters \code{b.tune}, \code{nugget.tune}, or \code{psill.tune},
#' or perhaps the tolerance parameter \code{range.tol} if the acceptance rate
#' is too high or too low.
#'
#' The function returns a "krige" list object including the output MCMC matrix
#' of sampled values from the posterior distribution as well as the record of
#' function arguments, model frame, acceptance rates, and standing parameters.
#' Users can use the generic \code{summary} function to summarize the results or
#' extract the elements of the object for further use.
#'
#' @references
#' Jeff Gill. 2020. Measuring Constituency Ideology Using Bayesian Universal Kriging.
#' \emph{State Politics & Policy Quarterly}. \code{doi:10.1177/1532440020930197}
#'
#' @examples
#' \dontrun{
#' # Summarize example data
#' summary(ContrivedData)
#'
#' # Initial OLS model
#' contrived.ols<-lm(y~x.1+x.2,data=ContrivedData)
#' # summary(contrived.ols)
#'
#' # Set seed
#' set.seed(1241060320)
#'
#' #For simple illustration, we set to few iterations.
#' #In this case, a 10,000-iteration run converges to the true parameters.
#' #If you have considerable time and hardware, delete the # on the next line.
#' #10,000 iterations took 39 min. with 8 GB RAM & a 1.5 GHz Quad-Core processor.
#' M <- 100
#' #M<-10000
#'
#' contrived.run <- metropolis.krige(y ~ x.1 + x.2, coords = c("s.1","s.2"),
#' data = ContrivedData, n.iter = M, n.burnin=20, range.tol = 0.05)
#' # Alternatively, use burnin() after estimation
#' #contrived.run <- burnin(contrived.run, n.burnin=20)
#'
#' # Summarize the results and examine results against true coefficients
#' summary(contrived.run)
#' (TRUTH<-c(0.5,2.5,0.5,0,1,2))
#'
#' # Extract the MCMC matrix of the posterior distribution
#' contrived.run.mat <- mcmc.samples(contrived.run)
#' head(contrived.run.mat)
#'
#' # Diagnostics
#' geweke(contrived.run, early.prop=0.5)
#' heidel.welch(contrived.run)
#'
#' # Semivariogram
#' ### Semivariance
#' semivariance(contrived.run)
#' ### Plot semivariogram
#' semivariogram(contrived.run)
#' ### Alternatively, use generic plot() on a krige object
#' plot(contrived.run)
#' }
#'
#' @importFrom stats rbeta rgamma runif vcov var formula resid model.matrix model.response model.frame lm
#' @importFrom Rcpp evalCpp
#'
#' @export
metropolis.krige <- function(formula,coords,data,n.iter=100,powered.exp=2,n.burnin=0,
                             y,X,east,north,na.action="na.fail",spatial.share=0.5,
                             range.share=0.5, beta.var=10,range.tol=0.05,b.tune=1.0,
                             nugget.tune=10.0, psill.tune=1.0, distance.matrix=FALSE,
                             progress.bar="message", accept.rate.warning=TRUE){
  # NOTE(review): accept.rate.warning is accepted but not referenced in this
  # function body -- presumably consumed by a summary/print method; confirm.
  # ERROR CHECKS: validate all arguments before any expensive computation
  if(n.iter <= 0) stop("'n.iter' must be greater than 0.")
  if(n.iter%%1 != 0) stop("'n.iter' must be an integer.")
  if(powered.exp<=0 | powered.exp>2) stop("powered.exp must be greater than 0 and less than or equal to 2.")
  if(spatial.share<=0 | spatial.share>=1) stop("spatial.share must be between 0 and 1.")
  if(range.share<=0) stop("range.share must be greater than 0.")
  # FIX: message previously referred to the internal name "p.range.tol";
  # the user-facing argument is 'range.tol'.
  if(range.tol<=0 | range.tol>=1) stop("range.tol must be between 0 and 1.")
  if(beta.var<=0) stop("beta.var must be greater than 0.")
  if(b.tune<=0) stop("b.tune must be greater than 0.")
  if(nugget.tune<=0) stop("nugget.tune must be greater than 0.")
  if(psill.tune<=0) stop("psill.tune must be greater than 0.")
  if (n.burnin < 0) {
    n.burnin <- 0; warning("The burn-in period is negative. 'n.burnin = 0' is used.")
  } else if (n.burnin >= n.iter) {stop("The number of iterations is less than the burn-in period.")}
  # INPUT DATA
  ## Model frame: allow either formula/coords/data or the legacy y/X/east/north interface
  cl <- match.call()
  if (missing(formula) || is.null(formula)) formula <- y ~ X - 1
  if (missing(coords) || is.null(coords)) coords <- cbind(east, north)
  if (missing(data) || is.null(data)) data <- model.frame(formula)
  if (is.character(coords) & length(coords) == 2){
    coords.names <- coords
    east <- data[coords[1]]; north <- data[coords[2]]
    coords <- cbind(east, north)
    colnames(coords) <- coords.names
  }
  if (is.null(colnames(coords))) colnames(coords) <- c("east", "north")
  data <- as.data.frame(cbind(model.frame(formula,data), coords))
  allvar <- update(formula, paste("~ . +",paste(colnames(coords), collapse=" + ")))
  data <- model.frame(formula = allvar, data = data, na.action = na.action)
  coords <- unlist(colnames(coords))
  # DATA: extract the response vector and design matrix from the model frame
  mf <- model.frame(formula = formula, na.action = na.action,
                    data = data)
  y <- model.response(mf, type = "numeric")
  X <- model.matrix(formula, data = data)
  if(is.null(dimnames(X)[[2]])) {
    mynames<-rep(".",dim(X)[2])
    for(k in 1:dim(X)[2]) mynames[k]<-paste("x.",k,sep="")
    dimnames(X)[[2]]<-mynames
  }
  easting <- as.vector(unlist(data[coords[1]]))
  northing <- as.vector(unlist(data[coords[2]]))
  #DEFINE STANDING PARAMETERS
  dist.mat<-k_distmat(cbind(easting,northing))
  max.distance<-max(dist.mat)
  min.distance<-min(dist.mat[dist.mat>0])
  init.ols <- lm(formula, data=data) # STARTING VALUES
  b.cand.var<-b.tune*vcov(init.ols)
  err.var<-var(resid(init.ols))
  # Shape parameters for the beta prior on the (scaled) effective range
  beta.A<-round(100*range.share/((-log(range.tol))^(1/powered.exp)))
  beta.B<-round(100*(1-(range.share/((-log(range.tol))^(1/powered.exp)))))
  attr(init.ols$terms, ".Environment") <- NULL
  #CREATE OUTPUT MATRIX: one row per iteration; columns are nugget, decay, partial sill, betas
  mcmc.mat<-matrix(NA,nrow=n.iter,ncol=3+ncol(X))
  dimnames(mcmc.mat)[[2]]<-c("tau2","phi","sigma2",dimnames(X)[[2]])
  start.decay<-((-log(range.tol))^(1/powered.exp))/(max(dist.mat)*range.share)
  mcmc.mat[1,]<-c((1-spatial.share)*err.var,start.decay,spatial.share*err.var,init.ols$coef)
  local.Sigma<-NULL
  #INITIALIZE ACCEPTANCE RATES
  accepted.beta<-0
  accepted.nugget<-0
  accepted.decay<-0
  accepted.psill<-0
  #START OF MARKOV CHAIN
  for (i in 1:(nrow(mcmc.mat)-1)) {
    ### Progress Bar
    if (!progress.bar==FALSE) progBar(iter=i, total=n.iter, progress.bar=progress.bar,
                                      nugget=mcmc.mat[i,1], decay=mcmc.mat[i,2],
                                      partial.sill=mcmc.mat[i,3], interac = interactive())
    # Candidate draws: coefficients via a multivariate normal random walk,
    # variance terms via inverse gamma, and decay via a scaled beta draw
    old.beta<-mcmc.mat[i,4:ncol(mcmc.mat)]
    old.var<-var(y-X%*%old.beta)
    beta<-simp.mvrnorm(n=1,mu=old.beta,Sigma=b.cand.var)
    local.share<-mcmc.mat[i,3]/(mcmc.mat[i,1]+mcmc.mat[i,3])
    local.tau2.shape<-1+nugget.tune/(1-local.share)
    local.sigma2.shape<-1+psill.tune/local.share
    tau2<-1/rgamma(1,shape=local.tau2.shape,rate=nugget.tune*old.var)
    sigma2<-1/rgamma(1,shape=local.sigma2.shape,rate=psill.tune*old.var)
    phi<-1/(max.distance*rbeta(1,shape1=beta.A,shape2=beta.B))
    # Rebuild the covariance matrix only when a variance parameter changed last iteration
    if(is.null(local.Sigma)) local.Sigma<-ifelse(dist.mat>0,
                                                 mcmc.mat[i,3]*exp(-abs(mcmc.mat[i,2]*dist.mat)^powered.exp),
                                                 mcmc.mat[i,1]+mcmc.mat[i,3])
    # Log posteriors for the current state and each one-parameter-at-a-time candidate
    current <- krige.posterior(mcmc.mat[i,1],mcmc.mat[i,2],mcmc.mat[i,3],old.beta,
                               y,X,easting,northing,semivar.exp=powered.exp,
                               p.spatial.share=spatial.share,p.range.share=range.share,
                               p.range.tol=range.tol,p.beta.var=beta.var,tot.var=err.var,
                               local.Sigma=local.Sigma,max.distance=max.distance)
    candidate.beta <- krige.posterior(mcmc.mat[i,1],mcmc.mat[i,2],mcmc.mat[i,3],
                                      beta,y,X,easting,northing,semivar.exp=powered.exp,
                                      p.spatial.share=spatial.share,p.range.share=range.share,
                                      p.range.tol=range.tol,p.beta.var=beta.var,tot.var=err.var,
                                      local.Sigma=local.Sigma,max.distance=max.distance)
    candidate.nugget <- krige.posterior(tau2,mcmc.mat[i,2],mcmc.mat[i,3],old.beta,
                                        y,X,easting,northing,semivar.exp=powered.exp,
                                        p.spatial.share=spatial.share,p.range.share=range.share,
                                        p.range.tol=range.tol,p.beta.var=beta.var,
                                        tot.var=err.var,local.Sigma=NULL,
                                        max.distance=max.distance)
    candidate.decay <- krige.posterior(mcmc.mat[i,1],phi,mcmc.mat[i,3],old.beta,
                                       y,X,easting,northing,semivar.exp=powered.exp,
                                       p.spatial.share=spatial.share,p.range.share=range.share,
                                       p.range.tol=range.tol,p.beta.var=beta.var,
                                       tot.var=err.var,local.Sigma=NULL,
                                       max.distance=max.distance)
    candidate.psill <- krige.posterior(mcmc.mat[i,1],mcmc.mat[i,2],sigma2,old.beta,
                                       y,X,easting,northing,semivar.exp=powered.exp,
                                       p.spatial.share=spatial.share,p.range.share=range.share,
                                       p.range.tol=range.tol,p.beta.var=beta.var,
                                       tot.var=err.var,local.Sigma=NULL,
                                       max.distance=max.distance)
    # Metropolis-Hastings acceptance ratios (posteriors are on the log scale)
    a.beta<-exp(candidate.beta-current)
    a.nugget<-exp(candidate.nugget-current)
    a.decay<-exp(candidate.decay-current)
    a.psill<-exp(candidate.psill-current)
    # Accept or reject each candidate independently; accepting a variance/decay
    # candidate invalidates the cached covariance matrix (local.Sigma <- NULL)
    if (a.beta > runif(1)) {
      accepted.beta <- accepted.beta + 1
      mcmc.mat[(i+1),4:ncol(mcmc.mat)] <- beta
    } else mcmc.mat[(i+1),4:ncol(mcmc.mat)] <- mcmc.mat[i,4:ncol(mcmc.mat)]
    if (a.nugget > runif(1)) {
      accepted.nugget<-accepted.nugget + 1
      mcmc.mat[(i+1),1] <- tau2
      local.Sigma<-NULL
    } else mcmc.mat[(i+1),1] <- mcmc.mat[i,1]
    if (a.decay > runif(1)) {
      accepted.decay<-accepted.decay+1
      mcmc.mat[(i+1),2] <- phi
      local.Sigma<-NULL
    } else mcmc.mat[(i+1),2] <- mcmc.mat[i,2]
    if (a.psill > runif(1)) {
      accepted.psill<-accepted.psill + 1
      mcmc.mat[(i+1),3] <- sigma2
      local.Sigma<-NULL
    } else mcmc.mat[(i+1),3] <- mcmc.mat[i,3]
    if (!progress.bar==FALSE & i==n.iter-1)
      progBar(iter=i+1, total=n.iter, progress.bar=progress.bar,
              nugget=mcmc.mat[i,1], decay=mcmc.mat[i,2],
              partial.sill=mcmc.mat[i,3], interac = interactive())
  }
  # Acceptance rates over the (n.iter - 1) update steps
  beta.rate<-accepted.beta/(nrow(mcmc.mat)-1)
  tau2.rate<-accepted.nugget/(nrow(mcmc.mat)-1)
  phi.rate<-accepted.decay/(nrow(mcmc.mat)-1)
  sigma2.rate<-accepted.psill/(nrow(mcmc.mat)-1)
  ar.rate <- list(beta.rate = beta.rate, tau2.rate = tau2.rate,
                  phi.rate = phi.rate, sigma2.rate = sigma2.rate)
  if (n.burnin > 0) {mcmc.mat2 <- burnin.matrix(mcmc.mat, n.burnin = n.burnin)
  } else {mcmc.mat2 <- mcmc.mat}
  if (distance.matrix == FALSE) dist.mat <- NULL
  # Standing parameters are retained so update() can resume the chain
  standing <- list(dist.mat = dist.mat, max.distance = max.distance,
                   min.distance = min.distance, b.cand.var = b.cand.var,
                   err.var = err.var, beta.A = beta.A, beta.B = beta.B,
                   local.Sigma = local.Sigma, accepted.beta=accepted.beta,
                   accepted.nugget=accepted.nugget, accepted.decay=accepted.decay,
                   accepted.psill=accepted.psill, powered.exp=powered.exp)
  krige.out <- list(call = cl,
                    formula = formula,
                    coords = coords,
                    n.iter = n.iter,
                    n.burnin = n.burnin,
                    init.ols = init.ols,
                    priors = list(spatial.share = spatial.share,
                                  range.share = range.share, beta.var = beta.var,
                                  range.tol = range.tol, b.tune = b.tune,
                                  nugget.tune = nugget.tune, psill.tune = psill.tune),
                    data = data,
                    model.data.list = list(y = y, X = X, easting = easting,
                                           northing = northing),
                    standing.parameter = standing,
                    acceptance.rate = ar.rate,
                    start = mcmc.mat[1,],
                    end = mcmc.mat[nrow(mcmc.mat),],
                    mcmc.mat = mcmc.mat2)
  class(krige.out) <- "krige"
  krige.out
}
|
test_that("dm_from_src supports 'Pool'", {
  # expect no error
  conn <- pool::dbPool(RSQLite::SQLite(), "", timeout = 10)
  # FIX: close the pool even if an expectation fails, otherwise the SQLite
  # connections leak across tests and RSQLite warns about unclosed connections.
  on.exit(pool::poolClose(conn), add = TRUE)
  DBI::dbWriteTable(conn, "mtcars", mtcars)
  dm <- dm::dm_from_src(conn, learn_keys = FALSE)
  expect_identical(names(dm), "mtcars")
})
|
/tests/testthat/test-pool-support.R
|
permissive
|
brancengregory/dm
|
R
| false
| false
| 260
|
r
|
test_that("dm_from_src supports 'Pool'", {
  # expect no error
  conn <- pool::dbPool(RSQLite::SQLite(), "", timeout = 10)
  # FIX: close the pool even if an expectation fails, otherwise the SQLite
  # connections leak across tests and RSQLite warns about unclosed connections.
  on.exit(pool::poolClose(conn), add = TRUE)
  DBI::dbWriteTable(conn, "mtcars", mtcars)
  dm <- dm::dm_from_src(conn, learn_keys = FALSE)
  expect_identical(names(dm), "mtcars")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paramplot.OEFPIL.R
\name{paramplot.OEFPIL}
\alias{paramplot.OEFPIL}
\title{Plot parameters of an OEFPIL object}
\usage{
paramplot.OEFPIL(object)
}
\arguments{
\item{object}{an object or a \code{list} of objects of class \code{"OEFPIL"} (a result of a call to \code{\link{OEFPIL}}).}
}
\value{
A ggplot graph of the estimated parameter values with error bars. The result can be edited using other ggplot components as usual.
}
\description{
Function for plotting the estimated values of the parameters with error bars (plus minus standard deviation) using \code{ggplot} for an object (or list of objects) of class \code{"OEFPIL"}.
}
\details{
The input list has to be without \code{NaN}, \code{NA}, \code{Inf} or \code{-Inf} values in the estimated parameters or covariance matrix in the source \code{"OEFPIL"} object. In that case the function returns a warning message and no graph is plotted.
}
\note{
Due to possible large differences in units of estimated parameters, the \code{scale} argument for facetting in the \code{ggplot} graph is set to \code{"free"}. It should be taken into account when interpreting the results.
}
\examples{
\dontshow{
utils::example("coef.OEFPIL",echo=FALSE)}
##-- Continuing the coef.OEFPIL(.) example:
n <- nrow(steamdata)
CM2 <- diag(c(rep(0.2^2,n), rep(0.1^2,n)))
st2 <- OEFPIL(steamdata, y ~ b1 * 10^(b2 * x/ (b3 + x)), list(b1 = 5, b2 = 8, b3 = 200),
CM2, useNLS = FALSE)
##Example 1 - Use of paramplot.OEFPIL function on an object of class 'OEFPIL'
paramplot.OEFPIL(st2)
##Example 2 - Use of paramplot.OEFPIL function on a list of objects of class 'OEFPIL'
paramplot.OEFPIL(list(st1,st2))
}
\seealso{
\code{\link{OEFPIL}}, \code{\link{curvplot.OEFPIL}} and \code{\link{plot.OEFPIL}}.
}
|
/man/paramplot.OEFPIL.Rd
|
no_license
|
stazam/OEFPIL-
|
R
| false
| true
| 1,822
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paramplot.OEFPIL.R
\name{paramplot.OEFPIL}
\alias{paramplot.OEFPIL}
\title{Plot parameters of an OEFPIL object}
\usage{
paramplot.OEFPIL(object)
}
\arguments{
\item{object}{an object or a \code{list} of objects of class \code{"OEFPIL"} (a result of a call to \code{\link{OEFPIL}}).}
}
\value{
A ggplot graph of the estimated parameter values with error bars. The result can be edited using other ggplot components as usual.
}
\description{
Function for plotting the estimated values of the parameters with error bars (plus minus standard deviation) using \code{ggplot} for an object (or list of objects) of class \code{"OEFPIL"}.
}
\details{
The input list has to be without \code{NaN}, \code{NA}, \code{Inf} or \code{-Inf} values in the estimated parameters or covariance matrix in the source \code{"OEFPIL"} object. In that case the function returns a warning message and no graph is plotted.
}
\note{
Due to possible large differences in units of estimated parameters, the \code{scale} argument for facetting in the \code{ggplot} graph is set to \code{"free"}. It should be taken into account when interpreting the results.
}
\examples{
\dontshow{
utils::example("coef.OEFPIL",echo=FALSE)}
##-- Continuing the coef.OEFPIL(.) example:
n <- nrow(steamdata)
CM2 <- diag(c(rep(0.2^2,n), rep(0.1^2,n)))
st2 <- OEFPIL(steamdata, y ~ b1 * 10^(b2 * x/ (b3 + x)), list(b1 = 5, b2 = 8, b3 = 200),
CM2, useNLS = FALSE)
##Example 1 - Use of paramplot.OEFPIL function on an object of class 'OEFPIL'
paramplot.OEFPIL(st2)
##Example 2 - Use of paramplot.OEFPIL function on a list of objects of class 'OEFPIL'
paramplot.OEFPIL(list(st1,st2))
}
\seealso{
\code{\link{OEFPIL}}, \code{\link{curvplot.OEFPIL}} and \code{\link{plot.OEFPIL}}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_create_cluster_parameter_group}
\alias{redshift_create_cluster_parameter_group}
\title{Creates an Amazon Redshift parameter group}
\usage{
redshift_create_cluster_parameter_group(ParameterGroupName,
ParameterGroupFamily, Description, Tags)
}
\arguments{
\item{ParameterGroupName}{[required] The name of the cluster parameter group.
Constraints:
\itemize{
\item Must be 1 to 255 alphanumeric characters or hyphens
\item First character must be a letter.
\item Cannot end with a hyphen or contain two consecutive hyphens.
\item Must be unique within your AWS account.
}
This value is stored as a lower-case string.}
\item{ParameterGroupFamily}{[required] The Amazon Redshift engine version to which the cluster parameter group
applies. The cluster engine version determines the set of parameters.
To get a list of valid parameter group family names, you can call
DescribeClusterParameterGroups. By default, Amazon Redshift returns a
list of all the parameter groups that are owned by your AWS account,
including the default parameter groups for each Amazon Redshift engine
version. The parameter group family names associated with the default
parameter groups provide you the valid values. For example, a valid
family name is \"redshift-1.0\".}
\item{Description}{[required] A description of the parameter group.}
\item{Tags}{A list of tag instances.}
}
\description{
Creates an Amazon Redshift parameter group.
}
\details{
Creating parameter groups is independent of creating clusters. You can
associate a cluster with a parameter group when you create the cluster.
You can also associate an existing cluster with a parameter group after
the cluster is created by using ModifyCluster.
Parameters in the parameter group define specific behavior that applies
to the databases you create on the cluster. For more information about
parameters and parameter groups, go to \href{https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html}{Amazon Redshift Parameter Groups}
in the \emph{Amazon Redshift Cluster Management Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_cluster_parameter_group(
ParameterGroupName = "string",
ParameterGroupFamily = "string",
Description = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
/cran/paws.database/man/redshift_create_cluster_parameter_group.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 2,450
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_create_cluster_parameter_group}
\alias{redshift_create_cluster_parameter_group}
\title{Creates an Amazon Redshift parameter group}
\usage{
redshift_create_cluster_parameter_group(ParameterGroupName,
ParameterGroupFamily, Description, Tags)
}
\arguments{
\item{ParameterGroupName}{[required] The name of the cluster parameter group.
Constraints:
\itemize{
\item Must be 1 to 255 alphanumeric characters or hyphens
\item First character must be a letter.
\item Cannot end with a hyphen or contain two consecutive hyphens.
\item Must be unique within your AWS account.
}
This value is stored as a lower-case string.}
\item{ParameterGroupFamily}{[required] The Amazon Redshift engine version to which the cluster parameter group
applies. The cluster engine version determines the set of parameters.
To get a list of valid parameter group family names, you can call
DescribeClusterParameterGroups. By default, Amazon Redshift returns a
list of all the parameter groups that are owned by your AWS account,
including the default parameter groups for each Amazon Redshift engine
version. The parameter group family names associated with the default
parameter groups provide you the valid values. For example, a valid
family name is \"redshift-1.0\".}
\item{Description}{[required] A description of the parameter group.}
\item{Tags}{A list of tag instances.}
}
\description{
Creates an Amazon Redshift parameter group.
}
\details{
Creating parameter groups is independent of creating clusters. You can
associate a cluster with a parameter group when you create the cluster.
You can also associate an existing cluster with a parameter group after
the cluster is created by using ModifyCluster.
Parameters in the parameter group define specific behavior that applies
to the databases you create on the cluster. For more information about
parameters and parameter groups, go to \href{https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html}{Amazon Redshift Parameter Groups}
in the \emph{Amazon Redshift Cluster Management Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_cluster_parameter_group(
ParameterGroupName = "string",
ParameterGroupFamily = "string",
Description = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_data.R
\docType{data}
\name{data_mekko}
\alias{data_mekko}
\title{Random data for plotting mekko chart examples}
\format{
Dataset of 2 columns and 1000 rows
\describe{
\item{var1}{\code{character}}
\item{var2}{\code{numeric}}
}
}
\usage{
data_mekko
}
\description{
This dataset is used in the examples,
moreover it can be used as a reference if you notice a bug.
}
\keyword{datasets}
|
/man/data_mekko.Rd
|
no_license
|
datastorm-open/rAmCharts
|
R
| false
| true
| 471
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/base_data.R
\docType{data}
\name{data_mekko}
\alias{data_mekko}
\title{Random data for plotting mekko chart examples}
\format{
Dataset of 2 columns and 1000 rows
\describe{
\item{var1}{\code{character}}
\item{var2}{\code{numeric}}
}
}
\usage{
data_mekko
}
\description{
This dataset is used in the examples,
moreover it can be used as a reference if you notice a bug.
}
\keyword{datasets}
|
# Part of the mi package for multiple imputation of missing data
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Trustees of Columbia University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## Infer an appropriate missing_variable type from the observed values of y.
##
## Arguments:
##   y              - an atomic vector or factor (matrices/arrays are rejected)
##   favor_ordered  - if TRUE, small integer-valued sets are guessed to be
##                    ordered-categorical rather than unordered-categorical
##   favor_positive - if TRUE, strictly/weakly positive continuous data are
##                    guessed to be (non)negative-continuous
##   threshold      - maximum number of distinct integer values that still
##                    counts as categorical rather than continuous
##   variable_name  - name used in messages and in the name-based heuristics
##
## Returns one of: "irrelevant", "count", "fixed", "binary",
## "ordered-categorical", "unordered-categorical", "positive-continuous",
## "nonnegative-continuous", "continuous", "proportion", "SC_proportion".
.guess_type <-
  function(y, favor_ordered = TRUE, favor_positive = FALSE, threshold = 5,
           variable_name = deparse(substitute(y))) {
    if(!is.null(dim(y))) stop(paste(variable_name, ": must be a vector"))
    if(is.factor(y)) y <- factor(y) # to drop unused levels
    values <- unique(y)
    values <- sort(values[!is.na(values)]) # distinct non-missing values
    len <- length(values)
    if(len == 0) {
      warning(paste(variable_name, ": cannot infer variable type when all values are NA, guessing 'irrelevant'"))
      type <- "irrelevant"
    }
    # Counts: at least 8 distinct nonnegative integers.
    # FIX: guard with is.numeric() so character vectors do not crash on
    # 'values %% 1' before reaching the is.character() branch below.
    # 'values' already excludes NAs, so the redundant NA filters and
    # na.rm flag were dropped.
    else if( !is.factor(y) &&
             is.numeric(values) &&
             all(values %% 1 == 0) &&
             min(values) >= 0 &&
             len >= 8) type <- "count"
    else if(len == 1) type <- "fixed"
    # Name-based heuristics: punctuation-prefixed or "id" variables are ignored
    else if(grepl("^[[:punct:]]",
                  variable_name)) type <- "irrelevant"
    else if(identical("id",
                      tolower(variable_name))) type <- "irrelevant"
    else if(len == 2) {
      if(!is.numeric(values)) type <- "binary"
      else if(all(values ==
                  as.integer(values))) type <- "binary"
      else if(favor_positive) {
        if(all(values > 0)) type <- "positive-continuous"
        else if(all(values >= 0)) type <- "nonnegative-continuous"
        else type <- "continuous"
      }
      else type <- "continuous"
    }
    else if(is.ts(y)) {
      if(favor_positive) {
        if(all(values > 0)) type <- "positive-continuous"
        else if(all(values >= 0)) type <- "nonnegative-continuous"
        else type <- "continuous"
      }
      else type <- "continuous"
    }
    else if(is.ordered(y)) type <- "ordered-categorical"
    else if(is.factor(y)) type <- "unordered-categorical"
    else if(is.character(y)) type <- "unordered-categorical"
    else if(is.numeric(y)) {
      # Values confined to [0, 1] are proportions; hitting an endpoint makes
      # them "SC_proportion" (zero/one inflated)
      if(all(values >= 0) &&
         all(values <= 1)) {
        if(any(values %in% 0:1)) type <- "SC_proportion"
        else type <- "proportion"
      }
      else if(len <= threshold &&
              all(values == as.integer(values)))
        type <- if(favor_ordered) "ordered-categorical" else "unordered-categorical"
      else if(favor_positive) {
        if(all(values > 0)) type <- "positive-continuous"
        else if(all(values >= 0)) type <- "nonnegative-continuous"
        else type <- "continuous"
      }
      else type <- "continuous"
    }
    else stop(paste("cannot infer variable type for", variable_name))
    return(type)
  }
## This constructor largely supplants typecast() from earlier versions of mi:
## when no explicit type is supplied, infer one from the data and dispatch again.
setMethod("missing_variable", signature(y = "ANY", type = "missing"), def =
          function(y, favor_ordered = TRUE, favor_positive = FALSE, threshold = 5,
                   variable_name = deparse(substitute(y))) {
            guessed <- .guess_type(y, favor_ordered, favor_positive, threshold, variable_name)
            missing_variable(y = y, type = guessed, variable_name = variable_name)
          })
## When an explicit type name is supplied, instantiate that S4 class directly.
setMethod("missing_variable", signature(y = "ANY", type = "character"), def =
          function(y, type, variable_name = deparse(substitute(y)), ...) {
            new(type, raw_data = y, variable_name = variable_name, ...)
          })
## Build a one-row data.frame summarising a missing_variable for show();
## for semi-continuous variables the indicator variable is appended as an
## extra row via recursion.
.show_helper <-
function(object) {
  n_missing <- object@n_miss
  if(n_missing) {
    fam_spec <- object@family
    if(is.character(fam_spec)) {
      fam <- fam_spec
      link <- NA_character_
    }
    else {
      fam <- fam_spec$family
      link <- fam_spec$link
    }
  }
  else {
    fam <- NA_character_
    link <- NA_character_
  }
  trans <- if(is(object, "continuous")) .parse_trans(object@transformation) else NA_character_
  out <- data.frame(type = class(object), missing = n_missing,
                    method = object@imputation_method,
                    family = fam, link = link, transformation = trans)
  rownames(out) <- object@variable_name
  if(is(object, "semi-continuous")) out <- rbind(out, .show_helper(object@indicator))
  out
}
## Display a missing_variable by printing its summary data.frame.
setMethod("show", signature(object = "missing_variable"), def =
          function(object) {
            print(.show_helper(object))
          })
## setAs methods cause subtle problems with auto-coercion
# setAs(from = "unordered-categorical", to = "ordered-categorical", def =
# function(from) {
# class(from) <- "ordered-categorical"
# return(from)
# })
#
# setAs(from = "ordered-categorical", to = "unordered-categorical", def =
# function(from) {
# class(from) <- "unordered-categorical"
# return(from)
# })
#
# setAs(from = "binary", to = "unordered-categorical", def =
# function(from) {
# stop("not possible or necessary to coerce from 'binary' to 'unordered-categorical'")
# })
# setAs(from = "binary", to = "ordered-categorical", def =
#           function(from) {
#             stop("not possible or necessary to coerce from 'binary' to 'ordered-categorical'")
#           })
# setAs(from = "nonnegative-continuous", to = "continuous", def =
# function(from) {
# mean <- mean(from@raw_data, na.rm = TRUE)
# sd <- sd(from@raw_data, na.rm = TRUE)
# from@transformation <- .standardize_transform
# formals(from@transformation)$mean <- mean
# formals(from@transformation)$sd <- sd
# from@inverse_transformation <- .standardize_transform
# formals(from@inverse_transformation)$mean <- mean
# formals(from@inverse_transformation)$sd <- sd
# formals(from@inverse_transformation)$inverse <- TRUE
# from@data <- from@transformation(from@raw_data)
# class(from) <- "continuous"
# return(from)
# })
#
# setAs(from = "continuous", to = "positive-continuous", def =
# function(from) {
# from@transformation <- log
# from@inverse_transformation <- exp
# from@data <- from@transformation(from@raw_data)
# class(from) <- "positive-continuous"
# validObject(from)
# return(from)
# })
#
## maybe add more methods
## NOTE: If you change something here, look also at the change_type.R file
|
/R/missing_variable.R
|
no_license
|
gbiele/mi
|
R
| false
| false
| 6,960
|
r
|
# Part of the mi package for multiple imputation of missing data
# Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Trustees of Columbia University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## Guess which missing_variable subclass best describes the data in y.
##
## y             : the data vector whose variable type should be inferred
## favor_ordered : if TRUE, short integer-valued vectors are classified as
##                 "ordered-categorical" rather than "unordered-categorical"
## favor_positive: if TRUE, continuous data that are strictly positive (or
##                 nonnegative) get the corresponding specialized type
## threshold     : maximum number of unique integer values still treated as
##                 categorical rather than continuous
## variable_name : name used in messages and in the name-based heuristics
##
## Returns a single character string naming the guessed type.
.guess_type <-
function(y, favor_ordered = TRUE, favor_positive = FALSE, threshold = 5,
         variable_name = deparse(substitute(y))) {
  if(!is.null(dim(y))) stop(paste(variable_name, ": must be a vector"))
  if(is.factor(y)) y <- factor(y) # to drop unused levels
  values <- unique(y)
  values <- sort(values[!is.na(values)]) # unique, non-NA, sorted
  len <- length(values)
  if(len == 0) {
    warning(paste(variable_name, ": cannot infer variable type when all values are NA, guessing 'irrelevant'"))
    type <- "irrelevant"
  }
  # many distinct nonnegative integer values -> a count variable; the
  # is.numeric() guard is required because %% errors on non-numeric data
  # (previously this branch crashed for character vectors with >= 3 values)
  else if( !is.factor(y) && is.numeric(values) &&
           all((values %% 1) == 0) &&
           min(values) >= 0 &&
           len >= 8) type <- "count"
  else if(len == 1) type <- "fixed"
  # names starting with punctuation, or called "id", look like row labels
  else if(grepl("^[[:punct:]]",
                variable_name)) type <- "irrelevant"
  else if(identical("id",
                    tolower(variable_name))) type <- "irrelevant"
  else if(len == 2) {
    if(!is.numeric(values)) type <- "binary"
    else if(all(values ==
                as.integer(values))) type <- "binary"
    else if(favor_positive) {
      if(all(values > 0)) type <- "positive-continuous"
      else if(all(values >= 0)) type <- "nonnegative-continuous"
      else type <- "continuous"
    }
    else type <- "continuous"
  }
  else if(is.ts(y)) { # time-series data are treated as continuous
    if(favor_positive) {
      if(all(values > 0)) type <- "positive-continuous"
      else if(all(values >= 0)) type <- "nonnegative-continuous"
      else type <- "continuous"
    }
    else type <- "continuous"
  }
  else if(is.ordered(y)) type <- "ordered-categorical"
  else if(is.factor(y)) type <- "unordered-categorical"
  else if(is.character(y)) type <- "unordered-categorical"
  else if(is.numeric(y)) {
    # all values within [0, 1] -> a proportion; if exact 0s or 1s occur
    # the semi-continuous variant is used
    if(all(values >= 0) &&
       all(values <= 1)) {
      if(any(values %in% 0:1)) type <- "SC_proportion"
      else type <- "proportion"
    }
    # few distinct integer values -> categorical
    else if(len <= threshold &&
            all(values == as.integer(values)))
      type <- if(favor_ordered) "ordered-categorical" else "unordered-categorical"
    else if(favor_positive) {
      if(all(values > 0)) type <- "positive-continuous"
      else if(all(values >= 0)) type <- "nonnegative-continuous"
      else type <- "continuous"
    }
    else type <- "continuous"
  }
  else stop(paste("cannot infer variable type for", variable_name))
  return(type)
}
## This constructor largely supplants typecast() from earlier versions of mi:
## when no explicit type is supplied, infer one from the data and dispatch again.
setMethod("missing_variable", signature(y = "ANY", type = "missing"), def =
          function(y, favor_ordered = TRUE, favor_positive = FALSE, threshold = 5,
                   variable_name = deparse(substitute(y))) {
            guessed <- .guess_type(y, favor_ordered, favor_positive, threshold, variable_name)
            missing_variable(y = y, type = guessed, variable_name = variable_name)
          })
## When an explicit type name is supplied, instantiate that S4 class directly.
setMethod("missing_variable", signature(y = "ANY", type = "character"), def =
          function(y, type, variable_name = deparse(substitute(y)), ...) {
            new(type, raw_data = y, variable_name = variable_name, ...)
          })
## Build a one-row data.frame summarising a missing_variable for show();
## for semi-continuous variables the indicator variable is appended as an
## extra row via recursion.
.show_helper <-
function(object) {
  n_missing <- object@n_miss
  if(n_missing) {
    fam_spec <- object@family
    if(is.character(fam_spec)) {
      fam <- fam_spec
      link <- NA_character_
    }
    else {
      fam <- fam_spec$family
      link <- fam_spec$link
    }
  }
  else {
    fam <- NA_character_
    link <- NA_character_
  }
  trans <- if(is(object, "continuous")) .parse_trans(object@transformation) else NA_character_
  out <- data.frame(type = class(object), missing = n_missing,
                    method = object@imputation_method,
                    family = fam, link = link, transformation = trans)
  rownames(out) <- object@variable_name
  if(is(object, "semi-continuous")) out <- rbind(out, .show_helper(object@indicator))
  out
}
## Display a missing_variable by printing its summary data.frame.
setMethod("show", signature(object = "missing_variable"), def =
          function(object) {
            print(.show_helper(object))
          })
## setAs methods cause subtle problems with auto-coercion
# setAs(from = "unordered-categorical", to = "ordered-categorical", def =
# function(from) {
# class(from) <- "ordered-categorical"
# return(from)
# })
#
# setAs(from = "ordered-categorical", to = "unordered-categorical", def =
# function(from) {
# class(from) <- "unordered-categorical"
# return(from)
# })
#
# setAs(from = "binary", to = "unordered-categorical", def =
# function(from) {
# stop("not possible or necessary to coerce from 'binary' to 'unordered-categorical'")
# })
# setAs(from = "binary", to = "ordered-categorical", def =
#           function(from) {
#             stop("not possible or necessary to coerce from 'binary' to 'ordered-categorical'")
#           })
# setAs(from = "nonnegative-continuous", to = "continuous", def =
# function(from) {
# mean <- mean(from@raw_data, na.rm = TRUE)
# sd <- sd(from@raw_data, na.rm = TRUE)
# from@transformation <- .standardize_transform
# formals(from@transformation)$mean <- mean
# formals(from@transformation)$sd <- sd
# from@inverse_transformation <- .standardize_transform
# formals(from@inverse_transformation)$mean <- mean
# formals(from@inverse_transformation)$sd <- sd
# formals(from@inverse_transformation)$inverse <- TRUE
# from@data <- from@transformation(from@raw_data)
# class(from) <- "continuous"
# return(from)
# })
#
# setAs(from = "continuous", to = "positive-continuous", def =
# function(from) {
# from@transformation <- log
# from@inverse_transformation <- exp
# from@data <- from@transformation(from@raw_data)
# class(from) <- "positive-continuous"
# validObject(from)
# return(from)
# })
#
## maybe add more methods
## NOTE: If you change something here, look also at the change_type.R file
|
#' apply a gating method to the \code{GatingSet}
#'
#' When interacting with the existing gated data, this function provides the alternative way to interact with the GatingSet
#' by supplying the gating description directly through arguments without the need to write the complete
#' csv gating template.
#'
#' @param gs GatingSet or GatingSetList
#' @param alias,pop,parent,dims,gating_method,gating_args,collapseDataForGating,groupBy,preprocessing_method,preprocessing_args see details in \link[openCyto:gatingTemplate-class]{gatingTemplate}
#' @param strip_extra_quotes \code{logical} Extra quotes are added to strings by fread. This causes problems with parsing R strings to expressions in some cases. Default FALSE for usual behaviour. TRUE should be passed if parsing gating_args fails.
#' @param ... other arguments
#' \itemize{
#'      \item{mc.cores}{ passed to \code{multicore} package for parallel computing}
#'      \item{parallel_type}{ \code{character} specifying the parallel type. The valid options are "none", "multicore", "cluster".}
#'      \item{cl}{ \code{cluster} object passed to \code{parallel} package (when \code{parallel_type} is "cluster")}
#' }
#' @return invisibly returns the one-row template (a \code{data.table}) that was applied
#' @export
#' @examples
#' \dontrun{
#' # add quad gates
#' add_pop(gs, gating_method = "mindensity", dims = "CCR7,CD45RA", parent = "cd4-cd8+", pop = "CCR7+/-CD45RA+/-")
#'
#' # polyfunctional gates (boolean combinations of existing marginal gates)
#' add_pop(gs, gating_method = "polyFunctions", parent = "cd8", gating_args = "cd8/IFNg:cd8/IL2:cd8/TNFa")
#'
#' #boolGate method
#' add_pop(gs, alias = "IL2orIFNg", gating_method = "boolGate", parent = "cd4", gating_args = "cd4/IL2|cd4/IFNg")
#' }
add_pop <- function(gs, alias = "*"
                    , pop = "+"
                    , parent
                    , dims = NA
                    , gating_method
                    , gating_args = NA
                    , collapseDataForGating = NA
                    , groupBy = NA
                    , preprocessing_method = NA
                    , preprocessing_args = NA
                    , strip_extra_quotes = FALSE
                    , ...) {
  # still validate the alias of this new pop before doing any work
  .validity_check_alias(alias)
  # generate the dummy template based on the existing gating hierarchy
  dt <- as.data.table(templateGen(gs[[1]]))
  if(nrow(dt) > 0){
    # Can't use the existing dummy_gate since it is dedicated as dummy_ref gate generated by multiPos entry (alias = '*')
    # which requires the ref node to be explicitly supplied
    dt[, gating_method := "dummy"]
  }
  thisRow <- data.table(alias = alias
                        , pop = pop
                        , parent = parent
                        , dims = dims
                        , gating_method = gating_method
                        , gating_args = gating_args
                        , collapseDataForGating = collapseDataForGating
                        , groupBy = groupBy
                        , preprocessing_method = preprocessing_method
                        , preprocessing_args = preprocessing_args
                        )
  if(nrow(thisRow) > 1)
    stop("Can't add multiple rows!Please make sure each argument is of length 1.")
  # there's a weird bug where rbinding a 0-row dt and a non-zero row dt returns > 4M rows.
  if(nrow(dt) > 0){
    dt <- rbind(dt, thisRow)
  }else{
    dt <- thisRow
  }
  # write the template to a throw-away csv and make sure it gets cleaned up
  # even when parsing or gating fails
  tmp <- tempfile(fileext = ".csv")
  on.exit(unlink(tmp), add = TRUE)
  write.csv(dt, tmp, row.names = FALSE)
  # skip the validity check on the other entries
  # Pass strip_extra_quotes through so gating_args parsing can be controlled
  suppressMessages(gt <- gatingTemplate(tmp, strict = FALSE, strip_extra_quotes = strip_extra_quotes))
  message("...")
  suppressMessages(gating(gt, gs, ...))
  message("done")
  invisible(thisRow)
}
|
/R/add_pop.R
|
no_license
|
biodev/openCyto
|
R
| false
| false
| 3,875
|
r
|
#' apply a gating method to the \code{GatingSet}
#'
#' When interacting with the existing gated data, this function provides the alternative way to interact with the GatingSet
#' by supplying the gating description directly through arguments without the need to write the complete
#' csv gating template.
#'
#' @param gs GatingSet or GatingSetList
#' @param alias,pop,parent,dims,gating_method,gating_args,collapseDataForGating,groupBy,preprocessing_method,preprocessing_args see details in \link[openCyto:gatingTemplate-class]{gatingTemplate}
#' @param strip_extra_quotes \code{logical} Extra quotes are added to strings by fread. This causes problems with parsing R strings to expressions in some cases. Default FALSE for usual behaviour. TRUE should be passed if parsing gating_args fails.
#' @param ... other arguments
#' \itemize{
#'      \item{mc.cores}{ passed to \code{multicore} package for parallel computing}
#'      \item{parallel_type}{ \code{character} specifying the parallel type. The valid options are "none", "multicore", "cluster".}
#'      \item{cl}{ \code{cluster} object passed to \code{parallel} package (when \code{parallel_type} is "cluster")}
#' }
#' @return invisibly returns the one-row template (a \code{data.table}) that was applied
#' @export
#' @examples
#' \dontrun{
#' # add quad gates
#' add_pop(gs, gating_method = "mindensity", dims = "CCR7,CD45RA", parent = "cd4-cd8+", pop = "CCR7+/-CD45RA+/-")
#'
#' # polyfunctional gates (boolean combinations of existing marginal gates)
#' add_pop(gs, gating_method = "polyFunctions", parent = "cd8", gating_args = "cd8/IFNg:cd8/IL2:cd8/TNFa")
#'
#' #boolGate method
#' add_pop(gs, alias = "IL2orIFNg", gating_method = "boolGate", parent = "cd4", gating_args = "cd4/IL2|cd4/IFNg")
#' }
add_pop <- function(gs, alias = "*"
                    , pop = "+"
                    , parent
                    , dims = NA
                    , gating_method
                    , gating_args = NA
                    , collapseDataForGating = NA
                    , groupBy = NA
                    , preprocessing_method = NA
                    , preprocessing_args = NA
                    , strip_extra_quotes = FALSE
                    , ...) {
  # still validate the alias of this new pop before doing any work
  .validity_check_alias(alias)
  # generate the dummy template based on the existing gating hierarchy
  dt <- as.data.table(templateGen(gs[[1]]))
  if(nrow(dt) > 0){
    # Can't use the existing dummy_gate since it is dedicated as dummy_ref gate generated by multiPos entry (alias = '*')
    # which requires the ref node to be explicitly supplied
    dt[, gating_method := "dummy"]
  }
  thisRow <- data.table(alias = alias
                        , pop = pop
                        , parent = parent
                        , dims = dims
                        , gating_method = gating_method
                        , gating_args = gating_args
                        , collapseDataForGating = collapseDataForGating
                        , groupBy = groupBy
                        , preprocessing_method = preprocessing_method
                        , preprocessing_args = preprocessing_args
                        )
  if(nrow(thisRow) > 1)
    stop("Can't add multiple rows!Please make sure each argument is of length 1.")
  # there's a weird bug where rbinding a 0-row dt and a non-zero row dt returns > 4M rows.
  if(nrow(dt) > 0){
    dt <- rbind(dt, thisRow)
  }else{
    dt <- thisRow
  }
  # write the template to a throw-away csv and make sure it gets cleaned up
  # even when parsing or gating fails
  tmp <- tempfile(fileext = ".csv")
  on.exit(unlink(tmp), add = TRUE)
  write.csv(dt, tmp, row.names = FALSE)
  # skip the validity check on the other entries
  # Pass strip_extra_quotes through so gating_args parsing can be controlled
  suppressMessages(gt <- gatingTemplate(tmp, strict = FALSE, strip_extra_quotes = strip_extra_quotes))
  message("...")
  suppressMessages(gating(gt, gs, ...))
  message("done")
  invisible(thisRow)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/de_analysis.R
\name{get_healthy_position}
\alias{get_healthy_position}
\title{To always get the same "fold change direction" we have to set the healthy comparison as first condition
return position of comparison}
\usage{
get_healthy_position(comparison_with_healthy, condition_matrix, disease_pos)
}
\description{
To always get the same "fold change direction" we have to set the healthy comparison as first condition
return position of comparison
}
|
/gToolbox/man/get_healthy_position.Rd
|
no_license
|
aqibrahimbt/BioMarkerAnalysis
|
R
| false
| true
| 528
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/de_analysis.R
\name{get_healthy_position}
\alias{get_healthy_position}
\title{To always get the same "fold change direction" we have to set the healthy comparison as first condition
return position of comparison}
\usage{
get_healthy_position(comparison_with_healthy, condition_matrix, disease_pos)
}
\description{
To always get the same "fold change direction" we have to set the healthy comparison as first condition
return position of comparison
}
|
#' @title Create a Proportional Euler Diagram
#' @description Create a 2-way Proportional Euler Diagram
#' @param mytab1 First table with objects and effect sizes to compare
#' @param mytab2 Second table with objects and effect sizes to compare
#' @param titletext Plot Title, Default: ''
#' @param legendtext Plot legend, default is the names of the input tables, if set must be a character vector of length 2, Default: NULL
#' @param plotteIntermediates for debug only, plot intermediate venn plots, Default: FALSE
#' @param createSimplifiedLabels show only Number of objects with identical direction in label of the intersect, Default: FALSE
#' @param object_colname Column name in table mytab1 with the objects to compare, Default: 'object'
#' @param direction_colname Column name in table mytab2 with the objects to compare, Default: 'direction'
#' @param increasingCutoff cutoff of same size, typical 0 for standard metric effect sizes or 1 for Foldchanges, Default: 0
#' @return Returns a plot and the code to generate the plot
#' @details DETAILS
#' @examples
#' #EXAMPLE1
#' library(eulerr)
#'library(toolboxH)
#'
#'tab1 = data.table(object = letters[1:10], direction = rnorm(10))
#'tab1
#'tab2 = data.table(object = letters[5:25], direction = rnorm(21))
#'tab2
#'
#'plottext = makeEuler(tab1 , tab2 , titletext = "Yes, we can Make a Title great again")
#' # savePlotInMultiFormats("exampleplot", 4, 3, plotecode = plottext)
#' @rdname makeEuler
#' @export
#'
#'
makeEuler = function(mytab1,
                     mytab2,
                     titletext = "",
                     legendtext = NULL,
                     plotteIntermediates = FALSE,
                     createSimplifiedLabels = FALSE,
                     object_colname = 'object',
                     direction_colname = 'direction',
                     increasingCutoff = 0) {
  # default legend: the names of the two input tables as supplied by the caller
  if(is.null(legendtext)) legendtext = c(deparse(substitute(mytab1)), deparse(substitute(mytab2)))
  # work on copies so the caller's data.tables are not modified by reference
  mytab1 = copy(mytab1)
  mytab2 = copy(mytab2)
  mytab1[, object := get(object_colname)]
  mytab1[, direction := get(direction_colname)]
  mytab2[, object := get(object_colname)]
  mytab2[, direction := get(direction_colname)]
  # split each table into all / increasing / decreasing objects
  tab1_all = mytab1[,object]
  tab1_inc = mytab1[direction >increasingCutoff ,object]
  tab1_dec = mytab1[direction <increasingCutoff ,object]
  qlist1 = venn3(tab1_all, tab1_dec, tab1_inc, plotte = plotteIntermediates)
  tab2_all = mytab2[,object]
  tab2_inc = mytab2[direction >increasingCutoff ,object]
  tab2_dec = mytab2[direction <increasingCutoff ,object]
  qlist2 = venn3(tab2_all, tab2_dec, tab2_inc, plotte = plotteIntermediates)
  qall = venn2(tab1_all,tab2_all, plotte = plotteIntermediates)
  if(isTRUE(createSimplifiedLabels)) {
    # simplified labels: only counts of objects changed in the same direction
    # in both tables (9650 / 9660 are the unicode up / down triangles)
    qinc = venn2(tab1_inc,tab2_inc, plotte = plotteIntermediates)
    qdec = venn2(tab1_dec,tab2_dec, plotte = plotteIntermediates)
    label1 = paste0(intToUtf8(9650), qinc$q2 %>% length(), "  \n", intToUtf8(9660),qdec$q2 %>% length(), "  ")
    label2 = paste0(intToUtf8(9650),qinc$q3 %>% length(), "\n",intToUtf8(9660), qdec$q3 %>% length())
    label3 = paste0(intToUtf8(9650),qinc$q1 %>% length(), "\n",intToUtf8(9660), qdec$q1 %>% length())
  } else {
    # full labels: count every up/down combination within the intersect
    # str(qall)
    only1_inc = sum(qall$q2 %in% tab1_inc)
    only1_dec = sum(qall$q2 %in% tab1_dec)
    both_inc1_inc2 = sum(qall$q1 %in% tab1_inc & qall$q1 %in% tab2_inc)
    both_dec1_dec2 = sum(qall$q1 %in% tab1_dec & qall$q1 %in% tab2_dec)
    both_inc1_dec2 = sum(qall$q1 %in% tab1_inc & qall$q1 %in% tab2_dec)
    both_dec1_inc2 = sum(qall$q1 %in% tab1_dec & qall$q1 %in% tab2_inc)
    only2_inc = sum(qall$q3 %in% tab2_inc)
    only2_dec = sum(qall$q3 %in% tab2_dec)
    # qcompl = venn4(tab1_inc,tab2_inc, tab1_dec,tab2_dec, plotte = plotte)
    label1 = paste0(intToUtf8(9650), only1_inc, "  \n", intToUtf8(9660),only1_dec, "  ")
    label3 = paste0("   ",intToUtf8(9650),intToUtf8(9650),both_inc1_inc2, " (",intToUtf8(9650),intToUtf8(9660),both_inc1_dec2, ")", "\n   ",intToUtf8(9660),intToUtf8(9660), both_dec1_dec2, " (", intToUtf8(9660),intToUtf8(9650), both_dec1_inc2, ")")
    label2 = paste0(intToUtf8(9650),only2_inc, "\n",intToUtf8(9660), only2_dec)
  }
  # the fill colors are embedded directly in the code string so that the
  # returned plottext is self-contained and reproducible by the caller
  plottext = paste0('print(eulerr:::plot.euler(eulerr::euler(list(with = tab1_all, without = tab2_all), shape = "circle"), quantities = c(label1,label2,label3), fills = c(alpha("darkslateblue",0.8), alpha("lightblue", 0.8)), main = titletext, labels = F, legend=list(labels = legendtext)))') # legendGrob fuer legend
  # NOTE(review): eval(parse(...)) is normally an anti-pattern, but here the
  # code string itself is the documented return value, so it is kept on purpose
  eval(parse(text = plottext))
  plottext
}
|
/R/makeEuler.R
|
no_license
|
holgerman/toolboxH
|
R
| false
| false
| 4,630
|
r
|
#' @title Create a Proportional Euler Diagram
#' @description Create a 2-way Proportional Euler Diagram
#' @param mytab1 First table with objects and effect sizes to compare
#' @param mytab2 Second table with objects and effect sizes to compare
#' @param titletext Plot Title, Default: ''
#' @param legendtext Plot legend, default is the names of the input tables, if set must be a character vector of length 2, Default: NULL
#' @param plotteIntermediates for debug only, plot intermediate venn plots, Default: FALSE
#' @param createSimplifiedLabels show only Number of objects with identical direction in label of the intersect, Default: FALSE
#' @param object_colname Column name in table mytab1 with the objects to compare, Default: 'object'
#' @param direction_colname Column name in table mytab2 with the objects to compare, Default: 'direction'
#' @param increasingCutoff cutoff of same size, typical 0 for standard metric effect sizes or 1 for Foldchanges, Default: 0
#' @return Returns a plot and the code to generate the plot
#' @details DETAILS
#' @examples
#' #EXAMPLE1
#' library(eulerr)
#'library(toolboxH)
#'
#'tab1 = data.table(object = letters[1:10], direction = rnorm(10))
#'tab1
#'tab2 = data.table(object = letters[5:25], direction = rnorm(21))
#'tab2
#'
#'plottext = makeEuler(tab1 , tab2 , titletext = "Yes, we can Make a Title great again")
#' # savePlotInMultiFormats("exampleplot", 4, 3, plotecode = plottext)
#' @rdname makeEuler
#' @export
#'
#'
makeEuler = function(mytab1,
                     mytab2,
                     titletext = "",
                     legendtext = NULL,
                     plotteIntermediates = FALSE,
                     createSimplifiedLabels = FALSE,
                     object_colname = 'object',
                     direction_colname = 'direction',
                     increasingCutoff = 0) {
  # default legend: the names of the two input tables as supplied by the caller
  if(is.null(legendtext)) legendtext = c(deparse(substitute(mytab1)), deparse(substitute(mytab2)))
  # work on copies so the caller's data.tables are not modified by reference
  mytab1 = copy(mytab1)
  mytab2 = copy(mytab2)
  mytab1[, object := get(object_colname)]
  mytab1[, direction := get(direction_colname)]
  mytab2[, object := get(object_colname)]
  mytab2[, direction := get(direction_colname)]
  # split each table into all / increasing / decreasing objects
  tab1_all = mytab1[,object]
  tab1_inc = mytab1[direction >increasingCutoff ,object]
  tab1_dec = mytab1[direction <increasingCutoff ,object]
  qlist1 = venn3(tab1_all, tab1_dec, tab1_inc, plotte = plotteIntermediates)
  tab2_all = mytab2[,object]
  tab2_inc = mytab2[direction >increasingCutoff ,object]
  tab2_dec = mytab2[direction <increasingCutoff ,object]
  qlist2 = venn3(tab2_all, tab2_dec, tab2_inc, plotte = plotteIntermediates)
  qall = venn2(tab1_all,tab2_all, plotte = plotteIntermediates)
  if(isTRUE(createSimplifiedLabels)) {
    # simplified labels: only counts of objects changed in the same direction
    # in both tables (9650 / 9660 are the unicode up / down triangles)
    qinc = venn2(tab1_inc,tab2_inc, plotte = plotteIntermediates)
    qdec = venn2(tab1_dec,tab2_dec, plotte = plotteIntermediates)
    label1 = paste0(intToUtf8(9650), qinc$q2 %>% length(), "  \n", intToUtf8(9660),qdec$q2 %>% length(), "  ")
    label2 = paste0(intToUtf8(9650),qinc$q3 %>% length(), "\n",intToUtf8(9660), qdec$q3 %>% length())
    label3 = paste0(intToUtf8(9650),qinc$q1 %>% length(), "\n",intToUtf8(9660), qdec$q1 %>% length())
  } else {
    # full labels: count every up/down combination within the intersect
    # str(qall)
    only1_inc = sum(qall$q2 %in% tab1_inc)
    only1_dec = sum(qall$q2 %in% tab1_dec)
    both_inc1_inc2 = sum(qall$q1 %in% tab1_inc & qall$q1 %in% tab2_inc)
    both_dec1_dec2 = sum(qall$q1 %in% tab1_dec & qall$q1 %in% tab2_dec)
    both_inc1_dec2 = sum(qall$q1 %in% tab1_inc & qall$q1 %in% tab2_dec)
    both_dec1_inc2 = sum(qall$q1 %in% tab1_dec & qall$q1 %in% tab2_inc)
    only2_inc = sum(qall$q3 %in% tab2_inc)
    only2_dec = sum(qall$q3 %in% tab2_dec)
    # qcompl = venn4(tab1_inc,tab2_inc, tab1_dec,tab2_dec, plotte = plotte)
    label1 = paste0(intToUtf8(9650), only1_inc, "  \n", intToUtf8(9660),only1_dec, "  ")
    label3 = paste0("   ",intToUtf8(9650),intToUtf8(9650),both_inc1_inc2, " (",intToUtf8(9650),intToUtf8(9660),both_inc1_dec2, ")", "\n   ",intToUtf8(9660),intToUtf8(9660), both_dec1_dec2, " (", intToUtf8(9660),intToUtf8(9650), both_dec1_inc2, ")")
    label2 = paste0(intToUtf8(9650),only2_inc, "\n",intToUtf8(9660), only2_dec)
  }
  # the fill colors are embedded directly in the code string so that the
  # returned plottext is self-contained and reproducible by the caller
  plottext = paste0('print(eulerr:::plot.euler(eulerr::euler(list(with = tab1_all, without = tab2_all), shape = "circle"), quantities = c(label1,label2,label3), fills = c(alpha("darkslateblue",0.8), alpha("lightblue", 0.8)), main = titletext, labels = F, legend=list(labels = legendtext)))') # legendGrob fuer legend
  # NOTE(review): eval(parse(...)) is normally an anti-pattern, but here the
  # code string itself is the documented return value, so it is kept on purpose
  eval(parse(text = plottext))
  plottext
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{model_log_likelihood}
\alias{model_log_likelihood}
\alias{bpr_log_likelihood}
\alias{bpr_gradient}
\alias{betareg_log_likelihood}
\alias{betareg_gradient}
\alias{sum_weighted_bpr_lik}
\alias{sum_weighted_bpr_grad}
\alias{sum_weighted_betareg_lik}
\alias{sum_weighted_betareg_grad}
\title{model_log_likelihood}
\usage{
bpr_log_likelihood(w, X, H, lambda, is_nll)
bpr_gradient(w, X, H, lambda, is_nll)
betareg_log_likelihood(w, X, H, lambda, is_nll)
betareg_gradient(w, X, H, lambda, is_nll)
sum_weighted_bpr_lik(w, X_list, H_list, r_nk, lambda, is_nll)
sum_weighted_bpr_grad(w, X_list, H_list, r_nk, lambda, is_nll)
sum_weighted_betareg_lik(w, X_list, H_list, r_nk, lambda, is_nll)
sum_weighted_betareg_grad(w, X_list, H_list, r_nk, lambda, is_nll)
}
\arguments{
\item{w}{a vector}
\item{X}{a matrix}
\item{H}{a matrix}
\item{lambda}{a number}
\item{is_nll}{boolean}
\item{X_list}{a list}
\item{H_list}{a list}
\item{r_nk}{a vector}
}
\description{
model_log_likelihood
}
|
/man/model_log_likelihood.Rd
|
permissive
|
geneprophet/BSDMR
|
R
| false
| true
| 1,083
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{model_log_likelihood}
\alias{model_log_likelihood}
\alias{bpr_log_likelihood}
\alias{bpr_gradient}
\alias{betareg_log_likelihood}
\alias{betareg_gradient}
\alias{sum_weighted_bpr_lik}
\alias{sum_weighted_bpr_grad}
\alias{sum_weighted_betareg_lik}
\alias{sum_weighted_betareg_grad}
\title{model_log_likelihood}
\usage{
bpr_log_likelihood(w, X, H, lambda, is_nll)
bpr_gradient(w, X, H, lambda, is_nll)
betareg_log_likelihood(w, X, H, lambda, is_nll)
betareg_gradient(w, X, H, lambda, is_nll)
sum_weighted_bpr_lik(w, X_list, H_list, r_nk, lambda, is_nll)
sum_weighted_bpr_grad(w, X_list, H_list, r_nk, lambda, is_nll)
sum_weighted_betareg_lik(w, X_list, H_list, r_nk, lambda, is_nll)
sum_weighted_betareg_grad(w, X_list, H_list, r_nk, lambda, is_nll)
}
\arguments{
\item{w}{a vector}
\item{X}{a matrix}
\item{H}{a matrix}
\item{lambda}{a number}
\item{is_nll}{boolean}
\item{X_list}{a list}
\item{H_list}{a list}
\item{r_nk}{a vector}
}
\description{
model_log_likelihood
}
|
# Exercise 4: Creating and operating on vectors

# Create a vector `names` that contains your name and the names of 2 people next to you.
# (note: `<-` is the conventional R assignment operator, not `=`)
names <- c("Jayanth", "Amogh", "Somesh")

# Use the colon operator : to create a vector `n` of numbers from 10:49
n <- 10:49
print(n)

# Use `length()` to get the number of elements in `n`
number <- length(n)

# Create a vector `m` that contains the numbers 10 to 1. Hint: use the `seq()` function
m <- seq(10, 1)
print(m)

# Subtract `m` FROM `n`. Note the recycling!
o <- n - m
print(o)

# Use the `seq()` function to produce a range of numbers from -5 to 10 in `.1` increments.
# Store it in a variable `x`

# Create a vector `sin.wave` by calling the `sin()` function on each element in `x`.

# Create a vector `cos.wave` by calling the `cos()` function on each element in `x`.

# Create a vector `wave` by multiplying `sin.wave` and `cos.wave` together, then adding `sin.wave`

# Use the `plot()` function to plot your `wave`!
|
/exercise-4/exercise.R
|
permissive
|
amoghdave/module7-vectors
|
R
| false
| false
| 956
|
r
|
# Exercise 4: Creating and operating on vectors

# Create a vector `names` that contains your name and the names of 2 people next to you.
# (note: `<-` is the conventional R assignment operator, not `=`)
names <- c("Jayanth", "Amogh", "Somesh")

# Use the colon operator : to create a vector `n` of numbers from 10:49
n <- 10:49
print(n)

# Use `length()` to get the number of elements in `n`
number <- length(n)

# Create a vector `m` that contains the numbers 10 to 1. Hint: use the `seq()` function
m <- seq(10, 1)
print(m)

# Subtract `m` FROM `n`. Note the recycling!
o <- n - m
print(o)

# Use the `seq()` function to produce a range of numbers from -5 to 10 in `.1` increments.
# Store it in a variable `x`

# Create a vector `sin.wave` by calling the `sin()` function on each element in `x`.

# Create a vector `cos.wave` by calling the `cos()` function on each element in `x`.

# Create a vector `wave` by multiplying `sin.wave` and `cos.wave` together, then adding `sin.wave`

# Use the `plot()` function to plot your `wave`!
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bgee.R
\docType{class}
\name{Bgee-class}
\alias{Bgee}
\alias{Bgee-class}
\title{Retrieving the Bgee database data}
\value{
\itemize{
\item{A \code{get_annotation()} list, lists the annotation of experiments for chosen species.}
\item{A \code{get_data()}, if empty returns a list of experiments, if chosen experiment ID, then returns the dataframe of the chosen experiment; for chosen species}
\item{A \code{format_data()}, transforms the data into matrix of expression values, e.g. RPKMs or raw counts}}
}
\description{
A Reference Class to give annotation available on Bgee for particular species and the requested data (rna_seq, affymetrix)
}
\details{
The expression calls come from Bgee (http://r.bgee.org), that integrates different expression data types (RNA-seq, Affymetrix microarray, ESTs, or in-situ hybridizations) in multiple animal species. Expression patterns are based exclusively on curated "normal", healthy, expression data (e.g., no gene knock-out, no treatment, no disease), to provide a reference of normal gene expression.
This Class retrieves the annotation of all experiments in the Bgee database (get_annotation), downloads the data (get_data), and formats the data into an expression matrix (format_data). See examples and vignette.
}
\section{Fields}{
\describe{
\item{\code{species}}{A character of species name as listed from Bgee. The species are:
\itemize{
\item{"Anolis_carolinensis"}
\item{"Bos_taurus"}
\item{"Caenorhabditis_elegans"}
\item{"Danio_rerio"}
\item{"Drosophila_melanogaster"}
\item{"Gallus_gallus"}
\item{"Gorilla_gorilla"}
\item{"Homo_sapiens"}
\item{"Macaca_mulatta"}
\item{"Monodelphis_domestica"}
\item{"Mus_musculus"}
\item{"Ornithorhynchus_anatinus"}
\item{"Pan_paniscus"}
\item{"Pan_troglodytes"}
\item{"Rattus_norvegicus"}
\item{"Sus_scrofa"}
\item{"Xenopus_tropicalis"}}
Homo sapiens is default species.}
\item{\code{datatype}}{A character of data platform. Two types of datasets can be downloaded:
\itemize{
\item{"rna_seq"}
\item{"affymetrix"}}
By default, RNA-seq data is retrieved from database.}
\item{\code{experiment.id}}{A character.
On default is NULL: takes all available data for that species.
If GSE[0-9]+: takes specified experiment, eg. GSE30617.}
\item{\code{data}}{A dataframe of downloaded Bgee data.}
\item{\code{calltype}}{A character. There exist two types of expression calls in Bgee - present and absent.
\itemize{
\item{"expressed"}
\item{"all"}}
User can retrieve only expressed (present) calls, or mixed (present and absent) calls. The default is expressed (present) calltype.}
\item{\code{stats}}{A character. The expression values can be retrieved in RPKMs and raw counts:
\itemize{
\item{"rpkm"}
\item{"counts"}}
The default is RPKMs.}
}}
\examples{
{
bgee <- Bgee$new(species = "Mus_musculus", datatype = "rna_seq")
annotation_bgee_mouse <- bgee$get_annotation()
data_bgee_mouse <- bgee$get_data()
data_bgee_mouse_gse30617 <- bgee$get_data(experiment.id = "GSE30617")
gene.expression.mouse.rpkm <- bgee$format_data(data_bgee_mouse_gse30617,
calltype = "expressed", stats = "rpkm")
}
}
\author{
Andrea Komljenovic \email{andrea.komljenovic at unil.ch}.
}
|
/2016-SIB/data/rnaseq/BgeeDB-master/man/Bgee-class.Rd
|
no_license
|
wurmlab/genomicscourse
|
R
| false
| true
| 3,305
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bgee.R
\docType{class}
\name{Bgee-class}
\alias{Bgee}
\alias{Bgee-class}
\title{Retrieving the Bgee database data}
\value{
\itemize{
\item{A \code{get_annotation()} list, lists the annotation of experiments for chosen species.}
\item{A \code{get_data()}, if empty returns a list of experiments, if chosen experiment ID, then returns the dataframe of the chosen experiment; for chosen species}
\item{A \code{format_data()}, transforms the data into matrix of expression values, e.g. RPKMs or raw counts}}
}
\description{
A Reference Class to give annotation available on Bgee for particular species and the requested data (rna_seq, affymetrix)
}
\details{
The expression calls come from Bgee (http://r.bgee.org), that integrates different expression data types (RNA-seq, Affymetrix microarray, ESTs, or in-situ hybridizations) in multiple animal species. Expression patterns are based exclusively on curated "normal", healthy, expression data (e.g., no gene knock-out, no treatment, no disease), to provide a reference of normal gene expression.
This Class retrieves the annotation of all experiments in the Bgee database (get_annotation), downloads the data (get_data), and formats the data into an expression matrix (format_data). See examples and vignette.
}
\section{Fields}{
\describe{
\item{\code{species}}{A character of species name as listed from Bgee. The species are:
\itemize{
\item{"Anolis_carolinensis"}
\item{"Bos_taurus"}
\item{"Caenorhabditis_elegans"}
\item{"Danio_rerio"}
\item{"Drosophila_melanogaster"}
\item{"Gallus_gallus"}
\item{"Gorilla_gorilla"}
\item{"Homo_sapiens"}
\item{"Macaca_mulatta"}
\item{"Monodelphis_domestica"}
\item{"Mus_musculus"}
\item{"Ornithorhynchus_anatinus"}
\item{"Pan_paniscus"}
\item{"Pan_troglodytes"}
\item{"Rattus_norvegicus"}
\item{"Sus_scrofa"}
\item{"Xenopus_tropicalis"}}
Homo sapiens is default species.}
\item{\code{datatype}}{A character of data platform. Two types of datasets can be downloaded:
\itemize{
\item{"rna_seq"}
\item{"affymetrix"}}
By default, RNA-seq data is retrieved from database.}
\item{\code{experiment.id}}{A character.
On default is NULL: takes all available data for that species.
If GSE[0-9]+: takes specified experiment, eg. GSE30617.}
\item{\code{data}}{A dataframe of downloaded Bgee data.}
\item{\code{calltype}}{A character. There exist two types of expression calls in Bgee - present and absent.
\itemize{
\item{"expressed"}
\item{"all"}}
User can retrieve only expressed (present) calls, or mixed (present and absent) calls. The default is expressed (present) calltype.}
\item{\code{stats}}{A character. The expression values can be retrieved in RPKMs and raw counts:
\itemize{
\item{"rpkm"}
\item{"counts"}}
The default is RPKMs.}
}}
\examples{
{
bgee <- Bgee$new(species = "Mus_musculus", datatype = "rna_seq")
annotation_bgee_mouse <- bgee$get_annotation()
data_bgee_mouse <- bgee$get_data()
data_bgee_mouse_gse30617 <- bgee$get_data(experiment.id = "GSE30617")
gene.expression.mouse.rpkm <- bgee$format_data(data_bgee_mouse_gse30617,
calltype = "expressed", stats = "rpkm")
}
}
\author{
Andrea Komljenovic \email{andrea.komljenovic at unil.ch}.
}
|
#' Filters loci that show significant departure from Hardy-Weinberg Equilibrium
#'
#' Calculates the probabilities of agreement with H-W equilibrium based on observed
#' frequencies of reference homozygotes, heterozygotes and alternate homozygotes.
#' Uses the exact calculations contained in function prob.hwe() as developed by
#' Wigginton, JE, Cutler, DJ, and Abecasis, GR.
#'
#' Input is a genlight {adegenet} object containing SNP genotypes (0 homozygous for reference SNP,
#' 1 heterozygous, 2 homozygous for alternate SNP).
#'
#' Loci are filtered if they show HWE departure in any one population.
#' Note that power to detect departures from HWE is affected by sample size and that
#' effective filtering may require substantial sample sizes (n > 20).
#'
#' @param x -- a genlight object containing the SNP genotypes [Required]
#' @param alpha -- level of significance (per locus) [Default 0.05]
#' @param basis -- basis for filtering out loci (any, HWE departure in any one population) [default basis="any"]
#' @param bon -- apply bonferroni correction to significance levels for filtering [default TRUE]
#' @param v -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2]
#' @return a genlight object with the loci departing significantly from HWE removed
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @export
#' @examples
#' list <- gl.filter.hwe(testset.gl, 0.05, bon=TRUE)
gl.filter.hwe <- function(x, alpha=0.05, basis="any", bon=TRUE, v=2) {

  # ---------- Error checking ----------
  # Require a genlight object; inherits() is robust to subclassed objects,
  # unlike a literal class(x) != "genlight" comparison.
  if (!inherits(x, "genlight")) {
    cat("Fatal Error: genlight object required!\n"); stop()
  }
  if (v > 0) {
    cat("Starting gl.filter.hwe: Filtering on departure from HWE\n")
  }
  # alpha is a per-locus significance level, so it must be a number in [0, 1].
  if (alpha < 0 | alpha > 1){
    cat(" Warning: level of significance per locus alpha must be a number between 0 and 1, set to 0.05\n")
    alpha <- 0.05
  }
  # Only basis "any" (drop a locus if it departs from HWE in any one
  # population) is implemented; silently coerce anything else.
  if (basis != "any"){
    cat(" Warning: basis of assessment must be by population, other options not yet implemented, set to \'any\'\n")
    basis <- "any"
  }

  # ---------- Computation ----------
  # Split the genlight object into one genlight per population.
  poplist <- seppop(x)

  # Run the exact HWE test (utils.hwe) population by population and pool the
  # per-locus results, tagging each row with the population it came from.
  # Building the pieces first and rbind-ing once avoids the O(n^2) pattern of
  # growing a data frame inside a loop.
  result <- do.call(rbind, lapply(seq_along(poplist), function(i) {
    r <- utils.hwe(poplist[[i]], prob = alpha)
    cbind(Population = rep(names(poplist)[i], nrow(r)), r)
  }))

  # Keep only the rows (locus x population) that fail the HWE test.
  rprob <- as.numeric(as.character(result$Prob))
  if (bon) {
    # Bonferroni-corrected: utils.hwe flags experiment-wide significance with "*".
    result <- result[result$Bonsig=="*",]
  } else {
    # Uncorrected: per-locus p-value in (0, alpha].
    result <- result[(rprob>0 & rprob<=alpha),]
  }
  failed.loci <- as.character(unique(result$Locus))

  # ---------- Reporting ----------
  if (v >= 2){
    cat("Loci examined:", nLoc(x),"\n")
    if (bon) {
      cat(" Deleted",length(failed.loci),"loci with significant departure from HWE, bonferroni corrected, at experiment-wide alpha =",alpha,"\n")
    } else {
      cat(" Deleted",length(failed.loci),"loci with significant departure from HWE at alpha =",alpha,"applied locus by locus\n")
    }
  }

  # Drop the failed loci from the genlight object and return it.
  x <- x[, !locNames(x) %in% failed.loci]
  if (v >= 2){
    cat(" Loci retained:", nLoc(x),"\n")
  }
  if (v > 0) {
    cat("gl.filter.hwe completed\n")
  }

  return(x)
}
|
/R/gl.filter.hwe.r
|
no_license
|
hakancengiz1/dartR
|
R
| false
| false
| 3,461
|
r
|
#' Filters loci that show significant departure from Hardy-Weinberg Equilibrium
#'
#' Calculates the probabilities of agreement with H-W equilibrium based on observed
#' frequencies of reference homozygotes, heterozygotes and alternate homozygotes.
#' Uses the exact calculations contained in function prob.hwe() as developed by
#' Wigginton, JE, Cutler, DJ, and Abecasis, GR.
#'
#' Input is a genlight {adegenet} object containing SNP genotypes (0 homozygous for reference SNP,
#' 1 heterozygous, 2 homozygous for alternate SNP).
#'
#' Loci are filtered if they show HWE departure in any one population.
#' Note that power to detect departures from HWE is affected by sample size and that
#' effective filtering may require substantial sample sizes (n > 20).
#'
#' @param x -- a genlight object containing the SNP genotypes [Required]
#' @param alpha -- level of significance (per locus) [Default 0.05]
#' @param basis -- basis for filtering out loci (any, HWE departure in any one population) [default basis="any"]
#' @param bon -- apply bonferroni correction to significance levels for filtering [default TRUE]
#' @param v -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2]
#' @return a genlight object with the loci departing significantly from HWE removed
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @export
#' @examples
#' list <- gl.filter.hwe(testset.gl, 0.05, bon=TRUE)
# Filters loci that depart significantly from Hardy-Weinberg Equilibrium.
# Tests each population separately (via utils.hwe) and removes any locus that
# fails in at least one population. Returns the pruned genlight object.
gl.filter.hwe <- function(x, alpha=0.05, basis="any", bon=TRUE, v=2) {
# ERROR CHECKING
if(class(x)!="genlight") {
cat("Fatal Error: genlight object required!\n"); stop()
}
if (v > 0) {
cat("Starting gl.filter.hwe: Filtering on departure from HWE\n")
}
# alpha is a per-locus significance level; reset out-of-range values.
if (alpha < 0 | alpha > 1){
cat(" Warning: level of significance per locus alpha must be an integer between 0 and 1, set to 0.05\n")
alpha <- 0.05
}
# Only basis "any" (fail in any one population) is implemented.
if (basis != "any"){
cat(" Warning: basis of assessment must be by population, other options not yet implemented, set to \'any\'\n")
basis <- "any"
}
if (basis == "any") {
# Split the gl object into a list of populations
poplist <- seppop(x)
}
# Run the exact HWE test per population; pool all per-locus results into
# one data frame, tagging each row with its source population.
count <- 0
for (i in poplist) {
count <- count + 1
if (count==1) {
result <- utils.hwe(i, prob=alpha)
Population <- rep(names(poplist)[count],nrow(result))
result <- cbind(Population,result)
} else {
r <- utils.hwe(i, prob=alpha)
Population <- rep(names(poplist)[count],nrow(r))
r <- cbind(Population,r)
result <- rbind(result, r)
}
}
# Keep only the (locus x population) rows that fail the test.
rprob <- as.numeric(as.character(result$Prob))
if (bon==TRUE) {
# Bonferroni-corrected: utils.hwe flags experiment-wide significance with "*".
result <- result[result$Bonsig=="*",]
} else {
# Uncorrected: per-locus p-value in (0, alpha].
result <- result[(rprob>0 & rprob<=alpha),]
}
failed.loci <- as.character(unique(result$Locus))
if (v >= 2){
cat("Loci examined:", nLoc(x),"\n")
if (bon) {
cat(" Deleted",length(failed.loci),"loci with significant departure from HWE, bonferroni corrected, at experiment-wide alpha =",alpha,"\n")
} else {
cat(" Deleted",length(failed.loci),"loci with significant departure from HWE at alpha =",alpha,"applied locus by locus\n")
}
}
# Drop the failed loci from the genlight object.
x <- x[,!locNames(x) %in% failed.loci]
if (v >= 2){
cat(" Loci retained:",nLoc(x),"\n")
}
if ( v > 0) {cat("gl.filter.hwe completed\n")}
# Return the result
return(x)
}
|
# Unit tests: behaviour of srvyr's survey_mean()/survey_total() when grouping
# by factor (and character) variables, checked against the equivalent
# survey-package calls.
context("Quick tests for survey factors")
library(srvyr)
library(survey)
# utilities.R supplies test helpers (e.g. expect_df_equal) -- not in view here.
source("utilities.R")
data(api)
# Stratified design on the api stratified sample: strata = school type,
# weights = pw.
dstrata <- apistrat %>%
as_survey_design(strata = stype, weights = pw)
# One group
# Reference values computed directly with the survey package.
out_survey <- svymean(~awards, dstrata)
out_srvyr <- dstrata %>%
group_by(awards) %>%
summarize(pct = survey_mean())
# Compare (estimate, SE) for the first level against srvyr's grouped output.
test_that(
"survey_mean gets correct values for factors with single grouped surveys",
expect_equal(c(out_survey[[1]], sqrt(diag(attr(out_survey, "var")))[[1]]),
c(out_srvyr[[2]][[1]], out_srvyr[[3]][[1]])))
test_that("survey_mean preserves factor levels",
expect_equal(levels(apistrat$awards), levels(out_srvyr$awards)))
# Grouping by a character (not factor) column should keep it character.
out_srvyr <- dstrata %>%
group_by(awards = as.character(awards)) %>%
summarize(pct = survey_mean())
test_that("survey_mean preserves factor levels",
expect_equal("character", class(out_srvyr$awards)))
# More than 2 groups
out_srvyr <- dstrata %>%
group_by(stype, awards) %>%
summarize(tot = survey_total())
out_survey <- svyby(~awards, ~stype, dstrata, svytotal)
# Multi-group totals: estimates and standard errors must match svyby().
test_that("survey_total is correct when doing props with multiple groups",
expect_equal(out_survey[["awardsNo"]],
out_srvyr %>% filter(awards == "No") %>% .$tot))
# Both sides here are numeric scalars, so compare them with expect_equal(),
# consistent with the analogous special-characters SE test later in this file
# (expect_df_equal is meant for data frames).
test_that("survey_total is correct when doing props with multiple groups (se)",
expect_equal(out_survey[["se.awardsNo"]],
out_srvyr %>%
filter(awards == "No") %>%
.$tot_se))
# Preserves factor orderings and character
# Releveling before grouping: the (single) grouping column must keep both the
# factor class and the custom level order H, E, M.
out_srvyr <- dstrata %>%
mutate(stype2 = relevel(stype, "H")) %>%
group_by(stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves factor levels when calculating a statistic (1 grp)",
expect_true(class(out_srvyr$stype2) == "factor" &
all(levels(out_srvyr$stype2) == c("H", "E", "M")))
)
# Same check for a character grouping column.
out_srvyr <- dstrata %>%
mutate(stype2 = as.character(stype)) %>%
group_by(stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves character when calculating a statistic (1 grp)",
expect_true(class(out_srvyr$stype2) == "character")
)
# Repeat both checks with two grouping variables (awards + stype2).
out_srvyr <- dstrata %>%
mutate(stype2 = relevel(stype, "H")) %>%
group_by(awards, stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves factor levels when calculating a statistic (multi grps)",
expect_true(class(out_srvyr$stype2) == "factor" &
all(levels(out_srvyr$stype2) == c("H", "E", "M")))
)
out_srvyr <- dstrata %>%
mutate(stype2 = as.character(stype)) %>%
group_by(awards, stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves character when calculating a statistic (multi grps)",
expect_true(class(out_srvyr$stype2) == "character")
)
# confidence intervals
# Build the expected tibble (mean/total with t-based CIs from the survey
# package) and compare against vartype = "ci" output from srvyr.
out_survey_mn <- svymean(~awards, dstrata)
out_survey_tot <- svytotal(~awards, dstrata)
out_survey <- tibble::tibble(
awards = factor(c("No", "Yes")),
pct = as.numeric(out_survey_mn),
pct_low = as.numeric(confint(out_survey_mn, df = degf(dstrata))[, 1]),
pct_upp = as.numeric(confint(out_survey_mn, df = degf(dstrata))[, 2]),
tot = as.numeric(out_survey_tot),
tot_low = as.numeric(confint(out_survey_tot, df = degf(dstrata))[, 1]),
tot_upp = as.numeric(confint(out_survey_tot, df = degf(dstrata))[, 2])
)
out_srvyr <- dstrata %>%
group_by(awards) %>%
summarize(pct = survey_mean(vartype = "ci"),
tot = survey_total(vartype = "ci"))
test_that(
"survey_mean and survey_total work with cis",
expect_df_equal(out_srvyr, out_survey))
# One group
# NOTE(review): this repeats the one-group checks from the top of the file
# verbatim -- possibly an intentional re-run after the CI section, but it may
# simply be duplicated; confirm intent before removing.
out_survey <- svymean(~awards, dstrata)
out_srvyr <- dstrata %>%
group_by(awards) %>%
summarize(pct = survey_mean())
test_that(
"survey_mean gets correct values for factors with single grouped surveys",
expect_equal(c(out_survey[[1]], sqrt(diag(attr(out_survey, "var")))[[1]]),
c(out_srvyr[[2]][[1]], out_srvyr[[3]][[1]])))
test_that("survey_mean preserves factor levels",
expect_equal(levels(apistrat$awards), levels(out_srvyr$awards)))
## Special characters in peel
# Grouping values containing regex/backslash metacharacters must not break
# srvyr's internal name handling ("peel" is the innermost grouping variable).
dstrata <- dstrata %>%
mutate(grp = rep_len(c("ac\\a+", "[320+](1)"), nrow(dstrata$variables)))
# 1 group
out_survey <- svymean(~grp, dstrata)
out_srvyr <- dstrata %>%
group_by(grp) %>%
summarize(pct = survey_mean())
test_that(
"survey_mean gets correct values for factors with special characters in single grouped surveys",
expect_equal(c(out_survey[[1]], sqrt(diag(attr(out_survey, "var")))[[1]]),
c(out_srvyr[[2]][[1]], out_srvyr[[3]][[1]])))
# More than 2 groups
out_srvyr <- dstrata %>%
group_by(stype, grp) %>%
summarize(tot = survey_total())
out_survey <- svyby(~grp, ~stype, dstrata, svytotal)
# svyby builds column names by pasting the variable name and level, so the
# special characters appear literally in the reference column names.
test_that("survey_total is correct with special chars in peel",
expect_equal(out_survey[["grp[320+](1)"]],
out_srvyr %>% filter(grp == "[320+](1)") %>% .$tot))
test_that("survey_total is correct with special chars in peel (se)",
expect_equal(out_survey[["se.grp[320+](1)"]],
out_srvyr %>%
filter(grp == "[320+](1)") %>%
.$tot_se))
|
/tests/testthat/test_survey_mean_factor.r
|
no_license
|
tilltnet/srvyr
|
R
| false
| false
| 5,219
|
r
|
# Unit tests: srvyr survey_mean()/survey_total() with factor/character
# grouping variables, verified against the survey package.
context("Quick tests for survey factors")
library(srvyr)
library(survey)
# utilities.R supplies test helpers (e.g. expect_df_equal)
source("utilities.R")
data(api)
# Stratified design: strata = school type, weights = pw.
dstrata <- apistrat %>%
as_survey_design(strata = stype, weights = pw)
# One group
out_survey <- svymean(~awards, dstrata)
out_srvyr <- dstrata %>%
group_by(awards) %>%
summarize(pct = survey_mean())
test_that(
"survey_mean gets correct values for factors with single grouped surveys",
expect_equal(c(out_survey[[1]], sqrt(diag(attr(out_survey, "var")))[[1]]),
c(out_srvyr[[2]][[1]], out_srvyr[[3]][[1]])))
test_that("survey_mean preserves factor levels",
expect_equal(levels(apistrat$awards), levels(out_srvyr$awards)))
# Character grouping column should stay character.
out_srvyr <- dstrata %>%
group_by(awards = as.character(awards)) %>%
summarize(pct = survey_mean())
test_that("survey_mean preserves factor levels",
expect_equal("character", class(out_srvyr$awards)))
# More than 2 groups
out_srvyr <- dstrata %>%
group_by(stype, awards) %>%
summarize(tot = survey_total())
out_survey <- svyby(~awards, ~stype, dstrata, svytotal)
test_that("survey_total is correct when doing props with multiple groups",
expect_equal(out_survey[["awardsNo"]],
out_srvyr %>% filter(awards == "No") %>% .$tot))
# NOTE(review): both arguments below are numeric scalars; the analogous test
# at the end of the file uses expect_equal -- confirm expect_df_equal is
# intended here.
test_that("survey_total is correct when doing props with multiple groups (se)",
expect_df_equal(out_survey[["se.awardsNo"]],
out_srvyr %>%
filter(awards == "No") %>%
.$tot_se))
# Preserves factor orderings and character
out_srvyr <- dstrata %>%
mutate(stype2 = relevel(stype, "H")) %>%
group_by(stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves factor levels when calculating a statistic (1 grp)",
expect_true(class(out_srvyr$stype2) == "factor" &
all(levels(out_srvyr$stype2) == c("H", "E", "M")))
)
out_srvyr <- dstrata %>%
mutate(stype2 = as.character(stype)) %>%
group_by(stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves character when calculating a statistic (1 grp)",
expect_true(class(out_srvyr$stype2) == "character")
)
# Same checks with two grouping variables.
out_srvyr <- dstrata %>%
mutate(stype2 = relevel(stype, "H")) %>%
group_by(awards, stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves factor levels when calculating a statistic (multi grps)",
expect_true(class(out_srvyr$stype2) == "factor" &
all(levels(out_srvyr$stype2) == c("H", "E", "M")))
)
out_srvyr <- dstrata %>%
mutate(stype2 = as.character(stype)) %>%
group_by(awards, stype2) %>%
summarize(tot = survey_total())
test_that("survey_* preserves character when calculating a statistic (multi grps)",
expect_true(class(out_srvyr$stype2) == "character")
)
# confidence intervals
# Expected tibble with t-based CIs from the survey package.
out_survey_mn <- svymean(~awards, dstrata)
out_survey_tot <- svytotal(~awards, dstrata)
out_survey <- tibble::tibble(
awards = factor(c("No", "Yes")),
pct = as.numeric(out_survey_mn),
pct_low = as.numeric(confint(out_survey_mn, df = degf(dstrata))[, 1]),
pct_upp = as.numeric(confint(out_survey_mn, df = degf(dstrata))[, 2]),
tot = as.numeric(out_survey_tot),
tot_low = as.numeric(confint(out_survey_tot, df = degf(dstrata))[, 1]),
tot_upp = as.numeric(confint(out_survey_tot, df = degf(dstrata))[, 2])
)
out_srvyr <- dstrata %>%
group_by(awards) %>%
summarize(pct = survey_mean(vartype = "ci"),
tot = survey_total(vartype = "ci"))
test_that(
"survey_mean and survey_total work with cis",
expect_df_equal(out_srvyr, out_survey))
# One group
# NOTE(review): verbatim repeat of the one-group checks from the top of the
# file -- confirm whether intentional.
out_survey <- svymean(~awards, dstrata)
out_srvyr <- dstrata %>%
group_by(awards) %>%
summarize(pct = survey_mean())
test_that(
"survey_mean gets correct values for factors with single grouped surveys",
expect_equal(c(out_survey[[1]], sqrt(diag(attr(out_survey, "var")))[[1]]),
c(out_srvyr[[2]][[1]], out_srvyr[[3]][[1]])))
test_that("survey_mean preserves factor levels",
expect_equal(levels(apistrat$awards), levels(out_srvyr$awards)))
## Special characters in peel
# Grouping values with regex/backslash metacharacters must not break srvyr.
dstrata <- dstrata %>%
mutate(grp = rep_len(c("ac\\a+", "[320+](1)"), nrow(dstrata$variables)))
# 1 group
out_survey <- svymean(~grp, dstrata)
out_srvyr <- dstrata %>%
group_by(grp) %>%
summarize(pct = survey_mean())
test_that(
"survey_mean gets correct values for factors with special characters in single grouped surveys",
expect_equal(c(out_survey[[1]], sqrt(diag(attr(out_survey, "var")))[[1]]),
c(out_srvyr[[2]][[1]], out_srvyr[[3]][[1]])))
# More than 2 groups
out_srvyr <- dstrata %>%
group_by(stype, grp) %>%
summarize(tot = survey_total())
out_survey <- svyby(~grp, ~stype, dstrata, svytotal)
# svyby pastes variable name + level, so special characters appear literally
# in the reference column names.
test_that("survey_total is correct with special chars in peel",
expect_equal(out_survey[["grp[320+](1)"]],
out_srvyr %>% filter(grp == "[320+](1)") %>% .$tot))
test_that("survey_total is correct with special chars in peel (se)",
expect_equal(out_survey[["se.grp[320+](1)"]],
out_srvyr %>%
filter(grp == "[320+](1)") %>%
.$tot_se))
|
#!/usr/bin/env Rscript
# Command-line entry point for the IAMhiFT4a analysis.
# don't run lines 3-13 if running from R workspace
args <- commandArgs(trailingOnly = TRUE)
# Check that exactly 3 arguments were supplied. If not, return an error.
# (length(args) != 3 covers both the "too few" and "too many" cases that the
# original expressed as length(args) < 3 | length(args) > 3.)
if (length(args) != 3) {
  stop("Exactly three arguments must be supplied: projectDir, procDataSubdir and graphSubdir.", call.=FALSE)
}
if (length(args) == 3) {
  projectDir <- args[1]
  procDataSubdir <- args[2]
  graphSubdir <- args[3]
}
library(tidyverse)
library(magrittr)
# Example argument values when running interactively from a workspace:
# projectDir = '~/Dropbox (RajLab)/Shared_IanM/cellid_201807_onward/'
# procDataSubdir = 'procDataScripted/'
# graphSubdir = 'graphs'
# Input data lives under extractedData/; figures are written under graphDir.
procDataDir = paste0(projectDir, 'extractedData/')
graphDir = paste0(projectDir, graphSubdir)
# Create the output directory tree for this experiment if it does not exist.
if (!dir.exists(paste0(graphDir, '/hiFT'))){
dir.create(paste0(graphDir, '/hiFT'))
}
if (!dir.exists(paste0(graphDir, '/hiFT/IAMhiFT4a'))){
dir.create(paste0(graphDir, '/hiFT/IAMhiFT4a'))
}
# Targets of interest ("hits") plus the non-targeting control.
hits <- c('control', 'ZNF652', 'ZBTB38', 'ATOH8', 'CERS2', 'KLF13')
# Colony-count table; the first 69 lines of the sheet export are metadata.
hiFT4a_colonies <- as_tibble(read.table(paste0(procDataDir, 'hiFT/IAMhiFT4a_ second set of shRNAs - Sheet1.tsv'), header = T, stringsAsFactors = F, skip = 69, sep = '\t'))
hiFT4a_colonies <- hiFT4a_colonies[,1:5]
# Keep hit targets only; SHC003 is excluded and SHC007 is reindexed to 3
# (presumably to give the controls a contiguous shIndex -- confirm).
hiFT4a_colonies_hits <- hiFT4a_colonies %>% filter(target %in% hits, shID != 'SHC003') %>%
mutate(shIndex = ifelse(shID == 'SHC007', 3, shIndex))
# Per-(target, shRNA) mean colony count with SEM error bars, floored at 0.
hiFT4a_colonies_hits_sum <- hiFT4a_colonies_hits %>%
group_by(target, shIndex) %>%
summarise(meanCount = mean(colonies),
semCount = sd(colonies)/sqrt(length(colonies)),
errbMax = meanCount + semCount,
errbMin = ifelse(meanCount - semCount > 0, meanCount - semCount, 0))
# Fix the facet/display order of targets in both tables.
hiFT4a_colonies_hits$target <- factor(hiFT4a_colonies_hits$target)
hiFT4a_colonies_hits$target <- factor(hiFT4a_colonies_hits$target, levels = c('control', 'ZNF652', 'ZBTB38', 'ATOH8', 'CERS2', 'KLF13', 'CEBPB', 'RUNX1', 'PRRX2'))
hiFT4a_colonies_hits_sum$target <- factor(hiFT4a_colonies_hits_sum$target)
hiFT4a_colonies_hits_sum$target <- factor(hiFT4a_colonies_hits_sum$target, levels = c('control', 'ZNF652', 'ZBTB38', 'ATOH8', 'CERS2', 'KLF13', 'CEBPB', 'RUNX1', 'PRRX2'))
# Mean/SEM over all control wells, drawn as a reference line in the plots.
controlMeans <- hiFT4a_colonies %>%
filter(target == 'control') %>%
summarise(meanCtlCount = mean(colonies),
semCtlCount = sd(colonies)/sqrt(length(colonies)))
# Dot + bar plot of colony counts per shRNA, faceted by target, with SEM
# error bars and a horizontal reference line at the control mean.
colony_dotsNbars <- ggplot() +
geom_point(data = hiFT4a_colonies_hits %>% filter(target != 'PO'), aes(shIndex, colonies), position = position_jitter(w = 0.15, h = 0), alpha = 0.2) +
geom_bar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(shIndex, meanCount), alpha = 0.4, width = 0.7, stat = 'identity') +
geom_errorbar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(x = shIndex, ymin = errbMin, ymax = errbMax), width = 0.25, alpha = 0.5) +
facet_grid(~target) +
geom_hline(yintercept = controlMeans$meanCtlCount, color = 'blue', alpha = 0.4) +
theme_classic() +
ylab('Alkaline Phosphatase-positive colonies per well') +
xlab('shRNA ID for target') +
ggtitle('hiF-T iPSC colony counts after perturbable factor knockdown') +
theme(axis.text.y = element_text(size = rel(2)),
axis.title = element_text(size = rel(1.5)),
panel.spacing = unit(0.3, "lines"))
ggsave(colony_dotsNbars, file = paste0(graphDir, '/hiFT/IAMhiFT4a/colony_counts_dotsNbars_forSlide1.pdf'), width = 10, height = 7, useDingbats = F)
# Slide variant: x treated as categorical (as.character), darker reference
# line, titles/axis labels stripped for presentation.
colony_dotsNbars_forSlide <- ggplot() +
geom_point(data = hiFT4a_colonies_hits %>% filter(target != 'PO'), aes(as.character(shIndex), colonies), position = position_jitter(w = 0.15, h = 0), alpha = 0.2) +
geom_bar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(as.character(shIndex), meanCount), alpha = 0.4, width = 0.7, stat = 'identity') +
geom_errorbar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(x = as.character(shIndex), ymin = errbMin, ymax = errbMax), width = 0.25, alpha = 0.5) +
facet_grid(~target) +
geom_hline(yintercept = controlMeans$meanCtlCount, color = 'blue', alpha = 0.8) +
theme_classic() +
ylab('Alkaline Phosphatase-positive colonies per well') +
xlab('shRNA ID for target') +
ggtitle('hiF-T iPSC colony counts after perturbable factor knockdown') +
theme(axis.text.y = element_text(size = rel(2)),
axis.title = element_blank(),
plot.title = element_blank(),
strip.text = element_text(size = rel(1.6)),
panel.spacing = unit(0.3, "lines"))
ggsave(colony_dotsNbars_forSlide, file = paste0(graphDir, '/hiFT/IAMhiFT4a/colony_counts_dotsNbars_forSlide.pdf'), width = 10, height = 5, useDingbats = F)
# Fisher's exact test: for each knockdown target, compare its pooled colony
# count against the pooled non-targeting controls (SHC001/SHC002/SHC007),
# one-sided ("greater"). Each well is scaled by 1e4 -- presumably the number
# of potential colonies per well; confirm against the experimental protocol.
all_targets <- unique(as.character(hiFT4a_colonies$target))
targets <- all_targets[!all_targets %in% c('PO', 'control')]

ctlDat <- hiFT4a_colonies %>%
  filter(shID %in% c('SHC001', 'SHC002', 'SHC007'))

# Build one single-row tibble per target and bind once at the end; this
# replaces the original grow-in-loop (%<>% bind_rows) with the same row order.
FET_pvals_4a <- bind_rows(lapply(targets, function(targ) {
  tempDat <- hiFT4a_colonies %>%
    filter(target == targ)
  tempFET <- fisher.test(matrix(c(sum(tempDat$colonies), length(tempDat$colonies)*1e4 - sum(tempDat$colonies),
                                  sum(ctlDat$colonies), length(ctlDat$colonies)*1e4 - sum(ctlDat$colonies)), ncol = 2),
                         alternative = 'g')
  tibble(target = targ, FET_pval = tempFET$p.value)
}))

write.csv(FET_pvals_4a, file = paste0(graphDir, '/hiFT/IAMhiFT4a/FET_pvals_4a.csv'))
#
# # qPCR ####
#
# genes <- c('SKIL', 'YBX3', 'TSC22D1', 'ID1', 'KLF13', 'CERS2', 'LARP1', 'TBX3', 'ZBTB38', 'ATOH8', 'ZNF652', 'NFATC4')
# plates <- 1:6
# exp <- 'IAMhiFT4a'
#
# all_results<-list()
# for (plate in plates) {
#
# genesInPlate = genes[c(2*(plate-1) + 1, 2*(plate-1) + 2)]
#
# results <- as_tibble(read.csv(paste0('~/Google Drive/Cell ID/Reprogramming designs/hiF-T/qPCR results/', exp, '-qPCRplate', as.character(plate), '_somecols - Sheet1.csv'), header = T, stringsAsFactors = F))
#
# if (plate != 4) {
# samples <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 - Sheet1.csv', skip = 1, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = cDNA) %>%
# gather(col, sample, 2:13) %>%
# mutate(col = substring(col, 2)) %>%
# mutate(target = ifelse(sample %in% c('SKILsh1', 'SKILsh2', 'SKILsh3'), 'SKIL',
# ifelse(sample %in% c('YBX3sh2', 'YBX3sh3', 'YBX3sh5'), 'YBX3',
# ifelse(sample %in% c('SHC001', 'SHC002', 'SHC003', 'SHC007'), 'control', 'empty'))),
# shID = ifelse(sample %in% c('SKILsh1', 'YBX3sh2', 'SHC001'), 1,
# ifelse(sample %in% c('SKILsh2', 'YBX3sh3', 'SHC002'), 2,
# ifelse(sample %in% c('SKILsh3', 'YBX3sh5', 'SHC003'), 3,
# ifelse(sample == 'SHC007', 4, NA)))))
#
# primers <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 - Sheet1.csv', skip = 11, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = Primers) %>%
# gather(col, primer, 2:13) %>%
# mutate(col = substring(col, 2))
#
# } else {
# samples <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 ALT - Sheet1.csv', skip = 1, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = cDNA) %>%
# gather(col, sample, 2:13) %>%
# mutate(col = substring(col, 2)) %>%
# mutate(target = ifelse(sample %in% c('SKILsh1', 'SKILsh2', 'SKILsh3'), 'SKIL',
# ifelse(sample %in% c('YBX3sh2', 'YBX3sh3', 'YBX3sh5'), 'YBX3',
# ifelse(sample %in% c('SHC001', 'SHC002', 'SHC003', 'SHC007'), 'control', 'empty'))),
# shID = ifelse(sample %in% c('SKILsh1', 'YBX3sh2', 'SHC001'), 1,
# ifelse(sample %in% c('SKILsh2', 'YBX3sh3', 'SHC002'), 2,
# ifelse(sample %in% c('SKILsh3', 'YBX3sh5', 'SHC003'), 3,
# ifelse(sample == 'SHC007', 4, NA)))))
#
# primers <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 ALT - Sheet1.csv', skip = 11, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = Primers) %>%
# gather(col, primer, 2:13) %>%
# mutate(col = substring(col, 2))
#
# }
#
#
#
# layout_results <- inner_join(samples, primers, by = c('row', 'col')) %>%
# mutate(target = ifelse(target == 'SKIL', genesInPlate[1],
# ifelse(target == 'YBX3', genesInPlate[2], 'control')),
# primers = ifelse(primer == 'SKIL', genesInPlate[1],
# ifelse(primer == 'YBX3', genesInPlate[2], primer))) %>%
# mutate(Well = paste0(row, col)) %>%
# inner_join(results, by = 'Well') %>%
# dplyr::select(-c(sample, Sample.Name, Task)) %>%
# mutate(plateID = plate)
#
#
# if (is.null(dim(all_results))) {
# all_results <- layout_results
# } else {
# all_results %<>% bind_rows(layout_results)
# }
#
# }
#
# CTC <- all_results %>%
# filter(target == 'control', !is.na(shID), shID != 3) %>%
# group_by(plateID, primers, shID) %>%
# summarise(ControlMeanCt = mean(as.numeric(Ct), na.rm = T))
#
# CTE <- all_results %>%
# filter(target != 'control', !is.na(shID)) %>%
# group_by(plateID, target, primers, shID) %>%
# summarise(ControlMeanCt = ifelse(sum(is.na(as.numeric(Ct))) > 1, 40, mean(as.numeric(Ct), na.rm = T)))
#
# HE <- CTE %>%
# filter(primers == 'GAPDH') %>%
# dplyr::rename(HE = ControlMeanCt)
# TE <- CTE %>%
# filter(primers != 'GAPDH') %>%
# dplyr::rename(TE = ControlMeanCt)
# dCTE <- inner_join(HE, TE, by = c('plateID', 'target', 'shID')) %>%
# mutate(dCTE = TE-HE)
#
# HC <- CTC %>%
# filter(primers == 'GAPDH') %>%
# dplyr::rename(HC = ControlMeanCt)
# TC <- CTC %>%
# filter(primers != 'GAPDH') %>%
# dplyr::rename(TC = ControlMeanCt)
# dCTC <- inner_join(HC, TC, by = c('plateID', 'shID')) %>%
# mutate(dCTC = TC-HC)
#
# dCTCavg <- dCTC %>%
# group_by(plateID, primers.y) %>%
# summarise(dCTCavg = mean(dCTC))
#
# ddCT <- left_join(dCTE, dCTCavg, by = c('plateID', 'primers.y')) %>%
# mutate(ddCT = dCTE - dCTCavg,
# FC = 2^(-ddCT),
# percKD = 1-FC) %>%
# filter(dCTE > 0)
#
# ddCT$target <- factor(ddCT$target)
# ddCT$target <- factor(ddCT$target, levels = levels(hiFT4a_colonies_sum$target)[levels(hiFT4a_colonies_sum$target) != 'PO'])
#
#
# KDbarplot1 <- ggplot() +
# geom_bar(data = ddCT, aes(shID, percKD), stat = 'identity') +
# facet_grid(.~target) +
# ylim(c(0,1)) +
# theme_classic() +
# ylab('Average % KD (1 - 2^(-ddCt))') +
# xlab('shRNA ID for target') +
# ggtitle('Estimated KD efficiency of shRNAs') +
# theme(axis.text.y = element_text(size = rel(2)),
# axis.title = element_blank(),
# plot.title = element_blank(),
# strip.text = element_text(size = rel(1.6)))
# ggsave(KDbarplot1, file = '~/Dropbox (RajLab)/Projects/cellid/graphs/hiFT/IAMhiFT4a/KDbarplot_forSlide.pdf', width = 10, height = 5)
#
#
# ## merge the two
#
# colony_percKD_dotsNbars_forSlide <- ggplot() +
# geom_point(data = hiFT4a_colonies %>% filter(target != 'PO') %>% mutate(measurement = 'colonies'), aes(shIndex, colonies), position = position_jitter(w = 0.15, h = 0), alpha = 0.2) +
# geom_bar(data = hiFT4a_colonies_sum %>% filter(target != 'PO') %>% mutate(measurement = 'colonies'), aes(shIndex, meanCount), alpha = 0.4, width = 0.7, stat = 'identity') +
# geom_errorbar(data = hiFT4a_colonies_sum %>% filter(target != 'PO') %>% mutate(measurement = 'colonies'), aes(x = shIndex, ymin = errbMin, ymax = errbMax), width = 0.25, alpha = 0.5) +
# facet_grid(measurement~target, scales = 'free_y') +
# geom_bar(data = ddCT %>% dplyr::rename(shIndex = shID) %>% mutate(measurement = 'qPCR'), aes(shIndex, percKD), stat = 'identity') +
# geom_hline(yintercept = controlMeans$meanCtlCount, color = 'blue', alpha = 0.8) +
# theme_classic() +
# ylab('Alkaline Phosphatase-positive colonies per well') +
# xlab('shRNA ID for target') +
# ggtitle('hiF-T iPSC colony counts after perturbable factor knockdown') +
# theme(axis.text.y = element_text(size = rel(2)),
# axis.title = element_blank(),
# plot.title = element_blank())
# ggsave(colony_percKD_dotsNbars_forSlide, file = '~/Dropbox (RajLab)/Projects/cellid/graphs/hiFT/IAMhiFT4a/colony_percKD_counts_dotsNbars_forSlide.pdf', width = 10, height = 5)
#
|
/analysisScripts/hiFT4a.R
|
permissive
|
arjunrajlaboratory/P3_public_analysis
|
R
| false
| false
| 12,623
|
r
|
#!/usr/bin/env Rscript
# Command-line entry point. Expects exactly three positional arguments:
# projectDir, procDataSubdir, graphSubdir. When sourcing interactively,
# skip the argument parsing and set the three path variables by hand
# (see the commented defaults below).
args <- commandArgs(trailingOnly = TRUE)

# Require exactly three arguments; anything else is an error.
# (Original tested `length(args) < 3 | length(args) > 3` with a vectorised
# `|` and then re-tested `== 3` in a second if; a single `!=` is equivalent.)
if (length(args) != 3) {
  stop("Exactly three arguments must be supplied: projectDir, procDataSubdir and graphSubdir.", call. = FALSE)
}
projectDir <- args[1]
procDataSubdir <- args[2]
graphSubdir <- args[3]

library(tidyverse)
library(magrittr)

# Interactive defaults (uncomment when running from an R workspace):
# projectDir = '~/Dropbox (RajLab)/Shared_IanM/cellid_201807_onward/'
# procDataSubdir = 'procDataScripted/'
# graphSubdir = 'graphs'

procDataDir <- paste0(projectDir, 'extractedData/')
graphDir <- paste0(projectDir, graphSubdir)

# Ensure the output graph directories exist before any ggsave() below.
if (!dir.exists(paste0(graphDir, '/hiFT'))) {
  dir.create(paste0(graphDir, '/hiFT'))
}
if (!dir.exists(paste0(graphDir, '/hiFT/IAMhiFT4a'))) {
  dir.create(paste0(graphDir, '/hiFT/IAMhiFT4a'))
}
# Targets carried forward from the screen ("hits"), plus the shRNA controls.
hits <- c('control', 'ZNF652', 'ZBTB38', 'ATOH8', 'CERS2', 'KLF13')
# Colony-count sheet: the table proper starts after 69 skipped rows and only
# the first 5 columns contain data.
hiFT4a_colonies <- as_tibble(read.table(paste0(procDataDir, 'hiFT/IAMhiFT4a_ second set of shRNAs - Sheet1.tsv'), header = T, stringsAsFactors = F, skip = 69, sep = '\t'))
hiFT4a_colonies <- hiFT4a_colonies[,1:5]
# Keep hit targets only; drop control SHC003 and index control SHC007 as 3.
hiFT4a_colonies_hits <- hiFT4a_colonies %>% filter(target %in% hits, shID != 'SHC003') %>%
  mutate(shIndex = ifelse(shID == 'SHC007', 3, shIndex))
# Per-target, per-shRNA mean colony count with SEM-based error-bar bounds
# (lower bound clamped at 0 so error bars never go negative).
hiFT4a_colonies_hits_sum <- hiFT4a_colonies_hits %>%
  group_by(target, shIndex) %>%
  summarise(meanCount = mean(colonies),
            semCount = sd(colonies)/sqrt(length(colonies)),
            errbMax = meanCount + semCount,
            errbMin = ifelse(meanCount - semCount > 0, meanCount - semCount, 0))
# Fix the facet order for plotting.
# NOTE(review): the level set also lists CEBPB/RUNX1/PRRX2, which are not in
# `hits` -- presumably harmless unused levels; confirm that is intended.
hiFT4a_colonies_hits$target <- factor(hiFT4a_colonies_hits$target)
hiFT4a_colonies_hits$target <- factor(hiFT4a_colonies_hits$target, levels = c('control', 'ZNF652', 'ZBTB38', 'ATOH8', 'CERS2', 'KLF13', 'CEBPB', 'RUNX1', 'PRRX2'))
hiFT4a_colonies_hits_sum$target <- factor(hiFT4a_colonies_hits_sum$target)
hiFT4a_colonies_hits_sum$target <- factor(hiFT4a_colonies_hits_sum$target, levels = c('control', 'ZNF652', 'ZBTB38', 'ATOH8', 'CERS2', 'KLF13', 'CEBPB', 'RUNX1', 'PRRX2'))
# Mean (and SEM) colony count across all control wells; drawn as the blue
# reference line in the plots below.
controlMeans <- hiFT4a_colonies %>%
  filter(target == 'control') %>%
  summarise(meanCtlCount = mean(colonies),
            semCtlCount = sd(colonies)/sqrt(length(colonies)))
# Jittered per-well counts overlaid on mean bars with SEM error bars,
# faceted by target; blue horizontal line marks the overall control mean.
colony_dotsNbars <- ggplot() +
  geom_point(data = hiFT4a_colonies_hits %>% filter(target != 'PO'), aes(shIndex, colonies), position = position_jitter(w = 0.15, h = 0), alpha = 0.2) +
  geom_bar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(shIndex, meanCount), alpha = 0.4, width = 0.7, stat = 'identity') +
  geom_errorbar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(x = shIndex, ymin = errbMin, ymax = errbMax), width = 0.25, alpha = 0.5) +
  facet_grid(~target) +
  geom_hline(yintercept = controlMeans$meanCtlCount, color = 'blue', alpha = 0.4) +
  theme_classic() +
  ylab('Alkaline Phosphatase-positive colonies per well') +
  xlab('shRNA ID for target') +
  ggtitle('hiF-T iPSC colony counts after perturbable factor knockdown') +
  theme(axis.text.y = element_text(size = rel(2)),
        axis.title = element_text(size = rel(1.5)),
        panel.spacing = unit(0.3, "lines"))
ggsave(colony_dotsNbars, file = paste0(graphDir, '/hiFT/IAMhiFT4a/colony_counts_dotsNbars_forSlide1.pdf'), width = 10, height = 7, useDingbats = F)
# Same plot restyled for slides: shIndex cast to character for a discrete
# x axis, axis titles and plot title suppressed, larger facet strip text.
colony_dotsNbars_forSlide <- ggplot() +
  geom_point(data = hiFT4a_colonies_hits %>% filter(target != 'PO'), aes(as.character(shIndex), colonies), position = position_jitter(w = 0.15, h = 0), alpha = 0.2) +
  geom_bar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(as.character(shIndex), meanCount), alpha = 0.4, width = 0.7, stat = 'identity') +
  geom_errorbar(data = hiFT4a_colonies_hits_sum %>% filter(target != 'PO'), aes(x = as.character(shIndex), ymin = errbMin, ymax = errbMax), width = 0.25, alpha = 0.5) +
  facet_grid(~target) +
  geom_hline(yintercept = controlMeans$meanCtlCount, color = 'blue', alpha = 0.8) +
  theme_classic() +
  ylab('Alkaline Phosphatase-positive colonies per well') +
  xlab('shRNA ID for target') +
  ggtitle('hiF-T iPSC colony counts after perturbable factor knockdown') +
  theme(axis.text.y = element_text(size = rel(2)),
        axis.title = element_blank(),
        plot.title = element_blank(),
        strip.text = element_text(size = rel(1.6)),
        panel.spacing = unit(0.3, "lines"))
ggsave(colony_dotsNbars_forSlide, file = paste0(graphDir, '/hiFT/IAMhiFT4a/colony_counts_dotsNbars_forSlide.pdf'), width = 10, height = 5, useDingbats = F)
# Fisher's exact tests: for each knockdown target, test whether its pooled
# colony count is greater (alternative = 'g') than that of the pooled shRNA
# controls (SHC001/SHC002/SHC007). Each group's wells are treated as
# colonies vs. non-colonies out of a nominal 1e4 cells per well in a
# 2x2 contingency table.
targets <- unique(as.character(hiFT4a_colonies$target))
targets <- targets[!targets %in% c('PO', 'control')]
ctlDat <- hiFT4a_colonies %>%
  filter(shID %in% c('SHC001', 'SHC002', 'SHC007'))

# Build one row per target and bind once at the end. (The original grew the
# data frame inside the loop behind an `is.null(dim(...))` first-iteration
# check; lapply + bind_rows is equivalent and avoids repeated copying.)
FET_pvals_4a <- bind_rows(lapply(targets, function(targ) {
  tempDat <- hiFT4a_colonies %>%
    filter(target == targ)
  tempFET <- fisher.test(matrix(c(sum(tempDat$colonies), length(tempDat$colonies)*1e4 - sum(tempDat$colonies),
                                  sum(ctlDat$colonies), length(ctlDat$colonies)*1e4 - sum(ctlDat$colonies)), ncol = 2),
                         alternative = 'g')
  tibble(target = targ, FET_pval = tempFET$p.value)
}))

write.csv(FET_pvals_4a, file = paste0(graphDir, '/hiFT/IAMhiFT4a/FET_pvals_4a.csv'))
#
# # qPCR ####
#
# genes <- c('SKIL', 'YBX3', 'TSC22D1', 'ID1', 'KLF13', 'CERS2', 'LARP1', 'TBX3', 'ZBTB38', 'ATOH8', 'ZNF652', 'NFATC4')
# plates <- 1:6
# exp <- 'IAMhiFT4a'
#
# all_results<-list()
# for (plate in plates) {
#
# genesInPlate = genes[c(2*(plate-1) + 1, 2*(plate-1) + 2)]
#
# results <- as_tibble(read.csv(paste0('~/Google Drive/Cell ID/Reprogramming designs/hiF-T/qPCR results/', exp, '-qPCRplate', as.character(plate), '_somecols - Sheet1.csv'), header = T, stringsAsFactors = F))
#
# if (plate != 4) {
# samples <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 - Sheet1.csv', skip = 1, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = cDNA) %>%
# gather(col, sample, 2:13) %>%
# mutate(col = substring(col, 2)) %>%
# mutate(target = ifelse(sample %in% c('SKILsh1', 'SKILsh2', 'SKILsh3'), 'SKIL',
# ifelse(sample %in% c('YBX3sh2', 'YBX3sh3', 'YBX3sh5'), 'YBX3',
# ifelse(sample %in% c('SHC001', 'SHC002', 'SHC003', 'SHC007'), 'control', 'empty'))),
# shID = ifelse(sample %in% c('SKILsh1', 'YBX3sh2', 'SHC001'), 1,
# ifelse(sample %in% c('SKILsh2', 'YBX3sh3', 'SHC002'), 2,
# ifelse(sample %in% c('SKILsh3', 'YBX3sh5', 'SHC003'), 3,
# ifelse(sample == 'SHC007', 4, NA)))))
#
# primers <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 - Sheet1.csv', skip = 11, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = Primers) %>%
# gather(col, primer, 2:13) %>%
# mutate(col = substring(col, 2))
#
# } else {
# samples <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 ALT - Sheet1.csv', skip = 1, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = cDNA) %>%
# gather(col, sample, 2:13) %>%
# mutate(col = substring(col, 2)) %>%
# mutate(target = ifelse(sample %in% c('SKILsh1', 'SKILsh2', 'SKILsh3'), 'SKIL',
# ifelse(sample %in% c('YBX3sh2', 'YBX3sh3', 'YBX3sh5'), 'YBX3',
# ifelse(sample %in% c('SHC001', 'SHC002', 'SHC003', 'SHC007'), 'control', 'empty'))),
# shID = ifelse(sample %in% c('SKILsh1', 'YBX3sh2', 'SHC001'), 1,
# ifelse(sample %in% c('SKILsh2', 'YBX3sh3', 'SHC002'), 2,
# ifelse(sample %in% c('SKILsh3', 'YBX3sh5', 'SHC003'), 3,
# ifelse(sample == 'SHC007', 4, NA)))))
#
# primers <- as_tibble(read.csv('~/Downloads/IAMhiFT4a-qPCRplate1 20181018 ALT - Sheet1.csv', skip = 11, stringsAsFactors = F, header = T, nrows = 8)) %>%
# dplyr::rename(row = Primers) %>%
# gather(col, primer, 2:13) %>%
# mutate(col = substring(col, 2))
#
# }
#
#
#
# layout_results <- inner_join(samples, primers, by = c('row', 'col')) %>%
# mutate(target = ifelse(target == 'SKIL', genesInPlate[1],
# ifelse(target == 'YBX3', genesInPlate[2], 'control')),
# primers = ifelse(primer == 'SKIL', genesInPlate[1],
# ifelse(primer == 'YBX3', genesInPlate[2], primer))) %>%
# mutate(Well = paste0(row, col)) %>%
# inner_join(results, by = 'Well') %>%
# dplyr::select(-c(sample, Sample.Name, Task)) %>%
# mutate(plateID = plate)
#
#
# if (is.null(dim(all_results))) {
# all_results <- layout_results
# } else {
# all_results %<>% bind_rows(layout_results)
# }
#
# }
#
# CTC <- all_results %>%
# filter(target == 'control', !is.na(shID), shID != 3) %>%
# group_by(plateID, primers, shID) %>%
# summarise(ControlMeanCt = mean(as.numeric(Ct), na.rm = T))
#
# CTE <- all_results %>%
# filter(target != 'control', !is.na(shID)) %>%
# group_by(plateID, target, primers, shID) %>%
# summarise(ControlMeanCt = ifelse(sum(is.na(as.numeric(Ct))) > 1, 40, mean(as.numeric(Ct), na.rm = T)))
#
# HE <- CTE %>%
# filter(primers == 'GAPDH') %>%
# dplyr::rename(HE = ControlMeanCt)
# TE <- CTE %>%
# filter(primers != 'GAPDH') %>%
# dplyr::rename(TE = ControlMeanCt)
# dCTE <- inner_join(HE, TE, by = c('plateID', 'target', 'shID')) %>%
# mutate(dCTE = TE-HE)
#
# HC <- CTC %>%
# filter(primers == 'GAPDH') %>%
# dplyr::rename(HC = ControlMeanCt)
# TC <- CTC %>%
# filter(primers != 'GAPDH') %>%
# dplyr::rename(TC = ControlMeanCt)
# dCTC <- inner_join(HC, TC, by = c('plateID', 'shID')) %>%
# mutate(dCTC = TC-HC)
#
# dCTCavg <- dCTC %>%
# group_by(plateID, primers.y) %>%
# summarise(dCTCavg = mean(dCTC))
#
# ddCT <- left_join(dCTE, dCTCavg, by = c('plateID', 'primers.y')) %>%
# mutate(ddCT = dCTE - dCTCavg,
# FC = 2^(-ddCT),
# percKD = 1-FC) %>%
# filter(dCTE > 0)
#
# ddCT$target <- factor(ddCT$target)
# ddCT$target <- factor(ddCT$target, levels = levels(hiFT4a_colonies_sum$target)[levels(hiFT4a_colonies_sum$target) != 'PO'])
#
#
# KDbarplot1 <- ggplot() +
# geom_bar(data = ddCT, aes(shID, percKD), stat = 'identity') +
# facet_grid(.~target) +
# ylim(c(0,1)) +
# theme_classic() +
# ylab('Average % KD (1 - 2^(-ddCt))') +
# xlab('shRNA ID for target') +
# ggtitle('Estimated KD efficiency of shRNAs') +
# theme(axis.text.y = element_text(size = rel(2)),
# axis.title = element_blank(),
# plot.title = element_blank(),
# strip.text = element_text(size = rel(1.6)))
# ggsave(KDbarplot1, file = '~/Dropbox (RajLab)/Projects/cellid/graphs/hiFT/IAMhiFT4a/KDbarplot_forSlide.pdf', width = 10, height = 5)
#
#
# ## merge the two
#
# colony_percKD_dotsNbars_forSlide <- ggplot() +
# geom_point(data = hiFT4a_colonies %>% filter(target != 'PO') %>% mutate(measurement = 'colonies'), aes(shIndex, colonies), position = position_jitter(w = 0.15, h = 0), alpha = 0.2) +
# geom_bar(data = hiFT4a_colonies_sum %>% filter(target != 'PO') %>% mutate(measurement = 'colonies'), aes(shIndex, meanCount), alpha = 0.4, width = 0.7, stat = 'identity') +
# geom_errorbar(data = hiFT4a_colonies_sum %>% filter(target != 'PO') %>% mutate(measurement = 'colonies'), aes(x = shIndex, ymin = errbMin, ymax = errbMax), width = 0.25, alpha = 0.5) +
# facet_grid(measurement~target, scales = 'free_y') +
# geom_bar(data = ddCT %>% dplyr::rename(shIndex = shID) %>% mutate(measurement = 'qPCR'), aes(shIndex, percKD), stat = 'identity') +
# geom_hline(yintercept = controlMeans$meanCtlCount, color = 'blue', alpha = 0.8) +
# theme_classic() +
# ylab('Alkaline Phosphatase-positive colonies per well') +
# xlab('shRNA ID for target') +
# ggtitle('hiF-T iPSC colony counts after perturbable factor knockdown') +
# theme(axis.text.y = element_text(size = rel(2)),
# axis.title = element_blank(),
# plot.title = element_blank())
# ggsave(colony_percKD_dotsNbars_forSlide, file = '~/Dropbox (RajLab)/Projects/cellid/graphs/hiFT/IAMhiFT4a/colony_percKD_counts_dotsNbars_forSlide.pdf', width = 10, height = 5)
#
|
# Static help content for the app's instructions panel: one <div> describing
# how the "Filter" and "Table" tabs behave. Plain UI text only -- no inputs,
# outputs, or reactivity.
tags$div(
  # --- "Filter" tab help ---
  h4("Filter Tab"),
  p(
    'The "Filter" tab allows the user to select a column in the',
    "inventory and a feature or multiple features of that column to filter the data.",
    'The filters will update data displayed in the "Map" and "Table" tabs.',
    'If no filters are selected, then all the data will be displayed',
    "(i.e., the data is not subset by a filter)."),
  p(
    "It is possible to apply multiple filters that yield no data.",
    'For example, if the "Primary Filter" acts on the program name column',
    'and the "Secondary Filter" acts on the contact name column,',
    'then it is very easy to select a contact name in the "Secondary Filter"',
    'that does not belong to the program name selected in the "Primary Filter:"',
    'ultimately resulting in no data to display.',
    "When this happens, several text-boxes will appear informing the",
    "user that there is no data available for that particular filter."
  ),
  br(),
  # --- "Map" tab help: currently disabled; retained for when the map tab
  #     is re-enabled ---
  # h4("Map Tab"),
  # p(
  #   'The map displays water quality monitoring locations that are',
  #   'currently included in the inventory.',
  #   "This is a subset of all the information included in the",
  #   "inventory's table view."
  # ),
  # p(
  #   'By double-clicking, the user can zoom in to an area of interest.',
  #   'Additionally, the user can hold shift, then click and drag to',
  #   'create a zoom extent.',
  #   'This enables the user to draw a box around an area of',
  #   'interest and subsequently zoom in to that area.'
  # ),
  # p(
  #   "Clicking on a point will provide a pop-up window with information",
  #   "related to the selected point.",
  #   "Some of the points have hyperlinks to the data provider's website."
  # ),
  # br(),
  # --- "Table" tab help ---
  h4("Table Tab"),
  p(
    "This tab enables the user to view the data in a tabular format.",
    "The table is interactive.",
    "Each of the columns can be sorted by clicking on the column header",
    "and each column can be filtered by entering values into the text-boxes",
    "located below the column headers.",
    "A global table filter is available in the top right corner, which enables",
    "the user to filter all columns using text or numeric values entered into this",
    "text-box."
  ),
  p(
    "Located below the table are two download buttons.",
    'The "Download All Available Data" button allows the user to download all of the',
    'data in the Water Quality Data Inventory.',
    'The "Download Filtered Data" button allows the user to download the data',
    'filtered by the "Filter" tab.',
    tags$b('The data downloaded with this button is not influenced by the filters',
           'applied in the "Table" tab.'),
    "Those filters are just for exploring the tabular data."
  )
)
|
/shiny/shiny_wqdi/ui/ui_instructions.R
|
no_license
|
InterstateCommissionPotomacRiverBasin/wq_data_inventory
|
R
| false
| false
| 2,758
|
r
|
tags$div(
h4("Filter Tab"),
p(
'The "Filter" tab allows the user to select a column in the',
"inventory and a feature or multiple features of that column to filter the data.",
'The filters will update data displayed in the "Map" and "Table" tabs.',
'If no filters are selected, then all the data will be displayed',
"(i.e., the data is not subset by a filter)."),
p(
"It is possible to apply multiple filters that yield no data.",
'For example, if the "Primary Filter" acts on the program name column',
'and the "Secondary Filter" acts on the contact name column,',
'then it is very easy to select a contact name in the "Secondary Filter"',
'that does not belong to the program name selected in the "Primary Filter:"',
'ultimately resulting in no data to display.',
"When this happens, several text-boxes will appear informing the",
"user that there is no data available for that particular filter."
),
br(),
# h4("Map Tab"),
# p(
# 'The map displays water quality monitoring locations that are',
# 'currently included in the inventory.',
# "This is a subset of all the information included in the",
# "inventory's table view."
# ),
# p(
# 'By double-clicking, the user can zoom in to an area of interest.',
# 'Additionally, the user can hold shift, then click and drag to',
# 'create a zoom extent.',
# 'This enables the user to draw a box around an area of',
# 'interest and subsequently zoom in to that area.'
# ),
# p(
# "Clicking on a point will provide a pop-up window with information",
# "related to the selected point.",
# "Some of the points have hyperlinks to the data provider's website."
# ),
# br(),
h4("Table Tab"),
p(
"This tab enables the user to view the data in a tabular format.",
"The table is interactive.",
"Each of the columns can be sorted by clicking on the column header",
"and each column can be filtered by entering values into the text-boxes",
"located below the column headers.",
"A global table filter is available in the top right corner, which enables",
"the user to filter all columns using text or numeric values entered into this",
"text-box."
),
p(
"Located below the table are two download buttons.",
'The "Download All Available Data" button allows the user to download all of the',
'data in the Water Quality Data Inventory.',
'The "Download Filtered Data" button allows the user to download the data',
'filtered by the "Filter" tab.',
tags$b('The data downloaded with this button is not influenced by the filters',
'applied in the "Table" tab.'),
"Those filters are just for exploring the tabular data."
)
)
|
# Descriptive statistics for Ctrip reviews (Gardens by the Bay):
# monthly review counts overall, then faceted by year.
library(ggplot2)
library(dplyr)
# NOTE(review): relative local path -- will not resolve outside the
# author's machine; confirm intended working directory.
df_raw <- read.csv("Desktop/git/STB_social_media_analytics/experimentation/jiaxin_experiment/descriptive_stats/2.csv")
#getting all ctrip data
#number of reviews by month
# Sort chronologically by review date, then review time.
df <-df_raw[order(df_raw$REVIEW_DATE,df_raw$REVIEW_TIME),]
# "YYYY-MM" label; alphabetical order of this string is also chronological.
df$Yr_month=format(as.Date(df$REVIEW_DATE), "%Y-%m")
# Bar chart of review counts per month, count labels drawn above the bars.
overall <-(ggplot(df %>% select("Yr_month"),aes(Yr_month))+
             geom_bar(fill='cadetblue3')+
             geom_text(stat='count',aes(label=..count..),vjust=-0.5)+
             theme(axis.text.x = element_text(angle = 45, hjust = 1))+
             ggtitle("review numbers"))
#generally a increasing trend
#facet plot of number of reviews
df$by_Yr=format(as.Date(df$REVIEW_DATE), "%Y")
# Month name as an ordered factor so the x axis runs Jan..Dec rather than
# alphabetically.
df$by_mth= factor(months(as.POSIXlt(df$REVIEW_DATE, format="%Y-%m-%d")),
                  levels=c("January","February","March","April","May","June","July",
                           "August","September","October","November","December"))
# Monthly counts, one facet row per year.
by_Year <- ggplot(df, aes(x=by_mth)) +
  geom_bar(fill='cadetblue3')+
  geom_text(stat='count',aes(label=..count..),vjust=-0.1)+
  theme(axis.text.x = element_text(angle = 45, hjust = 1))+
  facet_grid(by_Yr~.)
#significant increase in the august
|
/experimentation/jiaxin_experiment/ctrip/codes/Gardens_by_the_bay.R
|
no_license
|
mtaziz/STB_social_media
|
R
| false
| false
| 1,231
|
r
|
library(ggplot2)
library(dplyr)
df_raw <- read.csv("Desktop/git/STB_social_media_analytics/experimentation/jiaxin_experiment/descriptive_stats/2.csv")
#getting all ctrip data
#number of reviews by month
df <-df_raw[order(df_raw$REVIEW_DATE,df_raw$REVIEW_TIME),]
df$Yr_month=format(as.Date(df$REVIEW_DATE), "%Y-%m")
overall <-(ggplot(df %>% select("Yr_month"),aes(Yr_month))+
geom_bar(fill='cadetblue3')+
geom_text(stat='count',aes(label=..count..),vjust=-0.5)+
theme(axis.text.x = element_text(angle = 45, hjust = 1))+
ggtitle("review numbers"))
#generally a increasing trend
#facet plot of number of reviews
df$by_Yr=format(as.Date(df$REVIEW_DATE), "%Y")
df$by_mth= factor(months(as.POSIXlt(df$REVIEW_DATE, format="%Y-%m-%d")),
levels=c("January","February","March","April","May","June","July",
"August","September","October","November","December"))
by_Year <- ggplot(df, aes(x=by_mth)) +
geom_bar(fill='cadetblue3')+
geom_text(stat='count',aes(label=..count..),vjust=-0.1)+
theme(axis.text.x = element_text(angle = 45, hjust = 1))+
facet_grid(by_Yr~.)
#significant increase in the august
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{parse_match}
\alias{parse_match}
\title{parse_match}
\usage{
parse_match(x)
}
\arguments{
\item{x}{[\code{list(2L)}]\cr}
}
\value{
list of two data.frames (matches & goals)
}
\description{
Parses a single raw match entry into its match-level and goal-level
components, returned as a list of two data.frames (matches & goals).
}
|
/man/parse_match.Rd
|
no_license
|
sfohr/openligadbR
|
R
| false
| true
| 302
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{parse_match}
\alias{parse_match}
\title{parse_match}
\usage{
parse_match(x)
}
\arguments{
\item{x}{[\code{list(2L)}]\cr}
}
\value{
list of two data.frames (matches & goals)
}
\description{
Parses a single raw match entry into its match-level and goal-level
components, returned as a list of two data.frames (matches & goals).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_score.R
\name{update_score}
\alias{update_score}
\title{Update Score with Advantage Final Set}
\usage{
update_score(pointa, pointb, gamea, gameb, seta, setb, bestof3 = T)
}
\arguments{
\item{bestof3}{Logical indicator if best-of-3 match}
\item{point_a}{Numeric game points won by current server at start of point}
\item{point_b}{Numeric game points won by current returner at start of point}
\item{game_a}{Numeric games won by current server in the current set}
\item{game_b}{Numeric games won by current returner in the current set}
\item{set_a}{Numeric sets won by current server}
\item{set_b}{Numeric sets won by current returner}
}
\description{
Check score change based on result of current point and return score in terms of serving player of the previous point
}
|
/man/update_score.Rd
|
no_license
|
jotremblay/inmatch_api
|
R
| false
| true
| 861
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_score.R
\name{update_score}
\alias{update_score}
\title{Update Score with Advantage Final Set}
\usage{
update_score(pointa, pointb, gamea, gameb, seta, setb, bestof3 = T)
}
\arguments{
\item{bestof3}{Logical indicator if best-of-3 match}
\item{point_a}{Numeric game points won by current server at start of point}
\item{point_b}{Numeric game points won by current returner at start of point}
\item{game_a}{Numeric games won by current server in the current set}
\item{game_b}{Numeric games won by current returner in the current set}
\item{set_a}{Numeric sets won by current server}
\item{set_b}{Numeric sets won by current returner}
}
\description{
Check score change based on result of current point and return score in terms of serving player of the previous point
}
|
# HR attrition analysis: modelling and plotting dependencies.
library(readr)
library(dplyr)
library(magrittr)
library(leaps)
library(lars)
library(caret)
library(ROCR)
library(rpart)
library(randomForest)
library(pROC)
library(e1071)
# Load the Kaggle "HR_comma_sep" attrition dataset (local Windows path).
HR_comma_sep <- read.csv("C:/Users/vasir/Downloads/HR_comma_sep.csv")
HR_comma_sep<-data.frame(HR_comma_sep)
# Rename column 9 (called "sales" in the raw file) to a more accurate name.
colnames(HR_comma_sep)[9]<-"Department"
# Add a unique identifier per employee and move it to the first column.
HR_comma_sep["ID"]<-seq.int(nrow(HR_comma_sep))
length(HR_comma_sep)
HR_comma_sep<-HR_comma_sep[colnames(HR_comma_sep)[c(11,1:10)]]
# Check for missing values across the whole table.
sum(is.na(HR_comma_sep))
# Histograms of the numeric variables (columns 2-6 after the reorder).
par(mfrow=c(3,3))
for(i in 2:6){hist(HR_comma_sep[,i],xlab=names(HR_comma_sep)[i])}
# Convert categorical columns to factors so downstream modelling functions
# treat them as discrete variables.
HR_comma_sep$left <- as.factor(HR_comma_sep$left)
HR_comma_sep$salary <- as.factor(HR_comma_sep$salary)
HR_comma_sep$Work_accident <- as.factor(HR_comma_sep$Work_accident)
HR_comma_sep$Department <- as.factor(HR_comma_sep$Department)
HR_comma_sep$promotion_last_5years <- as.factor(HR_comma_sep$promotion_last_5years)
# Salary has a natural ordering, so make it an ordered factor.
# (Bug fix: the original referenced the undefined object `HR_Comma`, which
# would error at runtime; corrected to `HR_comma_sep`.)
HR_comma_sep$salary <- ordered(HR_comma_sep$salary, levels = c("low", "medium", "high"))
# Descriptive statistics for every column.
summary(HR_comma_sep)
# Distribution plots: satisfaction by salary band and monthly hours by
# tenure, split by attrition status.
ggplot(HR_comma_sep,aes(x=salary,y=satisfaction_level,fill=factor(left),colour=factor(left)))+geom_boxplot(outlier.colour = "black")+xlab("salary")+ylab("Satisfaction Level")
ggplot(HR_comma_sep,aes(x=factor(time_spend_company),y=average_montly_hours,fill=factor(left),colour=factor(left)))+geom_boxplot(outlier.colour = NA)+xlab("Time Spend Company")+ylab("Average Monthly Hours")
# Correlation matrix of the numeric columns.
# NOTE(review): corrplot() is used but library(corrplot) is never loaded in
# this script -- confirm it is attached elsewhere or add the library call.
nums<-sapply(HR_comma_sep,is.numeric)
cor_matrix<-cor(HR_comma_sep[,nums])
corrplot(cor_matrix,method = 'number')
HR_Corr<-HR_comma_sep %>% select(satisfaction_level:promotion_last_5years)
# Best-subset selection by Mallows' Cp over the dummy-expanded predictors.
# NOTE(review): after the ID column was moved to position 1, column 7 of
# HR_comma_sep is Work_accident, not `left` -- verify the intended response
# column index here and in YVars below.
model.mat<-model.matrix(left~satisfaction_level+last_evaluation+number_project+average_montly_hours+time_spend_company+Work_accident+promotion_last_5years+Department+salary,data=HR_comma_sep)
sb<-leaps(x=model.mat[,2:19],y=HR_comma_sep[,7],method = 'Cp')
plot(sb$size,sb$Cp,pch=19)
sb$which[which(sb$Cp==min(sb$Cp)),]
# Forward and backward stepwise selection.
# NOTE(review): these formulas still reference `sales`, which was renamed to
# `Department` above, and `left` is now a factor (lm expects numeric) --
# these calls likely error as written; confirm.
forward_fit<-lm(left~1,data = HR_comma_sep)
fit.forward<-step(forward_fit,scope = list(lower=left~1,upper=left~satisfaction_level+last_evaluation+number_project+average_montly_hours+time_spend_company+Work_accident+promotion_last_5years+sales+salary),direction = 'forward')
summary(fit.forward)
fit.backward<-lm(left~satisfaction_level+last_evaluation+number_project+average_montly_hours+time_spend_company+Work_accident+promotion_last_5years+sales+salary,data = HR_comma_sep)
fit.back<-step(fit.backward,scope = list(lower=left~1,upper=left~satisfaction_level+last_evaluation+number_project+average_montly_hours+time_spend_company+Work_accident+promotion_last_5years+sales+salary),direction = 'backward')
summary(fit.back)
# LASSO path over the same dummy-expanded design matrix.
Xvars<-HR_comma_sep[,c(1:6,8:10)]
YVars<-HR_comma_sep[,7]
fit.lasso<-lars(x=as.matrix(model.mat[,2:19]),y=as.matrix(YVars),type = 'lasso')
plot(fit.lasso)
# Exploratory plots of attrition against hours, tenure, department, salary.
ggplot(HR_comma_sep,aes(factor(left),average_montly_hours))+geom_boxplot(outlier.colour = "green", outlier.size = 3)
ggplot(HR_comma_sep,aes(factor(left),time_spend_company))+geom_boxplot(outlier.colour = "green", outlier.size = 3)+xlab("Left")+ylab("Time Spend Company")
# NOTE(review): `aes(sales)` references the column renamed to `Department`
# earlier in this script -- these two bar charts likely fail; confirm.
ggplot(HR_comma_sep,aes(sales))+geom_bar(aes(fill=factor(left)),position='dodge')
# we observe that the highest employees left from the company belong to departments 'Management' and 'RandD'
ggplot(HR_comma_sep,aes(sales))+geom_bar(aes(fill=factor(time_spend_company)),position='dodge')
# More employees from Management and sales stay longer than 8 years compared
# to other departments, so the long-tenure observations are kept, not
# treated as removable outliers: those employees are from the departments
# central to the business.
# Split the data by attrition status for side-by-side tenure histograms.
# (Note: `left` here shadows the column name of the same spelling.)
left=HR_comma_sep[(HR_comma_sep$left==1),]
non_left=HR_comma_sep[(HR_comma_sep$left==0),]
ggplot(left,aes(time_spend_company))+geom_histogram(binwidth = 0.5)+xlab("Time Spend at the company")+ylab("Number of Observations")+ggtitle("left")
ggplot(non_left,aes(time_spend_company))+geom_histogram(binwidth = 0.5)+xlab("Time Spend at the company")+ylab("Number of Observations")+ggtitle("Not left")
# 1. From the above plots we can say that, people who work more than 6 years and who work for 2 years are less likely to leave
# 2. People are more likely to leave when they spend 3 to 5 years
# 3. People with 5-years are more likely to leave
# 4. When the years people spent in the company lies in 3-5: the more they've been here, the more likely they leave.
ggplot(HR_comma_sep,aes(x=time_spend_company,y=left,fill=factor(promotion_last_5years),colour=factor(promotion_last_5years)))+geom_bar(position='stack', stat='identity')+xlab("Time Spend in Company")+ylab("promotion in last 5 years")
# Very few people were promoted even with long tenure.
ggplot(HR_comma_sep,aes(x=salary,y=time_spend_company,fill=factor(left),colour=factor(left)))+geom_boxplot(outlier.colour = NA)+xlab("salary")+ylab("Time Spend Company")
# The low- and medium-income employees are the ones leaving.
# number_project and average_montly_hours are correlated, so derive the
# average yearly hours spent per project as a single workload feature.
HR_comma_sep['avg_hr_prj']<-(HR_comma_sep['average_montly_hours'] * 12)/HR_comma_sep['number_project']
# Bin the derived feature into three equal-width ranges...
HR_comma_sep['avg_hr_prj_range']<-cut(HR_comma_sep$avg_hr_prj,3)
# ...and a parallel factor with compact labels 0/1/2 for the same bins.
HR_comma_sep['HR_Cat']<-cut(HR_comma_sep$avg_hr_prj,3,labels = c(0:2))
# "Valuable" employees: evaluation score above 0.5 AND per-project hours in
# the middle or upper bin. Promotion is deliberately excluded from the
# criterion because almost no one was promoted in the last 5 years.
b1<-HR_comma_sep$last_evaluation > 0.5
b2<-HR_comma_sep$HR_Cat==1 | HR_comma_sep$HR_Cat==2
sum(b1 & b2)
# There are a total of 4386 valuable employees by this criterion.
# Decide who all are valuable employees
# Flag "valued" employees: last_evaluation above 0.5 AND average hours per
# project in the middle or upper bin (HR_Cat level "1" or "2").
# (Replaces a slow row-wise for loop over the data frame with an equivalent
# vectorised assignment. HR_Cat is a factor, so comparison against 1/2
# matches the level labels "1"/"2", exactly as the original per-row test.)
HR_comma_sep['valuedEmployee'] <- 0
head(HR_comma_sep)
HR_comma_sep$valuedEmployee <- ifelse(
  HR_comma_sep$last_evaluation > 0.5 &
    (HR_comma_sep$HR_Cat == 1 | HR_comma_sep$HR_Cat == 2),
  1, 0)
# Per-department headcount, number who left, and attrition fraction.
# (Replaces grepl()-based substring counting with exact tabulation: grepl
# would over-count whenever one department name is a substring of another,
# and the original also stored the counts as character strings. Using a
# shared `levels =` vector keeps rows aligned even for departments with
# zero leavers.)
dept_levels <- levels(as.factor(HR_comma_sep$Department))
lev_list <- as.data.frame(dept_levels)
colnames(lev_list) <- "Department"
lev_list['number_of_employees'] <-
  as.numeric(table(factor(HR_comma_sep$Department, levels = dept_levels)))
left_employees <- HR_comma_sep[(HR_comma_sep$left == 1), ]
lev_list['number_of_employees_left'] <-
  as.numeric(table(factor(left_employees$Department, levels = dept_levels)))
# Fraction of each department that left.
lev_list['percent'] <- lev_list$number_of_employees_left / lev_list$number_of_employees
# Train/test split (stratified on the outcome via createDataPartition) and a
# logistic-regression baseline model.
# NOTE(review): xvars still lists 'sales', which was renamed to 'Department'
# earlier; xvars/yvars are only used for the partition below.
xvars <- c('satisfaction_level','last_evaluation','number_project','average_montly_hours','time_spend_company','Work_accident','promotion_last_5years','sales','salary')
yvars <- 'left'
p1 <- 0.8
set.seed(12345)  # reproducible partition
inTrain <- createDataPartition(y = HR_comma_sep[, yvars], p = p1, list = FALSE)
train_HR <- HR_comma_sep[inTrain, ]
test_HR <- HR_comma_sep[-inTrain, ]
stopifnot(nrow(train_HR) + nrow(test_HR) == nrow(HR_comma_sep))
# Logistic regression on all remaining columns.
# NOTE(review): `left ~ .` also includes ID and the derived avg_hr_prj /
# HR_Cat / valuedEmployee columns as predictors -- confirm that is intended.
glm.fit <- glm(left ~ ., data = train_HR, family = binomial(link = "logit"))
summary(glm.fit)
plot(glm.fit)
# Confusion matrix at a 0.5 probability cutoff.
# (Bug fix: predict.glm defaults to type = "link", so the original
# thresholded the log-odds at 0.5 -- a probability cutoff of ~0.62, not 0.5.
# type = "response" returns probabilities. The predicted labels are also
# converted to a factor with the same levels as `left`, which
# caret::confusionMatrix requires.)
test_HR[, 'Yhat'] <- predict(glm.fit, newdata = test_HR, type = "response")
fitted.values <- test_HR[, 'Yhat']
test_HR$Yhat <- ifelse(test_HR$Yhat > 0.5, 1, 0)
conf <- confusionMatrix(factor(test_HR$Yhat, levels = levels(test_HR$left)), test_HR$left)
conf
# ROC curve (ranking is unaffected by the link-vs-response choice).
fit_values <- prediction(fitted.values, test_HR$left)
p <- performance(fit_values, measure = 'tpr', x.measure = 'fpr')
plot(p)
abline(0, 1, lty = 2)
# Classification tree (CART) on the same training split.
cart.fit<-rpart(left~.,data=train_HR,method='class')
summary(cart.fit)
# predict() with method='class' returns per-class probabilities; column 1 is
# class "0" (stayed), column 2 is class "1" (left).
fit.values.cart<-predict(cart.fit,newdata = test_HR)
fit.val1<-ifelse(fit.values.cart[,1]>0.5,1,0)
fit.val2<-ifelse(fit.values.cart[,2]>0.5,1,0)
# NOTE(review): fit.val2 is numeric while test_HR$left is a factor; newer
# caret versions require both to be factors with matching levels -- confirm.
conf.cart<-confusionMatrix(fit.val2,test_HR$left)
conf.cart
# ROC curve for the CART probabilities of class "1".
p.cart<-prediction(fit.values.cart[,2],test_HR$left)
p.cart<-performance(p.cart,measure = 'tpr',x.measure = 'fpr')
plot(p.cart)
abline(0,1,lty=2)
# Random forest, 1000 trees, with variable-importance tracking.
fit_rf<-randomForest(as.factor(left)~.,data=train_HR,importance=TRUE,ntree=1000)
fit_rf$confusion
# Hard class labels and class probabilities on the held-out set.
fitted.values.rf<-predict(fit_rf,newdata = test_HR,type='class')
fitted.values.rf1<-predict(fit_rf,newdata = test_HR,type='prob')
conf.rf<-confusionMatrix(fitted.values.rf,test_HR$left)
conf.rf
# ROC curve via pROC, with AUC printed on the plot.
HR.rf<-roc(test_HR$left, fitted.values.rf1[,2])
plot(HR.rf, print.auc=TRUE, auc.polygon=TRUE)
# Support vector machine; a second fit with probability = TRUE is needed to
# obtain class probabilities for the ROC curve.
svm_model<-svm(left~.,data=train_HR,type='C-classification')
svm_model1<-svm(left~.,data=train_HR,type='C-classification',probability = TRUE)
summary(svm_model)
# Predicted labels and attached probability matrix.
pred<-predict(svm_model,newdata = test_HR)
pred.prob<-predict(svm_model1,newdata = test_HR,type='prob',probability = TRUE)
conf.svm<-confusionMatrix(pred,test_HR$left)
conf.svm
# ROC curve for the SVM.
# NOTE(review): add=TRUE overlays this ROCR curve on whatever plot is
# currently open (the pROC RF plot above, whose axes differ) -- confirm the
# overlay is intended.
p.svm<-prediction(attr(pred.prob,"probabilities")[,2],test_HR$left)
svm.perf<-performance(p.svm,measure = 'tpr',x.measure = 'fpr')
plot(svm.perf,add=TRUE,col=6)
#K-means clusttering
|
/Project.R
|
no_license
|
nivimerla/HR_analytics
|
R
| false
| false
| 10,503
|
r
|
# ---- Setup: libraries and raw data --------------------------------------
library(readr)
library(dplyr)
library(magrittr)
library(leaps)
library(lars)
library(caret)
library(ROCR)
library(rpart)
library(randomForest)
library(pROC)
library(e1071)
# BUG FIX: ggplot() and corrplot() are called later in this script but their
# packages were never attached; load them here so the plots run.
library(ggplot2)
library(corrplot)
HR_comma_sep <- read.csv("C:/Users/vasir/Downloads/HR_comma_sep.csv")
HR_comma_sep <- data.frame(HR_comma_sep)
# 2. Rename column 9 (called "sales" in the raw file) to its real meaning.
colnames(HR_comma_sep)[9] <- "Department"
# 3. Add a unique identifier for each employee and move it to column 1.
HR_comma_sep["ID"] <- seq.int(nrow(HR_comma_sep))
length(HR_comma_sep)
HR_comma_sep <- HR_comma_sep[colnames(HR_comma_sep)[c(11, 1:10)]]
# 10. Count missing values in the table.
sum(is.na(HR_comma_sep))
# 8. Distributions of the numeric variables (columns 2-6 after reordering).
par(mfrow = c(3, 3))
for (i in 2:6) {
  hist(HR_comma_sep[, i], xlab = names(HR_comma_sep)[i])
}
# 1. Convert the categorical variables to factors.
HR_comma_sep$left <- as.factor(HR_comma_sep$left)
HR_comma_sep$salary <- as.factor(HR_comma_sep$salary)
HR_comma_sep$Work_accident <- as.factor(HR_comma_sep$Work_accident)
HR_comma_sep$Department <- as.factor(HR_comma_sep$Department)
HR_comma_sep$promotion_last_5years <- as.factor(HR_comma_sep$promotion_last_5years)
# 2. Make salary an ordered factor (low < medium < high).
# BUG FIX: the original assigned to a non-existent object `HR_Comma`.
HR_comma_sep$salary <- ordered(HR_comma_sep$salary, levels = c("low", "medium", "high"))
# 15. Descriptive statistics.
summary(HR_comma_sep)
# ---- Exploratory plots and variable selection ---------------------------
# 4. Satisfaction by salary band, and monthly hours by tenure, split by attrition.
ggplot(HR_comma_sep, aes(x = salary, y = satisfaction_level, fill = factor(left), colour = factor(left))) + geom_boxplot(outlier.colour = "black") + xlab("salary") + ylab("Satisfaction Level")
ggplot(HR_comma_sep, aes(x = factor(time_spend_company), y = average_montly_hours, fill = factor(left), colour = factor(left))) + geom_boxplot(outlier.colour = NA) + xlab("Time Spend Company") + ylab("Average Monthly Hours")
# 5. Correlation between the numeric variables.
nums <- vapply(HR_comma_sep, is.numeric, logical(1))
cor_matrix <- cor(HR_comma_sep[, nums])
corrplot(cor_matrix, method = 'number')
HR_Corr <- HR_comma_sep %>% select(satisfaction_level:promotion_last_5years)
# Mallows' Cp over all candidate predictors.
model.mat <- model.matrix(left ~ satisfaction_level + last_evaluation + number_project + average_montly_hours + time_spend_company + Work_accident + promotion_last_5years + Department + salary, data = HR_comma_sep)
# BUG FIX: after the ID column was moved to position 1, `left` is column 8,
# not 7 (column 7 is Work_accident); leaps() also needs a numeric response,
# so decode the factor back to 0/1.
sb <- leaps(x = model.mat[, 2:19], y = as.numeric(as.character(HR_comma_sep$left)), method = 'Cp')
plot(sb$size, sb$Cp, pch = 19)
sb$which[which(sb$Cp == min(sb$Cp)), ]
# Forward and backward stepwise selection.
# BUG FIX: `left` is a factor, so lm() would fail -- use a binomial glm()
# (stepwise logistic regression); the raw `sales` column is now `Department`.
forward_fit <- glm(left ~ 1, data = HR_comma_sep, family = binomial)
fit.forward <- step(forward_fit, scope = list(lower = left ~ 1, upper = left ~ satisfaction_level + last_evaluation + number_project + average_montly_hours + time_spend_company + Work_accident + promotion_last_5years + Department + salary), direction = 'forward')
summary(fit.forward)
fit.backward <- glm(left ~ satisfaction_level + last_evaluation + number_project + average_montly_hours + time_spend_company + Work_accident + promotion_last_5years + Department + salary, data = HR_comma_sep, family = binomial)
fit.back <- step(fit.backward, scope = list(lower = left ~ 1, upper = left ~ satisfaction_level + last_evaluation + number_project + average_montly_hours + time_spend_company + Work_accident + promotion_last_5years + Department + salary), direction = 'backward')
summary(fit.back)
# LASSO on the same design matrix (numeric response required).
# BUG FIX: the response was taken from column 7 (Work_accident); use `left`.
# (The unused Xvars data frame from the original was removed.)
YVars <- as.numeric(as.character(HR_comma_sep$left))
fit.lasso <- lars(x = as.matrix(model.mat[, 2:19]), y = as.matrix(YVars), type = 'lasso')
plot(fit.lasso)
# ---- Exploration in the data --------------------------------------------
# Monthly hours and tenure by attrition status.
ggplot(HR_comma_sep, aes(factor(left), average_montly_hours)) + geom_boxplot(outlier.colour = "green", outlier.size = 3)
ggplot(HR_comma_sep, aes(factor(left), time_spend_company)) + geom_boxplot(outlier.colour = "green", outlier.size = 3) + xlab("Left") + ylab("Time Spend Company")
# BUG FIX: the department column was renamed from `sales` to `Department`.
ggplot(HR_comma_sep, aes(Department)) + geom_bar(aes(fill = factor(left)), position = 'dodge')
# we observe that the highest employees left from the company belong to departments 'Management' and 'RandD'
ggplot(HR_comma_sep, aes(Department)) + geom_bar(aes(fill = factor(time_spend_company)), position = 'dodge')
# More employees from Management and sales spend more than 8 years in the
# company compared to other departments, so we cannot remove these outliers:
# long-tenure employees come from departments that matter to this company.
leavers <- HR_comma_sep[(HR_comma_sep$left == 1), ]
stayers <- HR_comma_sep[(HR_comma_sep$left == 0), ]
ggplot(leavers, aes(time_spend_company)) + geom_histogram(binwidth = 0.5) + xlab("Time Spend at the company") + ylab("Number of Observations") + ggtitle("left")
ggplot(stayers, aes(time_spend_company)) + geom_histogram(binwidth = 0.5) + xlab("Time Spend at the company") + ylab("Number of Observations") + ggtitle("Not left")
# 1. People who work more than 6 years, or only 2 years, are less likely to leave.
# 2. People are more likely to leave when they have spent 3 to 5 years.
# 3. People with 5 years of tenure are the most likely to leave.
# 4. Within 3-5 years of tenure: the longer they have been here, the more likely they leave.
# NOTE(review): y = left is a factor in a stat='identity' bar chart below --
# confirm this renders as intended.
ggplot(HR_comma_sep, aes(x = time_spend_company, y = left, fill = factor(promotion_last_5years), colour = factor(promotion_last_5years))) + geom_bar(position = 'stack', stat = 'identity') + xlab("Time Spend in Company") + ylab("promotion in last 5 years")
# Very few people got promoted even though they spend a lot of time at work.
ggplot(HR_comma_sep, aes(x = salary, y = time_spend_company, fill = factor(left), colour = factor(left))) + geom_boxplot(outlier.colour = NA) + xlab("salary") + ylab("Time Spend Company")
# The low- and medium-income people are the ones leaving the company.
# number_project and average_montly_hours are correlated, so derive the
# average time spent on a single project (hours/month * 12 / projects).
HR_comma_sep['avg_hr_prj'] <- (HR_comma_sep['average_montly_hours'] * 12) / HR_comma_sep['number_project']
# Split that variable into 3 equal-width bands ...
HR_comma_sep['avg_hr_prj_range'] <- cut(HR_comma_sep$avg_hr_prj, 3)
# ... and label the bands 0, 1, 2.
HR_comma_sep['HR_Cat'] <- cut(HR_comma_sep$avg_hr_prj, 3, labels = c(0:2))
# Who are the valuable employees?
# Criteria: last evaluation and monthly hours spent. Promotion is not used
# because very few people were promoted in the last 5 years. An employee is
# expected to work about 160 hours per month; the per-project hours variable
# was split into 3 levels labelled 0/1/2 above.
b1 <- HR_comma_sep$last_evaluation > 0.5
b2 <- HR_comma_sep$HR_Cat == 1 | HR_comma_sep$HR_Cat == 2
sum(b1 & b2)
# ---- Flag "valued" employees and per-department attrition ---------------
# A valued employee scores > 0.5 on the last evaluation AND falls in the
# middle or upper band of average hours per project (HR_Cat 1 or 2).
# There are a total of 4386 valuable employees.
# Vectorised replacement for the original row-by-row loop (same result).
HR_comma_sep['valuedEmployee'] <- as.numeric(
  HR_comma_sep$last_evaluation > 0.5 &
    (HR_comma_sep$HR_Cat == 1 | HR_comma_sep$HR_Cat == 2)
)
head(HR_comma_sep)
# Head-count per department.
# BUG FIX: the original counted with grepl(), which does substring matching
# and can over-count a department whose name contains another's; count by
# exact equality with table() instead.
dept_counts <- table(HR_comma_sep$Department)
lev_list <- data.frame(Department = names(dept_counts))
lev_list['number_of_employees'] <- as.numeric(dept_counts)
# Leavers per department, aligned to the same department order; departments
# with zero leavers get an explicit 0 (the original recycled a shorter vector).
left_employees <- HR_comma_sep[(HR_comma_sep$left == 1), ]
left_counts <- table(factor(left_employees$Department, levels = names(dept_counts)))
lev_list['number_of_employees_left'] <- as.numeric(left_counts)
# Attrition rate = leavers / head-count.
# BUG FIX: the original divided head-count by leavers (values > 1, not a rate).
lev_list['percent'] <- lev_list$number_of_employees_left / lev_list$number_of_employees
# ---- Train/test split and logistic regression ---------------------------
# Candidate predictor names (kept for reference; the formulas below use `.`).
# BUG FIX: the department column is called Department here, not sales.
xvars <- c('satisfaction_level', 'last_evaluation', 'number_project', 'average_montly_hours', 'time_spend_company', 'Work_accident', 'promotion_last_5years', 'Department', 'salary')
yvars <- 'left'
p1 <- 0.8  # 80/20 stratified split on the response
set.seed(12345)
inTrain <- createDataPartition(y = HR_comma_sep[, yvars], p = p1, list = FALSE)
train_HR <- HR_comma_sep[inTrain, ]
test_HR <- HR_comma_sep[-inTrain, ]
stopifnot(nrow(train_HR) + nrow(test_HR) == nrow(HR_comma_sep))
# Logistic regression on every remaining column.
# NOTE(review): `left ~ .` also pulls in ID and the derived columns
# (avg_hr_prj, HR_Cat, valuedEmployee); confirm that is intended.
glm.fit <- glm(left ~ ., data = train_HR, family = binomial(link = "logit"))
summary(glm.fit)
plot(glm.fit)
# Confusion matrix on the held-out set.
# BUG FIX: predict() on a glm defaults to the link (log-odds) scale, where a
# 0.5 cut-off is wrong; ask for probabilities explicitly.
test_HR[, 'Yhat'] <- predict(glm.fit, newdata = test_HR, type = "response")
fitted.values <- test_HR[, 'Yhat']
test_HR$Yhat <- ifelse(test_HR$Yhat > 0.5, 1, 0)
# confusionMatrix() expects factors with matching levels, not a numeric vector.
conf <- confusionMatrix(factor(test_HR$Yhat, levels = levels(test_HR$left)), test_HR$left)
conf
# ROC curve for the logistic model.
fit_values <- prediction(fitted.values, test_HR$left)
p <- performance(fit_values, measure = 'tpr', x.measure = 'fpr')
plot(p)
abline(0, 1, lty = 2)
# ---- CART, random forest and SVM classifiers ----------------------------
# Classification tree on the same training split.
cart.fit <- rpart(left ~ ., data = train_HR, method = 'class')
summary(cart.fit)
# Class probabilities on the test set; column 2 is P(left = 1).
fit.values.cart <- predict(cart.fit, newdata = test_HR)
fit.val2 <- ifelse(fit.values.cart[, 2] > 0.5, 1, 0)
# BUG FIX: confusionMatrix() expects factors with matching levels.
# (The original also computed an unused fit.val1 from column 1; removed.)
conf.cart <- confusionMatrix(factor(fit.val2, levels = levels(test_HR$left)), test_HR$left)
conf.cart
p.cart <- prediction(fit.values.cart[, 2], test_HR$left)
p.cart <- performance(p.cart, measure = 'tpr', x.measure = 'fpr')
plot(p.cart)
abline(0, 1, lty = 2)
# Random forest (1000 trees) with variable importance.
fit_rf <- randomForest(as.factor(left) ~ ., data = train_HR, importance = TRUE, ntree = 1000)
fit_rf$confusion
# Confusion matrix for the random forest on the held-out set.
fitted.values.rf <- predict(fit_rf, newdata = test_HR, type = 'class')
fitted.values.rf1 <- predict(fit_rf, newdata = test_HR, type = 'prob')
conf.rf <- confusionMatrix(fitted.values.rf, test_HR$left)
conf.rf
# ROC curve for the random forest (probability of class 1).
HR.rf <- roc(test_HR$left, fitted.values.rf1[, 2])
plot(HR.rf, print.auc = TRUE, auc.polygon = TRUE)
# Support vector machine; a second fit with probability = TRUE is needed to
# recover class probabilities for the ROC curve.
svm_model <- svm(left ~ ., data = train_HR, type = 'C-classification')
svm_model1 <- svm(left ~ ., data = train_HR, type = 'C-classification', probability = TRUE)
summary(svm_model)
# Predictions and confusion matrix.
pred <- predict(svm_model, newdata = test_HR)
pred.prob <- predict(svm_model1, newdata = test_HR, type = 'prob', probability = TRUE)
conf.svm <- confusionMatrix(pred, test_HR$left)
conf.svm
# ROC curve for the SVM, overlaid on the current plot.
p.svm <- prediction(attr(pred.prob, "probabilities")[, 2], test_HR$left)
svm.perf <- performance(p.svm, measure = 'tpr', x.measure = 'fpr')
plot(svm.perf, add = TRUE, col = 6)
# K-means clustering
|
# Chapter 6, Example 6.16 (page 192): normal approximation to the binomial.
# Probability that sheer guesswork yields 25 to 30 correct answers among the
# 80 of the 200 problems about which the student has no knowledge. The
# sample size is large, so a normal approximation is appropriate.
p <- 0.25 # probability of guessing a correct answer
n <- 80   # number of guessed questions
# Normal approximation: mean = n*p, standard deviation = sqrt(n*p*(1-p)).
mu <- n * p
sd <- sqrt(mu * (1 - p))
# Continuity correction: P(25 <= X <= 30) ~ P(24.5 < Y < 30.5).
prob_25_to_30 <- pnorm(30.5, mu, sd) - pnorm(24.5, mu, sd)
cat("The prob. of correctly guessing from 25 to 30 questions is", prob_25_to_30)
|
/Probability_And_Statistics_For_Engineers_And_Scientists_by_Ronald_E._Walpole,_Raymond_H._Myers,_Sharon_L._Myers,_Keying_Ye/CH6/EX6.16/Ex6_16.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 589
|
r
|
# Chapter 6, Example 6.16 (page 192): normal approximation to the binomial.
# Probability that sheer guesswork yields 25 to 30 correct answers among the
# 80 of the 200 problems about which the student has no knowledge. The
# sample size is large, so a normal approximation is appropriate.
p <- 0.25 # probability of guessing a correct answer
n <- 80   # number of guessed questions
# Normal approximation: mean = n*p, standard deviation = sqrt(n*p*(1-p)).
mu <- n * p
sd <- sqrt(mu * (1 - p))
# Continuity correction: P(25 <= X <= 30) ~ P(24.5 < Y < 30.5).
prob_25_to_30 <- pnorm(30.5, mu, sd) - pnorm(24.5, mu, sd)
cat("The prob. of correctly guessing from 25 to 30 questions is", prob_25_to_30)
|
/cachematrix.R
|
no_license
|
TUrurahy/Assignment-Programming-Assignment-2-Lexical-Scoping
|
R
| false
| false
| 1,926
|
r
| ||
\name{apci_spectrum}
\alias{apci_spectrum}
\docType{data}
\title{
apci_spectrum
}
\description{
Example spectrum of Glutamic acid (3TMS) measured on a Bruker impact II.
}
\usage{data("apci_spectrum")}
\format{
A data frame with 47 observations on the following 2 variables.
\describe{
\item{\code{mz}}{a numeric vector}
\item{\code{int}}{a numeric vector}
}
}
\examples{
data(apci_spectrum)
head(apci_spectrum)
PlotSpec(apci_spectrum)
}
\keyword{datasets}
\keyword{internal}
|
/man/apci_spectrum.Rd
|
no_license
|
MetabolomicsHK/InterpretMSSpectrum
|
R
| false
| false
| 511
|
rd
|
\name{apci_spectrum}
\alias{apci_spectrum}
\docType{data}
\title{
apci_spectrum
}
\description{
Example spectrum of Glutamic acid (3TMS) measured on a Bruker impact II.
}
\usage{data("apci_spectrum")}
\format{
A data frame with 47 observations on the following 2 variables.
\describe{
\item{\code{mz}}{a numeric vector}
\item{\code{int}}{a numeric vector}
}
}
\examples{
data(apci_spectrum)
head(apci_spectrum)
PlotSpec(apci_spectrum)
}
\keyword{datasets}
\keyword{internal}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/p1neurons-data.R
\docType{data}
\name{p1s}
\alias{p1s}
\title{List of 143 traced P1 neurons from Chiang et al 2011 as neuron objects}
\description{
This R list (which has additional class \code{neuronlist}) contains 143
traced \emph{Drosophila} P1 neurons as \code{neuron} objects. Original data
is due to Chiang et al. [1], who have generously shared their raw data at
\url{http://flycircuit.tw}. Automated tracing of neuron skeletons was carried
out by Lee et al [2]. Image registration and further processing was carried
out by Greg Jefferis, Marta Costa and James Manton [3].
}
\references{
[1] Chiang A.S., Lin C.Y., Chuang C.C., Chang H.M., Hsieh C.H.,
Yeh C.W., Shih C.T., Wu J.J., Wang G.T., Chen Y.C., Wu C.C., Chen G.Y.,
Ching Y.T., Lee P.C., Lin C.Y., Lin H.H., Wu C.C., Hsu H.W., Huang Y.A.,
Chen J.Y., et al. (2011). Three-dimensional reconstruction of brain-wide
wiring networks in Drosophila at single-cell resolution. Curr Biol 21 (1),
1--11.
[2] P.-C. Lee, C.-C. Chuang, A.-S. Chiang, and Y.-T. Ching. (2012).
High-throughput computer method for 3d neuronal structure reconstruction
from the image stack of the Drosophila brain and its applications. PLoS
Comput Biol, 8(9):e1002658, Sep 2012. doi: 10.1371/journal.pcbi.1002658.
[3] NBLAST: Rapid, sensitive comparison of neuronal structure and
construction of neuron family databases. Marta Costa, Aaron D. Ostrovsky,
James D. Manton, Steffen Prohaska, Gregory S.X.E. Jefferis. bioRxiv doi:
http://dx.doi.org/10.1101/006346.
}
|
/man/p1s.Rd
|
no_license
|
jefferis/p1neurons
|
R
| false
| false
| 1,609
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/p1neurons-data.R
\docType{data}
\name{p1s}
\alias{p1s}
\title{List of 143 traced P1 neurons from Chiang et al 2011 as neuron objects}
\description{
This R list (which has additional class \code{neuronlist}) contains 143
traced \emph{Drosophila} P1 neurons as \code{neuron} objects. Original data
is due to Chiang et al. [1], who have generously shared their raw data at
\url{http://flycircuit.tw}. Automated tracing of neuron skeletons was carried
out by Lee et al [2]. Image registration and further processing was carried
out by Greg Jefferis, Marta Costa and James Manton [3].
}
\references{
[1] Chiang A.S., Lin C.Y., Chuang C.C., Chang H.M., Hsieh C.H.,
Yeh C.W., Shih C.T., Wu J.J., Wang G.T., Chen Y.C., Wu C.C., Chen G.Y.,
Ching Y.T., Lee P.C., Lin C.Y., Lin H.H., Wu C.C., Hsu H.W., Huang Y.A.,
Chen J.Y., et al. (2011). Three-dimensional reconstruction of brain-wide
wiring networks in Drosophila at single-cell resolution. Curr Biol 21 (1),
1--11.
[2] P.-C. Lee, C.-C. Chuang, A.-S. Chiang, and Y.-T. Ching. (2012).
High-throughput computer method for 3d neuronal structure reconstruction
from the image stack of the Drosophila brain and its applications. PLoS
Comput Biol, 8(9):e1002658, Sep 2012. doi: 10.1371/journal.pcbi.1002658.
[3] NBLAST: Rapid, sensitive comparison of neuronal structure and
construction of neuron family databases. Marta Costa, Aaron D. Ostrovsky,
James D. Manton, Steffen Prohaska, Gregory S.X.E. Jefferis. bioRxiv doi:
http://dx.doi.org/10.1101/006346.
}
|
# Build a Korean word cloud of recommended Jeju travel courses: read the raw
# text, extract nouns with KoNLP, remove stop-words listed in an external
# file, then summarise the frequencies as a pie chart and a word cloud.
library(rJava)
# Install-on-demand guards.
# BUG FIX: installed.packages() takes a library path, not a package name;
# check the "Package" column of its result instead.
if (!("memoise" %in% installed.packages()[, "Package"])) install.packages("memoise")
library(memoise)
if (!("KoNLP" %in% installed.packages()[, "Package"])) install.packages("KoNLP")
library(KoNLP)
if (!("tm" %in% installed.packages()[, "Package"])) install.packages("tm")
library(tm)
if (!("wordcloud" %in% installed.packages()[, "Package"])) install.packages("wordcloud")
library(wordcloud)
if (!("dplyr" %in% installed.packages()[, "Package"])) install.packages("dplyr")
library(dplyr)
library(ggplot2)
if (!("stringr" %in% installed.packages()[, "Package"])) install.packages("stringr")
library(stringr)
if (!("RColorBrewer" %in% installed.packages()[, "Package"])) install.packages("RColorBrewer")
library(RColorBrewer)
KoNLP::useSejongDic()
getwd()
# Step 1. Load the raw text.
txt <- readLines("jeju.txt")
head(txt)
# Step 3. Extract the nouns from every line.
nouns <-
  sapply(
    txt,
    extractNoun,
    USE.NAMES = FALSE
  )
class(nouns)
head(nouns, 10)
cdata <- unlist(nouns)  # flatten to one word per element
cdata <- stringr::str_replace_all(cdata, "[^[:alpha:]]", " ")  # keep letters only
cdata <- gsub(" ", "", cdata)
# Remove every stop-word listed in the external file.
gsubTxt <- readLines("제주도여행코스gsub(1).txt")
cnt <- length(gsubTxt)
for (i in seq_len(cnt)) {
  cdata <- gsub(gsubTxt[i], "", cdata)
}
cdata
# Keep only words of two or more characters, persist, and reload as a table.
cdata <- Filter(function(x) { nchar(x) >= 2 }, cdata)
write(unlist(cdata), "jeju_2.txt")
nouns <- read.table("jeju_2.txt")
nrow(nouns)
# BUG FIX: the frequency table was assigned to `wordcloud`, which both masked
# wordcloud::wordcloud() and left `wordcount` (used next) undefined.
wordcount <- table(nouns)
head(sort(wordcount, decreasing = TRUE), 30)
top10 <- head(sort(wordcount, decreasing = TRUE), 10)
pie(top10,
    col = rainbow(10),
    radius = 1,
    main = "제주도 추천 여행코스 TOP 10")
# Step 2. (kept from the original) alternative special-character removal:
# txt <- stringr::str_replace_all(txt, "\\W", " ")
# txt <- stringr::str_replace_all(txt,"[^[:alpha:]]"," ")
# Step 3-1. Remove the stop-words from the reloaded noun table as well.
gsubTxt <- readLines("제주도여행코스gsub(1).txt")
gsubTxt
cnt <- length(gsubTxt)
# Report how many stop-words will be removed.
print(paste("삭제하려는 단어의 수:", cnt))
# NOTE(review): gsub() coerces the data frame `nouns` to character here;
# confirm the intended structure (the word column itself is nouns$V1).
for (i in seq_len(cnt)) {
  nouns <- gsub(gsubTxt[i], "", nouns)
}
nouns
# Step 4. Frequency table per word.
wordcount <- table(unlist(nouns))
df_word <- as.data.frame(wordcount, stringsAsFactors = FALSE)
# Step 5. Friendlier column names.
names(df_word)
df_word <- dplyr::rename(
  df_word,
  word = Var1,
  freq = Freq
)
df_word
# Step 6. Keep only words of two or more characters.
df_word <- dplyr::filter(df_word, nchar(word) >= 2)
df_word
# Step 7. Top 20 words by frequency.
top_20 <- df_word %>%
  dplyr::arrange(desc(freq)) %>%
  head(20)
top_20
# Step 8. Draw the word cloud.
wordcloud::wordcloud(
  words = df_word$word,
  freq = df_word$freq,
  min.freq = 2,
  max.words = 200,
  random.order = FALSE,
  rot.per = .1,
  scale = c(4, 0.3),
  colors = brewer.pal(8, "Dark2")
)
|
/TM03.R
|
no_license
|
Arahansproject/Data_R_180929
|
R
| false
| false
| 2,762
|
r
|
# Build a Korean word cloud of recommended Jeju travel courses: read the raw
# text, extract nouns with KoNLP, remove stop-words listed in an external
# file, then summarise the frequencies as a pie chart and a word cloud.
library(rJava)
# Install-on-demand guards.
# BUG FIX: installed.packages() takes a library path, not a package name;
# check the "Package" column of its result instead.
if (!("memoise" %in% installed.packages()[, "Package"])) install.packages("memoise")
library(memoise)
if (!("KoNLP" %in% installed.packages()[, "Package"])) install.packages("KoNLP")
library(KoNLP)
if (!("tm" %in% installed.packages()[, "Package"])) install.packages("tm")
library(tm)
if (!("wordcloud" %in% installed.packages()[, "Package"])) install.packages("wordcloud")
library(wordcloud)
if (!("dplyr" %in% installed.packages()[, "Package"])) install.packages("dplyr")
library(dplyr)
library(ggplot2)
if (!("stringr" %in% installed.packages()[, "Package"])) install.packages("stringr")
library(stringr)
if (!("RColorBrewer" %in% installed.packages()[, "Package"])) install.packages("RColorBrewer")
library(RColorBrewer)
KoNLP::useSejongDic()
getwd()
# Step 1. Load the raw text.
txt <- readLines("jeju.txt")
head(txt)
# Step 3. Extract the nouns from every line.
nouns <-
  sapply(
    txt,
    extractNoun,
    USE.NAMES = FALSE
  )
class(nouns)
head(nouns, 10)
cdata <- unlist(nouns)  # flatten to one word per element
cdata <- stringr::str_replace_all(cdata, "[^[:alpha:]]", " ")  # keep letters only
cdata <- gsub(" ", "", cdata)
# Remove every stop-word listed in the external file.
gsubTxt <- readLines("제주도여행코스gsub(1).txt")
cnt <- length(gsubTxt)
for (i in seq_len(cnt)) {
  cdata <- gsub(gsubTxt[i], "", cdata)
}
cdata
# Keep only words of two or more characters, persist, and reload as a table.
cdata <- Filter(function(x) { nchar(x) >= 2 }, cdata)
write(unlist(cdata), "jeju_2.txt")
nouns <- read.table("jeju_2.txt")
nrow(nouns)
# BUG FIX: the frequency table was assigned to `wordcloud`, which both masked
# wordcloud::wordcloud() and left `wordcount` (used next) undefined.
wordcount <- table(nouns)
head(sort(wordcount, decreasing = TRUE), 30)
top10 <- head(sort(wordcount, decreasing = TRUE), 10)
pie(top10,
    col = rainbow(10),
    radius = 1,
    main = "제주도 추천 여행코스 TOP 10")
# Step 2. (kept from the original) alternative special-character removal:
# txt <- stringr::str_replace_all(txt, "\\W", " ")
# txt <- stringr::str_replace_all(txt,"[^[:alpha:]]"," ")
# Step 3-1. Remove the stop-words from the reloaded noun table as well.
gsubTxt <- readLines("제주도여행코스gsub(1).txt")
gsubTxt
cnt <- length(gsubTxt)
# Report how many stop-words will be removed.
print(paste("삭제하려는 단어의 수:", cnt))
# NOTE(review): gsub() coerces the data frame `nouns` to character here;
# confirm the intended structure (the word column itself is nouns$V1).
for (i in seq_len(cnt)) {
  nouns <- gsub(gsubTxt[i], "", nouns)
}
nouns
# Step 4. Frequency table per word.
wordcount <- table(unlist(nouns))
df_word <- as.data.frame(wordcount, stringsAsFactors = FALSE)
# Step 5. Friendlier column names.
names(df_word)
df_word <- dplyr::rename(
  df_word,
  word = Var1,
  freq = Freq
)
df_word
# Step 6. Keep only words of two or more characters.
df_word <- dplyr::filter(df_word, nchar(word) >= 2)
df_word
# Step 7. Top 20 words by frequency.
top_20 <- df_word %>%
  dplyr::arrange(desc(freq)) %>%
  head(20)
top_20
# Step 8. Draw the word cloud.
wordcloud::wordcloud(
  words = df_word$word,
  freq = df_word$freq,
  min.freq = 2,
  max.words = 200,
  random.order = FALSE,
  rot.per = .1,
  scale = c(4, 0.3),
  colors = brewer.pal(8, "Dark2")
)
|
# Plot the share of devices (phone vs tablet) that have at least one detected
# home / office / other context, broken down by context source (Wi-Fi, cell,
# combined), and save it as an SVG under resultDir.
# NOTE(review): relies on objects defined elsewhere in the pipeline
# (devices_filtered_pre_context, resultDir, bg, family, ggplotConfig, and
# scales::percent).
svg(paste(resultDir, '/context_detection_result.svg', sep='') ,width=6, height=3.5, bg=bg, family = family)
# Share of rows in `d` whose per-row context count is positive. na.rm = TRUE
# mirrors the original subset() semantics, where NA conditions drop the row.
frac_with_context <- function(d, counts) sum(counts > 0, na.rm = TRUE) / nrow(d)
# For one device subset, the nine detection rates in the fixed order
# home/office/other for Wi-Fi, then GSM, then combined (replaces 18
# hand-written copies of the same expression).
context_rates <- function(d) {
  c(frac_with_context(d, d$homeContextsWifi),
    frac_with_context(d, d$officeContextsWifi),
    frac_with_context(d, d$otherContextsWifi),
    frac_with_context(d, d$homeContextsGSM),
    frac_with_context(d, d$officeContextsGSM),
    frac_with_context(d, d$otherContextsGSM),
    frac_with_context(d, d$homeContextsWifi + d$homeContextsGSM),
    frac_with_context(d, d$officeContextsWifi + d$officeContextsGSM),
    frac_with_context(d, d$otherContextsWifi + d$otherContextsGSM))
}
dp <- subset(devices_filtered_pre_context, device == 'phone')
dt <- subset(devices_filtered_pre_context, device == 'tablet')
plotData <- data.frame("data" = c(context_rates(dp), context_rates(dt)),
                       "device" = rep(c("Phone", "Tablet"), each=9),
                       "c_source" = factor(rep(c("Wi-Fi", "Cell", "Combined"), each=3, times=2), levels=(c( "Combined", "Wi-Fi", "Cell"))),
                       "context" = factor(rep(c("Home", "Office", "Other mean."), each=1, times=6), levels=(c("Home", "Office", "Other mean." )))
)
# Horizontal grouped bars, faceted by context (rows) and device (columns).
ggplot(data = plotData,
       color="black",
       aes(x = factor(c_source), y = data, fill=c_source)) +
  geom_bar(stat="identity", position = "dodge", color="black") +
  #geom_bar(stat="identity", position = "dodge", color="black" , show_guide=FALSE) +
  facet_grid(context~device) +
  labs(x = "", y = "", fill="Context Source: ") +
  scale_y_continuous(labels = percent) +
  theme(legend.position = 'bottom', legend.spacing = unit(-1, "cm"), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1), panel.spacing = unit(.6, "lines")) +
  guides(fill = guide_legend(reverse=TRUE)) +
  coord_flip(ylim =c(0, 1.0)) +
  ggplotConfig
dev.off()
rm(dp, dt, frac_with_context, context_rates)
|
/r/output/contextPlot.R
|
no_license
|
hintzed/mobile-device-usage-processing
|
R
| false
| false
| 2,380
|
r
|
# Plot the share of devices (phone vs tablet) that have at least one detected
# home / office / other context, broken down by context source (Wi-Fi, cell,
# combined), and save it as an SVG under resultDir.
# NOTE(review): relies on objects defined elsewhere in the pipeline
# (devices_filtered_pre_context, resultDir, bg, family, ggplotConfig, and
# scales::percent).
svg(paste(resultDir, '/context_detection_result.svg', sep='') ,width=6, height=3.5, bg=bg, family = family)
# Share of rows in `d` whose per-row context count is positive. na.rm = TRUE
# mirrors the original subset() semantics, where NA conditions drop the row.
frac_with_context <- function(d, counts) sum(counts > 0, na.rm = TRUE) / nrow(d)
# For one device subset, the nine detection rates in the fixed order
# home/office/other for Wi-Fi, then GSM, then combined (replaces 18
# hand-written copies of the same expression).
context_rates <- function(d) {
  c(frac_with_context(d, d$homeContextsWifi),
    frac_with_context(d, d$officeContextsWifi),
    frac_with_context(d, d$otherContextsWifi),
    frac_with_context(d, d$homeContextsGSM),
    frac_with_context(d, d$officeContextsGSM),
    frac_with_context(d, d$otherContextsGSM),
    frac_with_context(d, d$homeContextsWifi + d$homeContextsGSM),
    frac_with_context(d, d$officeContextsWifi + d$officeContextsGSM),
    frac_with_context(d, d$otherContextsWifi + d$otherContextsGSM))
}
dp <- subset(devices_filtered_pre_context, device == 'phone')
dt <- subset(devices_filtered_pre_context, device == 'tablet')
plotData <- data.frame("data" = c(context_rates(dp), context_rates(dt)),
                       "device" = rep(c("Phone", "Tablet"), each=9),
                       "c_source" = factor(rep(c("Wi-Fi", "Cell", "Combined"), each=3, times=2), levels=(c( "Combined", "Wi-Fi", "Cell"))),
                       "context" = factor(rep(c("Home", "Office", "Other mean."), each=1, times=6), levels=(c("Home", "Office", "Other mean." )))
)
# Horizontal grouped bars, faceted by context (rows) and device (columns).
ggplot(data = plotData,
       color="black",
       aes(x = factor(c_source), y = data, fill=c_source)) +
  geom_bar(stat="identity", position = "dodge", color="black") +
  #geom_bar(stat="identity", position = "dodge", color="black" , show_guide=FALSE) +
  facet_grid(context~device) +
  labs(x = "", y = "", fill="Context Source: ") +
  scale_y_continuous(labels = percent) +
  theme(legend.position = 'bottom', legend.spacing = unit(-1, "cm"), axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1), panel.spacing = unit(.6, "lines")) +
  guides(fill = guide_legend(reverse=TRUE)) +
  coord_flip(ylim =c(0, 1.0)) +
  ggplotConfig
dev.off()
rm(dp, dt, frac_with_context, context_rates)
|
# Merge the UCI HAR train/test splits, keep the mean/std measurements,
# relabel the columns into readable names, and write the per-subject /
# per-activity averages to LastData.txt.
# BUG FIX: the script uses %>% and dplyr verbs but never attached dplyr.
library(dplyr)
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
# Stack train on top of test, then bind subject / activity code / measurements.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Sbj <- rbind(subject_train, subject_test)
Merged <- cbind(Sbj, Y, X)
# Keep only the mean and standard-deviation measurements.
TdData <- Merged %>% select(subject, code, contains("mean"), contains("std"))
names(TdData)[2] = "activity"
# Expand the terse feature abbreviations into readable names.
names(TdData) <- gsub("Acc", "Accelerometer", names(TdData))
names(TdData) <- gsub("Gyro", "Gyroscope", names(TdData))
names(TdData) <- gsub("BodyBody", "Body", names(TdData))
names(TdData) <- gsub("Mag", "Magnitude", names(TdData))
names(TdData) <- gsub("^t", "Time", names(TdData))
names(TdData) <- gsub("^f", "Frequency", names(TdData))
names(TdData) <- gsub("tBody", "TimeBody", names(TdData))
# BUG FIX: read.table(col.names=) sanitises names with make.names(), turning
# "-mean()" into ".mean..", so the original patterns "-mean()" / "-std()"
# never matched anything; match the sanitised names instead.
names(TdData) <- gsub("\\.mean\\.\\.", "Mean", names(TdData), ignore.case = TRUE)
names(TdData) <- gsub("\\.std\\.\\.", "STD", names(TdData), ignore.case = TRUE)
# NOTE(review): ".meanFreq.." columns are handled separately (the original
# "-freq()" pattern was a no-op); confirm the intended final naming.
names(TdData) <- gsub("\\.meanFreq\\.\\.", "MeanFrequency", names(TdData), ignore.case = TRUE)
names(TdData) <- gsub("angle", "Angle", names(TdData))
names(TdData) <- gsub("gravity", "Gravity", names(TdData))
# Average every measurement per subject/activity pair and write the result.
LastData <- TdData %>%
  group_by(subject, activity) %>%
  summarise_all(list(~mean))
write.table(LastData, "LastData.txt", row.name=FALSE)
|
/run_analysis.R
|
no_license
|
KamranAghayev/Getting-and-Cleaning-Data-Week
|
R
| false
| false
| 1,733
|
r
|
# Merge the UCI HAR train/test splits, keep the mean/std measurements,
# relabel the columns into readable names, and write the per-subject /
# per-activity averages to LastData.txt.
# BUG FIX: the script uses %>% and dplyr verbs but never attached dplyr.
library(dplyr)
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
# Stack train on top of test, then bind subject / activity code / measurements.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Sbj <- rbind(subject_train, subject_test)
Merged <- cbind(Sbj, Y, X)
# Keep only the mean and standard-deviation measurements.
TdData <- Merged %>% select(subject, code, contains("mean"), contains("std"))
names(TdData)[2] = "activity"
# Expand the terse feature abbreviations into readable names.
names(TdData) <- gsub("Acc", "Accelerometer", names(TdData))
names(TdData) <- gsub("Gyro", "Gyroscope", names(TdData))
names(TdData) <- gsub("BodyBody", "Body", names(TdData))
names(TdData) <- gsub("Mag", "Magnitude", names(TdData))
names(TdData) <- gsub("^t", "Time", names(TdData))
names(TdData) <- gsub("^f", "Frequency", names(TdData))
names(TdData) <- gsub("tBody", "TimeBody", names(TdData))
# BUG FIX: read.table(col.names=) sanitises names with make.names(), turning
# "-mean()" into ".mean..", so the original patterns "-mean()" / "-std()"
# never matched anything; match the sanitised names instead.
names(TdData) <- gsub("\\.mean\\.\\.", "Mean", names(TdData), ignore.case = TRUE)
names(TdData) <- gsub("\\.std\\.\\.", "STD", names(TdData), ignore.case = TRUE)
# NOTE(review): ".meanFreq.." columns are handled separately (the original
# "-freq()" pattern was a no-op); confirm the intended final naming.
names(TdData) <- gsub("\\.meanFreq\\.\\.", "MeanFrequency", names(TdData), ignore.case = TRUE)
names(TdData) <- gsub("angle", "Angle", names(TdData))
names(TdData) <- gsub("gravity", "Gravity", names(TdData))
# Average every measurement per subject/activity pair and write the result.
LastData <- TdData %>%
  group_by(subject, activity) %>%
  summarise_all(list(~mean))
write.table(LastData, "LastData.txt", row.name=FALSE)
|
#Boxplots A vs Z, after removing SBG
# Compares log2(FPKM) expression between autosomes (A) and the Z chromosome
# across three stages (instar V larva, pupa, adult) and both sexes. Each
# pair of boxes overlays the full gene set (dashed grey outline) with the
# set remaining after removing sex-biased genes (SBG).
#Clear all states
rm(list=ls(all=TRUE))
dev.off()
############### Paths and folders
### Folder containing expression data before removing SBG
folder_1 <- "/crex/proj/uppstore2017185/b2014034_nobackup/Luis/3_DosageCompensation_LS/03_Normalized_libs"
### Folder containing expression data after removing SBG
folder_2 <-"/crex/proj/uppstore2017185/b2014034_nobackup/Luis/3_DosageCompensation_LS/12_Filter_sex_biased_genes"
############## Load data
### First dataset
setwd(folder_1)
# Before removing SBG; genes with zero counts removed individually for each sex
instar_V_f <- read.delim("instar_V-assigned_A_or_Z_female-filtered.txt", header = TRUE)
instar_V_m <- read.delim("instar_V-assigned_A_or_Z_male-filtered.txt", header = TRUE)
pupa_f <- read.delim("pupa-assigned_A_or_Z_female-filtered.txt", header = TRUE)
pupa_m <- read.delim("pupa-assigned_A_or_Z_male-filtered.txt", header = TRUE)
adult_f <- read.delim("adult-assigned_A_or_Z_female-filtered.txt", header = TRUE)
adult_m <- read.delim("adult-assigned_A_or_Z_male-filtered.txt", header = TRUE)
### Second dataset
setwd(folder_2)
####### After removing SBG; genes with zero counts removed individually for each sex
instar_V_f_nonbiased <- read.delim("nonbiased_genes-instar_V-assigned_A_or_Z_female-filtered.txt", header = TRUE)
instar_V_m_nonbiased <- read.delim("nonbiased_genes-instar_V-assigned_A_or_Z_male-filtered.txt", header = TRUE)
pupa_f_nonbiased <- read.delim("nonbiased_genes-pupa-assigned_A_or_Z_female-filtered.txt", header = TRUE)
pupa_m_nonbiased <- read.delim("nonbiased_genes-pupa-assigned_A_or_Z_male-filtered.txt", header = TRUE)
adult_f_nonbiased <- read.delim("nonbiased_genes-adult-assigned_A_or_Z_female-filtered.txt", header = TRUE)
adult_m_nonbiased <- read.delim("nonbiased_genes-adult-assigned_A_or_Z_male-filtered.txt", header = TRUE)
##################################### PROCESS DATA
# Every stanza below follows the same pattern: keep the sample's FPKM column
# (col 4 for females, col 5 for males) and the chromosome column (col 6),
# add a "label chromosome" grouping string, then keep only FPKM + group.
# The numeric prefix on each label (1_..6_) fixes the left-to-right plotting
# order when boxplot() sorts the group factor levels.
############### Dataset I
instar_V_female <- instar_V_f[c(4,6)]
instar_V_female$group <- rep("1_instar_V_female", nrow(instar_V_female))
instar_V_female$group <- paste(instar_V_female$group, instar_V_female$chromosome)
instar_V_female <- instar_V_female[c(1,3)]
names(instar_V_female)[1] <-"FPKM"
pupa_female <- pupa_f[c(4,6)]
pupa_female$group <- rep("3_pupa_female", nrow(pupa_female))
pupa_female$group <- paste(pupa_female$group, pupa_female$chromosome)
pupa_female <- pupa_female[c(1,3)]
names(pupa_female)[1] <-"FPKM"
adult_female <- adult_f[c(4,6)]
adult_female$group <- rep("5_adult_female", nrow(adult_female))
adult_female$group <- paste(adult_female$group, adult_female$chromosome)
adult_female <- adult_female[c(1,3)]
names(adult_female)[1] <-"FPKM"
instar_V_male <- instar_V_m[c(5,6)]
instar_V_male$group <- rep("2_instar_V_male", nrow(instar_V_male))
instar_V_male$group <- paste(instar_V_male$group, instar_V_male$chromosome)
instar_V_male <- instar_V_male[c(1,3)]
names(instar_V_male)[1] <-"FPKM"
pupa_male <- pupa_m[c(5,6)]
pupa_male$group <- rep("4_pupa_male", nrow(pupa_male))
pupa_male$group <- paste(pupa_male$group, pupa_male$chromosome)
pupa_male <- pupa_male[c(1,3)]
names(pupa_male)[1] <-"FPKM"
adult_male <- adult_m[c(5,6)]
adult_male$group <- rep("6_adult_male", nrow(adult_male))
adult_male$group <- paste(adult_male$group, adult_male$chromosome)
adult_male <- adult_male[c(1,3)]
names(adult_male)[1] <-"FPKM"
all_samples <- rbind(instar_V_female, instar_V_male, pupa_female, pupa_male, adult_female, adult_male)
############### Dataset II
instar_V_female_nonbiased <- instar_V_f_nonbiased[c(4,6)]
instar_V_female_nonbiased$group <- rep("1_instar_V_female_nonbiased", nrow(instar_V_female_nonbiased))
instar_V_female_nonbiased$group <- paste(instar_V_female_nonbiased$group, instar_V_female_nonbiased$chromosome)
instar_V_female_nonbiased <- instar_V_female_nonbiased[c(1,3)]
names(instar_V_female_nonbiased)[1] <-"FPKM"
pupa_female_nonbiased <- pupa_f_nonbiased[c(4,6)]
pupa_female_nonbiased$group <- rep("3_pupa_female_nonbiased", nrow(pupa_female_nonbiased))
pupa_female_nonbiased$group <- paste(pupa_female_nonbiased$group, pupa_female_nonbiased$chromosome)
pupa_female_nonbiased <- pupa_female_nonbiased[c(1,3)]
names(pupa_female_nonbiased)[1] <-"FPKM"
adult_female_nonbiased <- adult_f_nonbiased[c(4,6)]
adult_female_nonbiased$group <- rep("5_adult_female_nonbiased", nrow(adult_female_nonbiased))
adult_female_nonbiased$group <- paste(adult_female_nonbiased$group, adult_female_nonbiased$chromosome)
adult_female_nonbiased <- adult_female_nonbiased[c(1,3)]
names(adult_female_nonbiased)[1] <-"FPKM"
instar_V_male_nonbiased <- instar_V_m_nonbiased[c(5,6)]
instar_V_male_nonbiased$group <- rep("2_instar_V_male_nonbiased", nrow(instar_V_male_nonbiased))
instar_V_male_nonbiased$group <- paste(instar_V_male_nonbiased$group, instar_V_male_nonbiased$chromosome)
instar_V_male_nonbiased <- instar_V_male_nonbiased[c(1,3)]
names(instar_V_male_nonbiased)[1] <-"FPKM"
pupa_male_nonbiased <- pupa_m_nonbiased[c(5,6)]
pupa_male_nonbiased$group <- rep("4_pupa_male_nonbiased", nrow(pupa_male_nonbiased))
pupa_male_nonbiased$group <- paste(pupa_male_nonbiased$group, pupa_male_nonbiased$chromosome)
pupa_male_nonbiased <- pupa_male_nonbiased[c(1,3)]
names(pupa_male_nonbiased)[1] <-"FPKM"
adult_male_nonbiased <- adult_m_nonbiased[c(5,6)]
adult_male_nonbiased$group <- rep("6_adult_male_nonbiased", nrow(adult_male_nonbiased))
adult_male_nonbiased$group <- paste(adult_male_nonbiased$group, adult_male_nonbiased$chromosome)
adult_male_nonbiased <- adult_male_nonbiased[c(1,3)]
names(adult_male_nonbiased)[1] <-"FPKM"
nonbiased <- rbind(instar_V_female_nonbiased, instar_V_male_nonbiased, pupa_female_nonbiased, pupa_male_nonbiased, adult_female_nonbiased, adult_male_nonbiased)
##############################################################################
### Boxplot
# Background layer: all genes, dashed grey-bordered boxes at paired x
# positions (one A/Z pair per stage-and-sex combination).
boxplot(log2(all_samples$FPKM)~all_samples$group, ylim = c(-6, 18),
        col = c("grey90", "grey90"), notch = TRUE,
        at = c(1,2, 4,5, 8,9, 11,12, 15,16, 18,19),
        outline = FALSE, boxwex = 0.7, xaxt = "n", frame.plot = FALSE, border="grey65",
        boxlty = 2)
axis(2, labels = "log2 FPKM(>0)", cex.axis = 1.2, at = 4, line = 1.5, tck = 0)
# Foreground layer: non-biased genes, offset +0.2 so both layers are visible;
# autosomes light grey, Z chromosome orange.
boxplot(log2(nonbiased$FPKM)~nonbiased$group, ylim = c(-8, 14), add = TRUE,
        col = c("lightgrey", "darkorange"), notch = FALSE,
        at = c(1.2,2.2, 4.2,5.2, 8.2,9.2, 11.2,12.2, 15.2,16.2, 18.2,19.2),
        outline = FALSE, boxwex = 0.7, xaxt = "n", frame.plot = FALSE)
# Sex symbols under each pair; "\\VE"/"\\MA" are Hershey vector-font escapes
# for the Venus (female) and Mars (male) symbols (see ?Hershey).
text(1.5, -6.5, "\\VE", vfont=c("sans serif","plain"), cex =1.9)
text(4.5, -6.5, "\\MA", vfont=c("sans serif","plain"), cex =1.9)
text(8.5, -6.5, "\\VE", vfont=c("sans serif","plain"), cex =1.9)
text(11.5, -6.5, "\\MA", vfont=c("sans serif","plain"), cex =1.9)
text(15.5, -6.5, "\\VE", vfont=c("sans serif","plain"), cex =1.9)
text(18.5, -6.5, "\\MA", vfont=c("sans serif","plain"), cex =1.9)
axis(1, labels = c("Larva", "Pupa", "Adult"), lwd = 0, cex.axis = 1.5,
     at = c(3, 10, 17), line = 1)
legend(16.5, 18, legend = c("Autosomes","Z"), cex = 1,
       fill = c("lightgrey", "darkorange"), bty = "n")
#legend(0.5, 19, legend = c("Autosomes","Z"), cex = 1,
#       fill = c("lightgrey", "darkorange"), bty = "n")
# Dotted verticals separate the three stages; solid baseline under all boxes.
segments(6.5, -5.5, 6.5, 15, lty = 3, lwd = 1)
segments(13.5, -5.5, 13.5, 15, lty = 3, lwd = 1)
segments(1, -5.5, 19, -5.5, lty = 1, lwd = 1)
|
/16A_boxplots_a_vs_z_after_removing_SBG.R
|
no_license
|
LLN273/Dosage_compensation_Lsinapis
|
R
| false
| false
| 7,391
|
r
|
# Boxplots A vs Z, after removing sex-biased genes (SBG).
# For each stage (instar V larva, pupa, adult) and sex, the log2(FPKM)
# distribution of all expressed genes (dashed grey boxes) is overlaid with
# the distribution restricted to non-sex-biased genes (light grey =
# autosomes, orange = Z chromosome).

# Reset the workspace and close any open graphics device.
rm(list = ls(all = TRUE))
dev.off()

############### Paths and folders
# Expression data BEFORE removing SBG
dir_all_genes <- "/crex/proj/uppstore2017185/b2014034_nobackup/Luis/3_DosageCompensation_LS/03_Normalized_libs"
# Expression data AFTER removing SBG
dir_nonbiased <- "/crex/proj/uppstore2017185/b2014034_nobackup/Luis/3_DosageCompensation_LS/12_Filter_sex_biased_genes"

############## Load data
# Dataset I: all genes; zero-count genes removed individually for each sex.
setwd(dir_all_genes)
instar_V_f <- read.delim("instar_V-assigned_A_or_Z_female-filtered.txt", header = TRUE)
instar_V_m <- read.delim("instar_V-assigned_A_or_Z_male-filtered.txt", header = TRUE)
pupa_f <- read.delim("pupa-assigned_A_or_Z_female-filtered.txt", header = TRUE)
pupa_m <- read.delim("pupa-assigned_A_or_Z_male-filtered.txt", header = TRUE)
adult_f <- read.delim("adult-assigned_A_or_Z_female-filtered.txt", header = TRUE)
adult_m <- read.delim("adult-assigned_A_or_Z_male-filtered.txt", header = TRUE)

# Dataset II: non-sex-biased genes only.
setwd(dir_nonbiased)
instar_V_f_nb <- read.delim("nonbiased_genes-instar_V-assigned_A_or_Z_female-filtered.txt", header = TRUE)
instar_V_m_nb <- read.delim("nonbiased_genes-instar_V-assigned_A_or_Z_male-filtered.txt", header = TRUE)
pupa_f_nb <- read.delim("nonbiased_genes-pupa-assigned_A_or_Z_female-filtered.txt", header = TRUE)
pupa_m_nb <- read.delim("nonbiased_genes-pupa-assigned_A_or_Z_male-filtered.txt", header = TRUE)
adult_f_nb <- read.delim("nonbiased_genes-adult-assigned_A_or_Z_female-filtered.txt", header = TRUE)
adult_m_nb <- read.delim("nonbiased_genes-adult-assigned_A_or_Z_male-filtered.txt", header = TRUE)

##################################### PROCESS DATA
# Keep one sample's FPKM column (col 4 = female, col 5 = male) together with
# the chromosome assignment (col 6), and reduce it to a two-column frame:
# FPKM plus a "label chromosome" grouping string. The numeric prefix on the
# label (1_..6_) fixes the left-to-right order of the boxplot groups.
fpkm_with_group <- function(dat, fpkm_col, label) {
  sub <- dat[c(fpkm_col, 6)]
  sub$group <- paste(rep(label, nrow(sub)), sub$chromosome)
  sub <- sub[c(1, 3)]
  names(sub)[1] <- "FPKM"
  sub
}

# Dataset I, stacked in plotting order.
all_samples <- rbind(
  fpkm_with_group(instar_V_f, 4, "1_instar_V_female"),
  fpkm_with_group(instar_V_m, 5, "2_instar_V_male"),
  fpkm_with_group(pupa_f, 4, "3_pupa_female"),
  fpkm_with_group(pupa_m, 5, "4_pupa_male"),
  fpkm_with_group(adult_f, 4, "5_adult_female"),
  fpkm_with_group(adult_m, 5, "6_adult_male")
)

# Dataset II, same layout.
nonbiased <- rbind(
  fpkm_with_group(instar_V_f_nb, 4, "1_instar_V_female_nonbiased"),
  fpkm_with_group(instar_V_m_nb, 5, "2_instar_V_male_nonbiased"),
  fpkm_with_group(pupa_f_nb, 4, "3_pupa_female_nonbiased"),
  fpkm_with_group(pupa_m_nb, 5, "4_pupa_male_nonbiased"),
  fpkm_with_group(adult_f_nb, 4, "5_adult_female_nonbiased"),
  fpkm_with_group(adult_m_nb, 5, "6_adult_male_nonbiased")
)

##############################################################################
### Boxplot
# Background layer: all genes, dashed grey-bordered boxes at paired x
# positions (one A/Z pair per stage-and-sex combination).
boxplot(log2(all_samples$FPKM) ~ all_samples$group, ylim = c(-6, 18),
        col = c("grey90", "grey90"), notch = TRUE,
        at = c(1, 2, 4, 5, 8, 9, 11, 12, 15, 16, 18, 19),
        outline = FALSE, boxwex = 0.7, xaxt = "n", frame.plot = FALSE,
        border = "grey65", boxlty = 2)
axis(2, labels = "log2 FPKM(>0)", cex.axis = 1.2, at = 4, line = 1.5, tck = 0)
# Foreground layer: non-biased genes, offset +0.2 so both layers stay
# visible; autosomes light grey, Z chromosome orange.
boxplot(log2(nonbiased$FPKM) ~ nonbiased$group, ylim = c(-8, 14), add = TRUE,
        col = c("lightgrey", "darkorange"), notch = FALSE,
        at = c(1.2, 2.2, 4.2, 5.2, 8.2, 9.2, 11.2, 12.2, 15.2, 16.2, 18.2, 19.2),
        outline = FALSE, boxwex = 0.7, xaxt = "n", frame.plot = FALSE)
# Sex symbols under each pair; "\\VE"/"\\MA" are Hershey vector-font escapes
# for the Venus (female) and Mars (male) symbols (see ?Hershey).
for (x_pos in c(1.5, 8.5, 15.5)) {
  text(x_pos, -6.5, "\\VE", vfont = c("sans serif", "plain"), cex = 1.9)
}
for (x_pos in c(4.5, 11.5, 18.5)) {
  text(x_pos, -6.5, "\\MA", vfont = c("sans serif", "plain"), cex = 1.9)
}
axis(1, labels = c("Larva", "Pupa", "Adult"), lwd = 0, cex.axis = 1.5,
     at = c(3, 10, 17), line = 1)
legend(16.5, 18, legend = c("Autosomes", "Z"), cex = 1,
       fill = c("lightgrey", "darkorange"), bty = "n")
#legend(0.5, 19, legend = c("Autosomes","Z"), cex = 1,
#       fill = c("lightgrey", "darkorange"), bty = "n")
# Dotted verticals separate the three stages; solid baseline under all boxes.
segments(6.5, -5.5, 6.5, 15, lty = 3, lwd = 1)
segments(13.5, -5.5, 13.5, 15, lty = 3, lwd = 1)
segments(1, -5.5, 19, -5.5, lty = 1, lwd = 1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_ALDEx2.R
\name{process_ALDEx2}
\alias{process_ALDEx2}
\title{Process the DAF analysis through the ALDEx2 package}
\usage{
process_ALDEx2(data, ...)
}
\arguments{
\item{data}{the output of the \code{\link{build_DAF_data}} function}
\item{...}{additional parameters of the method}
}
\value{
a list containing the raw output of ALDEx2 analysis and a curated version
}
\description{
Process the DAF analysis through the ALDEx2 package
}
|
/man/process_ALDEx2.Rd
|
no_license
|
leonarDubois/metaDAF
|
R
| false
| true
| 540
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_ALDEx2.R
\name{process_ALDEx2}
\alias{process_ALDEx2}
\title{Process the DAF analysis through the ALDEx2 package}
\usage{
process_ALDEx2(data, ...)
}
\arguments{
\item{data}{the output of the \code{\link{build_DAF_data}} function}
\item{...}{additional parameters of the method}
}
\value{
a list containing the raw output of ALDEx2 analysis and a curated version
}
\description{
Process the DAF analysis through the ALDEx2 package
}
|
test_that("strings with varying widths", {
  local_options(width = 80)
  # Generated by data-raw/create-chr-tests.R
  # Each snapshot fixes the terminal width via options(width = ...), renders
  # a shuffled permutation of the df_str fixture columns at the given total
  # colonnade width, and compares the printed output against the stored
  # snapshot. The index vectors are machine-generated -- regenerate with the
  # script above rather than editing them by hand.
  expect_snapshot({ options(width = 54); ctl_colonnade(df_str[c(28L, 34L, 16L, 29L, 47L, 25L, 42L, 27L, 44L, 20L, 14L, 36L, 43L, 41L, 26L, 45L, 22L, 9L, 13L, 32L, 31L, 12L, 19L, 48L, 49L, 35L, 3L, 11L, 23L, 24L, 40L, 15L, 38L, 10L, 46L, 5L, 50L, 18L, 21L, 6L, 30L, 2L, 7L, 1L, 4L, 8L, 17L, 33L, 39L, 37L)], width = 516) })
  expect_snapshot({ options(width = 42); ctl_colonnade(df_str[c(28L, 41L, 12L, 29L, 13L, 43L, 24L, 50L, 48L, 35L, 44L, 21L, 33L, 45L, 47L, 34L, 25L, 14L, 18L, 23L, 7L, 3L, 42L, 36L, 11L, 2L, 20L, 31L, 1L, 4L, 38L, 9L, 27L, 40L, 32L, 17L, 6L, 49L, 16L, 19L, 15L, 22L, 39L, 10L, 46L, 5L, 30L, 8L, 26L, 37L)], width = 1365) })
  expect_snapshot({ options(width = 39); ctl_colonnade(df_str[c(40L, 17L, 13L, 23L, 22L, 2L, 18L, 3L, 29L, 45L, 14L, 19L, 33L, 37L, 47L, 43L, 44L, 10L, 31L, 27L, 34L, 35L, 41L, 21L, 4L, 25L, 38L, 48L, 9L, 24L, 26L, 39L, 20L, 36L, 42L, 16L, 6L, 11L, 7L, 12L, 1L, 46L, 15L, 5L, 8L, 50L, 32L, 30L, 49L, 28L)], width = 934) })
  expect_snapshot({ options(width = 32); ctl_colonnade(df_str[c(11L, 36L, 17L, 14L, 31L, 35L, 23L, 13L, 6L, 44L, 45L, 22L, 21L, 18L, 33L, 10L, 43L, 2L, 46L, 34L, 3L, 19L, 1L, 38L, 9L, 37L, 5L, 8L, 25L, 49L, 27L, 29L, 15L, 39L, 24L, 40L, 48L, 26L, 47L, 42L, 41L, 12L, 28L, 30L, 7L, 16L, 4L, 50L, 20L, 32L)], width = 565) })
  expect_snapshot({ options(width = 35); ctl_colonnade(df_str[c(18L, 46L, 11L, 43L, 31L, 47L, 48L, 44L, 50L, 15L, 28L, 33L, 13L, 4L, 22L, 3L, 37L, 32L, 40L, 9L, 25L, 16L, 45L, 23L, 21L, 6L, 49L, 36L, 27L, 38L, 14L, 34L, 8L, 24L, 29L, 1L, 12L, 2L, 20L, 17L, 35L, 5L, 19L, 30L, 7L, 26L, 42L, 41L, 39L, 10L)], width = 1121) })
  expect_snapshot({ options(width = 32); ctl_colonnade(df_str[c(43L, 1L, 3L, 15L, 28L, 12L, 46L, 34L, 31L, 7L, 11L, 4L, 44L, 8L, 9L, 5L, 36L, 22L, 17L, 39L, 18L, 45L, 37L, 13L, 29L, 6L, 30L, 16L, 20L, 10L, 19L, 26L, 33L, 40L, 35L, 48L, 38L, 25L, 2L, 47L, 42L, 41L, 27L, 14L, 21L, 24L, 50L, 49L, 23L, 32L)], width = 446) })
  expect_snapshot({ options(width = 31); ctl_colonnade(df_str[c(37L, 46L, 21L, 3L, 16L, 39L, 34L, 33L, 10L, 17L, 19L, 36L, 45L, 49L, 11L, 50L, 14L, 29L, 44L, 13L, 30L, 38L, 32L, 40L, 42L, 1L, 31L, 41L, 7L, 23L, 35L, 28L, 6L, 25L, 2L, 9L, 12L, 15L, 5L, 18L, 20L, 27L, 43L, 8L, 47L, 4L, 48L, 24L, 26L, 22L)], width = 1166) })
  expect_snapshot({ options(width = 58); ctl_colonnade(df_str[c(31L, 39L, 40L, 30L, 10L, 21L, 9L, 16L, 46L, 25L, 15L, 24L, 3L, 50L, 35L, 1L, 12L, 34L, 48L, 4L, 29L, 23L, 37L, 36L, 28L, 43L, 11L, 17L, 32L, 8L, 41L, 13L, 44L, 7L, 38L, 26L, 33L, 20L, 19L, 2L, 18L, 49L, 27L, 47L, 22L, 14L, 6L, 5L, 45L, 42L)], width = 546) })
  expect_snapshot({ options(width = 57); ctl_colonnade(df_str[c(43L, 21L, 41L, 48L, 22L, 25L, 2L, 8L, 1L, 24L, 6L, 39L, 38L, 20L, 49L, 45L, 47L, 12L, 9L, 13L, 36L, 26L, 44L, 11L, 46L, 28L, 7L, 18L, 50L, 16L, 29L, 30L, 4L, 23L, 17L, 40L, 33L, 14L, 27L, 19L, 34L, 32L, 3L, 37L, 15L, 10L, 5L, 35L, 31L, 42L)], width = 1035) })
  expect_snapshot({ options(width = 33); ctl_colonnade(df_str[c(40L, 6L, 25L, 5L, 26L, 17L, 19L, 2L, 11L, 34L, 45L, 24L, 22L, 44L, 35L, 7L, 4L, 49L, 1L, 36L, 12L, 41L, 39L, 13L, 48L, 27L, 18L, 30L, 42L, 28L, 3L, 46L, 21L, 20L, 16L, 29L, 50L, 10L, 9L, 8L, 47L, 31L, 14L, 38L, 33L, 32L, 43L, 23L, 15L, 37L)], width = 1217) })
  expect_snapshot({ options(width = 32); ctl_colonnade(df_str[c(43L, 23L, 22L, 11L, 6L, 26L, 48L, 17L, 7L, 42L, 36L, 21L, 35L, 50L, 13L, 19L, 29L, 8L, 15L, 4L, 2L, 27L, 49L, 47L, 30L, 31L, 25L, 28L, 46L, 12L, 32L, 39L, 24L, 10L, 45L, 5L, 37L, 14L, 40L, 20L, 41L, 44L, 33L, 18L, 38L, 3L, 1L, 34L, 16L, 9L)], width = 770) })
  expect_snapshot({ options(width = 46); ctl_colonnade(df_str[c(5L, 24L, 43L, 46L, 35L, 39L, 18L, 26L, 8L, 27L, 2L, 50L, 6L, 14L, 29L, 47L, 9L, 16L, 36L, 4L, 13L, 41L, 3L, 28L, 37L, 33L, 38L, 31L, 34L, 19L, 42L, 32L, 1L, 45L, 15L, 7L, 11L, 49L, 23L, 12L, 48L, 20L, 21L, 44L, 25L, 10L, 22L, 30L, 40L, 17L)], width = 1439) })
  expect_snapshot({ options(width = 52); ctl_colonnade(df_str[c(7L, 44L, 19L, 21L, 18L, 35L, 23L, 50L, 33L, 37L, 25L, 26L, 10L, 39L, 2L, 47L, 42L, 14L, 9L, 41L, 45L, 6L, 4L, 11L, 24L, 43L, 32L, 3L, 38L, 5L, 49L, 27L, 17L, 8L, 22L, 40L, 12L, 15L, 1L, 28L, 31L, 29L, 13L, 48L, 34L, 36L, 30L, 20L, 16L, 46L)], width = 1065) })
  expect_snapshot({ options(width = 35); ctl_colonnade(df_str[c(38L, 18L, 23L, 36L, 35L, 20L, 44L, 19L, 13L, 41L, 31L, 7L, 11L, 29L, 2L, 14L, 26L, 46L, 40L, 45L, 9L, 34L, 33L, 22L, 1L, 17L, 28L, 10L, 21L, 30L, 47L, 49L, 6L, 12L, 4L, 25L, 32L, 15L, 43L, 24L, 48L, 3L, 37L, 50L, 42L, 39L, 16L, 8L, 27L, 5L)], width = 393) })
  expect_snapshot({ options(width = 41); ctl_colonnade(df_str[c(22L, 9L, 11L, 26L, 19L, 16L, 32L, 25L, 1L, 30L, 31L, 6L, 24L, 10L, 39L, 21L, 50L, 7L, 29L, 12L, 46L, 43L, 15L, 35L, 20L, 40L, 49L, 38L, 36L, 48L, 34L, 3L, 8L, 4L, 27L, 42L, 44L, 33L, 45L, 18L, 5L, 2L, 13L, 47L, 28L, 17L, 37L, 14L, 41L, 23L)], width = 999) })
})
|
/tests/testthat/test-ctl_colonnade_2.R
|
permissive
|
Tubbz-alt/pillar
|
R
| false
| false
| 4,972
|
r
|
test_that("strings with varying widths", {
  local_options(width = 80)
  # Generated by data-raw/create-chr-tests.R
  # Each snapshot fixes the terminal width via options(width = ...), renders
  # a shuffled permutation of the df_str fixture columns at the given total
  # colonnade width, and compares the printed output against the stored
  # snapshot. The index vectors are machine-generated -- regenerate with the
  # script above rather than editing them by hand.
  expect_snapshot({ options(width = 54); ctl_colonnade(df_str[c(28L, 34L, 16L, 29L, 47L, 25L, 42L, 27L, 44L, 20L, 14L, 36L, 43L, 41L, 26L, 45L, 22L, 9L, 13L, 32L, 31L, 12L, 19L, 48L, 49L, 35L, 3L, 11L, 23L, 24L, 40L, 15L, 38L, 10L, 46L, 5L, 50L, 18L, 21L, 6L, 30L, 2L, 7L, 1L, 4L, 8L, 17L, 33L, 39L, 37L)], width = 516) })
  expect_snapshot({ options(width = 42); ctl_colonnade(df_str[c(28L, 41L, 12L, 29L, 13L, 43L, 24L, 50L, 48L, 35L, 44L, 21L, 33L, 45L, 47L, 34L, 25L, 14L, 18L, 23L, 7L, 3L, 42L, 36L, 11L, 2L, 20L, 31L, 1L, 4L, 38L, 9L, 27L, 40L, 32L, 17L, 6L, 49L, 16L, 19L, 15L, 22L, 39L, 10L, 46L, 5L, 30L, 8L, 26L, 37L)], width = 1365) })
  expect_snapshot({ options(width = 39); ctl_colonnade(df_str[c(40L, 17L, 13L, 23L, 22L, 2L, 18L, 3L, 29L, 45L, 14L, 19L, 33L, 37L, 47L, 43L, 44L, 10L, 31L, 27L, 34L, 35L, 41L, 21L, 4L, 25L, 38L, 48L, 9L, 24L, 26L, 39L, 20L, 36L, 42L, 16L, 6L, 11L, 7L, 12L, 1L, 46L, 15L, 5L, 8L, 50L, 32L, 30L, 49L, 28L)], width = 934) })
  expect_snapshot({ options(width = 32); ctl_colonnade(df_str[c(11L, 36L, 17L, 14L, 31L, 35L, 23L, 13L, 6L, 44L, 45L, 22L, 21L, 18L, 33L, 10L, 43L, 2L, 46L, 34L, 3L, 19L, 1L, 38L, 9L, 37L, 5L, 8L, 25L, 49L, 27L, 29L, 15L, 39L, 24L, 40L, 48L, 26L, 47L, 42L, 41L, 12L, 28L, 30L, 7L, 16L, 4L, 50L, 20L, 32L)], width = 565) })
  expect_snapshot({ options(width = 35); ctl_colonnade(df_str[c(18L, 46L, 11L, 43L, 31L, 47L, 48L, 44L, 50L, 15L, 28L, 33L, 13L, 4L, 22L, 3L, 37L, 32L, 40L, 9L, 25L, 16L, 45L, 23L, 21L, 6L, 49L, 36L, 27L, 38L, 14L, 34L, 8L, 24L, 29L, 1L, 12L, 2L, 20L, 17L, 35L, 5L, 19L, 30L, 7L, 26L, 42L, 41L, 39L, 10L)], width = 1121) })
  expect_snapshot({ options(width = 32); ctl_colonnade(df_str[c(43L, 1L, 3L, 15L, 28L, 12L, 46L, 34L, 31L, 7L, 11L, 4L, 44L, 8L, 9L, 5L, 36L, 22L, 17L, 39L, 18L, 45L, 37L, 13L, 29L, 6L, 30L, 16L, 20L, 10L, 19L, 26L, 33L, 40L, 35L, 48L, 38L, 25L, 2L, 47L, 42L, 41L, 27L, 14L, 21L, 24L, 50L, 49L, 23L, 32L)], width = 446) })
  expect_snapshot({ options(width = 31); ctl_colonnade(df_str[c(37L, 46L, 21L, 3L, 16L, 39L, 34L, 33L, 10L, 17L, 19L, 36L, 45L, 49L, 11L, 50L, 14L, 29L, 44L, 13L, 30L, 38L, 32L, 40L, 42L, 1L, 31L, 41L, 7L, 23L, 35L, 28L, 6L, 25L, 2L, 9L, 12L, 15L, 5L, 18L, 20L, 27L, 43L, 8L, 47L, 4L, 48L, 24L, 26L, 22L)], width = 1166) })
  expect_snapshot({ options(width = 58); ctl_colonnade(df_str[c(31L, 39L, 40L, 30L, 10L, 21L, 9L, 16L, 46L, 25L, 15L, 24L, 3L, 50L, 35L, 1L, 12L, 34L, 48L, 4L, 29L, 23L, 37L, 36L, 28L, 43L, 11L, 17L, 32L, 8L, 41L, 13L, 44L, 7L, 38L, 26L, 33L, 20L, 19L, 2L, 18L, 49L, 27L, 47L, 22L, 14L, 6L, 5L, 45L, 42L)], width = 546) })
  expect_snapshot({ options(width = 57); ctl_colonnade(df_str[c(43L, 21L, 41L, 48L, 22L, 25L, 2L, 8L, 1L, 24L, 6L, 39L, 38L, 20L, 49L, 45L, 47L, 12L, 9L, 13L, 36L, 26L, 44L, 11L, 46L, 28L, 7L, 18L, 50L, 16L, 29L, 30L, 4L, 23L, 17L, 40L, 33L, 14L, 27L, 19L, 34L, 32L, 3L, 37L, 15L, 10L, 5L, 35L, 31L, 42L)], width = 1035) })
  expect_snapshot({ options(width = 33); ctl_colonnade(df_str[c(40L, 6L, 25L, 5L, 26L, 17L, 19L, 2L, 11L, 34L, 45L, 24L, 22L, 44L, 35L, 7L, 4L, 49L, 1L, 36L, 12L, 41L, 39L, 13L, 48L, 27L, 18L, 30L, 42L, 28L, 3L, 46L, 21L, 20L, 16L, 29L, 50L, 10L, 9L, 8L, 47L, 31L, 14L, 38L, 33L, 32L, 43L, 23L, 15L, 37L)], width = 1217) })
  expect_snapshot({ options(width = 32); ctl_colonnade(df_str[c(43L, 23L, 22L, 11L, 6L, 26L, 48L, 17L, 7L, 42L, 36L, 21L, 35L, 50L, 13L, 19L, 29L, 8L, 15L, 4L, 2L, 27L, 49L, 47L, 30L, 31L, 25L, 28L, 46L, 12L, 32L, 39L, 24L, 10L, 45L, 5L, 37L, 14L, 40L, 20L, 41L, 44L, 33L, 18L, 38L, 3L, 1L, 34L, 16L, 9L)], width = 770) })
  expect_snapshot({ options(width = 46); ctl_colonnade(df_str[c(5L, 24L, 43L, 46L, 35L, 39L, 18L, 26L, 8L, 27L, 2L, 50L, 6L, 14L, 29L, 47L, 9L, 16L, 36L, 4L, 13L, 41L, 3L, 28L, 37L, 33L, 38L, 31L, 34L, 19L, 42L, 32L, 1L, 45L, 15L, 7L, 11L, 49L, 23L, 12L, 48L, 20L, 21L, 44L, 25L, 10L, 22L, 30L, 40L, 17L)], width = 1439) })
  expect_snapshot({ options(width = 52); ctl_colonnade(df_str[c(7L, 44L, 19L, 21L, 18L, 35L, 23L, 50L, 33L, 37L, 25L, 26L, 10L, 39L, 2L, 47L, 42L, 14L, 9L, 41L, 45L, 6L, 4L, 11L, 24L, 43L, 32L, 3L, 38L, 5L, 49L, 27L, 17L, 8L, 22L, 40L, 12L, 15L, 1L, 28L, 31L, 29L, 13L, 48L, 34L, 36L, 30L, 20L, 16L, 46L)], width = 1065) })
  expect_snapshot({ options(width = 35); ctl_colonnade(df_str[c(38L, 18L, 23L, 36L, 35L, 20L, 44L, 19L, 13L, 41L, 31L, 7L, 11L, 29L, 2L, 14L, 26L, 46L, 40L, 45L, 9L, 34L, 33L, 22L, 1L, 17L, 28L, 10L, 21L, 30L, 47L, 49L, 6L, 12L, 4L, 25L, 32L, 15L, 43L, 24L, 48L, 3L, 37L, 50L, 42L, 39L, 16L, 8L, 27L, 5L)], width = 393) })
  expect_snapshot({ options(width = 41); ctl_colonnade(df_str[c(22L, 9L, 11L, 26L, 19L, 16L, 32L, 25L, 1L, 30L, 31L, 6L, 24L, 10L, 39L, 21L, 50L, 7L, 29L, 12L, 46L, 43L, 15L, 35L, 20L, 40L, 49L, 38L, 36L, 48L, 34L, 3L, 8L, 4L, 27L, 42L, 44L, 33L, 45L, 18L, 5L, 2L, 13L, 47L, 28L, 17L, 37L, 14L, 41L, 23L)], width = 999) })
})
|
## Plot4: total PM2.5 emissions from coal combustion-related sources in the
## United States, 1999-2008 (Exploratory Data Analysis course project 2).
library(ggplot2)  # was missing: ggplot()/geom_*/labs() are ggplot2 functions

setwd("C:\\Users\\Gemma\\Documents\\ExploratoryDataAnalysis")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?

# Rows of the source-classification table whose EI.Sector mentions "coal"
# (case-insensitive), then the NEI records for those source codes.
EISectorCoal <- grep("coal", SCC$EI.Sector, ignore.case = TRUE)
SCCCoal <- SCC[EISectorCoal, ]$SCC
emissionsCoal <- subset(NEI, SCC %in% SCCCoal)

# Total emissions per year, rescaled to kilotons for readability.
emissionsCoalByYear <- aggregate(Emissions ~ year, data = emissionsCoal, FUN = sum)
emissionsCoalByYear$kiloTons <- emissionsCoalByYear$Emissions / 1000

# Open device
png(filename = 'plot4.png', width = 480, height = 480, units = 'px')
# print() is required so the plot is rendered to the device even when this
# script is run via source() (bare ggplot objects are not auto-printed there).
print(
  ggplot(emissionsCoalByYear, aes(year, kiloTons)) +
    geom_line() +
    geom_point(size = 3) +
    geom_text(aes(label = sprintf("%1.2f", kiloTons)), size = 3, hjust = .5, vjust = 1.5) +
    labs(x = "Years", y = "PM2.5 Emissions(kiloTons)") +
    labs(title = "PM2.5 Emissions from Coal in the United States")
)
dev.off()
|
/Plot4.R
|
no_license
|
GARGVI/ExploratoryDataAnalysisProject2
|
R
| false
| false
| 965
|
r
|
## Plot4: total PM2.5 emissions from coal combustion-related sources in the
## United States, 1999-2008 (Exploratory Data Analysis course project 2).
library(ggplot2)  # was missing: ggplot()/geom_*/labs() are ggplot2 functions

setwd("C:\\Users\\Gemma\\Documents\\ExploratoryDataAnalysis")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Across the United States, how have emissions from coal combustion-related
# sources changed from 1999-2008?

# Rows of the source-classification table whose EI.Sector mentions "coal"
# (case-insensitive), then the NEI records for those source codes.
EISectorCoal <- grep("coal", SCC$EI.Sector, ignore.case = TRUE)
SCCCoal <- SCC[EISectorCoal, ]$SCC
emissionsCoal <- subset(NEI, SCC %in% SCCCoal)

# Total emissions per year, rescaled to kilotons for readability.
emissionsCoalByYear <- aggregate(Emissions ~ year, data = emissionsCoal, FUN = sum)
emissionsCoalByYear$kiloTons <- emissionsCoalByYear$Emissions / 1000

# Open device
png(filename = 'plot4.png', width = 480, height = 480, units = 'px')
# print() is required so the plot is rendered to the device even when this
# script is run via source() (bare ggplot objects are not auto-printed there).
print(
  ggplot(emissionsCoalByYear, aes(year, kiloTons)) +
    geom_line() +
    geom_point(size = 3) +
    geom_text(aes(label = sprintf("%1.2f", kiloTons)), size = 3, hjust = .5, vjust = 1.5) +
    labs(x = "Years", y = "PM2.5 Emissions(kiloTons)") +
    labs(title = "PM2.5 Emissions from Coal in the United States")
)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.guardduty_operations.R
\name{list_filters}
\alias{list_filters}
\title{Returns a paginated list of the current filters}
\usage{
list_filters(DetectorId, MaxResults = NULL, NextToken = NULL)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector that specifies the GuardDuty service where you want to list filters.}
\item{MaxResults}{Indicates the maximum number of items that you want in the response. The maximum value is 50.}
\item{NextToken}{Paginates results. Set the value of this parameter to NULL on your first call to the ListFilters operation. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.}
}
\description{
Returns a paginated list of the current filters.
}
\section{Accepted Parameters}{
\preformatted{list_filters(
DetectorId = "string",
MaxResults = 123,
NextToken = "string"
)
}
}
|
/service/paws.guardduty/man/list_filters.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 1,000
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.guardduty_operations.R
\name{list_filters}
\alias{list_filters}
\title{Returns a paginated list of the current filters}
\usage{
list_filters(DetectorId, MaxResults = NULL, NextToken = NULL)
}
\arguments{
\item{DetectorId}{[required] The ID of the detector that specifies the GuardDuty service where you want to list filters.}
\item{MaxResults}{Indicates the maximum number of items that you want in the response. The maximum value is 50.}
\item{NextToken}{Paginates results. Set the value of this parameter to NULL on your first call to the ListFilters operation. For subsequent calls to the operation, fill nextToken in the request with the value of nextToken from the previous response to continue listing data.}
}
\description{
Returns a paginated list of the current filters.
}
\section{Accepted Parameters}{
\preformatted{list_filters(
DetectorId = "string",
MaxResults = 123,
NextToken = "string"
)
}
}
|
# Erdos-Renyi G(n, p) experiment: for three edge probabilities, estimate the
# mean diameter and the fraction of connected graphs over repeated draws.
library(igraph)

num <- 1000       # vertices per random graph
iteration <- 50   # Monte Carlo repetitions

# Edge probabilities for the three ensembles.
p1 <- 0.01
p2 <- 0.05
p3 <- 0.1

# Preallocate result vectors instead of growing them with c() inside the
# loop (appending copies the whole vector on every iteration -- O(n^2)).
d1 <- numeric(iteration)
count1 <- numeric(iteration)
d2 <- numeric(iteration)
count2 <- numeric(iteration)
d3 <- numeric(iteration)
count3 <- numeric(iteration)

for (i in seq_len(iteration)) {
  g1 <- random.graph.game(num, p1, directed = FALSE)
  d1[i] <- diameter(g1)
  count1[i] <- as.integer(is.connected(g1))

  g2 <- random.graph.game(num, p2, directed = FALSE)
  d2[i] <- diameter(g2)
  count2[i] <- as.integer(is.connected(g2))

  g3 <- random.graph.game(num, p3, directed = FALSE)
  d3[i] <- diameter(g3)
  count3[i] <- as.integer(is.connected(g3))
}

# Connectivity of the last draw, empirical P(connected), and mean diameter.
is.connected(g1)
mean(count1)
mean(d1)

is.connected(g2)
mean(count2)
mean(d2)

is.connected(g3)
mean(count3)
mean(d3)
|
/HWK1/1_b.R
|
no_license
|
realmichaelzyy/Graphs_and_Network_Flows
|
R
| false
| false
| 704
|
r
|
# Erdos-Renyi G(n, p) experiment: for three edge probabilities, estimate the
# mean diameter and the fraction of connected graphs over repeated draws.
library(igraph)

num <- 1000       # vertices per random graph
iteration <- 50   # Monte Carlo repetitions

# Edge probabilities for the three ensembles.
p1 <- 0.01
p2 <- 0.05
p3 <- 0.1

# Preallocate result vectors instead of growing them with c() inside the
# loop (appending copies the whole vector on every iteration -- O(n^2)).
d1 <- numeric(iteration)
count1 <- numeric(iteration)
d2 <- numeric(iteration)
count2 <- numeric(iteration)
d3 <- numeric(iteration)
count3 <- numeric(iteration)

for (i in seq_len(iteration)) {
  g1 <- random.graph.game(num, p1, directed = FALSE)
  d1[i] <- diameter(g1)
  count1[i] <- as.integer(is.connected(g1))

  g2 <- random.graph.game(num, p2, directed = FALSE)
  d2[i] <- diameter(g2)
  count2[i] <- as.integer(is.connected(g2))

  g3 <- random.graph.game(num, p3, directed = FALSE)
  d3[i] <- diameter(g3)
  count3[i] <- as.integer(is.connected(g3))
}

# Connectivity of the last draw, empirical P(connected), and mean diameter.
is.connected(g1)
mean(count1)
mean(d1)

is.connected(g2)
mean(count2)
mean(d2)

is.connected(g3)
mean(count3)
mean(d3)
|
\name{getLayoutNameMapping}
\alias{getLayoutNameMapping}
\alias{getLayoutNameMapping,CytoscapeConnectionClass-method}
\title{getLayoutNameMapping}
\description{
The Cytoscape 'Layout' menu lists many layout algorithms, but the
names presented there are different from the names by which these
  algorithms are known to the layout method. This method returns a named
list in which the names are from the GUI, and the values identify
  the names you must use to choose an algorithm in the
programmatic interface.
}
\usage{
getLayoutNameMapping(obj)
}
\arguments{
\item{obj}{a \code{CytoscapeConnectionClass} object. }
}
\value{
A named list of strings.
}
\author{Tanja Muetze, Georgi Kolishovski, Paul Shannon}
\seealso{
getLayoutNames
getLayoutPropertyNames
getLayoutPropertyType
getLayoutPropertyValue
setLayoutProperties
}
\examples{
\dontrun{
# first, delete existing windows to save memory:
deleteAllWindows(CytoscapeConnection())
cy <- CytoscapeConnection ()
layout.name.map <- getLayoutNameMapping (cy)
print (head (names (layout.name.map), n=3))
# [1] "Attribute Circle Layout" "Stacked Node Layout"
# [3] "Degree Sorted Circle Layout"
print (head (as.character (layout.name.map), n=3))
# [1] "attribute-circle" "stacked-node-layout" "degree-circle"
}
}
\keyword{graph}
|
/man/getLayoutNameMapping.Rd
|
no_license
|
sebastianrossel/Bioconductor_RCy3_the_new_RCytoscape
|
R
| false
| false
| 1,330
|
rd
|
\name{getLayoutNameMapping}
\alias{getLayoutNameMapping}
\alias{getLayoutNameMapping,CytoscapeConnectionClass-method}
\title{getLayoutNameMapping}
\description{
The Cytoscape 'Layout' menu lists many layout algorithms, but the
names presented there are different from the names by which these
algorithms are known to layout method. This method returns a named
list in which the names are from the GUI, and the values identify
the names you must use to choose an algorithm in the
programmatic interface.
}
\usage{
getLayoutNameMapping(obj)
}
\arguments{
\item{obj}{a \code{CytoscapeConnectionClass} object. }
}
\value{
A named list of strings.
}
\author{Tanja Muetze, Georgi Kolishovski, Paul Shannon}
\seealso{
getLayoutNames
getLayoutPropertyNames
getLayoutPropertyType
getLayoutPropertyValue
setLayoutProperties
}
\examples{
\dontrun{
# first, delete existing windows to save memory:
deleteAllWindows(CytoscapeConnection())
cy <- CytoscapeConnection ()
layout.name.map <- getLayoutNameMapping (cy)
print (head (names (layout.name.map), n=3))
# [1] "Attribute Circle Layout" "Stacked Node Layout"
# [3] "Degree Sorted Circle Layout"
print (head (as.character (layout.name.map), n=3))
# [1] "attribute-circle" "stacked-node-layout" "degree-circle"
}
}
\keyword{graph}
|
# |----------------------------------------------------------------------------------|
# | Project: ICD-9 Shiny App |
# | Script: ICD-9 Shiny App |
# | Authors: Davit Sargsyan |
# | Created: 03/31/2018 |
# | Modified: 04/03/2018, DS: replaced text boxes wit DT table. Download only |
# | SELECTED rows (all selected by default) |
# | Output a map file, i.e. R list with mapped diagnoses |
# | 04/27/2018, DS: Added ICD-9 procedure codes. NOTE: 'major' category is |
# | just a copy of 'sub-chapter', too many labels to create|
# | by hand. Find a full table online and use it. |
# | 05/24/2018, DS: Switched to icd Version 3.2.0 (developer) and icd.data |
# | version 1.0.1. Added functions to merge different |
# | versions of ICD data (currently, V23-V32). |
# | 07/21/2018, DS: updated package icd on CVI computer; |
# | switched app to shinydashboard; added conversion tab |
# | ToDo: Keep selected diagnoses after switching to the next category |
# |----------------------------------------------------------------------------------|
# Source: https://stackoverflow.com/questions/30894780/cant-read-an-rdata-fileinput
options(stringsAsFactors = FALSE,
shiny.maxRequestSize = 1024^3)
# devtools::install_github("jackwasey/icd")
# devtools::install_github("jackwasey/icd.data")
require(shiny)
require(icd)
require(icd.data)
require(data.table)
require(DT)
library(shinydashboard)
library(shinythemes)
require(xlsx)
require(foreach)
source("source/icd9_dx_get_data_v1.R")
source("source/icd9_sg_get_data_v1.R")
# # TEST: bypass user interface!
# input <- list()
# # Diagnoses----
# dt1 <- icd9cm_merge_version_dx(32)
# # Procedures----
# dt1 <- icd9cm_merge_version_pcs(32)
# # individual nodes----
# input$chapter = unique(as.character(dt1$chapter))[4]
# input$subchapter = unique(as.character(dt1$sub_chapter[dt1$chapter == input$chapter]))[1]
# input$major = unique(as.character(dt1$major[dt1$sub_chapter == input$subchapter]))[2]
# input$dx = unique(as.character(dt1$long_desc[dt1$major == input$major]))[1]
# UI: shinydashboard page with three tabs.
#   - "dashboard": placeholder content.
#   - "mapping":   browse the ICD-9 hierarchy, select codes, and download the
#                  selection (CSV) or a comorbidity map built from it (RData).
#   - "convert":   upload an ICD data file plus a mapping file and convert the
#                  codes into per-subject comorbidity flags.
ui <- dashboardPage(dashboardHeader(title = "Shiny ICD",
                                    # Static demo notifications (placeholder content).
                                    dropdownMenu(type = "notifications",
                                                 notificationItem(text = "5 new users today",
                                                                  icon = icon("users")),
                                                 notificationItem(text = "12 items delivered",
                                                                  icon = icon("truck"),
                                                                  status = "success"),
                                                 notificationItem(text = "Server load at 86%",
                                                                  icon = icon("exclamation-triangle"),
                                                                  status = "warning"))),
                    dashboardSidebar(sidebarMenu(menuItem(text = "Dashboard",
                                                          tabName = "dashboard",
                                                          icon = icon("dashboard")),
                                                 menuItem(text = "Mapping",
                                                          tabName = "mapping",
                                                          icon = icon("th")),
                                                 menuItem(text = "Convert",
                                                          tabName = "convert",
                                                          icon = icon("th")))),
                    dashboardBody(tabItems(tabItem(tabName = "dashboard",
                                                   h2("Hello1")),
                                           # "mapping" tab: code-list choice, ICD-9 version, then
                                           # cascading selectors rendered server-side (uiOutput).
                                           tabItem(tabName = "mapping",
                                                   sidebarPanel(radioButtons(inputId = "dataset",
                                                                             label = "Select List",
                                                                             choices = c("Diagnoses",
                                                                                         "Procedures"),
                                                                             selected = "Diagnoses"),
                                                                selectInput(inputId = "icd9_version",
                                                                            label = "ICD-9 Version",
                                                                            choices = available_icd9_versions()),
                                                                uiOutput(outputId = "chapterIn"),
                                                                uiOutput(outputId = "subchapterIn"),
                                                                uiOutput(outputId = "majorIn"),
                                                                uiOutput(outputId = "dxIn"),
                                                                checkboxInput(inputId = "selectAll",
                                                                              label = "Select All"),
                                                                textInput(inputId = "comorb",
                                                                          label = "Comorbidity",
                                                                          value = ""),
                                                                br(),
                                                                downloadButton(outputId = "downloadData",
                                                                               label = "Download Selected Rows"),
                                                                br(),
                                                                downloadButton(outputId = "downloadMap",
                                                                               label = "Download Map of Selected Rows")),
                                                   mainPanel(DT:: dataTableOutput("tbl"),
                                                             br(),
                                                             actionButton(inputId = "do",
                                                                          label = "Save Selection"),
                                                             br(),
                                                             DT:: dataTableOutput("tbl2"))),
                                           # "convert" tab: upload mapping CSV + ICD .RData, pick the
                                           # relevant columns, then convert to comorbidity flags.
                                           tabItem(tabName = "convert",
                                                   sidebarPanel(fileInput(inputId = "browseMap",
                                                                          label = "Select Mapping File",
                                                                          multiple = FALSE),
                                                                br(),
                                                                uiOutput(outputId = "mapIcdColIn"),
                                                                uiOutput(outputId = "mapDiagColsIn"),
                                                                br(),
                                                                fileInput(inputId = "browseData",
                                                                          label = "Select ICD Data File",
                                                                          multiple = FALSE),
                                                                br(),
                                                                uiOutput(outputId = "idColIn"),
                                                                uiOutput(outputId = "icdColsIn"),
                                                                br(),
                                                                actionButton(inputId = "convert",
                                                                             label = "Convert"),
                                                                br(),
                                                                br(),
                                                                br(),
                                                                downloadButton(outputId = "downloadComorb",
                                                                               label = "Download comorbidities")),
                                                   mainPanel(DT::dataTableOutput("tblComorb"),
                                                             DT:: dataTableOutput("tblMap"),
                                                             br(),
                                                             DT:: dataTableOutput("tblICD"))))))
# Server logic: builds the ICD-9 reference table for the selected version,
# drives the cascading chapter/sub-chapter/major/diagnosis selectors,
# accumulates the user's selected codes, and converts an uploaded ICD data
# file into per-subject comorbidity flags using a user-supplied mapping file.
server <- function(input, output, session) {
  # Reactive: merged ICD-9 reference table (diagnoses or procedures) for the
  # chosen version, with the user-typed comorbidity label prepended as a
  # "comorb" column.
  dt0f <- reactive({
    if(input$dataset == "Diagnoses"){
      dt0 <- icd9cm_merge_version_dx(input$icd9_version)
      # TEMPORARY PATCH: see Issue4: https://github.com/CVIRU/shiny.icd/issues/4
      # Why does this work?
      dt0$long_desc[dt0$code == "0413"] <- "Friedländer's bacillus infection in conditions classified elsewhere and of unspecified site"
    } else {
      dt0 <- icd9cm_merge_version_pcs(input$icd9_version)
    }
    dt0$short_desc <- NULL
    dt0 <- data.table(comorb = input$comorb,
                      dt0)
    dt0
  })
  # Reactive: display copy of the table with "code:description" labels so the
  # selectors show the code alongside the text.
  dt1f <- reactive({
    # Display ICD codes along with labels----
    dt1 <- dt0f()
    dt1$long_desc <- paste(dt1$code,
                           dt1$long_desc,
                           sep = ":")
    dt1$major <- paste(dt1$major_code,
                       dt1$major,
                       sep = ":")
    dt1
  })
  # Reactive: per-subject comorbidity flags. Loads the uploaded ICD data
  # (.RData) and mapping CSV, builds an icd comorbidity map from the chosen
  # ICD/diagnosis columns, runs comorbid() once per selected ICD column, and
  # flags a subject when any column matched (Reduce("+") then > 0).
  dtcmbf <- reactive({
    validate(need(input$browseData != "", ""))
    ne <- new.env()
    # load() into a fresh environment so we can recover the object regardless
    # of the name it was saved under.
    fname <- load(file = input$browseData$datapath,
                  envir = ne)
    dt.dx <- ne[[fname]]
    validate(need(input$browseMap != "", ""))
    ne <- new.env()
    map <- fread(input$browseMap$datapath,
                 colClasses = c("character"))
    l1 <- as.comorbidity_map(split(x = c(map[, input$mapIcdIn, with = FALSE][[1]]),
                                   f = c(map[, input$mapDiagIn, with = FALSE][[1]])))
    dtt <- list()
    for(i in 1:length(input$icdIn)){
      dtt[[i]] <- comorbid(x = dt.dx,
                           map = l1,
                           visit_name = input$idIn,
                           icd_name = input$icdIn[i])
    }
    dt.comorb <- data.table(unique(dt.dx[,
                                         colnames(dt.dx) == input$idIn,
                                         with = FALSE]),
                            apply(Reduce("+",
                                         dtt),
                                  MARGIN = 2,
                                  function(a){
                                    a > 0
                                  }))
    dt.comorb
  })
  # Cascading hierarchy selectors: chapter -> sub-chapter -> major -> diagnosis.
  output$chapterIn <- renderUI({
    dt1 <- dt1f()
    selectInput(inputId = "chapter",
                label = "Chapter",
                choices = unique(as.character(dt1$chapter)))
  })
  output$subchapterIn <- renderUI({
    dt1 <- dt1f()
    selectInput(inputId = "subchapter",
                label = "Sub-chapter",
                choices = unique(as.character(dt1$sub_chapter[dt1$chapter == input$chapter])))
  })
  output$majorIn <- renderUI({
    dt1 <- dt1f()
    selectInput(inputId = "major",
                label = "Major",
                choices = unique(as.character(dt1$major[dt1$sub_chapter == input$subchapter])))
  })
  output$dxIn <- renderUI({
    dt1 <- dt1f()
    selectInput(inputId = "dx",
                label = "Diagnosis",
                choices = unique(as.character(dt1$long_desc[dt1$major == input$major])),
                multiple = TRUE)
  })
  # "Select All" toggles every diagnosis in the dx selector on/off.
  observeEvent(input$selectAll,{
    dt1 <- dt1f()
    updateSelectInput(session = session,
                      inputId = "dx",
                      selected = if(input$selectAll){
                        unique(as.character(dt1$long_desc))
                      } else {
                        ""
                      })
  })
  # Table of the currently selected diagnoses (raw, unlabelled rows).
  # Source: https://yihui.shinyapps.io/DT-rows/
  output$tbl <- DT::renderDT({
    dt0 <- dt0f()
    dt1 <- dt1f()
    DT::datatable(unique(dt0[dt1$long_desc %in% input$dx, ]),
                  options = list(pageLength = 10),
                  selection = list(mode = "multiple"),
                  rownames = FALSE)
  })
  # "Save Selection": append the current selection to the accumulated table.
  # NOTE(review): dtt is accumulated via <<- in the global environment, so the
  # selection is shared across sessions/users — confirm this is intended.
  # Source: http://shiny.rstudio.com/articles/action-buttons.html
  observeEvent(input$do, {
    dt0 <- dt0f()
    dt1 <- dt1f()
    if (!exists("dtt")) {
      dtt <<- unique(dt0[dt1$long_desc %in% input$dx, ])
    } else {
      dtt <<- unique(rbind.data.frame(dtt,
                                      dt0[dt1$long_desc %in% input$dx, ]))
    }
    dtt <<- dtt[order(as.numeric(rownames(dtt))), ]
    # Accumulated selection, all rows pre-selected for download.
    output$tbl2 <- DT::renderDT({
      DT::datatable(dtt,
                    options = list(pageLength = 10),
                    selection = list(mode = "multiple",
                                     selected = 1:nrow(dtt),
                                     target = "row"),
                    rownames = FALSE)
    })
  })
  # Download the rows selected in tbl2 as a tab-delimited .csv.
  # Source: https://shiny.rstudio.com/articles/download.html
  output$downloadData <- downloadHandler(
    filename = function() {
      paste("icd9_codes_",
            Sys.Date(),
            ".csv",
            sep = "")
    },
    content = function(file) {
      tmp <- dtt[input$tbl2_rows_selected, ]
      write.table(tmp,
                  file,
                  row.names = FALSE,
                  sep = "\t")
    }
  )
  # New comorbidity map
  # Download an icd comorbidity map (RData list: major category -> codes)
  # built from the rows selected in tbl2.
  output$downloadMap <- downloadHandler(
    filename = function() {
      paste("icd9_map_",
            Sys.Date(),
            ".RData",
            sep = "")
    },
    content = function(file) {
      tmp <- dtt[input$tbl2_rows_selected, ]
      l1 <- list()
      for (i in 1:length(unique(tmp$major))) {
        l1[[i]] <- unique(c(tmp$code[tmp$major == unique(tmp$major)[i]]))
      }
      names(l1) <- unique(tmp$major)
      l1 <- as.icd_comorbidity_map(l1)
      save(l1,
           file = file)
    }
  )
  # Convert codes to comorbidities----
  # Column pickers populated from the headers of the uploaded mapping CSV.
  output$mapIcdColIn <- renderUI({
    validate(need(input$browseMap != "", ""))
    ne <- new.env()
    dt3 <- fread(input$browseMap$datapath)
    cnames <- colnames(dt3)
    selectInput(inputId = "mapIcdIn",
                label = "Select ICD Column",
                choices = cnames,
                multiple = FALSE)
  })
  output$mapDiagColsIn <- renderUI({
    validate(need(input$browseMap != "", ""))
    ne <- new.env()
    dt3 <- fread(input$browseMap$datapath)
    cnames <- colnames(dt3)
    selectInput(inputId = "mapDiagIn",
                label = "Select Diagnosis Column",
                choices = cnames,
                multiple = FALSE)
  })
  # Column pickers populated from the uploaded ICD .RData file.
  output$idColIn <- renderUI({
    validate(need(input$browseData != "", ""))
    ne <- new.env()
    fname <- load(file = input$browseData$datapath,
                  envir = ne)
    dt2 <- ne[[fname]]
    cnames <- colnames(dt2)
    selectInput(inputId = "idIn",
                label = "Select ID Column",
                choices = cnames,
                multiple = FALSE)
  })
  output$icdColsIn <- renderUI({
    validate(need(input$browseData != "", ""))
    ne <- new.env()
    fname <- load(file = input$browseData$datapath,
                  envir = ne)
    dt2 <- ne[[fname]]
    cnames <- colnames(dt2)
    selectInput(inputId = "icdIn",
                label = "Select ICD Column(s)",
                choices = cnames,
                multiple = TRUE)
  })
  # Preview tables for the uploaded mapping file and ICD data.
  output$tblMap <- DT::renderDT({
    validate(need(input$browseMap != "", ""))
    ne <- new.env()
    dt3 <- fread(input$browseMap$datapath)
    DT::datatable(head(dt3, 3),
                  options = list(pageLength = 10),
                  selection = list(mode = "multiple"),
                  rownames = FALSE)
  })
  output$tblICD <- DT::renderDT({
    validate(need(input$browseData != "", ""))
    ne <- new.env()
    fname <- load(file = input$browseData$datapath,
                  envir = ne)
    dt2 <- ne[[fname]]
    DT::datatable(head(dt2, 20),
                  options = list(pageLength = 10),
                  selection = list(mode = "multiple"),
                  rownames = FALSE)
  })
  # "Convert" runs the comorbidity computation; isolate() so the table only
  # refreshes on the button press, not on every upstream input change.
  observeEvent(input$convert, {
    output$tblComorb <- DT::renderDT({
      dt.comorb <- isolate(dtcmbf())
      DT::datatable(head(dt.comorb, 20),
                    options = list(pageLength = 10),
                    selection = list(mode = "multiple"),
                    rownames = FALSE)
    })
  })
  # Download the full comorbidity table as RData.
  output$downloadComorb <- downloadHandler(
    filename = function() {
      paste("comorb_",
            Sys.Date(),
            ".RData",
            sep = "")
    },
    content = function(file) {
      dt.comorb <- dtcmbf()
      save(dt.comorb,
           file = file)
    }
  )
}
# Launch the application.
shinyApp(ui, server)
|
/app.R
|
no_license
|
CVIRU/shiny.icd
|
R
| false
| false
| 17,742
|
r
|
# |----------------------------------------------------------------------------------|
# | Project: ICD-9 Shiny App |
# | Script: ICD-9 Shiny App |
# | Authors: Davit Sargsyan |
# | Created: 03/31/2018 |
# | Modified: 04/03/2018, DS: replaced text boxes wit DT table. Download only |
# | SELECTED rows (all selected by default) |
# | Output a map file, i.e. R list with mapped diagnoses |
# | 04/27/2018, DS: Added ICD-9 procedure codes. NOTE: 'major' category is |
# | just a copy of 'sub-chapter', too many labels to create|
# | by hand. Find a full table online and use it. |
# | 05/24/2018, DS: Switched to icd Version 3.2.0 (developer) and icd.data |
# | version 1.0.1. Added functions to merge different |
# | versions of ICD data (currently, V23-V32). |
# | 07/21/2018, DS: updated package icd on CVI computer; |
# | switched app to shinydashboard; added conversion tab |
# | ToDo: Keep selected diagnoses after switching to the next category |
# |----------------------------------------------------------------------------------|
# Source: https://stackoverflow.com/questions/30894780/cant-read-an-rdata-fileinput
options(stringsAsFactors = FALSE,
shiny.maxRequestSize = 1024^3)
# devtools::install_github("jackwasey/icd")
# devtools::install_github("jackwasey/icd.data")
require(shiny)
require(icd)
require(icd.data)
require(data.table)
require(DT)
library(shinydashboard)
library(shinythemes)
require(xlsx)
require(foreach)
source("source/icd9_dx_get_data_v1.R")
source("source/icd9_sg_get_data_v1.R")
# # TEST: bypass user interface!
# input <- list()
# # Diagnoses----
# dt1 <- icd9cm_merge_version_dx(32)
# # Procedures----
# dt1 <- icd9cm_merge_version_pcs(32)
# # individual nodes----
# input$chapter = unique(as.character(dt1$chapter))[4]
# input$subchapter = unique(as.character(dt1$sub_chapter[dt1$chapter == input$chapter]))[1]
# input$major = unique(as.character(dt1$major[dt1$sub_chapter == input$subchapter]))[2]
# input$dx = unique(as.character(dt1$long_desc[dt1$major == input$major]))[1]
ui <- dashboardPage(dashboardHeader(title = "Shiny ICD",
dropdownMenu(type = "notifications",
notificationItem(text = "5 new users today",
icon = icon("users")),
notificationItem(text = "12 items delivered",
icon = icon("truck"),
status = "success"),
notificationItem(text = "Server load at 86%",
icon = icon("exclamation-triangle"),
status = "warning"))),
dashboardSidebar(sidebarMenu(menuItem(text = "Dashboard",
tabName = "dashboard",
icon = icon("dashboard")),
menuItem(text = "Mapping",
tabName = "mapping",
icon = icon("th")),
menuItem(text = "Convert",
tabName = "convert",
icon = icon("th")))),
dashboardBody(tabItems(tabItem(tabName = "dashboard",
h2("Hello1")),
tabItem(tabName = "mapping",
sidebarPanel(radioButtons(inputId = "dataset",
label = "Select List",
choices = c("Diagnoses",
"Procedures"),
selected = "Diagnoses"),
selectInput(inputId = "icd9_version",
label = "ICD-9 Version",
choices = available_icd9_versions()),
uiOutput(outputId = "chapterIn"),
uiOutput(outputId = "subchapterIn"),
uiOutput(outputId = "majorIn"),
uiOutput(outputId = "dxIn"),
checkboxInput(inputId = "selectAll",
label = "Select All"),
textInput(inputId = "comorb",
label = "Comorbidity",
value = ""),
br(),
downloadButton(outputId = "downloadData",
label = "Download Selected Rows"),
br(),
downloadButton(outputId = "downloadMap",
label = "Download Map of Selected Rows")),
mainPanel(DT:: dataTableOutput("tbl"),
br(),
actionButton(inputId = "do",
label = "Save Selection"),
br(),
DT:: dataTableOutput("tbl2"))),
tabItem(tabName = "convert",
sidebarPanel(fileInput(inputId = "browseMap",
label = "Select Mapping File",
multiple = FALSE),
br(),
uiOutput(outputId = "mapIcdColIn"),
uiOutput(outputId = "mapDiagColsIn"),
br(),
fileInput(inputId = "browseData",
label = "Select ICD Data File",
multiple = FALSE),
br(),
uiOutput(outputId = "idColIn"),
uiOutput(outputId = "icdColsIn"),
br(),
actionButton(inputId = "convert",
label = "Convert"),
br(),
br(),
br(),
downloadButton(outputId = "downloadComorb",
label = "Download comorbidities")),
mainPanel(DT::dataTableOutput("tblComorb"),
DT:: dataTableOutput("tblMap"),
br(),
DT:: dataTableOutput("tblICD"))))))
server <- function(input, output, session) {
dt0f <- reactive({
if(input$dataset == "Diagnoses"){
dt0 <- icd9cm_merge_version_dx(input$icd9_version)
# TEMPORARY PATCH: see Issue4: https://github.com/CVIRU/shiny.icd/issues/4
# Why dose this work?
dt0$long_desc[dt0$code == "0413"] <- "Friedländer's bacillus infection in conditions classified elsewhere and of unspecified site"
} else {
dt0 <- icd9cm_merge_version_pcs(input$icd9_version)
}
dt0$short_desc <- NULL
dt0 <- data.table(comorb = input$comorb,
dt0)
dt0
})
dt1f <- reactive({
# Display ICD codes along with labels----
dt1 <- dt0f()
dt1$long_desc <- paste(dt1$code,
dt1$long_desc,
sep = ":")
dt1$major <- paste(dt1$major_code,
dt1$major,
sep = ":")
dt1
})
dtcmbf <- reactive({
validate(need(input$browseData != "", ""))
ne <- new.env()
fname <- load(file = input$browseData$datapath,
envir = ne)
dt.dx <- ne[[fname]]
validate(need(input$browseMap != "", ""))
ne <- new.env()
map <- fread(input$browseMap$datapath,
colClasses = c("character"))
l1 <- as.comorbidity_map(split(x = c(map[, input$mapIcdIn, with = FALSE][[1]]),
f = c(map[, input$mapDiagIn, with = FALSE][[1]])))
dtt <- list()
for(i in 1:length(input$icdIn)){
dtt[[i]] <- comorbid(x = dt.dx,
map = l1,
visit_name = input$idIn,
icd_name = input$icdIn[i])
}
dt.comorb <- data.table(unique(dt.dx[,
colnames(dt.dx) == input$idIn,
with = FALSE]),
apply(Reduce("+",
dtt),
MARGIN = 2,
function(a){
a > 0
}))
dt.comorb
})
output$chapterIn <- renderUI({
dt1 <- dt1f()
selectInput(inputId = "chapter",
label = "Chapter",
choices = unique(as.character(dt1$chapter)))
})
output$subchapterIn <- renderUI({
dt1 <- dt1f()
selectInput(inputId = "subchapter",
label = "Sub-chapter",
choices = unique(as.character(dt1$sub_chapter[dt1$chapter == input$chapter])))
})
output$majorIn <- renderUI({
dt1 <- dt1f()
selectInput(inputId = "major",
label = "Major",
choices = unique(as.character(dt1$major[dt1$sub_chapter == input$subchapter])))
})
output$dxIn <- renderUI({
dt1 <- dt1f()
selectInput(inputId = "dx",
label = "Diagnosis",
choices = unique(as.character(dt1$long_desc[dt1$major == input$major])),
multiple = TRUE)
})
observeEvent(input$selectAll,{
dt1 <- dt1f()
updateSelectInput(session = session,
inputId = "dx",
selected = if(input$selectAll){
unique(as.character(dt1$long_desc))
} else {
""
})
})
# Source: https://yihui.shinyapps.io/DT-rows/
output$tbl <- DT::renderDT({
dt0 <- dt0f()
dt1 <- dt1f()
DT::datatable(unique(dt0[dt1$long_desc %in% input$dx, ]),
options = list(pageLength = 10),
selection = list(mode = "multiple"),
rownames = FALSE)
})
# Source: http://shiny.rstudio.com/articles/action-buttons.html
observeEvent(input$do, {
dt0 <- dt0f()
dt1 <- dt1f()
if (!exists("dtt")) {
dtt <<- unique(dt0[dt1$long_desc %in% input$dx, ])
} else {
dtt <<- unique(rbind.data.frame(dtt,
dt0[dt1$long_desc %in% input$dx, ]))
}
dtt <<- dtt[order(as.numeric(rownames(dtt))), ]
output$tbl2 <- DT::renderDT({
DT::datatable(dtt,
options = list(pageLength = 10),
selection = list(mode = "multiple",
selected = 1:nrow(dtt),
target = "row"),
rownames = FALSE)
})
})
# Source: https://shiny.rstudio.com/articles/download.html
output$downloadData <- downloadHandler(
filename = function() {
paste("icd9_codes_",
Sys.Date(),
".csv",
sep = "")
},
content = function(file) {
tmp <- dtt[input$tbl2_rows_selected, ]
write.table(tmp,
file,
row.names = FALSE,
sep = "\t")
}
)
# New comorbidity map
output$downloadMap <- downloadHandler(
filename = function() {
paste("icd9_map_",
Sys.Date(),
".RData",
sep = "")
},
content = function(file) {
tmp <- dtt[input$tbl2_rows_selected, ]
l1 <- list()
for (i in 1:length(unique(tmp$major))) {
l1[[i]] <- unique(c(tmp$code[tmp$major == unique(tmp$major)[i]]))
}
names(l1) <- unique(tmp$major)
l1 <- as.icd_comorbidity_map(l1)
save(l1,
file = file)
}
)
# Convert codes to comorbidities----
output$mapIcdColIn <- renderUI({
validate(need(input$browseMap != "", ""))
ne <- new.env()
dt3 <- fread(input$browseMap$datapath)
cnames <- colnames(dt3)
selectInput(inputId = "mapIcdIn",
label = "Select ICD Column",
choices = cnames,
multiple = FALSE)
})
output$mapDiagColsIn <- renderUI({
validate(need(input$browseMap != "", ""))
ne <- new.env()
dt3 <- fread(input$browseMap$datapath)
cnames <- colnames(dt3)
selectInput(inputId = "mapDiagIn",
label = "Select Diagnosis Column",
choices = cnames,
multiple = FALSE)
})
output$idColIn <- renderUI({
validate(need(input$browseData != "", ""))
ne <- new.env()
fname <- load(file = input$browseData$datapath,
envir = ne)
dt2 <- ne[[fname]]
cnames <- colnames(dt2)
selectInput(inputId = "idIn",
label = "Select ID Column",
choices = cnames,
multiple = FALSE)
})
output$icdColsIn <- renderUI({
validate(need(input$browseData != "", ""))
ne <- new.env()
fname <- load(file = input$browseData$datapath,
envir = ne)
dt2 <- ne[[fname]]
cnames <- colnames(dt2)
selectInput(inputId = "icdIn",
label = "Select ICD Column(s)",
choices = cnames,
multiple = TRUE)
})
output$tblMap <- DT::renderDT({
validate(need(input$browseMap != "", ""))
ne <- new.env()
dt3 <- fread(input$browseMap$datapath)
DT::datatable(head(dt3, 3),
options = list(pageLength = 10),
selection = list(mode = "multiple"),
rownames = FALSE)
})
output$tblICD <- DT::renderDT({
validate(need(input$browseData != "", ""))
ne <- new.env()
fname <- load(file = input$browseData$datapath,
envir = ne)
dt2 <- ne[[fname]]
DT::datatable(head(dt2, 20),
options = list(pageLength = 10),
selection = list(mode = "multiple"),
rownames = FALSE)
})
observeEvent(input$convert, {
output$tblComorb <- DT::renderDT({
dt.comorb <- isolate(dtcmbf())
DT::datatable(head(dt.comorb, 20),
options = list(pageLength = 10),
selection = list(mode = "multiple"),
rownames = FALSE)
})
})
output$downloadComorb <- downloadHandler(
filename = function() {
paste("comorb_",
Sys.Date(),
".RData",
sep = "")
},
content = function(file) {
dt.comorb <- dtcmbf()
save(dt.comorb,
file = file)
}
)
}
shinyApp(ui, server)
|
# Weight Lifting Exercise (wearable sensor) classifier: fit naive Bayes, GBM,
# and random forest base models with 5-fold CV, stack their predictions with a
# random forest, and predict the 20-case validation set.
set.seed(355)
library(caret); library(ggplot2); library(RANN); library(klaR)
setwd("D:/Coursera/MLFirst")
# Treat "#DIV/0!" and empty strings as missing on read.
testing3<- read.csv("pml-testing.csv", na.strings=c("NA","#DIV/0!", ""))
training3<- read.csv("pml-training.csv", na.strings=c("NA","#DIV/0!", ""))
# Keep only columns with no missing values in the training set.
training3<-training3[ , apply( training3, 2, function(x) !any(is.na(x)))]
# kNN imputation (implies centring/scaling) learned on training data, applied
# to both sets. NOTE(review): testing3 has different columns than training3
# (problem_id instead of classe) — confirm predict(prePt, testing3) behaves.
prePt<- preProcess(training3, method=c("knnImpute"))
training1<- predict(prePt,training3)
testing1<- predict(prePt,testing3)
training1$classe <- factor(training1$classe)
# Drop the first 7 bookkeeping columns (ids, timestamps, windows).
validation<- testing1[,-c(1:7)]
training2<- training1[,-c(1:7)]
# 70/30 split for base-model training and stacking evaluation.
INtraining<- createDataPartition(training2$classe, p=0.7, list=FALSE)
training<- training2[INtraining,]
testing<- training2[-INtraining,]
train_control <- trainControl(method="cv", number=5)
# Base learners, each evaluated on the held-out 30%.
fitnb<- train(classe~., data=training, trControl=train_control, method="nb", na.action=na.pass)
fittednb<- predict(fitnb, testing)
CMnb<- confusionMatrix(fittednb,testing$classe)
fitgbm<- train(classe~., data=training, trControl=train_control, method="gbm", na.action=na.pass, verbose=FALSE)
fittedgbm<- predict(fitgbm, testing)
CMgbm<- confusionMatrix(fittedgbm,testing$classe)
fitrf<- train(classe~., data=training, trControl=train_control, method="rf", na.action=na.pass)
fittedrf<- predict(fitrf, testing)
CMrf<- confusionMatrix(fittedrf,testing$classe)
# Stacked model: random forest on the three base-model predictions.
prDF<-data.frame(fittednb,fittedrf, fittedgbm, classe=testing$classe)
fitcombined<- train(classe~., data=prDF, trControl=train_control, method="rf", na.action=na.pass)
fittedcombined<- predict(fitcombined, prDF)
CMcombined<- confusionMatrix(fittedcombined,testing$classe)
# Validation-set predictions from the base models.
fittedrfV<- predict(fitrf, validation)
fittedgbmV<- predict(fitgbm, validation)
fittednbV<- predict(fitnb, validation)
# BUG FIX: the stacked model must be fed the base-model predictions for the
# *validation* cases, with the same column names used when it was trained —
# the original predicted on prDF (the test-set frame), so fittedcombinedV
# simply repeated the test-set stacked predictions.
prDFV <- data.frame(fittednb = fittednbV, fittedrf = fittedrfV, fittedgbm = fittedgbmV)
fittedcombinedV<- predict(fitcombined, prDFV)
|
/Wearable.R
|
no_license
|
Peyman-Heidari/WearableML
|
R
| false
| false
| 1,764
|
r
|
set.seed(355)
library(caret); library(ggplot2); library(RANN); library(klaR)
setwd("D:/Coursera/MLFirst")
testing3<- read.csv("pml-testing.csv", na.strings=c("NA","#DIV/0!", ""))
training3<- read.csv("pml-training.csv", na.strings=c("NA","#DIV/0!", ""))
training3<-training3[ , apply( training3, 2, function(x) !any(is.na(x)))]
prePt<- preProcess(training3, method=c("knnImpute"))
training1<- predict(prePt,training3)
testing1<- predict(prePt,testing3)
training1$classe <- factor(training1$classe)
validation<- testing1[,-c(1:7)]
training2<- training1[,-c(1:7)]
INtraining<- createDataPartition(training2$classe, p=0.7, list=FALSE)
training<- training2[INtraining,]
testing<- training2[-INtraining,]
train_control <- trainControl(method="cv", number=5)
fitnb<- train(classe~., data=training, trControl=train_control, method="nb", na.action=na.pass)
fittednb<- predict(fitnb, testing)
CMnb<- confusionMatrix(fittednb,testing$classe)
fitgbm<- train(classe~., data=training, trControl=train_control, method="gbm", na.action=na.pass, verbose=FALSE)
fittedgbm<- predict(fitgbm, testing)
CMgbm<- confusionMatrix(fittedgbm,testing$classe)
fitrf<- train(classe~., data=training, trControl=train_control, method="rf", na.action=na.pass)
fittedrf<- predict(fitrf, testing)
CMrf<- confusionMatrix(fittedrf,testing$classe)
prDF<-data.frame(fittednb,fittedrf, fittedgbm, classe=testing$classe)
fitcombined<- train(classe~., data=prDF, trControl=train_control, method="rf", na.action=na.pass)
fittedcombined<- predict(fitcombined, prDF)
CMcombined<- confusionMatrix(fittedcombined,testing$classe)
fittedrfV<- predict(fitrf, validation)
fittedgbmV<- predict(fitgbm, validation)
fittednbV<- predict(fitnb, validation)
fittedcombinedV<- predict(fitcombined, prDF)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_csvy.R
\name{extract_colclasses}
\alias{extract_colclasses}
\title{Extract column classes from metadata}
\usage{
extract_colclasses(meta_data, verbose = TRUE)
}
\arguments{
\item{meta_data}{Named list of metadata values returned by
\link{read_yaml_header}}
\item{verbose}{Logical. If \code{TRUE}, warn about missing fields.}
}
\value{
Named vector of column classes, suitable for \code{colClasses} argument
of \link[data.table:fread]{data.table::fread}.
}
\description{
Extract column classes from metadata
}
|
/man/extract_colclasses.Rd
|
no_license
|
ashiklom/metar
|
R
| false
| true
| 593
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_csvy.R
\name{extract_colclasses}
\alias{extract_colclasses}
\title{Extract column classes from metadata}
\usage{
extract_colclasses(meta_data, verbose = TRUE)
}
\arguments{
\item{meta_data}{Named list of metadata values returned by
\link{read_yaml_header}}
\item{verbose}{Logical. If \code{TRUE}, warn about missing fields.}
}
\value{
Named vector of column classes, suitable for \code{colClasses} argument
of \link[data.table:fread]{data.table::fread}.
}
\description{
Extract column classes from metadata
}
|
# Run the dementiaSunday2 OHDSI study package end to end (cohort creation,
# analyses, diagnostics, packaging) and prepare/launch the Shiny evidence
# explorer on the exported results.
library(dementiaSunday2)
# Optional: specify where the temporary files (used by the ff package) will be created:
options(fftempdir = "s:/FFtemp")
# Maximum number of cores to be used:
maxCores <- parallel::detectCores()
# The folder where the study intermediate and result files will be written:
outputFolder <- "s:/dementiaSunday2"
# Details for connecting to the server:
# Credentials are read from environment variables; user/password NULL means
# integrated authentication for PDW.
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = "pdw",
                                                                server = Sys.getenv("PDW_SERVER"),
                                                                user = NULL,
                                                                password = NULL,
                                                                port = Sys.getenv("PDW_PORT"))
# The name of the database schema where the CDM data can be found:
# NOTE(review): this schema points at Truven MDCD, but databaseId/databaseName
# below describe SynPUF — confirm which database this run is actually against.
cdmDatabaseSchema <- "cdm_truven_mdcd_v699.dbo"
# The name of the database schema and table where the study-specific cohorts will be instantiated:
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "mschuemi_skeleton"
# Some meta-information that will be used by the export function:
databaseId <- "Synpuf"
databaseName <- "Medicare Claims Synthetic Public Use Files (SynPUFs)"
databaseDescription <- "Medicare Claims Synthetic Public Use Files (SynPUFs) were created to allow interested parties to gain familiarity using Medicare claims data while protecting beneficiary privacy. These files are intended to promote development of software and applications that utilize files in this format, train researchers on the use and complexities of Centers for Medicare and Medicaid Services (CMS) claims, and support safe data mining innovations. The SynPUFs were created by combining randomized information from multiple unique beneficiaries and changing variable values. This randomization and combining of beneficiary information ensures privacy of health information."
# For Oracle: define a schema that can be used to emulate temp tables:
oracleTempSchema <- NULL
# Full pipeline: all stages enabled.
execute(connectionDetails = connectionDetails,
        cdmDatabaseSchema = cdmDatabaseSchema,
        cohortDatabaseSchema = cohortDatabaseSchema,
        cohortTable = cohortTable,
        oracleTempSchema = oracleTempSchema,
        outputFolder = outputFolder,
        databaseId = databaseId,
        databaseName = databaseName,
        databaseDescription = databaseDescription,
        createCohorts = TRUE,
        synthesizePositiveControls = TRUE,
        runAnalyses = TRUE,
        runDiagnostics = TRUE,
        packageResults = TRUE,
        maxCores = maxCores)
# Unpack the packaged results and launch the viewer (blinded to effect sizes).
resultsZipFile <- file.path(outputFolder, "export", paste0("Results", databaseId, ".zip"))
dataFolder <- file.path(outputFolder, "shinyData")
prepareForEvidenceExplorer(resultsZipFile = resultsZipFile, dataFolder = dataFolder)
launchEvidenceExplorer(dataFolder = dataFolder, blind = TRUE, launch.browser = FALSE)
|
/OhdsiDataThonKorea2019/Studies/dementiaSunday2/extras/CodeToRun.R
|
permissive
|
ohdsi-korea/OhdsiKoreaTutorials
|
R
| false
| false
| 2,937
|
r
|
library(dementiaSunday2)
# Optional: specify where the temporary files (used by the ff package) will be created:
options(fftempdir = "s:/FFtemp")
# Maximum number of cores to be used:
maxCores <- parallel::detectCores()
# The folder where the study intermediate and result files will be written:
outputFolder <- "s:/dementiaSunday2"
# Details for connecting to the server:
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = "pdw",
server = Sys.getenv("PDW_SERVER"),
user = NULL,
password = NULL,
port = Sys.getenv("PDW_PORT"))
# The name of the database schema where the CDM data can be found:
cdmDatabaseSchema <- "cdm_truven_mdcd_v699.dbo"
# The name of the database schema and table where the study-specific cohorts will be instantiated:
cohortDatabaseSchema <- "scratch.dbo"
cohortTable <- "mschuemi_skeleton"
# Some meta-information that will be used by the export function:
databaseId <- "Synpuf"
databaseName <- "Medicare Claims Synthetic Public Use Files (SynPUFs)"
databaseDescription <- "Medicare Claims Synthetic Public Use Files (SynPUFs) were created to allow interested parties to gain familiarity using Medicare claims data while protecting beneficiary privacy. These files are intended to promote development of software and applications that utilize files in this format, train researchers on the use and complexities of Centers for Medicare and Medicaid Services (CMS) claims, and support safe data mining innovations. The SynPUFs were created by combining randomized information from multiple unique beneficiaries and changing variable values. This randomization and combining of beneficiary information ensures privacy of health information."
# For Oracle: define a schema that can be used to emulate temp tables:
oracleTempSchema <- NULL
execute(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
oracleTempSchema = oracleTempSchema,
outputFolder = outputFolder,
databaseId = databaseId,
databaseName = databaseName,
databaseDescription = databaseDescription,
createCohorts = TRUE,
synthesizePositiveControls = TRUE,
runAnalyses = TRUE,
runDiagnostics = TRUE,
packageResults = TRUE,
maxCores = maxCores)
resultsZipFile <- file.path(outputFolder, "export", paste0("Results", databaseId, ".zip"))
dataFolder <- file.path(outputFolder, "shinyData")
prepareForEvidenceExplorer(resultsZipFile = resultsZipFile, dataFolder = dataFolder)
launchEvidenceExplorer(dataFolder = dataFolder, blind = TRUE, launch.browser = FALSE)
|
#=====================================================================
# Filename: crossValGam_v1.R
# Objective: Build and score a gam using n-fold cross-validation
# Date: 2013-09-10
# Version: v1
# Depends: gam
# Author: Madhav Kumar
#
#
# Inputs:  01. Formula
#          02. Training data
#          03. Testing data (op) -- if not supplied,
#              20% of training data is randomly
#              selected for testing
#          04. ID (op) unique id variable(s) -- must
#              be present in train and test
#              -- If not supplied, it is internally created
#              with the original sort of train and test
#          05. Distribution (op) - family
#          06. Model name (op) - currently ineffective
#          07. Seed (op) - seed for reproducibility
#
# op: optional
#
# Outputs: 00. Output is a list object
#          01. val.pred - out-of-fold (cross-validation) predictions
#          02. test.pred - predictions on test data, averaged over folds
#          03. importance - average importance of variables
#              across folds
#          04. gam model, val predictions, test predictions, and
#              importance for each fold
#
#=====================================================================

# Fit a gam on each training fold, predict on the held-out fold and on the
# test set, and average variable importance (F statistics from the
# parametric ANOVA table) across folds.
crossValGam <- function(formula, train, test= NULL, id= NULL,
                        nfolds= 2, distribution= "gaussian",
                        model.name= "gam_model", seed= 314159){
  require(gam)

  # start time counter (elapsed time is printed at the end)
  start.time <- Sys.time()

  # Randomly partition row indices of `data` into `nfolds` folds of
  # (nearly) equal size; returns a list of row-index vectors.
  splitData <- function(data, nfolds, seed){
    set.seed(seed)
    rows <- nrow(data)
    # folds: repeat fold labels to cover all rows, then shuffle them
    folds <- rep(1:nfolds, length.out= rows)[sample(rows, rows)]
    lapply(unique(folds), function(x) which(folds == x))
  }
  # NOTE: folds are assigned BEFORE the optional 20% test split below, so
  # when `test` is NULL the fold indices refer to the pre-split row count.
  foldid <- splitData(train, nfolds, seed)

  # empty list to store per-fold results:
  # [[1]] model, [[2]] val predictions, [[3]] test predictions, [[4]] importance
  result <- lapply(1:nfolds, function(x) vector('list', 4))
  names(result) <- paste("fold", 1:nfolds, sep= "")

  # If no test set supplied, carve a random 20% holdout out of train.
  if(is.null(test)){
    d <- sort(sample(nrow(train), nrow(train)*0.2))
    test <- train[d,]
    train <- train[-d,]
  }

  # Build id data frames for train and test: either the caller-supplied id
  # column(s) or a synthetic 1..n row index named "id".
  if(is.null(id)) {
    id.train <- data.frame(1:nrow(train))
    names(id.train) <- "id"
    id.test <- data.frame(1:nrow(test))
    names(id.test) <- "id"
  } else{
    id.train <- data.frame(train[, id])
    names(id.train) <- id
    id.test <- data.frame(test[, id])
    names(id.test) <- id
  }

  for (i in 1:nfolds){
    print(paste('Fold', i, 'of', nfolds, sep= " "))
    # rows held out for validation in this fold
    r <- sort(foldid[[i]])
    # train model on all rows except the held-out fold
    # (seed is re-set each iteration so every fold starts from the same
    # RNG state)
    set.seed(seed)
    result[[i]][[1]] <- gam(formula, train[-r,], family= distribution, trace= TRUE)
    # score on val (the held-out fold)
    val.pred <- predict(result[[i]][[1]], train[r, ], type= "response")
    result[[i]][[2]] <- data.frame(id.train[r,], val.pred)
    names(result[[i]][[2]]) <- c(names(id.train), "pred")
    # score on test
    test.pred <- predict(result[[i]][[1]], test, type= "response")
    result[[i]][[3]] <- data.frame(id.test, test.pred)
    # NOTE(review): names(id.train) is used here rather than names(id.test);
    # harmless because both carry the same column names, but confirm intent.
    names(result[[i]][[3]]) <- c(names(id.train), "pred")
    # important variables: F statistics (column 4) from the parametric
    # ANOVA table; the last row (Residuals) is dropped via 1:(n-1).
    n <- length(row.names(summary(result[[i]][[1]])$parametric.anova))
    result[[i]][[4]] <- data.frame(var= row.names(summary(result[[i]][[1]])$parametric.anova)[1:(n-1)],
                                   imp= summary(result[[i]][[1]])$parametric.anova[1:(n-1),4])
  }

  # aggregate per-fold pieces into the final output list
  output <- list()
  # validation: stack the out-of-fold predictions and sort by id
  output[[1]]<- do.call(rbind, lapply(1:nfolds, function(x) result[[x]][[2]]))
  names(output[[1]]) <- c(names(id.train), "pred")
  output[[1]] <- output[[1]][order(output[[1]][, names(id.train)]), ]
  # test: cbind fold 1's (id + pred) with the pred column of later folds,
  # then average the prediction columns row-wise
  test.out <- do.call(cbind, lapply(1:nfolds,
                                    function(x){
                                      if (x == 1){
                                        result[[x]][[3]]
                                      } else {
                                        result[[x]][[3]][,ncol(result[[x]][[3]])]
                                      }
                                    }))
  output[[2]] <- data.frame(id.test, pred= rowMeans(test.out[, (dim(id.train)[2] + 1):ncol(test.out)]))
  # importance: mean importance per variable across folds, sorted descending
  imp.out <- do.call(rbind, lapply(1:nfolds, function(x) result[[x]][[4]]))
  imp.out <- aggregate(imp.out[, "imp"], by= list(imp.out$var), mean, na.rm= TRUE)
  names(imp.out) <- c("var", "avg.imp")
  imp.out <- imp.out[order(-imp.out$avg.imp), ]
  output[[3]] <- imp.out
  # collate output: summaries first, then the raw per-fold results
  names(output) <- c("val.pred", "test.pred", "importance")
  output <- c(output, result)
  # time elapsed
  end.time <- Sys.time()
  print(end.time - start.time)
  output
}
|
/crossValGam_v1.R
|
no_license
|
madhavkumar2005/cross-validation
|
R
| false
| false
| 4,819
|
r
|
#=====================================================================
# Filename: crossValGam_v1.R
# Objective: Build and score a gam using n-fold cross-validation
# Date: 2013-09-10
# Version: v1
# Depends: gam
# Author: Madhav Kumar
#
#
# Inputs: 01. Formula
# 02. Training data
# 03. Testing data (op) -- if not supplied
# 20% of training data is randomly
# selected for testing
# 04. ID (op) unique id variable(s) -- must
# be present in train and test
# -- If not supplied, it is internally created
# with the original sort of train and test
# 05. Distribution (op) - family
# 06. Model name (op) - currently ineffective
# 07. Seed (op) - seed for reproducibility
#
# op: optional
#
# Outputs: 00. Ouput is list object
# 01. var.pred - cross validation predictions
# 02. test.pred - predictions on test data
# 03. importance - average importance of variables
# accross folds
# 04. gam model, val predictions, test predictions, and
# importance for each repition
#
#=====================================================================
crossValGam <- function(formula, train, test= NULL, id= NULL,
nfolds= 2, distribution= "gaussian",
model.name= "gam_model", seed= 314159){
require(gam)
# start time counter
start.time <- Sys.time()
splitData <- function(data, nfolds, seed){
set.seed(seed)
rows <- nrow(data)
# folds
folds <- rep(1:nfolds, length.out= rows)[sample(rows, rows)]
lapply(unique(folds), function(x) which(folds == x))
}
foldid <- splitData(train, nfolds, seed)
# empty list to store results
result <- lapply(1:nfolds, function(x) vector('list', 4))
names(result) <- paste("fold", 1:nfolds, sep= "")
if(is.null(test)){
d <- sort(sample(nrow(train), nrow(train)*0.2))
test <- train[d,]
train <- train[-d,]
}
if(is.null(id)) {
id.train <- data.frame(1:nrow(train))
names(id.train) <- "id"
id.test <- data.frame(1:nrow(test))
names(id.test) <- "id"
} else{
id.train <- data.frame(train[, id])
names(id.train) <- id
id.test <- data.frame(test[, id])
names(id.test) <- id
}
for (i in 1:nfolds){
print(paste('Fold', i, 'of', nfolds, sep= " "))
# rows for training and vaidation
r <- sort(foldid[[i]])
# train model
set.seed(seed)
result[[i]][[1]] <- gam(formula, train[-r,], family= distribution, trace= TRUE)
# score on val
val.pred <- predict(result[[i]][[1]], train[r, ], type= "response")
result[[i]][[2]] <- data.frame(id.train[r,], val.pred)
names(result[[i]][[2]]) <- c(names(id.train), "pred")
# score on test
test.pred <- predict(result[[i]][[1]], test, type= "response")
result[[i]][[3]] <- data.frame(id.test, test.pred)
names(result[[i]][[3]]) <- c(names(id.train), "pred")
# important variables
n <- length(row.names(summary(result[[i]][[1]])$parametric.anova))
result[[i]][[4]] <- data.frame(var= row.names(summary(result[[i]][[1]])$parametric.anova)[1:(n-1)],
imp= summary(result[[i]][[1]])$parametric.anova[1:(n-1),4])
}
# aggregate outputs
output <- list()
# validation
output[[1]]<- do.call(rbind, lapply(1:nfolds, function(x) result[[x]][[2]]))
names(output[[1]]) <- c(names(id.train), "pred")
output[[1]] <- output[[1]][order(output[[1]][, names(id.train)]), ]
# test
test.out <- do.call(cbind, lapply(1:nfolds,
function(x){
if (x == 1){
result[[x]][[3]]
} else {
result[[x]][[3]][,ncol(result[[x]][[3]])]
}
}))
output[[2]] <- data.frame(id.test, pred= rowMeans(test.out[, (dim(id.train)[2] + 1):ncol(test.out)]))
# importance
imp.out <- do.call(rbind, lapply(1:nfolds, function(x) result[[x]][[4]]))
imp.out <- aggregate(imp.out[, "imp"], by= list(imp.out$var), mean, na.rm= TRUE)
names(imp.out) <- c("var", "avg.imp")
imp.out <- imp.out[order(-imp.out$avg.imp), ]
output[[3]] <- imp.out
# collate output
names(output) <- c("val.pred", "test.pred", "importance")
output <- c(output, result)
# time elapsed
end.time <- Sys.time()
print(end.time - start.time)
output
}
|
# Getting & Cleaning Data course project:
# download the UCI HAR dataset, merge the train/test sets, keep only the
# mean/std features, attach descriptive activity labels, and write a tidy
# data set of per-subject / per-activity feature means.

#Load Required Libraries
library(plyr)
# BUGFIX: melt() and dcast() used below come from reshape2, which was never
# loaded -- the script aborted at the reshaping step in a fresh session.
library(reshape2)
#I had issues downloading the .zip file without this.
library(RCurl)

#R will need write rights to the current directory
filename <- "getdata_dataset.zip"

#Check if file exists, if not download it and unzip it
if (!file.exists(filename)){
  download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",destfile=filename,method="libcurl")
}
if (!file.exists("UCI HAR Dataset")) {unzip(filename)}

#Get activity labels and feature names
activLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activLabels[,2] <- as.character(activLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])

#Pick only mean and std features (desiredFeatures)
desFeatures <- grep(".*mean.*|.*std.*", features[,2])
desFeatures.names <- features[desFeatures,2]

#Load train data: selected measurements, activity codes, subject ids
activityTrainX <- read.table("UCI HAR Dataset/train/X_train.txt")[desFeatures]
activityTrainY <- read.table("UCI HAR Dataset/train/Y_train.txt")
activityTrainSubj <- read.table("UCI HAR Dataset/train/subject_train.txt")
activityTrain <- cbind(activityTrainSubj, activityTrainY, activityTrainX)

#Similarly load test data
activityTestX <- read.table("UCI HAR Dataset/test/X_test.txt")[desFeatures]
activityTestY <- read.table("UCI HAR Dataset/test/Y_test.txt")
activityTestSubj <- read.table("UCI HAR Dataset/test/subject_test.txt")
activityTest <- cbind(activityTestSubj, activityTestY, activityTestX)

#Merge Test and train data
trainAndTest <- rbind(activityTrain, activityTest)
colnames(trainAndTest) <- c("subject", "activity", desFeatures.names)

#Turn identifier columns into factors; activity gets descriptive labels
trainAndTest$activity <- factor(trainAndTest$activity, levels = activLabels[,1], labels = activLabels[,2])
trainAndTest$subject <- as.factor(trainAndTest$subject)

#Average every feature within each subject/activity pair
trainAndTest.melted <- melt(trainAndTest, id = c("subject", "activity"))
trainAndTest.mean <- dcast(trainAndTest.melted, subject + activity ~ variable, mean)

#Write out the output file with means only, plus the feature names used
write.table(trainAndTest.mean, "tidyActivityData.txt", row.names = FALSE)
write.table(desFeatures.names, "featureNames.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
akitbalian/Coursera-Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 2,381
|
r
|
# Getting & Cleaning Data course project:
# download the UCI HAR dataset, merge the train/test sets, keep only the
# mean/std features, attach descriptive activity labels, and write a tidy
# data set of per-subject / per-activity feature means.

#Load Required Libraries
library(plyr)
# BUGFIX: melt() and dcast() used below come from reshape2, which was never
# loaded -- the script aborted at the reshaping step in a fresh session.
library(reshape2)
#I had issues downloading the .zip file without this.
library(RCurl)

#R will need write rights to the current directory
filename <- "getdata_dataset.zip"

#Check if file exists, if not download it and unzip it
if (!file.exists(filename)){
  download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",destfile=filename,method="libcurl")
}
if (!file.exists("UCI HAR Dataset")) {unzip(filename)}

#Get activity labels and feature names
activLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activLabels[,2] <- as.character(activLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])

#Pick only mean and std features (desiredFeatures)
desFeatures <- grep(".*mean.*|.*std.*", features[,2])
desFeatures.names <- features[desFeatures,2]

#Load train data: selected measurements, activity codes, subject ids
activityTrainX <- read.table("UCI HAR Dataset/train/X_train.txt")[desFeatures]
activityTrainY <- read.table("UCI HAR Dataset/train/Y_train.txt")
activityTrainSubj <- read.table("UCI HAR Dataset/train/subject_train.txt")
activityTrain <- cbind(activityTrainSubj, activityTrainY, activityTrainX)

#Similarly load test data
activityTestX <- read.table("UCI HAR Dataset/test/X_test.txt")[desFeatures]
activityTestY <- read.table("UCI HAR Dataset/test/Y_test.txt")
activityTestSubj <- read.table("UCI HAR Dataset/test/subject_test.txt")
activityTest <- cbind(activityTestSubj, activityTestY, activityTestX)

#Merge Test and train data
trainAndTest <- rbind(activityTrain, activityTest)
colnames(trainAndTest) <- c("subject", "activity", desFeatures.names)

#Turn identifier columns into factors; activity gets descriptive labels
trainAndTest$activity <- factor(trainAndTest$activity, levels = activLabels[,1], labels = activLabels[,2])
trainAndTest$subject <- as.factor(trainAndTest$subject)

#Average every feature within each subject/activity pair
trainAndTest.melted <- melt(trainAndTest, id = c("subject", "activity"))
trainAndTest.mean <- dcast(trainAndTest.melted, subject + activity ~ variable, mean)

#Write out the output file with means only, plus the feature names used
write.table(trainAndTest.mean, "tidyActivityData.txt", row.names = FALSE)
write.table(desFeatures.names, "featureNames.txt", row.names = FALSE)
|
#' @description Generates draws from a finite mixture of multivariate
#'   normals: each observation is assigned to one of the m components with
#'   probability alpha, then sampled from that component's MVN distribution.
#' @export
#' @title rmvnmix
#' @name rmvnmix
#' @param n The number of observations to generate
#' @param alpha m by 1 vector of mixing proportions (one per component)
#' @param mu d by m matrix whose j-th column is the mean of component j
#' @param sigma d by d*m matrix holding the m component covariance matrices
#'   stacked side by side; columns (d*(j-1)+1):(d*j) belong to component j
#' @return n by d matrix of draws (row i is one d-dimensional observation)
rmvnmix <- function(n, alpha, mu, sigma){
  m <- length(alpha)  # number of mixture components
  d <- nrow(mu)       # dimension of each observation
  # component membership for each of the n observations
  Ind <- sample((1:m), n, replace=TRUE, prob=alpha)
  y <- matrix(0, nrow=n, ncol=d)
  for (j in (1:m)){
    nj <- sum(Ind==j)  # number of draws assigned to component j
    muj <- mu[,j]
    # slice out component j's d x d covariance block
    sigmaj <- sigma[,(d*(j-1)+1):(d*j)]
    # NOTE(review): rmvnorm is not defined in this file; the `mu=` argument
    # matches mixtools::rmvnorm (mvtnorm::rmvnorm uses `mean=` instead) --
    # confirm which package is expected to be attached.
    yj <- rmvnorm(nj, mu=muj, sigma = sigmaj)
    # scatter the component-j draws back to their assigned rows
    y[Ind==j,] <- yj
  }
  y
}
#' @description Convert a packed lower-triangular vector into a full
#'   symmetric matrix.
#' @export
#' @title sigmavec2mat
#' @name sigmavec2mat
#' @param sigma.vec numeric vector of length d*(d+1)/2 holding the lower
#'   triangle (including the diagonal) of a symmetric matrix, filled
#'   column-wise
#' @param d dimension of the (square) output matrix
#' @return d by d symmetric matrix
# DOCFIX: the roxygen block previously documented n/alpha/mu/sigma (copied
# from rmvnmix); the actual parameters are sigma.vec and d.
sigmavec2mat <- function(sigma.vec, d){
  # sigma.vec is a vector of length d(d+1)/2
  sigma <- diag(d)
  sigma[lower.tri(sigma, diag=TRUE)] <- sigma.vec
  # Mirror the lower triangle into the upper triangle; t(sigma) + sigma
  # doubles the diagonal, so halve it afterwards.
  sigma <- t(sigma) + sigma
  diag(sigma) <- diag(sigma)/2
  sigma
} # end function sigmavec2mat
|
/R/mvn_methods.R
|
no_license
|
chiyahn/mvnMix
|
R
| false
| false
| 1,317
|
r
|
#' @description Generates draws from a finite mixture of multivariate
#'   normals: each observation is assigned to one of the m components with
#'   probability alpha, then sampled from that component's MVN distribution.
#' @export
#' @title rmvnmix
#' @name rmvnmix
#' @param n The number of observations to generate
#' @param alpha m by 1 vector of mixing proportions (one per component)
#' @param mu d by m matrix whose j-th column is the mean of component j
#' @param sigma d by d*m matrix holding the m component covariance matrices
#'   stacked side by side; columns (d*(j-1)+1):(d*j) belong to component j
#' @return n by d matrix of draws (row i is one d-dimensional observation)
rmvnmix <- function(n, alpha, mu, sigma){
  m <- length(alpha)  # number of mixture components
  d <- nrow(mu)       # dimension of each observation
  # component membership for each of the n observations
  Ind <- sample((1:m), n, replace=TRUE, prob=alpha)
  y <- matrix(0, nrow=n, ncol=d)
  for (j in (1:m)){
    nj <- sum(Ind==j)  # number of draws assigned to component j
    muj <- mu[,j]
    # slice out component j's d x d covariance block
    sigmaj <- sigma[,(d*(j-1)+1):(d*j)]
    # NOTE(review): rmvnorm is not defined in this file; the `mu=` argument
    # matches mixtools::rmvnorm (mvtnorm::rmvnorm uses `mean=` instead) --
    # confirm which package is expected to be attached.
    yj <- rmvnorm(nj, mu=muj, sigma = sigmaj)
    # scatter the component-j draws back to their assigned rows
    y[Ind==j,] <- yj
  }
  y
}
#' @description Convert a packed lower-triangular vector into a full
#'   symmetric matrix.
#' @export
#' @title sigmavec2mat
#' @name sigmavec2mat
#' @param sigma.vec numeric vector of length d*(d+1)/2 holding the lower
#'   triangle (including the diagonal) of a symmetric matrix, filled
#'   column-wise
#' @param d dimension of the (square) output matrix
#' @return d by d symmetric matrix
# DOCFIX: the roxygen block previously documented n/alpha/mu/sigma (copied
# from rmvnmix); the actual parameters are sigma.vec and d.
sigmavec2mat <- function(sigma.vec, d){
  # sigma.vec is a vector of length d(d+1)/2
  sigma <- diag(d)
  sigma[lower.tri(sigma, diag=TRUE)] <- sigma.vec
  # Mirror the lower triangle into the upper triangle; t(sigma) + sigma
  # doubles the diagonal, so halve it afterwards.
  sigma <- t(sigma) + sigma
  diag(sigma) <- diag(sigma)/2
  sigma
} # end function sigmavec2mat
|
rm(list = ls()) # clear the workspace -- assumes this script runs in its own fresh session
#add all packages
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
# BUGFIX: duplicate library(splines) and library(data.table) calls removed;
# both packages are already attached above.
#sourcing
source("/media/qnap/Data/code/R_functions/CV_splits.r")
source("/media/qnap/Data/code/R_functions/rmspe.r")
# Accumulators filled inside the per-year loop below:
res=c()       # full mixed-model R^2 per year
obs=c()       # number of observations used per year
res_raw=c()   # raw AOD~PM2.5 R^2 per year (before QA cleaning)
res_raw2=c()  # raw AOD~PM2.5 R^2 per year (after QA cleaning)
#load data
mod1_all <-readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.AQ.2003_2015.PM25_Daily/mod1.AQ.2003_2015.PM25_Daily_re.rds")
mod1_all$year=substr(mod1_all$year,1,4)
# creating a filter field of the forward scattering (FS=1) and the backward scattering (BS=0 or else)
mod1_all$FS_BS=1
# First option for data division by Azimuth angle (data.table update by reference):
mod1_all <- mod1_all[RelAZ> 90, FS_BS := 0]
y=c(2003:2015)
# AQUA data
# For each year: compute raw AOD~PM2.5 R^2, apply QA cleaning, then fit the
# full mixed model and record its R^2 and sample size.
for (i in y)
{
  # subset the pooled data to the current year
  mod1=filter(mod1_all, year==i)
  mod1=as.data.table(mod1)
  ### ADD ventilation coefficient
  # NOTE(review): this divides wind speed by PBL height rather than
  # multiplying -- confirm the intended definition of vc_D.
  mod1$vc_D=c(mod1$WS_D/(mod1$daily_hpbl*1000))
  # Bin percent-urban (P_Ur) into 5 ordinal urban/rural classes
  mod1$Ur_Ru_1 <- NA
  mod1$Ur_Ru_1<- mod1$P_Ur
  mod1$Ur_Ru_1[mod1$Ur_Ru_1<=20] <-1
  mod1$Ur_Ru_1[20<= mod1$Ur_Ru_1 & mod1$Ur_Ru_1<=40] <-2
  mod1$Ur_Ru_1[40<= mod1$Ur_Ru_1 & mod1$Ur_Ru_1<=60] <-3
  mod1$Ur_Ru_1[60<= mod1$Ur_Ru_1 & mod1$Ur_Ru_1<=80] <-4
  mod1$Ur_Ru_1[mod1$Ur_Ru_1>=80] <-5
  #scale variables
  # (this `names` vector of covariate names shadows base::names for plain
  # `names[i]` lookups later in the script)
  names=c("Elev","dis_inventory","Dis_Mroads","road_den","Pop_dens","Dis_Rd1_2012","Dis_Rd2_2012","Dist_Railw",
          "Dist_WB","Temp_D","P_In","P_OS","P_Ur","P_Ag","Dist_WB",
          "WS_D","RH_D","Rain_D","NO2_D" ,"SO2_D","pbl_02","pbl_11","vc_D","Elev_200m","Pop_dens_200m",
          "Road_den_200m" ,"Road_dis_200m","Dis_Railways_200m","P_In_200m","P_Ur_200m",
          "dist_inven_200m","ndvi","daily_hpbl","su_ndvi_L_200m","sp_ndvi_L_200m")
  # interactive sanity checks: fraction / list of covariates missing from mod1
  mean(names %in% names(mod1))
  a=names %in% names(mod1)
  b=names
  b[which(a==FALSE)]
  mod1 = mod1 %>% as.data.frame
  # z-score each covariate and append as "<name>.scaled" columns
  scaled = mod1[,names] %>% dplyr::mutate_each(funs(scale))
  colnames(scaled) = paste0(colnames(scaled), ".scaled")
  mod1 = cbind(mod1, scaled)
  names(mod1)
  #Raw full correlation (simple OLS of PM2.5 on AOD, no cleaning)
  m1.formula <- as.formula(PM25 ~ aod_047_mean)
  mod1fit = lm(m1.formula,data=mod1)
  summary(mod1fit)
  mod1$pred.m1 <- predict(mod1fit)
  res1<- summary(lm(PM25~pred.m1,data=mod1))$r.squared
  res_raw=c(res_raw,res1)
  # Raw cleaned correlation: QA filters on scattering geometry, uncertainty
  # and AOD range
  mod1=as.data.table(mod1)
  mod1<-filter(mod1,RelAZ < 90)
  mod1<-filter(mod1,UN < 0.04 & UN > 0)
  mod1<-filter(mod1,aod_047_mean < 3)
  # check massimo clean
  #massimos thresholds
  # count AOD observations per station (saod)
  x<-dplyr::select(mod1,aod_047_mean,stn)
  x$c<-1
  x <- x %>%
    dplyr::group_by (stn) %>%
    dplyr::summarise(saod=sum(c))
  x=as.data.table(x)
  mod1=as.data.table(mod1)
  #merge back count
  setkey(x,stn)
  setkey(mod1,stn)
  mod1 <- merge(mod1,x, all.x = T)
  # flag inconsistent AOD/PM pairs and low-coverage stations, then drop them
  mod1$exobs<-0
  mod1<-mod1[aod_047_mean < quantile(aod_047_mean, c(.50)) & PM25 > quantile(PM25, c(.90)), exobs := 2]
  mod1<-mod1[aod_047_mean > quantile(aod_047_mean, c(.90)) & PM25 < quantile(PM25, c(.50)), exobs := 3]
  mod1<-mod1[saod < 20 , exobs := 5]
  #take out bad exobs
  mod1<-filter(mod1,exobs==0)
  mod1fit = lm(m1.formula,data=mod1)
  summary(mod1fit)
  mod1$pred.m1 <- predict(mod1fit)
  res2<- summary(lm(PM25~pred.m1,data=mod1))$r.squared
  res_raw2=c(res_raw2,res2)
  mod1=as.data.table(mod1)
  #base mixed model: random intercept + random AOD slope by day
  m1.formula <- as.formula(PM25 ~ aod_047_mean
                           #temporal
                           +(1+aod_047_mean|day))
  #stage 1
  mod1fit <- lmer(m1.formula,data=mod1)
  summary(mod1fit)
  mod1$pred.m1 <- predict(mod1fit)
  print(summary(lm(PM25~pred.m1,data=mod1))$r.squared)
  #
  # #RMSPE
  # res[res$type=="PM25", 'm1.rmspe'] <- print(rmse(residuals(mod1fit)))
  #spatial R^2: station-level means of observed vs predicted PM2.5
  spatialall<-mod1 %>%
    group_by(stn) %>%
    dplyr::summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
  m1.fit.all.s <- lm(barpm ~ barpred, data=spatialall)
  # res[res$type=="PM25", 'm1.R2.space'] <-print(summary(lm(barpm ~ barpred, data=spatialall))$r.squared)
  # res[res$type=="PM25", 'm1.rmspe.space'] <- print(rmse(residuals(m1.fit.all.s)))
  #temporal
  #temporal (take out daily PM from yearly mean)
  tempoall<-left_join(mod1,spatialall)
  tempoall$delpm <-tempoall$PM25-tempoall$barpm
  tempoall$delpred <-tempoall$pred.m1-tempoall$barpred
  mod_temporal <- lm(delpm ~ delpred, data=tempoall)
  # res[res$type=="PM25", 'm1.R2.time']<- print(summary(lm(delpm ~ delpred, data=tempoall))$r.squared)
  # full model needs complete NO2; drop rows where the scaled value is NA
  mod1=mod1[!is.na(mod1$NO2_D.scaled),]
  # full calibration model: spatial + temporal + meteorological covariates,
  # random intercept and AOD slope nested day/metreg
  m1.formula <- as.formula(PM25 ~ aod_047_mean
                           # +aod_047_mean*c
                           # #spatial
                           # +aod_047_mean*FS_BS
                           # +FS_BS
                           # +stn_type
                           # +aod_047_mean*stn_type
                           +Elev.scaled+ndvi
                           # +dis_inventory.scaled
                           # +road_den.scaled
                           +Dist_WB.scaled
                           # +Dist_Railw.scaled
                           +Dis_Rd1_2012.scaled
                           # +road_den.scaled
                           +Dist_WB.scaled
                           + Pop_dens.scaled
                           # +Dist_Railw.scaled
                           +P_In.scaled+P_Ur.scaled+P_Ag.scaled+P_OS.scaled
                           #temporal
                           +daily_hpbl.scaled
                           +pbl_02.scaled
                           # +vc_D.scaled
                           #met
                           # + Temp_D.scaled
                           +Rain_D.scaled
                           +RH_D.scaled
                           # +WS_D.scaled
                           +SO2_D.scaled
                           +NO2_D.scaled
                           +(1+aod_047_mean|day/metreg))
  #stage 1
  # mod1fit <- lmer(m1.formula,data=mod1,weights=normwt)
  mod1fit <- lmer(m1.formula,data=mod1)
  summary(mod1fit)
  mod1$pred.m1 <- predict(mod1fit)
  # record the year's full-model R^2 and sample size
  R2=summary(lm(PM25~pred.m1,data=mod1))$r.squared
  res=c(res,R2)
  n=nrow(mod1)
  obs=c(obs,n)
}
# Collect the per-year results into summary tables.
all_years_FS_BS=data.frame(R2_all=res,y,obs_all=obs)
all_years_FS=data.frame(R2=res,obs)
results=cbind(all_years_FS,all_years_FS_BS)
# BUGFIX: mod1_s was used inside rbind() below before it was ever created,
# which aborts the script with "object 'mod1_s' not found". Start from NULL
# (rbind(NULL, df) returns df).
mod1_s <- NULL
# NOTE(review): `names` still holds the covariate names assigned inside the
# year loop, yet it is matched here against the station column `stn` --
# confirm whether a vector of station names was intended instead.
for (i in seq_along(names))
{
  mod1_f=filter(mod1,stn==names[i])
  mod1_s=rbind(mod1_s,mod1_f)
}
# Derive calendar month and season from the date string (format YYYY-MM-DD).
mod1$month <- as.numeric(substr(mod1$day,6,7))
#1-winter, 2-spring, 3-summer, 4-autumn
mod1$season<-car::recode(mod1$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1")
# Raw AOD~PM2.5 R^2 per season (computed on the last year processed above).
raw_r2=c()
m1.formula <- as.formula(PM25 ~ aod_047_mean)
for (i in 1:4)
{
  mod1_s=filter(mod1,season==i)
  mod1fit = lm(m1.formula,data=mod1_s)
  mod1_s$pred.m1 <- predict(mod1fit)
  r2=print(summary(lm(PM25~pred.m1,data=mod1_s))$r.squared)
  raw_r2=c(raw_r2,r2)
}
|
/Archive/check_mod1_calibration.R
|
no_license
|
alexandrashtein/Model-code
|
R
| false
| false
| 6,656
|
r
|
rm(list = ls())
#add all packages
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
library(splines)
library(data.table)
#sourcing
source("/media/qnap/Data/code/R_functions/CV_splits.r")
source("/media/qnap/Data/code/R_functions/rmspe.r")
res=c()
obs=c()
res_raw=c()
res_raw2=c()
#load data
mod1_all <-readRDS("/media/qnap/Projects/P028.IL.Israel.MAIAC.PM.V2/work/RDS_files/mod1/mod1.AQ.2003_2015.PM25_Daily/mod1.AQ.2003_2015.PM25_Daily_re.rds")
mod1_all$year=substr(mod1_all$year,1,4)
# creating a filter field of the forward scattering (FS=1) and the backward scaterring (BS=0 or else)
mod1_all$FS_BS=1
# # First option for data devision be Azimuth angle:
mod1_all <- mod1_all[RelAZ> 90, FS_BS := 0]
y=c(2003:2015)
# AQUA data
for (i in y)
{
mod1=filter(mod1_all, year==i)
mod1=as.data.table(mod1)
### ADD ventilation coefficient
mod1$vc_D=c(mod1$WS_D/(mod1$daily_hpbl*1000))
mod1$Ur_Ru_1 <- NA
mod1$Ur_Ru_1<- mod1$P_Ur
mod1$Ur_Ru_1[mod1$Ur_Ru_1<=20] <-1
mod1$Ur_Ru_1[20<= mod1$Ur_Ru_1 & mod1$Ur_Ru_1<=40] <-2
mod1$Ur_Ru_1[40<= mod1$Ur_Ru_1 & mod1$Ur_Ru_1<=60] <-3
mod1$Ur_Ru_1[60<= mod1$Ur_Ru_1 & mod1$Ur_Ru_1<=80] <-4
mod1$Ur_Ru_1[mod1$Ur_Ru_1>=80] <-5
#scale variables
names=c("Elev","dis_inventory","Dis_Mroads","road_den","Pop_dens","Dis_Rd1_2012","Dis_Rd2_2012","Dist_Railw",
"Dist_WB","Temp_D","P_In","P_OS","P_Ur","P_Ag","Dist_WB",
"WS_D","RH_D","Rain_D","NO2_D" ,"SO2_D","pbl_02","pbl_11","vc_D","Elev_200m","Pop_dens_200m",
"Road_den_200m" ,"Road_dis_200m","Dis_Railways_200m","P_In_200m","P_Ur_200m",
"dist_inven_200m","ndvi","daily_hpbl","su_ndvi_L_200m","sp_ndvi_L_200m")
mean(names %in% names(mod1))
a=names %in% names(mod1)
b=names
b[which(a==FALSE)]
mod1 = mod1 %>% as.data.frame
scaled = mod1[,names] %>% dplyr::mutate_each(funs(scale))
colnames(scaled) = paste0(colnames(scaled), ".scaled")
mod1 = cbind(mod1, scaled)
names(mod1)
#Raw full correlation
m1.formula <- as.formula(PM25 ~ aod_047_mean)
mod1fit = lm(m1.formula,data=mod1)
summary(mod1fit)
mod1$pred.m1 <- predict(mod1fit)
res1<- summary(lm(PM25~pred.m1,data=mod1))$r.squared
res_raw=c(res_raw,res1)
# Raw cleaned correlation
mod1=as.data.table(mod1)
mod1<-filter(mod1,RelAZ < 90)
mod1<-filter(mod1,UN < 0.04 & UN > 0)
mod1<-filter(mod1,aod_047_mean < 3)
# check massimo clean
#massimos thresholds
x<-dplyr::select(mod1,aod_047_mean,stn)
x$c<-1
x <- x %>%
dplyr::group_by (stn) %>%
dplyr::summarise(saod=sum(c))
x=as.data.table(x)
mod1=as.data.table(mod1)
#merge back count
setkey(x,stn)
setkey(mod1,stn)
mod1 <- merge(mod1,x, all.x = T)
mod1$exobs<-0
mod1<-mod1[aod_047_mean < quantile(aod_047_mean, c(.50)) & PM25 > quantile(PM25, c(.90)), exobs := 2]
mod1<-mod1[aod_047_mean > quantile(aod_047_mean, c(.90)) & PM25 < quantile(PM25, c(.50)), exobs := 3]
mod1<-mod1[saod < 20 , exobs := 5]
#take out bad exobs
mod1<-filter(mod1,exobs==0)
mod1fit = lm(m1.formula,data=mod1)
summary(mod1fit)
mod1$pred.m1 <- predict(mod1fit)
res2<- summary(lm(PM25~pred.m1,data=mod1))$r.squared
res_raw2=c(res_raw2,res2)
mod1=as.data.table(mod1)
#based mixed model
m1.formula <- as.formula(PM25 ~ aod_047_mean
#temporal
+(1+aod_047_mean|day))
#stage 1
mod1fit <- lmer(m1.formula,data=mod1)
summary(mod1fit)
mod1$pred.m1 <- predict(mod1fit)
print(summary(lm(PM25~pred.m1,data=mod1))$r.squared)
#
# #RMSPE
# res[res$type=="PM25", 'm1.rmspe'] <- print(rmse(residuals(mod1fit)))
#spatial
spatialall<-mod1 %>%
group_by(stn) %>%
dplyr::summarise(barpm = mean(PM25, na.rm=TRUE), barpred = mean(pred.m1, na.rm=TRUE))
m1.fit.all.s <- lm(barpm ~ barpred, data=spatialall)
# res[res$type=="PM25", 'm1.R2.space'] <-print(summary(lm(barpm ~ barpred, data=spatialall))$r.squared)
# res[res$type=="PM25", 'm1.rmspe.space'] <- print(rmse(residuals(m1.fit.all.s)))
#temporal
#temporal (take out daily PM from yearly mean)
tempoall<-left_join(mod1,spatialall)
tempoall$delpm <-tempoall$PM25-tempoall$barpm
tempoall$delpred <-tempoall$pred.m1-tempoall$barpred
mod_temporal <- lm(delpm ~ delpred, data=tempoall)
# res[res$type=="PM25", 'm1.R2.time']<- print(summary(lm(delpm ~ delpred, data=tempoall))$r.squared)
mod1=mod1[!is.na(mod1$NO2_D.scaled),]
m1.formula <- as.formula(PM25 ~ aod_047_mean
# +aod_047_mean*c
# #spatial
# +aod_047_mean*FS_BS
# +FS_BS
# +stn_type
# +aod_047_mean*stn_type
+Elev.scaled+ndvi
# +dis_inventory.scaled
# +road_den.scaled
+Dist_WB.scaled
# +Dist_Railw.scaled
+Dis_Rd1_2012.scaled
# +road_den.scaled
+Dist_WB.scaled
+ Pop_dens.scaled
# +Dist_Railw.scaled
+P_In.scaled+P_Ur.scaled+P_Ag.scaled+P_OS.scaled
#temporal
+daily_hpbl.scaled
+pbl_02.scaled
# +vc_D.scaled
#met
# + Temp_D.scaled
+Rain_D.scaled
+RH_D.scaled
# +WS_D.scaled
+SO2_D.scaled
+NO2_D.scaled
+(1+aod_047_mean|day/metreg))
#stage 1
# mod1fit <- lmer(m1.formula,data=mod1,weights=normwt)
mod1fit <- lmer(m1.formula,data=mod1)
summary(mod1fit)
mod1$pred.m1 <- predict(mod1fit)
R2=summary(lm(PM25~pred.m1,data=mod1))$r.squared
res=c(res,R2)
n=nrow(mod1)
obs=c(obs,n)
}
# Collect the per-year results into summary tables.
all_years_FS_BS=data.frame(R2_all=res,y,obs_all=obs)
all_years_FS=data.frame(R2=res,obs)
results=cbind(all_years_FS,all_years_FS_BS)
# BUGFIX: mod1_s was used inside rbind() below before it was ever created,
# which aborts the script with "object 'mod1_s' not found". Start from NULL
# (rbind(NULL, df) returns df).
mod1_s <- NULL
# NOTE(review): `names` still holds the covariate names assigned inside the
# year loop, yet it is matched here against the station column `stn` --
# confirm whether a vector of station names was intended instead.
for (i in seq_along(names))
{
  mod1_f=filter(mod1,stn==names[i])
  mod1_s=rbind(mod1_s,mod1_f)
}
# Derive calendar month and season from the date string (format YYYY-MM-DD).
mod1$month <- as.numeric(substr(mod1$day,6,7))
#1-winter, 2-spring, 3-summer, 4-autumn
mod1$season<-car::recode(mod1$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1")
# Raw AOD~PM2.5 R^2 per season (computed on the last year processed above).
raw_r2=c()
m1.formula <- as.formula(PM25 ~ aod_047_mean)
for (i in 1:4)
{
  mod1_s=filter(mod1,season==i)
  mod1fit = lm(m1.formula,data=mod1_s)
  mod1_s$pred.m1 <- predict(mod1fit)
  r2=print(summary(lm(PM25~pred.m1,data=mod1_s))$r.squared)
  raw_r2=c(raw_r2,r2)
}
|
#Construction of PNG Plot4 for household energy consumption
#(UCI "Individual household electric power consumption" dataset)
datafile <- "./household_power_consumption.txt"
# na.strings="?" maps the dataset's missing-value marker to NA so the
# measurement columns stay numeric-friendly.
power_data <- read.table(datafile, header=TRUE, sep=";", na.strings="?", stringsAsFactors=FALSE)
# Keep only the two days of interest (1-2 Feb 2007).
subset_data <- power_data[power_data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subset_data$Date, subset_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# BUGFIX: on R < 4.0 these columns are read as factors, and as.numeric() on
# a factor returns the internal level codes, not the measured values.
# Converting via as.character() first is correct on every R version.
GlobalActivePower <- as.numeric(as.character(subset_data$Global_active_power))
GlobalReactivePower <- as.numeric(as.character(subset_data$Global_reactive_power))
voltage <- as.numeric(as.character(subset_data$Voltage))
SubMetering1 <- as.numeric(as.character(subset_data$Sub_metering_1))
SubMetering2 <- as.numeric(as.character(subset_data$Sub_metering_2))
SubMetering3 <- as.numeric(as.character(subset_data$Sub_metering_3))
# 2x2 panel of line plots written to plot4.png
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, GlobalActivePower, type="l", xlab="", ylab="Global Active Power")
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, SubMetering1, type="l", ylab="Energy SubMetering", xlab="")
lines(datetime, SubMetering2, type="l", col="red")
lines(datetime, SubMetering3, type="l", col="blue")
# BUGFIX: the original call passed an empty "lty=" argument; give the legend
# an explicit solid line type.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, GlobalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
siddu138/Exp_Data
|
R
| false
| false
| 1,341
|
r
|
#Construction of PNG Plot4 for household energy consumption
#(UCI "Individual household electric power consumption" dataset)
datafile <- "./household_power_consumption.txt"
# na.strings="?" maps the dataset's missing-value marker to NA so the
# measurement columns stay numeric-friendly.
power_data <- read.table(datafile, header=TRUE, sep=";", na.strings="?", stringsAsFactors=FALSE)
# Keep only the two days of interest (1-2 Feb 2007).
subset_data <- power_data[power_data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subset_data$Date, subset_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# BUGFIX: on R < 4.0 these columns are read as factors, and as.numeric() on
# a factor returns the internal level codes, not the measured values.
# Converting via as.character() first is correct on every R version.
GlobalActivePower <- as.numeric(as.character(subset_data$Global_active_power))
GlobalReactivePower <- as.numeric(as.character(subset_data$Global_reactive_power))
voltage <- as.numeric(as.character(subset_data$Voltage))
SubMetering1 <- as.numeric(as.character(subset_data$Sub_metering_1))
SubMetering2 <- as.numeric(as.character(subset_data$Sub_metering_2))
SubMetering3 <- as.numeric(as.character(subset_data$Sub_metering_3))
# 2x2 panel of line plots written to plot4.png
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, GlobalActivePower, type="l", xlab="", ylab="Global Active Power")
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, SubMetering1, type="l", ylab="Energy SubMetering", xlab="")
lines(datetime, SubMetering2, type="l", col="red")
lines(datetime, SubMetering3, type="l", col="blue")
# BUGFIX: the original call passed an empty "lty=" argument; give the legend
# an explicit solid line type.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, GlobalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
# # The purpose of this script is to create a data object (dto) which will hold all data and metadata.
# # Run the lines below to stitch a basic html output.
# knitr::stitch_rmd(
#   script="./manipulation/map2016/Level1_models_full_workingmem.R",
#   output="./manipulation/map2016/output/level1_models_wm_full.md"
# )
# # The above lines are executed only when the file is run in RStudio, !! NOT when an Rmd/Rnw file calls it !!
#
options(scipen=20) # suppress scientific notation in printed output
# ----- load-source ------
rm(list=ls(all=TRUE)) #Clear the variables from previous runs (assumes a dedicated session).
cat("\f") # clear console
# Attach these packages so their functions don't need to be qualified: http://r-pkgs.had.co.nz/namespace.html#search-path
library(magrittr) # enables piping : %>%
library(lmerTest)
library(outliers)
library(psych)
# Call `base::source()` on any repo file that defines functions needed below. Ideally, no real operations are performed.
source("./scripts/common-functions.R") # used in multiple reports
source("./scripts/graph-presets.R")
source("./scripts/general-graphs.R") #in scripts folder
source("./scripts/specific-graphs.R")
source("./scripts/specific-graphs-pred.R")
source("./scripts/graphs-pred.R")
source("./scripts/graphs-predVID.R")
source("./scripts/functions-for-glm-models.R")
source("./scripts/multiplot-function.R")
source("./scripts/map-specific-graphs.R")
source("./scripts/graph_themes.R")
# FIX: duplicate source() calls for multiplot-function.R and graph_themes.R
# removed -- both files are already sourced above.
# Verify these packages are available on the machine, but their functions need to be qualified: http://r-pkgs.had.co.nz/namespace.html#search-path
requireNamespace("ggplot2") # graphing
# requireNamespace("readr") # data input
requireNamespace("tidyr") # data manipulation
requireNamespace("dplyr") # Avoid attaching dplyr, b/c its function names conflict with a lot of packages (esp base, stats, and plyr).
requireNamespace("testit")# For asserting conditions meet expected patterns.
requireNamespace("nlme") # estimate mixed models | esp. gls()
requireNamespace("lme4") # estimate mixed models | esp. lmer()
requireNamespace("arm") # process model objects
getwd()
# ----- specify-objects ------
path_input0 <- "./data/unshared/derived/map2016/map_full_bio_centered.rds"
# ----- load-data ------
ds0 <- readRDS(path_input0) #total raw data
names(ds0)
# str(ds0)
describe(ds0$sdmt)
# Keep an unhalved copy of the SDMT score, then halve the working column.
# (NOTE(review): the backup column name "sdmt_origional" is misspelled; kept
# as-is because later code may reference it.)
ds0$sdmt_origional<-ds0$sdmt
ds0$sdmt<-ds0$sdmt/2
#models--------------------------
# Model ladder for SDMT (ML fits, REML=FALSE, so deviances are comparable):
# 1) unconditional means: random intercept only
eq <- as.formula("sdmt ~ 1 +
                 ( 1 |id)")
model<- lmerTest::lmer(eq, data=ds0, REML= FALSE)
lmerTest::summary((model))
#resid var= 12.45
# 2) fixed linear time trend + random intercept
eq <- as.formula("sdmt ~ 1 + year_in_study +
                 ( 1 |id)")
model<- lmerTest::lmer(eq, data=ds0, REML= FALSE)
lmerTest::summary((model))
#df= 10587
#dev = 59601.7
# 3) fixed time trend + random intercept AND random slope for time
eq <- as.formula("sdmt ~ 1 + year_in_study +
                 ( 1 + year_in_study |id)")
model<- lmerTest::lmer(eq, data=ds0, REML= FALSE)
lmerTest::summary((model))
#df= 10585
#dev = 58014.5
# Hand-computed likelihood-ratio comparison of models 2 vs 3
# (df difference and deviance difference):
10587-10585
59601.7-58014.5
# Standard errors of the hand-recorded estimates, computed as SD/sqrt(N)
# (NOTE(review): the numeric constants below were transcribed from model
# output by hand -- re-verify if the input data changes).
#int 31.3347
5.5977/ (sqrt(10591))
#year 0.4034
0.6351/ (sqrt(10591))
#resid 7.1566
2.6752 / (sqrt(10591))
#pseudo r^2: proportional reduction in residual variance vs model 1
(12.45 - 7.1566 ) / 12.45
# #AGE BL-------------
#
# eq2 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc +
# ( 1 + year_in_study |id)")
# model_2<- lmerTest::lmer(eq2, data=ds0, REML= FALSE)
# lmerTest::summary((model_2))
#
# #chi sq
# #df
# #df=
# 10585 - 10583
# #deviance
# 58014.5- 57653.7
#
#
# #int 27.5670
# 5.2504/ (sqrt(10591))
# #year 0.3081
# 0.5551/ (sqrt(10591))
# #resid 7.1819
# 2.6799/ (sqrt(10591))
#
# ################ + gender
#
# eq3 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex +
# ( 1 + year_in_study |id)")
# model_3<- lmerTest::lmer(eq3, data=ds0, REML= FALSE)
# lmerTest::summary((model_3))
#
# #df=
# 10583- 10581
# #dev =
# 57653.7-57644.1
#
#
# #int 27.3997
# 5.2345/ (sqrt(10591))
# #year 0.308
# 0.5551/ (sqrt(10591))
# #resid 7.1821
# 2.6799 / (sqrt(10591))
#
################# + education
eq4 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
( 1 + year_in_study |id)")
model_4<- lmerTest::lmer(eq4, data=ds0, REML= FALSE)
lmerTest::summary((model_4))
#
# #df=
# 10581- 10579
# #dev
# 57644.1- 57471.3
#compared to random time model
10585- 10579
#dev
58014.5 - 57471.3
#int 24.5726
4.9571/ (sqrt(10591))
#year 0.3101
0.5568/ (sqrt(10591))
#resid 7.1787
2.6793 / (sqrt(10591))
#Physical Activity --------------
eq5 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
year_in_study*phys_pmeanC + phys_wp +
( 1 + year_in_study |id)")
model_5<- lmerTest::lmer(eq5, data=ds0, REML= FALSE)
lmerTest::summary((model_5))
# The model with the random effect of physical activity fits better than the
# one without it. Hand-computed likelihood-ratio test: deviance difference,
# then degrees-of-freedom difference.
56614.6- 56600.6
10451 - 10448
eq5 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
+ phys_pmeanC*year_in_study + phys_wp +
( 1 + year_in_study + phys_wp |id)")
model_5<- lmerTest::lmer(eq5, data=ds0, REML= FALSE)
lmerTest::summary((model_5))
#df=
10579 - 10449
#dev =
57471.3 - 56616.9
#int 24.1643
4.9096/ (sqrt(10466))
#year 0.271076
0.5206 / (sqrt(10466))
#phys_wp 0.0048
0.0695/ (sqrt(10466))
#resid 7.073466
2.6596 / (sqrt(10466))
# Within-person variance explained, compared to the random-effects-of-time-only model.
# NOTE(review): 7.066991 does not match the 7.073466 residual recorded above --
# confirm which model run this value came from.
(7.1566 - 7.066991)/(7.1566)
# # gender X PA
# eq6 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
# phys_pmeanC*msex + phys_wp*msex +
# ( 1 + year_in_study + phys_wp|id)")
# model_6<- lmerTest::lmer(eq6, data=ds0, REML= FALSE)
# lmerTest::summary((model_6))
#
#stress------------------------------
eq5b <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
pss_pmeanC*year_in_study + pss_wp +
( 1 + year_in_study |id)")
model_5b<- lmerTest::lmer(eq5b, data=ds0, REML= FALSE)
lmerTest::summary((model_5b))
#df=
#the addition of pss_wp in the random effects is NS
#people aren't very differnt in their stress fluctuations
#therefore there is nothing to explain
17674.8
3193
eq5b <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
pss_pmeanC*year_in_study + pss_wp +
( 1 + year_in_study + pss_wp |id)")
model_5b<- lmerTest::lmer(eq5b, data=ds0, REML= FALSE)
lmerTest::summary((model_5b))
#df=
10579 - 3191
#dev =
57471.3 - 17674.5
#int 18.91792
4.349 / (sqrt(3208))
#year 0.12653
0.3557 / (sqrt(3208))
#pss_wp 0.08321
0.2885/ (sqrt(3208))
#resid 6.70488
2.5894/ (sqrt(3208))
# eq5b <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
# pss_pmeanC*msex + pss_wp*msex +
# ( 1 + year_in_study + pss_wp |id)")
# model_5b<- lmerTest::lmer(eq5b, data=ds0, REML= FALSE)
# lmerTest::summary((model_5b))
################# interaction with stress
#---- PSS and interaction
#Physical Activity --------------
eq7 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
phys_pmeanC*pss_pmeanC + phys_wp*pss_pmeanC +
( 1 + year_in_study + phys_wp |id)")
model_7<- lmerTest::lmer(eq7, data=ds0, REML= FALSE)
lmerTest::summary((model_7))
|
/manipulation/map2016/archived/5-models.R
|
no_license
|
beccav8/psy564_longitudinal_models
|
R
| false
| false
| 7,659
|
r
|
# # The purpose of this script is to create a data object (dto) which will hold all data and metadata.
# # Run the lines below to stitch a basic html output.
# knitr::stitch_rmd(
# script="./manipulation/map2016/Level1_models_full_workingmem.R",
# output="./manipulation/map2016/output/level1_models_wm_full.md"
# )
# # The above lines are executed only when the file is run in RStudio, !! NOT when an Rmd/Rnw file calls it !!
#
options(scipen=20)
# ----- load-source ------
rm(list=ls(all=TRUE)) #Clear the variables from previous runs.
cat("\f") # clear console
# Attach these packages so their functions don't need to be qualified: http://r-pkgs.had.co.nz/namespace.html#search-path
library(magrittr) # enables piping : %>%
library(lmerTest)
library(outliers)
library(psych)
# Call `base::source()` on any repo file that defines functions needed below. Ideally, no real operations are performed.
source("./scripts/common-functions.R") # used in multiple reports
source("./scripts/graph-presets.R")
source("./scripts/general-graphs.R") #in scripts folder
source("./scripts/specific-graphs.R")
source("./scripts/specific-graphs-pred.R")
source("./scripts/graphs-pred.R")
source("./scripts/graphs-predVID.R")
source("./scripts/functions-for-glm-models.R")
source("./scripts/multiplot-function.R")
source("./scripts/map-specific-graphs.R")
source("./scripts/graph_themes.R")
source("./scripts/multiplot-function.R")
source("./scripts/graph_themes.R")
# source("./scripts/graph-presets.R") # fonts, colors, themes
# Verify these packages are available on the machine, but their functions need to be qualified: http://r-pkgs.had.co.nz/namespace.html#search-path
requireNamespace("ggplot2") # graphing
# requireNamespace("readr") # data input
requireNamespace("tidyr") # data manipulation
requireNamespace("dplyr") # Avoid attaching dplyr, b/c its function names conflict with a lot of packages (esp base, stats, and plyr).
requireNamespace("testit")# For asserting conditions meet expected patterns.
requireNamespace("nlme") # estimate mixed models | esp. gls()
requireNamespace("lme4") # estimate mixed models | esp. lmer()
requireNamespace("arm") # process model objects
getwd()
# ----- specify-objects ------
path_input0 <- "./data/unshared/derived/map2016/map_full_bio_centered.rds"
# ----- load-data ------
ds0 <- readRDS(path_input0) #total raw data
names(ds0)
# str(ds0)
describe(ds0$sdmt)
ds0$sdmt_origional<-ds0$sdmt
ds0$sdmt<-ds0$sdmt/2
#models--------------------------
eq <- as.formula("sdmt ~ 1 +
( 1 |id)")
model<- lmerTest::lmer(eq, data=ds0, REML= FALSE)
lmerTest::summary((model))
#resid var= 12.45
eq <- as.formula("sdmt ~ 1 + year_in_study +
( 1 |id)")
model<- lmerTest::lmer(eq, data=ds0, REML= FALSE)
lmerTest::summary((model))
#df= 10587
#dev = 59601.7
eq <- as.formula("sdmt ~ 1 + year_in_study +
( 1 + year_in_study |id)")
model<- lmerTest::lmer(eq, data=ds0, REML= FALSE)
lmerTest::summary((model))
#df= 10585
#dev = 58014.5
10587-10585
59601.7-58014.5
#int 31.3347
5.5977/ (sqrt(10591))
#year 0.4034
0.6351/ (sqrt(10591))
#resid 7.1566
2.6752 / (sqrt(10591))
#pseudo r^2
(12.45 - 7.1566 ) / 12.45
# #AGE BL-------------
#
# eq2 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc +
# ( 1 + year_in_study |id)")
# model_2<- lmerTest::lmer(eq2, data=ds0, REML= FALSE)
# lmerTest::summary((model_2))
#
# #chi sq
# #df
# #df=
# 10585 - 10583
# #deviance
# 58014.5- 57653.7
#
#
# #int 27.5670
# 5.2504/ (sqrt(10591))
# #year 0.3081
# 0.5551/ (sqrt(10591))
# #resid 7.1819
# 2.6799/ (sqrt(10591))
#
# ################ + gender
#
# eq3 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex +
# ( 1 + year_in_study |id)")
# model_3<- lmerTest::lmer(eq3, data=ds0, REML= FALSE)
# lmerTest::summary((model_3))
#
# #df=
# 10583- 10581
# #dev =
# 57653.7-57644.1
#
#
# #int 27.3997
# 5.2345/ (sqrt(10591))
# #year 0.308
# 0.5551/ (sqrt(10591))
# #resid 7.1821
# 2.6799 / (sqrt(10591))
#
################# + education
eq4 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
( 1 + year_in_study |id)")
model_4<- lmerTest::lmer(eq4, data=ds0, REML= FALSE)
lmerTest::summary((model_4))
#
# #df=
# 10581- 10579
# #dev
# 57644.1- 57471.3
#compared to random time model
10585- 10579
#dev
58014.5 - 57471.3
#int 24.5726
4.9571/ (sqrt(10591))
#year 0.3101
0.5568/ (sqrt(10591))
#resid 7.1787
2.6793 / (sqrt(10591))
#Physical Activity --------------
eq5 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
year_in_study*phys_pmeanC + phys_wp +
( 1 + year_in_study |id)")
model_5<- lmerTest::lmer(eq5, data=ds0, REML= FALSE)
lmerTest::summary((model_5))
#the model with the random effects of PA is a better model than the one witout
56614.6- 56600.6
10451 - 10448
eq5 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
+ phys_pmeanC*year_in_study + phys_wp +
( 1 + year_in_study + phys_wp |id)")
model_5<- lmerTest::lmer(eq5, data=ds0, REML= FALSE)
lmerTest::summary((model_5))
#df=
10579 - 10449
#dev =
57471.3 - 56616.9
#int 24.1643
4.9096/ (sqrt(10466))
#year 0.271076
0.5206 / (sqrt(10466))
#phys_wp 0.0048
0.0695/ (sqrt(10466))
#resid 7.073466
2.6596 / (sqrt(10466))
#wp varience explained compred to the random effects of time only
(7.1566 - 7.066991)/(7.1566)
# # gender X PA
# eq6 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
# phys_pmeanC*msex + phys_wp*msex +
# ( 1 + year_in_study + phys_wp|id)")
# model_6<- lmerTest::lmer(eq6, data=ds0, REML= FALSE)
# lmerTest::summary((model_6))
#
#stress------------------------------
eq5b <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
pss_pmeanC*year_in_study + pss_wp +
( 1 + year_in_study |id)")
model_5b<- lmerTest::lmer(eq5b, data=ds0, REML= FALSE)
lmerTest::summary((model_5b))
#df=
#the addition of pss_wp in the random effects is NS
#people aren't very differnt in their stress fluctuations
#therefore there is nothing to explain
17674.8
3193
eq5b <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
pss_pmeanC*year_in_study + pss_wp +
( 1 + year_in_study + pss_wp |id)")
model_5b<- lmerTest::lmer(eq5b, data=ds0, REML= FALSE)
lmerTest::summary((model_5b))
#df=
10579 - 3191
#dev =
57471.3 - 17674.5
#int 18.91792
4.349 / (sqrt(3208))
#year 0.12653
0.3557 / (sqrt(3208))
#pss_wp 0.08321
0.2885/ (sqrt(3208))
#resid 6.70488
2.5894/ (sqrt(3208))
# eq5b <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
# pss_pmeanC*msex + pss_wp*msex +
# ( 1 + year_in_study + pss_wp |id)")
# model_5b<- lmerTest::lmer(eq5b, data=ds0, REML= FALSE)
# lmerTest::summary((model_5b))
################# interaction with stress
#---- PSS and interaction
#Physical Activity --------------
eq7 <- as.formula("sdmt ~ 1 + year_in_study*age_bl_gmc + year_in_study*msex + year_in_study*edu +
phys_pmeanC*pss_pmeanC + phys_wp*pss_pmeanC +
( 1 + year_in_study + phys_wp |id)")
model_7<- lmerTest::lmer(eq7, data=ds0, REML= FALSE)
lmerTest::summary((model_7))
|
#
source("textFunctions.R")
source("TopicFunctions.R")
#Specify Working Folder
getwd()
workingfolder<-"../Ag.Nutrition.Grants"
messages<-readMails("../Fullset/Fullset_Demo","../mails")
table(reader::get.ext(list.files("../Fullset/Fullset_Demo")))
#removed .bmp and .db files manually
filelist<-list.files("../Fullset/Fullset_Demo")[unique(unlist(sapply(c(".pdf",".doc",".docx",".DOC"),function(X) which(str_detect(list.files("../Fullset/Fullset_Demo"),fixed(X))))))]
fileslist<-lapply(file.path("../Fullset/Fullset_Demo",filelist),getTextR)
baseinput1<-readRDS("../Ag.Nutrition.Grants/base_input1.rds")
head(baseinput1$SentFrame)
inspect(head(baseinput1$SentFrame))
fileslist<-fileslist[sapply(fileslist, function(X){length(X[[1]])})>1]
jgc()
which(sapply(tcorp,function(X) length(content(X)))<5)
tcorp<-do.call(c,fileslist)
saveRDS(tcorp,file.path("..",workingfolder,"corpus.rds"))
#clean up workspace
saveRDS(PreTopicFrame(tcorp[1:256],1),file.path("..",workingfolder,"base_input1.rds"))
jgc()
sent_token_annotator <- Maxent_Sent_Token_Annotator()
word_token_annotator <- Maxent_Word_Token_Annotator()
org.annotate<-Maxent_Entity_Annotator(language = "en", kind="organization", probs = FALSE,model = NULL)
pers.annotate<-Maxent_Entity_Annotator(language = "en", kind="person", probs = FALSE,model = NULL)
location.annotate<-Maxent_Entity_Annotator(language = "en", kind="location", probs = FALSE,model = NULL)
saveRDS(PreTopicFrame(tcorp[257:length(tcorp)],1),file.path("..",workingfolder,"base_input2.rds"))
tcorp[257:length(tcorp)]
baseinput1<-readRDS("../Ag.Nutrition.Grants/base_input1.rds")
baseinput2<-readRDS("../Ag.Nutrition.Grants/base_input2.rds")
# Merge two PreTopicFrame-style results: row-bind their sentence frames and
# concatenate the remaining three list components, preserving the element
# names ("SentFrame", "Annotations", "processed", "out").
mergeins <- function(x1, x2) {
  list(
    "SentFrame"   = rbind(x1[[1]], x2[[1]]),  # stack the sentence data frames
    "Annotations" = c(x1[[2]], x2[[2]]),
    "processed"   = c(x1[[3]], x2[[3]]),
    "out"         = c(x1[[4]], x2[[4]])
  )
}
baseinput<-mergeins(baseinput1,baseinput2)
rm(baseinput1,baseinput2)
#here we add opportunity labels to documents
nex<-read.csv("../Fullset/fullset.csv",stringsAsFactors=FALSE)
head(nex)
nexjoin<-plyr::join(data.frame("name"=baseinput$SentFrame$id),data.frame("name"=basename(as.character(nex$path)),"OpID"=as.character(nex$Opportunity.ID)),type="left",match="first")
baseinput$SentFrame$OpID<-nexjoin$OpID
nexjoin2<-plyr::join(data.frame("name"=baseinput$out$meta$id),data.frame("name"=basename(as.character(nex$path)),"OpID"=as.character(nex$Opportunity.ID)),type="left",match="first")
baseinput$out$meta$OpID<-nexjoin2$OpID
rm("nexjoin","nexjoin2","nex")
buildcliff()
startcliff()
library(httr)
pred1<-PredictCountryByDoc(baseinput)
rm(f1)
stopcliff()
head(baseinput$out)
baseinput$out$meta$Orig
baseinput1<-readRDS(file.path(workingfolder,"base_input1.rds"))
baseinput2<-readRDS(file.path(workingfolder,"base_input2.rds"))
baseinput$out$meta<-rbind(baseinput1$out$meta[,1:11],baseinput2$out$meta[,1:11])
names(baseinput2$out$documents)<-as.character(as.numeric(names(baseinput2$out$documents))+as.numeric(names(baseinput1$out$documents))[length(names(baseinput1$out$documents))])
l_ply(names(baseinput2$out$documents),function(X) baseinput2$out$documents$X[1,]<-baseinput2$out$documents$X[1,]+length(baseinput1$out$vocab))
baseinput$out$documents<-c(baseinput1$out$documents,baseinput2$out$documents)
rm(baseinput2)
baseinput$processed <-textProcessor(baseinput$SentFrame$SnE,metadata=baseinput$SentFrame,sparselevel=1)
baseinput$out <- prepDocuments(baseinput$processed$documents,baseinput$processed$vocab,baseinput$processed$meta,lower.thresh=10)
pred1<-read.csv(file.path(workingfolder, "countrypredictions1.csv"),stringsAsFactors=FALSE)
head(pred1)
baseinput$out$meta
baseinput$out$meta<-reflectCountryCol(baseinput$out$meta,pred1,50,FALSE)
workingfolder<-"../Ag.Nutrition.Grants"
saveRDS(baseinput,file.path(workingfolder,"basefile.rds"))
baseinput$out[[1]]
write.csv(pred1,file.path(workingfolder,"countrypredictions1.csv"))
form1<-paste("~as.factor(Orig)",paste(select.list(colnames(baseinput$out$meta),multiple=TRUE),sep="",collapse="+"),sep="+")
writeLines(form1,file.path(workingfolder,"formula1.txt"))
max(unlist(baseinput$out$documents))
length(baseinput$out$vocab)
length(baseinput$out$documents)
length(baseinput$out$documents)
nrow(baseinput$out$meta)
baseinput$out$meta$OpID<-as.character(baseinput$out$meta$OpID)
baseinput$out$meta$OpID[is.na(baseinput$out$meta$OpID)]<-'missing'
system("R CMD BATCH --no-restore run_topic_in_background.R", wait=FALSE)
st1<-readRDS(file.path(workingfolder,"topicmodel.rds"))
# Assign each document its highest-probability topic from the stm fit.
# BUG FIX: removed a stray trailing ")" that made this line a syntax error
# (the corrected form of the same call also appears further below).
baseinput$top.topics<-max.col(st1$theta)
?saveWidget
dir.create("../results")
toplabels<-stm::labelTopics(st1)
head(toplabels)
library(wordcloud)
stmpout<-plot.STM(st1, type="perspectives", topics=c(1,2))
?toLDAvis
toLDAvis(st1, baseinput$out$documents,out.dir="../results/topicmodel",open.browser = FALSE)
baseinput$top.topics<-max.col(st1$theta)
ggplot()+geom_bar(aes(x=baseinput$out$meta$OpID[which(baseinput$top.topics==28)]))
# Top `howMany` opportunity IDs for one topic, ranked by a document-length
# weighted mean of the topic's per-document proportions.
#
# TOPICNUMBER: column of topicmodel$theta to score.
# howMany:     number of opportunity IDs to return.
# topicmodel:  fitted stm model (theta = document x topic proportions).
# out:         prepDocuments() result; out$meta$OpID links docs to opportunities.
# Returns a data.frame with columns OpID and Score, highest score first.
topOps<-function(TOPICNUMBER,howMany,topicmodel,out){
  # Per-opportunity score: topic proportion weighted by each document's share
  # of the opportunity's words, averaged over the opportunity's documents.
  topicWR<-function(topic){
    temp<-data.frame("OpID"=out$meta$OpID,"score"=topicmodel$theta[,topic],"words"=sapply(out$documents,ncol))
    ddply(temp,.(OpID),summarise,"sumwordweight"=sum(score*c(words/sum(words)))/length(score))
  }
  # BUG FIX: the inner call was hard-coded as topicWR(28), ignoring TOPICNUMBER.
  topictry1<-topicWR(TOPICNUMBER)
  # Rank once instead of sorting the same vector twice.
  top_ix<-sort(topictry1$sumwordweight,index.return=TRUE,decreasing=TRUE)$ix[1:howMany]
  data.frame("OpID"=as.character(topictry1$OpID[top_ix]),"Score"=topictry1$sumwordweight[top_ix])
}
# Top `howMany` document ids for one topic, ranked the same way as topOps()
# but keyed on out$meta$id; also records which topic produced the ranking.
# Returns a data.frame with columns id, Score and Topic, highest score first.
topID<-function(TOPICNUMBER,howMany,topicmodel,out){
  # Per-document score: topic proportion weighted by word share (see topOps()).
  topicWR<-function(topic){
    temp<-data.frame("id"=out$meta$id,"score"=topicmodel$theta[,topic],"words"=sapply(out$documents,ncol))
    ddply(temp,.(id),summarise,"sumwordweight"=sum(score*c(words/sum(words)))/length(score))
  }
  # BUG FIX: the inner call was hard-coded as topicWR(28), ignoring TOPICNUMBER.
  topictry1<-topicWR(TOPICNUMBER)
  # Rank once instead of sorting the same vector twice.
  top_ix<-sort(topictry1$sumwordweight,index.return=TRUE,decreasing=TRUE)$ix[1:howMany]
  data.frame("id"=as.character(topictry1$id[top_ix]),"Score"=topictry1$sumwordweight[top_ix],"Topic"=TOPICNUMBER)
}
?labelTopics
stm::l
sum(topOps(28,50,st1,baseinput$out)$Score)
library(shiny)
dir.create("../../bucket1/Wellcome")
getwd()
toLDAvis(st1, baseinput$out$documents,open.browser = FALSE,,out.dir="../../bucket1/Wellcome/Topic")
?toLDAvis
shinyApp(ui=fluidPage(
visOutput("vis1"),
DT::dataTableOutput("tab1")),
server=function(input,output){
output$vis1<-renderVis({toLDAvis(st1, baseinput$out$documents,open.browser = FALSE)})
output$tab1<-renderDataTable({DT::datatable(topID(28,10,st1,baseinput$out))})
})
library("gistr")
DT::re
?toLDAvis
?shinyApp
?datatable
#here, we read from a table of verbs to the wd dataframe. The function allows you to edit a google docs frame shared from the address, so you can add, subtract words. You also could replace the reading of the csv with a call to a local dataframe.
wd<-read.csv(textConnection(RCurl::getURL("https://docs.google.com/spreadsheets/d/1ng7I5QoYu_NzegZvO9568M42q52iVCsLz-D0WhVa--8/pub?gid=0&single=true&output=csv")),stringsAsFactors=FALSE)
allwords<-c(wd$Up.Words,wd$Down.Words)
AnnotatesLarge<-AnnotateVerbsTopicJoin(allwords,baseinput$processed,baseinput$out,baseinput$Annotations,baseinput$SentFrame,baseinput$top.topics)
AnnotatesSmaller<-CombinationFrame(AnnotatesLarge)
rm(AnnotatesLarge)
saveRDS(AnnotatesSmaller,file.path(workingfolder,"AnnotationFrame.rds"))
ProcessedANNS<-ProcessforAPI(AnnotatesSmaller)
saveRDS(ProcessedANNS,file.path(workingfolder,"ProcessedFrame.rds"))
ProcessedANNS<-readRDS(file.path(workingfolder,"ProcessedFrame.rds"))
nrow(ProcessedANNS)
#edit runAlchemy and source
FillFolder(ProcessedANNS,workingfolder)
library(jsonlite)
Frame1<-processFolder(workingfolder,ProcessedANNS)
rm(BASE_INPUT)
rm(BASEINPUT)
saveRDS(Frame1,file.path(workingfolder,"ParsedFrame.rds"))
# Join the semantic-parse frame back onto document metadata and keep a fixed
# set of metadata columns. `origent` offsets into the country columns that
# were appended to the metadata earlier in this script. Commas inside the
# entity string are replaced with ";" so the result survives CSV export.
frametable<-function(PARSEFRAME,BASEINPUT,origent){
# Document-level metadata plus each document's dominant topic.
basejoin<-BASEINPUT$out$meta
basejoin$TopTopics<-BASEINPUT$top.topics
# One metadata row per parse row, matched by the parse frame's rowid.
joined<-cbind(PARSEFRAME,basejoin[PARSEFRAME$rowid,])
colnames(joined)
# Keep all parse columns plus selected metadata columns by position.
# NOTE(review): the indices 18, 19, 22, 27 and 28+origent are hard-coded
# against the current meta layout -- fragile; confirm if columns change.
joined<-joined[,c(c(1:ncol(PARSEFRAME)),18,19,22,27,c(28+origent),ncol(joined))]
# Drop bookkeeping columns no longer needed downstream.
joined<-joined[,c(1:ncol(joined))[-which(colnames(joined)%in%c("filename","comboID","rowid"))]]
# Use ";" inside entity lists so "," stays a safe CSV delimiter.
joined$ents<-gsub(",",";",joined$ents)
joined
}
matchtable<-frametable(Frame1,baseinput,0)
dir.create("../AG_NUTRITION_RESULTS")
tableapp(matchtable,st1)
save("matchtable","st1",file="../AG_NUTRITION_RESULTS/agnutshinydata.R")
workpage<-read.csv(textConnection(RCurl::getURL("https://docs.google.com/spreadsheets/d/1jRTIHINPLvE8w-_xIBxvxId4n_fFT7Qsg2tjCNrUm3A/pub?output=csv")))
workpage$Ag
?sageLabels
toplabs<-stm::labelTopics(st1,workpage$Ag)$prob
.heldout()
tcorp<-readRDS(file.path(workingfolder,"corpus.rds"))
fulldocs1<-textProcessor(unlist(sapply(tcorp[1:5], `[`, "content")))
install.packages("d3Network")
library(d3Network)
?d3Sankey
d3Sankey(
tempframe<-data.frame(source=c(0,1,2,3,4),target=c(1,2,3,4,4))
tempframe$value<-c(100,100,100,100,100)
nodeframe<-data.frame("name"=c("bacon","sandwich","pie","cake","lemon"))
library(networkD3)
sankeyNetwork(Links=tempframe,Nodes=nodeframe,Source="source",Target="target",Value="value",NodeID="name",fontSize=24,nodeWidth=30)
matchtable$object.keywords
ntr<-filter(matchtable,str_detect(subject.keywords,"micronutrients"))
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed("micro")),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)},error=function(e){NA})})
which(is.na(l1))
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
l1<-na.omit(l1)
networkD3::simpleNetwork(l1,Source="enter",Target="out",fontSize=20)
l12<-data.frame("name"=as.factor(unique(c(as.character(l1$enter),as.character(l1$out)))),"id"=0:c(length(unique(c(as.character(l1$enter),as.character(l1$out))))-1))
l1$enter<-as.numeric(as.character(mapvalues(l1$enter, from = l12$name, to = l12$id)))
l1$out<-as.numeric(as.character(mapvalues(l1$out, from = l12$name, to = l12$id)))
l1$value<-10
l1
childmaker<-function(K) {lapply(K,function(X) list(name=X))}
chartmaker("micronutrients")
?diagonalNetwork
dplyr::f
# Two-level tree diagram (networkD3::diagonalNetwork) rooted at WORD:
# WORD -> lemmatized actions found where WORD appears in subject keywords
#      -> object keywords reached from each of those actions.
# Reads the file-global `matchtable` built earlier in this script; relies on
# stringr (str_detect/fixed), magrittr (%>%) and networkD3 being attached.
makenet<-function(WORD){
  needle<-tolower(WORD)
  # Rows whose subject keywords mention WORD (case-insensitive). Qualified as
  # dplyr::filter so stats::filter can never be picked up by mistake.
  ntr<-dplyr::filter(matchtable,str_detect(tolower(subject.keywords),needle))
  # seq_len() is safe when ntr has zero rows (1:nrow would give c(1, 0)).
  edge_parts<-lapply(seq_len(nrow(ntr)),function(i){
    tryCatch({
      # subject -> action edges, restricted to subjects containing WORD
      part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
      part1<-part1[str_detect(part1[,1],fixed(needle)),]
      # action -> object edges for the same row
      part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
      part2<-dplyr::filter(part2,str_detect(enter,as.character(part1$out)))
      rbind(part1,part2)
    },error=function(e){NA})
  })
  # BUG FIX: na.omit() is a no-op on lists, so the NA placeholders from failed
  # rows used to leak into rbind(); drop them explicitly instead.
  edge_parts<-edge_parts[!vapply(edge_parts,function(el) identical(el,NA),logical(1))]
  l1<-do.call(rbind,edge_parts)
  # One tree node per edge source, with its distinct targets as children.
  chartmaker<-function(E){
    list(name=E,children=lapply(unique(dplyr::filter(l1,enter==E)$out),function(X){list(name=as.character(X))}))
  }
  lch<-chartmaker(needle)
  # Expand one more level: each action node gets its own object children.
  for(i in seq_along(lch[[2]])){
    lch[[2]][[i]]$children<-chartmaker(lch[[2]][[i]]$name)$children
  }
  lch %>% diagonalNetwork(fontSize=30,linkColour = "#000")
}
# Two-level radial dendrogram (networkD3::radialNetwork) rooted at WORD:
# WORD -> lemmatized actions found where WORD appears in subject keywords
#      -> object keywords reached from each of those actions.
# Unlike makenet(), the table to search is passed in as `mtable`. Relies on
# dplyr, stringr (str_detect/fixed), magrittr (%>%) and networkD3 being
# attached by the surrounding script.
makenet.radial<-function(WORD,mtable){
# Rows whose subject keywords mention WORD (case-insensitive).
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
# Per matching row: subject->action and action->object edge pairs; rows that
# fail to parse collapse to NA via the error handler.
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed(tolower(WORD))),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-dplyr::filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)
},error=function(e){NA})
})
# NOTE(review): na.omit() leaves list elements untouched, so NA entries from
# failed rows still reach rbind() below -- confirm whether that is intended.
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
# One tree node per edge source, with its distinct targets as children.
chartmaker<-function(E) {list(name=tolower(E),children=lapply(unique(dplyr::filter(l1,str_detect(tolower(enter),tolower(E)))$out),function(X){list(name=as.character(X))}))}
lch<-chartmaker(tolower(WORD))
# Expand one more level: each action node gets its own object children.
for(i in 1:length(lch[[2]])) {lch[[2]][[i]]$children<-chartmaker(lch[[2]][[i]]$name)$children}
lch %>% radialNetwork(fontSize=30,linkColour = "#000")
}
library(plyr)
library(dplyr)
library(networkD3)
makenet("micronutrients")
makenet("anemia")
matchtable$TopTopics%in%
library(stringr)
makenet.radial("protein",dplyr::filter(matchtable,TopTopics%in%workpage$Ag))
makenet.radial("protein",dplyr::filter(matchtable,TopTopics%in%workpage$Nut))
breedtopics<-dplyr::filter(matchtable,TopTopics%in%c(21,19,42))
table(unique(unlist(breedtopics$subject.keywords)))
makenet.radial("vari",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
makenet.radial("drought",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
makenet.radial("food",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
library(stm)
?findThoughts
slabs<-stm::sageLabels(st1,n=10)
slabs$marginal$prob[21,]
findThoughts(st1,21)
# Same traversal as the radial-network builder above, but returns the nested
# name/children list (a JSON-able tree) instead of rendering a widget.
makenet.radial.json<-function(WORD,mtable){
# Rows whose subject keywords mention WORD (case-insensitive).
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
# Per matching row: subject->action and action->object edge pairs; rows that
# fail to parse collapse to NA via the error handler.
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed(tolower(WORD))),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-dplyr::filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)
},error=function(e){NA})
})
# NOTE(review): na.omit() leaves list elements untouched, so NA entries from
# failed rows still reach rbind() below -- confirm whether that is intended.
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
# Root node plus one level of children per unique edge source.
chartmaker<-function(E) {list(name=tolower(E),children=lapply(unique(dplyr::filter(l1,str_detect(tolower(enter),tolower(E)))$out),function(X){list(name=as.character(X))}))}
lch<-chartmaker(tolower(WORD))
# Add a second level: each child node gets its own children.
for(i in 1:length(lch[[2]])) {lch[[2]][[i]]$children<-chartmaker(lch[[2]][[i]]$name)$children}
lch}
makenet.radial.json(slabs$marginal$prob[21,][1],dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
lapply()
l_ply(makenet.radial.json(slabs$marginal$prob[21,][1],dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))[[2]]
doublechild<-function(V){
V<-makenet.radial.json(slabs$marginal$prob[21,][1],dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))[[2]][[1]]
l_ply(V[[2]],function(X){
X<-V[[2]][[1]]
X$children=makenet.radial.json(X$name,filter(matchtable, TopTopics%in%c(21,19,42)))$children
V}
makenet.radial("seed",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
library(plyr)
library(networkD3)
library(dplyr)
library(stringr)
makenet.radial(slabs$marginal$prob[21,][1],filter(matchtable,TopTopics%in%c(21,19,42)))
matchtable$To
makenet.radial("survey",dplyr::filter(matchtable,TopTopics%in%c(22,53)))
makenet.radial("iron",dplyr::filter(matchtable,TopTopics%in%workpage$Nut))
makenet.radial("potato",dplyr::filter(matchtable,TopTopics%in%workpage$Ag))
saveRDS(matchtable,file="../Ag.Nutrition.Grants/matchtab.rds")
shinyApp(ui=fluidPage(sidebarPanel(textInput("Word","Word","potato"),selectInput("fvar","Filter Variable",names(matchtable),"TopTopics"),textInput("f2","Filter Text","1")),mainPanel(radialNetworkOutput("radout"))),server=function(input,output){
output$radout<-networkD3::renderRadialNetwork({makenet.radial("potato",matchtable[as.character(input$fvar)%in%as.character(input$f2),])})})
})
Nts<-filter(matchtable,TopTopics%in%workpage$Nut)
table(unlist(Nts$subject.keywords))
chartmaker(X$name)
lch[[2]] %>% .[[1]]
l1$children[[1]]$children<-
chartmaker("offer")
charmaker2("micronutrients")
chartmaker("offer")
lapply(unique(l1$enter), chartmaker)[[1]] %>% diagonalNetwork(fontSize=30,linkColour = "#000")
/
E="micronutrients"
chartmaker<-function(E) {list(name=E,children=lapply(table(filter(l1,enter==E)$out),function(X){list(name=X)}))}
lapply(1:nrow(l1), function(i) list(name=l1$enter[i],children=l1$out[i]))
# Recursively convert a data.frame of key columns into a nested name/children
# list (the shape networkD3 tree widgets expect): each distinct value in the
# first column becomes a node whose children are built from the remaining
# columns of the matching rows. Returns NULL for an empty frame.
rsplit <- function(x) {
  # Discard rows whose key in the leading column is missing.
  x <- x[!is.na(x[, 1]), , drop = FALSE]
  if (nrow(x) == 0) {
    return(NULL)
  }
  # Base case: a single remaining column yields a flat list of leaf nodes.
  if (ncol(x) == 1) {
    return(lapply(x[, 1], function(leaf) list(name = leaf)))
  }
  # Recursive case: group the trailing columns by the leading column's values.
  groups <- split(x[, -1, drop = FALSE], x[, 1])
  subtrees <- lapply(groups, rsplit)
  nodes <- Map(function(kids, label) {
    if (is.null(kids)) list(name = label) else list(name = label, children = kids)
  }, subtrees, names(groups))
  unname(nodes)
}
temp
temp)
l3<-filter(l1,enter==enter[[1]])
?dlply
diagonalNetwork(l1,Source="enter",Target="out",fontSize=20)
?simpleNetwork
networkD3::simpleNetwork(l1,Source="enter",Target="out",fontSize=20)
sankeyNetwork(Links = l1, Nodes = l12, Source = 'enter', Target = 'out',Value='value',NodeID = "name", fontSize = 24, nodeWidth = 30)
sankeyNetwork(Links = energy$links, Nodes = energy$nodes, Source = 'source', Target = 'target', Value = 'value', NodeID = 'name',units = 'TWh', fontSize = 12, nodeWidth = 30)
ntr$subject.keywords
length(unlist(ntr$action.lemmatized))
unlist(),"act"=unlist(ntr$action.lemmatized)
"ob"=unlist(ntr$object.keywords))
netframe$key[1]
URL <- paste0('https://cdn.rawgit.com/christophergandrud/networkD3/','master/JSONdata/energy.json')
energy <- jsonlite::fromJSON(URL)
# Plot
sankeyNetwork(Links = energy$links, Nodes = energy$nodes, Source = 'source', Target = 'target', Value = 'value', NodeID = 'name',units = 'TWh', fontSize = 12, nodeWidth = 30)
desc<-read.csv("../Fullset/fullset.csv")
head(desc)
matchtable<-readRDS(file.path(workingfolder,"matchtab.rds"))
reop<-join(data.frame("OpID"=desc$Opportunity.ID,"basename"=basename(as.character(desc$path))),data.frame("basename"=baseinput$SentFrame$id,"Orig"=baseinput$SentFrame$Orig),type="left",match="first")
matchtable<-join(matchtable,reop,match="first")
head(matchtable)
matchtable$basename
saveRDS(matchtable,file.path(workingfolder,"matchtable2.rds"))
matchtable<-unique(matchtable)
getwd()
data_mapper(read.csv("../Ag.Nutrition.Grants/countrypredictions1.csv"),unique(baseinput$SentFrame$OpID))
|
/working/new_topic_script.R
|
permissive
|
ryscott5/eparTextTools
|
R
| false
| false
| 18,044
|
r
|
#
source("textFunctions.R")
source("TopicFunctions.R")
#Specify Working Folder
getwd()
workingfolder<-"../Ag.Nutrition.Grants"
messages<-readMails("../Fullset/Fullset_Demo","../mails")
table(reader::get.ext(list.files("../Fullset/Fullset_Demo")))
#removed .bmp and .db files manually
filelist<-list.files("../Fullset/Fullset_Demo")[unique(unlist(sapply(c(".pdf",".doc",".docx",".DOC"),function(X) which(str_detect(list.files("../Fullset/Fullset_Demo"),fixed(X))))))]
fileslist<-lapply(file.path("../Fullset/Fullset_Demo",filelist),getTextR)
baseinput1<-readRDS("../Ag.Nutrition.Grants/base_input1.rds")
head(baseinput1$SentFrame)
inspect(head(baseinput1$SentFrame))
fileslist<-fileslist[sapply(fileslist, function(X){length(X[[1]])})>1]
jgc()
which(sapply(tcorp,function(X) length(content(X)))<5)
tcorp<-do.call(c,fileslist)
saveRDS(tcorp,file.path("..",workingfolder,"corpus.rds"))
#clean up workspace
saveRDS(PreTopicFrame(tcorp[1:256],1),file.path("..",workingfolder,"base_input1.rds"))
jgc()
sent_token_annotator <- Maxent_Sent_Token_Annotator()
word_token_annotator <- Maxent_Word_Token_Annotator()
org.annotate<-Maxent_Entity_Annotator(language = "en", kind="organization", probs = FALSE,model = NULL)
pers.annotate<-Maxent_Entity_Annotator(language = "en", kind="person", probs = FALSE,model = NULL)
location.annotate<-Maxent_Entity_Annotator(language = "en", kind="location", probs = FALSE,model = NULL)
saveRDS(PreTopicFrame(tcorp[257:length(tcorp)],1),file.path("..",workingfolder,"base_input2.rds"))
tcorp[257:length(tcorp)]
baseinput1<-readRDS("../Ag.Nutrition.Grants/base_input1.rds")
baseinput2<-readRDS("../Ag.Nutrition.Grants/base_input2.rds")
mergeins<-function(x1,x2) {list("SentFrame"=rbind(x1[[1]],x2[[1]]),"Annotations"=c(x1[[2]],x2[[2]]),"processed"=c(x1[[3]],x2[[3]]),"out"=c(x1[[4]],x2[[4]]))}
baseinput<-mergeins(baseinput1,baseinput2)
rm(baseinput1,baseinput2)
#here we add opportunity labels to documents
nex<-read.csv("../Fullset/fullset.csv",stringsAsFactors=FALSE)
head(nex)
nexjoin<-plyr::join(data.frame("name"=baseinput$SentFrame$id),data.frame("name"=basename(as.character(nex$path)),"OpID"=as.character(nex$Opportunity.ID)),type="left",match="first")
baseinput$SentFrame$OpID<-nexjoin$OpID
nexjoin2<-plyr::join(data.frame("name"=baseinput$out$meta$id),data.frame("name"=basename(as.character(nex$path)),"OpID"=as.character(nex$Opportunity.ID)),type="left",match="first")
baseinput$out$meta$OpID<-nexjoin2$OpID
rm("nexjoin","nexjoin2","nex")
buildcliff()
startcliff()
library(httr)
pred1<-PredictCountryByDoc(baseinput)
rm(f1)
stopcliff()
head(baseinput$out)
baseinput$out$meta$Orig
baseinput1<-readRDS(file.path(workingfolder,"base_input1.rds"))
baseinput2<-readRDS(file.path(workingfolder,"base_input2.rds"))
baseinput$out$meta<-rbind(baseinput1$out$meta[,1:11],baseinput2$out$meta[,1:11])
names(baseinput2$out$documents)<-as.character(as.numeric(names(baseinput2$out$documents))+as.numeric(names(baseinput1$out$documents))[length(names(baseinput1$out$documents))])
l_ply(names(baseinput2$out$documents),function(X) baseinput2$out$documents$X[1,]<-baseinput2$out$documents$X[1,]+length(baseinput1$out$vocab))
baseinput$out$documents<-c(baseinput1$out$documents,baseinput2$out$documents)
rm(baseinput2)
baseinput$processed <-textProcessor(baseinput$SentFrame$SnE,metadata=baseinput$SentFrame,sparselevel=1)
baseinput$out <- prepDocuments(baseinput$processed$documents,baseinput$processed$vocab,baseinput$processed$meta,lower.thresh=10)
pred1<-read.csv(file.path(workingfolder, "countrypredictions1.csv"),stringsAsFactors=FALSE)
head(pred1)
baseinput$out$meta
baseinput$out$meta<-reflectCountryCol(baseinput$out$meta,pred1,50,FALSE)
workingfolder<-"../Ag.Nutrition.Grants"
saveRDS(baseinput,file.path(workingfolder,"basefile.rds"))
baseinput$out[[1]]
write.csv(pred1,file.path(workingfolder,"countrypredictions1.csv"))
form1<-paste("~as.factor(Orig)",paste(select.list(colnames(baseinput$out$meta),multiple=TRUE),sep="",collapse="+"),sep="+")
writeLines(form1,file.path(workingfolder,"formula1.txt"))
max(unlist(baseinput$out$documents))
length(baseinput$out$vocab)
length(baseinput$out$documents)
length(baseinput$out$documents)
nrow(baseinput$out$meta)
baseinput$out$meta$OpID<-as.character(baseinput$out$meta$OpID)
baseinput$out$meta$OpID[is.na(baseinput$out$meta$OpID)]<-'missing'
system("R CMD BATCH --no-restore run_topic_in_background.R", wait=FALSE)
st1<-readRDS(file.path(workingfolder,"topicmodel.rds"))
baseinput$top.topics<-max.col(st1$theta))
?saveWidget
dir.create("../results")
toplabels<-stm::labelTopics(st1)
head(toplabels)
library(wordcloud)
stmpout<-plot.STM(st1, type="perspectives", topics=c(1,2))
?toLDAvis
toLDAvis(st1, baseinput$out$documents,out.dir="../results/topicmodel",open.browser = FALSE)
baseinput$top.topics<-max.col(st1$theta)
ggplot()+geom_bar(aes(x=baseinput$out$meta$OpID[which(baseinput$top.topics==28)]))
# Return the `howMany` opportunity IDs (OpID) with the highest word-weighted
# score for a given topic.
#   TOPICNUMBER: column index into the stm fit's theta matrix.
#   howMany:     number of top opportunities to return.
#   topicmodel:  stm fit; uses $theta (documents x topics).
#   out:         prepDocuments() output; uses $meta$OpID and $documents.
topOps <- function(TOPICNUMBER, howMany, topicmodel, out) {
  # Per-document topic score weighted by the document's share of words within
  # its opportunity, then averaged per OpID.
  topicWR <- function(topic) {
    temp <- data.frame(
      "OpID" = out$meta$OpID,
      "score" = topicmodel$theta[, topic],
      "words" = sapply(out$documents, ncol)
    )
    ddply(temp, .(OpID), summarise,
          "sumwordweight" = sum(score * c(words / sum(words))) / length(score))
  }
  # BUG FIX: previously hard-coded topicWR(28), ignoring TOPICNUMBER.
  scored <- topicWR(TOPICNUMBER)
  top_ix <- sort(scored$sumwordweight, index.return = TRUE, decreasing = TRUE)$ix[1:howMany]
  data.frame(
    "OpID" = as.character(scored$OpID[top_ix]),
    "Score" = scored$sumwordweight[top_ix]
  )
}
# Return the `howMany` document ids with the highest word-weighted score for
# a given topic, tagged with the topic number.
#   TOPICNUMBER: column index into the stm fit's theta matrix.
#   howMany:     number of top documents to return.
#   topicmodel:  stm fit; uses $theta (documents x topics).
#   out:         prepDocuments() output; uses $meta$id and $documents.
topID <- function(TOPICNUMBER, howMany, topicmodel, out) {
  # Per-document topic score weighted by the document's share of words within
  # its id group, then averaged per id.
  topicWR <- function(topic) {
    temp <- data.frame(
      "id" = out$meta$id,
      "score" = topicmodel$theta[, topic],
      "words" = sapply(out$documents, ncol)
    )
    ddply(temp, .(id), summarise,
          "sumwordweight" = sum(score * c(words / sum(words))) / length(score))
  }
  # BUG FIX: previously hard-coded topicWR(28), ignoring TOPICNUMBER.
  scored <- topicWR(TOPICNUMBER)
  top_ix <- sort(scored$sumwordweight, index.return = TRUE, decreasing = TRUE)$ix[1:howMany]
  data.frame(
    "id" = as.character(scored$id[top_ix]),
    "Score" = scored$sumwordweight[top_ix],
    "Topic" = TOPICNUMBER
  )
}
?labelTopics
stm::l
sum(topOps(28,50,st1,baseinput$out)$Score)
library(shiny)
dir.create("../../bucket1/Wellcome")
getwd()
toLDAvis(st1, baseinput$out$documents,open.browser = FALSE,,out.dir="../../bucket1/Wellcome/Topic")
?toLDAvis
shinyApp(ui=fluidPage(
visOutput("vis1"),
DT::dataTableOutput("tab1")),
server=function(input,output){
output$vis1<-renderVis({toLDAvis(st1, baseinput$out$documents,open.browser = FALSE)})
output$tab1<-renderDataTable({DT::datatable(topID(28,10,st1,baseinput$out))})
})
library("gistr")
DT::re
?toLDAvis
?shinyApp
?datatable
#here, we read from a table of verbs to the wd dataframe. The function allows you to edit a google docs frame shared from the address, so you can add, subtract words. You also could replace the reading of the csv with a call to a local dataframe.
wd<-read.csv(textConnection(RCurl::getURL("https://docs.google.com/spreadsheets/d/1ng7I5QoYu_NzegZvO9568M42q52iVCsLz-D0WhVa--8/pub?gid=0&single=true&output=csv")),stringsAsFactors=FALSE)
allwords<-c(wd$Up.Words,wd$Down.Words)
AnnotatesLarge<-AnnotateVerbsTopicJoin(allwords,baseinput$processed,baseinput$out,baseinput$Annotations,baseinput$SentFrame,baseinput$top.topics)
AnnotatesSmaller<-CombinationFrame(AnnotatesLarge)
rm(AnnotatesLarge)
saveRDS(AnnotatesSmaller,file.path(workingfolder,"AnnotationFrame.rds"))
ProcessedANNS<-ProcessforAPI(AnnotatesSmaller)
saveRDS(ProcessedANNS,file.path(workingfolder,"ProcessedFrame.rds"))
ProcessedANNS<-readRDS(file.path(workingfolder,"ProcessedFrame.rds"))
nrow(ProcessedANNS)
#edit runAlchemy and source
FillFolder(ProcessedANNS,workingfolder)
library(jsonlite)
Frame1<-processFolder(workingfolder,ProcessedANNS)
rm(BASE_INPUT)
rm(BASEINPUT)
saveRDS(Frame1,file.path(workingfolder,"ParsedFrame.rds"))
# Join the parsed relation rows (subject/verb/object) back onto document-level
# metadata and topic assignments, keeping a fixed subset of metadata columns.
#   PARSEFRAME: output of processFolder(); must contain `rowid` (row index into
#               BASEINPUT$out$meta), `ents`, `filename`, and `comboID` columns.
#   BASEINPUT:  list with $out$meta (document metadata) and $top.topics
#               (most-likely topic per document).
#   origent:    integer offset selecting one extra metadata column
#               (position 28 + origent of the joined frame).
frametable<-function(PARSEFRAME,BASEINPUT,origent){
basejoin<-BASEINPUT$out$meta
# attach each document's top topic so it survives the join
basejoin$TopTopics<-BASEINPUT$top.topics
# repeat metadata rows to line up with the parse rows via rowid
joined<-cbind(PARSEFRAME,basejoin[PARSEFRAME$rowid,])
colnames(joined)
# NOTE(review): positions 18, 19, 22, 27 and 28+origent are hard-coded against
# a specific meta column layout -- verify if the metadata schema changes.
joined<-joined[,c(c(1:ncol(PARSEFRAME)),18,19,22,27,c(28+origent),ncol(joined))]
# drop bookkeeping columns not needed downstream
joined<-joined[,c(1:ncol(joined))[-which(colnames(joined)%in%c("filename","comboID","rowid"))]]
# replace commas with semicolons so the entity list is CSV-safe
joined$ents<-gsub(",",";",joined$ents)
joined
}
matchtable<-frametable(Frame1,baseinput,0)
dir.create("../AG_NUTRITION_RESULTS")
tableapp(matchtable,st1)
save("matchtable","st1",file="../AG_NUTRITION_RESULTS/agnutshinydata.R")
workpage<-read.csv(textConnection(RCurl::getURL("https://docs.google.com/spreadsheets/d/1jRTIHINPLvE8w-_xIBxvxId4n_fFT7Qsg2tjCNrUm3A/pub?output=csv")))
workpage$Ag
?sageLabels
toplabs<-stm::labelTopics(st1,workpage$Ag)$prob
.heldout()
tcorp<-readRDS(file.path(workingfolder,"corpus.rds"))
fulldocs1<-textProcessor(unlist(sapply(tcorp[1:5], `[`, "content")))
install.packages("d3Network")
library(d3Network)
?d3Sankey
d3Sankey(
tempframe<-data.frame(source=c(0,1,2,3,4),target=c(1,2,3,4,4))
tempframe$value<-c(100,100,100,100,100)
nodeframe<-data.frame("name"=c("bacon","sandwich","pie","cake","lemon"))
library(networkD3)
sankeyNetwork(Links=tempframe,Nodes=nodeframe,Source="source",Target="target",Value="value",NodeID="name",fontSize=24,nodeWidth=30)
matchtable$object.keywords
ntr<-filter(matchtable,str_detect(subject.keywords,"micronutrients"))
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed("micro")),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)},error=function(e){NA})})
which(is.na(l1))
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
l1<-na.omit(l1)
networkD3::simpleNetwork(l1,Source="enter",Target="out",fontSize=20)
l12<-data.frame("name"=as.factor(unique(c(as.character(l1$enter),as.character(l1$out)))),"id"=0:c(length(unique(c(as.character(l1$enter),as.character(l1$out))))-1))
l1$enter<-as.numeric(as.character(mapvalues(l1$enter, from = l12$name, to = l12$id)))
l1$out<-as.numeric(as.character(mapvalues(l1$out, from = l12$name, to = l12$id)))
l1$value<-10
l1
childmaker<-function(K) {lapply(K,function(X) list(name=X))}
chartmaker("micronutrients")
?diagonalNetwork
dplyr::f
# Build and plot a two-level diagonal network for WORD: the subject keyword at
# the root, its associated action lemmas as children, and each action's object
# keywords as grandchildren.
# NOTE(review): reads the global `matchtable` (unlike makenet.radial, which
# takes the table as an argument) and uses an unqualified filter() at one
# point -- relies on dplyr being attached.
makenet<-function(WORD){
# rows whose subject keywords mention WORD (case-insensitive)
ntr<-dplyr::filter(matchtable,str_detect(tolower(subject.keywords),tolower(WORD)))
# For each matching row collect subject->action and action->object edges;
# rows that fail to parse become NA and are dropped below.
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed(tolower(WORD))),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)
},error=function(e){NA})
})
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
# one node plus its unique direct children in the edge list
chartmaker<-function(E) {list(name=E,children=lapply(unique(dplyr::filter(l1,enter==E)$out),function(X){list(name=as.character(X))}))}
lch<-chartmaker(tolower(WORD))
# expand each first-level child by one more level (grandchildren)
for(i in 1:length(lch[[2]])) {lch[[2]][[i]]$children<-chartmaker(lch[[2]][[i]]$name)$children}
lch %>% diagonalNetwork(fontSize=30,linkColour = "#000")
}
# Build and plot a two-level radial network for WORD from the supplied match
# table: the subject keyword at the root, associated action lemmas as
# children, and each action's object keywords as grandchildren.
#   WORD:   subject keyword to search for (matched case-insensitively).
#   mtable: relation table with subject.keywords, action.lemmatized and
#           object.keywords list-columns (e.g. the matchtable built above).
makenet.radial<-function(WORD,mtable){
# rows whose subject keywords mention WORD (case-insensitive)
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
# For each matching row collect subject->action and action->object edges;
# rows that fail to parse become NA and are dropped below.
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed(tolower(WORD))),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-dplyr::filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)
},error=function(e){NA})
})
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
# one node plus the unique children whose `enter` label contains E
chartmaker<-function(E) {list(name=tolower(E),children=lapply(unique(dplyr::filter(l1,str_detect(tolower(enter),tolower(E)))$out),function(X){list(name=as.character(X))}))}
lch<-chartmaker(tolower(WORD))
# expand each first-level child by one more level (grandchildren)
for(i in 1:length(lch[[2]])) {lch[[2]][[i]]$children<-chartmaker(lch[[2]][[i]]$name)$children}
lch %>% radialNetwork(fontSize=30,linkColour = "#000")
}
library(plyr)
library(dplyr)
library(networkD3)
makenet("micronutrients")
makenet("anemia")
matchtable$TopTopics%in%
library(stringr)
makenet.radial("protein",dplyr::filter(matchtable,TopTopics%in%workpage$Ag))
makenet.radial("protein",dplyr::filter(matchtable,TopTopics%in%workpage$Nut))
breedtopics<-dplyr::filter(matchtable,TopTopics%in%c(21,19,42))
table(unique(unlist(breedtopics$subject.keywords)))
makenet.radial("vari",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
makenet.radial("drought",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
makenet.radial("food",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
library(stm)
?findThoughts
slabs<-stm::sageLabels(st1,n=10)
slabs$marginal$prob[21,]
findThoughts(st1,21)
# Same traversal as makenet.radial(), but returns the nested name/children
# list (networkD3 hierarchy / JSON-like structure) instead of plotting it.
#   WORD:   subject keyword to search for (matched case-insensitively).
#   mtable: relation table with subject.keywords, action.lemmatized and
#           object.keywords list-columns.
makenet.radial.json<-function(WORD,mtable){
# rows whose subject keywords mention WORD (case-insensitive)
ntr<-dplyr::filter(mtable,str_detect(tolower(subject.keywords),tolower(WORD)))
# For each matching row collect subject->action and action->object edges;
# rows that fail to parse become NA and are dropped below.
l1<-lapply(1:nrow(ntr),function(i) {
tryCatch({part1<-data.frame("enter"=as.character(unlist(ntr$subject.keywords[i])),"out"=unlist(as.character(ntr$action.lemmatized[i])))
part1<-part1[str_detect(part1[,1],fixed(tolower(WORD))),]
part2<-data.frame("enter"=unlist(as.character(ntr$action.lemmatized[i])),"out"=unlist(ntr$object.keywords[i]))
part2<-dplyr::filter(part2,str_detect(enter,as.character(part1$out)))
rbind(part1,part2)
},error=function(e){NA})
})
l1<-na.omit(l1)
l1<-do.call(rbind,l1)
# one node plus the unique children whose `enter` label contains E
chartmaker<-function(E) {list(name=tolower(E),children=lapply(unique(dplyr::filter(l1,str_detect(tolower(enter),tolower(E)))$out),function(X){list(name=as.character(X))}))}
lch<-chartmaker(tolower(WORD))
# expand each first-level child by one more level (grandchildren)
for(i in 1:length(lch[[2]])) {lch[[2]][[i]]$children<-chartmaker(lch[[2]][[i]]$name)$children}
lch}
makenet.radial.json(slabs$marginal$prob[21,][1],dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
lapply()
l_ply(makenet.radial.json(slabs$marginal$prob[21,][1],dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))[[2]]
doublechild<-function(V){
V<-makenet.radial.json(slabs$marginal$prob[21,][1],dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))[[2]][[1]]
l_ply(V[[2]],function(X){
X<-V[[2]][[1]]
X$children=makenet.radial.json(X$name,filter(matchtable, TopTopics%in%c(21,19,42)))$children
V}
makenet.radial("seed",dplyr::filter(matchtable,TopTopics%in%c(21,19,42)))
library(plyr)
library(networkD3)
library(dplyr)
library(stringr)
makenet.radial(slabs$marginal$prob[21,][1],filter(matchtable,TopTopics%in%c(21,19,42)))
matchtable$To
makenet.radial("survey",dplyr::filter(matchtable,TopTopics%in%c(22,53)))
makenet.radial("iron",dplyr::filter(matchtable,TopTopics%in%workpage$Nut))
makenet.radial("potato",dplyr::filter(matchtable,TopTopics%in%workpage$Ag))
saveRDS(matchtable,file="../Ag.Nutrition.Grants/matchtab.rds")
shinyApp(ui=fluidPage(sidebarPanel(textInput("Word","Word","potato"),selectInput("fvar","Filter Variable",names(matchtable),"TopTopics"),textInput("f2","Filter Text","1")),mainPanel(radialNetworkOutput("radout"))),server=function(input,output){
output$radout<-networkD3::renderRadialNetwork({makenet.radial("potato",matchtable[as.character(input$fvar)%in%as.character(input$f2),])})})
})
Nts<-filter(matchtable,TopTopics%in%workpage$Nut)
table(unlist(Nts$subject.keywords))
chartmaker(X$name)
lch[[2]] %>% .[[1]]
l1$children[[1]]$children<-
chartmaker("offer")
charmaker2("micronutrients")
chartmaker("offer")
lapply(unique(l1$enter), chartmaker)[[1]] %>% diagonalNetwork(fontSize=30,linkColour = "#000")
/
E="micronutrients"
chartmaker<-function(E) {list(name=E,children=lapply(table(filter(l1,enter==E)$out),function(X){list(name=X)}))}
lapply(1:nrow(l1), function(i) list(name=l1$enter[i],children=l1$out[i]))
# Recursively convert a data frame into a nested name/children list
# (networkD3 hierarchy format), splitting on one column per level.
# Rows whose first column is NA are dropped; an empty frame yields NULL;
# a single remaining column produces the leaf nodes.
rsplit <- function(x) {
  x <- x[!is.na(x[, 1]), , drop = FALSE]
  if (nrow(x) == 0) {
    return(NULL)
  }
  if (ncol(x) == 1) {
    return(lapply(x[, 1], function(leaf) list(name = leaf)))
  }
  # Split the remaining columns by the values of the first column and
  # recurse on each group.
  groups <- split(x[, -1, drop = FALSE], x[, 1])
  nodes <- Map(
    function(children, label) {
      if (!is.null(children)) {
        list(name = label, children = children)
      } else {
        list(name = label)
      }
    },
    lapply(groups, rsplit),
    names(groups)
  )
  unname(nodes)
}
temp
temp)
l3<-filter(l1,enter==enter[[1]])
?dlply
diagonalNetwork(l1,Source="enter",Target="out",fontSize=20)
?simpleNetwork
networkD3::simpleNetwork(l1,Source="enter",Target="out",fontSize=20)
sankeyNetwork(Links = l1, Nodes = l12, Source = 'enter', Target = 'out',Value='value',NodeID = "name", fontSize = 24, nodeWidth = 30)
sankeyNetwork(Links = energy$links, Nodes = energy$nodes, Source = 'source', Target = 'target', Value = 'value', NodeID = 'name',units = 'TWh', fontSize = 12, nodeWidth = 30)
ntr$subject.keywords
length(unlist(ntr$action.lemmatized))
unlist(),"act"=unlist(ntr$action.lemmatized)
"ob"=unlist(ntr$object.keywords))
netframe$key[1]
URL <- paste0('https://cdn.rawgit.com/christophergandrud/networkD3/','master/JSONdata/energy.json')
energy <- jsonlite::fromJSON(URL)
# Plot
sankeyNetwork(Links = energy$links, Nodes = energy$nodes, Source = 'source', Target = 'target', Value = 'value', NodeID = 'name',units = 'TWh', fontSize = 12, nodeWidth = 30)
desc<-read.csv("../Fullset/fullset.csv")
head(desc)
matchtable<-readRDS(file.path(workingfolder,"matchtab.rds"))
reop<-join(data.frame("OpID"=desc$Opportunity.ID,"basename"=basename(as.character(desc$path))),data.frame("basename"=baseinput$SentFrame$id,"Orig"=baseinput$SentFrame$Orig),type="left",match="first")
matchtable<-join(matchtable,reop,match="first")
head(matchtable)
matchtable$basename
saveRDS(matchtable,file.path(workingfolder,"matchtable2.rds"))
matchtable<-unique(matchtable)
getwd()
data_mapper(read.csv("../Ag.Nutrition.Grants/countrypredictions1.csv"),unique(baseinput$SentFrame$OpID))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{is.POSIX}
\alias{is.POSIX}
\title{Check whether an object is a POSIX date-time}
\usage{
is.POSIX(v)
}
\description{
Tests whether \code{v} is a POSIX date-time object (i.e. inherits from a POSIX date-time class such as \code{POSIXct} or \code{POSIXlt}).
}
\examples{
is.POSIX(Sys.time())
}
|
/man/is.POSIX.Rd
|
no_license
|
srhoads/srhoads
|
R
| false
| true
| 270
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{is.POSIX}
\alias{is.POSIX}
\title{Check whether an object is a POSIX date-time}
\usage{
is.POSIX(v)
}
\description{
Tests whether \code{v} is a POSIX date-time object (i.e. inherits from a POSIX date-time class such as \code{POSIXct} or \code{POSIXlt}).
}
\examples{
is.POSIX(Sys.time())
}
|
# Tests for the tf_graph S3 wrapper: construction, operation listing,
# printing, and GraphDef (de)serialization.

test_that("tf_graph_new() works", {
  graph <- tf_graph_new()
  expect_s3_class(graph, "tf_graph")
})

test_that("tf_graph_list_operations() works", {
  # A freshly created graph contains no operations, so the listing should be
  # a zero-row frame with the documented columns.
  no_ops <- data.frame(
    name = character(),
    op_type = character(),
    device = character(),
    num_inputs = integer(),
    num_outputs = integer()
  )
  expect_identical(tf_graph_list_operations(tf_graph_new()), no_ops)
})

test_that("tf_graph can be formatted and printed", {
  expect_match(format(tf_graph_new()), "<tf_graph at")
  expect_output(print(tf_graph_new()), "<tf_graph at")
})

test_that("tf_graph_import_graph_def() errors for invalid buffer", {
  not_a_graph_def <- charToRaw("this is definitely not a valid buffer")
  expect_error(
    tf_graph_import_graph_def(tf_graph_new(), not_a_graph_def),
    "Invalid GraphDef"
  )
})

test_that("tf_graph_to_graph_def() can roundtrip an empty graph", {
  serialized <- tf_graph_to_graph_def(tf_graph_new())
  restored <- tf_graph_import_graph_def(tf_graph_new(), serialized)
  expect_identical(
    tf_graph_list_operations(restored),
    tf_graph_list_operations(tf_graph_new())
  )
})
|
/tests/testthat/test-tf-graph.R
|
permissive
|
paleolimbot/tf
|
R
| false
| false
| 1,045
|
r
|
# Tests for the tf_graph S3 wrapper: construction, operation listing,
# printing, and GraphDef (de)serialization.

test_that("tf_graph_new() works", {
  graph <- tf_graph_new()
  expect_s3_class(graph, "tf_graph")
})

test_that("tf_graph_list_operations() works", {
  # A freshly created graph contains no operations, so the listing should be
  # a zero-row frame with the documented columns.
  no_ops <- data.frame(
    name = character(),
    op_type = character(),
    device = character(),
    num_inputs = integer(),
    num_outputs = integer()
  )
  expect_identical(tf_graph_list_operations(tf_graph_new()), no_ops)
})

test_that("tf_graph can be formatted and printed", {
  expect_match(format(tf_graph_new()), "<tf_graph at")
  expect_output(print(tf_graph_new()), "<tf_graph at")
})

test_that("tf_graph_import_graph_def() errors for invalid buffer", {
  not_a_graph_def <- charToRaw("this is definitely not a valid buffer")
  expect_error(
    tf_graph_import_graph_def(tf_graph_new(), not_a_graph_def),
    "Invalid GraphDef"
  )
})

test_that("tf_graph_to_graph_def() can roundtrip an empty graph", {
  serialized <- tf_graph_to_graph_def(tf_graph_new())
  restored <- tf_graph_import_graph_def(tf_graph_new(), serialized)
  expect_identical(
    tf_graph_list_operations(restored),
    tf_graph_list_operations(tf_graph_new())
  )
})
|
# Manual computation of Cook's distance for lm(dist ~ speed, data = cars),
# cross-checked against the built-in cooks.distance().
# NOTE(review): `car` is loaded but no car:: function is used below.
library(car)

model <- lm(dist ~ speed, data = cars)
sSq <- sum(model$residuals^2) / model$df.residual  # residual variance estimate s^2
p <- 2                                             # number of model coefficients
newdata <- data.frame(speed = cars$speed)
yhat <- predict(object = model, newdata = newdata)  # fitted values, full model
cooksD <- cooks.distance(model = model)             # reference values

# Leave-one-out refits: Cook's D_i = sum((yhat_(i) - yhat)^2) / (p * s^2).
# (Removed dead code: an unused df_mini frame and a stray i = 1 before the loop.)
n <- nrow(cars)
cooksD1 <- vector(mode = "numeric", length = n)
for (i in seq_len(n)) {
  modelx <- lm(dist ~ speed, data = cars[-i, ])
  yhatx <- predict(object = modelx, newdata = newdata)
  cooksD1[i] <- sum((yhatx - yhat)^2) / (p * sSq)
}

# Largest discrepancy between the manual and built-in versions
max(abs(cooksD1 - cooksD))
# Observation indices ordered by increasing influence
order(cooksD)
|
/CooksDistance.R
|
no_license
|
doobops/Machine-Learning-in-R
|
R
| false
| false
| 589
|
r
|
# Manual computation of Cook's distance for lm(dist ~ speed, data = cars),
# cross-checked against the built-in cooks.distance().
# NOTE(review): `car` is loaded but no car:: function is used below.
library(car)

model <- lm(dist ~ speed, data = cars)
sSq <- sum(model$residuals^2) / model$df.residual  # residual variance estimate s^2
p <- 2                                             # number of model coefficients
newdata <- data.frame(speed = cars$speed)
yhat <- predict(object = model, newdata = newdata)  # fitted values, full model
cooksD <- cooks.distance(model = model)             # reference values

# Leave-one-out refits: Cook's D_i = sum((yhat_(i) - yhat)^2) / (p * s^2).
# (Removed dead code: an unused df_mini frame and a stray i = 1 before the loop.)
n <- nrow(cars)
cooksD1 <- vector(mode = "numeric", length = n)
for (i in seq_len(n)) {
  modelx <- lm(dist ~ speed, data = cars[-i, ])
  yhatx <- predict(object = modelx, newdata = newdata)
  cooksD1[i] <- sum((yhatx - yhat)^2) / (p * sSq)
}

# Largest discrepancy between the manual and built-in versions
max(abs(cooksD1 - cooksD))
# Observation indices ordered by increasing influence
order(cooksD)
|
# Demo of manipulate: interactively vary the span of a loess smoother.
xvals <- 1:100
yvals <- rnorm(100)
data <- data.frame(xvals, yvals)

library(manipulate)
library(ggplot2)

manipulate({
  # define plotting function: points plus a loess smoother whose span is
  # controlled by the slider below.
  # BUG FIX: a leftover browser() call used to sit in the middle of the
  # ggplot chain, so geom_point() was a detached statement and the points
  # were never added to the plot.
  ggplot(data, aes(xvals, yvals)) +
    geom_smooth(method = "loess", span = span.val) +
    geom_point()
},
# define variable that will be changed in plot
span.val = slider(0.1, 1)
)
|
/testMan.R
|
no_license
|
saminehbagheri/SOCU
|
R
| false
| false
| 350
|
r
|
# Demo of manipulate: interactively vary the span of a loess smoother.
xvals <- 1:100
yvals <- rnorm(100)
data <- data.frame(xvals, yvals)

library(manipulate)
library(ggplot2)

manipulate({
  # define plotting function: points plus a loess smoother whose span is
  # controlled by the slider below.
  # BUG FIX: a leftover browser() call used to sit in the middle of the
  # ggplot chain, so geom_point() was a detached statement and the points
  # were never added to the plot.
  ggplot(data, aes(xvals, yvals)) +
    geom_smooth(method = "loess", span = span.val) +
    geom_point()
},
# define variable that will be changed in plot
span.val = slider(0.1, 1)
)
|
# 01_read_data
## Reads data and reshapes into various arrays for JAGS analysis
## Later in program selects which trials to include for main and sensitivity analyses
## based on whether data_choose is specified
# Packages ----
library(dplyr)
library(reshape2)
library(glmmBUGS)
# Source functions (for all the files with code in this project) ----
source ("scripts/00_functions.r")
# Read in csv files with data ----
# Column names are lower-cased and dots replaced with underscores so both
# csv files share one naming convention.
main <- read.csv (file = "data/P2Y12meta_combined.csv", as.is = TRUE)
names(main) <- tolower(names(main))
names(main) <- gsub(".", "_", names(main), fixed = TRUE)
bleed <- read.csv(file = "data/P2Y12_bleeding.csv", as.is = TRUE)
names(bleed) <- tolower(names(bleed))
names(bleed) <- gsub(".", "_", names(bleed), fixed = TRUE)
# identify follow-up period ----
# Bare expression: prints the raw follow-up text when run interactively.
main$duration_of_follow_up
# Convert follow-up to years. NOTE(review): this assigns by POSITION, not by
# matching duration_of_follow_up -- the named vector must stay in the same
# order as the trials in the csv; verify if the data file changes.
main$time <- c("12 months" = 12, "Hospital discharge or 28 days." = 1, "30 days" = 1, "15 months" = 15,
"12 months" = 12, "30 months" = 30, "90 days" = 3, "3.4 years" = 3.4*12, "90 days" = 3)/12
# Treat prasugrel and ticagrelor as single treatment ----
# Collapse ticagrelor and prasugrel into a single comparison
main <- filter(main, indication %in% c("ACS", "Stroke")) %>%
mutate (drug_intervention = ifelse (drug_intervention == "ticagrelor", "prasugrel", drug_intervention))
# select study characteristics needed for analyses ----
study <- main [ , c('trial','indication', 'drug_control', 'drug_intervention', 'time')]
study$comparison <- paste0(study$drug_intervention, "_", study$drug_control)
# Collapse all stroke studies into a single comparison as there is only one ticagrelor versus aspirin study
study$comparison[study$indication == "Stroke"] <- "clopidogrel_placebo"
## Create a study ID
study <- study[, c("trial","indication", "comparison", "time")]
study$study <- 1:nrow(study)
# Relabel events and n for main outcome
events_n <- main[, c(
'women_intervention_event', 'women_intervention_no_event',
'women_control_event', 'women_control_no_event',
'men_intervention_event', 'men_intervention_no_event',
'men_control_event', 'men_control_no_event')]
events_n$study <- 1:nrow(events_n)
# Melt to long format and decode sex / treatment arm / event status from the
# original column names.
events_n <- melt(events_n, id = "study")
events_n$women <- grepl("women|female", events_n$variable )
events_n$tx <- grepl("intervention", events_n$variable )
events_n$event <- grepl("no_event", events_n$variable)
events_n$event <- factor (events_n$event, c(T,F), labels = c("no_event", "events"))
events_n <- dcast(events_n, study + women + tx ~ event, value.var = "value")
events_n$n <- events_n$events + events_n$no_event
events_n <- events_n[ , c("study","women", "tx", "events", "n")]
# Create a wide format dataset ----
# One row per study with columns r{w,m}{t,c} (events) and n{w,m}{t,c}
# (totals): w/m = women/men, t/c = treatment/control.
wide <- events_n
wide$women <- factor(wide$women, levels = c(T,F), labels = c("w", "m"))
wide$tx <- factor(wide$tx, levels = c(T,F), labels = c("t", "c"))
events <- reshape2::dcast (wide, study ~ women + tx, value.var = "events" )
names(events) <- gsub("_", "", names(events))
names(events) <- paste0("r", names(events))
n <- reshape2::dcast (wide, study ~ women + tx, value.var = "n" )
names(n) <- gsub("_", "", names(n))
names(n) <- paste0("n", names(n))
main_wide <- cbind(study, n[,-1], events[,-1])
## Set-up ragged array with indication nested within treatment ----
main_wide <- main_wide %>%
arrange(comparison, indication, trial)
wide_rag <- winBugsRaggedArray(main_wide,
effects = c("comparison", "indication"),
covariates = list(trial = "trial"),
observations = "rwt",
returnData = FALSE)
## Set-up ragged array with trial nested within a single treatment comparison/indication variable ----
# ie flatten the complexity
main_wide_flat <- main_wide %>%
mutate(compar_indic = paste(comparison, indication, sep = "_")) %>%
arrange(compar_indic, trial)
wide_rag_flat <- winBugsRaggedArray(main_wide_flat,
effects = c("compar_indic"),
covariates = list(trial = "trial"),
observations = "rwt",
returnData = FALSE) # checked ordering it is unchanged
## Select trials with just prasugrel versus clopidogrel ----
pras <- filter(main_wide, comparison == "prasugrel_clopidogrel" &
indication == "ACS")
## Set-up bleeding data for treatment comparison within indication ----
# Drop the main-outcome count columns from the flat frame before merging so
# the bleeding counts (renamed below) do not clash with them.
bleed <- merge(bleed,
main_wide_flat [, ! names(main_wide_flat) %in% c('nwt', 'nwc', 'nmt', 'nmc', 'rwt', 'rwc', 'rmt', 'rmc')],
by = "trial")
bleed$study <- 1:nrow(bleed)
# Rename bleeding counts to the r/x naming scheme (r = bleeds, x = no
# bleeds), derive denominators n = r + x, then drop the x columns.
bleed$rwt <- bleed$women_intervention_bleed ; bleed$women_intervention_bleed <- NULL
bleed$rwc <- bleed$women_control_bleed ; bleed$women_control_bleed <- NULL
bleed$rmt <- bleed$men_intervention_bleed ; bleed$men_intervention_bleed<- NULL
bleed$rmc <- bleed$men_control_bleed ; bleed$men_control_bleed <- NULL
bleed$xwt <- bleed$women_intervention_no_bleed ; bleed$women_intervention_no_bleed <- NULL
bleed$xwc <- bleed$women_control_no_bleed ; bleed$women_control_no_bleed <- NULL
bleed$xmt <- bleed$men_intervention_no_bleed ; bleed$men_intervention_no_bleed<- NULL
bleed$xmc <- bleed$men_control_no_bleed ; bleed$men_control_no_bleed <- NULL
bleed$nwt <- bleed$xwt + bleed$rwt
bleed$nwc <- bleed$xwc + bleed$rwc
bleed$nmt <- bleed$xmt + bleed$rmt
bleed$nmc <- bleed$xmc + bleed$rmc
bleed <- bleed [ , ! names(bleed) %in% c("xwt", "xwc", "xmt", "xmc")]
bleed <- bleed %>%
arrange(indication, comparison, trial)
bleed_rag <- winBugsRaggedArray(bleed,
effects = c("indication", "comparison"),
covariates = list(trial = "trial"),
observations = "rwt",
returnData = FALSE) # checked ordering it is unchanged
## Set-up bleeding data for prasugrel_only comparison ----
bleed_pras_men <- filter (bleed, trial %in% pras$trial)
## Read in data from J Am Coll Cardiol. 2017 Mar 28;69(12):1549-1559. doi: 10.1016/j.jacc.2017.01.028.
lau <- read.csv(file = "data/lau_et_al.csv", as.is = TRUE)
lau_time <- read.csv(file = "data/lau_et_al_time.csv", as.is = TRUE)
lau <- merge(lau_time, lau, by = "trial")
lau$study <- seq_along(lau$trial)
rm(lau_time)
## Create dataset with data from both systematic reviews
# Keep only Lau trials not already present in main_wide, then append.
lau_sa <- lau[lau$trial %in% setdiff(lau$trial, main_wide$trial),]
lau_sa <- bind_rows(main_wide, lau_sa)
lau_sa$study <- seq_along(lau_sa$trial)
## Save data ----
# Saves the ENTIRE workspace for downstream scripts.
save.image(file = "data/Data.Rdata")
|
/scripts/01_read_data.R
|
no_license
|
dmcalli2/gender_dapt_manuscript
|
R
| false
| false
| 6,778
|
r
|
# 01_read_data
## Reads data and reshapes into various arrays for JAGS analysis
## Later in program selects which trials to include for main and sensitivity analyses
## based on whether data_choose is specified
# Packages ----
library(dplyr)
library(reshape2)
library(glmmBUGS)
# Source functions (for all the files with code in this project) ----
source ("scripts/00_functions.r")
# Read in csv files with data ----
# Column names are lower-cased and dots replaced with underscores so both
# csv files share one naming convention.
main <- read.csv (file = "data/P2Y12meta_combined.csv", as.is = TRUE)
names(main) <- tolower(names(main))
names(main) <- gsub(".", "_", names(main), fixed = TRUE)
bleed <- read.csv(file = "data/P2Y12_bleeding.csv", as.is = TRUE)
names(bleed) <- tolower(names(bleed))
names(bleed) <- gsub(".", "_", names(bleed), fixed = TRUE)
# identify follow-up period ----
# Bare expression: prints the raw follow-up text when run interactively.
main$duration_of_follow_up
# Convert follow-up to years. NOTE(review): this assigns by POSITION, not by
# matching duration_of_follow_up -- the named vector must stay in the same
# order as the trials in the csv; verify if the data file changes.
main$time <- c("12 months" = 12, "Hospital discharge or 28 days." = 1, "30 days" = 1, "15 months" = 15,
"12 months" = 12, "30 months" = 30, "90 days" = 3, "3.4 years" = 3.4*12, "90 days" = 3)/12
# Treat prasugrel and ticagrelor as single treatment ----
# Collapse ticagrelor and prasugrel into a single comparison
main <- filter(main, indication %in% c("ACS", "Stroke")) %>%
mutate (drug_intervention = ifelse (drug_intervention == "ticagrelor", "prasugrel", drug_intervention))
# select study characteristics needed for analyses ----
study <- main [ , c('trial','indication', 'drug_control', 'drug_intervention', 'time')]
study$comparison <- paste0(study$drug_intervention, "_", study$drug_control)
# Collapse all stroke studies into a single comparison as there is only one ticagrelor versus aspirin study
study$comparison[study$indication == "Stroke"] <- "clopidogrel_placebo"
## Create a study ID
study <- study[, c("trial","indication", "comparison", "time")]
study$study <- 1:nrow(study)
# Relabel events and n for main outcome
events_n <- main[, c(
'women_intervention_event', 'women_intervention_no_event',
'women_control_event', 'women_control_no_event',
'men_intervention_event', 'men_intervention_no_event',
'men_control_event', 'men_control_no_event')]
events_n$study <- 1:nrow(events_n)
# Melt to long format and decode sex / treatment arm / event status from the
# original column names.
events_n <- melt(events_n, id = "study")
events_n$women <- grepl("women|female", events_n$variable )
events_n$tx <- grepl("intervention", events_n$variable )
events_n$event <- grepl("no_event", events_n$variable)
events_n$event <- factor (events_n$event, c(T,F), labels = c("no_event", "events"))
events_n <- dcast(events_n, study + women + tx ~ event, value.var = "value")
events_n$n <- events_n$events + events_n$no_event
events_n <- events_n[ , c("study","women", "tx", "events", "n")]
# Create a wide format dataset ----
# One row per study with columns r{w,m}{t,c} (events) and n{w,m}{t,c}
# (totals): w/m = women/men, t/c = treatment/control.
wide <- events_n
wide$women <- factor(wide$women, levels = c(T,F), labels = c("w", "m"))
wide$tx <- factor(wide$tx, levels = c(T,F), labels = c("t", "c"))
events <- reshape2::dcast (wide, study ~ women + tx, value.var = "events" )
names(events) <- gsub("_", "", names(events))
names(events) <- paste0("r", names(events))
n <- reshape2::dcast (wide, study ~ women + tx, value.var = "n" )
names(n) <- gsub("_", "", names(n))
names(n) <- paste0("n", names(n))
main_wide <- cbind(study, n[,-1], events[,-1])
## Set-up ragged array with indication nested within treatment ----
main_wide <- main_wide %>%
arrange(comparison, indication, trial)
wide_rag <- winBugsRaggedArray(main_wide,
effects = c("comparison", "indication"),
covariates = list(trial = "trial"),
observations = "rwt",
returnData = FALSE)
## Set-up ragged array with trial nested within a single treatment comparison/indication variable ----
# ie flatten the complexity
main_wide_flat <- main_wide %>%
mutate(compar_indic = paste(comparison, indication, sep = "_")) %>%
arrange(compar_indic, trial)
wide_rag_flat <- winBugsRaggedArray(main_wide_flat,
effects = c("compar_indic"),
covariates = list(trial = "trial"),
observations = "rwt",
returnData = FALSE) # checked ordering it is unchanged
## Select trials with just prasugrel versus clopidogrel ----
pras <- filter(main_wide, comparison == "prasugrel_clopidogrel" &
indication == "ACS")
## Set-up bleeding data for treatment comparison within indication ----
# Drop the main-outcome count columns from the flat frame before merging so
# the bleeding counts (renamed below) do not clash with them.
bleed <- merge(bleed,
main_wide_flat [, ! names(main_wide_flat) %in% c('nwt', 'nwc', 'nmt', 'nmc', 'rwt', 'rwc', 'rmt', 'rmc')],
by = "trial")
bleed$study <- 1:nrow(bleed)
# Rename bleeding counts to the r/x naming scheme (r = bleeds, x = no
# bleeds), derive denominators n = r + x, then drop the x columns.
bleed$rwt <- bleed$women_intervention_bleed ; bleed$women_intervention_bleed <- NULL
bleed$rwc <- bleed$women_control_bleed ; bleed$women_control_bleed <- NULL
bleed$rmt <- bleed$men_intervention_bleed ; bleed$men_intervention_bleed<- NULL
bleed$rmc <- bleed$men_control_bleed ; bleed$men_control_bleed <- NULL
bleed$xwt <- bleed$women_intervention_no_bleed ; bleed$women_intervention_no_bleed <- NULL
bleed$xwc <- bleed$women_control_no_bleed ; bleed$women_control_no_bleed <- NULL
bleed$xmt <- bleed$men_intervention_no_bleed ; bleed$men_intervention_no_bleed<- NULL
bleed$xmc <- bleed$men_control_no_bleed ; bleed$men_control_no_bleed <- NULL
bleed$nwt <- bleed$xwt + bleed$rwt
bleed$nwc <- bleed$xwc + bleed$rwc
bleed$nmt <- bleed$xmt + bleed$rmt
bleed$nmc <- bleed$xmc + bleed$rmc
bleed <- bleed [ , ! names(bleed) %in% c("xwt", "xwc", "xmt", "xmc")]
bleed <- bleed %>%
arrange(indication, comparison, trial)
bleed_rag <- winBugsRaggedArray(bleed,
effects = c("indication", "comparison"),
covariates = list(trial = "trial"),
observations = "rwt",
returnData = FALSE) # checked ordering it is unchanged
## Set-up bleeding data for prasugrel_only comparison ----
bleed_pras_men <- filter (bleed, trial %in% pras$trial)
## Read in data from J Am Coll Cardiol. 2017 Mar 28;69(12):1549-1559. doi: 10.1016/j.jacc.2017.01.028.
lau <- read.csv(file = "data/lau_et_al.csv", as.is = TRUE)
lau_time <- read.csv(file = "data/lau_et_al_time.csv", as.is = TRUE)
lau <- merge(lau_time, lau, by = "trial")
lau$study <- seq_along(lau$trial)
rm(lau_time)
## Create dataset with data from both systematic reviews
# Keep only Lau trials not already present in main_wide, then append.
lau_sa <- lau[lau$trial %in% setdiff(lau$trial, main_wide$trial),]
lau_sa <- bind_rows(main_wide, lau_sa)
lau_sa$study <- seq_along(lau_sa$trial)
## Save data ----
# Saves the ENTIRE workspace for downstream scripts.
save.image(file = "data/Data.Rdata")
|
# testthat unit tests for internal parameter-renaming helpers (the package's
# "brms-model" naming machinery): rename(), model_name(), make_group_frame(),
# make_index_names(), combine_duplicates(), change_prior(), change_fixef().
test_that("rename returns an error on duplicated names", {
  # Substituting special characters must not collapse distinct inputs into
  # the same internal name; check_dup = TRUE should raise with the offenders.
  expect_error(rename(c(letters[1:4],"a()","a["), check_dup = TRUE), fixed = TRUE,
               paste("Internal renaming of variables led to duplicated names.",
                     "\nOccured for variables: a, a(), a["))
  expect_error(rename(c("aDb","a/b","b"), check_dup = TRUE), fixed = TRUE,
               paste("Internal renaming of variables led to duplicated names.",
                     "\nOccured for variables: aDb, a/b"))
  expect_error(rename(c("log(a,b)","logab","bac","ba"), check_dup = TRUE), fixed = TRUE,
               paste("Internal renaming of variables led to duplicated names.",
                     "\nOccured for variables: log(a,b), logab"))
})
test_that("rename perform correct renaming", {
  names <- c("acd", "a[23]", "b__")
  # Fixed (literal) substitution of each symbol by the matching sub.
  expect_equal(rename(names, symbols = c("[", "]", "__"), subs = c(".", ".", ":")),
               c("acd", "a.23.", "b:"))
  # fixed = FALSE: symbols are regular expressions (anchors respected).
  expect_equal(rename(names, symbols = c("^\\[", "\\]", "__$"),
                      subs = c(".", ".", ":"), fixed = FALSE),
               c("acd", "a[23.", "b:"))
})
test_that("model_names works correctly", {
  expect_equal(model_name(NA), "brms-model")
  expect_equal(model_name(gaussian()), "gaussian(identity) brms-model")
  expect_equal(model_name(bernoulli(type = "2PL")),
               "bernoulli(logit, 2PL) brms-model")
})
test_that("make_group_frame returns correct first and last indices", {
  expect_equal(make_group_frame(list(a = c("x","Int"), b = c("x"))),
               data.frame(g = c("a", "b"), nlp = "", first = c(1, 1),
                          last = c(2, 1)))
  expect_equal(make_group_frame(list(a = c("x","Int"), b = c("x"),
                                     a = c("y","z"), b = c("b"))),
               data.frame(g = c("a", "b", "a", "b"), nlp = "",
                          first = c(1, 1, 3, 2), last = c(2, 1, 4, 2)))
  expect_equal(make_group_frame(list(a = c("x","Int"), b = c("x"),
                                     a = c("y","z"), a = c("b"))),
               data.frame(g = c("a", "b", "a", "a"), nlp = "",
                          first = c(1, 1, 3, 5), last = c(2, 1, 4, 5)))
  # test in case of a non-linear model
  # NOTE(review): `ranef` below is constructed but never asserted on -- the
  # non-linear case appears to be an unfinished test; confirm and complete.
  ranef <- list(a = structure(c("x","Int"), nlpar = "U"),
                b = structure(c("x"), nlpar = "U"),
                a = structure(c("y","z"), nlpar = "V"))
})
test_that("make_index_names returns correct 1 and 2 dimensional indices", {
  expect_equal(make_index_names(rownames = 1:2), c("[1]", "[2]"))
  expect_equal(make_index_names(rownames = 1:2, colnames = 1:3, dim = 1),
               c("[1]", "[2]"))
  # dim = 2 produces column-major "[row,col]" labels.
  expect_equal(make_index_names(rownames = c("a","b"), colnames = 1:3, dim = 2),
               c("[a,1]", "[b,1]", "[a,2]", "[b,2]", "[a,3]", "[b,3]"))
})
test_that("combine_duplicates works as expected", {
  # Unique names pass through; duplicated names are concatenated in order.
  expect_equal(combine_duplicates(list(a = c(2,2), b = c("a", "c"))),
               list(a = c(2,2), b = c("a", "c")))
  expect_equal(combine_duplicates(list(a = c(2,2), b = c("a", "c"), a = c(4,2))),
               list(a = c(2,2,4,2), b = c("a", "c")))
})
test_that("change_prior returns correct lists to be understood by rename_pars", {
  pars <- c("b", "b_1", "bp", "bp_1", "prior_b", "prior_b_1",
            "prior_b_3", "sd_x[1]", "prior_b_1")
  pars <- c("b", "b_1", "bp", "bp_1", "prior_b", "prior_b_1",
            "prior_b_3", "sd_x[1]", "prior_bp_1")
  expect_equal(change_prior(class = "b", pars = pars,
                            names = c("x1", "x3", "x2")),
               list(list(pos = 6, oldname = "prior_b_1",
                         pnames = "prior_b_x1", fnames = "prior_b_x1"),
                    list(pos = 7, oldname = "prior_b_3",
                         pnames = "prior_b_x2", fnames = "prior_b_x2")))
  # new_class remaps the renamed parameters from class "bp" to class "b".
  expect_equal(change_prior(class = "bp", pars = pars,
                            names = c("x1", "x2"), new_class = "b"),
               list(list(pos = 9, oldname = "prior_bp_1",
                         pnames = "prior_b_x1", fnames = "prior_b_x1")))
})
test_that("change_fixef suggests renaming of fixed effects intercepts", {
  pars <- c("b[1]", "b_Intercept[1]", "b_Intercept[2]", "sigma_y")
  # Element [[2]] is the intercept-renaming instruction; pos flags which
  # pars entries belong to b_Intercept.
  expect_equal(change_fixef(fixef = "x", intercepts = c("main", "spec"),
                            pars = pars)[[2]],
               list(pos = c(FALSE, TRUE, TRUE, FALSE), oldname = "b_Intercept",
                    pnames = c("b_main", "b_spec"), fnames = c("b_main", "b_spec")))
})
|
/brms/tests/testthat/tests.rename.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 4,455
|
r
|
# testthat unit tests for internal parameter-renaming helpers (the package's
# "brms-model" naming machinery): rename(), model_name(), make_group_frame(),
# make_index_names(), combine_duplicates(), change_prior(), change_fixef().
test_that("rename returns an error on duplicated names", {
  # Substituting special characters must not collapse distinct inputs into
  # the same internal name; check_dup = TRUE should raise with the offenders.
  expect_error(rename(c(letters[1:4],"a()","a["), check_dup = TRUE), fixed = TRUE,
               paste("Internal renaming of variables led to duplicated names.",
                     "\nOccured for variables: a, a(), a["))
  expect_error(rename(c("aDb","a/b","b"), check_dup = TRUE), fixed = TRUE,
               paste("Internal renaming of variables led to duplicated names.",
                     "\nOccured for variables: aDb, a/b"))
  expect_error(rename(c("log(a,b)","logab","bac","ba"), check_dup = TRUE), fixed = TRUE,
               paste("Internal renaming of variables led to duplicated names.",
                     "\nOccured for variables: log(a,b), logab"))
})
test_that("rename perform correct renaming", {
  names <- c("acd", "a[23]", "b__")
  # Fixed (literal) substitution of each symbol by the matching sub.
  expect_equal(rename(names, symbols = c("[", "]", "__"), subs = c(".", ".", ":")),
               c("acd", "a.23.", "b:"))
  # fixed = FALSE: symbols are regular expressions (anchors respected).
  expect_equal(rename(names, symbols = c("^\\[", "\\]", "__$"),
                      subs = c(".", ".", ":"), fixed = FALSE),
               c("acd", "a[23.", "b:"))
})
test_that("model_names works correctly", {
  expect_equal(model_name(NA), "brms-model")
  expect_equal(model_name(gaussian()), "gaussian(identity) brms-model")
  expect_equal(model_name(bernoulli(type = "2PL")),
               "bernoulli(logit, 2PL) brms-model")
})
test_that("make_group_frame returns correct first and last indices", {
  expect_equal(make_group_frame(list(a = c("x","Int"), b = c("x"))),
               data.frame(g = c("a", "b"), nlp = "", first = c(1, 1),
                          last = c(2, 1)))
  expect_equal(make_group_frame(list(a = c("x","Int"), b = c("x"),
                                     a = c("y","z"), b = c("b"))),
               data.frame(g = c("a", "b", "a", "b"), nlp = "",
                          first = c(1, 1, 3, 2), last = c(2, 1, 4, 2)))
  expect_equal(make_group_frame(list(a = c("x","Int"), b = c("x"),
                                     a = c("y","z"), a = c("b"))),
               data.frame(g = c("a", "b", "a", "a"), nlp = "",
                          first = c(1, 1, 3, 5), last = c(2, 1, 4, 5)))
  # test in case of a non-linear model
  # NOTE(review): `ranef` below is constructed but never asserted on -- the
  # non-linear case appears to be an unfinished test; confirm and complete.
  ranef <- list(a = structure(c("x","Int"), nlpar = "U"),
                b = structure(c("x"), nlpar = "U"),
                a = structure(c("y","z"), nlpar = "V"))
})
test_that("make_index_names returns correct 1 and 2 dimensional indices", {
  expect_equal(make_index_names(rownames = 1:2), c("[1]", "[2]"))
  expect_equal(make_index_names(rownames = 1:2, colnames = 1:3, dim = 1),
               c("[1]", "[2]"))
  # dim = 2 produces column-major "[row,col]" labels.
  expect_equal(make_index_names(rownames = c("a","b"), colnames = 1:3, dim = 2),
               c("[a,1]", "[b,1]", "[a,2]", "[b,2]", "[a,3]", "[b,3]"))
})
test_that("combine_duplicates works as expected", {
  # Unique names pass through; duplicated names are concatenated in order.
  expect_equal(combine_duplicates(list(a = c(2,2), b = c("a", "c"))),
               list(a = c(2,2), b = c("a", "c")))
  expect_equal(combine_duplicates(list(a = c(2,2), b = c("a", "c"), a = c(4,2))),
               list(a = c(2,2,4,2), b = c("a", "c")))
})
test_that("change_prior returns correct lists to be understood by rename_pars", {
  pars <- c("b", "b_1", "bp", "bp_1", "prior_b", "prior_b_1",
            "prior_b_3", "sd_x[1]", "prior_bp_1")
  expect_equal(change_prior(class = "b", pars = pars,
                            names = c("x1", "x3", "x2")),
               list(list(pos = 6, oldname = "prior_b_1",
                         pnames = "prior_b_x1", fnames = "prior_b_x1"),
                    list(pos = 7, oldname = "prior_b_3",
                         pnames = "prior_b_x2", fnames = "prior_b_x2")))
  # new_class remaps the renamed parameters from class "bp" to class "b".
  expect_equal(change_prior(class = "bp", pars = pars,
                            names = c("x1", "x2"), new_class = "b"),
               list(list(pos = 9, oldname = "prior_bp_1",
                         pnames = "prior_b_x1", fnames = "prior_b_x1")))
})
test_that("change_fixef suggests renaming of fixed effects intercepts", {
  pars <- c("b[1]", "b_Intercept[1]", "b_Intercept[2]", "sigma_y")
  # Element [[2]] is the intercept-renaming instruction; pos flags which
  # pars entries belong to b_Intercept.
  expect_equal(change_fixef(fixef = "x", intercepts = c("main", "spec"),
                            pars = pars)[[2]],
               list(pos = c(FALSE, TRUE, TRUE, FALSE), oldname = "b_Intercept",
                    pnames = c("b_main", "b_spec"), fnames = c("b_main", "b_spec")))
})
|
#' Find Duplicated Words in a Text String
#'
#' Splits a character string on single spaces and reports the words/word
#' chunks that occur at least \code{threshold + 1} times (default: at least
#' twice). Intended for internal use.
#' @param string A character string.
#' @param threshold An integer of the minimal number of repeats.
#' @return Returns a vector of all duplicated words/chunks.
#' @export
#' @examples
#' \dontrun{
#' duplicates(DATA$state)
#' duplicates(DATA$state[1])
#' }
duplicates <- #used in trans.venn
function(string, threshold = 1) {
    # Tokenize on single spaces; sorting puts identical words side by side.
    words <- sort(unlist(strsplit(string, " ")))
    if (threshold <= 1) {
        # Default behaviour: any word appearing two or more times.
        return(unique(words[duplicated(words)]))
    }
    # Explicit threshold: keep words whose frequency reaches the cut-off.
    counts <- table(words)
    names(counts)[counts >= threshold]
}
|
/R/duplicates.R
|
no_license
|
abresler/qdap
|
R
| false
| false
| 616
|
r
|
#' Find Duplicated Words in a Text String
#'
#' Splits a character string on single spaces and reports the words/word
#' chunks that occur at least \code{threshold + 1} times (default: at least
#' twice). Intended for internal use.
#' @param string A character string.
#' @param threshold An integer of the minimal number of repeats.
#' @return Returns a vector of all duplicated words/chunks.
#' @export
#' @examples
#' \dontrun{
#' duplicates(DATA$state)
#' duplicates(DATA$state[1])
#' }
duplicates <- #used in trans.venn
function(string, threshold = 1) {
    # Tokenize on single spaces; sorting puts identical words side by side.
    words <- sort(unlist(strsplit(string, " ")))
    if (threshold <= 1) {
        # Default behaviour: any word appearing two or more times.
        return(unique(words[duplicated(words)]))
    }
    # Explicit threshold: keep words whose frequency reaches the cut-off.
    counts <- table(words)
    names(counts)[counts >= threshold]
}
|
source('pval_correction.R')
# Parse permuted PASCAL pathway results and join them with the gene content
# of each cluster.
#   resultPaths  - character vector of PASCAL output files (one per study);
#                  the study name is recovered from each file name (fields
#                  before the "PathwaySet" token, joined by "_").
#   clusterPaths - .gmt files giving the gene ids of each cluster.
#   appendInfo   - if TRUE, also append gene-enrichment and OpenTarget scores.
#   empirical    - if TRUE (and appendInfo), use the empirical/permutation
#                  based OpenTarget annotation instead of the plain one.
# Returns a data frame with one row per cluster x study, carrying the PASCAL
# chi2 p-value, the cluster's genes and gene names, and metadata (Biotype,
# CellType, AggregationType, Level) parsed out of the cluster name.
ParsePASCALFilePermuted <- function(resultPaths,clusterPaths,appendInfo=T,empirical){
  #Load all PASCAL results
  studyNames <- sapply(resultPaths,function(x){
    splitStr <- unlist(strsplit(x,'/'));
    fileName <- splitStr[length(splitStr)];
    fileNameSplit <- unlist(strsplit(fileName,'[.]'));
    endIndex <- grep('PathwaySet',fileNameSplit);
    studyName <- paste(fileNameSplit[1:(endIndex-1)],collapse = '_');
    return(studyName)
  })
  resultDf <- do.call(rbind,lapply(1:length(resultPaths),function(i) {
    df <- data.table::fread(resultPaths[[i]],header = T);
    df$StudyName <- rep(studyNames[i],nrow(df));
    return(df)}))
  # Split "<realname>_perm_<k>" cluster names into permutation index and name.
  resultDf <- resultDf %>%
    dplyr::mutate(Perm = unlist(lapply(Name,function(x) as.numeric(gsub(x=x,pattern = '.*_perm_',replacement = '')))),
                  RealName = unlist(lapply(Name,function(x) gsub(x=x,pattern = '_perm_.*',replacement = ''))))
  #For each cluster, load it's respective gene ID
  clustersInfo <- unlist(lapply(clusterPaths,function(x) qusage::read.gmt(file=x)),recursive = F)
  clustersDf <- data_frame(ClusterName = names(clustersInfo),Genes = clustersInfo)
  #Join result and gene info for each cluster. Filter for those with P-values. Then, extract metainfo from cluster Name (level,type)
  # Cluster names are assumed to look like
  # "<biotype>_<celltype>_<aggregation>_..._level_<k>_..." -- TODO confirm.
  joinedDf <- dplyr::left_join(resultDf,clustersDf,by=c('Name'='ClusterName')) %>% filter(!is.na(chi2Pvalue)) %>%
    dplyr::mutate(Size=sapply(Genes,length)) %>% #Size info
    dplyr::mutate(Biotype=factor(sapply(Name,function(x) unlist(strsplit(x,'_'))[1]))) %>% #Biotype
    dplyr::mutate(CellType=factor(sapply(Name,function(x) unlist(strsplit(x,'_'))[2]))) %>% #Cell Type
    dplyr::mutate(AggregationType=factor(sapply(Name,function(x) unlist(strsplit(x,'_'))[3]))) %>% #Gene or transcript
    dplyr::mutate(Level=sapply(Name,function(x){
      splitString <- unlist(strsplit(x,'_'));
      return(as.numeric(splitString[which(splitString=='level')+1]))})) #Gene or transcript
  #Have a gene Name column for convenience
  # Map gene ids to gene names via a lookup table loaded from the GTF table.
  load(file='../../Count_Data/geneGtfTableFull.rda')
  geneIdToName <- hashmap::hashmap(keys = geneGtfTableFull$gene_id,values = geneGtfTableFull$gene_name)
  library(parallel)
  joinedDf$GeneNames <- mclapply(joinedDf$Genes,function(x) geneIdToName[[x]],mc.cores = 1)
  # Background gene sets for the enrichment/OpenTarget annotation below.
  codingGenesInNetwork <- unique(unlist(joinedDf %>% dplyr::filter(Biotype=='coding') %>% {.$GeneNames}))
  allGenesInNetwork <- unique(unlist(joinedDf %>% dplyr::filter(Biotype=='all') %>% {.$GeneNames}))
  if(appendInfo){
    source('append_gene_enrichment.R')
    joinedDf <- AppendGeneEnrichment(joinedDf,codingGenesInNetwork=codingGenesInNetwork,allGenesInNetwork=allGenesInNetwork)
    if(empirical){
      source('append_open_target_empirical.R')
      joinedDf <- AppendOpenTargetEmpirical(joinedDf,
                                            permPath = '../../Count_Data/OpenTarget/',
                                            csvPath = '/local/data/public/zmx21/zmx21_private/GSK/OpenTargets_scores/',
                                            codingGenesInNetwork=codingGenesInNetwork,
                                            allGenesInNetwork=allGenesInNetwork)
    }else{
      source('append_open_target.R')
      joinedDf <- AppendOpenTarget(joinedDf,
                                   csvPath = '/local/data/public/zmx21/zmx21_private/GSK/OpenTargets_scores/',
                                   codingGenesInNetwork=codingGenesInNetwork,
                                   allGenesInNetwork=allGenesInNetwork)
    }
  }
  return(joinedDf)
}
#Get path to all studies in a directory
#Returns the full paths of every PASCAL output file (file names containing
#"PathwaySet") found in PASCALResultPath, as a character vector named by the
#bare file names. NOTE: the directory path is concatenated directly, so
#PASCALResultPath is expected to end with "/".
GetPathToPASCALResults <- function(PASCALResultPath){
  allStudies <- dir(PASCALResultPath)
  allStudies <- allStudies[grep('PathwaySet',allStudies)]
  #Vectorised paste0 + setNames replaces the per-element sapply: it keeps the
  #same named return value but is type-stable, still returning a (named)
  #character vector instead of an empty list when no file matches.
  allStudiesPath <- setNames(paste0(PASCALResultPath,allStudies),allStudies)
  return(allStudiesPath)
}
# Load one (or two) directories of permuted PASCAL results, keep clusters
# whose Size lies strictly between sizeLower and sizeUpper, and append
# corrected p-values (AppendCorrectedPVal from pval_correction.R).
#   filter - if TRUE, drop rows duplicated on StudyName/chi2Pvalue/Size/Biotype.
#   resultPaths/clusterPaths - parallel vectors; element 1 is parsed always,
#     element 2 only when both vectors have length 2.
LoadPASCALResultsPermuted <- function(filter=T,resultPaths,clusterPaths,sizeLower=10,sizeUpper=300,appendInfo=T,empirical=F){
  source('pval_correction.R')
  # First result/cluster pair.
  microgliaAllGenesPath <- GetPathToPASCALResults(resultPaths[1])
  JoinedDfMicroglia <- ParsePASCALFilePermuted(microgliaAllGenesPath,clusterPaths[1],appendInfo,empirical)
  # Optional second result/cluster pair, appended row-wise.
  if(length(resultPaths)== 2 & length(clusterPaths) == 2){
    microgliaCodingGenesPath <- GetPathToPASCALResults(resultPaths[2])
    JoinedDfMicroglia <- rbind(JoinedDfMicroglia,ParsePASCALFilePermuted(microgliaCodingGenesPath,clusterPaths[2],appendInfo,empirical))
  }
  # Size filter is exclusive on both bounds.
  JoinedDfMicroglia <- JoinedDfMicroglia %>% dplyr::filter(Size > sizeLower & Size < sizeUpper)
  JoinedDfMicroglia <- AppendCorrectedPVal(JoinedDfMicroglia)
  if(filter){
    #Remove repeating rows
    JoinedDfMicroglia <- JoinedDfMicroglia %>% dplyr::distinct(StudyName,chi2Pvalue,Size,Biotype,.keep_all=T)
  }
  return(JoinedDfMicroglia)
}
# Load the 100-permutation Pearson (coding genes) PASCAL results with a
# permissive size filter and no enrichment/OpenTarget annotation.
JoinedDfMicrogliaPearsonPermuted<- LoadPASCALResultsPermuted(filter=T,c('../../GWAS/PASCAL_results/Permuted_Pearson_100/permuted_Coding_Pearson/'),
                                                             c('../../genesets/Permuted_Pearson_100/permuted_Coding_Pearson.gmt'),
                                                             sizeLower = 3,sizeUpper = 300,appendInfo = F,empirical = F)
|
/PASCAL/analyze_PASCAL_output_permuted.R
|
no_license
|
zmx21/GSK_NI
|
R
| false
| false
| 5,297
|
r
|
source('pval_correction.R')
# Parse permuted PASCAL pathway results and join them with the gene content
# of each cluster.
#   resultPaths  - character vector of PASCAL output files (one per study);
#                  the study name is recovered from each file name (fields
#                  before the "PathwaySet" token, joined by "_").
#   clusterPaths - .gmt files giving the gene ids of each cluster.
#   appendInfo   - if TRUE, also append gene-enrichment and OpenTarget scores.
#   empirical    - if TRUE (and appendInfo), use the empirical/permutation
#                  based OpenTarget annotation instead of the plain one.
# Returns a data frame with one row per cluster x study, carrying the PASCAL
# chi2 p-value, the cluster's genes and gene names, and metadata (Biotype,
# CellType, AggregationType, Level) parsed out of the cluster name.
ParsePASCALFilePermuted <- function(resultPaths,clusterPaths,appendInfo=T,empirical){
  #Load all PASCAL results
  studyNames <- sapply(resultPaths,function(x){
    splitStr <- unlist(strsplit(x,'/'));
    fileName <- splitStr[length(splitStr)];
    fileNameSplit <- unlist(strsplit(fileName,'[.]'));
    endIndex <- grep('PathwaySet',fileNameSplit);
    studyName <- paste(fileNameSplit[1:(endIndex-1)],collapse = '_');
    return(studyName)
  })
  resultDf <- do.call(rbind,lapply(1:length(resultPaths),function(i) {
    df <- data.table::fread(resultPaths[[i]],header = T);
    df$StudyName <- rep(studyNames[i],nrow(df));
    return(df)}))
  # Split "<realname>_perm_<k>" cluster names into permutation index and name.
  resultDf <- resultDf %>%
    dplyr::mutate(Perm = unlist(lapply(Name,function(x) as.numeric(gsub(x=x,pattern = '.*_perm_',replacement = '')))),
                  RealName = unlist(lapply(Name,function(x) gsub(x=x,pattern = '_perm_.*',replacement = ''))))
  #For each cluster, load it's respective gene ID
  clustersInfo <- unlist(lapply(clusterPaths,function(x) qusage::read.gmt(file=x)),recursive = F)
  clustersDf <- data_frame(ClusterName = names(clustersInfo),Genes = clustersInfo)
  #Join result and gene info for each cluster. Filter for those with P-values. Then, extract metainfo from cluster Name (level,type)
  # Cluster names are assumed to look like
  # "<biotype>_<celltype>_<aggregation>_..._level_<k>_..." -- TODO confirm.
  joinedDf <- dplyr::left_join(resultDf,clustersDf,by=c('Name'='ClusterName')) %>% filter(!is.na(chi2Pvalue)) %>%
    dplyr::mutate(Size=sapply(Genes,length)) %>% #Size info
    dplyr::mutate(Biotype=factor(sapply(Name,function(x) unlist(strsplit(x,'_'))[1]))) %>% #Biotype
    dplyr::mutate(CellType=factor(sapply(Name,function(x) unlist(strsplit(x,'_'))[2]))) %>% #Cell Type
    dplyr::mutate(AggregationType=factor(sapply(Name,function(x) unlist(strsplit(x,'_'))[3]))) %>% #Gene or transcript
    dplyr::mutate(Level=sapply(Name,function(x){
      splitString <- unlist(strsplit(x,'_'));
      return(as.numeric(splitString[which(splitString=='level')+1]))})) #Gene or transcript
  #Have a gene Name column for convenience
  # Map gene ids to gene names via a lookup table loaded from the GTF table.
  load(file='../../Count_Data/geneGtfTableFull.rda')
  geneIdToName <- hashmap::hashmap(keys = geneGtfTableFull$gene_id,values = geneGtfTableFull$gene_name)
  library(parallel)
  joinedDf$GeneNames <- mclapply(joinedDf$Genes,function(x) geneIdToName[[x]],mc.cores = 1)
  # Background gene sets for the enrichment/OpenTarget annotation below.
  codingGenesInNetwork <- unique(unlist(joinedDf %>% dplyr::filter(Biotype=='coding') %>% {.$GeneNames}))
  allGenesInNetwork <- unique(unlist(joinedDf %>% dplyr::filter(Biotype=='all') %>% {.$GeneNames}))
  if(appendInfo){
    source('append_gene_enrichment.R')
    joinedDf <- AppendGeneEnrichment(joinedDf,codingGenesInNetwork=codingGenesInNetwork,allGenesInNetwork=allGenesInNetwork)
    if(empirical){
      source('append_open_target_empirical.R')
      joinedDf <- AppendOpenTargetEmpirical(joinedDf,
                                            permPath = '../../Count_Data/OpenTarget/',
                                            csvPath = '/local/data/public/zmx21/zmx21_private/GSK/OpenTargets_scores/',
                                            codingGenesInNetwork=codingGenesInNetwork,
                                            allGenesInNetwork=allGenesInNetwork)
    }else{
      source('append_open_target.R')
      joinedDf <- AppendOpenTarget(joinedDf,
                                   csvPath = '/local/data/public/zmx21/zmx21_private/GSK/OpenTargets_scores/',
                                   codingGenesInNetwork=codingGenesInNetwork,
                                   allGenesInNetwork=allGenesInNetwork)
    }
  }
  return(joinedDf)
}
#Get path to all studies in a directory
#Returns the full paths of every PASCAL output file (file names containing
#"PathwaySet") found in PASCALResultPath, as a character vector named by the
#bare file names. NOTE: the directory path is concatenated directly, so
#PASCALResultPath is expected to end with "/".
GetPathToPASCALResults <- function(PASCALResultPath){
  allStudies <- dir(PASCALResultPath)
  allStudies <- allStudies[grep('PathwaySet',allStudies)]
  #Vectorised paste0 + setNames replaces the per-element sapply: it keeps the
  #same named return value but is type-stable, still returning a (named)
  #character vector instead of an empty list when no file matches.
  allStudiesPath <- setNames(paste0(PASCALResultPath,allStudies),allStudies)
  return(allStudiesPath)
}
# Load one (or two) directories of permuted PASCAL results, keep clusters
# whose Size lies strictly between sizeLower and sizeUpper, and append
# corrected p-values (AppendCorrectedPVal from pval_correction.R).
#   filter - if TRUE, drop rows duplicated on StudyName/chi2Pvalue/Size/Biotype.
#   resultPaths/clusterPaths - parallel vectors; element 1 is parsed always,
#     element 2 only when both vectors have length 2.
LoadPASCALResultsPermuted <- function(filter=T,resultPaths,clusterPaths,sizeLower=10,sizeUpper=300,appendInfo=T,empirical=F){
  source('pval_correction.R')
  # First result/cluster pair.
  microgliaAllGenesPath <- GetPathToPASCALResults(resultPaths[1])
  JoinedDfMicroglia <- ParsePASCALFilePermuted(microgliaAllGenesPath,clusterPaths[1],appendInfo,empirical)
  # Optional second result/cluster pair, appended row-wise.
  if(length(resultPaths)== 2 & length(clusterPaths) == 2){
    microgliaCodingGenesPath <- GetPathToPASCALResults(resultPaths[2])
    JoinedDfMicroglia <- rbind(JoinedDfMicroglia,ParsePASCALFilePermuted(microgliaCodingGenesPath,clusterPaths[2],appendInfo,empirical))
  }
  # Size filter is exclusive on both bounds.
  JoinedDfMicroglia <- JoinedDfMicroglia %>% dplyr::filter(Size > sizeLower & Size < sizeUpper)
  JoinedDfMicroglia <- AppendCorrectedPVal(JoinedDfMicroglia)
  if(filter){
    #Remove repeating rows
    JoinedDfMicroglia <- JoinedDfMicroglia %>% dplyr::distinct(StudyName,chi2Pvalue,Size,Biotype,.keep_all=T)
  }
  return(JoinedDfMicroglia)
}
# Load the 100-permutation Pearson (coding genes) PASCAL results with a
# permissive size filter and no enrichment/OpenTarget annotation.
JoinedDfMicrogliaPearsonPermuted<- LoadPASCALResultsPermuted(filter=T,c('../../GWAS/PASCAL_results/Permuted_Pearson_100/permuted_Coding_Pearson/'),
                                                             c('../../genesets/Permuted_Pearson_100/permuted_Coding_Pearson.gmt'),
                                                             sizeLower = 3,sizeUpper = 300,appendInfo = F,empirical = F)
|
# Extended data set from Thompson (1990)
# -------------------------
# Function mainACS
# --------------------
# Enumerates every initial sample of size n = 2 from the 6-unit population
# yi and compares adaptive cluster sampling (ACS) estimators of the
# population mean:
#   HT_s0           - plain HT (sample mean) on the initial sample only
#   ACS_HTmodif     - modified HT estimator (strategy I, edge set B)
#   ACS_HTmodif_RB  - its Rao-Blackwellised version
#   ACS_HT_Bstar    - HT estimator under edge set B* (strategy II)
#   ACS_HT_SD       - sample-dependent strategy: switches from B* to one of
#                     two B-dagger edge sets depending on which units the
#                     first sample contains
# Arguments:
#   first.sample - y-values of the forced first initial sample; it seeds the
#                  enumeration order and drives the sample-dependent choice.
#   choose20     - tie-break when the first sample contains 20 together with
#                  10 or 1000: TRUE picks the B-dagger variant with ancestor
#                  network {20}, FALSE the one with {10,1000}.
# Side effects: prints a summary table; returns list(muHT_SD = ...).
# Note: uses sample() internally, so the enumeration order (not the
# estimator means/variances) is random unless the RNG seed is fixed.
mainACS <- function(first.sample=c(0,2),choose20=T){
yi <- c(1,0,20,2,10,1000)
N <- length(yi)
n <- 2
pri <- n/N
# Units are identified by their y-values (all distinct here).
idx_F <- yi
idx_omega <- yi
# Edge lists (i -> k) defining which units are added when unit i is sampled.
# Strategy with B
edgeik <- data.frame(i=c(1,0,20,20,2,10,10,10,1000,1000,1000),k=c(1,0,20,2,2,2,10,1000,2,10,1000))
# Strategy with B*
edgeik_star <- data.frame(i=c(1,0,20,2,10,10,1000,1000),k=c(1,0,20,2,10,1000,10,1000))
# Strategy with Bdagger, with ancestor ntw. {20}
edgeik_dagger_opta <- data.frame(i=c(1,0,20,20,10,10,1000,1000),k=c(1,0,20,2,10,1000,10,1000))
# Strategy with Bdagger, with ancestor ntw. {10,1000}
edgeik_dagger_optb <- data.frame(i=c(1,0,20,10,10,10,1000,1000,1000),k=c(1,0,20,2,10,1000,2,10,1000))
# Multiplicities
# card_betak_* = number of edges pointing into unit k (used for the
# inclusion probabilities prk below).
card_betak_star <- NULL
card_betak_dagger_opta <- NULL
card_betak_dagger_optb <- NULL
for(k in idx_omega){
  card_betak_star <- c(card_betak_star,sum(edgeik_star$k %in% k))
  card_betak_dagger_opta <- c(card_betak_dagger_opta,sum(edgeik_dagger_opta$k %in% k))
  card_betak_dagger_optb <- c(card_betak_dagger_optb,sum(edgeik_dagger_optb$k %in% k))
}
# All C(N, n) possible initial samples (by unit index).
all.subsets <- combn(N,n)
nr.all.subsets <- dim(all.subsets)[2]
# Start the enumeration with the subset matching first.sample, then visit
# the remaining subsets in random order.
tmp.subset <- c(1:nr.all.subsets)[apply(all.subsets,2,FUN = function(x) sum(x %in% which(idx_F %in% first.sample))==2)]
B <- c(1:nr.all.subsets)
muHT_s0 <- NULL
muHT <- NULL
muHT_SD <- NULL
s1_all <- rep(list(0),nr.all.subsets)
for(b in 1:nr.all.subsets){
  if(b==1){s <- tmp.subset}
  if(b>1 & b<nr.all.subsets){B <- c(1:nr.all.subsets)[!(c(1:nr.all.subsets) %in% tmp.subset)]
  s <- sample(B,1)
  tmp.subset <- c(tmp.subset,s)
  }
  if(b==nr.all.subsets){B <- c(1:nr.all.subsets)[!(c(1:nr.all.subsets) %in% tmp.subset)]
  s <- B}
  s0 <- idx_F[all.subsets[,s]]
  # HTE based on s0
  muHT_s0 <- c(muHT_s0,mean(s0))
  # Strategy I, B
  s1 <- unique(edgeik$k[edgeik$i %in% s0])
  # Strategy II, Bstar
  s1_star <- unique(edgeik_star$k[edgeik_star$i %in% s0])
  # Strategy I, Modified HTE & Strategy II, HTE
  prk <- 1-choose(N-card_betak_star,n)/choose(N,n)
  tmp.muHT <- sum(yi[idx_F %in% s1_star]/prk[idx_F %in% s1_star])/N
  # Sample-dependent strategy
  # Default is B*; switch to a B-dagger edge set when the first sample hits
  # unit 20 (option a) or units 10/1000 (option b); choose20 breaks ties.
  s1_SD <- s1_star
  prk_SD <- prk
  sample.str <- 'Bstar'
  if(sum(intersect(c(20, 10, 1000), first.sample))>0){
    if(sum(intersect(c(20,10,1000),first.sample) %in% 20)==1){
      s1_SD <- unique(edgeik_dagger_opta$k[edgeik_dagger_opta$i %in% s0])
      prk_SD <- 1-choose(N-card_betak_dagger_opta,n)/choose(N,n)
      sample.str <- 'Bdagger'
    } }
  if(sum(intersect(c(10, 1000), first.sample))>0){
    if(sum((intersect(c(10,1000),first.sample) %in% c(10,1000)))>0){
      s1_SD <- unique(edgeik_dagger_optb$k[edgeik_dagger_optb$i %in% s0])
      prk_SD <- 1-choose(N-card_betak_dagger_optb,n)/choose(N,n)
      sample.str <- 'Bdagger'
    } }
  if(sum(c(20,10,1000) %in% first.sample)>0){
    #  if(20 %in% (intersect(c(20,10,1000),first.sample[first.sample %in% c(20,10,1000)]))){
    if(sum(first.sample %in% c(20,10))==2 | sum(first.sample %in% c(20,1000))==2){
      if(choose20==T){
        s1_SD <- unique(edgeik_dagger_opta$k[edgeik_dagger_opta$i %in% s0])
        prk_SD <- 1-choose(N-card_betak_dagger_opta,n)/choose(N,n)
        sample.str <- 'Bdagger'
      }
      if(choose20==F){
        s1_SD <- unique(edgeik_dagger_optb$k[edgeik_dagger_optb$i %in% s0])
        prk_SD <- 1-choose(N-card_betak_dagger_optb,n)/choose(N,n)
        sample.str <- 'Bdagger'
      }
    } }
  tmp.muHT_SD <- sum(yi[idx_F %in% s1_SD]/prk_SD[idx_F %in% s1_SD])/N
  s1_all[[b]] <- s1
  muHT <- c(muHT,tmp.muHT)
  muHT_SD <- c(muHT_SD, tmp.muHT_SD)
}
# Rao-Blackwellisation for strategy I
# Average muHT over all initial samples that lead to the same final sample.
muHT_modified_RB <- NULL
for(j in 1:nr.all.subsets){
  # j <- 14
  sel <- sapply(s1_all,FUN = function(x) sum(s1_all[[j]] %in% x)==length(s1_all[[j]]) & length(s1_all[[j]])==length(x))
  muHT_modified_RB <- c(muHT_modified_RB,mean(muHT[sel]))
}
cat("first.sample =",first.sample,"\n")
cat("sample_dependent_strategy =",sample.str,"\n")
cat("pop.mean =",mean(yi),"\n")
# Design expectation and (population) variance over the full enumeration.
result_ACS <- t(array(c(mean(muHT_s0),mean(muHT),mean(muHT_modified_RB),mean(muHT),mean(muHT_SD),c(var(muHT_s0),var(muHT),var(muHT_modified_RB),var(muHT),var(muHT_SD))*(nr.all.subsets-1)/nr.all.subsets),c(5,2)))
colnames(result_ACS) <- c('HT_s0','ACS_HTmodif','ACS_HTmodif_RB','ACS_HT_Bstar','ACS_HT_SD')
rownames(result_ACS) <- c('ExpectedValue','Variance')
print(result_ACS)
return(list(muHT_SD=muHT_SD))
}
# Worked examples for different forced first samples.
mainACS(first.sample = c(1,2))
mainACS(first.sample = c(20,2))
mainACS(first.sample = c(20,10))
mainACS(first.sample = c(2,10),choose20=F)
mainACS(first.sample = c(1000,10))
|
/ISI2021_PS_Day1/Day1_PS_ACS_R-codes.R
|
no_license
|
moguzalper/Graph_Sampling_ISI2021
|
R
| false
| false
| 4,719
|
r
|
# Extended data set from Thompson (1990)
# -------------------------
# Function mainACS
# --------------------
# Enumerates every initial sample of size n = 2 from the 6-unit population
# yi and compares adaptive cluster sampling (ACS) estimators of the
# population mean:
#   HT_s0           - plain HT (sample mean) on the initial sample only
#   ACS_HTmodif     - modified HT estimator (strategy I, edge set B)
#   ACS_HTmodif_RB  - its Rao-Blackwellised version
#   ACS_HT_Bstar    - HT estimator under edge set B* (strategy II)
#   ACS_HT_SD       - sample-dependent strategy: switches from B* to one of
#                     two B-dagger edge sets depending on which units the
#                     first sample contains
# Arguments:
#   first.sample - y-values of the forced first initial sample; it seeds the
#                  enumeration order and drives the sample-dependent choice.
#   choose20     - tie-break when the first sample contains 20 together with
#                  10 or 1000: TRUE picks the B-dagger variant with ancestor
#                  network {20}, FALSE the one with {10,1000}.
# Side effects: prints a summary table; returns list(muHT_SD = ...).
# Note: uses sample() internally, so the enumeration order (not the
# estimator means/variances) is random unless the RNG seed is fixed.
mainACS <- function(first.sample=c(0,2),choose20=T){
yi <- c(1,0,20,2,10,1000)
N <- length(yi)
n <- 2
pri <- n/N
# Units are identified by their y-values (all distinct here).
idx_F <- yi
idx_omega <- yi
# Edge lists (i -> k) defining which units are added when unit i is sampled.
# Strategy with B
edgeik <- data.frame(i=c(1,0,20,20,2,10,10,10,1000,1000,1000),k=c(1,0,20,2,2,2,10,1000,2,10,1000))
# Strategy with B*
edgeik_star <- data.frame(i=c(1,0,20,2,10,10,1000,1000),k=c(1,0,20,2,10,1000,10,1000))
# Strategy with Bdagger, with ancestor ntw. {20}
edgeik_dagger_opta <- data.frame(i=c(1,0,20,20,10,10,1000,1000),k=c(1,0,20,2,10,1000,10,1000))
# Strategy with Bdagger, with ancestor ntw. {10,1000}
edgeik_dagger_optb <- data.frame(i=c(1,0,20,10,10,10,1000,1000,1000),k=c(1,0,20,2,10,1000,2,10,1000))
# Multiplicities
# card_betak_* = number of edges pointing into unit k (used for the
# inclusion probabilities prk below).
card_betak_star <- NULL
card_betak_dagger_opta <- NULL
card_betak_dagger_optb <- NULL
for(k in idx_omega){
  card_betak_star <- c(card_betak_star,sum(edgeik_star$k %in% k))
  card_betak_dagger_opta <- c(card_betak_dagger_opta,sum(edgeik_dagger_opta$k %in% k))
  card_betak_dagger_optb <- c(card_betak_dagger_optb,sum(edgeik_dagger_optb$k %in% k))
}
# All C(N, n) possible initial samples (by unit index).
all.subsets <- combn(N,n)
nr.all.subsets <- dim(all.subsets)[2]
# Start the enumeration with the subset matching first.sample, then visit
# the remaining subsets in random order.
tmp.subset <- c(1:nr.all.subsets)[apply(all.subsets,2,FUN = function(x) sum(x %in% which(idx_F %in% first.sample))==2)]
B <- c(1:nr.all.subsets)
muHT_s0 <- NULL
muHT <- NULL
muHT_SD <- NULL
s1_all <- rep(list(0),nr.all.subsets)
for(b in 1:nr.all.subsets){
  if(b==1){s <- tmp.subset}
  if(b>1 & b<nr.all.subsets){B <- c(1:nr.all.subsets)[!(c(1:nr.all.subsets) %in% tmp.subset)]
  s <- sample(B,1)
  tmp.subset <- c(tmp.subset,s)
  }
  if(b==nr.all.subsets){B <- c(1:nr.all.subsets)[!(c(1:nr.all.subsets) %in% tmp.subset)]
  s <- B}
  s0 <- idx_F[all.subsets[,s]]
  # HTE based on s0
  muHT_s0 <- c(muHT_s0,mean(s0))
  # Strategy I, B
  s1 <- unique(edgeik$k[edgeik$i %in% s0])
  # Strategy II, Bstar
  s1_star <- unique(edgeik_star$k[edgeik_star$i %in% s0])
  # Strategy I, Modified HTE & Strategy II, HTE
  prk <- 1-choose(N-card_betak_star,n)/choose(N,n)
  tmp.muHT <- sum(yi[idx_F %in% s1_star]/prk[idx_F %in% s1_star])/N
  # Sample-dependent strategy
  # Default is B*; switch to a B-dagger edge set when the first sample hits
  # unit 20 (option a) or units 10/1000 (option b); choose20 breaks ties.
  s1_SD <- s1_star
  prk_SD <- prk
  sample.str <- 'Bstar'
  if(sum(intersect(c(20, 10, 1000), first.sample))>0){
    if(sum(intersect(c(20,10,1000),first.sample) %in% 20)==1){
      s1_SD <- unique(edgeik_dagger_opta$k[edgeik_dagger_opta$i %in% s0])
      prk_SD <- 1-choose(N-card_betak_dagger_opta,n)/choose(N,n)
      sample.str <- 'Bdagger'
    } }
  if(sum(intersect(c(10, 1000), first.sample))>0){
    if(sum((intersect(c(10,1000),first.sample) %in% c(10,1000)))>0){
      s1_SD <- unique(edgeik_dagger_optb$k[edgeik_dagger_optb$i %in% s0])
      prk_SD <- 1-choose(N-card_betak_dagger_optb,n)/choose(N,n)
      sample.str <- 'Bdagger'
    } }
  if(sum(c(20,10,1000) %in% first.sample)>0){
    #  if(20 %in% (intersect(c(20,10,1000),first.sample[first.sample %in% c(20,10,1000)]))){
    if(sum(first.sample %in% c(20,10))==2 | sum(first.sample %in% c(20,1000))==2){
      if(choose20==T){
        s1_SD <- unique(edgeik_dagger_opta$k[edgeik_dagger_opta$i %in% s0])
        prk_SD <- 1-choose(N-card_betak_dagger_opta,n)/choose(N,n)
        sample.str <- 'Bdagger'
      }
      if(choose20==F){
        s1_SD <- unique(edgeik_dagger_optb$k[edgeik_dagger_optb$i %in% s0])
        prk_SD <- 1-choose(N-card_betak_dagger_optb,n)/choose(N,n)
        sample.str <- 'Bdagger'
      }
    } }
  tmp.muHT_SD <- sum(yi[idx_F %in% s1_SD]/prk_SD[idx_F %in% s1_SD])/N
  s1_all[[b]] <- s1
  muHT <- c(muHT,tmp.muHT)
  muHT_SD <- c(muHT_SD, tmp.muHT_SD)
}
# Rao-Blackwellisation for strategy I
# Average muHT over all initial samples that lead to the same final sample.
muHT_modified_RB <- NULL
for(j in 1:nr.all.subsets){
  # j <- 14
  sel <- sapply(s1_all,FUN = function(x) sum(s1_all[[j]] %in% x)==length(s1_all[[j]]) & length(s1_all[[j]])==length(x))
  muHT_modified_RB <- c(muHT_modified_RB,mean(muHT[sel]))
}
cat("first.sample =",first.sample,"\n")
cat("sample_dependent_strategy =",sample.str,"\n")
cat("pop.mean =",mean(yi),"\n")
# Design expectation and (population) variance over the full enumeration.
result_ACS <- t(array(c(mean(muHT_s0),mean(muHT),mean(muHT_modified_RB),mean(muHT),mean(muHT_SD),c(var(muHT_s0),var(muHT),var(muHT_modified_RB),var(muHT),var(muHT_SD))*(nr.all.subsets-1)/nr.all.subsets),c(5,2)))
colnames(result_ACS) <- c('HT_s0','ACS_HTmodif','ACS_HTmodif_RB','ACS_HT_Bstar','ACS_HT_SD')
rownames(result_ACS) <- c('ExpectedValue','Variance')
print(result_ACS)
return(list(muHT_SD=muHT_SD))
}
# Worked examples for different forced first samples.
mainACS(first.sample = c(1,2))
mainACS(first.sample = c(20,2))
mainACS(first.sample = c(20,10))
mainACS(first.sample = c(2,10),choose20=F)
mainACS(first.sample = c(1000,10))
|
#' exp.fit.all.log.lin
#'
#' Function takes a calcium imaging file by filename (.mat or csv)
#' and performs a simple exponential regression fit to normalize the deltaF/F signal. This function incorporates a linear
#' term to account for gradual rise in signal, which can alter the single exponential. Will output plots showing original
#' and fitted values, as well as corrected values. Returns the data frame with the corrected trace in column delF.
#' @param filename filepaths of .mat files which have a "signal" and "time" field.
#' @param skip.time number of seconds to skip at the beginning for the exponential fit. N=10 improves the fit.
#' @param matlab if TRUE read the file with R.matlab::readMat (signal sampled at 4 Hz); otherwise read a csv with signal/MeanGCaMP/time columns.
#' @param show.plots if TRUE, print a diagnostic plot of raw (black), fitted (red dashed) and corrected (blue) traces.
#' @param linear if TRUE (and nls = FALSE), fit signal ~ log(time) + time, falling back to log-only when the fitted linear slope is positive.
#' @param nls use nls to perform exponential fit (self-starting SSasymp asymptotic regression), excluding the stimulus window.
#' @param startPulse,endPulse stimulus window bounds in seconds, excluded from the nls fit.
#' @importFrom magrittr "%>%"
#' @importFrom magrittr "%<>%"
#' @export
#' @examples data <- exp.fit.all.log.lin(files[1], skip.time = 10)
#'
exp.fit.all.log.lin <- function(filename,
                                skip.time,
                                matlab = TRUE,
                                show.plots = TRUE,
                                linear = TRUE,
                                nls = FALSE,
                                startPulse = 29.5,
                                endPulse = 60.5) {
  if(matlab == TRUE) {
  matfile <- R.matlab::readMat(filename, fixNames = TRUE)
  signal <- matfile$signal
  # Time axis reconstructed from sample index assuming 4 samples/second.
  time <- (1:length(signal)) / 4
  df <- data.frame(time, signal)
  rm(signal)
  rm(time)
  } else {
    df <- read_csv(filename) %>% dplyr::select(signal, MeanGCaMP, time)
  }
  animal_name <- basename(filename)
    #quo_name(enquo(filename))
  # fit to first N(skip.time) seconds to 30 sec for log-linear fit
  # if using nls, fit only to pre and post-stimulus
  if(nls == FALSE) {
    if(linear == FALSE) {
    # NOTE(review): skip.time:startPulse*4 parses as (skip.time:startPulse)*4
    # (every 4th row), not rows skip.time..(startPulse*4) -- confirm intended.
    fit1 <- lm(data = df[c(skip.time:startPulse*4, (endPulse*4 + 40):nrow(df)), ], signal ~ log(time))
    correction <- "log"
  } else {
    fit1 <- lm(data = df[c(skip.time:120, 300:360), ], signal ~ log(time) + time) # plus last 15s
    correction <- "log+linear"
    # A positive linear slope means the trace is rising, not bleaching:
    # refit with the log term only.
    if(fit1$coefficients[2] > 0) {
      fit1 <- lm(data = df[c(skip.time:120, 300:360), ], signal ~ log(time))
      correction <- "log"
    }
  }
  } else { #for nls = TRUE
    # Self-starting asymptotic regression, fitted outside the stimulus window.
    fit1 <- try(nls(signal ~ SSasymp(time, Asym, R0, lrc),
                  data = dplyr::filter(df, time < startPulse | time > (endPulse + 10))))
    correction <- "nls"
  }
  #### get fitted values ####
  if (inherits(fit1, "try-error")) {
    # nls failed to converge: keep the raw trace (fitted = 0 everywhere).
    message("No exponential decay detected, using raw values for file:")
    print(filename)
    fitted <- 0
    correction <- "raw"
  } else {
  fitted <- predict(fit1, newdata = df)
  }
  df %<>% dplyr::mutate(fitted = fitted,
                        correction = correction)
  # for linear fit,
  # correct after fitted values go below zero (~ 20s)
  if(nls == FALSE) {
    df %<>% mutate(corrected = dplyr::case_when(
    fit1$coefficients[2] > 0 ~ signal, #ignore inverted exp fit
    fitted > 0 ~ signal, #ignore cases which have linear fit > 0
    TRUE ~ signal - fitted
  ))
  } else {
    df %<>% dplyr::mutate(corrected = signal - fitted)
  }
  # plot fits to inspect
  p <- ggplot(df, aes(x = time, y = signal)) +
    geom_line(colour = "black") +
    geom_line(aes(y = fitted), colour = "red", linetype = "dashed") +
    geom_line(aes(y = corrected), colour = "blue") +
    annotate("text", label = animal_name, y = max(c(df$corrected,df$signal) + 0.1), x = 50) +
    theme_classic()
  if(show.plots) {
    print(p)
  }
  return(df %>% dplyr::rename(delF = corrected))
}
|
/R/exp.fit.all.log.lin.R
|
no_license
|
aphilbrook/MF.matR
|
R
| false
| false
| 3,617
|
r
|
#' exp.fit.all.log.lin
#'
#' Function takes a calcium imaging file by filename (.mat or csv)
#' and performs a simple exponential regression fit to normalize the deltaF/F signal. This function incorporates a linear
#' term to account for gradual rise in signal, which can alter the single exponential. Will output plots showing original
#' and fitted values, as well as corrected values. Returns the data frame with the corrected trace in column delF.
#' @param filename filepaths of .mat files which have a "signal" and "time" field.
#' @param skip.time number of seconds to skip at the beginning for the exponential fit. N=10 improves the fit.
#' @param matlab if TRUE read the file with R.matlab::readMat (signal sampled at 4 Hz); otherwise read a csv with signal/MeanGCaMP/time columns.
#' @param show.plots if TRUE, print a diagnostic plot of raw (black), fitted (red dashed) and corrected (blue) traces.
#' @param linear if TRUE (and nls = FALSE), fit signal ~ log(time) + time, falling back to log-only when the fitted linear slope is positive.
#' @param nls use nls to perform exponential fit (self-starting SSasymp asymptotic regression), excluding the stimulus window.
#' @param startPulse,endPulse stimulus window bounds in seconds, excluded from the nls fit.
#' @importFrom magrittr "%>%"
#' @importFrom magrittr "%<>%"
#' @export
#' @examples data <- exp.fit.all.log.lin(files[1], skip.time = 10)
#'
exp.fit.all.log.lin <- function(filename,
                                skip.time,
                                matlab = TRUE,
                                show.plots = TRUE,
                                linear = TRUE,
                                nls = FALSE,
                                startPulse = 29.5,
                                endPulse = 60.5) {
  if(matlab == TRUE) {
  matfile <- R.matlab::readMat(filename, fixNames = TRUE)
  signal <- matfile$signal
  # Time axis reconstructed from sample index assuming 4 samples/second.
  time <- (1:length(signal)) / 4
  df <- data.frame(time, signal)
  rm(signal)
  rm(time)
  } else {
    df <- read_csv(filename) %>% dplyr::select(signal, MeanGCaMP, time)
  }
  animal_name <- basename(filename)
    #quo_name(enquo(filename))
  # fit to first N(skip.time) seconds to 30 sec for log-linear fit
  # if using nls, fit only to pre and post-stimulus
  if(nls == FALSE) {
    if(linear == FALSE) {
    # NOTE(review): skip.time:startPulse*4 parses as (skip.time:startPulse)*4
    # (every 4th row), not rows skip.time..(startPulse*4) -- confirm intended.
    fit1 <- lm(data = df[c(skip.time:startPulse*4, (endPulse*4 + 40):nrow(df)), ], signal ~ log(time))
    correction <- "log"
  } else {
    fit1 <- lm(data = df[c(skip.time:120, 300:360), ], signal ~ log(time) + time) # plus last 15s
    correction <- "log+linear"
    # A positive linear slope means the trace is rising, not bleaching:
    # refit with the log term only.
    if(fit1$coefficients[2] > 0) {
      fit1 <- lm(data = df[c(skip.time:120, 300:360), ], signal ~ log(time))
      correction <- "log"
    }
  }
  } else { #for nls = TRUE
    # Self-starting asymptotic regression, fitted outside the stimulus window.
    fit1 <- try(nls(signal ~ SSasymp(time, Asym, R0, lrc),
                  data = dplyr::filter(df, time < startPulse | time > (endPulse + 10))))
    correction <- "nls"
  }
  #### get fitted values ####
  if (inherits(fit1, "try-error")) {
    # nls failed to converge: keep the raw trace (fitted = 0 everywhere).
    message("No exponential decay detected, using raw values for file:")
    print(filename)
    fitted <- 0
    correction <- "raw"
  } else {
  fitted <- predict(fit1, newdata = df)
  }
  df %<>% dplyr::mutate(fitted = fitted,
                        correction = correction)
  # for linear fit,
  # correct after fitted values go below zero (~ 20s)
  if(nls == FALSE) {
    df %<>% mutate(corrected = dplyr::case_when(
    fit1$coefficients[2] > 0 ~ signal, #ignore inverted exp fit
    fitted > 0 ~ signal, #ignore cases which have linear fit > 0
    TRUE ~ signal - fitted
  ))
  } else {
    df %<>% dplyr::mutate(corrected = signal - fitted)
  }
  # plot fits to inspect
  p <- ggplot(df, aes(x = time, y = signal)) +
    geom_line(colour = "black") +
    geom_line(aes(y = fitted), colour = "red", linetype = "dashed") +
    geom_line(aes(y = corrected), colour = "blue") +
    annotate("text", label = animal_name, y = max(c(df$corrected,df$signal) + 0.1), x = 50) +
    theme_classic()
  if(show.plots) {
    print(p)
  }
  return(df %>% dplyr::rename(delF = corrected))
}
|
" posterior distribution:
p(μ,σ^2∣yi) ∝ p(yi|μ,σ^2).p(μ).p(σ^2)
Likelihood:
p(yi|μ,σ^2) iid~ N(μ,σ^2) ; i = 1,...,n
Here, we consider both mean and variance to be unknown.
In Gibbs sampling, the values for both will be simulated and used as inputs for generating
full conditionals of each other.
Priors:
μ ~ N(μ0,σ0^2)
σ^2 ~ IG(ν0,β0)
When variance is known, normal is conjugate prior for mean.
When mean is known, inverse gamma is conjugate prior for variance.
p(μ|σ^2,yi) ∝ p(μ,σ^2∣yi)
Expanding p(yi|μ,σ^2).p(μ).p(σ^2) and dropping terms without μ gives:
p(μ|σ^2,y1,...,yn) ∝ N ( μ | n.Ybar/σ^2 + μ0/σ0^2 , 1 )
------------------- -------------
n/σ^2 + 1/σ^2 n/σ^2 + 1/σ^2
Similarly,
p(σ^2|μ,yi) ∝ p(μ,σ^2∣yi)
Expanding p(yi|μ,σ^2).p(μ).p(σ^2) and dropping terms without σ^2 gives:
p(σ^2|μ,y1,...,yn) ∝ IG ( σ^2 | ν0 + n/2 , β0 + (Σ(yi-μ)^2)/2 ) ; i = 1,...,n
"
# full conditional for mean
update_mu <- function(n, ybar, sig2, mu_0, sig2_0) {
  # Draw a single value of mu from its full conditional, which is normal
  # when the likelihood variance sig2 is treated as known.
  #
  # n      : sample size of the observed data
  # ybar   : sample mean of the observed data
  # sig2   : current value of the likelihood variance
  # mu_0   : prior mean (normal prior on mu)
  # sig2_0 : prior variance (normal prior on mu)
  #
  # Posterior precision is the sum of data precision and prior precision.
  precision <- n / sig2 + 1.0 / sig2_0
  post_var <- 1.0 / precision
  # Posterior mean: precision-weighted average of data mean and prior mean.
  post_mean <- post_var * (n * ybar / sig2 + mu_0 / sig2_0)
  rnorm(n = 1, mean = post_mean, sd = sqrt(post_var))
}
# full conditional for variance
update_sig2 <- function(n, y, mu, nu_0, beta_0) {
  # Draw a single value of sig2 from its full conditional, which is
  # inverse-gamma when the likelihood mean mu is treated as known.
  #
  # n      : sample size of the observed data
  # y      : observed data values
  # mu     : current value of the likelihood mean
  # nu_0   : prior shape (inverse-gamma prior on sig2)
  # beta_0 : prior rate (inverse-gamma prior on sig2)
  shape_post <- nu_0 + n / 2.0
  rate_post <- beta_0 + sum((y - mu)^2) / 2.0
  # Sample from gamma, then invert to obtain an inverse-gamma draw.
  1.0 / rgamma(n = 1, shape = shape_post, rate = rate_post)
}
# function for Gibbs sampling
gibbs <- function(y, n_iter, init, prior) {
  # Run a two-block Gibbs sampler for (mu, sig2) of a normal model.
  #
  # y      : observed data values
  # n_iter : number of Markov-chain iterations
  # init   : list with the initial state for mu (init$mu)
  # prior  : list of hyperparameters (mu_0, sig2_0, nu_0, beta_0)
  #
  # Returns an n_iter x 2 matrix with columns "mu" and "sig2".
  y_mean <- mean(y)
  n_obs <- length(y)
  # Preallocate chains.
  mu_draws <- numeric(n_iter)
  sig2_draws <- numeric(n_iter)
  mu_cur <- init$mu
  for (iter in seq_len(n_iter)) {
    # Alternate draws: sig2 given current mu, then mu given the new sig2.
    sig2_cur <- update_sig2(n = n_obs, y = y, mu = mu_cur,
                            nu_0 = prior$nu_0, beta_0 = prior$beta_0)
    mu_cur <- update_mu(n = n_obs, ybar = y_mean, sig2 = sig2_cur,
                        mu_0 = prior$mu_0, sig2_0 = prior$sig2_0)
    sig2_draws[iter] <- sig2_cur
    mu_draws[iter] <- mu_cur
  }
  cbind(mu = mu_draws, sig2 = sig2_draws)
}
# given sample
y = c(1.2, 1.4, -0.5, 0.3, 0.9, 2.3, 1.0, 0.1, 1.3, 1.9)
ybar = mean(y)
n = length(y)
# hyperparameters for priors
prior = list()
# hyperparameters for normal prior
prior$mu_0 = 0.0
prior$sig2_0 = 1.0
# hyperparameters for inverse-gamma prior
prior$n_0 = 2.0 # prior effective sample size for sig2
prior$s2_0 = 1.0 # prior point estimate for sig2
prior$nu_0 = prior$n_0 / 2.0 # prior parameter for inverse-gamma
prior$beta_0 = prior$n_0 * prior$s2_0 / 2.0 # prior parameter for inverse-gamma
# compare prior for mu and distribution of data
hist(y, freq=FALSE, xlim=c(-1.0, 3.0)) # histogram of the data
curve(dnorm(x=x, mean=prior$mu_0, sd=sqrt(prior$sig2_0)), lty=2, add=TRUE) # prior for mu
points(y, rep(0,n), pch=1) # individual data points
points(ybar, 0, pch=19) # sample mean
# run the sampler
set.seed(53) # fixed seed so the chain is reproducible
init = list()
init$mu = 0.0 # initial state for mu
# run Gibbs sampler for 1000 iterations
post = gibbs(y=y, n_iter=1e3, init=init, prior=prior)
head(post)
# coda provides MCMC trace plots, density plots and summary statistics
library("coda")
plot(as.mcmc(post))
summary(as.mcmc(post))
|
/5_gibbs.R
|
no_license
|
udbj/UCSC-Bayesian-Statistics-II
|
R
| false
| false
| 4,191
|
r
|
" posterior distribution:
p(μ,σ^2∣yi) ∝ p(yi|μ,σ^2).p(μ).p(σ^2)
Likelihood:
p(yi|μ,σ^2) iid~ N(μ,σ^2) ; i = 1,...,n
Here, we consider both mean and variance to be unknown.
In Gibbs sampling, the values for both will be simulated and used as inputs for generating
full conditionals of each other.
Priors:
μ ~ N(μ0,σ0^2)
σ^2 ~ IG(ν0,β0)
When variance is known, normal is conjugate prior for mean.
When mean is known, inverse gamma is conjugate prior for variance.
p(μ|σ^2,yi) ∝ p(μ,σ^2∣yi)
Expanding p(yi|μ,σ^2).p(μ).p(σ^2) and dropping terms without μ gives:
p(μ|σ^2,y1,...,yn) ∝ N ( μ | n.Ybar/σ^2 + μ0/σ0^2 , 1 )
------------------- -------------
n/σ^2 + 1/σ^2 n/σ^2 + 1/σ^2
Similarly,
p(σ^2|μ,yi) ∝ p(μ,σ^2∣yi)
Expanding p(yi|μ,σ^2).p(μ).p(σ^2) and dropping terms without σ^2 gives:
p(σ^2|μ,y1,...,yn) ∝ IG ( σ^2 | ν0 + n/2 , β0 + (Σ(yi-μ)^2)/2 ) ; i = 1,...,n
"
# full conditional for mean
update_mu = function(n, ybar, sig2, mu_0, sig2_0)
{
"n = size of given sample
ybar = mean of given sample
mu_0 = hyperparameter - mean for normal prior distribution
sig2_0 = hyperparameter - variance for normal prior distribution"
# variance
sig2_1 = 1.0 / (n / sig2 + 1.0 / sig2_0)
# mean
mu_1 = sig2_1 * (n * ybar / sig2 + mu_0 / sig2_0)
# simulate a value from full conditional distribution defined by the parameters above
rnorm(n=1, mean=mu_1, sd=sqrt(sig2_1))
}
# full conditional for variance
update_sig2 = function(n, y, mu, nu_0, beta_0)
{
"n = size of given sample
y = given sample values
mu = value of mean for current iteration
nu_0 = hyperparameter - shape for prior inverse-gamma distribution
beta_0 = hyperparameter - rate for prior inverse-gamma distribution"
# shape
nu_1 = nu_0 + n / 2.0
# rate
sumsq = sum( (y - mu)^2 )
beta_1 = beta_0 + sumsq / 2.0
# simulate a value from gamma distribution defined by the parameters above
out_gamma = rgamma(n=1, shape=nu_1, rate=beta_1)
# convert to inverse gamma
1.0 / out_gamma
}
# function for Gibbs sampling
gibbs = function(y, n_iter, init, prior)
{
"y = given sample values
n_iter = number of iterations for Markov chain
init = list with initial state for mu
prior = list with hyperparameters for prior normal and inverse-gamma distributions
"
ybar = mean(y)
n = length(y)
## initialize
mu_out = numeric(n_iter)
sig2_out = numeric(n_iter)
mu_now = init$mu # initialise mean with supplied initial state
## Gibbs sampler
for (i in 1:n_iter) {
# simulate new values for mean and variance using current values of each other
sig2_now = update_sig2(n=n, y=y, mu=mu_now, nu_0=prior$nu_0, beta_0=prior$beta_0)
mu_now = update_mu(n=n, ybar=ybar, sig2=sig2_now, mu_0=prior$mu_0, sig2_0=prior$sig2_0)
# store generated values
sig2_out[i] = sig2_now
mu_out[i] = mu_now
}
cbind(mu=mu_out, sig2=sig2_out)
}
# given sample
y = c(1.2, 1.4, -0.5, 0.3, 0.9, 2.3, 1.0, 0.1, 1.3, 1.9)
ybar = mean(y)
n = length(y)
# hyperparameters for priors
prior = list()
# hyperparameters for normal prior
prior$mu_0 = 0.0
prior$sig2_0 = 1.0
# hyperparameters for inverse-gamma prior
prior$n_0 = 2.0 # prior effective sample size for sig2
prior$s2_0 = 1.0 # prior point estimate for sig2
prior$nu_0 = prior$n_0 / 2.0 # prior parameter for inverse-gamma
prior$beta_0 = prior$n_0 * prior$s2_0 / 2.0 # prior parameter for inverse-gamma
# compare prior for mu and distribution of data
hist(y, freq=FALSE, xlim=c(-1.0, 3.0)) # histogram of the data
curve(dnorm(x=x, mean=prior$mu_0, sd=sqrt(prior$sig2_0)), lty=2, add=TRUE) # prior for mu
points(y, rep(0,n), pch=1) # individual data points
points(ybar, 0, pch=19) # sample mean
# run the sampler
set.seed(53)
init = list()
init$mu = 0.0 # initial state for mu
# run Gibbs sampler for 1000 iterations
post = gibbs(y=y, n_iter=1e3, init=init, prior=prior)
head(post)
library("coda")
plot(as.mcmc(post))
summary(as.mcmc(post))
|
# originally by Ruben Garrido-Oter
# garridoo@mpipz.mpg.de
# cleanup
# NOTE(review): rm(list=ls()) wipes the whole workspace; acceptable for a
# standalone script, but avoid sourcing this file from an interactive session.
rm(list=ls())
# SynCom flask dataset
# (expected to define design, asv_table, asv_table_norm, taxonomy and the
# plotting constants such as main_theme, pcoa_* and *_color -- confirm)
source("load_data_SC_comp_flowpot.R")
# subset samples of interest
# NOTE(review): bare T is used as TRUE here and below; TRUE is safer since T
# can be reassigned.
idx <- design$system %in% c("FP", "FPL") &
# design$condition %in% c("At+Full", "C+Full", "Full", "Full_MS", "Full_TP") &
design$condition %in% c("At+Full", "At+IRL", "At+ICL", "C+Full", "C+IRL", "C+ICL", "Full_MS", "Full_TP", "Full", "IRL", "ICL") &
# design$treatment %in% c("Full") &
design$fraction %in% c("root", "input", "cells", "soil") &
T
design$fraction_treatment <- paste(design$fraction, design$treatment)
design_subset <- design[idx, ]
asv_table_subset <- asv_table_norm[, idx]
asv_table_subset_counts <- asv_table[, idx]
# drop ASVs absent from the selected samples
asv_table_subset_counts <- asv_table_subset_counts[rowSums(asv_table_subset_counts)!=0, ]
# aggregate tables to the family level to compare across SynCom treatments
family_table_norm <- aggregate(asv_table_subset, by=list(taxonomy$family), FUN=sum)
rownames(family_table_norm) <- family_table_norm[, 1]
family_table_norm <- family_table_norm[, -1]
# asv_table_subset <- family_table_norm
### beta diversity
# color per host group, plotting symbol per fraction/treatment combination
colors <- data.frame(group=c("input", "Cr", "At", "soil"),
color=c(input_color, cr_color, at_color, soil_color))
shapes <- data.frame(group=c("root Full", "root ICL", "root IRL", "soil Full", "soil ICL", "soil IRL", "cells Full", "input Full", "input IRL", "input ICL"),
shape=c(19, 10, 13, 15, 12, 7, 17, 19, 10, 13))
# PCoA Bray-Curtis
bray_curtis <- vegdist(t(asv_table_subset), method="bray")
k <- 2
pcoa <- cmdscale(bray_curtis, k=k, eig=T)
points <- pcoa$points
eig <- pcoa$eig
points <- as.data.frame(points)
colnames(points) <- c("x", "y")
# attach sample metadata to the ordination coordinates
points <- cbind(points, design_subset[match(rownames(points), design_subset$SampleID), ])
colors <- colors[colors$group %in% points$host, ]
points$host <- factor(points$host, levels=colors$group)
shapes <- shapes[shapes$group %in% points$fraction_treatment, ]
points$fraction_treatment <- factor(points$fraction_treatment, levels=shapes$group)
# plot PCo 1 and 2
p <- ggplot(points, aes(x=x, y=y, color=host, shape=fraction_treatment)) +
geom_point(alpha=pcoa_alpha, size=pcoa_size) +
scale_colour_manual(values=as.character(colors$color)) +
scale_shape_manual(values=shapes$shape) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep="")) +
ggtitle("PCoA of Bray-Curtis distances") +
main_theme +
theme(legend.position="right")
ggsave(paste(figures.dir, "competition_flowpot_PCoA.pdf", sep=""), p, width=pcoa_width, height=pcoa_height)
### CPCoA analysis
# constrain on host, conditioning out technical/experimental covariates
sqrt_transform <- T
capscale.gen <- capscale(bray_curtis ~ host + Condition(treatment * bio_replicate * experiment * system), data=design_subset, add=F, sqrt.dist=sqrt_transform)
# ANOVA-like permutation analysis
perm_anova.gen <- anova.cca(capscale.gen)
print(perm_anova.gen)
# generate variability tables and calculate confidence intervals for the variance
var_tbl.gen <- variability_table(capscale.gen)
eig <- capscale.gen$CCA$eig
# NOTE(review): the first assignment to variance is dead code -- it is
# immediately overwritten by the variability-table value on the next line.
variance <- capscale.gen$CCA$tot.chi / capscale.gen$tot.chi
variance <- var_tbl.gen["constrained", "proportion"]
p.val <- perm_anova.gen[1, 4]
points_cpcoa <- capscale.gen$CCA$wa[, 1:2]
colnames(points_cpcoa) <- c("x", "y")
points_cpcoa <- cbind(points_cpcoa, design_subset[match(rownames(points_cpcoa), design_subset$SampleID), ])
points_cpcoa <- points_cpcoa[points_cpcoa$compartment!="input", ]
colors_cpcoa <- colors[colors$group %in% points_cpcoa$host, ]
points_cpcoa$host <- factor(points_cpcoa$host, levels=colors$group)
shapes_cpcoa <- shapes[shapes$group %in% points_cpcoa$fraction_treatment, ]
points_cpcoa$fraction_treatment <- factor(points_cpcoa$fraction_treatment, levels=shapes$group)
# calculate centroids per condition and joining segments
centroids <- aggregate(cbind(points_cpcoa$x, points_cpcoa$y) ~ host, data=points_cpcoa, FUN=mean)
segments <- merge(points_cpcoa, setNames(centroids, c('host','seg_x','seg_y')), by='host', sort=FALSE)
# plot CPCo 1 and 2
p <- ggplot(points_cpcoa, aes(x=x, y=y, color=host, shape=fraction_treatment)) +
geom_point(alpha=pcoa_alpha, size=pcoa_size) +
geom_segment(data=segments, mapping=aes(xend=seg_x, yend=seg_y), alpha=segment_alpha) +
scale_colour_manual(values=as.character(colors_cpcoa$color)) +
scale_shape_manual(values=shapes_cpcoa$shape) +
labs(x=paste("CPCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("CPCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep="")) +
ggtitle(paste(format(100 * variance, digits=3), " % of variance; P=", format(p.val, digits=2), sep="")) +
main_theme +
theme(legend.position="none")
ggsave(paste(figures.dir, "competition_flowpot_CPCoA.pdf", sep=""), p, width=pcoa_width, height=pcoa_height)
### shannon index
# rarefy to equal depth before computing diversity
asv_table_subset_rarefied <- rrarefy(asv_table_subset_counts, sample=500)
index <- diversity(t(asv_table_subset_rarefied), index="shannon")
index <- cbind(index, design_subset[match(names(index), design_subset$SampleID), ])
colors_index <- colors[colors$group %in% index$host, ]
index$host <- factor(index$host, levels=colors$group)
# NOTE(review): shapes$group holds "fraction treatment" combinations
# ("root Full", ...) while index$fraction holds bare fractions ("root", ...);
# the %in% match and the factor levels below likely never match -- confirm.
shapes_index <- shapes[shapes$group %in% index$fraction, ]
index$fraction <- factor(index$fraction, levels=shapes$group)
p <- ggplot(index, aes(x=compartment, y=index, color=host, shape=fraction)) +
geom_boxplot(alpha=1, outlier.size=0, size=boxplot_size, width=boxplot_width, fill="transparent") +
geom_jitter(position=position_jitterdodge(2), size=boxplot_jitter_size, alpha=shannon_alpha) +
scale_y_continuous(limits=c(0, max(index$index))) +
scale_colour_manual(values=as.character(colors_index$color)) +
scale_shape_manual(values=shapes_index$shape) +
labs(x="", y="Shannon index") +
ggtitle("Shannon diversity") +
main_theme
ggsave(paste(figures.dir, "competition_flowpot_shannon.pdf", sep=""), p, width=shannon_width, height=shannon_height)
|
/scripts/diversity_SC_comp_flowpot.R
|
no_license
|
garridoo/crsphere
|
R
| false
| false
| 6,253
|
r
|
# originally by Ruben Garrido-Oter
# garridoo@mpipz.mpg.de
# cleanup
rm(list=ls())
# SynCom flask dataset
source("load_data_SC_comp_flowpot.R")
# subset samples of interest
idx <- design$system %in% c("FP", "FPL") &
# design$condition %in% c("At+Full", "C+Full", "Full", "Full_MS", "Full_TP") &
design$condition %in% c("At+Full", "At+IRL", "At+ICL", "C+Full", "C+IRL", "C+ICL", "Full_MS", "Full_TP", "Full", "IRL", "ICL") &
# design$treatment %in% c("Full") &
design$fraction %in% c("root", "input", "cells", "soil") &
T
design$fraction_treatment <- paste(design$fraction, design$treatment)
design_subset <- design[idx, ]
asv_table_subset <- asv_table_norm[, idx]
asv_table_subset_counts <- asv_table[, idx]
asv_table_subset_counts <- asv_table_subset_counts[rowSums(asv_table_subset_counts)!=0, ]
# aggregate tables to the family level to compare across SynCom treatments
family_table_norm <- aggregate(asv_table_subset, by=list(taxonomy$family), FUN=sum)
rownames(family_table_norm) <- family_table_norm[, 1]
family_table_norm <- family_table_norm[, -1]
# asv_table_subset <- family_table_norm
### beta diversity
colors <- data.frame(group=c("input", "Cr", "At", "soil"),
color=c(input_color, cr_color, at_color, soil_color))
shapes <- data.frame(group=c("root Full", "root ICL", "root IRL", "soil Full", "soil ICL", "soil IRL", "cells Full", "input Full", "input IRL", "input ICL"),
shape=c(19, 10, 13, 15, 12, 7, 17, 19, 10, 13))
# PCoA Bray-Curtis
bray_curtis <- vegdist(t(asv_table_subset), method="bray")
k <- 2
pcoa <- cmdscale(bray_curtis, k=k, eig=T)
points <- pcoa$points
eig <- pcoa$eig
points <- as.data.frame(points)
colnames(points) <- c("x", "y")
points <- cbind(points, design_subset[match(rownames(points), design_subset$SampleID), ])
colors <- colors[colors$group %in% points$host, ]
points$host <- factor(points$host, levels=colors$group)
shapes <- shapes[shapes$group %in% points$fraction_treatment, ]
points$fraction_treatment <- factor(points$fraction_treatment, levels=shapes$group)
# plot PCo 1 and 2
p <- ggplot(points, aes(x=x, y=y, color=host, shape=fraction_treatment)) +
geom_point(alpha=pcoa_alpha, size=pcoa_size) +
scale_colour_manual(values=as.character(colors$color)) +
scale_shape_manual(values=shapes$shape) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep="")) +
ggtitle("PCoA of Bray-Curtis distances") +
main_theme +
theme(legend.position="right")
ggsave(paste(figures.dir, "competition_flowpot_PCoA.pdf", sep=""), p, width=pcoa_width, height=pcoa_height)
### CPCoA analysis
sqrt_transform <- T
capscale.gen <- capscale(bray_curtis ~ host + Condition(treatment * bio_replicate * experiment * system), data=design_subset, add=F, sqrt.dist=sqrt_transform)
# ANOVA-like permutation analysis
perm_anova.gen <- anova.cca(capscale.gen)
print(perm_anova.gen)
# generate variability tables and calculate confidence intervals for the variance
var_tbl.gen <- variability_table(capscale.gen)
eig <- capscale.gen$CCA$eig
variance <- capscale.gen$CCA$tot.chi / capscale.gen$tot.chi
variance <- var_tbl.gen["constrained", "proportion"]
p.val <- perm_anova.gen[1, 4]
points_cpcoa <- capscale.gen$CCA$wa[, 1:2]
colnames(points_cpcoa) <- c("x", "y")
points_cpcoa <- cbind(points_cpcoa, design_subset[match(rownames(points_cpcoa), design_subset$SampleID), ])
points_cpcoa <- points_cpcoa[points_cpcoa$compartment!="input", ]
colors_cpcoa <- colors[colors$group %in% points_cpcoa$host, ]
points_cpcoa$host <- factor(points_cpcoa$host, levels=colors$group)
shapes_cpcoa <- shapes[shapes$group %in% points_cpcoa$fraction_treatment, ]
points_cpcoa$fraction_treatment <- factor(points_cpcoa$fraction_treatment, levels=shapes$group)
# calculate centroids per condition and joining segments
centroids <- aggregate(cbind(points_cpcoa$x, points_cpcoa$y) ~ host, data=points_cpcoa, FUN=mean)
segments <- merge(points_cpcoa, setNames(centroids, c('host','seg_x','seg_y')), by='host', sort=FALSE)
# plot CPCo 1 and 2
p <- ggplot(points_cpcoa, aes(x=x, y=y, color=host, shape=fraction_treatment)) +
geom_point(alpha=pcoa_alpha, size=pcoa_size) +
geom_segment(data=segments, mapping=aes(xend=seg_x, yend=seg_y), alpha=segment_alpha) +
scale_colour_manual(values=as.character(colors_cpcoa$color)) +
scale_shape_manual(values=shapes_cpcoa$shape) +
labs(x=paste("CPCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("CPCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep="")) +
ggtitle(paste(format(100 * variance, digits=3), " % of variance; P=", format(p.val, digits=2), sep="")) +
main_theme +
theme(legend.position="none")
ggsave(paste(figures.dir, "competition_flowpot_CPCoA.pdf", sep=""), p, width=pcoa_width, height=pcoa_height)
### shannon index
asv_table_subset_rarefied <- rrarefy(asv_table_subset_counts, sample=500)
index <- diversity(t(asv_table_subset_rarefied), index="shannon")
index <- cbind(index, design_subset[match(names(index), design_subset$SampleID), ])
colors_index <- colors[colors$group %in% index$host, ]
index$host <- factor(index$host, levels=colors$group)
shapes_index <- shapes[shapes$group %in% index$fraction, ]
index$fraction <- factor(index$fraction, levels=shapes$group)
p <- ggplot(index, aes(x=compartment, y=index, color=host, shape=fraction)) +
geom_boxplot(alpha=1, outlier.size=0, size=boxplot_size, width=boxplot_width, fill="transparent") +
geom_jitter(position=position_jitterdodge(2), size=boxplot_jitter_size, alpha=shannon_alpha) +
scale_y_continuous(limits=c(0, max(index$index))) +
scale_colour_manual(values=as.character(colors_index$color)) +
scale_shape_manual(values=shapes_index$shape) +
labs(x="", y="Shannon index") +
ggtitle("Shannon diversity") +
main_theme
ggsave(paste(figures.dir, "competition_flowpot_shannon.pdf", sep=""), p, width=shannon_width, height=shannon_height)
|
`ensemble.zones` <- function(
    presence.raster=NULL, centroid.object=NULL, x=NULL, ext=NULL,
    RASTER.species.name=centroid.object$name, RASTER.stack.name = x@title,
    RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
#    KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
    CATCH.OFF=FALSE
)
{
    # Assign each presence cell of 'presence.raster' to its nearest
    # environmental centroid (Mahalanobis distance) and write the resulting
    # zone raster to ensembles/zones. Returns the zones RasterLayer.
    #
    # presence.raster : RasterLayer marking presence (cells == 1)
    # centroid.object : output of ensemble.centroids (centroids, cov.mahal, name)
    # x               : RasterStack of explanatory variables
    # ext             : optional extent used to crop both rasters
    # CATCH.OFF       : if TRUE, run the prediction without tryCatch protection
    .BiodiversityR <- new.env()
#    if (! require(dismo)) {stop("Please install the dismo package")}
    if (is.null(presence.raster) == T) {stop("value for parameter presence.raster is missing (RasterLayer object)")}
    # BUG FIX: the error message previously said "x is not a RasterLayer
    # object" although the failing argument is presence.raster.
    if(inherits(presence.raster, "RasterLayer") == F) {stop("presence.raster is not a RasterLayer object")}
    if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
    if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
    if (is.null(centroid.object) == T) {stop("value for parameter centroid.object is missing (hint: use the ensemble.centroids function)")}
#
#
#    if (KML.out==T && raster::isLonLat(presence.raster)==F) {
#        cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of presence.raster is not longitude and latitude", "\n", sep = ""))
#        KML.out <- FALSE
#    }
#
    # For each row of newdata, return the index (1..nc) of the centroid with
    # the smallest Mahalanobis distance.
    predict.zone <- function(object=centroid.object, newdata=newdata) {
        centroids <- object$centroids
        cov.mahal <- object$cov.mahal
        nc <- nrow(centroids)
        result <- data.frame(array(0, dim=c(nrow(newdata), nc)))
        for (i in 1:nc) {
            result[,i] <- mahalanobis(newdata, center=as.numeric(centroids[i,]), cov=cov.mahal)
        }
        # BUG FIX: apply over the whole frame; result[, 1:nc] dropped to a
        # plain vector when nc == 1 (a single centroid), making apply() fail.
        p <- apply(result, 1, which.min)
        p <- as.numeric(p)
        return(p)
    }
#
    # check if all variables are present
    vars <- names(centroid.object$centroids)
    vars.x <- names(x)
    nv <- length(vars)
    for (i in 1:nv) {
        # BUG FIX: removed the invalid 'sep=""' argument -- stop() has no sep
        # parameter, so it was silently appended to the message via '...'.
        if (any(vars.x==vars[i]) == F) {stop("explanatory variable '", vars[i], "' not among grid layers of RasterStack x \n")}
    }
    # drop stack layers that the centroid object does not document
    nv <- length(vars.x)
    for (i in 1:nv) {
        if (any(vars==vars.x[i]) == F) {
            cat(paste("\n", "NOTE: RasterStack layer '", vars.x[i], "' was not documented in the centroids data set", "\n", sep = ""))
            x <- raster::dropLayer(x, which(names(x) %in% c(vars.x[i]) ))
            x <- raster::stack(x)
        }
    }
    # same extent for predictors and presence map
    if (is.null(ext) == F) {
        if(length(x@title) == 0) {x@title <- "stack1"}
        title.old <- x@title
        x <- raster::crop(x, y=ext, snap="in")
        x@title <- title.old
        x <- raster::stack(x)
        presence.raster <- raster::crop(presence.raster, y=ext, snap="in")
    }
    # avoid problems with non-existing directories and prepare for output
    dir.create("ensembles", showWarnings = F)
    dir.create("ensembles/zones", showWarnings = F)
#    if(KML.out == T) {
#        dir.create("kml", showWarnings = F)
#        dir.create("kml/zones", showWarnings = F)
#    }
    stack.title <- RASTER.stack.name
#    if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
    rasterfull <- paste("ensembles/zones/", RASTER.species.name, "_", stack.title , sep="")
    kmlfull <- paste("kml/zones/", RASTER.species.name, "_", stack.title , sep="")
#
    # predict zones cell-by-cell over the stack
    if (CATCH.OFF == F) {
        tryCatch(zones.raster <- raster::predict(object=x, model=centroid.object, fun=predict.zone, na.rm=TRUE,
               filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format),
           error= function(err) {print(paste("prediction of zones failed"))},
           silent=F)
    }else{
        zones.raster <- raster::predict(object=x, model=centroid.object, fun=predict.zone, na.rm=TRUE,
               filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format)
    }
    # mask the presence area, including areas that are NA in presence raster
    zones.raster <- raster::mask(zones.raster, presence.raster, inverse=T, maskvalue=1)
    zones.raster <- raster::mask(zones.raster, presence.raster, inverse=F)
    cat(paste("\n", "raster layer with zones created", "\n", sep = ""))
    print(raster::freq(zones.raster))
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default format of GTiff since DEC-2022
    raster::writeRaster(zones.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#    raster::writeRaster(zones.raster, filename="working.grd", overwrite=T)
#    working.raster <- raster::raster("working.grd")
#    names(working.raster) <- paste(RASTER.species.name, "_", stack.title , "_zones", sep="")
#    raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
#    if (KML.out == T) {
#        nc <- nrow(centroid.object$centroids)
#        raster::KML(working.raster, filename=kmlfull, col = grDevices::rainbow(n = nc, start = 0.2, end = 0.8), colNA = 0,
#            blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE, breaks = c(0:nc))
#    }
    cat(paste("\n", "zones provided in folder: ", getwd(), "//ensembles//zones", "\n", sep=""))
#    zones.raster <- raster::raster(rasterfull)
    return(zones.raster)
}
|
/R/ensemble.zones.R
|
no_license
|
cran/BiodiversityR
|
R
| false
| false
| 5,346
|
r
|
`ensemble.zones` <- function(
presence.raster=NULL, centroid.object=NULL, x=NULL, ext=NULL,
RASTER.species.name=centroid.object$name, RASTER.stack.name = x@title,
RASTER.format="GTiff", RASTER.datatype="INT2S", RASTER.NAflag=-32767,
# KML.out=FALSE, KML.maxpixels=100000, KML.blur=10,
CATCH.OFF=FALSE
)
{
.BiodiversityR <- new.env()
# if (! require(dismo)) {stop("Please install the dismo package")}
if (is.null(presence.raster) == T) {stop("value for parameter presence.raster is missing (RasterLayer object)")}
if(inherits(presence.raster, "RasterLayer") == F) {stop("x is not a RasterLayer object")}
if(is.null(x) == T) {stop("value for parameter x is missing (RasterStack object)")}
if(inherits(x, "RasterStack") == F) {stop("x is not a RasterStack object")}
if (is.null(centroid.object) == T) {stop("value for parameter centroid.object is missing (hint: use the ensemble.centroids function)")}
#
#
# if (KML.out==T && raster::isLonLat(presence.raster)==F) {
# cat(paste("\n", "NOTE: not possible to generate KML files as Coordinate Reference System (CRS) of presence.raster is not longitude and latitude", "\n", sep = ""))
# KML.out <- FALSE
# }
#
predict.zone <- function(object=centroid.object, newdata=newdata) {
centroids <- object$centroids
cov.mahal <- object$cov.mahal
nc <- nrow(centroids)
result <- data.frame(array(0, dim=c(nrow(newdata), nc)))
for (i in 1:nc) {
result[,i] <- mahalanobis(newdata, center=as.numeric(centroids[i,]), cov=cov.mahal)
}
p <- apply(result[, 1:nc], 1, which.min)
p <- as.numeric(p)
return(p)
}
#
# check if all variables are present
vars <- names(centroid.object$centroids)
vars.x <- names(x)
nv <- length(vars)
for (i in 1:nv) {
if (any(vars.x==vars[i]) == F) {stop("explanatory variable '", vars[i], "' not among grid layers of RasterStack x \n", sep = "")}
}
nv <- length(vars.x)
for (i in 1:nv) {
if (any(vars==vars.x[i]) == F) {
cat(paste("\n", "NOTE: RasterStack layer '", vars.x[i], "' was not documented in the centroids data set", "\n", sep = ""))
x <- raster::dropLayer(x, which(names(x) %in% c(vars.x[i]) ))
x <- raster::stack(x)
}
}
# same extent for predictors and presence map
if (is.null(ext) == F) {
if(length(x@title) == 0) {x@title <- "stack1"}
title.old <- x@title
x <- raster::crop(x, y=ext, snap="in")
x@title <- title.old
x <- raster::stack(x)
presence.raster <- raster::crop(presence.raster, y=ext, snap="in")
}
# avoid problems with non-existing directories and prepare for output
dir.create("ensembles", showWarnings = F)
dir.create("ensembles/zones", showWarnings = F)
# if(KML.out == T) {
# dir.create("kml", showWarnings = F)
# dir.create("kml/zones", showWarnings = F)
# }
stack.title <- RASTER.stack.name
# if (gsub(".", "_", stack.title, fixed=T) != stack.title) {cat(paste("\n", "WARNING: title of stack (", stack.title, ") contains '.'", "\n\n", sep = ""))}
rasterfull <- paste("ensembles/zones/", RASTER.species.name, "_", stack.title , sep="")
kmlfull <- paste("kml/zones/", RASTER.species.name, "_", stack.title , sep="")
#
# predict
if (CATCH.OFF == F) {
tryCatch(zones.raster <- raster::predict(object=x, model=centroid.object, fun=predict.zone, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format),
error= function(err) {print(paste("prediction of zones failed"))},
silent=F)
}else{
zones.raster <- raster::predict(object=x, model=centroid.object, fun=predict.zone, na.rm=TRUE,
filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format)
}
# mask the presence area, including areas that are NA in presence raster
zones.raster <- raster::mask(zones.raster, presence.raster, inverse=T, maskvalue=1)
zones.raster <- raster::mask(zones.raster, presence.raster, inverse=F)
cat(paste("\n", "raster layer with zones created", "\n", sep = ""))
print(raster::freq(zones.raster))
#
# avoid possible problems with saving of names of the raster layers
# no longer used with default format of GTiff since DEC-2022
raster::writeRaster(zones.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
# raster::writeRaster(zones.raster, filename="working.grd", overwrite=T)
# working.raster <- raster::raster("working.grd")
# names(working.raster) <- paste(RASTER.species.name, "_", stack.title , "_zones", sep="")
# raster::writeRaster(working.raster, filename=rasterfull, progress='text', overwrite=TRUE, format=RASTER.format, datatype=RASTER.datatype, NAflag=RASTER.NAflag)
#
# if (KML.out == T) {
# nc <- nrow(centroid.object$centroids)
# raster::KML(working.raster, filename=kmlfull, col = grDevices::rainbow(n = nc, start = 0.2, end = 0.8), colNA = 0,
# blur=KML.blur, maxpixels=KML.maxpixels, overwrite=TRUE, breaks = c(0:nc))
# }
cat(paste("\n", "zones provided in folder: ", getwd(), "//ensembles//zones", "\n", sep=""))
# zones.raster <- raster::raster(rasterfull)
return(zones.raster)
}
|
# first we might need to install a couple packages
if(! is.element("rgl", installed.packages()[,1]))
  install.packages("rgl")
if(! is.element("ca", installed.packages()[,1]))
  install.packages("ca")
library(ca)
# Get the Data
x = read.table("/research/projects/Sponges/Molecuclar_clock/2012_analysis/support_files/71taxa.txt",header=TRUE)
# single-letter codes for the 20 standard amino acids
aa = c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
pdf("coa.pdf",width=10, height=10) # open a pdf for printing plots
col_names = names(x)
N = x[,2:length(col_names)] # get data without first column
# Correspondence analysis #
coa = ca(N) # perform correspondence analysis
p = data.frame(coa$colcoord[,1:2]) # get unscaled principal coordinates
p$name = substr(summary(coa)$columns$name,0,2) # add names to a column
#plot(coa,what=c("none","active")) # default plot from CA package
# The default plot is okay, but we can do better
# make an empty plot so we can add our own pretty data points
plot(NULL,xlim=c(min(p$X1),max(p$X1)),
     ylim=c(min(p$X2),max(p$X2)),
     xlab="PC 1",ylab="PC 2",
     cex.lab=1.3,cex.axis=1.3)
# NOTE(review): 21 plotting symbols but only 7 colours below -- colors[i] is
# NA for i > 7; also pch 26 is outside the valid base-R symbol range (0-25).
# Confirm the intended palette/symbol assignment per organism.
chars = c(23,25,15,17,16,8,19,24,25,26,6,15,15,17,22,17,17,6,17,8,19) # list of data point symbols
colors = c("black","black","orange","blue","red","green","violet")
up = unique(p$name)
for(i in 1:length(up)){
  d = subset(p,name==up[i]) # data subset for 1 organism
  points(x=d$X1,y=d$X2,pch=chars[i],cex=1.5, col=colors[i]) # add points 1 organism
}
legend("topright",cex=1.3,legend=up,pch=chars,col=colors) # make a legend
# make a barplot of the inertia of each amino acid
# i.e. the contribution of each amino acid to the overall variance
barplot(-sort(-coa$rowinertia),
        names=aa[order(-coa$rowinertia)],
        ylab="Inertia",
        cex.names=1.4,cex.lab=1.5,cex.axis=1.3)
Nt = data.frame(t(N)) # transpose data
Nt$org = substr(rownames(Nt),0,2) # get organism headers
orgs = unique(Nt$org) # get unique organism names
par( mfrow = c( 2, 2 ) )
# AA composition boxplots
#ord = x$Cat[order(-coa$rowinertia)] # sort amino acids by inertia
#for(i in 1:length(ord)){
# boxplot for each amino acid
#boxplot(Nt[,which(x$Cat==ord[i])]~Nt$org,main=ord[i])
#}
dev.off() # close the pdf device
|
/R_skripts/coa.r
|
no_license
|
dlavrov/my-scripts
|
R
| false
| false
| 2,193
|
r
|
# Correspondence analysis of amino-acid counts per sequence, plotted to
# coa.pdf: PC1/PC2 scatter of sequences (coloured by 2-letter organism
# code) plus a barplot of each amino acid's inertia.

# Install any missing dependencies before loading.
for (pkg in c("rgl", "ca")) {
  if (!(pkg %in% installed.packages()[, 1]))
    install.packages(pkg)
}
library(ca)

# Load the raw count table; column 1 holds labels, the rest are sequences.
raw <- read.table("/research/projects/Sponges/Molecuclar_clock/2012_analysis/support_files/71taxa.txt",header=TRUE)
amino <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")

# All plots go into a single PDF device.
pdf("coa.pdf",width=10, height=10)

counts <- raw[, 2:ncol(raw)]  # strip the label column

# Run the correspondence analysis and keep the first two unscaled
# principal coordinates per sequence, tagged with an organism code taken
# from the first two characters of the sequence name.
fit <- ca(counts)
pts <- data.frame(fit$colcoord[, 1:2])
pts$name <- substr(summary(fit)$columns$name, 0, 2)

# Build an empty frame sized to the data, then layer the points on
# manually so each organism gets its own symbol/colour.
plot(NULL,
     xlim = range(pts$X1), ylim = range(pts$X2),
     xlab = "PC 1", ylab = "PC 2",
     cex.lab = 1.3, cex.axis = 1.3)
sym <- c(23,25,15,17,16,8,19,24,25,26,6,15,15,17,22,17,17,6,17,8,19)
pal <- c("black","black","orange","blue","red","green","violet")
org_codes <- unique(pts$name)
for (i in 1:length(org_codes)) {
  one_org <- subset(pts, name == org_codes[i])
  points(x = one_org$X1, y = one_org$X2,
         pch = sym[i], cex = 1.5, col = pal[i])
}
legend("topright", cex = 1.3, legend = org_codes, pch = sym, col = pal)

# Barplot: per-amino-acid inertia (contribution to total variance),
# largest first, labelled with the matching amino-acid letters.
inertia_desc <- sort(fit$rowinertia, decreasing = TRUE)
barplot(inertia_desc,
        names = amino[order(fit$rowinertia, decreasing = TRUE)],
        ylab = "Inertia",
        cex.names = 1.4, cex.lab = 1.5, cex.axis = 1.3)

# Transposed view (rows = sequences) with an organism column, used by the
# disabled boxplot section below.
comp <- data.frame(t(counts))
comp$org <- substr(rownames(comp), 0, 2)
orgs <- unique(comp$org)

par(mfrow = c(2, 2))

# Per-amino-acid composition boxplots, disabled:
# ord = raw$Cat[order(-fit$rowinertia)]  # amino acids sorted by inertia
# for(i in 1:length(ord)){
#   boxplot(comp[,which(raw$Cat==ord[i])]~comp$org,main=ord[i])
# }

dev.off()
|
#' Vaal River Annual Flow Data
#'
#' @description Annual flow data of the Vaal River at Standerton, as given
#'   by Table 1.1 of Linhart and Zucchini (1986), giving the flow in
#'   millions of cubic metres.
#'
#' @docType data
#' @keywords datasets
#' @name Vaal.Flow
#' @usage data(Vaal.Flow)
#' @format An integer vector of length 65; the first values are
#'   222, 1094, 452, 1298, 882, 988, 276, 216, 103, 490, ...
#' @references Linhart, H., and Zucchini, W., \emph{Model Selection},
#' Wiley Series in Probability and Mathematical Statistics: Applied
#' Probability and Statistics, New York: John Wiley and Sons Inc, 1986.
#' @examples data(Vaal.Flow)
"Vaal.Flow"
|
/R/Vaal-Flow.r
|
no_license
|
cran/mable
|
R
| false
| false
| 639
|
r
|
#' Vaal River Annual Flow Data
#'
#' @description Annual flow data of the Vaal River at Standerton, as given
#'   by Table 1.1 of Linhart and Zucchini (1986), giving the flow in
#'   millions of cubic metres.
#'
#' @docType data
#' @keywords datasets
#' @name Vaal.Flow
#' @usage data(Vaal.Flow)
#' @format An integer vector of length 65; the first values are
#'   222, 1094, 452, 1298, 882, 988, 276, 216, 103, 490, ...
#' @references Linhart, H., and Zucchini, W., \emph{Model Selection},
#' Wiley Series in Probability and Mathematical Statistics: Applied
#' Probability and Statistics, New York: John Wiley and Sons Inc, 1986.
#' @examples data(Vaal.Flow)
"Vaal.Flow"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.