content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# ADMIXTURE post-processing: plot cross-validation error and per-sample
# ancestry proportions (Q estimates) for the FASe cohort (K = 4).
setwd("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/06-Aquilla_202101-a/01-Aquilla-preQC/03-PLINK-QC-files4/ADMIXTURE")

# Cross-validation error per K, produced by `admixture --cv`.
df <- read.table("cv_error.txt", header = TRUE)
library(ggplot2)
ggplot(data = df, aes(x = K, y = Error)) + geom_line()
ggsave("ADMIXTURE-CV-FASE.jpg", plot = last_plot(), device = NULL, scale = 1, width = 16, height = 9, dpi = 300, limitsize = TRUE)

# Self-reported ethnicity assignments, to be matched against ADMIXTURE clusters.
FASE_Ethnicity_from_Aurora <- read.table("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/06-Aquilla_202101-a/01-Aquilla-preQC/03-PLINK-QC-files4/GENESIS/FASe_3894_Ethnicity_from_Aurora.txt", header = TRUE)
# PLINK .fam file: column V2 holds the individual ID (IID).
FAM <- read.table("FASe_3894_from_AQUILLA_WXS_SNPS_INDELS_picard_biallelic-hwe-geno0.05-mind0.1.fam", header = FALSE)
# Attach the ethnicity columns to the .fam rows, matched on IID.
FAM <- cbind(FAM, FASE_Ethnicity_from_Aurora[match(FAM$V2, FASE_Ethnicity_from_Aurora$IID), ])
# Sanity check: number of .fam IIDs that matched an Aurora IID.
sum(as.character(FAM$V2) == as.character(FAM$IID))

# plot the Q estimates (columns V1..V4 are the K = 4 ancestry fractions)
tbl <- read.table("FASe_3894_from_AQUILLA_WXS_SNPS_INDELS_picard_biallelic-hwe-geno0.05-mind0.1.4.Q")
tbl <- cbind(IID = FAM$IID, Ethnicity = FAM$Population, tbl)
rownames(tbl) <- tbl$IID
library(tidyr)
library(dplyr)    # fix: mutate()/group_by()/arrange()/row_number() below are dplyr verbs
# Long format: one row per (sample, population); each sample is labelled with
# the population of its highest ancestry fraction, then ordered for plotting.
plot_data <- tbl %>%
  mutate(id = row_number()) %>%
  gather('pop', 'prob', V1:V4) %>%
  group_by(id) %>%
  mutate(likely_assignment = pop[which.max(prob)],
         assignment_prob = max(prob)) %>%
  arrange(likely_assignment, desc(assignment_prob)) %>%
  ungroup() %>%
  mutate(id = forcats::fct_inorder(factor(id)))
# Recode missing ethnicity as the PLINK missing code "-9".
plot_data$Ethnicity <- as.character(plot_data$Ethnicity)
plot_data$Ethnicity[is.na(plot_data$Ethnicity)] <- "-9"

## With facets: one panel per most-likely ADMIXTURE assignment
p <- ggplot(plot_data, aes(IID, prob, fill = pop)) +
  geom_col() +
  facet_grid(~likely_assignment, scales = 'free', space = 'free')
p
ggsave("ADMIXTURE-population-k4.jpg", plot = p, device = NULL, scale = 1, width = 16, height = 9, dpi = 300, limitsize = TRUE)

## Same plot, grouped by the Aurora self-reported ethnicity instead of IID
p <- ggplot(plot_data, aes(Ethnicity, prob, fill = pop)) +
  geom_col() +
  facet_grid(~likely_assignment, scales = 'free', space = 'free')
p
ggsave("ADMIXTURE-population--Aurora-assignment-k4.jpg", plot = p, device = NULL, scale = 1, width = 16, height = 9, dpi = 300, limitsize = TRUE)

## Theme classic
ggplot(plot_data, aes(IID, prob, fill = pop)) +
  geom_col() +
  theme_classic()

# TANZI <- read.table("/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/FASe_3894_from_AQUILLA_WXS_SNPS_INDELS_picard_biallelic-hwe-geno0.05-mind0.1_with_STATUS_nonADSP_post_QC2-geno-0.02-maxmaf-0.01.fam")
| /WashU_codes/ADMIXTURE.r | no_license | achalneupane/rcodes | R | false | false | 2,439 | r | setwd("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/06-Aquilla_202101-a/01-Aquilla-preQC/03-PLINK-QC-files4/ADMIXTURE")
# Duplicate copy of the ADMIXTURE post-processing script: plots the
# cross-validation error and per-sample ancestry proportions (K = 4).
# NOTE(review): mutate()/group_by()/arrange()/row_number() below come from
# dplyr, which is never attached in this file -- confirm dplyr is loaded
# elsewhere, otherwise the pipeline below will fail.
df <- read.table("cv_error.txt", header = T)
library(ggplot2)
# Cross-validation error per K, produced by `admixture --cv`.
ggplot(data=df, aes(x=K, y=Error)) + geom_line()
ggsave("ADMIXTURE-CV-FASE.jpg", plot = last_plot(), device = NULL, scale = 1, width = 16, height = 9, dpi = 300, limitsize = TRUE)
# Self-reported ethnicity assignments, to be matched to the PLINK .fam by IID.
FASE_Ethnicity_from_Aurora <- read.table("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/06-Aquilla_202101-a/01-Aquilla-preQC/03-PLINK-QC-files4/GENESIS/FASe_3894_Ethnicity_from_Aurora.txt", header = T)
# PLINK .fam file: column V2 holds the individual ID (IID).
FAM <- read.table("FASe_3894_from_AQUILLA_WXS_SNPS_INDELS_picard_biallelic-hwe-geno0.05-mind0.1.fam", header = F)
# Attach the ethnicity columns to the .fam rows, matched on IID.
FAM <- cbind(FAM,FASE_Ethnicity_from_Aurora[match(FAM$V2, FASE_Ethnicity_from_Aurora$IID),])
# Sanity check: number of .fam IIDs that matched an Aurora IID.
sum(as.character(FAM$V2) == as.character(FAM$IID))
# plot the Q estimates (columns V1..V4 are the K = 4 ancestry fractions)
tbl=read.table("FASe_3894_from_AQUILLA_WXS_SNPS_INDELS_picard_biallelic-hwe-geno0.05-mind0.1.4.Q")
tbl <- cbind(IID= FAM$IID, Ethnicity=FAM$Population,tbl)
rownames(tbl) <- tbl$IID
library(tidyr)
# Long format: one row per (sample, population); each sample is labelled
# with the population of its highest ancestry fraction, then ordered.
plot_data <- tbl %>%
mutate(id = row_number())%>%
gather('pop', 'prob', V1:V4) %>%
group_by(id) %>%
mutate(likely_assignment = pop[which.max(prob)],
assingment_prob = max(prob)) %>%
arrange(likely_assignment, desc(assingment_prob)) %>%
ungroup() %>%
mutate(id = forcats::fct_inorder(factor(id)))
# Recode missing ethnicity as the PLINK missing code "-9".
plot_data$Ethnicity <- as.character(plot_data$Ethnicity)
plot_data$Ethnicity[(is.na(plot_data$Ethnicity))] <- "-9"
## With facets: one panel per most-likely ADMIXTURE assignment
p <- ggplot(plot_data, aes(IID, prob, fill = pop)) +
geom_col() +
facet_grid(~likely_assignment, scales = 'free', space = 'free')
p
ggsave("ADMIXTURE-population-k4.jpg", plot = p, device = NULL, scale = 1, width = 16, height = 9, dpi = 300, limitsize = TRUE)
# Same plot, grouped by the Aurora self-reported ethnicity instead of IID.
p <- ggplot(plot_data, aes(Ethnicity, prob, fill = pop)) +
geom_col() +
facet_grid(~likely_assignment, scales = 'free', space = 'free')
p
ggsave("ADMIXTURE-population--Aurora-assignment-k4.jpg", plot = p, device = NULL, scale = 1, width = 16, height = 9, dpi = 300, limitsize = TRUE)
## Theme classic
ggplot(plot_data, aes(IID, prob, fill = pop)) +
geom_col() +
theme_classic()
# TANZI <- read.table("/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/FASe_3894_from_AQUILLA_WXS_SNPS_INDELS_picard_biallelic-hwe-geno0.05-mind0.1_with_STATUS_nonADSP_post_QC2-geno-0.02-maxmaf-0.01.fam")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Entrance}
\alias{Entrance}
\title{College entrance exam scores for 24 high school seniors}
\format{A data frame/tibble with 24 observations on one variable
\describe{
\item{score}{college entrance exam score}
}}
\usage{
Entrance
}
\description{
Data for Example 1.8
}
\examples{
stem(Entrance$score)
stem(Entrance$score, scale = 2)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
| /man/Entrance.Rd | no_license | nebsnave/BSDA | R | false | true | 560 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Entrance}
\alias{Entrance}
\title{College entrance exam scores for 24 high school seniors}
\format{A data frame/tibble with 24 observations on one variable
\describe{
\item{score}{college entrance exam score}
}}
\usage{
Entrance
}
\description{
Data for Example 1.8
}
\examples{
stem(Entrance$score)
stem(Entrance$score, scale = 2)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
|
library(kwai)
source("../R/compile.R")
source("../R/inferType.R")
source("../R/createIR2.R")
source("../R/visitStackMachine3.R")
source("../R/llvm_native_helper.R")
source("../R/discoverBlocks.R")
# Clamp: inputs greater than 2 map to -1, everything else passes through
# unchanged. Used as a simple branching example for byte2llvm().
myfunction2 <- function(a) {
  result <- if (a > 2) -1 else a
  result
}
#debug(inferType2)
# Compile the R function to native code via kwai's byte2llvm().
llvmFunc=byte2llvm(myfunction2)
# Show the original source, then compare interpreted vs compiled results
# for the same input; both calls should print -1.
print(myfunction2)
print(myfunction2(3))
print(llvmFunc(3))
| /test/test3.R | no_license | doktorschiwago/kwai | R | false | false | 396 | r |
library(kwai)
source("../R/compile.R")
source("../R/inferType.R")
source("../R/createIR2.R")
source("../R/visitStackMachine3.R")
source("../R/llvm_native_helper.R")
source("../R/discoverBlocks.R")
# Clamp: inputs greater than 2 map to -1, everything else passes through
# unchanged. Used as a simple branching example for byte2llvm().
myfunction2 <-function(a) {
qq=if (a>2) {
-1
} else {
a
}
return(qq)
}
#debug(inferType2)
# Compile the R function to native code via kwai's byte2llvm().
llvmFunc=byte2llvm(myfunction2)
# Show the original source, then compare interpreted vs compiled results
# for the same input; both calls should print -1.
print(myfunction2)
print(myfunction2(3))
print(llvmFunc(3))
|
\name{box_plot}
\alias{box plot}
\title{Box plot of the data}
\usage{
box_plot(x,...)
}
\description{
Visualising the data as a box plot, with a customised orange colour gradient and horizontal arrangement.
}
\arguments{
\item{x}{
data that has been imported as matrix.
}
}
\note{
The data to be imported should be a matrix.
}
\examples{
mtcars <- as.matrix(mtcars)
box_plot(mtcars)
}
| /man/box_plot.Rd | no_license | SanthoshKumarKarthikeyan/skkr | R | false | false | 405 | rd | \name{box_plot}
\alias{box plot}
\title{Box plot of the data}
\usage{
box_plot(x,...)
}
\description{
Visualising the data as a box plot, with a customised orange colour gradient and horizontal arrangement.
}
\arguments{
\item{x}{
data that has been imported as matrix.
}
}
\note{
The data to be imported should be a matrix.
}
\examples{
mtcars <- as.matrix(mtcars)
box_plot(mtcars)
}
|
# IR ranking script: builds a stem-by-document weight matrix, derives a
# stem-stem association matrix from it, and ranks documents against a set
# of queries, writing a TREC-style run file.
datahere <- "C:/Users/Marco/Desktop/IR3"
setwd(datahere)

# Per-(stem, document) weights: one posting per row.
path <- "output.txt"
pesi <- read.table(text = readLines(path))
names(pesi) <- c("stem", "doc", "peso")

# All distinct stems (these define the column order of the weight matrix).
listastems <- unique(pesi[, "stem"])
ndocs <- max(pesi[, 2])

# C[i, j] = weight of stem j in document i (0 when the stem is absent).
C <- matrix(0, ndocs, length(listastems))
for (i in seq_len(ndocs)) {
  usedstems <- pesi[pesi[, "doc"] == i, 1]
  usedweights <- pesi[pesi[, "doc"] == i, 3]
  C[i, match(usedstems, listastems)] <- usedweights
}

# Normalise each document row by its maximum weight
# (dividing a matrix by a length-nrow vector recycles per row).
C <- C / apply(C, 1, max)

# Stem-stem association matrix.
R <- t(C) %*% C

# Sparsify: zero out associations below the global mean.
# Vectorised fix: replaces a double loop with hard-coded 5222 bounds,
# so the script now works for any vocabulary size.
media <- mean(R)
#media=min(setdiff(as.vector(R),0))
R[R < media] <- 0

# L2-normalise each row of R.
R <- R / sqrt(rowSums(R^2))

# Query stems: one (query id, stem) pair per row.
path <- "query-stem.txt"
query <- read.table(text = readLines(path))
names(query) <- c("queryid", "stem")
listaquery <- sort(unique(query[, "queryid"]))

# Y[s, q] = 1 when stem s occurs in query q (query ids assumed to be 1..n).
Y <- matrix(0, length(listastems), length(listaquery))
for (i in seq_along(listaquery)) {
  usedstems <- query[query[, "queryid"] == i, 2]
  Y[match(usedstems, listastems), i] <- 1
}

# Document scores per query (association-expanded dot products).
ranks <- C %*% R %*% Y

# Write the top 1000 documents per query in TREC run format
# (query id, "Q0", doc id, rank, score, run tag).
for (i in seq_along(listaquery)) {
  dt <- data.frame(seq_len(ndocs), ranks[, i])
  dt <- dt[order(-dt[, 2]), ]
  for (j in 1:1000) {
    prova <- paste(i, "Q0", dt[j, 1], j, dt[j, 2], "G7R3")
    cat(prova, file = "output_lab3.txt", sep = "\t", append = TRUE)
    cat("\n", file = "output_lab3.txt", append = TRUE)
  }
}
| /IR3/Prova.r | no_license | mabarich/Information-Retrieval | R | false | false | 2,205 | r | #Lista di tutti gli stem
# Duplicate copy of the IR ranking script: builds a stem-by-document weight
# matrix, a stem-stem association matrix, and ranks documents per query,
# writing a TREC-style run file.
datahere = "C:/Users/Marco/Desktop/IR3"
setwd(datahere)
path="output.txt"
pesi=read.table(text=readLines(path))
# Assign the variable names (stem, document id, weight)
names(pesi)=c("stem","doc","peso")
# Vector with all stems (unique, never repeated)
listastems=unique(pesi[, "stem"])
# Matrix with the stem weights of every document (3204 rows x 5222 columns)
C=matrix(0, max(pesi[,2]), length(listastems))
for (i in 1:max(pesi[,2]))
{
usedstems=pesi[pesi[, "doc"]==i,1]
usedweights=pesi[pesi[, "doc"]==i,3]
for (j in 1:length(usedstems))
{
pos=match(usedstems[j], listastems)
C[i,pos]= usedweights[j]
}
}
# Normalise each row of the weight matrix C by its maximum weight
for (i in 1:max(pesi[,2]))
{
maxw=max(C[i,]);
C[i,]=C[i,]/maxw;
}
R=(t(C)%*%C)
media=mean(R)
#media=min(setdiff(as.vector(R),0))
# Sparsify R: zero out associations below the global mean.
# NOTE(review): the 5222 bounds are hard-coded; presumably they should be
# length(listastems) -- confirm before reusing on another collection.
for(i in 1:5222)
{
for(j in 1:5222)
{
if(R[i,j]<media)
{
R[i,j]=0;
}
}
}
# L2-normalise each row of R
for (i in 1:length(listastems))
{
norm=sqrt(sum(R[i,]^2));
R[i,]=R[i,]/norm;
}
# Matrix with the stems of every query (5222 rows x 64 columns);
# each column represents one query
path="query-stem.txt"
query=read.table(text=readLines(path))
# Assign the variable names
names(query)=c("queryid","stem")
# Vector with all queries (unique, never repeated)
listaquery=unique(query[, "queryid"])
listaquery=sort(listaquery)
Y=matrix(0, length(listastems), length(listaquery))
for (i in 1:length(listaquery))
{
usedstems=query[query[, "queryid"]==i,2]
for (j in 1:length(usedstems))
{
Y[match(usedstems[j], listastems),i]=1
}
}
# Ranks for the queries (for the moment without correlation)
ranks=C%*%R%*%Y
# Print the top 1000 documents per query in TREC run format
for (i in 1:length(listaquery))
{
# Build a data frame so `order` sorts the columns for us.
# NOTE(review): the first data.frame() call below is dead code -- it is
# overwritten on the next line and its arguments are not valid anyway.
dt=data.frame(ncol=2, col.names = c("doc", "rank"));
dt=data.frame(1:max(pesi[,2]), ranks[,i]);
dt=dt[order(-dt[,2]),];
for (j in 1:1000)
{
{
prova = paste(i,"Q0",dt[j,1],j,dt[j,2],"G7R3");
cat(prova, file="output_lab3.txt", sep="\t", append=TRUE)
cat("\n", file="output_lab3.txt", append=TRUE)
}
}
}
|
library(glmnet)
# Elastic-net fit on the stomach-tissue correlation feature set.
# Column 1 of the CSV is the response; predictors start at column 4.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/stomach.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the 10-fold cross-validation split is reproducible.
set.seed(123)
# alpha = 0.95 (near-lasso), gaussian family, MSE-based CV, no standardisation.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.95,family="gaussian",standardize=FALSE)
# Append the fitted coefficient-path summary to the results file.
sink('./stomach_094.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/stomach/stomach_094.R | no_license | esbgkannan/QSMART | R | false | false | 353 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/stomach.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.95,family="gaussian",standardize=FALSE)
sink('./stomach_094.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fipe.R
\name{fipe_vehicle}
\alias{fipe_vehicle}
\title{Access to the Fipe Database}
\usage{
fipe_vehicle(model, make = NULL, year = NULL, date = Sys.Date(),
progress = FALSE, parallel = FALSE)
}
\arguments{
\item{model}{a character vector. Vehicle model name.}
\item{make}{a character vector. If NULL, search all models in all makes,
otherwise only those indicated.}
\item{year}{a numeric vector. Year of manufacture of the vehicle. If 0
returns vehicles 0 km.}
\item{date}{a date vector. Reference date for the vehicle price.}
\item{progress}{a logical, if TRUE print a progress bar.}
\item{parallel}{a logical, if TRUE apply function in parallel.}
}
\value{
A data frame/tibble including model, make, year, date and price.
}
\description{
Access to the Fipe Database
}
\details{
The Fipe Database shows the average purchase price of vehicles in
the Brazilian national market. The prices are effectively used in purchase
negotiations according to region, vehicle’s conservation, color,
accessories or any other factor that might influence the demand and supply
for a specific vehicle. The year of the vehicle refers to the model year,
and the vehicles are not considered for professional or special use. The
values are expressed in R$ (reais) for each month/year of reference.
}
\examples{
\donttest{
fipe_vehicle(
model = "etios platinum", make = "toyota",
date = "2019-08-01", year = c(0, 2019, 2018)
)
}
}
\seealso{
Official Website \url{https://veiculos.fipe.org.br}.
}
| /man/fipe_vehicle.Rd | no_license | cran/fipe | R | false | true | 1,630 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fipe.R
\name{fipe_vehicle}
\alias{fipe_vehicle}
\title{Access to the Fipe Database}
\usage{
fipe_vehicle(model, make = NULL, year = NULL, date = Sys.Date(),
progress = FALSE, parallel = FALSE)
}
\arguments{
\item{model}{a character vector. Vehicle model name.}
\item{make}{a character vector. If NULL, search all models in all makes,
otherwise only those indicated.}
\item{year}{a numeric vector. Year of manufacture of the vehicle. If 0
returns vehicles 0 km.}
\item{date}{a date vector. Reference date for the vehicle price.}
\item{progress}{a logical, if TRUE print a progress bar.}
\item{parallel}{a logical, if TRUE apply function in parallel.}
}
\value{
A data frame/tibble including model, make, year, date and price.
}
\description{
Access to the Fipe Database
}
\details{
The Fipe Database shows the average purchase price of vehicles in
the Brazilian national market. The prices are effectively used in purchase
negotiations according to region, vehicle’s conservation, color,
accessories or any other factor that might influence the demand and supply
for a specific vehicle. The year of the vehicle refers to the model year,
and the vehicles are not considered for professional or special use. The
values are expressed in R$ (reais) for each month/year of reference.
}
\examples{
\donttest{
fipe_vehicle(
model = "etios platinum", make = "toyota",
date = "2019-08-01", year = c(0, 2019, 2018)
)
}
}
\seealso{
Official Website \url{https://veiculos.fipe.org.br}.
}
|
# Cleaning: combine the two Gun Violence Archive exports, deduplicate,
# standardise column names, and save the pre-geocode incidents file.
library(here)
library(tidyverse)

# read raw data ("Incident Date" arrives as e.g. "February 11, 2013")
gva1 <- read_csv(here("data", "2013-02-11 - 2017-03-18.csv"),
                 col_types = list("Incident Date" = col_datetime(format = "%B %d, %Y")))
gva2 <- read_csv(here("data", "2017-03-12 - 2021-10-27.csv"),
                 col_types = list("Incident Date" = col_datetime(format = "%B %d, %Y")))

# combine the two csv files in chronological order
gva <- gva1 %>% bind_rows(gva2) %>%
  arrange(`Incident Date`, `Incident ID`)

# Get unduplicated data (some incidents overlap because of the download procedure)
gva_unique <- gva %>% distinct()

# Rename columns to snake_case
gva_renamed <- gva_unique %>%
  rename(id = "Incident ID",
         date = "Incident Date",
         state = "State",
         city_county = "City Or County",
         address = "Address",
         killed = "# Killed",
         injured = "# Injured")

# Clean missing values in address: "N/A" to NA_character_
gva_renamed <- gva_renamed %>%
  mutate(address = str_replace(address, "N/A", NA_character_))

# Save the incidents data without geocode
write_rds(gva_renamed, here("data", "incidents_nogeocode.rds"))

# Check if we already have the geocoded rds file
geocoded_exists <- file.exists(here("data", "incidents.rds"))

# Run the geocode R file (takes time) only when its output is missing.
# Fix: the original called crayon::green() without attaching crayon (a
# runtime error) and printed the "already exists" message unconditionally,
# even when geocode.R had just been sourced.
if (!geocoded_exists) {
  source(here("r", "geocode.R"))
} else {
  message("The geocoded file already exists in the data folder.\n",
          "Not running the geocoding script.")
}
| /r/clean.R | permissive | de-data-lab/delaware-gun-violence | R | false | false | 1,447 | r | # Cleaning
library(here)
library(tidyverse)
# read raw data
gva1 <- read_csv(here("data", "2013-02-11 - 2017-03-18.csv"),
col_types = list("Incident Date" = col_datetime(format = "%B %d, %Y")))
gva2 <- read_csv(here("data", "2017-03-12 - 2021-10-27.csv"),
col_types = list("Incident Date" = col_datetime(format = "%B %d, %Y")))
# combine two csv files
gva <- gva1 %>% bind_rows(gva2) %>%
arrange(`Incident Date`, `Incident ID`)
# Get unduplicated data (some incidents overlap because of the download procedure)
gva_unique <- gva %>% distinct()
# Rename columns
gva_renamed <- gva_unique %>%
rename(id = "Incident ID",
date = "Incident Date",
state = "State",
city_county = "City Or County",
address = "Address",
killed = "# Killed",
injured = "# Injured")
# Clean missing values in address: "N/A" to NA_character_
gva_renamed <- gva_renamed %>%
mutate(address = str_replace(address, "N/A", NA_character_))
# Save the incidents data without geocode
write_rds(gva_renamed, here("data", "incidents_nogeocode.rds"))
# Check if we already have the geocoded rds file
geocoded_exists <- file.exists(here("data", "incidents.rds"))
# Run the geocode R file (takes time)
if(!geocoded_exists) source(here("r", "geocode.R"))
cat(green("The geocoded file already exists in the data folder.\n
Not running the cleaning script."))
|
### Function tidyContrasts ###
#' Function tidyContrasts
#'
#' Takes a DGEobj or contrast list as input and merges them into one tidy dataframe.
#' A contrast list is simply a list of topTable contrast dataframes. For
#' example, DGEobj::getType(mydgeobj, "topTable") would retrieve a list of
#' contrasts from a DGEobj. The contrast list must be a named list as the
#' contrast names are used during the merge operation.
#'
#' The input may or may not have rownames. If supplied rownameColumn does not exist as a colname in
#' the dataframes, it is created from the rownames. In tidy style, the output will have no rownames.
#'
#' The contrast names will be used as a new column in the tidy output format.
#'
#' @author John Thompson, \email{john.thompson@@bms.com}
#' @keywords RNA-Seq; contrasts; tidy merge
#'
#' @param x A DGEobj or named list of contrast dataframes
#'   (required).
#' @param rownameColumn Name of the rowname column. If a column by this
#'   name does not exist, it is created from the rownames property (rownames_to_column(var=rownameColumn))
#' @param includeColumns A character vector of columns to include in the output (default = colnames of the first contrast)
#'
#' @return A DF with merged contrast data.
#'
#' @examples
#'
#' #Get contrasts directly from a DGEobj
#' MyMergedTidyDF <- tidyContrasts (myDgeObj)
#'
#' #Assemble a list of contrasts from two DGEobjs; just logFC and conf intervals
#' myContrasts <- c(getType(DGEobj1, "topTable"), getType(DGEobj2, "topTable"))
#' MyMergedTidyDF <- tidyContrasts (myContrasts, includeColumns = c("logFC", "CI.R", "CI.L"))
#'
#' @importFrom assertthat assert_that
#' @importFrom dplyr bind_rows
#' @importFrom tibble rownames_to_column
#' @importFrom DGEobj getType
#'
#' @export
tidyContrasts <- function(x, rownameColumn="rownames", includeColumns){

  assertthat::assert_that(class(x)[[1]] %in% c("DGEobj", "list"))

  if (inherits(x, "DGEobj")){
    dgeObj <- x
    x <- DGEobj::getType(dgeObj, "topTable")
    if (length(x) == 0)
      stop("No topTable dataframes found in DGEobj\n")
  } #x is now a contrastlist

  #make sure list contains only dataframes (is.data.frame also accepts tibbles)
  if (!all(vapply(x, is.data.frame, logical(1))))
    stop("Input list must contain only dataframes\n")

  #make sure each df has a name; a NULL names attribute is just as fatal as
  #an empty name, because the names become the Contrast column below
  if (is.null(names(x)) || any(!nzchar(names(x)))) {
    stop("At least one of the dataframes in the input list has no name.\n")
  }

  #Set default columns
  if (missing(includeColumns))
    includeColumns <- colnames(x[[1]])

  #find the common set of columns present in all dataframes
  #(Reduce() is safe for a length-1 list, unlike the 2:length(x) idiom)
  commonColumns <- Reduce(intersect, lapply(x, colnames))

  #make sure user-requested columns are present
  if (!all(includeColumns %in% commonColumns))
    warning("Some requested columns are not present in all dataframes.")
  commonColumns <- intersect(commonColumns, includeColumns)

  #Does the rownameColumn exist in df1?
  if (!rownameColumn %in% colnames(x[[1]])) {
    #move rownames to rownameColumn (rownames_to_column lives in tibble)
    x <- lapply(x, tibble::rownames_to_column, var=rownameColumn)
    commonColumns <- c(rownameColumn, commonColumns)
  }

  #reduce all dataframes to the selected columns in a fixed order;
  #base subsetting avoids the dplyr::select() character-vector ambiguity
  x <- lapply(x, function(df) df[, commonColumns, drop = FALSE])

  #add a contrast name column to each DF
  for (name in names(x)){
    x[[name]]["Contrast"] <- name
  }

  #Now merge the dataframes vertically
  x <- dplyr::bind_rows(x)

  return(x)
}
| /R/tidyContrasts.R | no_license | jrthompson54/DGE.Tools2 | R | false | false | 3,523 | r | ### Function tidyContrasts ###
#' Function tidyContrasts
#'
#' Takes a DGEobj or contrast list as input and merges them into one tidy dataframe.
#' A contrast list is simply a list of topTable contrast dataframes. For
#' example, DGEobj::getType(mydgeobj, "topTable") would retrieve a list of
#' contrasts from a DGEobj. The contrast list must be a named list as the
#' contrast names are used during the merge operation.
#'
#' The input may or may not have rownames. If supplied rownameColumn does not exist as a colname in
#' the dataframes, it is created from the rownames. In tidy style, the output will have no rownames.
#'
#' The contrast names will be used as a new column in the tidy output format.
#'
#' @author John Thompson, \email{john.thompson@@bms.com}
#' @keywords RNA-Seq; contrasts; tidy merge
#'
#' @param x A DGEobj or named list of contrast dataframes
#' (required).
#' @param rownameColumn Name of the rowname column. If a column by this
#' name does not exist, it is created from the rownames property (rownames_to_column(var=rownameColumn))
#' @param includeColumns A character vector of columns to include in the output (default = colnames of the first contrast)
#'
#' @return A DF with merged contrast data.
#'
#' @examples
#'
#' #Get contrasts directly from a DGEobj
#' MyMergedTidyDF <- tidyContrasts (myDgeObj)
#'
#' #Assemble a list of contrasts from two DGEobjs; just logFC and conf intervals
#' myContrasts <- c(getType(DGEobj1, "topTable"), getType(DEobj2, "topTable"))
#' MyMergedTidyDF <- tidyContrasts (myContrasts, includeColumns = c("logFC", "CI.R", "CI.L"))
#'
#' @importFrom assertthat assert_that
#' @importFrom dplyr select bind_rows
#' @importFrom tidyr gather
#' @importFrom DGEobj getType
#'
#' @export
# Merge a DGEobj's topTable contrasts (or a named list of contrast
# dataframes) into one tidy dataframe with a "Contrast" column.
# NOTE(review): latent issues worth confirming:
#   * `for (i in 2:length(x))` misbehaves for a single-element list
#     (2:1 counts down and indexes x[[2]] out of bounds);
#   * `sapply(names(x), nchar)` on a NULL names attribute gives min() = Inf,
#     so fully unnamed lists slip past the name check;
#   * rownames_to_column() (tibble) and select() (dplyr) are not namespaced,
#     so both packages must be attached by the caller.
tidyContrasts <- function(x, rownameColumn="rownames", includeColumns){
  # x must be a DGEobj or a plain list (checked via the first class string)
  assertthat::assert_that(class(x)[[1]] %in% c("DGEobj", "list"))
  if (class(x)[[1]] == "DGEobj"){
    dgeObj <- x
    # pull the list of topTable contrast dataframes out of the DGEobj
    x <- DGEobj::getType(dgeObj, "topTable")
    if (length(x) == 0)
      stop("No topTable dataframes found in DGEobj\n")
  } #x is now a contrastlist
  #make sure list contains only dataframes
  if (all(sapply(x, class) == "data.frame") == FALSE)
    stop ("Input list must contain only dataframes\n")
  #make sure each df has a name (names become the Contrast column below)
  minNameLen <- min(sapply(names(x), nchar))
  if (minNameLen == 0) {
    stop("At least one of the dataframes in the input list has no name.\n")
  }
  #Set default columns
  if (missing(includeColumns))
    includeColumns <- colnames(x[[1]])
  #find the common set of columns present in all dataframes.
  commonColumns <- colnames(x[[1]])
  for (i in 2:length(x))
    commonColumns <- intersect(commonColumns, colnames(x[[i]]))
  #make sure user-requested columns are present
  if (!all(includeColumns %in% commonColumns))
    warning("Some requested columns are not present in all dataframes.")
  commonColumns <- intersect(commonColumns, includeColumns)
  #Does the rownameColumn exist in df1?
  if (!rownameColumn %in% colnames(x[[1]])) {
    #move rownames to rownameColumn
    x <- lapply(x, rownames_to_column, var=rownameColumn)
    commonColumns <- c(rownameColumn, commonColumns)
  }
  #reduce all dataframes to the selected columns
  #this also ensures the same column order in each df
  x <- lapply(x, select, commonColumns)
  #add a contrast name column to each DF
  for (name in names(x)){
    x[[name]]["Contrast"] <- name
  }
  #Now merge the dataframes vertically
  x <- dplyr::bind_rows(x)
  return(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readingandfes.R
\name{summary.hillsfile3d}
\alias{summary.hillsfile3d}
\title{Print summary for hillsfile3d}
\usage{
\method{summary}{hillsfile3d}(object, ...)
}
\arguments{
\item{object}{hillsfile3d object.}
\item{...}{further arguments passed to or from other methods.}
}
\description{
`summary.hillsfile3d` prints dimensionality, size and collective variable ranges of a hillsfile3d object.
}
\examples{
summary(acealanme3d)
}
| /metadynminer3d/man/summary.hillsfile3d.Rd | no_license | akhikolla/InformationHouse | R | false | true | 509 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readingandfes.R
\name{summary.hillsfile3d}
\alias{summary.hillsfile3d}
\title{Print summary for hillsfile3d}
\usage{
\method{summary}{hillsfile3d}(object, ...)
}
\arguments{
\item{object}{hillsfile3d object.}
\item{...}{further arguments passed to or from other methods.}
}
\description{
`summary.hillsfile3d` prints dimensionality, size and collective variable ranges of a hillsfile3d object.
}
\examples{
summary(acealanme3d)
}
|
#' Calculates Individual Variable Profiles aka Ceteris Paribus Profiles
#'
#' This explainer works for individual observations.
#' For each observation it calculates Individual Variable Profiles for selected variables.
#' For this reason it is also called 'Local Profile Plot'.
#'
#' @param x a model to be explained, or an explainer created with function `DALEX::explain()`.
#' @param data validation dataset, will be extracted from `x` if it's an explainer
#' @param predict_function predict function, will be extracted from `x` if it's an explainer
#' @param new_observation a new observation with columns that corresponds to variables used in the model
#' @param y true labels for `new_observation`. If specified then will be added to ceteris paribus plots.
#' @param variables names of variables for which profiles shall be calculated. Will be passed to `calculate_variable_splits()`. If NULL then all variables from the validation data will be used.
#' @param ... other parameters
#' @param variable_splits named list of splits for variables, in most cases created with `calculate_variable_splits()`. If NULL then it will be calculated based on validation data avaliable in the `explainer`.
#' @param grid_points number of points for profile. Will be passed to `calculate_variable_splits()`.
#' @param label name of the model. By default it's extracted from the 'class' attribute of the model
#'
#'
#' @return An object of the class 'ceteris_paribus_explainer'.
#' It's a data frame with calculated average responses.
#' @export
#'
#' @examples
#' library("DALEX2")
#' library("ceterisParibus2")
#' \dontrun{
#' library("randomForest")
#' set.seed(59)
#'
#' apartments_rf <- randomForest(m2.price ~ construction.year + surface + floor +
#' no.rooms + district, data = apartments)
#'
#' explainer_rf <- explain(apartments_rf,
#' data = apartments_test[,2:6], y = apartments_test$m2.price)
#'
#' my_apartment <- apartments_test[1, ]
#'
#' lp_rf <- individual_variable_profile(explainer_rf, my_apartment)
#' head(lp_rf)
#'
#' plot(lp_rf)
#'
#' # --------
#' # multiclass
#'
#' HR_rf <- randomForest(status ~ . , data = HR)
#' explainer_rf <- explain(HR_rf, data = HRTest, y = HRTest)
#'
#' my_HR <- HRTest[1, ]
#'
#' lp_rf <- individual_variable_profile(explainer_rf, my_HR)
#' head(lp_rf)
#'
#' plot(lp_rf, color = "_label_")
#'
#' }
# S3 generic for ceteris paribus (individual variable) profiles.
# Dispatches on the class of `x`: see the `explainer` method and the default.
#' @export
#' @rdname local_profile
individual_variable_profile <- function(x, ...)
UseMethod("individual_variable_profile")
#' @export
#' @rdname local_profile
individual_variable_profile.explainer <- function(x, new_observation, y = NULL, variables = NULL,
                                                  variable_splits = NULL, grid_points = 101,
                                                  ...) {
  # extract model, data and predict function from the explainer
  model <- x$model
  data <- x$data
  predict_function <- x$predict_function
  label <- x$label

  individual_variable_profile.default(model, data, predict_function,
                                      new_observation = new_observation,
                                      label = label,
                                      variables = variables,
                                      # fix: variable_splits was accepted but
                                      # silently dropped before this change
                                      variable_splits = variable_splits,
                                      grid_points = grid_points,
                                      y = y,
                                      ...)
}
#' @export
#' @rdname local_profile
individual_variable_profile.default <- function(x, data, predict_function = predict,
                                                new_observation, y = NULL, variables = NULL,
                                                variable_splits = NULL,
                                                grid_points = 101,
                                                label = class(x)[1], ...) {
  # Keep only the variables shared by the validation data and the new
  # observation; this shortcut works only for data.frames.
  if ("data.frame" %in% class(data)) {
    common_variables <- intersect(colnames(new_observation), colnames(data))
    new_observation <- new_observation[, common_variables, drop = FALSE]
    data <- data[, common_variables, drop = FALSE]
  }

  # calculate splits
  # if splits are not provided, then they will be calculated from `data`
  if (is.null(variable_splits)) {
    # need validation data from the explainer
    if (is.null(data))
      stop("The individual_variable_profile() function requires explainers created with specified 'data'.")
    # need variables; if not provided, they are extracted from data
    if (is.null(variables))
      variables <- colnames(data)
    variable_splits <- calculate_variable_split(data, variables = variables, grid_points = grid_points)
  }

  # calculate ceteris paribus profiles for every selected variable
  profiles <- calculate_variable_profile(new_observation,
                                         variable_splits, x, predict_function)

  # Multiclass models yield several `_yhat_*` columns; reshape them into a
  # single `_yhat_` column with one label per class.
  col_yhat <- grep(colnames(profiles), pattern = "^_yhat_")
  if (length(col_yhat) == 1) {
    profiles$`_label_` <- label

    # add points of interest
    new_observation$`_yhat_` <- predict_function(x, new_observation)
    new_observation$`_label_` <- label
    new_observation$`_ids_` <- 1:nrow(new_observation)
    if (!is.null(y)) new_observation$`_y_` <- y
  } else {
    # stack the `_yhat_*` columns and build per-class labels "<label><class>"
    new_profiles <- profiles[rep(1:nrow(profiles), times = length(col_yhat)), -col_yhat]
    new_profiles$`_yhat_` <- unlist(c(profiles[, col_yhat]))
    stripped_names <- gsub(colnames(profiles)[col_yhat], pattern = "_yhat_", replacement = "")
    new_profiles$`_label_` <- paste0(label, rep(stripped_names, each = nrow(profiles)))
    profiles <- new_profiles

    # add points of interest: observations replicated once per class,
    # in the same block order as the stacked predictions below
    new_observation_ext <- new_observation[rep(1:nrow(new_observation), times = length(col_yhat)), ]
    predict_obs <- predict_function(x, new_observation)
    new_observation_ext$`_yhat_` <- unlist(c(predict_obs))
    new_observation_ext$`_label_` <- paste0(label, rep(stripped_names, each = nrow(new_observation)))
    # fix: ids must repeat the observation index once per class; the original
    # rep(..., each = nrow(new_observation)) had length nrow(new_observation)^2
    # and mismapped rows to observations
    new_observation_ext$`_ids_` <- rep(1:nrow(new_observation), times = length(col_yhat))
    # add y, replicated to match the per-class row blocks
    if (!is.null(y)) new_observation_ext$`_y_` <- rep(y, times = length(col_yhat))
    new_observation <- new_observation_ext
  }

  # prepare final object
  attr(profiles, "observations") <- new_observation
  class(profiles) <- c("individual_variable_profile_explainer", "data.frame")
  profiles
}
| /R/ceteris_paribus.R | no_license | ModelOriented/ceterisParibus2 | R | false | false | 6,432 | r | #' Calculates Individual Variable Profiles aka Ceteris Paribus Profiles
#'
#' This explainer works for individual observations.
#' For each observation it calculates Individual Variable Profiles for selected variables.
#' For this reason it is also called 'Local Profile Plot'.
#'
#' @param x a model to be explained, or an explainer created with function `DALEX::explain()`.
#' @param data validation dataset, will be extracted from `x` if it's an explainer
#' @param predict_function predict function, will be extracted from `x` if it's an explainer
#' @param new_observation a new observation with columns that correspond to variables used in the model
#' @param y true labels for `new_observation`. If specified then will be added to ceteris paribus plots.
#' @param variables names of variables for which profiles shall be calculated. Will be passed to `calculate_variable_splits()`. If NULL then all variables from the validation data will be used.
#' @param ... other parameters
#' @param variable_splits named list of splits for variables, in most cases created with `calculate_variable_splits()`. If NULL then it will be calculated based on validation data available in the `explainer`.
#' @param grid_points number of points for profile. Will be passed to `calculate_variable_splits()`.
#' @param label name of the model. By default it's extracted from the 'class' attribute of the model
#'
#'
#' @return An object of the class 'ceteris_paribus_explainer'.
#' It's a data frame with calculated average responses.
#' @export
#'
#' @examples
#' library("DALEX2")
#' library("ceterisParibus2")
#' \dontrun{
#' library("randomForest")
#' set.seed(59)
#'
#' apartments_rf <- randomForest(m2.price ~ construction.year + surface + floor +
#' no.rooms + district, data = apartments)
#'
#' explainer_rf <- explain(apartments_rf,
#' data = apartments_test[,2:6], y = apartments_test$m2.price)
#'
#' my_apartment <- apartments_test[1, ]
#'
#' lp_rf <- individual_variable_profile(explainer_rf, my_apartment)
#' head(lp_rf)
#'
#' plot(lp_rf)
#'
#' # --------
#' # multiclass
#'
#' HR_rf <- randomForest(status ~ . , data = HR)
#' explainer_rf <- explain(HR_rf, data = HRTest, y = HRTest)
#'
#' my_HR <- HRTest[1, ]
#'
#' lp_rf <- individual_variable_profile(explainer_rf, my_HR)
#' head(lp_rf)
#'
#' plot(lp_rf, color = "_label_")
#'
#' }
#' @export
#' @rdname local_profile
individual_variable_profile <- function(x, ...) {
  # S3 generic; dispatches to the .explainer / .default methods below.
  UseMethod("individual_variable_profile")
}
#' @export
#' @rdname local_profile
individual_variable_profile.explainer <- function(x, new_observation, y = NULL, variables = NULL,
                                                  variable_splits = NULL, grid_points = 101,
                                                  ...) {
  # extracts model, data and predict function from the explainer
  model <- x$model
  data <- x$data
  predict_function <- x$predict_function
  label <- x$label
  # BUG FIX: forward `variable_splits` to the default method. It was accepted
  # by this method but silently dropped, so user-supplied splits were ignored.
  individual_variable_profile.default(model, data, predict_function,
                                      new_observation = new_observation,
                                      label = label,
                                      variables = variables,
                                      variable_splits = variable_splits,
                                      grid_points = grid_points,
                                      y = y,
                                      ...)
}
#' @export
#' @rdname local_profile
individual_variable_profile.default <- function(x, data, predict_function = predict,
                                                new_observation, y = NULL, variables = NULL,
                                                variable_splits = NULL,
                                                grid_points = 101,
                                                label = class(x)[1], ...) {
  # Restrict the new observation and the validation data to their shared
  # columns, so profiles are computed only for variables present in both.
  # This will work only for data.frames.
  if ("data.frame" %in% class(data)) {
    common_variables <- intersect(colnames(new_observation), colnames(data))
    new_observation <- new_observation[, common_variables, drop = FALSE]
    data <- data[, common_variables, drop = FALSE]
  }
  p <- ncol(data)
  # If splits are not provided, they will be calculated from `data`.
  if (is.null(variable_splits)) {
    # need validation data from the explainer
    if (is.null(data))
      stop("The individual_variable_profile() function requires explainers created with specified 'data'.")
    # need variables; if not provided, they are extracted from data
    if (is.null(variables))
      variables <- colnames(data)
    variable_splits <- calculate_variable_split(data, variables = variables, grid_points = grid_points)
  }
  # calculate profiles
  profiles <- calculate_variable_profile(new_observation,
                                         variable_splits, x, predict_function)
  # If there is more than one column matching `_yhat_` (multi-output models),
  # they are collapsed into a single `_yhat_` column with per-output labels.
  col_yhat <- grep(colnames(profiles), pattern = "^_yhat_")
  if (length(col_yhat) == 1) {
    profiles$`_label_` <- label
    # add points of interest
    new_observation$`_yhat_` <- predict_function(x, new_observation)
    new_observation$`_label_` <- label
    new_observation$`_ids_` <- 1:nrow(new_observation)
    if (!is.null(y)) new_observation$`_y_` <- y
  } else {
    # we need to recreate _yhat_ and create proper labels
    new_profiles <- profiles[rep(1:nrow(profiles), times = length(col_yhat)), -col_yhat]
    new_profiles$`_yhat_` <- unlist(c(profiles[, col_yhat]))
    stripped_names <- gsub(colnames(profiles)[col_yhat], pattern = "_yhat_", replacement = "")
    new_profiles$`_label_` <- paste0(label, rep(stripped_names, each = nrow(profiles)))
    profiles <- new_profiles
    # add points of interest; rows are replicated once per output column
    new_observation_ext <- new_observation[rep(1:nrow(new_observation), times = length(col_yhat)), ]
    predict_obs <- predict_function(x, new_observation)
    new_observation_ext$`_yhat_` <- unlist(c(predict_obs))
    new_observation_ext$`_label_` <- paste0(label, rep(stripped_names, each = nrow(new_observation)))
    # BUG FIX: ids must follow the same replication scheme as the rows above
    # (`times = length(col_yhat)`). The original `each = nrow(new_observation)`
    # produced a vector of the wrong length whenever
    # nrow(new_observation) != length(col_yhat).
    new_observation_ext$`_ids_` <- rep(1:nrow(new_observation), times = length(col_yhat))
    # add y, replicated once per output label
    if (!is.null(y)) new_observation_ext$`_y_` <- rep(y, times = length(col_yhat))
    new_observation <- new_observation_ext
  }
  # prepare final object
  attr(profiles, "observations") <- new_observation
  class(profiles) <- c("individual_variable_profile_explainer", "data.frame")
  profiles
}
|
# Namespace-load hook: register each compiled Stan model's Rcpp module.
# Module names follow the "stan_fit4<model>_mod" convention; `stanmodels`
# and `loadModule` are presumably provided by the rstantools/Rcpp
# scaffolding -- confirm against the package build.
.onLoad <- function(libname, pkgname) {
  for (module_name in paste0("stan_fit4", names(stanmodels), "_mod")) {
    loadModule(module_name, what = TRUE)
  }
}
| /fuzzedpackages/bayes4psy/R/zzz.R | no_license | akhikolla/testpackages | R | false | false | 152 | r | .onLoad <- function(libname, pkgname) {
modules <- paste0("stan_fit4", names(stanmodels), "_mod")
for (m in modules) loadModule(m, what=TRUE)
}
|
# Load one synthetic multi-trait replicate `k`: the shared marker matrix plus
# replicate-specific phenotypes and true simulation parameters.
# Returns a list with elements p.probe, m.probe, weight_1, major_snp_1, weight_cov.
Synthetic_data_MT <- function(k) {
  ### Load MARKERS ###
  # First CSV column holds the sample identifiers; promote it to row names.
  markers <- read.csv('data_csv/synthetic/markers_synthetic.csv')
  rownames(markers) <- markers[, 1]
  markers <- markers[, -1]
  ### Load PHENO ###
  # Phenotype rows are assumed aligned with the marker rows -- TODO confirm.
  pheno <- read.csv(paste0('data_csv/synthetic/pheno_synthetic_mt_', k, '.csv'))
  pheno <- pheno[, -1]
  row.names(pheno) <- row.names(markers)
  # True marker weights for trait 1; named by marker.
  w1 <- read.csv(paste0('data_csv/synthetic/weight_1_synthetic_mt_', k, '.csv'))
  w1 <- w1[, -1]
  names(w1) <- colnames(markers)
  # Major SNP indicators for trait 1.
  snp1 <- read.csv(paste0('data_csv/synthetic/major_snp_1_synthetic_mt_', k, '.csv'))
  snp1 <- as.vector(snp1[, -1])
  # Covariate weights.
  wcov <- read.csv(paste0('data_csv/synthetic/weight_cov_mt_', k, '.csv'))
  wcov <- as.vector(wcov[, -1])
  list(p.probe = as.matrix(pheno),
       m.probe = as.data.frame(markers),
       weight_1 = w1,
       major_snp_1 = snp1,
       weight_cov = wcov)
}
} | /BDE_data/Synthetic_data_MT.R | no_license | aho25/GS_BDE | R | false | false | 988 | r | Synthetic_data_MT <- function(k) {
### Load MARKERS ###
MARKERS <- read.csv('data_csv/synthetic/markers_synthetic.csv')
rownames(MARKERS) <- MARKERS[,1]
MARKERS <- MARKERS[,-1]
### Load PHENO ###
PHENO <- read.csv(paste0('data_csv/synthetic/pheno_synthetic_mt_', k, '.csv'))
PHENO <- PHENO[,-1]
row.names(PHENO) <- row.names(MARKERS)
weight_1 <- read.csv(paste0('data_csv/synthetic/weight_1_synthetic_mt_', k, '.csv'))
weight_1 <- weight_1[,-1]
names(weight_1) <- colnames(MARKERS)
major_snp_1 <- read.csv(paste0('data_csv/synthetic/major_snp_1_synthetic_mt_', k, '.csv'))
major_snp_1 <- as.vector(major_snp_1[,-1])
weight_cov <- read.csv(paste0('data_csv/synthetic/weight_cov_mt_', k, '.csv'))
weight_cov <- as.vector(weight_cov[,-1])
MARKERS <- as.data.frame(MARKERS)
PHENO <- as.matrix(PHENO)
Init_data <- list(p.probe=PHENO, m.probe=MARKERS, weight_1=weight_1, major_snp_1=major_snp_1, weight_cov=weight_cov)
return(Init_data)
} |
#Spp Machine Learning in R - Project 2
#Predicting whether a loan application is SUCCESS OR NOT
#--------------------------------------------------------
# NOTE(review): absolute Windows path -- the script only runs on this machine.
# Consider a relative path or file.path().
loan.df <- read.csv("D:\\001_Data\\Not Completed\\1R -Loan Prediction Dataset\\Loan Prediction Dataset.csv")
loan.df <- loan.df[-1] #Dropping the loanID column
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#1) EXPLORATORY DATA ANALYSIS
#-------------------------
# Quick structural overview of the data set.
head(loan.df)
tail(loan.df)
summary(loan.df) #LoanAmount,Loan_Amount_Term,Credit_History has null values
dim(loan.df)
t(t(names(loan.df))) # column names printed as a one-column matrix
#1.1)Checking unique values of every 'character' columns
unique(loan.df$Gender)
unique(loan.df$Married)
unique(loan.df$Dependents)
unique(loan.df$Education)
unique(loan.df$Self_Employed)
unique(loan.df$Property_Area)
unique(loan.df$Loan_Status)
#1.2)Manipulating the data
# Dependents: "3+" cannot be parsed by as.numeric() and becomes NA (with a
# coercion warning); those NAs are then set to 3.
# NOTE(review): genuinely missing Dependents values are also set to 3 by this
# step -- confirm that is intended. Column 3 is Dependents.
loan.df$Dependents <- as.numeric(as.character(loan.df$Dependents))
loan.df[is.na(loan.df$Dependents), 3] <- 3
# Binary encodings: 1 = Male / Married / Graduate / Self-employed / Approved.
loan.df$Gender <- ifelse(loan.df$Gender == 'Male', 1, 0)
loan.df$Married <- ifelse(loan.df$Married == 'Yes', 1, 0)
loan.df$Education <- ifelse(loan.df$Education == 'Graduate', 1, 0)
loan.df$Self_Employed <- ifelse(loan.df$Self_Employed == 'Yes', 1, 0)
loan.df$Loan_Status <- ifelse(loan.df$Loan_Status == "Y", 1, 0)
# Ordinal encoding of Property_Area (column 11): Rural=0, Semiurban=1, Urban=2.
loan.df[loan.df$Property_Area == 'Rural', 11] <- 0
loan.df[loan.df$Property_Area == 'Semiurban', 11] <- 1
loan.df[loan.df$Property_Area == 'Urban', 11] <- 2
loan.df$Property_Area <- as.numeric(as.character(loan.df$Property_Area))
#Confirming the changed values
# Every column below should now show only numeric codes.
unique(loan.df$Gender)
unique(loan.df$Married)
unique(loan.df$Dependents)
unique(loan.df$Education)
unique(loan.df$Self_Employed)
unique(loan.df$Property_Area)
unique(loan.df$Loan_Status)
library(Amelia)
# Visualise missingness before imputation.
missmap(obj = loan.df, legend = FALSE, main = "Missing values heatmap",
        col = c("yellow", "black"))
# Median-impute the three columns with missing values. Use column NAMES
# rather than the original hard-coded positions (8, 9, 10), which silently
# break if the column order ever changes.
for (col.name in c("LoanAmount", "Loan_Amount_Term", "Credit_History")) {
  loan.df[[col.name]][is.na(loan.df[[col.name]])] <-
    median(loan.df[[col.name]], na.rm = TRUE)
}
# Checking the heatmap again -- should show no missing cells now.
missmap(obj = loan.df, legend = FALSE, main = "Missing values heatmap",
        col = c("yellow", "black"))
#1.3) Data Visualization
#----------------------
library(ggplot2)
# Income distributions.
applicant.income <- ggplot(loan.df, aes(x = ApplicantIncome))
plot.1 <- applicant.income + geom_histogram(color = 'red', fill = 'pink')
plot.1 + ggtitle("Main Applicants Income Distribution") + xlab("Applicant's Income") +
  ylab("Count")
co.applicant.income <- ggplot(loan.df, aes(x = CoapplicantIncome))
plot.2 <- co.applicant.income + geom_histogram(color = 'red', fill = 'pink')
plot.2 + ggtitle("Co-Applicants Income Distribution") + xlab("Co-Applicant's Income") +
  ylab("Count")
# Categorical counts.
education.count <- ggplot(loan.df, aes(x = Education))
plot.3 <- education.count + geom_bar(color = 'red', fill = 'pink')
plot.3 + ggtitle("Count of Grads and Non-Grads") + xlab("Graduation Status") +
  ylab("Count") + scale_x_discrete(limits = c(0, 1))
prop.area.count <- ggplot(loan.df, aes(x = Property_Area))
plot.4 <- prop.area.count + geom_bar(color = 'red', fill = 'pink')
plot.4 + ggtitle("Count of Property Area") + xlab("Property Area") +
  ylab("Count")
loan.status.count <- ggplot(loan.df, aes(x = Loan_Status))
# BUG FIX: plot.5 previously reused prop.area.count, so the "Loan Status"
# chart actually showed Property_Area counts.
plot.5 <- loan.status.count + geom_bar(color = 'red', fill = 'pink')
plot.5 + ggtitle("Count of Loan Status") + xlab("Loan Status") +
  ylab("Count")
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#2) Building Logistic Regression Model
# 70/30 train/validation split on row names (sample() on the remainder just
# permutes the validation rows).
loan.train.rows <- sample(rownames(loan.df),
                          dim(loan.df)[1]*0.7)
loan.train.df <- loan.df[loan.train.rows, ]
loan.validate.rows <- sample(setdiff(rownames(loan.df),
                                     loan.train.rows))
loan.validate.df <- loan.df[loan.validate.rows, ]
logistic.loan <- glm(Loan_Status ~ ., data = loan.train.df, family = binomial("logit"))
summary(logistic.loan)
# BUG FIX: predict() on a glm returns the LINK scale (log-odds) by default,
# so the 0.5 cutoff was applied to log-odds rather than probabilities;
# type = "response" yields probabilities.
loan.approval.predictions <- predict(logistic.loan, loan.validate.df, type = "response")
prediction.results <- ifelse(loan.approval.predictions > 0.5, 1, 0)
misClassificationRate <- mean(prediction.results != loan.validate.df$Loan_Status)
print((1 - misClassificationRate) * 100)  # accuracy in percent
# Confusion matrix: actual (rows) vs predicted (cols), reusing the same
# thresholded predictions as the accuracy computation above.
table(loan.validate.df$Loan_Status, prediction.results)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
| /SPP_ML_2_Loan Outcome Predictions.R | no_license | drdataSpp/Spp-Machine-Learning-using-R | R | false | false | 4,995 | r | #Spp Machine Learning in R - Project 2
#Predicting whether a loan application is SUCCESS OR NOT
#--------------------------------------------------------
loan.df <- read.csv("D:\\001_Data\\Not Completed\\1R -Loan Prediction Dataset\\Loan Prediction Dataset.csv")
loan.df <- loan.df[-1] #Dropping the loanID column
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#1) EXPLORATORY DATA ANALYSIS
#-------------------------
head(loan.df)
tail(loan.df)
summary(loan.df) #LoanAmount,Loan_Amount_Term,Credit_History has null values
dim(loan.df)
t(t(names(loan.df)))
#1.1)Checking unique values of every 'character' columns
unique(loan.df$Gender)
unique(loan.df$Married)
unique(loan.df$Dependents)
unique(loan.df$Education)
unique(loan.df$Self_Employed)
unique(loan.df$Property_Area)
unique(loan.df$Loan_Status)
#1.2)Manipulating the data
loan.df$Dependents <- as.numeric(as.character(loan.df$Dependents))
loan.df[is.na(loan.df$Dependents), 3] <- 3
loan.df$Gender <- ifelse(loan.df$Gender == 'Male', 1, 0)
loan.df$Married <- ifelse(loan.df$Married == 'Yes', 1, 0)
loan.df$Education <- ifelse(loan.df$Education == 'Graduate', 1, 0)
loan.df$Self_Employed <- ifelse(loan.df$Self_Employed == 'Yes', 1, 0)
loan.df$Loan_Status <- ifelse(loan.df$Loan_Status == "Y", 1, 0)
loan.df[loan.df$Property_Area == 'Rural', 11] <- 0
loan.df[loan.df$Property_Area == 'Semiurban', 11] <- 1
loan.df[loan.df$Property_Area == 'Urban', 11] <- 2
loan.df$Property_Area <- as.numeric(as.character(loan.df$Property_Area))
#Confirming the changed values
unique(loan.df$Gender)
unique(loan.df$Married)
unique(loan.df$Dependents)
unique(loan.df$Education)
unique(loan.df$Self_Employed)
unique(loan.df$Property_Area)
unique(loan.df$Loan_Status)
library(Amelia)
# Visualise missingness before imputation.
missmap(obj = loan.df, legend = FALSE, main = "Missing values heatmap",
        col = c("yellow", "black"))
# Median-impute the three columns with missing values. Use column NAMES
# rather than the original hard-coded positions (8, 9, 10), which silently
# break if the column order ever changes.
for (col.name in c("LoanAmount", "Loan_Amount_Term", "Credit_History")) {
  loan.df[[col.name]][is.na(loan.df[[col.name]])] <-
    median(loan.df[[col.name]], na.rm = TRUE)
}
# Checking the heatmap again -- should show no missing cells now.
missmap(obj = loan.df, legend = FALSE, main = "Missing values heatmap",
        col = c("yellow", "black"))
#1.3) Data Visualization
#----------------------
library(ggplot2)
# Income distributions.
applicant.income <- ggplot(loan.df, aes(x = ApplicantIncome))
plot.1 <- applicant.income + geom_histogram(color = 'red', fill = 'pink')
plot.1 + ggtitle("Main Applicants Income Distribution") + xlab("Applicant's Income") +
  ylab("Count")
co.applicant.income <- ggplot(loan.df, aes(x = CoapplicantIncome))
plot.2 <- co.applicant.income + geom_histogram(color = 'red', fill = 'pink')
plot.2 + ggtitle("Co-Applicants Income Distribution") + xlab("Co-Applicant's Income") +
  ylab("Count")
# Categorical counts.
education.count <- ggplot(loan.df, aes(x = Education))
plot.3 <- education.count + geom_bar(color = 'red', fill = 'pink')
plot.3 + ggtitle("Count of Grads and Non-Grads") + xlab("Graduation Status") +
  ylab("Count") + scale_x_discrete(limits = c(0, 1))
prop.area.count <- ggplot(loan.df, aes(x = Property_Area))
plot.4 <- prop.area.count + geom_bar(color = 'red', fill = 'pink')
plot.4 + ggtitle("Count of Property Area") + xlab("Property Area") +
  ylab("Count")
loan.status.count <- ggplot(loan.df, aes(x = Loan_Status))
# BUG FIX: plot.5 previously reused prop.area.count, so the "Loan Status"
# chart actually showed Property_Area counts.
plot.5 <- loan.status.count + geom_bar(color = 'red', fill = 'pink')
plot.5 + ggtitle("Count of Loan Status") + xlab("Loan Status") +
  ylab("Count")
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#2) Building Logistic Regression Model
# 70/30 train/validation split on row names (sample() on the remainder just
# permutes the validation rows).
loan.train.rows <- sample(rownames(loan.df),
                          dim(loan.df)[1]*0.7)
loan.train.df <- loan.df[loan.train.rows, ]
loan.validate.rows <- sample(setdiff(rownames(loan.df),
                                     loan.train.rows))
loan.validate.df <- loan.df[loan.validate.rows, ]
logistic.loan <- glm(Loan_Status ~ ., data = loan.train.df, family = binomial("logit"))
summary(logistic.loan)
# BUG FIX: predict() on a glm returns the LINK scale (log-odds) by default,
# so the 0.5 cutoff was applied to log-odds rather than probabilities;
# type = "response" yields probabilities.
loan.approval.predictions <- predict(logistic.loan, loan.validate.df, type = "response")
prediction.results <- ifelse(loan.approval.predictions > 0.5, 1, 0)
misClassificationRate <- mean(prediction.results != loan.validate.df$Loan_Status)
print((1 - misClassificationRate) * 100)  # accuracy in percent
# Confusion matrix: actual (rows) vs predicted (cols), reusing the same
# thresholded predictions as the accuracy computation above.
table(loan.validate.df$Loan_Status, prediction.results)
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
|
# Cell means of a multivariate response for the factors named in a model
# term. `term` is a term label like "A" or "A:B" (factor names separated
# by ":"). Returns a matrix with one row per observed factor combination
# and one column per response variable.
# NOTE(review): ncol(Y) assumes the response is a matrix (multivariate
# model); a univariate response would fail here -- confirm callers.
termMeans <- function(mod, term, label.factors=FALSE, abbrev.levels=FALSE){
  data <- model.frame(mod)
  Y <- model.response(data)
  # keep only the factor columns of the model frame
  factors <- data[, sapply(data, is.factor), drop=FALSE]
  if (missing(term)) stop("a term must be supplied")
  term.factors <- unlist(strsplit(term, ":"))
  if (any(which <- !term.factors %in% colnames(factors)))
    stop(paste(term.factors[which], collapse=", "), " not in the model")
  n.factors <- length(term.factors)
  factor.values <- factors[,term.factors, drop=FALSE]
  # observed factor combinations, in order of first appearance
  rows <- nrow(levs <- unique(factor.values))
  means <-matrix(0, nrow=rows, ncol=ncol(Y))
  for (j in 1:ncol(Y)) {
    # NOTE(review): tapply() orders cells by the cross of factor LEVELS,
    # while `levs` above is in appearance order; if the data are not sorted
    # by the term factors, the row names assigned below may not line up with
    # the means -- confirm. Empty cells would also produce NA entries here.
    mn <- tapply(Y[,j], factor.values, mean)
    means[,j] <- as.vector(mn)
  }
  colnames(means) <- colnames(Y)
  nms <- colnames(levs)
  # optionally prefix level labels with factor names and/or abbreviate them
  if (label.factors)
    for (j in 1:ncol(levs)) levs[,j] <- paste(nms[j], levs[,j], sep="")
  if (abbrev.levels) {
    if(is.logical(abbrev.levels)) levs <- apply(levs, 2, abbreviate)
    else levs <- apply(levs, 2, abbreviate, minlength=abbrev.levels)
  }
  # row labels: levels joined with ":" across the term's factors
  levs <- apply(levs, 1, paste, collapse=":")
  rownames(means) <- levs
  means
}
| /R/termMeans.R | no_license | friendly/heplots | R | false | false | 1,100 | r | termMeans <- function(mod, term, label.factors=FALSE, abbrev.levels=FALSE){
data <- model.frame(mod)
Y <- model.response(data)
factors <- data[, sapply(data, is.factor), drop=FALSE]
if (missing(term)) stop("a term must be supplied")
term.factors <- unlist(strsplit(term, ":"))
if (any(which <- !term.factors %in% colnames(factors)))
stop(paste(term.factors[which], collapse=", "), " not in the model")
n.factors <- length(term.factors)
factor.values <- factors[,term.factors, drop=FALSE]
rows <- nrow(levs <- unique(factor.values))
means <-matrix(0, nrow=rows, ncol=ncol(Y))
for (j in 1:ncol(Y)) {
mn <- tapply(Y[,j], factor.values, mean)
means[,j] <- as.vector(mn)
}
colnames(means) <- colnames(Y)
nms <- colnames(levs)
if (label.factors)
for (j in 1:ncol(levs)) levs[,j] <- paste(nms[j], levs[,j], sep="")
if (abbrev.levels) {
if(is.logical(abbrev.levels)) levs <- apply(levs, 2, abbreviate)
else levs <- apply(levs, 2, abbreviate, minlength=abbrev.levels)
}
levs <- apply(levs, 1, paste, collapse=":")
rownames(means) <- levs
means
}
|
library(tidyverse)
library(fantasypros)
# Draft results: within each position, rank players by the amount actually
# paid and compare that to their positional ranking.
# NOTE(review): the result stays grouped by `pos` after transmute() -- later
# summaries inherit that grouping unless ungroup() is called.
df <-
  read_csv('data-raw/2020-draft-results.csv') %>%
  group_by(pos) %>%
  mutate(
    # rank within position by auction price (1 = most expensive)
    draft_as_rank = min_rank(desc(draft_amount)),
    rank_diff = pos_rank - draft_as_rank
  ) %>% transmute(
    draft_pick,
    draft_team,
    player,
    team,
    pos,
    tier,
    ranked_as = paste0(pos, pos_rank),
    drafted_as = paste0(pos, draft_as_rank),
    amount_paid = draft_amount,
    suggested_bid = value,
    draft_value = value - draft_amount,
    percent_paid = round((draft_amount / value) * 100, 2)
  )
# Average cost per positional tier of 12 (group 1 = ranks 1-12, etc.).
# NOTE(review): `pos_rank` and `value` were dropped/renamed by the
# transmute() above (`value` became `suggested_bid`), so this pipeline
# errors as written -- presumably it was meant to run before the transmute
# or to use the renamed columns. TODO confirm intent.
df %>% mutate(
  pos_group_num = (pos_rank %/% 12) + 1,
  pos_group = paste0(pos, pos_group_num)
) %>%
  group_by(
    pos_group, pos_group_num
  ) %>%
  summarise(
    avg_cost = mean(value, na.rm = TRUE),
    median_cost = median(value, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  filter(
    pos_group_num <= 4
  ) %>%
  arrange(desc(avg_cost)) %>% View()
# Same tier analysis but pooling the FLEX-eligible positions (TE/RB/WR)
# into groups of 12 by overall flex rank.
# NOTE(review): `rank` and `value` are not columns of the transmuted `df`
# above, so this pipeline errors as written -- confirm the intended input
# data frame / column names (e.g. `suggested_bid`).
df %>%
  ungroup() %>%
  mutate(
    flex_spot = if_else(pos %in% c("TE", "RB", "WR"), "FLEX", "NO-FLEX")
  ) %>%
  filter(
    flex_spot == "FLEX"
  ) %>%
  mutate(
    flex_rank = dense_rank(rank),
    pos_group_num = (flex_rank %/% 12) + 1,
    pos_group = paste0(flex_spot, pos_group_num)
  ) %>%
  group_by(
    pos_group, pos_group_num
  ) %>%
  summarise(
    avg_cost = mean(value, na.rm = TRUE),
    median_cost = median(value, na.rm = TRUE)
  ) %>%
  ungroup() %>%
  # filter(
  #   pos_group_num <= 7
  # ) %>%
  arrange(desc(avg_cost)) %>% View()
| /R/01-draft-values.R | no_license | jpiburn/tsu | R | false | false | 1,453 | r | library(tidyverse)
library(fantasypros)
df <-
read_csv('data-raw/2020-draft-results.csv') %>%
group_by(pos) %>%
mutate(
draft_as_rank = min_rank(desc(draft_amount)),
rank_diff = pos_rank - draft_as_rank
) %>% transmute(
draft_pick,
draft_team,
player,
team,
pos,
tier,
ranked_as = paste0(pos, pos_rank),
drafted_as = paste0(pos, draft_as_rank),
amount_paid = draft_amount,
suggested_bid = value,
draft_value = value - draft_amount,
percent_paid = round((draft_amount / value) * 100, 2)
)
df %>% mutate(
pos_group_num = (pos_rank %/% 12) + 1,
pos_group = paste0(pos, pos_group_num)
) %>%
group_by(
pos_group, pos_group_num
) %>%
summarise(
avg_cost = mean(value, na.rm = TRUE),
median_cost = median(value, na.rm = TRUE)
) %>%
ungroup() %>%
filter(
pos_group_num <= 4
) %>%
arrange(desc(avg_cost)) %>% View()
df %>%
ungroup() %>%
mutate(
flex_spot = if_else(pos %in% c("TE", "RB", "WR"), "FLEX", "NO-FLEX")
) %>%
filter(
flex_spot == "FLEX"
) %>%
mutate(
flex_rank = dense_rank(rank),
pos_group_num = (flex_rank %/% 12) + 1,
pos_group = paste0(flex_spot, pos_group_num)
) %>%
group_by(
pos_group, pos_group_num
) %>%
summarise(
avg_cost = mean(value, na.rm = TRUE),
median_cost = median(value, na.rm = TRUE)
) %>%
ungroup() %>%
# filter(
# pos_group_num <= 7
# ) %>%
arrange(desc(avg_cost)) %>% View()
|
# Find similar players ------------------------------
#
# Using t-sne algorithm, find players that have similar characteristics to a given player.
# The objective is to predict his performance in a given year based on the historical performance
# of similar players (see: Nate Silver's CARMELO or PECOTA systems)
#
# Ex: If I want to predict Pau Gasol numbers for the season he will turn 36, I will start
# with his numbers in the previous seasons and I will adjust according to the average
# evolution of similar players when they turned 36.
#
# Ex: To be able to assign predicted characteristics to a rookie player, I will do a
# similar approach. See functions related to rookies and draft
#
# Build the feature table for the rookie t-SNE embedding: the current draft
# class (rookieStats, college players only) stacked on historical college
# stats (rookieStatsHist), with history normalized to per-game values and
# missing features imputed by the column mean.
# NOTE(review): rookieStats is assumed to hold per-game numbers already --
# only rookieStatsHist is divided by games (G) below; confirm.
.tSNE_prepareRookies <- function(){
  # num_iter <- 300
  # max_num_neighbors <- 20
  # playerName <- "Stephen Curry"
  rookieStats <- read.csv("data/rookieStats.csv", stringsAsFactors = FALSE)
  # international/European prospects have no college stats; handled elsewhere
  rookieStats <- filter(rookieStats, !(College %in% c("International", "Europe")))
  rookieStatsHist <- read.csv("data/rookieStatsHist.csv", stringsAsFactors = FALSE)
  # transform rookieStatsHist stats to relative (per-game) numbers
  rookieStatsHist <- rookieStatsHist %>%
    group_by(Player,Season) %>%
    mutate(MP = MP/G, FG = FG/G,
           FGA = FGA/G,X3P = X3P/G,X3PA = X3PA/G,
           X2P = X2P/G,X2PA = X2PA/G,
           FT = FT/G,FTA = FTA/G,
           ORB = ORB/G,DRB = DRB/G,
           TRB = TRB/G,AST = AST/G,
           STL = STL/G,BLK = BLK/G,
           TOV = TOV/G,PF = PF/G,
           PTS = PTS/G)
  # all together, ready for tsne
  collegeHist <- bind_rows(rookieStats,rookieStatsHist)
  # rename the stat columns to eff* and keep identifiers + features only
  data_tsne <- collegeHist %>%
    group_by(Player,Season) %>%
    mutate(effFG = FG,
           effFGA = FGA,eff3PM = X3P,eff3PA = X3PA,
           eff2PM = X2P,eff2PA = X2PA,
           effFTM = FT,effFTA = FTA,
           effORB = ORB,effDRB = DRB,
           effTRB = TRB,effAST = AST,
           effSTL = STL,effBLK = BLK,
           effTOV = TOV,effPF = PF,
           effPTS = PTS) %>%
    dplyr::select(Player,Pos,Season,Pick,starts_with("eff"))
  # t-sne doesn't like NAs. Impute by assigning the average of the variable.
  # If NA means no shot attempted, ie,
  # either the player didn't play enough time or is really bad at this particular type of shot.
  # Columns 4+ are Pick and the eff* features; columns 1-3 are identifiers.
  data_tsne <- as.data.frame(data_tsne)
  for (i in 4:ncol(data_tsne)){
    data_tsne[is.na(data_tsne[,i]),i] <- mean(data_tsne[,i],na.rm=TRUE)
  }
  return(data_tsne)
}
# Embed the rookie feature table (from .tSNE_prepareRookies()) in 2-D with
# t-SNE. Returns the coordinate matrix, or an empty vector when no data.
.tSNE_computeRookies <- function(num_iter, max_num_neighbors){
  feature_table <- .tSNE_prepareRookies()
  if (nrow(feature_table) == 0) {
    return(c())
  }
  set.seed(456)  # fixed seed for reproducibility
  # Columns 1-3 are identifiers (Player, Pos, Season); Pick and the eff*
  # stats are the embedded features.
  tsne(feature_table[, -c(1:3)],
       max_iter = as.numeric(num_iter),
       perplexity = as.numeric(max_num_neighbors),
       epoch = num_iter)
}
# compute colors for regions
# Named color vector for the t-SNE plot: one rainbow color per distinct
# value of the grouping column ("Season", otherwise Pos).
# num_iter / max_num_neighbors are unused but kept for interface compatibility.
.getColorsRookies <- function(num_iter, max_num_neighbors, colVar){
  data_tsne <- .tSNE_prepareRookies()
  groups <- if (colVar == "Season") unique(data_tsne$Season) else unique(data_tsne$Pos)
  colors <- rainbow(length(groups))
  names(colors) <- groups
  colors
}
# tsne chart ---------------------------------------------------------
# Scatter the precomputed t-SNE embedding with player names as labels,
# colored by Season or Pos. Labels are assumed row-aligned with the
# embedding CSV -- TODO confirm.
.tSNE_plotRookies <- function(playerName, num_iter, max_num_neighbors, colVar){
  #tsne_points <- .tSNE_compute(num_iter, max_num_neighbors, playerAge)
  tsne_points <- read.csv("data/tsne_pointsRookies.csv", stringsAsFactors = FALSE)
  # BUG FIX: `data_tsne` was used below without being defined anywhere in
  # this function, so the plot only worked if a global with that name
  # happened to exist. Build it explicitly here.
  data_tsne <- .tSNE_prepareRookies()
  if (length(tsne_points) > 0){
    par(mar = c(0, 0, 0, 0))
    plot(tsne_points, t = 'n', axes = FALSE, frame.plot = FALSE, xlab = "", ylab = "")
    graphics::text(tsne_points, labels = as.character(data_tsne$Player),
                   col = .getColorsRookies(num_iter, max_num_neighbors, colVar))
  } else {
    # no embedding available: draw a placeholder message instead
    plot(c(1, 1), type = "n", frame.plot = FALSE, axes = FALSE, ann = FALSE)
    graphics::text(1.5, 1, "Not enough data", col = "red", cex = 2)
  }
}
# tsne dist ---------------------------------------------------------
# Rank all embedded players by Euclidean distance (in t-SNE space) to the
# selected player; closest first, so row 1 is the player himself.
# Returns a two-column data frame: Player and the distance.
.tSNE_distRookies <- function(playerName){
  data_tsne <- .tSNE_prepareRookies()
  lastDraft <- max(data_tsne$Season)
  #tsne_points <- .tSNE_compute(num_iter, max_num_neighbors, playerAge)
  tsne_points <- read.csv("data/tsne_pointsRookies.csv",stringsAsFactors = FALSE)
  # NOTE(review): assumes the CSV holds exactly two coordinate columns named
  # V1/V2, row-aligned with data_tsne -- confirm.
  if (length(tsne_points)>0 & nrow(filter(data_tsne, Player == playerName))>0){
    # calculate the euclidean distance between the selected player and the rest
    dist_mat <- cbind(tsne_points,as.character(data_tsne$Player),data_tsne$Season)
    # for a current-draft player, drop the other current-draft players so
    # the neighbours come from past classes only
    if (filter(data_tsne, Player == playerName)$Season == lastDraft){
      dist_mat <- dist_mat[!(data_tsne$Season==lastDraft & !(data_tsne$Player==playerName)),]
    }
    dist_mat <- as.data.frame(dist_mat, stringsAsFactors=FALSE)
    dist_mat$V1 <- as.numeric(dist_mat$V1)
    dist_mat$V2 <- as.numeric(dist_mat$V2)
    distCou1 <- dist_mat[dist_mat[,3]==playerName,1]
    distCou2 <- dist_mat[dist_mat[,3]==playerName,2]
    dist_mat <- mutate(dist_mat, dist = sqrt((V1-distCou1)^2+(V2-distCou2)^2))
    # order by closest distance to selected player.
    # BUG FIX: keep columns 3 (player) and 5 (the computed `dist`); the
    # original kept column 4 (Season), so the "Euclid. distance" column
    # actually contained seasons.
    dist_mat <- arrange(dist_mat, dist)[,c(3,5)]
    names(dist_mat) <- c("Player","Euclid. distance")
  } else {
    dist_mat <- data_frame()
  }
  return(dist_mat)
}
# return predicted stats rookie season for any drafted player from college
# Predicts a college rookie's first NBA season as the average first-season
# line of his 5 nearest neighbours in the t-SNE embedding.
# NOTE(review): relies on names defined elsewhere: `playersHist`,
# `playersNew` and `.tSNE_prepareSelected()` -- confirm they are in scope
# when this file is sourced.
.predictPlayerCollegeRookie <- function(playerName){
  data_tsne <- .tSNE_prepareRookies()
  similarPlayers <- .tSNE_distRookies(playerName)
  # 5 closest players, skipping row 1 (the player himself)
  theirStats <- filter(data_tsne, Player %in% head(similarPlayers[-1,1],5))
  # first NBA season of every player in history
  rookieNBAStats <- playersHist %>%
    group_by(Player) %>%
    filter(Season == min(as.character(Season)))
  rookieNBAStats <- as.data.frame(rookieNBAStats)
  thisSelection <- filter(rookieNBAStats, Player %in% theirStats$Player)
  thisSelectionPrep <- .tSNE_prepareSelected(thisSelection)
  # append one extra row holding the column means of the neighbours' stats
  # (columns 4+ are the numeric features)
  this_numRows <- nrow(thisSelectionPrep)
  for (i in 4:ncol(thisSelectionPrep)){
    thisSelectionPrep[this_numRows+1,i] <- mean(thisSelectionPrep[1:this_numRows,i])
  }
  # label the averaged row with the target player's identity and season
  thisPlayer <- filter(data_tsne, Player == playerName)
  thisSelectionPrep$Player <- as.character(thisSelectionPrep$Player)
  thisSelectionPrep$Pos <- as.character(thisSelectionPrep$Pos)
  thisSelectionPrep$Season <- as.character(thisSelectionPrep$Season)
  thisSelectionPrep$Player[nrow(thisSelectionPrep)] <- thisPlayer$Player
  thisSelectionPrep$Pos[nrow(thisSelectionPrep)] <- thisPlayer$Pos
  thisSelectionPrep$Season[nrow(thisSelectionPrep)] <- as.character(playersNew$Season[1])
  playerPredicted <- filter(thisSelectionPrep, Player == playerName)
  return(playerPredicted)
}
#.predictPlayerCollegeRookie("Damian Jones")
# Non college players predicted stats
# For international/European rookies (no college stats): use the average
# first-NBA-season line of all historical non-college rookies as a prior.
# NOTE(review): relies on names defined elsewhere: `playersHist` and
# `lastDraft` -- confirm they are in scope. `funs()` is deprecated in
# current dplyr (use `list(~ ...)`).
.predictPlayerNonCollegeRookie <- function(playerName){
  # Remove from playersHist those who played college and average out their stats by position.
  collegePlayersHist <- read.csv("data/collegePlayersHist.csv",stringsAsFactors = FALSE)
  collegePlayersHist <- collegePlayersHist %>%
    group_by(Player) %>%
    filter(Season == max(Season))
  onlyCollegeRookies <- dplyr::select(collegePlayersHist,Player)
  onlyCollegeRookies <- onlyCollegeRookies$Player
  # first NBA season of every player who did NOT play college ball
  nonCollegeRookies <- playersHist %>%
    filter(!(Player %in% onlyCollegeRookies)) %>%
    group_by(Player) %>%
    filter(Season == min(Season)) %>%
    distinct(Player, .keep_all=TRUE)
  # Calculate average stats for nonCollegeRookies on their first NBA season by position.
  # This will provide players without much statistical background in NBA or College with
  # some prior stats.
  # Can't do the above so I will do overall priors with no filter
  nonCollegeRookies_Stats <- nonCollegeRookies %>%
    filter(Season >= "1994-1995") %>%
    group_by() %>%
    summarise_at(c(5:(ncol(nonCollegeRookies)-1)),funs(mean(.,na.rm=TRUE)))
  # assign stats to input player and then adjust those stats like in .tsnePrepare
  # get player's position (international/European entries of rookieStats)
  rookieStats <- read.csv("data/rookieStats.csv", stringsAsFactors = FALSE)
  rookieStats <- filter(rookieStats, College %in% c("International", "Europe"))
  #rookieStats <- rookieStats[,1:29]
  playerPredicted <- rookieStats %>%
    filter(Player == playerName) %>%
    dplyr::select(Player,Tm=Team,Pick) %>%
    mutate(Season = paste0(lastDraft,"-",lastDraft+1))
  playerPredicted <- bind_cols(playerPredicted,nonCollegeRookies_Stats)
  # season totals -> per-game values
  playerPredicted <- playerPredicted %>%
    mutate(MP = MP/G, FG = FG/G,
           FGA = FGA/G,X3P = X3P/G,X3PA = X3PA/G,
           X2P = X2P/G,X2PA = X2PA/G,
           FT = FT/G,FTA = FTA/G,
           ORB = ORB/G,DRB = DRB/G,
           TRB = TRB/G,AST = AST/G,
           STL = STL/G,BLK = BLK/G,
           TOV = TOV/G,PF = PF/G,
           PTS = PTS/G)
  # rename to the eff* feature names used by the t-SNE pipeline
  playerPredicted <- playerPredicted %>%
    mutate(effFG = FG,effMin = MP/3936, # this will underestimate the minutes played
           # but I don't mind as he's a rookie and will most likely play fewer minutes
           effFGA = FGA,eff3PM = X3P,eff3PA = X3PA,
           eff2PM = X2P,eff2PA = X2PA,
           effFTM = FT,effFTA = FTA,
           effORB = ORB,effDRB = DRB,
           effTRB = TRB,effAST = AST,
           effSTL = STL,effBLK = BLK,
           effTOV = TOV,effPF = PF,
           effPTS = PTS) %>%
    dplyr::select(Player,Season,FGPer = FG.,FG3Per = X3P., FG2Per = X2P., effFGPer = eFG.,
                  FTPer = FT., starts_with("eff")) %>% mutate(Pos = "X")
  return(playerPredicted)
}
| /helper_functions/similarityFunctionsRookies.R | no_license | asRodelgo/NBA | R | false | false | 9,600 | r | # Find similar players ------------------------------
#
# Using t-sne algorithm, find players that have similar characteristics to a given player.
# The objective is to predict his performance in a given year based on the historical performance
# of similar players (see: Nate Silver's CARMELO or PECOTA systems)
#
# Ex: If I want to predict Pau Gasol numbers for the season he will turn 36, I will start
# with his numbers in the previous seasons and I will adjust according to the average
# evolution of similar players when they turned 36.
#
# Ex: To be able to assign predicted characteristics to a rookie player, I will do a
# similar approach. See functions related to rookies and draft
#
# Build the feature table used by the t-SNE embedding of rookies.
# Combines current-draft college rookies (data/rookieStats.csv) with
# historical rookie seasons (data/rookieStatsHist.csv), converts the
# historical totals to per-game figures, copies the stat columns to the
# "eff*" naming convention, and imputes missing values with column means.
# Returns: a plain data.frame with Player, Pos, Season, Pick and eff* columns.
.tSNE_prepareRookies <- function(){
# num_iter <- 300
# max_num_neighbors <- 20
# playerName <- "Stephen Curry"
rookieStats <- read.csv("data/rookieStats.csv", stringsAsFactors = FALSE)
# Keep college prospects only; international players are handled separately.
rookieStats <- filter(rookieStats, !(College %in% c("International", "Europe")))
rookieStatsHist <- read.csv("data/rookieStatsHist.csv", stringsAsFactors = FALSE)
# transform rookieStatsHist stats to relative (per-game) numbers
rookieStatsHist <- rookieStatsHist %>%
group_by(Player,Season) %>%
mutate(MP = MP/G, FG = FG/G,
FGA = FGA/G,X3P = X3P/G,X3PA = X3PA/G,
X2P = X2P/G,X2PA = X2PA/G,
FT = FT/G,FTA = FTA/G,
ORB = ORB/G,DRB = DRB/G,
TRB = TRB/G,AST = AST/G,
STL = STL/G,BLK = BLK/G,
TOV = TOV/G,PF = PF/G,
PTS = PTS/G)
# all together, ready for tsne
collegeHist <- bind_rows(rookieStats,rookieStatsHist)
# Copy the stat columns into the eff* columns consumed downstream.
data_tsne <- collegeHist %>%
group_by(Player,Season) %>%
mutate(effFG = FG,
effFGA = FGA,eff3PM = X3P,eff3PA = X3PA,
eff2PM = X2P,eff2PA = X2PA,
effFTM = FT,effFTA = FTA,
effORB = ORB,effDRB = DRB,
effTRB = TRB,effAST = AST,
effSTL = STL,effBLK = BLK,
effTOV = TOV,effPF = PF,
effPTS = PTS) %>%
dplyr::select(Player,Pos,Season,Pick,starts_with("eff"))
# t-sne doesn't like NAs. Impute by assigning the average of the variable.
# If NA means no shot attempted, ie,
# either the player didn't play enough time or is really bad at this particular type of shot.
data_tsne <- as.data.frame(data_tsne)
# Columns 1:3 are Player/Pos/Season; numeric columns start at position 4.
for (i in 4:ncol(data_tsne)){
data_tsne[is.na(data_tsne[,i]),i] <- mean(data_tsne[,i],na.rm=TRUE)
}
return(data_tsne)
}
# Reduce the rookie feature table to 2-D with t-SNE.
# The first three columns (Player, Pos, Season) are dropped before embedding.
# Returns the embedded coordinates, or c() when there is no data.
.tSNE_computeRookies <- function(num_iter, max_num_neighbors){
  features <- .tSNE_prepareRookies()
  # Nothing to embed: keep the original empty-result contract.
  if (nrow(features) == 0) {
    return(c())
  }
  set.seed(456)  # reproducibility
  tsne(features[, -c(1:3)],
       max_iter = as.numeric(num_iter),
       perplexity = as.numeric(max_num_neighbors),
       epoch = num_iter)
}
# compute colors for regions
# Build a named colour palette for the t-SNE plot, keyed by Season when
# colVar == "Season" and by position otherwise. `num_iter` and
# `max_num_neighbors` are kept for interface compatibility; they are unused.
.getColorsRookies <- function(num_iter, max_num_neighbors,colVar){
  player_data <- .tSNE_prepareRookies()
  # Pick the column the palette should be keyed on.
  key_values <- if (colVar == "Season") player_data$Season else player_data$Pos
  key_levels <- unique(key_values)
  palette <- rainbow(length(key_levels))
  names(palette) <- key_levels
  palette
}
# tsne chart ---------------------------------------------------------
# Plot the saved 2-D t-SNE embedding of rookies, labelling every point with
# the player's name and colouring by `colVar` ("Season" or position).
# Falls back to a "Not enough data" placeholder when no points are available.
# Side effects: draws on the active graphics device and modifies par(mar).
.tSNE_plotRookies <- function(playerName, num_iter, max_num_neighbors, colVar){
  # Fix: the original referenced a global `data_tsne` that is never defined in
  # this function; build it explicitly so labels line up with the embedding.
  data_tsne <- .tSNE_prepareRookies()
  tsne_points <- read.csv("data/tsne_pointsRookies.csv", stringsAsFactors = FALSE)
  if (length(tsne_points) > 0){
    par(mar = c(0, 0, 0, 0))
    plot(tsne_points, t = 'n', axes = FALSE, frame.plot = FALSE, xlab = "", ylab = "")
    graphics::text(tsne_points, labels = as.character(data_tsne$Player),
                   col = .getColorsRookies(num_iter, max_num_neighbors, colVar))
  } else {
    plot(c(1, 1), type = "n", frame.plot = FALSE, axes = FALSE, ann = FALSE)
    graphics::text(1.5, 1, "Not enough data", col = "red", cex = 2)
  }
}
# tsne dist ---------------------------------------------------------
# Rank every rookie by Euclidean distance (in the saved 2-D t-SNE embedding)
# from `playerName`. When the selected player belongs to the most recent
# draft, the other current-draft players are removed so he is only compared
# with historical rookies. Returns a two-column data frame (Player,
# "Euclid. distance") ordered closest-first, with the selected player himself
# at distance 0 in row 1; an empty tibble when the embedding or player is
# missing.
.tSNE_distRookies <- function(playerName){
  data_tsne <- .tSNE_prepareRookies()
  lastDraft <- max(data_tsne$Season)
  tsne_points <- read.csv("data/tsne_pointsRookies.csv", stringsAsFactors = FALSE)
  # Both conditions are scalar, so use the short-circuit operator.
  if (length(tsne_points) > 0 && nrow(filter(data_tsne, Player == playerName)) > 0){
    # Columns: 1-2 = embedding coordinates, 3 = player name, 4 = season.
    dist_mat <- cbind(tsne_points, as.character(data_tsne$Player), data_tsne$Season)
    # NOTE(review): assumes `playerName` matches exactly one row -- confirm.
    if (filter(data_tsne, Player == playerName)$Season == lastDraft){
      dist_mat <- dist_mat[!(data_tsne$Season == lastDraft & !(data_tsne$Player == playerName)), ]
    }
    dist_mat <- as.data.frame(dist_mat, stringsAsFactors = FALSE)
    dist_mat$V1 <- as.numeric(dist_mat$V1)
    dist_mat$V2 <- as.numeric(dist_mat$V2)
    # Coordinates of the selected player.
    distCou1 <- dist_mat[dist_mat[, 3] == playerName, 1]
    distCou2 <- dist_mat[dist_mat[, 3] == playerName, 2]
    dist_mat <- mutate(dist_mat, dist = sqrt((V1 - distCou1)^2 + (V2 - distCou2)^2))
    # order by closest distance to selected player.
    # Fix: the distance just added by mutate() is column 5; the original kept
    # column 4 (the season) and mislabelled it "Euclid. distance". Downstream
    # callers only consume column 1 (Player), so this stays compatible.
    dist_mat <- arrange(dist_mat, dist)[, c(3, 5)]
    names(dist_mat) <- c("Player", "Euclid. distance")
  } else {
    # data_frame() is deprecated; tibble() is its direct replacement.
    dist_mat <- tibble()
  }
  return(dist_mat)
}
# return predicted stats rookie season for any drafted player from college
# Predict first-season NBA stats for a drafted college player: find his
# nearest neighbours in the t-SNE embedding, take their first NBA seasons,
# and average their prepared stat lines into a projected row for him.
# NOTE(review): relies on the globals `playersHist` and `playersNew` and on
# .tSNE_prepareSelected() defined elsewhere -- confirm they are loaded.
.predictPlayerCollegeRookie <- function(playerName){
data_tsne <- .tSNE_prepareRookies()
similarPlayers <- .tSNE_distRookies(playerName)
# Row 1 of similarPlayers is the player himself (distance 0); take the
# next five closest players.
theirStats <- filter(data_tsne, Player %in% head(similarPlayers[-1,1],5))
# First NBA season of every player in the historical data.
rookieNBAStats <- playersHist %>%
group_by(Player) %>%
filter(Season == min(as.character(Season)))
rookieNBAStats <- as.data.frame(rookieNBAStats)
thisSelection <- filter(rookieNBAStats, Player %in% theirStats$Player)
thisSelectionPrep <- .tSNE_prepareSelected(thisSelection)
this_numRows <- nrow(thisSelectionPrep)
# Append one extra row holding the column means of the similar players;
# numeric stat columns start at position 4.
for (i in 4:ncol(thisSelectionPrep)){
thisSelectionPrep[this_numRows+1,i] <- mean(thisSelectionPrep[1:this_numRows,i])
}
thisPlayer <- filter(data_tsne, Player == playerName)
thisSelectionPrep$Player <- as.character(thisSelectionPrep$Player)
thisSelectionPrep$Pos <- as.character(thisSelectionPrep$Pos)
thisSelectionPrep$Season <- as.character(thisSelectionPrep$Season)
# Label the appended average row with the input player's identity and the
# upcoming season (taken from the global playersNew).
thisSelectionPrep$Player[nrow(thisSelectionPrep)] <- thisPlayer$Player
thisSelectionPrep$Pos[nrow(thisSelectionPrep)] <- thisPlayer$Pos
thisSelectionPrep$Season[nrow(thisSelectionPrep)] <- as.character(playersNew$Season[1])
playerPredicted <- filter(thisSelectionPrep, Player == playerName)
return(playerPredicted)
}
#.predictPlayerCollegeRookie("Damian Jones")
# Non college players predicted stats
# Predict first-season NBA stats for a drafted player who did not play college
# basketball (international/European prospects) by assigning him the average
# first-NBA-season stat line of all historical non-college rookies.
#
# Args:
#   playerName: character scalar, the player's name as it appears in
#     data/rookieStats.csv.
# Returns: a one-row data frame with percentage columns and per-game "eff*"
#   columns, Season built from `lastDraft`, and Pos set to the placeholder "X".
# NOTE(review): relies on the globals `playersHist` and `lastDraft` being
# defined in the calling environment -- confirm they are loaded before use.
.predictPlayerNonCollegeRookie <- function(playerName){
  # Identify players with college history so they can be excluded below.
  collegePlayersHist <- read.csv("data/collegePlayersHist.csv", stringsAsFactors = FALSE)
  collegePlayersHist <- collegePlayersHist %>%
    group_by(Player) %>%
    filter(Season == max(Season))
  onlyCollegeRookies <- dplyr::select(collegePlayersHist, Player)
  onlyCollegeRookies <- onlyCollegeRookies$Player
  # First NBA season of every player who did NOT come through college.
  nonCollegeRookies <- playersHist %>%
    filter(!(Player %in% onlyCollegeRookies)) %>%
    group_by(Player) %>%
    filter(Season == min(Season)) %>%
    distinct(Player, .keep_all = TRUE)
  # Average stats of non-college rookies in their first NBA season. Ideally
  # this would be done by position, but that data is not available, so overall
  # (position-free) priors are used instead.
  nonCollegeRookies_Stats <- nonCollegeRookies %>%
    filter(Season >= "1994-1995") %>%
    ungroup() %>%
    # funs() is defunct in modern dplyr; use a lambda instead.
    summarise_at(c(5:(ncol(nonCollegeRookies) - 1)), ~ mean(.x, na.rm = TRUE))
  # Assign the prior stats to the input player, then normalise them per game
  # exactly as .tsnePrepare does.
  rookieStats <- read.csv("data/rookieStats.csv", stringsAsFactors = FALSE)
  rookieStats <- filter(rookieStats, College %in% c("International", "Europe"))
  #rookieStats <- rookieStats[,1:29]
  playerPredicted <- rookieStats %>%
    filter(Player == playerName) %>%
    dplyr::select(Player, Tm = Team, Pick) %>%
    # NOTE(review): `lastDraft` must exist in the enclosing environment (it is
    # computed locally in .tSNE_distRookies) -- confirm it is defined here.
    mutate(Season = paste0(lastDraft, "-", lastDraft + 1))
  playerPredicted <- bind_cols(playerPredicted, nonCollegeRookies_Stats)
  # Convert season totals to per-game figures.
  playerPredicted <- playerPredicted %>%
    mutate(MP = MP/G, FG = FG/G,
           FGA = FGA/G, X3P = X3P/G, X3PA = X3PA/G,
           X2P = X2P/G, X2PA = X2PA/G,
           FT = FT/G, FTA = FTA/G,
           ORB = ORB/G, DRB = DRB/G,
           TRB = TRB/G, AST = AST/G,
           STL = STL/G, BLK = BLK/G,
           TOV = TOV/G, PF = PF/G,
           PTS = PTS/G)
  # Mirror the "eff*" columns produced elsewhere in the pipeline. 3936 is the
  # maximum regular-season minutes (82 games x 48 minutes); dividing by it
  # underestimates minutes played, which is acceptable for a rookie who will
  # most likely play fewer minutes anyway.
  playerPredicted <- playerPredicted %>%
    mutate(effFG = FG, effMin = MP/3936,
           effFGA = FGA, eff3PM = X3P, eff3PA = X3PA,
           eff2PM = X2P, eff2PA = X2PA,
           effFTM = FT, effFTA = FTA,
           effORB = ORB, effDRB = DRB,
           effTRB = TRB, effAST = AST,
           effSTL = STL, effBLK = BLK,
           effTOV = TOV, effPF = PF,
           effPTS = PTS) %>%
    dplyr::select(Player, Season, FGPer = FG., FG3Per = X3P., FG2Per = X2P., effFGPer = eFG.,
                  FTPer = FT., starts_with("eff")) %>%
    mutate(Pos = "X")
  return(playerPredicted)
}
|
# NOTE(review): rm(list = ls()) only clears the global environment and does
# not give a fresh R session; it is generally discouraged in scripts.
rm(list = ls()) #Removes all objects from the current workspace (R memory)
# Helper to locate the project root; appears unused below -- confirm.
mypath <- rprojroot::find_package_root_file
##------------------------------------
##LOADING PACKAGES, FUNCTIONS AND DATA
##------------------------------------
##PACKAGES##
library(mgcv)
library(stringi)
library(dplyr)
library(purrr)
##FUNCTIONS##
source("rfunctions/misc.R")
source("rfunctions/relative_contributions.R")
##DATA##
# Read the six fitted GAM objects (one per individual metric) saved by the
# previous analysis step.
top_models <- lapply(c("gam_srtotal_reduce_model.rds", "gam_srinvertebrate_reduce_model.rds", "gam_LD_reduce_model.rds",
"gam_srvertebrate_reduce_model.rds", "gam_MFCL_reduce_model.rds", "gam_MTL_reduce_model.rds"), function(mods) {
readRDS(paste0("outputs/SI_IndividualMetricsAnalysis/", mods))
})
# Short labels matching the order of top_models.
model_names = c("srtotal", "srinvertebrate", "LD",
"srvertebrate", "MFCL", "MTL")
##-----------------
##GAM SUMMARY TABLE
##-----------------
#Script based on https://doi.org/10.5281/zenodo.596810
# For each fitted GAM, combine the parametric terms (p.table: estimate and Z
# statistic) and the smooth terms (s.table: Chi-sq statistic and effective
# degrees of freedom) into one table, together with total and relative
# deviance explained. P-values below 0.001 are reported as "<0.001".
model_tables <- map2(top_models, model_names, function(modd, model_name) {
  summ <- summary(modd)
  # Per-term relative deviance explained; get_relative_contribs() is defined
  # in rfunctions/relative_contributions.R.
  rel_dev <- get_relative_contribs(modd)
  # data_frame() is deprecated; tibble() is its direct replacement.
  bind_rows(tibble(Term = stri_extract_first_regex(rownames(summ$p.table), "(?<=\\()[^\\)]+(?=\\))"),
                   Value = round(summ$p.table[,1], 3),
                   `Z statistic` = round(summ$p.table[,3], 3),
                   `Chi-sq statistic` = NA,
                   `P-value` = ifelse(summ$p.table[,4] > 0.001, as.character(round(summ$p.table[,4], digits=3)), "<0.001"),
                   `Effective Degrees of Freedom` = NA,
                   `Total Dev. Explained` = as.character(NA),
                   `Relative Dev. Explained` = as.character(NA),
                   model = model_name),
            tibble(Term = stri_extract_first_regex(rownames(summ$s.table), "(?<=s\\()[^\\)]+(?=\\))"),
                   Value = NA,
                   `Z statistic` = NA,
                   `Chi-sq statistic` = round(summ$s.table[,3], 3),
                   `P-value` = ifelse(summ$s.table[,4] > 0.001, as.character(round(summ$s.table[,4], digits=3)), "<0.001"),
                   `Effective Degrees of Freedom` = round(summ$s.table[,1], 3),
                   # Reuse the summary computed above rather than calling
                   # summary(modd) a second time.
                   `Total Dev. Explained` = paste0(round(summ$dev.expl*100, 1), "%"),
                   `Relative Dev. Explained` = paste0(round(100*rel_dev$rel_deviance_explained, 1), "%"),
                   model = model_name))
})
# Number of rows contributed by each model's table (kept for reference).
model_rows <- map_int(model_tables, nrow)
# Duplicate the first row of each table as a per-model "header" row, keep the
# model name only on that row, stack all tables, and move the model column
# first. All columns other than Term and model become character so the mixed
# numeric/text rows print uniformly.
model_tables2 <- model_tables %>%
  map(~ rbind(.[1,], .)) %>%
  map(function(x) {
    x$model <- c(x$model[1], rep(NA, nrow(x) - 1))
    return(x)
  }) %>%
  bind_rows %>%
  # mutate_each()/funs() are defunct in modern dplyr; across() replaces them.
  mutate(across(!c(Term, model), as.character)) %>%
  #arrange(model, Term !="Intercept") %>%
  dplyr::select(9, 1:8)
# Blank the header of the model column for presentation.
names(model_tables2)[1] <- ""
| /rscripts/SI_GAM_analysis_individual_metrics_step2.R | permissive | CamilleLeclerc/FoodWebs-EnvironmentalVariables | R | false | false | 4,416 | r | rm(list = ls()) #Removes all objects from the current workspace (R memory)
# Helper to locate the project root; appears unused below -- confirm.
mypath <- rprojroot::find_package_root_file
##------------------------------------
##LOADING PACKAGES, FUNCTIONS AND DATA
##------------------------------------
##PACKAGES##
library(mgcv)
library(stringi)
library(dplyr)
library(purrr)
##FUNCTIONS##
source("rfunctions/misc.R")
source("rfunctions/relative_contributions.R")
##DATA##
# Read the six fitted GAM objects (one per individual metric) saved by the
# previous analysis step.
top_models <- lapply(c("gam_srtotal_reduce_model.rds", "gam_srinvertebrate_reduce_model.rds", "gam_LD_reduce_model.rds",
"gam_srvertebrate_reduce_model.rds", "gam_MFCL_reduce_model.rds", "gam_MTL_reduce_model.rds"), function(mods) {
readRDS(paste0("outputs/SI_IndividualMetricsAnalysis/", mods))
})
# Short labels matching the order of top_models.
model_names = c("srtotal", "srinvertebrate", "LD",
"srvertebrate", "MFCL", "MTL")
##-----------------
##GAM SUMMARY TABLE
##-----------------
#Script based on https://doi.org/10.5281/zenodo.596810
# For each fitted GAM, combine the parametric terms (p.table: estimate and Z
# statistic) and the smooth terms (s.table: Chi-sq statistic and effective
# degrees of freedom) into one table, together with total and relative
# deviance explained. P-values below 0.001 are reported as "<0.001".
model_tables <- map2(top_models, model_names, function(modd, model_name) {
  summ <- summary(modd)
  # Per-term relative deviance explained; get_relative_contribs() is defined
  # in rfunctions/relative_contributions.R.
  rel_dev <- get_relative_contribs(modd)
  # data_frame() is deprecated; tibble() is its direct replacement.
  bind_rows(tibble(Term = stri_extract_first_regex(rownames(summ$p.table), "(?<=\\()[^\\)]+(?=\\))"),
                   Value = round(summ$p.table[,1], 3),
                   `Z statistic` = round(summ$p.table[,3], 3),
                   `Chi-sq statistic` = NA,
                   `P-value` = ifelse(summ$p.table[,4] > 0.001, as.character(round(summ$p.table[,4], digits=3)), "<0.001"),
                   `Effective Degrees of Freedom` = NA,
                   `Total Dev. Explained` = as.character(NA),
                   `Relative Dev. Explained` = as.character(NA),
                   model = model_name),
            tibble(Term = stri_extract_first_regex(rownames(summ$s.table), "(?<=s\\()[^\\)]+(?=\\))"),
                   Value = NA,
                   `Z statistic` = NA,
                   `Chi-sq statistic` = round(summ$s.table[,3], 3),
                   `P-value` = ifelse(summ$s.table[,4] > 0.001, as.character(round(summ$s.table[,4], digits=3)), "<0.001"),
                   `Effective Degrees of Freedom` = round(summ$s.table[,1], 3),
                   # Reuse the summary computed above rather than calling
                   # summary(modd) a second time.
                   `Total Dev. Explained` = paste0(round(summ$dev.expl*100, 1), "%"),
                   `Relative Dev. Explained` = paste0(round(100*rel_dev$rel_deviance_explained, 1), "%"),
                   model = model_name))
})
# Number of rows contributed by each model's table (kept for reference).
model_rows <- map_int(model_tables, nrow)
# Duplicate the first row of each table as a per-model "header" row, keep the
# model name only on that row, stack all tables, and move the model column
# first. All columns other than Term and model become character so the mixed
# numeric/text rows print uniformly.
model_tables2 <- model_tables %>%
  map(~ rbind(.[1,], .)) %>%
  map(function(x) {
    x$model <- c(x$model[1], rep(NA, nrow(x) - 1))
    return(x)
  }) %>%
  bind_rows %>%
  # mutate_each()/funs() are defunct in modern dplyr; across() replaces them.
  mutate(across(!c(Term, model), as.character)) %>%
  #arrange(model, Term !="Intercept") %>%
  dplyr::select(9, 1:8)
# Blank the header of the model column for presentation.
names(model_tables2)[1] <- ""
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cum_downloads.R
\name{cum_downloads}
\alias{cum_downloads}
\title{Cumulative downloads plot}
\usage{
cum_downloads(x = NULL, top_n = 10)
}
\arguments{
\item{x}{File name, e.g., cran_downloads_2016-05-19.csv. Default is \code{NULL},
and if so, we look for file with most recent date in its file name in
\code{rappdirs::user_cache_dir("rostats")} + "/cran_downloads/"}
\item{top_n}{(numeric/integer) number of packages to plot data for,
starting from the most downloaded}
}
\description{
Cumulative downloads plot
}
\examples{
\dontrun{
cum_downloads()
}
}
| /man/cum_downloads.Rd | no_license | ropensci/rostats | R | false | true | 636 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cum_downloads.R
\name{cum_downloads}
\alias{cum_downloads}
\title{Cumulative downloads plot}
\usage{
cum_downloads(x = NULL, top_n = 10)
}
\arguments{
\item{x}{File name, e.g., cran_downloads_2016-05-19.csv. Default is \code{NULL},
and if so, we look for file with most recent date in its file name in
\code{rappdirs::user_cache_dir("rostats")} + "/cran_downloads/"}
\item{top_n}{(numeric/integer) number of packages to plot data for,
starting from the most downloaded}
}
\description{
Cumulative downloads plot
}
\examples{
\dontrun{
cum_downloads()
}
}
|
#### Load packages and settings ####
# Load required packages
library(shiny) # Framework for web apps
library(tidyverse) # Collection of multiple plugins for various applications
library(lubridate) # Plugin to manipulate "date" data
library(readr) # Plugin to load CSV files
library(shinyWidgets) # Plugin for web application widgets
# Settings: Increase maximum file upload size to 250 megabytes (250*1024^2 bytes)
options(shiny.maxRequestSize = 250*1024^2)
#### UI Function ####
# Define UI for application. Anything in this section dictates what the
# application will display
# UI definition: a sidebar with step-by-step inputs (two CSV uploads, column
# pickers, two filter pairs, start date and duration, download button) and a
# main panel with three data tabs (raw monthly, raw annual, computed metrics).
ui <- fluidPage(
# Application title
titlePanel("Input settings"),
# Sidebar (settings)
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
# inputID allows you to link the input/output with computation part
fileInput(inputId = "Monthly_Data",
# Label of the input widget
label = "Step 1: Import CSV file with monthly transactions",
# Doesn't allow multiple files uploaded at once
multiple = FALSE,
# Describes what file types are accepted (only CSVs atm)
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Select box: choose the column with ID column for monthly data
selectizeInput(inputId = "Monthly_ID_col",
label = "Step 2: Select the column with the unique customer ID for monthly transactions (email, name)",
choices = ""),
# Select box: choose the column with the monthly transaction date
selectizeInput(inputId = "Monthly_Date_col",
label = "Step 3: Select the column with the transaction date for monthly transactions",
choices = ""),
# Select box: choose the column with transaction amount
selectizeInput(inputId = "Monthly_Amount_col",
label = "Step 4: Select the column with the amount for monthly transactions",
choices = ""),
# Input: Select a file ----
# inputID allows you to link the input/output with computation part
fileInput(inputId = "Annual_Data",
# Label of the input widget
label = "Step 5: Import CSV file with annual transactions (accrual)",
# Doesn't allow multiple files uploaded at once
multiple = FALSE,
# Describes what file types are accepted (only CSVs atm)
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Select box: choose the column with unique customer ID for annual data
selectizeInput(inputId = "Annual_ID_col",
label = "Step 6: Select the column with the unique customer ID for annual transactions (email, name)",
choices = ""),
# Select box: choose the column with the accrual transaction date for annual data
selectizeInput(inputId = "Annual_Date_col",
label = "Step 7: Select the column with the accrual date for annual transactions",
choices = ""),
# Select box: choose the column with the accrual transaction amount for annual data
selectizeInput(inputId = "Annual_Amount_col",
label = "Step 8: Select the column with the accrual amount for annual transactions",
choices = ""),
# Select box: choose the column to be filtered
selectizeInput(inputId = "Filter_Col_1",
label = "Step 9: Select the first column to filter",
choices = ""),
# Select box: choose the filters to be applied on the column selected above
selectizeInput(inputId = "Filter_Var_1",
label = "Step 10: Select what to be included in the data",
choices = "",
multiple = TRUE),
# Select box: choose the second column to be filtered
selectizeInput(inputId = "Filter_Col_2",
label = "Step 11: Select the second column to filter",
choices = ""),
# Select box: choose the filters to be applied on the column selected above
selectizeInput(inputId = "Filter_Var_2",
label = "Step 12: Select what to be included in the data",
choices = "",
multiple = TRUE),
# Date input box: Allows the user to select a date that the metrics table will begin from
dateInput(inputId = "Start_date",
label = "Step 13: Select the start date for the metrics table",
# default date
value = "2019-01-01"),
# Number input: allows the user to input the number of months that the metrics table covers
numericInput(inputId = "Duration",
label = "Step 14: Select the number of months until the end date of metrics table",
# default to 24 months
value = 24),
# Button: Download the transformed data
strong("Step 15: Download the Metrics table"),
downloadButton(outputId ="downloadData",
label = "Download"),
# NOTE(review): the comma after downloadButton() leaves a trailing empty
# argument in sidebarPanel() -- confirm shiny tolerates this.
),
# Main panel display (right-hand section)
mainPanel(
# Output: Tabs with tables showing data before and after ----
tabsetPanel(type = "tabs",
# Create the "Monthly Data" tab and puts the table in it
# Shows the imported data in their raw format
tabPanel("Monthly Data", dataTableOutput("Monthly_Table")),
# Create the "Annual Data" tab and puts the table in it
# Shows the imported data in their raw format
tabPanel("Annual Data", dataTableOutput("Annual_Table")),
# Create the "Metrics Table" tab and puts the table in it
# Shows the data after being transformed
tabPanel("Metrics Table", dataTableOutput("Metrics_Table")))
)
)
)
#### Server Function ####
# Define server logic to display and download selected file ----
# Server logic: reads the uploaded monthly/annual CSVs, drives the dynamic
# column/filter selectors, normalises both files to a common schema
# (Accrual_Amount, Date_Accrual, Customer_ID, Filter_1, Filter_2), and
# computes the monthly metrics table (MRR, churn, upgrades/downgrades,
# customer counts, ARPU, CLV) exposed in the UI and as a CSV download.
server <- function(input, output, session) {
# End of the metrics window: start date plus the chosen number of months.
End_date <- reactive({
# Makes this a required input
req(input$Start_date)
# Calculates the end date of the metrics table based on the start data
# selected by the user and the number of months
End_date <- ymd(input$Start_date) + months(input$Duration)
})
# Names the uploaded files as "Monthly_Data"
Monthly_Data <- reactive({
# Makes this a required input
req(input$Monthly_Data)
# Reads the CSV file uploaded before
df <- read_csv(input$Monthly_Data$datapath,
col_names = TRUE)
})
# Reactive data table for Monthly data (first tab)
output$Monthly_Table <- renderDataTable({
req(input$Monthly_Data)
return(Monthly_Data())
})
# Creates the dynamic selection options for the drop-down select box in Step 2
# Once a file is uploaded, this function uses all the column names in the file
# as possible selections.
# The UI (what the user sees) and the Server (how it's computed) are linked
# based on the inputID.
observe({
updateSelectizeInput(inputId = "Monthly_ID_col",
choices = colnames(Monthly_Data()),
server = TRUE)
})
# Creates the dynamic selection options for the drop-down select box in Step 3
observe({
updateSelectizeInput(inputId = "Monthly_Date_col",
choices = colnames(Monthly_Data()),
server = TRUE)
})
# Creates the dynamic selection options for the drop-down select box in Step 4
observe({
updateSelectizeInput(inputId = "Monthly_Amount_col",
choices = colnames(Monthly_Data()),
server = TRUE)
})
# Names the uploaded files as "Annual_Data"
Annual_Data <- reactive({
# Makes this a required input
req(input$Annual_Data)
# Reads the CSV file uploaded before
df <- read_csv(input$Annual_Data$datapath,
col_names = TRUE)
})
# Reactive data table for Annual data (second tab)
output$Annual_Table <- renderDataTable({
req(input$Annual_Data)
return(Annual_Data())
})
# Creates the dynamic selection options for the drop-down select box in Step 6
observe({
updateSelectizeInput(inputId = "Annual_ID_col",
choices = colnames(Annual_Data()),
server = TRUE)
})
# Creates the dynamic selection options for the drop-down select box in Step 7
observe({
updateSelectizeInput(inputId = "Annual_Date_col",
choices = colnames(Annual_Data()),
server = TRUE)
})
# Creates the dynamic selection options for the drop-down select box in Step 8
observe({
updateSelectizeInput(inputId = "Annual_Amount_col",
choices = colnames(Annual_Data()),
server = TRUE)
})
# Creates a new dataset called "Clean_Monthly_Data" with the five
# standardised columns used by the metrics computation.
Clean_Monthly_Data <- reactive({
# Uses "Monthly_Data" to create new dataset
Clean_Monthly_Data <- Monthly_Data() %>%
# Creates a new column called "Accrual_Amount" based on the
# selection of the user in Step 4
mutate(Accrual_Amount = parse_number(as.character(.data[[input$Monthly_Amount_col]])),
# Creates a new column called "Date_Accrual" based on the
# selection of the user in Step 3
Date_Accrual = dmy(.data[[input$Monthly_Date_col]]),
# Creates a new column called "Customer_ID" based on the
# selection of the user in Step 2
Customer_ID = as.character(.data[[input$Monthly_ID_col]]),
# Creates a new column called "Filter_1" based on the
# selection of the user in Step 9
Filter_1 = .data[[input$Filter_Col_1]],
# Creates a new column called "Filter_2" based on the
# selection of the user in Step 11
Filter_2 = .data[[input$Filter_Col_2]]) %>%
select(Accrual_Amount, Date_Accrual, Customer_ID, Filter_1, Filter_2)
})
# Same as above but for Annual Data
# NOTE(review): unlike the monthly branch, Date_Accrual is NOT parsed with
# dmy() here -- this assumes the annual date column is already a Date (or
# parseable by year()/month() downstream); confirm against real uploads.
Clean_Annual_Data <- reactive({
Clean_Annual_Data <- Annual_Data() %>%
mutate(Accrual_Amount = parse_number(as.character(.data[[input$Annual_Amount_col]])),
Date_Accrual = .data[[input$Annual_Date_col]],
Customer_ID = as.character(.data[[input$Annual_ID_col]]),
Filter_1 = .data[[input$Filter_Col_1]],
Filter_2 = .data[[input$Filter_Col_2]]) %>%
select(Accrual_Amount, Date_Accrual, Customer_ID, Filter_1, Filter_2)
})
# Creates the dynamic selection options for the drop-down
# select box in Step 9. Includes all column names from both data sets.
observe({
updateSelectizeInput(inputId = "Filter_Col_1",
choices = flatten_chr(list(colnames(Annual_Data()), colnames(Monthly_Data()))),
server = TRUE)
})
# Creates the dynamic selection options for the drop-down
# select box in Step 10 based on the selection in Step 9.
# Includes all the unique variables from the column selected above
observe({
updateSelectizeInput(inputId = "Filter_Var_1",
choices = flatten_chr(list(unique(Annual_Data()[[input$Filter_Col_1]]),unique(Monthly_Data()[[input$Filter_Col_1]]))),
server = TRUE)
})
# Second filter column selector (Step 11), mirroring Step 9.
observe({
updateSelectizeInput(inputId = "Filter_Col_2",
choices = flatten_chr(list(colnames(Annual_Data()), colnames(Monthly_Data()))),
server = TRUE
)
})
# Second filter value selector (Step 12), mirroring Step 10.
observe({
updateSelectizeInput(inputId = "Filter_Var_2",
choices = flatten_chr(list(unique(Annual_Data()[[input$Filter_Col_2]]),unique(Monthly_Data()[[input$Filter_Col_2]]))),
server = TRUE
)
})
# Combines annual and monthly data and then applies filters
Select_Data <- reactive({
Merged_Data <- Clean_Annual_Data() %>%
bind_rows(Clean_Monthly_Data()) %>%
filter(Filter_1 %in% input$Filter_Var_1,
Filter_2 %in% input$Filter_Var_2)
})
# Core computation: per-month MRR per customer, expanded over the full
# reporting window, then aggregated to the monthly metrics table.
Metrics_Data <- reactive({
# Monthly recurring revenue per customer per calendar month.
MRR <- Select_Data() %>%
mutate(Year = year(Date_Accrual),
Month = month(Date_Accrual)) %>%
group_by(Customer_ID, Year, Month) %>%
summarise(MRR = sum(Accrual_Amount))
# One row per distinct customer, anchored at the window's end date.
# NOTE(review): summarise() returning multiple rows is deprecated in newer
# dplyr (reframe() is the replacement) -- confirm the deployed version.
Unique_Names <- Select_Data() %>%
summarise(Customer_ID = unique(Customer_ID)) %>%
mutate(Date_Accrual = ymd(End_date()))
Accrual_month <- rep(0:(input$Duration - 1), nrow(Unique_Names))
# Duplicate each customer row input$Duration times (one per month in the
# reporting window).
Expanded_Data <- Unique_Names %>%
slice(rep(1:n(), each = input$Duration)) %>%
# Attach a 0-based month offset to each duplicated row
cbind(Accrual_month) %>%
# Walk backwards from the end date to label every row with its Year/Month
mutate(Date_Accrual = Date_Accrual - (Accrual_month*months(1)),
Year = year(Date_Accrual),
Month = month(Date_Accrual)) %>%
select(!c(Accrual_month, Date_Accrual))
# Join observed MRR onto the full customer-month grid (0 where absent),
# derive churn/upgrade/downgrade per customer, then aggregate by month.
# NOTE(review): lag_MRR is computed with lead() (the FOLLOWING month's
# MRR), so the churn columns compare each month against the next one --
# confirm this ordering matches the intended month sequence.
Joint_Data <- Expanded_Data %>%
left_join(MRR,
by = c("Customer_ID","Year", "Month")) %>%
mutate(MRR = if_else(is.na(MRR), 0, MRR)) %>%
group_by(Customer_ID) %>%
mutate(lag_MRR = lead(MRR),
lag_MRR = if_else(is.na(lag_MRR), 0, lag_MRR),
Churn_Amount = if_else(MRR == 0, lag_MRR - MRR, 0),
Upgrade_Amount = if_else(lag_MRR == 0, 0,if_else(MRR > lag_MRR, MRR - lag_MRR, 0)),
Downgrade_Amount = if_else(MRR == 0, 0,if_else(MRR < lag_MRR, lag_MRR - MRR, 0)),
Count = if_else(MRR > 0, 1, 0),
Churn_Customers = if_else(Count == 0, lead(Count) - Count, 0)) %>%
ungroup() %>%
group_by(Year, Month) %>%
summarise(MRR = sum(MRR),
Churn_MRR = sum(Churn_Amount),
MRR_Upgrade = sum(MRR_Upgrade <- Upgrade_Amount),
MRR_Downgrade = sum(Downgrade_Amount),
Total_Customers = sum(Count),
Churn_Customers = sum(Churn_Customers)) %>%
ungroup() %>%
mutate(ARR = MRR*12,
Total_Churn = Churn_MRR + MRR_Downgrade - MRR_Upgrade,
MRR_Churn_Rate = Total_Churn / lag(MRR),
Customer_Churn_Rate = Churn_Customers / lag(Total_Customers),
ARPU = MRR / Total_Customers,
CLV = ARPU / Customer_Churn_Rate,
ARPU = if_else(is.na(ARPU), 0, ARPU),
CLV = if_else(is.na(CLV), 0, if_else(is.infinite(CLV), MRR, CLV)),
Customer_Churn_Rate = if_else(is.na(Customer_Churn_Rate), 0, Customer_Churn_Rate),
MRR_Churn_Rate = if_else(is.na(MRR_Churn_Rate), 0, MRR_Churn_Rate))
})
# Reactive data table for exported data
output$Metrics_Table <- renderDataTable({
return(Metrics_Data())
})
# Download csv of selected dataset ----
output$downloadData <- downloadHandler(
filename = "Metrics.csv",
content = function(file) {
write.csv(Metrics_Data(), file, row.names = FALSE)
}
)
}
#### Run the application ####
# Assemble and launch the Shiny app from the ui/server objects defined above.
shinyApp(ui = ui, server = server)
# Load require packages
library(shiny) # Framework for web apps
library(tidyverse) # Colletion of multiple plugins for various applications
library(lubridate) # Plugin to manipulate "date" data
library(readr) # Plugin to load CSV files
library(shinyWidgets) # Plugin for web application widgets
# Settings: Increase maximum file upload size to 30 megabytes
options(shiny.maxRequestSize = 250*1024^2)
#### UI Function ####
# Define UI for application. Anything in this section dictates what the
# application will display
ui <- fluidPage(
# Application title
titlePanel("Input settings"),
# Sidebar (settings)
sidebarLayout(
sidebarPanel(
# Input: Select a file ----
# inputID allows you to link the input/output with computation part
fileInput(inputId = "Monthly_Data",
# Label of the input widget
label = "Step 1: Import CSV file with monthly transactions",
# Doesn't allow multiple files uploaded at once
multiple = FALSE,
# Describes what file types are accepted (only CSVs atm)
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Select box: choose the column with ID column for monthly data
selectizeInput(inputId = "Monthly_ID_col",
label = "Step 2: Select the column with the unique customer ID for monthly transactions (email, name)",
choices = ""),
# Select box: choose the column with the mothly trnascation date
selectizeInput(inputId = "Monthly_Date_col",
label = "Step 3: Select the column with the transaction date for monthly transactions",
choices = ""),
# Select box: choose the column with transaction amount
selectizeInput(inputId = "Monthly_Amount_col",
label = "Step 4: Select the column with the amount for monthly transactions",
choices = ""),
# Input: Select a file ----
# inputID allows you to link the input/output with computation part
fileInput(inputId = "Annual_Data",
# Label of the input widget
label = "Step 5: Import CSV file with annual transactions (accrual)",
# Doesn't allow multiple files uploaded at once
multiple = FALSE,
# Describes what file types are accepted (only CSVs atm)
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Select box: choose the column with unique customer ID for annual data
selectizeInput(inputId = "Annual_ID_col",
label = "Step 6: Select the column with the unique customer ID for annual transactions (email, name)",
choices = ""),
# Select box: choose the column with the accrual transaction date for annual data
selectizeInput(inputId = "Annual_Date_col",
label = "Step 7: Select the column with the accrual date for annual transactions",
choices = ""),
# Select box: choose the column with the accrual transaction amount for annual data
selectizeInput(inputId = "Annual_Amount_col",
label = "Step 8: Select the column with the accrual amount for annual transactions",
choices = ""),
# Select box: choose the column to be filtered
selectizeInput(inputId = "Filter_Col_1",
label = "Step 9: Select the first column to filter",
choices = ""),
# Select box: choose the filters to be applied on the column selected above
selectizeInput(inputId = "Filter_Var_1",
label = "Step 10: Select what to be included in the data",
choices = "",
multiple = TRUE),
# Select box: choose the second column to be filtered
selectizeInput(inputId = "Filter_Col_2",
label = "Step 11: Select the second column to filter",
choices = ""),
# Select box: choose the filters to be applied on the column selected above
selectizeInput(inputId = "Filter_Var_2",
label = "Step 12: Select what to be included in the data",
choices = "",
multiple = TRUE),
# Data input box: Allows the user to select a data that the metrics table will begin from
dateInput(inputId = "Start_date",
label = "Step 13: Select the start date for the metrics table",
# default date
value = "2019-01-01"),
# Number input: allows the user to input the number of months that the metrics table covers
numericInput(inputId = "Duration",
label = "Step 14: Select the number of months until the end date of metrics table",
# default to 24 months
value = 24),
# Button: Download the transformed data
strong("Step 15: Download the Metrics table"),
downloadButton(outputId ="downloadData",
label = "Download"),
),
# Main panel display (right-hand section)
mainPanel(
# Output: Tabs with tables showing data before and after ----
tabsetPanel(type = "tabs",
# Create the "Monthly Data" tab and puts the table in it
# Shows the imported data in their raw format
tabPanel("Monthly Data", dataTableOutput("Monthly_Table")),
# Create the "Annual Data" tab and puts the table in it
# Shows the imported data in their raw format
tabPanel("Annual Data", dataTableOutput("Annual_Table")),
# Create the "Metrics Table" tab and puts the table in it
# Shows the data after being transformed
tabPanel("Metrics Table", dataTableOutput("Metrics_Table")))
)
)
)
#### Server Function ####
# Define server logic to display and download selected file ----
server <- function(input, output, session) {
  # End date of the metrics table: the start date chosen in Step 13 plus the
  # number of months chosen in Step 14.
  End_date <- reactive({
    req(input$Start_date, input$Duration)
    ymd(input$Start_date) + months(input$Duration)
  })
  # Monthly transactions uploaded by the user in Step 1.
  Monthly_Data <- reactive({
    req(input$Monthly_Data)
    read_csv(input$Monthly_Data$datapath,
             col_names = TRUE)
  })
  # First tab: the raw monthly data exactly as imported.
  output$Monthly_Table <- renderDataTable({
    req(input$Monthly_Data)
    Monthly_Data()
  })
  # Populate the Step 2-4 drop-downs with the monthly file's column names.
  # The UI (what the user sees) and the server are linked via the inputId.
  observe({
    updateSelectizeInput(inputId = "Monthly_ID_col",
                         choices = colnames(Monthly_Data()),
                         server = TRUE)
  })
  observe({
    updateSelectizeInput(inputId = "Monthly_Date_col",
                         choices = colnames(Monthly_Data()),
                         server = TRUE)
  })
  observe({
    updateSelectizeInput(inputId = "Monthly_Amount_col",
                         choices = colnames(Monthly_Data()),
                         server = TRUE)
  })
  # Annual (accrual) transactions uploaded by the user in Step 5.
  Annual_Data <- reactive({
    req(input$Annual_Data)
    read_csv(input$Annual_Data$datapath,
             col_names = TRUE)
  })
  # Second tab: the raw annual data exactly as imported.
  output$Annual_Table <- renderDataTable({
    req(input$Annual_Data)
    Annual_Data()
  })
  # Populate the Step 6-8 drop-downs with the annual file's column names.
  observe({
    updateSelectizeInput(inputId = "Annual_ID_col",
                         choices = colnames(Annual_Data()),
                         server = TRUE)
  })
  observe({
    updateSelectizeInput(inputId = "Annual_Date_col",
                         choices = colnames(Annual_Data()),
                         server = TRUE)
  })
  observe({
    updateSelectizeInput(inputId = "Annual_Amount_col",
                         choices = colnames(Annual_Data()),
                         server = TRUE)
  })
  # Monthly data normalized to the five working columns used downstream.
  Clean_Monthly_Data <- reactive({
    # FIX: wait for every column selection before indexing. Without these
    # guards the .data[[""]] lookups below error out while the user is
    # still working through Steps 2-4 and 9/11.
    req(input$Monthly_Amount_col, input$Monthly_Date_col,
        input$Monthly_ID_col, input$Filter_Col_1, input$Filter_Col_2)
    Monthly_Data() %>%
      # Transaction amount (Step 4), parsed to a number.
      mutate(Accrual_Amount = parse_number(as.character(.data[[input$Monthly_Amount_col]])),
             # Transaction date (Step 3), parsed as day/month/year.
             Date_Accrual = dmy(.data[[input$Monthly_Date_col]]),
             # Unique customer identifier (Step 2).
             Customer_ID = as.character(.data[[input$Monthly_ID_col]]),
             # Filter columns chosen in Steps 9 and 11.
             Filter_1 = .data[[input$Filter_Col_1]],
             Filter_2 = .data[[input$Filter_Col_2]]) %>%
      select(Accrual_Amount, Date_Accrual, Customer_ID, Filter_1, Filter_2)
  })
  # Same normalization for the annual data (Steps 6-8). Note the accrual
  # date column is taken as-is here (no dmy parsing), as before.
  Clean_Annual_Data <- reactive({
    req(input$Annual_Amount_col, input$Annual_Date_col,
        input$Annual_ID_col, input$Filter_Col_1, input$Filter_Col_2)
    Annual_Data() %>%
      mutate(Accrual_Amount = parse_number(as.character(.data[[input$Annual_Amount_col]])),
             Date_Accrual = .data[[input$Annual_Date_col]],
             Customer_ID = as.character(.data[[input$Annual_ID_col]]),
             Filter_1 = .data[[input$Filter_Col_1]],
             Filter_2 = .data[[input$Filter_Col_2]]) %>%
      select(Accrual_Amount, Date_Accrual, Customer_ID, Filter_1, Filter_2)
  })
  # Step 9 drop-down: all column names from both uploaded data sets.
  observe({
    updateSelectizeInput(inputId = "Filter_Col_1",
                         choices = flatten_chr(list(colnames(Annual_Data()), colnames(Monthly_Data()))),
                         server = TRUE)
  })
  # Step 10 drop-down: the unique values of the column chosen in Step 9.
  observe({
    req(input$Filter_Col_1)
    updateSelectizeInput(inputId = "Filter_Var_1",
                         choices = flatten_chr(list(unique(Annual_Data()[[input$Filter_Col_1]]), unique(Monthly_Data()[[input$Filter_Col_1]]))),
                         server = TRUE)
  })
  # Step 11 drop-down: all column names from both uploaded data sets.
  observe({
    updateSelectizeInput(inputId = "Filter_Col_2",
                         choices = flatten_chr(list(colnames(Annual_Data()), colnames(Monthly_Data()))),
                         server = TRUE)
  })
  # Step 12 drop-down: the unique values of the column chosen in Step 11.
  observe({
    req(input$Filter_Col_2)
    updateSelectizeInput(inputId = "Filter_Var_2",
                         choices = flatten_chr(list(unique(Annual_Data()[[input$Filter_Col_2]]), unique(Monthly_Data()[[input$Filter_Col_2]]))),
                         server = TRUE)
  })
  # Combine annual and monthly data, keeping only the rows matching the
  # user's filter selections (Steps 10 and 12).
  Select_Data <- reactive({
    Clean_Annual_Data() %>%
      bind_rows(Clean_Monthly_Data()) %>%
      filter(Filter_1 %in% input$Filter_Var_1,
             Filter_2 %in% input$Filter_Var_2)
  })
  # Build the SaaS-style metrics table (MRR, churn, ARPU, CLV, ...).
  Metrics_Data <- reactive({
    # Monthly recurring revenue per customer per calendar month.
    MRR <- Select_Data() %>%
      mutate(Year = year(Date_Accrual),
             Month = month(Date_Accrual)) %>%
      group_by(Customer_ID, Year, Month) %>%
      summarise(MRR = sum(Accrual_Amount))
    # One row per customer, anchored at the metrics table's end date.
    Unique_Names <- Select_Data() %>%
      summarise(Customer_ID = unique(Customer_ID)) %>%
      mutate(Date_Accrual = ymd(End_date()))
    Accrual_month <- rep(0:(input$Duration - 1), nrow(Unique_Names))
    # Expand every customer to one row per month of the reporting window,
    # walking backwards from the end date one month per row.
    Expanded_Data <- Unique_Names %>%
      slice(rep(1:n(), each = input$Duration)) %>%
      cbind(Accrual_month) %>%
      mutate(Date_Accrual = Date_Accrual - (Accrual_month * months(1)),
             Year = year(Date_Accrual),
             Month = month(Date_Accrual)) %>%
      select(!c(Accrual_month, Date_Accrual))
    Joint_Data <- Expanded_Data %>%
      left_join(MRR,
                by = c("Customer_ID", "Year", "Month")) %>%
      mutate(MRR = if_else(is.na(MRR), 0, MRR)) %>%
      group_by(Customer_ID) %>%
      # NOTE(review): "lag_MRR" is computed with lead(), i.e. it holds the
      # adjacent row's MRR in the row ordering produced above; the name is
      # kept unchanged for compatibility with the original logic.
      mutate(lag_MRR = lead(MRR),
             lag_MRR = if_else(is.na(lag_MRR), 0, lag_MRR),
             Churn_Amount = if_else(MRR == 0, lag_MRR - MRR, 0),
             Upgrade_Amount = if_else(lag_MRR == 0, 0, if_else(MRR > lag_MRR, MRR - lag_MRR, 0)),
             Downgrade_Amount = if_else(MRR == 0, 0, if_else(MRR < lag_MRR, lag_MRR - MRR, 0)),
             Count = if_else(MRR > 0, 1, 0),
             Churn_Customers = if_else(Count == 0, lead(Count) - Count, 0)) %>%
      ungroup() %>%
      group_by(Year, Month) %>%
      summarise(MRR = sum(MRR),
                Churn_MRR = sum(Churn_Amount),
                MRR_Upgrade = sum(Upgrade_Amount),
                MRR_Downgrade = sum(Downgrade_Amount),
                Total_Customers = sum(Count),
                Churn_Customers = sum(Churn_Customers)) %>%
      ungroup() %>%
      mutate(ARR = MRR * 12,
             Total_Churn = Churn_MRR + MRR_Downgrade - MRR_Upgrade,
             MRR_Churn_Rate = Total_Churn / lag(MRR),
             Customer_Churn_Rate = Churn_Customers / lag(Total_Customers),
             ARPU = MRR / Total_Customers,
             CLV = ARPU / Customer_Churn_Rate,
             # Clean up NA/Inf artefacts from the first month and from
             # months with zero customers.
             ARPU = if_else(is.na(ARPU), 0, ARPU),
             CLV = if_else(is.na(CLV), 0, if_else(is.infinite(CLV), MRR, CLV)),
             Customer_Churn_Rate = if_else(is.na(Customer_Churn_Rate), 0, Customer_Churn_Rate),
             MRR_Churn_Rate = if_else(is.na(MRR_Churn_Rate), 0, MRR_Churn_Rate))
  })
  # Third tab: the computed metrics table.
  output$Metrics_Table <- renderDataTable({
    Metrics_Data()
  })
  # Step 15: download the metrics table as CSV.
  output$downloadData <- downloadHandler(
    filename = "Metrics.csv",
    content = function(file) {
      write.csv(Metrics_Data(), file, row.names = FALSE)
    }
  )
}
#### Run the application ####
shinyApp(ui = ui, server = server) |
# GRAPHS (igraph demo)
# FIX: only install igraph when it is missing instead of re-installing on
# every run of the script.
if (!requireNamespace("igraph", quietly = TRUE)) {
  install.packages("igraph")
}
library(igraph)
# Undirected graph: a 4-cycle A-B-C-D-A with vertex E connected to all four.
help(graph)
vo_huong <- make_graph(~ A-B-C-D-A, E-A:B:C:D, directed = FALSE)
plot(vo_huong)
# Directed graph built from a flat vector of (from, to) vertex pairs.
help(make_directed_graph)
a <- make_graph(c(1, 2, 2, 3, 3, 4, 5, 6, 4, 1, 2, 4, 4, 5, 6, 1), directed = TRUE)
plot(a) | /CTDLGT_R/BT12_BT13_DoThiCoHuong_DoThiVoHuong.R | no_license | thanhlamnguyen/CTDL_GT_DoAN | R | false | false | 298 | r | # DO THI
# NOTE(review): unconditional install.packages() re-installs igraph on every
# run; consider guarding with requireNamespace().
install.packages('igraph')
library(igraph)
# Undirected graph: a 4-cycle A-B-C-D-A with vertex E connected to all four
help(graph)
vo_huong <- make_graph( ~ A-B-C-D-A, E-A:B:C:D, directed = FALSE)
plot(vo_huong)
# Directed graph built from a flat vector of (from, to) vertex pairs
help(make_directed_graph)
a = make_graph(c(1, 2, 2, 3, 3, 4, 5, 6, 4,1, 2,4, 4,5, 6,1), directed = TRUE)
plot(a) |
\name{ustemp}
\alias{ustemp}
\non_function{}
\title{U.S. temperature data}
\usage{data(ustemp)}
\description{
The \code{ustemp} data frame has 56 observations on the temperature
and location of 56 U.S. cities.
}
\format{
This data frame contains the following columns:
\describe{
\item{city}{character string giving name of city and state
(two-letter abbreviation).}
\item{min.temp}{average minimum January temperature.}
\item{latitude}{degrees latitude (north of Equator).}
\item{longitude}{degrees longitude (west of Greenwich).}
}
}
\source{
Peixoto, J.L. (1990). A property of well-formulated polynomial
regression models. \emph{American Statistician}, \bold{44},
26-30.
}
\references{
Ruppert, D., Wand, M.P. and Carroll, R.J. (2003)\cr
\emph{Semiparametric Regression} Cambridge University Press.\cr
\url{http://stat.tamu.edu/~carroll/semiregbook/}
}
\examples{
library(SemiPar)
data(ustemp)
attach(ustemp)
grey.levs <- min.temp+20
col.vec <- paste("grey",as.character(grey.levs),sep="")
plot(-longitude,latitude,col=col.vec,pch=16,cex=3,xlim=c(-130,-60))
text(-longitude,latitude,as.character(city))
}
\keyword{datasets}
| /man/ustemp.Rd | no_license | cran/SemiPar | R | false | false | 1,180 | rd | \name{ustemp}
\alias{ustemp}
\non_function{}
\title{U.S. temperature data}
\usage{data(ustemp)}
\description{
The \code{ustemp} data frame has 56 observations on the temperature
and location of 56 U.S. cities.
}
\format{
This data frame contains the following columns:
\describe{
\item{city}{character string giving name of city and state
(two-letter abbreviation).}
\item{min.temp}{average minimum January temperature.}
\item{latitude}{degrees latitude (north of Equator).}
\item{longitude}{degrees longitude (west of Greenwich).}
}
}
\source{
Peixoto, J.L. (1990). A property of well-formulated polynomial
regression models. \emph{American Statistician}, \bold{44},
26-30.
}
\references{
Ruppert, D., Wand, M.P. and Carroll, R.J. (2003)\cr
\emph{Semiparametric Regression} Cambridge University Press.\cr
\url{http://stat.tamu.edu/~carroll/semiregbook/}
}
\examples{
library(SemiPar)
data(ustemp)
attach(ustemp)
grey.levs <- min.temp+20
col.vec <- paste("grey",as.character(grey.levs),sep="")
plot(-longitude,latitude,col=col.vec,pch=16,cex=3,xlim=c(-130,-60))
text(-longitude,latitude,as.character(city))
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
# Update every installed package, then (in "test" mode) verify that each
# package still loads, re-installing broken packages and any broken
# shared-object dependency they report.
update.packages(ask = FALSE, checkBuilt = TRUE, Ncpus = parallel::detectCores())
## args <- commandArgs(trailingOnly = TRUE)[1]
args <- "test"
if (length(args) > 0 && args == "test") {
  if (!requireNamespace("callr", quietly = TRUE)) {
    install.packages("callr")
  }
  # Compiled packages on which many others depend -- check these first.
  problem_pkgs <- c("quantreg", "SparseM", "igraph", "stringi",
                    "Rcpp", "RcppEigen", "RcppArmadillo",
                    "TMB", "TTR", "robustbase", "timereg",
                    "rgdal", "minpack.lm")
  all_pkgs <- rownames(installed.packages())
  exclude_pkgs <- c("tcltk")
  pkgs <- setdiff(c(problem_pkgs, setdiff(all_pkgs, problem_pkgs)),
                  exclude_pkgs)
  writeLines(pkgs, "~/.R/pkgs_list")
  pb <- txtProgressBar(min = 1, max = length(pkgs), style = 3)
  for (i in seq_along(pkgs)) {
    p <- pkgs[i]
    setTxtProgressBar(pb, i)
    # Load the package in a fresh subprocess so a broken shared object
    # cannot take down this session. On success 'err' is library()'s
    # return value (the vector of attached package names, containing p);
    # on failure it is the error message.
    err <- callr::r(function(p) tryCatch(
      library(p, character.only = TRUE, quietly = TRUE),
      error = function(e) conditionMessage(e)
    ), list(p = p))
    if (!(p %in% err)) {
      message("Hit the following error loading package ", p, ":\n", err)
      # FIX: 'bad_so' was only assigned inside the shared-object branch but
      # was read unconditionally below, aborting the loop with
      # "object 'bad_so' not found" for any other load error.
      bad_so <- NULL
      if (grepl("unable to load shared object", err)) {
        # Extract the library whose .so failed to load and reinstall it
        # before reinstalling the package itself.
        bad_so_full <- regexec(
          ".*? load shared object '/Users/shik544/R/([[:alnum:].]+)/.*\\.so'.*",
          err,
          perl = TRUE
        )
        bad_so <- regmatches(err, bad_so_full)[[1]][[2]]
        message("Problem with dependent library ", bad_so,
                ". Trying to reinstall.")
        install.packages(bad_so)
      }
      # Reinstall the failing package unless it was itself the broken
      # dependency that was just reinstalled.
      if (is.null(bad_so) || bad_so != p) {
        message("Reinstalling package ", p)
        install.packages(p)
      }
    }
  }
  close(pb)
}
| /utils-r/.local/bin/rupdate2 | no_license | ashiklom/dotfiles | R | false | false | 2,090 | #!/usr/bin/env Rscript
# Update every installed package, then (in "test" mode) verify that each
# package still loads, re-installing broken packages and any broken
# shared-object dependency they report.
update.packages(ask = FALSE, checkBuilt = TRUE, Ncpus = parallel::detectCores())
## args <- commandArgs(trailingOnly = TRUE)[1]
args <- "test"
if (length(args) > 0 && args == "test") {
  if (!requireNamespace("callr", quietly = TRUE)) {
    install.packages("callr")
  }
  # Compiled packages on which many others depend -- check these first.
  problem_pkgs <- c("quantreg", "SparseM", "igraph", "stringi",
                    "Rcpp", "RcppEigen", "RcppArmadillo",
                    "TMB", "TTR", "robustbase", "timereg",
                    "rgdal", "minpack.lm")
  all_pkgs <- rownames(installed.packages())
  exclude_pkgs <- c("tcltk")
  pkgs <- setdiff(c(problem_pkgs, setdiff(all_pkgs, problem_pkgs)),
                  exclude_pkgs)
  writeLines(pkgs, "~/.R/pkgs_list")
  pb <- txtProgressBar(min = 1, max = length(pkgs), style = 3)
  for (i in seq_along(pkgs)) {
    p <- pkgs[i]
    setTxtProgressBar(pb, i)
    # Load the package in a fresh subprocess so a broken shared object
    # cannot take down this session. On success 'err' is library()'s
    # return value (the vector of attached package names, containing p);
    # on failure it is the error message.
    err <- callr::r(function(p) tryCatch(
      library(p, character.only = TRUE, quietly = TRUE),
      error = function(e) conditionMessage(e)
    ), list(p = p))
    if (!(p %in% err)) {
      message("Hit the following error loading package ", p, ":\n", err)
      # FIX: 'bad_so' was only assigned inside the shared-object branch but
      # was read unconditionally below, aborting the loop with
      # "object 'bad_so' not found" for any other load error.
      bad_so <- NULL
      if (grepl("unable to load shared object", err)) {
        # Extract the library whose .so failed to load and reinstall it
        # before reinstalling the package itself.
        bad_so_full <- regexec(
          ".*? load shared object '/Users/shik544/R/([[:alnum:].]+)/.*\\.so'.*",
          err,
          perl = TRUE
        )
        bad_so <- regmatches(err, bad_so_full)[[1]][[2]]
        message("Problem with dependent library ", bad_so,
                ". Trying to reinstall.")
        install.packages(bad_so)
      }
      # Reinstall the failing package unless it was itself the broken
      # dependency that was just reinstalled.
      if (is.null(bad_so) || bad_so != p) {
        message("Reinstalling package ", p)
        install.packages(p)
      }
    }
  }
  close(pb)
}
| |
# Plot 3: energy sub-metering on 2007-02-01/02 from the UCI household
# power consumption data set.
filename <- "household_power_consumption.txt"
# FIX: this data set marks missing values with "?"; mapping them to NA at
# read time avoids the factor/character round-trip and coercion warnings.
data <- read.table(filename, header = TRUE, sep = ";", na.strings = "?",
                   stringsAsFactors = FALSE)
# Keep only the two days of interest.
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Ensure the measurement columns are numeric.
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
data[num_cols] <- lapply(data[num_cols], function(x) as.numeric(as.character(x)))
# Combine date and time into a single POSIX timestamp for the x axis.
dataTime <- paste(data$Date, data$Time)
data$DateTime <- strptime(dataTime, format = "%d/%m/%Y %H:%M:%S")
### Builds Plot 3
png(filename = "plot3.png", width = 480, height = 480, units = "px",
    bg = "#ffffff")
# Empty canvas first; the three series are drawn as lines below.
plot(data$DateTime,
     data$Sub_metering_1,
     type = "n",
     xlab = "",
     ylab = "Energy sub metering")
## Create lines
lines(data$DateTime, data$Sub_metering_1, col = "black")
lines(data$DateTime, data$Sub_metering_2, col = "red")
lines(data$DateTime, data$Sub_metering_3, col = "blue")
## Create legend
legend(
  "topright",
  c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  lty = c(1, 1, 1),
  lwd = c(2, 2, 2),
  col = c("black", "red", "blue")
)
dev.off() | /plot3.R | no_license | perlichman/ExData_Plotting1 | R | false | false | 1,423 | r | #finds file
filename <- "household_power_consumption.txt"
# FIX: this data set marks missing values with "?"; mapping them to NA at
# read time avoids the factor/character round-trip and coercion warnings.
data <- read.table(filename, header = TRUE, sep = ";", na.strings = "?",
                   stringsAsFactors = FALSE)
# Keep only the two days of interest.
data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Ensure the measurement columns are numeric.
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
data[num_cols] <- lapply(data[num_cols], function(x) as.numeric(as.character(x)))
# Combine date and time into a single POSIX timestamp for the x axis.
dataTime <- paste(data$Date, data$Time)
data$DateTime <- strptime(dataTime, format = "%d/%m/%Y %H:%M:%S")
### Builds Plot 3
png(filename = "plot3.png", width = 480, height = 480, units = "px",
    bg = "#ffffff")
# Empty canvas first; the three series are drawn as lines below.
plot(data$DateTime,
     data$Sub_metering_1,
     type = "n",
     xlab = "",
     ylab = "Energy sub metering")
## Create lines
lines(data$DateTime, data$Sub_metering_1, col = "black")
lines(data$DateTime, data$Sub_metering_2, col = "red")
lines(data$DateTime, data$Sub_metering_3, col = "blue")
## Create legend
legend(
  "topright",
  c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
  lty = c(1, 1, 1),
  lwd = c(2, 2, 2),
  col = c("black", "red", "blue")
)
dev.off()
# boxplot with facet_grid
# Boxplot of per-family service counts by service type, faceted by
# children-placement status.
#
# a..e : numeric vectors of equal length -- per-family counts for Total,
#        Housing, Behavior, Nutrition and Mental services respectively.
# f    : placement indicator, one value per family.
# Returns a ggplot object.
generateTFPlot <- function(a, b, c, d, e, f) {
  # FIX: the cohort size was hard-coded to 1549 families; derive it from
  # the input instead so any cohort size works (same behavior for n = 1549).
  n <- length(a)
  stopifnot(length(b) == n, length(c) == n, length(d) == n,
            length(e) == n, length(f) == n)
  numService <- c(a, b, c, d, e)
  nameService <- c(rep("Total", n), rep("Housing", n), rep("Behavior", n),
                   rep("Nutrition", n), rep("Mental", n))
  placed <- rep(f, 5)
  datPlot <- data.frame(numService, services = as.factor(nameService), placed)
  p <- ggplot(datPlot, aes(services, numService))
  p + geom_boxplot(varwidth = TRUE, outlier.colour = "grey") +
    facet_grid(. ~ placed) +
    ylab("numServiceFamily") +
    ggtitle("Number of Services and Children Placement Comparison Graph")
}
| /Graph/TFplot.R | no_license | nicolethegrape/DHSTeam1-Ziyi-Shuning-Xiaoying | R | false | false | 542 | r | # boxplot with facet_grid
# Boxplot of per-family service counts by service type, faceted by
# children-placement status.
#
# a..e : numeric vectors of equal length -- per-family counts for Total,
#        Housing, Behavior, Nutrition and Mental services respectively.
# f    : placement indicator, one value per family.
# Returns a ggplot object.
generateTFPlot <- function(a, b, c, d, e, f) {
  # FIX: the cohort size was hard-coded to 1549 families; derive it from
  # the input instead so any cohort size works (same behavior for n = 1549).
  n <- length(a)
  stopifnot(length(b) == n, length(c) == n, length(d) == n,
            length(e) == n, length(f) == n)
  numService <- c(a, b, c, d, e)
  nameService <- c(rep("Total", n), rep("Housing", n), rep("Behavior", n),
                   rep("Nutrition", n), rep("Mental", n))
  placed <- rep(f, 5)
  datPlot <- data.frame(numService, services = as.factor(nameService), placed)
  p <- ggplot(datPlot, aes(services, numService))
  p + geom_boxplot(varwidth = TRUE, outlier.colour = "grey") +
    facet_grid(. ~ placed) +
    ylab("numServiceFamily") +
    ggtitle("Number of Services and Children Placement Comparison Graph")
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{cpp_compute_occ_exp}
\alias{cpp_compute_occ_exp}
\title{cpp_compute_occ_exp}
\usage{
cpp_compute_occ_exp(lambda, alpha, alpha_offset, tau)
}
\arguments{
\item{lambda}{a matrix of lifelines whose rows are units of observation (individuals),
and whose columns are (in order): start time, end time, event time}
\item{alpha}{a matrix whose rows are age groups and whose columns are
(in order): start time, end time (both starting from 0)}
\item{alpha_offset}{a vector with the birthdate of each unit of observation or,
more generally, the offset to use for the age groups in alpha}
\item{tau}{a matrix of time periods whose rows are units of observation (individuals),
and whose columns are (in order): start time, end time}
}
\value{
a matrix with one row for each unit of observation (individual) whose
columns are (in order): age group 1 exposure, ..., last age group exposure,
age group 1 number of events, ..., last age group number of events
}
\description{
compute occurrences and exposures from lifelines,
age groups, and time intervals
}
\details{
TODO - should write a more detailed description
}
| /man/cpp_compute_occ_exp.Rd | no_license | dfeehan/dhstools | R | false | false | 1,228 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{cpp_compute_occ_exp}
\alias{cpp_compute_occ_exp}
\title{cpp_compute_occ_exp}
\usage{
cpp_compute_occ_exp(lambda, alpha, alpha_offset, tau)
}
\arguments{
\item{lambda}{a matrix of lifelines whose rows are units of observation (individuals),
and whose columns are (in order): start time, end time, event time}
\item{alpha}{a matrix whose rows are age groups and whose columns are
(in order): start time, end time (both starting from 0)}
\item{alpha_offset}{a vector with the birthdate of each unit of observation or,
more generally, the offset to use for the age groups in alpha}
\item{tau}{a matrix of time periods whose rows are units of observation (individuals),
and whose columns are (in order): start time, end time}
}
\value{
a matrix with one row for each unit of observation (individual) whose
columns are (in order): age group 1 exposure, ..., last age group exposure,
age group 1 number of events, ..., last age group number of events
}
\description{
compute occurrences and exposures from lifelines,
age groups, and time intervals
}
\details{
TODO - should write a more detailed description
}
|
# Auto-generated (AFL) fuzz-test case for the internal function
# multivariance:::match_rows: a 5x7 numeric matrix A with extreme values in
# the first column, and a 1x1 zero matrix B.
testlist <- list(A = structure(c(2.31584332411508e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613099932-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31584332411508e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%
%
% on Wed Feb 08 14:38:05 2006.
%
% Generator was the Rdoc class, which is part of the R.oo package written
% by Henrik Bengtsson, 2001-2004.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{addCount.BigBang}
\alias{addCount.BigBang}
\alias{BigBang.addCount}
\alias{addCount,BigBang-method}
\alias{addCount}
\keyword{methods}
\keyword{internal}
\title{Add a chromosome to rank and frequency stability counting}
\description{
Add a chromosome to the rank and frequency stability counting.
This is an internal function
}
\usage{\method{addCount}{BigBang}(o, chr, ...)}
\arguments{
\item{chr}{Chromosome}
}
\value{
Nothing.
}
\author{Victor Trevino. Francesco Falciani Group. University of Birmingham, U.K. http://www.bip.bham.ac.uk/bioinf}
\seealso{
For more information see \code{\link{BigBang}}.
}
\keyword{methods}
| /man/addCount.BigBang.Rd | no_license | cran/galgo | R | false | false | 1,060 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
%
%
% on Wed Feb 08 14:38:05 2006.
%
% Generator was the Rdoc class, which is part of the R.oo package written
% by Henrik Bengtsson, 2001-2004.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{addCount.BigBang}
\alias{addCount.BigBang}
\alias{BigBang.addCount}
\alias{addCount,BigBang-method}
\alias{addCount}
\keyword{methods}
\keyword{internal}
\title{Add a chromosome to rank and frequency stability counting}
\description{
Add a chromosome to the rank and frequency stability counting.
This is an internal function
}
\usage{\method{addCount}{BigBang}(o, chr, ...)}
\arguments{
\item{chr}{Chromosome}
}
\value{
Nothing.
}
\author{Victor Trevino. Francesco Falciani Group. University of Birmingham, U.K. http://www.bip.bham.ac.uk/bioinf}
\seealso{
For more information see \code{\link{BigBang}}.
}
\keyword{methods}
|
require("tidyverse")
require("BiocParallel")
#input_folder="~/Projects/nobackup/181022_time_test/output/mutation_count/"
#sample_ID="~/Projects/nobackup/181022_time_test/output/barcode_samples.txt"
#output_folder = "~/Projects/nobackup/181022_time_test/output/new_synthesised_reads"
#core = 10
#SNP_VCF = "~/Projects/nobackup/181022_time_test/output/combind_control_bam/SNP.vcf"
# Command-line arguments:
#   1: folder with the per-sample .align mutation files
#   2: file listing sample barcodes (one per line, no header)
#   3: output folder for the per-read conversion counts
#   4: number of parallel workers
#   5: VCF of known SNPs to exclude
args = commandArgs(trailingOnly=TRUE)
input_folder=args[1]
sample_ID=args[2]
output_folder=args[3]
core = as.numeric(args[4])
SNP_VCF = args[5]
# Filtering parameters used by function_sample below.
quality_filter = 45
end_dist = 0
target_mut_filter_rate = 0.3
# Create the output directory if needed. dir.exists() is the precise check
# (file.exists() is also TRUE for regular files), and recursive = TRUE
# creates any missing parent directories.
if (!dir.exists(output_folder)) {
  dir.create(output_folder, recursive = TRUE)
}
# Sample barcodes, one per line (no header -> first column is X1).
sample_names = (read_csv(sample_ID, col_names = F))$X1
#sample_names = sample_names[1:5]
# Known SNPs; build a chrom-pos-ref-alt key used to exclude them later.
SNP = read.table(SNP_VCF, sep = "\t", header = T)
SNP$chr_pos = str_c(SNP$Chrom, SNP$Position, SNP$Ref, SNP$Var, sep = "-")
# Count newly-synthesised reads for one sample.
#
# Reads <input_folder>/<target_id>.align, keeps aligned mismatching bases,
# removes known SNP positions and low-quality bases, trims mutations near
# the read ends, then writes the per-read count of strand-aware T->C
# conversions (T->C on forward reads, A->G on reverse reads) to
# <output_folder>/<target_id>.csv.
#
# target_id              sample (barcode) identifier.
# input_folder           directory holding the .align files.
# output_folder          directory the per-read counts are written to.
# core                   unused here; kept for interface compatibility.
# SNP                    data frame of known SNPs with a precomputed chr_pos key.
# quality_filter         minimum base quality (ASCII code of the QUAL character).
# end_dist               bases to trim from each read end before counting.
# target_mut_filter_rate unused here; kept for interface compatibility.
#
# Returns 0 on success, -1 when no candidate mutation survives filtering.
function_sample <- function(target_id, input_folder, output_folder, core, SNP,
                            quality_filter = 45, end_dist = 0,
                            target_mut_filter_rate = 0.3) {
  cat("Process sample: ", target_id)
  cat("\n")
  align_file <- file.path(input_folder, paste0(target_id, ".align"))
  test_input <- read_tsv(align_file, col_types = cols(.default = "c"))
  colnames(test_input)[1] <- "READ_NAME"
  # Keep a full copy (all rows) for the read-end positions computed below.
  ori_test <- test_input
  ori_test$READ_POS <- as.numeric(as.character(ori_test$READ_POS))
  ori_test$REF_POS <- as.numeric(as.character(ori_test$REF_POS))
  ori_test$FLAG <- as.numeric(as.character(ori_test$FLAG))
  test_input <- ori_test
  # Keep only aligned bases that differ from the reference.
  test_input <- test_input %>% filter(!is.na(CHROM))
  test_input <- test_input %>% filter(BASE != ".", REF != ".")
  test_input$REF <- str_to_upper(test_input$REF)
  test_input$BASE <- str_to_upper(test_input$BASE)
  test_input <- test_input %>% filter(BASE != REF)
  if (nrow(test_input) == 0) {
    return(-1)
  }
  # Remove known SNPs so only induced conversions remain.
  test_input$chr_pos <- str_c(test_input$CHROM, test_input$REF_POS,
                              test_input$REF, test_input$BASE, sep = "-")
  test_input <- test_input %>% filter(!(chr_pos %in% SNP$chr_pos))
  if (nrow(test_input) == 0) {
    return(-1)
  }
  # Base-quality filter; QUAL is a single ASCII character per base.
  test_input <- test_input[(sapply(test_input$QUAL, utf8ToInt)) > quality_filter, ]
  if (nrow(test_input) == 0) {
    return(-1)
  }
  # Drop mutations within end_dist bases of either read end.
  end_pos <- ori_test %>% filter(!is.na(READ_POS)) %>%
    group_by(READ_NAME) %>% summarise(end_point = max(READ_POS))
  test_input <- left_join(test_input, end_pos %>% select(READ_NAME, end_point))
  tmp <- test_input %>% filter(READ_POS > end_dist, READ_POS < end_point - end_dist)
  if (nrow(tmp) == 0) {
    return(-1)
  }
  # Per-read count of strand-aware T->C conversions.
  # FIX: an identical, unused tmp_mut_num summary was computed twice here;
  # the dead duplicate work has been removed.
  tmp_target_mut_num <- tmp %>%
    filter((FLAG == 0 & REF == "T" & BASE == "C") |
             (FLAG == 16 & REF == "A" & BASE == "G")) %>%
    group_by(READ_NAME) %>%
    summarise(target_mut_num = n())
  tmp_target_mut_num <- tmp_target_mut_num %>% select(target_mut_num)
  write_csv(tmp_target_mut_num, file.path(output_folder, paste0(target_id, ".csv")),
            col_names = FALSE)
  return(0)
}
# Process every sample in parallel across `core` workers; each worker counts
# newly-synthesised reads for one barcode (result codes collected in a list).
newly_synthesised_read <- bplapply(sample_names, function(target_id) {
function_sample(target_id, input_folder = input_folder, output_folder = output_folder, core = core, SNP = SNP, quality_filter = quality_filter, end_dist = end_dist, target_mut_filter_rate = target_mut_filter_rate)
}, BPPARAM = MulticoreParam(workers = core)) | /script/check_mutation_num_read.R | no_license | JunyueC/scifate | R | false | false | 4,165 | r |
require("tidyverse")
require("BiocParallel")
#input_folder="~/Projects/nobackup/181022_time_test/output/mutation_count/"
#sample_ID="~/Projects/nobackup/181022_time_test/output/barcode_samples.txt"
#output_folder = "~/Projects/nobackup/181022_time_test/output/new_synthesised_reads"
#core = 10
#SNP_VCF = "~/Projects/nobackup/181022_time_test/output/combind_control_bam/SNP.vcf"
args = commandArgs(trailingOnly=TRUE)
input_folder=args[1]
sample_ID=args[2]
output_folder=args[3]
core = as.numeric(args[4])
SNP_VCF = args[5]
quality_filter = 45
end_dist = 0
target_mut_filter_rate = 0.3
# Create the output directory if needed. dir.exists() is the precise check
# (file.exists() is also TRUE for regular files), and recursive = TRUE
# creates any missing parent directories.
if (!dir.exists(output_folder)) {
  dir.create(output_folder, recursive = TRUE)
}
sample_names = (read_csv(sample_ID, col_names = F))$X1
#sample_names = sample_names[1:5]
SNP = read.table(SNP_VCF, sep = "\t", header = T)
SNP$chr_pos = str_c(SNP$Chrom, SNP$Position, SNP$Ref, SNP$Var, sep = "-")
# Count newly-synthesised reads for one sample.
#
# Reads <input_folder>/<target_id>.align, keeps aligned mismatching bases,
# removes known SNP positions and low-quality bases, trims mutations near
# the read ends, then writes the per-read count of strand-aware T->C
# conversions (T->C on forward reads, A->G on reverse reads) to
# <output_folder>/<target_id>.csv.
#
# target_id              sample (barcode) identifier.
# input_folder           directory holding the .align files.
# output_folder          directory the per-read counts are written to.
# core                   unused here; kept for interface compatibility.
# SNP                    data frame of known SNPs with a precomputed chr_pos key.
# quality_filter         minimum base quality (ASCII code of the QUAL character).
# end_dist               bases to trim from each read end before counting.
# target_mut_filter_rate unused here; kept for interface compatibility.
#
# Returns 0 on success, -1 when no candidate mutation survives filtering.
function_sample <- function(target_id, input_folder, output_folder, core, SNP,
                            quality_filter = 45, end_dist = 0,
                            target_mut_filter_rate = 0.3) {
  cat("Process sample: ", target_id)
  cat("\n")
  align_file <- file.path(input_folder, paste0(target_id, ".align"))
  test_input <- read_tsv(align_file, col_types = cols(.default = "c"))
  colnames(test_input)[1] <- "READ_NAME"
  # Keep a full copy (all rows) for the read-end positions computed below.
  ori_test <- test_input
  ori_test$READ_POS <- as.numeric(as.character(ori_test$READ_POS))
  ori_test$REF_POS <- as.numeric(as.character(ori_test$REF_POS))
  ori_test$FLAG <- as.numeric(as.character(ori_test$FLAG))
  test_input <- ori_test
  # Keep only aligned bases that differ from the reference.
  test_input <- test_input %>% filter(!is.na(CHROM))
  test_input <- test_input %>% filter(BASE != ".", REF != ".")
  test_input$REF <- str_to_upper(test_input$REF)
  test_input$BASE <- str_to_upper(test_input$BASE)
  test_input <- test_input %>% filter(BASE != REF)
  if (nrow(test_input) == 0) {
    return(-1)
  }
  # Remove known SNPs so only induced conversions remain.
  test_input$chr_pos <- str_c(test_input$CHROM, test_input$REF_POS,
                              test_input$REF, test_input$BASE, sep = "-")
  test_input <- test_input %>% filter(!(chr_pos %in% SNP$chr_pos))
  if (nrow(test_input) == 0) {
    return(-1)
  }
  # Base-quality filter; QUAL is a single ASCII character per base.
  test_input <- test_input[(sapply(test_input$QUAL, utf8ToInt)) > quality_filter, ]
  if (nrow(test_input) == 0) {
    return(-1)
  }
  # Drop mutations within end_dist bases of either read end.
  end_pos <- ori_test %>% filter(!is.na(READ_POS)) %>%
    group_by(READ_NAME) %>% summarise(end_point = max(READ_POS))
  test_input <- left_join(test_input, end_pos %>% select(READ_NAME, end_point))
  tmp <- test_input %>% filter(READ_POS > end_dist, READ_POS < end_point - end_dist)
  if (nrow(tmp) == 0) {
    return(-1)
  }
  # Per-read count of strand-aware T->C conversions.
  # FIX: an identical, unused tmp_mut_num summary was computed twice here;
  # the dead duplicate work has been removed.
  tmp_target_mut_num <- tmp %>%
    filter((FLAG == 0 & REF == "T" & BASE == "C") |
             (FLAG == 16 & REF == "A" & BASE == "G")) %>%
    group_by(READ_NAME) %>%
    summarise(target_mut_num = n())
  tmp_target_mut_num <- tmp_target_mut_num %>% select(target_mut_num)
  write_csv(tmp_target_mut_num, file.path(output_folder, paste0(target_id, ".csv")),
            col_names = FALSE)
  return(0)
}
newly_synthesised_read <- bplapply(sample_names, function(target_id) {
function_sample(target_id, input_folder = input_folder, output_folder = output_folder, core = core, SNP = SNP, quality_filter = quality_filter, end_dist = end_dist, target_mut_filter_rate = target_mut_filter_rate)
}, BPPARAM = MulticoreParam(workers = core)) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_boundary.R
\name{get_boundary}
\alias{get_boundary}
\title{Get boundary boxes from Atlantis box information.}
\usage{
get_boundary(boxinfo)
}
\arguments{
\item{boxinfo}{A \code{list} as returned from \code{\link{load_box}}.}
}
\value{
A \code{vector} specifying which boxes are on the boundary.
}
\description{
Use the output from \code{\link{load_box}} and obtain a \code{vector}
specifying which boxes are along the boundary.
}
\author{
Kelli Faye Johnson
}
\seealso{
\code{\link{load_box}}
Other get functions: \code{\link{get_groups}}
}
| /man/get_boundary.Rd | no_license | hmorzaria/atlantisom | R | false | true | 625 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_boundary.R
\name{get_boundary}
\alias{get_boundary}
\title{Get boundary boxes from Atlantis box information.}
\usage{
get_boundary(boxinfo)
}
\arguments{
\item{boxinfo}{A \code{list} as returned from \code{\link{load_box}}.}
}
\value{
A \code{vector} specifying which boxes are on the boundary.
}
\description{
Use the output from \code{\link{load_box}} and obtain a \code{vector}
specifying which boxes are along the boundary.
}
\author{
Kelli Faye Johnson
}
\seealso{
\code{\link{load_box}}
Other get functions: \code{\link{get_groups}}
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/similarity.r
\name{sokal_sneath}
\alias{sokal_sneath}
\title{Computes the Sokal-Sneath similarity of two clusterings of the same data set.}
\usage{
sokal_sneath(labels1, labels2)
}
\arguments{
\item{labels1}{a vector of \code{n} clustering labels}
\item{labels2}{a vector of \code{n} clustering labels}
}
\value{
the Sokal-Sneath index for the two sets of cluster labels
}
\description{
For two clusterings of the same data set, this function calculates the Sokal-Sneath
similarity coefficient of the clusterings from the comemberships of the
observations. Basically, the comembership is defined as the pairs of
observations that are clustered together.
}
\details{
To calculate the Sokal-Sneath similarity, we compute the 2x2 contingency table,
consisting of the following four cells:
\describe{
\item{n_11:}{the number of observation pairs where both observations are
comembers in both clusterings}
\item{n_10:}{the number of observation pairs where the observations are
comembers in the first clustering but not the second}
\item{n_01:}{the number of observation pairs where the observations are
comembers in the second clustering but not the first}
\item{n_00:}{the number of observation pairs where neither pair are comembers
in either clustering}
}
The Sokal-Sneath similarity is defined as:
\deqn{\frac{2 (n_{11} + n_{00})}{2 n_{11} + n_{10} + n_{01} + 2 n_{00}}.}
To compute the contingency table, we use the \code{\link{comembership_table}}
function.
}
\examples{
\dontrun{
# We generate K = 3 labels for each of n = 10 observations and compute the
# Sokal-Sneath similarity index between the two clusterings.
set.seed(42)
K <- 3
n <- 10
labels1 <- sample.int(K, n, replace = TRUE)
labels2 <- sample.int(K, n, replace = TRUE)
sokal_sneath(labels1, labels2)
# Here, we cluster the \\code{\\link{iris}} data set with the K-means and
# hierarchical algorithms using the true number of clusters, K = 3.
# Then, we compute the Sokal-Sneath similarity index between the two clusterings.
iris_kmeans <- kmeans(iris[, -5], centers = 3)$cluster
iris_hclust <- cutree(hclust(dist(iris[, -5])), k = 3)
sokal_sneath(iris_kmeans, iris_hclust)
}
}
| /man/sokal_sneath.Rd | no_license | khughitt/clusteval | R | false | false | 2,242 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/similarity.r
\name{sokal_sneath}
\alias{sokal_sneath}
\title{Computes the Sokal-Sneath similarity of two clusterings of the same data set.}
\usage{
sokal_sneath(labels1, labels2)
}
\arguments{
\item{labels1}{a vector of \code{n} clustering labels}
\item{labels2}{a vector of \code{n} clustering labels}
}
\value{
the Sokal-Sneath index for the two sets of cluster labels
}
\description{
For two clusterings of the same data set, this function calculates the Sokal-Sneath
similarity coefficient of the clusterings from the comemberships of the
observations. Basically, the comembership is defined as the pairs of
observations that are clustered together.
}
\details{
To calculate the Sokal-Sneath similarity, we compute the 2x2 contingency table,
consisting of the following four cells:
\describe{
\item{n_11:}{the number of observation pairs where both observations are
comembers in both clusterings}
\item{n_10:}{the number of observation pairs where the observations are
comembers in the first clustering but not the second}
\item{n_01:}{the number of observation pairs where the observations are
comembers in the second clustering but not the first}
\item{n_00:}{the number of observation pairs where neither pair are comembers
in either clustering}
}
The Sokal-Sneath similarity is defined as:
\deqn{\frac{2 (n_{11} + n_{00})}{2 n_{11} + n_{10} + n_{01} + 2 n_{00}}.}
To compute the contingency table, we use the \code{\link{comembership_table}}
function.
}
\examples{
\dontrun{
# We generate K = 3 labels for each of n = 10 observations and compute the
# Sokal-Sneath similarity index between the two clusterings.
set.seed(42)
K <- 3
n <- 10
labels1 <- sample.int(K, n, replace = TRUE)
labels2 <- sample.int(K, n, replace = TRUE)
sokal_sneath(labels1, labels2)
# Here, we cluster the \\code{\\link{iris}} data set with the K-means and
# hierarchical algorithms using the true number of clusters, K = 3.
# Then, we compute the Sokal_Sneath similarity index between the two clusterings.
iris_kmeans <- kmeans(iris[, -5], centers = 3)$cluster
iris_hclust <- cutree(hclust(dist(iris[, -5])), k = 3)
sokal_sneath(iris_kmeans, iris_hclust)
}
}
|
# Stratified train/test split of titanic.raw on the Survived outcome:
# hold out roughly one third of each Survived class as the test set.
a=round(1/3*sum(titanic.raw$Survived=="Yes"))
b=round(1/3*sum(titanic.raw$Survived=="No"))
# Echo the per-stratum test-set sizes.
a;b
set.seed(1)
# strata() (package 'sampling') draws simple random samples without
# replacement ("srswor") within each Survived stratum.
# NOTE(review): 'size' must follow the order in which the strata first appear
# in titanic.raw -- confirm that c(b, a) really matches ("No" first, "Yes"
# second) for this data ordering.
sub=strata(titanic.raw,stratanames="Survived",size=c(b,a),method="srswor")
# NOTE(review): no AgeGroup column is visible in this script -- this looks
# like a leftover from a different dataset; verify the intended column.
table(sub$AgeGroup)
# ID_unit holds the row indices of the sampled (test) units.
Train_titanic=titanic.raw[-sub$ID_unit,]
Test_titanic=titanic.raw[sub$ID_unit,]
nrow(Train_titanic);nrow(Test_titanic)
| /bak/1481468760.R | no_license | sulaxd/data4course | R | false | false | 321 | r | a=round(1/3*sum(titanic.raw$Survived=="Yes"))
b=round(1/3*sum(titanic.raw$Survived=="No"))
a;b
set.seed(1)
sub=strata(titanic.raw,stratanames="Survived",size=c(b,a),method="srswor")
table(sub$AgeGroup)
Train_titanic=titanic.raw[-sub$ID_unit,]
Test_titanic=titanic.raw[sub$ID_unit,]
nrow(Train_titanic);nrow(Test_titanic)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qc_main.R
\name{qc_md_results_table}
\alias{qc_md_results_table}
\title{Main function to resume Metadata QC in one data frame}
\usage{
qc_md_results_table(md_cols, factor_values, email_check, site_md_coordfix,
species_md, plant_md, species_md_spnames, plant_md_spnames, sp_verification,
env_var_presence, parent_logger = "test")
}
\arguments{
\item{md_cols}{}
\item{factor_values}{}
\item{email_check}{}
\item{site_md_coordfix}{}
\item{species_md}{}
\item{plant_md}{}
\item{species_md_spnames}{}
\item{plant_md_spnames}{}
\item{sp_verification}{}
\item{env_var_presence}{}
}
\value{
A data frame with the highlights of the QC
}
\description{
Metadata QC codified results in one data frame
}
\seealso{
Other Quality Checks Functions: \code{\link{create_dic}},
\code{\link{qc_coordinates}},
\code{\link{qc_data_results_table}},
\code{\link{qc_env_ranges}},
\code{\link{qc_env_vars_presence}},
\code{\link{qc_ext_radiation}},
\code{\link{qc_factor_values}},
\code{\link{qc_fix_timestamp}},
\code{\link{qc_get_biomes_spdf}},
\code{\link{qc_get_biome}}, \code{\link{qc_get_sapw_md}},
\code{\link{qc_get_timestep}},
\code{\link{qc_get_timezone}},
\code{\link{qc_is_timestamp}}, \code{\link{qc_md_cols}},
\code{\link{qc_mind_the_gap_eff}},
\code{\link{qc_mind_the_gap}},
\code{\link{qc_out_hampel_filter}},
\code{\link{qc_out_of_range}},
\code{\link{qc_out_remove}},
\code{\link{qc_outliers_process}},
\code{\link{qc_outliers_subs}},
\code{\link{qc_pl_treatments}},
\code{\link{qc_rad_conversion}},
\code{\link{qc_range_dic}}, \code{\link{qc_sapf_ranges}},
\code{\link{qc_sapw_area_calculator}},
\code{\link{qc_sapw_conversion}},
\code{\link{qc_set_timezone}},
\code{\link{qc_soil_texture}},
\code{\link{qc_species_names_info}},
\code{\link{qc_species_names}},
\code{\link{qc_species_verification}},
\code{\link{qc_start_process}},
\code{\link{qc_swc_check}}, \code{\link{qc_swc_fix}},
\code{\link{qc_time_interval}},
\code{\link{qc_timestamp_concordance}},
\code{\link{qc_timestamp_errors}},
\code{\link{qc_timestamp_nas}}
}
| /man/qc_md_results_table.Rd | no_license | sapfluxnet/sapfluxnetQC1 | R | false | true | 2,180 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qc_main.R
\name{qc_md_results_table}
\alias{qc_md_results_table}
\title{Main function to resume Metadata QC in one data frame}
\usage{
qc_md_results_table(md_cols, factor_values, email_check, site_md_coordfix,
species_md, plant_md, species_md_spnames, plant_md_spnames, sp_verification,
env_var_presence, parent_logger = "test")
}
\arguments{
\item{md_cols}{}
\item{factor_values}{}
\item{email_check}{}
\item{site_md_coordfix}{}
\item{species_md}{}
\item{plant_md}{}
\item{species_md_spnames}{}
\item{plant_md_spnames}{}
\item{sp_verification}{}
\item{env_var_presence}{}
}
\value{
A data frame with the highlights of the QC
}
\description{
Metadata QC codified results in one data frame
}
\seealso{
Other Quality Checks Functions: \code{\link{create_dic}},
\code{\link{qc_coordinates}},
\code{\link{qc_data_results_table}},
\code{\link{qc_env_ranges}},
\code{\link{qc_env_vars_presence}},
\code{\link{qc_ext_radiation}},
\code{\link{qc_factor_values}},
\code{\link{qc_fix_timestamp}},
\code{\link{qc_get_biomes_spdf}},
\code{\link{qc_get_biome}}, \code{\link{qc_get_sapw_md}},
\code{\link{qc_get_timestep}},
\code{\link{qc_get_timezone}},
\code{\link{qc_is_timestamp}}, \code{\link{qc_md_cols}},
\code{\link{qc_mind_the_gap_eff}},
\code{\link{qc_mind_the_gap}},
\code{\link{qc_out_hampel_filter}},
\code{\link{qc_out_of_range}},
\code{\link{qc_out_remove}},
\code{\link{qc_outliers_process}},
\code{\link{qc_outliers_subs}},
\code{\link{qc_pl_treatments}},
\code{\link{qc_rad_conversion}},
\code{\link{qc_range_dic}}, \code{\link{qc_sapf_ranges}},
\code{\link{qc_sapw_area_calculator}},
\code{\link{qc_sapw_conversion}},
\code{\link{qc_set_timezone}},
\code{\link{qc_soil_texture}},
\code{\link{qc_species_names_info}},
\code{\link{qc_species_names}},
\code{\link{qc_species_verification}},
\code{\link{qc_start_process}},
\code{\link{qc_swc_check}}, \code{\link{qc_swc_fix}},
\code{\link{qc_time_interval}},
\code{\link{qc_timestamp_concordance}},
\code{\link{qc_timestamp_errors}},
\code{\link{qc_timestamp_nas}}
}
|
# Cross-validated selection of a GIC (Generalized Information Criterion)
# penalty constant, then model-size selection on the full-data fit.
#
# Outline: the full regularization path is fit on all data; within each CV
# fold the path is refit on the training part and, for every penalty constant
# in a grid centred on the RIC constant, the model size minimizing the
# penalized in-fold criterion (RSS for gaussian, -2*loglik for binomial) is
# looked up and scored by out-of-fold error. The constant with the smallest
# CV error defines the GIC curve on the full-data path, from which df.min and
# df.1se are chosen.
#
# Args:
#   X              predictor matrix / data frame
#   y              response: numeric for family "gaussian", a 2-level factor
#                  for family "binomial" (taken from ... via 'family')
#   nfolds         number of cross-validation folds
#   model_function path-fitting function, called as model_function(X, y, ...)
#   ...            forwarded to model_function; must contain 'family'
#
# Returns a list with df.min, df.1se (selected model sizes), dmr.fit (the
# full-data fit), cvm (GIC values along the full path) and foldid (the fold
# assignment of each observation).
#
# Depends on package-internal helpers not visible here: cv_compute_model(),
# predict.DMR(), constants().
cv_GIC_indexed <- function(X, y, nfolds, model_function, ...) {
family = list(...)$family
# ---- gaussian branch: in-fold criterion is RSS, error is mean squared error
if (family == "gaussian"){
n <- length(y)
real_n <- 0 #recount of test instances
foldid <- sample(rep(1:nfolds,length.out=n)) #PP replaces cvfolds by a simpler sample(rep()) function
err <- list(); rss <- list(); #md <- list()
model.full <- model_function(X, y, ...)
lambda.full<- model.full$lambda
for (fold in 1:nfolds){
Xte <- X[foldid == fold, ,drop = FALSE]
yte <- y[foldid == fold, drop = FALSE]
Xtr <- X[foldid != fold, ,drop = FALSE]
ytr <- y[foldid != fold, drop = FALSE]
compute_model <- cv_compute_model(model_function, Xtr, ytr, Xte, yte, real_n, lambda.full = lambda.full, ...) #three letter abbreviations (lambda.full vs lam) make this function call confused, so explicit passing of named parameter i.e. lambda.full=lambda.full is required
model<-compute_model$model
Xtr<-compute_model$Xtr
ytr<-compute_model$ytr
Xte<-compute_model$Xte
yte<-compute_model$yte
real_n<-compute_model$real_n
#PP new code
rss[[fold]] <- model$rss
pred <- predict.DMR(model, newx = as.data.frame(Xte))
#PP new code error[[fold]] <- apply(pred, 2, function(z) sum((z - yte)^2))
err[[fold]] <- apply(pred, 2, function(z) mean((z - yte)^2))
}
# Folds may produce paths of different lengths; align all folds on the last
# 'foldmin' entries so ERR/RSS become rectangular (rows = path positions).
len_err <- sapply(err, length)
foldmin <- min(len_err)
ERR <- sapply(1:nfolds, function(i) err[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
if (foldmin == 1) {
ERR<-t(as.matrix((ERR))) #making it a horizontal one-row matrix
}
#err <- rowMeans(ERR); kt <- which(err == min(err)); df.min <- dmr$df[kt[length(kt)]]; plot(err, type="o")
# Noise-variance estimate s2 from the largest model on the full path; the
# penalty grid laGIC spans the RIC constant divided/multiplied by 50.
p1 <- model.full$df[1]
s2 <- model.full$rss[1]/(n-p1)
p <- ncol(model.full$beta)
if (is.null(p))
p <- length(model.full$beta)
RIC_constant <- constants()$RIC_gaussian_constant
Const <- exp(seq(log(RIC_constant/50),log(RIC_constant*50), length=81))
laGIC <- Const*log(p)*s2
RSS <- sapply(1:nfolds, function(i) rss[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
if (is.null(dim(RSS))) {
RSS<-t(as.matrix((RSS))) #making it a horizontal one-row matrix
}
#MD <- sapply(1:nfolds, function(i) md[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
# For each fold (column) and each penalty constant, the index of the path
# position minimizing the penalized RSS; length(r):1 is the model-size count
# at each position (the path is stored largest-model-first).
IND <- apply( RSS, 2, function(r) sapply( laGIC, function(la) which.min(r+la*length(r):1) ) )
# CV error of each penalty constant: mean out-of-fold error at the positions
# that constant selects in each fold.
errGIC <- apply( IND, 1, function(ind) mean(ERR[cbind(ind,1:nfolds)]) )
#mdGIC <- apply( IND, 1, function(ind) mean(MD[cbind(ind,1:10)]) )
#plot(mdGIC[length(laGIC):1],errGIC[length(laGIC):1]/s2, xlab="MD", ylab="PE", type="o")
# Evaluate GIC on the full-data path with the best constant (ties broken
# toward the larger index, i.e. the smaller model).
r <- model.full$rss
kt <- which(errGIC == min(errGIC))
indGIC <- kt[length(kt)] #TODO: why last?
gic.full <- (r+laGIC[indGIC]*length(r):1)/(real_n*s2)
#plot(gic.full[length(gic.full):1])
} else{
# ---- binomial branch: in-fold criterion is -2*loglik, error is
# misclassification rate; folds are stratified by class so both levels are
# represented in every fold.
if (family == "binomial"){
if (!inherits(y, "factor")){
stop("Error: y should be a factor")
}
lev <- levels(factor(y))
if (length(lev) != 2){
stop("Error: factor y should have 2 levels")
}
n1 <- table(y)[1]
n2 <- table(y)[2]
real_n <- 0 #recount of test instances
foldid1 <- sample(rep(1:nfolds,length.out=n1)) #PP replaces cvfolds by a simpler sample(rep()) function
foldid2 <- sample(rep(1:nfolds,length.out=n2)) #PP replaces cvfolds by a simpler sample(rep()) function
foldid <- c()
foldid[which(y == levels(factor(y))[1])] = foldid1
foldid[which(y == levels(factor(y))[2])] = foldid2
#PP new code error <- list()
err <- list(); loglik <- list(); #md <- list()
model.full <- model_function(X, y, ...)
lambda.full<- model.full$lambda
for (fold in 1:nfolds) {
Xte <- X[foldid == fold, , drop = FALSE]
yte <- y[foldid == fold, drop = FALSE]
Xtr <- X[foldid != fold, , drop = FALSE]
ytr <- y[foldid != fold, drop = FALSE]
compute_model <- cv_compute_model(model_function, Xtr, ytr, Xte, yte, real_n, lambda.full = lambda.full, ...) #three letter abbreviations (lambda.full vs lam) make this function call confused, so explicit passing of named parameter i.e. lambda.full=lambda.full is required
model<-compute_model$model
Xtr<-compute_model$Xtr
ytr<-compute_model$ytr
Xte<-compute_model$Xte
yte<-compute_model$yte
real_n<-compute_model$real_n
#SzN new code based on PP new code
loglik[[fold]] <- -2*model$loglik
pred <- predict.DMR(model, newx = as.data.frame(Xte), type = "class")
#SzN new code based on PP new code error[[fold]] <- apply(pred, 2, function(z) sum(z != yte))
err[[fold]] <- apply(pred, 2, function(z) mean(z != yte))
}
# Same path-length alignment as in the gaussian branch.
len_err <- sapply(err, length)
foldmin <- min(len_err)
ERR <- sapply(1:nfolds, function(i) err[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
#err <- rowMeans(ERR); kt <- which(err == min(err)); df.min <- dmr$df[kt[length(kt)]]; plot(err, type="o")
if (foldmin == 1) {
ERR<-t(as.matrix((ERR))) #making it a horizontal one-row matrix
}
p <- ncol(model.full$beta)
if (is.null(p))
p <- length(model.full$beta)
RIC_constant <- constants()$RIC_binomial_constant
Const <- exp(seq(log(RIC_constant/50),log(RIC_constant*50), length=81))
laGIC <- Const*log(p)
LOGLIK <- sapply(1:nfolds, function(i) loglik[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
if (is.null(dim(LOGLIK))) {
LOGLIK<-t(as.matrix((LOGLIK))) #making it a horizontal one-row matrix
}
#MD <- sapply(1:nfolds, function(i) md[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
# Penalized deviance minimization per fold and penalty constant, then
# out-of-fold misclassification scoring of each constant.
IND <- apply( LOGLIK, 2, function(ll) sapply( laGIC, function(la) which.min(ll+la*length(ll):1) ) )
errGIC <- apply( IND, 1, function(ind) mean(ERR[cbind(ind,1:nfolds)]) )
#mdGIC <- apply( IND, 1, function(ind) mean(MD[cbind(ind,1:10)]) )
#plot(mdGIC[length(laGIC):1],errGIC[length(laGIC):1]/s2, xlab="MD", ylab="PE", type="o")
ll <- -2*model.full$loglik
kt <- which(errGIC == min(errGIC))
indGIC <- kt[length(kt)] #TODO: why last?
gic.full <- (ll+laGIC[indGIC]*length(ll):1)/real_n
#plot(gic.full[length(gic.full):1])
}
else{
stop("Error: wrong family, should be one of: gaussian, binomial")
}
}
# ---- Final model-size selection on the full-data GIC curve.
kt <- which(gic.full == min(stats::na.omit(gic.full))) #kt stores indexes in error equal to a minimum error.
#if there is more than one such index, the LAST one is the one returned, because LAST means a smaller model.
indMod <- kt[length(kt)]
df.min <- model.full$df[indMod]
# df.1se: largest index (smallest model) whose GIC lies within one standard
# deviation of the minimum; NULL if no finite GIC values qualify.
kt <- which(gic.full <= min(stats::na.omit(gic.full)) + stats::sd(stats::na.omit(gic.full[gic.full!=Inf & gic.full!=-Inf])))
if (length(kt) == 0) {
df.1se <- NULL
} else {
indMod <- kt[length(kt)]
df.1se <- model.full$df[indMod]
}
out <- list(df.min = df.min, df.1se = df.1se, dmr.fit = model.full, cvm = gic.full, foldid = foldid)
return(out)
}
| /R/cv_GIC_indexed.R | no_license | cran/DMRnet | R | false | false | 9,441 | r | cv_GIC_indexed <- function(X, y, nfolds, model_function, ...) {
family = list(...)$family
if (family == "gaussian"){
n <- length(y)
real_n <- 0 #recount of test instances
foldid <- sample(rep(1:nfolds,length.out=n)) #PP replaces cvfolds by a simpler sample(rep()) function
err <- list(); rss <- list(); #md <- list()
model.full <- model_function(X, y, ...)
lambda.full<- model.full$lambda
for (fold in 1:nfolds){
Xte <- X[foldid == fold, ,drop = FALSE]
yte <- y[foldid == fold, drop = FALSE]
Xtr <- X[foldid != fold, ,drop = FALSE]
ytr <- y[foldid != fold, drop = FALSE]
compute_model <- cv_compute_model(model_function, Xtr, ytr, Xte, yte, real_n, lambda.full = lambda.full, ...) #three letter abbreviations (lambda.full vs lam) make this function call confused, so explicit passing of named parameter i.e. lambda.full=lambda.full is required
model<-compute_model$model
Xtr<-compute_model$Xtr
ytr<-compute_model$ytr
Xte<-compute_model$Xte
yte<-compute_model$yte
real_n<-compute_model$real_n
#PP new code
rss[[fold]] <- model$rss
pred <- predict.DMR(model, newx = as.data.frame(Xte))
#PP new code error[[fold]] <- apply(pred, 2, function(z) sum((z - yte)^2))
err[[fold]] <- apply(pred, 2, function(z) mean((z - yte)^2))
}
len_err <- sapply(err, length)
foldmin <- min(len_err)
ERR <- sapply(1:nfolds, function(i) err[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
if (foldmin == 1) {
ERR<-t(as.matrix((ERR))) #making it a horizontal one-row matrix
}
#err <- rowMeans(ERR); kt <- which(err == min(err)); df.min <- dmr$df[kt[length(kt)]]; plot(err, type="o")
p1 <- model.full$df[1]
s2 <- model.full$rss[1]/(n-p1)
p <- ncol(model.full$beta)
if (is.null(p))
p <- length(model.full$beta)
RIC_constant <- constants()$RIC_gaussian_constant
Const <- exp(seq(log(RIC_constant/50),log(RIC_constant*50), length=81))
laGIC <- Const*log(p)*s2
RSS <- sapply(1:nfolds, function(i) rss[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
if (is.null(dim(RSS))) {
RSS<-t(as.matrix((RSS))) #making it a horizontal one-row matrix
}
#MD <- sapply(1:nfolds, function(i) md[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
IND <- apply( RSS, 2, function(r) sapply( laGIC, function(la) which.min(r+la*length(r):1) ) )
errGIC <- apply( IND, 1, function(ind) mean(ERR[cbind(ind,1:nfolds)]) )
#mdGIC <- apply( IND, 1, function(ind) mean(MD[cbind(ind,1:10)]) )
#plot(mdGIC[length(laGIC):1],errGIC[length(laGIC):1]/s2, xlab="MD", ylab="PE", type="o")
r <- model.full$rss
kt <- which(errGIC == min(errGIC))
indGIC <- kt[length(kt)] #TODO: why last?
gic.full <- (r+laGIC[indGIC]*length(r):1)/(real_n*s2)
#plot(gic.full[length(gic.full):1])
} else{
if (family == "binomial"){
if (!inherits(y, "factor")){
stop("Error: y should be a factor")
}
lev <- levels(factor(y))
if (length(lev) != 2){
stop("Error: factor y should have 2 levels")
}
n1 <- table(y)[1]
n2 <- table(y)[2]
real_n <- 0 #recount of test instances
foldid1 <- sample(rep(1:nfolds,length.out=n1)) #PP replaces cvfolds by a simpler sample(rep()) function
foldid2 <- sample(rep(1:nfolds,length.out=n2)) #PP replaces cvfolds by a simpler sample(rep()) function
foldid <- c()
foldid[which(y == levels(factor(y))[1])] = foldid1
foldid[which(y == levels(factor(y))[2])] = foldid2
#PP new code error <- list()
err <- list(); loglik <- list(); #md <- list()
model.full <- model_function(X, y, ...)
lambda.full<- model.full$lambda
for (fold in 1:nfolds) {
Xte <- X[foldid == fold, , drop = FALSE]
yte <- y[foldid == fold, drop = FALSE]
Xtr <- X[foldid != fold, , drop = FALSE]
ytr <- y[foldid != fold, drop = FALSE]
compute_model <- cv_compute_model(model_function, Xtr, ytr, Xte, yte, real_n, lambda.full = lambda.full, ...) #three letter abbreviations (lambda.full vs lam) make this function call confused, so explicit passing of named parameter i.e. lambda.full=lambda.full is required
model<-compute_model$model
Xtr<-compute_model$Xtr
ytr<-compute_model$ytr
Xte<-compute_model$Xte
yte<-compute_model$yte
real_n<-compute_model$real_n
#SzN new code based on PP new code
loglik[[fold]] <- -2*model$loglik
pred <- predict.DMR(model, newx = as.data.frame(Xte), type = "class")
#SzN new code based on PP new code error[[fold]] <- apply(pred, 2, function(z) sum(z != yte))
err[[fold]] <- apply(pred, 2, function(z) mean(z != yte))
}
len_err <- sapply(err, length)
foldmin <- min(len_err)
ERR <- sapply(1:nfolds, function(i) err[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
#err <- rowMeans(ERR); kt <- which(err == min(err)); df.min <- dmr$df[kt[length(kt)]]; plot(err, type="o")
if (foldmin == 1) {
ERR<-t(as.matrix((ERR))) #making it a horizontal one-row matrix
}
p <- ncol(model.full$beta)
if (is.null(p))
p <- length(model.full$beta)
RIC_constant <- constants()$RIC_binomial_constant
Const <- exp(seq(log(RIC_constant/50),log(RIC_constant*50), length=81))
laGIC <- Const*log(p)
LOGLIK <- sapply(1:nfolds, function(i) loglik[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
if (is.null(dim(LOGLIK))) {
LOGLIK<-t(as.matrix((LOGLIK))) #making it a horizontal one-row matrix
}
#MD <- sapply(1:nfolds, function(i) md[[i]][ (len_err[i] - foldmin + 1) : len_err[i] ] )
IND <- apply( LOGLIK, 2, function(ll) sapply( laGIC, function(la) which.min(ll+la*length(ll):1) ) )
errGIC <- apply( IND, 1, function(ind) mean(ERR[cbind(ind,1:nfolds)]) )
#mdGIC <- apply( IND, 1, function(ind) mean(MD[cbind(ind,1:10)]) )
#plot(mdGIC[length(laGIC):1],errGIC[length(laGIC):1]/s2, xlab="MD", ylab="PE", type="o")
ll <- -2*model.full$loglik
kt <- which(errGIC == min(errGIC))
indGIC <- kt[length(kt)] #TODO: why last?
gic.full <- (ll+laGIC[indGIC]*length(ll):1)/real_n
#plot(gic.full[length(gic.full):1])
}
else{
stop("Error: wrong family, should be one of: gaussian, binomial")
}
}
kt <- which(gic.full == min(stats::na.omit(gic.full))) #kt stores indexes in error equal to a minimum error.
#if there is more than one such index, the LAST one is the one returned, because LAST means a smaller model.
indMod <- kt[length(kt)]
df.min <- model.full$df[indMod]
kt <- which(gic.full <= min(stats::na.omit(gic.full)) + stats::sd(stats::na.omit(gic.full[gic.full!=Inf & gic.full!=-Inf])))
if (length(kt) == 0) {
df.1se <- NULL
} else {
indMod <- kt[length(kt)]
df.1se <- model.full$df[indMod]
}
out <- list(df.min = df.min, df.1se = df.1se, dmr.fit = model.full, cvm = gic.full, foldid = foldid)
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amDataset.R
\name{amStockMultiSet}
\alias{amStockMultiSet}
\title{Plotting multi data-sets}
\usage{
amStockMultiSet(data, panelColumn = NULL, ZoomButtonPosition = "bottom",
ZoomButton = data.frame(Unit = "MAX", multiple = 1, label = "All"),
color = c("#2E2EFE", "#31B404", "#FF4000"), precision = 1,
export = FALSE, percentHeightPanel = NULL,
creditsPosition = "top-right", ...)
}
\arguments{
\item{data}{\code{list}, list of data.frame (same structure) first column is date, others are values}
\item{panelColumn}{\code{vector}, numeric vector controlling the panel assignment of each selected series}
\item{ZoomButtonPosition}{\code{character}, zoom button position. Possible values are :
"left", "right", "bottom", "top"}
\item{ZoomButton}{\code{data.frame}, 3 columns :
Unit, times unit
multiple : multiple*unit
label : button's label}
\item{color}{\code{character}, color of data-sets (in hexadecimal).}
\item{precision}{\code{numeric}, digits precision}
\item{export}{\code{logical}, default set to FALSE. TRUE to display export feature.}
\item{percentHeightPanel}{\code{numeric}, vector of size panel, same length than data}
\item{creditsPosition}{\code{character}, credits position. Possible values are :
"top-right", "top-left", "bottom-right", "bottom-left"}
\item{...}{other first level attributes}
}
\description{
amStockMultiSet computes a stock chart from multiple data-sets (still in development)
}
\examples{
data(data_stock1)
data_stock1$chartData1$value2 <- as.numeric(data_stock1$chartData1$value) + 10
data_stock1$chartData2$value2 <- as.numeric(data_stock1$chartData2$value) + 10
data_stock1$chartData3$value2 <- as.numeric(data_stock1$chartData3$value) + 10
data_stock1$chartData4$value2 <- as.numeric(data_stock1$chartData4$value) + 10
data_stock1$chartData1$value3 <- as.numeric(data_stock1$chartData1$value) - 10
data_stock1$chartData2$value3 <- as.numeric(data_stock1$chartData2$value) - 10
data_stock1$chartData3$value3 <- as.numeric(data_stock1$chartData3$value) - 10
data_stock1$chartData4$value3 <- as.numeric(data_stock1$chartData4$value) - 10
amStockMultiSet(data = data_stock1)
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,1,1))
\donttest{
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,3,4))
ZoomButton <- data.frame(Unit = c("DD", "DD", "MAX"), multiple = c(1, 10 ,1),
label = c("Day","10 days", "MAX"))
ZoomButtonPosition <- "bottom"
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,1,1), ZoomButton = ZoomButton,
ZoomButtonPosition = "top")
amStockMultiSet(data = data_stock1, precision = 2)
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,1,1), percentHeightPanel = c(3,1))
}
}
| /man/amStockMultiSet.Rd | no_license | msabr027/rAmCharts | R | false | true | 2,764 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amDataset.R
\name{amStockMultiSet}
\alias{amStockMultiSet}
\title{Plotting multi data-sets}
\usage{
amStockMultiSet(data, panelColumn = NULL, ZoomButtonPosition = "bottom",
ZoomButton = data.frame(Unit = "MAX", multiple = 1, label = "All"),
color = c("#2E2EFE", "#31B404", "#FF4000"), precision = 1,
export = FALSE, percentHeightPanel = NULL,
creditsPosition = "top-right", ...)
}
\arguments{
\item{data}{\code{list}, list of data.frame (same structure) first column is date, others are values}
\item{panelColumn}{\code{vector}, numeric vector, controle panel adding for selected series}
\item{ZoomButtonPosition}{\code{character}, zoom button position. Possible values are :
"left", "right", "bottom", "top"}
\item{ZoomButton}{\code{data.frame}, 3 columns :
Unit, times unit
multiple : multiple*unit
label : button's label}
\item{color}{\code{character}, color of data-sets (in hexadecimal).}
\item{precision}{\code{numeric}, digits precision}
\item{export}{\code{logical}, default set to FALSE. TRUE to display export feature.}
\item{percentHeightPanel}{\code{numeric}, vector of size panel, same length than data}
\item{creditsPosition}{\code{character}, credits position. Possible values are :
"top-right", "top-left", "bottom-right", "bottom-left"}
\item{...}{other first level attributes}
}
\description{
amStockMultiSet compute a stock of multi data-sets, still in dev
}
\examples{
data(data_stock1)
data_stock1$chartData1$value2 <- as.numeric(data_stock1$chartData1$value) + 10
data_stock1$chartData2$value2 <- as.numeric(data_stock1$chartData2$value) + 10
data_stock1$chartData3$value2 <- as.numeric(data_stock1$chartData3$value) + 10
data_stock1$chartData4$value2 <- as.numeric(data_stock1$chartData4$value) + 10
data_stock1$chartData1$value3 <- as.numeric(data_stock1$chartData1$value) - 10
data_stock1$chartData2$value3 <- as.numeric(data_stock1$chartData2$value) - 10
data_stock1$chartData3$value3 <- as.numeric(data_stock1$chartData3$value) - 10
data_stock1$chartData4$value3 <- as.numeric(data_stock1$chartData4$value) - 10
amStockMultiSet(data = data_stock1)
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,1,1))
\donttest{
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,3,4))
ZoomButton <- data.frame(Unit = c("DD", "DD", "MAX"), multiple = c(1, 10 ,1),
label = c("Day","10 days", "MAX"))
ZoomButtonPosition <- "bottom"
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,1,1), ZoomButton = ZoomButton,
ZoomButtonPosition = "top")
amStockMultiSet(data = data_stock1, precision = 2)
amStockMultiSet(data = data_stock1, panelColumn = c(1,2,1,1), percentHeightPanel = c(3,1))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fires2011.R
\docType{data}
\name{Fires2011}
\alias{Fires2011}
\title{Data for Fires for year 2011 from La Tigra National Park, Honduras}
\format{
Simple feature collection with 36 features and 2 fields
\itemize{
\item{id} {}
\item{Area_ha} {}
}
}
\source{
fire area estimation using landsat images from La Tigra National Park for the year 2011.
}
\usage{
Fires2011
}
\description{
Data for Fires for La Tigra National Park, Honduras for the year 2011
}
\examples{
if (requireNamespace("sf", quietly = TRUE)) {
library(sf)
data(Fires2011)
plot(st_geometry(Fires2011), axes=TRUE, col="darkred")
}
}
\references{
\url{https://www.amitigra.com/}
}
\keyword{datasets}
\keyword{sf}
| /man/Fires2011.Rd | permissive | klauswiese/pnlt | R | false | true | 792 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fires2011.R
\docType{data}
\name{Fires2011}
\alias{Fires2011}
\title{Data for Fires for year 2011 from La Tigra National Park, Honduras}
\format{
Simple feature collection with 36 features and 2 fields
\itemize{
\item{id} {}
\item{Area_ha} {}
}
}
\source{
fire area estimation using landsat images from La Tigra National Park for the year 2011.
}
\usage{
Fires2011
}
\description{
Data for Fires for La Tigra National Park, Honduras for the year 2011
}
\examples{
if (requireNamespace("sf", quietly = TRUE)) {
library(sf)
data(Fires2011)
plot(st_geometry(Fires2011), axes=TRUE, col="darkred")
}
}
\references{
\url{https://www.amitigra.com/}
}
\keyword{datasets}
\keyword{sf}
|
# Plot the three energy sub-metering series for 2007-02-01 and 2007-02-02
# from the UCI household power consumption data, writing the result to
# plot3.png (480x480) in the working directory.
#
# Args:
#   dataFile  path to the semicolon-separated source file; defaults to the
#             original hard-coded location, so existing plot3() calls are
#             unaffected.
#
# Changes vs. original: the data path is a parameter, the unused
# globalActivePower variable was removed, and dev.off() is guaranteed via
# on.exit() so the PNG device is closed even if plotting fails part-way.
plot3 <- function(dataFile = "./data/household_power_consumption.txt") {
  data <- read.table(dataFile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
  # Dates in the raw file are d/m/Y strings, hence these two literals.
  subSetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
  datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
  subMetering1 <- as.numeric(subSetData$Sub_metering_1)
  subMetering2 <- as.numeric(subSetData$Sub_metering_2)
  subMetering3 <- as.numeric(subSetData$Sub_metering_3)
  png("plot3.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)
  plot(datetime, subMetering1, type = "l", xlab = "Day", ylab = "Energy sub metering")
  lines(datetime, subMetering2, type = "l", col = "red")
  lines(datetime, subMetering3, type = "l", col = "blue")
  legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = 1, lwd = 2, col = c("black", "red", "blue"))
  invisible(NULL)
}
## Aim of this function is to
## 1. read the household_power_consumption.txt file
## 2. subset for data taken from 2 days: 2007-02-01 and 2007-02-02
## 3. generate a plot of different submetering vs time
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", xlab="Day", ylab="Energy sub metering")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2, col=c("black", "red", "blue"))
dev.off()
} |
# Load the transactions.al.sfr.subset1 training data from an .RData file
# and return the `train` object defined in it.
#
# Args:
#   path: path to an .RData file saved with an object named `train`.
#
# Returns: the `train` object restored from the file.
ReadTransactionsSubset1Train <- function(path) {
  train <- NULL
  # load() restores the file's objects into this frame and returns their names.
  loaded <- load(path)
  # Fail fast if the file did not actually contain a non-NULL `train` object.
  stopifnot("train" %in% loaded, !is.null(train))
  train
}
| /ReadTransactionsSubset1Train.R | permissive | rlowrance/re-local-linear | R | false | false | 183 | r | ReadTransactionsSubset1Train <- function(path) {
# return transactions.al.sfr.subset1 object
train <- NULL
loaded <- load(path)
stopifnot(!is.null(train))
train
}
|
# create_directory.r
#
# Copyright (c) 2020 VIB (Belgium) & Babraham Institute (United Kingdom)
#
# Software written by Carlos P. Roca, as research funded by the European Union.
#
# This software may be modified and distributed under the terms of the MIT
# license. See the LICENSE file for details.

# Creates the figure and table output directories for an analysis run.
# Returns the (named) directory for scatter figures, or NULL when no
# scatter figure base directory is configured in `asp`.
create.directory <- function( flow.control, asp )
{
    # Scatter figures go into a per-sample directory derived from the
    # configured base name; the result is named after the sample.
    if ( is.null( asp$figure.scatter.dir.base ) )
        figure.scatter.dir <- NULL
    else {
        figure.scatter.dir <- sprintf( "%s_%s", asp$figure.scatter.dir.base,
            flow.control$sample )
        names( figure.scatter.dir ) <- flow.control$sample
    }

    # Collect every configured output directory; NULL entries drop out of c().
    all.output.dir <- c(
        asp$figure.compensation.dir,
        asp$figure.convergence.dir,
        asp$figure.gate.dir,
        asp$figure.skewness.dir,
        asp$figure.slope.error.dir,
        asp$figure.spillover.dir,
        figure.scatter.dir,
        asp$table.compensation.dir,
        asp$table.convergence.dir,
        asp$table.skewness.dir,
        asp$table.slope.error.dir,
        asp$table.spillover.dir
    )

    # Create any directory that does not exist yet.
    for ( output.dir in all.output.dir )
        if ( ! file.exists( output.dir ) )
            dir.create( output.dir, recursive = TRUE )

    figure.scatter.dir
}
| /R/create_directory.r | permissive | hally166/autospill | R | false | false | 1,347 | r | # create_directory.r
#
# Copyright (c) 2020 VIB (Belgium) & Babraham Institute (United Kingdom)
#
# Software written by Carlos P. Roca, as research funded by the European Union.
#
# This software may be modified and distributed under the terms of the MIT
# license. See the LICENSE file for details.
# Creates figure and table directories.
# Returns directories for scatter figures.
create.directory <- function( flow.control, asp )
{
    # Scatter figures go into a per-sample directory derived from the
    # configured base name; the result is named after the sample.
    if ( is.null( asp$figure.scatter.dir.base ) )
        figure.scatter.dir <- NULL
    else {
        figure.scatter.dir <- sprintf( "%s_%s", asp$figure.scatter.dir.base,
            flow.control$sample )
        names( figure.scatter.dir ) <- flow.control$sample
    }

    # Collect every configured output directory; NULL entries drop out of c().
    all.output.dir <- c(
        asp$figure.compensation.dir,
        asp$figure.convergence.dir,
        asp$figure.gate.dir,
        asp$figure.skewness.dir,
        asp$figure.slope.error.dir,
        asp$figure.spillover.dir,
        figure.scatter.dir,
        asp$table.compensation.dir,
        asp$table.convergence.dir,
        asp$table.skewness.dir,
        asp$table.slope.error.dir,
        asp$table.spillover.dir
    )

    # Create any directory that does not exist yet.
    for ( output.dir in all.output.dir )
        if ( ! file.exists( output.dir ) )
            dir.create( output.dir, recursive = TRUE )

    figure.scatter.dir
}
|
# Unit tests (testthat) for the binomial-test helpers binom_calc() and
# binom_test(). The expected numbers below pin previously observed output
# (regression tests); they were not re-derived independently here.
context('binomtest')
# binom_calc(n, success): check every field of the returned result for two
# cases, success count below (8/32) and above (20/32) the expected 16.
test_that('output from binom_calc matches the expected output', {
k <- binom_calc(32, 8)
expect_equal(k$n, 32)
expect_equal(k$k, 8)
expect_equal(k$exp_k, 16)
expect_equal(k$obs_p, 0.25)
expect_equal(k$exp_p, 0.5)
expect_equal(k$ik, 24)
expect_equal(k$lower, 0.0035)
expect_equal(k$upper, 0.998949)
expect_equal(k$two_tail, 0.007)
k <- binom_calc(32, 20)
expect_equal(k$n, 32)
expect_equal(k$k, 20)
expect_equal(k$exp_k, 16)
expect_equal(k$obs_p, 0.625)
expect_equal(k$exp_p, 0.5)
expect_equal(k$ik, 11)
expect_equal(k$lower, 0.944908)
expect_equal(k$upper, 0.107664)
expect_equal(k$two_tail, 0.162756)
})
# Input validation: each bad argument must raise the matching error message.
test_that('binom_calc throws the appropriate error', {
expect_error(binom_calc('32', 20), 'n must be an integer')
expect_error(binom_calc(32, '20'), 'success must be an integer')
expect_error(binom_calc(32, 20, '0.5'), 'prob must be numeric')
expect_error(binom_calc(32, 20, 1.5), 'prob must be between 0 and 1')
expect_error(binom_calc(32, 20, -1.5), 'prob must be between 0 and 1')
})
# binom_test() on a factor: per the pinned values, mtcars$vs contains
# 14 successes in 32 observations.
test_that('output from binom_test matches the expected output', {
k <- binom_test(as.factor(mtcars$vs))
expect_equal(k$n, 32)
expect_equal(k$k, 14)
expect_equal(k$exp_k, 16)
expect_equal(k$obs_p, 0.4375)
expect_equal(k$exp_p, 0.5)
expect_equal(k$ik, 18)
expect_equal(k$lower, 0.298307)
expect_equal(k$upper, 0.811457)
expect_equal(k$two_tail, 0.596615)
})
# Input validation for binom_test: non-factor data and out-of-range prob.
test_that('binom_test throws the appropriate error', {
expect_error(binom_test(mtcars$cyl), 'data must be of type factor')
expect_error(binom_test(as.factor(mtcars$cyl), '0.5'), 'prob must be numeric')
expect_error(binom_test(as.factor(mtcars$cyl), 1.5), 'prob must be between 0 and 1')
expect_error(binom_test(as.factor(mtcars$cyl), -1.5), 'prob must be between 0 and 1')
})
| /tests/testthat/test-binom.R | no_license | raghubhatt/inferr | R | false | false | 1,911 | r | context('binomtest')
test_that('output from binom_calc matches the expected output', {
k <- binom_calc(32, 8)
expect_equal(k$n, 32)
expect_equal(k$k, 8)
expect_equal(k$exp_k, 16)
expect_equal(k$obs_p, 0.25)
expect_equal(k$exp_p, 0.5)
expect_equal(k$ik, 24)
expect_equal(k$lower, 0.0035)
expect_equal(k$upper, 0.998949)
expect_equal(k$two_tail, 0.007)
k <- binom_calc(32, 20)
expect_equal(k$n, 32)
expect_equal(k$k, 20)
expect_equal(k$exp_k, 16)
expect_equal(k$obs_p, 0.625)
expect_equal(k$exp_p, 0.5)
expect_equal(k$ik, 11)
expect_equal(k$lower, 0.944908)
expect_equal(k$upper, 0.107664)
expect_equal(k$two_tail, 0.162756)
})
test_that('binom_calc throws the appropriate error', {
expect_error(binom_calc('32', 20), 'n must be an integer')
expect_error(binom_calc(32, '20'), 'success must be an integer')
expect_error(binom_calc(32, 20, '0.5'), 'prob must be numeric')
expect_error(binom_calc(32, 20, 1.5), 'prob must be between 0 and 1')
expect_error(binom_calc(32, 20, -1.5), 'prob must be between 0 and 1')
})
test_that('output from binom_test matches the expected output', {
k <- binom_test(as.factor(mtcars$vs))
expect_equal(k$n, 32)
expect_equal(k$k, 14)
expect_equal(k$exp_k, 16)
expect_equal(k$obs_p, 0.4375)
expect_equal(k$exp_p, 0.5)
expect_equal(k$ik, 18)
expect_equal(k$lower, 0.298307)
expect_equal(k$upper, 0.811457)
expect_equal(k$two_tail, 0.596615)
})
test_that('binom_test throws the appropriate error', {
expect_error(binom_test(mtcars$cyl), 'data must be of type factor')
expect_error(binom_test(as.factor(mtcars$cyl), '0.5'), 'prob must be numeric')
expect_error(binom_test(as.factor(mtcars$cyl), 1.5), 'prob must be between 0 and 1')
expect_error(binom_test(as.factor(mtcars$cyl), -1.5), 'prob must be between 0 and 1')
})
|
# Boruta all-relevant feature selection on an acoustic-emission feature table.
library(mlbench)
library (Boruta)
# NOTE(review): setwd() hard-codes a user-specific absolute path; prefer a
# project-relative path so the script is portable.
setwd("C:/Users/Arafat Habib/Desktop/R")
# Feature table; assumed to contain an `Outcome` column plus predictor
# columns -- TODO confirm against the CSV header.
Data1 <- read.csv(file = 'AEFeat.csv')
head(Data1)
# Fix the RNG seed so the (random-forest-based) Boruta run is reproducible.
set.seed(1)
# Run Boruta on Outcome ~ all other columns; doTrace = 5 prints progress,
# ntree = 1000 trees per forest.
Boruta.Data1<-Boruta(Outcome~.,data=Data1,doTrace=5,ntree=1000)
Boruta.Data1
plot(Boruta.Data1)
# Formula containing only the attributes confirmed as important.
getConfirmedFormula(Boruta.Data1)
# Per-attribute importance statistics (Z-score summaries and decision).
a<-attStats(Boruta.Data1)
print(a)
| /Boruta_implR.R | permissive | ArafatHabib/Concrete-detection-using-AE-features-K-NN-and-Boruta | R | false | false | 317 | r | library(mlbench)
library (Boruta)
setwd("C:/Users/Arafat Habib/Desktop/R")
Data1 <- read.csv(file = 'AEFeat.csv')
head(Data1)
set.seed(1)
Boruta.Data1<-Boruta(Outcome~.,data=Data1,doTrace=5,ntree=1000)
Boruta.Data1
plot(Boruta.Data1)
getConfirmedFormula(Boruta.Data1)
a<-attStats(Boruta.Data1)
print(a)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-restarts.R
\name{with_restarts}
\alias{with_restarts}
\title{Establish a restart point on the stack}
\usage{
with_restarts(.expr, ...)
}
\arguments{
\item{.expr}{An expression to execute with new restarts established
on the stack. This argument is passed by expression and supports
\link[=quasiquotation]{unquoting}. It is evaluated in a context where
restarts are established.}
\item{...}{Named restart functions. The name is taken as the
restart name and the function is executed after the jump. These
dots support \link[=tidy-dots]{tidy dots} features.}
}
\description{
Restart points are named functions that are established with
\code{with_restarts()}. Once established, you can interrupt the normal
execution of R code, jump to the restart, and resume execution from
there. Each restart is established along with a restart function
that is executed after the jump and that provides a return value
from the establishing point (i.e., a return value for
\code{with_restarts()}).
}
\details{
Restarts are not the only way of jumping to a previous call frame
(see \code{\link[=return_from]{return_from()}} or \code{\link[=return_to]{return_to()}}). However, they have the advantage of
being callable by name once established.
}
\examples{
# Restarts are not the only way to jump to a previous frame, but
# they have the advantage of being callable by name:
fn <- function() with_restarts(g(), my_restart = function() "returned")
g <- function() h()
h <- function() { rst_jump("my_restart"); "not returned" }
fn()
# Whereas a non-local return requires to manually pass the calling
# frame to the return function:
fn <- function() g(current_env())
g <- function(env) h(env)
h <- function(env) { return_from(env, "returned"); "not returned" }
fn()
# rst_maybe_jump() checks that a restart exists before trying to jump:
fn <- function() {
g()
cat("will this be called?\\n")
}
g <- function() {
rst_maybe_jump("my_restart")
cat("will this be called?\\n")
}
# Here no restart are on the stack:
fn()
# If a restart point called `my_restart` was established on the
# stack before calling fn(), the control flow will jump there:
rst <- function() {
cat("restarting...\\n")
"return value"
}
with_restarts(fn(), my_restart = rst)
# Restarts are particularly useful to provide alternative default
# values when the normal output cannot be computed:
fn <- function(valid_input) {
if (valid_input) {
return("normal value")
}
# We decide to return the empty string "" as default value. An
# alternative strategy would be to signal an error. In any case,
# we want to provide a way for the caller to get a different
# output. For this purpose, we provide two restart functions that
# returns alternative defaults:
restarts <- list(
rst_empty_chr = function() character(0),
rst_null = function() NULL
)
with_restarts(splice(restarts), .expr = {
# Signal a typed condition to let the caller know that we are
# about to return an empty string as default value:
cnd_signal("default_empty_string")
# If no jump to with_restarts, return default value:
""
})
}
# Normal value for valid input:
fn(TRUE)
# Default value for bad input:
fn(FALSE)
# Change the default value if you need an empty character vector by
# defining an inplace handler that jumps to the restart. It has to
# be inplace because exiting handlers jump to the place where they
# are established before being executed, and the restart is not
# defined anymore at that point:
rst_handler <- inplace(function(c) rst_jump("rst_empty_chr"))
with_handlers(fn(FALSE), default_empty_string = rst_handler)
# You can use restarting() to create restarting handlers easily:
with_handlers(fn(FALSE), default_empty_string = restarting("rst_null"))
}
\seealso{
\code{\link[=return_from]{return_from()}} and \code{\link[=return_to]{return_to()}} for a more flexible way
of performing a non-local jump to an arbitrary call frame.
}
| /man/with_restarts.Rd | no_license | shizidushu/rlang | R | false | true | 4,025 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-restarts.R
\name{with_restarts}
\alias{with_restarts}
\title{Establish a restart point on the stack}
\usage{
with_restarts(.expr, ...)
}
\arguments{
\item{.expr}{An expression to execute with new restarts established
on the stack. This argument is passed by expression and supports
\link[=quasiquotation]{unquoting}. It is evaluated in a context where
restarts are established.}
\item{...}{Named restart functions. The name is taken as the
restart name and the function is executed after the jump. These
dots support \link[=tidy-dots]{tidy dots} features.}
}
\description{
Restart points are named functions that are established with
\code{with_restarts()}. Once established, you can interrupt the normal
execution of R code, jump to the restart, and resume execution from
there. Each restart is established along with a restart function
that is executed after the jump and that provides a return value
from the establishing point (i.e., a return value for
\code{with_restarts()}).
}
\details{
Restarts are not the only way of jumping to a previous call frame
(see \code{\link[=return_from]{return_from()}} or \code{\link[=return_to]{return_to()}}). However, they have the advantage of
being callable by name once established.
}
\examples{
# Restarts are not the only way to jump to a previous frame, but
# they have the advantage of being callable by name:
fn <- function() with_restarts(g(), my_restart = function() "returned")
g <- function() h()
h <- function() { rst_jump("my_restart"); "not returned" }
fn()
# Whereas a non-local return requires to manually pass the calling
# frame to the return function:
fn <- function() g(current_env())
g <- function(env) h(env)
h <- function(env) { return_from(env, "returned"); "not returned" }
fn()
# rst_maybe_jump() checks that a restart exists before trying to jump:
fn <- function() {
g()
cat("will this be called?\\n")
}
g <- function() {
rst_maybe_jump("my_restart")
cat("will this be called?\\n")
}
# Here no restart are on the stack:
fn()
# If a restart point called `my_restart` was established on the
# stack before calling fn(), the control flow will jump there:
rst <- function() {
cat("restarting...\\n")
"return value"
}
with_restarts(fn(), my_restart = rst)
# Restarts are particularly useful to provide alternative default
# values when the normal output cannot be computed:
fn <- function(valid_input) {
if (valid_input) {
return("normal value")
}
# We decide to return the empty string "" as default value. An
# alternative strategy would be to signal an error. In any case,
# we want to provide a way for the caller to get a different
# output. For this purpose, we provide two restart functions that
# returns alternative defaults:
restarts <- list(
rst_empty_chr = function() character(0),
rst_null = function() NULL
)
with_restarts(splice(restarts), .expr = {
# Signal a typed condition to let the caller know that we are
# about to return an empty string as default value:
cnd_signal("default_empty_string")
# If no jump to with_restarts, return default value:
""
})
}
# Normal value for valid input:
fn(TRUE)
# Default value for bad input:
fn(FALSE)
# Change the default value if you need an empty character vector by
# defining an inplace handler that jumps to the restart. It has to
# be inplace because exiting handlers jump to the place where they
# are established before being executed, and the restart is not
# defined anymore at that point:
rst_handler <- inplace(function(c) rst_jump("rst_empty_chr"))
with_handlers(fn(FALSE), default_empty_string = rst_handler)
# You can use restarting() to create restarting handlers easily:
with_handlers(fn(FALSE), default_empty_string = restarting("rst_null"))
}
\seealso{
\code{\link[=return_from]{return_from()}} and \code{\link[=return_to]{return_to()}} for a more flexible way
of performing a non-local jump to an arbitrary call frame.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eubon.R
\name{eubon_search}
\alias{eubon_search}
\title{EUBON taxonomy search}
\usage{
eubon_search(
query,
providers = "pesi",
searchMode = "scientificNameExact",
addSynonymy = FALSE,
addParentTaxon = FALSE,
timeout = 0,
dedup = NULL,
limit = 20,
page = 1,
...
)
}
\arguments{
\item{query}{(character) The scientific name to search for. For example:
"Bellis perennis", "Prionus" or "Bolinus brandaris". This is an exact search
so wildcard characters are not supported}
\item{providers}{(character) A list of provider id strings concatenated by
comma characters. The default : "pesi,bgbm-cdm-server[col]" will be
used if this parameter is not set. A list of all available provider ids
can be obtained from the '/capabilities' service end point. Providers can be
nested, that is a parent provider can have sub providers. If the id of the
parent provider is supplied all subproviders will be queried. The query
can also be restricted to one or more subproviders by using the following
syntax: parent-id[sub-id-1,sub-id2,...]}
\item{searchMode}{(character) Specifies the searchMode. Possible search
modes are: \code{scientificNameExact}, \code{scientificNameLike} (begins with),
\code{vernacularNameExact}, \code{vernacularNameLike}
(contains), \code{findByIdentifier}. If a provider does not support the
chosen searchMode it will be skipped and the status message in the
tnrClientStatus will be set to 'unsupported search mode' in this case.}
\item{addSynonymy}{(logical) Indicates whether the synonymy of the accepted
taxon should be included into the response. Turning this option on may
cause an increased response time. Default: \code{FALSE}}
\item{addParentTaxon}{(logical) Indicates whether the parent taxon of
the accepted taxon should be included into the response. Turning this option
on may cause a slightly increased response time. Default: \code{FALSE}}
\item{timeout}{(numeric) The maximum of milliseconds to wait for responses
from any of the providers. If the timeout is exceeded the service will just
return the responses that have been received so far. The default timeout is
0 ms (wait for ever)}
\item{dedup}{(character) Allows to deduplicate the results by making use of
a deduplication strategy. The deduplication is done by comparing
specific properties of the taxon:
\itemize{
\item id: compares 'taxon.identifier'
\item id_name: compares 'taxon.identifier' AND
'taxon.taxonName.scientificName'
\item name: compares 'taxon.taxonName.scientificName' Using the pure
'name' strategy is not recommended.
}}
\item{limit}{(numeric/integer) number of records to retrieve. default: 20.
This only affects the search mode \code{scientificNameLike} and
\code{vernacularNameLike}; other search modes are expected to return only one
record per check list}
\item{page}{(numeric/integer) page to retrieve. default: 1. This only
affects the search mode \code{scientificNameLike} and \code{vernacularNameLike}; other
search modes are expected to return only one record per check list}
\item{...}{Curl options passed on to \link[crul:verb-GET]{crul::verb-GET}}
}
\description{
EUBON taxonomy search
}
\examples{
\dontrun{
eubon_search("Prionus")
eubon_search("Salmo", "pesi")
eubon_search("Salmo", c("pesi", "worms"))
eubon_search("Salmo", "worms", "scientificNameLike")
eubon_search("Salmo", "worms", "scientificNameLike", limit = 3)
eubon_search("Salmo", "worms", "scientificNameLike", limit = 20, page = 2)
eubon_search("Salmo", "worms", addSynonymy = TRUE)
eubon_search("Salmo", "worms", addParentTaxon = TRUE)
}
}
\references{
https://cybertaxonomy.eu/eu-bon/utis/1.3/doc.html
}
\seealso{
Other eubon-methods:
\code{\link{eubon_capabilities}()},
\code{\link{eubon_children}()},
\code{\link{eubon_hierarchy}()}
}
\concept{eubon-methods}
| /man/eubon_search.Rd | permissive | ropensci/taxize | R | false | true | 3,860 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eubon.R
\name{eubon_search}
\alias{eubon_search}
\title{EUBON taxonomy search}
\usage{
eubon_search(
query,
providers = "pesi",
searchMode = "scientificNameExact",
addSynonymy = FALSE,
addParentTaxon = FALSE,
timeout = 0,
dedup = NULL,
limit = 20,
page = 1,
...
)
}
\arguments{
\item{query}{(character) The scientific name to search for. For example:
"Bellis perennis", "Prionus" or "Bolinus brandaris". This is an exact search
so wildcard characters are not supported}
\item{providers}{(character) A list of provider id strings concatenated by
comma characters. The default : "pesi,bgbm-cdm-server[col]" will be
used if this parameter is not set. A list of all available provider ids
can be obtained from the '/capabilities' service end point. Providers can be
nested, that is a parent provider can have sub providers. If the id of the
parent provider is supplied all subproviders will be queried. The query
can also be restricted to one or more subproviders by using the following
syntax: parent-id[sub-id-1,sub-id2,...]}
\item{searchMode}{(character) Specifies the searchMode. Possible search
modes are: \code{scientificNameExact}, \code{scientificNameLike} (begins with),
\code{vernacularNameExact}, \code{vernacularNameLike}
(contains), \code{findByIdentifier}. If a provider does not support the
chosen searchMode it will be skipped and the status message in the
tnrClientStatus will be set to 'unsupported search mode' in this case.}
\item{addSynonymy}{(logical) Indicates whether the synonymy of the accepted
taxon should be included into the response. Turning this option on may
cause an increased response time. Default: \code{FALSE}}
\item{addParentTaxon}{(logical) Indicates whether the parent taxon of
the accepted taxon should be included into the response. Turning this option
on may cause a slightly increased response time. Default: \code{FALSE}}
\item{timeout}{(numeric) The maximum of milliseconds to wait for responses
from any of the providers. If the timeout is exceeded the service will just
return the responses that have been received so far. The default timeout is
0 ms (wait for ever)}
\item{dedup}{(character) Allows to deduplicate the results by making use of
a deduplication strategy. The deduplication is done by comparing
specific properties of the taxon:
\itemize{
\item id: compares 'taxon.identifier'
\item id_name: compares 'taxon.identifier' AND
'taxon.taxonName.scientificName'
\item name: compares 'taxon.taxonName.scientificName' Using the pure
'name' strategy is not recommended.
}}
\item{limit}{(numeric/integer) number of records to retrieve. default: 20.
This only affects the search mode \code{scientificNameLike} and
\code{vernacularNameLike}; other search modes are expected to return only one
record per check list}
\item{page}{(numeric/integer) page to retrieve. default: 1. This only
affects the search mode \code{scientificNameLike} and \code{vernacularNameLike}; other
search modes are expected to return only one record per check list}
\item{...}{Curl options passed on to \link[crul:verb-GET]{crul::verb-GET}}
}
\description{
EUBON taxonomy search
}
\examples{
\dontrun{
eubon_search("Prionus")
eubon_search("Salmo", "pesi")
eubon_search("Salmo", c("pesi", "worms"))
eubon_search("Salmo", "worms", "scientificNameLike")
eubon_search("Salmo", "worms", "scientificNameLike", limit = 3)
eubon_search("Salmo", "worms", "scientificNameLike", limit = 20, page = 2)
eubon_search("Salmo", "worms", addSynonymy = TRUE)
eubon_search("Salmo", "worms", addParentTaxon = TRUE)
}
}
\references{
https://cybertaxonomy.eu/eu-bon/utis/1.3/doc.html
}
\seealso{
Other eubon-methods:
\code{\link{eubon_capabilities}()},
\code{\link{eubon_children}()},
\code{\link{eubon_hierarchy}()}
}
\concept{eubon-methods}
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 1.19240064555554e+106, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) | /dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609868474-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,199 | r | testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 1.19240064555554e+106, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) |
#' Local biplot at input data points
#'
#' @param mds_matrices The output from make_mds_matrices.
#' @param dist_fns The output from make_dist_fns.
#' @param k The number of embedding dimensions for multi-dimensional scaling.
#' @param samples Which of the points to compute sensitivities for.
#'
#' @return A data frame. Each row describes one LB axis for one
#' sample. Columns labeled 'Embedding' give the embedding of the
#' sample in MDS space, columns labeled 'Axis' give the LB axis
#' for a given sample and variable. Columns labeled 'variable' and
#' 'sample' give the variable and sample for the LB axis values.
compute_lb_samples <- function(mds_matrices, dist_fns, k, samples) {
# delta holds squared distances, so the elementwise square root recovers
# the distance matrix itself.
dist_matrix = mds_matrices$delta^(.5)
biplot_list = list()
# Y %*% diag(1 / Lambda) restricted to the first k MDS axes.
Ylambdainv = sweep(mds_matrices$Y[,1:k], MARGIN = 2,
STATS = mds_matrices$Lambda[1:k], FUN = "/")
for(j in samples) {
# Distances from every data point to sample j.
dist_to_j = dist_matrix[,j]
# Column i is -grad_y d(x_i, y) evaluated at y = x_j, giving a
# (variables x samples) matrix of negated partial derivatives.
dist_jacobian = apply(mds_matrices$X, 1, function(x) {
-dist_fns$dist_deriv(x, mds_matrices$X[j,])
})
# Scale column i by d(x_i, x_j).
Jd = sweep(dist_jacobian, MARGIN = 2, STATS = dist_to_j, FUN = "*")
biplot_axes = Jd %*% Ylambdainv
embedding = mds_matrices$Y[j,1:k]
# Repeat the sample's embedding once per variable so every axis row also
# records the point it is anchored at.
axis_center = matrix(embedding, nrow = ncol(mds_matrices$X), ncol = k, byrow = TRUE)
biplot_df = data.frame(axis_center, biplot_axes)
names(biplot_df) = c(paste0("Embedding", 1:k), paste0("Axis", 1:k))
biplot_df$variable = colnames(mds_matrices$X)
biplot_df$sample = paste0("Original", j)
# Indexing by j (the sample id) may leave NULL gaps when `samples` is not
# 1:n; Reduce(rbind, ...) below silently drops those NULLs.
biplot_list[[j]] = biplot_df
}
return(Reduce(rbind, biplot_list))
}
#' Local biplot at new points
#'
#' @param mds_matrices The output from make_mds_matrices.
#' @param dist_fns The output from make_dist_fns.
#' @param k The number of embedding dimensions for multi-dimensional scaling.
#' @param new_points A list with new points to compute biplot axes for.
#'
#' @return A data frame (or NULL when `new_points` is empty). Each row
#' describes one LB axis for one sample. Columns labeled 'Embedding' give
#' the embedding of the sample in MDS space, columns labeled 'Axis' give
#' the LB axis for a given sample and variable. Columns labeled 'variable'
#' and 'sample' give the variable and sample for the LB axis values.
compute_lb_new_points <- function(mds_matrices, dist_fns, k, new_points) {
    biplot_list = list()
    # Y %*% diag(1 / Lambda) restricted to the first k MDS axes.
    Ylambdainv = sweep(mds_matrices$Y[, 1:k], MARGIN = 2,
        STATS = mds_matrices$Lambda[1:k], FUN = "/")
    # seq_along() (rather than 1:length()) handles an empty new_points list.
    for (i in seq_along(new_points)) {
        new_point = new_points[[i]]
        # Distance from each original point to the new point.
        dist_to_new_point = apply(mds_matrices$X, 1, function(x) {
            as.matrix(dist_fns$dist_fn(rbind(x, new_point)))[1, 2]
        })
        # Column i is -grad_y d(x_i, y) evaluated at y = new_point.
        dist_jacobian = apply(mds_matrices$X, 1, function(x) {
            -dist_fns$dist_deriv(x, new_point)
        })
        # Out-of-sample MDS embedding of the new point from its distances
        # to the original points.
        embedding = .5 * (diag(mds_matrices$jdj) - dist_to_new_point^2) %*% Ylambdainv
        # Repeat the embedding once per variable so every axis row also
        # records the point it is anchored at.
        axis_center = matrix(embedding, nrow = ncol(mds_matrices$X), ncol = k, byrow = TRUE)
        # Scale column i by d(x_i, new_point).
        Jd = sweep(dist_jacobian, MARGIN = 2, STATS = dist_to_new_point, FUN = "*")
        biplot_axes = Jd %*% Ylambdainv
        biplot_df = data.frame(axis_center, biplot_axes)
        names(biplot_df) = c(paste0("Embedding", 1:k), paste0("Axis", 1:k))
        biplot_df$variable = colnames(mds_matrices$X)
        biplot_df$sample = paste0("New", i)
        biplot_list[[i]] = biplot_df
    }
    return(Reduce(rbind, biplot_list))
}
#' Computes MDS representation and other associated values
#'
#' Performs classical multi-dimensional scaling: squares the pairwise
#' distances, double-centers them, and embeds the points using the
#' positive-eigenvalue part of the spectral decomposition.
#'
#' @param X A samples x variables data matrix.
#' @param dist_fn A function that computes the distances between the rows of X.
#' @param dist_mat If this argument is non-null, use it as the distance matrix
#' instead of calling dist_fn on the rows of X.
#'
#' @return A list, containing
#' - delta: Matrix of squared distances.
#' - jdj: Row- and column-centered -.5 * delta
#' - d2: The diagonal elements of jdj
#' - Y: The embeddings of the samples in the MDS space.
#' - Lambda: The eigenvalues of jdj.
#' - X: The original data.
make_mds_matrices <- function(X = NULL, dist_fn = NULL, dist_mat = NULL) {
    # Either compute distances from the rows of X or use the supplied matrix.
    if(is.null(dist_mat)) {
        dist_output = dist_fn(X)
        n = nrow(X)
    } else {
        dist_output = as.matrix(dist_mat)
        n = nrow(dist_output)
    }
    ## as.matrix allows us to handle the output from the 'dist'
    ## function as well as matrix-valued outputs.
    ## delta is the matrix that contains the squared distances.
    delta = as.matrix(dist_output)^2
    A = -.5 * delta
    ## Double centering: J A J with J = I - (1/n) 1 1'.
    J = diag(1, n) - n^(-1) * matrix(1, nrow = n, ncol = n)
    jdj = J %*% A %*% J
    Beig = eigen(jdj, symmetric = TRUE)
    ## Keep the first n - 1 eigenpairs (centering forces at least one zero
    ## eigenvalue). drop = FALSE keeps the vectors a matrix even when n = 2.
    Beig$vectors = Beig$vectors[, 1:(n - 1), drop = FALSE]
    Beig$values = Beig$values[1:(n - 1)]
    positive_idx = which(Beig$values > 0)
    if(length(positive_idx) == 0)
        stop("make_mds_matrices: no positive eigenvalues, cannot embed points")
    smallest_positive_eval_idx = max(positive_idx)
    ## Scale each eigenvector column by sqrt(eigenvalue). sweep() is used
    ## instead of %*% diag(...): diag(x) on a length-one x would build an
    ## identity matrix of that size rather than a 1 x 1 scaling matrix.
    Y = sweep(Beig$vectors[, 1:smallest_positive_eval_idx, drop = FALSE],
        MARGIN = 2,
        STATS = sqrt(Beig$values[1:smallest_positive_eval_idx]),
        FUN = "*")
    colnames(Y) = paste("Axis", 1:ncol(Y), sep = "")
    out = list()
    out$d2 = diag(jdj)
    out$jdj = jdj
    out$delta = delta
    out$X = X
    out$Lambda = Beig$values
    out$Y = Y
    return(out)
}
#' Creates distance function and corresponding derivative function
#'
#' @param dist_fn Either a string naming one of the supported distances
#'     ("euclidean", "manhattan-pos", "manhattan-neg", "maximum-pos",
#'     "maximum-neg") or a function.
#' @param dist_deriv Either NULL (when dist_fn is a string) or a function.
#'
#' @return A list containing two functions, dist_fn and
#'     dist_deriv. dist_fn takes a matrix and computes a distance
#'     between the rows. dist_deriv takes two vectors, x and y, and
#'     computes \eqn{\frac{\partial}{\partial y_j}d(x,y)}, j = 1,...,p.
#' @importFrom stats dist
make_dist_fns <- function(dist_fn, dist_deriv) {
    ## is.function() also accepts builtin/primitive functions, which
    ## typeof(.) == "closure" would miss (their typeof is "builtin" or
    ## "special", and the string comparisons below then error on a
    ## function argument); && short-circuits instead of elementwise &
    if(is.function(dist_fn) && is.function(dist_deriv)) {
        return(list(dist_fn = dist_fn, dist_deriv = dist_deriv))
    }
    if(dist_fn == "euclidean") {
        dist_fn = function(x) dist(x, method = "euclidean")
        return(list(dist_fn = dist_fn, dist_deriv = euclidean_dist_deriv))
    } else if(dist_fn == "manhattan-pos") {
        dist_fn = function(x) dist(x, method = "manhattan")
        return(list(dist_fn = dist_fn, dist_deriv = manhattan_dist_deriv_pos))
    } else if(dist_fn == "manhattan-neg") {
        dist_fn = function(x) dist(x, method = "manhattan")
        return(list(dist_fn = dist_fn, dist_deriv = manhattan_dist_deriv_neg))
    } else if(dist_fn == "maximum-pos") {
        dist_fn = function(x) dist(x, method = "maximum")
        return(list(dist_fn = dist_fn, dist_deriv = maximum_dist_deriv_pos))
    } else if(dist_fn == "maximum-neg") {
        dist_fn = function(x) dist(x, method = "maximum")
        return(list(dist_fn = dist_fn, dist_deriv = maximum_dist_deriv_neg))
    } else {
        stop("Unsupported distance")
    }
}
#' Partial derivatives for Euclidean distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}, i.e. the gradient
#'     of the Euclidean distance with respect to y.
euclidean_dist_deriv <- function(x, y) {
    diff = y - x
    squared_norm = sum(diff^2)
    if(squared_norm == 0) {
        ## the distance is not differentiable at y == x; return the
        ## zero vector there
        return(rep(0, length(y)))
    }
    diff / sqrt(squared_norm)
}
#' Partial derivatives for Manhattan distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}: +1 where
#'     y_j >= x_j and -1 where y_j < x_j (ties take the right
#'     one-sided derivative).
manhattan_dist_deriv_pos <- function(x, y) {
    ## d|y_j - x_j| / dy_j is the sign of (y_j - x_j); at ties use +1
    2 * (y >= x) - 1
}
#' Partial derivatives for Manhattan distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}: +1 where
#'     y_j > x_j and -1 where y_j <= x_j (ties take the left
#'     one-sided derivative).
manhattan_dist_deriv_neg <- function(x, y) {
    ## d|y_j - x_j| / dy_j is the sign of (y_j - x_j); at ties use -1
    2 * (y > x) - 1
}
#' Partial derivatives for max distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}. Only coordinates
#'     attaining the maximum absolute difference are nonzero; ties are
#'     resolved with the "positive" convention.
maximum_dist_deriv_pos <- function(x, y) {
    gap = abs(y - x)
    active = which(gap == max(gap))
    out = rep(0, length(x))
    if (length(active) == 1) {
        ## unique maximizer: derivative is the sign of (y - x) there,
        ## taking +1 at a tie y == x
        out[active] = if (y[active] >= x[active]) 1 else -1
    } else {
        ## multiple maximizers: only coordinates with y > x get +1
        out[active] = as.numeric(y[active] > x[active])
    }
    out
}
#' Partial derivatives for max distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}. Only coordinates
#'     attaining the maximum absolute difference are nonzero; ties are
#'     resolved with the "negative" convention.
maximum_dist_deriv_neg <- function(x, y) {
    gap = abs(y - x)
    active = which(gap == max(gap))
    out = rep(0, length(x))
    if (length(active) == 1) {
        ## unique maximizer: derivative is the sign of (y - x) there,
        ## taking -1 at a tie y == x
        out[active] = if (y[active] <= x[active]) -1 else 1
    } else {
        ## multiple maximizers: only coordinates with y <= x get -1
        out[active] = -as.numeric(y[active] <= x[active])
    }
    out
}
#' Create local biplot axes
#'
#' @param X A data matrix, samples as rows.
#' @param dist Either a string describing one of the supported
#'     distances or a function that takes a matrix and returns the
#'     distances between the rows of the matrix.
#' @param dist_deriv Either NULL (if dist is a string describing one
#'     of the supported distances) or a function that takes two
#'     vectors and computes \eqn{\frac{\partial}{\partial y_j}d(x,y)}.
#' @param k The number of embedding dimensions.
#' @param samples The samples to compute local biplot axes
#'     at. Defaults to all of the original samples.
#' @param new_points New points (not one of the original samples) to
#'     compute local biplot axes at.
#'
#' @return A data frame. Each row describes one LB axis for one
#'     sample. Columns labeled 'Embedding' give the embedding of the
#'     sample in MDS space, columns labeled 'Axis' give the LB axis
#'     for a given sample and variable. Columns labeled 'variable' and
#'     'sample' give the variable and sample for the LB axis values.
#' @export
local_biplot <- function(X, dist, dist_deriv = NULL, k = 2,
                         samples = 1:nrow(X),
                         new_points = list()) {
    ## resolve the distance/derivative pair, embed once, then collect
    ## axes for the original samples and/or any new points
    fns = make_dist_fns(dist, dist_deriv)
    mds = make_mds_matrices(X, fns$dist_fn)
    pieces = list()
    if(length(samples) > 0) {
        pieces$original = compute_lb_samples(mds, fns, k = k, samples = samples)
    }
    if(length(new_points) > 0) {
        pieces$new = compute_lb_new_points(mds, fns, k = k,
                                           new_points = new_points)
    }
    Reduce(rbind, pieces)
}
#' Make a correlation biplot
#'
#' @param X The data matrix, samples in the rows.
#' @param dist A function that takes a matrix and computes the
#'     distance between the rows.
#' @param plotting_axes The MDS embedding axes to plot.
#' @importFrom stats cor
#' @export
correlation_biplot <- function(X, dist, plotting_axes = 1:2) {
    ## biplot axes are the correlations between the raw variables and
    ## the requested MDS embedding coordinates
    mds = make_mds_matrices(X, dist)
    axes = cor(X, mds$Y)[, plotting_axes]
    colnames(axes) = sprintf("Axis%i", plotting_axes)
    return(axes)
}
#' Embed new points in an MDS diagram
#'
#' @param mds_matrices The output from make_mds_matrices.
#' @param new_points A matrix, each row corresponding to a new sample, each column corresponding to a variable.
#' @param dist_fn A function taking a matrix as its argument, returns distances between the rows.
#' @param old_new_dist_mat A matrix with n_old rows and n_new columns containing distances between the old and new points. If this argument is non-null, these distances will be used instead of computing the distances using dist_fn.
#'
#' @return A matrix containing the embedding locations of the new points. Rows correspond to new samples, columns correspond to embedding dimensions.
#' @export
embed_new_points <- function(mds_matrices, new_points = NULL, dist_fn = NULL, old_new_dist_mat = NULL) {
    if(is.null(old_new_dist_mat)) {
        d2_old_new = get_distances(mds_matrices$X, new_points, dist_fn)^2
    } else {
        d2_old_new = old_new_dist_mat^2
    }
    ## Gower-style out-of-sample formula: f(z) = .5 (d2 - delta(z))' Y Lambda^{-1}
    a = -d2_old_new + mds_matrices$d2
    lambda_inv = mds_matrices$Lambda[1:ncol(mds_matrices$Y)]^(-1)
    ## pass `nrow =` so a one-dimensional embedding still yields a 1x1
    ## scaling matrix: diag(x) with a length-one x builds an identity
    ## matrix of order round(x) instead
    fz = .5 * t(a) %*% mds_matrices$Y %*% diag(lambda_inv, nrow = length(lambda_inv))
    return(fz)
}
#' Get distances between a set of old points and a set of new points
#'
#' @param old_points n_old x n_variables matrix with rows containing the old points.
#' @param new_points n_new x n_variables matrix with rows containing the new point.
#' @param dist_fn A function taking as input a matrix, returning the distances between the rows of the matrix.
#'
#' @return A matrix with n_old rows and n_new columns containing the
#'     distance between each pair of old and new points.
get_distances <- function(old_points, new_points, dist_fn) {
    X = rbind(old_points, new_points)
    # this is doing a lot of extra computation, but it's reasonably
    # likely that making all the new data structures you would need to
    # avoid that would be even worse
    dists = as.matrix(dist_fn(X))
    n_old = nrow(old_points)
    n_new = nrow(new_points)
    ## drop = FALSE keeps the documented n_old x n_new matrix shape
    ## even when there is a single old or new point
    return(dists[1:n_old, (n_old + 1):(n_old + n_new), drop = FALSE])
}
#' Add new points to an ordination object created by phyloseq
#'
#' @param ps_old A phyloseq object containing the original samples used for phyloseq::ordinate.
#' @param ps_old_and_new A phyloseq object containing both the old samples (those used for phyloseq::ordinate) and the new samples to be added to the embedding diagram.
#' @param distance The same argument that was passed to phyloseq::ordinate
#' @return A data frame with one row per new sample: its embedding
#'     coordinates (columns "Axis1", "Axis2", ...) followed by that
#'     sample's sample_data columns.
#' @export
add_to_phyloseq_ordination <- function(ps_old, ps_old_and_new, distance) {
    ## new samples are those present in ps_old_and_new but not in ps_old
    new_sample_names = setdiff(sample_names(ps_old_and_new), sample_names(ps_old))
    new_sample_indices = which(sample_names(ps_old_and_new) %in% new_sample_names)
    old_sample_indices = which(!(sample_names(ps_old_and_new) %in% new_sample_names))
    ## NOTE(review): `distance(...)` below resolves to phyloseq's
    ## distance function, not the string-valued `distance` argument (R
    ## skips non-function bindings when looking up a call target) --
    ## confirm phyloseq is attached wherever this is called
    old_and_new_distance = as.matrix(distance(ps_old_and_new, method = distance))[old_sample_indices, new_sample_indices]
    old_distance = distance(ps_old, method = distance)
    ## run MDS on the old samples only, then project the new samples
    ## into that fixed embedding
    mds_matrices = make_mds_matrices(dist_mat = old_distance)
    embeddings = embed_new_points(mds_matrices, old_new_dist_mat = old_and_new_distance)
    embeddings = data.frame(embeddings)
    names(embeddings) = paste("Axis", 1:ncol(embeddings), sep = "")
    embeddings = data.frame(embeddings, sample_data(ps_old_and_new)[new_sample_indices,])
    return(embeddings)
}
| /R/mdessence-functions.R | no_license | jfukuyama/localBiplots | R | false | false | 14,927 | r | #' Local biplot at input data points
#' Local biplot axes at the original data points
#'
#' @param mds_matrices The output from make_mds_matrices.
#' @param dist_fns The output from make_dist_fns.
#' @param k The number of embedding dimensions for multi-dimensional scaling. Defaults to 2.
#' @param samples Which of the points to compute sensitivities for.
#'
#' @return A data frame. Each row describes one LB axis for one
#'     sample. Columns labeled 'Embedding' give the embedding of the
#'     sample in MDS space, columns labeled 'Axis' give the LB axis
#'     for a given sample and variable. Columns labeled 'variable' and
#'     'sample' give the variable and sample for the LB axis values.
compute_lb_samples <- function(mds_matrices, dist_fns, k, samples) {
    ## recover unsquared distances from the stored squared distances
    dist_matrix = mds_matrices$delta^(.5)
    biplot_list = list()
    ## Y Lambda^{-1}: each embedding column divided by its eigenvalue
    Ylambdainv = sweep(mds_matrices$Y[,1:k], MARGIN = 2,
                       STATS = mds_matrices$Lambda[1:k], FUN = "/")
    for(j in samples) {
        dist_to_j = dist_matrix[,j]
        ## column i holds -d/dy d(x_i, y) evaluated at y = x_j
        dist_jacobian = apply(mds_matrices$X, 1, function(x) {
            -dist_fns$dist_deriv(x, mds_matrices$X[j,])
        })
        ## scale column i of the jacobian by d(x_i, x_j)
        Jd = sweep(dist_jacobian, MARGIN = 2, STATS = dist_to_j, FUN = "*")
        biplot_axes = Jd %*% Ylambdainv
        embedding = mds_matrices$Y[j,1:k]
        ## repeat the sample's embedding once per variable so each axis
        ## row carries its anchor point
        axis_center = matrix(embedding, nrow = ncol(mds_matrices$X), ncol = k, byrow = TRUE)
        biplot_df = data.frame(axis_center, biplot_axes)
        names(biplot_df) = c(paste0("Embedding", 1:k), paste0("Axis", 1:k))
        biplot_df$variable = colnames(mds_matrices$X)
        biplot_df$sample = paste0("Original", j)
        biplot_list[[j]] = biplot_df
    }
    return(Reduce(rbind, biplot_list))
}
#' Local biplot at new points
#'
#' @param mds_matrices The output from make_mds_matrices.
#' @param dist_fns The output from make_dist_fns.
#' @param k The number of embedding dimensions for multi-dimensional scaling. Defaults to 2.
#' @param new_points A list with new points to compute biplot axes for.
#'
#' @return A data frame. Each row describes one LB axis for one
#'     sample. Columns labeled 'Embedding' give the embedding of the
#'     sample in MDS space, columns labeled 'Axis' give the LB axis
#'     for a given sample and variable. Columns labeled 'variable' and
#'     'sample' give the variable and sample for the LB axis values.
compute_lb_new_points <- function(mds_matrices, dist_fns, k, new_points) {
    biplot_list = list()
    ## Y Lambda^{-1}: each embedding column divided by its eigenvalue
    Ylambdainv = sweep(mds_matrices$Y[,1:k], MARGIN = 2,
                       STATS = mds_matrices$Lambda[1:k], FUN = "/")
    for(i in 1:length(new_points)) {
        new_point = new_points[[i]]
        ## distance from each original sample to the new point, computed
        ## pairwise via the supplied distance function
        dist_to_new_point = apply(mds_matrices$X, 1, function(x) {
            as.matrix(dist_fns$dist_fn(rbind(x, new_point)))[1,2]
        })
        ## column i holds -d/dy d(x_i, y) evaluated at y = new_point
        dist_jacobian = apply(mds_matrices$X, 1, function(x) {
            -dist_fns$dist_deriv(x, new_point)
        })
        ## out-of-sample embedding of the new point (Gower-style formula)
        embedding = .5 * (diag(mds_matrices$jdj) - dist_to_new_point^2) %*% Ylambdainv
        ## repeat the embedding once per variable so each axis row
        ## carries its anchor point
        axis_center = matrix(embedding, nrow = ncol(mds_matrices$X), ncol = k, byrow = TRUE)
        Jd = sweep(dist_jacobian, MARGIN = 2, STATS = dist_to_new_point, FUN = "*")
        biplot_axes = Jd %*% Ylambdainv
        biplot_df = data.frame(axis_center, biplot_axes)
        names(biplot_df) = c(paste0("Embedding", 1:k), paste0("Axis", 1:k))
        biplot_df$variable = colnames(mds_matrices$X)
        biplot_df$sample = paste0("New", i)
        biplot_list[[i]] = biplot_df
    }
    return(Reduce(rbind, biplot_list))
}
#' Computes MDS representation and other associated values
#'
#' @param X A samples x variables data matrix
#' @param dist_fn A function that computes the distances between the rows of X.
#' @param dist_mat If this argument is non-null, use it as the distance matrix instead of calling dist_fn on the rows of X.
#'
#' @return A list, containing
#' - delta: Matrix of squared distances.
#' - jdj: Row- and column-centered -.5 * delta
#' - d2: The diagonal elements of jdj
#' - Y: The embeddings of the samples in the MDS space.
#' - Lambda: The eigenvalues of jdj.
#' - X: The original data.
make_mds_matrices <- function(X = NULL, dist_fn = NULL, dist_mat = NULL) {
    if(is.null(dist_mat)) {
        dist_output = dist_fn(X)
        n = nrow(X)
    } else {
        dist_output = as.matrix(dist_mat)
        n = nrow(dist_output)
    }
    ## as.matrix allows us to handle the output from the 'dist'
    ## function as well as matrix-valued outputs
    ## delta is the matrix that contains the squared distances
    delta = as.matrix(dist_output)^2
    A = -.5 * delta
    ## double-centering matrix J = I - (1/n) 11'
    J = diag(1, n) - n^(-1) * matrix(1, nrow = n, ncol = n)
    jdj = J %*% A %*% J
    Beig = eigen(jdj, symmetric = TRUE)
    ## the last eigenvalue of a double-centered matrix is always 0
    ## (the all-ones vector is in its null space), so drop it;
    ## drop = FALSE keeps a matrix even when only one column remains
    Beig$vectors = Beig$vectors[, 1:(n-1), drop = FALSE]
    Beig$values = Beig$values[1:(n-1)]
    positive_evals = which(Beig$values > 0)
    if(length(positive_evals) == 0) {
        stop("No positive eigenvalues; cannot compute an MDS embedding")
    }
    smallest_positive_eval_idx = max(positive_evals)
    ## Y = V Lambda^{1/2}. Pass `nrow =` to diag() so that a single
    ## retained eigenvalue produces a 1x1 scaling matrix: diag(x) with
    ## a length-one x would build an identity matrix of order round(x)
    ## and silently mis-scale the embedding.
    sqrt_vals = sqrt(Beig$values[1:smallest_positive_eval_idx])
    Y = Beig$vectors[, 1:smallest_positive_eval_idx, drop = FALSE] %*%
        diag(sqrt_vals, nrow = length(sqrt_vals))
    colnames(Y) = paste("Axis", 1:ncol(Y), sep = "")
    out = list()
    out$d2 = diag(jdj)
    out$jdj = jdj
    out$delta = delta
    out$X = X
    out$Lambda = Beig$values
    out$Y = Y
    return(out)
}
#' Creates distance function and corresponding derivative function
#'
#' @param dist_fn Either a string naming one of the supported distances
#'     ("euclidean", "manhattan-pos", "manhattan-neg", "maximum-pos",
#'     "maximum-neg") or a function.
#' @param dist_deriv Either NULL (when dist_fn is a string) or a function.
#'
#' @return A list containing two functions, dist_fn and
#'     dist_deriv. dist_fn takes a matrix and computes a distance
#'     between the rows. dist_deriv takes two vectors, x and y, and
#'     computes \eqn{\frac{\partial}{\partial y_j}d(x,y)}, j = 1,...,p.
#' @importFrom stats dist
make_dist_fns <- function(dist_fn, dist_deriv) {
    ## is.function() also accepts builtin/primitive functions, which
    ## typeof(.) == "closure" would miss (their typeof is "builtin" or
    ## "special", and the string comparisons below then error on a
    ## function argument); && short-circuits instead of elementwise &
    if(is.function(dist_fn) && is.function(dist_deriv)) {
        return(list(dist_fn = dist_fn, dist_deriv = dist_deriv))
    }
    if(dist_fn == "euclidean") {
        dist_fn = function(x) dist(x, method = "euclidean")
        return(list(dist_fn = dist_fn, dist_deriv = euclidean_dist_deriv))
    } else if(dist_fn == "manhattan-pos") {
        dist_fn = function(x) dist(x, method = "manhattan")
        return(list(dist_fn = dist_fn, dist_deriv = manhattan_dist_deriv_pos))
    } else if(dist_fn == "manhattan-neg") {
        dist_fn = function(x) dist(x, method = "manhattan")
        return(list(dist_fn = dist_fn, dist_deriv = manhattan_dist_deriv_neg))
    } else if(dist_fn == "maximum-pos") {
        dist_fn = function(x) dist(x, method = "maximum")
        return(list(dist_fn = dist_fn, dist_deriv = maximum_dist_deriv_pos))
    } else if(dist_fn == "maximum-neg") {
        dist_fn = function(x) dist(x, method = "maximum")
        return(list(dist_fn = dist_fn, dist_deriv = maximum_dist_deriv_neg))
    } else {
        stop("Unsupported distance")
    }
}
#' Partial derivatives for Euclidean distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}, i.e. the gradient
#'     of the Euclidean distance with respect to y.
euclidean_dist_deriv <- function(x, y) {
    diff = y - x
    squared_norm = sum(diff^2)
    if(squared_norm == 0) {
        ## the distance is not differentiable at y == x; return the
        ## zero vector there
        return(rep(0, length(y)))
    }
    diff / sqrt(squared_norm)
}
#' Partial derivatives for Manhattan distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}: +1 where
#'     y_j >= x_j and -1 where y_j < x_j (ties take the right
#'     one-sided derivative).
manhattan_dist_deriv_pos <- function(x, y) {
    ## d|y_j - x_j| / dy_j is the sign of (y_j - x_j); at ties use +1
    2 * (y >= x) - 1
}
#' Partial derivatives for Manhattan distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}: +1 where
#'     y_j > x_j and -1 where y_j <= x_j (ties take the left
#'     one-sided derivative).
manhattan_dist_deriv_neg <- function(x, y) {
    ## d|y_j - x_j| / dy_j is the sign of (y_j - x_j); at ties use -1
    2 * (y > x) - 1
}
#' Partial derivatives for max distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}. Only coordinates
#'     attaining the maximum absolute difference are nonzero; ties are
#'     resolved with the "positive" convention.
maximum_dist_deriv_pos <- function(x, y) {
    gap = abs(y - x)
    active = which(gap == max(gap))
    out = rep(0, length(x))
    if (length(active) == 1) {
        ## unique maximizer: derivative is the sign of (y - x) there,
        ## taking +1 at a tie y == x
        out[active] = if (y[active] >= x[active]) 1 else -1
    } else {
        ## multiple maximizers: only coordinates with y > x get +1
        out[active] = as.numeric(y[active] > x[active])
    }
    out
}
#' Partial derivatives for max distance
#'
#' @param x A p-vector.
#' @param y A p-vector.
#'
#' @return A p-vector whose jth element is
#'     \eqn{\frac{\partial}{\partial y_{j}} d(x,y)}. Only coordinates
#'     attaining the maximum absolute difference are nonzero; ties are
#'     resolved with the "negative" convention.
maximum_dist_deriv_neg <- function(x, y) {
    gap = abs(y - x)
    active = which(gap == max(gap))
    out = rep(0, length(x))
    if (length(active) == 1) {
        ## unique maximizer: derivative is the sign of (y - x) there,
        ## taking -1 at a tie y == x
        out[active] = if (y[active] <= x[active]) -1 else 1
    } else {
        ## multiple maximizers: only coordinates with y <= x get -1
        out[active] = -as.numeric(y[active] <= x[active])
    }
    out
}
#' Create local biplot axes
#'
#' @param X A data matrix, samples as rows.
#' @param dist Either a string describing one of the supported
#'     distances or a function that takes a matrix and returns the
#'     distances between the rows of the matrix.
#' @param dist_deriv Either NULL (if dist is a string describing one
#'     of the supported distances) or a function that takes two
#'     vectors and computes \eqn{\frac{\partial}{\partial y_j}d(x,y)}.
#' @param k The number of embedding dimensions.
#' @param samples The samples to compute local biplot axes
#'     at. Defaults to all of the original samples.
#' @param new_points New points (not one of the original samples) to
#'     compute local biplot axes at.
#'
#' @return A data frame. Each row describes one LB axis for one
#'     sample. Columns labeled 'Embedding' give the embedding of the
#'     sample in MDS space, columns labeled 'Axis' give the LB axis
#'     for a given sample and variable. Columns labeled 'variable' and
#'     'sample' give the variable and sample for the LB axis values.
#' @export
local_biplot <- function(X, dist, dist_deriv = NULL, k = 2,
                         samples = 1:nrow(X),
                         new_points = list()) {
    ## resolve the distance/derivative pair, embed once, then collect
    ## axes for the original samples and/or any new points
    fns = make_dist_fns(dist, dist_deriv)
    mds = make_mds_matrices(X, fns$dist_fn)
    pieces = list()
    if(length(samples) > 0) {
        pieces$original = compute_lb_samples(mds, fns, k = k, samples = samples)
    }
    if(length(new_points) > 0) {
        pieces$new = compute_lb_new_points(mds, fns, k = k,
                                           new_points = new_points)
    }
    Reduce(rbind, pieces)
}
#' Make a correlation biplot
#'
#' @param X The data matrix, samples in the rows.
#' @param dist A function that takes a matrix and computes the
#'     distance between the rows.
#' @param plotting_axes The MDS embedding axes to plot.
#' @importFrom stats cor
#' @export
correlation_biplot <- function(X, dist, plotting_axes = 1:2) {
    ## biplot axes are the correlations between the raw variables and
    ## the requested MDS embedding coordinates
    mds = make_mds_matrices(X, dist)
    axes = cor(X, mds$Y)[, plotting_axes]
    colnames(axes) = sprintf("Axis%i", plotting_axes)
    return(axes)
}
#' Embed new points in an MDS diagram
#'
#' @param mds_matrices The output from make_mds_matrices.
#' @param new_points A matrix, each row corresponding to a new sample, each column corresponding to a variable.
#' @param dist_fn A function taking a matrix as its argument, returns distances between the rows.
#' @param old_new_dist_mat A matrix with n_old rows and n_new columns containing distances between the old and new points. If this argument is non-null, these distances will be used instead of computing the distances using dist_fn.
#'
#' @return A matrix containing the embedding locations of the new points. Rows correspond to new samples, columns correspond to embedding dimensions.
#' @export
embed_new_points <- function(mds_matrices, new_points = NULL, dist_fn = NULL, old_new_dist_mat = NULL) {
    if(is.null(old_new_dist_mat)) {
        d2_old_new = get_distances(mds_matrices$X, new_points, dist_fn)^2
    } else {
        d2_old_new = old_new_dist_mat^2
    }
    ## Gower-style out-of-sample formula: f(z) = .5 (d2 - delta(z))' Y Lambda^{-1}
    a = -d2_old_new + mds_matrices$d2
    lambda_inv = mds_matrices$Lambda[1:ncol(mds_matrices$Y)]^(-1)
    ## pass `nrow =` so a one-dimensional embedding still yields a 1x1
    ## scaling matrix: diag(x) with a length-one x builds an identity
    ## matrix of order round(x) instead
    fz = .5 * t(a) %*% mds_matrices$Y %*% diag(lambda_inv, nrow = length(lambda_inv))
    return(fz)
}
#' Get distances between a set of old points and a set of new points
#'
#' @param old_points n_old x n_variables matrix with rows containing the old points.
#' @param new_points n_new x n_variables matrix with rows containing the new point.
#' @param dist_fn A function taking as input a matrix, returning the distances between the rows of the matrix.
#'
#' @return A matrix with n_old rows and n_new columns containing the
#'     distance between each pair of old and new points.
get_distances <- function(old_points, new_points, dist_fn) {
    X = rbind(old_points, new_points)
    # this is doing a lot of extra computation, but it's reasonably
    # likely that making all the new data structures you would need to
    # avoid that would be even worse
    dists = as.matrix(dist_fn(X))
    n_old = nrow(old_points)
    n_new = nrow(new_points)
    ## drop = FALSE keeps the documented n_old x n_new matrix shape
    ## even when there is a single old or new point
    return(dists[1:n_old, (n_old + 1):(n_old + n_new), drop = FALSE])
}
#' Add new points to an ordination object created by phyloseq
#'
#' @param ps_old A phyloseq object containing the original samples used for phyloseq::ordinate.
#' @param ps_old_and_new A phyloseq object containing both the old samples (those used for phyloseq::ordinate) and the new samples to be added to the embedding diagram.
#' @param distance The same argument that was passed to phyloseq::ordinate
#' @return A data frame with one row per new sample: its embedding
#'     coordinates (columns "Axis1", "Axis2", ...) followed by that
#'     sample's sample_data columns.
#' @export
add_to_phyloseq_ordination <- function(ps_old, ps_old_and_new, distance) {
    ## new samples are those present in ps_old_and_new but not in ps_old
    new_sample_names = setdiff(sample_names(ps_old_and_new), sample_names(ps_old))
    new_sample_indices = which(sample_names(ps_old_and_new) %in% new_sample_names)
    old_sample_indices = which(!(sample_names(ps_old_and_new) %in% new_sample_names))
    ## NOTE(review): `distance(...)` below resolves to phyloseq's
    ## distance function, not the string-valued `distance` argument (R
    ## skips non-function bindings when looking up a call target) --
    ## confirm phyloseq is attached wherever this is called
    old_and_new_distance = as.matrix(distance(ps_old_and_new, method = distance))[old_sample_indices, new_sample_indices]
    old_distance = distance(ps_old, method = distance)
    ## run MDS on the old samples only, then project the new samples
    ## into that fixed embedding
    mds_matrices = make_mds_matrices(dist_mat = old_distance)
    embeddings = embed_new_points(mds_matrices, old_new_dist_mat = old_and_new_distance)
    embeddings = data.frame(embeddings)
    names(embeddings) = paste("Axis", 1:ncol(embeddings), sep = "")
    embeddings = data.frame(embeddings, sample_data(ps_old_and_new)[new_sample_indices,])
    return(embeddings)
}
|
#' Sliced Latin Hypercube Design (SLHD)
#'
#' \code{SLHD} returns a \code{n} by \code{k} LHD matrix generated by improved two-stage algorithm
#'
#' @param n A positive integer, which stands for the number of rows (or run size).
#' @param k A positive integer, which stands for the number of columns (or factor size).
#' @param t A positive integer, which stands for the number of slices. \code{n}/\code{t} must be a positive integer, that is, n is divisible by t. \code{t} must be smaller or equal to \code{k} when \code{n} is 9 or larger. \code{t} must be smaller than \code{k} when \code{n} is smaller than 9. Otherwise, the function will never stop. The default is set to be 1.
#' @param N A positive integer, which stands for the number of iterations. The default is set to be 10. A large value of \code{N} will result a high CPU time, and it is recommended to be no greater than 500.
#' @param T0 A positive number, which stands for the user-defined initial temperature. The default is set to be 10.
#' @param rate A positive percentage, which stands for temperature decrease rate, and it should be in (0,1). For example, rate=0.25 means the temperature decreases by 25\% each time. The default is set to be 10\%.
#' @param Tmin A positive number, which stands for the minimium temperature allowed. When current temperature becomes smaller or equal to \code{Tmin}, the stopping criterion for current loop is met. The default is set to be 1.
#' @param Imax A positive integer, which stands for the maximum perturbations the algorithm will try without improvements before temperature is reduced. The default is set to be 3. For the computation complexity consideration, \code{Imax} is recommended to be smaller or equal to 5.
#' @param OC An optimality criterion. The default setting is "phi_p", and it could be one of the following: "phi_p", "AvgAbsCor", "MaxAbsCor", "MaxProCriterion".
#' @param p A positive integer, which is the parameter in the phi_p formula, and \code{p} is prefered to be large. The default is set to be 15.
#' @param q The default is set to be 1, and it could be either 1 or 2. If \code{q} is 1, \code{dij} is the Manhattan (rectangular) distance. If \code{q} is 2, \code{dij} is the Euclidean distance.
#' @param stage2 A logic input argument, and it could be either FALSE or TRUE. If \code{stage2} is FALSE (the default setting), \code{SLHD} will only implement the first stage of the algorithm. If \code{stage2} is TRUE, \code{SLHD} will implement the whole algorithm.
#' @param maxtime A positive number, which indicates the expected maximum CPU time given by user, and it is measured by minutes. For example, maxtime=3.5 indicates the CPU time will be no greater than three and half minutes. The default is set to be 5.
#'
#' @return If all inputs are logical, then the output will be a \code{n} by \code{k} LHD. As mentioned from the original paper, the first stage plays a much more important role since it optimizes the slice level. More resources should be given to the first stage if computational budgets are limited. Let m=n/t, where m is the number of rows for each slice, if (m)^k >> n, the second stage becomes optional. That is the reason why we add a \code{stage2} parameter to let users decide if the second stage is needed.
#'
#' @references Ba, S., Myers, W.R., and Brenneman, W.A. (2015) Optimal Sliced Latin Hypercube Designs. \emph{Technometrics}, \strong{57}, 479-487.
#'
#' @examples
#' #generate a 5 by 3 maximin distance LHD with the default setting
#' try=SLHD(n=5,k=3)
#' try
#' phi_p(try) #calculate the phi_p of "try".
#'
#' #generate a 5 by 3 maximin distance LHD with stage II
#' #let stage2=TRUE and other input are the same as above
#' try2=SLHD(n=5,k=3,stage2=TRUE)
#' try2
#' phi_p(try2) #calculate the phi_p of "try2".
#'
#' #Another example
#' #generate a 8 by 4 nearly orthogonal LHD
#' try3=SLHD(n=8,k=4,OC="AvgAbsCor",stage2=TRUE)
#' try3
#' AvgAbsCor(try3) #calculate the average absolute correlation.
#' @export
SLHD=function(n,k,t=1,N=10,T0=10,rate=0.1,Tmin=1,Imax=3,OC="phi_p",p=15,q=1,stage2=FALSE,maxtime=5){
#n and k are the rs and fa.
#t: number of slices. n/t must be an integer, that is, n is divisible by t.
#N: maximum number of iterations.
#T0: initial temperature
#rate: temperature decrease rate. 0<rate<1
#Tmin: minimum temperature for each iteration, Tmin > 0
#Imax:# of perturbations the algorithm will try without improvements before Temperature is reduced
#OC: optimality criterion, the default is "phi_p", along with default p and q
#stage2: if stage II of the algorithm will be performed. The default is FALSE (not-run).
#Note that stage I plays a much more important role in the whole algorithm. More resources should
#be allocated for stage I if computational budgets are tight.
maxtime=maxtime*60 #convert minutes to seconds
timeALL1=NULL #record all cpu time
timeALL2=NULL #record all cpu time
C=1 #Initialize counter index
m=n/t #the rs for each slice.
#step 1 on page 481 starts
Y=rep(0,n*k)
dim(Y)=c(m,k,t)
#Independtly generate t small LHD for t slices. Each slice is an m by k LHD.
for (i in 1:t) {
Y[,,i]=rLHD(m,k)
}
#stack the t slices to form an n by k LHD matrix.
X=NULL
for (i in 1:t) {
X=rbind(X,Y[,,i])
}
#step 1 on page 481 ends
#The improved two-stage algorithms starts
#Stage I starts
SX=0 #the S(X) in stage I
DR=NULL #this is the duplicated rows index, which is used to record row numbers.
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=X,i=i,j=j,q=q)
if (I==0){SX=SX+1;DR=c(DR,i,j)}
}
}
DR=unique(DR)
#step ii starts: if S(X)>0
while (SX>0){
rrow1=sample(DR,1)
slice=ceiling(rrow1/m) #locate the rrow1's slice
#select another row within the same slice
rrow2=sample(seq(from=slice*m,by=-1,length.out=m)[seq(from=slice*m,by=-1,length.out=m)!=rrow1],1)
rcol=sample(1:k,1)
Xnew=X
e1=Xnew[rrow1,rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow2,rcol]
Xnew[rrow1,rcol]=e2
Xnew[rrow2,rcol]=e1
SXnew=0 #S(Xnew)
DRnew=NULL #DR for Xnew
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1;DRnew=c(DRnew,i,j)}
}
}
DRnew=unique(DRnew)
if (SXnew<SX){X=Xnew;SX=SXnew;DR=DRnew} #this is step iii
}
#step ii and iii ends
Xbest=X;TP=T0;Flag=1
if(OC=="phi_p"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=phi_p(X=Xnew,p=p,q=q)
b=phi_p(X=X,p=p,q=q)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=phi_p(X=Xbest,p=p,q=q)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=phi_p(X=Xnew,p=p,q=q)
b=phi_p(X=X,p=p,q=q)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=phi_p(X=Xbest,p=p,q=q)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
if(OC=="AvgAbsCor"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=AvgAbsCor(X=Xnew)
b=AvgAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=AvgAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=AvgAbsCor(X=Xnew)
b=AvgAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=AvgAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
if(OC=="MaxAbsCor"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=MaxAbsCor(X=Xnew)
b=MaxAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=MaxAbsCor(X=Xnew)
b=MaxAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
if(OC=="MaxProCriterion"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=MaxProCriterion(X=Xnew)
b=MaxProCriterion(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxProCriterion(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=MaxProCriterion(X=Xnew)
b=MaxProCriterion(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxProCriterion(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
avgtime1=round(mean(timeALL1),2)
iterations1=length(timeALL1)
if(stage2==TRUE){
avgtime2=round(mean(timeALL2),2)
iterations2=length(timeALL2)
}
close(progressbar)
print(paste0("average CPU time per iteration for Stage I is: ", avgtime1, " seconds"))
if(stage2==TRUE){print(paste0("average CPU time per iteration for Stage II is: ", avgtime2, " seconds"))}
print(paste0("the number of iterations completed for Stage I is: ", iterations1))
if(stage2==TRUE){print(paste0("the number of iterations completed for Stage II is: ", iterations2))}
print(paste0("the elements in design matrix is scaled to be 1 to ", n))
Xbest
}
| /R/SLHD.R | no_license | cran/LHD | R | false | false | 31,028 | r | #' Sliced Latin Hypercube Design (SLHD)
#'
#' \code{SLHD} returns a \code{n} by \code{k} LHD matrix generated by improved two-stage algorithm
#'
#' @param n A positive integer, which stands for the number of rows (or run size).
#' @param k A positive integer, which stands for the number of columns (or factor size).
#' @param t A positive integer, which stands for the number of slices. \code{n}/\code{t} must be a positive integer, that is, n is divisible by t. \code{t} must be smaller than or equal to \code{k} when \code{n} is 9 or larger. \code{t} must be smaller than \code{k} when \code{n} is smaller than 9. Otherwise, the function will never stop. The default is set to be 1.
#' @param N A positive integer, which stands for the number of iterations. The default is set to be 10. A large value of \code{N} will result a high CPU time, and it is recommended to be no greater than 500.
#' @param T0 A positive number, which stands for the user-defined initial temperature. The default is set to be 10.
#' @param rate A positive percentage, which stands for temperature decrease rate, and it should be in (0,1). For example, rate=0.25 means the temperature decreases by 25\% each time. The default is set to be 10\%.
#' @param Tmin A positive number, which stands for the minimum temperature allowed. When the current temperature becomes smaller than or equal to \code{Tmin}, the stopping criterion for the current loop is met. The default is set to be 1.
#' @param Imax A positive integer, which stands for the maximum number of perturbations the algorithm will try without improvement before the temperature is reduced. The default is set to be 5. For computational complexity considerations, \code{Imax} is recommended to be smaller than or equal to 5.
#' @param OC An optimality criterion. The default setting is "phi_p", and it could be one of the following: "phi_p", "AvgAbsCor", "MaxAbsCor", "MaxProCriterion".
#' @param p A positive integer, which is the parameter in the phi_p formula, and \code{p} is prefered to be large. The default is set to be 15.
#' @param q The default is set to be 1, and it could be either 1 or 2. If \code{q} is 1, \code{dij} is the Manhattan (rectangular) distance. If \code{q} is 2, \code{dij} is the Euclidean distance.
#' @param stage2 A logic input argument, and it could be either FALSE or TRUE. If \code{stage2} is FALSE (the default setting), \code{SLHD} will only implement the first stage of the algorithm. If \code{stage2} is TRUE, \code{SLHD} will implement the whole algorithm.
#' @param maxtime A positive number, which indicates the expected maximum CPU time given by user, and it is measured by minutes. For example, maxtime=3.5 indicates the CPU time will be no greater than three and half minutes. The default is set to be 5.
#'
#' @return If all inputs are logical, then the output will be a \code{n} by \code{k} LHD. As mentioned in the original paper, the first stage plays a much more important role since it optimizes the slice level. More resources should be given to the first stage if computational budgets are limited. Let m=n/t, where m is the number of rows for each slice; if (m)^k >> n, the second stage becomes optional. That is the reason why we add a \code{stage2} parameter to let users decide if the second stage is needed.
#'
#' @references Ba, S., Myers, W.R., and Brenneman, W.A. (2015) Optimal Sliced Latin Hypercube Designs. \emph{Technometrics}, \strong{57}, 479-487.
#'
#' @examples
#' #generate a 5 by 3 maximin distance LHD with the default setting
#' try=SLHD(n=5,k=3)
#' try
#' phi_p(try) #calculate the phi_p of "try".
#'
#' #generate a 5 by 3 maximin distance LHD with stage II
#' #let stage2=TRUE and other input are the same as above
#' try2=SLHD(n=5,k=3,stage2=TRUE)
#' try2
#' phi_p(try2) #calculate the phi_p of "try2".
#'
#' #Another example
#' #generate a 8 by 4 nearly orthogonal LHD
#' try3=SLHD(n=8,k=4,OC="AvgAbsCor",stage2=TRUE)
#' try3
#' AvgAbsCor(try3) #calculate the average absolute correlation.
#' @export
SLHD=function(n,k,t=1,N=10,T0=10,rate=0.1,Tmin=1,Imax=3,OC="phi_p",p=15,q=1,stage2=FALSE,maxtime=5){
#n and k are the rs and fa.
#t: number of slices. n/t must be an integer, that is, n is divisible by t.
#N: maximum number of iterations.
#T0: initial temperature
#rate: temperature decrease rate. 0<rate<1
#Tmin: minimum temperature for each iteration, Tmin > 0
#Imax:# of perturbations the algorithm will try without improvements before Temperature is reduced
#OC: optimality criterion, the default is "phi_p", along with default p and q
#stage2: if stage II of the algorithm will be performed. The default is FALSE (not-run).
#Note that stage I plays a much more important role in the whole algorithm. More resources should
#be allocated for stage I if computational budgets are tight.
maxtime=maxtime*60 #convert minutes to seconds
timeALL1=NULL #record all cpu time
timeALL2=NULL #record all cpu time
C=1 #Initialize counter index
m=n/t #the run size (number of rows) for each slice.
#step 1 on page 481 starts
Y=rep(0,n*k)
dim(Y)=c(m,k,t)
#Independently generate t small LHDs for t slices. Each slice is an m by k LHD.
for (i in 1:t) {
Y[,,i]=rLHD(m,k)
}
#stack the t slices to form an n by k LHD matrix.
X=NULL
for (i in 1:t) {
X=rbind(X,Y[,,i])
}
#step 1 on page 481 ends
#The improved two-stage algorithms starts
#Stage I starts
SX=0 #the S(X) in stage I
DR=NULL #this is the duplicated rows index, which is used to record row numbers.
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=X,i=i,j=j,q=q)
if (I==0){SX=SX+1;DR=c(DR,i,j)}
}
}
DR=unique(DR)
#step ii starts: if S(X)>0
while (SX>0){
rrow1=sample(DR,1)
slice=ceiling(rrow1/m) #locate the rrow1's slice
#select another row within the same slice
rrow2=sample(seq(from=slice*m,by=-1,length.out=m)[seq(from=slice*m,by=-1,length.out=m)!=rrow1],1)
rcol=sample(1:k,1)
Xnew=X
e1=Xnew[rrow1,rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow2,rcol]
Xnew[rrow1,rcol]=e2
Xnew[rrow2,rcol]=e1
SXnew=0 #S(Xnew)
DRnew=NULL #DR for Xnew
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1;DRnew=c(DRnew,i,j)}
}
}
DRnew=unique(DRnew)
if (SXnew<SX){X=Xnew;SX=SXnew;DR=DRnew} #this is step iii
}
#step ii and iii ends
Xbest=X;TP=T0;Flag=1
if(OC=="phi_p"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=phi_p(X=Xnew,p=p,q=q)
b=phi_p(X=X,p=p,q=q)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=phi_p(X=Xbest,p=p,q=q)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=phi_p(X=Xnew,p=p,q=q)
b=phi_p(X=X,p=p,q=q)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=phi_p(X=Xbest,p=p,q=q)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
if(OC=="AvgAbsCor"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=AvgAbsCor(X=Xnew)
b=AvgAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=AvgAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=AvgAbsCor(X=Xnew)
b=AvgAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=AvgAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
if(OC=="MaxAbsCor"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=MaxAbsCor(X=Xnew)
b=MaxAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=MaxAbsCor(X=Xnew)
b=MaxAbsCor(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxAbsCor(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
if(OC=="MaxProCriterion"){
progressbar = utils::txtProgressBar(min = 0, max = N, style = 3)
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
#at this point, S(X)==0 already.
#step iv starts
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
#step iv ends
SXnew=0 #S(Xnew)
#step v starts
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
while (SXnew>0) {
rt=sample(1:t,1) #randomly select a slice from the current X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
SXnew=0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
I=dij(X=Xnew,i=i,j=j,q=q)
if (I==0){SXnew=SXnew+1}
}
}
}
#step v ends
#step vi starts
a=MaxProCriterion(X=Xnew)
b=MaxProCriterion(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxProCriterion(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
#step vi ends
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL1=c(timeALL1,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
#Stage I ends
#step 2 on page 481 starts
if(t>1){
pi_l=rep(0,n) #the \Pi_l for l=1, ... , m
dim(pi_l)=c(t,1,m)
for (i in 1:m) {
pi_l[,,i]=seq(from=(i-1)*t+1,to=i*t,1)
}
for (j in 1:k) {
for (i in 1:m) {
Xbest[,j][Xbest[,j]==i]=sample(pi_l[,,i])*100
}
}
Xbest=Xbest/100
}
#step 2 on page 481 ends
#Stage II starts
if (stage2==TRUE){
C=1
X=Xbest
TP=T0;Flag=1
while (C<=N) {
time0=Sys.time()
while(Flag==1 & TP>Tmin){
Flag=0;I=1
while (I<=Imax) {
z=stats::runif(1,0,1) #step i
#step ii
if (z<=0){
rt=sample(1:t,1) #randomly select a slice from X
rcol=sample(1:k,1) #randomly choose a column
rrow=sample(seq(from=rt*m,by=-1,length.out=m),2) #randomly choose 2 rows from slice rt
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
#step iii
if (z>0){
rcol=sample(1:k,1) #randomly choose a column
if(t>1){
rl=sample(1:m,1) #randomly choose a l, where l=1, ..., m
re=sample(pi_l[,,rl],2) #randomly choose 2 elements from pi_rl that will be exchanged
Xnew=X
rrow=c(which(Xnew[,rcol]==re[1]),which(Xnew[,rcol]==re[2]))
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
if(t==1){
rrow=sample(1:n,2) #if there is only one slice
Xnew=X
e1=Xnew[rrow[1],rcol] #exchange 2 elements to form Xnew
e2=Xnew[rrow[2],rcol]
Xnew[rrow[1],rcol]=e2
Xnew[rrow[2],rcol]=e1
}
}
#step iv
a=MaxProCriterion(X=Xnew)
b=MaxProCriterion(X=X)
if (a<b){X=Xnew;Flag=1}
if (a>=b){
prob=exp((b-a)/TP)
draw=sample(c(0,1),1,prob=c(1-prob,prob)) #draw==1 means replace
if(draw==1){X=Xnew;Flag=1}
} #step 5 ends here
c=MaxProCriterion(X=Xbest)
if (a<c){Xbest=Xnew;I=1}
if (a>=c){I=I+1}
}
TP=TP*(1-rate)
}
time1=Sys.time()
timediff=time1-time0
timeALL2=c(timeALL2,timediff)
##########progress bar codes
utils::setTxtProgressBar(progressbar, C)
##########
timeALL=sum(timeALL1)+sum(timeALL2)
if(as.numeric(timeALL+timediff)<=maxtime){C=C+1}
if(as.numeric(timeALL+timediff)>maxtime){C=N+1}
TP=T0;Flag=1
}
}
#Stage II ends
}
avgtime1=round(mean(timeALL1),2)
iterations1=length(timeALL1)
if(stage2==TRUE){
avgtime2=round(mean(timeALL2),2)
iterations2=length(timeALL2)
}
close(progressbar)
print(paste0("average CPU time per iteration for Stage I is: ", avgtime1, " seconds"))
if(stage2==TRUE){print(paste0("average CPU time per iteration for Stage II is: ", avgtime2, " seconds"))}
print(paste0("the number of iterations completed for Stage I is: ", iterations1))
if(stage2==TRUE){print(paste0("the number of iterations completed for Stage II is: ", iterations2))}
print(paste0("the elements in design matrix is scaled to be 1 to ", n))
Xbest
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{drawPriorGamma}
\alias{drawPriorGamma}
\title{Draw prior distributions of location parameters (gamma estimates)}
\usage{
drawPriorGamma(combat.estimates, col = NULL, xlim = c(-3, 1.5), ylim = c(0, 3))
}
\arguments{
\item{combat.estimates}{estimates list from neuroCombat output}
\item{col}{Character or numeric vector specifying site colors. If NULL, default colors will be used.}
\item{xlim}{x-axis limits}
\item{ylim}{y-axis limits}
}
\value{
Nothing. A plot will be produced as side effect.
}
\description{
Draw prior distributions of location parameters (gamma estimates).
}
| /man/drawPriorGamma.Rd | no_license | Tibetan-eagle/neuroCombat_Rpackage | R | false | true | 674 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz.R
\name{drawPriorGamma}
\alias{drawPriorGamma}
\title{Draw prior distributions of location parameters (gamma estimates)}
\usage{
drawPriorGamma(combat.estimates, col = NULL, xlim = c(-3, 1.5), ylim = c(0, 3))
}
\arguments{
\item{combat.estimates}{estimates list from neuroCombat output}
\item{col}{Character or numeric vector specifying site colors. If NULL, default colors will be used.}
\item{xlim}{x-axis limits}
\item{ylim}{y-axis limits}
}
\value{
Nothing. A plot will be produced as side effect.
}
\description{
Draw prior distributions of location parameters (gamma estimates).
}
|
# source("http://bioconductor.org/biocLite.R")
# source("https://bioconductor.org/biocLite.R")
# biocLite("Biostrings")
# install.packages(c("R.oo","compoisson","R.methodsS3"))
# install.packages("/Users/arives/Downloads/phylosim_2.1.1.tar.gz")
library(readr)
library(phylosim)
library(ape)
library(Biostrings)
library(phylolm)
#rank_fh is a function that takes a dataframe and add a rank column according to one column (given the column number)
#such that the numbers above 0 are ranked from high to low as 1,2,3,4 while numbers below 0 are ranked from
#low to high as -1, -2, -3, -4. 0 is ranked as 0.
rank_fh <- function(df, number) {
  # Sort decreasing so positive values come first; drop = FALSE keeps a
  # one-column data frame from collapsing to a plain vector (the original
  # indexing without it would break for single-column input).
  df_ord <- df[order(df[, number], decreasing = TRUE), , drop = FALSE]
  vals <- df_ord[, number]
  pos <- seq_len(nrow(df_ord))
  # Vectorised replacement of the original element-by-element loop:
  # positives rank 1, 2, ... (largest first); zeros rank 0; negatives
  # rank pos - nrow - 1, i.e. -1, -2, ... (smallest magnitude first).
  df_ord$rank <- ifelse(vals > 0, pos,
                        ifelse(vals == 0, 0, pos - nrow(df_ord) - 1))
  return(df_ord)
}
setwd("~/Box Sync/Traveller Box/Huan GWAS/rpo_GY84_simulations_folder")
k <- 9
# Input tree
# cat("(((((MAF_11821_03:0.412,(MAF_GM_0981:0.582,MTB_95_0545:0.550):0.184):0.212,((MTB_K21:0.242,(MTB_K67:0.280,MTB_K93:0.234):0.096):0.108,(MTB_T17:0.624,MTB_T92:0.584):0.150):0.114):0.096,(((((MTB_00_1695:0.510,MTB_T67:0.518):0.182,MTB_T85:0.278):0.154,(MTB_98_1833:0.392,MTB_M4100A:0.072):0.142):0.134,(MTB_4783_04:0.574,MTB_GM_1503:0.596):0.166):0.118,MTB_91_0079:0.576):0.094):0.074,(MTB_K37:0.518,MTB_K49:0.288):0.070):0.138,MTB_H37Rv:0.256);",
# file = "TBSimulation.nwk")
phy <- read.tree(file = "TBSimulation.nwk")
p <- Ntip(phy) #number of tips/species
# sort(phy$tip.label)
# This matches the species list from kmercount
# MAF_11821_03
# MAF_GM_0981
# MTB_00_1695
# MTB_4783_04
# MTB_91_0079
# MTB_95_0545
# MTB_98_1833
# MTB_GM_1503
# MTB_H37Rv
# MTB_K21
# MTB_K37
# MTB_K49
# MTB_K67
# MTB_K93
# MTB_M4100A
# MTB_T17
# MTB_T67
# MTB_T85
# MTB_T92
Vphy <- vcv(phy)[sort(phy$tip.label), sort(phy$tip.label)]
C <- as.matrix(Vphy)
C <- C/det(C)^(1/p) #some transformation of C to make the determinant = 1, which should make calculations easier
iC <- solve(C) #inverse of C
ones <- array(1, c(p, 1)) # array of ones for the intercept
for (repn in 50) {
sim <- readRDS(file = paste("sim_", repn, ".rds", sep = ""))
alignment.names <- names(sim$alignment[1, ])
index <- (1:1172)[alignment.names == 450 & !is.na(alignment.names)]
#Load in the shared kmer table
phylokmer <- read.delim(paste("rpoB_GY84_", repn, "/phylokmer.dat", sep = ""), header = FALSE)
colnames(phylokmer) <- c("kmer", sort(phy$tip.label))
#Grep the patterns for kmers involving S450 (kmer_list)
#generate kmers including S450 (we do need to run aaf_phylosim.py first)
kmer <- sim$alignment[, (index - k/3 + 1):(index + k/3 - 1)]
kmer_list <- NULL #This does not consider possible deletions ('NA' in some codon)
for (x in 1:p) {
kmers <- paste(kmer[x, ], collapse = "")
for (j in 1:(nchar(kmers) - k + 1)) {
kmer_list <- c(kmer_list, substr(kmers, j, j + k - 1))
}
}
kmer_df <- data.frame(kmer_list)
kmer_df <- rename(kmer_df, c(kmer_list = "kmer"))
kmer_df$position <- rep(c((nchar(kmers) - k + 1):1), p)
#one kmer won't have different positions, too short
kmer_df <- unique(kmer_df)
#add their reverse complement counterparts (kmer_count only uses whichever is alphabetically
#first, the original kmer or its rc.)
rc <- NULL
for (i in 1:nrow(kmer_df)) {
kmer_df <- rbind(kmer_df, data.frame(kmer = as.character(reverseComplement(DNAString(kmer_df$kmer[i]))), position = -kmer_df$position[i]))
}
kmer_df <- unique(kmer_df)
kmer_list_rc <- kmer_df$kmer
S450_kmers <- phylokmer[phylokmer$kmer %in% kmer_list_rc, ]
#Note that less than half of the kmers in kmer_list_rc ends up in S450_kmers because
#1. only the original OR the rc kmer is in phylokmer
#2. only kmers shared at least by two species are in phylokmer
w <- read.csv(file = paste("rpoB_GY84_trait_", repn, ".csv", sep = ""))
##############################################
# scoring
##############################################
#y should be kmer pattern
#X should be the trait
#read in the kmer pattern as Y, and calculate score for each y.
Y <- read.fwf(file = paste("rpoB_GY84_", repn, "_kmerPattern.stats", sep = ""), widths = array(1, c(1, p)), header = F)
colnames(Y) <- sort(phy$tip.label)
#sort the dataframe by tip name, get the serine column with Trues and falses and convert it into 0/1(by *1 or +0)
trait <- w$serine[order(w$tip)]
names(trait) <- w$tip[order(w$tip)]
X <- t(t(trait * 1))
xx <- cbind(ones, X)
threshold <- 3
pattern <- array(NA, c(nrow(Y), 1))
output <- data.frame(pattern, sumy=0, scoreLS=0, scoreGLS=0, scoreGLM=0, scoreLog=0)
for (i in 1:nrow(Y)) {
#for (i in 1:500) {
y <- t(Y[i, ])
output$pattern[i] <- paste(y, collapse = "")
sumy <- sum(y) #sum of the pattern
output$sumy[i] <- sumy
if (sumy >= threshold & sumy <= (p - threshold)) {
##################
# LS
XX <- t(xx) %*% xx
XY <- t(xx) %*% y
b <- solve(XX, XY)
h <- y - (xx %*% b)
MSE <- t(h) %*% iC %*% h/(p - 2)
iXX <- solve(XX)
bSE <- (MSE * iXX[2, 2])^0.5
output$scoreLS[i] <- b[2]/bSE
##################
# GLS
XiCX <- t(xx) %*% iC %*% xx
XiCY <- t(xx) %*% iC %*% y
b <- solve(XiCX, XiCY)
h <- y - (xx %*% b)
MSE <- t(h) %*% iC %*% h/(p - 2)
iXiCX <- solve(XiCX)
bSE <- (MSE * iXiCX[2, 2])^0.5
output$scoreGLS[i] <- b[2]/bSE
##################
# GLM
mu = mean(y)
B.init = matrix(c(log(mu/(1-mu)),0.0001), ncol=1)
show(c(i, 1, sumy, system.time(z.GLM <- binaryPGLM(y ~ X, phy = phy, s2 = 1, B.init = B.init))))
if(z.GLM$convergeflag == "converged") {
output$scoreGLM[i] <- z.GLM$B[2]/z.GLM$B.se[2]
} else {
show('not converged')
}
##################
# GLMM
# show(c(i, 1, sumy, system.time(z.GLMM <- binaryPGLMM(y ~ X, phy=phy, s2.init = 0.001))))
# if(z.GLMM$convergeflag == "converged") {
# output$scoreGLM[i] <- z.GLMM$B[2]/z.GLMM$B.se[2]
# } else {
# show('not converged')
# }
##################
# Log
show(c(i, 2, sumy, system.time(z.Log <- phyloglm(y ~ X, phy=phy))))
output$scoreLog[i] <- z.Log$coefficients[2]/z.Log$sd[2]
}
}
output <- output[1:i,]
# show(plot(output[abs(output[,3]) < 1000,3:6]))
# show(plot(output[,4:6]))
ranked.LS <- rank_fh(output, 3)[,c(1,3,7)]
ranked.GLS <- rank_fh(output, 4)[,c(1,4,7)]
ranked.GLM <- rank_fh(output, 5)[,c(1,5,7)]
ranked.Log <- rank_fh(output, 6)[,c(1,6,7)]
colnames(ranked.LS)[3] <- 'rankLS'
colnames(ranked.GLS)[3] <- 'rankGLS'
colnames(ranked.GLM)[3] <- 'rankGLM'
colnames(ranked.Log)[3] <- 'rankLog'
#read in the kmer pattern again, focus on the frequency this time.
kmerPattern.stats <- read.table(file = paste("rpoB_GY84_", repn, "_kmerPattern.stats", sep = ""), colClasses = c("character", "integer"), col.names = c("pattern", "freq"))
total_score <- merge(ranked.LS, kmerPattern.stats, by = "pattern", all.x = T)
total_score <- merge(ranked.GLS, total_score, by = "pattern", all.x = T)
total_score <- merge(ranked.GLM, total_score, by = "pattern", all.x = T)
total_score <- merge(ranked.Log, total_score, by = "pattern", all.x = T)
write.csv(total_score, paste("rpoB_GY84_", as.character(repn), "_scores.csv", sep = ""), row.names = FALSE, )
#Get scores for S450 containing kmers.
kft <- 1 #kmer frequency threshold
pattern450 <- array(0, nrow(S450_kmers))
for (ii in 1:nrow(S450_kmers)) {
pattern450_j <- array(1, ncol(S450_kmers) - 1)
for (jj in 2:ncol(S450_kmers)) {
if (S450_kmers[ii, ][jj] < kft) {
pattern450_j[jj - 1] <- 0
}
}
pattern450[ii] <- paste(pattern450_j, collapse = "")
}
S450_kmers$pattern <- pattern450
S450_kmers_score <- merge(S450_kmers, total_score, by = "pattern", all.x = T)
#Note that some of the kmers does not have scores because only patterns with more than two 1 or 0 are scored.
#Merge S450_kmer_score with kmer_df
output_450 <- merge(S450_kmers_score, kmer_df, by = "kmer", all.x = T)
output_450_light <- subset(output_450, select = c("kmer", "position", "pattern", "freq", "scoreLS", "rankLS", "scoreGLS", "rankGLS", "scoreGLM", "rankGLM", "scoreLog", "rankLog"))
write.csv(output_450_light, paste("rpoB_GY84_", as.character(repn), "_450summary.csv", sep = ""), row.names = FALSE, )
outputplot <- output[abs(output$scoreLS) < 1000,]
outputplot <- outputplot[order(abs(outputplot$scoreGLS)),]
col450 <- is.element(outputplot$pattern, output_450_light$pattern)
show(plot(outputplot[,3:6], col=(1+col450), pch=(20 - col450), cex=(.5 + 1*col450)))
pdf(paste("rpoB_GY84_", as.character(repn), "_450summary.pdf", sep = ""), width=6, height=6)
par(mfrow=c(2,2))
for(ii in c(6,8,10,12)) hist(output_450_light[,ii], main=colnames(output_450_light)[ii], xlab="rank")
dev.off()
par(mfrow=c(1,1))
} | /obsolete/TI_score_tests_18Sep15_4scoringcomparison.R | no_license | fanhuan/AAF-GWAS | R | false | false | 8,828 | r | # source("http://bioconductor.org/biocLite.R")
# source("https://bioconductor.org/biocLite.R")
# biocLite("Biostrings")
# install.packages(c("R.oo","compoisson","R.methodsS3"))
# install.packages("/Users/arives/Downloads/phylosim_2.1.1.tar.gz")
library(readr)
library(phylosim)
library(ape)
library(Biostrings)
library(phylolm)
#rank_fh is a function that takes a dataframe and add a rank column according to one column (given the column number)
#such that the numbers above 0 are ranked from high to low as 1,2,3,4 while numbers below 0 are ranked from
#low to high as -1, -2, -3, -4. 0 is ranked as 0.
rank_fh <- function(df, number) {
  # Sort decreasing so positive values come first; drop = FALSE keeps a
  # one-column data frame from collapsing to a plain vector (the original
  # indexing without it would break for single-column input).
  df_ord <- df[order(df[, number], decreasing = TRUE), , drop = FALSE]
  vals <- df_ord[, number]
  pos <- seq_len(nrow(df_ord))
  # Vectorised replacement of the original element-by-element loop:
  # positives rank 1, 2, ... (largest first); zeros rank 0; negatives
  # rank pos - nrow - 1, i.e. -1, -2, ... (smallest magnitude first).
  df_ord$rank <- ifelse(vals > 0, pos,
                        ifelse(vals == 0, 0, pos - nrow(df_ord) - 1))
  return(df_ord)
}
setwd("~/Box Sync/Traveller Box/Huan GWAS/rpo_GY84_simulations_folder")
k <- 9
# Input tree
# cat("(((((MAF_11821_03:0.412,(MAF_GM_0981:0.582,MTB_95_0545:0.550):0.184):0.212,((MTB_K21:0.242,(MTB_K67:0.280,MTB_K93:0.234):0.096):0.108,(MTB_T17:0.624,MTB_T92:0.584):0.150):0.114):0.096,(((((MTB_00_1695:0.510,MTB_T67:0.518):0.182,MTB_T85:0.278):0.154,(MTB_98_1833:0.392,MTB_M4100A:0.072):0.142):0.134,(MTB_4783_04:0.574,MTB_GM_1503:0.596):0.166):0.118,MTB_91_0079:0.576):0.094):0.074,(MTB_K37:0.518,MTB_K49:0.288):0.070):0.138,MTB_H37Rv:0.256);",
# file = "TBSimulation.nwk")
phy <- read.tree(file = "TBSimulation.nwk")
p <- Ntip(phy) #number of tips/species
# sort(phy$tip.label)
# This matches the species list from kmercount
# MAF_11821_03
# MAF_GM_0981
# MTB_00_1695
# MTB_4783_04
# MTB_91_0079
# MTB_95_0545
# MTB_98_1833
# MTB_GM_1503
# MTB_H37Rv
# MTB_K21
# MTB_K37
# MTB_K49
# MTB_K67
# MTB_K93
# MTB_M4100A
# MTB_T17
# MTB_T67
# MTB_T85
# MTB_T92
Vphy <- vcv(phy)[sort(phy$tip.label), sort(phy$tip.label)]
C <- as.matrix(Vphy)
C <- C/det(C)^(1/p) #some transformation of C to make the determinant = 1, which should make calculations easier
iC <- solve(C) #inverse of C
ones <- array(1, c(p, 1)) # array of ones for the intercept
for (repn in 50) {
sim <- readRDS(file = paste("sim_", repn, ".rds", sep = ""))
alignment.names <- names(sim$alignment[1, ])
index <- (1:1172)[alignment.names == 450 & !is.na(alignment.names)]
#Load in the shared kmer table
phylokmer <- read.delim(paste("rpoB_GY84_", repn, "/phylokmer.dat", sep = ""), header = FALSE)
colnames(phylokmer) <- c("kmer", sort(phy$tip.label))
#Grep the patterns for kmers involving S450 (kmer_list)
#generate kmers including S450 (we do need to run aaf_phylosim.py first)
kmer <- sim$alignment[, (index - k/3 + 1):(index + k/3 - 1)]
kmer_list <- NULL #This does not consider possible deletions ('NA' in some codon)
for (x in 1:p) {
kmers <- paste(kmer[x, ], collapse = "")
for (j in 1:(nchar(kmers) - k + 1)) {
kmer_list <- c(kmer_list, substr(kmers, j, j + k - 1))
}
}
kmer_df <- data.frame(kmer_list)
kmer_df <- rename(kmer_df, c(kmer_list = "kmer"))
kmer_df$position <- rep(c((nchar(kmers) - k + 1):1), p)
#one kmer won't have different positions, too short
kmer_df <- unique(kmer_df)
#add their reverse complement counterparts (kmer_count only uses whichever is alphabetically
#first, the original kmer or its rc.)
rc <- NULL
for (i in 1:nrow(kmer_df)) {
kmer_df <- rbind(kmer_df, data.frame(kmer = as.character(reverseComplement(DNAString(kmer_df$kmer[i]))), position = -kmer_df$position[i]))
}
kmer_df <- unique(kmer_df)
kmer_list_rc <- kmer_df$kmer
S450_kmers <- phylokmer[phylokmer$kmer %in% kmer_list_rc, ]
#Note that less than half of the kmers in kmer_list_rc ends up in S450_kmers because
#1. only the original OR the rc kmer is in phylokmer
#2. only kmers shared at least by two species are in phylokmer
w <- read.csv(file = paste("rpoB_GY84_trait_", repn, ".csv", sep = ""))
##############################################
# scoring
##############################################
#y should be kmer pattern
#X should be the trait
#read in the kmer pattern as Y, and calculate score for each y.
Y <- read.fwf(file = paste("rpoB_GY84_", repn, "_kmerPattern.stats", sep = ""), widths = array(1, c(1, p)), header = F)
colnames(Y) <- sort(phy$tip.label)
#sort the dataframe by tip name, get the serine column with Trues and falses and convert it into 0/1(by *1 or +0)
trait <- w$serine[order(w$tip)]
names(trait) <- w$tip[order(w$tip)]
X <- t(t(trait * 1))
xx <- cbind(ones, X)
threshold <- 3
pattern <- array(NA, c(nrow(Y), 1))
output <- data.frame(pattern, sumy=0, scoreLS=0, scoreGLS=0, scoreGLM=0, scoreLog=0)
for (i in 1:nrow(Y)) {
#for (i in 1:500) {
y <- t(Y[i, ])
output$pattern[i] <- paste(y, collapse = "")
sumy <- sum(y) #sum of the pattern
output$sumy[i] <- sumy
if (sumy >= threshold & sumy <= (p - threshold)) {
##################
# LS
XX <- t(xx) %*% xx
XY <- t(xx) %*% y
b <- solve(XX, XY)
h <- y - (xx %*% b)
MSE <- t(h) %*% iC %*% h/(p - 2)
iXX <- solve(XX)
bSE <- (MSE * iXX[2, 2])^0.5
output$scoreLS[i] <- b[2]/bSE
##################
# GLS
XiCX <- t(xx) %*% iC %*% xx
XiCY <- t(xx) %*% iC %*% y
b <- solve(XiCX, XiCY)
h <- y - (xx %*% b)
MSE <- t(h) %*% iC %*% h/(p - 2)
iXiCX <- solve(XiCX)
bSE <- (MSE * iXiCX[2, 2])^0.5
output$scoreGLS[i] <- b[2]/bSE
##################
# GLM
mu = mean(y)
B.init = matrix(c(log(mu/(1-mu)),0.0001), ncol=1)
show(c(i, 1, sumy, system.time(z.GLM <- binaryPGLM(y ~ X, phy = phy, s2 = 1, B.init = B.init))))
if(z.GLM$convergeflag == "converged") {
output$scoreGLM[i] <- z.GLM$B[2]/z.GLM$B.se[2]
} else {
show('not converged')
}
##################
# GLMM
# show(c(i, 1, sumy, system.time(z.GLMM <- binaryPGLMM(y ~ X, phy=phy, s2.init = 0.001))))
# if(z.GLMM$convergeflag == "converged") {
# output$scoreGLM[i] <- z.GLMM$B[2]/z.GLMM$B.se[2]
# } else {
# show('not converged')
# }
##################
# Log
show(c(i, 2, sumy, system.time(z.Log <- phyloglm(y ~ X, phy=phy))))
output$scoreLog[i] <- z.Log$coefficients[2]/z.Log$sd[2]
}
}
output <- output[1:i,]
# show(plot(output[abs(output[,3]) < 1000,3:6]))
# show(plot(output[,4:6]))
ranked.LS <- rank_fh(output, 3)[,c(1,3,7)]
ranked.GLS <- rank_fh(output, 4)[,c(1,4,7)]
ranked.GLM <- rank_fh(output, 5)[,c(1,5,7)]
ranked.Log <- rank_fh(output, 6)[,c(1,6,7)]
colnames(ranked.LS)[3] <- 'rankLS'
colnames(ranked.GLS)[3] <- 'rankGLS'
colnames(ranked.GLM)[3] <- 'rankGLM'
colnames(ranked.Log)[3] <- 'rankLog'
#read in the kmer pattern again, focus on the frequency this time.
kmerPattern.stats <- read.table(file = paste("rpoB_GY84_", repn, "_kmerPattern.stats", sep = ""), colClasses = c("character", "integer"), col.names = c("pattern", "freq"))
total_score <- merge(ranked.LS, kmerPattern.stats, by = "pattern", all.x = T)
total_score <- merge(ranked.GLS, total_score, by = "pattern", all.x = T)
total_score <- merge(ranked.GLM, total_score, by = "pattern", all.x = T)
total_score <- merge(ranked.Log, total_score, by = "pattern", all.x = T)
write.csv(total_score, paste("rpoB_GY84_", as.character(repn), "_scores.csv", sep = ""), row.names = FALSE, )
#Get scores for S450 containing kmers.
kft <- 1 #kmer frequency threshold
pattern450 <- array(0, nrow(S450_kmers))
for (ii in 1:nrow(S450_kmers)) {
pattern450_j <- array(1, ncol(S450_kmers) - 1)
for (jj in 2:ncol(S450_kmers)) {
if (S450_kmers[ii, ][jj] < kft) {
pattern450_j[jj - 1] <- 0
}
}
pattern450[ii] <- paste(pattern450_j, collapse = "")
}
S450_kmers$pattern <- pattern450
S450_kmers_score <- merge(S450_kmers, total_score, by = "pattern", all.x = T)
#Note that some of the kmers does not have scores because only patterns with more than two 1 or 0 are scored.
#Merge S450_kmer_score with kmer_df
output_450 <- merge(S450_kmers_score, kmer_df, by = "kmer", all.x = T)
output_450_light <- subset(output_450, select = c("kmer", "position", "pattern", "freq", "scoreLS", "rankLS", "scoreGLS", "rankGLS", "scoreGLM", "rankGLM", "scoreLog", "rankLog"))
write.csv(output_450_light, paste("rpoB_GY84_", as.character(repn), "_450summary.csv", sep = ""), row.names = FALSE, )
outputplot <- output[abs(output$scoreLS) < 1000,]
outputplot <- outputplot[order(abs(outputplot$scoreGLS)),]
col450 <- is.element(outputplot$pattern, output_450_light$pattern)
show(plot(outputplot[,3:6], col=(1+col450), pch=(20 - col450), cex=(.5 + 1*col450)))
pdf(paste("rpoB_GY84_", as.character(repn), "_450summary.pdf", sep = ""), width=6, height=6)
par(mfrow=c(2,2))
for(ii in c(6,8,10,12)) hist(output_450_light[,ii], main=colnames(output_450_light)[ii], xlab="rank")
dev.off()
par(mfrow=c(1,1))
} |
## 21st Dec 2016
## BISCUIT R implementation
## Start_file with user inputs
##
## Code author SP
###
###
############## packages required ##############
library(MCMCpack)
library(mvtnorm)
library(ellipse)
library(coda)
library(Matrix)
library(Rtsne)
library(gtools)
library(foreach)
library(doParallel)
library(doSNOW)
library(snow)
library(lattice)
library(MASS)
library(bayesm)
library(robustbase)
library(chron)
library(mnormt)
library(schoolmath)
library(RColorBrewer)
#############################################
input_file_name <- "expression_mRNA_17-Aug-2014.txt";
input_data_tab_delimited <- TRUE; #set to TRUE if the input data is tab-delimited
is_format_genes_cells <- TRUE; #set to TRUE if input data has rows as genes and columns as cells
choose_cells <- 3000; #comment if you want all the cells to be considered
choose_genes <- 150; #comment if you want all the genes to be considered
gene_batch <- 50; #number of genes per batch, therefore num_batches = choose_genes (or numgenes)/gene_batch. Max value is 150
num_iter <- 20; #number of iterations, choose based on data size.
num_cores <- detectCores() - 4; #number of cores for parallel processing. Ensure that detectCores() > 1 for parallel processing to work, else set num_cores to 1.
z_true_labels_avl <- TRUE; #set this to TRUE if the true labels of cells are available, else set it to FALSE. If TRUE, ensure to populate 'z_true' with the true labels in 'BISCUIT_process_data.R'
num_cells_batch <- 1000; #set this to 1000 if input number of cells is in the 1000s, else set it to 100.
alpha <- 1; #DPMM dispersion parameter. A higher value spins more clusters whereas a lower value spins lesser clusters.
output_folder_name <- "output"; #give a name for your output folder.
## call BISCUIT
source("BISCUIT_Rfunction/BISCUIT_main.R")
| /AlgorithmWeb/upload/BISCUIT/start_file.R | no_license | TJZhiyeWang/ScClusterWebsite | R | false | false | 1,824 | r | ## 21st Dec 2016
## BISCUIT R implementation
## Start_file with user inputs
##
## Code author SP
###
###
############## packages required ##############
library(MCMCpack)
library(mvtnorm)
library(ellipse)
library(coda)
library(Matrix)
library(Rtsne)
library(gtools)
library(foreach)
library(doParallel)
library(doSNOW)
library(snow)
library(lattice)
library(MASS)
library(bayesm)
library(robustbase)
library(chron)
library(mnormt)
library(schoolmath)
library(RColorBrewer)
#############################################
input_file_name <- "expression_mRNA_17-Aug-2014.txt";
input_data_tab_delimited <- TRUE; #set to TRUE if the input data is tab-delimited
is_format_genes_cells <- TRUE; #set to TRUE if input data has rows as genes and columns as cells
choose_cells <- 3000; #comment if you want all the cells to be considered
choose_genes <- 150; #comment if you want all the genes to be considered
gene_batch <- 50; #number of genes per batch, therefore num_batches = choose_genes (or numgenes)/gene_batch. Max value is 150
num_iter <- 20; #number of iterations, choose based on data size.
num_cores <- detectCores() - 4; #number of cores for parallel processing. Ensure that detectCores() > 1 for parallel processing to work, else set num_cores to 1.
z_true_labels_avl <- TRUE; #set this to TRUE if the true labels of cells are available, else set it to FALSE. If TRUE, ensure to populate 'z_true' with the true labels in 'BISCUIT_process_data.R'
num_cells_batch <- 1000; #set this to 1000 if input number of cells is in the 1000s, else set it to 100.
alpha <- 1; #DPMM dispersion parameter. A higher value spins more clusters whereas a lower value spins lesser clusters.
output_folder_name <- "output"; #give a name for your output folder.
## call BISCUIT
source("BISCUIT_Rfunction/BISCUIT_main.R")
|
/data/scr_12/chap7/chap7.R | no_license | yugitti/bayze | R | false | false | 2,898 | r | ||
# cleaning data in r
library(dplyr)
df <- ...
class()
dim()
names()
str()
glimplse(df)
summary(df)
head()
tail()
# print()
# na ----------------
is.na(df)
any(is.na(df))
# rows with no missing values
complete.cases(df)
df[complete.cases(df), ]
na.omit(df)
# outliers --------------
| /datacamp/eda.r | no_license | felix-ha/r_ds | R | false | false | 292 | r | # cleaning data in r
library(dplyr)
df <- ...
class()
dim()
names()
str()
glimplse(df)
summary(df)
head()
tail()
# print()
# na ----------------
is.na(df)
any(is.na(df))
# rows with no missing values
complete.cases(df)
df[complete.cases(df), ]
na.omit(df)
# outliers --------------
|
# Build the error payload returned to the caller when the Neo4j API
# response carries errors: a two-element list with the first error's
# code and message.
err_return <- function(api_content) {
  first_err <- api_content$errors[[1]]
  list(
    error_code = first_err$code,
    error_message = first_err$message
  )
}
# Replace every NULL leaf of a nested list with NA, one depth level at a time.
#
# Bug fix: the original loop bound was written `1:vec_depth(results) - 1`,
# which parses as `(1:vec_depth(results)) - 1` because `:` binds tighter
# than binary `-`, i.e. it iterated over 0, 1, ..., depth - 1. The intended
# range is 1 .. depth - 1, expressed here with seq_len(), which is also safe
# when the input is empty or atomic (depth < 2 means no iterations).
#
# NOTE(review): parse_api_results() in this file uses replaceInList()
# instead of this helper.
#
# @param results A (possibly nested) list, typically parsed JSON content.
# @return The same structure with every NULL element replaced by NA.
null_to_na <- function(results){
  n_levels <- max(vec_depth(results) - 1, 0)
  for (i in seq_len(n_levels)) {
    results <- modify_depth(
      results, i, function(x){
        if (is.null(x)){
          NA
        } else {
          x
        }
      }, .ragged = TRUE
    )
  }
  results
}
# Shape "row"-typed Neo4j results for the caller.
#
# res_data       list of records; each record holds a $row list of values
# res_names      character vector of column names returned by the API
# include_stats  if TRUE, append the `stats` tibble to the returned list
# stats          tibble of query statistics (may be NULL)
# meta           if TRUE, append `res_meta` to the returned list
# res_meta       list of per-column meta tibbles (only forced when meta = TRUE)
# format         "std" -> one tibble per returned column;
#                "table" -> a single tibble under $results
parse_row <- function(
res_data,
res_names,
include_stats,
stats,
meta,
res_meta,
format
){
if (format == "std") {
# Type = row
# Special case for handling arrays
# browser()
# First try the nested-array path: tibble-ify at depth 3, regroup the
# values by column with transpose(), then row-bind each column.
res <- attempt::attempt({
purrr::map_depth(res_data, 3, tibble::as_tibble) %>%
purrr::map(purrr::flatten) %>%
purrr::transpose() %>%
purrr::map(rbindlist_to_tibble)
}, silent = TRUE)
# Fall back to a flat single-tibble parse when the depth-3 mapping
# fails (i.e. rows hold plain scalars rather than arrays).
if (class(res)[1] == "try-error") {
res <- flatten(res_data) %>%
purrr::map_dfr(purrr::flatten_dfc) %>%
list()
}
# Name the per-column results after the API column names
if (length(res_data) != 0){
res <- res %>%
setNames(res_names)
}
if (include_stats) {
res <- c(res, list(stats = stats))
} else {
class(res) <- c("neo", class(res))
}
if (meta) {
res <- c(res, res_meta)
}
# NOTE(review): when include_stats is FALSE the "neo" class was already
# prepended above, so it ends up duplicated here -- presumably harmless;
# confirm against the "neo" print method.
class(res) <- c("neo", class(res))
return(res)
} else if (format == "table") {
# "table" format: one row per record, columns named after res_names
res <- res_data %>% purrr::map("row")
res <- res %>% map(magrittr::set_names, res_names) %>% map_dfr(as_tibble)
res <- list(results = res)
if (include_stats) {
res <- c(res, list(stats = stats))
}
if (meta) {
res <- c(res, res_meta)
}
return(res)
}
}
# Shape "graph"-typed Neo4j results into nodes / relationships tibbles.
#
# res_data       list of records, each carrying a $graph sublist
# res_names      accepted for signature parity with parse_row(); unused here
# include_stats  if TRUE, add the `stats` tibble to the result
# stats          tibble of query statistics (may be NULL)
# meta, res_meta accepted but never referenced in this function
#
# Returns a compacted list with $nodes and/or $relationships tibbles,
# classed "neo" (an empty list when the query yielded no graph data).
parse_graph <- function(
res_data,
res_names,
include_stats,
stats,
meta,
res_meta
){
# Get the graph sublist
graph <- map(res_data, "graph")
# Get the nodes
nodes <- compact(map(graph, "nodes"))
# Get the relationships
relations <- compact(map(graph, "relationships"))
# Set the tbl upfront
nodes_tbl <- NULL
relations_tbl <- NULL
# Verify that there is something to return; we only warn here and fall
# through, so an empty (unclassed) list is returned below.
if (length(nodes) == 0 & length(relations) == 0) {
message("No graph data found.")
message("Either your call can't be converted to a graph \nor there is no data at all matching your call.")
message("Verify your call or try type = \"row\".")
}
# Do something only if there are nodes
if (length(nodes) != 0) {
# One row per node: id, list-column of labels, list-column of properties
nodes <- purrr::flatten(nodes)
nodes_tbl <- unique(tibble(
id = map_chr(nodes, ~.x$id),
label = map(nodes, ~as.character(.x$labels)),
properties = map(nodes, ~.x$properties)
))
}
# Do something only if there are relations
if (length(relations) != 0) {
# One row per relationship: id, type, endpoints, list-column of properties
relations <- flatten(relations)
relations_tbl <- unique(tibble(
id = as.character(map(relations, ~.x$id)),
type = as.character(map(relations, ~.x$type)),
startNode = as.character(map(relations, ~.x$startNode)),
endNode = as.character(map(relations, ~.x$endNode)),
properties = map(relations, ~.x$properties)
))
}
# Should we include stats? compact() drops whichever of the tibbles is
# still NULL, so the result only carries what was actually found.
if (include_stats) {
res <- compact(
list(
nodes = nodes_tbl,
relationships = relations_tbl,
stats = stats
)
)
class(res) <- c("neo", class(res))
} else {
res <- compact(
list(
nodes = nodes_tbl,
relationships = relations_tbl
)
)
if (length(res) != 0) {
class(res) <- c("neo", class(res))
}
}
# Deduplicate the class vector (the "neo" class may have been added twice)
class(res) <- unique(class(res))
return(res)
}
#' @importFrom httr content
#' @importFrom attempt stop_if
#' @importFrom purrr flatten transpose modify_depth map map_df as_vector map_chr compact flatten_dfr vec_depth map_dfr
#' @importFrom tidyr gather
#' @importFrom stats setNames
#' @importFrom tibble tibble
# Walk a nested list and apply FUN to every non-list leaf, keeping the
# overall structure (and names) intact. The `x[idx] <- list(...)` form is
# deliberate: it preserves elements even when FUN returns NULL.
replaceInList <- function (x, FUN, ...)
{
  if (!is.list(x)) {
    return(FUN(x, ...))
  }
  for (idx in seq_along(x)) {
    x[idx] <- list(replaceInList(x[[idx]], FUN, ...))
  }
  x
}
# Parse the raw httr response from the Neo4j transactional endpoint.
#
# res            httr response object
# type           "row" or "graph" -- selects parse_row() / parse_graph()
# include_stats  forward query statistics in the result
# meta           forward per-column meta information in the result
# format         forwarded to parse_row() ("std" or "table")
#
# Returns the parsed result list, or an error list (code + message) when
# the API reported errors, or just the stats tibble when no data came back
# and include_stats is TRUE.
parse_api_results <- function(res, type, include_stats, meta, format) {
# Get the content as an R list
api_content <- content(res)
# Return the errors if there are any (code + message)
if (length(api_content$errors) > 0) return(err_return(api_content))
# Get the result element
results <- api_content$results[[1]]
# turn the null to NA
# results <- modify_depth(
# results, vec_depth(results) - 1, function(x){
# if (is.null(x)){
# NA
# } else {
# x
# }
# }, .ragged = TRUE
# )
results <- replaceInList(results, function(x) if(is.null(x)) NA else x)
# Get the stats (if any)
if (!is.null(results$stats)) {
stats <- tibble(
type = names(results$stats),
value = as.numeric(results$stats)
)
} else {
stats <- NULL
}
if (length(results$data) == 0) {
message("No data returned.")
# NOTE(review): when include_stats is FALSE we fall through and keep
# parsing the empty data instead of returning -- confirm intentional.
if (include_stats) {
return(stats)
}
}
# Get the name of the columns
res_names <- results$columns
# Get the data
res_data <- results$data
# Pull each record's meta block out into "<column>_meta" tibbles
if (meta) {
res_meta <- map(res_data, "meta") %>%
map(flatten_dfr) %>%
setNames(paste(res_names, "meta", sep = "_"))
}
# Strip the meta element from every record before parsing the values
res_data <- map(res_data, function(x) {
x$meta <- NULL
x
})
if (type == "row") {
return(
parse_row(
res_data,
res_names,
include_stats,
stats,
meta,
# res_meta is undefined when meta = FALSE; R's lazy evaluation makes
# this safe because parse_row() only forces it inside `if (meta)`.
res_meta,
format
)
)
} else if (type == "graph") {
return(
parse_graph(
res_data,
res_names,
include_stats,
stats,
meta,
# same lazy-evaluation caveat as above
res_meta
)
)
}
}
# Row-bind a list of data frames (filling missing columns with NA) and
# hand the stacked result back as a tibble.
rbindlist_to_tibble <- function(l){
  stacked <- data.table::rbindlist(l, fill = TRUE)
  tibble::as_tibble(stacked)
}
| /R/api_result_parsing.R | permissive | gregleleu/neo4r | R | false | false | 5,700 | r | err_return <- function(api_content){
list(
error_code = api_content$errors[[1]]$code,
error_message = api_content$errors[[1]]$message
)
}
null_to_na <- function(results){
for (i in 1:vec_depth(results) - 1){
results <- modify_depth(
results, i, function(x){
if (is.null(x)){
NA
} else {
x
}
}, .ragged = TRUE
)
}
results
}
parse_row <- function(
res_data,
res_names,
include_stats,
stats,
meta,
res_meta,
format
){
if (format == "std") {
# Type = row
# Special case for handling arrays
# browser()
res <- attempt::attempt({
purrr::map_depth(res_data, 3, tibble::as_tibble) %>%
purrr::map(purrr::flatten) %>%
purrr::transpose() %>%
purrr::map(rbindlist_to_tibble)
}, silent = TRUE)
if (class(res)[1] == "try-error") {
res <- flatten(res_data) %>%
purrr::map_dfr(purrr::flatten_dfc) %>%
list()
}
if (length(res_data) != 0){
res <- res %>%
setNames(res_names)
}
if (include_stats) {
res <- c(res, list(stats = stats))
} else {
class(res) <- c("neo", class(res))
}
if (meta) {
res <- c(res, res_meta)
}
class(res) <- c("neo", class(res))
return(res)
} else if (format == "table") {
res <- res_data %>% purrr::map("row")
res <- res %>% map(magrittr::set_names, res_names) %>% map_dfr(as_tibble)
res <- list(results = res)
if (include_stats) {
res <- c(res, list(stats = stats))
}
if (meta) {
res <- c(res, res_meta)
}
return(res)
}
}
parse_graph <- function(
  res_data,
  res_names,
  include_stats,
  stats,
  meta,
  res_meta
){
  # Parse a Neo4j "graph"-formatted result into a list of up to two tibbles
  # (nodes, relationships), optionally followed by the query stats tibble.
  #
  # res_data: list of records from the API, each carrying a $graph element.
  # res_names: result column names (kept for signature parity with parse_row).
  # include_stats / stats: whether to append the query statistics tibble.
  # meta / res_meta: metadata flag and values (not appended by this parser).
  #
  # Returns a list classed "neo" (unless completely empty without stats).
  # Get the graph sublist
  graph <- map(res_data, "graph")
  # Get the nodes
  nodes <- compact(map(graph, "nodes"))
  # Get the relationships
  relations <- compact(map(graph, "relationships"))
  # Set the tbl upfront so compact() below drops whichever part is missing
  nodes_tbl <- NULL
  relations_tbl <- NULL
  # Verify that there is something to return.
  # Fixed: use the scalar `&&` here -- the previous vector `&` happened to
  # work on these length-1 values but errors on longer inputs in R >= 4.3.
  if (length(nodes) == 0 && length(relations) == 0) {
    message("No graph data found.")
    message("Either your call can't be converted to a graph \nor there is no data at all matching your call.")
    message("Verify your call or try type = \"row\".")
  }
  # Build the nodes tibble (one row per unique node) only if nodes exist
  if (length(nodes) != 0) {
    nodes <- purrr::flatten(nodes)
    nodes_tbl <- unique(tibble(
      id = map_chr(nodes, ~.x$id),
      label = map(nodes, ~as.character(.x$labels)),
      properties = map(nodes, ~.x$properties)
    ))
  }
  # Build the relationships tibble only if relationships exist
  if (length(relations) != 0) {
    relations <- flatten(relations)
    relations_tbl <- unique(tibble(
      id = as.character(map(relations, ~.x$id)),
      type = as.character(map(relations, ~.x$type)),
      startNode = as.character(map(relations, ~.x$startNode)),
      endNode = as.character(map(relations, ~.x$endNode)),
      properties = map(relations, ~.x$properties)
    ))
  }
  # Assemble the result; compact() removes NULL components
  if (include_stats) {
    res <- compact(
      list(
        nodes = nodes_tbl,
        relationships = relations_tbl,
        stats = stats
      )
    )
    class(res) <- c("neo", class(res))
  } else {
    res <- compact(
      list(
        nodes = nodes_tbl,
        relationships = relations_tbl
      )
    )
    # A fully empty result stays an unclassed plain list
    if (length(res) != 0) {
      class(res) <- c("neo", class(res))
    }
  }
  # Guard against "neo" being prepended more than once
  class(res) <- unique(class(res))
  return(res)
}
#' @importFrom httr content
#' @importFrom attempt stop_if
#' @importFrom purrr flatten transpose modify_depth map map_df as_vector map_chr compact flatten_dfr vec_depth map_dfr
#' @importFrom tidyr gather
#' @importFrom stats setNames
#' @importFrom tibble tibble
replaceInList <- function (x, FUN, ...)
{
  # Recursively apply FUN to every non-list leaf of a (possibly nested)
  # list, preserving structure, names, and attributes. Non-list inputs are
  # transformed directly.
  if (!is.list(x)) {
    return(FUN(x, ...))
  }
  # `x[] <-` keeps x's attributes; assigning a list RHS also keeps NULL
  # results as elements instead of deleting them.
  x[] <- lapply(x, replaceInList, FUN, ...)
  x
}
parse_api_results <- function(res, type, include_stats, meta, format) {
  # Turn a raw httr response from the Neo4j transactional endpoint into
  # tidy R objects, dispatching to parse_row() or parse_graph().
  #
  # res: httr response object.
  # type: "row" or "graph" -- which parser to use.
  # include_stats: append the query statistics tibble?
  # meta: extract per-record metadata?
  # format: forwarded to parse_row ("std" or "table").
  # Get the content as an R list
  api_content <- content(res)
  # Return the errors if there are any (code + message)
  if (length(api_content$errors) > 0) return(err_return(api_content))
  # Only the first result element is used
  results <- api_content$results[[1]]
  # JSON null arrives as R NULL; convert every NULL leaf to NA so that
  # downstream tibble construction keeps columns aligned.
  results <- replaceInList(results, function(x) if(is.null(x)) NA else x)
  # Get the stats (if any) as a two-column type/value tibble
  if (!is.null(results$stats)) {
    stats <- tibble(
      type = names(results$stats),
      value = as.numeric(results$stats)
    )
  } else {
    stats <- NULL
  }
  if (length(results$data) == 0) {
    message("No data returned.")
    if (include_stats) {
      return(stats)
    }
  }
  # Get the name of the columns
  res_names <- results$columns
  # Get the data
  res_data <- results$data
  # Bind res_meta explicitly. Previously it was only created when meta was
  # TRUE, and the calls below passed an undefined symbol, relying on lazy
  # evaluation never forcing it -- fragile if a parser ever reads it.
  res_meta <- NULL
  if (meta) {
    res_meta <- map(res_data, "meta") %>%
      map(flatten_dfr) %>%
      setNames(paste(res_names, "meta", sep = "_"))
  }
  # Strip the meta element from each record before parsing the values
  res_data <- map(res_data, function(x) {
    x$meta <- NULL
    x
  })
  if (type == "row") {
    return(
      parse_row(
        res_data,
        res_names,
        include_stats,
        stats,
        meta,
        res_meta,
        format
      )
    )
  } else if (type == "graph") {
    return(
      parse_graph(
        res_data,
        res_names,
        include_stats,
        stats,
        meta,
        res_meta
      )
    )
  }
}
rbindlist_to_tibble <- function(l){
  # Stack a list of rows/data.frames (filling missing columns with NA),
  # then hand the result back as a tibble rather than a data.table.
  stacked <- data.table::rbindlist(l, fill = TRUE)
  tibble::as_tibble(stacked)
}
|
# Build factor_survey_vector with clean levels
survey_vector <- c("M", "F", "F", "M", "M")
factor_survey_vector <- factor(survey_vector)
# factor() orders levels alphabetically ("F", "M"), so this replacement
# maps F -> Female and M -> Male, in that order.
levels(factor_survey_vector) <- c("Female", "Male")
# Male
male <- factor_survey_vector[1]
male
# Female
female <- factor_survey_vector[2]
# Battle of the sexes: Male 'larger' than female?
# Yields NA with a warning: '>' is not meaningful for unordered factors.
male > female
# Inspect the argument list of sd() without opening the full help page
args(sd)
# Open the help pages for mean() and read.table()
?mean
?read.table
# Current system date
Sys.Date()
# Read a SAS dataset and take a first look at its structure
library(sas7bdat)
setwd("C:/Model/V2")
ySASData <- read.sas7bdat("model_data_2d2_tw.sas7bdat")
library(tidyverse);library(dplyr);library(ggplot2);library(skimr)
# Column types / preview, then summary statistics per variable
glimpse(ySASData)
skim(ySASData)
| /test/.Rproj.user/2C42C49F/sources/per/t/8A249A64-contents | no_license | li42125msa/R-projects | R | false | false | 575 | # Build factor_survey_vector with clean levels
survey_vector <- c("M", "F", "F", "M", "M")
factor_survey_vector <- factor(survey_vector)
levels(factor_survey_vector) <- c("Female", "Male")
# Male
male <- factor_survey_vector[1]
male
# Female
female <- factor_survey_vector[2]
# Battle of the sexes: Male 'larger' than female?
male > female
args(sd)
?mean
?read.table
Sys.Date()
library(sas7bdat)
setwd("C:/Model/V2")
ySASData <- read.sas7bdat("model_data_2d2_tw.sas7bdat")
library(tidyverse);library(dplyr);library(ggplot2);library(skimr)
glimpse(ySASData)
skim(ySASData)
| |
################################################################################
######### 04: Creating saliva.cortisol datasets ########
################################################################################
######################################################################
##### 1.0 Loading data #####
######################################################################
########## 1.1 Set working directory & download packages ##########
rm(list = ls())
setwd("~/Documents/R/Saliva")
options(stringsAsFactors = F)
library(tidyverse)
hyenadata::update_tables("1.2.88")
library(hyenadata)
load("02.cleaned_data.Rdata")
#Remove Talek samples (only 3 - different ecological context than rest of samples)
saliva.final <- filter(saliva.final, clan != "talek.w")
#Add freeze-thaw data (estimated)
saliva.freezethaw <- read.csv("00.raw_data/saliva_sample_freezethaw.csv")
# Keep only two columns and drop duplicates.
# NOTE(review): positions 2:3 are assumed to be sample_id and assay_date --
# confirm against the raw csv header.
saliva.freezethaw <- unique(saliva.freezethaw[,c(2:3)])
saliva.freezethaw$assay_date <- as.Date(saliva.freezethaw$assay_date, format = "%d-%b-%y")
#Everyone thawed on first day to transfer samples from cryotubes to microcentrifuge tubes [removal of Kimbo]
saliva.transfer <- data.frame(sample_id = unique(saliva.freezethaw$sample_id),
                              assay_date = min(saliva.freezethaw$assay_date))
saliva.freezethaw <- rbind(saliva.freezethaw, saliva.transfer)
saliva.freezethaw <- unique(saliva.freezethaw)
#Create dataset
# freeze_thaw = rank of the assay date within each sample, i.e. the number
# of freeze-thaw cycles the sample had undergone by that assay.
saliva.freezethaw <- saliva.freezethaw %>% group_by(sample_id) %>%
  mutate(freeze_thaw = as.numeric(min_rank(assay_date)))
rm(saliva.transfer)
########## 1.2 hyenadata tables ##########
#Add weaning dates to tblLifeHistory
# Overwrite date / error / event_status of each hyena's existing "Weaned"
# life-history row with the manually curated values from the Sept 2021
# weaning update file. Rows are only updated when exactly one match exists.
data("tblLifeHistory")
weaning <- read.csv("00.raw_data/weaning_update_Sept21.csv", na = "")
weaning$done <- NA #keep track of if-function
for(i in seq_len(nrow(weaning))){   # seq_len() is safe if the file is empty
  id.i <- weaning$id[i]
  date.i <- weaning$WeanDate[i]
  error.i <- weaning$Error.days[i]
  event_status.i <- weaning$status[i]
  # which() drops NA comparisons, so rows with missing ids cannot inflate
  # the match count (the previous nrow(df[mask,]) counted NA rows too).
  row.i <- which(tblLifeHistory$id == id.i & tblLifeHistory$event_code == "Weaned")
  if(length(row.i) == 1){
    tblLifeHistory$date[row.i] <- date.i
    tblLifeHistory$error[row.i] <- error.i
    tblLifeHistory$event_status[row.i] <- event_status.i
    weaning$done[i] <- TRUE
  }
}
# done is TRUE where an update was applied, NA where no unique match existed
summary(weaning$done)
rm(id.i, date.i, error.i, event_status.i, i)
rm(weaning)
#tblLifeHistory
# Standardize the life-history table: strip stray spaces from ids,
# normalize event_code capitalization, and coerce column types.
tblLifeHistory$id <- gsub(" ", "", tblLifeHistory$id)
tblLifeHistory[tblLifeHistory$event_code == "disappeared",]$event_code <- "Disappeared"
tblLifeHistory$event_code <- as.factor(tblLifeHistory$event_code)
tblLifeHistory$error <- as.numeric(tblLifeHistory$error)
tblLifeHistory$event_status <- as.factor(tblLifeHistory$event_status)
# NOTE(review): positional column selection (1:7) -- verify against the
# hyenadata 1.2.88 table layout if the package version changes.
tblLifeHistory <- tblLifeHistory[,1:7]
tblLifeHistory <- filter(tblLifeHistory, !is.na(id) & !is.na(event_code)) #remove 0
tblLifeHistory <- unique(tblLifeHistory)
#tblHyenas
data("tblHyenas")
# Clean the hyena demography table: remove stray spaces in the id fields
# and coerce categorical/numeric columns.
tblHyenas$id <- gsub(" ", "", tblHyenas$id)
tblHyenas$sex <- as.factor(tblHyenas$sex)
tblHyenas$status <- as.factor(tblHyenas$status)
tblHyenas$mom <- gsub(" ", "", tblHyenas$mom)
tblHyenas$dad <- gsub(" ", "", tblHyenas$dad)
tblHyenas$number_littermates <- as.numeric(tblHyenas$number_littermates)
tblHyenas$litter_rank <- as.numeric(tblHyenas$litrank)
# NOTE(review): positional column selection -- confirm the indices against
# the hyenadata 1.2.88 table layout.
tblHyenas <- tblHyenas[,c(1,4,7:12,17,15)]
tblHyenas <- filter(tblHyenas, !is.na(id)) #remove 0
#Adding litter_status variable
# dominant = rank 1 of a multi-cub litter; subordinate = rank 2;
# singleton = no littermates (or littermate count missing).
tblHyenas$litter_status <- NA
tblHyenas[tblHyenas$number_littermates != '0' & !is.na(tblHyenas$number_littermates) &
            !is.na(tblHyenas$litter_rank) & tblHyenas$litter_rank == '1',]$litter_status <- 'dominant'
tblHyenas[tblHyenas$number_littermates != '0' & !is.na(tblHyenas$number_littermates) &
            !is.na(tblHyenas$litter_rank) & tblHyenas$litter_rank == '2',]$litter_status <- 'subordinate'
tblHyenas[is.na(tblHyenas$number_littermates) | tblHyenas$number_littermates == '0',]$litter_status <- 'singleton'
tblHyenas$litter_status <- as.factor(tblHyenas$litter_status)
#Adding clan variable - because all natal animals, use natal clan
# The natal clan is stored as event_data on each hyena's "DOB" life-history
# row; copy it over when exactly one such row exists.
tblHyenas$clan <- NA
clan.i <- NA  # temp; pre-bound so the rm() below always succeeds
for(i in seq_len(nrow(tblHyenas))){   # seq_len() is safe on an empty table
  id.i <- tblHyenas$id[i]
  if(id.i %in% tblLifeHistory$id){
    clan.i <- filter(tblLifeHistory, id == id.i & event_code == "DOB")$event_data
    # Guard: a hyena with no (or duplicated) DOB row would otherwise make
    # the assignment fail with a zero-length replacement error.
    if(length(clan.i) == 1){
      tblHyenas$clan[i] <- clan.i
    }
  }
}
rm(i, id.i, clan.i)
tblHyenas$clan <- as.factor(tblHyenas$clan)
#tblRanks
data("tblFemaleRanks")
# Keep only the three study clans in the Mara Conservancy
tblFemaleRanks <- filter(tblFemaleRanks, clan == "serena.n" | clan == "serena.s" |
                           clan == "happy.zebra")
#tblWeather
data("tblWeather")
tblWeather <- filter(tblWeather, park == "Conservancy")
# NOTE(review): columns 3:5 are assumed to hold the numeric weather
# measurements -- confirm against the hyenadata table layout.
tblWeather[,3:5] <- sapply(tblWeather[,3:5], as.numeric)
#tblPreyCensus
data("tblPreyCensus")
tblPreyCensus <- filter(tblPreyCensus, region == "Conservancy" & !is.na(clan))
# Extract survey year/month for the monthly aggregation done below
tblPreyCensus$year <- as.numeric(format(tblPreyCensus$date, "%Y"))
tblPreyCensus$month <- as.numeric(format(tblPreyCensus$date, "%m"))
# Coerce distance and the species count columns to numeric (positional)
tblPreyCensus[,c(4,9:39)] <- sapply(tblPreyCensus[,c(4,9:39)], as.numeric)
########## 1.3 Calculate prey density ##########
#Data: each clan has 2-4 transects
# Transects are 1.45-5.40 km in length
# Prey is surveyed for 100m on either side of transect
# Transects are surveyed 2x per month (once in first half of month, once in second half)
#Set up column categories
prey_all <- c("thomsons", "impala", "zebra", "wildebeest", "topi", "warthog", "hartebeest", "grants",
              "buffalo", "hippo", "giraffe", "ostrich", "eland", "elephant", "oribi", "reedbuck",
              "waterbuck", "baboon", "bushbuck")
to.sum <- c("distance", prey_all)
#Calculate prey summary data
# Sum distance surveyed and animals counted per clan-month
prey.summary <- tblPreyCensus %>% group_by(region, clan, year, month) %>%
  summarise_at(vars(all_of(to.sum)), sum) #prey density calculated per month
# Total animals counted across all species columns
prey.summary$total_prey_count <- rowSums(prey.summary[,prey_all])
prey.summary$area <- prey.summary$distance*0.2 #prey censused for 100m on either side of road (e.g., census of distance = 1km has an area of 0.2 km2)
prey.summary$prey_density <- as.numeric(prey.summary$total_prey_count/prey.summary$area) #prey density calculated as # animals per km2
# NOTE(review): positional selection (25:27 = the derived columns) is
# fragile if the species list changes -- consider selecting by name.
tblPreyDensity <- prey.summary[,c(1:5,25:27)]
#Sanity check
# Monthly distribution of prey density against the overall median
boxplot(tblPreyDensity$prey_density ~ tblPreyDensity$month, ylim = c(0,1000))
abline(h = median(tblPreyDensity$prey_density))
######################################################################
##### 2.0 Combining data #####
######################################################################
########## 2.1 Combine tables ##########
#Add weather
saliva.horm <- left_join(saliva.final, tblWeather[,c(2:5)], by = "date")
#Add prey density
# Join key is clan + calendar year/month of the sampling date
saliva.horm$year <- as.numeric(format(saliva.horm$date, "%Y"))
saliva.horm$month <- as.numeric(format(saliva.horm$date, "%m"))
saliva.horm <- left_join(saliva.horm, tblPreyDensity[,c(2:4,8)], by = c("clan", "year", "month"))
#Add age
saliva.horm <- left_join(saliva.horm, tblHyenas[,c(1,4)], by = c("hyena_id" = "id"))
# Age at sampling in months (365-day years, 12 equal months)
saliva.horm$age <- (as.numeric(saliva.horm$date - saliva.horm$birthdate)/365)*12 #age in months
#Add sex, litter_status
# NOTE(review): columns selected by position from tblHyenas -- confirm they
# are id, sex, mom, number_littermates, litter_rank, litter_status.
saliva.horm <- left_join(saliva.horm, tblHyenas[,c(1,3,6,8,9,11)], by = c("hyena_id" = "id"))
#Add maternal rank
# Use mom's standardized rank from the sampling year; if she has no rank
# that year, fall back to her rank from the previous year. Rows with no
# usable rank keep the initial NA.
saliva.horm$mat_rank <- NA
for(i in seq_len(nrow(saliva.horm))){   # seq_len() is safe on empty data
  mom.i <- saliva.horm$mom[i]
  year.i <- as.numeric(format(saliva.horm$date[i], "%Y"))
  if(!is.na(mom.i) && !is.na(year.i)){  # scalar condition -> &&
    rank.i <- filter(tblFemaleRanks, id == mom.i & year == year.i)$stan_rank
    #Add mom's rank from previous year if current year rank is not available
    if(length(rank.i) == 0){
      rank.i <- filter(tblFemaleRanks, id == mom.i & year == (year.i-1))$stan_rank
    }
    # Assign only on a unique match; length 0 (or >1) leaves the NA in place
    if(length(rank.i) == 1){
      saliva.horm$mat_rank[i] <- rank.i
    }
  }
}
rm(mom.i, year.i, rank.i, i)
#Reformat clan
# Rebuild the factor to drop unused levels left over from earlier filtering
saliva.horm$clan <- as.factor(as.character(saliva.horm$clan))
#Fix sex
saliva.horm$sex <- as.character(saliva.horm$sex)
saliva.horm[saliva.horm$sex == "u" & !is.na(saliva.horm$sex),]$sex <- NA #if unknown sex - 3 samples
saliva.horm$sex <- as.factor(saliva.horm$sex)
#Fix litter_status
# Make "singleton" the reference level for modeling
saliva.horm$litter_status <- relevel(saliva.horm$litter_status, ref = "singleton")
# NOTE(review): this subset does not exclude NA number_littermates, so NA
# rows would propagate through `[<-` -- confirm number_littermates is
# never NA at this point.
saliva.horm[saliva.horm$number_littermates == 0,]$litter_rank <- 0 #if singleton
#Add litter_id
# Littermates are hyenas sharing both mom and birthdate; the litter id is
# the sorted, hyphen-joined set of littermate ids. Singletons keep NA.
saliva.horm$litter_id <- NA
for(i in seq_len(nrow(saliva.horm))){   # seq_len() is safe on empty data
  if(is.na(saliva.horm$litter_id[i])){
    mom.i <- saliva.horm$mom[i]
    bd.i <- saliva.horm$birthdate[i]
    littermates.i <- sort(unique(filter(saliva.horm, mom == mom.i & birthdate == bd.i)$hyena_id))
    # Only multi-cub litters get an id
    if(length(littermates.i) > 1){
      saliva.horm$litter_id[i] <- paste0(littermates.i, collapse = "-")
    }
  }
}
rm(mom.i, bd.i, littermates.i, i)
#Add weaning date
# Weaning date comes from the "Weaned" life-history row. For cubs that
# died before weaning (status "dbw") use the Disappeared date instead, and
# for cubs still nursing at last sighting ("sugu") use the DFS date.
saliva.horm$wean_date <- NA
for(i in seq_len(nrow(saliva.horm))){   # seq_len() is safe on empty data
  id.i <- saliva.horm$hyena_id[i]
  wean.i <- as.character(filter(tblLifeHistory, event_code == "Weaned" & id == id.i)$date)
  # Guard: a hyena without any "Weaned" row previously triggered a
  # zero-length replacement error here; now it simply stays NA.
  if(length(wean.i) == 1){
    saliva.horm$wean_date[i] <- wean.i
  }
  if(is.na(saliva.horm$wean_date[i])){
    if(nrow(filter(tblLifeHistory, event_code == "Weaned" & id == id.i & event_status == "dbw")) == 1){
      saliva.horm$wean_date[i] <- as.character(filter(tblLifeHistory, event_code == "Disappeared" & id == id.i)$date)
    }
  }
  if(is.na(saliva.horm$wean_date[i])){
    if(nrow(filter(tblLifeHistory, event_code == "Weaned" & id == id.i & event_status == "sugu")) == 1){
      saliva.horm$wean_date[i] <- as.character(filter(tblLifeHistory, event_code == "DFS" & id == id.i)$date)
    }
  }
}
rm(id.i, i)
saliva.horm$wean_date <- as.Date(saliva.horm$wean_date)
# "weaned" if the sample was collected after the weaning date, else "nursing"
saliva.horm$weaning_status <- ifelse(saliva.horm$date > saliva.horm$wean_date, "weaned", "nursing")
#Add AM/PM column for time
# %p gives "AM"/"PM" for the collection start time
saliva.horm$ampm <- format(saliva.horm$start_time, '%p')
saliva.horm$ampm <- as.factor(saliva.horm$ampm)
#Reformat times
# Re-parse each timestamp keeping only hour:minute (placed on today's
# date), so times from different days share a common axis for differencing
saliva.horm$start_time <- as.POSIXct(format(saliva.horm$start_time, format = "%H:%M"),
                                     format = "%H:%M")
saliva.horm$stop_time <- as.POSIXct(format(saliva.horm$stop_time, format = "%H:%M"),
                                    format = "%H:%M")
saliva.horm$ln2_time <- as.POSIXct(format(saliva.horm$ln2_time, format = "%H:%M"),
                                   format = "%H:%M")
#Calculate time lag from sunrise/sunset
# Data on sunrise/sunset from esrl.noaa.gov
# June 21 2020: 6:40 18:42 @ Talek Gate
# Dec 21 2020: 6:31 18:43
# June 21 2020: 6:41 18:43 @ South Mara Bridge
# Dec 21 2020: 6:32 18:45
#Take average time of sunrise/sunset
# Final time: 6:36 18:43
saliva.horm$time_lag <- NA
for(i in 1:nrow(saliva.horm)){
if(saliva.horm$ampm[i] == "AM"){
saliva.horm$time_lag[i] <- as.numeric(difftime(saliva.horm$start_time[i],
as.POSIXct("06:36", format = "%H:%M"),
units = "mins"))
}
if(saliva.horm$ampm[i] == "PM"){
saliva.horm$time_lag[i] <- as.numeric(difftime(saliva.horm$start_time[i],
as.POSIXct("18:43", format = "%H:%M"),
units = "mins"))
}
}
#Calculate time differences
saliva.horm$chew_time <- as.numeric(difftime(saliva.horm$stop_time, saliva.horm$start_time,
                                             units = "mins")) #time chewed on rope in mins
# Round sub-minute chew times up to a 1-minute floor
saliva.horm$chew_time <- ifelse(saliva.horm$chew_time == 0, 1, saliva.horm$chew_time)
saliva.horm$cortisol_assay_diff <- (as.numeric(saliva.horm$cortisol_assay_date -
                                                 saliva.horm$date)/365)*12 #time collection to assay in months
saliva.horm$ln2_diff <- as.numeric(difftime(saliva.horm$ln2_time, saliva.horm$start_time,
                                            units = "mins")) #time collection to freezing in mins
# Negative lags are treated as unknown -- presumably ln2 times recorded
# after midnight (next day); TODO confirm with the field protocol.
saliva.horm$ln2_diff <- ifelse(saliva.horm$ln2_diff < 0, NA, saliva.horm$ln2_diff)
########## 2.2 Select columns ##########
#Cortisol
# Keep only the variables used in the cortisol analyses, drop samples
# without a cortisol measurement
saliva.cortisol <- saliva.horm[,c("saliva_sample_id", "repeated", "clan", "hyena_id",
                                  "date", "start_time", "stop_time", "ampm", "time_lag",
                                  "chew_time", "ln2_time", "ln2_diff", "cortisol_ug_dl",
                                  "cortisol_assay_date", "cortisol_assay_diff", "temp_min",
                                  "temp_max", "precip", "prey_density", "age", "sex",
                                  "mat_rank", "number_littermates", "litter_rank",
                                  "litter_status", "litter_id", "weaning_status")]
saliva.cortisol <- filter(saliva.cortisol, !is.na(cortisol_ug_dl))
#Log-transform to achieve normality
qqnorm(saliva.cortisol$cortisol_ug_dl)
shapiro.test(saliva.cortisol$cortisol_ug_dl)
# Shapiro-Wilk normality test
# W = 0.41647, p-value < 2.2e-16
saliva.cortisol$log_cortisol_ug_dl <- log(saliva.cortisol$cortisol_ug_dl)
qqnorm(saliva.cortisol$log_cortisol_ug_dl)
shapiro.test(saliva.cortisol$log_cortisol_ug_dl)
# Shapiro-Wilk normality test
# W = 0.97428, p-value = 8.969e-05
# approximates normal
#Add freeze-thaw cycles to data
# Join on both sample id and assay date so each assay gets the number of
# freeze-thaw cycles the sample had undergone by that date
saliva.cortisol <- left_join(saliva.cortisol, saliva.freezethaw,
                             by = c("saliva_sample_id" = "sample_id",
                                    "cortisol_assay_date" = "assay_date"))
summary(saliva.cortisol)
########## 2.3 Final clean of dataset ##########
#Filter to only juveniles
# Juveniles = younger than 24 months at sampling
saliva.cortisol <- filter(saliva.cortisol, age < 24) #remove 1
######################################################################
##### 3.0 Save data #####
######################################################################
save(file = "05.saliva_hormones.Rdata", list = c("saliva.cortisol"))
| /04.saliva_hormones.R | no_license | tracymont/hyena_saliva_cortisol | R | false | false | 14,012 | r | ################################################################################
######### 04: Creating saliva.cortisol datasets ########
################################################################################
######################################################################
##### 1.0 Loading data #####
######################################################################
########## 1.1 Set working directory & download packages ##########
rm(list = ls())
setwd("~/Documents/R/Saliva")
options(stringsAsFactors = F)
library(tidyverse)
hyenadata::update_tables("1.2.88")
library(hyenadata)
load("02.cleaned_data.Rdata")
#Remove Talek samples (only 3 - different ecological context than rest of samples)
saliva.final <- filter(saliva.final, clan != "talek.w")
#Add freeze-thaw data (estimated)
saliva.freezethaw <- read.csv("00.raw_data/saliva_sample_freezethaw.csv")
saliva.freezethaw <- unique(saliva.freezethaw[,c(2:3)])
saliva.freezethaw$assay_date <- as.Date(saliva.freezethaw$assay_date, format = "%d-%b-%y")
#Everyone thawed on first day to transfer samples from cryotubes to microcentrifuge tubes [removal of Kimbo]
saliva.transfer <- data.frame(sample_id = unique(saliva.freezethaw$sample_id),
assay_date = min(saliva.freezethaw$assay_date))
saliva.freezethaw <- rbind(saliva.freezethaw, saliva.transfer)
saliva.freezethaw <- unique(saliva.freezethaw)
#Create dataset
saliva.freezethaw <- saliva.freezethaw %>% group_by(sample_id) %>%
mutate(freeze_thaw = as.numeric(min_rank(assay_date)))
rm(saliva.transfer)
########## 1.2 hyenadata tables ##########
#Add weaning dates to tblLifeHistory
data("tblLifeHistory")
weaning <- read.csv("00.raw_data/weaning_update_Sept21.csv", na = "")
weaning$done <- NA #keep track of if-function
for(i in 1:nrow(weaning)){
id.i <- weaning$id[i]
date.i <- weaning$WeanDate[i]
error.i <- weaning$Error.days[i]
event_status.i <- weaning$status[i]
if(nrow(tblLifeHistory[tblLifeHistory$id == id.i & tblLifeHistory$event_code == "Weaned",]) == 1){
tblLifeHistory[tblLifeHistory$id == id.i & tblLifeHistory$event_code == "Weaned",]$date <- date.i
tblLifeHistory[tblLifeHistory$id == id.i & tblLifeHistory$event_code == "Weaned",]$error <- error.i
tblLifeHistory[tblLifeHistory$id == id.i & tblLifeHistory$event_code == "Weaned",]$event_status <- event_status.i
weaning$done[i] <- TRUE
}
}
summary(weaning$done)
rm(id.i, date.i, error.i, event_status.i, i)
rm(weaning)
#tblLifeHistory
tblLifeHistory$id <- gsub(" ", "", tblLifeHistory$id)
tblLifeHistory[tblLifeHistory$event_code == "disappeared",]$event_code <- "Disappeared"
tblLifeHistory$event_code <- as.factor(tblLifeHistory$event_code)
tblLifeHistory$error <- as.numeric(tblLifeHistory$error)
tblLifeHistory$event_status <- as.factor(tblLifeHistory$event_status)
tblLifeHistory <- tblLifeHistory[,1:7]
tblLifeHistory <- filter(tblLifeHistory, !is.na(id) & !is.na(event_code)) #remove 0
tblLifeHistory <- unique(tblLifeHistory)
#tblHyenas
data("tblHyenas")
tblHyenas$id <- gsub(" ", "", tblHyenas$id)
tblHyenas$sex <- as.factor(tblHyenas$sex)
tblHyenas$status <- as.factor(tblHyenas$status)
tblHyenas$mom <- gsub(" ", "", tblHyenas$mom)
tblHyenas$dad <- gsub(" ", "", tblHyenas$dad)
tblHyenas$number_littermates <- as.numeric(tblHyenas$number_littermates)
tblHyenas$litter_rank <- as.numeric(tblHyenas$litrank)
tblHyenas <- tblHyenas[,c(1,4,7:12,17,15)]
tblHyenas <- filter(tblHyenas, !is.na(id)) #remove 0
#Adding litter_status variable
tblHyenas$litter_status <- NA
tblHyenas[tblHyenas$number_littermates != '0' & !is.na(tblHyenas$number_littermates) &
!is.na(tblHyenas$litter_rank) & tblHyenas$litter_rank == '1',]$litter_status <- 'dominant'
tblHyenas[tblHyenas$number_littermates != '0' & !is.na(tblHyenas$number_littermates) &
!is.na(tblHyenas$litter_rank) & tblHyenas$litter_rank == '2',]$litter_status <- 'subordinate'
tblHyenas[is.na(tblHyenas$number_littermates) | tblHyenas$number_littermates == '0',]$litter_status <- 'singleton'
tblHyenas$litter_status <- as.factor(tblHyenas$litter_status)
#Adding clan variable - because all natal animals, use natal clan
tblHyenas$clan <- NA
for(i in 1:nrow(tblHyenas)){
id.i <- tblHyenas$id[i]
if(id.i %in% tblLifeHistory$id){
tblHyenas$clan[i] <- filter(tblLifeHistory, id == id.i & event_code == "DOB")$event_data
}
}
rm(i, id.i)
tblHyenas$clan <- as.factor(tblHyenas$clan)
#tblRanks
data("tblFemaleRanks")
tblFemaleRanks <- filter(tblFemaleRanks, clan == "serena.n" | clan == "serena.s" |
clan == "happy.zebra")
#tblWeather
data("tblWeather")
tblWeather <- filter(tblWeather, park == "Conservancy")
tblWeather[,3:5] <- sapply(tblWeather[,3:5], as.numeric)
#tblPreyCensus
data("tblPreyCensus")
tblPreyCensus <- filter(tblPreyCensus, region == "Conservancy" & !is.na(clan))
tblPreyCensus$year <- as.numeric(format(tblPreyCensus$date, "%Y"))
tblPreyCensus$month <- as.numeric(format(tblPreyCensus$date, "%m"))
tblPreyCensus[,c(4,9:39)] <- sapply(tblPreyCensus[,c(4,9:39)], as.numeric)
########## 1.3 Calculate prey density ##########
#Data: each clan has 2-4 transects
# Transects are 1.45-5.40 km in length
# Prey is surveyed for 100m on either side of transect
# Transects are surveyed 2x per month (once in first half of month, once in second half)
#Set up column categories
prey_all <- c("thomsons", "impala", "zebra", "wildebeest", "topi", "warthog", "hartebeest", "grants",
"buffalo", "hippo", "giraffe", "ostrich", "eland", "elephant", "oribi", "reedbuck",
"waterbuck", "baboon", "bushbuck")
to.sum <- c("distance", prey_all)
#Calculate prey summary data
prey.summary <- tblPreyCensus %>% group_by(region, clan, year, month) %>%
summarise_at(vars(all_of(to.sum)), sum) #prey density calculated per month
prey.summary$total_prey_count <- rowSums(prey.summary[,prey_all])
prey.summary$area <- prey.summary$distance*0.2 #prey censused for 100m on either side of road (e.g., census of distance = 1km has an area of 0.2 km2)
prey.summary$prey_density <- as.numeric(prey.summary$total_prey_count/prey.summary$area) #prey density calculated as # animals per km2
tblPreyDensity <- prey.summary[,c(1:5,25:27)]
#Sanity check
boxplot(tblPreyDensity$prey_density ~ tblPreyDensity$month, ylim = c(0,1000))
abline(h = median(tblPreyDensity$prey_density))
######################################################################
##### 2.0 Combining data #####
######################################################################
########## 2.1 Combine tables ##########
#Add weather
saliva.horm <- left_join(saliva.final, tblWeather[,c(2:5)], by = "date")
#Add prey density
saliva.horm$year <- as.numeric(format(saliva.horm$date, "%Y"))
saliva.horm$month <- as.numeric(format(saliva.horm$date, "%m"))
saliva.horm <- left_join(saliva.horm, tblPreyDensity[,c(2:4,8)], by = c("clan", "year", "month"))
#Add age
saliva.horm <- left_join(saliva.horm, tblHyenas[,c(1,4)], by = c("hyena_id" = "id"))
saliva.horm$age <- (as.numeric(saliva.horm$date - saliva.horm$birthdate)/365)*12 #age in months
#Add sex, litter_status
saliva.horm <- left_join(saliva.horm, tblHyenas[,c(1,3,6,8,9,11)], by = c("hyena_id" = "id"))
#Add maternal rank
saliva.horm$mat_rank <- NA
for(i in 1:nrow(saliva.horm)){
mom.i <- saliva.horm$mom[i]
year.i <- as.numeric(format(saliva.horm$date[i], "%Y"))
if(!is.na(mom.i) & !is.na(year.i)){
rank.i <- filter(tblFemaleRanks, id == mom.i & year == year.i)$stan_rank
#Add mom's rank from current year
if(length(rank.i) == 1){
saliva.horm$mat_rank[i] <- rank.i
}
#Add mom's rank from previous year if current year rank is not available
if(length(rank.i) == 0){
rank.i <- filter(tblFemaleRanks, id == mom.i & year == (year.i-1))$stan_rank
if(length(rank.i) == 1){
saliva.horm$mat_rank[i] <- rank.i
}
if(length(rank.i) == 0){
saliva.horm$mat_rank[i] <- NA
}
}
}
}
rm(mom.i, year.i, rank.i, i)
#Reformat clan
saliva.horm$clan <- as.factor(as.character(saliva.horm$clan))
#Fix sex
saliva.horm$sex <- as.character(saliva.horm$sex)
saliva.horm[saliva.horm$sex == "u" & !is.na(saliva.horm$sex),]$sex <- NA #if unknown sex - 3 samples
saliva.horm$sex <- as.factor(saliva.horm$sex)
#Fix litter_status
saliva.horm$litter_status <- relevel(saliva.horm$litter_status, ref = "singleton")
saliva.horm[saliva.horm$number_littermates == 0,]$litter_rank <- 0 #if singleton
#Add litter_id
saliva.horm$litter_id <- NA
for(i in 1:nrow(saliva.horm)){
if(is.na(saliva.horm$litter_id[i])){
mom.i <- saliva.horm$mom[i]
bd.i <- saliva.horm$birthdate[i]
littermates.i <- sort(unique(filter(saliva.horm, mom == mom.i & birthdate == bd.i)$hyena_id))
if(length(littermates.i) > 1){
saliva.horm$litter_id[i] <- paste0(littermates.i, collapse = "-")
}
}
}
rm(mom.i, bd.i, littermates.i, i)
#Add weaning date
saliva.horm$wean_date <- NA
for(i in 1:nrow(saliva.horm)){
id.i <- saliva.horm$hyena_id[i]
saliva.horm$wean_date[i] <- as.character(filter(tblLifeHistory, event_code == "Weaned" & id == id.i)$date)
if(is.na(saliva.horm$wean_date[i])){
if(nrow(filter(tblLifeHistory, event_code == "Weaned" & id == id.i & event_status == "dbw")) == 1){
saliva.horm$wean_date[i] <- as.character(filter(tblLifeHistory, event_code == "Disappeared" & id == id.i)$date)
}
}
if(is.na(saliva.horm$wean_date[i])){
if(nrow(filter(tblLifeHistory, event_code == "Weaned" & id == id.i & event_status == "sugu")) == 1){
saliva.horm$wean_date[i] <- as.character(filter(tblLifeHistory, event_code == "DFS" & id == id.i)$date)
}
}
}
rm(id.i, i)
saliva.horm$wean_date <- as.Date(saliva.horm$wean_date)
saliva.horm$weaning_status <- ifelse(saliva.horm$date > saliva.horm$wean_date, "weaned", "nursing")
#Add AM/PM column for time
saliva.horm$ampm <- format(saliva.horm$start_time, '%p')
saliva.horm$ampm <- as.factor(saliva.horm$ampm)
#Reformat times
saliva.horm$start_time <- as.POSIXct(format(saliva.horm$start_time, format = "%H:%M"),
format = "%H:%M")
saliva.horm$stop_time <- as.POSIXct(format(saliva.horm$stop_time, format = "%H:%M"),
format = "%H:%M")
saliva.horm$ln2_time <- as.POSIXct(format(saliva.horm$ln2_time, format = "%H:%M"),
format = "%H:%M")
#Calculate time lag from sunrise/sunset
# Data on sunrise/sunset from esrl.noaa.gov
# June 21 2020: 6:40 18:42 @ Talek Gate
# Dec 21 2020: 6:31 18:43
# June 21 2020: 6:41 18:43 @ South Mara Bridge
# Dec 21 2020: 6:32 18:45
#Take average time of sunrise/sunset
# Final time: 6:36 18:43
#Add time lag data
saliva.horm$time_lag <- NA
for(i in 1:nrow(saliva.horm)){
if(saliva.horm$ampm[i] == "AM"){
saliva.horm$time_lag[i] <- as.numeric(difftime(saliva.horm$start_time[i],
as.POSIXct("06:36", format = "%H:%M"),
units = "mins"))
}
if(saliva.horm$ampm[i] == "PM"){
saliva.horm$time_lag[i] <- as.numeric(difftime(saliva.horm$start_time[i],
as.POSIXct("18:43", format = "%H:%M"),
units = "mins"))
}
}
#Calculate time differences
saliva.horm$chew_time <- as.numeric(difftime(saliva.horm$stop_time, saliva.horm$start_time,
units = "mins")) #time chewed on rope in mins
saliva.horm$chew_time <- ifelse(saliva.horm$chew_time == 0, 1, saliva.horm$chew_time)
saliva.horm$cortisol_assay_diff <- (as.numeric(saliva.horm$cortisol_assay_date -
saliva.horm$date)/365)*12 #time collection to assay in months
saliva.horm$ln2_diff <- as.numeric(difftime(saliva.horm$ln2_time, saliva.horm$start_time,
units = "mins")) #time collection to freezing in mins
saliva.horm$ln2_diff <- ifelse(saliva.horm$ln2_diff < 0, NA, saliva.horm$ln2_diff)
########## 2.2 Select columns ##########
#Cortisol
saliva.cortisol <- saliva.horm[,c("saliva_sample_id", "repeated", "clan", "hyena_id",
"date", "start_time", "stop_time", "ampm", "time_lag",
"chew_time", "ln2_time", "ln2_diff", "cortisol_ug_dl",
"cortisol_assay_date", "cortisol_assay_diff", "temp_min",
"temp_max", "precip", "prey_density", "age", "sex",
"mat_rank", "number_littermates", "litter_rank",
"litter_status", "litter_id", "weaning_status")]
saliva.cortisol <- filter(saliva.cortisol, !is.na(cortisol_ug_dl))
#Log-transform to achieve normality
qqnorm(saliva.cortisol$cortisol_ug_dl)
shapiro.test(saliva.cortisol$cortisol_ug_dl)
# Shapiro-Wilk normality test
# W = 0.41647, p-value < 2.2e-16
saliva.cortisol$log_cortisol_ug_dl <- log(saliva.cortisol$cortisol_ug_dl)
qqnorm(saliva.cortisol$log_cortisol_ug_dl)
shapiro.test(saliva.cortisol$log_cortisol_ug_dl)
# Shapiro-Wilk normality test
# W = 0.97428, p-value = 8.969e-05
# approximates normal
#Add freeze-thaw cycles to data
saliva.cortisol <- left_join(saliva.cortisol, saliva.freezethaw,
by = c("saliva_sample_id" = "sample_id",
"cortisol_assay_date" = "assay_date"))
summary(saliva.cortisol)
########## 2.3 Final clean of dataset ##########
#Filter to only juveniles
saliva.cortisol <- filter(saliva.cortisol, age < 24) #remove 1
######################################################################
##### 3.0 Save data #####
######################################################################
save(file = "05.saliva_hormones.Rdata", list = c("saliva.cortisol"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{get.vct.table}
\alias{get.vct.table}
\title{Return a data frame for the vct table.}
\usage{
get.vct.table(db)
}
\arguments{
\item{db}{SQLite3 database file path.}
}
\value{
Data frame of vct table.
}
\description{
Return a data frame for the vct table.
}
\examples{
\dontrun{
vct.table <- get.vct.table(db)
}
}
| /man/get.vct.table.Rd | no_license | strategist922/popcycle | R | false | true | 401 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{get.vct.table}
\alias{get.vct.table}
\title{Return a data frame for the vct table.}
\usage{
get.vct.table(db)
}
\arguments{
\item{db}{SQLite3 database file path.}
}
\value{
Data frame of vct table.
}
\description{
Return a data frame for the vct table.
}
\examples{
\dontrun{
vct.table <- get.vct.table(db)
}
}
|
#This script makes a plot for the first part of assignment for exploratory data analysis ob coursera
data <- read.csv2 ("household_power_consumption.txt", , stringsAsFactor=F)
data$Date <- as.Date(data$Date,"%d/%m/%Y")
data$Global_active_power <- as.numeric(data$Global_active_power)
#subsetting to the data needed
library(plyr)
curr <- subset(data, data$Date >= "2007-02-01" & data$Date <= "2007-02-02")
#getting the historgram
png("plot1.png")
hist(curr$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
| /plot1.R | no_license | taonick/ExData_Plotting1 | R | false | false | 573 | r | #This script makes a plot for the first part of assignment for exploratory data analysis ob coursera
# Plot 1 (histogram of Global Active Power) for the Coursera Exploratory
# Data Analysis assignment.
# Fixes vs. original: dropped the stray empty argument, spelled out
# "stringsAsFactors" in full (the original "stringsAsFactor" only worked via
# partial argument matching), and removed library(plyr), which was never used.
data <- read.csv2("household_power_consumption.txt", stringsAsFactors = FALSE)
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# "?" placeholders become NA here, which is the desired behaviour
data$Global_active_power <- as.numeric(data$Global_active_power)

# subset to the two days required by the assignment; compare Date to Date
# explicitly instead of relying on implicit character-to-Date coercion
curr <- subset(data, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))

# draw the histogram straight to a PNG device (480 x 480 default)
png("plot1.png")
hist(curr$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")
dev.off()
|
### Load libraries ###
#################################################################################################################
library(tidyverse)
library(data.table)
library(readxl)
library(DESeq2)
library(GO.db)
library(topGO)
### Load data ###
#################################################################################################################
# Library metadata; every column except the library name is categorical.
library_info <- fread("./data/Library_Codes.csv") %>%
  mutate_at(vars(-Library_Name), factor)
# Raw count matrices, restricted to the libraries of the matching genotype.
b73_counts <- fread("./data/Gene_Count_Matrix_with_B73_V4_Genome.csv") %>%
  dplyr::select(GeneID, (filter(library_info, Genotype == "B73"))$Library_Name)
mo17_counts <- fread("./data/Gene_Count_Matrix_with_CAU_Mo17_Genome.csv") %>%
  dplyr::select(GeneID, (filter(library_info, Genotype == "Mo17"))$Library_Name)
# GO term ID -> description lookup table.
go_annotation_descriptions <- fread("./data/GO_Annotations.csv")
# One-to-one syntenic ortholog pairs: drop duplicated IDs on either side so a
# gene can appear in at most one pair, then label pairs in B73-ID order.
# (seq_len(nrow(.)) replaces the original seq(1:nrow(.)); same values, but
# the idiomatic form that cannot misfire on an empty table.)
syntenic_gene_pairs <- fread("./data/V4_B73_vs_CAU_Mo17_Syntenic_Orthologs.csv") %>%
  distinct(B73_V4_GeneID, .keep_all = TRUE) %>%
  distinct(Mo17_CAU_GeneID, .keep_all = TRUE) %>%
  arrange(B73_V4_GeneID) %>%
  mutate(Syntenic_Pair = paste("Gene Pair", seq_len(nrow(.))))
### Define functions ###
#################################################################################################################
# Differential expression analysis
# Subset the count matrix to the libraries matching the metadata filters, run
# DESeq2 with the supplied design, and return genes passing both the FDR and
# fold-change thresholds.
#
# Arguments:
#   count_data           data frame with a GeneID column plus one raw-count
#                        column per library
#   genotypes, sections, silk_locations, years, harvest_date
#                        metadata filters applied to the global `library_info`
#   design               DESeq2 design formula (e.g. ~ Section + Year)
#   contrast             c(variable, level_1, level_2); log2FC > 0 means
#                        higher expression in level_1
#   read_depth_cutoff    counts below this value count as "not expressed"
#   prop_libs_with_reads minimum fraction of libraries that must reach cutoff
#   FDR, FC              adjusted-p and fold-change thresholds
# Returns: data frame of significant genes with an Expression_Change label.
DiffExp_func <- function(count_data, genotypes = c("B73", "Mo17"), sections = c("A", "B", "C", "D", "E"), silk_locations = c("Emerged", "Husk-Encased"), years = c("2014", "2015"),
                         harvest_date = c("7/30/14", "7/30/15", "8/2/15", "7/31/15", "8/4/15"), design, contrast, read_depth_cutoff = 5, prop_libs_with_reads = 0.25, FDR = 0.05, FC = 2){
  # BUG FIX: the original condition was `Harvest_Date %in% Harvest_Date`
  # (a tautology comparing the column with itself), so the harvest_date
  # argument was silently ignored; filter against the function argument.
  sample_info <- filter(library_info, UQ(as.name(contrast[1])) %in% contrast[2:3], Genotype %in% genotypes, Section %in% sections, Year %in% years, Location_Of_Silk %in% silk_locations,
                        Harvest_Date %in% harvest_date) %>%
    arrange(UQ(as.name(contrast[1]))) %>% column_to_rownames(var = "Library_Name")
  # keep only the selected libraries, GeneID first
  count_data <- dplyr::select(count_data, GeneID, rownames(sample_info)) %>% arrange(GeneID)
  # a gene is disqualified when it is lowly expressed in more than
  # (1 - prop_libs_with_reads) of the selected libraries; exclude the
  # character GeneID column from the comparison (the original compared it
  # lexically against the numeric cutoff, which only worked by accident)
  max_low_expressed_libraries <- (1 - prop_libs_with_reads) * (ncol(count_data) - 1)
  disqualified_genes <- count_data[rowSums(dplyr::select(count_data, -GeneID) < read_depth_cutoff) > max_low_expressed_libraries, ]$GeneID
  count_data <- count_data %>% column_to_rownames(var = "GeneID")
  ddsDF <- DESeqDataSetFromMatrix(countData = count_data, colData = sample_info, design = design)
  ddsDF <- DESeq(ddsDF)
  `%notIN%` <- Negate(`%in%`)
  # keep significant genes that survive the read-depth disqualification
  res <- as.data.frame(results(ddsDF, contrast = contrast, alpha = FDR)) %>% rownames_to_column(var = "GeneID") %>% filter(padj <= FDR, GeneID %notIN% disqualified_genes)
  # fold-change threshold in both directions; with contrast c(var, A, B),
  # log2FC > 0 means higher in A, i.e. expression *decreases* across the
  # A -> B transition -- hence the seemingly inverted labels below.
  res <- bind_rows(filter(res, log2FoldChange >= log2(FC)), filter(res, log2FoldChange <= -log2(FC))) %>%
    dplyr::select(GeneID:log2FoldChange, padj) %>%
    mutate(Expression_Change = ifelse(log2FoldChange > 0, "Expression Decreases", "Expression Increases"))
  return(res)
}
# Gene enrichment of DE gene sets with TopGO
# Tests the supplied gene set for GO-term enrichment (Fisher's exact test,
# topGO "weight" algorithm) against the gene universe in the mapping file.
#
# Arguments:
#   myInterestingGenes gene IDs of the set to test
#   go_file_path       gene -> GO mapping file readable by topGO::readMappings
#   target_ontologies  one or more of "BP", "CC", "MF"
#   p_value_cutoff     only terms at or below this p-value are kept
# Returns: data frame with GO, Term, Annotated, Significant, Expected,
#          P_Value and Ontology_Type columns, one row per enriched term.
GO_enrichment_func <- function(myInterestingGenes, go_file_path, target_ontologies, p_value_cutoff){
  geneID2GO <- readMappings(file = go_file_path, sep = "\t", IDsep = ",")
  geneNames <- names(geneID2GO)
  # binary factor over the gene universe: 1 = gene of interest, 0 = background
  geneList <- factor(as.integer(geneNames %in% myInterestingGenes))
  names(geneList) <- geneNames
  ontologies <- target_ontologies
  # BUG FIX: the original overwrote go_results on every loop iteration, so
  # when several ontologies were requested only the last one was returned.
  # Collect one result table per ontology and combine them at the end
  # (identical output for the single-ontology calls made in this script).
  results_per_ontology <- vector("list", length(ontologies))
  for(j in seq_along(ontologies)){
    go_data <- new(Class = "topGOdata", ontology = ontologies[j], allGenes = geneList,
                   nodeSize = 10, annot = annFUN.gene2GO, gene2GO = geneID2GO)
    Fisher_Test <- runTest(go_data, algorithm = "weight", statistic = "fisher")
    # report every node whose score is at or below the cutoff
    temp_go_results <- GenTable(go_data, P_Value = Fisher_Test, topNodes = length(Fisher_Test@score[Fisher_Test@score <= p_value_cutoff]))
    colnames(temp_go_results) <- c("GO", "Term", "Annotated", "Significant", "Expected", "P_Value")
    # GenTable renders tiny p-values as "< ..."; strip the "<" before
    # converting to numeric
    results_per_ontology[[j]] <- mutate(temp_go_results, P_Value = as.numeric(sub('<', '', P_Value)),
                                        Ontology_Type = as.character(ontologies[j]))
  }
  go_results <- bind_rows(results_per_ontology)
  return(go_results)
}
### Compute Stats ###
#################################################################################################################
# Differential expression analysis across silk section transitions
# For each adjacent pair of silk sections (A->B ... D->E) run DESeq2 per
# genotype, then classify every DE gene as non-syntenic, syntenic but
# genotype-specific, or commonly DE in both genotypes (the latter split into
# concordant/discordant by comparing the direction of change in B73 vs Mo17).
sections <- c("A", "B", "C", "D", "E")
# NOTE(review): results are rbind-ed onto an initial "" placeholder, which is
# dropped later with [-1, ]; collecting into a list and binding once would
# avoid both the placeholder row and the repeated copies.
degs_across_sections <- ""
'%ni%' <- Negate('%in%')
for(i in 1:(length(sections) - 1)){
# DE genes for this transition, one DESeq2 run per genotype
b73_temp_df <- DiffExp_func(count_data = b73_counts, genotypes = "B73", design = ~ Section + Year, contrast = c("Section", sections[i], sections[i + 1])) %>%
mutate(Transition = paste(sections[i], "to", sections[i + 1], sep = " "))
mo17_temp_df <- DiffExp_func(count_data = mo17_counts, genotypes = "Mo17", design = ~ Section + Year, contrast = c("Section", sections[i], sections[i + 1])) %>%
mutate(Transition = paste(sections[i], "to", sections[i + 1], sep = " "))
# attach syntenic-pair info and classify each B73 DE gene: commonly DE
# syntelog / non-syntenic / syntenic but DE only in this genotype
b73_df <- left_join(dplyr::rename(b73_temp_df, "B73_V4_GeneID" = GeneID), syntenic_gene_pairs, by = "B73_V4_GeneID") %>%
mutate(Gene_Type = ifelse(Mo17_CAU_GeneID %in% mo17_temp_df$GeneID, "Commonly Differentially Expressed Syntelog",
ifelse(Mo17_CAU_GeneID %ni% mo17_temp_df$GeneID & Mo17_CAU_GeneID %ni% syntenic_gene_pairs$Mo17_CAU_GeneID, "Non-syntenic", "Syntenic, genotype specific differential expression"))) %>%
dplyr::select(Syntenic_Pair, B73_V4_GeneID, Mo17_CAU_GeneID, Transition, Expression_Change, Gene_Type)
# same classification from the Mo17 side
mo17_df <- left_join(dplyr::rename(mo17_temp_df, "Mo17_CAU_GeneID" = GeneID), syntenic_gene_pairs, by = "Mo17_CAU_GeneID") %>%
mutate(Gene_Type = ifelse(B73_V4_GeneID %in% b73_temp_df$GeneID, "Commonly Differentially Expressed Syntelog",
ifelse(B73_V4_GeneID %ni% b73_temp_df$GeneID & B73_V4_GeneID %ni% syntenic_gene_pairs$B73_V4_GeneID, "Non-syntenic", "Syntenic, genotype specific differential expression"))) %>%
dplyr::select(Syntenic_Pair, B73_V4_GeneID, Mo17_CAU_GeneID, Transition, Expression_Change, Gene_Type)
# genes DE in both genotypes: compare direction of change to split
# concordant vs discordant pairs
syntenic_compare <- full_join(filter(b73_df, Gene_Type == "Commonly Differentially Expressed Syntelog") %>% dplyr::rename("B73_Expression_Change" = Expression_Change),
filter(mo17_df, Gene_Type == "Commonly Differentially Expressed Syntelog") %>% dplyr::rename("Mo17_Expression_Change" = Expression_Change),
by = c("Syntenic_Pair", "B73_V4_GeneID", "Mo17_CAU_GeneID", "Gene_Type", "Transition")) %>%
mutate(Gene_Type = ifelse(B73_Expression_Change == Mo17_Expression_Change, "Syntenic, concordant differential expression", "Syntenic, discordant differential expression"))
# merge the genotype-specific and common sets back into one table per genotype
b73_df <- bind_rows((dplyr::rename(b73_df, "GeneID" = B73_V4_GeneID) %>% mutate(Genotype = "B73") %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type) %>% filter(Gene_Type != "Commonly Differentially Expressed Syntelog")),
(mutate(syntenic_compare, Genotype = "B73") %>% dplyr::rename("GeneID" = B73_V4_GeneID, "Expression_Change" = B73_Expression_Change) %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type)))
mo17_df <- bind_rows((dplyr::rename(mo17_df, "GeneID" = Mo17_CAU_GeneID) %>% mutate(Genotype = "Mo17") %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type) %>% filter(Gene_Type != "Commonly Differentially Expressed Syntelog")),
(mutate(syntenic_compare, Genotype = "Mo17") %>% dplyr::rename("GeneID" = Mo17_CAU_GeneID, "Expression_Change" = Mo17_Expression_Change) %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type)))
degs_across_sections <- rbind(degs_across_sections, b73_df, mo17_df)
}
# summary counts per genotype/transition/class; "decreases" are negated so
# the bar chart below can draw them beneath the axis
degs_across_sections_summary <- degs_across_sections[-1, ] %>% group_by(Genotype, Transition, Expression_Change, Gene_Type) %>% summarise(Gene_Count = n()) %>%
ungroup() %>% tidyr::complete(Genotype, Transition, Gene_Type, Expression_Change, fill = list(Gene_Count = 0)) %>%
mutate(Gene_Count = ifelse(Expression_Change == "Expression Increases", Gene_Count, -1*Gene_Count))
# GO enrichment of syntenic C vs D DEGs with TopGO - biological processes; cellular component; molecular function
# Start from an empty tibble with the schema returned by GO_enrichment_func.
c_to_d_go_results <- tibble(GO = as.character(), Term = as.character(),
Annotated = as.integer(), Significant = as.integer(),
Expected = as.numeric(), P_Value = as.numeric(),
Ontology_Type = as.character())
domains <- c("BP", "CC", "MF")
# One enrichment run per GO domain; gene set = B73 genes with concordant
# syntenic differential expression across the C -> D transition.
for(i in seq_along(domains)){
c_to_d_go_results_temp <- GO_enrichment_func(myInterestingGenes = filter(degs_across_sections, Genotype == "B73", Transition == "C to D", Gene_Type == "Syntenic, concordant differential expression")$GeneID,
go_file_path = "./data/V4_B73_maize_GAMER_GOs.txt",
target_ontologies = domains[i], p_value_cutoff = 0.001)
# swap topGO's (truncated) Term text for the curated description table
c_to_d_go_results_temp <- dplyr::select(c_to_d_go_results_temp, -Term) %>% left_join(., go_annotation_descriptions, by = "GO")
c_to_d_go_results <- bind_rows(c_to_d_go_results, c_to_d_go_results_temp)
}
# keep the 20 smallest p-values per ontology (top_n(-20, P_Value)) and
# shorten one unwieldy GO term name for plotting
c_to_d_go_results <- c_to_d_go_results %>% group_by(Ontology_Type) %>% top_n(-20, P_Value) %>%
mutate(Term = ifelse(Term == "oxidoreductase activity, acting on paired donors, with incorporation or reduction of molecular oxygen",
"oxidoreductase activity, acting on paired donors", Term))
### Graph ###
#################################################################################################################
# Differential expression across section transitions
# Stacked bars per transition, faceted by genotype; negative counts (made
# negative in the summary step above) show decreases below the axis.
# NOTE(review): guides(fill = FALSE) and element_rect(size = ...) are
# deprecated in newer ggplot2 (use "none" / linewidth); kept as-is for
# compatibility with the ggplot2 version this was written against.
ggplot(filter(degs_across_sections_summary, Gene_Type != "Syntenic, discordant differential expression"), aes(x = Transition, y = Gene_Count, fill = reorder(Gene_Type, Gene_Count))) +
guides(fill = FALSE) +
geom_bar(stat = "identity", color = "black", size = 0.75, width = 0.75) +
facet_grid(. ~ Genotype) +
scale_fill_brewer(palette = "Greys") +
geom_hline(yintercept = 0, size = 1.25, color = "black") +
# axis labels drop the sign so both directions read as positive counts
scale_y_continuous(limits = c(-500, 1750), breaks = c(-500, 0, 500, 1000, 1500),
labels = c("500", "0", "500", "1000", "1500")) +
theme_bw() +
theme(plot.title = element_blank(),
axis.title = element_text(size = 40),
axis.text = element_text(size = 40),
panel.border = element_rect(size = 5),
legend.text = element_text(size = 16),
legend.background = element_rect(color = "black", size = 1),
legend.position = "right",
strip.background = element_rect(fill = "white", size = 2, color = "black"),
strip.text = element_text(size = 64, face = "bold"),
panel.spacing = unit(0.05, "lines")) +
labs(y = "# of Differentially\nExpressed Genes", fill = NULL)
# GO enrichment syntenic C vs D DEGs with TopGO
# Dot plot: x = GO term (ordered by hit rate), y = % of annotated genes found
# significant, dot size = term size, colour = -log10(p).
ggplot(c_to_d_go_results, aes(x = reorder(Term, Significant/Annotated), y = 100*Significant/Annotated, size = Annotated, color = -log10(P_Value))) +
geom_point() + coord_flip() + facet_grid(Ontology_Type ~ ., scales = "free", space = "free_y") +
scale_color_gradient(low = "blue", high = "red", breaks = c(4, 12, 20, 28), labels = c("4", "12", "20", "28")) +
scale_size_continuous(breaks = c(100, 1000, 10000), range = c(2, 15)) +
scale_y_continuous(limits = c(0, 31), breaks = c(0, 10, 20, 30), labels = c("0", "10", "20", "30")) +
theme_bw() +
theme(axis.text = element_text(size = 20),
axis.title = element_text(size = 24),
legend.text = element_text(size = 16),
legend.title = element_text(size = 16),
legend.position = c(0.65, 0.7),
legend.background = element_rect(size = 0.5, color = "black"),
panel.border = element_rect(size = 3),
strip.background = element_rect(fill = "white", size = 2, color = "black"),
strip.text = element_text(size = 40, face = "bold"),
plot.margin = unit(c(1,1,1,1.25), "cm")) +
labs(x = NULL,
y = "% of Annotated Genes\nFound Significant",
size = "Annotated Genes\nper GO Term",
color = "-log10(p-value)")
| /scripts/figure_02.R | no_license | rak16128/maize_silk_spatio_temporal_gene_expression | R | false | false | 12,163 | r | ### Load libraries ###
#################################################################################################################
library(tidyverse)
library(data.table)
library(readxl)
library(DESeq2)
library(GO.db)
library(topGO)
### Load data ###
#################################################################################################################
# Library metadata; every column except the library name is categorical.
library_info <- fread("./data/Library_Codes.csv") %>%
  mutate_at(vars(-Library_Name), factor)
# Raw count matrices, restricted to the libraries of the matching genotype.
b73_counts <- fread("./data/Gene_Count_Matrix_with_B73_V4_Genome.csv") %>%
  dplyr::select(GeneID, (filter(library_info, Genotype == "B73"))$Library_Name)
mo17_counts <- fread("./data/Gene_Count_Matrix_with_CAU_Mo17_Genome.csv") %>%
  dplyr::select(GeneID, (filter(library_info, Genotype == "Mo17"))$Library_Name)
# GO term ID -> description lookup table.
go_annotation_descriptions <- fread("./data/GO_Annotations.csv")
# One-to-one syntenic ortholog pairs: drop duplicated IDs on either side so a
# gene can appear in at most one pair, then label pairs in B73-ID order.
# (seq_len(nrow(.)) replaces the original seq(1:nrow(.)); same values, but
# the idiomatic form that cannot misfire on an empty table.)
syntenic_gene_pairs <- fread("./data/V4_B73_vs_CAU_Mo17_Syntenic_Orthologs.csv") %>%
  distinct(B73_V4_GeneID, .keep_all = TRUE) %>%
  distinct(Mo17_CAU_GeneID, .keep_all = TRUE) %>%
  arrange(B73_V4_GeneID) %>%
  mutate(Syntenic_Pair = paste("Gene Pair", seq_len(nrow(.))))
### Define functions ###
#################################################################################################################
# Differential expression analysis
# Subset the count matrix to the libraries matching the metadata filters, run
# DESeq2 with the supplied design, and return genes passing both the FDR and
# fold-change thresholds.
#
# Arguments:
#   count_data           data frame with a GeneID column plus one raw-count
#                        column per library
#   genotypes, sections, silk_locations, years, harvest_date
#                        metadata filters applied to the global `library_info`
#   design               DESeq2 design formula (e.g. ~ Section + Year)
#   contrast             c(variable, level_1, level_2); log2FC > 0 means
#                        higher expression in level_1
#   read_depth_cutoff    counts below this value count as "not expressed"
#   prop_libs_with_reads minimum fraction of libraries that must reach cutoff
#   FDR, FC              adjusted-p and fold-change thresholds
# Returns: data frame of significant genes with an Expression_Change label.
DiffExp_func <- function(count_data, genotypes = c("B73", "Mo17"), sections = c("A", "B", "C", "D", "E"), silk_locations = c("Emerged", "Husk-Encased"), years = c("2014", "2015"),
                         harvest_date = c("7/30/14", "7/30/15", "8/2/15", "7/31/15", "8/4/15"), design, contrast, read_depth_cutoff = 5, prop_libs_with_reads = 0.25, FDR = 0.05, FC = 2){
  # BUG FIX: the original condition was `Harvest_Date %in% Harvest_Date`
  # (a tautology comparing the column with itself), so the harvest_date
  # argument was silently ignored; filter against the function argument.
  sample_info <- filter(library_info, UQ(as.name(contrast[1])) %in% contrast[2:3], Genotype %in% genotypes, Section %in% sections, Year %in% years, Location_Of_Silk %in% silk_locations,
                        Harvest_Date %in% harvest_date) %>%
    arrange(UQ(as.name(contrast[1]))) %>% column_to_rownames(var = "Library_Name")
  # keep only the selected libraries, GeneID first
  count_data <- dplyr::select(count_data, GeneID, rownames(sample_info)) %>% arrange(GeneID)
  # a gene is disqualified when it is lowly expressed in more than
  # (1 - prop_libs_with_reads) of the selected libraries; exclude the
  # character GeneID column from the comparison (the original compared it
  # lexically against the numeric cutoff, which only worked by accident)
  max_low_expressed_libraries <- (1 - prop_libs_with_reads) * (ncol(count_data) - 1)
  disqualified_genes <- count_data[rowSums(dplyr::select(count_data, -GeneID) < read_depth_cutoff) > max_low_expressed_libraries, ]$GeneID
  count_data <- count_data %>% column_to_rownames(var = "GeneID")
  ddsDF <- DESeqDataSetFromMatrix(countData = count_data, colData = sample_info, design = design)
  ddsDF <- DESeq(ddsDF)
  `%notIN%` <- Negate(`%in%`)
  # keep significant genes that survive the read-depth disqualification
  res <- as.data.frame(results(ddsDF, contrast = contrast, alpha = FDR)) %>% rownames_to_column(var = "GeneID") %>% filter(padj <= FDR, GeneID %notIN% disqualified_genes)
  # fold-change threshold in both directions; with contrast c(var, A, B),
  # log2FC > 0 means higher in A, i.e. expression *decreases* across the
  # A -> B transition -- hence the seemingly inverted labels below.
  res <- bind_rows(filter(res, log2FoldChange >= log2(FC)), filter(res, log2FoldChange <= -log2(FC))) %>%
    dplyr::select(GeneID:log2FoldChange, padj) %>%
    mutate(Expression_Change = ifelse(log2FoldChange > 0, "Expression Decreases", "Expression Increases"))
  return(res)
}
# Gene enrichment of DE gene sets with TopGO
# Tests the supplied gene set for GO-term enrichment (Fisher's exact test,
# topGO "weight" algorithm) against the gene universe in the mapping file.
#
# Arguments:
#   myInterestingGenes gene IDs of the set to test
#   go_file_path       gene -> GO mapping file readable by topGO::readMappings
#   target_ontologies  one or more of "BP", "CC", "MF"
#   p_value_cutoff     only terms at or below this p-value are kept
# Returns: data frame with GO, Term, Annotated, Significant, Expected,
#          P_Value and Ontology_Type columns, one row per enriched term.
GO_enrichment_func <- function(myInterestingGenes, go_file_path, target_ontologies, p_value_cutoff){
  geneID2GO <- readMappings(file = go_file_path, sep = "\t", IDsep = ",")
  geneNames <- names(geneID2GO)
  # binary factor over the gene universe: 1 = gene of interest, 0 = background
  geneList <- factor(as.integer(geneNames %in% myInterestingGenes))
  names(geneList) <- geneNames
  ontologies <- target_ontologies
  # BUG FIX: the original overwrote go_results on every loop iteration, so
  # when several ontologies were requested only the last one was returned.
  # Collect one result table per ontology and combine them at the end
  # (identical output for the single-ontology calls made in this script).
  results_per_ontology <- vector("list", length(ontologies))
  for(j in seq_along(ontologies)){
    go_data <- new(Class = "topGOdata", ontology = ontologies[j], allGenes = geneList,
                   nodeSize = 10, annot = annFUN.gene2GO, gene2GO = geneID2GO)
    Fisher_Test <- runTest(go_data, algorithm = "weight", statistic = "fisher")
    # report every node whose score is at or below the cutoff
    temp_go_results <- GenTable(go_data, P_Value = Fisher_Test, topNodes = length(Fisher_Test@score[Fisher_Test@score <= p_value_cutoff]))
    colnames(temp_go_results) <- c("GO", "Term", "Annotated", "Significant", "Expected", "P_Value")
    # GenTable renders tiny p-values as "< ..."; strip the "<" before
    # converting to numeric
    results_per_ontology[[j]] <- mutate(temp_go_results, P_Value = as.numeric(sub('<', '', P_Value)),
                                        Ontology_Type = as.character(ontologies[j]))
  }
  go_results <- bind_rows(results_per_ontology)
  return(go_results)
}
### Compute Stats ###
#################################################################################################################
# Differential expression analysis across silk section transitions
# For each adjacent pair of silk sections (A->B ... D->E) run DESeq2 per
# genotype, then classify every DE gene as non-syntenic, syntenic but
# genotype-specific, or commonly DE in both genotypes (the latter split into
# concordant/discordant by comparing the direction of change in B73 vs Mo17).
sections <- c("A", "B", "C", "D", "E")
# NOTE(review): results are rbind-ed onto an initial "" placeholder, which is
# dropped later with [-1, ]; collecting into a list and binding once would
# avoid both the placeholder row and the repeated copies.
degs_across_sections <- ""
'%ni%' <- Negate('%in%')
for(i in 1:(length(sections) - 1)){
# DE genes for this transition, one DESeq2 run per genotype
b73_temp_df <- DiffExp_func(count_data = b73_counts, genotypes = "B73", design = ~ Section + Year, contrast = c("Section", sections[i], sections[i + 1])) %>%
mutate(Transition = paste(sections[i], "to", sections[i + 1], sep = " "))
mo17_temp_df <- DiffExp_func(count_data = mo17_counts, genotypes = "Mo17", design = ~ Section + Year, contrast = c("Section", sections[i], sections[i + 1])) %>%
mutate(Transition = paste(sections[i], "to", sections[i + 1], sep = " "))
# attach syntenic-pair info and classify each B73 DE gene: commonly DE
# syntelog / non-syntenic / syntenic but DE only in this genotype
b73_df <- left_join(dplyr::rename(b73_temp_df, "B73_V4_GeneID" = GeneID), syntenic_gene_pairs, by = "B73_V4_GeneID") %>%
mutate(Gene_Type = ifelse(Mo17_CAU_GeneID %in% mo17_temp_df$GeneID, "Commonly Differentially Expressed Syntelog",
ifelse(Mo17_CAU_GeneID %ni% mo17_temp_df$GeneID & Mo17_CAU_GeneID %ni% syntenic_gene_pairs$Mo17_CAU_GeneID, "Non-syntenic", "Syntenic, genotype specific differential expression"))) %>%
dplyr::select(Syntenic_Pair, B73_V4_GeneID, Mo17_CAU_GeneID, Transition, Expression_Change, Gene_Type)
# same classification from the Mo17 side
mo17_df <- left_join(dplyr::rename(mo17_temp_df, "Mo17_CAU_GeneID" = GeneID), syntenic_gene_pairs, by = "Mo17_CAU_GeneID") %>%
mutate(Gene_Type = ifelse(B73_V4_GeneID %in% b73_temp_df$GeneID, "Commonly Differentially Expressed Syntelog",
ifelse(B73_V4_GeneID %ni% b73_temp_df$GeneID & B73_V4_GeneID %ni% syntenic_gene_pairs$B73_V4_GeneID, "Non-syntenic", "Syntenic, genotype specific differential expression"))) %>%
dplyr::select(Syntenic_Pair, B73_V4_GeneID, Mo17_CAU_GeneID, Transition, Expression_Change, Gene_Type)
# genes DE in both genotypes: compare direction of change to split
# concordant vs discordant pairs
syntenic_compare <- full_join(filter(b73_df, Gene_Type == "Commonly Differentially Expressed Syntelog") %>% dplyr::rename("B73_Expression_Change" = Expression_Change),
filter(mo17_df, Gene_Type == "Commonly Differentially Expressed Syntelog") %>% dplyr::rename("Mo17_Expression_Change" = Expression_Change),
by = c("Syntenic_Pair", "B73_V4_GeneID", "Mo17_CAU_GeneID", "Gene_Type", "Transition")) %>%
mutate(Gene_Type = ifelse(B73_Expression_Change == Mo17_Expression_Change, "Syntenic, concordant differential expression", "Syntenic, discordant differential expression"))
# merge the genotype-specific and common sets back into one table per genotype
b73_df <- bind_rows((dplyr::rename(b73_df, "GeneID" = B73_V4_GeneID) %>% mutate(Genotype = "B73") %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type) %>% filter(Gene_Type != "Commonly Differentially Expressed Syntelog")),
(mutate(syntenic_compare, Genotype = "B73") %>% dplyr::rename("GeneID" = B73_V4_GeneID, "Expression_Change" = B73_Expression_Change) %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type)))
mo17_df <- bind_rows((dplyr::rename(mo17_df, "GeneID" = Mo17_CAU_GeneID) %>% mutate(Genotype = "Mo17") %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type) %>% filter(Gene_Type != "Commonly Differentially Expressed Syntelog")),
(mutate(syntenic_compare, Genotype = "Mo17") %>% dplyr::rename("GeneID" = Mo17_CAU_GeneID, "Expression_Change" = Mo17_Expression_Change) %>% dplyr::select(GeneID, Genotype, Transition, Expression_Change, Gene_Type)))
degs_across_sections <- rbind(degs_across_sections, b73_df, mo17_df)
}
# summary counts per genotype/transition/class; "decreases" are negated so
# the bar chart below can draw them beneath the axis
degs_across_sections_summary <- degs_across_sections[-1, ] %>% group_by(Genotype, Transition, Expression_Change, Gene_Type) %>% summarise(Gene_Count = n()) %>%
ungroup() %>% tidyr::complete(Genotype, Transition, Gene_Type, Expression_Change, fill = list(Gene_Count = 0)) %>%
mutate(Gene_Count = ifelse(Expression_Change == "Expression Increases", Gene_Count, -1*Gene_Count))
# GO enrichment of syntenic C vs D DEGs with TopGO - biological processes; cellular component; molecular function
# Start from an empty tibble with the schema returned by GO_enrichment_func.
c_to_d_go_results <- tibble(GO = as.character(), Term = as.character(),
Annotated = as.integer(), Significant = as.integer(),
Expected = as.numeric(), P_Value = as.numeric(),
Ontology_Type = as.character())
domains <- c("BP", "CC", "MF")
# One enrichment run per GO domain; gene set = B73 genes with concordant
# syntenic differential expression across the C -> D transition.
for(i in seq_along(domains)){
c_to_d_go_results_temp <- GO_enrichment_func(myInterestingGenes = filter(degs_across_sections, Genotype == "B73", Transition == "C to D", Gene_Type == "Syntenic, concordant differential expression")$GeneID,
go_file_path = "./data/V4_B73_maize_GAMER_GOs.txt",
target_ontologies = domains[i], p_value_cutoff = 0.001)
# swap topGO's (truncated) Term text for the curated description table
c_to_d_go_results_temp <- dplyr::select(c_to_d_go_results_temp, -Term) %>% left_join(., go_annotation_descriptions, by = "GO")
c_to_d_go_results <- bind_rows(c_to_d_go_results, c_to_d_go_results_temp)
}
# keep the 20 smallest p-values per ontology (top_n(-20, P_Value)) and
# shorten one unwieldy GO term name for plotting
c_to_d_go_results <- c_to_d_go_results %>% group_by(Ontology_Type) %>% top_n(-20, P_Value) %>%
mutate(Term = ifelse(Term == "oxidoreductase activity, acting on paired donors, with incorporation or reduction of molecular oxygen",
"oxidoreductase activity, acting on paired donors", Term))
### Graph ###
#################################################################################################################
# Differential expression across section transitions
# Stacked bars per transition, faceted by genotype; negative counts (made
# negative in the summary step above) show decreases below the axis.
# NOTE(review): guides(fill = FALSE) and element_rect(size = ...) are
# deprecated in newer ggplot2 (use "none" / linewidth); kept as-is for
# compatibility with the ggplot2 version this was written against.
ggplot(filter(degs_across_sections_summary, Gene_Type != "Syntenic, discordant differential expression"), aes(x = Transition, y = Gene_Count, fill = reorder(Gene_Type, Gene_Count))) +
guides(fill = FALSE) +
geom_bar(stat = "identity", color = "black", size = 0.75, width = 0.75) +
facet_grid(. ~ Genotype) +
scale_fill_brewer(palette = "Greys") +
geom_hline(yintercept = 0, size = 1.25, color = "black") +
# axis labels drop the sign so both directions read as positive counts
scale_y_continuous(limits = c(-500, 1750), breaks = c(-500, 0, 500, 1000, 1500),
labels = c("500", "0", "500", "1000", "1500")) +
theme_bw() +
theme(plot.title = element_blank(),
axis.title = element_text(size = 40),
axis.text = element_text(size = 40),
panel.border = element_rect(size = 5),
legend.text = element_text(size = 16),
legend.background = element_rect(color = "black", size = 1),
legend.position = "right",
strip.background = element_rect(fill = "white", size = 2, color = "black"),
strip.text = element_text(size = 64, face = "bold"),
panel.spacing = unit(0.05, "lines")) +
labs(y = "# of Differentially\nExpressed Genes", fill = NULL)
# GO enrichment syntenic C vs D DEGs with TopGO
# Dot plot: x = GO term (ordered by hit rate), y = % of annotated genes found
# significant, dot size = term size, colour = -log10(p).
ggplot(c_to_d_go_results, aes(x = reorder(Term, Significant/Annotated), y = 100*Significant/Annotated, size = Annotated, color = -log10(P_Value))) +
geom_point() + coord_flip() + facet_grid(Ontology_Type ~ ., scales = "free", space = "free_y") +
scale_color_gradient(low = "blue", high = "red", breaks = c(4, 12, 20, 28), labels = c("4", "12", "20", "28")) +
scale_size_continuous(breaks = c(100, 1000, 10000), range = c(2, 15)) +
scale_y_continuous(limits = c(0, 31), breaks = c(0, 10, 20, 30), labels = c("0", "10", "20", "30")) +
theme_bw() +
theme(axis.text = element_text(size = 20),
axis.title = element_text(size = 24),
legend.text = element_text(size = 16),
legend.title = element_text(size = 16),
legend.position = c(0.65, 0.7),
legend.background = element_rect(size = 0.5, color = "black"),
panel.border = element_rect(size = 3),
strip.background = element_rect(fill = "white", size = 2, color = "black"),
strip.text = element_text(size = 40, face = "bold"),
plot.margin = unit(c(1,1,1,1.25), "cm")) +
labs(x = NULL,
y = "% of Annotated Genes\nFound Significant",
size = "Annotated Genes\nper GO Term",
color = "-log10(p-value)")
|
# This snippet makes tSNE, UMAP and PCA plots for the initial cell types.
#
# It requires running seuratNorm.r first, which (among others) sets the
# appropriate experimentType (allCells, WT, WT_sox10) in ipmc@project.name.
#
# Arguments:
#   ipmc  Seurat object prepared by seuratNorm.r
#   comps number of principal components to compute/use (45 is capped at 44)
# Side effects: creates Res/Plots/<project>/PCAdimReduction/comps<comps>/ and
# writes the cluster tree, tSNE, UMAP and DotPlot images plus RNG seed files.
# Returns: the Seurat object with PCA/tSNE/UMAP results attached.
makePCAInitTypesPlots <- function(ipmc, comps){
  source("R/getClusterTypes.r")
  source("R/calcTSNE_PCASpace.r")
  source("R/setCellTypeColors.r")
  source("R/setClusterColors.r")
  source("R/plotInitCellTypePCAs.r")
  source("R/calcUMAP_PCASpace.r")
  # build the output directory tree Res/Plots/<project>/PCAdimReduction/comps<comps>
  resDir <- file.path(getwd(), "Res")
  cat(resDir)
  plotDir <- file.path(resDir, "Plots")
  dir.create(plotDir, showWarnings = FALSE)
  experimentTypeDir <- file.path(plotDir, ipmc@project.name)
  dir.create( experimentTypeDir, showWarnings = FALSE)
  pcaPlotDir <- file.path( experimentTypeDir, "PCAdimReduction")
  dir.create( pcaPlotDir, showWarnings = FALSE)
  compsCorr <- if (comps == 45) comps-1 else comps
  compsDir <- file.path( pcaPlotDir, paste0("comps", comps))
  dir.create(compsDir, showWarnings = FALSE)
  ipmc <- SetAllIdent( ipmc, id = "originalCellTypes")
  ipmc <- RunPCA(ipmc, pc.genes = rownames( ipmc@data), pcs.compute = compsCorr, do.print = FALSE, fastpath = FALSE)
  # Tree of initial cell types with PCA dimension reduction but without clustering
  ipmc <- BuildClusterTree( ipmc, pcs.use = 1:compsCorr, do.plot = FALSE, do.reorder = FALSE)
  png( file.path( compsDir, "InitCellTypePCATree.png"))
  PlotClusterTree( ipmc)
  dev.off()
  # tSNE for initial cell types in PCA space; log the seed for reproducibility
  TSNESeed <- as.numeric(as.POSIXct(Sys.time()))
  cat( file = file.path( compsDir, "TSNESeed.txt"), TSNESeed, "\n")
  ipmc <- calcTSNE_PCASpace( ipmc, compsCorr, TSNESeed)
  png( file.path( compsDir, "tSNE_PCA_initialCellTypes.png"))
  TSNEPlot( ipmc, colors.use = setCellTypeColors( ipmc))
  dev.off()
  UMAPSeed <- as.numeric(as.POSIXct(Sys.time()))
  cat( file = file.path( compsDir, "UMAPSeed.txt"), UMAPSeed, "\n")
  # BUG FIX: the original passed TSNESeed here, so the seed written to
  # UMAPSeed.txt was never the one actually used for the UMAP
  ipmc <- calcUMAP_PCASpace( ipmc, compsCorr, UMAPSeed, 2)
  png( file.path( compsDir, "UMAP_PCA_initialCellTypes.png"))
  DimPlot( ipmc, reduction.use = "umap", cols.use = setCellTypeColors( ipmc))
  dev.off()
  # zero out near-noise values so zero-expressed genes appear as such in the
  # DotPlot; work on a copy so the original clustering data stays intact
  noiseTol <- log2(19)
  ipmcDenoised <- ipmc #for not spoiling initial clustering data
  # vectorised replacement of the original element-wise apply(), which was
  # very slow and coerced the slot to a plain dense matrix; this produces the
  # same values while keeping the slot's matrix class
  ipmcDenoised@data[ ipmcDenoised@data <= noiseTol] <- 0
  png( file.path( compsDir, paste0("DotPlotInitTypesPCASpace_c", comps, "_HR.png")), width = 1536, height = 2048)
  DotPlot(ipmcDenoised, genes.plot = rownames(ipmcDenoised@data), x.lab.rot = TRUE, dot.scale = 5, plot.legend = TRUE, dot.min = 0, scale.by = "radius")
  dev.off()
  #source("R/makePCAClusteringPlots.r")
  #for (clResolution in seq(0.8)) {makePCAClusteringPlots( ipmc, clResolution)}
  return(ipmc)
}
| /R_work/makePCAInitTypesPlots.r | no_license | SevaVigg/scNCCnano | R | false | false | 2,815 | r | #this snippet useis to make TSNE and PCA plots for initial cell types
# This snippet makes tSNE, UMAP and PCA plots for the initial cell types.
# It requires running seuratNorm.r first, which (among others) sets the
# appropriate experimentType (allCells, WT, WT_sox10) in ipmc@project.name.
#
# Arguments:
#   ipmc  Seurat object prepared by seuratNorm.r
#   comps number of principal components to compute/use (45 is capped at 44)
# Side effects: creates Res/Plots/<project>/PCAdimReduction/comps<comps>/ and
# writes the cluster tree, tSNE, UMAP and DotPlot images plus RNG seed files.
# Returns: the Seurat object with PCA/tSNE/UMAP results attached.
makePCAInitTypesPlots <- function(ipmc, comps){
  source("R/getClusterTypes.r")
  source("R/calcTSNE_PCASpace.r")
  source("R/setCellTypeColors.r")
  source("R/setClusterColors.r")
  source("R/plotInitCellTypePCAs.r")
  source("R/calcUMAP_PCASpace.r")
  # build the output directory tree Res/Plots/<project>/PCAdimReduction/comps<comps>
  resDir <- file.path(getwd(), "Res")
  cat(resDir)
  plotDir <- file.path(resDir, "Plots")
  dir.create(plotDir, showWarnings = FALSE)
  experimentTypeDir <- file.path(plotDir, ipmc@project.name)
  dir.create( experimentTypeDir, showWarnings = FALSE)
  pcaPlotDir <- file.path( experimentTypeDir, "PCAdimReduction")
  dir.create( pcaPlotDir, showWarnings = FALSE)
  compsCorr <- if (comps == 45) comps-1 else comps
  compsDir <- file.path( pcaPlotDir, paste0("comps", comps))
  dir.create(compsDir, showWarnings = FALSE)
  ipmc <- SetAllIdent( ipmc, id = "originalCellTypes")
  ipmc <- RunPCA(ipmc, pc.genes = rownames( ipmc@data), pcs.compute = compsCorr, do.print = FALSE, fastpath = FALSE)
  # Tree of initial cell types with PCA dimension reduction but without clustering
  ipmc <- BuildClusterTree( ipmc, pcs.use = 1:compsCorr, do.plot = FALSE, do.reorder = FALSE)
  png( file.path( compsDir, "InitCellTypePCATree.png"))
  PlotClusterTree( ipmc)
  dev.off()
  # tSNE for initial cell types in PCA space; log the seed for reproducibility
  TSNESeed <- as.numeric(as.POSIXct(Sys.time()))
  cat( file = file.path( compsDir, "TSNESeed.txt"), TSNESeed, "\n")
  ipmc <- calcTSNE_PCASpace( ipmc, compsCorr, TSNESeed)
  png( file.path( compsDir, "tSNE_PCA_initialCellTypes.png"))
  TSNEPlot( ipmc, colors.use = setCellTypeColors( ipmc))
  dev.off()
  UMAPSeed <- as.numeric(as.POSIXct(Sys.time()))
  cat( file = file.path( compsDir, "UMAPSeed.txt"), UMAPSeed, "\n")
  # BUG FIX: the original passed TSNESeed here, so the seed written to
  # UMAPSeed.txt was never the one actually used for the UMAP
  ipmc <- calcUMAP_PCASpace( ipmc, compsCorr, UMAPSeed, 2)
  png( file.path( compsDir, "UMAP_PCA_initialCellTypes.png"))
  DimPlot( ipmc, reduction.use = "umap", cols.use = setCellTypeColors( ipmc))
  dev.off()
  # zero out near-noise values so zero-expressed genes appear as such in the
  # DotPlot; work on a copy so the original clustering data stays intact
  noiseTol <- log2(19)
  ipmcDenoised <- ipmc #for not spoiling initial clustering data
  # vectorised replacement of the original element-wise apply(), which was
  # very slow and coerced the slot to a plain dense matrix; this produces the
  # same values while keeping the slot's matrix class
  ipmcDenoised@data[ ipmcDenoised@data <= noiseTol] <- 0
  png( file.path( compsDir, paste0("DotPlotInitTypesPCASpace_c", comps, "_HR.png")), width = 1536, height = 2048)
  DotPlot(ipmcDenoised, genes.plot = rownames(ipmcDenoised@data), x.lab.rot = TRUE, dot.scale = 5, plot.legend = TRUE, dot.min = 0, scale.by = "radius")
  dev.off()
  #source("R/makePCAClusteringPlots.r")
  #for (clResolution in seq(0.8)) {makePCAClusteringPlots( ipmc, clResolution)}
  return(ipmc)
}
|
# ---- Setup: libraries, parameters, input files ----
library(tidyverse)
library(grid)
library(gridExtra)
library(reshape2)

# Clear the workspace before the analysis starts.
# NOTE(review): rm(list = ls()) inside a script is discouraged -- prefer a
# fresh R session. Kept here to preserve the original behaviour.
rm(list = ls())

# Experiment parameters
frame_pre <- 10   # final frame before ligand addition
time_ligand <- 9  # ligand added after this time point

# Locate the per-condition ZtSH2 CSV exports and read them all in.
setwd("/Users/payamfarahani/Documents/Python_R/RTK BIOSENSORS/CD3 ITAM Validation/2022_06_21_CD3_validation")
# NOTE(review): list.files(pattern=) takes a regex, not a glob; "*2.csv"
# happens to match files ending in "2.csv" here -- confirm it matches the
# intended files only.
ZtSH2_names <- list.files(pattern="*2.csv")
ZtSH2_list <- lapply(ZtSH2_names, read.csv)
# Strip the file suffix so condition/date can be parsed from the names below.
ZtSH2_names <- t(str_replace(ZtSH2_names,"_ZtSH2.csv",""))

# Containers for per-file results (only `list` is used downstream)
list_raw <- list()
list_abs_decrease <- list()
list_rate_of_change <- list()
list_half_life <- list()
list <- list()
##########################################################################
# data analysis
# ---- Per-file processing ----
# For each input CSV: reshape to long format, annotate condition/date parsed
# from the file name, normalise time to ligand addition, attach each cell's
# expression level at t == 0, and compute the normalised ZtSH2 response.
# Changes from the original: removed the `condition`/`date` data frames that
# were built but never used; replaced the O(n^2) per-row filter loop with a
# vectorized match(); renamed locals that shadowed base::matrix/expression.
for (i in seq_along(ZtSH2_names)) {
  # Raw wide-format table: one "time" column plus one column per cell.
  raw_mat <- ZtSH2_list[[i]]
  colnames(raw_mat) <- str_replace_all(colnames(raw_mat), "cell.", "")
  # Melt to long format: one row per (time, cell) observation.
  matrix_melt <- melt(raw_mat, id = c("time"))
  colnames(matrix_melt) <- c("time", "cell", "ZtSH2_raw")
  # Condition and acquisition date are encoded in the file name,
  # e.g. "<condition>_<yyyymmdd>...".
  matrix_melt <- mutate(matrix_melt,
                        condition = str_replace(str_extract(ZtSH2_names[[i]],"[:alnum:]*_"),"_",""),
                        date = str_extract(ZtSH2_names[[i]],"[:digit:]{8}"))
  # Normalise time so that t == 0 is the point of ligand addition.
  matrix_melt["time"] <- matrix_melt["time"] - time_ligand
  # Per-cell ZtSH2 level at t == 0 (expression proxy), joined back onto every
  # row of that cell.  Assumes exactly one t == 0 row per cell -- the same
  # assumption the original per-row filter loop made.
  base_expr <- matrix_melt[matrix_melt$time == 0, c("cell", "ZtSH2_raw")]
  matrix_melt$ZtSH2_expression <- base_expr$ZtSH2_raw[match(matrix_melt$cell, base_expr$cell)]
  # Normalise each trace to its pre-stimulation mean and express the response
  # as a percentage decrease.
  matrix_melt <- matrix_melt %>%
    group_by(cell) %>%
    mutate(ZtSH2_norm_max = ZtSH2_raw / mean(head(ZtSH2_raw, frame_pre)),
           ZtSH2_abs_decrease = (ZtSH2_norm_max - 1) * -100)
  list[[i]] <- matrix_melt
  print(str_c(ZtSH2_names[[i]]," done"))
}
# Combine all per-file tables into one long data frame.
# bind_rows() replaces dplyr::rbind_list(), which was deprecated and has been
# removed from current dplyr releases.
output <- bind_rows(list)
##########################################################################
# ---- Shared ggplot themes (black and white variants) ----
# Both start from theme_classic() and force a square panel; they differ only
# in the background/foreground colour pairing.
theme_black <- theme_classic() +
  theme(legend.position = "right",
        legend.background = element_rect(fill = 'black', colour = 'black'),
        legend.text = element_text(colour = "white"),
        plot.background = element_rect(fill = 'black', colour = 'black'),
        panel.background = element_rect(fill = 'black', colour = 'black'),
        axis.text = element_text(colour = "white"),
        axis.title = element_text(colour = "white"),
        axis.line = element_line(colour = "white"),
        text = element_text(family = "Arial"),
        aspect.ratio = 1)
theme_white <- theme_classic() +
  theme(legend.position = "right",
        legend.background = element_rect(fill = 'white', colour = 'white'),
        legend.text = element_text(colour = "black"),
        plot.background = element_rect(fill = 'white', colour = 'white'),
        panel.background = element_rect(fill = 'white', colour = 'white'),
        axis.text = element_text(colour = "black"),
        axis.title = element_text(colour = "black"),
        axis.line = element_line(colour = "black"),
        text = element_text(family = "Arial"),
        aspect.ratio = 1)
##########################################################################
# ---- Mean ZtSH2 response vs. time, per condition (heatmap) ----
# summarise() yields exactly one row per (time, condition) cell; the original
# mutate() kept every observation, so geom_tile() drew many identical
# overlapping tiles per cell (same colour, but wasteful and fragile).
dat <- output %>%
  group_by(time, condition) %>%
  summarise(mean_ZtSH2_abs_decrease = mean(ZtSH2_abs_decrease), .groups = "drop")
# Fixed top-to-bottom ordering of the reporters on the y axis.
x = c("epsilon","delta", "gamma", "zeta3", "zeta2","zeta1", "EGFR", "none")
dat$condition <- factor(dat$condition,
levels = x)
p1 <- ggplot() +
  geom_tile(data = dat, aes(x = time,
                            y = condition,
                            fill = mean_ZtSH2_abs_decrease)) +
  scale_fill_viridis_c()
p1 + theme_white
##########################################################################
# ---- Boxplot: response at two time points per reporter ----
# Keep only the t = 10 and t = 60 observations and label each row with a
# combined "<condition>_<time>" factor that orders the x axis.
dat <- output %>%
  filter(time == "10" | time == "60") %>%
  unite("cond", c(condition,time), sep = "_", remove = FALSE)
x = c("none_10", "none_60","EGFR_10", "EGFR_60","zeta1_10", "zeta1_60","zeta2_10", "zeta2_60","zeta3_10", "zeta3_60","gamma_10", "gamma_60","delta_10", "delta_60","epsilon_10", "epsilon_60")
dat$cond <- factor(dat$cond, levels = x)
p1 <- ggplot(dat, aes(x = cond, y = ZtSH2_abs_decrease, fill = condition)) +
  geom_boxplot(colour = "black") +
  labs(x = "reporter", y = "RTK activity (t = 1 h)")
# Render on a plain white classic theme (no fixed aspect ratio here).
p1 +
  theme_classic() +
  theme(legend.position = "right",
        legend.background = element_rect(fill = 'white', colour = 'white'),
        legend.text = element_text(colour = "black"),
        plot.background = element_rect(fill = 'white', colour = 'white'),
        panel.background = element_rect(fill = 'white', colour = 'white'),
        axis.text = element_text(colour = "black"),
        axis.title = element_text(colour = "black"),
        axis.line = element_line(colour = "black"),
        text = element_text(family = "Arial"))
# run K-S tests
# NOTE(review): this compares "none" at t = 10 against "epsilon" at t = 60 --
# two different conditions AND two different time points. Confirm this is the
# intended contrast and not a copy-paste slip.
ks.test(filter(output, time == "10" & condition == "none")[["ZtSH2_abs_decrease"]],
filter(output, time == "60" & condition == "epsilon")[["ZtSH2_abs_decrease"]])
# check number of cells analyzed for each independent experiment
# Takes, per condition/date, the t == 0 row of the highest-numbered cell.
# NOTE(review): `cell` comes from melt() and is a factor; max() on an
# unordered factor errors in base R -- verify this line runs as intended.
num <- output %>%
group_by(condition,date) %>%
filter(time == 0 & cell == max(cell))
| /Fig1_CD3_ITAM_validation/2022_06_21_CD3_validation.R | no_license | toettchlab/Farahani2022 | R | false | false | 6,282 | r | # import libraries
library(tidyverse)
library(grid)
library(gridExtra)
library(reshape2)
rm(list = ls())
##########################################################################
# enter parameters
frame_pre <- 10; # final frame before ligand addition
time_ligand <- 9; # ligand added after this time point
##########################################################################
# gather files
setwd("/Users/payamfarahani/Documents/Python_R/RTK BIOSENSORS/CD3 ITAM Validation/2022_06_21_CD3_validation");
ZtSH2_names <- list.files(pattern="*2.csv");
ZtSH2_list <- lapply(ZtSH2_names, read.csv);
ZtSH2_names <- t(str_replace(ZtSH2_names,"_ZtSH2.csv",""));
# create lists for dataframes
list_raw <- list();
list_abs_decrease <- list();
list_rate_of_change <- list();
list_half_life <- list();
list <- list();
##########################################################################
# data analysis
# ---- Per-file processing ----
# For each input CSV: reshape to long format, annotate condition/date parsed
# from the file name, normalise time to ligand addition, attach each cell's
# expression level at t == 0, and compute the normalised ZtSH2 response.
# Changes from the original: removed the `condition`/`date` data frames that
# were built but never used; replaced the O(n^2) per-row filter loop with a
# vectorized match(); renamed locals that shadowed base::matrix/expression.
for (i in seq_along(ZtSH2_names)) {
  # Raw wide-format table: one "time" column plus one column per cell.
  raw_mat <- ZtSH2_list[[i]]
  colnames(raw_mat) <- str_replace_all(colnames(raw_mat), "cell.", "")
  # Melt to long format: one row per (time, cell) observation.
  matrix_melt <- melt(raw_mat, id = c("time"))
  colnames(matrix_melt) <- c("time", "cell", "ZtSH2_raw")
  # Condition and acquisition date are encoded in the file name,
  # e.g. "<condition>_<yyyymmdd>...".
  matrix_melt <- mutate(matrix_melt,
                        condition = str_replace(str_extract(ZtSH2_names[[i]],"[:alnum:]*_"),"_",""),
                        date = str_extract(ZtSH2_names[[i]],"[:digit:]{8}"))
  # Normalise time so that t == 0 is the point of ligand addition.
  matrix_melt["time"] <- matrix_melt["time"] - time_ligand
  # Per-cell ZtSH2 level at t == 0 (expression proxy), joined back onto every
  # row of that cell.  Assumes exactly one t == 0 row per cell -- the same
  # assumption the original per-row filter loop made.
  base_expr <- matrix_melt[matrix_melt$time == 0, c("cell", "ZtSH2_raw")]
  matrix_melt$ZtSH2_expression <- base_expr$ZtSH2_raw[match(matrix_melt$cell, base_expr$cell)]
  # Normalise each trace to its pre-stimulation mean and express the response
  # as a percentage decrease.
  matrix_melt <- matrix_melt %>%
    group_by(cell) %>%
    mutate(ZtSH2_norm_max = ZtSH2_raw / mean(head(ZtSH2_raw, frame_pre)),
           ZtSH2_abs_decrease = (ZtSH2_norm_max - 1) * -100)
  list[[i]] <- matrix_melt
  print(str_c(ZtSH2_names[[i]]," done"))
}
# Combine all per-file tables into one long data frame.
# bind_rows() replaces dplyr::rbind_list(), which was deprecated and has been
# removed from current dplyr releases.
output <- bind_rows(list)
##########################################################################
# plot themes
theme_black <- theme_classic() +
theme(legend.position="right",
legend.background = element_rect(fill = 'black', colour = 'black'),
legend.text = element_text(colour="white"),
plot.background = element_rect(fill = 'black', colour = 'black'),
panel.background = element_rect(fill = 'black', colour = 'black'),
axis.text = element_text(colour="white"),
axis.title = element_text(colour="white"),
axis.line = element_line(colour="white"),
text = element_text(family = "Arial")) +
theme(aspect.ratio=1)
theme_white <- theme_classic() +
theme(legend.position="right",
legend.background = element_rect(fill = 'white', colour = 'white'),
legend.text = element_text(colour="black"),
plot.background = element_rect(fill = 'white', colour = 'white'),
panel.background = element_rect(fill = 'white', colour = 'white'),
axis.text = element_text(colour="black"),
axis.title = element_text(colour="black"),
axis.line = element_line(colour="black"),
text = element_text(family = "Arial")) +
theme(aspect.ratio=1)
##########################################################################
# ---- Mean ZtSH2 response vs. time, per condition (heatmap) ----
# summarise() yields exactly one row per (time, condition) cell; the original
# mutate() kept every observation, so geom_tile() drew many identical
# overlapping tiles per cell (same colour, but wasteful and fragile).
dat <- output %>%
  group_by(time, condition) %>%
  summarise(mean_ZtSH2_abs_decrease = mean(ZtSH2_abs_decrease), .groups = "drop")
# Fixed top-to-bottom ordering of the reporters on the y axis.
x = c("epsilon","delta", "gamma", "zeta3", "zeta2","zeta1", "EGFR", "none")
dat$condition <- factor(dat$condition,
levels = x)
p1 <- ggplot() +
  geom_tile(data = dat, aes(x = time,
                            y = condition,
                            fill = mean_ZtSH2_abs_decrease)) +
  scale_fill_viridis_c()
p1 + theme_white
##########################################################################
# boxplot
dat <- filter(output, time == "10" | time == "60") %>%
unite("cond", c(condition,time), sep = "_", remove = FALSE)
# x = c("none_20", "none_60","EGFR_20", "EGFR_60","zeta1_20", "zeta1_60","zeta2_20", "zeta2_60","zeta3_20", "zeta3_60","gamma_20", "gamma_60","delta_20", "delta_60","epsilon_20", "epsilon_60")
x = c("none_10", "none_60","EGFR_10", "EGFR_60","zeta1_10", "zeta1_60","zeta2_10", "zeta2_60","zeta3_10", "zeta3_60","gamma_10", "gamma_60","delta_10", "delta_60","epsilon_10", "epsilon_60")
dat$cond <- factor(dat$cond,
levels = x)
p1 <- ggplot(data = dat,
aes(x = cond,
y = ZtSH2_abs_decrease,
fill = condition)) +
geom_boxplot(colour = "black") +
labs(x = "reporter", y = "RTK activity (t = 1 h)")
p1 + theme_classic() +
theme(legend.position="right",
legend.background = element_rect(fill = 'white', colour = 'white'),
legend.text = element_text(colour="black"),
plot.background = element_rect(fill = 'white', colour = 'white'),
panel.background = element_rect(fill = 'white', colour = 'white'),
axis.text = element_text(colour="black"),
axis.title = element_text(colour="black"),
axis.line = element_line(colour="black"),
text = element_text(family = "Arial"))
# run K-S tests
# NOTE(review): this compares "none" at t = 10 against "epsilon" at t = 60 --
# two different conditions AND two different time points. Confirm this is the
# intended contrast and not a copy-paste slip.
ks.test(filter(output, time == "10" & condition == "none")[["ZtSH2_abs_decrease"]],
filter(output, time == "60" & condition == "epsilon")[["ZtSH2_abs_decrease"]])
# check number of cells analyzed for each independent experiment
# Takes, per condition/date, the t == 0 row of the highest-numbered cell.
# NOTE(review): `cell` comes from melt() and is a factor; max() on an
# unordered factor errors in base R -- verify this line runs as intended.
num <- output %>%
group_by(condition,date) %>%
filter(time == 0 & cell == max(cell))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/compareProportion.R
\name{compareProportion}
\alias{compareProportion}
\title{Compare proportions between 2 groups}
\usage{
compareProportion(formula, data = NULL, ...)
}
\arguments{
\item{formula}{a formula}
\item{data}{a data frame in which \code{x} is evaluated if \code{x} is a
formula.}
\item{\dots}{other arguments}
}
\value{
the difference in proportions between the second and first group
}
\description{
A function to facilitate 2 group permutation tests for a categorical outcome variable
}
\note{
This function has been deprecated. Use \code{\link{diffprop}} instead.
}
\examples{
if (require(mosaicData)) {
data(HELPrct)
# calculate the observed difference
mean(homeless=="housed" ~ sex, data=HELPrct)
obs <- diffprop(homeless=="housed" ~ sex, data=HELPrct); obs
# calculate the permutation distribution
nulldist <- do(100) * diffprop(homeless=="housed" ~ shuffle(sex), data=HELPrct)
histogram(~ diffprop, groups=(diffprop>= obs), nulldist,
xlab="difference in proportions")
}
}
\keyword{iteration}
\keyword{stats}
| /man/compareProportion.Rd | no_license | mpetruc/mosaic | R | false | false | 1,134 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/compareProportion.R
\name{compareProportion}
\alias{compareProportion}
\title{Compare proportions between 2 groups}
\usage{
compareProportion(formula, data = NULL, ...)
}
\arguments{
\item{formula}{a formula}
\item{data}{a data frame in which \code{x} is evaluated if \code{x} is a
formula.}
\item{\dots}{other arguments}
}
\value{
the difference in proportions between the second and first group
}
\description{
A function to facilitate 2 group permutation tests for a categorical outcome variable
}
\note{
This function has been deprecated. Use \code{\link{diffprop}} instead.
}
\examples{
if (require(mosaicData)) {
data(HELPrct)
# calculate the observed difference
mean(homeless=="housed" ~ sex, data=HELPrct)
obs <- diffprop(homeless=="housed" ~ sex, data=HELPrct); obs
# calculate the permutation distribution
nulldist <- do(100) * diffprop(homeless=="housed" ~ shuffle(sex), data=HELPrct)
histogram(~ diffprop, groups=(diffprop>= obs), nulldist,
xlab="difference in proportions")
}
}
\keyword{iteration}
\keyword{stats}
|
##########################################################################
# DEFINITION OF MIGRATORY SEASONS FOR SATELLITE-TRACKED EGYPTIAN VULTURES
# original script written by Steffen Oppel in September 2014
# modified in February 2016
# re-written on 9 February 2019 to include Evan Buechley's NSD model approach
##########################################################################
## updated 10 Feb to include date comparison with manually annotated tracks
## updated 14 Feb to facilitate easy manual annotation of remaining tracks
# Load necessary library
library(maptools)
library(sp)
require(maps)
require(mapdata)
require(geosphere)
library(lubridate)
library(ggplot2)
basemap <- map_data("world")
library(scales)
library(tidyverse)
library(data.table)
library(readxl)
library(plotly)
### DEFINE FUNCTIONS TO MANUALLY ENTER DATES ###
# Ask the user whether the suggested migration START date is correct.
# Returns NULL when the user accepts the suggestion ('y' or anything other
# than 'n'), otherwise prompts for -- and returns -- the corrected date as a
# Date parsed by lubridate::ymd().
readStartDate <- function(){
  answer <- readline(prompt="Is the suggested START date correct? (y/n)")
  if (answer == 'n') {
    ymd(readline(prompt="Enter correct start date as YYYY-mm-dd"))
  } else {
    NULL  # caller keeps the automatically suggested date
  }
}
# Ask the user whether the suggested migration END date is correct.
# Returns NULL when the user accepts the suggestion ('y' or anything other
# than 'n'), otherwise prompts for -- and returns -- the corrected date as a
# Date parsed by lubridate::ymd().  (The original assigned the result to a
# local misleadingly named `start`; renamed for clarity, same behaviour.)
readEndDate <- function(){
  answer <- readline(prompt="Is the suggested END date correct? (y/n)")
  if (answer == 'n') {
    ymd(readline(prompt="Enter correct end date as YYYY-mm-dd"))
  } else {
    NULL  # caller keeps the automatically suggested date
  }
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# LOAD PREVIOUSLY SAVED DATA (prepared in script 2.EV-all-migration delineation.R)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set working directory
setwd("C:\\STEFFEN\\MANUSCRIPTS\\in_prep\\EGVU_papers\\FrontiersMigrationPaper\\EGVUmigration")
# # read in clean csv
# locs = read.csv("EV-all_1ptperhr-filtered-utm-NSD-season.csv")
# head(locs)
#
# migs<-unique(locs$id.yr.season) ## specify the unique migration journeys
# migs<-migs[!migs %in% c("Cabuk_2016_spring")]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CALCULATE TRAVEL DISTANCES AND SPEEDS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## this is not a particularly efficient way to calculate that, but it is easy to understand and manipulate
## run time ~25 min
# migration<-data.frame()
#
# migs<-migs[!migs %in% unique(migration$id.yr.season)] ## use this line if loop was interrupted
# for (a in migs){
#
# input<-locs %>% filter(id.yr.season==a) %>%
# mutate(DateTime=ymd_h(paste(Year,Month, Day, Hour, sep=","))) %>%
# dplyr::select(id.yr.season,study,tag,id,DateTime, long, lat, NSD, ND,utm.e,utm.n) %>% arrange(DateTime) %>%
# mutate(step_dist=0,home_dist=0,cumul_dist=0,time_diff=0,speed=0)
# first<-SpatialPoints(data.frame(input$long[1], input$lat[1]), proj4string=CRS("+proj=longlat + datum=wgs84"))
#
# for (l in 2: dim(input)[1]){
# input$time_diff[l]<-as.numeric(difftime(input$DateTime[l],input$DateTime[l-1], units="hours"))
# fromloc<-SpatialPoints(data.frame(input$long[l-1], input$lat[l-1]), proj4string=CRS("+proj=longlat + datum=wgs84"))
# toloc<-SpatialPoints(data.frame(input$long[l], input$lat[l]), proj4string=CRS("+proj=longlat + datum=wgs84"))
# input$step_dist[l]<-spDistsN1(fromloc, toloc, longlat=T)
# input$home_dist[l]<-spDistsN1(first, toloc, longlat=T)
# input$cumul_dist[l]<-sum(input$step_dist)
# input$speed[l]<-input$step_dist[l]/input$time_diff[l]
# }
#
# migration<-rbind(migration, input)
#
# }
#
#fwrite(migration,"EGVU_migration_preformatted.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# READ IN MANUALLY ANNOTATED DATA FOR INDIVIDUAL MIGRATION JOURNEYS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set working directory
setwd("C:\\STEFFEN\\MANUSCRIPTS\\in_prep\\EGVU_papers\\FrontiersMigrationPaper\\EGVUmigration")
# read in raw migration data (prepared in script 2.EV-all-migration delineation.R and EGVU_migration_season_definition_CALIBRATION.R)
migration<-fread("EGVU_migration_preformatted.csv")
## remove non-existent migrations [they should already be excluded...]
## A single %in% filter replaces the original 49 one-at-a-time data.table
## subsets (which also listed "Batuecasa_2017_spring" twice); the result is
## identical and far easier to maintain.
non_migrations <- c(
  "Agri_2013_spring", "Agri_2014_fall", "Ardahan_2013_spring",
  "Haydi_2014_spring", "Iste_2014_spring", "Serhat_2014_spring",
  "Tuzluca_2013_spring", "Tuzluca_2016_fall", "Batuecasa_2017_spring",
  "Huebra_2017_spring", "2HN_2017_fall", "2HN_2017_spring",
  "Douro_2017_spring", "Faia_2017_spring", "Faia_2018_fall",
  "Poiares_2017_spring", "Akaga_2018_spring", "Aoos_2015_spring",
  "Boyana_2018_spring", "Castor_2014_spring", "Lazaros_2012_spring",
  "Polya_2018_spring", "Volen_2014_fall", "A75658_2011_spring",
  "A75658_2011_fall", "A75659_2011_fall", "A80420_2013_fall",
  "A89731_2013_fall", "Pyrenees_2016_fall", "A89731_2012_fall",
  "Ardahan_2014_fall", "95R_2017_spring", "2HN_2016_spring",
  "2HN_2016_fall", "95R_2017_fall", "A75658_2010_spring",
  "A75658_2010_fall", "Anna_2018_fall", "BatuecasP_2017_fall",
  "Iliaz_2013_spring", "Iliaz_2013_fall", "Iliaz_2014_fall",
  "Levkipos_2013_spring", "Levkipos_2013_fall", "Mille_2014_fall",
  "Sanie_2014_fall", "Svetlina_2013_fall", "Volen_2013_spring"
)
migration <- migration[!(migration$id.yr.season %in% non_migrations), ]
## remove other data sets that are marginal ##
# Drop journeys that are too short to analyse: fewer than 20 fixes or a
# maximum displacement from the start point below 500 km.
# Changes from the original: removed the unused `Day` column (computed but
# never read), `dim(x)[1]` -> nrow(), scalar `|` -> `||`.
migsDATA <- unique(migration$id.yr.season)  ## all unique migration journeys
dim(migration)
for (a in migsDATA) {
  x <- migration %>% filter(id.yr.season == a)
  if (nrow(x) < 20 || max(x$home_dist) < 500) {
    print(sprintf("%s is not a proper migratory journey", a))
    migration <- migration %>% filter(id.yr.season != a)
  }
}
dim(migration)
migsDATA <- unique(migration$id.yr.season)  ## journeys passing the basic filter
# # read in results tables
# manudates<- read_xlsx("EV_mig_calibration.xlsx", sheet="complete migrations")
# manudates <- manudates %>% rename(id.yr.season=group,season=season,start_mig_MANU=begin, end_mig_MANU=end)
# head(manudates)
#
#
# ### MANUAL ANNNOTATION FROM CLEMENTINE BOUGAIN's THESIS ####
# manudates$end_mig_MANU[manudates$id.yr.season=="Dobromir_2014_spring"]<-ymd("2014-06-03")
# manudates$start_mig_MANU[manudates$id.yr.season=="Dobromir_2015_spring"]<-ymd("2015-04-30")
# manudates$end_mig_MANU[manudates$id.yr.season=="Dobromir_2015_spring"]<-ymd("2015-05-15")
# manudates$start_mig_MANU[manudates$id.yr.season=="Sanie_2015_spring"]<-ymd("2015-05-04")
# manudates$end_mig_MANU[manudates$id.yr.season=="Sanie_2015_spring"]<-ymd("2015-06-18")
# manudates$start_mig_MANU[manudates$id.yr.season=="Castor_2015_spring"]<-ymd("2015-03-07")
# manudates$end_mig_MANU[manudates$id.yr.season=="Castor_2015_spring"]<-ymd("2015-04-05")
# manudates$start_mig_MANU[manudates$id.yr.season=="Lazaros_2013_spring"]<-ymd("2013-03-08")
# manudates$end_mig_MANU[manudates$id.yr.season=="Lazaros_2013_spring"]<-ymd("2013-03-31")
# manudates$start_mig_MANU[manudates$id.yr.season=="Iliaz_2016_spring"]<-ymd("2016-03-17")
# manudates$end_mig_MANU[manudates$id.yr.season=="Iliaz_2016_spring"]<-ymd("2016-05-09")
# manudates$start_mig_MANU[manudates$id.yr.season=="Boris_2016_spring"]<-ymd("2016-03-01")
# manudates$end_mig_MANU[manudates$id.yr.season=="Boris_2016_spring"]<-ymd("2016-03-20")
# manudates$start_mig_MANU[manudates$id.yr.season=="Jenny_2016_spring"]<-ymd("2016-03-16")
# manudates$end_mig_MANU[manudates$id.yr.season=="Jenny_2016_spring"]<-ymd("2016-04-16")
# manudates$start_mig_MANU[manudates$id.yr.season=="Dobromir_2016_spring"]<-ymd("2016-05-01")
# manudates$start_mig_MANU[manudates$id.yr.season=="Sanie_2016_spring"]<-ymd("2016-04-13")
# manudates$start_mig_MANU[manudates$id.yr.season=="Aoos_2016_spring"]<-ymd("2016-03-16")
# manudates$end_mig_MANU[manudates$id.yr.season=="Aoos_2016_spring"]<-ymd("2016-04-18")
# manudates$end_mig_MANU[manudates$id.yr.season=="Dobromir_2016_spring"]<-ymd("2016-05-30")
# manudates$end_mig_MANU[manudates$id.yr.season=="Sanie_2016_spring"]<-ymd("2016-05-13")
# manudates$start_mig_MANU[manudates$id.yr.season=="Sanie_2015_fall"]<-ymd("2015-07-20")
# manudates$start_mig_MANU[manudates$id.yr.season=="Dobromir_2015_fall"]<-ymd("2015-08-22")
#fwrite(manudates,"EGVU_migration_dates_manually_classified.csv")
#manudates<-fread("EGVU_migration_dates_manually_classified.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### IDENTIFY THOSE JOURNEYS THAT STILL NEED TO BE ANNOTATED
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mig_dates<-fread("EGVU_manually_classified_migration_dates.csv")
mig_dates$start<-ymd(mig_dates$start) ## use dmy if you opened, modified, and saved in MS Excel
mig_dates$end<-ymd(mig_dates$end) ## use dmy if you opened, modified, and saved in MS Excel
NEEDEDmigs<-migsDATA[!(migsDATA %in% mig_dates$id.yr.season)]
counter=0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MANUALLY REPEAT THE CODE FROM THIS LINE ONWARDS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### THIS INTERACTIVE CODE DOES NOT RUN IN A LOOP!!
### IT REQUIRES MANUAL INCREMENTS TO GO THROUGH EACH MIGRATORY JOURNEY
counter=counter+1
a=NEEDEDmigs[counter]
source("manual_threshold_function.R")
### ~~~~~~~~~ 2. SHOW THE INTERACTIVE GRAPH OF DISTANCE TO SELECT APPROPRIATE DATES ~~~~~~~~~~~~~~~~ ###
## visually assess whether the threshold dates make sense
distgraph<-ggplot(x) + geom_point(aes(x=DateTime, y=home_dist, col=MIG)) + scale_x_datetime(date_breaks="2 weeks", date_labels="%b-%Y")
ggplotly(distgraph)
### ~~~~~~~~~ 3. SHOW A MAP WITH MIGRATION LOCATIONS ~~~~~~~~~~~~~~~~ ###
## geographically assess whether the threshold dates make sense
## Plot the journey on a world basemap to visually validate the chosen dates.
## If enough fixes were classified as migrating (>5), zoom to the extent of the
## migratory segment (xlim/ylim computed upstream) and highlight those fixes in red;
## otherwise fall back to the bounding box of the full track with generous margins.
if(dim(xmig)[1]>5){
ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
coord_fixed(xlim = xlim, ylim = ylim, ratio = 1.3)+
geom_path(data=x, aes(x=long, y=lat))+
geom_point(data=xmig, aes(x=long, y=lat),col='darkred',size=1.2)
}else{
## few/no migration points classified: show the whole track instead
ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
coord_fixed(xlim = c(min(x$long)-3,max(x$long)+3), ylim = c(min(x$lat)-6,max(x$lat)+6), ratio = 1.3)+
geom_path(data=x, aes(x=long, y=lat))
}
### ~~~~~~~~~ 4. FILL IN START AND END DATE MANUALLY ~~~~~~~~~~~~~~~~ ###
source('C:/STEFFEN/MANUSCRIPTS/in_prep/EGVU_papers/FrontiersMigrationPaper/EGVUmigration/manual_annotation_function.R')
#### IF THERE WAS NO MIGRATION REMOVE THE LINE
## mig_dates<-mig_dates[mig_dates$id.yr.season==a,]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# REMOVE ALL THE NON-MIGRATION DATA FROM THE DATASET
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
head(mig_dates)
head(migration)
## Assemble the migration-only dataset: for each manually annotated journey keep
## only the fixes whose Day falls inside the annotated migration window.
## BUG FIX: previously the open-ended window created for journeys with a
## missing/placeholder end date was immediately overwritten by the unconditional
## assignment on the next line, so the special case never took effect.
MIG_DATA<-data.frame()
for (a in unique(mig_dates$id.yr.season)){
a_start<-mig_dates$start[mig_dates$id.yr.season==a]
a_end<-mig_dates$end[mig_dates$id.yr.season==a]
if(length(a_end)==0 || is.na(a_end) || year(a_end)<2000){
## journeys with no (valid) end date: use an open-ended window so all data are kept
mig_window<-interval(a_start,ymd("2020-01-01"))
}else{
mig_window<-interval(a_start,a_end)
}
x<-migration %>% filter(id.yr.season==a) %>% mutate(Day=as.Date(DateTime)) %>%
filter(Day %within% mig_window)
MIG_DATA<-rbind(MIG_DATA,x)
}
fwrite(MIG_DATA,"EGVU_manually_selected_migration_data.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# COMPILE ALL THE DATASETS THAT HAVE ALREADY BEEN MANUALLY ANNOTATED
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# mig_dates1<-fread("EGVU_migration_dates_manually_classified_PART2.csv")%>%
# mutate(start=ymd(start)) %>%
# mutate(end=ymd(end))
# mig_dates2<-fread("EGVU_migration_dates_manually_classified.csv")%>%
# mutate(start=as.Date(start_mig_MANU)) %>%
# mutate(end=as.Date(end_mig_MANU)) %>%
# filter(!(start_mig_MANU=="")) %>%
# dplyr::select(id.yr.season,start,end)
# mig_dates3<-fread("migration.dates.mideast.csv") %>%
# mutate(start=as.Date(start, format="%m/%d/%y")) %>%
# mutate(end=as.Date(end, format="%m/%d/%y")) ## opened, modified, and saved in MS Excel in US date format
# mig_dates4<-fread("EGVU_migration_dates_manually_classified_PART3.csv")%>%
# filter(!(id.yr.season %in% mig_dates1$id.yr.season))%>%
# mutate(start=ymd(start)) %>%
# mutate(end=ymd(end))
# mig_dates5<-fread("EGVU_migration_dates_manually_classified_PART4.csv")%>%
# filter(!(id.yr.season %in% mig_dates1$id.yr.season))%>%
# mutate(start=ymd(start)) %>%
# mutate(end=ymd(end))
#
# all_migdates<-rbind(mig_dates1,mig_dates2,mig_dates3, mig_dates4, mig_dates5) %>% distinct()
# fwrite(all_migdates,"EGVU_manually_classified_migration_dates.csv")
### REMOVE DUPLICATES IN MIGRATION DATES TABLE
dim(mig_dates)
mig_dates<-mig_dates %>% distinct()
dim(mig_dates)
fwrite(mig_dates,"EGVU_manually_classified_migration_dates.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ### THIS BELOW DID NOT WORK #################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
#
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # MANUALLY ANNOTATE START AND END DATES OF MIGRATION FOR INDIVIDUAL ANIMALS
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
# ### LOOP TO CALCULATE START AND END DATES FOR AUTUMN MIGRATION #######
# ### COMPARE WITH MANUALLY ANNOTATED DATES AND CALCULATE DIFFERENCE
# ### based on Evan's script to use models, but if model fails we use basic rules of thumb
#
#
# #mig_dates<-data.frame() ### create blank data frame that will hold all the data to evaluate accuracy of algorithmic start and end definition
# mig_dates<-fread("EGVU_migration_dates_manually_classified_PART2.csv")
# mig_dates$start<-ymd(mig_dates$start)
# mig_dates$end<-ymd(mig_dates$end)
#
# NEEDEDmigs<-NEEDEDmigs[!(NEEDEDmigs %in% mig_dates$id.yr.season)]
#
# for (a in NEEDEDmigs){
#
#
#
# ### SELECT THE DATA FOR THIS ANIMAL
# x<-migration %>% filter(id.yr.season==a) %>% mutate(Day=as.Date(DateTime))
#
# if (dim(x)[1] <20 | max(x$home_dist)<500) {
# print(sprintf("%s is not a proper migratory journey",a))
# } else {
#
# print(sprintf("starting with migration journey %s",a))
#
# ### ~~~~~~~~~ 1. DEFINE START AND END DATES WITH SIMPLE THRESHOLDS ~~~~~~~~~~~~~~~~ ###
# ## MIGRATION STARTS WHEN DIST TO HOME CONTINUOUSLY INCREASES
#
# dailyhomedist<- x %>% group_by(Day) %>%
# summarise(away=max(home_dist))
#
# ### find the first day where home_dist is greater than on any day before, and where home_dist is greater on any day afterwards
# THRESH_start<-NA
# for (d in 2:(dim(dailyhomedist)[1]-1)){
# maxbef<-max(dailyhomedist$away[1:(d-1)])
# minaft<-min(dailyhomedist$away[(d+1):dim(dailyhomedist)[1]])
# dmax<-d
# if(is.na(THRESH_start)==TRUE) { ## prevent that the first day gets overwritten by subsequent days
# if(dailyhomedist$away[d]>maxbef & dailyhomedist$away[d]<minaft){THRESH_start<-dailyhomedist$Day[d]}
# }
# if(is.na(THRESH_start)==FALSE) break
# } # end loop over every day in the data set
#
#
# ### going backwards, find the first day where home_dist is smaller than on any day afterwards, and where home_dist is smaller on any day before
# THRESH_end<-NA
# for (d in (dim(dailyhomedist)[1]):dmax){
# maxbef<-max(dailyhomedist$away[dmax:(d-1)])
# minaft<-min(dailyhomedist$away[(d):dim(dailyhomedist)[1]])
# if(is.na(THRESH_end)==TRUE) { ## prevent that the first day gets overwritten by subsequent days
# if(dailyhomedist$away[d]>maxbef & dailyhomedist$away[d]<=minaft){THRESH_end<-dailyhomedist$Day[d]}
# }
#
# # if(is.na(start)==FALSE & is.na(end)==TRUE) { ## prevent that the end is defined before the start
# # if(dailyhomedist$away[d]>maxbef & dailyhomedist$away[d]>=minaft){end<-dailyhomedist$Day[d]}
# # }
# if(is.na(THRESH_end)==FALSE) break
# } # end loop over every day in the data set
#
#
#
#
# ### ~~~~~~~~~ 2. SHOW THE INTERACTIVE GRAPH OF DISTANCE TO SELECT APPROPRIATE DATES ~~~~~~~~~~~~~~~~ ###
# ## visually assess whether the threshold dates make sense
#
# mig_time<-interval(start=THRESH_start,end=THRESH_end)
# x<- x %>% mutate(MIG=if_else(Day %within% mig_time,"migrating","stationary")) %>%
# mutate(MIG=if_else(is.na(MIG),"stationary",MIG))
#
#
# distgraph<-ggplot(x) + geom_point(aes(x=DateTime, y=home_dist, col=MIG))
# ggplotly(distgraph)
#
#
#
# ### ~~~~~~~~~ 3. SHOW A MAP WITH MIGRATION LOCATIONS ~~~~~~~~~~~~~~~~ ###
# ## geographically assess whether the threshold dates make sense
#
# xmig<- x %>% filter(MIG=="migrating")
# xlim<-c(min(xmig$long)-3,max(xmig$long)+3)
# ylim<-c(min(xmig$lat)-3,max(xmig$lat)+3)
#
# if(dim(xmig)[1]>5){
# ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
# coord_fixed(xlim = xlim, ylim = ylim, ratio = 1.3)+
# geom_path(data=x, aes(x=long, y=lat))+
# geom_point(data=xmig, aes(x=long, y=lat),col='darkred',size=1.2)
# }else{
# ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
# coord_fixed(xlim = c(min(x$long)-3,max(x$long)+3), ylim = c(min(x$lat)-3,max(x$lat)+3), ratio = 1.3)+
# geom_path(data=x, aes(x=long, y=lat))
# }
#
#
#
# ### ~~~~~~~~~ 4. FILL IN START AND END DATE MANUALLY ~~~~~~~~~~~~~~~~ ###
# ## only need to adjust the dates that are wrong
# #fix(THRESH_calib)
#
# StartDate <- readStartDate()
# EndDate <- readEndDate()
#
# ### CAPTURE OUTPUT FOR CALIBRATION
# THRESH_calib<-data.frame('id.yr.season'=a) %>%
# mutate(start=if_else(is.null(StartDate),THRESH_start,StartDate)) %>%
# mutate(end=if_else(is.null(EndDate),THRESH_end,EndDate))
#
#
# ### ~~~~~~~~~ 5. SAVE DATA AND CLEAN UP ~~~~~~~~~~~~~~~~ ###
# mig_dates<-rbind(mig_dates,THRESH_calib)
# fwrite(mig_dates,"EGVU_migration_dates_manually_classified_PART2.csv")
# dev.off()
# rm(THRESH_end,THRESH_start,x,xmig,xlim,ylim,mig_time,distgraph,THRESH_calib)
# pause()
#
# print(sprintf("finished with migration journey %s",a))
#
# }} #closes the else loop for migrations and the animal loop
#
#
#
#
| /EGVU_migration_season_MANUAL_definition.R | no_license | steffenoppel/EGVUmigration | R | false | false | 22,180 | r | ##########################################################################
# DEFINITION OF MIGRATORY SEASONS FOR SATELLITE-TRACKED EGYPTIAN VULTURES
# original script written by Steffen Oppel in September 2014
# modified in February 2016
# re-written on 9 February 2019 to include Evan Buechley's NSD model approach
##########################################################################
## updated 10 Feb to include date comparison with manually annotated tracks
## updated 14 Feb to facilitate easy manual annotation of remaining tracks
# Load necessary library
library(maptools)
library(sp)
require(maps)
require(mapdata)
require(geosphere)
library(lubridate)
library(ggplot2)
basemap <- map_data("world")
library(scales)
library(tidyverse)
library(data.table)
library(readxl)
library(plotly)
### DEFINE FUNCTIONS TO MANUALLY ENTER DATES ###
readStartDate <- function(){
## Ask the analyst to confirm the algorithmically suggested START date.
## Returns a Date (via ymd) when the user enters a correction ('n'),
## otherwise NULL; callers check the result with is.null().
answer <- readline(prompt="Is the suggested START date correct? (y/n)")
if(answer=='n'){
return(ymd(readline(prompt="Enter correct start date as YYYY-mm-dd")))
}
}
readEndDate <- function(){
## Ask the analyst to confirm the algorithmically suggested END date.
## Returns a Date (via ymd) when the user enters a correction ('n'),
## otherwise NULL; callers check the result with is.null().
## FIX: the local variable was misnamed 'start' (copy-paste from
## readStartDate); renamed and the return value made explicit.
answer <- readline(prompt="Is the suggested END date correct? (y/n)")
if(answer=='n'){
end <- ymd(readline(prompt="Enter correct end date as YYYY-mm-dd"))
return(end)
}
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# LOAD PREVIOUSLY SAVED DATA (prepared in script 2.EV-all-migration delineation.R)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set working directory
setwd("C:\\STEFFEN\\MANUSCRIPTS\\in_prep\\EGVU_papers\\FrontiersMigrationPaper\\EGVUmigration")
# # read in clean csv
# locs = read.csv("EV-all_1ptperhr-filtered-utm-NSD-season.csv")
# head(locs)
#
# migs<-unique(locs$id.yr.season) ## specify the unique migration journeys
# migs<-migs[!migs %in% c("Cabuk_2016_spring")]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CALCULATE TRAVEL DISTANCES AND SPEEDS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## this is not a particularly efficient way to calculate that, but it is easy to understand and manipulate
## run time ~25 min
# migration<-data.frame()
#
# migs<-migs[!migs %in% unique(migration$id.yr.season)] ## use this line if loop was interrupted
# for (a in migs){
#
# input<-locs %>% filter(id.yr.season==a) %>%
# mutate(DateTime=ymd_h(paste(Year,Month, Day, Hour, sep=","))) %>%
# dplyr::select(id.yr.season,study,tag,id,DateTime, long, lat, NSD, ND,utm.e,utm.n) %>% arrange(DateTime) %>%
# mutate(step_dist=0,home_dist=0,cumul_dist=0,time_diff=0,speed=0)
# first<-SpatialPoints(data.frame(input$long[1], input$lat[1]), proj4string=CRS("+proj=longlat + datum=wgs84"))
#
# for (l in 2: dim(input)[1]){
# input$time_diff[l]<-as.numeric(difftime(input$DateTime[l],input$DateTime[l-1], units="hours"))
# fromloc<-SpatialPoints(data.frame(input$long[l-1], input$lat[l-1]), proj4string=CRS("+proj=longlat + datum=wgs84"))
# toloc<-SpatialPoints(data.frame(input$long[l], input$lat[l]), proj4string=CRS("+proj=longlat + datum=wgs84"))
# input$step_dist[l]<-spDistsN1(fromloc, toloc, longlat=T)
# input$home_dist[l]<-spDistsN1(first, toloc, longlat=T)
# input$cumul_dist[l]<-sum(input$step_dist)
# input$speed[l]<-input$step_dist[l]/input$time_diff[l]
# }
#
# migration<-rbind(migration, input)
#
# }
#
#fwrite(migration,"EGVU_migration_preformatted.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# READ IN MANUALLY ANNOTATED DATA FOR INDIVIDUAL MIGRATION JOURNEYS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Set working directory
setwd("C:\\STEFFEN\\MANUSCRIPTS\\in_prep\\EGVU_papers\\FrontiersMigrationPaper\\EGVUmigration")
# read in raw migration data (prepared in script 2.EV-all-migration delineation.R and EGVU_migration_season_definition_CALIBRATION.R)
migration<-fread("EGVU_migration_preformatted.csv")
## remove non-existent migrations [they should already be excluded...]
## plus journeys known to be invalid or marginal.
## IMPROVEMENT: ~50 repeated subset assignments collapsed into one auditable
## exclusion vector filtered with %in% (the duplicated "Batuecasa_2017_spring"
## entry in the original list is kept only once; behavior is identical).
excluded_journeys<-c(
"Agri_2013_spring",
"Agri_2014_fall",
"Ardahan_2013_spring",
"Haydi_2014_spring",
"Iste_2014_spring",
"Serhat_2014_spring",
"Tuzluca_2013_spring",
"Tuzluca_2016_fall",
"Batuecasa_2017_spring",
"Huebra_2017_spring",
"2HN_2017_fall",
"2HN_2017_spring",
"Douro_2017_spring",
"Faia_2017_spring",
"Faia_2018_fall",
"Poiares_2017_spring",
"Akaga_2018_spring",
"Aoos_2015_spring",
"Boyana_2018_spring",
"Castor_2014_spring",
"Lazaros_2012_spring",
"Polya_2018_spring",
"Volen_2014_fall",
"A75658_2011_spring",
"A75658_2011_fall",
"A75659_2011_fall",
"A80420_2013_fall",
"A89731_2013_fall",
"Pyrenees_2016_fall",
"A89731_2012_fall",
"Ardahan_2014_fall",
"95R_2017_spring",
"2HN_2016_spring",
"2HN_2016_fall",
"95R_2017_fall",
"A75658_2010_spring",
"A75658_2010_fall",
"Anna_2018_fall",
"BatuecasP_2017_fall",
"Iliaz_2013_spring",
"Iliaz_2013_fall",
"Iliaz_2014_fall",
"Levkipos_2013_spring",
"Levkipos_2013_fall",
"Mille_2014_fall",
"Sanie_2014_fall",
"Svetlina_2013_fall",
"Volen_2013_spring")
migration<-migration[!(migration$id.yr.season %in% excluded_journeys),]
## remove other data sets that are marginal ##
## Drop journeys that are too sparse (<20 fixes) or too local (never >500 km
## from the first location) to count as a proper migration.
migsDATA<-unique(migration$id.yr.season) ## all unique migration journeys
dim(migration)
for (journey in migsDATA){
this_trip<-migration %>%
filter(id.yr.season==journey) %>%
mutate(Day=as.Date(DateTime))
not_a_migration<-dim(this_trip)[1]<20 | max(this_trip$home_dist)<500
if (not_a_migration) {
print(sprintf("%s is not a proper migratory journey",journey))
migration<-migration %>% filter(id.yr.season != journey)
}
}
dim(migration)
migsDATA<-unique(migration$id.yr.season) ## journeys that passed the basic filter
# # read in results tables
# manudates<- read_xlsx("EV_mig_calibration.xlsx", sheet="complete migrations")
# manudates <- manudates %>% rename(id.yr.season=group,season=season,start_mig_MANU=begin, end_mig_MANU=end)
# head(manudates)
#
#
# ### MANUAL ANNNOTATION FROM CLEMENTINE BOUGAIN's THESIS ####
# manudates$end_mig_MANU[manudates$id.yr.season=="Dobromir_2014_spring"]<-ymd("2014-06-03")
# manudates$start_mig_MANU[manudates$id.yr.season=="Dobromir_2015_spring"]<-ymd("2015-04-30")
# manudates$end_mig_MANU[manudates$id.yr.season=="Dobromir_2015_spring"]<-ymd("2015-05-15")
# manudates$start_mig_MANU[manudates$id.yr.season=="Sanie_2015_spring"]<-ymd("2015-05-04")
# manudates$end_mig_MANU[manudates$id.yr.season=="Sanie_2015_spring"]<-ymd("2015-06-18")
# manudates$start_mig_MANU[manudates$id.yr.season=="Castor_2015_spring"]<-ymd("2015-03-07")
# manudates$end_mig_MANU[manudates$id.yr.season=="Castor_2015_spring"]<-ymd("2015-04-05")
# manudates$start_mig_MANU[manudates$id.yr.season=="Lazaros_2013_spring"]<-ymd("2013-03-08")
# manudates$end_mig_MANU[manudates$id.yr.season=="Lazaros_2013_spring"]<-ymd("2013-03-31")
# manudates$start_mig_MANU[manudates$id.yr.season=="Iliaz_2016_spring"]<-ymd("2016-03-17")
# manudates$end_mig_MANU[manudates$id.yr.season=="Iliaz_2016_spring"]<-ymd("2016-05-09")
# manudates$start_mig_MANU[manudates$id.yr.season=="Boris_2016_spring"]<-ymd("2016-03-01")
# manudates$end_mig_MANU[manudates$id.yr.season=="Boris_2016_spring"]<-ymd("2016-03-20")
# manudates$start_mig_MANU[manudates$id.yr.season=="Jenny_2016_spring"]<-ymd("2016-03-16")
# manudates$end_mig_MANU[manudates$id.yr.season=="Jenny_2016_spring"]<-ymd("2016-04-16")
# manudates$start_mig_MANU[manudates$id.yr.season=="Dobromir_2016_spring"]<-ymd("2016-05-01")
# manudates$start_mig_MANU[manudates$id.yr.season=="Sanie_2016_spring"]<-ymd("2016-04-13")
# manudates$start_mig_MANU[manudates$id.yr.season=="Aoos_2016_spring"]<-ymd("2016-03-16")
# manudates$end_mig_MANU[manudates$id.yr.season=="Aoos_2016_spring"]<-ymd("2016-04-18")
# manudates$end_mig_MANU[manudates$id.yr.season=="Dobromir_2016_spring"]<-ymd("2016-05-30")
# manudates$end_mig_MANU[manudates$id.yr.season=="Sanie_2016_spring"]<-ymd("2016-05-13")
# manudates$start_mig_MANU[manudates$id.yr.season=="Sanie_2015_fall"]<-ymd("2015-07-20")
# manudates$start_mig_MANU[manudates$id.yr.season=="Dobromir_2015_fall"]<-ymd("2015-08-22")
#fwrite(manudates,"EGVU_migration_dates_manually_classified.csv")
#manudates<-fread("EGVU_migration_dates_manually_classified.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### IDENTIFY THOSE JOURNEYS THAT STILL NEED TO BE ANNOTATED
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
mig_dates<-fread("EGVU_manually_classified_migration_dates.csv")
mig_dates$start<-ymd(mig_dates$start) ## use dmy if you opened, modified, and saved in MS Excel
mig_dates$end<-ymd(mig_dates$end) ## use dmy if you opened, modified, and saved in MS Excel
NEEDEDmigs<-migsDATA[!(migsDATA %in% mig_dates$id.yr.season)]
counter=0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MANUALLY REPEAT THE CODE FROM THIS LINE ONWARDS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### THIS INTERACTIVE CODE DOES NOT RUN IN A LOOP!!
### IT REQUIRES MANUAL INCREMENTS TO GO THROUGH EACH MIGRATORY JOURNEY
counter=counter+1
a=NEEDEDmigs[counter]
source("manual_threshold_function.R")
### ~~~~~~~~~ 2. SHOW THE INTERACTIVE GRAPH OF DISTANCE TO SELECT APPROPRIATE DATES ~~~~~~~~~~~~~~~~ ###
## visually assess whether the threshold dates make sense
distgraph<-ggplot(x) + geom_point(aes(x=DateTime, y=home_dist, col=MIG)) + scale_x_datetime(date_breaks="2 weeks", date_labels="%b-%Y")
ggplotly(distgraph)
### ~~~~~~~~~ 3. SHOW A MAP WITH MIGRATION LOCATIONS ~~~~~~~~~~~~~~~~ ###
## geographically assess whether the threshold dates make sense
if(dim(xmig)[1]>5){
ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
coord_fixed(xlim = xlim, ylim = ylim, ratio = 1.3)+
geom_path(data=x, aes(x=long, y=lat))+
geom_point(data=xmig, aes(x=long, y=lat),col='darkred',size=1.2)
}else{
ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
coord_fixed(xlim = c(min(x$long)-3,max(x$long)+3), ylim = c(min(x$lat)-6,max(x$lat)+6), ratio = 1.3)+
geom_path(data=x, aes(x=long, y=lat))
}
### ~~~~~~~~~ 4. FILL IN START AND END DATE MANUALLY ~~~~~~~~~~~~~~~~ ###
source('C:/STEFFEN/MANUSCRIPTS/in_prep/EGVU_papers/FrontiersMigrationPaper/EGVUmigration/manual_annotation_function.R')
#### IF THERE WAS NO MIGRATION REMOVE THE LINE
## mig_dates<-mig_dates[mig_dates$id.yr.season==a,]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# REMOVE ALL THE NON-MIGRATION DATA FROM THE DATASET
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
head(mig_dates)
head(migration)
## Assemble the migration-only dataset: for each manually annotated journey keep
## only the fixes whose Day falls inside the annotated migration window.
## BUG FIX: previously the open-ended window created for journeys with a
## missing/placeholder end date was immediately overwritten by the unconditional
## assignment on the next line, so the special case never took effect.
MIG_DATA<-data.frame()
for (a in unique(mig_dates$id.yr.season)){
a_start<-mig_dates$start[mig_dates$id.yr.season==a]
a_end<-mig_dates$end[mig_dates$id.yr.season==a]
if(length(a_end)==0 || is.na(a_end) || year(a_end)<2000){
## journeys with no (valid) end date: use an open-ended window so all data are kept
mig_window<-interval(a_start,ymd("2020-01-01"))
}else{
mig_window<-interval(a_start,a_end)
}
x<-migration %>% filter(id.yr.season==a) %>% mutate(Day=as.Date(DateTime)) %>%
filter(Day %within% mig_window)
MIG_DATA<-rbind(MIG_DATA,x)
}
fwrite(MIG_DATA,"EGVU_manually_selected_migration_data.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# COMPILE ALL THE DATASETS THAT HAVE ALREADY BEEN MANUALLY ANNOTATED
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# mig_dates1<-fread("EGVU_migration_dates_manually_classified_PART2.csv")%>%
# mutate(start=ymd(start)) %>%
# mutate(end=ymd(end))
# mig_dates2<-fread("EGVU_migration_dates_manually_classified.csv")%>%
# mutate(start=as.Date(start_mig_MANU)) %>%
# mutate(end=as.Date(end_mig_MANU)) %>%
# filter(!(start_mig_MANU=="")) %>%
# dplyr::select(id.yr.season,start,end)
# mig_dates3<-fread("migration.dates.mideast.csv") %>%
# mutate(start=as.Date(start, format="%m/%d/%y")) %>%
# mutate(end=as.Date(end, format="%m/%d/%y")) ## opened, modified, and saved in MS Excel in US date format
# mig_dates4<-fread("EGVU_migration_dates_manually_classified_PART3.csv")%>%
# filter(!(id.yr.season %in% mig_dates1$id.yr.season))%>%
# mutate(start=ymd(start)) %>%
# mutate(end=ymd(end))
# mig_dates5<-fread("EGVU_migration_dates_manually_classified_PART4.csv")%>%
# filter(!(id.yr.season %in% mig_dates1$id.yr.season))%>%
# mutate(start=ymd(start)) %>%
# mutate(end=ymd(end))
#
# all_migdates<-rbind(mig_dates1,mig_dates2,mig_dates3, mig_dates4, mig_dates5) %>% distinct()
# fwrite(all_migdates,"EGVU_manually_classified_migration_dates.csv")
### REMOVE DUPLICATES IN MIGRATION DATES TABLE
dim(mig_dates)
mig_dates<-mig_dates %>% distinct()
dim(mig_dates)
fwrite(mig_dates,"EGVU_manually_classified_migration_dates.csv")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ### THIS BELOW DID NOT WORK #################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
#
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # MANUALLY ANNOTATE START AND END DATES OF MIGRATION FOR INDIVIDUAL ANIMALS
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
# ### LOOP TO CALCULATE START AND END DATES FOR AUTUMN MIGRATION #######
# ### COMPARE WITH MANUALLY ANNOTATED DATES AND CALCULATE DIFFERENCE
# ### based on Evan's script to use models, but if model fails we use basic rules of thumb
#
#
# #mig_dates<-data.frame() ### create blank data frame that will hold all the data to evaluate accuracy of algorithmic start and end definition
# mig_dates<-fread("EGVU_migration_dates_manually_classified_PART2.csv")
# mig_dates$start<-ymd(mig_dates$start)
# mig_dates$end<-ymd(mig_dates$end)
#
# NEEDEDmigs<-NEEDEDmigs[!(NEEDEDmigs %in% mig_dates$id.yr.season)]
#
# for (a in NEEDEDmigs){
#
#
#
# ### SELECT THE DATA FOR THIS ANIMAL
# x<-migration %>% filter(id.yr.season==a) %>% mutate(Day=as.Date(DateTime))
#
# if (dim(x)[1] <20 | max(x$home_dist)<500) {
# print(sprintf("%s is not a proper migratory journey",a))
# } else {
#
# print(sprintf("starting with migration journey %s",a))
#
# ### ~~~~~~~~~ 1. DEFINE START AND END DATES WITH SIMPLE THRESHOLDS ~~~~~~~~~~~~~~~~ ###
# ## MIGRATION STARTS WHEN DIST TO HOME CONTINUOUSLY INCREASES
#
# dailyhomedist<- x %>% group_by(Day) %>%
# summarise(away=max(home_dist))
#
# ### find the first day where home_dist is greater than on any day before, and where home_dist is greater on any day afterwards
# THRESH_start<-NA
# for (d in 2:(dim(dailyhomedist)[1]-1)){
# maxbef<-max(dailyhomedist$away[1:(d-1)])
# minaft<-min(dailyhomedist$away[(d+1):dim(dailyhomedist)[1]])
# dmax<-d
# if(is.na(THRESH_start)==TRUE) { ## prevent that the first day gets overwritten by subsequent days
# if(dailyhomedist$away[d]>maxbef & dailyhomedist$away[d]<minaft){THRESH_start<-dailyhomedist$Day[d]}
# }
# if(is.na(THRESH_start)==FALSE) break
# } # end loop over every day in the data set
#
#
# ### going backwards, find the first day where home_dist is smaller than on any day afterwards, and where home_dist is smaller on any day before
# THRESH_end<-NA
# for (d in (dim(dailyhomedist)[1]):dmax){
# maxbef<-max(dailyhomedist$away[dmax:(d-1)])
# minaft<-min(dailyhomedist$away[(d):dim(dailyhomedist)[1]])
# if(is.na(THRESH_end)==TRUE) { ## prevent that the first day gets overwritten by subsequent days
# if(dailyhomedist$away[d]>maxbef & dailyhomedist$away[d]<=minaft){THRESH_end<-dailyhomedist$Day[d]}
# }
#
# # if(is.na(start)==FALSE & is.na(end)==TRUE) { ## prevent that the end is defined before the start
# # if(dailyhomedist$away[d]>maxbef & dailyhomedist$away[d]>=minaft){end<-dailyhomedist$Day[d]}
# # }
# if(is.na(THRESH_end)==FALSE) break
# } # end loop over every day in the data set
#
#
#
#
# ### ~~~~~~~~~ 2. SHOW THE INTERACTIVE GRAPH OF DISTANCE TO SELECT APPROPRIATE DATES ~~~~~~~~~~~~~~~~ ###
# ## visually assess whether the threshold dates make sense
#
# mig_time<-interval(start=THRESH_start,end=THRESH_end)
# x<- x %>% mutate(MIG=if_else(Day %within% mig_time,"migrating","stationary")) %>%
# mutate(MIG=if_else(is.na(MIG),"stationary",MIG))
#
#
# distgraph<-ggplot(x) + geom_point(aes(x=DateTime, y=home_dist, col=MIG))
# ggplotly(distgraph)
#
#
#
# ### ~~~~~~~~~ 3. SHOW A MAP WITH MIGRATION LOCATIONS ~~~~~~~~~~~~~~~~ ###
# ## geographically assess whether the threshold dates make sense
#
# xmig<- x %>% filter(MIG=="migrating")
# xlim<-c(min(xmig$long)-3,max(xmig$long)+3)
# ylim<-c(min(xmig$lat)-3,max(xmig$lat)+3)
#
# if(dim(xmig)[1]>5){
# ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
# coord_fixed(xlim = xlim, ylim = ylim, ratio = 1.3)+
# geom_path(data=x, aes(x=long, y=lat))+
# geom_point(data=xmig, aes(x=long, y=lat),col='darkred',size=1.2)
# }else{
# ggplot() + geom_polygon(data = basemap, aes(x=long, y = lat, group = group)) +
# coord_fixed(xlim = c(min(x$long)-3,max(x$long)+3), ylim = c(min(x$lat)-3,max(x$lat)+3), ratio = 1.3)+
# geom_path(data=x, aes(x=long, y=lat))
# }
#
#
#
# ### ~~~~~~~~~ 4. FILL IN START AND END DATE MANUALLY ~~~~~~~~~~~~~~~~ ###
# ## only need to adjust the dates that are wrong
# #fix(THRESH_calib)
#
# StartDate <- readStartDate()
# EndDate <- readEndDate()
#
# ### CAPTURE OUTPUT FOR CALIBRATION
# THRESH_calib<-data.frame('id.yr.season'=a) %>%
# mutate(start=if_else(is.null(StartDate),THRESH_start,StartDate)) %>%
# mutate(end=if_else(is.null(EndDate),THRESH_end,EndDate))
#
#
# ### ~~~~~~~~~ 5. SAVE DATA AND CLEAN UP ~~~~~~~~~~~~~~~~ ###
# mig_dates<-rbind(mig_dates,THRESH_calib)
# fwrite(mig_dates,"EGVU_migration_dates_manually_classified_PART2.csv")
# dev.off()
# rm(THRESH_end,THRESH_start,x,xmig,xlim,ylim,mig_time,distgraph,THRESH_calib)
# pause()
#
# print(sprintf("finished with migration journey %s",a))
#
# }} #closes the else loop for migrations and the animal loop
#
#
#
#
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ShroutFleissICC1k.R
\name{ShroutFleissICC1}
\alias{ShroutFleissICC1}
\alias{ShroutFleissICC11}
\alias{ShroutFleissICC1k}
\title{Shrout and Fleiss intra-class correlation functions}
\usage{
ShroutFleissICC1(dta, clustercol, cols)
}
\arguments{
\item{dta}{A data frame containing within-subject measures, one participant per line;}
\item{clustercol}{is the column index where cluster belonging are given;}
\item{cols}{A vector indicating the columns containing the measures.}
}
\value{
ICC the intra-class measure of association.
}
\description{
The functions ShroutFleissICC1, ShroutFleissICC11
and ShroutFleissICC1k computes the intra-class correlation ICC
for a given data frame containing repeated measures in columns cols
when the measures are in distinct clusters, identified in column clustercol.
See \insertCite{sf79}{superb}.
}
\examples{
# creates a small data frames with 4 subject's scores for 5 measures:
dta <- data.frame(cbind(
clus <- c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3),
col1 <- c(2, 4, 4, 6, 4, 5, 8, 8, 5, 8, 9, 9)
))
ShroutFleissICC1(dta, 1, 2)
# 0.434343434
ShroutFleissICC11(dta[, 1], dta[,2])
# 0.434343434
dta2 <- data.frame(cbind(
clus <- c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3),
col1 <- c(1, 3, 3, 5, 3, 4, 7, 7, 4, 7, 8, 8),
col1 <- c(2, 4, 4, 6, 4, 5, 8, 8, 5, 8, 9, 9),
col1 <- c(3, 5, 5, 7, 5, 6, 9, 9, 6, 9, 10, 10)
))
ShroutFleissICC1(dta2, 1, 2:4)
# 0.7543859649
ShroutFleissICC1k(dta2[, 1], dta2[,2:4])
# 0.7543859649
}
\references{
\insertAllCited{}
\insertAllCited{}
}
| /man/ShroutFleissICC1.Rd | no_license | humanfactors/superb | R | false | true | 1,651 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ShroutFleissICC1k.R
\name{ShroutFleissICC1}
\alias{ShroutFleissICC1}
\alias{ShroutFleissICC11}
\alias{ShroutFleissICC1k}
\title{Shrout and Fleiss intra-class correlation functions}
\usage{
ShroutFleissICC1(dta, clustercol, cols)
}
\arguments{
\item{dta}{A data frame containing within-subject measures, one participant per line;}
\item{clustercol}{is the column index where cluster belonging are given;}
\item{cols}{A vector indicating the columns containing the measures.}
}
\value{
ICC the intra-class measure of association.
}
\description{
The functions ShroutFleissICC1, ShroutFleissICC11
and ShroutFleissICC1k computes the intra-class correlation ICC
for a given data frame containing repeated measures in columns cols
when the measures are in distinct clusters, identified in column clustercol.
See \insertCite{sf79}{superb}.
}
\examples{
# creates a small data frames with 4 subject's scores for 5 measures:
dta <- data.frame(cbind(
clus <- c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3),
col1 <- c(2, 4, 4, 6, 4, 5, 8, 8, 5, 8, 9, 9)
))
ShroutFleissICC1(dta, 1, 2)
# 0.434343434
ShroutFleissICC11(dta[, 1], dta[,2])
# 0.434343434
dta2 <- data.frame(cbind(
clus <- c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3),
col1 <- c(1, 3, 3, 5, 3, 4, 7, 7, 4, 7, 8, 8),
col1 <- c(2, 4, 4, 6, 4, 5, 8, 8, 5, 8, 9, 9),
col1 <- c(3, 5, 5, 7, 5, 6, 9, 9, 6, 9, 10, 10)
))
ShroutFleissICC1(dta2, 1, 2:4)
# 0.7543859649
ShroutFleissICC1k(dta2[, 1], dta2[,2:4])
# 0.7543859649
}
\references{
\insertAllCited{}
\insertAllCited{}
}
|
# find a starting point to generate
# silluett:
# clv:
# clValid
# clusterSim
# indexS
# cosine similarity
data <- read.csv("data/classes.csv", header=TRUE)
cols <- dim(data)[2]
rows <- dim(data)[1]
train_data <- data[ 1:(0.75*rows), ]
test_data <- data[ (0.75*rows):rows, ]
k <- 4
clusters <- matrix(0, k, (cols-1) )
clusters_census <- matrix(0, k, 1)
colnames(clusters_census) <- c("Count")
total_dists <- 0
get_dist_cluster <- function(nclusters, row) {
dists <- t( apply( nclusters, 1, function(x) (x - row)^2 ) )
dists <- cbind( apply( dists, 1, sum ) )
dists <- cbind( apply( dists, 1, sqrt ) )
# assigned <- order(dists, decreasing=TRUE)[1]
assigned <- which.min(dists)
clusters_census[assigned,1] <<- clusters_census[assigned,1]+1
total_dists <<- total_dists+dists[assigned,1]
}
a <- t( apply( train_data[,2:cols], 1, function(x) get_dist_cluster(clusters, x) ) )
# max_changes <- (2^(cols-1))
max_changes <- 100000
total_dists_min <- total_dists
clusters_census_min <- clusters_census
for(i in 1:max_changes){
new_clusters <- matrix(sample(0:1,k*(cols-1), replace=TRUE),k,cols-1)
# print(new_clusters)
total_dists <<- 0
clusters_census <<- matrix(0, k, 1)
colnames(clusters_census) <- c("Count")
a <- t( apply( train_data[,2:cols], 1, function(x) get_dist_cluster(new_clusters, x) ) )
if(total_dists<total_dists_min){
clusters <<- new_clusters
clusters_census_min <<- clusters_census
total_dists_min <<- total_dists
print("Min")
}
print(paste("Step ", i, "/", max_changes, "... Dist: ", total_dists, sep=""))
}
print(clusters)
| /classes.kmeans.r | no_license | GustavoKatel/datamining-algorithms | R | false | false | 1,619 | r | # find a starting point to generate
# silluett:
# clv:
# clValid
# clusterSim
# indexS
# cosine similarity
data <- read.csv("data/classes.csv", header=TRUE)
cols <- dim(data)[2]
rows <- dim(data)[1]
train_data <- data[ 1:(0.75*rows), ]
test_data <- data[ (0.75*rows):rows, ]
k <- 4
clusters <- matrix(0, k, (cols-1) )
clusters_census <- matrix(0, k, 1)
colnames(clusters_census) <- c("Count")
total_dists <- 0
get_dist_cluster <- function(nclusters, row) {
dists <- t( apply( nclusters, 1, function(x) (x - row)^2 ) )
dists <- cbind( apply( dists, 1, sum ) )
dists <- cbind( apply( dists, 1, sqrt ) )
# assigned <- order(dists, decreasing=TRUE)[1]
assigned <- which.min(dists)
clusters_census[assigned,1] <<- clusters_census[assigned,1]+1
total_dists <<- total_dists+dists[assigned,1]
}
a <- t( apply( train_data[,2:cols], 1, function(x) get_dist_cluster(clusters, x) ) )
# max_changes <- (2^(cols-1))
max_changes <- 100000
total_dists_min <- total_dists
clusters_census_min <- clusters_census
for(i in 1:max_changes){
new_clusters <- matrix(sample(0:1,k*(cols-1), replace=TRUE),k,cols-1)
# print(new_clusters)
total_dists <<- 0
clusters_census <<- matrix(0, k, 1)
colnames(clusters_census) <- c("Count")
a <- t( apply( train_data[,2:cols], 1, function(x) get_dist_cluster(new_clusters, x) ) )
if(total_dists<total_dists_min){
clusters <<- new_clusters
clusters_census_min <<- clusters_census
total_dists_min <<- total_dists
print("Min")
}
print(paste("Step ", i, "/", max_changes, "... Dist: ", total_dists, sep=""))
}
print(clusters)
|
# Used in case we need to special-case packages what packages are cached
isCacheable <- function(package) {
TRUE
}
isUsingCache <- function(project) {
isTRUE(get_opts("use.cache", project = project))
}
installedDescLookup <- function(pkgName) {
system.file("DESCRIPTION", package = pkgName)
}
# We assume 'path' is the path to a DESCRIPTION file, or a data frame (the
# data frame data must have stringsAsFactors = FALSE).
#
# descLookup is a function that takes a single argument pkgName and must
# return one of: 1) a file path to DESCRIPTION file, 2) a data frame (with
# stringsAsFactors = FALSE) of the DESCRIPTION dcf data, or 3) NULL if
# the DESCRIPTION is not available. By default, installedDescLookup is
# used, which looks in the active lib paths for the desired DESCRIPTION
# files.
#
#' @importFrom tools md5sum
hash <- function(path, descLookup = installedDescLookup) {
if (!file.exists(path))
stop("No DESCRIPTION file at path '", path, "'!")
if (is.data.frame(path)) {
DESCRIPTION <- path
} else {
DESCRIPTION <- as.data.frame(readDcf(path), stringsAsFactors = FALSE)
}
pkgName <- DESCRIPTION[["Package"]]
# Remote SHA backwards compatible with cache v2: use 'GithubSHA1' if exists, otherwise all 'Remote' fields
remote_fields <- if ("GithubSHA1" %in% names(DESCRIPTION)) {
"GithubSHA1"
} else if (is.null(DESCRIPTION[["RemoteType"]]) || DESCRIPTION[["RemoteType"]] == "cran") {
# Packages installed with install.packages or locally without remotes
c()
} else {
# Mirror the order used by devtools when augmenting the DESCRIPTION.
c("RemoteType", "RemoteHost", "RemoteRepo", "RemoteUsername", "RemoteRef", "RemoteSha", "RemoteSubdir")
}
# Mirror the order of DESCRIPTION fields produced by `package.skeleton` and
# `devtools::create_description`.
fields <- c("Package", "Version", "Depends", "Imports", "Suggests", "LinkingTo", remote_fields)
# TODO: Do we want the 'Built' field used for hashing? The main problem with using that is
# it essentially makes packages installed from source un-recoverable, since they will get
# built transiently and installed (and so that field could never be replicated).
# Create a "sub" data frame with a consistently ordered set of columns.
#
# This ensures that package hashing is not sensitive to DESCRIPTION field
# order.
common <- intersect(fields, names(DESCRIPTION))
sub <- DESCRIPTION[common]
# Handle LinkingTo specially -- we need to discover what version of packages in LinkingTo
# were actually linked against in order to properly disambiguate e.g. httpuv 1.0 linked
# against Rcpp 0.11.2 and httpuv 1.0 linked against Rcpp 0.11.2.1
# TODO: It would really be best if, on installation, we recorded what version of LinkingTo
# packages were actually linked to, in case that package is not available in the library
# (or, even worse, is actually a different version!)
linkingToField <- unlist(strsplit(as.character(sub[["LinkingTo"]]), "\\s*,\\s*"))
linkingToPkgs <- gsub("\\s*\\(.*", "", linkingToField)
linkingToPkgs <- gsub("^\\s*(.*?)\\s*$", "\\1", linkingToPkgs, perl = TRUE)
linkingToHashes <- lapply(linkingToPkgs, function(x) {
linkingToDesc <- descLookup(x)
# If we return NULL
if (is.null(linkingToDesc))
return(NULL)
else if (is.character(linkingToDesc) && !file.exists(linkingToDesc))
return(NULL)
else
hash(linkingToDesc, descLookup = descLookup)
})
missingLinkingToPkgs <- linkingToPkgs[vapply(linkingToHashes, is.null, logical(1))]
if (length(missingLinkingToPkgs)) {
warning("The following packages specified in the LinkingTo field for package '",
pkgName,
"' are unavailable:\n- ",
paste(shQuote(missingLinkingToPkgs), collapse = ", "),
"\nThese packages are required to be installed when attempting to hash this package for caching.",
call. = FALSE)
}
linkingToHashes <- if (length(linkingToHashes))
paste(
collapse = "",
sort_c(unlist(dropNull(linkingToHashes)))
)
# Normalize for hashing and add in the linkingTo hashes as well
ready <- normalizeForHash(sub)
ready <- paste(ready, linkingToHashes)
tempfile <- tempfile()
cat(ready, file = tempfile)
result <- md5sum(tempfile)
unlink(tempfile)
if (is.na(result)) stop("Failed to hash file!")
unname(result)
}
normalizeForHash <- function(item) {
gsub("[[:space:]]", "", paste(unlist(item), collapse = ""))
}
isVerboseCache <- function() {
return(isTRUE(getOption("packrat.verbose.cache")))
}
# helper function to remove the package from its original location and
# create a symlink to the cached version.
symlinkPackageToCache <- function(packagePath, cachedPackagePath) {
packageName <- basename(packagePath)
backupPackagePath <- tempfile(tmpdir = dirname(packagePath))
if (!file.rename(packagePath, backupPackagePath)) {
stop("failed to back up package directory '", packagePath, "'; cannot safely link to cache.")
}
on.exit(unlink(backupPackagePath, recursive = TRUE), add = TRUE)
if (!symlink(cachedPackagePath, packagePath)) {
# symlink failed; attempt to restore the backup back to its original name.
if (!file.rename(backupPackagePath, packagePath)) {
stop("failed to restore package from '", backupPackagePath, "' to ",
"'", packagePath, "' after symlink to ",
"'", cachedPackagePath, "' failed; package may be lost")
}
stop("failed to create a symlink from '", packagePath, "' to '", cachedPackagePath, "'")
}
if (isVerboseCache()) {
message("Using cached ", packageName, ".")
}
return(cachedPackagePath)
}
# Given a path to an installed package (outside the packrat cache), move that
# package into the cache and replace the original directory with a symbolic
# link into the package cache.
#
# If the package already exists inside the cache, overwrite=TRUE causes
# replacement of the cached content while overwrite=FALSE with fatal=FALSE
# uses the cached package. Using overwrite=TRUE with fatal=TRUE will err.
moveInstalledPackageToCache <- function(packagePath,
hash,
overwrite = TRUE,
fatal = FALSE,
cacheDir = cacheLibDir())
{
ensureDirectory(cacheDir)
packageName <- basename(packagePath)
cachedPackagePath <- file.path(cacheDir, packageName, hash, packageName)
backupPackagePath <- tempfile(tmpdir = dirname(cachedPackagePath))
# check for existence of package in cache
if (file.exists(cachedPackagePath)) {
if (fatal && !overwrite) {
stop("cached package already exists at path '", cachedPackagePath, "'")
}
if (!fatal) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
}
# back up a pre-existing cached package (restore on failure)
if (file.exists(cachedPackagePath)) {
if (!file.rename(cachedPackagePath, backupPackagePath)) {
stop("failed to back up package '", packageName, "'; cannot safely copy to cache")
}
on.exit(unlink(backupPackagePath, recursive = TRUE), add = TRUE)
}
if (isVerboseCache()) {
message("Caching ", packageName, ".")
}
# attempt to rename to cache
if (suppressWarnings(file.rename(packagePath, cachedPackagePath))) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
# rename failed; copy to temporary destination in same directory
# and then attempt to rename from there
tempPath <- tempfile(tmpdir = dirname(cachedPackagePath))
on.exit(unlink(tempPath, recursive = TRUE), add = TRUE)
if (all(dir_copy(packagePath, tempPath))) {
# check to see if the cached package path exists now; if it does,
# assume that this was generated by another R process that successfully
# populated the cache
if (file.exists(cachedPackagePath)) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
# attempt to rename to target path
if (suppressWarnings(file.rename(tempPath, cachedPackagePath))) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
}
# failed to insert package into cache -- clean up and return error
if (!file.rename(backupPackagePath, cachedPackagePath)) {
stop("failed to restore package '", packageName, "' in cache; package may be lost from cache")
}
# return failure
stop("failed to copy package '", packageName, "' to cache")
}
# Pull out cached package information from the DESCRIPTION
cachedPackages <- function(cacheDir = cacheLibDir(), fields = NULL) {
pkgCachePaths <- list.files(cacheDir, full.names = TRUE)
pkgPaths <- setNames(lapply(pkgCachePaths, function(x) {
list.files(x, full.names = TRUE)
}), basename(pkgCachePaths))
lapply(seq_along(pkgPaths), function(i) {
pkgName <- names(pkgPaths)[[i]]
hashedPaths <- pkgPaths[[i]]
result <- setNames(lapply(hashedPaths, function(path) {
as.list(readDcf(file.path(path, pkgName, "DESCRIPTION"), all = TRUE))
}), pkgName)
if (!is.null(fields)) {
lapply(result, `[`, fields)
} else {
result
}
})
}
listCachedPackages <- cachedPackages
clearPackageCache <- function(cacheDir = cacheLibDir(), ask = TRUE) {
if (ask) {
message("The packrat cache directory was resolved to:\n- ",
shQuote(cacheDir))
msg <- "Are you sure you want to clear the packrat cache? [Y/n]: "
response <- readline(msg)
if (tolower(substring(response, 1, 1)) != "y") {
message("Operation aborted.")
return(invisible(NULL))
}
}
unlink(cacheDir, recursive = TRUE)
}
deletePackagesFromCache <- function(packages, cacheDir = cacheLibDir()) {
paths <- file.path(cacheDir, packages)
lapply(paths, function(path) {
unlink(path, recursive = TRUE)
})
}
| /packrat/src/packrat/packrat/R/cache.R | permissive | rachjone/iapsr | R | false | false | 9,912 | r | # Used in case we need to special-case packages what packages are cached
isCacheable <- function(package) {
TRUE
}
isUsingCache <- function(project) {
isTRUE(get_opts("use.cache", project = project))
}
installedDescLookup <- function(pkgName) {
system.file("DESCRIPTION", package = pkgName)
}
# We assume 'path' is the path to a DESCRIPTION file, or a data frame (the
# data frame data must have stringsAsFactors = FALSE).
#
# descLookup is a function that takes a single argument pkgName and must
# return one of: 1) a file path to DESCRIPTION file, 2) a data frame (with
# stringsAsFactors = FALSE) of the DESCRIPTION dcf data, or 3) NULL if
# the DESCRIPTION is not available. By default, installedDescLookup is
# used, which looks in the active lib paths for the desired DESCRIPTION
# files.
#
#' @importFrom tools md5sum
hash <- function(path, descLookup = installedDescLookup) {
if (!file.exists(path))
stop("No DESCRIPTION file at path '", path, "'!")
if (is.data.frame(path)) {
DESCRIPTION <- path
} else {
DESCRIPTION <- as.data.frame(readDcf(path), stringsAsFactors = FALSE)
}
pkgName <- DESCRIPTION[["Package"]]
# Remote SHA backwards compatible with cache v2: use 'GithubSHA1' if exists, otherwise all 'Remote' fields
remote_fields <- if ("GithubSHA1" %in% names(DESCRIPTION)) {
"GithubSHA1"
} else if (is.null(DESCRIPTION[["RemoteType"]]) || DESCRIPTION[["RemoteType"]] == "cran") {
# Packages installed with install.packages or locally without remotes
c()
} else {
# Mirror the order used by devtools when augmenting the DESCRIPTION.
c("RemoteType", "RemoteHost", "RemoteRepo", "RemoteUsername", "RemoteRef", "RemoteSha", "RemoteSubdir")
}
# Mirror the order of DESCRIPTION fields produced by `package.skeleton` and
# `devtools::create_description`.
fields <- c("Package", "Version", "Depends", "Imports", "Suggests", "LinkingTo", remote_fields)
# TODO: Do we want the 'Built' field used for hashing? The main problem with using that is
# it essentially makes packages installed from source un-recoverable, since they will get
# built transiently and installed (and so that field could never be replicated).
# Create a "sub" data frame with a consistently ordered set of columns.
#
# This ensures that package hashing is not sensitive to DESCRIPTION field
# order.
common <- intersect(fields, names(DESCRIPTION))
sub <- DESCRIPTION[common]
# Handle LinkingTo specially -- we need to discover what version of packages in LinkingTo
# were actually linked against in order to properly disambiguate e.g. httpuv 1.0 linked
# against Rcpp 0.11.2 and httpuv 1.0 linked against Rcpp 0.11.2.1
# TODO: It would really be best if, on installation, we recorded what version of LinkingTo
# packages were actually linked to, in case that package is not available in the library
# (or, even worse, is actually a different version!)
linkingToField <- unlist(strsplit(as.character(sub[["LinkingTo"]]), "\\s*,\\s*"))
linkingToPkgs <- gsub("\\s*\\(.*", "", linkingToField)
linkingToPkgs <- gsub("^\\s*(.*?)\\s*$", "\\1", linkingToPkgs, perl = TRUE)
linkingToHashes <- lapply(linkingToPkgs, function(x) {
linkingToDesc <- descLookup(x)
# If we return NULL
if (is.null(linkingToDesc))
return(NULL)
else if (is.character(linkingToDesc) && !file.exists(linkingToDesc))
return(NULL)
else
hash(linkingToDesc, descLookup = descLookup)
})
missingLinkingToPkgs <- linkingToPkgs[vapply(linkingToHashes, is.null, logical(1))]
if (length(missingLinkingToPkgs)) {
warning("The following packages specified in the LinkingTo field for package '",
pkgName,
"' are unavailable:\n- ",
paste(shQuote(missingLinkingToPkgs), collapse = ", "),
"\nThese packages are required to be installed when attempting to hash this package for caching.",
call. = FALSE)
}
linkingToHashes <- if (length(linkingToHashes))
paste(
collapse = "",
sort_c(unlist(dropNull(linkingToHashes)))
)
# Normalize for hashing and add in the linkingTo hashes as well
ready <- normalizeForHash(sub)
ready <- paste(ready, linkingToHashes)
tempfile <- tempfile()
cat(ready, file = tempfile)
result <- md5sum(tempfile)
unlink(tempfile)
if (is.na(result)) stop("Failed to hash file!")
unname(result)
}
normalizeForHash <- function(item) {
gsub("[[:space:]]", "", paste(unlist(item), collapse = ""))
}
isVerboseCache <- function() {
return(isTRUE(getOption("packrat.verbose.cache")))
}
# helper function to remove the package from its original location and
# create a symlink to the cached version.
symlinkPackageToCache <- function(packagePath, cachedPackagePath) {
packageName <- basename(packagePath)
backupPackagePath <- tempfile(tmpdir = dirname(packagePath))
if (!file.rename(packagePath, backupPackagePath)) {
stop("failed to back up package directory '", packagePath, "'; cannot safely link to cache.")
}
on.exit(unlink(backupPackagePath, recursive = TRUE), add = TRUE)
if (!symlink(cachedPackagePath, packagePath)) {
# symlink failed; attempt to restore the backup back to its original name.
if (!file.rename(backupPackagePath, packagePath)) {
stop("failed to restore package from '", backupPackagePath, "' to ",
"'", packagePath, "' after symlink to ",
"'", cachedPackagePath, "' failed; package may be lost")
}
stop("failed to create a symlink from '", packagePath, "' to '", cachedPackagePath, "'")
}
if (isVerboseCache()) {
message("Using cached ", packageName, ".")
}
return(cachedPackagePath)
}
# Given a path to an installed package (outside the packrat cache), move that
# package into the cache and replace the original directory with a symbolic
# link into the package cache.
#
# If the package already exists inside the cache, overwrite=TRUE causes
# replacement of the cached content while overwrite=FALSE with fatal=FALSE
# uses the cached package. Using overwrite=TRUE with fatal=TRUE will err.
moveInstalledPackageToCache <- function(packagePath,
hash,
overwrite = TRUE,
fatal = FALSE,
cacheDir = cacheLibDir())
{
ensureDirectory(cacheDir)
packageName <- basename(packagePath)
cachedPackagePath <- file.path(cacheDir, packageName, hash, packageName)
backupPackagePath <- tempfile(tmpdir = dirname(cachedPackagePath))
# check for existence of package in cache
if (file.exists(cachedPackagePath)) {
if (fatal && !overwrite) {
stop("cached package already exists at path '", cachedPackagePath, "'")
}
if (!fatal) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
}
# back up a pre-existing cached package (restore on failure)
if (file.exists(cachedPackagePath)) {
if (!file.rename(cachedPackagePath, backupPackagePath)) {
stop("failed to back up package '", packageName, "'; cannot safely copy to cache")
}
on.exit(unlink(backupPackagePath, recursive = TRUE), add = TRUE)
}
if (isVerboseCache()) {
message("Caching ", packageName, ".")
}
# attempt to rename to cache
if (suppressWarnings(file.rename(packagePath, cachedPackagePath))) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
# rename failed; copy to temporary destination in same directory
# and then attempt to rename from there
tempPath <- tempfile(tmpdir = dirname(cachedPackagePath))
on.exit(unlink(tempPath, recursive = TRUE), add = TRUE)
if (all(dir_copy(packagePath, tempPath))) {
# check to see if the cached package path exists now; if it does,
# assume that this was generated by another R process that successfully
# populated the cache
if (file.exists(cachedPackagePath)) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
# attempt to rename to target path
if (suppressWarnings(file.rename(tempPath, cachedPackagePath))) {
return(symlinkPackageToCache(packagePath, cachedPackagePath))
}
}
# failed to insert package into cache -- clean up and return error
if (!file.rename(backupPackagePath, cachedPackagePath)) {
stop("failed to restore package '", packageName, "' in cache; package may be lost from cache")
}
# return failure
stop("failed to copy package '", packageName, "' to cache")
}
# Pull out cached package information from the DESCRIPTION
cachedPackages <- function(cacheDir = cacheLibDir(), fields = NULL) {
pkgCachePaths <- list.files(cacheDir, full.names = TRUE)
pkgPaths <- setNames(lapply(pkgCachePaths, function(x) {
list.files(x, full.names = TRUE)
}), basename(pkgCachePaths))
lapply(seq_along(pkgPaths), function(i) {
pkgName <- names(pkgPaths)[[i]]
hashedPaths <- pkgPaths[[i]]
result <- setNames(lapply(hashedPaths, function(path) {
as.list(readDcf(file.path(path, pkgName, "DESCRIPTION"), all = TRUE))
}), pkgName)
if (!is.null(fields)) {
lapply(result, `[`, fields)
} else {
result
}
})
}
listCachedPackages <- cachedPackages
clearPackageCache <- function(cacheDir = cacheLibDir(), ask = TRUE) {
if (ask) {
message("The packrat cache directory was resolved to:\n- ",
shQuote(cacheDir))
msg <- "Are you sure you want to clear the packrat cache? [Y/n]: "
response <- readline(msg)
if (tolower(substring(response, 1, 1)) != "y") {
message("Operation aborted.")
return(invisible(NULL))
}
}
unlink(cacheDir, recursive = TRUE)
}
deletePackagesFromCache <- function(packages, cacheDir = cacheLibDir()) {
paths <- file.path(cacheDir, packages)
lapply(paths, function(path) {
unlink(path, recursive = TRUE)
})
}
|
# ---
# title: "Accuracy as number of samples grows"
# author: "James Browne"
# date: "May 16 2017"
#output: html_document
# ---
library(ggplot2)
leg <- theme(legend.text = element_text(size = 12), legend.title=element_blank(), plot.title = element_text(size = 16, face="bold"), plot.subtitle = element_text(size = 12),axis.title.x = element_text(size=12), axis.text.x = element_text(size=12), axis.title.y = element_text(size=12), axis.text.y = element_text(size=12))
mydata <- read.csv(file="bench.csv", header=FALSE, sep=",")
colnames(mydata) <- c("Dataset", "System", "Threads", "RelativeSpeed")
mydata[mydata$Dataset=="MNIST",4] <- mydata[mydata$Dataset=="MNIST",4]/mydata[mydata$Dataset=="MNIST" & mydata$System=="binnedBase" ,4]
mydata[mydata$Dataset=="higgs",4] <- mydata[mydata$Dataset=="higgs",4]/mydata[mydata$Dataset=="higgs" & mydata$System=="binnedBase" ,4]
mydata[mydata$Dataset=="p53",4] <- mydata[mydata$Dataset=="p53",4]/mydata[mydata$Dataset=="p53" & mydata$System=="binnedBase" ,4]
#cols <- c("Ideal"="#000000", "RerF"="#009E73", "XGBoost"="#E69F00", "Ranger"="#0072B2", "RF"="#CC79A7")
png(filename="benchRF.png")
p <- ggplot(mydata, aes(System, RelativeSpeed,color = System, fill=System))
p <- p + geom_bar(stat="identity",position=position_dodge())
p <- p +leg + labs(title="Training Times Single Core", x="", y="Relative Training Time", subtitle=paste(""))
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
p <- p + facet_grid(Dataset ~ ., scales = "free_y")
print(p)
dev.off()
| /accSpeedTest/printResults.R | no_license | jbrowne6/fpExperiments | R | false | false | 1,569 | r | # ---
# title: "Accuracy as number of samples grows"
# author: "James Browne"
# date: "May 16 2017"
#output: html_document
# ---
library(ggplot2)
leg <- theme(legend.text = element_text(size = 12), legend.title=element_blank(), plot.title = element_text(size = 16, face="bold"), plot.subtitle = element_text(size = 12),axis.title.x = element_text(size=12), axis.text.x = element_text(size=12), axis.title.y = element_text(size=12), axis.text.y = element_text(size=12))
mydata <- read.csv(file="bench.csv", header=FALSE, sep=",")
colnames(mydata) <- c("Dataset", "System", "Threads", "RelativeSpeed")
mydata[mydata$Dataset=="MNIST",4] <- mydata[mydata$Dataset=="MNIST",4]/mydata[mydata$Dataset=="MNIST" & mydata$System=="binnedBase" ,4]
mydata[mydata$Dataset=="higgs",4] <- mydata[mydata$Dataset=="higgs",4]/mydata[mydata$Dataset=="higgs" & mydata$System=="binnedBase" ,4]
mydata[mydata$Dataset=="p53",4] <- mydata[mydata$Dataset=="p53",4]/mydata[mydata$Dataset=="p53" & mydata$System=="binnedBase" ,4]
#cols <- c("Ideal"="#000000", "RerF"="#009E73", "XGBoost"="#E69F00", "Ranger"="#0072B2", "RF"="#CC79A7")
png(filename="benchRF.png")
p <- ggplot(mydata, aes(System, RelativeSpeed,color = System, fill=System))
p <- p + geom_bar(stat="identity",position=position_dodge())
p <- p +leg + labs(title="Training Times Single Core", x="", y="Relative Training Time", subtitle=paste(""))
p <- p + theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())
p <- p + facet_grid(Dataset ~ ., scales = "free_y")
print(p)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BinaryS3.R
\name{is.binary}
\alias{is.binary}
\title{is Binary Vector}
\usage{
is.binary(x)
}
\arguments{
\item{x}{object to test.}
}
\value{
TRUE or FALSE.
}
\description{
test for object "binary".
}
\seealso{
\link{as.binary} and \link{binary}
}
| /man/is.binary.Rd | no_license | tanho63/binaryLogic | R | false | true | 327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BinaryS3.R
\name{is.binary}
\alias{is.binary}
\title{is Binary Vector}
\usage{
is.binary(x)
}
\arguments{
\item{x}{object to test.}
}
\value{
TRUE or FALSE.
}
\description{
test for object "binary".
}
\seealso{
\link{as.binary} and \link{binary}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimDesign.R
\docType{package}
\name{SimDesign}
\alias{SimDesign}
\title{Structure for Organizing Monte Carlo Simulation Designs}
\description{
Structure for Organizing Monte Carlo Simulation Designs
}
\details{
Provides tools to help organize Monte Carlo simulations in R. The package
controls the structure and back-end of Monte Carlo simulations
by utilizing a general generate-analyse-summarise strategy. The functions provided control common
simulation issues such as re-simulating non-convergent results, support parallel
back-end and MPI distributed computations, save and restore temporary files,
aggregate results across independent nodes, and provide native support for debugging.
The primary function for organizing the simulations is \code{\link{runSimulation}}.
For a didactic presentation of the package refer to Sigal and Chalmers
(2016; \doi{10.1080/10691898.2016.1246953}), and see the associated
wiki on Github (\url{https://github.com/philchalmers/SimDesign/wiki})
for other tutorial material, examples, and applications of \code{SimDesign} to real-world simulations.
}
\references{
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \code{Journal of Statistics Education, 24}(3), 136-156.
\doi{10.1080/10691898.2016.1246953}
}
\author{
Phil Chalmers \email{rphilip.chalmers@gmail.com}
}
\keyword{package}
| /man/SimDesign.Rd | no_license | GloriaColmenares/SimDesign | R | false | true | 1,455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimDesign.R
\docType{package}
\name{SimDesign}
\alias{SimDesign}
\title{Structure for Organizing Monte Carlo Simulation Designs}
\description{
Structure for Organizing Monte Carlo Simulation Designs
}
\details{
Provides tools to help organize Monte Carlo simulations in R. The package
controls the structure and back-end of Monte Carlo simulations
by utilizing a general generate-analyse-summarise strategy. The functions provided control common
simulation issues such as re-simulating non-convergent results, support parallel
back-end and MPI distributed computations, save and restore temporary files,
aggregate results across independent nodes, and provide native support for debugging.
The primary function for organizing the simulations is \code{\link{runSimulation}}.
For a didactic presentation of the package refer to Sigal and Chalmers
(2016; \doi{10.1080/10691898.2016.1246953}), and see the associated
wiki on Github (\url{https://github.com/philchalmers/SimDesign/wiki})
for other tutorial material, examples, and applications of \code{SimDesign} to real-world simulations.
}
\references{
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \code{Journal of Statistics Education, 24}(3), 136-156.
\doi{10.1080/10691898.2016.1246953}
}
\author{
Phil Chalmers \email{rphilip.chalmers@gmail.com}
}
\keyword{package}
|
test <- function(){success('Excellent! Machine learning models trained with 10-fold cross-validation repeated 5 times often have optimal performance.')}
| /exercises/test_03_12_2.R | permissive | flor14/supervised-ML-case-studies-course-1 | R | false | false | 156 | r | test <- function(){success('Excellent! Machine learning models trained with 10-fold cross-validation repeated 5 times often have optimal performance.')}
|
library(tidyverse)
library(outliers)
library(ggplot2)
library(cowplot)
library(magrittr)
library(multipanelfigure)
library(stats)
library(reshape)
# KPIS_Processes <- list(ALL, ALL_totals, ALL_avg, ALL_perc, ALL_quarterly_totals, ALL_quarterly_avg, ALL_quarterly_perc,
# ALL_monthly_totals, ALL_monthly_avg, ALL_monthly_perc, ALL_weekly_avg, ALL_weekly_perc)
#
# names(KPIS_Processes) <- c("ALL", "ALL_totals", "ALL_avg", "ALL_perc", "ALL_quarterly_totals", "ALL_quarterly_avg", "ALL_quarterly_perc",
# "ALL_monthly_totals", "ALL_monthly_avg", "ALL_monthly_perc", "ALL_weekly_avg", "ALL_weekly_perc")
#
# rm(ALL, ALL_totals, ALL_avg, ALL_perc, ALL_quarterly_totals, ALL_quarterly_avg, ALL_quarterly_perc,
# ALL_monthly_totals, ALL_monthly_avg, ALL_monthly_perc, ALL_weekly_avg, ALL_weekly_perc)
## rough work
y <- procs[grepl("2018", procs$Year) & grepl("3", procs$Quarter) & grepl("QA", procs$Team),] %>% group_by(Project) %>%
summarise(x = length(unique(AUTHOR)))
unique(y$AUTHOR)
table(x$Project, x$AUTHOR)
x <- x %>% group_by(Project, Quarter) %>% summarise(x = sum(TIMESPENT))
mean(x$x)
# this is df of all developers time logged
x <- procs[grepl("Dev", procs$Team),]
x <- x[!grepl("VD|TD|EXP", x$Project),]
## run the following 4 lines to get avg. developers per Live application
x <- procs[grepl("Dev", procs$Team),]
x <- x[!grepl("VD|TD|EXP|OP|Inter|SP", x$Project),]
x$Project[grepl("AV", x$Project)] <- "Availability"
x$Project[grepl("PT", x$Project)] <- "Payroll"
x$Project[grepl("RT", x$Project)] <- "Roster"
## total employess per module per year----------------
dev_year <- x %>%
group_by(Project, Year) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits =0))
dev_year <- cast(dev_year, Year ~ Project)
## avg dev. per module per week----------------
dev_week <- x %>%
group_by(Project, Year, Week) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits = 0))
dev_week <- cast(dev_week, Year ~ Project)
## avg dev. per module per month ----------------
dev_month <- x %>%
group_by(Project, Year, Month) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits = 0))
dev_month <- cast(dev_month, Year ~ Project)
## avg dev. per module per quarter ----------------
dev_quarter <- x %>%
group_by(Project, Year, Quarter) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits = 0))
dev_quarter <- cast(dev_quarter, Year ~ Project)
## list of each dev. dataframe
dev_all_apps <- list("Totals dev per year" = dev_year,"avg. dev per quarter" = dev_quarter, "avg. dev per month" = dev_month,
"avg.dev per week" = dev_week)
dev_live_apps <- list( "Totals dev per year" = dev_year,"avg. dev per quarter" = dev_quarter, "avg. dev per month" = dev_month,
"avg.dev per week" = dev_week)
rm(dev_month, dev_quarter, dev_week, dev_year)
# Devs per Quarter: total devs per quarter, yer and project ---------------
#dev & qa per quarter
# Count, per (Year, Quarter, Project, Team), the distinct authors on team
# `a` (e.g. "Dev" or "QA") in the global `procs` table, excluding the
# VD/TD/EXP projects, and store the result in the global environment under
# the name `a` as a list of per-year wide tables (Year + Quarter rows, one
# column per Project, via reshape::cast).
#
# Fix: the original wrapped `a` in paste(), which is a no-op for a single
# string; the value is now used directly.
dev_quarter <- function(a){
  filtered <- procs[grepl(a, procs$Team) & !grepl("VD|TD|EXP", procs$Project), ]
  per_quarter <- filtered %>%
    group_by(Year, Quarter, Project, Team) %>%
    summarise("Total" = length(unique(AUTHOR)))
  by_year <- split(per_quarter, per_quarter$Year)
  for (yr in names(by_year)) {
    # Reshape each year to wide format: one column per project.
    by_year[[yr]] <- cast(by_year[[yr]], Year + Quarter ~ Project)
  }
  # NOTE(review): writing into the global environment is kept for backward
  # compatibility -- later code reads the `Dev` / `QA` globals directly.
  assign(a, by_year, envir = .GlobalEnv)
  invisible(by_year)
}
#dev & qa per ticket type per quarter
# Count the distinct authors on team `a` per (Year, Quarter) in the global
# `procs` table (excluding VD/TD/EXP projects), store the resulting wide
# table (Quarter rows, Year columns) in the global environment under the
# name `b`, and write it to "<b>.csv".
#
# Fixes: paste(a)/paste(b) were no-op wraps of a single string, and
# paste(b, ".csv") produced a file name with a stray space
# ("Dev_count .csv"); paste0() builds the intended name.
team_total <- function (a, b){
  filtered <- procs[grepl(a, procs$Team) & !grepl("VD|TD|EXP", procs$Project), ]
  per_quarter <- filtered %>%
    group_by(Year, Quarter, Team) %>%
    summarise("Total" = length(unique(AUTHOR)))
  wide <- cast(per_quarter, Quarter ~ Year)
  # Kept for backward compatibility: callers read the `Dev_count` /
  # `QA_count` globals created here.
  assign(b, wide, envir = .GlobalEnv)
  write.csv(wide, paste0(b, ".csv"))
}
#excel function
library(xlsx)
wb <- createWorkbook()
sheetone <- createSheet(wb,"Dev")
sheet1 <- createSheet(wb,"QA")
currRow <- 1
# Write two lists of data frames into the workbook `wb` (a global created
# above): elements of `a` go to the "Dev" sheet (`sheetone`), elements of
# `p` to the "QA" sheet (`sheet1`). Tables are stacked vertically with a
# 4-row gap; column headers get a bold, fully bordered cell style.
#
# Fixes relative to the original:
#  * the start row is a proper local variable instead of being read from
#    the global `currRow` on the first iteration only (fragile scoping);
#  * the cell style is built once per call instead of once per table;
#  * seq_along() replaces 1:length(), which misbehaves on empty lists.
xl_sheet <- function(a, p){
  header_style <- CellStyle(wb) + Font(wb, isBold = TRUE) +
    Border(position = c("BOTTOM", "LEFT", "TOP", "RIGHT"))
  row_at <- 1
  for (i in seq_along(a)) {
    addDataFrame(a[[i]],
                 sheet = sheetone,
                 startRow = row_at,
                 row.names = FALSE,
                 colnamesStyle = header_style,
                 rownamesStyle = header_style,
                 colStyle = header_style)
    row_at <- row_at + nrow(a[[i]]) + 4
  }
  row_at <- 1
  for (i in seq_along(p)) {
    addDataFrame(p[[i]],
                 sheet = sheet1,
                 startRow = row_at,
                 row.names = FALSE,
                 colnamesStyle = header_style)
    row_at <- row_at + nrow(p[[i]]) + 4
  }
  invisible(NULL)
}
##number of QA and Devs that logged time each project for each quarter of each year
dev_quarter(a = "Dev")
dev_quarter(a = "QA")
##number of QA and Devs that logged time for each quarter of each year
team_total(a = "Dev", b = "Dev_count")
team_total(a = "QA", b = "QA_count")
QA <-c(QA_count = list(QA_count), QA)
Dev <- c(Dev_count = list(Dev_count),Dev)
xl_sheet(a = Dev, p = QA)
saveWorkbook(wb, file = "Dev_QA_quarterly.xlsx")
| /dev_perticket.R | no_license | RichardCurran/R-scripts-Jira-tickets | R | false | false | 5,659 | r | library(tidyverse)
library(outliers)
library(ggplot2)
library(cowplot)
library(magrittr)
library(multipanelfigure)
library(stats)
library(reshape)
# KPIS_Processes <- list(ALL, ALL_totals, ALL_avg, ALL_perc, ALL_quarterly_totals, ALL_quarterly_avg, ALL_quarterly_perc,
# ALL_monthly_totals, ALL_monthly_avg, ALL_monthly_perc, ALL_weekly_avg, ALL_weekly_perc)
#
# names(KPIS_Processes) <- c("ALL", "ALL_totals", "ALL_avg", "ALL_perc", "ALL_quarterly_totals", "ALL_quarterly_avg", "ALL_quarterly_perc",
# "ALL_monthly_totals", "ALL_monthly_avg", "ALL_monthly_perc", "ALL_weekly_avg", "ALL_weekly_perc")
#
# rm(ALL, ALL_totals, ALL_avg, ALL_perc, ALL_quarterly_totals, ALL_quarterly_avg, ALL_quarterly_perc,
# ALL_monthly_totals, ALL_monthly_avg, ALL_monthly_perc, ALL_weekly_avg, ALL_weekly_perc)
## rough work
y <- procs[grepl("2018", procs$Year) & grepl("3", procs$Quarter) & grepl("QA", procs$Team),] %>% group_by(Project) %>%
summarise(x = length(unique(AUTHOR)))
unique(y$AUTHOR)
table(x$Project, x$AUTHOR)
x <- x %>% group_by(Project, Quarter) %>% summarise(x = sum(TIMESPENT))
mean(x$x)
# this is df of all developers time logged
x <- procs[grepl("Dev", procs$Team),]
x <- x[!grepl("VD|TD|EXP", x$Project),]
## run the following 4 lines to get avg. developers per Live application
x <- procs[grepl("Dev", procs$Team),]
x <- x[!grepl("VD|TD|EXP|OP|Inter|SP", x$Project),]
x$Project[grepl("AV", x$Project)] <- "Availability"
x$Project[grepl("PT", x$Project)] <- "Payroll"
x$Project[grepl("RT", x$Project)] <- "Roster"
## total employess per module per year----------------
dev_year <- x %>%
group_by(Project, Year) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits =0))
dev_year <- cast(dev_year, Year ~ Project)
## avg dev. per module per week----------------
dev_week <- x %>%
group_by(Project, Year, Week) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits = 0))
dev_week <- cast(dev_week, Year ~ Project)
## avg dev. per module per month ----------------
dev_month <- x %>%
group_by(Project, Year, Month) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits = 0))
dev_month <- cast(dev_month, Year ~ Project)
## avg dev. per module per quarter ----------------
dev_quarter <- x %>%
group_by(Project, Year, Quarter) %>%
summarise("Total Devs" = length(unique(AUTHOR))) %>%
group_by(Project, Year) %>%
summarise(avg_dev = round(mean(`Total Devs`), digits = 0))
dev_quarter <- cast(dev_quarter, Year ~ Project)
## list of each dev. dataframe
dev_all_apps <- list("Totals dev per year" = dev_year,"avg. dev per quarter" = dev_quarter, "avg. dev per month" = dev_month,
"avg.dev per week" = dev_week)
dev_live_apps <- list( "Totals dev per year" = dev_year,"avg. dev per quarter" = dev_quarter, "avg. dev per month" = dev_month,
"avg.dev per week" = dev_week)
rm(dev_month, dev_quarter, dev_week, dev_year)
# Devs per Quarter: total devs per quarter, yer and project ---------------
#dev & qa per quarter
dev_quarter <- function(a){
x <- procs[grepl(paste(a), procs$Team) & !grepl("VD|TD|EXP", procs$Project),]
dev_quarter <- x %>%
group_by(Year, Quarter, Project,Team) %>%
summarise("Total" = length(unique(AUTHOR)))
x <- split(dev_quarter, dev_quarter$Year)
for(e in names(x)){
x[[e]] <- cast(x[[e]], Year + Quarter ~ Project)
}
assign(paste(a), x, envir = .GlobalEnv)
}
#dev & qa per ticket type per quarter
team_total <- function (a, b){
x <- procs[grepl(paste(a), procs$Team) & !grepl("VD|TD|EXP", procs$Project),]
dev_quarter <- x %>%
group_by(Year, Quarter, Team) %>%
summarise("Total" = length(unique(AUTHOR)))
dev_quarter <- cast(dev_quarter, Quarter ~ Year)
assign(paste(b), dev_quarter, envir = .GlobalEnv)
write.csv(dev_quarter, paste(b, ".csv"))
}
#excel function
library(xlsx)
wb <- createWorkbook()
sheetone <- createSheet(wb,"Dev")
sheet1 <- createSheet(wb,"QA")
currRow <- 1
xl_sheet <- function(a, p){
for(i in 1:length(a)){
cs <- CellStyle(wb) + Font(wb, isBold=TRUE) + Border(position=c("BOTTOM", "LEFT",
"TOP", "RIGHT"))
addDataFrame(a[[i]],
sheet= sheetone,
startRow=currRow,
row.names=FALSE,
colnamesStyle = cs, rownamesStyle = cs, colStyle=cs)
currRow <- currRow + nrow(a[[i]]) + 4
}
currRow <- 1
for(i in 1:length(p)){
cs <- CellStyle(wb) + Font(wb, isBold=TRUE) + Border(position=c("BOTTOM", "LEFT",
"TOP", "RIGHT"))
addDataFrame(p[[i]],
sheet= sheet1,
startRow=currRow,
row.names=FALSE,
colnamesStyle=cs)
currRow <- currRow + nrow(p[[i]]) + 4
}
}
##number of QA and Devs that logged time each project for each quarter of each year
dev_quarter(a = "Dev")
dev_quarter(a = "QA")
##number of QA and Devs that logged time for each quarter of each year
team_total(a = "Dev", b = "Dev_count")
team_total(a = "QA", b = "QA_count")
QA <-c(QA_count = list(QA_count), QA)
Dev <- c(Dev_count = list(Dev_count),Dev)
xl_sheet(a = Dev, p = QA)
saveWorkbook(wb, file = "Dev_QA_quarterly.xlsx")
|
# Q4: # Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
# Load data
#################################################################################
# Both .rds files are read relative to this script's working directory.
NEI <- readRDS('../data/exdata-data-NEI_data/summarySCC_PM25.rds')
SCC <- readRDS('../data/exdata-data-NEI_data/Source_Classification_Code.rds')
# Generate plot4.png
#################################################################################
# According to the code book: http://www3.epa.gov/ttn/chief/net/2008neiv3/2008_neiv3_tsd_draft.pdf
# SCC.Level.Three with a keyword of "Coal" would be coal combustion-related record
# Keep only NEI rows whose SCC code maps to a "Coal" level-three source.
coal <- NEI[NEI$SCC %in% SCC$SCC[grep("Coal", SCC$SCC.Level.Three)], ]
# Sum emissions per year across all coal-related sources.
toPlot4 <- with(coal, tapply(Emissions, year, sum))
toPlot4
# Line plot of the yearly totals, written to plot4.png.
png("plot4.png")
plot(x = names(toPlot4), y = toPlot4, type = 'l', main = "Total emission of Coal related source", xlab = "Year", ylab = "Total Emission in tons")
dev.off()
| /plot4.R | permissive | cniedotus/ExData_Plotting2 | R | false | false | 948 | r | # Q4: # Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
# Load data
#################################################################################
NEI <- readRDS('../data/exdata-data-NEI_data/summarySCC_PM25.rds')
SCC <- readRDS('../data/exdata-data-NEI_data/Source_Classification_Code.rds')
# Generate plot4.png
#################################################################################
# According to the code book: http://www3.epa.gov/ttn/chief/net/2008neiv3/2008_neiv3_tsd_draft.pdf
# SCC.Level.Three with a keyword of "Coal" would be coal combustion-related record
coal <- NEI[NEI$SCC %in% SCC$SCC[grep("Coal", SCC$SCC.Level.Three)], ]
toPlot4 <- with(coal, tapply(Emissions, year, sum))
toPlot4
png("plot4.png")
plot(x = names(toPlot4), y = toPlot4, type = 'l', main = "Total emission of Coal related source", xlab = "Year", ylab = "Total Emission in tons")
dev.off()
|
# this code was used to check whether the distribnution of MHL would change for WGBS dataset with the regions of RRBS
bedwithgap <- function(bed, gap) {
  # Widen every interval in a 3-column bed table by `gap` bases on each
  # side: column 2 (start) shifts down by `gap`, column 3 (end) up by
  # `gap`. The table passes through as.matrix(), so with a character
  # chromosome column the adjusted positions come back as character
  # strings -- identical to the original implementation's behaviour.
  widened <- as.matrix(bed)
  widened[, 2] <- as.numeric(widened[, 2]) - gap
  widened[, 3] <- as.numeric(widened[, 3]) + gap
  data.frame(widened)
}
## Shell out to a bedtools command (default: intersectBed) on two
## in-memory bed data.frames and read the result back as a data.frame.
## Requires the bedtools binaries to be on the PATH; `opt.string` is
## appended verbatim to the command line (e.g. "-wa -u").
Rbedtools<-function(functionstring="intersectBed",bed1,bed2,opt.string=""){
#create temp files
a.file=tempfile()
b.file=tempfile()
out =tempfile()
## NOTE(review): this changes the global scipen option and never restores
## it -- consider saving the old value and restoring via on.exit().
options(scipen =99) # not to use scientific notation when writing out
#write bed formatted dataframes to tempfile
write.table(bed1,file=a.file,quote=F,sep="\t",col.names=F,row.names=F)
write.table(bed2,file=b.file,quote=F,sep="\t",col.names=F,row.names=F)
# create the command string and call the command using system()
command=paste(functionstring,"-a",a.file,"-b",b.file,opt.string,">",out,sep=" ")
cat(command,"\n")
try(system(command))
# If bedtools failed, the read below errors on an empty/missing file.
res=read.table(out,header=F)
unlink(a.file);unlink(b.file);unlink(out)
return(res)
}
cor2bed <- function(cor) {
  # Parse genomic coordinate strings of the form "chr:start-end" into a
  # three-column data.frame (chromosome, start, end), one row per input
  # string. All fields are returned as text, as in the original.
  halves <- strsplit(as.character(cor), split = ":")
  fields <- unlist(lapply(halves, function(h) strsplit(h, "-")))
  data.frame(matrix(fields, ncol = 3, byrow = TRUE))
}
bed2cor <- function(bed) {
  # Inverse of cor2bed(): glue the first three fields of each row back
  # into a "chr:start-end" coordinate string. Each row is tab-split
  # first, which is a no-op for ordinary single-value cells (kept for
  # behavioural parity with the original).
  apply(bed, 1, function(row) {
    parts <- unlist(strsplit(row, "\t"))
    paste(parts[1], ":", parts[2], "-", parts[3], sep = "")
  })
}
## Compare MHL (methylation haplotype load) matrices between WGBS and
## RRBS over their shared regions, then cluster and plot the samples.
setwd("/home/shg047/monod/phase2")
## Region-by-sample MHL matrices; row names are "chr:start-end" strings.
file1<-read.table("WGBS_methHap_load_matrix_July2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
file2<-read.table("RRBS_methHap_load_matrix_July2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
## Intersect the two region sets via bedtools and keep only WGBS rows
## that overlap an RRBS region.
bed1<-cor2bed(rownames(file1))
bed2<-cor2bed(rownames(file2))
bed<-Rbedtools(functionstring="intersectBed",bed1,bed2,opt.string="-wa -u")
cor1<-gsub("[ ]","",bed2cor(bed))
file1<-file1[match(cor1,rownames(file1)),]
colnames(file1)
## Normalise sample names: unify separators, then keep the token before
## the first "." and "_".
colnames(file1)<-gsub("_","-",colnames(file1))
samplename1=sapply(strsplit(colnames(file1),"[.]"),function(x) unlist(x)[1])
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1])
## NOTE(review): `new` is not defined anywhere in this script -- it is
## presumably a sample-annotation table loaded interactively (column 3 =
## sample ID, column 4 = group label); confirm before rerunning.
cor1<-match(samplename2,new[,3])
lab1<-new[cor1,4]
groupname=lab1
## NOTE(review): `matrix` shadows base::matrix() in this session.
matrix=file1
## Recode sample-name prefixes to tissue labels (CC = colon cancer,
## LC = lung cancer -- TODO confirm) and strip input-amount suffixes.
samplename2<-gsub("6-P","CC-P",samplename2)
samplename2<-gsub("7-P","LC-P",samplename2)
samplename2<-gsub("6-T","CC-T",samplename2)
samplename2<-gsub("7-T","LC-T",samplename2)
samplename2<-gsub("frozen","Frozen",samplename2)
samplename2<-gsub("-100ng","",samplename2)
samplename2<-gsub("-5ng","",samplename2)
samplename2<-gsub("CTT","CC-T",samplename2)
colnames(matrix)=samplename2
## Drop two samples by position (columns 11 and 12).
matrix<-matrix[,-c(11,12)]
d <- dist(t(matrix)) # distance matrix between samples
fit <- hclust(d, method="complete") # hierarchical clustering of samples
## Local (Windows) continuation: reload the saved objects and plot the
## per-sample MHL distributions.
setwd("C:\\Users\\User\\Dropbox\\Project\\methylation\\monod")
load("WGBS.RRBS.RData")
par(mar=c(2,7,1,1))
boxplot(matrix[,1:ncol(matrix)],outline=F,horizontal=T,notch=T,las=1,cex.axis=0.65)
| /monod/analysis/code/WGBS.RRBS.ShareRegion.Distrubntion.R | no_license | Shicheng-Guo/methylation2020 | R | false | false | 2,870 | r | # this code was used to check whether the distribnution of MHL would change for WGBS dataset with the regions of RRBS
bedwithgap<-function(bed,gap){
bed<-as.matrix(bed)
bed[,2]=as.numeric(bed[,2])-gap
bed[,3]=as.numeric(bed[,3])+gap
bed<-data.frame(bed)
bed
}
Rbedtools<-function(functionstring="intersectBed",bed1,bed2,opt.string=""){
#create temp files
a.file=tempfile()
b.file=tempfile()
out =tempfile()
options(scipen =99) # not to use scientific notation when writing out
#write bed formatted dataframes to tempfile
write.table(bed1,file=a.file,quote=F,sep="\t",col.names=F,row.names=F)
write.table(bed2,file=b.file,quote=F,sep="\t",col.names=F,row.names=F)
# create the command string and call the command using system()
command=paste(functionstring,"-a",a.file,"-b",b.file,opt.string,">",out,sep=" ")
cat(command,"\n")
try(system(command))
res=read.table(out,header=F)
unlink(a.file);unlink(b.file);unlink(out)
return(res)
}
cor2bed<-function(cor){
a<-unlist(lapply(strsplit(as.character(cor),split=c(":")),function(x) strsplit(x,"-")))
bed<-matrix(a,ncol=3,byrow=T)
return(data.frame(bed))
}
bed2cor<-function(bed){
cor<-apply(bed,1,function(x){paste(unlist(strsplit(x,"\t"))[1],":",unlist(strsplit(x,"\t"))[2],"-",unlist(strsplit(x,"\t"))[3],sep="")})
return(cor)
}
setwd("/home/shg047/monod/phase2")
file1<-read.table("WGBS_methHap_load_matrix_July2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
file2<-read.table("RRBS_methHap_load_matrix_July2015.txt",head=T,sep="\t",row.names=1,as.is=T,check.names=F)
bed1<-cor2bed(rownames(file1))
bed2<-cor2bed(rownames(file2))
bed<-Rbedtools(functionstring="intersectBed",bed1,bed2,opt.string="-wa -u")
cor1<-gsub("[ ]","",bed2cor(bed))
file1<-file1[match(cor1,rownames(file1)),]
colnames(file1)
colnames(file1)<-gsub("_","-",colnames(file1))
samplename1=sapply(strsplit(colnames(file1),"[.]"),function(x) unlist(x)[1])
samplename2=sapply(strsplit(samplename1,"_"),function(x) unlist(x)[1])
cor1<-match(samplename2,new[,3])
lab1<-new[cor1,4]
groupname=lab1
matrix=file1
samplename2<-gsub("6-P","CC-P",samplename2)
samplename2<-gsub("7-P","LC-P",samplename2)
samplename2<-gsub("6-T","CC-T",samplename2)
samplename2<-gsub("7-T","LC-T",samplename2)
samplename2<-gsub("frozen","Frozen",samplename2)
samplename2<-gsub("-100ng","",samplename2)
samplename2<-gsub("-5ng","",samplename2)
samplename2<-gsub("CTT","CC-T",samplename2)
colnames(matrix)=samplename2
matrix<-matrix[,-c(11,12)]
d <- dist(t(matrix)) # distance matrix
fit <- hclust(d, method="complete") # distance matrix
setwd("C:\\Users\\User\\Dropbox\\Project\\methylation\\monod")
load("WGBS.RRBS.RData")
par(mar=c(2,7,1,1))
boxplot(matrix[,1:ncol(matrix)],outline=F,horizontal=T,notch=T,las=1,cex.axis=0.65)
|
#### Data collection and Creation of Variables ####
# Read the household power consumption file (semicolon-separated).
dataFile <- "household_power_consumption.txt"
power <- read.table(dataFile, header=TRUE, sep=";")
# Dates arrive as day/month/year strings; convert for filtering.
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
# Keep only the two days of interest.
df <- power[(power$Date=="2007-02-01") | (power$Date=="2007-02-02"),]
# The column is not read as numeric (presumably the raw file encodes
# missing values as text -- confirm); the as.character/as.numeric round
# trip coerces it, turning non-numeric entries into NA.
df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
### PLOT 1 CODE ###
# Draw the histogram on the active device, then copy it to plot1.png
# (480x480) and report where the file was written.
plot1 <- function() {
hist(df$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
cat("Plot1.png has been saved in", getwd())
}
plot1()
| /plot1.R | no_license | r1n0sh/ExData_Plotting1 | R | false | false | 660 | r | #### Data collection and Creation of Variables ####
dataFile <- "household_power_consumption.txt"
power <- read.table(dataFile, header=TRUE, sep=";")
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
df <- power[(power$Date=="2007-02-01") | (power$Date=="2007-02-02"),]
df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
### PLOT 1 CODE ###
plot1 <- function() {
hist(df$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
cat("Plot1.png has been saved in", getwd())
}
plot1()
|
library(ggmap)
# Read event records with fixed column types: time, event, lat, long.
data1 <- read.csv("/home/eduardo/Doutorado/vagas.csv",
header=FALSE, sep = ";",
colClasses= c("character","character","numeric","numeric"),
col.names=c("time","event","lat","long"))
# Fetch a roadmap tile centred on São Paulo.
# NOTE(review): the variable is named tartu_map but the location is
# "são paulo" -- presumably a leftover name from an earlier script.
tartu_map <- get_map(location = "são paulo", maptype = "roadmap", zoom = 12)
png('mapa_calor.png', width = 600, height = 600)
# Heat map: 2-D density contour lines plus filled density polygons,
# coloured green (low) to red (high), written to mapa_calor.png.
ggmap(tartu_map, size = c(600, 600)) +
geom_density2d(data = data1,
aes(x = long, y = lat), size = 0.3) + stat_density2d(data = data1,
aes(x = long, y = lat, fill = ..level.., alpha = ..level..), size = 0.01,
bins = 16, geom = "polygon") + scale_fill_gradient(low = "green", high = "red") +
scale_alpha(range = c(0, 0.3), guide = FALSE)
dev.off()
| /simulator_analyses/platform/estacionamento.R | no_license | ezambomsantana/doutorado | R | false | false | 898 | r | library(ggmap)
data1 <- read.csv("/home/eduardo/Doutorado/vagas.csv",
header=FALSE, sep = ";",
colClasses= c("character","character","numeric","numeric"),
col.names=c("time","event","lat","long"))
tartu_map <- get_map(location = "são paulo", maptype = "roadmap", zoom = 12)
png('mapa_calor.png', width = 600, height = 600)
ggmap(tartu_map, size = c(600, 600)) +
geom_density2d(data = data1,
aes(x = long, y = lat), size = 0.3) + stat_density2d(data = data1,
aes(x = long, y = lat, fill = ..level.., alpha = ..level..), size = 0.01,
bins = 16, geom = "polygon") + scale_fill_gradient(low = "green", high = "red") +
scale_alpha(range = c(0, 0.3), guide = FALSE)
dev.off()
|
### --- Test Setup --- ###
if(TRUE) {
## Not really needed, but can be handy
## when writing tests
library("RUnit")
library("finmix")
}
## Locate and read the Poisson example observations used by the fdata
## tests. Returns the data.frame read from poisson.data.csv (no header).
".setUp.y" <- function()
{
## Get path ##
pkg <- "finmix"
## NOTE(review): Sys.getenv() returns a string, so `== FALSE` compares
## against "FALSE"; the local ../data branch therefore only runs when
## RCMDCHECK is literally set to "FALSE" -- confirm that is intended.
if (Sys.getenv("RCMDCHECK") == FALSE) {
data.path <- file.path(getwd(), "..",
"data", "poisson.data.csv")
} else {
data.path <- system.file(package = pkg,
'data/poisson.data.csv')
}
read.csv(data.path, header = FALSE, sep = ",")
}
## Locate and read the Poisson indicator (allocation) example data used
## by the fdata tests. Mirrors .setUp.y(): when the RCMDCHECK environment
## variable is literally "FALSE" the file is read from the local ../data
## directory, otherwise from the installed package.
##
## Fix: the original referenced `pkg` without defining it in this
## function (only .setUp.y defined it, inside its own scope), so the
## system.file() branch failed with "object 'pkg' not found".
".setUp.S" <- function()
{
    pkg <- "finmix"
    if (Sys.getenv("RCMDCHECK") == FALSE) {
        ind.path <- file.path(getwd(), "..",
                              "data",
                              "poisson.ind.csv")
    } else {
        ind.path <- system.file(package = pkg,
                                'data/poisson.ind.csv')
    }
    read.csv(ind.path, header = FALSE, sep = ",")
}
## Start testing ##
## Verify the default state of a freshly constructed fdata object: all
## data slots unset (NA), flags and scalar slots at their documented
## defaults.
"test.fdata" <- function()
{
## Default ##
fdata.obj <- fdata()
## Data slots (observations, allocations, exposures, repetitions) start
## out as NA.
checkTrue(all(is.na(fdata.obj@y)), "check1")
checkTrue(all(is.na(fdata.obj@S)), "check2")
checkTrue(all(is.na(fdata.obj@exp)),"check3")
checkTrue(all(is.na(fdata.obj@T)), "check4")
## Flags: column-major storage on, simulation flag off.
checkTrue(fdata.obj@bycolumn, "check5")
checkTrue(!fdata.obj@sim, "check6")
## Scalars: one observation, one dimension, discrete type, empty name.
checkEquals(fdata.obj@N, 1)
checkEquals(fdata.obj@r, 1)
checkEquals(fdata.obj@type, "discrete")
checkEquals(length(fdata.obj@name), 0)
}
"test.fdata.check.y" <- function()
{
## Setup ##
y <- .setUp.y()
fdata.obj <- fdata(y = y)
checkTrue(!all(is.na(fdata.obj@y)), "check1")
checkEquals(fdata.obj@N, nrow(fdata.obj@y))
checkEquals(fdata.obj@r, ncol(fdata.obj@y))
checkTrue(fdata.obj@bycolumn, "check2")
## Check row-ordering ##
y <- t(.setUp.y())
fdata.obj <- fdata(y = y)
checkTrue(!all(is.na(fdata.obj@y)), "check3")
checkEquals(fdata.obj@N, ncol(fdata.obj@y))
checkEquals(fdata.obj@r, nrow(fdata.obj@y))
checkTrue(!fdata.obj@bycolumn, "check4")
## Check exception
y <- matrix("", nrow = 20)
checkException(fdata(y = y), "check5")
}
"test.fdata.check.N" <- function()
{
## Setup
fdata.obj <- fdata(N = 200)
checkEquals(fdata.obj@N, 200)
y <- .setUp.y()
fdata.obj <- fdata(y = y, N = 100)
## Check exception
checkException(fdata(y = y, N = 200), "check1")
## Check row-ordering
y <- t(y)
fdata.obj <- fdata(y = y, N = 100)
checkEquals(fdata.obj@N, 100)
checkException(fdata(y = y, N = 200), "check2")
}
"test.fdata.check.r" <- function()
{
## Setup
fdata.obj <- fdata(r = 2, type = "continuous")
checkEquals(fdata.obj@r, 2)
y <- .setUp.y()
fdata.obj <- fdata(y = y, r = 1)
## Check exception
checkException(fdata(y = y, r = 2), "check1")
## Check row-ordering
y <- t(y)
fdata.obj <- fdata(y = y, r = 1)
checkEquals(fdata.obj@r, 1)
checkException(fdata(y = y, r = 2), "check2")
}
"test.fdata.check.type" <- function()
{
checkException(fdata(type = "jump"), "check1")
}
"test.fdata.check.S" <- function()
{
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
checkTrue(!all(is.na(fdata.obj@S)), "check1")
checkEquals(fdata.obj@N, NROW(S$V1))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
S <- t(S$V1)
fdata.obj <- fdata(S = S)
checkTrue(!all(is.na(fdata.obj@S)), "check2")
checkEquals(fdata.obj@N, NCOL(S))
checkEquals(fdata.obj@r, 1)
## Check Exception
S <- matrix("", nrow = 10)
checkException(fdata(S = S), "check23")
S <- matrix(c(2.3, 4.1, 2.3))
fdata.obj <- fdata(S = S)
checkEquals(fdata.obj@S[1], 2)
S <- .setUp.S()
S <- S$V1[1:50]
y <- .setUp.y()
checkException(fdata(y = y, S = S), "check4")
S <- .setUp.S()
S <- cbind(S$V1, S$V1)
checkException(fdata(S = S), "check5")
checkException(fdata(S = t(S)), "check6")
S <- c(2, 1, 1, 1, 2, -1)
checkException(fdata(S = S), "check7")
}
"test.fdata.check.T" <- function()
{
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
checkTrue(!all(is.na(fdata.obj@T)), "check1")
checkEquals(fdata.obj@N, NROW(T))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
T <- t(T$V1)
fdata.obj <- fdata(T = T)
checkTrue(!all(is.na(fdata.obj@T)), "check2")
checkEquals(fdata.obj@N, NCOL(T))
checkEquals(fdata.obj@r, 1)
## Check exceptions
T <- matrix("", nrow = 10)
checkException(fdata(T = T), "check3")
T <- matrix(c(2.3, 4.1, 2.3))
fdata.obj <- fdata(T = T)
checkEquals(fdata.obj@T[1], 2)
T <- .setUp.S()
T <- T$V1[1:50]
y <- .setUp.y()
checkException(fdata(y = y, T = T), "check4")
T <- .setUp.S()
T <- cbind(T$V1, T$V1)
checkException(fdata(T = T), "check5")
checkException(fdata(T = t(T)), "check6")
T <- c(2, 1, 2, 2, 0)
checkException(fdata(T = T), "check7")
}
"test.fdata.check.exp" <- function()
{
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
checkTrue(!all(is.na(fdata.obj@exp)), "check1")
checkEquals(fdata.obj@N, NROW(expos))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
expos <- t(expos$V1)
fdata.obj <- fdata(exp = expos)
checkTrue(!all(is.na(fdata.obj@exp)), "check2")
checkEquals(fdata.obj@N, NCOL(expos))
checkEquals(fdata.obj@r, 1)
## Check exceptions
expos <- matrix("", nrow = 10)
checkException(fdata(exp = expos), "check3")
expos <- .setUp.y()
expos <- expos$V1[1:50]
y <- .setUp.y()
checkException(fdata(y = y, exp = expos), "check4")
expos <- .setUp.y()
expos <- cbind(expos$V1, expos$V1)
checkException(fdata(exp = expos), "check5")
checkException(fdata(exp = t(expos)), "check6")
expos <- c(2, -1, 3, 1, 2, 0.0003)
checkException(fdata(exp = expos), "check7")
}
"test.fdata.setY" <- function()
{
## Default
fdata.obj <- fdata()
y <- .setUp.y()
setY(fdata.obj) <- y
checkTrue(!all(is.na(fdata.obj@y)), "check1")
checkEquals(fdata.obj@N, NROW(y))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
setY(fdata.obj) <- t(y)
## Check with S
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
checkEquals(fdata.obj@N, NROW(S$V1))
setY(fdata.obj) <- y
checkTrue(!all(is.na(fdata.obj@y)), "check2")
checkEquals(fdata.obj@N, NROW(S$V1))
checkEquals(fdata.obj@r, 1)
setY(fdata.obj) <- t(y)
checkEquals(nrow(fdata.obj@y), NROW(S$V1))
checkEquals(ncol(fdata.obj@y), 1)
y <- cbind(y, y)
setType(fdata.obj) <- "continuous"
setY(fdata.obj) <- y
setY(fdata.obj) <- t(y)
## Check exception
y <- matrix("", nrow = 10)
checkException(setY(fdata.obj) <- y, "check3")
}
"test.fdata.setBycolumn" <- function()
{
## Default
fdata.obj <- fdata()
setBycolumn(fdata.obj) <- FALSE
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setBycolumn(fdata.obj) <- TRUE
checkTrue(getBycolumn(fdata.obj), "check1")
setBycolumn(fdata.obj) <- FALSE
checkTrue(!getBycolumn(fdata.obj), "check2")
checkEquals(nrow(fdata.obj@y), NCOL(y))
checkEquals(ncol(fdata.obj@y), NROW(y))
checkEquals(fdata.obj@N, NROW(y))
checkEquals(fdata.obj@r, NCOL(y))
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
setBycolumn(fdata.obj) <- FALSE
checkEquals(nrow(fdata.obj@S), NCOL(S$V1))
checkEquals(ncol(fdata.obj@S), NROW(S$V1))
}
"test.fdata.setS" <- function()
{
## Default
fdata.obj <- fdata()
S <- .setUp.S()
setS(fdata.obj) <- S$V1
checkTrue(!all(is.na(fdata.obj@S)), "check1")
## Check row-ordering
setS(fdata.obj) <- t(S$V1)
checkEquals(nrow(fdata.obj@S), NROW(S$V1))
checkEquals(ncol(fdata.obj@S), NCOL(S$V1))
## Check with y
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setS(fdata.obj) <- S$V1
checkEquals(nrow(fdata.obj@S), NROW(S$V1))
checkEquals(ncol(fdata.obj@S), NCOL(S$V1))
## Check with y and row-ordering
fdata.obj <- fdata(y = t(y))
setS(fdata.obj) <- S$V1
checkEquals(nrow(fdata.obj@S), NCOL(S$V1))
checkEquals(ncol(fdata.obj@S), NROW(S$V1))
fdata.obj <- fdata(y = y)
setS(fdata.obj) <- t(S$V1)
checkEquals(nrow(fdata.obj@S), NROW(S$V1))
checkEquals(ncol(fdata.obj@S), NCOL(S$V1))
## Check exception
S <- c(2, 1, 2, - 1)
checkException(setS(fdata.obj) <- S, "check2")
S <- matrix("", nrow = 10)
checkException(setS(fdata.obj) <- S, "check3")
}
"test.fdata.setExp" <- function()
{
## Default
fdata.obj <- fdata()
expos <- .setUp.y()
expos <- matrix(expos$V1)
setExp(fdata.obj) <- expos
checkTrue(!all(is.na(fdata.obj@exp)), "check1")
## Check row-ordering
setExp(fdata.obj) <- t(expos)
checkEquals(nrow(fdata.obj@exp), NROW(expos))
checkEquals(ncol(fdata.obj@exp), NCOL(expos))
## Check with y
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setExp(fdata.obj) <- expos
checkEquals(nrow(fdata.obj@exp), NROW(expos))
checkEquals(ncol(fdata.obj@exp), NCOL(expos))
## Check with y and row-ordering
fdata.obj <- fdata(y = t(y))
setExp(fdata.obj) <- expos
checkEquals(nrow(fdata.obj@exp), NCOL(expos))
checkEquals(ncol(fdata.obj@exp), NROW(expos))
fdata.obj <- fdata(y = y)
setExp(fdata.obj) <- t(expos)
checkEquals(nrow(fdata.obj@exp), NROW(expos))
checkEquals(ncol(fdata.obj@exp), NCOL(expos))
## Check exception
expos <- c(2, 1, 2, - 1)
checkException(setExp(fdata.obj) <- expos, "check2")
expos <- matrix("", nrow = 10)
checkException(setExp(fdata.obj) <- expos, "check3")
}
"test.fdata.setT" <- function()
{
## Default
fdata.obj <- fdata()
T <- .setUp.S()
setT(fdata.obj) <- T$V1
checkTrue(!all(is.na(fdata.obj@T)), "check1")
## Check row-ordering
setT(fdata.obj) <- t(T$V1)
checkEquals(nrow(fdata.obj@T), NROW(T$V1))
checkEquals(ncol(fdata.obj@T), NCOL(T$V1))
## Check with y
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setT(fdata.obj) <- T$V1
checkEquals(nrow(fdata.obj@T), NROW(T$V1))
checkEquals(ncol(fdata.obj@T), NCOL(T$V1))
## Check with y and row-ordering
fdata.obj <- fdata(y = t(y))
setT(fdata.obj) <- T$V1
checkEquals(nrow(fdata.obj@T), NCOL(T$V1))
checkEquals(ncol(fdata.obj@T), NROW(T$V1))
fdata.obj <- fdata(y = y)
setT(fdata.obj) <- t(T$V1)
checkEquals(nrow(fdata.obj@T), NROW(T$V1))
checkEquals(ncol(fdata.obj@T), NCOL(T$V1))
## Check exception
T <- c(2, 1, 2, - 1)
checkException(setT(fdata.obj) <- T, "check2")
T <- matrix("", nrow = 10)
checkException(setT(fdata.obj) <- T, "check3")
}
"test.fdata.hasY" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasY(fdata.obj), "check1")
checkException(hasY(fdata.obj, verbose = TRUE), "check2")
y <- .setUp.y()
fdata.obj <- fdata(y = y)
checkTrue(hasY(fdata.obj), "check3")
}
"test.fdata.hasS" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasS(fdata.obj), "check1")
checkException(hasS(fdata.obj, verbose = TRUE), "check2")
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
checkTrue(hasS(fdata.obj), "check3")
}
"test.fdata.hasExp" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasExp(fdata.obj), "check1")
checkException(hasExp(fdata.obj, verbose = TRUE), "check2")
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
checkTrue(hasExp(fdata.obj), "check3")
}
"test.fdata.hasT" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasT(fdata.obj), "check1")
checkException(hasT(fdata.obj, verbose = TRUE), "check2")
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
checkTrue(hasT(fdata.obj), "check3")
}
"test.fdata.getColY" <- function()
{
## Default
y <- .setUp.y()
fdata.obj <- fdata(y = y)
y.out <- getColY(fdata.obj)
checkEquals(nrow(y.out), NROW(y))
checkEquals(ncol(y.out), NCOL(y))
}
"test.fdata.getRowY" <- function()
{
## Default
y <- .setUp.y()
fdata.obj <- fdata(y = y)
y.out <- getRowY(fdata.obj)
checkEquals(ncol(y.out), NROW(y))
checkEquals(nrow(y.out), NCOL(y))
}
"test.fdata.getColS" <- function()
{
## Default
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
S.out <- getColS(fdata.obj)
checkEquals(nrow(S.out), NROW(S))
checkEquals(ncol(S.out), NCOL(S))
}
"test.fdata.getRowS" <- function()
{
## Default
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
S.out <- getRowS(fdata.obj)
checkEquals(ncol(S.out), NROW(S))
checkEquals(nrow(S.out), NCOL(S))
}
"test.fdata.getColExp" <- function()
{
## Default
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
exp.out <- getColExp(fdata.obj)
checkEquals(nrow(exp.out), NROW(expos))
checkEquals(ncol(exp.out), NCOL(expos))
}
"test.fdata.getRowY" <- function()
{
## Default
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
exp.out <- getRowExp(fdata.obj)
checkEquals(ncol(exp.out), NROW(expos))
checkEquals(nrow(exp.out), NCOL(expos))
}
"test.fdata.getColT" <- function()
{
## Default
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
T.out <- getColT(fdata.obj)
checkEquals(nrow(T.out), NROW(T))
checkEquals(ncol(T.out), NCOL(T))
}
"test.fdata.getRowT" <- function()
{
## Default
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
T.out <- getRowT(fdata.obj)
checkEquals(ncol(T.out), NROW(T))
checkEquals(nrow(T.out), NCOL(T))
}
| /inst/unitTests/runit.fdata.R | no_license | gaborcsardi/finmix | R | false | false | 13,761 | r | ### --- Test Setup --- ###
if(TRUE) {
## Not really needed, but can be handy
## when writing tests
library("RUnit")
library("finmix")
}
".setUp.y" <- function()
{
## Get path ##
pkg <- "finmix"
if (Sys.getenv("RCMDCHECK") == FALSE) {
data.path <- file.path(getwd(), "..",
"data", "poisson.data.csv")
} else {
data.path <- system.file(package = pkg,
'data/poisson.data.csv')
}
read.csv(data.path, header = FALSE, sep = ",")
}
".setUp.S" <- function()
{
if (Sys.getenv("RCMDCHECK") == FALSE) {
ind.path <- file.path(getwd(), "..",
"data",
"poisson.ind.csv")
} else {
ind.path <- system.file(package = pkg,
'data/poisson.ind.csv')
}
read.csv(ind.path, header = FALSE, sep = ",")
}
## Start testing ##
"test.fdata" <- function()
{
## Default ##
fdata.obj <- fdata()
checkTrue(all(is.na(fdata.obj@y)), "check1")
checkTrue(all(is.na(fdata.obj@S)), "check2")
checkTrue(all(is.na(fdata.obj@exp)),"check3")
checkTrue(all(is.na(fdata.obj@T)), "check4")
checkTrue(fdata.obj@bycolumn, "check5")
checkTrue(!fdata.obj@sim, "check6")
checkEquals(fdata.obj@N, 1)
checkEquals(fdata.obj@r, 1)
checkEquals(fdata.obj@type, "discrete")
checkEquals(length(fdata.obj@name), 0)
}
"test.fdata.check.y" <- function()
{
## Setup ##
y <- .setUp.y()
fdata.obj <- fdata(y = y)
checkTrue(!all(is.na(fdata.obj@y)), "check1")
checkEquals(fdata.obj@N, nrow(fdata.obj@y))
checkEquals(fdata.obj@r, ncol(fdata.obj@y))
checkTrue(fdata.obj@bycolumn, "check2")
## Check row-ordering ##
y <- t(.setUp.y())
fdata.obj <- fdata(y = y)
checkTrue(!all(is.na(fdata.obj@y)), "check3")
checkEquals(fdata.obj@N, ncol(fdata.obj@y))
checkEquals(fdata.obj@r, nrow(fdata.obj@y))
checkTrue(!fdata.obj@bycolumn, "check4")
## Check exception
y <- matrix("", nrow = 20)
checkException(fdata(y = y), "check5")
}
"test.fdata.check.N" <- function()
{
## Setup
fdata.obj <- fdata(N = 200)
checkEquals(fdata.obj@N, 200)
y <- .setUp.y()
fdata.obj <- fdata(y = y, N = 100)
## Check exception
checkException(fdata(y = y, N = 200), "check1")
## Check row-ordering
y <- t(y)
fdata.obj <- fdata(y = y, N = 100)
checkEquals(fdata.obj@N, 100)
checkException(fdata(y = y, N = 200), "check2")
}
"test.fdata.check.r" <- function()
{
## Setup
fdata.obj <- fdata(r = 2, type = "continuous")
checkEquals(fdata.obj@r, 2)
y <- .setUp.y()
fdata.obj <- fdata(y = y, r = 1)
## Check exception
checkException(fdata(y = y, r = 2), "check1")
## Check row-ordering
y <- t(y)
fdata.obj <- fdata(y = y, r = 1)
checkEquals(fdata.obj@r, 1)
checkException(fdata(y = y, r = 2), "check2")
}
"test.fdata.check.type" <- function()
{
checkException(fdata(type = "jump"), "check1")
}
"test.fdata.check.S" <- function()
{
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
checkTrue(!all(is.na(fdata.obj@S)), "check1")
checkEquals(fdata.obj@N, NROW(S$V1))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
S <- t(S$V1)
fdata.obj <- fdata(S = S)
checkTrue(!all(is.na(fdata.obj@S)), "check2")
checkEquals(fdata.obj@N, NCOL(S))
checkEquals(fdata.obj@r, 1)
## Check Exception
S <- matrix("", nrow = 10)
checkException(fdata(S = S), "check23")
S <- matrix(c(2.3, 4.1, 2.3))
fdata.obj <- fdata(S = S)
checkEquals(fdata.obj@S[1], 2)
S <- .setUp.S()
S <- S$V1[1:50]
y <- .setUp.y()
checkException(fdata(y = y, S = S), "check4")
S <- .setUp.S()
S <- cbind(S$V1, S$V1)
checkException(fdata(S = S), "check5")
checkException(fdata(S = t(S)), "check6")
S <- c(2, 1, 1, 1, 2, -1)
checkException(fdata(S = S), "check7")
}
"test.fdata.check.T" <- function()
{
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
checkTrue(!all(is.na(fdata.obj@T)), "check1")
checkEquals(fdata.obj@N, NROW(T))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
T <- t(T$V1)
fdata.obj <- fdata(T = T)
checkTrue(!all(is.na(fdata.obj@T)), "check2")
checkEquals(fdata.obj@N, NCOL(T))
checkEquals(fdata.obj@r, 1)
## Check exceptions
T <- matrix("", nrow = 10)
checkException(fdata(T = T), "check3")
T <- matrix(c(2.3, 4.1, 2.3))
fdata.obj <- fdata(T = T)
checkEquals(fdata.obj@T[1], 2)
T <- .setUp.S()
T <- T$V1[1:50]
y <- .setUp.y()
checkException(fdata(y = y, T = T), "check4")
T <- .setUp.S()
T <- cbind(T$V1, T$V1)
checkException(fdata(T = T), "check5")
checkException(fdata(T = t(T)), "check6")
T <- c(2, 1, 2, 2, 0)
checkException(fdata(T = T), "check7")
}
"test.fdata.check.exp" <- function()
{
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
checkTrue(!all(is.na(fdata.obj@exp)), "check1")
checkEquals(fdata.obj@N, NROW(expos))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
expos <- t(expos$V1)
fdata.obj <- fdata(exp = expos)
checkTrue(!all(is.na(fdata.obj@exp)), "check2")
checkEquals(fdata.obj@N, NCOL(expos))
checkEquals(fdata.obj@r, 1)
## Check exceptions
expos <- matrix("", nrow = 10)
checkException(fdata(exp = expos), "check3")
expos <- .setUp.y()
expos <- expos$V1[1:50]
y <- .setUp.y()
checkException(fdata(y = y, exp = expos), "check4")
expos <- .setUp.y()
expos <- cbind(expos$V1, expos$V1)
checkException(fdata(exp = expos), "check5")
checkException(fdata(exp = t(expos)), "check6")
expos <- c(2, -1, 3, 1, 2, 0.0003)
checkException(fdata(exp = expos), "check7")
}
"test.fdata.setY" <- function()
{
## Default
fdata.obj <- fdata()
y <- .setUp.y()
setY(fdata.obj) <- y
checkTrue(!all(is.na(fdata.obj@y)), "check1")
checkEquals(fdata.obj@N, NROW(y))
checkEquals(fdata.obj@r, 1)
## Check row-ordering
setY(fdata.obj) <- t(y)
## Check with S
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
checkEquals(fdata.obj@N, NROW(S$V1))
setY(fdata.obj) <- y
checkTrue(!all(is.na(fdata.obj@y)), "check2")
checkEquals(fdata.obj@N, NROW(S$V1))
checkEquals(fdata.obj@r, 1)
setY(fdata.obj) <- t(y)
checkEquals(nrow(fdata.obj@y), NROW(S$V1))
checkEquals(ncol(fdata.obj@y), 1)
y <- cbind(y, y)
setType(fdata.obj) <- "continuous"
setY(fdata.obj) <- y
setY(fdata.obj) <- t(y)
## Check exception
y <- matrix("", nrow = 10)
checkException(setY(fdata.obj) <- y, "check3")
}
"test.fdata.setBycolumn" <- function()
{
## Default
fdata.obj <- fdata()
setBycolumn(fdata.obj) <- FALSE
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setBycolumn(fdata.obj) <- TRUE
checkTrue(getBycolumn(fdata.obj), "check1")
setBycolumn(fdata.obj) <- FALSE
checkTrue(!getBycolumn(fdata.obj), "check2")
checkEquals(nrow(fdata.obj@y), NCOL(y))
checkEquals(ncol(fdata.obj@y), NROW(y))
checkEquals(fdata.obj@N, NROW(y))
checkEquals(fdata.obj@r, NCOL(y))
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
setBycolumn(fdata.obj) <- FALSE
checkEquals(nrow(fdata.obj@S), NCOL(S$V1))
checkEquals(ncol(fdata.obj@S), NROW(S$V1))
}
"test.fdata.setS" <- function()
{
## Default
fdata.obj <- fdata()
S <- .setUp.S()
setS(fdata.obj) <- S$V1
checkTrue(!all(is.na(fdata.obj@S)), "check1")
## Check row-ordering
setS(fdata.obj) <- t(S$V1)
checkEquals(nrow(fdata.obj@S), NROW(S$V1))
checkEquals(ncol(fdata.obj@S), NCOL(S$V1))
## Check with y
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setS(fdata.obj) <- S$V1
checkEquals(nrow(fdata.obj@S), NROW(S$V1))
checkEquals(ncol(fdata.obj@S), NCOL(S$V1))
## Check with y and row-ordering
fdata.obj <- fdata(y = t(y))
setS(fdata.obj) <- S$V1
checkEquals(nrow(fdata.obj@S), NCOL(S$V1))
checkEquals(ncol(fdata.obj@S), NROW(S$V1))
fdata.obj <- fdata(y = y)
setS(fdata.obj) <- t(S$V1)
checkEquals(nrow(fdata.obj@S), NROW(S$V1))
checkEquals(ncol(fdata.obj@S), NCOL(S$V1))
## Check exception
S <- c(2, 1, 2, - 1)
checkException(setS(fdata.obj) <- S, "check2")
S <- matrix("", nrow = 10)
checkException(setS(fdata.obj) <- S, "check3")
}
"test.fdata.setExp" <- function()
{
## Default
fdata.obj <- fdata()
expos <- .setUp.y()
expos <- matrix(expos$V1)
setExp(fdata.obj) <- expos
checkTrue(!all(is.na(fdata.obj@exp)), "check1")
## Check row-ordering
setExp(fdata.obj) <- t(expos)
checkEquals(nrow(fdata.obj@exp), NROW(expos))
checkEquals(ncol(fdata.obj@exp), NCOL(expos))
## Check with y
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setExp(fdata.obj) <- expos
checkEquals(nrow(fdata.obj@exp), NROW(expos))
checkEquals(ncol(fdata.obj@exp), NCOL(expos))
## Check with y and row-ordering
fdata.obj <- fdata(y = t(y))
setExp(fdata.obj) <- expos
checkEquals(nrow(fdata.obj@exp), NCOL(expos))
checkEquals(ncol(fdata.obj@exp), NROW(expos))
fdata.obj <- fdata(y = y)
setExp(fdata.obj) <- t(expos)
checkEquals(nrow(fdata.obj@exp), NROW(expos))
checkEquals(ncol(fdata.obj@exp), NCOL(expos))
## Check exception
expos <- c(2, 1, 2, - 1)
checkException(setExp(fdata.obj) <- expos, "check2")
expos <- matrix("", nrow = 10)
checkException(setExp(fdata.obj) <- expos, "check3")
}
"test.fdata.setT" <- function()
{
## Default
fdata.obj <- fdata()
T <- .setUp.S()
setT(fdata.obj) <- T$V1
checkTrue(!all(is.na(fdata.obj@T)), "check1")
## Check row-ordering
setT(fdata.obj) <- t(T$V1)
checkEquals(nrow(fdata.obj@T), NROW(T$V1))
checkEquals(ncol(fdata.obj@T), NCOL(T$V1))
## Check with y
y <- .setUp.y()
fdata.obj <- fdata(y = y)
setT(fdata.obj) <- T$V1
checkEquals(nrow(fdata.obj@T), NROW(T$V1))
checkEquals(ncol(fdata.obj@T), NCOL(T$V1))
## Check with y and row-ordering
fdata.obj <- fdata(y = t(y))
setT(fdata.obj) <- T$V1
checkEquals(nrow(fdata.obj@T), NCOL(T$V1))
checkEquals(ncol(fdata.obj@T), NROW(T$V1))
fdata.obj <- fdata(y = y)
setT(fdata.obj) <- t(T$V1)
checkEquals(nrow(fdata.obj@T), NROW(T$V1))
checkEquals(ncol(fdata.obj@T), NCOL(T$V1))
## Check exception
T <- c(2, 1, 2, - 1)
checkException(setT(fdata.obj) <- T, "check2")
T <- matrix("", nrow = 10)
checkException(setT(fdata.obj) <- T, "check3")
}
"test.fdata.hasY" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasY(fdata.obj), "check1")
checkException(hasY(fdata.obj, verbose = TRUE), "check2")
y <- .setUp.y()
fdata.obj <- fdata(y = y)
checkTrue(hasY(fdata.obj), "check3")
}
"test.fdata.hasS" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasS(fdata.obj), "check1")
checkException(hasS(fdata.obj, verbose = TRUE), "check2")
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
checkTrue(hasS(fdata.obj), "check3")
}
"test.fdata.hasExp" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasExp(fdata.obj), "check1")
checkException(hasExp(fdata.obj, verbose = TRUE), "check2")
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
checkTrue(hasExp(fdata.obj), "check3")
}
"test.fdata.hasT" <- function()
{
## Default
fdata.obj <- fdata()
checkTrue(!hasT(fdata.obj), "check1")
checkException(hasT(fdata.obj, verbose = TRUE), "check2")
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
checkTrue(hasT(fdata.obj), "check3")
}
"test.fdata.getColY" <- function()
{
## Default
y <- .setUp.y()
fdata.obj <- fdata(y = y)
y.out <- getColY(fdata.obj)
checkEquals(nrow(y.out), NROW(y))
checkEquals(ncol(y.out), NCOL(y))
}
"test.fdata.getRowY" <- function()
{
## Default
y <- .setUp.y()
fdata.obj <- fdata(y = y)
y.out <- getRowY(fdata.obj)
checkEquals(ncol(y.out), NROW(y))
checkEquals(nrow(y.out), NCOL(y))
}
"test.fdata.getColS" <- function()
{
## Default
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
S.out <- getColS(fdata.obj)
checkEquals(nrow(S.out), NROW(S))
checkEquals(ncol(S.out), NCOL(S))
}
"test.fdata.getRowS" <- function()
{
## Default
S <- .setUp.S()
fdata.obj <- fdata(S = S$V1)
S.out <- getRowS(fdata.obj)
checkEquals(ncol(S.out), NROW(S))
checkEquals(nrow(S.out), NCOL(S))
}
"test.fdata.getColExp" <- function()
{
## Default
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
exp.out <- getColExp(fdata.obj)
checkEquals(nrow(exp.out), NROW(expos))
checkEquals(ncol(exp.out), NCOL(expos))
}
"test.fdata.getRowY" <- function()
{
## Default
expos <- .setUp.y()
fdata.obj <- fdata(exp = expos$V1)
exp.out <- getRowExp(fdata.obj)
checkEquals(ncol(exp.out), NROW(expos))
checkEquals(nrow(exp.out), NCOL(expos))
}
"test.fdata.getColT" <- function()
{
## Default
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
T.out <- getColT(fdata.obj)
checkEquals(nrow(T.out), NROW(T))
checkEquals(ncol(T.out), NCOL(T))
}
"test.fdata.getRowT" <- function()
{
## Default
T <- .setUp.S()
fdata.obj <- fdata(T = T$V1)
T.out <- getRowT(fdata.obj)
checkEquals(ncol(T.out), NROW(T))
checkEquals(nrow(T.out), NCOL(T))
}
|
# Driver script: load the model runners and time a neural-network run on
# the "bbob" problem set. The timestamps bracket the RunNN call; the
# elapsed time is printed by the cat() that follows this script section.
rm(list = ls())
source("./call_NN.R")
source("./call_LM.R")

start.time <- Sys.time()

# Candidate model constructors, all defined by the sourced files.
list_model <- list(GPMP, LGMP, EPMP, DCMP, EPMN, LGMN, DCMN, GPMN)
iter <- 10

# RunLM(PRE_OR_NOT = "not", problem = "bbob", list.model = list_model,
#       method = "BL", iter = iter, option = "each")
RunNN(PRE_OR_NOT = "not", problem = "bbob")

end.time <- Sys.time()
cat("cosume time :", end.time - start.time, "\n") | /run.R | no_license | qiqi-helloworld/Numeric-Represents-on-Evolutionary-Fitness-Results | R | false | false | 497 | r | rm(list = ls())
source("./call_NN.R")
source("./call_LM.R")
start.time = Sys.time()
list_model = list(
GPMP, LGMP,
EPMP, DCMP,
EPMN, LGMN,
DCMN, GPMN
)
iter <- 10
# RunLM(PRE_OR_NOT = "not", problem = "bbob", list.model = list_model,
# method = "BL", iter = iter, option = "each")
RunNN(PRE_OR_NOT = "not", problem = "bbob")
end.time = Sys.time()
cat("cosume time :", end.time - start.time, "\n") |
# Demo of multi-objective Bayesian optimization with GPareto, followed by a
# caret random-forest example on iris.
library(GPareto)
# Initial design: 6 equally spaced points in [0, 1]; MOP2 is a bi-objective
# test function supplied by GPareto.
design.init <- matrix(seq(0, 1, length.out = 6), ncol = 1)
response.init <- MOP2(design.init)
# One Gaussian-process (kriging) surrogate per objective.
mf1 <- km(~1, design = design.init, response = response.init[, 1])
mf2 <- km(~1, design = design.init, response = response.init[, 2])
model <- list(mf1, mf2)
# 7 sequential infill steps using the Expected Hypervolume Improvement
# criterion with reference point (2, 2).
res <- GParetoptim(model = model, fn = MOP2, crit = "EHI", nsteps = 7,
lower = 0, upper = 1, critcontrol = list(refPoint = c(2, 2)))
# NOTE(review): P1 is not defined in this script -- presumably one of the
# test problems exported by GPareto; confirm before running.
plotParetoGrid(P1)
# Custom bi-objective function: f1 = x^2 (elementwise), f2 = x1.
# The commented lines are a leftover ZDT1-style formulation.
myobj <- function (x)
{
# Promote a bare vector to a one-row matrix so column indexing works.
if (is.null(dim(x))) {
x <- matrix(x, nrow = 1)
}
#n <- ncol(x)
#g <- 1 + rowSums(x[, 2:n, drop = FALSE]) * 9/(n - 1)
#return(cbind(x[, 1], g * (1 - sqrt(x[, 1]/g))))
return(cbind(x^2,x[, 1]))
# f1 <- x1, f2 <- g(x)*(1-sqrt(x1/g(x)))
}
# Turn-key optimizer: 50 evaluations over the 4-dimensional unit cube.
res <- easyGParetoptim(fn = myobj, budget = 50, lower = rep(0, 4),upper = rep(1, 4))
# Unrelated example: 5-fold CV random forest on iris, scored by log loss.
library(caret)
data("iris")
control <- trainControl(method="cv", number = 5, classProbs = TRUE, summaryFunction = mnLogLoss)
set.seed(7)
fit <- train(Species~. ,data = iris, method = "rf", metric="logLoss", trControl = control)
print(fit)
| /GPareto.R | no_license | misteliy/Rscripts | R | false | false | 1,058 | r | library(GPareto)
design.init <- matrix(seq(0, 1, length.out = 6), ncol = 1)
response.init <- MOP2(design.init)
mf1 <- km(~1, design = design.init, response = response.init[, 1])
mf2 <- km(~1, design = design.init, response = response.init[, 2])
model <- list(mf1, mf2)
res <- GParetoptim(model = model, fn = MOP2, crit = "EHI", nsteps = 7,
lower = 0, upper = 1, critcontrol = list(refPoint = c(2, 2)))
plotParetoGrid(P1)
myobj <- function (x)
{
if (is.null(dim(x))) {
x <- matrix(x, nrow = 1)
}
#n <- ncol(x)
#g <- 1 + rowSums(x[, 2:n, drop = FALSE]) * 9/(n - 1)
#return(cbind(x[, 1], g * (1 - sqrt(x[, 1]/g))))
return(cbind(x^2,x[, 1]))
# f1 <- x1, f2 <- g(x)*(1-sqrt(x1/g(x)))
}
res <- easyGParetoptim(fn = myobj, budget = 50, lower = rep(0, 4),upper = rep(1, 4))
library(caret)
data("iris")
control <- trainControl(method="cv", number = 5, classProbs = TRUE, summaryFunction = mnLogLoss)
set.seed(7)
fit <- train(Species~. ,data = iris, method = "rf", metric="logLoss", trControl = control)
print(fit)
|
# para is the variance prior for the random position d
# choose is choosing which one as the focal to let it's d value be 0 as standard
# dist can be 1 or 2
# 1 is normal distribution and 2 is student t distribution
# If you use t-distribution,the degree of freedom will be n-2, n is the total amount of species
# Install the rstan package from stan website before running this function
#
# Bayesian ranking from a pairwise win/loss matrix via Stan.
# data1: square integer matrix; data1[i, j] is the number of wins of i over j.
# Returns a list with the fitted stan model, the 8 most frequent posterior
# orderings (as letter strings, most dominant first), their posterior
# probabilities, and the posterior mean of each parameter.
bay_isi<-function(data1,para=1000,choose,dist=1){
library(rstan)
# Stan model: each species gets a latent position d; the focal species is
# pinned at 0 (d1[focal] <- 0). Wins follow a Bradley-Terry-style binomial
# with logistic link on the position difference. Do NOT edit/indent this
# string -- it is passed verbatim to Stan.
bayesiani_si<-"
data{
int n;
int y[n,n];
real sigma1;
int focal;
int dist;
}
parameters{
real <lower=-15,upper=15> d[n];
}
transformed parameters{
real <lower=-15,upper=15> d1[n];
d1<-d;
d1[focal]<-0;
}
model{
if (dist==2){
for (i in 1:n)
{d[i]~student_t(n-2,0,sigma1);}
}else{
for (i in 1:n)
{d[i]~normal(0,sigma1);}
}
for (i in 1:(n-1)){
for(j in (i+1):n){
y[i,j]~binomial(y[i,j]+y[j,i],1/(1+exp(d1[j]-d1[i])));
}
}
}
"
n=nrow(data1)
data=list(y=data1,n=n,focal=choose,dist=dist,sigma1=para)
# 2 chains x 10000 iterations; the first 5000 of each chain are treated as
# burn-in below.
fit <- stan(model_code = bayesiani_si, model_name = "example11",
data = data, iter = 10000, chains = 2, verbose = FALSE)
# Concatenate a character vector into a single string (paste0 with
# collapse would do the same).
combine<-function(string1){
n=length(string1)
save=''
for (i in 1:n) save=paste0(save,string1[i])
return (save)
}
# b: n x 10000 matrix of post-burn-in draws of d1 (5000 per chain).
# The transformed parameters d1 sit at offsets n+1 .. 2n in each chain.
b=matrix(0,n,10000)
newletter=c(letters,LETTERS)
for (j in 1:2){
a=fit@sim$samples[[j]]
for (i in 1:n){
if (j==1) {
b[i,1:5000]=a[[n+i]][5001:10000]}
else{
b[i,5001:10000]=a[[n+i]][5001:10000]
}
}
}
# For each posterior draw, encode the dominance ordering (highest d1 first)
# as a letter string and count how often each ordering occurs.
save1=NULL
number=NULL
for (i in 1:10000){
index=combine(newletter[sort(b[,i],index.return=TRUE,decreasing=TRUE)$ix])
if(any(save1==index)==FALSE){
save1=c(save1,index)
number=c(number,1)
}else if (any(save1==index)==TRUE){
number[which(save1==index)]=number[which(save1==index)]+1
}
}
# Report the 8 most frequent orderings and their posterior frequencies.
# NOTE(review): if fewer than 8 distinct orderings occur, these vectors will
# contain NAs -- confirm intended behavior.
result=sort(number,decreasing=TRUE,index.return=TRUE)
prob=result$x[1:8]/10000
ranking=save1[result$ix[1:8]]
kk=list(model=fit,ranking=ranking,prob=prob,mean=(attr(fit@sim$samples[[1]],"mean_pars")+attr(fit@sim$samples[[2]],"mean_pars"))/2)
return (kk)
}
# data1=matrix(c(0,10,2,3,2,9,0,5,3,3,12,9,0,0,0,6,12,0,0,4,27,12,2,2,0),5,5)
# fun_1(data1,1000,3,1)
| /R/bay_isi.R | no_license | KayShen/compete | R | false | false | 2,077 | r |
# para is the variance prior for the random position d
# choose is choosing which one as the focal to let it's d value be 0 as standard
# dist can be 1 or 2
# 1 is normal distribution and 2 is student t distribution
# If you use t-distribution,the degree of freedom will be n-2, n is the total amount of species
# Install the rstan package from stan website before running this function
bay_isi<-function(data1,para=1000,choose,dist=1){
library(rstan)
bayesiani_si<-"
data{
int n;
int y[n,n];
real sigma1;
int focal;
int dist;
}
parameters{
real <lower=-15,upper=15> d[n];
}
transformed parameters{
real <lower=-15,upper=15> d1[n];
d1<-d;
d1[focal]<-0;
}
model{
if (dist==2){
for (i in 1:n)
{d[i]~student_t(n-2,0,sigma1);}
}else{
for (i in 1:n)
{d[i]~normal(0,sigma1);}
}
for (i in 1:(n-1)){
for(j in (i+1):n){
y[i,j]~binomial(y[i,j]+y[j,i],1/(1+exp(d1[j]-d1[i])));
}
}
}
"
n=nrow(data1)
data=list(y=data1,n=n,focal=choose,dist=dist,sigma1=para)
fit <- stan(model_code = bayesiani_si, model_name = "example11",
data = data, iter = 10000, chains = 2, verbose = FALSE)
combine<-function(string1){
n=length(string1)
save=''
for (i in 1:n) save=paste0(save,string1[i])
return (save)
}
b=matrix(0,n,10000)
newletter=c(letters,LETTERS)
for (j in 1:2){
a=fit@sim$samples[[j]]
for (i in 1:n){
if (j==1) {
b[i,1:5000]=a[[n+i]][5001:10000]}
else{
b[i,5001:10000]=a[[n+i]][5001:10000]
}
}
}
save1=NULL
number=NULL
for (i in 1:10000){
index=combine(newletter[sort(b[,i],index.return=TRUE,decreasing=TRUE)$ix])
if(any(save1==index)==FALSE){
save1=c(save1,index)
number=c(number,1)
}else if (any(save1==index)==TRUE){
number[which(save1==index)]=number[which(save1==index)]+1
}
}
result=sort(number,decreasing=TRUE,index.return=TRUE)
prob=result$x[1:8]/10000
ranking=save1[result$ix[1:8]]
kk=list(model=fit,ranking=ranking,prob=prob,mean=(attr(fit@sim$samples[[1]],"mean_pars")+attr(fit@sim$samples[[2]],"mean_pars"))/2)
return (kk)
}
# data1=matrix(c(0,10,2,3,2,9,0,5,3,3,12,9,0,0,0,6,12,0,0,4,27,12,2,2,0),5,5)
# fun_1(data1,1000,3,1)
|
# Log-linear and Poisson-GLM analyses of the UC Berkeley admissions data:
# mutual independence, conditional independence given Dept, the all two-way
# model, and a 1-df term for the Dept A Gender x Admit association.
library(vcdExtra)
library(MASS)
data("UCBAdmissions")
structable(Dept ~ Admit+Gender,UCBAdmissions)
# Mutual independence [A][D][G], fitted on the contingency table.
berk.loglm0 <- loglm(~ Admit + Dept + Gender, data=UCBAdmissions, param=TRUE, fitted=TRUE)
berk.loglm0
names(berk.loglm0)
# show parameters
coef(berk.loglm0)
# fitted frequencies
structable(Dept ~ Admit+Gender, fitted(berk.loglm0))
# residuals
structable(Dept ~ Admit+Gender, residuals(berk.loglm0))
## conditional independence in UCB admissions data
berk.loglm1 <- loglm(~ Dept * (Gender + Admit), data=UCBAdmissions)
berk.loglm1
#mosaic(berk.loglm1, gp=shading_Friendly)
coef(berk.loglm1)
## all two-way model
berk.loglm2 <-loglm(~(Admit+Dept+Gender)^2, data=UCBAdmissions)
berk.loglm2
mosaic(berk.loglm2, gp=shading_Friendly)
# compare models
anova(berk.loglm0, berk.loglm1, berk.loglm2, test="Chisq")
##################
# same, using glm() -- need to transform the data to a data.frame
berkeley <- as.data.frame(UCBAdmissions)
head(berkeley)
berk.glm1 <- glm(Freq ~ Dept * (Gender+Admit), data=berkeley, family="poisson")
summary(berk.glm1)
mosaic(berk.glm1, gp=shading_Friendly, labeling=labeling_residuals, formula=~Admit+Dept+Gender)
# test terms, using Type I tests
anova(berk.glm1, test="Chisq")
# type II tests
library(car)
Anova(berk.glm1, test="LR")
# the same, displaying studentized residuals note use of formula to reorder factors in the mosaic
mosaic(berk.glm1, shade=TRUE, formula=~Admit+Dept+Gender,
residuals_type="rstandard", labeling=labeling_residuals,
main="Model: [AdmitDept][GenderDept]")
## all two-way model
berk.glm2 <- glm(Freq ~ (Dept + Gender + Admit)^2, data=berkeley, family="poisson")
summary(berk.glm2)
mosaic.glm(berk.glm2, residuals_type="rstandard", labeling = labeling_residuals, shade=TRUE,
formula=~Admit+Dept+Gender, main="Model: [DeptGender][DeptAdmit][AdmitGender]")
anova(berk.glm1, berk.glm2, test="Chisq")
# Add 1 df term for association of [GenderAdmit] only in Dept A
berkeley <- within(berkeley, dept1AG <- (Dept=='A')*(Gender=='Female')*(Admit=='Admitted'))
head(berkeley)
berk.glm3 <- glm(Freq ~ Dept * (Gender+Admit) + dept1AG, data=berkeley, family="poisson")
# NOTE(review): summarise() on a glmlist was renamed LRstats() in newer
# vcdExtra releases -- confirm against the installed version.
summarise(glmlist(berk.glm1, berk.glm2, berk.glm3))
anova(berk.glm1, berk.glm3, test="Chisq")
# interpret coefficient
coef(berk.glm3)[["dept1AG"]]
exp(coef(berk.glm3)[["dept1AG"]])
#summary(berk.glm3)
mosaic.glm(berk.glm3, residuals_type="rstandard", labeling = labeling_residuals, shade=TRUE,
formula=~Admit+Dept+Gender, main="Model: [DeptGender][DeptAdmit] + DeptA*[GA]")
| /ch08/R/berkeley-glm.R | no_license | friendly/VCDR | R | false | false | 2,535 | r | library(vcdExtra)
library(MASS)
data("UCBAdmissions")
structable(Dept ~ Admit+Gender,UCBAdmissions)
berk.loglm0 <- loglm(~ Admit + Dept + Gender, data=UCBAdmissions, param=TRUE, fitted=TRUE)
berk.loglm0
names(berk.loglm0)
# show parameters
coef(berk.loglm0)
# fitted frequencies
structable(Dept ~ Admit+Gender, fitted(berk.loglm0))
# residuals
structable(Dept ~ Admit+Gender, residuals(berk.loglm0))
## conditional independence in UCB admissions data
berk.loglm1 <- loglm(~ Dept * (Gender + Admit), data=UCBAdmissions)
berk.loglm1
#mosaic(berk.loglm1, gp=shading_Friendly)
coef(berk.loglm1)
## all two-way model
berk.loglm2 <-loglm(~(Admit+Dept+Gender)^2, data=UCBAdmissions)
berk.loglm2
mosaic(berk.loglm2, gp=shading_Friendly)
# compare models
anova(berk.loglm0, berk.loglm1, berk.loglm2, test="Chisq")
##################
# same, using glm() -- need to transform the data to a data.frame
berkeley <- as.data.frame(UCBAdmissions)
head(berkeley)
berk.glm1 <- glm(Freq ~ Dept * (Gender+Admit), data=berkeley, family="poisson")
summary(berk.glm1)
mosaic(berk.glm1, gp=shading_Friendly, labeling=labeling_residuals, formula=~Admit+Dept+Gender)
# test terms, using Type I tests
anova(berk.glm1, test="Chisq")
# type II tests
library(car)
Anova(berk.glm1, test="LR")
# the same, displaying studentized residuals note use of formula to reorder factors in the mosaic
mosaic(berk.glm1, shade=TRUE, formula=~Admit+Dept+Gender,
residuals_type="rstandard", labeling=labeling_residuals,
main="Model: [AdmitDept][GenderDept]")
## all two-way model
berk.glm2 <- glm(Freq ~ (Dept + Gender + Admit)^2, data=berkeley, family="poisson")
summary(berk.glm2)
mosaic.glm(berk.glm2, residuals_type="rstandard", labeling = labeling_residuals, shade=TRUE,
formula=~Admit+Dept+Gender, main="Model: [DeptGender][DeptAdmit][AdmitGender]")
anova(berk.glm1, berk.glm2, test="Chisq")
# Add 1 df term for association of [GenderAdmit] only in Dept A
berkeley <- within(berkeley, dept1AG <- (Dept=='A')*(Gender=='Female')*(Admit=='Admitted'))
head(berkeley)
berk.glm3 <- glm(Freq ~ Dept * (Gender+Admit) + dept1AG, data=berkeley, family="poisson")
summarise(glmlist(berk.glm1, berk.glm2, berk.glm3))
anova(berk.glm1, berk.glm3, test="Chisq")
# interpret coefficient
coef(berk.glm3)[["dept1AG"]]
exp(coef(berk.glm3)[["dept1AG"]])
#summary(berk.glm3)
mosaic.glm(berk.glm3, residuals_type="rstandard", labeling = labeling_residuals, shade=TRUE,
formula=~Admit+Dept+Gender, main="Model: [DeptGender][DeptAdmit] + DeptA*[GA]")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc-doc.R
\docType{data}
\name{Tcomp_reproduction}
\alias{Tcomp_reproduction}
\title{Reproduction of selected tourism competition results}
\format{A list of three elements named \code{monthly}, \code{quarterly} and \code{yearly}.
These correspond to tables 4, 5 and 6 in the Athanasopoulos et al 2011 article.
}
\source{
\url{http://robjhyndman.com/papers/the-tourism-forecasting-competition}
}
\usage{
Tcomp_reproduction
}
\description{
Reproduction of selected results from the tourism forecasting competition described in
Athanasopoulos et al. 2011 (\url{http://robjhyndman.com/papers/forecompijf.pdf})
}
\details{
Note that only Mean Absolute Percentage Error of the naive forecasts matches exactly that published.
All Mean Absolute Scaled Error results are slightly higher than those published due to an unknown
difference in MASE method. All results for ARIMA, ETS and Theta method forecasts differ due to
changes in the forecasting methods since 2011.
See Vignette for details,
including the code required to re-create the \code{Tcomp_reproduction} object.
}
\examples{
Tcomp_reproduction
}
\keyword{datasets}
| /man/Tcomp_reproduction.Rd | no_license | cran/Tcomp | R | false | true | 1,231 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc-doc.R
\docType{data}
\name{Tcomp_reproduction}
\alias{Tcomp_reproduction}
\title{Reproduction of selected tourism competition results}
\format{{A list of three elements named \code{monthly}, \code{quarterly} and \code{yearly}.
These correspond to tables 4, 5 and 6 in the Athanasopoulos et al 2011 article.
}}
\source{
\url{http://robjhyndman.com/papers/the-tourism-forecasting-competition}
}
\usage{
Tcomp_reproduction
}
\description{
Reproduction of selected results from the tourism forecasting competition described in
Athanasopoulos et al. 2011 (\url{http://robjhyndman.com/papers/forecompijf.pdf})
}
\details{
Note that only Mean Absolute Percentage Error of the naive forecasts matches exactly that published.
All Mean Absolute Scaled Error results are slightly higher than those published due to an unknown
difference in MASE method. All results for ARIMA, ETS and Theta method forecasts differ due to
changes in the forecasting methods since 2011.
See Vignette for details,
including the code required to re-create the `Tcomp_reproduction` object.
}
\examples{
Tcomp_reproduction
}
\keyword{datasets}
|
# keyboard shortcuts for R command line
# to be used only in R interactive mode
# executed automatically upon loading the package
#
# Defines the 'ksr' S3 class (a list with a function slot $f and a print
# flag $prnt, auto-executed via print.ksr when the object name is typed at
# the prompt) and registers the built-in shortcuts gtd/upd/sstd/bkd. All
# bindings are made in the global environment via <<- by design, since the
# shortcuts must be reachable from the top-level prompt.
ksInit <- function()
{
# set up a general prototype, and S3 object
lst <<- list(f = function() NA,prnt=TRUE)
class(lst) <<- 'ksr'
ksProto <<- lst
# Auto-run on print: typing the shortcut name at the REPL triggers $f.
# NOTE(review): signature deviates from the print(x, ...) generic; works
# for REPL auto-printing but not for print() calls with extra args.
print.ksr <<- function(ksobj) {
if (ksobj$prnt) print(ksobj$f()) else ksobj$f()
}
### '/.ksr' <<- function(ksobj,x) ksobj$f(x)
# print(getwd())
gtd <<- ksProto
gtd$f <<- getwd
# ksList is the global registry of shortcut name -> operation.
ksList <<- data.frame(opName='gtd',op='getwd()')
# setwd('..')
upd <<- ksProto
upd$f <<- function() {setwd('..'); message(getwd())}
upd$prnt <<- FALSE
ksList <<- rbind(ksList,data.frame(opName='upd',op='setwd(..)'))
# setwd(dir) but also save current directory for later use with bkd;
# sstd = "save and set directory"
sstd <<- ksProto
sstd$f <<- function() {
d <- readline('new directory: ')
saveDir <<- getwd()
setwd(d)
print(getwd())
}
ksList <<- rbind(ksList,data.frame(opName='sstd',op='save dir, setwd()'))
# restore dir to the one saved above
bkd <<- ksProto
bkd$f <<- function() {setwd(saveDir); saveDir <<- NULL; message(getwd())}
ksList <<- rbind(ksList,data.frame(opName='bkd',op='back to saved dir'))
}
# Registers a keyboard-shortcut abbreviation: binds a global object `name`
# (a ksProto clone whose $f evaluates the expression text `op`) and records
# the pair in the global ksList registry.
# NOTE(review): `hasArgs` is accepted but never used -- kept for interface
# compatibility.
ksAbbrev <- function(name, op, hasArgs) {
  evalrstring(paste0(name, ' <<- ksProto'))
  evalrstring(paste0(name, '$f <<- function() ', op))
  ksList <<- rbind(ksList, data.frame(opName = name, op = op))
}
# example:
# x <- 3
# ksAbbrev('ad1','x <<- x + 1')
# ad1
# print(x) # 4
| /R/ksREPL.R | no_license | matloff/ksREPL | R | false | false | 1,716 | r |
# keyboard shortcuts for R command line
# to be used only in R interactive mode
# executed automatically upon loading the packge
ksInit <- function()
{
# set up a general prototype, and S3 object
lst <<- list(f = function() NA,prnt=TRUE)
class(lst) <<- 'ksr'
ksProto <<- lst
print.ksr <<- function(ksobj) {
if (ksobj$prnt) print(ksobj$f()) else ksobj$f()
}
### '/.ksr' <<- function(ksobj,x) ksobj$f(x)
# print(getwd())
gtd <<- ksProto
gtd$f <<- getwd
ksList <<- data.frame(opName='gtd',op='getwd()')
# setwd('..')
upd <<- ksProto
upd$f <<- function() {setwd('..'); message(getwd())}
upd$prnt <<- FALSE
ksList <<- rbind(ksList,data.frame(opName='upd',op='setwd(..)'))
# setwd(dir) but also save current directory for later use with bkd;
# sstd = "save and set directory"
sstd <<- ksProto
sstd$f <<- function() {
d <- readline('new directory: ')
saveDir <<- getwd()
setwd(d)
print(getwd())
}
ksList <<- rbind(ksList,data.frame(opName='sstd',op='save dir, setwd()'))
# restore dir to the one saved above
bkd <<- ksProto
bkd$f <<- function() {setwd(saveDir); saveDir <<- NULL; message(getwd())}
ksList <<- rbind(ksList,data.frame(opName='bkd',op='back to saved dir'))
}
# forms the abbreviation 'name' for the operation 'op'
ksAbbrev <- function(name,op,hasArgs) {
cmd <- paste0(name,' <<- ksProto')
evalrstring(cmd)
cmd <- paste0(name,'$f <<- function() ',op)
evalrstring(cmd)
ksList <<- rbind(ksList,data.frame(opName=name,op=op))
}
# example:
# x <- 3
# ksAbbrev('ad1','x <<- x + 1')
# ad1
# print(x) # 4
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upsert.R
\name{sf_upsert}
\alias{sf_upsert}
\title{Upsert Records}
\usage{
sf_upsert(input_data, object_name, external_id_fieldname,
api_type = c("SOAP", "REST", "Bulk 1.0", "Bulk 2.0"),
control = list(...), ..., verbose = FALSE)
}
\arguments{
\item{input_data}{\code{named vector}, \code{matrix}, \code{data.frame}, or
\code{tbl_df}; data can be coerced into a \code{data.frame}}
\item{object_name}{character; the name of one Salesforce objects that the
function is operating against (e.g. "Account", "Contact", "CustomObject__c")}
\item{external_id_fieldname}{character; string identifying a custom field on the
object that has been set as an "External ID" field. This field is used to reference
objects during upserts to determine if the record already exists in Salesforce or not.}
\item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
"Chatter" indicating which API to use when making the request}
\item{control}{\code{list}; a list of parameters for controlling the behavior of
the API call being used. For more information of what parameters are available
look at the documentation for \code{\link{sf_control}}}
\item{...}{arguments passed to \code{\link{sf_control}} or further downstream
to \code{\link{sf_bulk_operation}}}
\item{verbose}{logical; do you want informative messages?}
}
\value{
\code{tbl_df} of records with success indicator
}
\description{
Upserts one or more new records to your organization’s data.
}
\examples{
\dontrun{
n <- 2
new_contacts <- tibble(FirstName = rep("Test", n),
LastName = paste0("Contact-Create-", 1:n),
My_External_Id__c=letters[1:n])
new_contacts_result <- sf_create(new_contacts, object_name="Contact")
upserted_contacts <- tibble(FirstName = rep("Test", n),
LastName = paste0("Contact-Upsert-", 1:n),
My_External_Id__c=letters[1:n])
new_record <- tibble(FirstName = "Test",
LastName = paste0("Contact-Upsert-", n+1),
My_External_Id__c=letters[n+1])
upserted_contacts <- bind_rows(upserted_contacts, new_record)
upserted_contacts_result1 <- sf_upsert(upserted_contacts,
object_name="Contact",
"My_External_Id__c")
}
}
| /man/sf_upsert.Rd | permissive | TonyWhiteSMS/salesforcer | R | false | true | 2,415 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upsert.R
\name{sf_upsert}
\alias{sf_upsert}
\title{Upsert Records}
\usage{
sf_upsert(input_data, object_name, external_id_fieldname,
api_type = c("SOAP", "REST", "Bulk 1.0", "Bulk 2.0"),
control = list(...), ..., verbose = FALSE)
}
\arguments{
\item{input_data}{\code{named vector}, \code{matrix}, \code{data.frame}, or
\code{tbl_df}; data can be coerced into a \code{data.frame}}
\item{object_name}{character; the name of one Salesforce objects that the
function is operating against (e.g. "Account", "Contact", "CustomObject__c")}
\item{external_id_fieldname}{character; string identifying a custom field on the
object that has been set as an "External ID" field. This field is used to reference
objects during upserts to determine if the record already exists in Salesforce or not.}
\item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
"Chatter" indicating which API to use when making the request}
\item{control}{\code{list}; a list of parameters for controlling the behavior of
the API call being used. For more information of what parameters are available
look at the documentation for \code{\link{sf_control}}}
\item{...}{arguments passed to \code{\link{sf_control}} or further downstream
to \code{\link{sf_bulk_operation}}}
\item{verbose}{logical; do you want informative messages?}
}
\value{
\code{tbl_df} of records with success indicator
}
\description{
Upserts one or more new records to your organization’s data.
}
\examples{
\dontrun{
n <- 2
new_contacts <- tibble(FirstName = rep("Test", n),
LastName = paste0("Contact-Create-", 1:n),
My_External_Id__c=letters[1:n])
new_contacts_result <- sf_create(new_contacts, object_name="Contact")
upserted_contacts <- tibble(FirstName = rep("Test", n),
LastName = paste0("Contact-Upsert-", 1:n),
My_External_Id__c=letters[1:n])
new_record <- tibble(FirstName = "Test",
LastName = paste0("Contact-Upsert-", n+1),
My_External_Id__c=letters[n+1])
upserted_contacts <- bind_rows(upserted_contacts, new_record)
upserted_contacts_result1 <- sf_upsert(upserted_contacts,
object_name="Contact",
"My_External_Id__c")
}
}
|
#plot a heatmap of core genes
#May, 8th, 2017
#Joo Hyun Im (ji72)
#heatmap_core_genes.R was adjusted and saved here
# NOTE(review): rm(list=ls()) and setwd() with a machine-specific absolute path
# make this script non-portable; prefer relative paths / an RStudio project.
rm(list=ls(all=TRUE)) #delete any previous entry
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/")
#Check which core genes are regulated only in live infections:
# Load the core up-regulated gene list (switch to the commented line below for
# the down-regulated list) plus the live-infection-only DEG list.
core = read.table("finding.core.genes/core_genes_for_heatmap/core_upgenes_list.txt", header=T, sep="\t") #166 genes
#core = read.table("finding.core.genes/core_genes_for_heatmap/core_dogenes_list.txt", header=T, sep="\t") #166 genes
live_only = read.table("specific.comparisons/overlap_between_live_and_cleanprick_and_heatkilled/list.of.degs.for.live.only.when.core_genes_were_used.txt", header=F) #105 genes
# Flag each core gene (column 4 := 1/0) by whether its id appears in the
# live-infection-only DEG list. Vectorized %in% replaces the original
# row-by-row loop, which rewrote the data.frame on every iteration.
core[, 4] <- as.numeric(as.character(core[, 1]) %in% live_only[, 1])
# Persist the 0/1 live-only flag next to each core gene (use the commented
# line instead when the down-regulated list was loaded above).
write.table(core, file="finding.core.genes/core_genes_for_heatmap/up_sig_only_in_live_results.txt", quote=F, row.names = F, col.names = T)
#write.table(core, file="finding.core.genes/core_genes_for_heatmap/do_sig_only_in_live_results.txt", quote=F, row.names = F, col.names = T)
########################################
#Core upregulated genes
rm(list=ls(all=TRUE)) #delete any previous entry
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/")
library(gplots); library(RColorBrewer); library("devtools")
#Prepare data
# Gene annotation (one row per core gene with functional-group indicator
# columns) and the fold-change table from the edgeR comparisons.
selected_core_genes = read.table("finding.core.genes/core_genes_for_heatmap/core_upgenes_for_heatmap_function_classified_050817_simplified.txt", header=T, stringsAsFactors = F, sep="\t")
expression_data = read.table("edgeR_results_with_cpm_filter_all_UCs_Nov_2015/edgeR_basic_comparison_all_genes_FC.txt", header=T) #FC, not count data
# Keep only the core genes, in annotation order.
expression_data_subset = expression_data[match(selected_core_genes[,1], expression_data[,1]),]
# Magic column indices pick the per-infection FC columns -- assumes the fixed
# layout of the edgeR output file; TODO confirm if that file ever changes.
expression_data_subset_with_name = expression_data_subset[,c(9,15,21,27,33,39,45,47,49,51)] #for FC
rownames(expression_data_subset_with_name) = c(selected_core_genes[,2])
colnames(expression_data_subset_with_name) = c("M.luteus", "E.coli", "S.marcescens Type", "E.faecalis", "P.rettgeri", "Ecc15", "S.aureus", "P.sneebia", "S.marcescens Db11", "P.entomophila")
# Reorder columns into the desired display order of infections.
expression_data_subset_with_name = expression_data_subset_with_name[,c(1:3,6,5,4,7:10)]
#Prepare colors
my_palette = colorRampPalette(c("#4575b4","white","#d73027"))(n = 299) #blue, white, and red (final)
#shades_for_bar = c("#525252","#969696", "#cccccc","#f7f7f7") #for the number of conditions (10, 9, 8, 7): grey
shades_for_bar = c("#525252","#969696", "#cccccc","#e2e2e2") #for the number of conditions (10, 9, 8, 7): grey, 7 is a bit darker here
colors_for_bars = c(brewer.pal(11,"Set3")[1:9], brewer.pal(11,"Set3")[11:12]) #for functional groups -- 11 functional groups
yes_no_bars = c("black") #for showing up only in live infections or not
#Create column color bars
# Coerce the indicator/count columns to numeric one column at a time:
# as.numeric() on a data.frame errors ("'list' object cannot be coerced to
# type 'double'"), so the original single call could never run.
selected_core_genes[, c(3:15)] <- lapply(selected_core_genes[, c(3:15)], as.numeric)
selected_core_genes = selected_core_genes[,c(1:3,5:15)] #remove Signaling
#Correct the column names
colnames(selected_core_genes) = c("gene_id", "gene_name", "N.sig", "Antimicrobial response", "Stress response", "Metabolism", "Secretion", "Metal ion homeostasis", "Translation control", "Wound healing/tissue repair", "Cell redox/cell cycle control/cell growth", "Proteolysis", "Neuron-related","only_sig_in_live")
#for the number of conditions (10, 9, 8, 7)
# NOTE(review): replace() turns the vector into character after the first
# substitution; the later Numcondition==9 comparisons then rely on R's
# character/number coercion ("9" == 9 is TRUE), which works but is fragile.
Numcondition = replace(selected_core_genes$N.sig, selected_core_genes$N.sig==10, shades_for_bar[1])
Numcondition = replace(Numcondition, Numcondition==9, shades_for_bar[2])
Numcondition = replace(Numcondition, Numcondition==8, shades_for_bar[3])
Numcondition = replace(Numcondition, Numcondition==7, shades_for_bar[4])
#for whether only in live infections or not
liveInfonly = replace(selected_core_genes$only_sig_in_live, selected_core_genes$only_sig_in_live==1, yes_no_bars[1])
liveInfonly = replace(liveInfonly, liveInfonly ==0, "white")
#for the functional groups
# One color bar per functional-group indicator column (1 -> group color,
# 0 -> white); the variable names C1..C10 become the row-side bar labels.
C1 = replace(selected_core_genes[,4], selected_core_genes[,4]==1, colors_for_bars[1]); C1 = replace(C1, C1==0, "white")
C2 = replace(selected_core_genes[,5], selected_core_genes[,5]==1, colors_for_bars[2]); C2 = replace(C2, C2==0, "white")
C3 = replace(selected_core_genes[,6], selected_core_genes[,6]==1, colors_for_bars[3]); C3 = replace(C3, C3==0, "white")
C4 = replace(selected_core_genes[,7], selected_core_genes[,7]==1, colors_for_bars[4]); C4 = replace(C4, C4==0, "white")
C5 = replace(selected_core_genes[,8], selected_core_genes[,8]==1, colors_for_bars[5]); C5 = replace(C5, C5==0, "white")
C6 = replace(selected_core_genes[,9], selected_core_genes[,9]==1, colors_for_bars[6]); C6 = replace(C6, C6==0, "white")
C7 = replace(selected_core_genes[,10], selected_core_genes[,10]==1, colors_for_bars[7]); C7 = replace(C7, C7==0, "white")
C8 = replace(selected_core_genes[,11], selected_core_genes[,11]==1, colors_for_bars[8]); C8 = replace(C8, C8==0, "white")
C9 = replace(selected_core_genes[,12], selected_core_genes[,12]==1, colors_for_bars[9]); C9 = replace(C9, C9==0, "white")
C10 = replace(selected_core_genes[,13], selected_core_genes[,13]==1, colors_for_bars[10]); C10 = replace(C10, C10==0, "white")
#create a white bar for space (white regardless)
whiteBar = c(rep("white", each=length(C1)))
#put all of them together
rlab=t(cbind(Numcondition, whiteBar, liveInfonly, whiteBar, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10))
#Load latest version of heatmap.3 function
# NOTE(review): source_url() fetches unpinned remote code; pass sha1= to pin it.
source_url("https://raw.githubusercontent.com/obigriffith/biostar-tutorials/master/Heatmaps/heatmap.3.R") #when run, the following msg comes: "SHA-1 hash of file is.."
#heatmap
# Write the annotated heatmap to PDF (absolute output path is machine-specific).
pdf(file="/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/finding.core.genes/core_genes_for_heatmap/051117_final/core_upgenes_for_heatmap_17x35_ft1.9.pdf", height=35, width=17)
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.9, RowSideColors=rlab, RowSideColorsSize=22, margin=c(3,9))
dev.off()
#legend
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.4, RowSideColors=rlab, RowSideColorsSize=22, margin=c(9,9))
#legend("topright",legend=c("Antimicrobial response","Stress response","Metabolism","Secretion","Metal ion homeostasis","Translation control", "Wound healing/tissue repair", "Cell redox/cell cycle control/cell growth", "Proteolysis", "Neuron-related","","10","9","8","7","","Regulated only in live infection"), fill=c(colors_for_bars,"white",shades_for_bar,"white",yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
legend("topright",legend=c("10","9","8","7","","Regulated only in live infection"), fill=c(shades_for_bar,"white",yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
#colorkey
heatmap.2(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", col=my_palette, key=T, keysize = 1, key.xlab="Expression Fold Change", cexCol=1.3, cexRow=0.75, margin=c(9,9), lmat = rbind(c(0,4),c(2,1),c(3,0)), lwid = c(1.5,4), lhei = c(1,4,1))
########################################
#Core downregulated genes
# Same pipeline as the up-regulated section above, but for the core
# down-regulated genes (9 functional groups instead of 11).
rm(list=ls(all=TRUE)) #delete any previous entry
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/")
library(gplots); library(RColorBrewer); library("devtools")
#Prepare data
selected_core_genes = read.table("finding.core.genes/core_genes_for_heatmap/core_dogenes_for_heatmap_function_classified_050817_simplified.txt", header=T, stringsAsFactors = F, sep="\t")
expression_data = read.table("edgeR_results_with_cpm_filter_all_UCs_Nov_2015/edgeR_basic_comparison_all_genes_FC.txt", header=T) #FC, not count data
expression_data_subset = expression_data[match(selected_core_genes[,1], expression_data[,1]),]
# Magic column indices pick the per-infection FC columns -- assumes the fixed
# layout of the edgeR output file; TODO confirm if that file ever changes.
expression_data_subset_with_name = expression_data_subset[,c(9,15,21,27,33,39,45,47,49,51)] #for FC
rownames(expression_data_subset_with_name) = c(selected_core_genes[,2])
colnames(expression_data_subset_with_name) = c("M.luteus", "E.coli", "S.marcescens Type", "E.faecalis", "P.rettgeri", "Ecc15", "S.aureus", "P.sneebia", "S.marcescens Db11", "P.entomophila")
expression_data_subset_with_name = expression_data_subset_with_name[,c(1:3,6,5,4,7:10)]
#Prepare colors
my_palette = colorRampPalette(c("#4575b4","white","#d73027"))(n = 299) #blue, white, and red (final)
#shades_for_bar = c("#525252","#969696", "#cccccc","#f7f7f7") #for the number of conditions (10, 9, 8, 7): grey
shades_for_bar = c("#525252","#969696", "#cccccc","#e2e2e2") #for the number of conditions (10, 9, 8, 7): grey, 7 is a bit darker here
colors_for_bars = c(brewer.pal(11,"Set3")[11], brewer.pal(7,"Set2")[1], brewer.pal(11,"Set3")[3], brewer.pal(7,"Set2")[2:4], brewer.pal(11,"Set3")[9], brewer.pal(7,"Set2")[6:7]) #for functional groups -- 9 functional groups, colors matching with core upregulated
yes_no_bars = c("black") #for showing up only in live infections or not
#Create column color bars
# Coerce the indicator/count columns to numeric one column at a time:
# as.numeric() on a data.frame errors ("'list' object cannot be coerced to
# type 'double'"), so the original single call could never run.
selected_core_genes[, c(3:13)] <- lapply(selected_core_genes[, c(3:13)], as.numeric)
#Correct the column names
colnames(selected_core_genes) = c("gene_id", "gene_name", "N.sig", "Neuron-related", "Response to toxin", "Metabolism", "Drug metabolism", "Reproduction", "Response to pheromone/olfactory behavior", "Proteolysis", "Oxidation reduction", "Carbohydrate binding", "only_sig_in_live")
#for the number of conditions (10, 9, 8, 7)
# NOTE(review): replace() turns the vector into character after the first
# substitution; the later Numcondition==9 comparisons then rely on R's
# character/number coercion ("9" == 9 is TRUE), which works but is fragile.
Numcondition = replace(selected_core_genes$N.sig, selected_core_genes$N.sig==10, shades_for_bar[1])
Numcondition = replace(Numcondition, Numcondition==9, shades_for_bar[2])
Numcondition = replace(Numcondition, Numcondition==8, shades_for_bar[3])
Numcondition = replace(Numcondition, Numcondition==7, shades_for_bar[4])
#for whether only in live infections or not
liveInfonly = replace(selected_core_genes$only_sig_in_live, selected_core_genes$only_sig_in_live==1, yes_no_bars[1])
liveInfonly = replace(liveInfonly, liveInfonly ==0, "white")
#for the functional groups
# One color bar per functional-group indicator column (1 -> group color,
# 0 -> white); the variable names C1..C9 become the row-side bar labels.
C1 = replace(selected_core_genes[,4], selected_core_genes[,4]==1, colors_for_bars[1]); C1 = replace(C1, C1==0, "white")
C2 = replace(selected_core_genes[,5], selected_core_genes[,5]==1, colors_for_bars[2]); C2 = replace(C2, C2==0, "white")
C3 = replace(selected_core_genes[,6], selected_core_genes[,6]==1, colors_for_bars[3]); C3 = replace(C3, C3==0, "white")
C4 = replace(selected_core_genes[,7], selected_core_genes[,7]==1, colors_for_bars[4]); C4 = replace(C4, C4==0, "white")
C5 = replace(selected_core_genes[,8], selected_core_genes[,8]==1, colors_for_bars[5]); C5 = replace(C5, C5==0, "white")
C6 = replace(selected_core_genes[,9], selected_core_genes[,9]==1, colors_for_bars[6]); C6 = replace(C6, C6==0, "white")
C7 = replace(selected_core_genes[,10], selected_core_genes[,10]==1, colors_for_bars[7]); C7 = replace(C7, C7==0, "white")
C8 = replace(selected_core_genes[,11], selected_core_genes[,11]==1, colors_for_bars[8]); C8 = replace(C8, C8==0, "white")
C9 = replace(selected_core_genes[,12], selected_core_genes[,12]==1, colors_for_bars[9]); C9 = replace(C9, C9==0, "white")
#create a white bar for space (white regardless)
whiteBar = c(rep("white", each=length(C1)))
#put all of them together
rlab=t(cbind(Numcondition, whiteBar, liveInfonly, whiteBar, C1, C2, C3, C4, C5, C6, C7, C8, C9))
#Load latest version of heatmap.3 function
# NOTE(review): source_url() fetches unpinned remote code; pass sha1= to pin it.
source_url("https://raw.githubusercontent.com/obigriffith/biostar-tutorials/master/Heatmaps/heatmap.3.R") #when run, the following msg comes: "SHA-1 hash of file is.."
#heatmap
# Write the annotated heatmap to PDF (absolute output path is machine-specific).
pdf(file="/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/finding.core.genes/core_genes_for_heatmap/051117_final/core_downgenes_for_heatmap_11x11_ft1.3.pdf", height=35, width=17)
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.9, RowSideColors=rlab, RowSideColorsSize=22, margin=c(3,9))
dev.off()
#legend
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.4, RowSideColors=rlab, RowSideColorsSize=22)
#legend("topright",legend=c("Neuron-related","Response to toxin", "Metabolism", "Drug metabolism", "Reproduction", "Response to pheromone/olfactory behavior", "Proteolysis", "Oxidation-reduction", "Carbohydrate binding","","","","","10","9","8","7","","Regulated only in live infection"), fill=c(colors_for_bars,"white",shades_for_bar,"white","white", "white", "white", yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
legend("topright",legend=c("10","9","8","7","","Regulated only in live infection"), fill=c(shades_for_bar,"white",yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
#colorkey
heatmap.2(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", col=my_palette, key=T, keysize = 1, key.xlab="Expression Fold Change", cexCol=1.3, cexRow=0.75, margin=c(9,9), lmat = rbind(c(0,4),c(2,1),c(3,0)), lwid = c(1.5,4), lhei = c(1,4,1))
| /heatmap_core_genes_final.R | no_license | imjoohyu/R.scripts.for.RNA-seq | R | false | false | 13,364 | r | #plot a heatmap of core genes
#May, 8th, 2017
#Joo Hyun Im (ji72)
#heatmap_core_genes.R was adjusted and saved here
# NOTE(review): rm(list=ls()) and setwd() with a machine-specific absolute path
# make this script non-portable; prefer relative paths / an RStudio project.
rm(list=ls(all=TRUE)) #delete any previous entry
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/")
#Check which core genes are regulated only in live infections:
# Load the core up-regulated gene list (switch to the commented line below for
# the down-regulated list) plus the live-infection-only DEG list.
core = read.table("finding.core.genes/core_genes_for_heatmap/core_upgenes_list.txt", header=T, sep="\t") #166 genes
#core = read.table("finding.core.genes/core_genes_for_heatmap/core_dogenes_list.txt", header=T, sep="\t") #166 genes
live_only = read.table("specific.comparisons/overlap_between_live_and_cleanprick_and_heatkilled/list.of.degs.for.live.only.when.core_genes_were_used.txt", header=F) #105 genes
# Flag each core gene (column 4 := 1/0) by whether its id appears in the
# live-infection-only DEG list. Vectorized %in% replaces the original
# row-by-row loop, which rewrote the data.frame on every iteration.
core[, 4] <- as.numeric(as.character(core[, 1]) %in% live_only[, 1])
# Persist the 0/1 live-only flag next to each core gene (use the commented
# line instead when the down-regulated list was loaded above).
write.table(core, file="finding.core.genes/core_genes_for_heatmap/up_sig_only_in_live_results.txt", quote=F, row.names = F, col.names = T)
#write.table(core, file="finding.core.genes/core_genes_for_heatmap/do_sig_only_in_live_results.txt", quote=F, row.names = F, col.names = T)
########################################
#Core upregulated genes
rm(list=ls(all=TRUE)) #delete any previous entry
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/")
library(gplots); library(RColorBrewer); library("devtools")
#Prepare data
# Gene annotation (one row per core gene with functional-group indicator
# columns) and the fold-change table from the edgeR comparisons.
selected_core_genes = read.table("finding.core.genes/core_genes_for_heatmap/core_upgenes_for_heatmap_function_classified_050817_simplified.txt", header=T, stringsAsFactors = F, sep="\t")
expression_data = read.table("edgeR_results_with_cpm_filter_all_UCs_Nov_2015/edgeR_basic_comparison_all_genes_FC.txt", header=T) #FC, not count data
# Keep only the core genes, in annotation order.
expression_data_subset = expression_data[match(selected_core_genes[,1], expression_data[,1]),]
# Magic column indices pick the per-infection FC columns -- assumes the fixed
# layout of the edgeR output file; TODO confirm if that file ever changes.
expression_data_subset_with_name = expression_data_subset[,c(9,15,21,27,33,39,45,47,49,51)] #for FC
rownames(expression_data_subset_with_name) = c(selected_core_genes[,2])
colnames(expression_data_subset_with_name) = c("M.luteus", "E.coli", "S.marcescens Type", "E.faecalis", "P.rettgeri", "Ecc15", "S.aureus", "P.sneebia", "S.marcescens Db11", "P.entomophila")
# Reorder columns into the desired display order of infections.
expression_data_subset_with_name = expression_data_subset_with_name[,c(1:3,6,5,4,7:10)]
#Prepare colors
my_palette = colorRampPalette(c("#4575b4","white","#d73027"))(n = 299) #blue, white, and red (final)
#shades_for_bar = c("#525252","#969696", "#cccccc","#f7f7f7") #for the number of conditions (10, 9, 8, 7): grey
shades_for_bar = c("#525252","#969696", "#cccccc","#e2e2e2") #for the number of conditions (10, 9, 8, 7): grey, 7 is a bit darker here
colors_for_bars = c(brewer.pal(11,"Set3")[1:9], brewer.pal(11,"Set3")[11:12]) #for functional groups -- 11 functional groups
yes_no_bars = c("black") #for showing up only in live infections or not
#Create column color bars
# Coerce the indicator/count columns to numeric one column at a time:
# as.numeric() on a data.frame errors ("'list' object cannot be coerced to
# type 'double'"), so the original single call could never run.
selected_core_genes[, c(3:15)] <- lapply(selected_core_genes[, c(3:15)], as.numeric)
selected_core_genes = selected_core_genes[,c(1:3,5:15)] #remove Signaling
#Correct the column names
colnames(selected_core_genes) = c("gene_id", "gene_name", "N.sig", "Antimicrobial response", "Stress response", "Metabolism", "Secretion", "Metal ion homeostasis", "Translation control", "Wound healing/tissue repair", "Cell redox/cell cycle control/cell growth", "Proteolysis", "Neuron-related","only_sig_in_live")
#for the number of conditions (10, 9, 8, 7)
# NOTE(review): replace() turns the vector into character after the first
# substitution; the later Numcondition==9 comparisons then rely on R's
# character/number coercion ("9" == 9 is TRUE), which works but is fragile.
Numcondition = replace(selected_core_genes$N.sig, selected_core_genes$N.sig==10, shades_for_bar[1])
Numcondition = replace(Numcondition, Numcondition==9, shades_for_bar[2])
Numcondition = replace(Numcondition, Numcondition==8, shades_for_bar[3])
Numcondition = replace(Numcondition, Numcondition==7, shades_for_bar[4])
#for whether only in live infections or not
liveInfonly = replace(selected_core_genes$only_sig_in_live, selected_core_genes$only_sig_in_live==1, yes_no_bars[1])
liveInfonly = replace(liveInfonly, liveInfonly ==0, "white")
#for the functional groups
# One color bar per functional-group indicator column (1 -> group color,
# 0 -> white); the variable names C1..C10 become the row-side bar labels.
C1 = replace(selected_core_genes[,4], selected_core_genes[,4]==1, colors_for_bars[1]); C1 = replace(C1, C1==0, "white")
C2 = replace(selected_core_genes[,5], selected_core_genes[,5]==1, colors_for_bars[2]); C2 = replace(C2, C2==0, "white")
C3 = replace(selected_core_genes[,6], selected_core_genes[,6]==1, colors_for_bars[3]); C3 = replace(C3, C3==0, "white")
C4 = replace(selected_core_genes[,7], selected_core_genes[,7]==1, colors_for_bars[4]); C4 = replace(C4, C4==0, "white")
C5 = replace(selected_core_genes[,8], selected_core_genes[,8]==1, colors_for_bars[5]); C5 = replace(C5, C5==0, "white")
C6 = replace(selected_core_genes[,9], selected_core_genes[,9]==1, colors_for_bars[6]); C6 = replace(C6, C6==0, "white")
C7 = replace(selected_core_genes[,10], selected_core_genes[,10]==1, colors_for_bars[7]); C7 = replace(C7, C7==0, "white")
C8 = replace(selected_core_genes[,11], selected_core_genes[,11]==1, colors_for_bars[8]); C8 = replace(C8, C8==0, "white")
C9 = replace(selected_core_genes[,12], selected_core_genes[,12]==1, colors_for_bars[9]); C9 = replace(C9, C9==0, "white")
C10 = replace(selected_core_genes[,13], selected_core_genes[,13]==1, colors_for_bars[10]); C10 = replace(C10, C10==0, "white")
#create a white bar for space (white regardless)
whiteBar = c(rep("white", each=length(C1)))
#put all of them together
rlab=t(cbind(Numcondition, whiteBar, liveInfonly, whiteBar, C1, C2, C3, C4, C5, C6, C7, C8, C9, C10))
#Load latest version of heatmap.3 function
# NOTE(review): source_url() fetches unpinned remote code; pass sha1= to pin it.
source_url("https://raw.githubusercontent.com/obigriffith/biostar-tutorials/master/Heatmaps/heatmap.3.R") #when run, the following msg comes: "SHA-1 hash of file is.."
#heatmap
# Write the annotated heatmap to PDF (absolute output path is machine-specific).
pdf(file="/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/finding.core.genes/core_genes_for_heatmap/051117_final/core_upgenes_for_heatmap_17x35_ft1.9.pdf", height=35, width=17)
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.9, RowSideColors=rlab, RowSideColorsSize=22, margin=c(3,9))
dev.off()
#legend
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.4, RowSideColors=rlab, RowSideColorsSize=22, margin=c(9,9))
#legend("topright",legend=c("Antimicrobial response","Stress response","Metabolism","Secretion","Metal ion homeostasis","Translation control", "Wound healing/tissue repair", "Cell redox/cell cycle control/cell growth", "Proteolysis", "Neuron-related","","10","9","8","7","","Regulated only in live infection"), fill=c(colors_for_bars,"white",shades_for_bar,"white",yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
legend("topright",legend=c("10","9","8","7","","Regulated only in live infection"), fill=c(shades_for_bar,"white",yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
#colorkey
heatmap.2(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", col=my_palette, key=T, keysize = 1, key.xlab="Expression Fold Change", cexCol=1.3, cexRow=0.75, margin=c(9,9), lmat = rbind(c(0,4),c(2,1),c(3,0)), lwid = c(1.5,4), lhei = c(1,4,1))
########################################
#Core downregulated genes
# Same pipeline as the up-regulated section above, but for the core
# down-regulated genes (9 functional groups instead of 11).
rm(list=ls(all=TRUE)) #delete any previous entry
setwd("/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/")
library(gplots); library(RColorBrewer); library("devtools")
#Prepare data
selected_core_genes = read.table("finding.core.genes/core_genes_for_heatmap/core_dogenes_for_heatmap_function_classified_050817_simplified.txt", header=T, stringsAsFactors = F, sep="\t")
expression_data = read.table("edgeR_results_with_cpm_filter_all_UCs_Nov_2015/edgeR_basic_comparison_all_genes_FC.txt", header=T) #FC, not count data
expression_data_subset = expression_data[match(selected_core_genes[,1], expression_data[,1]),]
# Magic column indices pick the per-infection FC columns -- assumes the fixed
# layout of the edgeR output file; TODO confirm if that file ever changes.
expression_data_subset_with_name = expression_data_subset[,c(9,15,21,27,33,39,45,47,49,51)] #for FC
rownames(expression_data_subset_with_name) = c(selected_core_genes[,2])
colnames(expression_data_subset_with_name) = c("M.luteus", "E.coli", "S.marcescens Type", "E.faecalis", "P.rettgeri", "Ecc15", "S.aureus", "P.sneebia", "S.marcescens Db11", "P.entomophila")
expression_data_subset_with_name = expression_data_subset_with_name[,c(1:3,6,5,4,7:10)]
#Prepare colors
my_palette = colorRampPalette(c("#4575b4","white","#d73027"))(n = 299) #blue, white, and red (final)
#shades_for_bar = c("#525252","#969696", "#cccccc","#f7f7f7") #for the number of conditions (10, 9, 8, 7): grey
shades_for_bar = c("#525252","#969696", "#cccccc","#e2e2e2") #for the number of conditions (10, 9, 8, 7): grey, 7 is a bit darker here
colors_for_bars = c(brewer.pal(11,"Set3")[11], brewer.pal(7,"Set2")[1], brewer.pal(11,"Set3")[3], brewer.pal(7,"Set2")[2:4], brewer.pal(11,"Set3")[9], brewer.pal(7,"Set2")[6:7]) #for functional groups -- 9 functional groups, colors matching with core upregulated
yes_no_bars = c("black") #for showing up only in live infections or not
#Create column color bars
# Coerce the indicator/count columns to numeric one column at a time:
# as.numeric() on a data.frame errors ("'list' object cannot be coerced to
# type 'double'"), so the original single call could never run.
selected_core_genes[, c(3:13)] <- lapply(selected_core_genes[, c(3:13)], as.numeric)
#Correct the column names
colnames(selected_core_genes) = c("gene_id", "gene_name", "N.sig", "Neuron-related", "Response to toxin", "Metabolism", "Drug metabolism", "Reproduction", "Response to pheromone/olfactory behavior", "Proteolysis", "Oxidation reduction", "Carbohydrate binding", "only_sig_in_live")
#for the number of conditions (10, 9, 8, 7)
# NOTE(review): replace() turns the vector into character after the first
# substitution; the later Numcondition==9 comparisons then rely on R's
# character/number coercion ("9" == 9 is TRUE), which works but is fragile.
Numcondition = replace(selected_core_genes$N.sig, selected_core_genes$N.sig==10, shades_for_bar[1])
Numcondition = replace(Numcondition, Numcondition==9, shades_for_bar[2])
Numcondition = replace(Numcondition, Numcondition==8, shades_for_bar[3])
Numcondition = replace(Numcondition, Numcondition==7, shades_for_bar[4])
#for whether only in live infections or not
liveInfonly = replace(selected_core_genes$only_sig_in_live, selected_core_genes$only_sig_in_live==1, yes_no_bars[1])
liveInfonly = replace(liveInfonly, liveInfonly ==0, "white")
#for the functional groups
# One color bar per functional-group indicator column (1 -> group color,
# 0 -> white); the variable names C1..C9 become the row-side bar labels.
C1 = replace(selected_core_genes[,4], selected_core_genes[,4]==1, colors_for_bars[1]); C1 = replace(C1, C1==0, "white")
C2 = replace(selected_core_genes[,5], selected_core_genes[,5]==1, colors_for_bars[2]); C2 = replace(C2, C2==0, "white")
C3 = replace(selected_core_genes[,6], selected_core_genes[,6]==1, colors_for_bars[3]); C3 = replace(C3, C3==0, "white")
C4 = replace(selected_core_genes[,7], selected_core_genes[,7]==1, colors_for_bars[4]); C4 = replace(C4, C4==0, "white")
C5 = replace(selected_core_genes[,8], selected_core_genes[,8]==1, colors_for_bars[5]); C5 = replace(C5, C5==0, "white")
C6 = replace(selected_core_genes[,9], selected_core_genes[,9]==1, colors_for_bars[6]); C6 = replace(C6, C6==0, "white")
C7 = replace(selected_core_genes[,10], selected_core_genes[,10]==1, colors_for_bars[7]); C7 = replace(C7, C7==0, "white")
C8 = replace(selected_core_genes[,11], selected_core_genes[,11]==1, colors_for_bars[8]); C8 = replace(C8, C8==0, "white")
C9 = replace(selected_core_genes[,12], selected_core_genes[,12]==1, colors_for_bars[9]); C9 = replace(C9, C9==0, "white")
#create a white bar for space (white regardless)
whiteBar = c(rep("white", each=length(C1)))
#put all of them together
rlab=t(cbind(Numcondition, whiteBar, liveInfonly, whiteBar, C1, C2, C3, C4, C5, C6, C7, C8, C9))
#Load latest version of heatmap.3 function
# NOTE(review): source_url() fetches unpinned remote code; pass sha1= to pin it.
source_url("https://raw.githubusercontent.com/obigriffith/biostar-tutorials/master/Heatmaps/heatmap.3.R") #when run, the following msg comes: "SHA-1 hash of file is.."
#heatmap
# Write the annotated heatmap to PDF (absolute output path is machine-specific).
pdf(file="/Users/JooHyun/Dropbox/Cornell/Lab/Projects/Mega_RNA-seq/finding.core.genes/core_genes_for_heatmap/051117_final/core_downgenes_for_heatmap_11x11_ft1.3.pdf", height=35, width=17)
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.9, RowSideColors=rlab, RowSideColorsSize=22, margin=c(3,9))
dev.off()
#legend
heatmap.3(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", key=F, col=my_palette, cexCol=1.3, cexRow = 1.4, RowSideColors=rlab, RowSideColorsSize=22)
#legend("topright",legend=c("Neuron-related","Response to toxin", "Metabolism", "Drug metabolism", "Reproduction", "Response to pheromone/olfactory behavior", "Proteolysis", "Oxidation-reduction", "Carbohydrate binding","","","","","10","9","8","7","","Regulated only in live infection"), fill=c(colors_for_bars,"white",shades_for_bar,"white","white", "white", "white", yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
legend("topright",legend=c("10","9","8","7","","Regulated only in live infection"), fill=c(shades_for_bar,"white",yes_no_bars), border=FALSE, bty="n", y.intersp = 0.7, cex=1)
#colorkey
heatmap.2(as.matrix(expression_data_subset_with_name), Rowv=FALSE, Colv=FALSE, density.info="none", dendrogram="none",trace="none", symm=F, scale="none", col=my_palette, key=T, keysize = 1, key.xlab="Expression Fold Change", cexCol=1.3, cexRow=0.75, margin=c(9,9), lmat = rbind(c(0,4),c(2,1),c(3,0)), lwid = c(1.5,4), lhei = c(1,4,1))
|
#' Example dataset 3 for HIMA: Survival data outcome
#'
#' A \code{list} dataset containing a phenotype dataset and a mediator
#' dataset (n=200 participants, p=200 biomarkers). The variables in the phenotype are as follows:
#'
#' \itemize{
#' \item Treatment: treated (value = 1) or not treated (value = 0)
#' \item Status: Status indicator: dead (value = 1) or alive (value = 0)
#' \item Time: time to event
#' \item Sex: female (value = 1) or male (value = 0)
#' \item Age: Age of the participant
#' }
#'
#' The datasets are simulated using parameters generated from a real dataset. The code
#' used to generate the data can be found in /inst/script folder of the package.
#'
#' @format A \code{list} with two elements: \code{PhenoData} and \code{Mediator}.
#' @return A list containing two objects: \code{PhenoData} and \code{Mediator}
"Example3"
| /R/example3.R | no_license | cran/HIMA | R | false | false | 798 | r | #' Example dataset 3 for HIMA: Survival data outcome
#'
#' A \code{list} dataset containing a phenotype dataset and a mediator
#' dataset (n=200 participants, p=200 biomarkers). The variables in the phenotype are as follows:
#'
#' \itemize{
#' \item Treatment: treated (value = 1) or not treated (value = 0)
#' \item Status: Status indicator: dead (value = 1) or alive (value = 0)
#' \item Time: time to event
#' \item Sex: female (value = 1) or male (value = 0)
#' \item Age: Age of the participant
#' }
#'
#' The datasets are simulated using parameters generated from a real dataset. The code
#' used to generate the data can be found in /inst/script folder of the package.
#'
#' @format A \code{list} with two elements: \code{PhenoData} and \code{Mediator}.
#' @return A list containing two objects: \code{PhenoData} and \code{Mediator}
"Example3"
|
#' Simulate a monitoring scheme on survey data
#'
#' \code{Sampling} selects plots from \code{data}, assigns them to experts and
#' volunteers, restricts observations to the requested year/month/day
#' frequencies, attaches per-visit observer costs, and simulates detections of
#' each observer group via \code{SamplingEcologist}.
#'
#' NOTE(review): relies on helpers defined elsewhere in this package
#' (\code{ChoosePlots}, \code{ExpertPlots}, \code{VolunteerPlots},
#' \code{SamplingEcologist}) and on a fixed column layout of \code{data}
#' (column 1 = plot id, 6 = year, 7 = month, 8 = day).
#'
#' @param data data.frame of observations (plot id in column 1, year/month/day
#'   in columns 6-8).
#' @param num.of.plots number of plots to monitor.
#' @param expert,volunteer numeric parameter vectors; elements 1-4 are passed
#'   to \code{SamplingEcologist}, element 5 is the cost per visit.
#' @param num.experts number of plots sampled by experts; the remaining
#'   \code{num.of.plots - num.experts} plots go to volunteers.
#' @param frequency.year,frequency.month,frequency.day sampling frequency:
#'   every n-th year/month/day occurring in the data is visited (0 means that
#'   time level is absent in the data).
#' @param outputall logical; if \code{TRUE} return all rows, otherwise only
#'   the rows that were actually sampled.
#' @return \code{data} augmented with \code{expert.volunteer} and \code{costs}
#'   columns (subset to sampled rows unless \code{outputall} is \code{TRUE}).
Sampling <- function(data, num.of.plots, expert, volunteer, num.experts,
frequency.year, frequency.month, frequency.day,
outputall) {
# Validate inputs before doing any work.
CheckingInputs (data, num.of.plots, expert, volunteer,
num.experts, frequency.year, frequency.month, frequency.day)
currentdata <- data
currentdata$expert.volunteer <- 0
currentdata$costs <- 0
# Split the chosen plots between experts and volunteers.
chosenplots <- ChoosePlots (currentdata, num.of.plots)
expert.plots <- ExpertPlots (chosenplots, num.of.plots, num.experts)
volunteer.plots <- VolunteerPlots (chosenplots, expert.plots)
# Keep every frequency.year-th year that occurs in the data.
year <- sort( unique (currentdata[, 6]))
year <- year[seq (1, length(year), frequency.year)]
if (frequency.month > 0) {
month <- sort (unique (currentdata[, 7]))
month <- month[seq (1, length(month), frequency.month)]
} else {
month <- 0
}
# (length(month)%/%frequency.month / 2) starting in the middle or
# at the beginning??
# Need to be tested for a different amount of days per month or different
# dates
if (frequency.day > 0) {
day <- sort (unique (currentdata[, 8])) # not sure - maybe using the total amount of days of a month
day <- day[seq (1, length(day), frequency.day)]
} else {
day <- 0
}
# Mark sampled rows per observer group and attach the per-visit cost
# (element 5 of the respective parameter vector).
currentdata$expert.volunteer[currentdata[, 1] %in% expert.plots &
currentdata[, 6] %in% year & currentdata[, 7]
%in% month & currentdata[, 8]
%in% day] <- "expert"
currentdata$costs[currentdata[, 1] %in% expert.plots & currentdata[, 6]
%in% year & currentdata[, 7] %in% month & currentdata[, 8]
%in% day] <- expert[5]
currentdata$expert.volunteer[currentdata[, 1] %in% volunteer.plots &
currentdata[, 6] %in% year &
currentdata[, 7] %in% month &
currentdata[, 8] %in% day] <- "volunteer"
currentdata$costs[currentdata[, 1] %in% volunteer.plots & currentdata[, 6]
%in% year & currentdata[, 7] %in% month &
currentdata[, 8] %in% day] <- volunteer[5]
# Simulate detections for each observer group that has at least one plot.
if (num.experts > 0) {
currentdata <- SamplingEcologist (currentdata, expert.plots, expert[1],
expert[2], expert[3], expert[4])
}
if (num.experts != num.of.plots) {
currentdata <- SamplingEcologist (currentdata, volunteer.plots,
volunteer[1], volunteer[2], volunteer[3],
volunteer[4])
}
# Return everything, or only the rows that were actually sampled.
if (outputall) {
return (currentdata)
} else {
currentdata <- currentdata[currentdata$expert.volunteer != 0, ]
return (currentdata)
}
}
#' Error messages for the function \code{Sampling}
#'
#' \code{CheckingInputs} validates the inputs of the function \code{Sampling}
#' and stops with an informative error message when an input is invalid.
#'
#' @param data data.frame of observations; columns 7 and 8 (month and day) are
#'   checked against the sampling frequencies.
#' @param num.of.plots total number of plots to sample.
#' @param expert,volunteer numeric parameter vectors for the two observer
#'   groups.
#' @param num.experts number of plots sampled by experts.
#' @param frequency.year,frequency.month,frequency.day sampling frequencies.
#' @return \code{NULL}, invisibly; called for its side effect of stopping on
#'   invalid input.
CheckingInputs <- function (data, num.of.plots, expert, volunteer, num.experts,
                            frequency.year, frequency.month, frequency.day) {
  if (num.of.plots < num.experts) {
    stop ("num.of.plots needs to be equal to or greater than num.experts")
  }
  if (frequency.year < 1) {
    stop ("frequency.year has to be 1 or higher")
  }
  # Scalar conditions, so use the short-circuiting && rather than vectorized &.
  if (frequency.month == 0 && data[1, 7] > 0) {
    stop ("frequency.month has to be 1 or higher")
  }
  if (frequency.day == 0 && data[1, 8] > 0) {
    stop ("frequency.day has to be 1 or higher")
  }
  # Implements the TODO left in the original: the expert/volunteer parameter
  # vectors must be numeric (elements are used as probabilities and costs).
  if (!is.numeric(expert) || !is.numeric(volunteer)) {
    stop ("expert and volunteer have to be numeric vectors")
  }
  invisible(NULL)
}
| /R/sampling_r_code.R | permissive | fschirr/sampling_r_package | R | false | false | 3,699 | r | Sampling <- function(data, num.of.plots, expert, volunteer, num.experts,
frequency.year, frequency.month, frequency.day,
outputall) {
CheckingInputs (data, num.of.plots, expert, volunteer,
num.experts, frequency.year, frequency.month, frequency.day)
currentdata <- data
currentdata$expert.volunteer <- 0
currentdata$costs <- 0
chosenplots <- ChoosePlots (currentdata, num.of.plots)
expert.plots <- ExpertPlots (chosenplots, num.of.plots, num.experts)
volunteer.plots <- VolunteerPlots (chosenplots, expert.plots)
year <- sort( unique (currentdata[, 6]))
year <- year[seq (1, length(year), frequency.year)]
if (frequency.month > 0) {
month <- sort (unique (currentdata[, 7]))
month <- month[seq (1, length(month), frequency.month)]
} else {
month <- 0
}
# (length(month)%/%frequency.month / 2) starting in the middle or
# at the beginning??
# Need to be tested for a different amount of days per month or different
# dates
if (frequency.day > 0) {
day <- sort (unique (currentdata[, 8])) # not sure - maybe using the total amount of days of a month
day <- day[seq (1, length(day), frequency.day)]
} else {
day <- 0
}
currentdata$expert.volunteer[currentdata[, 1] %in% expert.plots &
currentdata[, 6] %in% year & currentdata[, 7]
%in% month & currentdata[, 8]
%in% day] <- "expert"
currentdata$costs[currentdata[, 1] %in% expert.plots & currentdata[, 6]
%in% year & currentdata[, 7] %in% month & currentdata[, 8]
%in% day] <- expert[5]
currentdata$expert.volunteer[currentdata[, 1] %in% volunteer.plots &
currentdata[, 6] %in% year &
currentdata[, 7] %in% month &
currentdata[, 8] %in% day] <- "volunteer"
currentdata$costs[currentdata[, 1] %in% volunteer.plots & currentdata[, 6]
%in% year & currentdata[, 7] %in% month &
currentdata[, 8] %in% day] <- volunteer[5]
if (num.experts > 0) {
currentdata <- SamplingEcologist (currentdata, expert.plots, expert[1],
expert[2], expert[3], expert[4])
}
if (num.experts != num.of.plots) {
currentdata <- SamplingEcologist (currentdata, volunteer.plots,
volunteer[1], volunteer[2], volunteer[3],
volunteer[4])
}
if (outputall) {
return (currentdata)
} else {
currentdata <- currentdata[currentdata$expert.volunteer != 0, ]
return (currentdata)
}
}
#' Error messages for the function \code{Sampling}
#'
#' \code{CheckingInputs} validates the inputs of the function \code{Sampling}
#' and stops with an informative error message when an input is invalid.
#'
#' @param data data.frame of observations; columns 7 and 8 (month and day) are
#'   checked against the sampling frequencies.
#' @param num.of.plots total number of plots to sample.
#' @param expert,volunteer numeric parameter vectors for the two observer
#'   groups.
#' @param num.experts number of plots sampled by experts.
#' @param frequency.year,frequency.month,frequency.day sampling frequencies.
#' @return \code{NULL}, invisibly; called for its side effect of stopping on
#'   invalid input.
CheckingInputs <- function (data, num.of.plots, expert, volunteer, num.experts,
                            frequency.year, frequency.month, frequency.day) {
  if (num.of.plots < num.experts) {
    stop ("num.of.plots needs to be equal to or greater than num.experts")
  }
  if (frequency.year < 1) {
    stop ("frequency.year has to be 1 or higher")
  }
  # Scalar conditions, so use the short-circuiting && rather than vectorized &.
  if (frequency.month == 0 && data[1, 7] > 0) {
    stop ("frequency.month has to be 1 or higher")
  }
  if (frequency.day == 0 && data[1, 8] > 0) {
    stop ("frequency.day has to be 1 or higher")
  }
  # Implements the TODO left in the original: the expert/volunteer parameter
  # vectors must be numeric (elements are used as probabilities and costs).
  if (!is.numeric(expert) || !is.numeric(volunteer)) {
    stop ("expert and volunteer have to be numeric vectors")
  }
  invisible(NULL)
}
|
#' Remove LOF outliers above a score threshold
#'
#' Appends the per-row mean of the supplied LOF score table as a "mean"
#' column, flags every row whose mean score exceeds \code{threshold}, and
#' drops those rows from both the score table and \code{data}.
#'
#' Fixes two defects of the original implementation:
#' * the mean column was hard-coded as column 7 (only correct for exactly six
#'   k-values); it is now looked up as the last column, and
#' * plyr::count() was used to count outliers, which yielded NA both when no
#'   row and when every row exceeded the threshold, so the all-outlier case
#'   was silently treated as "no outliers"; base sum() handles both edge
#'   cases (returning 0 instead of NA when none) and drops the plyr dependency.
#'
#' @param data data.frame of observations, one row per LOF score row.
#' @param threshold numeric; rows with mean LOF score above this are outliers.
#' @param outlier.scores data.frame/matrix of LOF scores, one column per k.
#' @return A list of: the score table without outliers, \code{data} without
#'   outliers, the number of outliers found (0 when none), and a data.frame
#'   with the outliers' \code{Position} and \code{Score} (NULL when none).
LOFCraft <- function(data = iris[, -5], threshold = 2.0, outlier.scores) {
  outlier.scores <- data.frame(outlier.scores, mean = rowMeans(outlier.scores))
  mean.col <- ncol(outlier.scores)  # the just-appended mean column
  above <- outlier.scores[, mean.col] > threshold
  numberOfOutliersFound <- sum(above, na.rm = TRUE)
  if (numberOfOutliersFound == 0) {
    withoutOutliers.scores <- outlier.scores
    outliers <- NULL
    dataWithoutOutliers <- data
  } else {
    # Positions of the highest-scoring rows, i.e. exactly the flagged outliers.
    pos <- order(outlier.scores[, mean.col], decreasing = TRUE)[seq_len(numberOfOutliersFound)]
    outliers <- data.frame(Position = pos, Score = outlier.scores[pos, mean.col])
    withoutOutliers.scores <- outlier.scores[-pos, ]
    dataWithoutOutliers <- data[-pos, ]
  }
  return(list(withoutOutliers.scores, dataWithoutOutliers, numberOfOutliersFound, outliers))
}
#
# res<-LOFCraft(iris[,-5],threshold = 1.6,k = c(5:10)) ##calling LOF
# outlier.scores=data.frame(res[1]) ## scores for the original data
# #str(outlier.scores)
# withoutOutliers.scores=data.frame(res[2]) ## scores of data without outliers
# #str(withoutOutliers.scores)
# dataWithoutOutliers<-data.frame(res[3]) ##the data without outliers
# #str(dataWithoutOutliers)
# #View(dataWithoutOutliers)
# DensityPlot(outlier.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
# DensityPlot(withoutOutliers.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
#
#
#
# ##example 2 ...
# res<-LOFCraft(mtcars,threshold = 1.25,k = c(5:10)) ##calling LOF
# outlier.scores=data.frame(res[1]) ## scores for the original data
# str(outlier.scores)
# withoutOutliers.scores=data.frame(res[2]) ## scores of data without outliers
# str(withoutOutliers.scores)
# dataWithoutOutliers<-data.frame(res[3]) ##the data without outliers
# str(dataWithoutOutliers)
# #View(dataWithoutOutliers)
# howManyOutliers<-as.numeric(res[4]) ## the total number of outliers
# outliers=data.frame(res[5]) ## the positions of the outliers in the original data and their respective scores
# str(outliers)
#
#DensityPlot(outlier.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
# DensityPlot(withoutOutliers.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
#
| /funciones/LOF.R | no_license | lilianabarrosc/Shiny | R | false | false | 3,105 | r |
LOFCraft <- function(data = iris[, -5], threshold = 2.0, outlier.scores)
{
  # Flags rows whose mean LOF score (averaged across the k values used to
  # produce `outlier.scores`) exceeds `threshold`, and removes them from `data`.
  #
  # data:           the data set the scores were computed from (one row per score row)
  # threshold:      mean-score cut-off above which a row counts as an outlier
  # outlier.scores: matrix/data.frame of LOF scores, one column per k value
  #
  # Returns a list of four elements:
  #   [[1]] scores (with appended mean column) minus the outlier rows
  #   [[2]] `data` minus the outlier rows
  #   [[3]] number of outliers found (0 when none; was NA via plyr::count)
  #   [[4]] data.frame(Position, Score) of the outliers, or NULL when none
  mean <- rowMeans(outlier.scores)                    # mean score per row
  outlier.scores <- data.frame(outlier.scores, mean)  # append mean column
  # bug fix: the mean column was hard-coded as column 7, which only worked for
  # exactly six k values; use the actual last column instead
  mean.col <- ncol(outlier.scores)
  aux <- outlier.scores[, mean.col] > threshold
  # base R replaces plyr::count(aux)[2, 2], which returned NA when nothing
  # was flagged and pulled in an extra dependency
  numberOfOutliersFound <- sum(aux)
  if (numberOfOutliersFound == 0) {
    withoutOutliers.scores <- outlier.scores
    outliers <- NULL
    dataWithoutOutliers <- data
  } else {
    # positions of the most remote rows, highest mean score first
    positions <- order(outlier.scores[, mean.col], decreasing = TRUE)[seq_len(numberOfOutliersFound)]
    Score <- outlier.scores[positions, mean.col]
    outliers <- data.frame(Position = positions, Score = Score)
    withoutOutliers.scores <- outlier.scores[-positions, ]  # drop outlier rows
    dataWithoutOutliers <- data[-positions, ]
  }
  return(list(withoutOutliers.scores, dataWithoutOutliers, numberOfOutliersFound, outliers))
}
#
# res<-LOFCraft(iris[,-5],threshold = 1.6,k = c(5:10)) ##calling LOF
# outlier.scores=data.frame(res[1]) ## scores for the original data
# #str(outlier.scores)
# withoutOutliers.scores=data.frame(res[2]) ## scores of data without outliers
# #str(withoutOutliers.scores)
# dataWithoutOutliers<-data.frame(res[3]) ##the data without outliers
# #str(dataWithoutOutliers)
# #View(dataWithoutOutliers)
# DensityPlot(outlier.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
# DensityPlot(withoutOutliers.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
#
#
#
# ##example 2 ...
# res<-LOFCraft(mtcars,threshold = 1.25,k = c(5:10)) ##calling LOF
# outlier.scores=data.frame(res[1]) ## scores for the original data
# str(outlier.scores)
# withoutOutliers.scores=data.frame(res[2]) ## scores of data without outliers
# str(withoutOutliers.scores)
# dataWithoutOutliers<-data.frame(res[3]) ##the data without outliers
# str(dataWithoutOutliers)
# #View(dataWithoutOutliers)
# howManyOutliers<-as.numeric(res[4]) ## the total number of outliers
# outliers=data.frame(res[5]) ## the positions of the outliers in the original data and their respective scores
# str(outliers)
#
#DensityPlot(outlier.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
# DensityPlot(withoutOutliers.scores, ncol(outlier.scores)) #Generating a plot of outliers scores
#
|
\name{grad_ns_bonds_grid}
\alias{grad_ns_bonds_grid}
\title{Nelson/Siegel Gradient Function for the Grid Search
}
\description{
Calculates the gradient of the objective function for the grid search. The objective function minimizes the sum of the weighted squared price errors. The spot rate function is based on Nelson/Siegel.
}
\usage{
grad_ns_bonds_grid(beta, tau, m, cf, w, p)
}
\arguments{
\item{beta}{Spot rate parameter vector
}
\item{tau}{fixed parameters
}
\item{m}{maturity matrix
}
\item{cf}{cashflow matrix
}
\item{w}{weights vector
}
\item{p}{price vector
}
}
\value{ returns the gradient vector}
| /man/grad_ns_bonds_grid.Rd | no_license | gcamilo/termstrc | R | false | false | 628 | rd | \name{grad_ns_bonds_grid}
\alias{grad_ns_bonds_grid}
\title{Nelson/Siegel Gradient Function for the Grid Search
}
\description{
Calculates the gradient of the objective function for the grid search. The objective function minimizes the sum of the weighted squared price errors. The spot rate function is based on Nelson/Siegel.
}
\usage{
grad_ns_bonds_grid(beta, tau, m, cf, w, p)
}
\arguments{
\item{beta}{Spot rate parameter vector
}
\item{tau}{fixed parameters
}
\item{m}{maturity matrix
}
\item{cf}{cashflow matrix
}
\item{w}{weights vector
}
\item{p}{price vector
}
}
\value{ returns the gradient vector}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  # Wraps a matrix together with a cache slot for its inverse.
  # Returns a list of accessor functions:
  #   set(y)         replace the stored matrix and invalidate the cache
  #   get()          return the stored matrix
  #   setsolve(s)    store a computed inverse in the cache
  #   getsolve()     return the cached inverse (NULL when not yet computed)
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    inv <<- solve
  }
  getsolve <- function() {
    inv
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', computing it at most once.
  ##
  ## x:   a cache object created by makeCacheMatrix()
  ## ...: further arguments forwarded to solve()
  m <- x$getsolve()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)  # bug fix: was `clearreturn(m)`, which is not a function and
               # crashed every cache hit
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setsolve(m)  # remember the result for the next call
  m
}
| /cachematrix.R | no_license | uxana-lab/ProgrammingAssignment2 | R | false | false | 725 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  # Wraps a matrix together with a cache slot for its inverse.
  # Returns a list of accessor functions:
  #   set(y)         replace the stored matrix and invalidate the cache
  #   get()          return the stored matrix
  #   setsolve(s)    store a computed inverse in the cache
  #   getsolve()     return the cached inverse (NULL when not yet computed)
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    inv <<- solve
  }
  getsolve <- function() {
    inv
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', computing it at most once.
  ##
  ## x:   a cache object created by makeCacheMatrix()
  ## ...: further arguments forwarded to solve()
  m <- x$getsolve()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)  # bug fix: was `clearreturn(m)`, which is not a function and
               # crashed every cache hit
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setsolve(m)  # remember the result for the next call
  m
}
|
testlist <- list(DAT = structure(c(3.10045080631114e-140, 5.64837120621297e+125, 4.06956751982026e+95, 6.59400621480488e+135, 1.18393360738069e-272, 5.35892842952669e-173, 2.86977581121643e-15, 2.2386738721768e+270, 2.98853116521547e+143, 3.08420528117937e+276, 3.33889424831818e-239, 2.00058425814441e-113, 2.46562043224079e-85, 1.89261876993418e-113, 1.78433108967668e+169, 9.46801839280429e-51, 2.48283345537456e-176, 8.21195771569161e+288, 4.5560582271321e+117, 1.0797329973067e+245, 3.91726596706926e-283, 2.36534175024629e+188, 4.62886637564784e+149, 1.95531992383552e-132, 2.24216371882707e+243, 3.15962229513625e-133, -Inf, 1.03502208669886e-277, 1.44329558427272e+189, 2.15560984776751e+185, 1.75933361941065e-114, 8.24221549962438e-287, 6.79398327699747e+55, 7.20263011526498e+40, 3.80926860974838e-156, 1.33550472691882e+204, 2.62538996893194e+129, 6.8576940616979e-16, 1.98743400939048e+154, 1.51886024823543e-282, 9.00123031400698e+84, 3.0243884984874e+234, 1.08707866440307e+120, 2.96591522379483e-146, 1.95816798750811e-131, 1.1753306209927e-122, 1.0936207305258e-194, 6.71962574015995e-160, Inf, -Inf, 1.92199862573809e-190, 4.96534090618195e+107, 1.35722207577192e-292, 3.18497333306282e+232, 3.64103714844602e-233, 2.88131094116101e+218, 76065827.688744, Inf, 2.8816084901775e+131, 1.27133442567742e+256, Inf, 2.58069493284837e+92, 1.83131586623993e+43, 2.30802117553111e-243, 3.00755495315194e+162, 9.88725471179051e+56, 6.83873559857537e-277, 4.07538849532164e+27, 1.15617076673217e+141, 5.3595347089513e+194, 1.83289283459492e-105, 3.22121394014806e-307, 5.10252627988266e-139, 1.55281134536723e-61, 2.06418845533417e+82, 8.83202732272626e-282, 4.79072923958292e+75, 6.2030748819062e-218, 27.5113146236504, 7.46244315476878e+67, 2460.21952908724), .Dim = c(9L, 9L)), DATRESP = structure(c(2.29157327002727e+103, 1.71090868935708e-32, 5.31232893578367e+184, 6147.55175587533, 6.93103357665744e+279, 5.92871189432898e+180), .Dim = c(6L, 1L )), GUESS = c(1.55591213325014e+162, 
8.63031018389494e-255, NaN, 1.61089170743615e+221, NaN, -2.97824390177137e-198, -2.02672679124856e+265, -4.09672374931837e+105, -3.55042108182604e+163, -1.9150834128595e+25, 3.03926171802815e-246, 8.75722384783549e-71, -5.9539182104698e+28, 9.7054729016666e+22, NaN, -382818358160.389, -1.69542457924557e+47, 2.64297568621164e-111, 8.46097541770778e+100, -1.19732208473033e+90, -Inf), LATRESP = structure(c(NA, NaN, NA, 3.19343794378365e-193, 2.38321605042757e-73, 3.22001366855571e-128), .Dim = c(1L, 6L )), SLIP = c(-3.77063484535263e-05, 6.28890620448226e+190, 6.08729433160384e-232, -1.61117534396225e+300, 3.61015168119058e+131, 8.041084637703e-159, -9.63564433138483e-293, -9.60941475400201e+153, 2.76131403207845e+80, -1.18033525358934e+188, 1.55329352025042e+109, -3.02382294508374e+224, 7.50163458170595e-87, 4.20512184659921e-74, -2.84719579668055e-06, 4.50552038952894e+266, -1.92601461032577e-262, -3.28123543137527e-305, 1.22974973746519e-265, 3.11543962813383e-114, 5.2232756672494e-106, 4.77946622220252e+115, -5.68111847630834e-164, -Inf, -2.36808496497317e+49, -1.6023561721826e-75, NaN, 5.95905851469336e-174, -1.27549007575047e+189, -6.58490364599251e-43, 6.19214849642716e+187, 6.59067728250347e-16, 1.07687189901527e+222, -2.09951154432044e-103, 3.53686265923991, 7.50472634476625e+195, 2.47258019511576e+112, Inf, -5.76947889082735e+46, -3.47717096246514e+154, -1.25619439876261e-99, 9.6463253813567e+187, 1.63809301132753e+155, 8.53645486225398e-260, 1.95830155307574e-297, 5.06171079016514e-58, 160902267687.653, -5.54671001171036e+58, Inf, -5.14204436699264e+67, 2.71854784686493e+110, -8.88242953026711e-197, 1.95844540598717e-85, -6.42686074719734e-86, 2.99178843246918e-245, -3.75569745417869e+227, -4.74057848799521e-82, -4.79800845303098e-296, -1.98597194599615e-206, -6.73212413673609e+63, 2.66530388339769e-305, -5.86855380753907e-298, 7.60372464445266e-106, 0))
result <- do.call(CDM:::cdm_rcpp_din_deterministic_devcrit,testlist)
str(result) | /issuestests/CDM/inst/testfiles/cdm_rcpp_din_deterministic_devcrit/cdm_rcpp_din_deterministic_devcrit_output/log_24315dd461a6552998ee96cbc071755998af683c/cdm_rcpp_din_deterministic_devcrit-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 4,024 | r | testlist <- list(DAT = structure(c(3.10045080631114e-140, 5.64837120621297e+125, 4.06956751982026e+95, 6.59400621480488e+135, 1.18393360738069e-272, 5.35892842952669e-173, 2.86977581121643e-15, 2.2386738721768e+270, 2.98853116521547e+143, 3.08420528117937e+276, 3.33889424831818e-239, 2.00058425814441e-113, 2.46562043224079e-85, 1.89261876993418e-113, 1.78433108967668e+169, 9.46801839280429e-51, 2.48283345537456e-176, 8.21195771569161e+288, 4.5560582271321e+117, 1.0797329973067e+245, 3.91726596706926e-283, 2.36534175024629e+188, 4.62886637564784e+149, 1.95531992383552e-132, 2.24216371882707e+243, 3.15962229513625e-133, -Inf, 1.03502208669886e-277, 1.44329558427272e+189, 2.15560984776751e+185, 1.75933361941065e-114, 8.24221549962438e-287, 6.79398327699747e+55, 7.20263011526498e+40, 3.80926860974838e-156, 1.33550472691882e+204, 2.62538996893194e+129, 6.8576940616979e-16, 1.98743400939048e+154, 1.51886024823543e-282, 9.00123031400698e+84, 3.0243884984874e+234, 1.08707866440307e+120, 2.96591522379483e-146, 1.95816798750811e-131, 1.1753306209927e-122, 1.0936207305258e-194, 6.71962574015995e-160, Inf, -Inf, 1.92199862573809e-190, 4.96534090618195e+107, 1.35722207577192e-292, 3.18497333306282e+232, 3.64103714844602e-233, 2.88131094116101e+218, 76065827.688744, Inf, 2.8816084901775e+131, 1.27133442567742e+256, Inf, 2.58069493284837e+92, 1.83131586623993e+43, 2.30802117553111e-243, 3.00755495315194e+162, 9.88725471179051e+56, 6.83873559857537e-277, 4.07538849532164e+27, 1.15617076673217e+141, 5.3595347089513e+194, 1.83289283459492e-105, 3.22121394014806e-307, 5.10252627988266e-139, 1.55281134536723e-61, 2.06418845533417e+82, 8.83202732272626e-282, 4.79072923958292e+75, 6.2030748819062e-218, 
27.5113146236504, 7.46244315476878e+67, 2460.21952908724), .Dim = c(9L, 9L)), DATRESP = structure(c(2.29157327002727e+103, 1.71090868935708e-32, 5.31232893578367e+184, 6147.55175587533, 6.93103357665744e+279, 5.92871189432898e+180), .Dim = c(6L, 1L )), GUESS = c(1.55591213325014e+162, 8.63031018389494e-255, NaN, 1.61089170743615e+221, NaN, -2.97824390177137e-198, -2.02672679124856e+265, -4.09672374931837e+105, -3.55042108182604e+163, -1.9150834128595e+25, 3.03926171802815e-246, 8.75722384783549e-71, -5.9539182104698e+28, 9.7054729016666e+22, NaN, -382818358160.389, -1.69542457924557e+47, 2.64297568621164e-111, 8.46097541770778e+100, -1.19732208473033e+90, -Inf), LATRESP = structure(c(NA, NaN, NA, 3.19343794378365e-193, 2.38321605042757e-73, 3.22001366855571e-128), .Dim = c(1L, 6L )), SLIP = c(-3.77063484535263e-05, 6.28890620448226e+190, 6.08729433160384e-232, -1.61117534396225e+300, 3.61015168119058e+131, 8.041084637703e-159, -9.63564433138483e-293, -9.60941475400201e+153, 2.76131403207845e+80, -1.18033525358934e+188, 1.55329352025042e+109, -3.02382294508374e+224, 7.50163458170595e-87, 4.20512184659921e-74, -2.84719579668055e-06, 4.50552038952894e+266, -1.92601461032577e-262, -3.28123543137527e-305, 1.22974973746519e-265, 3.11543962813383e-114, 5.2232756672494e-106, 4.77946622220252e+115, -5.68111847630834e-164, -Inf, -2.36808496497317e+49, -1.6023561721826e-75, NaN, 5.95905851469336e-174, -1.27549007575047e+189, -6.58490364599251e-43, 6.19214849642716e+187, 6.59067728250347e-16, 1.07687189901527e+222, -2.09951154432044e-103, 3.53686265923991, 7.50472634476625e+195, 2.47258019511576e+112, Inf, -5.76947889082735e+46, -3.47717096246514e+154, -1.25619439876261e-99, 9.6463253813567e+187, 1.63809301132753e+155, 8.53645486225398e-260, 1.95830155307574e-297, 5.06171079016514e-58, 160902267687.653, -5.54671001171036e+58, Inf, -5.14204436699264e+67, 2.71854784686493e+110, -8.88242953026711e-197, 1.95844540598717e-85, -6.42686074719734e-86, 2.99178843246918e-245, 
-3.75569745417869e+227, -4.74057848799521e-82, -4.79800845303098e-296, -1.98597194599615e-206, -6.73212413673609e+63, 2.66530388339769e-305, -5.86855380753907e-298, 7.60372464445266e-106, 0))
result <- do.call(CDM:::cdm_rcpp_din_deterministic_devcrit,testlist)
str(result) |
###################################################################################################
getGlobalActivePowerWithTime <- function() {
###################################################################################################
# Reads data/household_power_consumption.txt, keeps the observations for
# 2007-02-01 and 2007-02-02 and returns list(global_active_power, timestamp)
# for plotting. The data file must exist under data/ relative to the working
# directory.
# import local data
importdata <- read.table("data/household_power_consumption.txt", header = TRUE, sep = ";")
# re cast date
importdata$Date <- as.Date(importdata$Date, format = "%d/%m/%Y")
# get rows from target range
df <- importdata[(importdata$Date == "2007-02-01") | (importdata$Date == "2007-02-02"), ]
# add column for timestamp; bug fix: the old transform() call passed the format
# string as a stray unnamed argument (it never reached as.POSIXct and silently
# created a junk column). paste(Date, Time) yields "YYYY-MM-DD HH:MM:SS",
# which as.POSIXct parses with its default format.
df$timestamp <- as.POSIXct(paste(df$Date, df$Time))
# assign variables to return
gl <- df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
ts <- df$timestamp
return(list(gl, ts))
}
###################################################################################################
plotGlobalActivePowerWithTime <- function() {
###################################################################################################
# Renders the Feb 1-2 2007 global active power series as a line chart and
# writes it to plot2.png (480x480 px) in the working directory.
# NOTE(review): "kilowats" in the y-axis label looks like a typo for
# "kilowatts" -- left unchanged here because it is a runtime string.
# get data and assign variables
data <- getGlobalActivePowerWithTime()
gl <- data[[1]]
ts <- data[[2]]
# open png file
png("plot2.png", width = 480, height = 480, units = "px")
plot(ts, gl, type = "l", xlab = "" , ylab = "Global Active Power (kilowats)")
dev.off()
}
# Rock and Roll
plotGlobalActivePowerWithTime() | /plot2.R | no_license | SeanPlusPlus/ExData_Plotting1 | R | false | false | 1,409 | r | ###################################################################################################
getGlobalActivePowerWithTime <- function() {
###################################################################################################
# Reads data/household_power_consumption.txt, keeps the observations for
# 2007-02-01 and 2007-02-02 and returns list(global_active_power, timestamp)
# for plotting. The data file must exist under data/ relative to the working
# directory.
# import local data
importdata <- read.table("data/household_power_consumption.txt", header = TRUE, sep = ";")
# re cast date
importdata$Date <- as.Date(importdata$Date, format = "%d/%m/%Y")
# get rows from target range
df <- importdata[(importdata$Date == "2007-02-01") | (importdata$Date == "2007-02-02"), ]
# add column for timestamp; bug fix: the old transform() call passed the format
# string as a stray unnamed argument (it never reached as.POSIXct and silently
# created a junk column). paste(Date, Time) yields "YYYY-MM-DD HH:MM:SS",
# which as.POSIXct parses with its default format.
df$timestamp <- as.POSIXct(paste(df$Date, df$Time))
# assign variables to return
gl <- df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
ts <- df$timestamp
return(list(gl, ts))
}
###################################################################################################
plotGlobalActivePowerWithTime <- function() {
###################################################################################################
# Renders the Feb 1-2 2007 global active power series as a line chart and
# writes it to plot2.png (480x480 px) in the working directory.
# NOTE(review): "kilowats" in the y-axis label looks like a typo for
# "kilowatts" -- left unchanged here because it is a runtime string.
# get data and assign variables
data <- getGlobalActivePowerWithTime()
gl <- data[[1]]
ts <- data[[2]]
# open png file
png("plot2.png", width = 480, height = 480, units = "px")
plot(ts, gl, type = "l", xlab = "" , ylab = "Global Active Power (kilowats)")
dev.off()
}
# Rock and Roll
plotGlobalActivePowerWithTime() |
#************************************************************************************************************
#* plot4.R *
#* Date 04-09-2014 *
#* Made by Ignacio Labella *
#* ignacio_labella@outlook.es *
#************************************************************************************************************
# Script for reading the data
# Note that downloaded file MUST be in the working directory
print("Please be sure that household_power_consumption.txt file is into working directory...")
raw_data <- read.csv("./household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", na.strings = "?")
raw_data$Date <- as.Date(raw_data$Date, format = "%d/%m/%Y")
# subset data: keep only 2007-02-01 and 2007-02-02
tidy_data <- raw_data[raw_data$Date == "2007-02-01" | raw_data$Date == "2007-02-02", ]
# Draws the 2x2 panel on the currently active device; defined once so the
# screen device and the png device render exactly the same figure (the four
# plot sections used to be duplicated verbatim).
draw_panel <- function(tidy_data) {
  par(mfrow = c(2, 2))
  # First plot. Top-Left
  plot(tidy_data$Global_active_power, type = "l", ylab = "Global active power (kilowatts)", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(ylab = "Global Active Power")
  # Second plot. Top-Right
  plot(tidy_data$Voltage, type = "l", ylab = "Voltage", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(xlab = "datetime", ylab = "Voltage")
  # Third plot. Bottom-Left
  plot(tidy_data$Sub_metering_1, type = "l", col = "black", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  lines(tidy_data$Sub_metering_2, col = "red")
  lines(tidy_data$Sub_metering_3, col = "blue")
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(ylab = "Energy sub metering")
  legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
  # Fourth plot. Bottom-Right
  plot(tidy_data$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(xlab = "datetime", ylab = "Global_reactive_power")
}
# render to the screen device
draw_panel(tidy_data)
# copy plot to plot4.png; bug fix: `heigh = 480` relied on partial argument
# matching and is now spelled out as `height`
png(filename = "plot4.png", width = 480, height = 480)
draw_panel(tidy_data)
dev.off()
| /plot4.R | no_license | ignaciolabella/ExData_Plotting1 | R | false | false | 3,404 | r | #************************************************************************************************************
#* plot4.R *
#* Date 04-09-2014 *
#* Made by Ignacio Labella *
#* ignacio_labella@outlook.es *
#************************************************************************************************************
# Script for reading the data
# Note that downloaded file MUST be in the working directory
print("Please be sure that household_power_consumption.txt file is into working directory...")
raw_data <- read.csv("./household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", na.strings = "?")
raw_data$Date <- as.Date(raw_data$Date, format = "%d/%m/%Y")
# subset data: keep only 2007-02-01 and 2007-02-02
tidy_data <- raw_data[raw_data$Date == "2007-02-01" | raw_data$Date == "2007-02-02", ]
# Draws the 2x2 panel on the currently active device; defined once so the
# screen device and the png device render exactly the same figure (the four
# plot sections used to be duplicated verbatim).
draw_panel <- function(tidy_data) {
  par(mfrow = c(2, 2))
  # First plot. Top-Left
  plot(tidy_data$Global_active_power, type = "l", ylab = "Global active power (kilowatts)", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(ylab = "Global Active Power")
  # Second plot. Top-Right
  plot(tidy_data$Voltage, type = "l", ylab = "Voltage", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(xlab = "datetime", ylab = "Voltage")
  # Third plot. Bottom-Left
  plot(tidy_data$Sub_metering_1, type = "l", col = "black", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  lines(tidy_data$Sub_metering_2, col = "red")
  lines(tidy_data$Sub_metering_3, col = "blue")
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(ylab = "Energy sub metering")
  legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
  # Fourth plot. Bottom-Right
  plot(tidy_data$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlim = c(0, 2880), xaxt = "n", ann = FALSE)
  axis(side = 1, c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
  title(xlab = "datetime", ylab = "Global_reactive_power")
}
# render to the screen device
draw_panel(tidy_data)
# copy plot to plot4.png; bug fix: `heigh = 480` relied on partial argument
# matching and is now spelled out as `height`
png(filename = "plot4.png", width = 480, height = 480)
draw_panel(tidy_data)
dev.off()
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{input_select}
\alias{input_checkboxgroup}
\alias{input_radiobuttons}
\alias{input_select}
\title{Create interactive control to select one (or more options) from a list.}
\usage{
input_select(choices, selected = NULL, multiple = FALSE, label = "",
id = rand_id("select_"), map = identity)
input_radiobuttons(choices, selected = NULL, label = "",
id = rand_id("radio_"), map = identity)
input_checkboxgroup(choices, selected = NULL, label = "",
id = rand_id("radio_"), map = identity)
}
\arguments{
\item{choices}{List of values to select from. If elements of the list are
named then that name rather than the value is displayed to the user.}
\item{selected}{The initially selected value (or multiple values if
\code{multiple = TRUE}). If not specified then defaults to the first value
for single-select lists and no values for multiple select lists.}
\item{multiple}{Is selection of multiple items allowed?}
\item{label}{Display label for the control, or \code{NULL}}
\item{id}{A unique identifier for this input. Usually generated
automatically.}
\item{map}{A function with single argument \code{x}, the value of the
control on the client. Returns a modified value.}
}
\description{
\itemize{
\item \code{input_radiobuttons} only ever selects one value
\item \code{input_checkboxgroup} can always select multiple values
\item \code{input_select} can select only one if \code{multiple = FALSE},
otherwise the user can select multiple by using modifier keys
}
}
\examples{
# Dropdown
input_select(c("a", "b", "c"))
input_select(c("a", "b", "c"), multiple = TRUE)
input_select(c("a", "b", "c"), selected = "c")
# If you want to select variable names, you need to convert
# the output of the input to a name with map so that they get
# computed correctly
input_select(names(mtcars), map = as.name)
# Radio buttons
input_radiobuttons(choices = c("Linear" = "lm", "LOESS" = "loess"),
label = "Model type")
input_radiobuttons(choices = c("Linear" = "lm", "LOESS" = "loess"),
selected = "loess",
label = "Model type")
# Used in layer_model_predictions
mtcars \%>\% ggvis(~wt, ~mpg) \%>\%
layer_model_predictions(model = input_radiobuttons(
choices = c("Linear" = "lm", "LOESS" = "loess"),
selected = "loess",
label = "Model type"))
# Checkbox group
mtcars \%>\% ggvis(x = ~wt, y = ~mpg) \%>\%
layer_points(
fill := input_checkboxgroup(
choices = c("Red" = "r", "Green" = "g", "Blue" = "b"),
label = "Point color components",
map = function(val) {
rgb(0.8 * "r" \%in\% val, 0.8 * "g" \%in\% val, 0.8 * "b" \%in\% val)
}
)
)
}
\seealso{
Other interactive input: \code{\link{input_checkbox}};
\code{\link{input_numeric}}, \code{\link{input_text}};
\code{\link{input_slider}}
}
| /man/input_select.Rd | no_license | jjallaire/ggvis | R | false | false | 2,867 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{input_select}
\alias{input_checkboxgroup}
\alias{input_radiobuttons}
\alias{input_select}
\title{Create interactive control to select one (or more options) from a list.}
\usage{
input_select(choices, selected = NULL, multiple = FALSE, label = "",
id = rand_id("select_"), map = identity)
input_radiobuttons(choices, selected = NULL, label = "",
id = rand_id("radio_"), map = identity)
input_checkboxgroup(choices, selected = NULL, label = "",
id = rand_id("radio_"), map = identity)
}
\arguments{
\item{choices}{List of values to select from. If elements of the list are
named then that name rather than the value is displayed to the user.}
\item{selected}{The initially selected value (or multiple values if
\code{multiple = TRUE}). If not specified then defaults to the first value
for single-select lists and no values for multiple select lists.}
\item{multiple}{Is selection of multiple items allowed?}
\item{label}{Display label for the control, or \code{NULL}}
\item{id}{A unique identifier for this input. Usually generated
automatically.}
\item{map}{A function with single argument \code{x}, the value of the
control on the client. Returns a modified value.}
}
\description{
\itemize{
\item \code{input_radiobuttons} only ever selects one value
\item \code{input_checkboxgroup} can always select multiple values
\item \code{input_select} can select only one if \code{multiple = FALSE},
otherwise the user can select multiple by using modifier keys
}
}
\examples{
# Dropdown
input_select(c("a", "b", "c"))
input_select(c("a", "b", "c"), multiple = TRUE)
input_select(c("a", "b", "c"), selected = "c")
# If you want to select variable names, you need to convert
# the output of the input to a name with map so that they get
# computed correctly
input_select(names(mtcars), map = as.name)
# Radio buttons
input_radiobuttons(choices = c("Linear" = "lm", "LOESS" = "loess"),
label = "Model type")
input_radiobuttons(choices = c("Linear" = "lm", "LOESS" = "loess"),
selected = "loess",
label = "Model type")
# Used in layer_model_predictions
mtcars \%>\% ggvis(~wt, ~mpg) \%>\%
layer_model_predictions(model = input_radiobuttons(
choices = c("Linear" = "lm", "LOESS" = "loess"),
selected = "loess",
label = "Model type"))
# Checkbox group
mtcars \%>\% ggvis(x = ~wt, y = ~mpg) \%>\%
layer_points(
fill := input_checkboxgroup(
choices = c("Red" = "r", "Green" = "g", "Blue" = "b"),
label = "Point color components",
map = function(val) {
rgb(0.8 * "r" \%in\% val, 0.8 * "g" \%in\% val, 0.8 * "b" \%in\% val)
}
)
)
}
\seealso{
Other interactive input: \code{\link{input_checkbox}};
\code{\link{input_numeric}}, \code{\link{input_text}};
\code{\link{input_slider}}
}
|
corr <- function(directory, threshold = 0){
  # For every monitor whose number of completely observed cases exceeds
  # `threshold`, computes the correlation between sulfate and nitrate readings.
  #
  # directory: folder holding the per-monitor CSV files (001.csv .. 332.csv)
  # threshold: minimum number of complete cases required to include a monitor
  #
  # Returns a numeric vector of correlations (length 0 when no monitor qualifies).
  # bug fix: `complete` was called with the hard-coded string "specdata",
  # silently ignoring the `directory` argument
  findComplete <- complete(directory, 1:332)
  nobs <- findComplete$nobs
  ids <- findComplete$id[nobs > threshold]
  corrVector <- numeric(length(ids))  # preallocate one slot per qualifying monitor
  counter <- 1
  for (i in ids) {
    # zero-padded file name, e.g. "specdata/007.csv"; replaces the manual
    # three-way if/else string concatenation
    filename <- sprintf("%s/%03d.csv", directory, i)
    currentFile <- read.csv(filename, sep = ",")
    corrVector[counter] <- cor(currentFile$sulfate, currentFile$nitrate, use = "complete.obs")
    counter <- counter + 1
  }
  corrVector
}
} | /Week2/corr.R | no_license | oryani/R-Codes-Coursera | R | false | false | 778 | r | corr <- function(directory, threshold = 0){
findComplete <- complete("specdata", 1:332)
nobs <- findComplete$nobs
ids <- findComplete$id[nobs > threshold]
idLength <- length(ids)
corrVector <- rep(0, idLength)
counter <-1
for(i in ids){
filename <-''
if(i>0 && i<10){
filename <- paste(c(directory, '/00' , i , '.csv'), collapse = '')
}else if(i>9 && i<100){
filename <- paste(c(directory, '/0' , i , '.csv'), collapse = '')
}else{
filename <- paste(c(directory , '/', i , '.csv'), collapse = '')
}
currentFile <- read.csv(filename, sep=",")
corrVector[counter] <- cor(currentFile$sulfate, currentFile$nitrate, use="complete.obs")
counter <- counter + 1
}
result <- corrVector
result
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{winzer}
\alias{winzer}
\title{Winsorizes your data}
\usage{
winzer(data, x, y, z, grouping, value, label, ...)
}
\arguments{
\item{data}{A dataset in tabular format.}
\item{x}{Single value declaration for lower quantile of winsorizing.}
\item{y}{Single value declaration for upper quantile of winsorizing.}
\item{z}{Single value declaration for IQR multiplier.}
\item{grouping}{A list of strings determining by which categorical variable you would like to group your data.}
\item{value}{A single character vector identifying the column containing your measured variable like reaction time as RT.}
\item{label}{A string name indicating the columns containing trials or observation points.}
}
\description{
\code{winzer} performs winsorization on your data.
}
\examples{
winzer(example, grouping = c("year", "subject"), x = 0.25, y = 0.75, z = 1.5, label = "trial", value = "RT")
}
\seealso{
\code{\link{ABV}} for ABV-index, \code{\link{shaker}} for reshape your data.
For extended winsorization see \code{\link[WRS2]{winvar}} and the win function family of the WRS package.
}
| /man/winzer.Rd | no_license | TamasSmahajcsikszabo/REAC | R | false | true | 1,178 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{winzer}
\alias{winzer}
\title{Winsorizes your data}
\usage{
winzer(data, x, y, z, grouping, value, label, ...)
}
\arguments{
\item{data}{A dataset in tabular format.}
\item{x}{Single value declaration for lower quantile of winsorizing.}
\item{y}{Single value declaration for upper quantile of winsorizing.}
\item{z}{Single value declaration for IQR multiplier.}
\item{grouping}{A list of strings determining by which categorical variable you would like to group your data.}
\item{value}{A single character vector identifying the column containing your measured variable like reaction time as RT.}
\item{label}{A string name indicating the columns containing trials or observation points.}
}
\description{
\code{winzer} performs winsorization on your data.
}
\examples{
winzer(example, grouping = c("year", "subject"), x = 0.25, y = 0.75, z = 1.5, label = "trial", value = "RT")
}
\seealso{
\code{\link{ABV}} for ABV-index, \code{\link{shaker}} for reshape your data.
For extended winsorization see \code{\link[WRS2]{winvar}} and the win function family of the WRS package.
}
|
# Hyper-parameter tuning of small CNNs on a 10,000-image subsample of
# CIFAR-10; every architecture configuration below is scored with 5-fold
# cross-validation (caret::createFolds) and the keras training histories
# are accumulated in historyConv1.
library(tensorflow)
library(keras)
library(caret)
set.seed(12345)
# size of the working subsample drawn from the 50,000 training images
dswsize = 10000
cifar10 <- dataset_cifar10()
dssize = 50000
mask = sample (1:dssize,dswsize)
# keep only the sampled rows of the training set (32x32x3 images + labels)
cifar10$train$x = cifar10$train$x[mask,,,]
cifar10$train$y = cifar10$train$y[mask,]
img_rows <- 32
img_cols <- 32
x_train <- cifar10$train$x
y_train <- cifar10$train$y
x_train <- array_reshape(x_train, c(nrow(x_train), img_rows,img_cols,3))
input_shape <- c(img_rows,img_cols,3)
# rescale pixel intensities from [0, 255] to [0, 1]
x_train <- x_train / 255
# one-hot encode the 10 class labels
y_train <- to_categorical(y_train, 10)
# accumulator: one entry per configuration, each a list of 5 fold histories
historyConv1=NULL
# number of training epochs used by every fit below
eps = 60
# First tuning pass: two conv layers (filter then filter2 filters, shared
# kernel size ks1) + one dense hidden layer; grid over filter counts,
# kernel size and hidden units, each scored by 5-fold cross-validation.
for(filter in c(16,32)){
for(filter2 in c(32,64)){
for(ks1 in c(3,4)){
for(unitsHidden in c(128,256)){
historyConv1.It = NULL
# new random fold assignment for every configuration
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_dropout(rate = 0.4) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.5) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
# train on the 4 out-of-fold partitions, validate on the held-out fold
historyConv1.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
# store the 5 fold histories under a descriptive configuration name
name = paste0("Conv-",filter,"-",filter2,"-KS-",ks1, "-Units-",unitsHidden)
historyConv1[[name]] = historyConv1.It
}
}
}
}
# Second tuning pass: four conv layers arranged as two conv blocks; the
# kernel size of each block (ks1, ks2) is varied.
for(filter in c(32)){
for(filter2 in c(64)){
for(ks1 in c(3,4)){
for(ks2 in c(3,4)){
for(unitsHidden in c(128)){
historyConv2.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.35) %>%
# NOTE(review): passing input_shape to this mid-network layer looks like
# copy/paste leftover; keras presumably ignores input_shape except on the
# first layer — confirm against the installed keras version.
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.35) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv2.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
# results are appended into the same historyConv1 accumulator
name = paste0("Conv-",filter,"-",filter,"-",filter2,"-",filter2,"-KS-",ks1,"-",ks2, "-units-",unitsHidden)
historyConv1[[name]] = historyConv2.It
}
}
}
}
}
# Third tuning pass: vary filter counts — 48/96 here; 64/96 and 64/128 in
# the next pass. Dropout raised to 0.4 and an extra dropout added between
# the conv layers of the second block.
for(filter in c(48)){
for(filter2 in c(96)){
for(ks1 in c(3)){
for(ks2 in c(4)){
for(unitsHidden in c(128)){
historyConv1.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.4) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_dropout(rate = 0.4) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.4) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv1.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
name = paste0("Conv-",filter,"-",filter,"-",filter2,"-",filter2,"-KS-",ks1,"-",ks2, "-units-",unitsHidden)
historyConv1[[name]] = historyConv1.It
}
}
}
}
}
# Fourth tuning pass: same four-conv-layer architecture as the previous
# pass, with larger filter grids (64/96 and 64/128) and heavier dropout
# (0.5 everywhere).
for(filter in c(64)){
for(filter2 in c(96,128)){
for(ks1 in c(3)){
for(ks2 in c(4)){
for(unitsHidden in c(128)){
historyConv1.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.5) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.5) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv1.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
name = paste0("Conv-",filter,"-",filter,"-",filter2,"-",filter2,"-KS-",ks1,"-",ks2, "-units-",unitsHidden)
historyConv1[[name]] = historyConv1.It
}
}
}
}
}
# Summarize the tuning runs: for every configuration stored in historyConv1,
# average the validation accuracy reached at the final epoch (eps) across
# all of its CV folds. Compared with the previous loop, this version:
#  - works for any number of folds (no hard-coded 5),
#  - handles an empty historyConv1 (returns a length-0 vector instead of
#    erroring on 1:length()),
#  - avoids growing vectors element by element.
# Result: named numeric vector, one mean per configuration (names are the
# configuration labels built during tuning).
# NOTE(review): older keras versions store this metric as val_acc; newer
# ones use val_accuracy — confirm against the installed keras.
mediasConv <- vapply(
  historyConv1,
  function(fold_histories) {
    # final-epoch validation accuracy of each fold for this configuration
    fold_acc <- vapply(
      fold_histories,
      function(h) h$metrics$val_acc[eps],
      numeric(1)
    )
    mean(fold_acc)
  },
  numeric(1)
)
| /R/Conv-Selection.R | no_license | rafajm7/Deep-Learning-with-Keras | R | false | false | 8,859 | r | library(tensorflow)
library(keras)
library(caret)
set.seed(12345)
dswsize = 10000
cifar10 <- dataset_cifar10()
dssize = 50000
mask = sample (1:dssize,dswsize)
cifar10$train$x = cifar10$train$x[mask,,,]
cifar10$train$y = cifar10$train$y[mask,]
img_rows <- 32
img_cols <- 32
x_train <- cifar10$train$x
y_train <- cifar10$train$y
x_train <- array_reshape(x_train, c(nrow(x_train), img_rows,img_cols,3))
input_shape <- c(img_rows,img_cols,3)
x_train <- x_train / 255
y_train <- to_categorical(y_train, 10)
historyConv1=NULL
eps = 60
# Primer tune
for(filter in c(16,32)){
for(filter2 in c(32,64)){
for(ks1 in c(3,4)){
for(unitsHidden in c(128,256)){
historyConv1.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_dropout(rate = 0.4) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.5) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv1.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
name = paste0("Conv-",filter,"-",filter2,"-KS-",ks1, "-Units-",unitsHidden)
historyConv1[[name]] = historyConv1.It
}
}
}
}
# Empezamos con 4 filtros, variamos kernel size
for(filter in c(32)){
for(filter2 in c(64)){
for(ks1 in c(3,4)){
for(ks2 in c(3,4)){
for(unitsHidden in c(128)){
historyConv2.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.35) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.35) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv2.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
name = paste0("Conv-",filter,"-",filter,"-",filter2,"-",filter2,"-KS-",ks1,"-",ks2, "-units-",unitsHidden)
historyConv1[[name]] = historyConv2.It
}
}
}
}
}
# Variamos filtros: 48-96, y más abajo 64-96 y 64-128
for(filter in c(48)){
for(filter2 in c(96)){
for(ks1 in c(3)){
for(ks2 in c(4)){
for(unitsHidden in c(128)){
historyConv1.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.4) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_dropout(rate = 0.4) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.4) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv1.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
name = paste0("Conv-",filter,"-",filter,"-",filter2,"-",filter2,"-KS-",ks1,"-",ks2, "-units-",unitsHidden)
historyConv1[[name]] = historyConv1.It
}
}
}
}
}
for(filter in c(64)){
for(filter2 in c(96,128)){
for(ks1 in c(3)){
for(ks2 in c(4)){
for(unitsHidden in c(128)){
historyConv1.It = NULL
myfolds = createFolds(y=cifar10$train$y,k=5)
k=5
for(i in 1:k){
model = keras_model_sequential()
model %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu',
input_shape = input_shape) %>%
layer_conv_2d(filters = filter, kernel_size = c(ks1,ks1), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.5) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_conv_2d(filters = filter2, kernel_size = c(ks2,ks2), activation = 'relu') %>%
layer_max_pooling_2d(pool_size = c(2, 2)) %>%
layer_dropout(rate = 0.5) %>%
layer_flatten() %>%
layer_dense(units = unitsHidden, activation = 'relu') %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_adadelta(),
metrics = c('accuracy'))
historyConv1.It[[i]] =
model %>% fit(
x_train[-myfolds[[i]],,,],
y_train[-myfolds[[i]],],
epochs = eps,
batch_size = 128,
validation_data = list(x_train[myfolds[[i]],,,],
y_train[myfolds[[i]],]),
verbose = 0)
}
name = paste0("Conv-",filter,"-",filter,"-",filter2,"-",filter2,"-KS-",ks1,"-",ks2, "-units-",unitsHidden)
historyConv1[[name]] = historyConv1.It
}
}
}
}
}
# Obtenemos las medias en los 5 folds de todos los experimentos
salidasConv <- c()
mediasConv <- c()
for(i in 1:(length(historyConv1))){
for (k in 1:5){
salidasConv[k] = historyConv1[[i]][[k]]$metrics$val_acc[eps]
}
name = names(historyConv1[i])
mediasConv[name] = mean(salidasConv)
}
|
#' @title Apply Warp from FNIRT
#' @description This function applies a coefficient map from \code{\link{fnirt}}
#' to other images
#' @param infile (character) input filename
#' @param reffile (character) reference image to be registered to
#' @param warpfile (character) warp/coefficient image produced by
#' \code{\link{fnirt}}
#' @param outfile (character) output filename
#' @param retimg (logical) return image of class nifti
#' @param reorient (logical) If retimg, should file be reoriented when read in?
#' Passed to \code{\link{readNIfTI}}.
#' @param intern (logical) pass to \code{\link{system}}
#' @param opts (character) additional options to applywarp
#' @param verbose (logical) print out command before running
#' @param ... additional arguments passed to \code{\link{readNIfTI}}.
#' @return character or logical depending on intern
#' @export
fsl_applywarp = function(infile,
                         reffile,
                         warpfile,
                         outfile = NULL,
                         retimg = FALSE,
                         reorient = FALSE,
                         intern = TRUE,
                         opts = "", verbose = TRUE, ...){
  cmd <- get.fsl()
  # FSL always needs an output file; when only an in-memory image is
  # requested, fall back to a temporary file.
  if (retimg) {
    if (is.null(outfile)) {
      outfile = tempfile()
    }
  } else {
    stopifnot(!is.null(outfile))
  }
  # checkimg() accepts nifti objects or paths and returns a filename on disk
  infile = checkimg(infile, ...)
  reffile = checkimg(reffile, ...)
  outfile = checkimg(outfile, ...)
  # strip the image extension; applywarp appends its own
  outfile = nii.stub(outfile)
  # BUGFIX: removed `omat = path.expand(omat)` — `omat` is never defined in
  # this function (copy/paste leftover from a flirt wrapper) and calling the
  # function would stop with "object 'omat' not found".
  cmd <- paste0(cmd, sprintf(
    'applywarp --in="%s" --ref="%s" --out="%s" --warp="%s" %s',
    infile, reffile, outfile, warpfile, opts))
  if (verbose) {
    cat(cmd, "\n")
  }
  res = system(cmd, intern = intern)
  ext = get.imgext()
  outfile = paste0(outfile, ext)
  if (retimg) {
    img = readNIfTI(outfile, reorient = reorient, ...)
    return(img)
  }
  return(res)
}
#' @title FSL applywarp help
#' @description Displays the help text for FSL's \code{applywarp} command.
#' @return Prints help output and returns output as character vector
#' @export
fsl_applywarp.help = function(){
  # fslhelp() invokes the command with its help flag and captures the output
  fslhelp("applywarp")
}
| /R/applywarp.R | no_license | emsweene/fslr | R | false | false | 2,192 | r | #' @title Apply Warp from FNIRT
#' @description This function applies a coefficient map from \code{\link{fnirt}}
#' to other images
#' @param infile (character) input filename
#' @param reffile (character) reference image to be registered to
#' @param warpfile (character) reference image to be registered to
#' @param outfile (character) output filename
#' @param retimg (logical) return image of class nifti
#' @param reorient (logical) If retimg, should file be reoriented when read in?
#' Passed to \code{\link{readNIfTI}}.
#' @param intern (logical) pass to \code{\link{system}}
#' @param opts (character) additional options to FLIRT
#' @param verbose (logical) print out command before running
#' @param ... additional arguments passed to \code{\link{readNIfTI}}.
#' @return character or logical depending on intern
#' @export
fsl_applywarp = function(infile,
reffile,
warpfile,
outfile = NULL,
retimg = FALSE,
reorient = FALSE,
intern=TRUE,
opts="", verbose = TRUE, ...){
cmd <- get.fsl()
if (retimg){
if (is.null(outfile)) {
outfile = tempfile()
}
} else {
stopifnot(!is.null(outfile))
}
# infile = path.expand(infile)
# outfile = path.expand(outfile)
# reffile = path.expand(reffile)
infile = checkimg(infile, ...)
reffile = checkimg(reffile, ...)
outfile = checkimg(outfile, ...)
outfile = nii.stub(outfile)
omat = path.expand(omat)
cmd <- paste0(cmd, sprintf(
'applywarp --in="%s" --ref="%s" --out="%s" --warp="%s" %s',
infile, reffile, outfile, warpfile, opts))
if (verbose){
cat(cmd, "\n")
}
res = system(cmd, intern=intern)
ext = get.imgext()
outfile = paste0(outfile, ext)
if (retimg){
img = readNIfTI(outfile, reorient=reorient, ...)
return(img)
}
return(res)
}
#' @title FSL applywarp help
#' @description This function calls \code{applywarp}'s help
#' @return Prints help output and returns output as character vector
#' @export
fsl_applywarp.help = function(){
return(fslhelp("applywarp"))
}
|
#' Compute the confidence interval for (currently only) simulation-based methods
#'
#' \code{get_confidence_interval()} and \code{get_ci()} are both aliases of \code{conf_int()}
#' @param x data frame of calculated statistics or containing attributes
#' of theoretical distribution values. Currently, dependent on statistics being stored in \code{stat} column as created in \code{calculate()} function.
#' @param level a numerical value between 0 and 1 giving the confidence level. Default value is 0.95.
#' @param type a string giving which method should be used for creating the confidence interval. The default is \code{"percentile"} with \code{"se"} corresponding to (multiplier * standard error) as the other option.
#' @param point_estimate a numeric value or a 1x1 data frame set to NULL by default. Needed to be provided if \code{type = "se"}.
#'
#' @return a 2 x 1 tibble with values corresponding to lower and upper values in the confidence interval
#' @export
#' @rdname get_ci
#' @examples
#' mtcars_df <- mtcars %>%
#'   dplyr::mutate(am = factor(am))
#' d_hat <- mtcars_df %>%
#'   specify(mpg ~ am) %>%
#'   calculate(stat = "diff in means", order = c("1", "0"))
#' bootstrap_distn <- mtcars_df %>%
#'   specify(mpg ~ am) %>%
#'   generate(reps = 100) %>%
#'   calculate(stat = "diff in means", order = c("1", "0"))
#' bootstrap_distn %>% conf_int(level = 0.9)
#' bootstrap_distn %>% conf_int(type = "se", point_estimate = d_hat)
conf_int <- function(x, level = 0.95, type = "percentile",
                     point_estimate = NULL){
  check_ci_args(x, level, type, point_estimate)
  stat <- x[["stat"]]
  alpha <- 1 - level
  if (type == "percentile") {
    # Lower/upper quantiles of the simulated distribution; keep the
    # quantile()-generated names (e.g. "2.5%", "97.5%") as column names.
    bounds <- stats::quantile(stat, probs = c(alpha / 2, level + alpha / 2))
    ci <- tibble::tibble(bounds[1], bounds[2])
    names(ci) <- names(bounds)
  } else {
    # Standard-error method: point estimate +/- z * sd of the statistics.
    point_estimate <- check_obs_stat(point_estimate)
    z <- stats::qnorm(1 - alpha / 2)
    se <- stats::sd(stat)
    ci <- tibble::tibble(
      lower = point_estimate - z * se,
      upper = point_estimate + z * se
    )
  }
  ci
}
# Validate the arguments supplied to conf_int()/get_ci(); stops (via
# stop_glue) on the first invalid argument, otherwise returns invisibly.
check_ci_args <- function(x, level, type, point_estimate){
  if (!is.null(point_estimate)) {
    # A point estimate may be either a bare numeric or a 1x1 data frame.
    if (is.data.frame(point_estimate)) {
      check_type(point_estimate, is.data.frame)
    } else {
      check_type(point_estimate, is.numeric)
    }
  }
  check_type(x, is.data.frame)
  check_type(level, is.numeric)
  if (level <= 0 || level >= 1) {
    stop_glue("The value of `level` must be between 0 and 1 non-inclusive.")
  }
  if (!type %in% c("percentile", "se")) {
    stop_glue('The options for `type` are "percentile" or "se".')
  }
  # The standard-error method cannot run without a point estimate.
  if (type == "se" && is.null(point_estimate)) {
    stop_glue('A numeric value needs to be given for `point_estimate` ',
              'for `type = "se"')
  }
  if (type == "se" && is.vector(point_estimate)) {
    check_type(point_estimate, is.numeric)
  }
}
# get_ci() and get_confidence_interval() are exported aliases bound to the
# very same function object as conf_int().
#' @export
#' @rdname get_ci
get_ci <- conf_int
#' @export
#' @rdname get_ci
get_confidence_interval <- conf_int
| /R/conf_int.R | no_license | corinne-riddell/infer | R | false | false | 3,043 | r | #' Compute the confidence interval for (currently only) simulation-based methods
#'
#' \code{get_confidence_interval()} and \code{get_ci()} are both aliases of \code{conf_int()}
#' @param x data frame of calculated statistics or containing attributes
#' of theoretical distribution values. Currently, dependent on statistics being stored in \code{stat} column as created in \code{calculate()} function.
#' @param level a numerical value between 0 and 1 giving the confidence level. Default value is 0.95.
#' @param type a string giving which method should be used for creating the confidence interval. The default is \code{"percentile"} with \code{"se"} corresponding to (multiplier * standard error) as the other option.
#' @param point_estimate a numeric value or a 1x1 data frame set to NULL by default. Needed to be provided if \code{type = "se"}.
#'
#' @return a 2 x 1 tibble with values corresponding to lower and upper values in the confidence interval
#' @export
#' @rdname get_ci
#' @examples
#' mtcars_df <- mtcars %>%
#' dplyr::mutate(am = factor(am))
#' d_hat <- mtcars_df %>%
#' specify(mpg ~ am) %>%
#' calculate(stat = "diff in means", order = c("1", "0"))
#' bootstrap_distn <- mtcars_df %>%
#' specify(mpg ~ am) %>%
#' generate(reps = 100) %>%
#' calculate(stat = "diff in means", order = c("1", "0"))
#' bootstrap_distn %>% conf_int(level = 0.9)
#' bootstrap_distn %>% conf_int(type = "se", point_estimate = d_hat)
conf_int <- function(x, level = 0.95, type = "percentile",
point_estimate = NULL){
check_ci_args(x, level, type, point_estimate)
if(type == "percentile") {
ci_vec <- stats::quantile(x[["stat"]],
probs = c((1 - level) / 2, level + (1 - level) / 2))
ci <- tibble::tibble(ci_vec[1], ci_vec[2])
names(ci) <- names(ci_vec)
} else {
point_estimate <- check_obs_stat(point_estimate)
multiplier <- stats::qnorm(1 - (1 - level) / 2)
ci <- tibble::tibble(
lower = point_estimate - multiplier * stats::sd(x[["stat"]]),
upper = point_estimate + multiplier * stats::sd(x[["stat"]]))
}
return(ci)
}
check_ci_args <- function(x, level, type, point_estimate){
if(!is.null(point_estimate)){
if(!is.data.frame(point_estimate))
check_type(point_estimate, is.numeric)
else
check_type(point_estimate, is.data.frame)
}
check_type(x, is.data.frame)
check_type(level, is.numeric)
if(level <= 0 || level >= 1){
stop_glue("The value of `level` must be between 0 and 1 non-inclusive.")
}
if(!(type %in% c("percentile", "se"))){
stop_glue('The options for `type` are "percentile" or "se".')
}
if(type == "se" && is.null(point_estimate))
stop_glue('A numeric value needs to be given for `point_estimate` ',
'for `type = "se"')
if(type == "se" && is.vector(point_estimate))
check_type(point_estimate, is.numeric)
}
#' @export
#' @rdname get_ci
get_ci <- conf_int
#' @export
#' @rdname get_ci
get_confidence_interval <- conf_int
|
#############################################################################
# Purpose: clean raw mortality census data and unify all years to a
# standardized format. Note: Continuous integration and Fast Field forms
# were implemented/used in 2021
# Developed by: Valentine Herrmann - HerrmannV@si.edu
# R version 4.0.3 (2020-10-10)
##########################################################################
# Clean environment ####
# NOTE(review): rm(list = ls()) and setwd() are discouraged in scripts since
# they clobber the caller's workspace; kept here because this is a
# standalone pipeline script.
rm(list = ls())
# Set working directory ####
setwd(".")
# Load libraries ####
## to calculate allometries
library(allodb) # remotes::install_github("forestgeo/allodb")
# Load data ####
## bring in main ForestGEO census data ####
# Download the three SCBI stem censuses from GitHub and standardize each:
# pad quadrat codes to 4 characters, coerce dbh to numeric (raw files
# contain "NULL" strings), treat dbh == 0 as missing, round coordinates to
# 0.1 m, and parse survey dates. Each cleaned table is assigned to
# scbi.stem1 / scbi.stem2 / scbi.stem3 in the global environment.
for(f in paste0("scbi.stem", 1:3)) {
print(f)
x <- read.csv(paste0("https://raw.githubusercontent.com/SCBI-ForestGEO/SCBI-ForestGEO-Data/master/tree_main_census/data/census-csv-files/", f, ".csv"))
x$quadrat <-ifelse(nchar(x$quadrat) < 4, paste0("0", x$quadrat), x$quadrat)
x$dbh <- as.numeric(x$dbh) # not numeric because of the "NULL" values
x$dbh[ x$dbh %in% 0] <- NA # replace dbh 0 by NA... only occurring in second census, not sure why...
x$gx <- round(x$gx,1)
x$gy <- round(x$gy,1)
x$ExactDate <- as.Date(x$ExactDate, format = "%m/%d/%Y")
assign(f,x)
}
### get the species table ####
f = "scbi.spptable"
assign(f, read.csv(paste0("https://raw.githubusercontent.com/SCBI-ForestGEO/SCBI-ForestGEO-Data/master/tree_main_census/data/census-csv-files/", f, ".csv")))
### bring in table that unifies the column names across years ####
unified_colnames <- read.csv("raw_data/standardizing_colnames.csv")
### bring in table for fixes we want to do ####
manual_fixes <- read.csv("raw_data/manual_fixes.csv")
# consistent_fixes <- read.csv("raw_data/consitent_fixes.csv") # now all implemented in manual_fixes or in this script
## bring in raw mortality data + clean up and calculate allometries ####
raw_data_path <- "raw_data/"
# one CSV per survey year, e.g. Mortality_Survey_2019.csv
survey_files <- list.files(raw_data_path, pattern = "Mortality_Survey_.*csv")
# survey_files <- survey_files[as.numeric(regmatches(survey_files, regexpr("20\\d\\d", survey_files))) <= 2021] # only consider files before 2021 as starting 2021 is the CI files
# accumulators filled by the per-year cleaning loop below
survey_years <- NULL
eafb_recorded_on_wrong_species <- NULL
A_afterD <- NULL
for(survey_file in survey_files) {
survey_year <- as.numeric(gsub("Mortality_Survey_|\\.csv", "", survey_file))
survey_years <- c(survey_years, survey_year)
cat(paste("cleaning up", survey_year), "...\n")
# load data ####
mort <- read.csv(paste0(raw_data_path, survey_file), stringsAsFactors = F)
unified_colnames_yr <- unified_colnames[unified_colnames$survey_year %in% survey_year & unified_colnames$survey_type %in% "mortality", ]
# standardize column names ####
colnames(mort) <- unified_colnames_yr$unified_column.name[match(colnames(mort), unified_colnames_yr$raw_column_name)]
## add columns missing
mort[, setdiff(unified_colnames[unified_colnames$survey_type %in% "mortality", ]$unified_column.name, colnames(mort))] <- NA
## delete columns we don't want
mort[, grep("delete", colnames(mort))] <- NULL
# make manual fixes ####
for(i in 1:nrow(manual_fixes)) {
if(survey_year %in% eval(parse(text = manual_fixes$survey_years_to_apply_fix[i]))) {
cat("Implementing manual fix ", manual_fixes$ID[i], ": ", manual_fixes$issue_name[i], "\n", sep = "")
eval(parse(text = manual_fixes$fix[i]))
# print(head(mort))
}
}
# fill in mort$last_main_census_dbh, last_main_cenus_status and , last_main_cenus_codes ####
if(survey_year <= 2018) {
ref_main <- scbi.stem2
}
if(survey_year > 2018 & survey_year <= 2022) {
ref_main <- scbi.stem3
}
if(survey_year > 2022) stop("need to code for new main census")
idx <- match(paste(mort$tag, mort$StemTag), paste(ref_main$tag, ref_main$StemTag))
mort$last_main_census_dbh <- ref_main$dbh[idx]
mort$last_main_cenus_status <- ref_main$status[idx]
mort$last_main_census_codes <- ref_main$codes[idx]
# standardize status ####
## remove spaces
mort$previous_year_status <- toupper(gsub(" ", "", mort$previous_year_status))
mort$current_year_status <- toupper(gsub(" ", "", mort$current_year_status))
## replace empty and "not sampled" by NA
mort$previous_year_status[grepl("SAMPLED", mort$previous_year_status )| mort$previous_year_status == "" ] <- NA
mort$current_year_status[grepl("SAMPLED", mort$current_year_status ) | mort$current_year_status == "" ] <- NA
## make sure status is defined
mort$previous_year_status[mort$previous_year_status %in%"DT"] <- "DN" # this if for one case in 2014, where code was wrongly entered in status for that tree
mort$previous_year_status[mort$previous_year_status %in% "M"] <- NA # this are trees that were missed in 2019 and got a code "M" for the "previous_year_status" of 2022 survey
if(!all(na.omit(c(mort$previous_year_status, mort$current_year_status)) %in% c("A", "AU", "DC", "DN", "DS", "PD"))) stop("some statuses are not defined ")
## fill in previous_year_status when not there
if(all(is.na(mort$previous_year_status))) {
ref_mort <- get(paste0("mort", survey_year-1))
mort$previous_year_status <- ref_mort$current_year_status[match(paste(mort$tag, mort$StemTag), paste(ref_mort$tag, ref_mort$StemTag))]
}
# standardize fraxinus_eabf ####
# make a logical column to say if species is susceptible to emerald ash borer
mort$sp_affected_by_eab <- grepl("fr|ch", mort$sp)
idx_fr_or_chvi <- mort$sp_affected_by_eab # making it an object to simplify coding here
## record year and tag of non fr or ch genus that have fraxinus_eabf info
idx_issue <- !idx_fr_or_chvi & !(is.na(mort$fraxinus_eabf) | mort$fraxinus_eabf %in% c("", "none"))
if(sum(idx_issue) > 0 ) eafb_recorded_on_wrong_species <- rbind( eafb_recorded_on_wrong_species,
data.frame(survey_year, mort[idx_issue, c("sp", "tag", "fraxinus_eabf")]))
## remove spaces and change ";" into ","
mort$fraxinus_eabf <- gsub(" ", "", mort$fraxinus_eabf)
mort$fraxinus_eabf <- gsub(";", ",", mort$fraxinus_eabf)
## if fraxinus or ch, replace "" or NA by "none", otherwise, change "" by NA (leaving "none" or other valuesi there in case we need to change speices ** TO EDIT MAYBE **)
mort$fraxinus_eabf[idx_fr_or_chvi] <- ifelse(is.na(mort$fraxinus_eabf[idx_fr_or_chvi])| mort$fraxinus_eabf[idx_fr_or_chvi] %in% c(""), "none", mort$fraxinus_eabf[idx_fr_or_chvi])
mort$fraxinus_eabf[!idx_fr_or_chvi] <- ifelse(is.na(mort$fraxinus_eabf[!idx_fr_or_chvi])| mort$fraxinus_eabf[!idx_fr_or_chvi] %in% c(""),NA, mort$fraxinus_eabf[!idx_fr_or_chvi])
## check that what is in fraxinus_eabf is only what should be in there
## (NAs are expected for non-susceptible species, so drop them before checking).
## FIX: was !any(), which only stopped when *no* code was valid; !all() stops as
## soon as a single undefined code appears, mirroring the status check above.
if(!all(na.omit(unlist(strsplit(mort$fraxinus_eabf, ","))) %in% c("none", "VB", "SS", "AS", "W", "DE"))) stop("some fraxinus_eabf are not defined ")
# standardize fad ####
## combine all fad into one column
mort$fad <- apply(mort[, sort(grep("fad", colnames(mort), value = T))], 1, paste, collapse = ",")
mort$fad <- gsub("(,NA){1,}|NA|,,|^,", "", mort$fad )
mort$fad <- gsub("^,|,$", "", mort$fad )
mort[grep("fad\\d", names(mort), value = T)] <- NULL
## replace "" by NA for trees with current status "A"
mort$fad[mort$current_year_status %in% "A" & mort$fad %in% ""] <- NA
# standardize score crown intact and score crown living ####
mort$score_crown_intact <- gsub("\\D| ", "", mort$score_crown_intact)
mort$score_crown_intact[mort$score_crown_intact %in% c("", "0")] <- NA
mort$score_crown_intact <- as.numeric(mort$score_crown_intact)
mort$score_crown_living <- gsub("\\D| ", "", mort$score_crown_living)
mort$score_crown_living[mort$score_crown_living %in% c("", "0")] <- NA
mort$score_crown_living <- as.numeric(mort$score_crown_living)
## translate >= 2021 percent_of_crown_living and percent_of_crown_intact into corresponding scores
if(all(is.na(mort$score_crown_intact))& !all(is.na(mort$percent_of_crown_intact))) mort$score_crown_intact <- cut(mort$percent_of_crown_intact, breaks = c(0, 25, 50, 75, 100), include.lowest = F, labels = F)
if(all(is.na(mort$score_crown_living)) & !all(is.na(mort$percent_of_crown_living))) mort$score_crown_living <- cut(mort$percent_of_crown_living, breaks = c(0, 25, 50, 75, 100), include.lowest = F, labels = F)
# padd quadrats with 0 ####
mort$quadrat <- as.character( mort$quadrat)
mort$quadrat <- ifelse(nchar(mort$quadrat) < 4, paste0("0", mort$quadrat), mort$quadrat)
# consider dbh as numeric ####
mort$last_main_census_dbh[mort$last_main_census_dbh %in% c("", "NULL", "N/A")] <- NA # doing this so if a warning shoes on next line up it means there was another type of character that is not coercible to numeric and we may want to review.
mort$last_main_census_dbh <- as.numeric( mort$last_main_census_dbh)
mort$dbh_if_dead[mort$dbh_if_dead %in% c("", "NULL", "N/A")] <- NA # doing this so if a warning shoes on next line up it means there was another type of character that is not coercible to numeric and we may want to review.
mort$dbh_if_dead <- as.numeric( mort$dbh_if_dead)
# if(survey_year >= 2019) mort$dbh.2018 <- as.numeric(mort$dbh.2018) else mort$dbh.2013 <- as.numeric(mort$dbh.2013)
# retrieve global coordinates and recalculate local ones ####
mort$gx <- scbi.stem3$gx[match(paste(mort$tag, mort$StemTag), paste(scbi.stem3$tag, scbi.stem3$StemTag))]
mort$gy <- scbi.stem3$gy[match(paste(mort$tag, mort$StemTag), paste(scbi.stem3$tag, scbi.stem3$StemTag))]
mort$gx[mort$gx %in% 400] <- 399.9 # gx can't be exactly 400 (plot edge); nudge inside
mort$gy[mort$gy %in% 640] <- 639.9 # FIX: was assigning to gx; gy can't be exactly 640 (plot edge)
mort$lx <- mort$gx - floor( mort$gx / 20)*20 # local x within the 20 m quadrat
mort$ly <- mort$gy - floor( mort$gy / 20)*20 # local y within the 20 m quadrat
# consider date as Date ####
if(survey_year <= 2019) date_format = "%m/%d/%Y"
if(survey_year == 2020) date_format = "%m/%d/%y"
if(survey_year > 2020) date_format = "%m-%d-%Y"
mort$ExactDate <- as.Date(mort$ExactDate, format = date_format)
# # make consistent fixes #### NOW ALL IMPLEMENTED IN MANUAL FIXES
# for(i in 1:nrow(consistent_fixes)) {
#
# if(survey_year %in% eval(parse(text = consistent_fixes$survey_years_to_apply_fix[i]))) {
# cat("Implementing consistent fix ", consistent_fixes$ID[i], ": ", consistent_fixes$issue_name[i], "\n", sep = "")
# eval(parse(text = consistent_fixes$fix[i]))
# # print(head(mort))
# }
#
# }
# find issues of alive after dead
idx <- mort$current_year_status %in% c("A", "AU") & grepl("D", mort$previous_year_status)
if(sum(idx) > 0) A_afterD <- rbind(A_afterD, cbind(year = survey_year, mort[idx, c("tag", "StemTag", "sp", "previous_year_status", "current_year_status", "dead_with_resprout", "previous_year_comment", "current_year_comment")]))
# save
assign(paste0("mort", survey_year), mort)
}
warning("check date format after 2023 is correct!")
A_afterD
# check all tags exist in core census data if any problem, add in "manual_fixes.csv" ####
tag_stem_in_order <- paste(scbi.stem3$tag, scbi.stem3$StemTag)
for(survey_year in survey_years) {
print(survey_year)
mort <- get(paste0("mort", survey_year))
tag_stems <- paste(mort$tag, mort$StemTag)
if(!all(tag_stems %in% tag_stem_in_order)) {
print("Not all tags are in core census")
tag_stems[which(!tag_stems %in% tag_stem_in_order)]
print(mort[ paste(mort$tag, mort$StemTag) %in% tag_stems[which(!tag_stems %in% tag_stem_in_order)], ])
}
} # should all be empty (only year should show up)
# Now re-order all data to all have same rows in same order + fill in missing info ####
tag_stem_in_order <- paste(scbi.stem3$tag, scbi.stem3$StemTag)
tag_stem_in_order <- tag_stem_in_order[tag_stem_in_order %in% unique(unlist(sapply(survey_years, function(survey_year) {
mort <- get(paste0("mort", survey_year))
paste(mort$tag, mort$StemTag)
})))] # only keep the ones that were sampled for mortality at some point
for(survey_year in survey_years) {
cat(paste("Filling info of missied stems in", survey_year), "...\n")
mort <- get(paste0("mort", survey_year))
tag_stems <- paste(mort$tag, mort$StemTag)
m <- match(tag_stem_in_order, tag_stems)
mort <- mort[m, ]
# fill in info of trees that were not sampled
missing_stems <- tag_stem_in_order[is.na(m)]
if(survey_year <= 2018) {
ref_main <- scbi.stem2
}
if(survey_year > 2018 & survey_year <= 2022) {
ref_main <- scbi.stem3
}
if(survey_year > 2022) stop("need to code for new main census")
idx <- match(missing_stems, paste(ref_main$tag, ref_main$StemTag))
mort[is.na(m), c(
"tag", "StemTag",
"sp",
"quadrat", "gx", "gy",
"last_main_census_dbh", "hom",
"last_main_cenus_status", "last_main_census_codes"
)] <-
ref_main[idx, c("tag", "StemTag",
"sp",
"quadrat", "gx", "gy",
"dbh", "hom",
"status",
"codes")]
mort[is.na(m),]$current_year_comment <- "stem not sampled, info automatically filled from previous year info"
mort[is.na(m),]$sp_affected_by_eab <- grepl("fr|ch", mort[is.na(m),]$sp)
mort[is.na(m),]$lx <- mort[is.na(m),]$gx - floor( mort[is.na(m),]$gx / 20)*20
mort[is.na(m),]$ly <- mort[is.na(m),]$gy - floor( mort[is.na(m),]$gy / 20)*20
# add info from previous mortality census if we have it
if(survey_year %in% 2014) {
previous_year_status <- ref_main$status[idx]
previous_year_comment <- NA
}
if(survey_year> 2014) {
previous_year_status <- get(paste0("mort", survey_year-1))$current_year_status[is.na(m)]
previous_year_comment <- get(paste0("mort", survey_year-1))$current_year_comment[is.na(m)]
}
previous_year_status[previous_year_status %in% "G"] <- "D" # stems that are "Gone" are considered dead here... don't know if they are still standing or not so just giving status "D".
previous_year_status[previous_year_status %in% "P"] # leaving "P" for "Prior" (trees that did not exist yet)
mort[is.na(m),]$previous_year_status <- ifelse(is.na( mort[is.na(m),]$previous_year_status) & !is.na(previous_year_status), previous_year_status, mort[is.na(m),]$previous_year_status)
mort[is.na(m),]$previous_year_comment <- ifelse(is.na( mort[is.na(m),]$previous_year_comment) & !is.na(previous_year_comment), previous_year_comment, mort[is.na(m),]$previous_year_comment)
# add date, assuming it was sampled that year (to help calculate timeint - current_status is NA anyways so it will be excluded of analysis when calculating moratlity rates)
date_per_quad <- tapply(mort$ExactDate, mort$quadrat, function(x) names(sort(table(x), decreasing = T))[1])
mort$ExactDate[is.na(m)] <- as.Date(date_per_quad[mort$quadrat[is.na(m)]])
# save
assign(paste0("mort", survey_year), mort)
}
# Status corrections ####
## change D to A or AU if tree was found A or AU later:
## a dead previous-year status contradicted by an alive current-year status must
## have been a mis-call, so overwrite it with the current status.
for(survey_year in survey_years) {
cat(paste("Correcting alive-after-dead statuses in", survey_year), "...\n") # FIX: message was copy-pasted from the gap-filling loop above
mort <- get(paste0("mort", survey_year))
# Change previous D* to A when currently alive
idx <- mort$current_year_status %in% c("A") & grepl("D", mort$previous_year_status)
if(sum(idx) > 0) mort$previous_year_status[idx] <- "A"
# Change previous D* to AU when currently alive-unhealthy
idx <- mort$current_year_status %in% c("AU") & grepl("D", mort$previous_year_status)
if(sum(idx) > 0) mort$previous_year_status[idx] <- "AU"
# save back into the per-year object
assign(paste0("mort", survey_year), mort)
}
# Calculate allometries ####
## on main census data
for(census in paste0("scbi.stem", 1:3)) {
cat("cleaning and calculating allometries on", census, "...\n")
x <- get(census)
x$dbh <- as.numeric(x$dbh) # not numeric because of the "NULL" values
x$genus <- scbi.spptable$Genus[match(x$sp, scbi.spptable$sp)]
x$species <- scbi.spptable$Species[match(x$sp, scbi.spptable$sp)]
x$agb <-
round(get_biomass(
dbh = x$dbh/10, # in cm
genus = x$genus,
species = x$species,
coords = c(-78.2, 38.9)
) / 1000 ,2) # / 1000 to change to in Mg
assign(census, x)
}
## on mortality census
for(survey_year in survey_years) {
mort <- get(paste0("mort", survey_year))
## calculate allometries ####
cat(paste("calculating allometries for", survey_year), "...\n")
if(length(setdiff(mort$sp, scbi.spptable$sp)) > 0) stop ("There is one species that is not in scbi.spptable")
mort$genus <- scbi.spptable$Genus[match(mort$sp, scbi.spptable$sp)]
mort$species <- scbi.spptable$Species[match(mort$sp, scbi.spptable$sp)]
mort$last_main_census_agb_Mg <-
round(get_biomass(
dbh = as.numeric(mort$last_main_census_dbh)/10, # in cm
genus = mort$genus,
species = mort$species,
coords = c(-78.2, 38.9) # SCBI coordinates
) / 1000 ,2) # / 1000 to change to in Mg
mort$agb_if_dead_Mg <-
round(get_biomass(
dbh = as.numeric(mort$dbh_if_dead)/10, # in cm
genus = mort$genus,
species = mort$species,
coords = c(-78.2, 38.9)# SCBI coordinates
) / 1000 ,2) # / 1000 to change to in Mg
# save
assign(paste0("mort", survey_year), mort)
}
# make two first main census data in the same format as mortality census ####
for(census in paste0("scbi.stem", 1:2)) {
cat("making", census, "in same format as mortality...\n")
survey_year <- switch(census, scbi.stem1 = 2008, scbi.stem2 = 2013)
survey_years <- sort(c(survey_years, survey_year))
mort <- get(census)
# keep only tags later sampled in mortality
mort <- mort[paste(mort$tag, mort$StemTag) %in% tag_stem_in_order, ]
# standardize column names ####
unified_colnames_yr <- unified_colnames[unified_colnames$survey_type %in% "main", ]
colnames(mort) <- unified_colnames_yr$unified_column.name[match(colnames(mort), unified_colnames_yr$raw_column_name)]
## add columns missing
mort[, setdiff(unified_colnames[unified_colnames$survey_type %in% "mortality", ]$unified_column.name, colnames(mort))] <- NA
## delete columns we don't want
mort[, grep("delete", colnames(mort))] <- NULL
# get previous main census status
if(census == "scbi.stem1") mort$previous_year_status <- NA
if(census == "scbi.stem2") mort$previous_year_status <- mort2008$current_year_status
# add a couple missing column that are not in the unified column table as they were created in this script ####
mort$agb_if_dead_Mg <- NA
mort$sp_affected_by_eab <- grepl("fr|ch", mort$sp)
assign(paste0("mort", survey_year), mort)
}
# calculate time interval between each date the tree was censused ####
for(survey_year in survey_years) {
mort <- get(paste0("mort", survey_year))
# first census (2008): no prior survey, so no interval can be computed
if(survey_year == 2008) mort$timeint_days <- NA
if(survey_year > 2008) {
# reference is the previous survey; 2013 follows the 2008 main census directly
if(survey_year == 2013) ref_mort <- mort2008 else ref_mort <- get(paste0("mort", survey_year-1))
# rows were aligned across years earlier, so an elementwise difference is valid
mort$timeint_days <- difftime(mort$ExactDate, ref_mort$ExactDate, units = "days")
}
assign(paste0("mort", survey_year), mort)
}
# save all the data in the same format ####
## order of columns we want to keep
columns_to_keep <- c("survey_year", # adding this column, it will be createid in the loop that saves the mortality files
"tag", "StemTag", "sp", "genus", "species",
"quadrat", "gy", "gx", "ly", "lx",
"last_main_census_dbh", "last_main_census_agb_Mg", "hom",
"ExactDate", "timeint_days",
"previous_year_status", "current_year_status",
"last_main_cenus_status","last_main_census_codes",
"cored",
"crown_position", "crown_illumination",
"percent_of_crown_intact", "score_crown_intact",
"percent_of_crown_living", "score_crown_living",
"fad",
"liana_load", "wounded_main_stem", "rotting_trunk", "canker_swelling_deformity",
"lean_angle_if_greater_than_15_degrees",
"dead_with_resprout",
"crown_position_if_dead",
"dbh_if_dead", "agb_if_dead_Mg",
"sp_affected_by_eab",
"fraxinus_eabf", "fraxinus_D_shaped_exit_hole_count",
"fraxinus_epicormic_growth", "fraxinus_score_crown_living",
"surveyor",
"current_year_comment", "previous_year_comment", "submission_id")
## save mortality files and build up allmort
allmort <- NULL
for (survey_year in survey_years) {
print(paste("Saving final data set for", survey_year))
mort <- get(paste0("mort", survey_year))
mort$survey_year <- survey_year # record which survey each row belongs to
mort <- mort[, columns_to_keep] # keep & order only the standardized columns
assign(paste0("mort", survey_year), mort)
write.csv(mort, file = paste0("data/mortality_", survey_year, ".csv"), row.names = F)
allmort <- rbind(allmort, mort) # rbind in a loop is fine here: only a handful of surveys
}
## save allmort
write.csv(allmort, "data/allmort.csv", row.names = F)
save(allmort, file = "data/allmort.RData")
| /R_scripts/1_Clean_raw_data_and_unify_all_years.R | no_license | SCBI-ForestGEO/SCBImortality | R | false | false | 21,357 | r | #############################################################################
# Purpose: clean raw mortality census data and unitfy all years to standardized format. Note: Continuous integration and Fast Field forms were implemented/used in 2021
# Developped by: Valentine Herrmann - HerrmannV@si.edu
# R version 4.0.3 (2020-10-10)
##########################################################################
# Clean environment ####
rm(list = ls())
# Set working directory ####
setwd(".")
# Load libraries ####
## to calculate allometries
library(allodb) # remotes::install_github("forestgeo/allodb")
# Load data ####
## bring in main ForestGEO census data ####
for(f in paste0("scbi.stem", 1:3)) {
print(f)
x <- read.csv(paste0("https://raw.githubusercontent.com/SCBI-ForestGEO/SCBI-ForestGEO-Data/master/tree_main_census/data/census-csv-files/", f, ".csv"))
x$quadrat <-ifelse(nchar(x$quadrat) < 4, paste0("0", x$quadrat), x$quadrat)
x$dbh <- as.numeric(x$dbh) # not numeric because of the "NULL" values
x$dbh[ x$dbh %in% 0] <- NA # replace dbh 0 by NA... only occuring in second census, not sure why...
x$gx <- round(x$gx,1)
x$gy <- round(x$gy,1)
x$ExactDate <- as.Date(x$ExactDate, format = "%m/%d/%Y")
assign(f,x)
}
### get the species table ####
f = "scbi.spptable"
assign(f, read.csv(paste0("https://raw.githubusercontent.com/SCBI-ForestGEO/SCBI-ForestGEO-Data/master/tree_main_census/data/census-csv-files/", f, ".csv")))
### bring in table that unifies the column names across years ####
unified_colnames <- read.csv("raw_data/standardizing_colnames.csv")
### bring in table for fixes we want to do ####
manual_fixes <- read.csv("raw_data/manual_fixes.csv")
# consistent_fixes <- read.csv("raw_data/consitent_fixes.csv") # now all implemented in manual_fixes or in this script
## bring in raw mortality data + clean up and calculate allometries ####
raw_data_path <- "raw_data/"
survey_files <- list.files(raw_data_path, pattern = "Mortality_Survey_.*csv")
# survey_files <- survey_files[as.numeric(regmatches(survey_files, regexpr("20\\d\\d", survey_files))) <= 2021] # only consider files before 2021 as starting 2021 is the CI files
survey_years <- NULL
eafb_recorded_on_wrong_species <- NULL
A_afterD <- NULL
for(survey_file in survey_files) {
survey_year <- as.numeric(gsub("Mortality_Survey_|\\.csv", "", survey_file))
survey_years <- c(survey_years, survey_year)
cat(paste("cleaning up", survey_year), "...\n")
# load data ####
mort <- read.csv(paste0(raw_data_path, survey_file), stringsAsFactors = F)
unified_colnames_yr <- unified_colnames[unified_colnames$survey_year %in% survey_year & unified_colnames$survey_type %in% "mortality", ]
# standardize column names ####
colnames(mort) <- unified_colnames_yr$unified_column.name[match(colnames(mort), unified_colnames_yr$raw_column_name)]
## add columns missing
mort[, setdiff(unified_colnames[unified_colnames$survey_type %in% "mortality", ]$unified_column.name, colnames(mort))] <- NA
## delete columns we don't want
mort[, grep("delete", colnames(mort))] <- NULL
# make manual fixes ####
for(i in 1:nrow(manual_fixes)) {
if(survey_year %in% eval(parse(text = manual_fixes$survey_years_to_apply_fix[i]))) {
cat("Implementing manual fix ", manual_fixes$ID[i], ": ", manual_fixes$issue_name[i], "\n", sep = "")
eval(parse(text = manual_fixes$fix[i]))
# print(head(mort))
}
}
# fill in mort$last_main_census_dbh, last_main_cenus_status and , last_main_cenus_codes ####
if(survey_year <= 2018) {
ref_main <- scbi.stem2
}
if(survey_year > 2018 & survey_year <= 2022) {
ref_main <- scbi.stem3
}
if(survey_year > 2022) stop("need to code for new main census")
idx <- match(paste(mort$tag, mort$StemTag), paste(ref_main$tag, ref_main$StemTag))
mort$last_main_census_dbh <- ref_main$dbh[idx]
mort$last_main_cenus_status <- ref_main$status[idx]
mort$last_main_census_codes <- ref_main$codes[idx]
# standardize status ####
## remove spaces
mort$previous_year_status <- toupper(gsub(" ", "", mort$previous_year_status))
mort$current_year_status <- toupper(gsub(" ", "", mort$current_year_status))
## replace empty and "not sampled" by NA
mort$previous_year_status[grepl("SAMPLED", mort$previous_year_status )| mort$previous_year_status == "" ] <- NA
mort$current_year_status[grepl("SAMPLED", mort$current_year_status ) | mort$current_year_status == "" ] <- NA
## make sure status is defined
mort$previous_year_status[mort$previous_year_status %in%"DT"] <- "DN" # this if for one case in 2014, where code was wrongly entered in status for that tree
mort$previous_year_status[mort$previous_year_status %in% "M"] <- NA # this are trees that were missed in 2019 and got a code "M" for the "previous_year_status" of 2022 survey
if(!all(na.omit(c(mort$previous_year_status, mort$current_year_status)) %in% c("A", "AU", "DC", "DN", "DS", "PD"))) stop("some statuses are not defined ")
## fill in previous_year_status when not there
if(all(is.na(mort$previous_year_status))) {
ref_mort <- get(paste0("mort", survey_year-1))
mort$previous_year_status <- ref_mort$current_year_status[match(paste(mort$tag, mort$StemTag), paste(ref_mort$tag, ref_mort$StemTag))]
}
# standardize fraxinus_eabf ####
# make a logical column to say if species is susceptible to emerald ash borer
mort$sp_affected_by_eab <- grepl("fr|ch", mort$sp)
idx_fr_or_chvi <- mort$sp_affected_by_eab # making it an object to simplify coding here
## record year and tag of non fr or ch genus that have fraxinus_eabf info
idx_issue <- !idx_fr_or_chvi & !(is.na(mort$fraxinus_eabf) | mort$fraxinus_eabf %in% c("", "none"))
if(sum(idx_issue) > 0 ) eafb_recorded_on_wrong_species <- rbind( eafb_recorded_on_wrong_species,
data.frame(survey_year, mort[idx_issue, c("sp", "tag", "fraxinus_eabf")]))
## remove spaces and change ";" into ","
mort$fraxinus_eabf <- gsub(" ", "", mort$fraxinus_eabf)
mort$fraxinus_eabf <- gsub(";", ",", mort$fraxinus_eabf)
## if fraxinus or ch, replace "" or NA by "none", otherwise, change "" by NA (leaving "none" or other valuesi there in case we need to change speices ** TO EDIT MAYBE **)
mort$fraxinus_eabf[idx_fr_or_chvi] <- ifelse(is.na(mort$fraxinus_eabf[idx_fr_or_chvi])| mort$fraxinus_eabf[idx_fr_or_chvi] %in% c(""), "none", mort$fraxinus_eabf[idx_fr_or_chvi])
mort$fraxinus_eabf[!idx_fr_or_chvi] <- ifelse(is.na(mort$fraxinus_eabf[!idx_fr_or_chvi])| mort$fraxinus_eabf[!idx_fr_or_chvi] %in% c(""),NA, mort$fraxinus_eabf[!idx_fr_or_chvi])
## check that what is in fraxinus_eabf is only what should be in there
## (NAs are expected for non-susceptible species, so drop them before checking).
## FIX: was !any(), which only stopped when *no* code was valid; !all() stops as
## soon as a single undefined code appears, mirroring the status check above.
if(!all(na.omit(unlist(strsplit(mort$fraxinus_eabf, ","))) %in% c("none", "VB", "SS", "AS", "W", "DE"))) stop("some fraxinus_eabf are not defined ")
# standardize fad ####
## combine all fad into one column
mort$fad <- apply(mort[, sort(grep("fad", colnames(mort), value = T))], 1, paste, collapse = ",")
mort$fad <- gsub("(,NA){1,}|NA|,,|^,", "", mort$fad )
mort$fad <- gsub("^,|,$", "", mort$fad )
mort[grep("fad\\d", names(mort), value = T)] <- NULL
## replace "" by NA for trees with current status "A"
mort$fad[mort$current_year_status %in% "A" & mort$fad %in% ""] <- NA
# standardize score crown intact and score crown living ####
mort$score_crown_intact <- gsub("\\D| ", "", mort$score_crown_intact)
mort$score_crown_intact[mort$score_crown_intact %in% c("", "0")] <- NA
mort$score_crown_intact <- as.numeric(mort$score_crown_intact)
mort$score_crown_living <- gsub("\\D| ", "", mort$score_crown_living)
mort$score_crown_living[mort$score_crown_living %in% c("", "0")] <- NA
mort$score_crown_living <- as.numeric(mort$score_crown_living)
## translate >= 2021 percent_of_crown_living and percent_of_crown_intact into corresponding scores
if(all(is.na(mort$score_crown_intact))& !all(is.na(mort$percent_of_crown_intact))) mort$score_crown_intact <- cut(mort$percent_of_crown_intact, breaks = c(0, 25, 50, 75, 100), include.lowest = F, labels = F)
if(all(is.na(mort$score_crown_living)) & !all(is.na(mort$percent_of_crown_living))) mort$score_crown_living <- cut(mort$percent_of_crown_living, breaks = c(0, 25, 50, 75, 100), include.lowest = F, labels = F)
# padd quadrats with 0 ####
mort$quadrat <- as.character( mort$quadrat)
mort$quadrat <- ifelse(nchar(mort$quadrat) < 4, paste0("0", mort$quadrat), mort$quadrat)
# consider dbh as numeric ####
mort$last_main_census_dbh[mort$last_main_census_dbh %in% c("", "NULL", "N/A")] <- NA # doing this so if a warning shoes on next line up it means there was another type of character that is not coercible to numeric and we may want to review.
mort$last_main_census_dbh <- as.numeric( mort$last_main_census_dbh)
mort$dbh_if_dead[mort$dbh_if_dead %in% c("", "NULL", "N/A")] <- NA # doing this so if a warning shoes on next line up it means there was another type of character that is not coercible to numeric and we may want to review.
mort$dbh_if_dead <- as.numeric( mort$dbh_if_dead)
# if(survey_year >= 2019) mort$dbh.2018 <- as.numeric(mort$dbh.2018) else mort$dbh.2013 <- as.numeric(mort$dbh.2013)
# retrieve global coordinates and recalculate local ones ####
mort$gx <- scbi.stem3$gx[match(paste(mort$tag, mort$StemTag), paste(scbi.stem3$tag, scbi.stem3$StemTag))]
mort$gy <- scbi.stem3$gy[match(paste(mort$tag, mort$StemTag), paste(scbi.stem3$tag, scbi.stem3$StemTag))]
mort$gx[mort$gx %in% 400] <- 399.9 # gx can't be exactly 400 (plot edge); nudge inside
mort$gy[mort$gy %in% 640] <- 639.9 # FIX: was assigning to gx; gy can't be exactly 640 (plot edge)
mort$lx <- mort$gx - floor( mort$gx / 20)*20 # local x within the 20 m quadrat
mort$ly <- mort$gy - floor( mort$gy / 20)*20 # local y within the 20 m quadrat
# consider date as Date ####
if(survey_year <= 2019) date_format = "%m/%d/%Y"
if(survey_year == 2020) date_format = "%m/%d/%y"
if(survey_year > 2020) date_format = "%m-%d-%Y"
mort$ExactDate <- as.Date(mort$ExactDate, format = date_format)
# # make consistent fixes #### NOW ALL IMPLEMENTED IN MANUAL FIXES
# for(i in 1:nrow(consistent_fixes)) {
#
# if(survey_year %in% eval(parse(text = consistent_fixes$survey_years_to_apply_fix[i]))) {
# cat("Implementing consistent fix ", consistent_fixes$ID[i], ": ", consistent_fixes$issue_name[i], "\n", sep = "")
# eval(parse(text = consistent_fixes$fix[i]))
# # print(head(mort))
# }
#
# }
# find issues of alive after dead
idx <- mort$current_year_status %in% c("A", "AU") & grepl("D", mort$previous_year_status)
if(sum(idx) > 0) A_afterD <- rbind(A_afterD, cbind(year = survey_year, mort[idx, c("tag", "StemTag", "sp", "previous_year_status", "current_year_status", "dead_with_resprout", "previous_year_comment", "current_year_comment")]))
# save
assign(paste0("mort", survey_year), mort)
}
warning("check date format after 2023 is correct!")
A_afterD
# check all tags exist in core census data if any problem, add in "manual_fixes.csv" ####
tag_stem_in_order <- paste(scbi.stem3$tag, scbi.stem3$StemTag)
for(survey_year in survey_years) {
print(survey_year)
mort <- get(paste0("mort", survey_year))
tag_stems <- paste(mort$tag, mort$StemTag)
if(!all(tag_stems %in% tag_stem_in_order)) {
print("Not all tags are in core census")
tag_stems[which(!tag_stems %in% tag_stem_in_order)]
print(mort[ paste(mort$tag, mort$StemTag) %in% tag_stems[which(!tag_stems %in% tag_stem_in_order)], ])
}
} # should all be empty (only year should show up)
# Now re-order all data to all have same rows in same order + fill in missing info ####
tag_stem_in_order <- paste(scbi.stem3$tag, scbi.stem3$StemTag)
tag_stem_in_order <- tag_stem_in_order[tag_stem_in_order %in% unique(unlist(sapply(survey_years, function(survey_year) {
mort <- get(paste0("mort", survey_year))
paste(mort$tag, mort$StemTag)
})))] # only keep the ones that were sampled for mortality at some point
for(survey_year in survey_years) {
cat(paste("Filling info of missied stems in", survey_year), "...\n")
mort <- get(paste0("mort", survey_year))
tag_stems <- paste(mort$tag, mort$StemTag)
m <- match(tag_stem_in_order, tag_stems)
mort <- mort[m, ]
# fill in info of trees that were not sampled
missing_stems <- tag_stem_in_order[is.na(m)]
if(survey_year <= 2018) {
ref_main <- scbi.stem2
}
if(survey_year > 2018 & survey_year <= 2022) {
ref_main <- scbi.stem3
}
if(survey_year > 2022) stop("need to code for new main census")
idx <- match(missing_stems, paste(ref_main$tag, ref_main$StemTag))
mort[is.na(m), c(
"tag", "StemTag",
"sp",
"quadrat", "gx", "gy",
"last_main_census_dbh", "hom",
"last_main_cenus_status", "last_main_census_codes"
)] <-
ref_main[idx, c("tag", "StemTag",
"sp",
"quadrat", "gx", "gy",
"dbh", "hom",
"status",
"codes")]
mort[is.na(m),]$current_year_comment <- "stem not sampled, info automatically filled from previous year info"
mort[is.na(m),]$sp_affected_by_eab <- grepl("fr|ch", mort[is.na(m),]$sp)
mort[is.na(m),]$lx <- mort[is.na(m),]$gx - floor( mort[is.na(m),]$gx / 20)*20
mort[is.na(m),]$ly <- mort[is.na(m),]$gy - floor( mort[is.na(m),]$gy / 20)*20
# add info from previous mortality census if we have it
if(survey_year %in% 2014) {
previous_year_status <- ref_main$status[idx]
previous_year_comment <- NA
}
if(survey_year> 2014) {
previous_year_status <- get(paste0("mort", survey_year-1))$current_year_status[is.na(m)]
previous_year_comment <- get(paste0("mort", survey_year-1))$current_year_comment[is.na(m)]
}
previous_year_status[previous_year_status %in% "G"] <- "D" # stems that are "Gone" are considered dead here... don't know if they are still standing or not so just giving status "D".
previous_year_status[previous_year_status %in% "P"] # leaving "P" for "Prior" (trees that did not exist yet)
mort[is.na(m),]$previous_year_status <- ifelse(is.na( mort[is.na(m),]$previous_year_status) & !is.na(previous_year_status), previous_year_status, mort[is.na(m),]$previous_year_status)
mort[is.na(m),]$previous_year_comment <- ifelse(is.na( mort[is.na(m),]$previous_year_comment) & !is.na(previous_year_comment), previous_year_comment, mort[is.na(m),]$previous_year_comment)
# add date, assuming it was sampled that year (to help calculate timeint - current_status is NA anyways so it will be excluded of analysis when calculating moratlity rates)
date_per_quad <- tapply(mort$ExactDate, mort$quadrat, function(x) names(sort(table(x), decreasing = T))[1])
mort$ExactDate[is.na(m)] <- as.Date(date_per_quad[mort$quadrat[is.na(m)]])
# save
assign(paste0("mort", survey_year), mort)
}
# Status corrections ####
## change D to A or AU if tree was found A or AU later:
## a dead previous-year status contradicted by an alive current-year status must
## have been a mis-call, so overwrite it with the current status.
for(survey_year in survey_years) {
cat(paste("Correcting alive-after-dead statuses in", survey_year), "...\n") # FIX: message was copy-pasted from the gap-filling loop above
mort <- get(paste0("mort", survey_year))
# Change previous D* to A when currently alive
idx <- mort$current_year_status %in% c("A") & grepl("D", mort$previous_year_status)
if(sum(idx) > 0) mort$previous_year_status[idx] <- "A"
# Change previous D* to AU when currently alive-unhealthy
idx <- mort$current_year_status %in% c("AU") & grepl("D", mort$previous_year_status)
if(sum(idx) > 0) mort$previous_year_status[idx] <- "AU"
# save back into the per-year object
assign(paste0("mort", survey_year), mort)
}
# Calculate allometries ####
## on main census data
for(census in paste0("scbi.stem", 1:3)) {
cat("cleaning and calculating allometries on", census, "...\n")
x <- get(census)
x$dbh <- as.numeric(x$dbh) # not numeric because of the "NULL" values
x$genus <- scbi.spptable$Genus[match(x$sp, scbi.spptable$sp)]
x$species <- scbi.spptable$Species[match(x$sp, scbi.spptable$sp)]
x$agb <-
round(get_biomass(
dbh = x$dbh/10, # in cm
genus = x$genus,
species = x$species,
coords = c(-78.2, 38.9)
) / 1000 ,2) # / 1000 to change to in Mg
assign(census, x)
}
## on mortality census
for(survey_year in survey_years) {
mort <- get(paste0("mort", survey_year))
## calculate allometries ####
cat(paste("calculating allometries for", survey_year), "...\n")
if(length(setdiff(mort$sp, scbi.spptable$sp)) > 0) stop ("There is one species that is not in scbi.spptable")
mort$genus <- scbi.spptable$Genus[match(mort$sp, scbi.spptable$sp)]
mort$species <- scbi.spptable$Species[match(mort$sp, scbi.spptable$sp)]
mort$last_main_census_agb_Mg <-
round(get_biomass(
dbh = as.numeric(mort$last_main_census_dbh)/10, # in cm
genus = mort$genus,
species = mort$species,
coords = c(-78.2, 38.9) # SCBI coordinates
) / 1000 ,2) # / 1000 to change to in Mg
mort$agb_if_dead_Mg <-
round(get_biomass(
dbh = as.numeric(mort$dbh_if_dead)/10, # in cm
genus = mort$genus,
species = mort$species,
coords = c(-78.2, 38.9)# SCBI coordinates
) / 1000 ,2) # / 1000 to change to in Mg
# save
assign(paste0("mort", survey_year), mort)
}
# make two first main census data in the same format as mortality census ####
for(census in paste0("scbi.stem", 1:2)) {
cat("making", census, "in same format as mortality...\n")
survey_year <- switch(census, scbi.stem1 = 2008, scbi.stem2 = 2013)
survey_years <- sort(c(survey_years, survey_year))
mort <- get(census)
# keep only tags later sampled in mortality
mort <- mort[paste(mort$tag, mort$StemTag) %in% tag_stem_in_order, ]
# standardize column names ####
unified_colnames_yr <- unified_colnames[unified_colnames$survey_type %in% "main", ]
colnames(mort) <- unified_colnames_yr$unified_column.name[match(colnames(mort), unified_colnames_yr$raw_column_name)]
## add columns missing
mort[, setdiff(unified_colnames[unified_colnames$survey_type %in% "mortality", ]$unified_column.name, colnames(mort))] <- NA
## delete columns we don't want
mort[, grep("delete", colnames(mort))] <- NULL
# get previous main census status
if(census == "scbi.stem1") mort$previous_year_status <- NA
if(census == "scbi.stem2") mort$previous_year_status <- mort2008$current_year_status
# add a couple missing column that are not in the unified column table as they were created in this script ####
mort$agb_if_dead_Mg <- NA
mort$sp_affected_by_eab <- grepl("fr|ch", mort$sp)
assign(paste0("mort", survey_year), mort)
}
# calculate time interval between each date the tree was censused ####
for(survey_year in survey_years) {
mort <- get(paste0("mort", survey_year))
if(survey_year == 2008) mort$timeint_days <- NA
if(survey_year > 2008) {
if(survey_year == 2013) ref_mort <- mort2008 else ref_mort <- get(paste0("mort", survey_year-1))
mort$timeint_days <- difftime(mort$ExactDate, ref_mort$ExactDate, units = "days")
}
assign(paste0("mort", survey_year), mort)
}
# save all the data in the same format ####
## order of columns we want to keep
columns_to_keep <- c("survey_year", # adding this column, it will be createid in the loop that saves the mortality files
"tag", "StemTag", "sp", "genus", "species",
"quadrat", "gy", "gx", "ly", "lx",
"last_main_census_dbh", "last_main_census_agb_Mg", "hom",
"ExactDate", "timeint_days",
"previous_year_status", "current_year_status",
"last_main_cenus_status","last_main_census_codes",
"cored",
"crown_position", "crown_illumination",
"percent_of_crown_intact", "score_crown_intact",
"percent_of_crown_living", "score_crown_living",
"fad",
"liana_load", "wounded_main_stem", "rotting_trunk", "canker_swelling_deformity",
"lean_angle_if_greater_than_15_degrees",
"dead_with_resprout",
"crown_position_if_dead",
"dbh_if_dead", "agb_if_dead_Mg",
"sp_affected_by_eab",
"fraxinus_eabf", "fraxinus_D_shaped_exit_hole_count",
"fraxinus_epicormic_growth", "fraxinus_score_crown_living",
"surveyor",
"current_year_comment", "previous_year_comment", "submission_id")
## save mortality files and build up allmort
allmort <- NULL
for (survey_year in survey_years) {
print(paste("Saving final data set for", survey_year))
mort <- get(paste0("mort", survey_year))
mort$survey_year <- survey_year # record which survey each row belongs to
mort <- mort[, columns_to_keep] # keep & order only the standardized columns
assign(paste0("mort", survey_year), mort)
write.csv(mort, file = paste0("data/mortality_", survey_year, ".csv"), row.names = F)
allmort <- rbind(allmort, mort) # rbind in a loop is fine here: only a handful of surveys
}
## save allmort
write.csv(allmort, "data/allmort.csv", row.names = F)
save(allmort, file = "data/allmort.RData")
|
# Word-cloud analysis of Seoul civil-complaint text using KoNLP noun extraction.
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider guarding with requireNamespace() — left unchanged here.
install.packages('rJava')
library(rJava)
install.packages('KoNLP')
library(KoNLP)
install.packages("wordcloud")
library(wordcloud)
useSejongDic() # load the Sejong dictionary for Korean morphological analysis
data1<-readLines("seoul_new.txt")
data1 # raw data read from file (Korean sentences)
extractNoun('서울시 버스정책을 역행하는 행위를 고발합니다.') # demo: noun extraction on a single sentence
data2<-sapply(data1, extractNoun, USE.NAMES=F)
data2 # list form: noun-only tokens, one vector per input line
head(unlist(data2),30)
data3<-unlist(data2)
data3 # flattened noun data (no longer a list)
# filter out unwanted content
data3<-gsub('\\d+', '', data3) # drop digits
data3<-gsub('서울시', '', data3)
data3<-gsub('서울', '', data3)
data3<-gsub('요청', '', data3)
data3<-gsub('제안', '', data3)
data3<-gsub(' ', '', data3)
data3<-gsub('-', '', data3); data3
# remove whitespace within the content
write(unlist(data3),'seoul_2.txt')
data4<-read.table('seoul_2.txt'); data4
head(data4)
nrow(data4)
wordcount<-table(data4); wordcount # word frequency table
head(sort(wordcount, decreasing=T), 20)
data3<-gsub('OO','',data3)
data3<-gsub('님','',data3)
data3<-gsub('개선','',data3)
data3<-gsub('문제','',data3)
data3<-gsub('관리','',data3)
data3<-gsub('민원','',data3)
data3<-gsub('이용','',data3)
data3<-gsub('관련','',data3)
data3<-gsub('시장','',data3)
write(unlist(data3), 'seoul_3.txt')
data4<-read.table('seoul_3.txt')
wordcount<-table(data4)
head(sort(wordcount, decreasing=T), 20)
#Word cloud 그래픽 출력
library(RColorBrewer)
palete<-brewer.pal(8,'dark2')
set.seed(1234)
wordcloud(names(wordcount), freq=wordcount,scale=c(5,0.8), rot.per=0.1, min.freq=1,
random.order=F, ramdom.color=T, colors=palete)
legend(0.3, 0.8, '서울시 응답소 요청사항 분석',cex=0.8, fill=NA, border=NA, bg='white', text.col='red',
text.font=2, box.col='red')
v3<-c('봄이 지나면 여름이고 여름이 지나면 가을 입니다.', '그리고 겨울이죠')
extractNoun(v3)
v4<-sapply(v3, extractNoun, USE.NAMES=F)
v4
wordcloud(c(letters, LETTERS, 0:9), seq(1,1000, len=62))
palete<-brewer.pal(9, 'set1')
wordcloud(c(letters, LETTERS, 0:9), seq(1,1000, len=62), colors=palete)
| /Part2/stage1_wordcloud/Ex01_seoul/ex01_script.R | no_license | JadenChoi94/R_Lecture | R | false | false | 2,103 | r | install.packages('rJava')
## Setup: attach the packages needed for Korean noun extraction and the cloud.
library(rJava)
install.packages('KoNLP')
library(KoNLP)
install.packages("wordcloud")
library(wordcloud)
useSejongDic()  # load the Sejong dictionary for Korean morphological analysis
data1 <- readLines("seoul_new.txt")
data1 # raw data read from the file (Korean sentences)
extractNoun('서울시 버스정책을 역행하는 행위를 고발합니다.')  # sanity check of the noun extractor
data2 <- sapply(data1, extractNoun, USE.NAMES=F)
data2 # list form: one vector of nouns per input line
head(unlist(data2),30)
data3 <- unlist(data2)
data3 # flattened noun vector (no longer a list)
# Filter out unwanted tokens (digits and high-frequency boilerplate words)
data3 <- gsub('\\d+', '', data3)
data3 <- gsub('서울시', '', data3)
data3 <- gsub('서울', '', data3)
data3 <- gsub('요청', '', data3)
data3 <- gsub('제안', '', data3)
data3 <- gsub(' ', '', data3)
data3 <- gsub('-', '', data3); data3
# Round-trip through a file to drop empty strings, then tabulate frequencies
write(unlist(data3),'seoul_2.txt')
data4 <- read.table('seoul_2.txt'); data4
head(data4)
nrow(data4)
wordcount <- table(data4); wordcount
head(sort(wordcount, decreasing=T), 20)
# Remove more boilerplate words identified from the top-20 list above
data3 <- gsub('OO','',data3)
data3 <- gsub('님','',data3)
data3 <- gsub('개선','',data3)
data3 <- gsub('문제','',data3)
data3 <- gsub('관리','',data3)
data3 <- gsub('민원','',data3)
data3 <- gsub('이용','',data3)
data3 <- gsub('관련','',data3)
data3 <- gsub('시장','',data3)
write(unlist(data3), 'seoul_3.txt')
data4 <- read.table('seoul_3.txt')
wordcount <- table(data4)
head(sort(wordcount, decreasing=T), 20)
# Render the word cloud
library(RColorBrewer)
# NOTE: RColorBrewer palette names are case-sensitive; 'dark2' errors, so it
# must be 'Dark2'.
palete <- brewer.pal(8,'Dark2')
set.seed(1234)
wordcloud(names(wordcount), freq=wordcount, scale=c(5,0.8), rot.per=0.1, min.freq=1,
          random.order=F, random.color=T, colors=palete)  # fixed typo: ramdom.color -> random.color
legend(0.3, 0.8, '서울시 응답소 요청사항 분석',cex=0.8, fill=NA, border=NA, bg='white', text.col='red',
       text.font=2, box.col='red')
# Small demo of extractNoun on an in-memory character vector
v3 <- c('봄이 지나면 여름이고 여름이 지나면 가을 입니다.', '그리고 겨울이죠')
extractNoun(v3)
v4 <- sapply(v3, extractNoun, USE.NAMES=F)
v4
# Demo clouds with alphanumeric tokens
wordcloud(c(letters, LETTERS, 0:9), seq(1,1000, len=62))
palete <- brewer.pal(9, 'Set1')  # fixed: 'set1' is not a valid palette name
wordcloud(c(letters, LETTERS, 0:9), seq(1,1000, len=62), colors=palete)
|
knitr::opts_chunk$set(echo = TRUE)
summary(cars)
## Package setup for the heart-disease analysis.
install.packages("kableExtra")
install.packages("dplyr")   # fixed typo: "dpylr" is not a CRAN package
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, skimr, GGally, plotly, viridis, caret, randomForest, e1071, rpart,
               xgboost, h2o, corrplot, rpart.plot, corrgram, ggplot2, highcharter,
               ggthemes, psych, scales, treemap, treemapify, repr, cowplot, magrittr, ggpubr,
               RColorBrewer, plotrix, ggrepel, tidyverse, gridExtra, reshape2)  # fixed: "reshape2." had a stray trailing dot
library(readr)
library(ggplot2)
library(corrplot)
library(tidyverse)
library(ggcorrplot)
library(ggplot2)
library(plyr)
library(caret)
library(caTools)
library(readr)   # fixed typo: library(reader) -> readr (read_csv is used below)
# Load the Cleveland heart-disease data and inspect it.
# All columns are numeric at this point, so cor() works on the whole frame.
processed_cleveland <- read_csv("heart.csv")
head(processed_cleveland)
cor(processed_cleveland)
summary(processed_cleveland)
corrplot(cor(processed_cleveland))
# Convert the categorical columns to factors in a single pass
categorical_cols <- c("sex", "target", "cp", "ca", "exang", "slope", "thal")
processed_cleveland[categorical_cols] <- lapply(processed_cleveland[categorical_cols], as.factor)
# Summary after pre-processing the data
summary(processed_cleveland)
# Display the number of NAs in each column
colSums(is.na(processed_cleveland))
# Bar plot for target (presence/absence of heart disease).
# Columns are referenced bare inside aes(): using processed_cleveland$col
# inside aes() bypasses ggplot's data masking and breaks faceting.
processed_cleveland$target <- as.factor(processed_cleveland$target)
ggplot(processed_cleveland, aes(x = target, fill = target)) +
  geom_bar() +
  xlab("Heart Disease") +
  ylab("Count") +
  ggtitle("Analysis of Presence and Absence of Heart Disease") +
  scale_fill_discrete(name = "Heart Disease", labels = c("Absence", "Presence"))
# Count the frequency of each age value (plyr::count)
ageCount <- count(processed_cleveland, 'age')
ageCount <- subset(ageCount[which(ageCount$freq > 10), ])
# Plot the ages with frequency greater than 10
ggplot(ageCount, aes(x = age, y = freq)) +
  ggtitle("Age Analysis") +
  xlab("Age") +
  ylab("Age Count") +
  geom_bar(stat = "identity")
# Group the ages into three groups: young < 45, middle 45-54, elderly >= 55.
# Fixed off-by-one: the original used `age > 55` for elderly, so patients
# aged exactly 55 fell into no group at all.
young <- processed_cleveland[which(processed_cleveland$age < 45), ]
middle <- processed_cleveland[which(processed_cleveland$age >= 45 & processed_cleveland$age < 55), ]
elderly <- processed_cleveland[which(processed_cleveland$age >= 55), ]
groups <- data.frame(age_group = c("young","middle","elderly"), group_count = c(NROW(young$age), NROW(middle$age), NROW(elderly$age)))
# Plot the age-group counts (legend labels follow alphabetical factor order)
ggplot(groups, aes(x = age_group, y = group_count, fill = age_group)) +
  ggtitle("Age Analysis") +
  xlab("Age Group") +
  ylab("group Count") +
  geom_bar(stat = "identity") +
  scale_fill_discrete(name = "Age Group", labels = c("Elderly", "Middle", "Young"))
# Drop the raw age column now that age groups have been analysed
# (fixed: use <- rather than = for top-level assignment)
processed_cleveland <- subset(processed_cleveland, select = c(-age))
# Box/jitter plot of gender coloured by heart-disease status
ggplot(processed_cleveland, aes(x = factor(sex), y = sex, colour = target)) +
  geom_boxplot(stat = "boxplot",
               position = "dodge2") +
  geom_boxplot(outlier.shape = NA) +
  geom_jitter(width = 0.2) +
  xlab("Age Groups") +
  ylab("Gender") +
  ggtitle("Analysis of gender with different age group with presence or absense of heart disease")
# Bar plot for sex, stacked by heart-disease status
ggplot(processed_cleveland, aes(x = sex, fill = target)) +
  geom_bar() +
  xlab("Gender") +
  ylab("Gender Count") +
  ggtitle("Analysis of Gender") +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Bar plot for the type of chest pain experienced
ggplot(processed_cleveland, aes(x = cp, fill = cp)) +
  geom_bar() +
  xlab("Chest Pain Type") +
  ylab("Count") +
  ggtitle("Analysis of Chest Pain Experienced") +
  scale_fill_discrete(name = "Chest Pain Type", labels = c("Typical angina pain", "Atypical angina pain", "Non-Anginal pain", "Asymptomatic pain"))
# Chest pain type, stacked by heart-disease status
ggplot(processed_cleveland, aes(x = cp, fill = target)) +
  geom_bar() +
  xlab("Chest Pain Type") +
  ylab("Count") +
  ggtitle("Analysis of Chest Pain Experienced") +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Bar plot for ca (number of major vessels, 0-3)
ggplot(processed_cleveland, aes(x = ca, fill = ca)) +
  geom_bar() +
  xlab("number of major vessels") +
  ylab("Count") +
  ggtitle("Analysis of number of major vessels") +
  theme(legend.position = "none")
# ca, dodged by heart-disease status
ggplot(processed_cleveland, aes(x = ca, fill = target)) +
  geom_bar(position = 'dodge') +
  xlab("number of major vessels") +
  ylab("Count") +
  ggtitle("Analysis of number of major vessels") +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Histogram for trestbps (resting blood pressure)
ggplot(processed_cleveland, aes(x = trestbps)) +
  geom_histogram() +
  xlab("Resting blood pressure") +
  ylab("Count") +
  ggtitle("Analysis of blood pressure")
# Treat readings above 180 as outliers: blank them out, then impute the median
processed_cleveland$trestbps <- ifelse(processed_cleveland$trestbps > 180, NA, processed_cleveland$trestbps)
processed_cleveland$trestbps <- ifelse(is.na(processed_cleveland$trestbps),
                                       median(processed_cleveland$trestbps, na.rm = TRUE),
                                       processed_cleveland$trestbps)
# After the removal of outliers
ggplot(processed_cleveland, aes(x = trestbps)) +
  geom_histogram() +
  xlab("Resting blood pressure") +
  ylab("Count") +
  ggtitle("Analysis of blood pressure")
# Density of resting blood pressure split by heart-disease status
ggplot(processed_cleveland, aes(x = trestbps, fill = target)) +
  geom_density(alpha = 0.5) +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Histogram for oldpeak (ST depression induced by exercise relative to rest)
ggplot(processed_cleveland, aes(x = oldpeak)) +
  geom_histogram() +
  xlab("ST depression induced by exercise relative to rest") +
  ylab("Count") +
  ggtitle("Analysis of ST depression induced by exercise relative to rest")
length(processed_cleveland)
# Recode slope 0 as 1, then plot. Fixed: the original wrapped the
# false-branch in print(), which dumped the whole column to the console as a
# side effect; print(x) returns x, so dropping it preserves the result.
processed_cleveland$slope <- ifelse(processed_cleveland$slope == 0, 1, processed_cleveland$slope)
processed_cleveland$slope <- as.factor(processed_cleveland$slope)
ggplot(processed_cleveland, aes(x = slope, fill = slope)) +
  geom_bar() +
  xlab("Slope of ST segment") +
  ylab("Count") +
  ggtitle("Analysis of slope of the peak exercise ST segment") +
  scale_fill_discrete(name = "Slope of ST segment", labels = c("Upsloping", "Flat", "Downsloping"))
# Treat heart rates below 75 as outliers and impute the median
processed_cleveland$thalach <- ifelse(processed_cleveland$thalach < 75, NA, processed_cleveland$thalach)
processed_cleveland$thalach <- ifelse(is.na(processed_cleveland$thalach),
                                      median(processed_cleveland$thalach, na.rm = TRUE),
                                      processed_cleveland$thalach)
ggplot(processed_cleveland, aes(x = thalach)) +
  geom_histogram() +
  xlab("Maximum heart rate achieved") +
  ylab("Count") +
  ggtitle("Analysis of maximum heart rate achieved")
# Density plot of thalach split by heart-disease status
ggplot(processed_cleveland, aes(x = thalach, fill = target)) +
  geom_density(alpha = 0.5) +
  xlab("Maximum Heart Rate Achieved") +
  ylab("Count") +
  ggtitle("Analysis of relation of heart rate with presence of heart disease") +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Recode thal 0 (invalid category) as 2, then plot the distribution
processed_cleveland$thal <- ifelse(processed_cleveland$thal == 0, 2, processed_cleveland$thal)
processed_cleveland$thal <- as.factor(processed_cleveland$thal)
ggplot(processed_cleveland, aes(x = thal, fill = thal)) +
  geom_bar() +
  xlab("Blood disorder type") +
  ylab("Count") +
  ggtitle("Analysis of blood disorder (thalassemia)") +
  scale_fill_discrete(name = "Blood disorder", labels = c("Normal", "Fixed defect", "reversable defect"))
# Reload the raw data for the inference section and drop rows with missing
# values. All heart.csv columns are numeric, so complete.cases() is
# equivalent to the original chain of 14 per-column
# subset(col != "NaN" & !is.na(col)) filters (numeric NaN parses as NA).
processed_cleveland <- read.csv(file = 'heart.csv')
processed_cleveland <- processed_cleveland[complete.cases(processed_cleveland), ]
head(processed_cleveland)
nrow(processed_cleveland)
ncol(processed_cleveland)
# (removed the erroneous `data(processed_cleveland)` call: data() looks up a
# packaged data set by that name and only emitted a warning here)
head(processed_cleveland)
# Check normality of cholesterol before the t-tests below
qqnorm(processed_cleveland$chol)
hist(processed_cleveland$chol)
# One-sample t-test of mean cholesterol against mu_0 = 240, by hand ----
# sample mean
x_bar <- mean(processed_cleveland$chol)
# null hypothesized population Chol
mu_0 <- 240
# sample st. dev
s <- sd(processed_cleveland$chol)
# sample size
n <- length(processed_cleveland$chol)
# t-test test statistic
t <- (x_bar - mu_0)/(s/sqrt(n))
# Two-sided p-value. Fixed: use |t| so the doubling is correct whichever
# side the sample mean falls on (the original assumed t > 0).
two_sided_t_pval <- pt(q = abs(t), df = n-1, lower.tail = FALSE)*2
two_sided_t_pval
# Cross-check with the built-in test
t.test(processed_cleveland$chol,
       alternative = "two.sided",
       mu = 240)
# Bootstrap sampling distribution of the sample mean ----
# This data is pretty skewed so even though n is large, do many simulations
num_sims <- 10000
# A vector to store the results
results <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
  results[i] <- mean(sample(x = processed_cleveland$chol,
                            size = n,
                            replace = TRUE))
}
# Finally plot the results
hist(results, freq = FALSE, main='Sampling Distribution of the Sample Mean', xlab = 'average chlorstral of patient ', ylab = 'Density')
# overlay a normal curve estimated from the bootstrap means
lines(x = seq(238, 260, .1), dnorm(seq(238, 260, .1), mean = mean(results), sd = sd(results)))
# Bootstrap test: shift the sample so that the null hypothesis is true
time_given_H0_true <- processed_cleveland$chol - mean(processed_cleveland$chol) + mu_0
# This data is pretty skewed so even though n is large, do many simulations
num_sims <- 10000
# A vector to store the results
results_given_H0_true <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
  results_given_H0_true[i] <- mean(sample(x = time_given_H0_true,
                                          size = n,
                                          replace = TRUE))
}
# Plot the null distribution. Fixed: the title string literal was broken
# across a source line in the middle of the word "Mean".
hist(results_given_H0_true, freq = FALSE,
     main = 'Sampling Distribution of the Sample Mean, Given Null Hypothesis is True',
     xlab = 'Average chlorastrol level of patient', ylab = 'Density')
# add line to show values more extreme on upper end
abline(v=x_bar, col = "red")
# add line to show values more extreme on lower end
low_end_extreme <- mean(results_given_H0_true)+(mean(results_given_H0_true)-x_bar)
abline(v=low_end_extreme, col="red")
# counts of bootstrap means more extreme than the observed mean, given H_0 is true
# (two-sided, matching the alternate hypothesis)
count_of_more_extreme_lower_tail <- sum(results_given_H0_true <= low_end_extreme)
count_of_more_extreme_upper_tail <- sum(results_given_H0_true >= x_bar)
bootstrap_pvalue <- (count_of_more_extreme_lower_tail + count_of_more_extreme_upper_tail)/num_sims
bootstrap_pvalue
# two sided t p-value, for comparison
two_sided_t_pval
# Bootstrap confidence intervals ----
# the standard error is the standard deviation of the bootstrap means
bootstrap_SE_X_bar <- sd(results)
# rough interval via statistic +/- 2*SE
c(x_bar - 2*bootstrap_SE_X_bar, x_bar + 2*bootstrap_SE_X_bar)
# percentile interval from the 2.5th and 97.5th quantiles
c(quantile(results, c(.025, .975)))
# compare to our t-methods
c(x_bar+(qt(0.025, n-1)*(s/sqrt(n))), x_bar+(qt(0.975, n-1)*(s/sqrt(n))))
# One-sample proportion test: proportion of male patients (sex == 1) ----
table(processed_cleveland$sex
)
p_hat <- 205/302
# z statistic under H0: p = 0.5
z <- (p_hat - .5) / sqrt((.5*(1-.5)) / 302)
z
# One-sided upper exact
binom.test(x=205, n = 302, p = 0.5, alternative="greater")
# One sided upper normal approximation
pnorm(z, lower.tail = FALSE)
# exact binomial test confidence interval
binom.test(x=205, n = 302, p=(.5), alternative="greater")$conf.int
# normal approx confidence interval (one-sided lower bound)
c(p_hat - (1.64)*sqrt(((p_hat)*(1 - p_hat))/302), 1)
# Bootstrap Method
geos <- factor(rep(c("male", "female"), c(205, 302-205)))
geos
table(geos)
# 0/1 coding is easier to use for bootstrapping
geos <- rep(c(1, 0), c(205, 302-205))
geos
num_sims <- 10000
# A vector to store the results
results <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
  results[i] <- mean(sample(x = geos,
                            size = 302,
                            replace = TRUE))
}
# Finally plot the results
hist(results, freq = FALSE, main='Sampling Distribution of the Sample Proportion',
     xlab = 'Proportion of female patinets having heart diease ', ylab = 'Density')
# estimate a normal curve
lines(x = seq(.40, .60, .001), dnorm(seq(.40, .60, .001), mean = mean(results), sd = sd(results)))
# Bootstrap one-sided confidence interval (5th percentile lower bound)
n <- 302
c(quantile(results, c(.05, 1)))
# exact binomial test
binom.test(x=205, n = 302, p=(.5), alternative="greater")$conf.int
# normal approx
c(p_hat - (1.64)*sqrt(((p_hat)*(1 - p_hat))/302), 1)
# Under the assumption that the null hypothesis is true, half the patients
# are male (151 of 302)
geos <- rep(c(1, 0), c(151, 302-151))
num_sims <- 10000
# A vector to store the results
results <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
  results[i] <- mean(sample(x = geos,
                            size = 302,
                            replace = TRUE))
}
# Finally plot the results
hist(results, freq = FALSE,
     main='Sampling Distribution of the Sample Proportion under H_0:p=0.5', xlab = 'Proportion of women having heart diease ',
     ylab = 'Density', xlim = c(0.30,0.70))
# estimate a normal curve over it
lines(x = seq(.40, .60, .001), dnorm(seq(.40, .60, .001), mean = mean(results), sd = sd(results)))
abline(v=p_hat, col="red")
count_of_more_extreme_upper_tail <- sum(results >= p_hat)
bootstrap_pvalue <- count_of_more_extreme_upper_tail/num_sims
# Bootstrap p-value
bootstrap_pvalue
# Exact Binomial p-value
binom.test(x=205, n = 302, p=(.5), alternative="greater")$p.value
# Normal Approximation p-value
pnorm(z, lower.tail = FALSE)
# Two-sample t-test of cholesterol by gender, by hand ----
# QQ plot for cholesterol overall
qqnorm(processed_cleveland$chol)
qqline(processed_cleveland$chol, col = "blue")
hist(processed_cleveland$chol)
# QQ plot for cholesterol, male patients (sex == "1")
qqnorm(processed_cleveland$chol[processed_cleveland$sex == "1"])
qqline(processed_cleveland$chol[processed_cleveland$sex == "1"], col = "blue")
hist(processed_cleveland$chol[processed_cleveland$sex == "1"])
# QQ plot for cholesterol, female patients (sex == "0")
qqnorm(processed_cleveland$chol[processed_cleveland$sex == "0"])
qqline(processed_cleveland$chol[processed_cleveland$sex == "0"], col = "blue")
hist(processed_cleveland$chol[processed_cleveland$sex == "0"])
# sample means
x_bar_m <- mean(processed_cleveland$chol[processed_cleveland$sex =="1"])
x_bar_f <- mean(processed_cleveland$chol[processed_cleveland$sex =="0"])
x_bar_m
x_bar_f
# null hypothesized population mean difference between the two groups
mu_0 <- 0
mu_0
# sample variances
s_f_sq <- sd(processed_cleveland$chol[processed_cleveland$sex =="0"])**2
s_m_sq <- sd(processed_cleveland$chol[processed_cleveland$sex =="1"])**2
s_m_sq
s_f_sq
# sample sizes. Fixed: the original assigned the male count (sex == "1") to
# n_f and the female count to n_m, which made the standard error below wrong.
n_f <- length(processed_cleveland$chol[processed_cleveland$sex == "0"])
n_m <- length(processed_cleveland$chol[processed_cleveland$sex == "1"])
n_m
n_f
# Welch-style t statistic
t <- (x_bar_f - x_bar_m - mu_0)/sqrt((s_f_sq/n_f) + (s_m_sq/n_m))
t
# Two-sided p-value. Fixed: the original doubled the lower tail (labelled
# "one sided upper"), which exceeds 1 whenever t > 0; use |t| instead.
two_sided_diff_t_pval <- pt(q = abs(t), df = min(n_f, n_m)-1, lower.tail = FALSE)*2
two_sided_diff_t_pval
# Lower bound of Confidence Interval
(x_bar_f-x_bar_m)+(qt(0.025, min(n_f, n_m)-1)*sqrt((s_f_sq/n_f) + (s_m_sq/n_m)))
# Upper bound of Confidence Interval
(x_bar_f-x_bar_m)+(qt(0.975, min(n_f, n_m)-1)*sqrt((s_f_sq/n_f) + (s_m_sq/n_m)))
# Cross-check with the built-in Welch test
t.test(processed_cleveland$chol[processed_cleveland$sex=="1"],
       processed_cleveland$chol[processed_cleveland$sex=="0"])
# Bootstrap CI for the difference in mean cholesterol (female - male) ----
table(processed_cleveland$sex)
num_sims <- 10000
# A vector to store the results
results <- rep(NA, num_sims)
# Resample each group with its own size. Generalized from the hard-coded
# 207 male / 96 female counts so this stays correct if the data change.
n_male_grp <- sum(processed_cleveland$sex == "1")
n_female_grp <- sum(processed_cleveland$sex == "0")
# A loop for completing the simulation
for(i in 1:num_sims){
  mean_male <- mean(sample(x = processed_cleveland$chol[processed_cleveland$sex == "1"],
                           size = n_male_grp,
                           replace = TRUE))
  mean_female <- mean(sample(x = processed_cleveland$chol[processed_cleveland$sex == "0"],
                             size = n_female_grp,
                             replace = TRUE))
  results[i] <- mean_female - mean_male
}
# Finally plot the results
hist(results, freq = FALSE, main='Sampling Distribution of the Sample Mean',
     xlab = 'Average Difference clostral value ', ylab = 'Density')
# estimate a normal curve
lines(x = seq(-10, 50, .001), dnorm(seq(-10, 50, .001), mean = mean(results), sd = sd(results)))
# Bootstrap percentile CI
c(quantile(results, c(.025, .975)))
# compare to our t-methods
t.test(processed_cleveland$chol[processed_cleveland$sex=="1"],
       processed_cleveland$chol[processed_cleveland$sex=="0"])$conf.int
# Permutation test for the difference in mean cholesterol by sex ----
# Seeded so the shuffles (and hence the p-value) are reproducible.
set.seed(0)
num_sims <- 1000
# A vector to store my results
results_given_H0_true <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
# idea: if there is no relationship, we should be able to shuffle the group labels
shuffled_groups <- transform(processed_cleveland, sex=sample(sex))
mean_male <- mean(shuffled_groups$chol[shuffled_groups$sex == "1"])
mean_female <- mean(shuffled_groups$chol[shuffled_groups$sex == "0"])
results_given_H0_true[i] <- mean_female - mean_male
}
# Finally plot the results (the null distribution of the mean difference)
hist(results_given_H0_true, freq = FALSE,
main='Dist. of the Diff in Sample Means Under Null',
xlab = 'Average Difference of clostreal level is under Null',
ylab = 'Density')
# Observed difference (male - female); note this is the negative of the
# female - male statistic simulated above.
diff_in_sample_means <- mean(processed_cleveland$chol[processed_cleveland$sex == "1"]) -
mean(processed_cleveland$chol[processed_cleveland$sex == "0"])
abline(v=diff_in_sample_means, col = "blue")
abline(v=abs(diff_in_sample_means), col = "red")
# counts of values more extreme than the test statistic in our original sample, given H0 is true
# two sided given the alternate hypothesis
count_of_more_extreme_lower_tail <- sum(results_given_H0_true <= diff_in_sample_means)
count_of_more_extreme_upper_tail <- sum(results_given_H0_true >= abs(diff_in_sample_means))
bootstrap_pvalue <- (count_of_more_extreme_lower_tail + count_of_more_extreme_upper_tail)/num_sims
## Bootstrap (permutation) p-value
bootstrap_pvalue
## t-test p-value, for comparison
t.test(processed_cleveland$chol[processed_cleveland$sex=="0"],
processed_cleveland$chol[processed_cleveland$sex=="1"])$p.value
# Two-proportion z-test: heart-disease rate, female vs male ----
p_hat_f <- sum(processed_cleveland$sex == "0" & processed_cleveland$target == "1")/sum(processed_cleveland$sex == "0")
p_hat_m <- sum(processed_cleveland$sex == "1" & processed_cleveland$target == "1")/sum(processed_cleveland$sex== "1")
p_hat_f
p_hat_m
# null hypothesized population prop difference between the two groups
p_0 <- 0
# sample size
n_f <- sum(processed_cleveland$sex == "0")
n_m <- sum(processed_cleveland$sex == "1")
n_f
n_m
# variance components of the difference
den_p_m <- (p_hat_m*(1-p_hat_m))/n_m
den_p_f <- (p_hat_f*(1-p_hat_f))/n_f
den_p_m
den_p_f
# z-test test statistic
z <- (p_hat_f - p_hat_m - p_0)/sqrt(den_p_f + den_p_m)
z
# Two-sided p-value. Fixed: use |z| so the doubling is correct whichever
# group's proportion is larger (the original assumed z > 0).
two_sided_diff_prop_pval <- pnorm(q = abs(z), lower.tail = FALSE)*2
two_sided_diff_prop_pval
# lower bound
(p_hat_f - p_hat_m)+(qnorm(0.025)*sqrt(den_p_f + den_p_m))
# upper bound
(p_hat_f - p_hat_m)+(qnorm(0.975)*sqrt(den_p_f + den_p_m))
# Bootstrap approach for the difference in proportions ----
# Build 0/1 outcome vectors per group (1 = has heart disease)
female <- rep(c(1,0), c(sum(processed_cleveland$sex == "0" & processed_cleveland$target == "1"), n_f - sum(processed_cleveland$sex == "0" & processed_cleveland$target == "1")))
male<- rep(c(1, 0), c(sum(processed_cleveland$sex == "1" & processed_cleveland$target == "1"), n_m - sum(processed_cleveland$sex == "1" & processed_cleveland$target == "1")))
num_sims <- 10000
# A vector to store the results
results <- rep(NA, num_sims)
n_f
n_m
# A loop for completing the simulation
for(i in 1:num_sims){
  prop_female <- mean(sample(x = female,
                             size = n_f,
                             replace = TRUE))
  prop_male <- mean(sample(x = male,
                           size = n_m,
                           replace = TRUE))
  results[i] <- prop_female - prop_male
}
# Finally plot the results
hist(results, freq = FALSE, main='Dist. of the Diff in Prop',
     xlab = 'Difference in Prop. male male and female having heart dieaseas ', ylab = 'Density')
# Bootstrap percentile interval
c(quantile(results, c(.025, .975)))
# Normal approximation interval
c((p_hat_f - p_hat_m)+(qnorm(0.025)*sqrt(den_p_f + den_p_m)),
  (p_hat_f - p_hat_m)+(qnorm(0.975)*sqrt(den_p_f + den_p_m)))
# Build a combined data frame for the permutation test that follows
df_combined <- data.frame("count_heart_disease" = c(female,male),
                          "sex" = rep(c("0", "1"), c(n_f, n_m)))
df_combined
# Sanity checks. Fixed: the original summarised a nonexistent `users` column.
summary(df_combined$count_heart_disease)
mean(df_combined$count_heart_disease[df_combined$sex == "0"]) == p_hat_f
mean(df_combined$count_heart_disease[df_combined$sex == "1"]) == p_hat_m
# Permutation test for the difference in heart-disease proportions ----
num_sims <- 1000
# A vector to store the results
results_given_H0_true <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
  # if there is no relationship, we should be able to shuffle the group labels
  shuffled_groups <- transform(df_combined, sex = sample(sex))
  prop_male <- mean(shuffled_groups$count_heart_disease[shuffled_groups$sex == "1"])
  prop_female <- mean(shuffled_groups$count_heart_disease[shuffled_groups$sex == "0"])
  results_given_H0_true[i] <- prop_female - prop_male
}
results_given_H0_true
# Finally plot the results
hist(results_given_H0_true, freq = FALSE,
     main='Dist. of the Diff in Sample Sample Props Under Null',
     xlab = 'Average Difference in Prop of male and female having heart diease ',
     ylab = 'Density', xlim = c(-0.30, 0.30))
diff_in_sample_props <- p_hat_f - p_hat_m
abline(v=diff_in_sample_props, col = "blue")
abline(v=-diff_in_sample_props, col = "red")
# counts of values more extreme than the observed difference, given H0 is true
# (two sided, matching the alternate hypothesis)
count_of_more_extreme_lower_tail <- sum(results_given_H0_true <= -diff_in_sample_props)
count_of_more_extreme_upper_tail <- sum(results_given_H0_true > diff_in_sample_props)
bootstrap_pvalue <- (count_of_more_extreme_lower_tail + count_of_more_extreme_upper_tail)/num_sims
count_of_more_extreme_lower_tail
count_of_more_extreme_upper_tail   # fixed: the lower-tail count was printed twice
# Bootstrap (permutation) p-value
bootstrap_pvalue
# Normal approximation p-value
two_sided_diff_prop_pval
# Chi-squared goodness-of-fit for thal: 4 categories, expected 303/4 = 75.75 each
table(processed_cleveland$thal)
prop.table(table(processed_cleveland$thal))
sum(((table(processed_cleveland$thal) - 75.75)^2)/75.75)
pchisq(245.8185, df = 4-1, lower.tail = FALSE) | /final project.R | no_license | sachin301194/Statistical-Analysis-on-Cleveland-Heart-Disease-UCI-Repository-dataset | R | false | false | 23,957 | r | knitr::opts_chunk$set(echo = TRUE)
summary(cars)
## Package setup for the heart-disease analysis.
install.packages("kableExtra")
install.packages("dplyr")   # fixed typo: "dpylr" is not a CRAN package
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, skimr, GGally, plotly, viridis, caret, randomForest, e1071, rpart,
               xgboost, h2o, corrplot, rpart.plot, corrgram, ggplot2, highcharter,
               ggthemes, psych, scales, treemap, treemapify, repr, cowplot, magrittr, ggpubr,
               RColorBrewer, plotrix, ggrepel, tidyverse, gridExtra, reshape2)  # fixed: "reshape2." had a stray trailing dot
library(readr)
library(ggplot2)
library(corrplot)
library(tidyverse)
library(ggcorrplot)
library(ggplot2)
library(plyr)
library(caret)
library(caTools)
library(readr)   # fixed typo: library(reader) -> readr (read_csv is used below)
# Load the Cleveland heart-disease data and inspect it.
processed_cleveland <- read_csv("heart.csv")
head(processed_cleveland)
cor(processed_cleveland)       # all columns are numeric at this point
summary(processed_cleveland)
corrplot(cor(processed_cleveland))
# Convert the categorical columns to factors
processed_cleveland$sex <- as.factor(processed_cleveland$sex)
processed_cleveland$target <- as.factor(processed_cleveland$target)
processed_cleveland$cp <- as.factor(processed_cleveland$cp)
processed_cleveland$ca <- as.factor(processed_cleveland$ca)
processed_cleveland$exang <- as.factor(processed_cleveland$exang)
processed_cleveland$slope <- as.factor(processed_cleveland$slope)
processed_cleveland$thal <- as.factor(processed_cleveland$thal)
# Summary after pre-processing the data
summary(processed_cleveland)
# Display the number of NAs in each column
colSums(is.na(processed_cleveland))
# Bar plot for target (presence/absence of heart disease); columns are
# referenced bare inside aes() rather than via processed_cleveland$col
processed_cleveland$target <- as.factor(processed_cleveland$target)
ggplot(processed_cleveland, aes(x = target, fill = target)) +
  geom_bar() +
  xlab("Heart Disease") +
  ylab("Count") +
  ggtitle("Analysis of Presence and Absence of Heart Disease") +
  scale_fill_discrete(name = "Heart Disease", labels = c("Absence", "Presence"))
# Count the frequency of each age value (plyr::count)
ageCount <- count(processed_cleveland, 'age')
ageCount <- subset(ageCount[which(ageCount$freq > 10), ])
# Plot the ages with frequency greater than 10
ggplot(ageCount, aes(x = age, y = freq)) +
  ggtitle("Age Analysis") +
  xlab("Age") +
  ylab("Age Count") +
  geom_bar(stat = "identity")
# Group the ages into three groups: young < 45, middle 45-54, elderly >= 55.
# Fixed off-by-one: the original used `age > 55` for elderly, so patients
# aged exactly 55 fell into no group at all.
young <- processed_cleveland[which(processed_cleveland$age < 45), ]
middle <- processed_cleveland[which(processed_cleveland$age >= 45 & processed_cleveland$age < 55), ]
elderly <- processed_cleveland[which(processed_cleveland$age >= 55), ]
groups <- data.frame(age_group = c("young","middle","elderly"), group_count = c(NROW(young$age), NROW(middle$age), NROW(elderly$age)))
# Plot the age-group counts (legend labels follow alphabetical factor order)
ggplot(groups, aes(x = age_group, y = group_count, fill = age_group)) +
  ggtitle("Age Analysis") +
  xlab("Age Group") +
  ylab("group Count") +
  geom_bar(stat = "identity") +
  scale_fill_discrete(name = "Age Group", labels = c("Elderly", "Middle", "Young"))
# Drop the raw age column now that the age groups have been examined.
processed_cleveland = subset(processed_cleveland, select = c(-age))
# NOTE(review): this plot maps both x and y to sex (y is a factor after the
# conversion above) while labelling the x axis "Age Groups"; the aesthetics
# and labels look inconsistent with the stated intent - confirm what was
# meant to be plotted here.
ggplot(processed_cleveland, aes(x= factor(processed_cleveland$sex), y=processed_cleveland$sex, colour=target)) +
geom_boxplot(stat = "boxplot",
position = "dodge2") +
geom_boxplot(outlier.shape = NA) +
geom_jitter(width = 0.2) +
xlab("Age Groups") +
ylab("Gender") +
ggtitle("Analysis of gender with different age group with presence or absense of heart disease")
# Bar plot: patient counts by sex, filled by heart-disease status.
ggplot(processed_cleveland, aes(x= processed_cleveland$sex, fill=processed_cleveland$target)) +
geom_bar() +
xlab("Gender") +
ylab("Gender Count") +
ggtitle("Analysis of Gender") +
scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Bar chart: how many patients report each chest-pain type.
ggplot(processed_cleveland, aes(x = cp, fill = cp)) +
  geom_bar() +
  labs(title = "Analysis of Chest Pain Experienced",
       x = "Chest Pain Type",
       y = "Count") +
  scale_fill_discrete(name = "Chest Pain Type",
                      labels = c("Typical angina pain", "Atypical angina pain", "Non-Anginal pain", "Asymptomatic pain"))
# Bar chart: chest-pain type broken down by heart-disease status.
ggplot(processed_cleveland, aes(x = cp, fill = target)) +
  geom_bar() +
  labs(title = "Analysis of Chest Pain Experienced",
       x = "Chest Pain Type",
       y = "Count") +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Bar chart: distribution of the number of major vessels (0-3); legend is
# redundant with the x axis, so it is suppressed.
ggplot(processed_cleveland, aes(x = ca, fill = ca)) +
  geom_bar() +
  labs(title = "Analysis of number of major vessels",
       x = "number of major vessels",
       y = "Count") +
  theme(legend.position = "none")
# Side-by-side bars: number of major vessels split by heart-disease status.
ggplot(processed_cleveland, aes(x = ca, fill = target)) +
  geom_bar(position = 'dodge') +
  labs(title = "Analysis of number of major vessels",
       x = "number of major vessels",
       y = "Count") +
  scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Histogram for trestbps (resting blood pressure), before outlier handling.
ggplot(processed_cleveland, aes(x=trestbps)) +
geom_histogram() +
xlab("Resting blood pressure") +
ylab("Count") +
ggtitle("Analysis of blood pressure")
# Treat readings above 180 as outliers: mark them NA first ...
processed_cleveland$trestbps = ifelse(processed_cleveland$trestbps > 180, NA, processed_cleveland$trestbps)
# ... then impute the NAs with the median of the remaining values.
processed_cleveland$trestbps = ifelse(is.na(processed_cleveland$trestbps), median(processed_cleveland$trestbps[which(!is.na(processed_cleveland$trestbps))]), processed_cleveland$trestbps)
# Same histogram after the outlier removal/imputation.
ggplot(processed_cleveland, aes(x=trestbps)) +
geom_histogram() +
xlab("Resting blood pressure") +
ylab("Count") +
ggtitle("Analysis of blood pressure")
# Density of resting blood pressure, split by heart-disease status.
ggplot(processed_cleveland, aes(x = trestbps, fill = target)) +
geom_density(alpha=0.5) +
scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# Histogram for oldpeak (ST depression induced by exercise relative to rest).
ggplot(processed_cleveland, aes(x=oldpeak)) +
geom_histogram() +
xlab("ST depression induced by exercise relative to rest") +
ylab("Count") +
ggtitle("Analysis of ST depression induced by exercise relative to rest")
# Number of columns (length() on a data.frame counts columns, not rows).
length(processed_cleveland)
# Bar plot for slope (slope of the peak exercise ST segment).
# Recode slope 0 into category 1 before plotting.
# BUG FIX: the original wrapped the no-change branch in print(), which dumped
# the whole column to the console on every run; print() returns its argument,
# so removing it leaves the recoding itself unchanged.
# NOTE(review): slope was already converted to a factor above, so this
# ifelse() operates on factor comparison/level codes - confirm the recoding
# matches the intended data coding.
processed_cleveland$slope <- ifelse(processed_cleveland$slope == 0, 1, processed_cleveland$slope)
processed_cleveland$slope <- as.factor(processed_cleveland$slope)
ggplot(processed_cleveland, aes(x=processed_cleveland$slope, fill=processed_cleveland$slope)) +
geom_bar() +
xlab("Slope of ST segment") +
ylab("Count") +
ggtitle("Analysis of slope of the peak exercise ST segment") +
scale_fill_discrete(name = "Slope of ST segment", labels = c("Upsloping", "Flat", "Downsloping"))
# thalach (max heart rate): treat values below 75 as outliers -> NA ...
processed_cleveland$thalach = ifelse(processed_cleveland$thalach < 75, NA, processed_cleveland$thalach)
# ... then impute the NAs with the median of the remaining values.
processed_cleveland$thalach = ifelse(is.na(processed_cleveland$thalach), median(processed_cleveland$thalach[which(!is.na(processed_cleveland$thalach))]), processed_cleveland$thalach)
# Histogram of the cleaned maximum heart rate.
ggplot(processed_cleveland, aes(x=thalach)) +
geom_histogram() +
xlab("Maximum heart rate achieved") +
ylab("Count") +
ggtitle("Analysis of maximum heart rate achieved")
# Density plot for thalach ~ target.
ggplot(processed_cleveland, aes(x = thalach, fill = target)) +
geom_density(alpha=0.5) +
xlab("Maximum Heart Rate Achieved") +
ylab("Count") +
ggtitle("Analysis of relation of heart rate with presence of heart disease") +
scale_fill_discrete(name = "Heart disease", labels = c("No", "Yes"))
# thal: recode the undocumented 0 level into 2 before factoring.
# NOTE(review): mapping 0 -> 2 is an assumption about the data coding -
# confirm against the data dictionary.
processed_cleveland$thal = ifelse(processed_cleveland$thal == 0, 2, processed_cleveland$thal)
processed_cleveland$thal <- as.factor(processed_cleveland$thal)
ggplot(processed_cleveland, aes(x=thal, fill=thal)) +
geom_bar() +
xlab("Blood disorder type") +
ylab("Count") +
ggtitle("Analysis of blood disorder (thalassemia)") +
scale_fill_discrete(name = "Blood disorder", labels = c("Normal", "Fixed defect", "reversable defect"))
# Reload the raw data and keep only complete rows -----------------------------
processed_cleveland<-read.csv(file = 'heart.csv')
# IMPROVEMENT: the original filtered all 14 columns one at a time with
# subset(df, col != "NaN" & !is.na(col)). For numeric columns is.na() is
# already TRUE for NaN, so a single complete-cases filter over every column
# is equivalent and far less error-prone.
processed_cleveland <- processed_cleveland[complete.cases(processed_cleveland), ]
head(processed_cleveland)
nrow(processed_cleveland)
ncol(processed_cleveland)
# BUG FIX: the original also called data(processed_cleveland); data() looks
# up packaged data sets by name and only produced a warning here, so the
# call was removed.
head(processed_cleveland)
# One-sample t test: is mean cholesterol different from 240? -----------------
qqnorm(processed_cleveland$chol)
hist(processed_cleveland$chol)
# the parts of the test statistic
# sample mean
x_bar <- mean(processed_cleveland$chol)
# null hypothesized population mean cholesterol
mu_0 <- 240
# sample st. dev
s <- sd(processed_cleveland$chol)
# sample size
n <- length(processed_cleveland$chol)
# t-test test statistic
t <- (x_bar - mu_0)/(s/sqrt(n))
# Two-sided p-value: double the upper-tail area of |t|.
# BUG FIX: the original used pt(q = t, lower.tail = FALSE)*2, which is only
# correct when t > 0; abs(t) makes it correct for either sign.
two_sided_t_pval <- pt(q = abs(t), df = n-1, lower.tail = FALSE)*2
two_sided_t_pval
# Cross-check with R's built-in t test.
t.test(processed_cleveland$chol,
alternative = "two.sided",
mu = 240)
# Bootstrap the sampling distribution of the sample mean of cholesterol.
# This data is pretty skewed so even though n is large, do many simulations.
num_sims <- 10000
# A vector to store my results
results <- rep(NA, num_sims)
# Resample with replacement and record each resample mean.
for(i in 1:num_sims){
results[i] <- mean(sample(x = processed_cleveland$chol,
size = n,
replace = TRUE))
}
# Finally plot the results
hist(results, freq = FALSE, main='Sampling Distribution of the Sample Mean', xlab = 'average chlorstral of patient ', ylab = 'Density')
# Overlay a normal curve fitted to the bootstrap results.
# NOTE(review): the x range 238-260 is tuned to this data set; the `sd` /
# `(results)` split across two lines is valid R line continuation.
lines(x = seq(238, 260, .1), dnorm(seq(238, 260, .1), mean = mean(results), sd = sd
(results)))
# Bootstrap test under H0: shift the sample so its mean equals mu_0 ----------
time_given_H0_true <- processed_cleveland$chol - mean(processed_cleveland$chol) + mu_0
# This data is pretty skewed so even though n is large, I'm going to do a lot of simulations
num_sims <- 10000
# A vector to store my results
results_given_H0_true <- rep(NA, num_sims)
# Resample the shifted data and record each resample mean.
for(i in 1:num_sims){
results_given_H0_true[i] <- mean(sample(x = time_given_H0_true,
size = n,
replace = TRUE))
}
# Finally plot the results.
# BUG FIX: the original title string contained a literal line break in the
# middle of "Mean" ('...Sample Mea<newline>n, Given...'); the label is
# repaired and the xlab spelling corrected.
hist(results_given_H0_true, freq = FALSE, main='Sampling Distribution of the Sample Mean, Given Null Hypothesis is True', xlab = 'Average cholesterol level of patient', ylab = 'Density')
# add line to show values more extreme on upper end
abline(v=x_bar, col = "red")
# Mirror point on the lower side of the null mean (assumes x_bar is above
# the null mean, which holds for this data).
low_end_extreme <- mean(results_given_H0_true)+(mean(results_given_H0_true)-x_bar)
abline(v=low_end_extreme, col="red")
# Counts of values at least as extreme as the observed mean, given H0 true
# (two-sided given the alternate hypothesis).
count_of_more_extreme_lower_tail <- sum(results_given_H0_true <= low_end_extreme)
count_of_more_extreme_upper_tail <- sum(results_given_H0_true >= x_bar)
bootstrap_pvalue <- (count_of_more_extreme_lower_tail + count_of_more_extreme_upper_tail)/num_sims
bootstrap_pvalue
# two sided t p-value for comparison
two_sided_t_pval
# Bootstrap standard error = standard deviation of the bootstrap means.
bootstrap_SE_X_bar <- sd(results)
# Rough 95% CI via statistic +/- 2*SE.
c(x_bar - 2*bootstrap_SE_X_bar, x_bar + 2*bootstrap_SE_X_bar)
# Percentile CI: the 2.5th and 97.5th quantiles of the bootstrap means
# (the original comment said 5th/95th, but the code uses .025/.975).
c(quantile(results, c(.025, .975)))
# Compare to the t-based 95% CI.
c(x_bar+(qt(0.025, n-1)*(s/sqrt(n))), x_bar+(qt(0.975, n-1)*(s/sqrt(n))))
# Counts of patients by sex (1 = male, 0 = female in this data).
table(processed_cleveland$sex
)
# One-sample proportion test: is the share of males different from 0.5?
# NOTE(review): 205/302 is hard-coded - verify it matches the table() output
# above for the currently filtered data.
p_hat <- 205/302
# z statistic under H0: p = 0.5.
z <- (p_hat - .5) / sqrt((.5*(1-.5)) / 302)
z
# One-sided upper exact
binom.test(x=205, n = 302, p = 0.5, alternative="greater")
# One-sided upper normal approximation
pnorm(z, lower.tail = FALSE)
# exact binomial test confidence interval
binom.test(x=205, n = 302, p=(.5), alternative="greater")$conf.int
# normal approx confidence interval (one-sided 95%: z = 1.64)
c(p_hat - (1.64)*sqrt(((p_hat)*(1 - p_hat))/302), 1)
# Bootstrap method: rebuild the sample as labelled observations.
geos <- factor(rep(c("male", "female"), c(205, 302-205)))
geos
table(geos)
# 1/0 encoding is easier to bootstrap (mean = proportion of males).
geos <- rep(c(1, 0), c(205, 302-205))
geos
# Bootstrap the sampling distribution of the proportion of males.
num_sims <- 10000
# A vector to store my results
results <- rep(NA, num_sims)
# Resample the 0/1 labels and record each resample proportion.
for(i in 1:num_sims){
results[i] <- mean(sample(x = geos,
size = 302,
replace = TRUE))
}
# Finally plot the results
hist(results, freq = FALSE, main='Sampling Distribution of the Sample Proportion',
xlab = 'Proportion of female patinets having heart diease ', ylab = 'Density')
# estimate a normal curve
lines(x = seq(.40, .60, .001), dnorm(seq(.40, .60, .001), mean = mean(results), sd = sd(results)))
# Bootstrap Confidence Interval
# NOTE(review): this assignment clobbers the n used by the earlier t-test code.
n = 302
# NOTE(review): quantile(..., 1) is the bootstrap maximum; to mirror the
# one-sided normal-approx interval below, the upper bound should probably be
# the constant 1 rather than a quantile - confirm intent.
c(quantile(results, c(.05, 1)))
# exact binomial test
binom.test(x=205, n = 302, p=(.5), alternative="greater")$conf.int
# normal approx
c(p_hat - (1.64)*sqrt(((p_hat)*(1 - p_hat))/302), 1)
# Under the assumption that the null hypothesis is true, exactly half of the
# 302 patients (151) are male.
geos <- rep(c(1, 0), c(151, 302-151))
num_sims <- 10000
# A vector to store my results
results <- rep(NA, num_sims)
# Resample under H0 and record each resample proportion.
for(i in 1:num_sims){
results[i] <- mean(sample(x = geos,
size = 302,
replace = TRUE))
}
# Finally plot the results
hist(results, freq = FALSE,
main='Sampling Distribution of the Sample Proportion under H_0:p=0.5', xlab = 'Proportion of women having heart diease ',
ylab = 'Density', xlim = c(0.30,0.70))
# estimate a normal curve over it - this looks pretty good!
lines(x = seq(.40, .60, .001), dnorm(seq(.40, .60, .001), mean = mean(results), sd = sd(results)))
# Mark the observed proportion on the null distribution.
abline(v=p_hat, col="red")
# One-sided (upper) p-value: fraction of null resamples at least as large.
count_of_more_extreme_upper_tail <- sum(results >= p_hat)
bootstrap_pvalue <- count_of_more_extreme_upper_tail/num_sims
# Bootstrap p-value
bootstrap_pvalue
# Exact Binomial p-value
binom.test(x=205, n = 302, p=(.5), alternative="greater")$p.value
# Normal Approximation p-value
pnorm(z, lower.tail = FALSE)
# QQ plot and histogram for cholesterol, all patients.
qqnorm(processed_cleveland$chol)
qqline(processed_cleveland$chol, col = "blue")
hist(processed_cleveland$chol)
# QQ plot and histogram for cholesterol, males only (sex == "1").
qqnorm(processed_cleveland$chol[processed_cleveland$sex == "1"])
qqline(processed_cleveland$chol[processed_cleveland$sex == "1"], col = "blue")
hist(processed_cleveland$chol[processed_cleveland$sex == "1"])
# QQ plot and histogram for cholesterol, females only (sex == "0").
qqnorm(processed_cleveland$chol[processed_cleveland$sex == "0"])
qqline(processed_cleveland$chol[processed_cleveland$sex == "0"], col = "blue")
hist(processed_cleveland$chol[processed_cleveland$sex == "0"])
# Two-sample (unpooled-variance) t test: cholesterol by sex ------------------
# sample means
x_bar_m <- mean(processed_cleveland$chol[processed_cleveland$sex =="1"])
x_bar_f <- mean(processed_cleveland$chol[processed_cleveland$sex =="0"])
x_bar_m
x_bar_f
# null hypothesized population mean difference between the two groups
mu_0 <- 0
mu_0
# sample variances
s_f_sq <- sd(processed_cleveland$chol[processed_cleveland$sex =="0"])**2
s_m_sq <- sd(processed_cleveland$chol[processed_cleveland$sex =="1"])**2
s_m_sq
s_f_sq
# sample sizes
# BUG FIX: the original assigned the male count (sex=="1") to n_f and the
# female count (sex=="0") to n_m, corrupting the standard error and the
# degrees of freedom below.
n_f <- length(processed_cleveland$chol[processed_cleveland$sex == "0"])
n_m <- length(processed_cleveland$chol[processed_cleveland$sex == "1"])
n_m
n_f
# t statistic for the difference in means (unpooled variances)
t <- (x_bar_f - x_bar_m - mu_0)/sqrt((s_f_sq/n_f) + (s_m_sq/n_m))
t
# Two-sided p-value.
# BUG FIX: pt(q = t, lower.tail = TRUE)*2 exceeds 1 whenever t > 0 (as it is
# here, since females have the higher mean); use |t| with the upper tail.
two_sided_diff_t_pval <- pt(q = abs(t), df = min(n_f, n_m)-1, lower.tail = FALSE)*2
two_sided_diff_t_pval
# Lower bound of Confidence Interval
(x_bar_f-x_bar_m)+(qt(0.025, min(n_f, n_m)-1)*sqrt((s_f_sq/n_f) + (s_m_sq/n_m)))
# Upper bound of Confidence Interval
(x_bar_f-x_bar_m)+(qt(0.975, min(n_f, n_m)-1)*sqrt((s_f_sq/n_f) + (s_m_sq/n_m)))
# Cross-check with R's built-in Welch t test.
t.test(processed_cleveland$chol[processed_cleveland$sex=="1"],
processed_cleveland$chol[processed_cleveland$sex=="0"])
table(processed_cleveland$sex)
# Bootstrap the difference in mean cholesterol (female - male) ---------------
num_sims <- 10000
# A vector to store my results
results <- rep(NA, num_sims)
# IMPROVEMENT: resample sizes were hard-coded as 207 and 96, which disagrees
# with the 205/302 counts used earlier and silently breaks if the filtered
# data changes; derive the group sizes from the data instead.
n_male_obs <- sum(processed_cleveland$sex == "1")
n_female_obs <- sum(processed_cleveland$sex == "0")
# Resample each group independently and record the difference in means.
for(i in 1:num_sims){
mean_male <- mean(sample(x = processed_cleveland$chol[processed_cleveland$sex == "1"],
size = n_male_obs,
replace = TRUE))
mean_female <- mean(sample(x = processed_cleveland$chol[processed_cleveland$sex == "0"],
size = n_female_obs,
replace = TRUE))
results[i] <- mean_female - mean_male
}
# Finally plot the results
hist(results, freq = FALSE, main='Sampling Distribution of the Sample Mean',
xlab = 'Average Difference clostral value ', ylab = 'Density')
# estimate a normal curve
lines(x = seq(-10, 50, .001), dnorm(seq(-10, 50, .001), mean = mean(results), sd = sd(results)))
# Bootstrap two-sided 95% percentile CI
c(quantile(results, c(.025, .975)))
# compare to our t-methods
t.test(processed_cleveland$chol[processed_cleveland$sex=="1"],
processed_cleveland$chol[processed_cleveland$sex=="0"])$conf.int
# Permutation test for the difference in mean cholesterol by sex -------------
set.seed(0)
num_sims <- 1000
# A vector to store my results
results_given_H0_true <- rep(NA, num_sims)
# A loop for completing the simulation
for(i in 1:num_sims){
# Under H0 (no relationship) the sex labels are exchangeable, so shuffle them.
shuffled_groups <- transform(processed_cleveland, sex=sample(sex))
mean_male <- mean(shuffled_groups$chol[shuffled_groups$sex == "1"])
mean_female <- mean(shuffled_groups$chol[shuffled_groups$sex == "0"])
results_given_H0_true[i] <- mean_female - mean_male
}
# Finally plot the results
hist(results_given_H0_true, freq = FALSE,
main='Dist. of the Diff in Sample Means Under Null',
xlab = 'Average Difference of clostreal level is under Null',
ylab = 'Density')
# Observed difference, computed as male - female.
# NOTE(review): the permuted statistic above is female - male, so the signs
# are flipped relative to this observed value; the tail logic below relies
# on that orientation - verify it matches the intended two-sided test.
diff_in_sample_means <- mean(processed_cleveland$chol[processed_cleveland$sex == "1"]) -
mean(processed_cleveland$chol[processed_cleveland$sex == "0"])
abline(v=diff_in_sample_means, col = "blue")
abline(v=abs(diff_in_sample_means), col = "red")
# counts of values more extreme than the test statistic in our original sample, given H0 is true
# two sided given the alternate hypothesis
count_of_more_extreme_lower_tail <- sum(results_given_H0_true <= diff_in_sample_means)
count_of_more_extreme_upper_tail <- sum(results_given_H0_true >= abs(diff_in_sample_means))
bootstrap_pvalue <- (count_of_more_extreme_lower_tail + count_of_more_extreme_upper_tail)/num_sims
## Permutation ("bootstrap") p-value
bootstrap_pvalue
## t-test p-value for comparison
t.test(processed_cleveland$chol[processed_cleveland$sex=="0"],
processed_cleveland$chol[processed_cleveland$sex=="1"])$p.value
# Two-sample z test: heart-disease proportion, female vs male ----------------
# Sample proportions of heart disease within each sex group.
p_hat_f <- sum(processed_cleveland$sex == "0" & processed_cleveland$target == "1")/sum(processed_cleveland$sex == "0")
p_hat_m <- sum(processed_cleveland$sex == "1" & processed_cleveland$target == "1")/sum(processed_cleveland$sex== "1")
p_hat_f
p_hat_m
# null hypothesized population prop difference between the two groups
p_0 <- 0
# sample size
n_f <- sum(processed_cleveland$sex == "0")
n_m <- sum(processed_cleveland$sex == "1")
n_f
n_m
# Variance terms p(1-p)/n for each group.
den_p_m <- (p_hat_m*(1-p_hat_m))/n_m
den_p_f <- (p_hat_f*(1-p_hat_f))/n_f
den_p_m
den_p_f
# z-test test statistic
z <- (p_hat_f - p_hat_m - p_0)/sqrt(den_p_f + den_p_m)
z
# Two-sided p-value.
# NOTE(review): pnorm(q = z, lower.tail = FALSE)*2 is only correct when
# z > 0 (true here since p_hat_f > p_hat_m); abs(z) would be robust.
two_sided_diff_prop_pval <- pnorm(q = z, lower.tail = FALSE)*2
two_sided_diff_prop_pval
# lower bound of the 95% CI for the difference
(p_hat_f - p_hat_m)+(qnorm(0.025)*sqrt(den_p_f + den_p_m))
# upper bound of the 95% CI for the difference
(p_hat_f - p_hat_m)+(qnorm(0.975)*sqrt(den_p_f + den_p_m))
# Bootstrap Approach
# Build 0/1 outcome vectors per sex group (1 = has heart disease).
female <- rep(c(1,0), c(sum(processed_cleveland$sex == "0" & processed_cleveland$target == "1"), n_f - sum(processed_cleveland$sex == "0" & processed_cleveland$target == "1")))
male<- rep(c(1, 0), c(sum(processed_cleveland$sex == "1" & processed_cleveland$target == "1"), n_m - sum(processed_cleveland$sex == "1" & processed_cleveland$target == "1")))
num_sims <- 10000
# A vector to store my results
results <- rep(NA, num_sims)
n_f
n_m
# Resample each group independently and record the difference of proportions.
for(i in 1:num_sims){
prop_female <- mean(sample(x = female,
size = n_f,
replace = TRUE))
prop_male<- mean(sample(x = male,
size = n_m,
replace = TRUE))
results[i] <- prop_female - prop_male
}
# Finally plot the results
hist(results, freq = FALSE, main='Dist. of the Diff in Prop',
xlab = 'Difference in Prop. male male and female having heart dieaseas ', ylab = 'Density')
# Bootstrap percentile 95% CI
c(quantile(results, c(.025, .975)))
# Normal-approximation 95% CI for comparison
c((p_hat_f - p_hat_m)+(qnorm(0.025)*sqrt(den_p_f + den_p_m)),
(p_hat_f - p_hat_m)+(qnorm(0.975)*sqrt(den_p_f + den_p_m)))
# Build one long 0/1 data frame: one row per patient, labelled by sex --------
df_combined <- data.frame("count_heart_disease" = c(female,male),
"sex" = rep(c("0", "1"), c(n_f, n_m
)))
df_combined
# Sanity checks
# BUG FIX: the original summarised df_combined$users, a column that does not
# exist (so summary() ran on NULL); use the actual outcome column.
summary(df_combined$count_heart_disease)
# Group means must reproduce the sample proportions computed earlier.
# NOTE(review): exact `==` on doubles works here only because both sides are
# the same sum/count arithmetic; isTRUE(all.equal(...)) would be safer.
mean(df_combined$count_heart_disease[df_combined$sex == "0"]) == p_hat_f
mean(df_combined$count_heart_disease[df_combined$sex == "1"]) == p_hat_m
# Permutation test: difference in heart-disease proportion between sexes -----
num_sims <- 1000
# A vector to store the permuted differences
results_given_H0_true <- rep(NA, num_sims)
# Under H0 (no relationship) the sex labels are exchangeable, so shuffle them.
for(i in 1:num_sims){
shuffled_groups <- transform(df_combined, sex = sample(sex))
prop_male <- mean(shuffled_groups$count_heart_disease[shuffled_groups$sex == "1"])
prop_female <- mean(shuffled_groups$count_heart_disease[shuffled_groups$sex == "0"])
results_given_H0_true[i] <- prop_female - prop_male
}
results_given_H0_true
# Plot the null distribution with the observed difference marked on both tails.
hist(results_given_H0_true, freq = FALSE,
main='Dist. of the Diff in Sample Sample Props Under Null',
xlab = 'Average Difference in Prop of male and female having heart diease ',
ylab = 'Density', xlim = c(-0.30, 0.30))
diff_in_sample_props <- p_hat_f - p_hat_m
abline(v=diff_in_sample_props, col = "blue")
abline(v=-diff_in_sample_props, col = "red")
# Counts of permuted values at least as extreme as the observed difference
# (two-sided; assumes diff_in_sample_props > 0, which holds for this data).
count_of_more_extreme_lower_tail <- sum(results_given_H0_true <= -diff_in_sample_props)
count_of_more_extreme_upper_tail <- sum(results_given_H0_true > diff_in_sample_props)
bootstrap_pvalue <- (count_of_more_extreme_lower_tail + count_of_more_extreme_upper_tail)/num_sims
count_of_more_extreme_lower_tail
# BUG FIX: the original printed the lower-tail count twice; show the upper tail.
count_of_more_extreme_upper_tail
# Permutation ("bootstrap") p-value
bootstrap_pvalue
# Normal-approximation p-value for comparison
two_sided_diff_prop_pval
# Chi-square goodness-of-fit of thal against a uniform distribution ----------
thal_tab <- table(processed_cleveland$thal)
thal_tab
prop.table(thal_tab)
# BUG FIX: the expected count (75.75) and the test statistic passed to
# pchisq (245.8185) were hard-coded magic numbers; compute both from the
# data so they stay consistent. (A stray trailing '|' after the original
# pchisq call - a syntax error - was also removed.)
expected <- sum(thal_tab) / length(thal_tab)
chisq_stat <- sum(((thal_tab - expected)^2) / expected)
chisq_stat
pchisq(chisq_stat, df = length(thal_tab) - 1, lower.tail = FALSE)
# Micropan - blast: benchmark the micropan BLAST pan-genome pipeline over
# several genome directories, 5 random replicates of 10 genomes each.
# Depends on the 'micropan' and 'pewit' packages (internal pewit API used).
library(micropan)
library(parallel)
setwd("/mnt/ubi/iferres/pewitEval/genomeSize/")
# One input directory per data set; only those named *_gff are processed.
dirs <- list.dirs(recursive = FALSE)
dirs <- grep('_gff', dirs, value = TRUE)
dir.create('micropan_blast_resu')
# Outer loop over directories (2 workers); inner loop over 5 replicates
# (5 workers) -> up to 10 concurrent processes.
fin <- mclapply(1:length(dirs), function(d){
gffs <- list.files(path = dirs[d], pattern = 'gff$', full.names = TRUE)
# Zero-padded genome identifiers GID001, GID002, ...
gid <- paste0("%0",nchar(length(gffs)),"d")
gid <- paste0('GID',sprintf(gid,1:length(gffs)))
# Map gff basename -> GID.
# NOTE(review): this reference CSV is rewritten by every outer iteration,
# so only the last directory's mapping survives - confirm that is intended.
ref <- cbind(sapply(strsplit(gffs,'/'),function(x){rev(x)[1]}), gid)
write.csv(ref, file = 'micropan_blast_resu/ref_gid.csv',quote = F)
# Five replicates, each on a reproducible (set.seed(i)) sample of 10 genomes.
df <- mclapply(1:5, function(i){
set.seed(i)
gfs <- sample(gffs, 10)
# NOTE(review): sub() replaces only the first '.' or '/' in dirs[d]
# ("./name" -> "/name"), so the output path becomes
# 'micropan_blast_resu/name_out_i/' - it works, but gsub() may have
# been intended.
out <- paste0('micropan_blast_resu',sub('[./]','',dirs[d]),'_out_',i,'/')
dir.create(out)
# Extract protein FASTA (.faa) files from each sampled gff.
faas <- sapply(gfs, function(x){
pewit:::extractSeqsFromGff3(x,
in.path = out,
keep = 'none',
write.in.path = 'aa')
paste0(out,sub('gff$','faa', rev(strsplit(x,'/')[[1]])[1]))
})
# Prepare each FASTA for micropan, tagging sequences with their GID.
pprep <- sapply(faas, function(x){
gd <- ref[which(ref[,1]==sub('faa$','gff',rev(strsplit(x,'/')[[1]])[1])),2]
panPrep(x, GID.tag = gd, out.file = x,protein = TRUE)
sfx <- paste0('_',gd,'.faa')
sub('[.]faa$',sfx, x)
})
#out blast folder
oblf <- paste0(out,'blast_out')
dir.create(oblf)
#SPECIFY CHANGED EVALUE FROM DEFAULT (1)!!
# Run all-vs-all BLAST, cluster, and build the pan-matrix.
runMicropanBlast <- function(d, i, pprep, oblf){
# Pseudo-unique numeric job id built from d, i and two random 2-digit codes.
njb <- paste0(d,i,sprintf('%02d',sample(1:20, 2)), collapse = '')
blastAllAll(pprep,
oblf,
e.value = 1e-10,
threads = 1L,
job = as.integer(njb),
verbose = FALSE)
bl <- list.files(path = oblf, full.names = TRUE)
df <- bDist(bl, verbose = FALSE)
bcl <- bClust(df)
pm <- panMatrix(bcl)
return(pm)
}
# Time the full BLAST+clustering run for this replicate.
stime <- system.time(xx <- runMicropanBlast(d = d,
i = i,
pprep = pprep,
oblf = oblf))
write.table(xx, file = paste0(out,'panmatrix_micropan.tsv'),
quote = F, sep = '\t', row.names = T, col.names = T)
# Summary record for this replicate; o[1] <- list(gfs) turns o into a list.
o <- NULL
o[1] <- list(gfs)
o[2] <- out
o[3] <- sum(xx)
o[4] <- dim(xx)[1]
o[5] <- dim(xx)[2]
# Core = clusters present exactly once in every genome.
o[6] <- length(which(apply(xx,2,function(x){all(x==1L)})))
# Soft-core = clusters present in >= 95% of genomes (minus one).
o[7] <- length(which(colSums(xx)>=round((dim(xx)[1]-1)*0.95)))
# Singletons = clusters seen in exactly one genome.
o[8] <- length(which(colSums(xx)==1))
# Accessory = below the 95% threshold, excluding singletons.
o[9] <- length(which(colSums(xx)<round(dim(xx)[1]*0.95)))-o[[8]][1]
# Elapsed wall-clock seconds.
o[10] <- stime[[3]]
names(o) <- c("Orgs", "OutDir", "Num_CDS", "Num_Orgs",
"Num_Clusters", "Core", "SCore", "Singles",
"Accs", "Sys_time")
return(o)
}, mc.cores = 5)
# One row per replicate for this directory.
resu <- do.call(rbind, df)
return(resu)
}, mc.cores = 2)
saveRDS(fin, file = 'micropan_blast_resu/resu_genomeSize.RDS')
saveRDS(fin, file = 'micropan_blast_resu/resu_genomeSize.RDS') | /micropan_blast_genomeSize.R | no_license | iferres/pangenomeEval_scripts | R | false | false | 3,090 | r | #Micropan - blast
library(micropan)
library(parallel)
setwd("/mnt/ubi/iferres/pewitEval/genomeSize/")
dirs <- list.dirs(recursive = FALSE)
dirs <- grep('_gff', dirs, value = TRUE)
dir.create('micropan_blast_resu')
fin <- mclapply(1:length(dirs), function(d){
gffs <- list.files(path = dirs[d], pattern = 'gff$', full.names = TRUE)
gid <- paste0("%0",nchar(length(gffs)),"d")
gid <- paste0('GID',sprintf(gid,1:length(gffs)))
ref <- cbind(sapply(strsplit(gffs,'/'),function(x){rev(x)[1]}), gid)
write.csv(ref, file = 'micropan_blast_resu/ref_gid.csv',quote = F)
df <- mclapply(1:5, function(i){
set.seed(i)
gfs <- sample(gffs, 10)
out <- paste0('micropan_blast_resu',sub('[./]','',dirs[d]),'_out_',i,'/')
dir.create(out)
faas <- sapply(gfs, function(x){
pewit:::extractSeqsFromGff3(x,
in.path = out,
keep = 'none',
write.in.path = 'aa')
paste0(out,sub('gff$','faa', rev(strsplit(x,'/')[[1]])[1]))
})
pprep <- sapply(faas, function(x){
gd <- ref[which(ref[,1]==sub('faa$','gff',rev(strsplit(x,'/')[[1]])[1])),2]
panPrep(x, GID.tag = gd, out.file = x,protein = TRUE)
sfx <- paste0('_',gd,'.faa')
sub('[.]faa$',sfx, x)
})
#out blast folder
oblf <- paste0(out,'blast_out')
dir.create(oblf)
#SPECIFY CHANGED EVALUE FROM DEFAULT (1)!!
runMicropanBlast <- function(d, i, pprep, oblf){
njb <- paste0(d,i,sprintf('%02d',sample(1:20, 2)), collapse = '')
blastAllAll(pprep,
oblf,
e.value = 1e-10,
threads = 1L,
job = as.integer(njb),
verbose = FALSE)
bl <- list.files(path = oblf, full.names = TRUE)
df <- bDist(bl, verbose = FALSE)
bcl <- bClust(df)
pm <- panMatrix(bcl)
return(pm)
}
stime <- system.time(xx <- runMicropanBlast(d = d,
i = i,
pprep = pprep,
oblf = oblf))
write.table(xx, file = paste0(out,'panmatrix_micropan.tsv'),
quote = F, sep = '\t', row.names = T, col.names = T)
o <- NULL
o[1] <- list(gfs)
o[2] <- out
o[3] <- sum(xx)
o[4] <- dim(xx)[1]
o[5] <- dim(xx)[2]
o[6] <- length(which(apply(xx,2,function(x){all(x==1L)})))
o[7] <- length(which(colSums(xx)>=round((dim(xx)[1]-1)*0.95)))
o[8] <- length(which(colSums(xx)==1))
o[9] <- length(which(colSums(xx)<round(dim(xx)[1]*0.95)))-o[[8]][1]
o[10] <- stime[[3]]
names(o) <- c("Orgs", "OutDir", "Num_CDS", "Num_Orgs",
"Num_Clusters", "Core", "SCore", "Singles",
"Accs", "Sys_time")
return(o)
}, mc.cores = 5)
resu <- do.call(rbind, df)
return(resu)
}, mc.cores = 2)
saveRDS(fin, file = 'micropan_blast_resu/resu_genomeSize.RDS') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.