content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(dplyr)
## Read the feature (column) names for the UCI HAR measurement files.
## NOTE(review): "feacture" is a typo, kept because downstream code refers
## to features$name.feacture.
features <- read.table("./UCI HAR Dataset/features.txt")
str(features)
NROW(features)
features <- rename(features, id = V1, name.feacture = V2)
colnames(features)
## Read X_train.txt (training-set measurements).
train <- read.table("./UCI HAR Dataset/train/X_train.txt")
NCOL(train)
colnames(train) <- features$name.feacture
colnames(train)
subjecttrain = read.table('./UCI HAR Dataset/train/subject_train.txt')
NROW(subjecttrain)
## Read training activity labels.
ytrain = read.table('./UCI HAR Dataset/train/y_train.txt')
# Read test-set measurements.
test = read.table('./UCI HAR Dataset/test/X_test.txt')
NCOL(test)
colnames(test) <- features$name.feacture
subjecttest = read.table('./UCI HAR Dataset/test/subject_test.txt')
## Read test activity labels.
ytest = read.table('./UCI HAR Dataset/test/y_test.txt')
## 1. Merges the training and the test sets to create one data set.
## Stack the training rows on top of the test rows.
setdata <- rbind(train, test)
NROW(setdata)
## Activity labels (still numeric codes at this point).
activitylabels <- rbind(ytrain, ytest)
activitylabels
dim(activitylabels)
dataSubject <- rbind(subjecttrain, subjecttest)
dataSubject <- rename(dataSubject, subject = V1)
## Combine subject ids, activity codes and the measurements column-wise.
df <- cbind(dataSubject, activitylabels)
df <- rename(df, Activity = V1)
dffinal <- cbind(setdata, df)
colnames(dffinal)
## 2. Extracts only the measurements on the mean and
## standard deviation for each measurement.
## \\< and \\> are word-boundary regular expressions, e.g.
## grep("\\<mean\\>", features$name.feacture) matches the whole word "mean".
subdataFeaturesNames<-features$name.feacture[grep("\\<mean\\>|\\<std\\>", features$name.feacture)]
NROW(subdataFeaturesNames)
## 3. Uses descriptive activity names to name the activities in the data set
## (the numeric code is used as a row index into activity_labels.txt).
activitylabels[, 1] <- read.table("./UCI HAR Dataset/activity_labels.txt")[activitylabels[, 1], 2]
names(activitylabels) <- "Activity"
View(activitylabels)
## 4. Appropriately labels the data set with descriptive variable names.
## Replace the abbreviated column-name fragments with clearer ones.
names(setdata)<-gsub("^t", "time", names(setdata))
names(setdata)<-gsub("^f", "frequency", names(setdata))
names(setdata)<-gsub("Acc", "Accelerometer", names(setdata))
names(setdata)<-gsub("Gyro", "Gyroscope", names(setdata))
names(setdata)<-gsub("Mag", "Magnitude", names(setdata))
names(setdata)<-gsub("BodyBody", "Body", names(setdata))
View(setdata)
## 5. From the data set in step 4, creates a second,
## independent tidy data set with the average of each
## variable for each activity and each subject.
library(reshape2)
colnames(dffinal)
## Melt to long format keyed by subject and Activity, then average every
## variable within each subject/Activity pair.
subjectMelt <- melt(dffinal, id=c("subject","Activity"))
head(subjectMelt,n=3)
finaldcast <- dcast(subjectMelt,subject+ Activity ~ variable,mean)
head(finaldcast, n=3)
## Write the tidy data set to disk.
## NOTE(review): "row.name" relies on partial argument matching of
## write.table's row.names argument -- it works, but spell it out.
write.table(finaldcast, file = "tidydataset.txt", row.name = FALSE)
| /run_analysis.R | no_license | sandrarairan/Getting-and-Cleaning-Data-Course-Project | R | false | false | 2,818 | r | library(dplyr)
## read feactures
features <- read.table("./UCI HAR Dataset/features.txt")
str(features)
NROW(features)
features <- rename(features, id = V1, name.feacture = V2)
colnames(features)
## read X_train.txt" Training set
train <- read.table("./UCI HAR Dataset/train/X_train.txt")
NCOL(train)
colnames(train) <- features$name.feacture
colnames(train)
subjecttrain = read.table('./UCI HAR Dataset/train/subject_train.txt')
NROW(subjecttrain)
## read Training labels.
ytrain = read.table('./UCI HAR Dataset/train/y_train.txt')
# read Test set
test = read.table('./UCI HAR Dataset/test/X_test.txt')
NCOL(test)
colnames(test) <- features$name.feacture
subjecttest = read.table('./UCI HAR Dataset/test/subject_test.txt')
## read Test labels.
ytest = read.table('./UCI HAR Dataset/test/y_test.txt')
## 1. Merges the training and the test sets to create one data set.
##Merge Training set y Test set
setdata <- rbind(train, test)
NROW(setdata)
## activity_labels
activitylabels <- rbind(ytrain, ytest)
activitylabels
dim(activitylabels)
dataSubject <- rbind(subjecttrain, subjecttest)
dataSubject <- rename(dataSubject, subject = V1)
## merge
df <- cbind(dataSubject, activitylabels)
df <- rename(df, Activity = V1)
dffinal <- cbind(setdata, df)
colnames(dffinal)
## 2. Extracts only the measurements on the mean and
#standard deviation for each measurement.
## You can use \< Y \> regular expressions to match the beginning / end of the Word grep ("\\<mean\\>", features$name.feacture)
subdataFeaturesNames<-features$name.feacture[grep("\\<mean\\>|\\<std\\>", features$name.feacture)]
NROW(subdataFeaturesNames)
## 3. Uses descriptive activity names to name the activities in the data set
activitylabels[, 1] <- read.table("./UCI HAR Dataset/activity_labels.txt")[activitylabels[, 1], 2]
names(activitylabels) <- "Activity"
View(activitylabels)
## 4. Appropriately labels the data set with descriptive variable names
## se cambian los nombres de las columnas con nombres mas claros
names(setdata)<-gsub("^t", "time", names(setdata))
names(setdata)<-gsub("^f", "frequency", names(setdata))
names(setdata)<-gsub("Acc", "Accelerometer", names(setdata))
names(setdata)<-gsub("Gyro", "Gyroscope", names(setdata))
names(setdata)<-gsub("Mag", "Magnitude", names(setdata))
names(setdata)<-gsub("BodyBody", "Body", names(setdata))
View(setdata)
## 5. From the data set in step 4, creates a second,
##independent tidy data set with the average of each
##variable for each activity and each subject.
library(reshape2)
colnames(dffinal)
subjectMelt <- melt(dffinal, id=c("subject","Activity"))
head(subjectMelt,n=3)
finaldcast <- dcast(subjectMelt,subject+ Activity ~ variable,mean)
head(finaldcast, n=3)
## tidy dataset file
write.table(finaldcast, file = "tidydataset.txt", row.name = FALSE)
|
zeroInd <-
function(Amat, r){
  # Randomly flag a fraction r of the zero entries and a fraction r of the
  # nonzero off-diagonal entries of a symmetric matrix.
  #
  # Args:
  #   Amat: symmetric (p x p) numeric matrix.
  #   r:    fraction in [0, 1] of candidate positions to sample, applied
  #         separately to the zero and nonzero strict-lower-triangle positions
  #         (the count is truncated to an integer by sample()).
  #
  # Returns a list with:
  #   zeroArr: (row, col) index matrix of the sampled zero positions.
  #   zeroMat: symmetric 0/1 matrix marking the sampled zero positions.
  #   oneMat:  symmetric 0/1 matrix (zero diagonal) marking the sampled
  #            nonzero positions; all-zero (with a warning) if Amat is zero.
  if (sum(t(Amat)!=Amat)>0){
    stop("This method only works for symmetric matrix!")
  }
  p <- dim(Amat)[1]
  oneMat <- matrix(0, p, p)
  zeroMat <- matrix(0, p, p)
  one.pos <- which(Amat!=0, arr.ind = TRUE)
  zero.pos <- which(Amat==0, arr.ind = TRUE)
  # Keep only the strict lower triangle. drop = FALSE prevents the result
  # from collapsing to a plain vector when a single position remains; the
  # collapse made dim(zero.pos)[1] NULL (so seq(1, NULL) errored) and made
  # zeroMat[zero.pos[sel.zero, ]] index linearly instead of by (row, col).
  zero.pos <- zero.pos[which(zero.pos[,1] > zero.pos[,2]), , drop = FALSE]
  # seq_len is safe when there are zero candidate rows (1:0 would be c(1, 0)).
  sel.zero <- sample(seq_len(dim(zero.pos)[1]), r * dim(zero.pos)[1], replace = FALSE)
  zeroArr <- zero.pos[sel.zero, , drop = FALSE]
  zeroMat[zeroArr] <- 1
  zeroMat <- zeroMat + t(zeroMat)
  out <- list()
  out$zeroArr = zeroArr
  out$zeroMat = zeroMat
  if (dim(one.pos)[1] == 0){
    warning("The matrix is zero!")
    out$oneMat = matrix(0, p, p)
  } else
  {
    # Same drop = FALSE guard as above (replaces the old is.null(dim()) fixup).
    one.pos <- one.pos[which(one.pos[,1] > one.pos[,2]), , drop = FALSE]
    sel.one <- sample(seq_len(dim(one.pos)[1]), r * dim(one.pos)[1], replace = FALSE)
    oneMat[one.pos[sel.one, , drop = FALSE]] <- 1
    oneMat <- oneMat + t(oneMat)
    # The diagonal can never be sampled (strict lower triangle), but keep the
    # explicit reset for safety.
    diag(oneMat) <- 0
    out$oneMat = oneMat
  }
  return(out)
}
| /netgsa/R/zeroInd.R | no_license | ingted/R-Examples | R | false | false | 1,106 | r | zeroInd <-
function(Amat, r){
if (sum(t(Amat)!=Amat)>0){
stop("This method only works for symmetric matrix!")
}
p <- dim(Amat)[1]
oneMat <- matrix(0, p, p)
zeroMat <- matrix(0, p, p)
one.pos <- which(Amat!=0, arr.ind = TRUE)
zero.pos <- which(Amat==0, arr.ind = TRUE)
zero.pos <- zero.pos[which(zero.pos[,1] > zero.pos[,2]) ,]
sel.zero <- sample(seq(1, dim(zero.pos)[1]), r * dim(zero.pos)[1], replace = FALSE)
zeroMat[zero.pos[sel.zero, ]] <- 1
zeroMat <- zeroMat + t(zeroMat)
zeroArr <- zero.pos[sel.zero, ]
out <- list()
out$zeroArr = zeroArr
out$zeroMat = zeroMat
if (dim(one.pos)[1] == 0){
warning("The matrix is zero!")
out$oneMat = matrix(0, p, p)
} else
{
one.pos <- one.pos[which(one.pos[,1] > one.pos[,2]) ,]
if (is.null(dim(one.pos))){
one.pos = matrix(one.pos, nrow = 1)
}
sel.one <- sample(seq(1, dim(one.pos)[1]), r * dim(one.pos)[1], replace = FALSE)
oneMat[one.pos[sel.one, ]] <- 1
oneMat <- oneMat + t(oneMat)
diag(oneMat) <- 0
out$oneMat = oneMat
}
return(out)
}
|
#Uses a montecarlo simulation to estimate parameters
# Monte Carlo study of the OLS estimator of b1 when the fitted model omits X3
# (the outcome is generated from X1, X2 and X3, but only X1 and X2 are
# regressed on).
#
# Args:
#   ns       - sample size per replication
#   n        - number of Monte Carlo replications
#   b1,b2,b3 - coefficients of the outcome equation
#   a1,a2,a3 - coefficients generating the regressor X1
#   sigma    - scale of the error term U
#   dist     - "norm" for standard-normal errors, anything else for log-normal
#
# Prints bias, sd, 95% CI coverage rate and mean CI width for the b1
# estimates, and invisibly returns them as a list.
#
# Fixes vs. the previous version: bias is measured against the b1 argument
# (not the literal 1); the CI width is averaged over all replications (not
# just the last one); result vectors are preallocated instead of grown with
# append().
regMC=function(ns = 20, n=1000, b1=1, b2=1, b3=0, a1=1, a2=1, a3=0, sigma=1,dist="norm")
{
  b1list = vector(mode="numeric",length=n)
  inInterval = vector(mode="logical",length=n)
  intervalrange = vector(mode="numeric",length=n)
  for(j in seq_len(n))
  {
    # Draw the exogenous variables and the first-stage error.
    Z1 = rnorm(n=ns,mean=0,sd=1)
    X2 = rnorm(n=ns,mean=0,sd=1)
    X3 = rnorm(n=ns,mean=0,sd=1)
    V = rnorm(n=ns,mean=0,sd=1)
    if (dist == "norm")
    {
      U = rnorm(n=ns,mean=0,sd=1)
    }
    else
    {
      # rlnorm's parameters are meanlog/sdlog; spell them out instead of
      # relying on partial argument matching.
      U = rlnorm(n=ns,meanlog=0,sdlog=1)
    }
    #Generate X1 and Y
    X1 = a1*Z1 + a2*X2 + a3*X3 +V
    Y = b1*X1 + b2*X2 +b3*X3 + sigma*U
    # Fit the (possibly misspecified) model that omits X3.
    hat = lm(Y ~ X1+X2)
    b1list[j] = hat$coefficients["X1"]
    # Does the 95% confidence interval for X1 cover the true b1?
    interval = confint(hat,parm = "X1")
    inInterval[j] = b1<interval[2] & b1>interval[1]
    intervalrange[j] = interval[2]-interval[1]
  }
  cat("\n","bias=",(mean(b1list)-b1),"sd=",(sd(b1list)),"CoverageRate=",(mean(inInterval)),"IntervalRange=",(mean(intervalrange)))
  # Also return the summary statistics invisibly so the simulation can be
  # used programmatically as well as printed.
  invisible(list(bias = mean(b1list)-b1, sd = sd(b1list),
                 coverage = mean(inInterval),
                 interval_range = mean(intervalrange)))
}
regMC=function(ns = 20, n=1000, b1=1, b2=1, b3=0, a1=1, a2=1, a3=0, sigma=1,dist="norm")
{
b1list = vector(mode="numeric",length=0)
inInterval=vector(mode="logical",length=0)#so that append method works
for(j in 1:n)
{
#__init__
Z1 = rnorm(n=ns,mean=0,sd=1)
X2 = rnorm(n=ns,mean=0,sd=1)
X3 = rnorm(n=ns,mean=0,sd=1)
V = rnorm(n=ns,mean=0,sd=1)
if (dist == "norm")
{
U = rnorm(n=ns,mean=0,sd=1)
}
else
{
U = rlnorm(n=ns,mean=0,sd=1)
}
#Generate X1 and Y
X1 = a1*Z1 + a2*X2 + a3*X3 +V
Y = b1*X1 + b2*X2 +b3*X3 + sigma*U
#Estimates
hat = lm(Y ~ X1+X2)
b1list=append(b1list,hat$coefficients["X1"])
#In confidence interval?
interval = confint(hat,parm = "X1",interval="confidence")
inInterval = append(inInterval,b1<interval[2]&b1>interval[1])
intervalrange = interval[2]-interval[1]
}
cat("\n","bias=",(mean(b1list)-1),"sd=",(sd(b1list)),"CoverageRate=",(mean(inInterval)),"IntervalRange=",(mean(intervalrange)))
} |
# Fuzzing regression input for CNull:::communities_individual_based_sampling_alpha:
# m is NULL, repetitions is 0L, and in_m is a 5 x 7 matrix whose first four
# entries (column-major) are extreme double values; the rest are zeros.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.44305385403198e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# Call the internal routine with the generated arguments and print the
# structure of whatever it returns (the point is that it must not crash).
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
########################################################################
###################### Combine ENVILOG #################################
########################################################################
# autor: Marvin Lorff
# date: 28.01.2021
# version: 02.02
# TODO: wirte some tests for input parameter and error/warnings
# TODO: load all plots/Subplots at once
# TODO: für den subplot ochsenhausen Fichte ungdüngt werden die Sensor namen nicht umgeschrieben.
# check LoggerImorts for bug fixing
# load functions and libraries
library(tidyverse)
library(lubridate)
source("functions/readEnvilog.R")
source("functions/check_for_ts_gaps.R")
source("functions/comtodot.R")
source("functions/countna.R")
#set directory to the Esslingen (ENVILOG) and the choosen year
#
# plot_name<- "Rotenfels"
# subplot_name <- "Fichte"
# # w?hle das jahr das zusammengefasst werden soll
# year <- 2021
# path <- "O:/PROJEKT/NIEDER/LOGGER/ROTENFEL/Rotenfels_Fichte_Envilog/2021"
# LoggerExport = T # erzeugt eine Datei im path_out,
# # welche einer Loggerdatei des jeweilige Formats entspricht,
# # und so über die Web oberfläche der Datenbank hochgeladen werden kann
# path_out <- "W:/R/Datamanagement-2021data-edit/data/" # defriniert den outpath für die Loggerdatei
# long_data <- T # speichert die daten in R im "long-format"
### Initialies funktion to run
combine_Envilog_files <- function(path, plot_name, subplot_name, year, LoggerExport = T, path_out, long_data =T){
  # Combine all Envilog CSV logger exports found in `path` for one plot /
  # subplot into a single de-duplicated data set restricted to `year`.
  #
  # Args:
  #   path         - directory containing the per-download Envilog *.csv files
  #   plot_name    - plot name (first 2 characters used as file abbreviation)
  #   subplot_name - subplot name (first 2 characters used as file abbreviation)
  #   year         - calendar year to keep
  #   LoggerExport - if TRUE, write a combined logger-format CSV to path_out
  #   path_out     - output directory for the combined logger file
  #   long_data    - if TRUE, return long-format data, otherwise wide
  #
  # Returns a long-format data frame (Plot, SubPlot, Datum, variable, value)
  # when long_data = TRUE, otherwise the wide de-duplicated data frame.
  #
  # NOTE(review): depends on readEnvilog() and check_for_ts_gaps() sourced at
  # the top of this file, and on tidyverse/lubridate being attached.
  print(c(plot_name, subplot_name))
  abbr.plot <- substring(plot_name, 1,2)
  abbr.sub <- substring(subplot_name, 1,2)
  # Collect all csv file paths from the data directory.
  l.paths <- list.files(path = path, pattern = "*.csv", full.names = T)
  if (length(l.paths)== 0){
    print("No Data found in directory or directory notexisting")
    stop()
  }
  # Gather data from all files; readEnvilog (from LoggerImports) makes sure
  # the sensors have identical and consistent column names.
  dat <- l.paths %>% map_df( ~ readEnvilog(.))
  #-----------------------------------------------------------------------------------
  ### Check Data
  # check time and other column classes
  # str(dat)
  # check data ranges and nas
  # summary(dat)
  # 1. check for duplicated timestamp entries
  dup_check <- sum(duplicated(dat$Datum))
  if (dup_check != 0){
    dup_dat <- dat[duplicated(dat$Datum),]
    # we could possibly do more with these duplicated entries
    # NOTE(review): dup_dat is never used afterwards; duplicates are simply
    # dropped via distinct() below.
  }
  # class(gaps_alt$Dat_diff)
  # as.difftime(gaps_alt$Dat_diff, format= "%H:%M")
  # as.numeric(gaps_alt$Dat_diff, units = "days")
  # format(gaps_alt$Dat_diff)
  # 3. check for right date-time format und tz
  # tz(dat$Datum); class(dat$Datum); range(dat$Datum)
  # dat %>% filter( Datum >= "2021-01-01 00:00:00 UTC")
  # sum(duplicated(dat$Datum))
  # 4. check data consistency
  #table(dat$Kanäle)
  #--------------------------------------------------------------------------------------
  # Edit data: sort by timestamp, drop duplicated timestamps and keep only
  # the observations that fall inside the chosen year.
  dat1 <-
    dat %>% arrange(Datum) %>%
    distinct(Datum, .keep_all= T) %>%
    #filter(!duplicated(dat$Datum)) %>%
    filter(year(Datum) >= year) %>%
    filter(year(Datum) < year+1)
  # (debug) create an artificial gap:
  #dat <- dat[-c(3000:4200),]
  # 2. check for missing data (gaps larger than 24 h in the time series)
  gaps <- check_for_ts_gaps(ts= dat1$Datum, max_diff= 24*60, list =F)
  print(gaps)
  print(paste("Es wurden Daten vom" , range(dat1$Datum, na.rm=T)[1], "bis zum", range(dat1$Datum, na.rm=T)[2], "gefunden und zusammengefasst"))
  # l <- unique(date(dat1$Datum))
  # t <- seq.Date(from = ymd(paste0(year, "-01-01")), to = ymd(paste0(year, "-12-31")), by = 1)
  # print(paste(" Von", length(t), "Tagen im Jahr", year," wurden", sum(l == t), "aufeinanderfolgenden Tage zusammengefasst." ))# alle Tage vorhanden!
  ### CREATE LOGGER EXPORT FILE
  if (LoggerExport == T){
    # Prepare data for export in logger format: German date strings plus a
    # running row number in the first column.
    dat_exp <- dat1 %>%
      mutate(Datum = format(dat1$Datum, format = "%d.%m.%Y %H:%M")) %>%
      mutate(No = seq(1:nrow(.))) %>% select(No, everything())
    # Export: write the combined table for the chosen year, prefixed with
    # the logger header line expected by the database upload interface.
    writeLines("Logger: #D3000C 'Esslingen_Fi_FVA_1' - USP_EXP2 - (CGI) Expander for GP5W - (V2.60, Mai 12 2013)", paste0(path_out, abbr.plot, "_Level2",abbr.sub, "_Envilog__", year,"_combine.csv"))
    suppressWarnings(write.table(dat_exp, file=paste0(path_out, abbr.plot, "_Level2",abbr.sub, "_Envilog__", year,"_combine.csv"), sep = ";", dec=",", col.names = TRUE, append= TRUE, quote = F, row.names = F, na = ""))
    print(paste( "Es wurde eine Logger-Combi-Datei datei für das Jahr", year, "erstellt und unter", path_out, "gespeichert."))
  }
  ### CREATE R DataFrame for further use
  if (long_data == T){
    # Reshape to long format and tag every row with its plot / subplot.
    dat_long <- dat1 %>% pivot_longer(. , cols= -Datum, names_to = "variable", values_to = "value") %>%
      mutate(Plot = plot_name, SubPlot = subplot_name) %>%
      select(Plot, SubPlot, Datum, variable, value)
    return(dat_long)
  }
  else(
    return(dat1)
  )
}# end of function
#testing funktion
#dat <- combine_Envilog_files(path=path, plot_name = plot_name, subplot_name = subplot_name, year = year, LoggerExport = LoggerExport, long_data = long_data, path_out = path_out)
| /Combine_EnvilogFiles.R | no_license | ml271/Datamanagement | R | false | false | 5,447 | r | ########################################################################
###################### Combine ENVILOG #################################
########################################################################
# autor: Marvin Lorff
# date: 28.01.2021
# version: 02.02
# TODO: wirte some tests for input parameter and error/warnings
# TODO: load all plots/Subplots at once
# TODO: für den subplot ochsenhausen Fichte ungdüngt werden die Sensor namen nicht umgeschrieben.
# check LoggerImorts for bug fixing
# load functions and libraries
library(tidyverse)
library(lubridate)
source("functions/readEnvilog.R")
source("functions/check_for_ts_gaps.R")
source("functions/comtodot.R")
source("functions/countna.R")
#set directory to the Esslingen (ENVILOG) and the choosen year
#
# plot_name<- "Rotenfels"
# subplot_name <- "Fichte"
# # w?hle das jahr das zusammengefasst werden soll
# year <- 2021
# path <- "O:/PROJEKT/NIEDER/LOGGER/ROTENFEL/Rotenfels_Fichte_Envilog/2021"
# LoggerExport = T # erzeugt eine Datei im path_out,
# # welche einer Loggerdatei des jeweilige Formats entspricht,
# # und so über die Web oberfläche der Datenbank hochgeladen werden kann
# path_out <- "W:/R/Datamanagement-2021data-edit/data/" # defriniert den outpath für die Loggerdatei
# long_data <- T # speichert die daten in R im "long-format"
### Initialies funktion to run
combine_Envilog_files <- function(path, plot_name, subplot_name, year, LoggerExport = T, path_out, long_data =T){
print(c(plot_name, subplot_name))
abbr.plot <- substring(plot_name, 1,2)
abbr.sub <- substring(subplot_name, 1,2)
#get all csv-file paths from directory
l.paths <- list.files(path = path, pattern = "*.csv", full.names = T)
if (length(l.paths)== 0){
print("No Data found in directory or directory notexisting")
stop()
}
#gather data form files, use readEnvilog from LoggerImports
#to make sure Sensors have identical and consistent colomns names
dat <- l.paths %>% map_df( ~ readEnvilog(.))
#-----------------------------------------------------------------------------------
### Check Data
# check time and other column classes
# str(dat)
# check data ranges and nas
# summary(dat)
# 1. check duplicated entries
dup_check <- sum(duplicated(dat$Datum))
if (dup_check != 0){
dup_dat <- dat[duplicated(dat$Datum),]
# we could do possible mor with these dublicated entries
}
# class(gaps_alt$Dat_diff)
# as.difftime(gaps_alt$Dat_diff, format= "%H:%M")
# as.numeric(gaps_alt$Dat_diff, units = "days")
# format(gaps_alt$Dat_diff)
# 3. check for right date-time format und tz
# tz(dat$Datum); class(dat$Datum); range(dat$Datum)
# dat %>% filter( Datum >= "2021-01-01 00:00:00 UTC")
# sum(duplicated(dat$Datum))
# 4. check data consistency
#table(dat$Kanäle)
#--------------------------------------------------------------------------------------
# edit data, date time format, kick duplicated entries and filter for the choosen year
dat1 <-
dat %>% arrange(Datum) %>%
distinct(Datum, .keep_all= T) %>%
#filter(!duplicated(dat$Datum)) %>%
filter(year(Datum) >= year) %>%
filter(year(Datum) < year+1)
#create LÜCKE
#dat <- dat[-c(3000:4200),]
# 2. check for missing data
gaps <- check_for_ts_gaps(ts= dat1$Datum, max_diff= 24*60, list =F)
print(gaps)
print(paste("Es wurden Daten vom" , range(dat1$Datum, na.rm=T)[1], "bis zum", range(dat1$Datum, na.rm=T)[2], "gefunden und zusammengefasst"))
# l <- unique(date(dat1$Datum))
# t <- seq.Date(from = ymd(paste0(year, "-01-01")), to = ymd(paste0(year, "-12-31")), by = 1)
# print(paste(" Von", length(t), "Tagen im Jahr", year," wurden", sum(l == t), "aufeinanderfolgenden Tage zusammengefasst." ))# alle Tage vorhanden!
### CREATE LOGGER EXPORT FILE
if (LoggerExport == T){
# prepare data for export in Logger-format
dat_exp <- dat1 %>%
mutate(Datum = format(dat1$Datum, format = "%d.%m.%Y %H:%M")) %>%
mutate(No = seq(1:nrow(.))) %>% select(No, everything())
# Export data
# erstelle ein zusammengefasste tabelle f?r das jeweilige jahr
writeLines("Logger: #D3000C 'Esslingen_Fi_FVA_1' - USP_EXP2 - (CGI) Expander for GP5W - (V2.60, Mai 12 2013)", paste0(path_out, abbr.plot, "_Level2",abbr.sub, "_Envilog__", year,"_combine.csv"))
suppressWarnings(write.table(dat_exp, file=paste0(path_out, abbr.plot, "_Level2",abbr.sub, "_Envilog__", year,"_combine.csv"), sep = ";", dec=",", col.names = TRUE, append= TRUE, quote = F, row.names = F, na = ""))
print(paste( "Es wurde eine Logger-Combi-Datei datei für das Jahr", year, "erstellt und unter", path_out, "gespeichert."))
}
### CREATE R DataFrame for further use
if (long_data == T){
# prepare data in R-Format
dat_long <- dat1 %>% pivot_longer(. , cols= -Datum, names_to = "variable", values_to = "value") %>%
mutate(Plot = plot_name, SubPlot = subplot_name) %>%
select(Plot, SubPlot, Datum, variable, value)
return(dat_long)
}
else(
return(dat1)
)
}# end of function+
#testing funktion
#dat <- combine_Envilog_files(path=path, plot_name = plot_name, subplot_name = subplot_name, year = year, LoggerExport = LoggerExport, long_data = long_data, path_out = path_out)
|
# Tests for installing the dcm2niix binary (precompiled and built from source).
context("installing from source")
test_that("install_dcm2nii", {
  # Install the precompiled binary into a fresh temp directory; a second
  # call should detect the existing installation and return TRUE.
  install_dir = tempfile()
  dir.create(install_dir, showWarnings = FALSE)
  install_dcm2nii(progdir = install_dir)
  expect_true(install_dcm2nii(progdir = install_dir))
})
test_that("install_dcm2nii source", {
  # Building from source requires cmake/make, so skip on AppVeyor and run
  # the build only when cmake is actually found on the PATH.
  testthat::skip_on_appveyor()
  install_dir = tempfile()
  dir.create(install_dir, showWarnings = FALSE)
  cmake = Sys.which("cmake")
  make = Sys.which("make")
  message("make is ", make)
  # Sys.which returns "" when the tool is missing; file.exists("") is FALSE.
  if (file.exists(cmake)) {
    install_dcm2nii(
      progdir = install_dir,
      from_source = TRUE,
      overwrite = TRUE,
      verbose = 2)
    expect_true(install_dcm2nii(progdir = install_dir))
  }
})
| /tests/testthat/test-install.R | no_license | muschellij2/dcm2niir | R | false | false | 680 | r | context("installing from source")
test_that("install_dcm2nii", {
install_dir = tempfile()
dir.create(install_dir, showWarnings = FALSE)
install_dcm2nii(progdir = install_dir)
expect_true(install_dcm2nii(progdir = install_dir))
})
test_that("install_dcm2nii source", {
testthat::skip_on_appveyor()
install_dir = tempfile()
dir.create(install_dir, showWarnings = FALSE)
cmake = Sys.which("cmake")
make = Sys.which("make")
message("make is ", make)
if (file.exists(cmake)) {
install_dcm2nii(
progdir = install_dir,
from_source = TRUE,
overwrite = TRUE,
verbose = 2)
expect_true(install_dcm2nii(progdir = install_dir))
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consensus_combine.R
\name{consensus_combine}
\alias{consensus_combine}
\title{Combine algorithms}
\usage{
consensus_combine(..., element = c("matrix", "class"))
}
\arguments{
\item{...}{any number of objects outputted from \code{\link[=consensus_cluster]{consensus_cluster()}}}
\item{element}{either "matrix" or "class" to extract the consensus matrix or
consensus class, respectively.}
}
\value{
\code{consensus_combine} returns either a list of all consensus matrices
or a data frame showing all the consensus classes
}
\description{
Combines results for multiple objects from \code{consensus_cluster()} and outputs
either the consensus matrices or consensus classes for all algorithms.
}
\details{
This function is useful for collecting summaries because the original results
from \code{consensus_cluster} were combined to a single object. For example,
setting \code{element = "class"} returns a matrix of consensus cluster
assignments, which can be visualized as a consensus matrix heatmap.
}
\examples{
\dontshow{if (rlang::is_installed("apcluster")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# Consensus clustering for multiple algorithms
set.seed(911)
x <- matrix(rnorm(500), ncol = 10)
CC1 <- consensus_cluster(x, nk = 3:4, reps = 10, algorithms = "ap",
progress = FALSE)
CC2 <- consensus_cluster(x, nk = 3:4, reps = 10, algorithms = "km",
progress = FALSE)
# Combine and return either matrices or classes
y1 <- consensus_combine(CC1, CC2, element = "matrix")
str(y1)
y2 <- consensus_combine(CC1, CC2, element = "class")
str(y2)
\dontshow{\}) # examplesIf}
}
\author{
Derek Chiu
}
| /man/consensus_combine.Rd | permissive | AlineTalhouk/diceR | R | false | true | 1,699 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/consensus_combine.R
\name{consensus_combine}
\alias{consensus_combine}
\title{Combine algorithms}
\usage{
consensus_combine(..., element = c("matrix", "class"))
}
\arguments{
\item{...}{any number of objects outputted from \code{\link[=consensus_cluster]{consensus_cluster()}}}
\item{element}{either "matrix" or "class" to extract the consensus matrix or
consensus class, respectively.}
}
\value{
\code{consensus_combine} returns either a list of all consensus matrices
or a data frame showing all the consensus classes
}
\description{
Combines results for multiple objects from \code{consensus_cluster()} and outputs
either the consensus matrices or consensus classes for all algorithms.
}
\details{
This function is useful for collecting summaries because the original results
from \code{consensus_cluster} were combined to a single object. For example,
setting \code{element = "class"} returns a matrix of consensus cluster
assignments, which can be visualized as a consensus matrix heatmap.
}
\examples{
\dontshow{if (rlang::is_installed("apcluster")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# Consensus clustering for multiple algorithms
set.seed(911)
x <- matrix(rnorm(500), ncol = 10)
CC1 <- consensus_cluster(x, nk = 3:4, reps = 10, algorithms = "ap",
progress = FALSE)
CC2 <- consensus_cluster(x, nk = 3:4, reps = 10, algorithms = "km",
progress = FALSE)
# Combine and return either matrices or classes
y1 <- consensus_combine(CC1, CC2, element = "matrix")
str(y1)
y2 <- consensus_combine(CC1, CC2, element = "class")
str(y2)
\dontshow{\}) # examplesIf}
}
\author{
Derek Chiu
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{lty2dash}
\alias{lty2dash}
\title{Convert R lty line type codes to plotly "dash" codes.}
\format{\preformatted{ Named chr [1:25] "solid" "dash" "dot" "dashdot" "longdash" "longdashdot" ...
- attr(*, "names")= chr [1:25] "1" "2" "3" "4" ...
}}
\usage{
lty2dash
}
\description{
Convert R lty line type codes to plotly "dash" codes.
}
\keyword{datasets}
| /man/lty2dash.Rd | no_license | race3044/plotly | R | false | false | 427 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{lty2dash}
\alias{lty2dash}
\title{Convert R lty line type codes to plotly "dash" codes.}
\format{\preformatted{ Named chr [1:25] "solid" "dash" "dot" "dashdot" "longdash" "longdashdot" ...
- attr(*, "names")= chr [1:25] "1" "2" "3" "4" ...
}}
\usage{
lty2dash
}
\description{
Convert R lty line type codes to plotly "dash" codes.
}
\keyword{datasets}
|
##########################################################################################
#QUANTILE REGRESSION FOR LINEAR MIXED MODEL
##########################################################################################
QSAEM_COM_7 = function(y,x,z,nj,p,precision=0.0001,MaxIter=300,M=20,pc=0.5,beta=beta,sigmae=sigmae,D=D)
{
start.time <- Sys.time()
n = length(nj)
N = sum(nj)
d = dim(x)[2]
q = dim(z)[2]
z = as.matrix(z)
delta1 = 0.001
delta2 = precision
#assymetry parameters
vp = (1-2*p)/(p*(1-p))
tp = sqrt(2/(p*(1-p)))
MDel = MElim(q)
ndiag = (q*(1+q)/2)
npar = d+1+ndiag
critval = 1
critval2 = 1
count = 0
teta = c(beta,sigmae,D[upper.tri(D, diag = T)])
tetam = matrix(data=NA,nrow=npar,ncol=MaxIter)
EPV = matrix(0,nrow = npar,ncol = MaxIter)
if(pc==1){
seqq=rep(1,pc*MaxIter)
}else{
seqq = c(rep(1,pc*MaxIter),(1/((((pc*MaxIter)+1):MaxIter)-(pc*MaxIter))))
seqq = c(rep(1,MaxIter-length(seqq)),seqq)
}
SAEM_bb = array(data=0,dim=c(MaxIter+1,n,q,q))
SAEM_bi = array(data=0,dim=c(MaxIter+1,n,q))
SAEM_VGi = array(data=0,dim=c(MaxIter+1,n,npar))
SAEM_ui = vector("list", n)
SAEM_Dui = vector("list", n)
SAEM_bbZD = vector("list", n)
SAEM_DZb = vector("list", n)
for(j in 1:n)
{
SAEM_bb[count+1,j,,] = diag(q)
SAEM_ui[[j]] = array(data=0,dim=c(MaxIter+1,nj[j]))
SAEM_Dui[[j]] = array(data=0,dim=c(MaxIter+1,nj[j],nj[j]))
SAEM_bbZD[[j]] = array(data=0,dim=c(MaxIter+1,q,nj[j]))
SAEM_DZb[[j]] = array(data=0,dim=c(MaxIter+1,nj[j]))
}
pb = tkProgressBar(title = "QRLMM via SAEM", min = 0,max = MaxIter, width = 300)
setTkProgressBar(pb, 0, label=paste("Iter ",0,"/",MaxIter," - ",0,"% done",sep = ""))
while(critval < 3 && critval2 < 3)
{
count = count + 1
sumb1 = matrix(data=0,nrow=d,ncol=1)
sumb2 = matrix(data=0,nrow=d,ncol=d)
sumD = matrix(data=0,nrow=q,ncol=q)
sumsig = 0
IE = 0
for (j in 1:n)
{
y1=y[(sum(nj[1:j-1])+1):(sum(nj[1:j]))]
x1=matrix(x[(sum(nj[1:j-1])+1):(sum(nj[1:j])),],ncol=d)
z1=matrix(z[(sum(nj[1:j-1])+1):(sum(nj[1:j])),],ncol=q)
v1s = matrix(data=1,nrow=nj[j],ncol=1)
##########################################################################
#PASSO E
##########################################################################
ui = matrix(0,nrow = nj[j],ncol = 1)
sum_ui = matrix(0,nrow = nj[j],ncol = 1)
Dui = matrix(0,nrow = nj[j],ncol = nj[j])
sum_Dui = matrix(0,nrow = nj[j],ncol = nj[j])
sum_bb = matrix(0,nrow = q,ncol = q)
sum_bbZD = matrix(0,nrow = q,ncol = nj[j])
sum_DZb = matrix(0,nrow = nj[j],ncol = 1)
VGi = matrix(data = 0,nrow = npar,ncol = M)
bmetro = matrix(data = MHbi2(j=j,M=M,y1,x1,z1,bi=as.matrix(SAEM_bi[count,j,]),bibi=as.matrix(SAEM_bb[count,j,,]),d=d,q=q,p=p,nj=nj,beta=beta,sigmae=sigmae,D=D),nrow = q,ncol = M)
for(l in 1:M)
{
for(k in 1:nj[j])
{
chi = (as.numeric(y1[k]-x1[k,]%*%beta-z1[k,]%*%bmetro[,l])^2)/(sigmae*tp^2)
psi = (tp^2)/(4*sigmae)
ui[k] = Egig(lambda = 0.5,chi = chi,psi = psi,func = "x")
Dui[k,k] = Egig(lambda = 0.5,chi = chi,psi = psi,func = "1/x")
}
sum_ui = sum_ui + ui
sum_Dui = sum_Dui + Dui
sum_bbZD = sum_bbZD + bmetro[,l]%*%t(bmetro[,l])%*%t(z1)%*%Dui
sum_bb = sum_bb + bmetro[,l]%*%t(bmetro[,l])
sum_DZb = sum_DZb + Dui%*%z1%*%bmetro[,l]
t_G_b = t(x1)%*%(Dui%*%(y1-x1%*%beta) - Dui%*%z1%*%bmetro[,l] - vp*v1s)
t_G_sig = -(3/2)*(nj[j])*(1/sigmae) + (1/(2*(tp^2)*(sigmae^2)))*(t(y1-x1%*%beta-z1%*%bmetro[,l])%*%Dui%*%(y1-x1%*%beta-z1%*%bmetro[,l]) - 2*vp*t(y1-x1%*%beta-z1%*%bmetro[,l])%*%v1s + ((tp^4)/4)*t(ui)%*%v1s)
t_G_D = bmetro[,l]%*%t(bmetro[,l])
GG1 = (1/(sigmae*tp^2))*t_G_b
GG2 = t_G_sig
GG3 = (1/2)*MElim(q)%*%(kronecker(X = solve(D),Y = solve(D)))%*%as.vector(t_G_D-D)
VGi[,l] = rbind(GG1,GG2,GG3)
}
E_ui = sum_ui/M
E_Dui = sum_Dui/M
E_bbZD = sum_bbZD/M
E_bb = sum_bb/M
E_bi = apply(bmetro,1,mean)
E_DZb = sum_DZb/M
E_VGi = apply(VGi,1,mean)
SAEM_ui[[j]][count+1,] = SAEM_ui[[j]][count,] + seqq[count]*(E_ui - SAEM_ui[[j]][count,])
SAEM_Dui[[j]][count+1,,] = SAEM_Dui[[j]][count,,] + seqq[count]*(E_Dui - SAEM_Dui[[j]][count,,])
SAEM_bbZD[[j]][count+1,,] = SAEM_bbZD[[j]][count,,] + seqq[count]*(E_bbZD - SAEM_bbZD[[j]][count,,])
SAEM_bb[count+1,j,,] = SAEM_bb[count,j,,] + seqq[count]*(E_bb - SAEM_bb[count,j,,])
SAEM_bi[count+1,j,] = SAEM_bi[count,j,] + seqq[count]*(E_bi - SAEM_bi[count,j,])
SAEM_DZb[[j]][count+1,] = SAEM_DZb[[j]][count,] + seqq[count]*(E_DZb - SAEM_DZb[[j]][count,])
SAEM_VGi[count+1,j,] = SAEM_VGi[count,j,] + seqq[count]*(E_VGi - SAEM_VGi[count,j,])
##########################################################################
#PASSO M
##########################################################################
#PASSO M betas
sumb1 = sumb1 + (t(x1)%*%(SAEM_Dui[[j]][count+1,,]%*%y1 - SAEM_DZb[[j]][count+1,] - vp*v1s))
sumb2 = sumb2 + (t(x1)%*%SAEM_Dui[[j]][count+1,,]%*%x1)
#PASSO M sigmae
tsig = t(y1-x1%*%beta)%*%SAEM_Dui[[j]][count+1,,]%*%(y1-x1%*%beta) -
2*t(y1-x1%*%beta)%*%SAEM_DZb[[j]][count+1,] +
tr(z1%*%SAEM_bbZD[[j]][count+1,,])-
2*vp*t(y1-x1%*%beta)%*%v1s +
2*vp*t(z1%*%SAEM_bi[count+1,j,])%*%v1s +
((tp^4)/4)*t(SAEM_ui[[j]][count+1,])%*%v1s
sumsig = sumsig + as.numeric(tsig)
#PASSO M matriz D
sumD = sumD + SAEM_bb[count+1,j,,]
#SUM do prod vector gradiente
IE = IE + SAEM_VGi[count+1,j,]%*%t(SAEM_VGi[count+1,j,])
}
beta = solve(sumb2)%*%sumb1
D = sumD/n
sigmae = sumsig/(3*N*tp^2)
EP = sqrt(diag(solve(IE)))
EPV[,count] = EP
param = teta
teta = c(beta,sigmae,D[upper.tri(D, diag = T)])
criterio = abs(teta-param)/(abs(param)+delta1)
criterio2 = abs(teta-param)/(EP+0.0001)
if(max(criterio) < delta2){critval=critval+1}else{critval=0}
if(max(criterio2) < 0.0002){critval2=critval2+1}else{critval2=0}
#PRUEBAS
#############################################################################
tetam[,count] = teta
setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",round(count/MaxIter*100,0),"% done",sep = ""))
if (count == MaxIter){critval=10}
}
loglik = logveroIS(beta,sigmae,D,y,x,z,nj,bi=SAEM_bi[count+1,,],bibi=SAEM_bb[count+1,,,],MIS=500,n=n,d=d,q=q,p=p)
AIC = -2*loglik +2*npar
BIC = -2*loglik +log(N)*npar
HQ = -2*loglik +2*log(log(N))*npar
table = data.frame(beta,EP[1:d],beta-(1.96*EP[1:d]),beta+(1.96*EP[1:d]),beta/EP[1:d],2*pnorm(abs(beta/EP[1:d]),lower.tail = F))
rownames(table) = paste("beta",1:d)
colnames(table) = c("Estimate","Std. Error","Inf CI95%","Sup CI95%","z value","Pr(>|z|)")
end.time <- Sys.time()
time.taken <- end.time - start.time
res = list(iter = count,criterio = max(criterio,criterio2),beta = beta,weights = SAEM_bi[count+1,,],sigmae= sigmae,D = D,EP=EP,table = table,loglik=loglik,AIC=AIC,BIC=BIC,HQ=HQ,time = time.taken)
conv = list(teta = tetam[,1:count],EPV = EPV[,1:count])
obj.out = list(conv=conv,res = res)
if (count == MaxIter)
{
setTkProgressBar(pb, MaxIter, label=paste("MaxIter reached ",count,"/",MaxIter," - 100 % done",sep = ""))
Sys.sleep(1)
close(pb)
}
else
{
setTkProgressBar(pb, MaxIter, label=paste("Convergence at Iter ",count,"/",MaxIter," - 100 % done",sep = ""))
Sys.sleep(1)
close(pb)
}
class(obj.out) = "QRLMM"
return(obj.out)
}
# ---- file boundary: R/SAEM.R (repo: cran/qrLMM; no license declared) ----
##########################################################################################
#QUANTILE REGRESSION FOR LINEAR MIXED MODEL
##########################################################################################
# Fits a linear mixed-effects model for the p-th quantile of the response via
# the SAEM (Stochastic Approximation EM) algorithm, using the asymmetric
# Laplace distribution representation of quantile regression.
#
# Arguments:
#   y          stacked response vector (length N = sum(nj)).
#   x          fixed-effects design matrix (N x d).
#   z          random-effects design matrix (N x q).
#   nj         vector with the number of observations of each of the n subjects.
#   p          quantile to be estimated, in (0, 1).
#   precision  relative-change tolerance used in the stopping rule.
#   MaxIter    maximum number of SAEM iterations.
#   M          Monte Carlo (Metropolis-Hastings) sample size per E-step.
#   pc         cut point: fraction of iterations run with step size 1 before
#              the decaying smoothing sequence starts.
#   beta, sigmae, D  initial values of the fixed effects, the scale parameter
#              and the random-effects covariance matrix.
#
# Relies on helpers defined elsewhere in the package: MElim(), MHbi2(),
# Egig(), tr(), logveroIS(); the progress bar requires tcltk to be attached.
#
# Returns an object of class "QRLMM": list(conv = convergence history,
# res = estimates, standard errors, inference table and information criteria).
QSAEM_COM_7 = function(y,x,z,nj,p,precision=0.0001,MaxIter=300,M=20,pc=0.5,beta=beta,sigmae=sigmae,D=D)
{
  start.time <- Sys.time()
  n = length(nj)                  # number of subjects
  N = sum(nj)                     # total number of observations
  d = dim(x)[2]                   # number of fixed effects
  q = dim(z)[2]                   # number of random effects per subject
  z = as.matrix(z)
  delta1 = 0.001                  # offset protecting the relative-change criterion
  delta2 = precision
  # asymmetry parameters of the asymmetric Laplace representation
  vp = (1-2*p)/(p*(1-p))
  tp = sqrt(2/(p*(1-p)))
  MDel = MElim(q)                 # elimination matrix (NOTE: never read again; MElim(q) is recomputed below)
  ndiag = (q*(1+q)/2)             # free parameters in the symmetric matrix D
  npar = d+1+ndiag                # total number of model parameters
  critval = 1
  critval2 = 1
  count = 0
  teta = c(beta,sigmae,D[upper.tri(D, diag = T)])   # stacked parameter vector
  tetam = matrix(data=NA,nrow=npar,ncol=MaxIter)    # parameter history per iteration
  EPV = matrix(0,nrow = npar,ncol = MaxIter)        # standard-error history
  # SAEM smoothing sequence: step size 1 for the first pc*MaxIter iterations
  # (no memory), then a 1/k decay (memory phase).
  if(pc==1){
    seqq=rep(1,pc*MaxIter)
  }else{
    seqq = c(rep(1,pc*MaxIter),(1/((((pc*MaxIter)+1):MaxIter)-(pc*MaxIter))))
    seqq = c(rep(1,MaxIter-length(seqq)),seqq)
  }
  # Stochastic-approximation accumulators of the E-step sufficient statistics,
  # indexed by iteration (count+1) and subject j.
  SAEM_bb = array(data=0,dim=c(MaxIter+1,n,q,q))    # approx. E[b_j b_j']
  SAEM_bi = array(data=0,dim=c(MaxIter+1,n,q))      # approx. E[b_j]
  SAEM_VGi = array(data=0,dim=c(MaxIter+1,n,npar))  # per-subject score vectors
  SAEM_ui = vector("list", n)                       # per-subject E[u_jk]
  SAEM_Dui = vector("list", n)                      # per-subject diag matrices E[1/u_jk]
  SAEM_bbZD = vector("list", n)                     # per-subject E[b b'] Z' D(1/u)
  SAEM_DZb = vector("list", n)                      # per-subject D(1/u) Z E[b]
  for(j in 1:n)
  {
    SAEM_bb[count+1,j,,] = diag(q)
    SAEM_ui[[j]] = array(data=0,dim=c(MaxIter+1,nj[j]))
    SAEM_Dui[[j]] = array(data=0,dim=c(MaxIter+1,nj[j],nj[j]))
    SAEM_bbZD[[j]] = array(data=0,dim=c(MaxIter+1,q,nj[j]))
    SAEM_DZb[[j]] = array(data=0,dim=c(MaxIter+1,nj[j]))
  }
  pb = tkProgressBar(title = "QRLMM via SAEM", min = 0,max = MaxIter, width = 300)
  setTkProgressBar(pb, 0, label=paste("Iter ",0,"/",MaxIter," - ",0,"% done",sep = ""))
  # Main SAEM loop: stops after 3 consecutive iterations satisfying either
  # stopping rule, or at the iteration cap.
  while(critval < 3 && critval2 < 3)
  {
    count = count + 1
    sumb1 = matrix(data=0,nrow=d,ncol=1)
    sumb2 = matrix(data=0,nrow=d,ncol=d)
    sumD = matrix(data=0,nrow=q,ncol=q)
    sumsig = 0
    IE = 0                        # empirical information: sum of score outer products
    for (j in 1:n)
    {
      # Rows belonging to subject j.  Note: 1:j-1 parses as (1:j)-1, i.e.
      # 0:(j-1); the 0 index is dropped, so sum(nj[1:j-1]) is the size of the
      # first j-1 clusters (0 when j == 1).
      y1=y[(sum(nj[1:j-1])+1):(sum(nj[1:j]))]
      x1=matrix(x[(sum(nj[1:j-1])+1):(sum(nj[1:j])),],ncol=d)
      z1=matrix(z[(sum(nj[1:j-1])+1):(sum(nj[1:j])),],ncol=q)
      v1s = matrix(data=1,nrow=nj[j],ncol=1)   # column vector of ones, length nj[j]
      ##########################################################################
      # E-step: Monte Carlo approximation of the conditional expectations
      # given the current parameter values.
      ##########################################################################
      ui = matrix(0,nrow = nj[j],ncol = 1)
      sum_ui = matrix(0,nrow = nj[j],ncol = 1)
      Dui = matrix(0,nrow = nj[j],ncol = nj[j])
      sum_Dui = matrix(0,nrow = nj[j],ncol = nj[j])
      sum_bb = matrix(0,nrow = q,ncol = q)
      sum_bbZD = matrix(0,nrow = q,ncol = nj[j])
      sum_DZb = matrix(0,nrow = nj[j],ncol = 1)
      VGi = matrix(data = 0,nrow = npar,ncol = M)
      # M Metropolis-Hastings draws of the random effects b_j (q x M matrix).
      bmetro = matrix(data = MHbi2(j=j,M=M,y1,x1,z1,bi=as.matrix(SAEM_bi[count,j,]),bibi=as.matrix(SAEM_bb[count,j,,]),d=d,q=q,p=p,nj=nj,beta=beta,sigmae=sigmae,D=D),nrow = q,ncol = M)
      for(l in 1:M)
      {
        # Conditional GIG moments of the latent scale variables u_jk.
        for(k in 1:nj[j])
        {
          chi = (as.numeric(y1[k]-x1[k,]%*%beta-z1[k,]%*%bmetro[,l])^2)/(sigmae*tp^2)
          psi = (tp^2)/(4*sigmae)
          ui[k] = Egig(lambda = 0.5,chi = chi,psi = psi,func = "x")       # E[u]
          Dui[k,k] = Egig(lambda = 0.5,chi = chi,psi = psi,func = "1/x")  # E[1/u]
        }
        sum_ui = sum_ui + ui
        sum_Dui = sum_Dui + Dui
        sum_bbZD = sum_bbZD + bmetro[,l]%*%t(bmetro[,l])%*%t(z1)%*%Dui
        sum_bb = sum_bb + bmetro[,l]%*%t(bmetro[,l])
        sum_DZb = sum_DZb + Dui%*%z1%*%bmetro[,l]
        # Components of the complete-data score vector; used to build the
        # empirical information matrix for the standard errors.
        t_G_b = t(x1)%*%(Dui%*%(y1-x1%*%beta) - Dui%*%z1%*%bmetro[,l] - vp*v1s)
        t_G_sig = -(3/2)*(nj[j])*(1/sigmae) + (1/(2*(tp^2)*(sigmae^2)))*(t(y1-x1%*%beta-z1%*%bmetro[,l])%*%Dui%*%(y1-x1%*%beta-z1%*%bmetro[,l]) - 2*vp*t(y1-x1%*%beta-z1%*%bmetro[,l])%*%v1s + ((tp^4)/4)*t(ui)%*%v1s)
        t_G_D = bmetro[,l]%*%t(bmetro[,l])
        GG1 = (1/(sigmae*tp^2))*t_G_b
        GG2 = t_G_sig
        GG3 = (1/2)*MElim(q)%*%(kronecker(X = solve(D),Y = solve(D)))%*%as.vector(t_G_D-D)
        VGi[,l] = rbind(GG1,GG2,GG3)
      }
      # Monte Carlo averages over the M draws.
      E_ui = sum_ui/M
      E_Dui = sum_Dui/M
      E_bbZD = sum_bbZD/M
      E_bb = sum_bb/M
      E_bi = apply(bmetro,1,mean)
      E_DZb = sum_DZb/M
      E_VGi = apply(VGi,1,mean)
      # Stochastic-approximation update of each sufficient statistic:
      # S_{k+1} = S_k + seqq[k] * (MC average - S_k).
      SAEM_ui[[j]][count+1,] = SAEM_ui[[j]][count,] + seqq[count]*(E_ui - SAEM_ui[[j]][count,])
      SAEM_Dui[[j]][count+1,,] = SAEM_Dui[[j]][count,,] + seqq[count]*(E_Dui - SAEM_Dui[[j]][count,,])
      SAEM_bbZD[[j]][count+1,,] = SAEM_bbZD[[j]][count,,] + seqq[count]*(E_bbZD - SAEM_bbZD[[j]][count,,])
      SAEM_bb[count+1,j,,] = SAEM_bb[count,j,,] + seqq[count]*(E_bb - SAEM_bb[count,j,,])
      SAEM_bi[count+1,j,] = SAEM_bi[count,j,] + seqq[count]*(E_bi - SAEM_bi[count,j,])
      SAEM_DZb[[j]][count+1,] = SAEM_DZb[[j]][count,] + seqq[count]*(E_DZb - SAEM_DZb[[j]][count,])
      SAEM_VGi[count+1,j,] = SAEM_VGi[count,j,] + seqq[count]*(E_VGi - SAEM_VGi[count,j,])
      ##########################################################################
      # M-step: accumulate the per-subject terms of the closed-form updates.
      ##########################################################################
      # fixed effects (betas)
      sumb1 = sumb1 + (t(x1)%*%(SAEM_Dui[[j]][count+1,,]%*%y1 - SAEM_DZb[[j]][count+1,] - vp*v1s))
      sumb2 = sumb2 + (t(x1)%*%SAEM_Dui[[j]][count+1,,]%*%x1)
      # scale parameter sigmae
      tsig = t(y1-x1%*%beta)%*%SAEM_Dui[[j]][count+1,,]%*%(y1-x1%*%beta) -
        2*t(y1-x1%*%beta)%*%SAEM_DZb[[j]][count+1,] +
        tr(z1%*%SAEM_bbZD[[j]][count+1,,])-
        2*vp*t(y1-x1%*%beta)%*%v1s +
        2*vp*t(z1%*%SAEM_bi[count+1,j,])%*%v1s +
        ((tp^4)/4)*t(SAEM_ui[[j]][count+1,])%*%v1s
      sumsig = sumsig + as.numeric(tsig)
      # random-effects covariance matrix D
      sumD = sumD + SAEM_bb[count+1,j,,]
      # sum of outer products of the gradient vectors
      IE = IE + SAEM_VGi[count+1,j,]%*%t(SAEM_VGi[count+1,j,])
    }
    # Closed-form M-step parameter updates.
    beta = solve(sumb2)%*%sumb1
    D = sumD/n
    sigmae = sumsig/(3*N*tp^2)
    EP = sqrt(diag(solve(IE)))    # approximate standard errors from the empirical information
    EPV[,count] = EP
    param = teta
    teta = c(beta,sigmae,D[upper.tri(D, diag = T)])
    # Stopping rules: relative parameter change and change scaled by the
    # standard errors; each must hold for 3 consecutive iterations.
    criterio = abs(teta-param)/(abs(param)+delta1)
    criterio2 = abs(teta-param)/(EP+0.0001)
    if(max(criterio) < delta2){critval=critval+1}else{critval=0}
    if(max(criterio2) < 0.0002){critval2=critval2+1}else{critval2=0}
    tetam[,count] = teta
    setTkProgressBar(pb, count, label=paste("Iter ",count,"/",MaxIter," - ",round(count/MaxIter*100,0),"% done",sep = ""))
    if (count == MaxIter){critval=10}   # force the loop to exit at the iteration cap
  }
  # Log-likelihood by importance sampling and information criteria.
  loglik = logveroIS(beta,sigmae,D,y,x,z,nj,bi=SAEM_bi[count+1,,],bibi=SAEM_bb[count+1,,,],MIS=500,n=n,d=d,q=q,p=p)
  AIC = -2*loglik +2*npar
  BIC = -2*loglik +log(N)*npar
  HQ = -2*loglik +2*log(log(N))*npar
  # Wald-type inference table for the fixed effects (95% normal intervals).
  table = data.frame(beta,EP[1:d],beta-(1.96*EP[1:d]),beta+(1.96*EP[1:d]),beta/EP[1:d],2*pnorm(abs(beta/EP[1:d]),lower.tail = F))
  rownames(table) = paste("beta",1:d)
  colnames(table) = c("Estimate","Std. Error","Inf CI95%","Sup CI95%","z value","Pr(>|z|)")
  end.time <- Sys.time()
  time.taken <- end.time - start.time
  res = list(iter = count,criterio = max(criterio,criterio2),beta = beta,weights = SAEM_bi[count+1,,],sigmae= sigmae,D = D,EP=EP,table = table,loglik=loglik,AIC=AIC,BIC=BIC,HQ=HQ,time = time.taken)
  conv = list(teta = tetam[,1:count],EPV = EPV[,1:count])
  obj.out = list(conv=conv,res = res)
  # Close the progress bar with a message matching the exit reason.
  if (count == MaxIter)
  {
    setTkProgressBar(pb, MaxIter, label=paste("MaxIter reached ",count,"/",MaxIter," - 100 % done",sep = ""))
    Sys.sleep(1)
    close(pb)
  }
  else
  {
    setTkProgressBar(pb, MaxIter, label=paste("Convergence at Iter ",count,"/",MaxIter," - 100 % done",sep = ""))
    Sys.sleep(1)
    close(pb)
  }
  class(obj.out) = "QRLMM"
  return(obj.out)
}
library(compiler)
###
###
### Definition of argument types
###
###
# Numeric codes describing how an operand of a bytecode instruction must be
# interpreted when disassembling (see Opcodes.argdescr below).
SKIP.ARGTYPE            <- -1   # operand present but not decoded
LABEL.ARGTYPE           <- 0    # operand is a jump target (pc)
CONSTANTS.ARGTYPE       <- 3    # operand indexes the constant pool
CONSTANTS_DBG.ARGTYPE   <- 4    # constant-pool index used only for debug output
CONSTANTS_LABEL.ARGTYPE <- 5    # constant holding a vector of jump targets
BOOL.ARGTYPE            <- 11   # operand is a logical flag
INT.ARGTYPE             <- 10   # operand is a plain integer
###
###
### Names list of argument types internal usage
###
###
# Name -> code lookup for operand types; the values mirror the *.ARGTYPE
# constants defined above and are used as list keys by printBC().
Opcodes.argtypes <- list(
  LABEL          = LABEL.ARGTYPE,
  CONSTANT       = CONSTANTS.ARGTYPE,
  CONSTANT_LABEL = CONSTANTS_LABEL.ARGTYPE,
  CONSTANT_DBG   = CONSTANTS_DBG.ARGTYPE,
  BOOL           = BOOL.ARGTYPE,
  INT            = INT.ARGTYPE
)
###
###
### Version of bytecode being annotated
###
###
# Highest GNU-R bytecode version this disassembler knows how to annotate.
Opcodes.bcversion <- 10L
###
###
### Instruction annotation for every bytecode of the code
###
###
# Operand descriptor table: for each GNU-R bytecode instruction, a vector of
# *.ARGTYPE codes giving the number and meaning of the operands that follow
# the opcode in the code array.  An empty c() means the instruction takes no
# operands.  This table drives the operand-decoding loops in printBC().
Opcodes.argdescr <- list(
  BCMISMATCH.OP = c(),
  RETURN.OP = c(),
  # control flow
  GOTO.OP = c(LABEL.ARGTYPE),
  BRIFNOT.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  POP.OP = c(),
  DUP.OP = c(),
  PRINTVALUE.OP = c(),
  STARTLOOPCNTXT.OP = c(BOOL.ARGTYPE, LABEL.ARGTYPE),  # bool is_for_loop, pc for break
  ENDLOOPCNTXT.OP = c(BOOL.ARGTYPE),
  DOLOOPNEXT.OP = c(),
  DOLOOPBREAK.OP = c(),
  STARTFOR.OP = c(CONSTANTS_DBG.ARGTYPE, CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  STEPFOR.OP = c(LABEL.ARGTYPE),
  ENDFOR.OP = c(),
  SETLOOPVAL.OP = c(),
  INVISIBLE.OP = c(),
  # loads and variable access
  LDCONST.OP = c(CONSTANTS.ARGTYPE),
  LDNULL.OP = c(),
  LDTRUE.OP = c(),
  LDFALSE.OP = c(),
  GETVAR.OP = c(CONSTANTS.ARGTYPE),
  DDVAL.OP = c(CONSTANTS.ARGTYPE),
  SETVAR.OP = c(CONSTANTS.ARGTYPE),
  GETFUN.OP = c(CONSTANTS.ARGTYPE),
  GETGLOBFUN.OP = c(CONSTANTS.ARGTYPE),
  GETSYMFUN.OP = c(CONSTANTS.ARGTYPE),
  GETBUILTIN.OP = c(CONSTANTS.ARGTYPE),
  GETINTLBUILTIN.OP = c(CONSTANTS.ARGTYPE),
  CHECKFUN.OP = c(),
  # calls and argument pushing
  MAKEPROM.OP = c(CONSTANTS.ARGTYPE),
  DOMISSING.OP = c(),
  SETTAG.OP = c(CONSTANTS.ARGTYPE),
  DODOTS.OP = c(),
  PUSHARG.OP = c(),
  PUSHCONSTARG.OP = c(CONSTANTS.ARGTYPE),
  PUSHNULLARG.OP = c(),
  PUSHTRUEARG.OP = c(),
  PUSHFALSEARG.OP = c(),
  CALL.OP = c(CONSTANTS.ARGTYPE),
  CALLBUILTIN.OP = c(CONSTANTS.ARGTYPE),
  CALLSPECIAL.OP = c(CONSTANTS.ARGTYPE),
  MAKECLOSURE.OP = c(CONSTANTS.ARGTYPE),
  # arithmetic / comparison / logic (constant is the call, for debug output)
  UMINUS.OP = c(CONSTANTS_DBG.ARGTYPE),
  UPLUS.OP = c(CONSTANTS_DBG.ARGTYPE),
  ADD.OP = c(CONSTANTS_DBG.ARGTYPE),
  SUB.OP = c(CONSTANTS_DBG.ARGTYPE),
  MUL.OP = c(CONSTANTS_DBG.ARGTYPE),
  DIV.OP = c(CONSTANTS_DBG.ARGTYPE),
  EXPT.OP = c(CONSTANTS_DBG.ARGTYPE),
  SQRT.OP = c(CONSTANTS_DBG.ARGTYPE),
  EXP.OP = c(CONSTANTS_DBG.ARGTYPE),
  EQ.OP = c(CONSTANTS_DBG.ARGTYPE),
  NE.OP = c(CONSTANTS_DBG.ARGTYPE),
  LT.OP = c(CONSTANTS_DBG.ARGTYPE),
  LE.OP = c(CONSTANTS_DBG.ARGTYPE),
  GE.OP = c(CONSTANTS_DBG.ARGTYPE),
  GT.OP = c(CONSTANTS_DBG.ARGTYPE),
  AND.OP = c(CONSTANTS_DBG.ARGTYPE),
  OR.OP = c(CONSTANTS_DBG.ARGTYPE),
  NOT.OP = c(CONSTANTS_DBG.ARGTYPE),
  DOTSERR.OP = c(),
  # assignment and subsetting
  STARTASSIGN.OP = c(CONSTANTS.ARGTYPE),
  ENDASSIGN.OP = c(CONSTANTS.ARGTYPE),
  STARTSUBSET.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBSET.OP = c(),
  STARTSUBASSIGN.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBASSIGN.OP = c(),
  STARTC.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTC.OP = c(),
  STARTSUBSET2.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBSET2.OP = c(),
  STARTSUBASSIGN2.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBASSIGN2.OP = c(),
  DOLLAR.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  DOLLARGETS.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  # type predicates
  ISNULL.OP = c(),
  ISLOGICAL.OP = c(),
  ISINTEGER.OP = c(),
  ISDOUBLE.OP = c(),
  ISCOMPLEX.OP = c(),
  ISCHARACTER.OP = c(),
  ISSYMBOL.OP = c(),
  ISOBJECT.OP = c(),
  ISNUMERIC.OP = c(),
  VECSUBSET.OP = c(CONSTANTS.ARGTYPE),
  MATSUBSET.OP = c(CONSTANTS.ARGTYPE),
  VECSUBASSIGN.OP = c(CONSTANTS.ARGTYPE),
  MATSUBASSIGN.OP = c(CONSTANTS.ARGTYPE),
  AND1ST.OP = c(CONSTANTS_DBG.ARGTYPE, LABEL.ARGTYPE),
  AND2ND.OP = c(CONSTANTS_DBG.ARGTYPE),
  OR1ST.OP = c(CONSTANTS_DBG.ARGTYPE, LABEL.ARGTYPE),
  OR2ND.OP = c(CONSTANTS_DBG.ARGTYPE),
  GETVAR_MISSOK.OP = c(CONSTANTS.ARGTYPE),
  DDVAL_MISSOK.OP = c(CONSTANTS.ARGTYPE),
  VISIBLE.OP = c(),
  SETVAR2.OP = c(CONSTANTS.ARGTYPE),
  STARTASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  ENDASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  SETTER_CALL.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  GETTER_CALL.OP = c(CONSTANTS.ARGTYPE),
  SWAP.OP = c(),
  DUP2ND.OP = c(),
  SWITCH.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  RETURNJMP.OP = c(),
  STARTSUBSET_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  STARTSUBASSIGN_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  VECSUBSET2.OP = c(CONSTANTS.ARGTYPE),
  MATSUBSET2.OP = c(CONSTANTS.ARGTYPE),
  VECSUBASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  MATSUBASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  STARTSUBSET2_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  STARTSUBASSIGN2_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  SUBSET_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  SUBSET2_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  SUBASSIGN_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  SUBASSIGN2_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  LOG.OP = c(CONSTANTS.ARGTYPE),
  LOGBASE.OP = c(CONSTANTS.ARGTYPE),
  MATH1.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),  # second operand selects one of the math1funs
  DOTCALL.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  COLON.OP = c(SKIP.ARGTYPE),
  SEQALONG.OP = c(SKIP.ARGTYPE),
  SEQLEN.OP = c(SKIP.ARGTYPE),
  BASEGUARD.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE)
)
# Package-private mutable settings.  `verbosity` is the default level used by
# printBC() when the caller does not pass one explicitly (see bcverbose()).
conf <- new.env(parent = emptyenv())
assign("verbosity", 0, envir = conf)
#' Print bytecode object to output and return it \emph{invisibly} (via \code{\link{invisible}(x)})
#'
#' \code{printBC} prints a bytecode (disassembly) object in a human-friendly way.
#'
#' Works on the internal R bytecode structure produced by
#' \code{compiler::disassemble}; a plain byte-compiled closure may also be
#' passed and is disassembled first.
#'
#' @param x Bytecode object (or compiled closure) to be printed
#' @param prefix String printed before each line (used for indentation)
#' @param verbose verbosity level (0, 1 or 2)
#'   0 (default) - display only source references (if unavailable, display
#'     expression references instead)
#'   1 - same as 0, plus the bytecode version and expression references
#'   2 - same as 1, plus every operand's argument (including debug-only ones)
#'   The default can be pre-set with \emph{bcverbose}.
#' @param constantpool If TRUE, print constant-pool indices in the listing and
#'   dump the constant pool as a separate section
#' @param maxdepth Maximum depth of nested functions which are printed
#' @param depth Current depth of nested functions being printed (internal)
#' @param select Position of the currently selected instruction (debugger)
#' @param peephole Show only the area surrounding the selected instruction
#'   (requires \code{select}; used by the debugger)
#' @param ... Unused; kept for print-method signature compatibility
#'
#' @examples
#' library(compiler)
#' a <- function(x){
#'   r <- 1
#'   while(x){
#'     r = r + x*x
#'     x = x-1
#'   }
#'   r
#' }
#' bc <- compiler::cmpfun(a)
#' printBC(bc)
#' printBC(bc, verbose=1)
#' printBC(bc, verbose=2)
#'
#' @export
printBC <- function(x, prefix="", verbose=NULL, constantpool=FALSE, maxdepth=2, depth=0, select=NULL, peephole=FALSE, ...){
  if (is.null(verbose)) verbose <- conf$verbosity
  ## If a closure was passed, disassemble it first (suppressing the output of
  ## compiler::disassemble itself).
  if (typeof(x) == "closure")
    tryCatch({
      capture.output({
        x = compiler::disassemble(x)
      })
    }, error = function(e) {
      stop("An error occured trying to disassemble (compiler::disassemble) the passed object. Check whether is it BC compiled.")
    })
  ## Peephole mode needs a selected instruction to center on.
  if (peephole && is.null(select))
    stop("if you have the peephole turned on ( peephole=TRUE ) , you have to pass select flag for line ( select=SOME_LINE )")
  ## code_breakpoint may contain BREAKPOINT[0-9] instructions; code is the
  ## stream used for decoding.
  code_breakpoint <- x[[2]]
  code <- x[[2]]
  ## Constant pool of the bytecode object.
  constants <- x[[3]]
  if (code[[1]] > Opcodes.bcversion) {
    warning(paste0("The bytecode version of your GNU-R is not supported by the disassembler. ",
                   "Please make update both your GNU-R and disasembler to the most recent versions"))
  }
  srcrefsIndex <- NULL
  expressionsIndex <- NULL
  srcref <- NULL
  ## Scan the constant pool for metadata; iterating with rev() means the
  ## earliest matching entry ends up stored (later assignments overwrite).
  ## inherits() is used instead of class() == "..." so objects carrying more
  ## than one class cannot break the scalar `if` condition.
  for (cnst in rev(constants)) {
    if (inherits(cnst, "srcrefsIndex")) srcrefsIndex <- cnst
    if (inherits(cnst, "expressionsIndex")) expressionsIndex <- cnst
    if (inherits(cnst, "srcref")) srcref <- cnst
  }
  dumpExpressions <- (verbose > 0 || is.null(srcrefsIndex)) && !is.null(expressionsIndex)
  dumpSrcrefs <- !is.null(srcrefsIndex)
  ## Print the leading source reference (file#line) of the function itself.
  if (!peephole && !is.null(srcref)) {
    environm <- attr(srcref, "srcfile")
    filename <- getSrcFilename(environm)
    if (!identical(filename, character(0))) {
      cat(paste0(prefix, "@ ", filename, "#", srcref[[1]], "\n"))
    }
  }
  if (!peephole) {
    if (verbose > 0) {
      cat(paste0(prefix, "Bytecode ver. ", code[[1]], "\n"))
    }
    cat("\n")
  }
  ## First pass: mark every instruction that is a jump target.
  ## labels[i] == -2 -> not a target; -1 -> target (numbered in pass two).
  n <- length(code)
  labels <- rep(-2, n)
  i <- 2
  instrCnt <- 0  # total number of instructions (used by peephole mode)
  while (i <= n) {
    v <- code[[i]]
    argdescr <- Opcodes.argdescr[[paste0(v)]]
    j <- 1
    while (j <= length(argdescr)) {
      i <- i + 1
      if (argdescr[[j]] == Opcodes.argtypes$LABEL) {
        labels[[code[[i]] + 1]] <- -1
      } else if (argdescr[[j]] == Opcodes.argtypes$CONSTANT_LABEL) {
        v <- constants[[code[[i]] + 1]]
        if (!is.null(v)) {
          for (k in 1:length(v)) {
            labels[[v[[k]] + 1]] <- -1
          }
        }
      }
      j <- j + 1
    }
    instrCnt <- instrCnt + 1
    i <- i + 1
  }
  ## Second pass: assign consecutive numbers to the marked targets.
  i <- 2
  lastlabelno <- 0
  while (i <= n) {
    if (labels[[i]] == -1) {
      lastlabelno <- lastlabelno + 1
      labels[[i]] <- lastlabelno
    }
    i <- i + 1
  }
  ## Dump helpers below each return TRUE/FALSE indicating whether any output
  ## was produced.
  dumpConstant <- function(v) {
    v <- constants[[v + 1]]
    if (typeof(v) == "list") {
      ## Nested function: recurse up to maxdepth levels.
      if (depth < maxdepth) {
        if (typeof(v[[2]]) == "bytecode") {
          v <- compiler::disassemble(v[[2]])
          cat("<FUNCTION>")
          print.disassembly(v, select=NULL, prefix=paste0(prefix, " "), verbose=verbose, constantpool=constantpool, maxdepth=maxdepth, depth=depth+1)
          cat("\n")
        } else {
          cat("<INTERNAL_FUNCTION>")
        }
      } else {
        cat("<FUNCTION>")
      }
    } else {
      ## dput() renders the expression tree in infix notation; collapse runs
      ## of whitespace for compact output.
      z <- capture.output(dput(v))
      z <- sub("(\\s)\\s*", "\\1", z, perl=TRUE)
      if (length(z) > 1) {
        ## dput() output has no ';' between statements, so append one to each
        ## line that does not end with '{' when the next line does not start
        ## with '}' (cur = current line, nex = following line).
        z <- mapply(function(cur, nex) {
          if (length(grep("\\{\\s*$", cur)) <= 0 && length(grep("^\\s*\\}", nex)) <= 0) {
            paste0(cur, ";")
          } else {
            cur
          }
        }, z, c(z[2:(length(z))], "}"))
      }
      cat(paste0(z))
    }
    TRUE
  }
  dumpDbgConstant <- function(v) {
    ## CONSTANT_DBG operands are shown only at full verbosity.
    if (verbose > 1) {
      dumpConstant(v)
    } else {
      FALSE
    }
  }
  dumpConstantReferenceToArr <- function(v) {
    ## Constant-pool index form, used when the pool is dumped separately.
    cat(paste0("#", v))
    TRUE
  }
  dumpLabel <- function(v) {
    cat(paste0("$", labels[[v + 1]]))
    TRUE
  }
  dumpConstantLabels <- function(v) {
    if (is.null(v))
      return(dumpConstant(v))
    if (length(constants[[v + 1]]) > 0) {
      v <- lapply(constants[[v + 1]],
                  function(v) {
                    paste0("$", labels[[v + 1]])
                  })
      cat(paste(v, collapse=', '))
    } else {
      cat('-')
    }
    TRUE
  }
  dumpOp <- function(v) {
    v <- sub("\\.OP$", "", v, perl=TRUE)  # e.g. "GOTO.OP" -> "GOTO"
    v <- sprintf("%-20s", v)
    cat(paste(v))
    TRUE
  }
  dumpValue <- function(v) {
    cat(v)
    TRUE
  }
  dumpSrcRef <- function(cursrcref) {
    filename <- getSrcFilename(cursrcref)
    lineno <- getSrcLocation(cursrcref)
    o <- capture.output(print(cursrcref))
    cat(paste0(prefix, " - ", filename, "#", lineno, ": ", o[[1]], "\n"))
    TRUE
  }
  dumpExprRef <- function(exprIndex) {
    cat(paste0(prefix, " @ "))
    dumpConstant(exprIndex)
    cat("\n")
    TRUE
  }
  dumpUnknown <- function(v) {
    cat("???")
    TRUE
  }
  printCodeArray <- function() {
    ## Operand-type -> printer lookup, built ONCE (hoisted out of the operand
    ## loop).  FIX: the previous code selected the printers with
    ## ifelse(constantpool, f, g), but ifelse() cannot replicate closures and
    ## raised "attempt to replicate an object of type 'closure'" for any
    ## instruction with operands; plain if/else selects the function safely.
    argTypesDump <- list(
      dumpLabel,
      if (constantpool) dumpConstantReferenceToArr else dumpConstant,
      if (constantpool) dumpConstantReferenceToArr else dumpDbgConstant,
      if (constantpool) dumpConstantReferenceToArr else dumpConstantLabels,
      dumpValue,
      dumpValue)
    names(argTypesDump) <- c(
      Opcodes.argtypes$LABEL,
      Opcodes.argtypes$CONSTANT,
      Opcodes.argtypes$CONSTANT_DBG,
      Opcodes.argtypes$CONSTANT_LABEL,
      Opcodes.argtypes$BOOL,
      Opcodes.argtypes$INT)
    ## Third pass: print the listing.
    selected <- FALSE
    lastExprIndex <- -1
    lastSrcrefsIndex <- -1
    i <- 2
    n <- length(code)
    printedInstructions <- 0
    while (i <= n) {
      instr <- code[[i]]
      instrname <- instr
      argdescr <- Opcodes.argdescr[[paste0(instr)]]
      ## Peephole: skip everything before the selection, and stop after 5
      ## printed instructions past it.
      if (peephole
          && printedInstructions < instrCnt - 5
          && (i < select || printedInstructions >= 5)) {
        i <- i + 1 + length(argdescr)
        next
      }
      ## Label line, when this instruction is a jump target.
      if (labels[[i]] > 0) {
        cat(paste0(prefix, labels[[i]], ":\n"))
      }
      if (dumpSrcrefs) {
        curSrcrefsIndex <- srcrefsIndex[[i]]
        if (curSrcrefsIndex != lastSrcrefsIndex) {
          dumpSrcRef(constants[[curSrcrefsIndex + 1]])
          lastSrcrefsIndex <- curSrcrefsIndex
        }
      }
      if (dumpExpressions) {
        curExprIndex <- expressionsIndex[[i]]
        if (curExprIndex != lastExprIndex) {
          dumpExprRef(curExprIndex)
          lastExprIndex <- curExprIndex
        }
      }
      ## Indentation; the tail is replaced with ">>>" for the selected line.
      pr <- paste0(prefix, "   ")
      if (!selected && !is.null(select) && (i - 1) >= select) {
        pr <- paste0(substr(pr, 1, nchar(pr) - 3), ">>>")
        selected <- TRUE
      }
      cat(pr)
      if (verbose > 0 || constantpool) {
        cat(sprintf("%2d: ", i - 1))
      }
      if (grepl("^BREAKPOINT[0-9]+\\.OP$", code_breakpoint[[i]])) {
        instrname <- paste0("(BR) ", instrname)
      }
      ## Print the mnemonic (e.g. ADD / SUB ...).
      dumpOp(instrname)
      ## Operands are stored immediately after the opcode, so the index into
      ## the code array advances (i <- i + 1) per operand.
      j <- 1
      printed <- 0
      while (j <= length(argdescr)) {
        if (printed >= 1) {
          cat("\t | ")
        }
        i <- i + 1
        v <- code[[i]]
        t <- paste0(argdescr[[j]])
        ## FIX: indexing the list with an unknown name used to error
        ## ("subscript out of bounds"), making the dumpUnknown fallback
        ## unreachable for SKIP.ARGTYPE operands (COLON/SEQALONG/SEQLEN);
        ## guard the lookup instead.
        dumpFun <- if (t %in% names(argTypesDump)) argTypesDump[[t]] else dumpUnknown
        if (dumpFun(v))
          printed <- printed + 1
        j <- j + 1
      }
      printedInstructions <- printedInstructions + 1
      i <- i + 1
      cat("\n")
    }
    cat("\n")
  }
  printConstantArray <- function() {
    ## Dump the whole constant pool, one indexed entry per line.
    i <- 0
    n <- length(constants)
    while (i < n) {
      cat(sprintf(" %2d: ", i))
      dumpConstant(i)
      cat("\n")
      i <- i + 1
    }
  }
  if (constantpool) {
    cat("-------- code array --------\n")
    printCodeArray()
    cat("------ constant array ------\n")
    printConstantArray()
  } else {
    printCodeArray()
  }
  ## Default print-method behaviour: return the object invisibly.
  invisible(x)
}
#' Fault-tolerant wrapper around the print.disassembly function
#'
#' Behaves like \code{print.disassembly(x, ...)}, but instead of propagating
#' an error it prints a short diagnostic and returns \code{x} unchanged.
#'
#' @param x Bytecode object to be printed
#' @param ... additional parameters forwarded to print.disassembly
#'
#' @export
tryPrint.disassembly <- function(x, ...) {
  tryCatch(
    print.disassembly(x, ...),
    error = function(err) {
      cat(paste(gettext("Error: bytecode dump failed - "), err$message, "at", deparse(err$call), "\n"))
      x
    }
  )
}
#' Get and/or set the default verbosity level for the bytecode print method
#'
#' \code{bcverbose} stores the given level in the package-local settings
#' environment and always returns the level currently in effect.
#'
#' @param lvl verbosity level (0, 1 or 2) - optional; when supplied it becomes
#'   the new default:
#'   0 - display only source references (falling back to expression
#'       references when unavailable)
#'   1 - same as 0, plus the bytecode version and expression references
#'   2 - same as 1, plus every operand's argument (including debug-only ones)
#'
#' @return the current verbosity level
#'
#' @examples
#'
#' library(compiler)
#' library(bctools)
#' a <- function(x){
#'   r <- 1
#'   while(x){
#'     r = r + x*x
#'     x = x-1
#'   }
#'   r
#' }
#' bc <- compiler::cmpfun(a)
#'
#' # set the default verbosity level
#' bcverbose(2)
#' disassemble(bc)
#'
#' @export
bcverbose <- function(lvl = NULL) {
  if (!is.null(lvl)) {
    conf$verbosity <- lvl
  }
  conf$verbosity
}
# ---- file boundary: bctools/R/bctools.R (repo: saskaale/R-bytecode-disassembler; no license declared) ----
library(compiler)
###
###
### Definition of argument types
###
###
# Numeric codes describing how an operand of a bytecode instruction must be
# interpreted when disassembling (see Opcodes.argdescr below).
SKIP.ARGTYPE            <- -1   # operand present but not decoded
LABEL.ARGTYPE           <- 0    # operand is a jump target (pc)
CONSTANTS.ARGTYPE       <- 3    # operand indexes the constant pool
CONSTANTS_DBG.ARGTYPE   <- 4    # constant-pool index used only for debug output
CONSTANTS_LABEL.ARGTYPE <- 5    # constant holding a vector of jump targets
BOOL.ARGTYPE            <- 11   # operand is a logical flag
INT.ARGTYPE             <- 10   # operand is a plain integer
###
###
### Names list of argument types internal usage
###
###
# Name -> code lookup for operand types; the values mirror the *.ARGTYPE
# constants defined above and are used as list keys by printBC().
Opcodes.argtypes <- list(
  LABEL          = LABEL.ARGTYPE,
  CONSTANT       = CONSTANTS.ARGTYPE,
  CONSTANT_LABEL = CONSTANTS_LABEL.ARGTYPE,
  CONSTANT_DBG   = CONSTANTS_DBG.ARGTYPE,
  BOOL           = BOOL.ARGTYPE,
  INT            = INT.ARGTYPE
)
###
###
### Version of bytecode being annotated
###
###
# Highest GNU-R bytecode version this disassembler knows how to annotate.
Opcodes.bcversion <- 10L
###
###
### Instruction annotation for every bytecode of the code
###
###
# Operand descriptor table: for each GNU-R bytecode instruction, a vector of
# *.ARGTYPE codes giving the number and meaning of the operands that follow
# the opcode in the code array.  An empty c() means the instruction takes no
# operands.  This table drives the operand-decoding loops in printBC().
Opcodes.argdescr <- list(
  BCMISMATCH.OP = c(),
  RETURN.OP = c(),
  # control flow
  GOTO.OP = c(LABEL.ARGTYPE),
  BRIFNOT.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  POP.OP = c(),
  DUP.OP = c(),
  PRINTVALUE.OP = c(),
  STARTLOOPCNTXT.OP = c(BOOL.ARGTYPE, LABEL.ARGTYPE),  # bool is_for_loop, pc for break
  ENDLOOPCNTXT.OP = c(BOOL.ARGTYPE),
  DOLOOPNEXT.OP = c(),
  DOLOOPBREAK.OP = c(),
  STARTFOR.OP = c(CONSTANTS_DBG.ARGTYPE, CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  STEPFOR.OP = c(LABEL.ARGTYPE),
  ENDFOR.OP = c(),
  SETLOOPVAL.OP = c(),
  INVISIBLE.OP = c(),
  # loads and variable access
  LDCONST.OP = c(CONSTANTS.ARGTYPE),
  LDNULL.OP = c(),
  LDTRUE.OP = c(),
  LDFALSE.OP = c(),
  GETVAR.OP = c(CONSTANTS.ARGTYPE),
  DDVAL.OP = c(CONSTANTS.ARGTYPE),
  SETVAR.OP = c(CONSTANTS.ARGTYPE),
  GETFUN.OP = c(CONSTANTS.ARGTYPE),
  GETGLOBFUN.OP = c(CONSTANTS.ARGTYPE),
  GETSYMFUN.OP = c(CONSTANTS.ARGTYPE),
  GETBUILTIN.OP = c(CONSTANTS.ARGTYPE),
  GETINTLBUILTIN.OP = c(CONSTANTS.ARGTYPE),
  CHECKFUN.OP = c(),
  # calls and argument pushing
  MAKEPROM.OP = c(CONSTANTS.ARGTYPE),
  DOMISSING.OP = c(),
  SETTAG.OP = c(CONSTANTS.ARGTYPE),
  DODOTS.OP = c(),
  PUSHARG.OP = c(),
  PUSHCONSTARG.OP = c(CONSTANTS.ARGTYPE),
  PUSHNULLARG.OP = c(),
  PUSHTRUEARG.OP = c(),
  PUSHFALSEARG.OP = c(),
  CALL.OP = c(CONSTANTS.ARGTYPE),
  CALLBUILTIN.OP = c(CONSTANTS.ARGTYPE),
  CALLSPECIAL.OP = c(CONSTANTS.ARGTYPE),
  MAKECLOSURE.OP = c(CONSTANTS.ARGTYPE),
  # arithmetic / comparison / logic (constant is the call, for debug output)
  UMINUS.OP = c(CONSTANTS_DBG.ARGTYPE),
  UPLUS.OP = c(CONSTANTS_DBG.ARGTYPE),
  ADD.OP = c(CONSTANTS_DBG.ARGTYPE),
  SUB.OP = c(CONSTANTS_DBG.ARGTYPE),
  MUL.OP = c(CONSTANTS_DBG.ARGTYPE),
  DIV.OP = c(CONSTANTS_DBG.ARGTYPE),
  EXPT.OP = c(CONSTANTS_DBG.ARGTYPE),
  SQRT.OP = c(CONSTANTS_DBG.ARGTYPE),
  EXP.OP = c(CONSTANTS_DBG.ARGTYPE),
  EQ.OP = c(CONSTANTS_DBG.ARGTYPE),
  NE.OP = c(CONSTANTS_DBG.ARGTYPE),
  LT.OP = c(CONSTANTS_DBG.ARGTYPE),
  LE.OP = c(CONSTANTS_DBG.ARGTYPE),
  GE.OP = c(CONSTANTS_DBG.ARGTYPE),
  GT.OP = c(CONSTANTS_DBG.ARGTYPE),
  AND.OP = c(CONSTANTS_DBG.ARGTYPE),
  OR.OP = c(CONSTANTS_DBG.ARGTYPE),
  NOT.OP = c(CONSTANTS_DBG.ARGTYPE),
  DOTSERR.OP = c(),
  # assignment and subsetting
  STARTASSIGN.OP = c(CONSTANTS.ARGTYPE),
  ENDASSIGN.OP = c(CONSTANTS.ARGTYPE),
  STARTSUBSET.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBSET.OP = c(),
  STARTSUBASSIGN.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBASSIGN.OP = c(),
  STARTC.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTC.OP = c(),
  STARTSUBSET2.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBSET2.OP = c(),
  STARTSUBASSIGN2.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  DFLTSUBASSIGN2.OP = c(),
  DOLLAR.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  DOLLARGETS.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  # type predicates
  ISNULL.OP = c(),
  ISLOGICAL.OP = c(),
  ISINTEGER.OP = c(),
  ISDOUBLE.OP = c(),
  ISCOMPLEX.OP = c(),
  ISCHARACTER.OP = c(),
  ISSYMBOL.OP = c(),
  ISOBJECT.OP = c(),
  ISNUMERIC.OP = c(),
  VECSUBSET.OP = c(CONSTANTS.ARGTYPE),
  MATSUBSET.OP = c(CONSTANTS.ARGTYPE),
  VECSUBASSIGN.OP = c(CONSTANTS.ARGTYPE),
  MATSUBASSIGN.OP = c(CONSTANTS.ARGTYPE),
  AND1ST.OP = c(CONSTANTS_DBG.ARGTYPE, LABEL.ARGTYPE),
  AND2ND.OP = c(CONSTANTS_DBG.ARGTYPE),
  OR1ST.OP = c(CONSTANTS_DBG.ARGTYPE, LABEL.ARGTYPE),
  OR2ND.OP = c(CONSTANTS_DBG.ARGTYPE),
  GETVAR_MISSOK.OP = c(CONSTANTS.ARGTYPE),
  DDVAL_MISSOK.OP = c(CONSTANTS.ARGTYPE),
  VISIBLE.OP = c(),
  SETVAR2.OP = c(CONSTANTS.ARGTYPE),
  STARTASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  ENDASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  SETTER_CALL.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  GETTER_CALL.OP = c(CONSTANTS.ARGTYPE),
  SWAP.OP = c(),
  DUP2ND.OP = c(),
  SWITCH.OP = c(CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE, CONSTANTS.ARGTYPE),
  RETURNJMP.OP = c(),
  STARTSUBSET_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  STARTSUBASSIGN_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  VECSUBSET2.OP = c(CONSTANTS.ARGTYPE),
  MATSUBSET2.OP = c(CONSTANTS.ARGTYPE),
  VECSUBASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  MATSUBASSIGN2.OP = c(CONSTANTS.ARGTYPE),
  STARTSUBSET2_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  STARTSUBASSIGN2_N.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE),
  SUBSET_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  SUBSET2_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  SUBASSIGN_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  SUBASSIGN2_N.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  LOG.OP = c(CONSTANTS.ARGTYPE),
  LOGBASE.OP = c(CONSTANTS.ARGTYPE),
  MATH1.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),  # second operand selects one of the math1funs
  DOTCALL.OP = c(CONSTANTS.ARGTYPE, INT.ARGTYPE),
  COLON.OP = c(SKIP.ARGTYPE),
  SEQALONG.OP = c(SKIP.ARGTYPE),
  SEQLEN.OP = c(SKIP.ARGTYPE),
  BASEGUARD.OP = c(CONSTANTS.ARGTYPE, LABEL.ARGTYPE)
)
# Package-private mutable settings.  `verbosity` is the default level used by
# printBC() when the caller does not pass one explicitly (see bcverbose()).
conf <- new.env(parent = emptyenv())
assign("verbosity", 0, envir = conf)
#' Print bytecode object to output and returns it \emph{invisibly} (via \code{\link{invisible}(x)})
#'
#' \code{print.disassembly} print bytecode object into output in human-friendly way.
#'
#' This is implementation of print method for bytecode object.
#' It works under internal R Bytecode structure.
#' You can manually create bytecode object through \emph{compiler} package ( via for example \code{\link{cmpfun}} function )
#'
#' @param x Bytecode object to be printed
#' @param select instruction position to be highlighted
#' @param prefix number of spaces to print before each line ( used for intendation )
#' @param verbose verbosity level ( 0 or 1 or 2)
#' 0 (default value) - display only source references ( if they are available, if they aren't print expression references instead )
#' 1 - the same as 0 + display bytecode version and display expression references ( if they are available )
#' 2 - the same as 1 + display every operand's argument ( including ones used just for debugging )
#' default value can be pre-set by \emph{bcverbose} function
#' @param maxdepth Maximum depth of nested functions which are printed
#' @param depth Current depth of nested functions which are being printed ( used for internal purposes in print recursion )
#' @param select Position of currently selected instruction ( used in debugger )
#' @param peephole Turn the peephole on - show just area surronding the selected instruction ( must have selected, used in debugger )
#' @param ... Numeric, complex, or logical vectors.
#'
#' @examples
#' library(compiler)
#' library(bctools)
#' a <- function(x){
#' r <- 1
#' while(x){
#' r = r + x*x
#' x = x-1
#' }
#' r
#' }
#' bc <- compiler::cmpfun(a)
#'
#' #these two does the same
#' disassemble(bc)
#' print(disassemble(bc))
#'
#' #manually set verbose level
#' print(disassemble(bc), verbose=1)
#' print(disassemble(bc), verbose=2)
#'
#' @export
printBC <- function(x, prefix="", verbose=NULL, constantpool=FALSE, maxdepth=2, depth=0, select=NULL, peephole=FALSE, ...){
  #fall back to the package-wide default verbosity ( settable via bcverbose() )
  if( is.null(verbose) ) verbose <- conf$verbosity
  ### if a closure is passed, call the internal disassembly of the code first
  if(typeof(x) == "closure")
    tryCatch({
      capture.output({ ### suppress output of the compiler::disassemble function
        x = compiler::disassemble(x)
      })
    }, error = function(e) {
      stop("An error occurred trying to disassemble (compiler::disassemble) the passed object. Check whether it is BC compiled.")
    })
  #if you have the peephole turned on, you have to pass select flag for line
  if( peephole && is.null(select) )
    stop("if you have the peephole turned on ( peephole=TRUE ) , you have to pass select flag for line ( select=SOME_LINE )");
  #can contain BREAKPOINT[0-9] instructions
  #NOTE(review): both variables currently read the same slot x[[2]] -- confirm
  #whether code_breakpoint was meant to be taken from a different source
  code_breakpoint <- x[[2]]
  #never contains BREAKPOINT[0-9] instruction
  code <- x[[2]]
  #constant buffer
  constants <- x[[3]]
  #code[[1]] holds the bytecode version number of the compiled object
  if(code[[1]] > Opcodes.bcversion){
    warning(paste0("The bytecode version of your GNU-R is not supported by the disassembler. ",
                   "Please update both your GNU-R and disassembler to the most recent versions"));
  }
  srcrefsIndex <- NULL
  expressionsIndex <- NULL
  srcref <- NULL
  #get needed properties from constants object
  #inherits() is used instead of class(x)=="..." so objects carrying more than
  #one class attribute are still recognised
  for (cnst in rev(constants)){
    if (inherits(cnst, "srcrefsIndex")) srcrefsIndex <- cnst
    if (inherits(cnst, "expressionsIndex")) expressionsIndex <- cnst
    if (inherits(cnst, "srcref")) srcref <- cnst
  }
  #expression references are shown when requested or when no source refs exist
  dumpExpressions <- ( verbose > 0 || is.null(srcrefsIndex) ) && !is.null(expressionsIndex);
  dumpSrcrefs <- !is.null(srcrefsIndex);
  #print leading source reference
  if(!peephole && !is.null(srcref)){
    environm <- attr(srcref, "srcfile")
    filename <- getSrcFilename(environm)
    if(!identical(filename, character(0))){
      cat(paste0(prefix,"@ ",filename,"#",srcref[[1]],"\n"))
    }
  }
  if(!peephole){
    if(verbose > 0) {
      cat(paste0(prefix,"Bytecode ver. ",code[[1]],"\n"))
    }
    cat("\n")
  }
  #first pass to mark instruction with labels
  #labels is array that describes if each instruction has label
  n <- length(code)
  labels <- rep(-2, n) #labels now contains -2=not used, -1=used
  i <- 2
  instrCnt<-0 # count number of instructions
  while( i <= n ) {
    v <- code[[i]]
    argdescr <- Opcodes.argdescr[[paste0(v)]]
    j <- 1
    while(j <= length(argdescr)){
      i<-i+1
      if(argdescr[[j]] == Opcodes.argtypes$LABEL){
        labels[[ code[[i]] + 1 ]] <- -1
      }else if(argdescr[[j]] == Opcodes.argtypes$CONSTANT_LABEL){
        v <- constants[[ code[[i]] + 1 ]]
        if(!is.null(v)){
          #seq_along() instead of 1:length(v): a zero-length vector is a no-op
          for(k in seq_along(v)){
            labels[[v[[k]] + 1]] <- -1
          }
        }
      }
      j<-j+1
    }
    instrCnt<-instrCnt+1
    i<-i+1
  }
  #second pass to count labels
  #loop through labels array and if that instruction has label marked on it
  #labels array now contains values: -2=not used, -1=used, >0=index of label
  i <- 2
  lastlabelno <- 0;
  while( i <= n ) {
    if(labels[[i]] == -1){
      lastlabelno <- lastlabelno+1
      labels[[i]] <- lastlabelno
    }
    i<-i+1
  }
  #functions to print each type of information ( arguments / source and expression references )
  #each function returns TRUE / FALSE that indicates if any output has been printed
  dumpConstant<-function(v){
    v <- constants[[v+1]]
    if(typeof(v) == "list"){
      #the max depth of recursion is defined via maxdepth parameter
      if(depth < maxdepth){
        if(typeof(v[[2]]) == "bytecode"){
          #nested compiled function: recurse with an increased indent/depth
          v <- compiler::disassemble(v[[2]])
          cat("<FUNCTION>")
          print.disassembly(v, select=NULL, prefix=paste0(prefix,"   "),verbose=verbose, constantpool=constantpool, maxdepth=maxdepth, depth=depth+1)
          cat("\n")
        }else{
          cat("<INTERNAL_FUNCTION>")
        }
      }else{
        cat("<FUNCTION>")
      }
    }else{
      #hack to print expression tree in infix notation instead of prefix
      z <- capture.output(dput(v))
      z <- sub("(\\s)\\s*", "\\1", z, perl=TRUE)
      if(length(z) > 1){
        # convert >>while (i) { print(i) i <- i - 1 }<< to >>while (i) { print(i); i <- i - 1 }<<
        # see the semicolon after print(i)
        #because the printed code does not contain ; after each instruction we have to add it
        #called with first argument of current row and second of following row
        z <- mapply(function(cur, nex){
          #current row does not end with { and following row does not start with }:
          #append a semicolon after this instruction
          if( length(grep("\\{\\s*$", cur)) <= 0 && length(grep("^\\s*\\}", nex)) <= 0 ){
            paste0(cur, ";")
          }else{
            cur
          }
        }, z, c(z[2:(length(z))], "}"))
      }
      cat(paste0(z))
    }
    TRUE
  }
  dumpDbgConstant <- function(v){
    #there are 2 types of constants in bytecode
    #this function corresponds to CONSTANT_DBG
    # which means that this type of constant is used just for debugging inside bytecode
    if(verbose > 1){
      dumpConstant(v);
    }else{
      FALSE
    }
  }
  dumpConstantReferenceToArr <- function(v){
    #print only the index into the constant pool ( used with constantpool=TRUE )
    cat(paste0( "#", v ))
    TRUE
  }
  dumpLabel <- function(v){
    cat(paste0( "$", labels[[ v+1 ]] ))
    TRUE
  }
  dumpConstantLabels <- function(v){
    if(is.null(v))
      return(dumpConstant(v))
    if(length(constants[[v+1]]) > 0){
      v <- lapply(constants[[v+1]],
                  function(v){
                    paste0("$", labels[[ v+1 ]])
                  })
      cat(paste(v,collapse=', '))
    }else{
      cat('-')
    }
    TRUE
  }
  dumpOp<-function(v){
    v <- sub("\\.OP$", "", v, perl=TRUE) # example "GOTO.OP" >> "GOTO"
    v <- sprintf("%-20s", v)             # pad so operands line up in columns
    cat(paste(v))
    TRUE
  }
  dumpValue<-function(v){
    cat(v)
    TRUE
  }
  dumpSrcRef<-function(cursrcref){
    filename <- getSrcFilename(cursrcref)
    lineno <- getSrcLocation(cursrcref)
    o <- capture.output(print(cursrcref))
    cat(paste0(prefix," - ",filename,"#",lineno,": ",o[[1]],"\n"))
    TRUE
  }
  dumpExprRef<-function(exprIndex){
    cat(paste0(prefix," @ "))
    dumpConstant(exprIndex)
    cat("\n")
    TRUE
  }
  dumpUnknown<-function(v){
    #fallback for argument types the disassembler does not know about
    cat("???")
    TRUE
  }
  printCodeArray <- function(){
    #lookup table for argument types -> printer function.
    #Built ONCE per call; the original rebuilt it for every single argument.
    #NOTE: the original also used ifelse() to pick the printer, which errors at
    #runtime because ifelse() cannot replicate closures -- plain if/else is the
    #correct construct for a scalar condition.
    argTypesDump <- c(dumpLabel,
                      if (constantpool) dumpConstantReferenceToArr else dumpConstant,
                      if (constantpool) dumpConstantReferenceToArr else dumpDbgConstant,
                      if (constantpool) dumpConstantReferenceToArr else dumpConstantLabels,
                      dumpValue,
                      dumpValue)
    names(argTypesDump) <- c(
      Opcodes.argtypes$LABEL,
      Opcodes.argtypes$CONSTANT,
      Opcodes.argtypes$CONSTANT_DBG,
      Opcodes.argtypes$CONSTANT_LABEL,
      Opcodes.argtypes$BOOL,
      Opcodes.argtypes$INT)
    #third pass to print result
    selected <- FALSE
    lastExprIndex <- -1
    lastSrcrefsIndex <- -1
    i <- 2
    n <- length(code)
    printedInstructions <- 0
    while( i <= n ) {
      #extract instruction from code array
      instr <- code[[i]]
      instrname <- instr
      #instruction arguments description which is imported also from compiler package
      #contains array in which each parameter describes type of argument
      argdescr <- Opcodes.argdescr[[paste0(instr)]]
      #if the peephole mode is turned on we skip every element before select
      # and all after 5 printed instructions after select
      if( peephole
          && printedInstructions < instrCnt-5
          && (i < select || printedInstructions >= 5) ){
        i <- i + 1 + length(argdescr)
        next
      }
      #this instruction has label pointing to it
      if(labels[[i]] > 0){
        cat(paste0(prefix,labels[[i]],":\n"))
      }
      if(dumpSrcrefs){
        curSrcrefsIndex <- srcrefsIndex[[i]]
        if(curSrcrefsIndex != lastSrcrefsIndex){
          dumpSrcRef(constants[[curSrcrefsIndex + 1 ]])
          lastSrcrefsIndex <- curSrcrefsIndex
        }
      }
      if(dumpExpressions){
        curExprIndex <- expressionsIndex[[i]]
        if(curExprIndex != lastExprIndex){
          dumpExprRef(curExprIndex)
          lastExprIndex <- curExprIndex
        }
      }
      #print prefix ( indentation ) before each instruction
      pr <- paste0(prefix,"   ");
      if(!selected && !is.null(select) && (i - 1) >= select ) {
        #replace beginning of pr ( prefix ) with >>> ( current instruction )
        pr <- paste0(substr(pr, 1 ,nchar(pr)-3), ">>>")
        selected <- TRUE
      }
      cat(pr)
      if(verbose > 0 || constantpool){
        cat(sprintf("%2d: ", i-1))
      }
      if(grepl("^BREAKPOINT[0-9]+\\.OP$", code_breakpoint[[i]])){
        instrname <- paste0("(BR) ", instrname)
      }
      #print instruction ( eg. ADD / SUB ... )
      dumpOp(instrname)
      #iterate over each argument of instruction and print them
      #the arguments are stored inside bytecode just after the instruction
      # so as we loop through instructions we increment the index into
      # the code array ( i<-i+1 )
      j <- 1
      printed <- 0
      while(j <= length(argdescr)){
        if(printed >= 1){
          cat("\t | ")
        }
        i<-i+1
        #extract instruction argument from code array
        v <- code[[i]]
        t <- paste0(argdescr[[j]])
        #list [[ lookup returns NULL for an unknown name, hence the fallback
        dumpFun <- argTypesDump[[t]]
        if(is.null(dumpFun))
          dumpFun <- dumpUnknown
        #printing the argument
        if(dumpFun(v))
          printed <- printed + 1
        j<-j+1
      }
      printedInstructions<-printedInstructions+1
      i<-i+1
      cat("\n")
    }
    cat("\n")
  }
  printConstantArray <- function(){
    #print every entry of the constant pool together with its index
    i <- 0
    n <- length(constants)
    while( i < n ) {
      cat(sprintf("  %2d: ", i))
      dumpConstant(i)
      cat("\n")
      i<-i+1
    }
  }
  if(constantpool){
    cat("-------- code array --------\n")
    printCodeArray()
    cat("------ constant array ------\n")
    printConstantArray()
  }else{
    printCodeArray()
  }
  #returns invisible(x) as the default behavior of print method
  invisible(x)
}
#' Try-catch wrapper over the print.disassembly function
#'
#' for additional instructions, see the print.disassembly function definition
#'
#' @param x Bytecode object to be printed
#' @param ... additional parameters passed to print.disassembly code
#'
#' @export
# Error-tolerant front-end for print.disassembly: on failure it reports the
# problem on stdout and returns the object unchanged instead of aborting.
tryPrint.disassembly <- function(x, ...) {
  report_and_return <- function(err) {
    # Same message as always; the caller still gets `x` back.
    cat(paste(gettext("Error: bytecode dump failed - "), err$message, "at", deparse(err$call), "\n"))
    x
  }
  tryCatch(print.disassembly(x, ...), error = report_and_return)
}
#' Set default verbosity level for bytecode \emph{print} method
#'
#' \code{bcverbose} Set and/or get default verbosity level for bytecode \emph{print} method
#'
#'
#' @param lvl verbosity level ( 0 or 1 or 2) - optional
#'                  if set - sets the default bytecode verbosity level
#' 0 - display only source references ( if they are available, if they aren't print expression references instead )
#' 1 - the same as 0 + display bytecode version and display expression references ( if they are available )
#' 2 - the same as 1 + display every operand's argument ( including ones used just for debugging )
#'
#'
#' @return current verbosity level
#'
#' @examples
#'
#' library(compiler)
#' library(bctools)
#' a <- function(x){
#' r <- 1
#' while(x){
#' r = r + x*x
#' x = x-1
#' }
#' r
#' }
#' bc <- compiler::cmpfun(a)
#'
#' #set default verbosity level
#' bcverbose(2)
#' disassemble(bc)
#'
#' @export
# Combined setter/getter for the default verbosity kept in the package-local
# `conf` environment; always returns the (possibly just updated) level.
bcverbose <- function( lvl=NULL ){
  # A non-NULL argument updates the stored default before it is read back.
  if (!is.null(lvl)) {
    conf$verbosity <- lvl
  }
  conf$verbosity
}
|
##Get solar still water daily production data
setwd("D:/R/Solar Still")
library(lubridate)
## First day of the measurement campaign; earlier rows are discarded below.
data_record <- as.Date("2020-06-05")
SolarWater <- read.csv(file = "Sidewall Interfacial Solar Still Water Production.csv", header = FALSE, stringsAsFactors = FALSE)
## The raw sheet stores one (date, production) column pair per day, all in the
## first row.  Reshape the pairs into rows in a single vectorised step instead
## of growing a matrix with rbind() inside a loop (which copies the whole
## object on every iteration).
n_days <- ncol(SolarWater) / 2
SolarWater_Day <- data.frame(
  Date             = unlist(SolarWater[1, 2 * seq_len(n_days) - 1], use.names = FALSE),
  Water_Production = unlist(SolarWater[1, 2 * seq_len(n_days)],     use.names = FALSE),
  stringsAsFactors = FALSE
)
row.names(SolarWater_Day) <- SolarWater_Day[, 1]
SolarWater_Day$Date <- ymd(SolarWater_Day$Date)
SolarWater_Day <- SolarWater_Day[SolarWater_Day$Date >= data_record, ]
## Raw meter reading -> production; scale factor 4/1000 -- TODO confirm units
SolarWater_Day$Water_Production <- as.numeric(SolarWater_Day$Water_Production)*4/1000
## Energy equivalent of the produced water (division by 1.5) -- TODO confirm constant
SolarWater_Day$Water_Energy <- SolarWater_Day$Water_Production/1.5
##Get solar environment daily data
## (working directory and lubridate are already set up above; the duplicated
## setwd()/library() calls were removed)
SolarEnv_Day <- read.csv(file = "CR1000_BSRN1000_Day201116.csv", skip = 1, stringsAsFactors = FALSE)
SolarEnvUnit_Day <- SolarEnv_Day[1, ]
SolarEnv_Day <- SolarEnv_Day[c(-1, -2), ] ##Delete two rows of unit
SolarEnv_Day$TIMESTAMP <- as.Date(ymd_hms(SolarEnv_Day$TIMESTAMP))
## Shift the logger timestamp back one day -- presumably it marks the end of
## the accumulation period; verify against the data logger manual.
SolarEnv_Day$TIMESTAMP <- SolarEnv_Day$TIMESTAMP - ddays(1)
SolarEnv_Day[, 2:39] <- lapply(SolarEnv_Day[, 2:39], as.numeric)
##Select data column and analysis
datacol <- c("TIMESTAMP", "Global_Energy_Tot", "Direct_Energy_Tot", "Diffuse_Energy_Tot")
SolarData_Day <- SolarEnv_Day[c(SolarEnv_Day$TIMESTAMP >= data_record), datacol]
row.names(SolarData_Day) <- SolarData_Day[, 1]
SolarDataUnit_Day <- SolarEnvUnit_Day[datacol]
SolarData_Day <- merge(SolarData_Day, SolarWater_Day[, c("Date", "Water_Energy")], by.x = "TIMESTAMP", by.y = "Date", all = TRUE)
library(reshape2)
SolarEnergy_Day <- melt(SolarData_Day, id = "TIMESTAMP")
SolarData_Day$Global_Efficiency <- SolarData_Day$Water_Energy/SolarData_Day$Global_Energy_Tot
##SolarData_Day$Direct_Efficiency <- SolarData_Day$Water_Energy/SolarData_Day$Direct_Energy_Tot
SolarData_Day$DirDiffRatio <- SolarData_Day$Direct_Energy_Tot/SolarData_Day$Diffuse_Energy_Tot
## Bin the direct/diffuse ratio into three sky-condition classes.
SolarData_Day$DirDiff <- cut(SolarData_Day$DirDiffRatio, breaks = c(min(SolarData_Day$DirDiffRatio), 0.1, 1, max(SolarData_Day$DirDiffRatio)), labels = c("Overcast (Dir./Diff. < 0.1)", "Cloudy (0.1 < Dir./Diff. < 1)", "Sunny (Dir./Diff. > 1)"))
library(ggplot2)
##fit1 <- lm(Water_Energy ~ Global_Energy_Tot + Direct_Energy_Tot, data = na.omit(SolarData_Day))
##fit2 <- lm(Water_Energy ~ Global_Energy_Tot, data = na.omit(SolarData_Day))
##fit3 <- lm(Water_Energy ~ poly(Global_Energy_Tot, 2), data = na.omit(SolarData_Day))
##fit4 <- lm(Water_Energy ~ Global_Energy_Tot + I(Global_Energy_Tot^2), data = na.omit(SolarData_Day))
fit1 <- lm(Global_Efficiency ~ Direct_Energy_Tot + Diffuse_Energy_Tot, data = na.omit(SolarData_Day))
## fit2 drops rows 30, 33 and 139 -- presumably outliers; verify.
fit2 <- lm(Global_Efficiency ~ Direct_Energy_Tot + Diffuse_Energy_Tot, data = na.omit(SolarData_Day[-c(30, 33, 139), ]))
SolarData_Day$fit_Efficiency <- coefficients(fit2)[1] + coefficients(fit2)[2]*SolarData_Day$Direct_Energy_Tot + coefficients(fit2)[3]*SolarData_Day$Diffuse_Energy_Tot
lmfit2 <- ggplot(na.omit(melt(SolarData_Day[, c("Global_Energy_Tot", "Global_Efficiency", "fit_Efficiency")], id = "Global_Energy_Tot")),
                 aes(Global_Energy_Tot, value*100, color = variable))
lmfit2 + geom_point()
write.csv(SolarData_Day, file = "Sidewall Interfacial Solar Still Daily Water Production.csv")
g <- ggplot(na.omit(SolarData_Day), aes(x = Global_Energy_Tot, y = Water_Energy))
g + geom_point() + geom_smooth(method = "lm") + geom_text(data = na.omit(SolarData_Day), aes(label = TIMESTAMP, size = 1), check_overlap = TRUE)
q <- ggplot(data = SolarData_Day, aes(TIMESTAMP, Global_Efficiency))
q + geom_point()
o <- ggplot(data = na.omit(SolarData_Day), aes(Global_Energy_Tot, Global_Efficiency, color = DirDiff))
o + geom_point() + geom_smooth(method = "lm")
##r <- ggplot(data = SolarData_Day, aes(Direct_Energy_Tot, Global_Efficiency))
##r + geom_point()
p <- ggplot(SolarEnergy_Day, aes(TIMESTAMP, value, fill = variable))
p + geom_bar(stat = 'identity', position='dodge') + labs(x = "Date", y = "Energy/kWh")
##geom_text(data = SolarData_Day, aes(label = Global_Efficiency, position = position_dodge(width = 1), size = 3)) | /Sidewall Interfacial Solar Still Water Production.R | no_license | lzyempire/Solar-Still | R | false | false | 4,439 | r | ##Get solar still water daily production data
setwd("D:/R/Solar Still")
library(lubridate)
data_record <- as.Date("2020-06-05")
SolarWater <- read.csv(file = "Sidewall Interfacial Solar Still Water Production.csv", header = FALSE, stringsAsFactors = FALSE)
SolarWater_Day <- c(SolarWater[1, 1], SolarWater[1, 2])
for(i in 2:(length(SolarWater)/2)){
daydata <- c(SolarWater[1, 2*i-1], SolarWater[1, 2*i])
SolarWater_Day <- rbind(SolarWater_Day, daydata)
}
SolarWater_Day <- as.data.frame(SolarWater_Day, stringsAsFactors = FALSE)
names(SolarWater_Day) <- c("Date", "Water_Production")
row.names(SolarWater_Day) <- SolarWater_Day[, 1]
SolarWater_Day$Date <- ymd(SolarWater_Day$Date)
SolarWater_Day <- SolarWater_Day[SolarWater_Day$Date >= data_record, ]
SolarWater_Day$Water_Production <- as.numeric(SolarWater_Day$Water_Production)*4/1000
SolarWater_Day$Water_Energy <- SolarWater_Day$Water_Production/1.5
##Get solar environment daily data
setwd("D:/R/Solar Still")
library(lubridate)
SolarEnv_Day <- read.csv(file = "CR1000_BSRN1000_Day201116.csv", skip = 1, stringsAsFactors = FALSE)
SolarEnvUnit_Day <- SolarEnv_Day[1, ]
SolarEnv_Day <- SolarEnv_Day[c(-1, -2), ] ##Delete two rows of unit
SolarEnv_Day$TIMESTAMP <- as.Date(ymd_hms(SolarEnv_Day$TIMESTAMP))
SolarEnv_Day$TIMESTAMP <- SolarEnv_Day$TIMESTAMP - ddays(1)
SolarEnv_Day[, 2:39] <- lapply(SolarEnv_Day[, 2:39], as.numeric)
##Select data column and analysis
datacol <- c("TIMESTAMP", "Global_Energy_Tot", "Direct_Energy_Tot", "Diffuse_Energy_Tot")
SolarData_Day <- SolarEnv_Day[c(SolarEnv_Day$TIMESTAMP >= data_record), datacol]
row.names(SolarData_Day) <- SolarData_Day[, 1]
SolarDataUnit_Day <- SolarEnvUnit_Day[datacol]
SolarData_Day <- merge(SolarData_Day, SolarWater_Day[, c("Date", "Water_Energy")], by.x = "TIMESTAMP", by.y = "Date", all = TRUE)
library(reshape2)
SolarEnergy_Day <- melt(SolarData_Day, id = "TIMESTAMP")
SolarData_Day$Global_Efficiency <- SolarData_Day$Water_Energy/SolarData_Day$Global_Energy_Tot
##SolarData_Day$Direct_Efficiency <- SolarData_Day$Water_Energy/SolarData_Day$Direct_Energy_Tot
SolarData_Day$DirDiffRatio <- SolarData_Day$Direct_Energy_Tot/SolarData_Day$Diffuse_Energy_Tot
SolarData_Day$DirDiff <- cut(SolarData_Day$DirDiffRatio, breaks = c(min(SolarData_Day$DirDiffRatio), 0.1, 1, max(SolarData_Day$DirDiffRatio)), labels = c("Overcast (Dir./Diff. < 0.1)", "Cloudy (0.1 < Dir./Diff. < 1)", "Sunny (Dir./Diff. > 1)"))
library(ggplot2)
##fit1 <- lm(Water_Energy ~ Global_Energy_Tot + Direct_Energy_Tot, data = na.omit(SolarData_Day))
##fit2 <- lm(Water_Energy ~ Global_Energy_Tot, data = na.omit(SolarData_Day))
##fit3 <- lm(Water_Energy ~ poly(Global_Energy_Tot, 2), data = na.omit(SolarData_Day))
##fit4 <- lm(Water_Energy ~ Global_Energy_Tot + I(Global_Energy_Tot^2), data = na.omit(SolarData_Day))
fit1 <- lm(Global_Efficiency ~ Direct_Energy_Tot + Diffuse_Energy_Tot, data = na.omit(SolarData_Day))
fit2 <- lm(Global_Efficiency ~ Direct_Energy_Tot + Diffuse_Energy_Tot, data = na.omit(SolarData_Day[-c(30, 33, 139), ]))
SolarData_Day$fit_Efficiency <- coefficients(fit2)[1] + coefficients(fit2)[2]*SolarData_Day$Direct_Energy_Tot + coefficients(fit2)[3]*SolarData_Day$Diffuse_Energy_Tot
lmfit2 <- ggplot(na.omit(melt(SolarData_Day[, c("Global_Energy_Tot", "Global_Efficiency", "fit_Efficiency")], id = "Global_Energy_Tot")),
aes(Global_Energy_Tot, value*100, color = variable))
lmfit2 + geom_point()
write.csv(SolarData_Day, file = "Sidewall Interfacial Solar Still Daily Water Production.csv")
g <- ggplot(na.omit(SolarData_Day), aes(x = Global_Energy_Tot, y = Water_Energy))
g + geom_point() + geom_smooth(method = "lm") + geom_text(data = na.omit(SolarData_Day), aes(label = TIMESTAMP, size = 1), check_overlap = TRUE)
q <- ggplot(data = SolarData_Day, aes(TIMESTAMP, Global_Efficiency))
q + geom_point()
o <- ggplot(data = na.omit(SolarData_Day), aes(Global_Energy_Tot, Global_Efficiency, color = DirDiff))
o + geom_point() + geom_smooth(method = "lm")
##r <- ggplot(data = SolarData_Day, aes(Direct_Energy_Tot, Global_Efficiency))
##r + geom_point()
p <- ggplot(SolarEnergy_Day, aes(TIMESTAMP, value, fill = variable))
p + geom_bar(stat = 'identity', position='dodge') + labs(x = "Date", y = "Energy/kWh")
##geom_text(data = SolarData_Day, aes(label = Global_Efficiency, position = position_dodge(width = 1), size = 3)) |
library(chron)
library(grid)
## Read the per-day page counts (V1 = date string, V2 = number of pages)
## and anchor each date at noon so it becomes a chron datetime.
ts<-read.table('pages_per_day.txt')
ts$V1<-chron(dates=as.character(ts$V1),
             times="12:00:00",
             format=c(dates = "y-m-d", times = "h:m:s"))
pdf(file="pages_per_day.pdf",
    width=8,height=6,pointsize=12)
## Grid viewport stack: full device -> plot region with margins -> data coords.
pushViewport(viewport(width=1,height=1,x=0,y=0,
                      just=c("left","bottom"),name="vp_main"))
pushViewport(plotViewport(margins=c(5,5,1,1)))
pushViewport(dataViewport(ts$V1,c(0,max(ts$V2))))
## Axes and labels
tics<-pretty(ts$V1,n=7)
grid.xaxis(at=tics,label=attr(tics,'label'),main=TRUE)
grid.text('Date',y=unit(-3,"lines"))
grid.yaxis(main=TRUE)
grid.text('No. of pages',x=unit(-3.5,"lines"), rot=90)
## Blue line with point markers for the daily counts
gp_blue <- gpar(col=rgb(0,0,1,1),fill=rgb(0,0,1,1))
grid.lines(x=unit(ts$V1,"native"),y=unit(ts$V2,'native'),
           gp=gp_blue)
grid.points(x=unit(ts$V1,"native"),y=unit(ts$V2,'native'),pch=20,
            size=unit(3,"native"),gp=gp_blue)
popViewport()
popViewport()
upViewport()
## Close the PDF device -- the original script never called dev.off(), so the
## output file was left unfinalised.
dev.off()
| /monitoring/pages_per_day/plot_pages_per_day.R | no_license | oldweather/oldWeather5 | R | false | false | 950 | r | library(chron)
library(grid)
ts<-read.table('pages_per_day.txt')
ts$V1<-chron(dates=as.character(ts$V1),
times="12:00:00",
format=c(dates = "y-m-d", times = "h:m:s"))
pdf(file="pages_per_day.pdf",
width=8,height=6,pointsize=12)
pushViewport(viewport(width=1,height=1,x=0,y=0,
just=c("left","bottom"),name="vp_main"))
pushViewport(plotViewport(margins=c(5,5,1,1)))
pushViewport(dataViewport(ts$V1,c(0,max(ts$V2))))
tics<-pretty(ts$V1,n=7)
grid.xaxis(at=tics,label=attr(tics,'label'),main=T)
grid.text('Date',y=unit(-3,"lines"))
grid.yaxis(main=T)
grid.text('No. of pages',x=unit(-3.5,"lines"), rot=90)
gp_blue = gpar(col=rgb(0,0,1,1),fill=rgb(0,0,1,1))
grid.lines(x=unit(ts$V1,"native"),y=unit(ts$V2,'native'),
gp=gp_blue)
grid.points(x=unit(ts$V1,"native"),y=unit(ts$V2,'native'),pch=20,
size=unit(3,"native"),gp=gp_blue)
popViewport()
popViewport()
upViewport()
|
# One-sided Fisher's exact (hypergeometric) tests for overlap enrichment.
# Each test builds a 2x2 contingency table as
#   matrix(c(inB_not_A, in_neither, overlap, inA_not_overlap), 2, 2)
# and uses alternative='l'.
# NOTE(review): totalpop / sample1 / sample2 are assigned but never used --
# the matrix entries repeat the numbers as literals, so the variables serve
# only as documentation and must be kept in sync by hand.
#Hyper DMRs in G45-TKO genes associated with 2C like genes
totalpop <- 24096 #Background
sample1 <- 6817 #(Genes with HyperMe using GREAT standard parameter)
sample2 <- 525 #(Genes 2C)
fisher.test(matrix(c(6817-178,24096-6817-525,178,525-178), 2, 2), alternative='l')
#Down in 2c G45 DKO and Up at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 50 #(Genes Diff Expressed Down in 2C Gadd DKO)
sample2 <- 5265 #(Genes with 2C up)
# NOTE(review): sample2 is declared as 5265 but the literal below uses 5262 --
# confirm which count is correct.
fisher.test(matrix(c(5262-22,25256-5265-50,22,50-22), 2, 2), alternative='l')
#Up in 2c G45 DKO and Up at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 54 #(Genes Diff Expressed Up in 2C Gadd DKO)
sample2 <- 5265 #(Genes with 2C up)
# NOTE(review): same 5262 vs 5265 discrepancy as above.
fisher.test(matrix(c(5262-3,25256-5265-54,3,54-3), 2, 2), alternative='l')
#Down in 2c G45 DKO and Down at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 50 #(Genes Diff Expressed Down in 2C Gadd DKO)
sample2 <- 2325 #(Genes with 2C down)
fisher.test(matrix(c(2325-7,25256-2325-50,7,50-7), 2, 2), alternative='l')
#Up in 2c G45 DKO and Down at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 54 #(Genes Diff Expressed Up in 2C Gadd DKO)
sample2 <- 2325 #(Genes with 2C down)
fisher.test(matrix(c(2325-17,25256-2325-54,17,54-17), 2, 2), alternative='l')
#Down in 2c G45 DKO and 2C-like genes (Fig. 6D)
totalpop <- 25256 #Total genes background
sample1 <- 50 #(Genes Diff Expressed Down in 2C Gadd DKO)
sample2 <- 525 #(2C like genes G45-DKO)
fisher.test(matrix(c(525-7,25256-525-50,7,50-7), 2, 2), alternative='l')
| /R/HyperGeometricTest.r | no_license | MDebasish/DNA-Demethylation-Gadd45 | R | false | false | 1,584 | r | #Hyper DMRs in G45-TKO genes associated with 2C like genes
totalpop <- 24096 #Background
sample1 <- 6817 #(Genes with HyperMe using GREAT standard paramenter)
sample2 <- 525 #(Genes 2C)
fisher.test(matrix(c(6817-178,24096-6817-525,178,525-178), 2, 2), alternative='l')
#Down in 2c G45 DKO and Up at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 50 #(Genes Diff Expressed Down in 2C Gadd DKO)
sample2 <- 5265 #(Genes with 2C up)
fisher.test(matrix(c(5262-22,25256-5265-50,22,50-22), 2, 2), alternative='l')
#Up in 2c G45 DKO and Up at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 54 #(Genes Diff Expressed Up in 2C Gadd DKO)
sample2 <- 5265 #(Genes with 2C up)
fisher.test(matrix(c(5262-3,25256-5265-54,3,54-3), 2, 2), alternative='l')
#Down in 2c G45 DKO and Down at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 50 #(Genes Diff Expressed Down in 2C Gadd DKO)
sample2 <- 2325 #(Genes with 2C down)
fisher.test(matrix(c(2325-7,25256-2325-50,7,50-7), 2, 2), alternative='l')
#Up in 2c G45 DKO and Down at 2C state
totalpop <- 25256 #Total genes background
sample1 <- 54 #(Genes Diff Expressed Up in 2C Gadd DKO)
sample2 <- 2325 #(Genes with 2C down)
fisher.test(matrix(c(2325-17,25256-2325-54,17,54-17), 2, 2), alternative='l')
#Down in 2c G45 DKO and 2C-like genes (Fig. 6D)
totalpop <- 25256 #Total genes background
sample1 <- 50 #(Genes Diff Expressed Down in 2C Gadd DKO)
sample2 <- 525 #(2C like genes G45-DKO)
fisher.test(matrix(c(525-7,25256-525-50,7,50-7), 2, 2), alternative='l')
|
## Package installing
## NOTE: installing on every run is slow; consider commenting these out after
## the first run.
install.packages(c("tidyr", "devtools"))
install.packages("stringr")
library(tidyr)
library(devtools)
library(stringr)
## Import the data & check the variable names and types.
sample <- read.csv("sample100000.csv")
str(sample) ## class is factor for each datetime data column.
## Daytime data
##  - Create new variable: Daypart (factor)
##  - Create new variable: Date
## Start with changing datetime columns' classes to time
sample$pickup_datetime <- strptime(sample$pickup_datetime,"%Y-%m-%d %H:%M:%S")
sample$dropoff_datetime <- strptime(sample$dropoff_datetime,"%Y-%m-%d %H:%M:%S")
## Add the trip duration (minutes, 2 decimals) as a new variable
sample$duration <- as.numeric(difftime(sample$dropoff_datetime,sample$pickup_datetime, units = "mins"))
sample$duration <- round(sample$duration, digits = 2)
## Use the pickup hour for dayparting based on tv & radio broadcast dayparts.
## The five ranges are disjoint, so assignment order does not matter.
sample$pickup_hours = as.numeric(format(sample$pickup_datetime, "%H"))
sample$pickup_daypart[sample$pickup_hours >= 6 & sample$pickup_hours < 10 ] <- "morning drive time"
sample$pickup_daypart[sample$pickup_hours >= 10 & sample$pickup_hours < 15 ] <- "midday"
sample$pickup_daypart[sample$pickup_hours >= 15 & sample$pickup_hours < 19 ] <- "afternoon drive time"
sample$pickup_daypart[sample$pickup_hours >= 19 ] <- "evening"
sample$pickup_daypart[sample$pickup_hours >= 0 & sample$pickup_hours < 6 ] <- "overnight"
sample$pickup_hours <- NULL
## Keep date as a separate column; used to join each day's climate info.
sample$date = format(sample$pickup_datetime, "%Y-%m-%d")
# Read the climate data. This data includes average temperature measured in Fahrenheit for each day in 2015. Also there are 7 dummies for the categorical weather type variable.
climate_data <- read.csv("nyc-daily-weather.csv")
climate_data[is.na(climate_data)] <- 0 # Fix the NA's in climate data
summary(climate_data)
# W02, W04, W06 will not be useful.
# W01 and W08 are very similar. W01 = Fog, ice fog, or freezing fog (may include heavy fog) W08 = Smoke or haze. So we can start with keeping one of them.
# Average temperature column is empty for some reason so did not include that column. We can create a new average column based on average of max-min temperatures.
# First column is location identifier so we can remove that as well.
climate_data <- subset(climate_data, , -c(1,8,9,10,11)) #Drop the columns that we are not going to use.
climate_data$temperature <- (climate_data$TMAX + climate_data$TMIN) / 2 #Add the average temp. column.
climate_data <- subset(climate_data, , -c(4,5))
colnames(climate_data)[1] <- "date"
colnames(climate_data)[2] <- "precipitation"
colnames(climate_data)[3] <- "snowfall"
colnames(climate_data)[4] <- "fog"
str(climate_data)
climate_data$date <- strptime(climate_data$date,"%Y-%m-%d")
## BUG FIX: the original assigned the `factor` FUNCTION itself to the column
## (climate_data$fog <- factor), replacing the data with a closure.  Convert
## the existing 0/1 dummy column to a factor instead.
climate_data$fog <- as.factor(climate_data$fog)
str(climate_data) # Now the dataset is ready to be merged.
# Join the climate data (GHCN (Global Historical Climatology Network))
sample_merged <- merge(x = sample, y = climate_data, by = "date", all.x = TRUE)
# Household income data in neighborhood level is added later; write the current
# merged table for some manual cleaning of neighborhood names first.
write.csv(sample_merged, file = "sample_merged.csv")
| /data-understanding/Data preparation with r/Data understanding & preparation.R | no_license | Loncar5/taxi-nyc-tips-amount | R | false | false | 3,318 | r | ## Package installing
install.packages(c("tidyr", "devtools"))
install.packages("stringr")
library(tidyr)
library(devtools)
library(stringr)
## Import the data & Check the varible names and types.
sample <- read.csv("sample100000.csv")
str(sample) ## class is factor for each datetime data column.
## Daytime data
## - Create new variable: Daypart (factor)
## - Create new variable: Date
## Start with changing datetime columns' classes to time
sample$pickup_datetime <- strptime(sample$pickup_datetime,"%Y-%m-%d %H:%M:%S")
sample$dropoff_datetime <- strptime(sample$dropoff_datetime,"%Y-%m-%d %H:%M:%S")
## Add the trip duration as a new variable
sample$duration <- as.numeric(difftime(sample$dropoff_datetime,sample$pickup_datetime, units = "mins"))
sample$duration <- round(sample$duration, digits = 2)
## I will use the pickup hour for dayparting based on tv & radio broadcast dayparts.
sample$pickup_hours = as.numeric(format(sample$pickup_datetime, "%H"))
sample$pickup_daypart[sample$pickup_hours >= 6 & sample$pickup_hours < 10 ] <- "morning drive time"
sample$pickup_daypart[sample$pickup_hours >= 10 & sample$pickup_hours < 15 ] <- "midday"
sample$pickup_daypart[sample$pickup_hours >= 15 & sample$pickup_hours < 19 ] <- "afternoon drive time"
sample$pickup_daypart[sample$pickup_hours >= 19 ] <- "evening"
sample$pickup_daypart[sample$pickup_hours >= 0 & sample$pickup_hours < 6 ] <- "overnight"
sample$pickup_hours <- NULL
## I want to keep date as a seperate column. This will be used to pull each date's climate info from climate data.
sample$date = format(sample$pickup_datetime, "%Y-%m-%d")
# Read the climate data. This data includes average temperature measured in Fahrenheit for each day in 2015. Also There are 7 dummies for categorical weather type variable.
climate_data <- read.csv("nyc-daily-weather.csv")
climate_data[is.na(climate_data)] <- 0 # Fix the NA's in climate data
summary(climate_data)
# W02, W04, W06 will not be useful.
# W01 and W08 are very similar. W01 = Fog, ice fog, or freezing fog (may include heavy fog) W08 = Smoke or haze. So we can start with keeping one of them.
# Average temperature column is empty for some reason so did not include that column. We can create a new average column based on average of max-min temperatures.
# First column is location identifier so we can remove that as well.
climate_data <- subset(climate_data, , -c(1,8,9,10,11)) #Drop the columns that we are not going to use.
climate_data$temperature <- (climate_data$TMAX + climate_data$TMIN) / 2 #Add the average temp. column.
climate_data <- subset(climate_data, , -c(4,5))
colnames(climate_data)[1] <- "date"
colnames(climate_data)[2] <- "precipitation"
colnames(climate_data)[3] <- "snowfall"
colnames(climate_data)[4] <- "fog"
str(climate_data)
climate_data$date <- strptime(climate_data$date,"%Y-%m-%d")
climate_data$fog <- factor
str(climate_data) # Now the dataset is ready to be merged.
# Join the climate data (GHCN (Global Historical Climatology Network))
sample_merged <- merge(x = sample, y = climate_data, by = "date", all.x = TRUE)
# I want to add household income data in neighborhood level. I will write the current merged table and will do some manual cleaning stuff for neigborhood names.
write.csv(sample_merged, file = "sample_merged.csv")
|
# Calculate the Synonymous Codon Usage Order (SCUO) index for each gene.
# Used as a substitute for Phi in the without phi case
# See: http://www.tandfonline.com/doi/abs/10.1080/03081070500502967
#
# codon.counts: data frame with an ORF id column and, in columns 3:8, per-row
#               synonymous-codon counts for one amino acid (NA = codon does
#               not exist for that amino acid) -- assumed layout, TODO confirm.
# ncores:      number of cores handed to parallel::mclapply.
# Returns a data frame with columns ID (gene name) and SCUO (weighted average
# SCUO over the gene's amino acids).
calc_scuo_values <- function(codon.counts, ncores)
{
	# Get the gene ids
	gene.names <- unique(codon.counts$ORF)
	codons <- codon.counts[, 3:8]
	# Compute SCUO per gene in parallel (requires the parallel package to be
	# attached by the caller -- mclapply is not referenced via parallel:: here)
	scuo <- unlist(
	 mclapply(gene.names,
		function(i) # i'th gene
		{
			gene <- which(codon.counts$ORF == i)
			codons <- codons[gene, ]
			# Sum and Prob of codons within an AA within a gene
			sums <- rowSums(codons, na.rm=T)
			p_ij <- sweep(codons, 1, sums, "/")
			# Shannon's Entropy for each AA within each gene H_i
			# (0 * log(0) yields NaN, which na.rm=T drops -- treating the
			# 0*log(0) term as 0, as the entropy definition requires)
			H_i <- -rowSums(p_ij * log(p_ij), na.rm=T)
			# Maximum possible entropy = log(number of synonymous codons)
			Hmax_i <- -log(1/rowSums(!is.na(codons)))
			# SCUO for each amino acid: relative departure from max entropy
			O_i <- (Hmax_i - H_i)/Hmax_i
			# Composition ratio of i'th amino acid
			denom <- sum(sums)
			F_i <- rowSums(codons/denom, na.rm=T)
			# Average SCUO for each gene, weighted by amino-acid frequency
			O <- sum(F_i*O_i)
			return( O )
		},
		mc.cores = ncores, mc.preschedule = TRUE)
	)
	# Return a two-column data frame: gene id and its SCUO value
	output.df <- as.data.frame(gene.names)
	output.df$scuo <- scuo
	names(output.df) <- c("ID", "SCUO")
	return( output.df )
}
| /R/Wei-Chen/scuo.R | no_license | jeremyrogers/CES | R | false | false | 1,383 | r | # Calculate the Synonymous Codon Usage Order (SCUO) index for each gene.
# Used as a substitute for Phi in the without phi case
# See: http://www.tandfonline.com/doi/abs/10.1080/03081070500502967
calc_scuo_values <- function(codon.counts, ncores)
{
  # One SCUO value per distinct gene id in `codon.counts$ORF`; codon counts
  # are taken from columns 3:8.  Returns a data frame with ID and SCUO.
  ids <- unique(codon.counts$ORF)
  count.block <- codon.counts[, 3:8]
  # SCUO for a single gene id.
  scuo_one <- function(id)
  {
    cnt <- count.block[which(codon.counts$ORF == id), ]
    # Per-amino-acid totals and relative codon frequencies.
    aa.totals <- rowSums(cnt, na.rm = TRUE)
    freq <- sweep(cnt, 1, aa.totals, "/")
    # Observed Shannon entropy and its maximum per amino acid.
    entropy <- -rowSums(freq * log(freq), na.rm = TRUE)
    entropy.max <- -log(1/rowSums(!is.na(cnt)))
    order.idx <- (entropy.max - entropy)/entropy.max
    # Composition-weighted average over amino acids.
    weight <- rowSums(cnt/sum(aa.totals), na.rm = TRUE)
    sum(weight * order.idx)
  }
  scuo.vals <- unlist(mclapply(ids, scuo_one,
                               mc.cores = ncores, mc.preschedule = TRUE))
  # Build the output table.
  out <- as.data.frame(ids)
  out$scuo <- scuo.vals
  names(out) <- c("ID", "SCUO")
  out
}
|
# For each lockdown mesh block, find nearby parkland (bucketed by geohash
# cell) and compute the intersection between the block's `max_dist`-km buffer
# and that parkland.  Uses helpers not visible here: dplyr/tidyr, sf (st_*),
# gh_encode/gh_neighbours (geohash), clgeo_Clean (cleangeo) and
# calc_intersection() defined below, so argument shapes are assumptions:
#   df_mesh_centroids: MB_CODE16 + mc_lat/mc_lon centroids (assumed)
#   df_mesh_lockdown:  mesh blocks of interest, keyed by MB_CODE16
#   df_mesh_detail:    MB_CODE16 -> MB_CATEGORY_NAME_2016 lookup
#   map_mesh:          mesh polygons (MB_CODE16 + geometry)
#   max_dist:          search radius in km (buffer uses max_dist * 1000 m)
calculate_intermesh_distances <- function( df_mesh_centroids, df_mesh_lockdown, df_mesh_detail, map_mesh, max_dist=5) {
# Tag each centroid with its category and a geohash cell (precision 4 when
# max_dist < 5, otherwise 5) plus the neighbouring cells.
df_mesh_centroids %>%
inner_join(df_mesh_detail %>% select(MB_CODE16,
MB_CATEGORY_NAME_2016), by='MB_CODE16') %>%
mutate( gh = gh_encode(mc_lat, mc_lon, ifelse( max_dist < 5, 4, 5) )) %>%
mutate(ghn = gh_neighbours(gh))%>%
# flatten the neighbour list column into plain gh* columns
do.call(data.frame, .) %>%
as_tibble() %>%
rename(lon=mc_lon, lat=mc_lat) %>%
{ . } -> df_gh
# we only want the park neighbours that are within melbourne
# All geohash cells (own + neighbours) touched by a lockdown mesh block.
df_gh %>%
inner_join( df_mesh_lockdown, by='MB_CODE16') %>%
select( starts_with('gh') ) %>%
pivot_longer(starts_with('gh'), names_to=NULL, values_to ='gh_group') %>%
distinct() %>%
{ . } -> df_from_mc_neighbours
# Parkland polygons combined per geohash group, repaired with clgeo_Clean()
# and reprojected to EPSG 3577 (metric; presumably Australian Albers).
map_mesh %>%
select( MB_CODE16, geometry) %>%
inner_join( df_gh %>%
filter( MB_CATEGORY_NAME_2016=='Parkland') , by='MB_CODE16' ) %>%
pivot_longer(starts_with('gh'), names_to=NULL, values_to ='gh_group') %>%
select( gh_group, geometry ) %>%
inner_join( df_from_mc_neighbours, by='gh_group') %>%
st_as_sf() %>%
group_by(gh_group) %>%
summarise(geometry = st_combine(geometry), .groups='drop') %>%
as_Spatial() %>%
clgeo_Clean() %>%
st_as_sf() %>%
st_transform( 3577) %>%
# data.frame() %>%
# as_tibble() %>%
{ . } -> df_to_parks
# Lockdown blocks as points (crs 4283) buffered to max_dist km circles.
df_gh %>%
inner_join( df_mesh_lockdown, by='MB_CODE16') %>%
select( -starts_with('mc'), -starts_with('ghn') ) %>%
rename(gh_group = gh ) %>%
rename( from_lat = lat, from_lon=lon) %>%
st_as_sf( coords = c("from_lon", "from_lat"), crs = 4283) %>%
st_transform(3577) %>%
mutate( circle = st_buffer( geometry, dist = max_dist * 1000) ) %>%
{ . } -> df_from_mc
# Nest the circles per geohash group ready for the per-group intersection.
df_from_mc %>%
st_drop_geometry() %>%
select(-lga_name, -MB_CATEGORY_NAME_2016) %>%
# head(100) %>%
group_by( gh_group ) %>%
nest( data=c(MB_CODE16, circle )) %>%
ungroup() %>%
{ . } -> df_pre_intersect
# Intersect each group's circles with its park geometry (list column).
df_pre_intersect %>%
inner_join( df_to_parks, by='gh_group' ) %>%
# head(2) %>%
# filter(gh_group=='r1px2' ) %>%
rowwise() %>%
mutate(areas = calc_intersection( data, geometry, gh_group)) %>%
ungroup() %>%
{ . } -> df_final
df_final
}
# Intersect one geohash group's buffered circles (`circle`: nested tibble of
# MB_CODE16 + circle geometry, produced by the nest() in
# calculate_intermesh_distances) with the group's combined park geometry.
# Returns the per-piece attribute table wrapped in list() so it can sit in a
# list column of the rowwise mutate above.
calc_intersection = function( circle, geometry, gh_group) {
# progress/debug trace of the group being processed
warning(gh_group)
print(gh_group)
# Repair the park geometry: slight simplification plus the zero-width
# buffer trick (commonly used to fix invalid/self-intersecting polygons),
# then back to sf in EPSG 3577.
st_geometry(geometry) %>%
st_transform( 3577) %>%
as_Spatial() %>%
gSimplify( tol = 0.00001) %>%
gBuffer( byid=TRUE, width=0) %>%
st_as_sf() %>%
st_transform( 3577) %>%
{ . } -> a
circle %>%
st_as_sf() %>%
st_intersection( a ) %>%
# NOTE(review): st_area(circle) measures the whole buffer circle, not the
# clipped intersection piece -- if overlap area was intended this should
# probably be the area of the intersected geometry.  Confirm.
mutate( area = st_area(circle)) %>%
st_drop_geometry() %>%
list()
}
| /R/calculate_intermesh_distances.R | no_license | dewoller/greenspace_1km | R | false | false | 2,683 | r | calculate_intermesh_distances <- function( df_mesh_centroids, df_mesh_lockdown, df_mesh_detail, map_mesh, max_dist=5) {
# Body of the duplicated calculate_intermesh_distances definition (the
# signature sits on the dataset-metadata line above).  Stage 1: geohash-tag
# each centroid and flatten the neighbour cells.
df_mesh_centroids %>%
inner_join(df_mesh_detail %>% select(MB_CODE16,
MB_CATEGORY_NAME_2016), by='MB_CODE16') %>%
mutate( gh = gh_encode(mc_lat, mc_lon, ifelse( max_dist < 5, 4, 5) )) %>%
mutate(ghn = gh_neighbours(gh))%>%
do.call(data.frame, .) %>%
as_tibble() %>%
rename(lon=mc_lon, lat=mc_lat) %>%
{ . } -> df_gh
# we only want the park neighbours that are within melbourne
df_gh %>%
inner_join( df_mesh_lockdown, by='MB_CODE16') %>%
select( starts_with('gh') ) %>%
pivot_longer(starts_with('gh'), names_to=NULL, values_to ='gh_group') %>%
distinct() %>%
{ . } -> df_from_mc_neighbours
# Parkland combined per geohash group, repaired and reprojected (EPSG 3577).
map_mesh %>%
select( MB_CODE16, geometry) %>%
inner_join( df_gh %>%
filter( MB_CATEGORY_NAME_2016=='Parkland') , by='MB_CODE16' ) %>%
pivot_longer(starts_with('gh'), names_to=NULL, values_to ='gh_group') %>%
select( gh_group, geometry ) %>%
inner_join( df_from_mc_neighbours, by='gh_group') %>%
st_as_sf() %>%
group_by(gh_group) %>%
summarise(geometry = st_combine(geometry), .groups='drop') %>%
as_Spatial() %>%
clgeo_Clean() %>%
st_as_sf() %>%
st_transform( 3577) %>%
# data.frame() %>%
# as_tibble() %>%
{ . } -> df_to_parks
# Lockdown blocks as buffered circles of max_dist km.
df_gh %>%
inner_join( df_mesh_lockdown, by='MB_CODE16') %>%
select( -starts_with('mc'), -starts_with('ghn') ) %>%
rename(gh_group = gh ) %>%
rename( from_lat = lat, from_lon=lon) %>%
st_as_sf( coords = c("from_lon", "from_lat"), crs = 4283) %>%
st_transform(3577) %>%
mutate( circle = st_buffer( geometry, dist = max_dist * 1000) ) %>%
{ . } -> df_from_mc
# Nest circles per geohash group, then intersect with the park geometry.
df_from_mc %>%
st_drop_geometry() %>%
select(-lga_name, -MB_CATEGORY_NAME_2016) %>%
# head(100) %>%
group_by( gh_group ) %>%
nest( data=c(MB_CODE16, circle )) %>%
ungroup() %>%
{ . } -> df_pre_intersect
df_pre_intersect %>%
inner_join( df_to_parks, by='gh_group' ) %>%
# head(2) %>%
# filter(gh_group=='r1px2' ) %>%
rowwise() %>%
mutate(areas = calc_intersection( data, geometry, gh_group)) %>%
ungroup() %>%
{ . } -> df_final
df_final
}
# Intersect one group's buffered circles with its (repaired) park geometry;
# result is wrapped in list() for use in a list column.
calc_intersection = function( circle, geometry, gh_group) {
# debug/progress trace
warning(gh_group)
print(gh_group)
# geometry repair: simplify + zero-width buffer, back to sf/EPSG 3577
st_geometry(geometry) %>%
st_transform( 3577) %>%
as_Spatial() %>%
gSimplify( tol = 0.00001) %>%
gBuffer( byid=TRUE, width=0) %>%
st_as_sf() %>%
st_transform( 3577) %>%
{ . } -> a
circle %>%
st_as_sf() %>%
st_intersection( a ) %>%
# NOTE(review): st_area(circle) is the full circle's area, not the area of
# the intersection piece -- verify the intent.
mutate( area = st_area(circle)) %>%
st_drop_geometry() %>%
list()
}
|
# Course project: merge the UCI HAR train/test sets, keep the mean/std
# measurements, and build per-subject/activity averages.
# To run: source("run_analysis.R") from the folder containing "UCI HAR Dataset".
library("dplyr")
# BUG FIX: "UCI\ HAR\ Dataset" contains "\ ", an invalid escape that is a
# parse error in R; spaces need no escaping inside a quoted string.
setwd("UCI HAR Dataset")
# Feature names used to label the measurement columns.
var_names = read.table("features.txt", header = FALSE)
#read in and manipulate the test data
X_test = read.table("test/X_test.txt", header = FALSE)
Y_test = read.table("test/Y_test.txt", header = FALSE)
sub_test = read.table("test/subject_test.txt", header = FALSE)
colnames(X_test) = var_names[,2]
colnames(Y_test) = c("Activity")
colnames(sub_test) = c("Subject")
total_test = cbind(sub_test,Y_test,X_test)
#read in and manipulate the training data
X_train = read.table("train/X_train.txt", header = FALSE)
Y_train = read.table("train/Y_train.txt", header = FALSE)
sub_train = read.table("train/subject_train.txt", header = FALSE)
colnames(X_train) = var_names[,2]
colnames(Y_train) = c("Activity")
colnames(sub_train) = c("Subject")
total_train = cbind(sub_train,Y_train,X_train)
# Merge test + train, then map activity codes 1..6 to descriptive labels.
total = rbind(total_test, total_train)
total[,2] = factor(total[,2],
labels = c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS",
"SITTING","STANDING","LAYING"))
#let's do some clean up
rm(total_train, total_test)
rm(sub_test, sub_train, X_test, X_train,Y_test, Y_train)
rm(var_names)
#extract variables for mean and stdev
# NOTE(review): "mean|std" also matches meanFreq() columns; the hard-coded
# [,3:81] slice below assumes exactly this match set -- verify.
total_meansStd = total[,grepl("mean|std",names(total))]
#add the subject/activity columns back
total_meansStd = cbind(total[,1:2],total_meansStd)
#create summary data set - two methods of getting summaries, differ in output format
# (summarise_each()/funs() are deprecated in current dplyr; kept for
# behavioural compatibility)
summary_set_1 = total_meansStd %>% group_by(Subject,Activity) %>%
summarise_each(funs(mean))
summary_set_2 = aggregate(total_meansStd[,3:81],
by = list(total_meansStd$Activity,total_meansStd$Subject),
FUN = mean)
| /run_analysis.R | no_license | swuenschel/GettingAndCleaningDataProject | R | false | false | 1,908 | r | #to run this function type source("run_analysis.R")
# Merge the UCI HAR train/test sets, keep the mean/std measurements, and
# build per-subject/activity averages (duplicated copy of run_analysis.R).
library("dplyr")
# BUG FIX: "UCI\ HAR\ Dataset" contains "\ ", an invalid escape that is a
# parse error in R; spaces need no escaping inside a quoted string.
setwd("UCI HAR Dataset")
# Feature names used to label the measurement columns.
var_names = read.table("features.txt", header = FALSE)
#read in and manipulate the test data
X_test = read.table("test/X_test.txt", header = FALSE)
Y_test = read.table("test/Y_test.txt", header = FALSE)
sub_test = read.table("test/subject_test.txt", header = FALSE)
colnames(X_test) = var_names[,2]
colnames(Y_test) = c("Activity")
colnames(sub_test) = c("Subject")
total_test = cbind(sub_test,Y_test,X_test)
#read in and manipulate the training data
X_train = read.table("train/X_train.txt", header = FALSE)
Y_train = read.table("train/Y_train.txt", header = FALSE)
sub_train = read.table("train/subject_train.txt", header = FALSE)
colnames(X_train) = var_names[,2]
colnames(Y_train) = c("Activity")
colnames(sub_train) = c("Subject")
total_train = cbind(sub_train,Y_train,X_train)
# Merge test + train, then map activity codes 1..6 to descriptive labels.
total = rbind(total_test, total_train)
total[,2] = factor(total[,2],
labels = c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS",
"SITTING","STANDING","LAYING"))
#let's do some clean up
rm(total_train, total_test)
rm(sub_test, sub_train, X_test, X_train,Y_test, Y_train)
rm(var_names)
#extract variables for mean and stdev
# NOTE(review): "mean|std" also matches meanFreq() columns; the hard-coded
# [,3:81] slice below assumes exactly this match set -- verify.
total_meansStd = total[,grepl("mean|std",names(total))]
#add the subject/activity columns back
total_meansStd = cbind(total[,1:2],total_meansStd)
#create summary data set - two methods of getting summaries, differ in output format
# (summarise_each()/funs() are deprecated in current dplyr; kept for
# behavioural compatibility)
summary_set_1 = total_meansStd %>% group_by(Subject,Activity) %>%
summarise_each(funs(mean))
summary_set_2 = aggregate(total_meansStd[,3:81],
by = list(total_meansStd$Activity,total_meansStd$Subject),
FUN = mean)
|
testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 1.45474215376015e+135, 3.56441595774554e+114, -2.64525441665141e+303, -9.52682579807939e+139, -3.98397314590772e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888184, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::Photoperiod,testlist)
str(result) | /meteor/inst/testfiles/Photoperiod/AFL_Photoperiod/Photoperiod_valgrind_files/1615768638-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 683 | r | testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 1.45474215376015e+135, 3.56441595774554e+114, -2.64525441665141e+303, -9.52682579807939e+139, -3.98397314590772e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888184, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::Photoperiod,testlist)
str(result) |
# ---- Setup: install/load packages and scrape the BNF drug index ----
# NOTE(review): installing packages at startup is a side effect; fine for a
# personal app, undesirable on a shared server.
list.of.packages <- c("shiny", "rvest", "stringr", "tidyverse", "stringdist")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(shiny)
library(rvest)
library(stringr)
library(tidyverse)
library(stringdist)
options(warn = 0)
# Scrape drug names from the BNF index page.
drug_list_url <- "https://bnf.nice.org.uk/drug/"
drugpage <- read_html(drug_list_url)
drugs <- html_text(html_nodes(drugpage, 'span'))
# first two spans are not drug names
drugs <- drugs[3:length(drugs)]
# Normalise names into the slug form used in the medicinal-forms URLs.
drugs <- tolower(drugs)
drugs <- str_replace_all(drugs, " ", "-")
drugs <- str_replace_all(drugs, ",", "")
drugs <- str_replace_all(drugs, "é", "e")
drugs <- str_replace(drugs, "\\(", "")
drugs <- str_replace(drugs, "\\)", "")
drugs <- str_replace(drugs, "'", "")
# NOTE(review): this pattern is a regex, so "(rh0)" is a capture group and
# the call matches/replaces "d-rh0-" with itself (a no-op); matching a
# literal "d-(rh0)-" would need fixed() or escaped parens.  Confirm intent.
drugs <- str_replace_all(drugs, "d-(rh0)-", "d-rh0-")
drugs <- sub("noradrenaline/norepinephrine", "noradrenalinenorepinephrine", drugs)
drugs <- str_replace(drugs, "enaline/epinephr", "enalineepinephr")
# strip a trailing hyphen, then drop specific index positions (presumably
# entries without a usable forms page -- verify)
drugs <- sub("-$", "", drugs)
drugs <- drugs[-c(87, 134:137, 163, 216, 236, 1628, 1629)]
# ---- Trial scrape of one drug page to seed the results matrix z ----
url <- "https://bnf.nice.org.uk/medicinal-forms/anastrozole.html"
webpage <- read_html(url)
#Using CSS selectors to scrape the desired information
name <- html_nodes(webpage,'span.strengthOfActiveIngredient')
size <- html_nodes(webpage, 'td.packSize')
price <- html_nodes(webpage, 'td.nhsIndicativePrice')
#Converting the ranking data to text
name <- html_text(name)
size <- html_text(size)
price <- html_text(price)
# Check the extraction is works
name1 <- unlist(strsplit(name, "<"))
price <- unlist(strsplit(price, "\n "))
# every third element (offset 2) carries the price text
price1 <- price[seq(2, length(price), 3)]
z <- cbind(name1, size, price1)
# ---- Scrape every drug's medicinal-forms page, appending rows to z ----
progress <- 0
for(i in drugs){
progress <- progress+1
print(paste(round(progress/length(drugs)*100, digits = 1), "%", sep = ""))
print("Once the program reaches 100% (eta 3mins), the data will be ready to download!")
url <- paste("https://bnf.nice.org.uk/medicinal-forms/", i, ".html", sep = "")
webpage <- read_html(url)
#Using CSS selectors to scrape the desired information
name <- html_nodes(webpage,'span.strengthOfActiveIngredient')
size <- html_nodes(webpage, 'td.packSize')
price <- html_nodes(webpage, 'td.nhsIndicativePrice')
#Converting the ranking data to text
name1 <- html_text(name)
size1 <- html_text(size)
price1 <- html_text(price)
# skip pages with no ingredient entries
if(length(name)!=0){
name1 <- unlist(strsplit(name1, "<"))
price1 <- unlist(strsplit(price1, "\n "))
price1 <- price1[seq(2, length(price1), 3)]
z1 <- cbind(name1, size1, price1)
# NOTE(review): growing a matrix with rbind in a loop is O(n^2); building
# a list and binding once at the end would be faster.
z <- rbind(z, z1)
}
}
# Choosing the lowest price for every given active ingredient
# Clean the scraped matrix: strip the pound sign, make Price numeric.
BNF <- as.data.frame(z)
BNF <- BNF %>%
rename(ActiveIngredients = "name1", Size = "size", Price = "price1") %>%
mutate(Price = as.character(Price)) %>%
mutate(Price = str_replace(Price, "£", "")) %>%
mutate(Price = as.numeric(Price)) %>%
mutate(ActiveIngredients = as.character(ActiveIngredients)) %>%
select(ActiveIngredients, Size, Price)
# Sequential row codes A1, A2, ...  (seq(1:n) is redundant; 1:n suffices)
code <- seq(1:length(BNF$ActiveIngredients))
code <- paste("A", code, sep = "")
# Parse the dose value and its unit out of the ingredient string; later
# mutates overwrite earlier ones, so the most specific match wins.
BNF <- BNF %>%
add_column(Code = code, .before = "ActiveIngredients") %>%
mutate(Dose = parse_number(ActiveIngredients)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "mg"), "mg", NA)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "microgram"), "microgram", Dose_Type)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, " gram"), "gram", Dose_Type))
# Cheapest pack per active ingredient.
BNF_min <- BNF %>%
group_by(ActiveIngredients) %>%
slice(which.min(Price)) %>%
mutate(Dose = parse_number(ActiveIngredients)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "mg"), "mg", NA)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "microgram"), "microgram", Dose_Type)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, " gram"), "gram", Dose_Type))
# Shiny UI: dataset picker, file-type radio (csv only) and a download
# button in the sidebar; a preview table in the main panel.
ui <- fluidPage(
titlePanel('BNF Download'),
sidebarLayout(
sidebarPanel(
selectInput("dataset", "Choose a dataset:",
choices = c("BNF", "BNF minimum prices")),
radioButtons("filetype", "File type:",
choices = c("csv")),
downloadButton('downloadData', 'Download')
),
mainPanel(
# paired with the server's output$table
tableOutput("table")
)
)
)
# Shiny server: serves the scraped BNF tables and a CSV download.
server <- function(input, output) {
# Select the data object matching the dropdown choice.
datasetInput <- reactive({
switch(input$dataset,
"BNF" = BNF,
"BNF minimum prices" = BNF_min)
})
# BUG FIX: the UI declares tableOutput("table"), which must be paired with
# renderTable (renderDataTable pairs with dataTableOutput), so the preview
# table never displayed.
output$table <- renderTable({
head(datasetInput())
})
# downloadHandler() takes two functions: filename() names the file on the
# client side, content() writes the data into the temp file Shiny supplies.
output$downloadData <- downloadHandler(
filename = function() {
paste(input$dataset, input$filetype, sep = ".")
},
content = function(file) {
# only "csv" is offered in the UI; tsv branch kept for compatibility
sep <- switch(input$filetype, "csv" = ",", "tsv" = "\t")
# Write to a file specified by the 'file' argument
write.table(datasetInput(), file, sep = sep,
row.names = FALSE)
}
)
}
# RUN
# Run the application
shinyApp(ui = ui, server = server)
| /app.R | no_license | willking98/WebScrape | R | false | false | 5,762 | r | list.of.packages <- c("shiny", "rvest", "stringr", "tidyverse", "stringdist")
# ---- Duplicate copy of the BNF scraper/Shiny app (first line, the
# list.of.packages assignment, sits on the dataset-metadata line above).
# Kept byte-for-byte; review notes only. ----
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(shiny)
library(rvest)
library(stringr)
library(tidyverse)
library(stringdist)
options(warn = 0)
# Scrape and slugify the BNF drug index.
drug_list_url <- "https://bnf.nice.org.uk/drug/"
drugpage <- read_html(drug_list_url)
drugs <- html_text(html_nodes(drugpage, 'span'))
drugs <- drugs[3:length(drugs)]
drugs <- tolower(drugs)
drugs <- str_replace_all(drugs, " ", "-")
drugs <- str_replace_all(drugs, ",", "")
drugs <- str_replace_all(drugs, "é", "e")
drugs <- str_replace(drugs, "\\(", "")
drugs <- str_replace(drugs, "\\)", "")
drugs <- str_replace(drugs, "'", "")
# NOTE(review): regex parens make this a no-op against a literal "d-(rh0)-";
# would need fixed() or escaped parens.  Confirm intent.
drugs <- str_replace_all(drugs, "d-(rh0)-", "d-rh0-")
drugs <- sub("noradrenaline/norepinephrine", "noradrenalinenorepinephrine", drugs)
drugs <- str_replace(drugs, "enaline/epinephr", "enalineepinephr")
drugs <- sub("-$", "", drugs)
drugs <- drugs[-c(87, 134:137, 163, 216, 236, 1628, 1629)]
# Trial scrape of one page to seed z.
url <- "https://bnf.nice.org.uk/medicinal-forms/anastrozole.html"
webpage <- read_html(url)
#Using CSS selectors to scrape the desired information
name <- html_nodes(webpage,'span.strengthOfActiveIngredient')
size <- html_nodes(webpage, 'td.packSize')
price <- html_nodes(webpage, 'td.nhsIndicativePrice')
#Converting the ranking data to text
name <- html_text(name)
size <- html_text(size)
price <- html_text(price)
# Check the extraction is works
name1 <- unlist(strsplit(name, "<"))
price <- unlist(strsplit(price, "\n "))
price1 <- price[seq(2, length(price), 3)]
z <- cbind(name1, size, price1)
# Scrape every drug's page, appending to z (rbind growth is O(n^2)).
progress <- 0
for(i in drugs){
progress <- progress+1
print(paste(round(progress/length(drugs)*100, digits = 1), "%", sep = ""))
print("Once the program reaches 100% (eta 3mins), the data will be ready to download!")
url <- paste("https://bnf.nice.org.uk/medicinal-forms/", i, ".html", sep = "")
webpage <- read_html(url)
#Using CSS selectors to scrape the desired information
name <- html_nodes(webpage,'span.strengthOfActiveIngredient')
size <- html_nodes(webpage, 'td.packSize')
price <- html_nodes(webpage, 'td.nhsIndicativePrice')
#Converting the ranking data to text
name1 <- html_text(name)
size1 <- html_text(size)
price1 <- html_text(price)
if(length(name)!=0){
name1 <- unlist(strsplit(name1, "<"))
price1 <- unlist(strsplit(price1, "\n "))
price1 <- price1[seq(2, length(price1), 3)]
z1 <- cbind(name1, size1, price1)
z <- rbind(z, z1)
}
}
# Choosing the lowest price for every given active ingredient
BNF <- as.data.frame(z)
BNF <- BNF %>%
rename(ActiveIngredients = "name1", Size = "size", Price = "price1") %>%
mutate(Price = as.character(Price)) %>%
mutate(Price = str_replace(Price, "£", "")) %>%
mutate(Price = as.numeric(Price)) %>%
mutate(ActiveIngredients = as.character(ActiveIngredients)) %>%
select(ActiveIngredients, Size, Price)
code <- seq(1:length(BNF$ActiveIngredients))
code <- paste("A", code, sep = "")
BNF <- BNF %>%
add_column(Code = code, .before = "ActiveIngredients") %>%
mutate(Dose = parse_number(ActiveIngredients)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "mg"), "mg", NA)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "microgram"), "microgram", Dose_Type)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, " gram"), "gram", Dose_Type))
BNF_min <- BNF %>%
group_by(ActiveIngredients) %>%
slice(which.min(Price)) %>%
mutate(Dose = parse_number(ActiveIngredients)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "mg"), "mg", NA)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, "microgram"), "microgram", Dose_Type)) %>%
mutate(Dose_Type = ifelse(str_detect(ActiveIngredients, " gram"), "gram", Dose_Type))
# Shiny UI: dataset picker + download button; table preview.
ui <- fluidPage(
titlePanel('BNF Download'),
sidebarLayout(
sidebarPanel(
selectInput("dataset", "Choose a dataset:",
choices = c("BNF", "BNF minimum prices")),
radioButtons("filetype", "File type:",
choices = c("csv")),
downloadButton('downloadData', 'Download')
),
mainPanel(
tableOutput("table")
)
)
)
# NOTE(review): renderDataTable is paired here with tableOutput("table") in
# the UI; the matching pair is renderTable/tableOutput (or
# renderDataTable/dataTableOutput), so the preview likely never renders.
server <- function(input, output) {
### INPUT WEBSCRAPE CODE
### SHINY CONTINUE
datasetInput <- reactive({
# Fetch the appropriate data object, depending on the value
# of input$dataset.
switch(input$dataset,
"BNF" = BNF,
"BNF minimum prices" = BNF_min)
})
output$table <- renderDataTable({
head(datasetInput())
})
# downloadHandler() takes two arguments, both functions.
# The content function is passed a filename as an argument, and
# it should write out data to that filename.
output$downloadData <- downloadHandler(
# This function returns a string which tells the client
# browser what name to use when saving the file.
filename = function() {
paste(input$dataset, input$filetype, sep = ".")
},
# This function should write data to a file given to it by
# the argument 'file'.
content = function(file) {
sep <- switch(input$filetype, "csv" = ",", "tsv" = "\t")
# Write to a file specified by the 'file' argument
write.table(datasetInput(), file, sep = sep,
row.names = FALSE)
}
)
}
# RUN
# Run the application
shinyApp(ui = ui, server = server)
|
testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 1.45474215376015e+135, 3.56441595774554e+114, -2.64525441665141e+303, -9.52682579807939e+139, -3.98397314603138e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888185, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::Photoperiod,testlist)
str(result) | /meteor/inst/testfiles/Photoperiod/AFL_Photoperiod/Photoperiod_valgrind_files/1615768733-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 683 | r | testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 1.45474215376015e+135, 3.56441595774554e+114, -2.64525441665141e+303, -9.52682579807939e+139, -3.98397314603138e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888185, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::Photoperiod,testlist)
str(result) |
#' @title Refreshes the isotope dataset
#'
#' @description Internal utility function for refreshing the isotope_data
#' dataset (not exported) This function rapidly updates the isotope_data.rda
#' file once one updates the TESIR_data.csv file.
#'
#' @param data which is typically the SIDER_data.csv file containing the dataset
#' used for model fitting and imputation.
#'
#' @return Saves the updated file to ../data/isotope_data.rda for use within the
#' pacakge
#'
#' @author Thomas Guillerme
refresh.isotope_data <- function(data = "SIDER_data.csv") {
# Read the csv file from the package data
# (system.file() resolves the copy installed under the package's extdata)
isotope_data <- utils::read.table(file = system.file("extdata", data,
package = "SIDER"),
header = TRUE,
stringsAsFactors = FALSE,
encoding = "UTF-8")
# Save it as the isotope_data set in the manual
# NOTE(review): the relative "../data" path assumes the working directory
# is the package's R/ folder -- confirm before sourcing from elsewhere.
save(isotope_data, file = "../data/isotope_data.rda", compress = 'xz')
# Make sure to update the manual
cat("isotope_data.rda file successfully updated.\n
Don't forget to update the man page in \\man\\SIDER.R if necessary.")
} | /R/refresh.isotope_data.R | no_license | healyke/SIDER | R | false | false | 1,255 | r | #' @title Refreshes the isotope dataset
#'
#' @description Internal utility function for refreshing the isotope_data
#' dataset (not exported) This function rapidly updates the isotope_data.rda
#' file once one updates the TESIR_data.csv file.
#'
#' @param data which is typically the SIDER_data.csv file containing the dataset
#' used for model fitting and imputation.
#'
#' @return Saves the updated file to ../data/isotope_data.rda for use within the
#' pacakge
#'
#' @author Thomas Guillerme
refresh.isotope_data <- function(data = "SIDER_data.csv") {
# Read the csv file from the package data
# (system.file() resolves the copy installed under the package's extdata)
isotope_data <- utils::read.table(file = system.file("extdata", data,
package = "SIDER"),
header = TRUE,
stringsAsFactors = FALSE,
encoding = "UTF-8")
# Save it as the isotope_data set in the manual
# NOTE(review): the relative "../data" path assumes the working directory
# is the package's R/ folder -- confirm before sourcing from elsewhere.
save(isotope_data, file = "../data/isotope_data.rda", compress = 'xz')
# Make sure to update the manual
cat("isotope_data.rda file successfully updated.\n
Don't forget to update the man page in \\man\\SIDER.R if necessary.")
}
# Three side-by-side vector diagrams illustrating linear dependence,
# independence and orthogonality (lecture figure).
setwd('~/Documents/Academia/Teaching/TCD/2015-HT/POTBD_Quantitative_Methods_II/Lectures/lecture1/')
pdf('Figs/dependence.pdf')
# three square panels on A4 landscape
par(mfrow=c(1,3), pty='s', paper='a4r')
#--- Linear dependence: v2 = 2 * v1
plot(c(0,2), c(0,4), type='n', xlab='x1', ylab='x2', main = 'Linearly dependent vectors' )
# plot v2
arrows(0,0,2,4, col=2)
# plot v1
arrows(0,0,1,2)
# add labels (BUG FIX: the red arrow is v2 but was labelled "v1")
text(x=0.5, y=1.2, labels="v1")
text(x=1.5, y=3.3, labels="v2", col=2)
# correlation of the two dependent vectors (1); visible when run interactively
cor(c(2,4), c(1,2))
#--- Linear Independence
plot(c(0,2), c(0,4), type='n', xlab='x1', ylab='x2', main='Linearly independent vectors' )
# plot v2
arrows(0,0,2,4, col=2)
# plot v1
arrows(0,0,1,4)
# add labels (BUG FIX: red arrow relabelled "v2")
text(x=0.5, y=2.4, labels="v1")
text(x=1.5, y=3.3, labels="v2", col=2)
# (BUG FIX: removed a stray debug plot() here that consumed the third mfrow
# panel and pushed the orthogonality figure onto a new page)
#--- Orthogonality
plot(c(-4,3), c(0,5), type='n', xlab='x1', ylab='x2' , las=1, main='Orthogonal vectors')
# plot v2
arrows(0,0,2,4, col=2)
# plot v1
arrows(0,0,-4,2)
# add labels
text(x=-2, y=1.5, labels="v1")
text(x=1.3, y=1.4, labels="v2", col=2)
dev.off()
| /PO7005/Lecture1/R_code/IndependenceEtc.R | no_license | chadefa1/chadefa1.github.io | R | false | false | 997 | r | setwd('~/Documents/Academia/Teaching/TCD/2015-HT/POTBD_Quantitative_Methods_II/Lectures/lecture1/')
# Duplicate copy of the vector-diagram figure script (the setwd() line sits
# on the dataset-metadata line above).  Review notes only.
pdf('Figs/dependence.pdf')
par(mfrow=c(1,3), pty='s', paper='a4r')
#--- Linear dependence
plot(c(0,2), c(0,4), type='n', xlab='x1', ylab='x2', main = 'Linearly dependent vectors' )
# plot v2
arrows(0,0,2,4, col=2)
# plot v1
arrows(0,0,1,2)
# add labels
text(x=0.5, y=1.2, labels="v1")
# NOTE(review): this labels the red v2 arrow "v1" (cf. panel 3's "v2")
text(x=1.5, y=3.3, labels="v1", col=2)
cor(c(2,4), c(1,2))
#--- Linear Independence
plot(c(0,2), c(0,4), type='n', xlab='x1', ylab='x2', main='Linearly independent vectors' )
# plot v2
arrows(0,0,2,4, col=2)
# plot v1
arrows(0,0,1,4)
# add labels
text(x=0.5, y=2.4, labels="v1")
# NOTE(review): red v2 arrow again labelled "v1"
text(x=1.5, y=3.3, labels="v1", col=2)
# NOTE(review): this stray plot() occupies the third mfrow panel, pushing
# the orthogonality figure onto a new page -- looks like leftover debugging
plot(c(2,1,4), c(1,1,4))
#--- Orthogonality
plot(c(-4,3), c(0,5), type='n', xlab='x1', ylab='x2' , las=1, main='Orthogonal vectors')
# plot v2
arrows(0,0,2,4, col=2)
# plot v1
arrows(0,0,-4,2)
# add labels
text(x=-2, y=1.5, labels="v1")
text(x=1.3, y=1.4, labels="v2", col=2)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{spine}
\alias{spine}
\title{Spine data}
\format{
\describe{
\item{pelvic_incidence}{pelvic incidence}
\item{pelvic_tilt}{pelvic tilt}
\item{lumbar_lordosis_angle}{lumbar lordosis angle}
\item{sacral_slope}{sacral slope}
\item{pelvic_radius}{pelvic radius}
\item{degree_spondylolisthesis}{degree of spondylolisthesis}
\item{pelvic_slope}{pelvic slope}
\item{direct_tilt}{direct tilt}
\item{thoracic_slope}{thoracic slope }
\item{cervical_tilt}{cervical tilt}
\item{sacrum_angle}{sacrum angle}
\item{scoliosis_slope}{scoliosis slope}
\item{outcome}{1 is abnormal (Disk Hernia or Spondylolisthesis) and 0 is normal}
}
}
\source{
\url{http://archive.ics.uci.edu/ml/datasets/vertebral+column}
}
\usage{
spine
}
\description{
Lower back pain can be caused by a variety of problems with any parts of the complex,
interconnected network of spinal muscles, nerves, bones, discs or tendons
in the lumbar spine. This dataset contains 12 biomechanical attributes from
310 patients, of whom 100 are normal and 210 are abnormal (Disk Hernia or
Spondylolisthesis). The goal is to differentiate the normal patients from the
abnormal using those 12 variables.
}
\keyword{datasets}
| /man/spine.Rd | no_license | zuoyi93/ProSGPV | R | false | true | 1,270 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{spine}
\alias{spine}
\title{Spine data}
\format{
\describe{
\item{pelvic_incidence}{pelvic incidence}
\item{pelvic_tilt}{pelvic tilt}
\item{lumbar_lordosis_angle}{lumbar lordosis angle}
\item{sacral_slope}{sacral slope}
\item{pelvic_radius}{pelvic radius}
\item{degree_spondylolisthesis}{degree of spondylolisthesis}
\item{pelvic_slope}{pelvic slope}
\item{direct_tilt}{direct tilt}
\item{thoracic_slope}{thoracic slope }
\item{cervical_tilt}{cervical tilt}
\item{sacrum_angle}{sacrum angle}
\item{scoliosis_slope}{scoliosis slope}
\item{outcome}{1 is abnormal (Disk Hernia or Spondylolisthesis) and 0 is normal}
}
}
\source{
\url{http://archive.ics.uci.edu/ml/datasets/vertebral+column}
}
\usage{
spine
}
\description{
Lower back pain can be caused by a variety of problems with any parts of the complex,
interconnected network of spinal muscles, nerves, bones, discs or tendons
in the lumbar spine. This dataset contains 12 biomechanical attributes from
310 patients, of whom 100 are normal and 210 are abnormal (Disk Hernia or
Spondylolisthesis). The goal is to differentiate the normal patients from the
abnormal using those 12 variables.
}
\keyword{datasets}
|
#difine matrix and initial condition
# Random placement of `totalcho` self-avoiding chains ("cho" -- domain
# meaning unconfirmed) on a size x size lattice.  Empty sites hold 5; chain
# segments are coded 1/2/3 cyclically by chain part.  `kbt` is defined but
# unused in this section.
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=3
centerarea=1
# segment counts of the four parts of each chain
lengthofchoarray=c(6,4,4,6)
totallengthofcho=sum(lengthofchoarray)
#record coordinace
choi=matrix(nrow=totalcho, ncol=totallengthofcho)
choj=matrix(nrow=totalcho, ncol=totallengthofcho)
# first part is one step shorter because the start site is placed up front
lengthofcho=c(lengthofchoarray[1]-1,lengthofchoarray[-1])
# initialise both lattices to the "empty" code 5
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
# Grow each chain as a random self-avoiding walk; on failure (NA left in the
# recorded coordinates) roll back to lattice_cho2 and retry from scratch.
for(loop in 1:totalcho)
{
repeat
{
#startcho_i=sample(1:(size),size=1)
#startcho_j=sample(1:(size),size=1)
#center area
# pick a random empty start site (with centerarea=1 this samples 1:29,
# apparently to keep starts away from the lattice edge -- verify)
startcho_i=sample(centerarea:(size-centerarea),size=1)
startcho_j=sample(centerarea:(size-centerarea),size=1)
while(lattice_cho1[startcho_i,startcho_j]!=5)
{
#startcho_i=sample(1:size,size=1)
#startcho_j=sample(1:size,size=1)
#center area
startcho_i=sample(centerarea:(size-centerarea),size=1)
startcho_j=sample(centerarea:(size-centerarea),size=1)
}
lattice_cho1[startcho_i,startcho_j]=1
choi[loop,1]=startcho_i
choj[loop,1]=startcho_j
oldchositei=startcho_i
oldchositej=startcho_j
steplength=1
for(partsofcho in 1:length(lengthofchoarray))
{
for(lengthcho1 in 1:(lengthofcho[partsofcho]))
{
# candidate moves: left, down, right, up of the current site
newchositei=c(oldchositei,oldchositei+1,oldchositei,oldchositei-1)
newchositej=c(oldchositej-1,oldchositej,oldchositej+1,oldchositej)
newchomove=sample(1:4,size=1)
# re-draw up to 100 times until an in-bounds empty neighbour is found
for(ooo in 1:100)
{
if(newchositej[newchomove]<1 | newchositej[newchomove]>size | newchositei[newchomove]<1 | newchositei[newchomove]>size)
{
newchomove=sample(1:4,size=1)
}
else
{
if(lattice_cho1[newchositei[newchomove],newchositej[newchomove]]!=5)
newchomove=sample(1:4,size=1)
else
break
}
}
# NOTE(review): after the loop ooo keeps its last value, so a success
# found on exactly the 100th draw is discarded, and a full 100 failures
# (ooo==100) silently skips the step rather than flagging it.
if(ooo<100)
{
# colour the segment 1/2/3 cycling with the chain part index
if(partsofcho%%3==0)
{lattice_cho1[newchositei[newchomove],newchositej[newchomove]]=3}
else
{lattice_cho1[newchositei[newchomove],newchositej[newchomove]]=partsofcho%%3}
oldchositei=newchositei[newchomove]
oldchositej=newchositej[newchomove]
choi[loop,(steplength+1)]=newchositei[newchomove]
choj[loop,(steplength+1)]=newchositej[newchomove]
steplength=steplength+1
}
}
}
# complete chain (no NAs) -> commit; otherwise restore the backup lattice
if(sum(is.na(choi[loop,]))==0)
{
lattice_cho2=lattice_cho1
break
}
else
{
lattice_cho1=lattice_cho2
}
}
print(loop)
}
lattice_cho2=lattice_cho1
#fix endpoint
#4cor
# Deterministic alternative: four hand-built chains laid out symmetrically,
# each appearing to form a hairpin "tongue" of `touguelength` steps pointing
# toward the centre (coordinate construction below -- verify geometry).
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=4
centerarea=1
lengthofchoarray=c(10,15,10,1)
totallengthofcho=sum(lengthofchoarray)
#record coordinace
choi=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
choj=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
# distance of the fixed endpoints from the lattice edge
disfori=5
disforj=5
# steps spent inside the tongue so the total chain length fits the span
touguelength=(totallengthofcho-(size-2*disfori))/2
# initialise both lattices to the "empty" code 5
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
#cho1
choi[1,]=c(seq(disfori+1,size/2,1),rep(size/2,touguelength),rep(size/2+1,touguelength),seq(size/2+1,size-disfori,1))
choj[1,]=c(rep(disforj,totallengthofcho/2-touguelength),seq(disforj+1,disforj+touguelength,1),seq(disforj+touguelength,disforj+1,-1),rep(disforj,totallengthofcho/2-touguelength))
#cho2
choj[2,]=c(seq(disfori+1,size/2,1),rep(size/2,touguelength),rep(size/2+1,touguelength),seq(size/2+1,size-disfori,1))
choi[2,]=c(rep(size-disforj+1,totallengthofcho/2-touguelength),seq(size-disforj,size-disforj-touguelength+1,-1),seq(size-disforj-touguelength+1,size-disforj,1),rep(size-disforj+1,totallengthofcho/2-touguelength))
#cho3
choi[3,]=c(seq(size-disfori,size/2+1,-1),rep(size/2+1,touguelength),rep(size/2,touguelength),seq(size/2,disfori+1,-1))
choj[3,]=c(rep(size-disforj+1,totallengthofcho/2-touguelength),seq(size-disforj,size-disforj-touguelength+1,-1),seq(size-disforj-touguelength+1,size-disforj,1),rep(size-disforj+1,totallengthofcho/2-touguelength))
#cho4
choj[4,]=c(seq(size-disfori,size/2+1,-1),rep(size/2+1,touguelength),rep(size/2,touguelength),seq(size/2,disfori+1,-1))
choi[4,]=c(rep(disforj,totallengthofcho/2-touguelength),seq(disforj+1,disforj+touguelength,1),seq(disforj+touguelength,disforj+1,-1),rep(disforj,totallengthofcho/2-touguelength))
# Paint the four chain parts onto the lattice with codes 1/2/3/1.
for(j in 1:totalcho)
{
for(ranse in 1:lengthofchoarray[1])
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
for(ranse in (lengthofchoarray[1]+1):(lengthofchoarray[1]+lengthofchoarray[2]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=2
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=3
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+lengthofchoarray[4]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
}
lattice_cho2=lattice_cho1
#fix endpoint
#small with large
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=1
centerarea=1
lengthofchoarray=c(6,9,1,6)
totallengthofcho=sum(lengthofchoarray)
#record coordinates
choi=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
choj=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
disfori=5
disforj=10
touguelength=(totallengthofcho-(size-2*disfori))/2
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
#cho1
choi[1,]=c(seq(disfori+1,size/2,1),rep(size/2,touguelength),rep(size/2+1,touguelength),seq(size/2+1,size-disfori,1))
choj[1,]=c(rep(disforj,totallengthofcho/2-touguelength),seq(disforj+1,disforj+touguelength,1),seq(disforj+touguelength,disforj+1,-1),rep(disforj,totallengthofcho/2-touguelength))
for(j in 1:totalcho)
{
for(ranse in 1:lengthofchoarray[1])
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
for(ranse in (lengthofchoarray[1]+1):(lengthofchoarray[1]+lengthofchoarray[2]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=2
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=3
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+lengthofchoarray[4]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
}
lattice_cho2=lattice_cho1
#fix endpoint
#croissant
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=2
centerarea=1
lengthofchoarray=c(6,15,1,6)
totallengthofcho=sum(lengthofchoarray)
#record coordinates
choi=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
choj=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
disfori=5
disforj=5
touguelength=(totallengthofcho-(size-2*disfori))/2
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
#cho1
choi[1,]=c(seq(size/2,size/2-touguelength+1,-1),rep(size/2-touguelength,size-2*disfori),seq(size/2-touguelength+1,size/2,1))
choj[1,]=c(rep(disfori+1,touguelength),seq(disfori+1,size-disfori,1),rep(size-disfori,touguelength))
#cho2
choi[2,]=c(seq(size/2+1,size/2+touguelength,1),rep(size/2+touguelength+1,size-2*disfori),seq(size/2+touguelength,size/2+1,-1))
choj[2,]=c(rep(disfori+1,touguelength),seq(disfori+1,size-disfori,1),rep(size-disfori,touguelength))
for(j in 1:totalcho)
{
for(ranse in 1:lengthofchoarray[1])
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
for(ranse in (lengthofchoarray[1]+1):(lengthofchoarray[1]+lengthofchoarray[2]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=2
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=3
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+lengthofchoarray[4]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
}
lattice_cho2=lattice_cho1
#draw pic
dev.off()
{
par(mar=c(1,1,1,1))
# Empty square canvas; one unit cell per lattice site, axes suppressed.
plot(1,1,type="p",tck=0.03,cex=0.5,las=1,xlab="",col='white',pch=19, ylab="", main="",xlim=c(0,size),ylim=c(0,size),xaxt="n",yaxt="n",bty="n")
# Map each site state to its fill colour:
# 1 = black, 2 = blue, 3 = grey, 4 = red, 5 (empty) = white.
# A lookup vector replaces five identical if/polygon branches.
sitecol <- c("black", "blue", "grey", "red", "white")
for(i in 1:size)
{
for(j in 1:size)
{
par(new=TRUE)
# Row i is drawn from the top of the plot, so the y coordinate is flipped.
polygon(c(j-1,j,j,j-1), c(size-i,size-i,size-i+1,size-i+1),
        density = NULL, border = FALSE, col = sitecol[lattice_cho1[i,j]])
}
}
}
# Sanity checks: NA counts are 0 only if every chain was fully placed.
sum(is.na(choi))
sum(is.na(choj))
# Occupancy counts per monomer type on the lattice.
length(which(lattice_cho1==1))
length(which(lattice_cho1==2))
length(which(lattice_cho1==3))
# Type-4 count; the original referenced the undefined name `lattice_1` (typo).
length(which(lattice_cho1==4))
| /Cho initial condition.R | no_license | MikawaFumika/Lattice_KMC_Polymerase-DNA-movement-simulation | R | false | false | 9,545 | r |
#define matrix and initial condition
size=30          # lattice is size x size
kbt=1            # thermal energy unit (used by later KMC steps)
# 5 marks an empty lattice site. Fill the working lattice and its backup
# directly at construction instead of element-by-element loops
# (identical result, idiomatic R).
lattice_cho1=matrix(5, nrow=size, ncol=size)
lattice_cho2=matrix(5, nrow=size, ncol=size)
totalcho=3       # number of chains to place
centerarea=1     # margin keeping start sites away from the lattice border
lengthofchoarray=c(6,4,4,6)            # monomer count of each chain segment
totallengthofcho=sum(lengthofchoarray) # total monomers per chain
#record coordinates of every monomer (one row per chain)
choi=matrix(nrow=totalcho, ncol=totallengthofcho)
choj=matrix(nrow=totalcho, ncol=totallengthofcho)
# The first segment grows one step less because the start site is placed
# explicitly before the growth loop runs.
lengthofcho=c(lengthofchoarray[1]-1,lengthofchoarray[-1])
# Place `totalcho` self-avoiding random-walk chains on the lattice.
# Each chain starts at a random empty site and grows one monomer at a time;
# if a chain gets stuck the whole attempt is rolled back and retried.
for(loop in 1:totalcho)
{
repeat
{
#startcho_i=sample(1:(size),size=1)
#startcho_j=sample(1:(size),size=1)
#center area
# Draw a start site inside the central area, rejecting occupied sites.
startcho_i=sample(centerarea:(size-centerarea),size=1)
startcho_j=sample(centerarea:(size-centerarea),size=1)
while(lattice_cho1[startcho_i,startcho_j]!=5)
{
#startcho_i=sample(1:size,size=1)
#startcho_j=sample(1:size,size=1)
#center area
startcho_i=sample(centerarea:(size-centerarea),size=1)
startcho_j=sample(centerarea:(size-centerarea),size=1)
}
# Mark the head monomer (type 1) and record its coordinates.
lattice_cho1[startcho_i,startcho_j]=1
choi[loop,1]=startcho_i
choj[loop,1]=startcho_j
oldchositei=startcho_i
oldchositej=startcho_j
steplength=1
# Grow the chain segment by segment; segment k is written as type
# k %% 3, with 3 substituted when the remainder is 0.
for(partsofcho in 1:length(lengthofchoarray))
{
for(lengthcho1 in 1:(lengthofcho[partsofcho]))
{
# The four nearest-neighbour candidate sites of the current head.
newchositei=c(oldchositei,oldchositei+1,oldchositei,oldchositei-1)
newchositej=c(oldchositej-1,oldchositej,oldchositej+1,oldchositej)
newchomove=sample(1:4,size=1)
# Rejection-sample an in-bounds, empty neighbour; give up after 100 draws.
for(ooo in 1:100)
{
if(newchositej[newchomove]<1 | newchositej[newchomove]>size | newchositei[newchomove]<1 | newchositei[newchomove]>size)
{
newchomove=sample(1:4,size=1)
}
else
{
if(lattice_cho1[newchositei[newchomove],newchositej[newchomove]]!=5)
newchomove=sample(1:4,size=1)
else
break
}
}
# NOTE(review): a success found on exactly the 100th draw is discarded
# by this `ooo<100` test — confirm whether that is intended.
if(ooo<100)
{
if(partsofcho%%3==0)
{lattice_cho1[newchositei[newchomove],newchositej[newchomove]]=3}
else
{lattice_cho1[newchositei[newchomove],newchositej[newchomove]]=partsofcho%%3}
oldchositei=newchositei[newchomove]
oldchositej=newchositej[newchomove]
choi[loop,(steplength+1)]=newchositei[newchomove]
choj[loop,(steplength+1)]=newchositej[newchomove]
steplength=steplength+1
}
}
}
# Accept the chain only if every monomer was placed (no NA left in its
# row); otherwise restore the lattice from the backup and retry.
if(sum(is.na(choi[loop,]))==0)
{
lattice_cho2=lattice_cho1
break
}
else
{
lattice_cho1=lattice_cho2
}
}
print(loop)
}
lattice_cho2=lattice_cho1
#fix endpoint
#4cor
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=4
centerarea=1
lengthofchoarray=c(10,15,10,1)
totallengthofcho=sum(lengthofchoarray)
#record coordinates
choi=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
choj=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
disfori=5
disforj=5
touguelength=(totallengthofcho-(size-2*disfori))/2
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
#cho1
choi[1,]=c(seq(disfori+1,size/2,1),rep(size/2,touguelength),rep(size/2+1,touguelength),seq(size/2+1,size-disfori,1))
choj[1,]=c(rep(disforj,totallengthofcho/2-touguelength),seq(disforj+1,disforj+touguelength,1),seq(disforj+touguelength,disforj+1,-1),rep(disforj,totallengthofcho/2-touguelength))
#cho2
choj[2,]=c(seq(disfori+1,size/2,1),rep(size/2,touguelength),rep(size/2+1,touguelength),seq(size/2+1,size-disfori,1))
choi[2,]=c(rep(size-disforj+1,totallengthofcho/2-touguelength),seq(size-disforj,size-disforj-touguelength+1,-1),seq(size-disforj-touguelength+1,size-disforj,1),rep(size-disforj+1,totallengthofcho/2-touguelength))
#cho3
choi[3,]=c(seq(size-disfori,size/2+1,-1),rep(size/2+1,touguelength),rep(size/2,touguelength),seq(size/2,disfori+1,-1))
choj[3,]=c(rep(size-disforj+1,totallengthofcho/2-touguelength),seq(size-disforj,size-disforj-touguelength+1,-1),seq(size-disforj-touguelength+1,size-disforj,1),rep(size-disforj+1,totallengthofcho/2-touguelength))
#cho4
choj[4,]=c(seq(size-disfori,size/2+1,-1),rep(size/2+1,touguelength),rep(size/2,touguelength),seq(size/2,disfori+1,-1))
choi[4,]=c(rep(disforj,totallengthofcho/2-touguelength),seq(disforj+1,disforj+touguelength,1),seq(disforj+touguelength,disforj+1,-1),rep(disforj,totallengthofcho/2-touguelength))
for(j in 1:totalcho)
{
for(ranse in 1:lengthofchoarray[1])
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
for(ranse in (lengthofchoarray[1]+1):(lengthofchoarray[1]+lengthofchoarray[2]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=2
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=3
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+lengthofchoarray[4]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
}
lattice_cho2=lattice_cho1
#fix endpoint
#small with large
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=1
centerarea=1
lengthofchoarray=c(6,9,1,6)
totallengthofcho=sum(lengthofchoarray)
#record coordinates
choi=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
choj=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
disfori=5
disforj=10
touguelength=(totallengthofcho-(size-2*disfori))/2
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
#cho1
choi[1,]=c(seq(disfori+1,size/2,1),rep(size/2,touguelength),rep(size/2+1,touguelength),seq(size/2+1,size-disfori,1))
choj[1,]=c(rep(disforj,totallengthofcho/2-touguelength),seq(disforj+1,disforj+touguelength,1),seq(disforj+touguelength,disforj+1,-1),rep(disforj,totallengthofcho/2-touguelength))
for(j in 1:totalcho)
{
for(ranse in 1:lengthofchoarray[1])
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
for(ranse in (lengthofchoarray[1]+1):(lengthofchoarray[1]+lengthofchoarray[2]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=2
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=3
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+lengthofchoarray[4]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
}
lattice_cho2=lattice_cho1
#fix endpoint
#croissant
size=30
kbt=1
lattice_cho1=matrix(nrow=size, ncol=size)
lattice_cho2=matrix(nrow=size, ncol=size)
totalcho=2
centerarea=1
lengthofchoarray=c(6,15,1,6)
totallengthofcho=sum(lengthofchoarray)
#record coordinates
choi=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
choj=matrix(nrow=totalcho, ncol=sum(lengthofchoarray))
disfori=5
disforj=5
touguelength=(totallengthofcho-(size-2*disfori))/2
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho1[i,j]=5
}
}
for(i in 1:size)
{
for(j in 1:size)
{
lattice_cho2[i,j]=5
}
}
#cho1
choi[1,]=c(seq(size/2,size/2-touguelength+1,-1),rep(size/2-touguelength,size-2*disfori),seq(size/2-touguelength+1,size/2,1))
choj[1,]=c(rep(disfori+1,touguelength),seq(disfori+1,size-disfori,1),rep(size-disfori,touguelength))
#cho2
choi[2,]=c(seq(size/2+1,size/2+touguelength,1),rep(size/2+touguelength+1,size-2*disfori),seq(size/2+touguelength,size/2+1,-1))
choj[2,]=c(rep(disfori+1,touguelength),seq(disfori+1,size-disfori,1),rep(size-disfori,touguelength))
for(j in 1:totalcho)
{
for(ranse in 1:lengthofchoarray[1])
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
for(ranse in (lengthofchoarray[1]+1):(lengthofchoarray[1]+lengthofchoarray[2]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=2
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=3
for(ranse in (lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+1):(lengthofchoarray[1]+lengthofchoarray[2]+lengthofchoarray[3]+lengthofchoarray[4]))
lattice_cho1[choi[j,][ranse],choj[j,][ranse]]=1
}
lattice_cho2=lattice_cho1
#draw pic
dev.off()
{
par(mar=c(1,1,1,1))
# Empty square canvas; one unit cell per lattice site, axes suppressed.
plot(1,1,type="p",tck=0.03,cex=0.5,las=1,xlab="",col='white',pch=19, ylab="", main="",xlim=c(0,size),ylim=c(0,size),xaxt="n",yaxt="n",bty="n")
# Map each site state to its fill colour:
# 1 = black, 2 = blue, 3 = grey, 4 = red, 5 (empty) = white.
# A lookup vector replaces five identical if/polygon branches.
sitecol <- c("black", "blue", "grey", "red", "white")
for(i in 1:size)
{
for(j in 1:size)
{
par(new=TRUE)
# Row i is drawn from the top of the plot, so the y coordinate is flipped.
polygon(c(j-1,j,j,j-1), c(size-i,size-i,size-i+1,size-i+1),
        density = NULL, border = FALSE, col = sitecol[lattice_cho1[i,j]])
}
}
}
# Sanity checks: NA counts are 0 only if every chain was fully placed.
sum(is.na(choi))
sum(is.na(choj))
# Occupancy counts per monomer type on the lattice.
length(which(lattice_cho1==1))
length(which(lattice_cho1==2))
length(which(lattice_cho1==3))
# Type-4 count; the original referenced the undefined name `lattice_1` (typo).
length(which(lattice_cho1==4))
|
#to use
#R --slave --args "$acc_str" "$f11"_tail.csv "$f22"_tail.csv "$f22" < ~code/code_R/merge_and_generate_accession_plot.R
# Read the raw command line. With `R --slave --args a b c d`, the
# user-supplied values land in positions 4-7: index file, accession
# file, plot file, and output stem.
myarg <- commandArgs()
cat(myarg, "\n")
m <- length(myarg)
cat(m, "\n")
f_index <- myarg[4]
f_acc <- myarg[5]
f_plot <- myarg[6]
f_output <- myarg[7]
# Echo each input so the log shows what was actually received.
for (f in c(f_index, f_acc, f_plot, f_output)) {
cat(f, "\n")
}
#f_acc="WEMA_6x1122_entry_number_accession.csv_tail.csv"
#f_plot="Clean data 6x1122_WET11B-MARS-EVALTC-10-8_rep2_sorted.csv_tail.csv"
# Read the entry->accession table and the plot-level data table
# (both tab-separated, no header). TRUE/FALSE spelled out (never T/F).
data.acc<-read.csv(f_acc,sep="\t",header=FALSE)
data.plot<-read.csv(f_plot,sep="\t",header=FALSE)
colnames(data.plot)[1]="ENTRY"
#V1 V2 V3 V4 V5 V6 V7 V8 V9 V10 V11 V12
# Entries present in the plot data but missing an accession record,
# and entries present in both, each sorted ascending.
diff_acc_plot=setdiff(data.plot[,1],data.acc[,1])
same_acc_plot=intersect(data.plot[,1],data.acc[,1])
diff_acc_plot=diff_acc_plot[order(diff_acc_plot)]
same_acc_plot=same_acc_plot[order(same_acc_plot)]
dn=length(diff_acc_plot)
sn=length(same_acc_plot)
#WEMA6x1008_WET10B-EVALTC-08-1_ungenotyped1_tester_CML395_CML444
mp=gregexpr("_rep",f_plot)
# The tester suffix is taken from the first accession name, starting at
# the literal substring "tester".
data_acc_tester=as.character(data.acc[1,2])
acc_tester=substr(data_acc_tester,gregexpr("tester",data_acc_tester)[[1]][1],nchar(data_acc_tester))
# Build placeholder names for the ungenotyped entries. Vectorised over
# seq_len(dn), which is also correct when dn is 0 or 1 (the original
# explicit `for(i in 2:dn)` loop misbehaved for dn < 2 because 2:1
# counts downwards).
ungenotyped=paste0("WEMA_",substr(f_plot,12,17),"_ungenotyped_",seq_len(dn),"_",acc_tester)
diff_acc_plot_ungenotyped<-cbind(diff_acc_plot,ungenotyped)
colnames(diff_acc_plot_ungenotyped)=c("ENTRY","DESIG")
data.acc.sorted=data.acc[order(data.acc[,1]),]
colnames(data.acc.sorted)=c("ENTRY","DESIG")
# Append the placeholders, join accession names onto the plot rows by
# ENTRY (order preserved), then reorder columns so DESIG follows the
# first plot fields.
data.acc.plus.ungenotyped<-rbind(data.acc.sorted,diff_acc_plot_ungenotyped)
data.acc.plus.ungenotyped.plot<-merge(data.acc.plus.ungenotyped,data.plot,by="ENTRY",sort=FALSE)
cn=dim(data.acc.plus.ungenotyped.plot)[2]
data.acc.plus.ungenotyped.plot.2<-data.acc.plus.ungenotyped.plot[,c(1,3,4,5,2,7:cn)]
#f_output=paste("WEMA_",substr(f_plot,12,mp[[1]][1]+3),"_plot_accession",sep="")
# Write the merged table, tab-separated, without quoting or names.
acc_plot_file_name=paste0(f_output,"_output.csv")
cat(acc_plot_file_name,"\n")
write.table(data.acc.plus.ungenotyped.plot.2,file=acc_plot_file_name,append = FALSE, quote = FALSE, sep = "\t",eol = "\n",row.names = FALSE,col.names = FALSE,na=" ")
#ff <- myarg[5:m]
#f<-paste(ff,collapse=" ")
#cat(f_index,"\n")
#cat(f,"\n")
#cat(length(f),"\n")
#cat(m,"\n")
#library(affy)
#eset=justRMA(celfile.path=f)
#write.exprs(eset,file=paste(f_index,"_exprs.txt",sep=""))
#save.image(file=paste(f,".RData",sep=""))
#q()
#list_trait<-function(file.name){
#getwd()
#library(gdata)
#file_name=paste("~/DataFromXuecai/Link genotypes with phenotypes/",f_index,sep="");
#cat(file_name,"\n");
#data.for.read<-read.csv(file_name,header=T,sep="\t")
#print(colnames(data.for.read))
#write.table(data.for.read[2:length(data.for.read[,2]),2],file="F3_name.txt",append = T, quote = F, s#ep = "\t",eol = "\n",row.names = F,col.names = F);
#}
quit("yes") | /code_R/merge_and_generate_accession_plot.R | permissive | solgenomics/zeabase | R | false | false | 3,167 | r | #to use
#R --slave --args "$acc_str" "$f11"_tail.csv "$f22"_tail.csv "$f22" < ~code/code_R/merge_and_generate_accession_plot.R
myarg <- commandArgs()
cat(myarg,"\n");
m=length(myarg)
cat(m,"\n");
f_index<-myarg[4:4]
f_acc<-myarg[5:5]
f_plot<-myarg[6:6]
f_output<-myarg[7:7]
cat(f_index,"\n")
cat(f_acc,"\n")
cat(f_plot,"\n")
cat(f_output,"\n")
#f_acc="WEMA_6x1122_entry_number_accession.csv_tail.csv"
#f_plot="Clean data 6x1122_WET11B-MARS-EVALTC-10-8_rep2_sorted.csv_tail.csv"
data.acc<-read.csv(f_acc,sep="\t",header=F)
data.plot<-read.csv(f_plot,sep="\t",header=F)
colnames(data.plot)[1]="ENTRY"
#V1 V2 V3 V4 V5 V6 V7 V8 V9 V10 V11 V12
diff_acc_plot=setdiff(data.plot[,1],data.acc[,1])
same_acc_plot=intersect(data.plot[,1],data.acc[,1])
diff_acc_plot=diff_acc_plot[order(diff_acc_plot)]
same_acc_plot=same_acc_plot[order(same_acc_plot)]
dn=length(diff_acc_plot)
sn=length(same_acc_plot)
#WEMA6x1008_WET10B-EVALTC-08-1_ungenotyped1_tester_CML395_CML444
mp=gregexpr("_rep",f_plot)
data_acc_tester=as.character(data.acc[1,2])
acc_tester=substr(data_acc_tester,gregexpr("tester",data_acc_tester)[[1]][1],nchar(data_acc_tester))
#ungenotyped=paste("WEMA_",substr(f_plot,12,mp[[1]][1]+4),"_ungenotyped_",acc_tester,"_",diff_acc_plot[1],sep="")
#for(i in 2:dn){
#
#ungenotyped=c(ungenotyped,paste("WEMA_",substr(f_plot,12,mp[[1]][1]+4),"_ungenotyped_",acc_tester,"_",diff_acc_plot[i],sep=""))
#
#}
ungenotyped=paste("WEMA_",substr(f_plot,12,17),"_ungenotyped_",1,"_",acc_tester,sep="")
for(i in 2:dn){
ungenotyped=c(ungenotyped,paste("WEMA_",substr(f_plot,12,17),"_ungenotyped_",i,"_",acc_tester,sep=""))
}
diff_acc_plot_ungenotyped<-cbind(diff_acc_plot,ungenotyped)
colnames(diff_acc_plot_ungenotyped)=c("ENTRY","DESIG")
data.acc.sorted=data.acc[order(data.acc[,1]),]
colnames(data.acc.sorted)=c("ENTRY","DESIG")
data.acc.plus.ungenotyped<-rbind(data.acc.sorted,diff_acc_plot_ungenotyped)
data.acc.plus.ungenotyped.plot<-merge(data.acc.plus.ungenotyped,data.plot,by="ENTRY",sort=F)
cn=dim(data.acc.plus.ungenotyped.plot)[2]
data.acc.plus.ungenotyped.plot.2<-data.acc.plus.ungenotyped.plot[,c(1,3,4,5,2,7:cn)]
#f_output=paste("WEMA_",substr(f_plot,12,mp[[1]][1]+3),"_plot_accession",sep="")
acc_plot_file_name=paste(f_output,"_output.csv",sep="")
cat(acc_plot_file_name,"\n")
write.table(data.acc.plus.ungenotyped.plot.2,file=acc_plot_file_name,append = F, quote = F, sep = "\t",eol = "\n",row.names = F,col.names = F,na=" ");
#ff <- myarg[5:m]
#f<-paste(ff,collapse=" ")
#cat(f_index,"\n")
#cat(f,"\n")
#cat(length(f),"\n")
#cat(m,"\n")
#library(affy)
#eset=justRMA(celfile.path=f)
#write.exprs(eset,file=paste(f_index,"_exprs.txt",sep=""))
#save.image(file=paste(f,".RData",sep=""))
#q()
#list_trait<-function(file.name){
#getwd()
#library(gdata)
#file_name=paste("~/DataFromXuecai/Link genotypes with phenotypes/",f_index,sep="");
#cat(file_name,"\n");
#data.for.read<-read.csv(file_name,header=T,sep="\t")
#print(colnames(data.for.read))
#write.table(data.for.read[2:length(data.for.read[,2]),2],file="F3_name.txt",append = T, quote = F, s#ep = "\t",eol = "\n",row.names = F,col.names = F);
#}
quit("yes") |
\name{paws}
\alias{paws}
\alias{pawsm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Adaptive weights smoothing using patches
}
\description{The function implements a version the propagation separation approach that
uses patches instead of individuel voxels for comparisons in parameter space. Functionality is analog to function \code{\link{aws}}. Using patches allows for an improved
handling of locally smooth functions and in 2D and 3D for improved smoothness of
discontinuities at the expense of increased computing time.
}
\usage{
paws(y, hmax = NULL, mask=NULL, onestep = FALSE, aws = TRUE, family = "Gaussian",
lkern = "Triangle", aggkern = "Uniform", sigma2 = NULL, shape = NULL,
scorr = 0, spmin = 0.25, ladjust = 1, wghts = NULL, u = NULL,
graph = FALSE, demo = FALSE, patchsize = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{y}{array \code{y} containing the observe response (image intensity) data.
\code{dim(y)} determines the dimensionality and extend of the grid design.
}
\item{mask}{logical array defining a mask. All computations are restricted to the mask.
}
\item{hmax}{ \code{hmax} specifies the maximal bandwidth. Defaults to \code{hmax=250, 12, 5}
for 1D, 2D, 3D images, respectively.
In case of \code{lkern="Gaussian"} the bandwidth is assumed to be given in full width half maximum (FWHM) units, i.e., \code{0.42466} times gridsize.
}
\item{onestep}{
apply the last step only (use for test purposes only)
}
\item{aws}{
logical: if TRUE structural adaptation (AWS) is used.
}
\item{family}{\code{family} specifies the probability distribution. Default is \code{family="Gaussian"}, also implemented
are "Bernoulli", "Poisson", "Exponential", "Volatility", "Variance" and "NCchi". \code{family="Volatility"} specifies a Gaussian distribution with
expectation 0 and unknown variance. \code{family="Volatility"} specifies that \code{p*y/theta} is distributed as \eqn{\chi^2} with \code{p=shape}
degrees of freedom. \code{family="NCchi"} uses a noncentral Chi distribution with \code{p=shape} degrees of freedom and noncentrality parameter \code{theta}
}
\item{lkern}{
character: location kernel, either "Triangle", "Plateau", "Quadratic", "Cubic" or "Gaussian".
The default "Triangle" is equivalent to using an Epanechnikov kernel, "Quadratic" and "Cubic" refer to a Bi-weight and Tri-weight
kernel, see Fan and Gijbels (1996). "Gaussian" is a truncated (compact support) Gaussian kernel.
This is included for comparisons only and should be avoided due to its large computational costs.
}
\item{aggkern}{
character: kernel used in stagewise aggregation, either "Triangle" or "Uniform"
}
\item{sigma2}{
\code{sigma2} allows to specify the variance in case of \code{family="Gaussian"}. Not used if \code{family!="Gaussian"}.
Defaults to \code{NULL}. In this case a homoskedastic variance estimate is generated. If \code{length(sigma2)==length(y)} then \code{sigma2}
is assumed to contain the pointwise variance of \code{y} and a heteroscedastic variance model is used.
}
\item{shape}{Allows to specify an additional shape parameter for certain family models. Currently only used for family="Variance", that is \eqn{\chi}-Square distributed observations
with \code{shape} degrees of freedom.
}
\item{scorr}{
The vector \code{scorr} allows to specify a first order correlations of the noise for each coordinate direction,
defaults to 0 (no correlation).
}
\item{spmin}{
Determines the form (size of the plateau) in the adaptation kernel.
Not to be changed by the user.
}
\item{ladjust}{
factor to increase the default value of lambda
}
\item{wghts}{\code{wghts} specifies the diagonal elements of a weight matrix to adjust for different distances between grid-points
in different coordinate directions, i.e. allows to define a more appropriate metric in the design space.
}
\item{u}{
a "true" value of the regression function, may be provided to
report risks at each iteration. This can be used to test the propagation condition with \code{u=0}
}
\item{graph}{If \code{graph=TRUE} intermediate results are illustrated after each iteration step. Defaults to \code{graph=FALSE}.
}
\item{demo}{ If \code{demo=TRUE} the function pauses after each iteration. Defaults to \code{demo=FALSE}.
}
\item{patchsize}{
positive integer defining the size of patches. Number of grid points within the patch is \code{(2*patchsize+1)^d} with \code{d} denoting the dimensionality of the design.
}
}
\details{ see \code{\link{aws}}. The procedure is supposed to produce superior results if the assumption of a local constant image is violated or if smoothness of discontinuities is desired.
}
\value{
returns an object of class \code{aws} with slots
\item{y = "numeric"}{y}
\item{dy = "numeric"}{dim(y)}
\item{x = "numeric"}{numeric(0)}
\item{ni = "integer"}{integer(0)}
\item{mask = "logical"}{logical(0)}
\item{theta = "numeric"}{Estimates of regression function, \code{length: length(y)}}
\item{hseq = "numeric"}{sequence of bandwidths employed}
\item{mae = "numeric"}{Mean absolute error for each iteration step if u was specified, numeric(0) else}
\item{psnr = "numeric"}{Peak signal-to-noise ratio for each iteration step if u was specified, numeric(0) else}
\item{var = "numeric"}{approx. variance of the estimates of the regression function. Please note that this does not reflect variability due to randomness of weights.}
\item{xmin = "numeric"}{numeric(0)}
\item{xmax = "numeric"}{numeric(0)}
\item{wghts = "numeric"}{numeric(0), ratio of distances \code{wghts[-1]/wghts[1]}}
\item{degree = "integer"}{0}
\item{hmax = "numeric"}{effective hmax}
\item{sigma2 = "numeric"}{provided or estimated error variance}
\item{scorr = "numeric"}{scorr}
\item{family = "character"}{family}
\item{shape = "numeric"}{shape}
\item{lkern = "integer"}{integer code for lkern,
1="Plateau", 2="Triangle", 3="Quadratic", 4="Cubic", 5="Gaussian"}
\item{lambda = "numeric"}{effective value of lambda}
\item{ladjust = "numeric"}{effective value of ladjust}
\item{aws = "logical"}{aws}
\item{memory = "logical"}{memory}
\item{homogen = "logical"}{homogen}
\item{earlystop = "logical"}{FALSE}
\item{varmodel = "character"}{"Constant"}
\item{vcoef = "numeric"}{numeric(0)}
\item{call = "function"}{the arguments of the call to \code{aws}}
}
\references{J. Polzehl, K. Tabelow (2019). Magnetic Resonance Brain Imaging:
Modeling and Data Analysis Using R. Springer, Use R! series. Appendix A.
Doi:10.1007/978-3-030-29184-6.
J. Polzehl, K. Papafitsoros, K. Tabelow. Patch-wise adaptive weights smoothing,
Preprint no. 2520, WIAS, Berlin, 2018, DOI 10.20347/WIAS.PREPRINT.2520.
(to appear in Journal of Statistical Software).
}
\author{
Joerg Polzehl, \email{polzehl@wias-berlin.de},
\url{http://www.wias-berlin.de/people/polzehl/}
}
\note{
use \code{setCores='number of threads'} to enable parallel execution.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{See also \code{\link{aws}}, \code{\link{lpaws}}, \code{\link{vpaws}}, \code{\link{awsdata}}
}
\examples{\dontrun{
setCores(2)
y <- array(rnorm(64^3),c(64,64,64))
yhat <- paws(y,hmax=6)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ smooth }
\keyword{ nonparametric }
\keyword{ regression }
| /man/paws.Rd | no_license | neuroconductor-releases/aws | R | false | false | 8,164 | rd | \name{paws}
\alias{paws}
\alias{pawsm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Adaptive weights smoothing using patches
}
\description{The function implements a version the propagation separation approach that
uses patches instead of individuel voxels for comparisons in parameter space. Functionality is analog to function \code{\link{aws}}. Using patches allows for an improved
handling of locally smooth functions and in 2D and 3D for improved smoothness of
discontinuities at the expense of increased computing time.
}
\usage{
paws(y, hmax = NULL, mask=NULL, onestep = FALSE, aws = TRUE, family = "Gaussian",
lkern = "Triangle", aggkern = "Uniform", sigma2 = NULL, shape = NULL,
scorr = 0, spmin = 0.25, ladjust = 1, wghts = NULL, u = NULL,
graph = FALSE, demo = FALSE, patchsize = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{y}{array \code{y} containing the observe response (image intensity) data.
\code{dim(y)} determines the dimensionality and extend of the grid design.
}
\item{mask}{logical array defining a mask. All computations are restricted to the mask.
}
\item{hmax}{ \code{hmax} specifies the maximal bandwidth. Defaults to \code{hmax=250, 12, 5}
for 1D, 2D, 3D images, respectively.
In case of \code{lkern="Gaussian"} the bandwidth is assumed to be given in full width half maximum (FWHM) units, i.e., \code{0.42466} times gridsize.
}
\item{onestep}{
apply the last step only (use for test purposes only)
}
\item{aws}{
logical: if TRUE structural adaptation (AWS) is used.
}
\item{family}{\code{family} specifies the probability distribution. Default is \code{family="Gaussian"}, also implemented
are "Bernoulli", "Poisson", "Exponential", "Volatility", "Variance" and "NCchi". \code{family="Volatility"} specifies a Gaussian distribution with
expectation 0 and unknown variance. \code{family="Volatility"} specifies that \code{p*y/theta} is distributed as \eqn{\chi^2} with \code{p=shape}
degrees of freedom. \code{family="NCchi"} uses a noncentral Chi distribution with \code{p=shape} degrees of freedom and noncentrality parameter \code{theta}
}
\item{lkern}{
character: location kernel, either "Triangle", "Plateau", "Quadratic", "Cubic" or "Gaussian".
The default "Triangle" is equivalent to using an Epanechnikov kernel, "Quadratic" and "Cubic" refer to a Bi-weight and Tri-weight
kernel, see Fan and Gijbels (1996). "Gaussian" is a truncated (compact support) Gaussian kernel.
This is included for comparisons only and should be avoided due to its large computational costs.
}
\item{aggkern}{
character: kernel used in stagewise aggregation, either "Triangle" or "Uniform"
}
\item{sigma2}{
\code{sigma2} allows to specify the variance in case of \code{family="Gaussian"}. Not used if \code{family!="Gaussian"}.
Defaults to \code{NULL}. In this case a homoskedastic variance estimate is generated. If \code{length(sigma2)==length(y)} then \code{sigma2}
is assumed to contain the pointwise variance of \code{y} and a heteroscedastic variance model is used.
}
\item{shape}{Allows to specify an additional shape parameter for certain family models. Currently only used for family="Variance", that is \eqn{\chi}-Square distributed observations
with \code{shape} degrees of freedom.
}
\item{scorr}{
The vector \code{scorr} allows to specify a first order correlations of the noise for each coordinate direction,
defaults to 0 (no correlation).
}
\item{spmin}{
Determines the form (size of the plateau) in the adaptation kernel.
Not to be changed by the user.
}
\item{ladjust}{
factor to increase the default value of lambda
}
\item{wghts}{\code{wghts} specifies the diagonal elements of a weight matrix to adjust for different distances between grid-points
in different coordinate directions, i.e. allows to define a more appropriate metric in the design space.
}
\item{u}{
a "true" value of the regression function, may be provided to
report risks at each iteration. This can be used to test the propagation condition with \code{u=0}
}
\item{graph}{If \code{graph=TRUE} intermediate results are illustrated after each iteration step. Defaults to \code{graph=FALSE}.
}
\item{demo}{ If \code{demo=TRUE} the function pauses after each iteration. Defaults to \code{demo=FALSE}.
}
\item{patchsize}{
positive integer defining the size of patches. Number of grid points within the patch is \code{(2*patchsize+1)^d} with \code{d} denoting the dimensionality of the design.
}
}
\details{ see \code{\link{aws}}. The procedure is supposed to produce superior results if the assumption of a local constant image is violated or if smoothness of discontinuities is desired.
}
\value{
returns an object of class \code{aws} with slots
\item{y = "numeric"}{y}
\item{dy = "numeric"}{dim(y)}
\item{x = "numeric"}{numeric(0)}
\item{ni = "integer"}{integer(0)}
\item{mask = "logical"}{logical(0)}
\item{theta = "numeric"}{Estimates of regression function, \code{length: length(y)}}
\item{hseq = "numeric"}{sequence of bandwidths employed}
\item{mae = "numeric"}{Mean absolute error for each iteration step if u was specified, numeric(0) else}
\item{psnr = "numeric"}{Peak signal-to-noise ratio for each iteration step if u was specified, numeric(0) else}
\item{var = "numeric"}{approx. variance of the estimates of the regression function. Please note that this does not reflect variability due to randomness of weights.}
\item{xmin = "numeric"}{numeric(0)}
\item{xmax = "numeric"}{numeric(0)}
\item{wghts = "numeric"}{numeric(0), ratio of distances \code{wghts[-1]/wghts[1]}}
\item{degree = "integer"}{0}
\item{hmax = "numeric"}{effective hmax}
\item{sigma2 = "numeric"}{provided or estimated error variance}
\item{scorr = "numeric"}{scorr}
\item{family = "character"}{family}
\item{shape = "numeric"}{shape}
\item{lkern = "integer"}{integer code for lkern,
1="Plateau", 2="Triangle", 3="Quadratic", 4="Cubic", 5="Gaussian"}
\item{lambda = "numeric"}{effective value of lambda}
\item{ladjust = "numeric"}{effective value of ladjust}
\item{aws = "logical"}{aws}
\item{memory = "logical"}{memory}
\item{homogen = "logical"}{homogen}
\item{earlystop = "logical"}{FALSE}
\item{varmodel = "character"}{"Constant"}
\item{vcoef = "numeric"}{numeric(0)}
\item{call = "function"}{the arguments of the call to \code{aws}}
}
\references{J. Polzehl, K. Tabelow (2019). Magnetic Resonance Brain Imaging:
Modeling and Data Analysis Using R. Springer, Use R! series. Appendix A.
Doi:10.1007/978-3-030-29184-6.
J. Polzehl, K. Papafitsoros, K. Tabelow. Patch-wise adaptive weights smoothing,
Preprint no. 2520, WIAS, Berlin, 2018, DOI 10.20347/WIAS.PREPRINT.2520.
(to appear in Journal of Statistical Software).
}
\author{
Joerg Polzehl, \email{polzehl@wias-berlin.de},
\url{http://www.wias-berlin.de/people/polzehl/}
}
\note{
use \code{setCores='number of threads'} to enable parallel execution.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{See also \code{\link{aws}}, \code{\link{lpaws}}, \code{\link{vpaws}}, \code{\link{awsdata}}
}
\examples{\dontrun{
setCores(2)
y <- array(rnorm(64^3),c(64,64,64))
yhat <- paws(y,hmax=6)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ smooth }
\keyword{ nonparametric }
\keyword{ regression }
|
\name{helpers}
\alias{predictive.density}
\alias{predictive.draws}
\alias{parameter.draws}
\title{
Helper Functions to Access BVAR Forecast Distributions and Parameter Draws
}
\description{
Functions to extract a univariate posterior predictive distribution, as well as posterior draws of model parameters, from a model fit generated by \code{\link{bvar.sv.tvp}}.
}
\usage{
predictive.density(fit, v = 1, h = 1, cdf = FALSE)
predictive.draws(fit, v = 1, h = 1)
parameter.draws(fit, type = "lag1", row = 1, col = 1)
}
\arguments{
\item{fit}{List, model fit generated by \code{\link{bvar.sv.tvp}}}
\item{v}{Index for variable of interest. \emph{Must be in line with the specification of \code{fit}}.}
\item{h}{Index for forecast horizon of interest. \emph{Must be in line with the specification of \code{fit}}.}
\item{cdf}{Set to TRUE to return cumulative distribution function, set to FALSE to return probability density function}
\item{type}{Character string, used to specify output for function \code{\link{parameter.draws}}. Setting to \code{"intercept"} returns parameter draws for the intercept vector. Setting to one of \code{"lag1"}, ..., \code{"lagX"}, (where X is the lag order used in \code{fit})
returns parameter draws from the autoregressive coefficient matrices. Setting to \code{"vcv"} returns draws for the elements of the residual variance-covariance matrix.}
\item{row, col}{Row and column index for the parameter for which \code{\link{parameter.draws}} should return posterior draws. That is, the function returns the row, col element of the matrix specified by \code{type}. Note that
\code{col} is irrelevant if \code{type = "intercept"} has been chosen.}
}
\value{
\code{\link{predictive.density}} returns a function \code{f(z)}, which yields the value(s) of the predictive density at point(s) \code{z}. This function exploits conditional normality of the model, given the posterior draws of the parameters.
\code{\link{predictive.draws}} returns a list containing vectors of MCMC draws, more specifically:
\item{y}{Draws from the predictand itself}
\item{m}{Mean of the normal distribution for the predictand in each draw}
\item{v}{Variance of the normal distribution for the predictand in each draw}
Both outputs should be closely in line with each other (apart from a small amount of sampling noise), see the link below for details.
\code{\link{parameter.draws}} returns posterior draws for a single (scalar) parameter of the model fitted by \code{\link{bvar.sv.tvp}}. The output is a matrix, with rows representing MCMC draws, and columns representing time.
}
\author{
Fabian Krueger
}
\examples{
\dontrun{
# Load US macro data
data(usmacro)
# Estimate trivariate BVAR using default settings
set.seed(5813)
bv <- bvar.sv.tvp(usmacro)
# Construct predictive density function for the second variable (inflation), one period ahead
f <- predictive.density(bv, v = 2, h = 1)
# Plot the density for a grid of values
grid <- seq(-2, 5, by = 0.05)
plot(x = grid, y = f(grid), type = "l")
# Cross-check: Extract MCMC sample for the same variable and horizon
smp <- predictive.draws(bv, v = 2, h = 1)
# Add density estimate to plot
lines(density(smp$y), col = "green")
}
}
\seealso{For examples and background, see the accompanying pdf file hosted at \url{https://sites.google.com/site/fk83research/code}.}
\keyword{helpers} | /bvarsv/man/helpers.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 3,386 | rd | \name{helpers}
\alias{predictive.density}
\alias{predictive.draws}
\alias{parameter.draws}
\title{
Helper Functions to Access BVAR Forecast Distributions and Parameter Draws
}
\description{
Functions to extract a univariate posterior predictive distribution, as well as posterior draws of model parameters, from a model fit generated by \code{\link{bvar.sv.tvp}}.
}
\usage{
predictive.density(fit, v = 1, h = 1, cdf = FALSE)
predictive.draws(fit, v = 1, h = 1)
parameter.draws(fit, type = "lag1", row = 1, col = 1)
}
\arguments{
\item{fit}{List, model fit generated by \code{\link{bvar.sv.tvp}}}
\item{v}{Index for variable of interest. \emph{Must be in line with the specification of \code{fit}}.}
\item{h}{Index for forecast horizon of interest. \emph{Must be in line with the specification of \code{fit}}.}
\item{cdf}{Set to TRUE to return cumulative distribution function, set to FALSE to return probability density function}
\item{type}{Character string, used to specify output for function \code{\link{parameter.draws}}. Setting to \code{"intercept"} returns parameter draws for the intercept vector. Setting to one of \code{"lag1"}, ..., \code{"lagX"}, (where X is the lag order used in \code{fit})
returns parameter draws from the autoregressive coefficient matrices. Setting to \code{"vcv"} returns draws for the elements of the residual variance-covariance matrix.}
\item{row, col}{Row and column index for the parameter for which \code{\link{parameter.draws}} should return posterior draws. That is, the function returns the row, col element of the matrix specified by \code{type}. Note that
\code{col} is irrelevant if \code{type = "intercept"} has been chosen.}
}
\value{
\code{\link{predictive.density}} returns a function \code{f(z)}, which yields the value(s) of the predictive density at point(s) \code{z}. This function exploits conditional normality of the model, given the posterior draws of the parameters.
\code{\link{predictive.draws}} returns a list containing vectors of MCMC draws, more specifically:
\item{y}{Draws from the predictand itself}
\item{m}{Mean of the normal distribution for the predictand in each draw}
\item{v}{Variance of the normal distribution for the predictand in each draw}
Both outputs should be closely in line with each other (apart from a small amount of sampling noise), see the link below for details.
\code{\link{parameter.draws}} returns posterior draws for a single (scalar) parameter of the model fitted by \code{\link{bvar.sv.tvp}}. The output is a matrix, with rows representing MCMC draws, and columns representing time.
}
\author{
Fabian Krueger
}
\examples{
\dontrun{
# Load US macro data
data(usmacro)
# Estimate trivariate BVAR using default settings
set.seed(5813)
bv <- bvar.sv.tvp(usmacro)
# Construct predictive density function for the second variable (inflation), one period ahead
f <- predictive.density(bv, v = 2, h = 1)
# Plot the density for a grid of values
grid <- seq(-2, 5, by = 0.05)
plot(x = grid, y = f(grid), type = "l")
# Cross-check: Extract MCMC sample for the same variable and horizon
smp <- predictive.draws(bv, v = 2, h = 1)
# Add density estimate to plot
lines(density(smp$y), col = "green")
}
}
\seealso{For examples and background, see the accompanying pdf file hosted at \url{https://sites.google.com/site/fk83research/code}.}
\keyword{helpers} |
## run_analysis.R -- build a tidy summary of the UCI HAR data set.
## Reads the raw train/test files, merges them, keeps mean/std features,
## relabels activities, and writes per-subject/activity averages to CSV.

## Check that all necessary input files exist
dat.dir <- "UCI HAR Dataset/"
actLabels.file <- paste0(dat.dir, "activity_labels.txt")
features.file <- paste0(dat.dir, "features.txt")
Xtrain.file <- paste0(dat.dir, "train/X_train.txt")
ytrain.file <- paste0(dat.dir, "train/y_train.txt")
subtrain.file <- paste0(dat.dir, "train/subject_train.txt")
Xtest.file <- paste0(dat.dir, "test/X_test.txt")
ytest.file <- paste0(dat.dir, "test/y_test.txt")
subtest.file <- paste0(dat.dir, "test/subject_test.txt")
expr <- file.exists(actLabels.file, features.file, Xtrain.file, ytrain.file,
                    subtrain.file, Xtest.file, ytest.file, subtest.file)
if (!all(expr)) {
  stop("Necessary files do not exist!")
} else {
  print("All necessary files exist")
}

## Load the raw data files
X.train <- read.table(Xtrain.file)
activity.train <- read.table(ytrain.file)
subject.train <- read.table(subtrain.file)
X.test <- read.table(Xtest.file)
activity.test <- read.table(ytest.file)
subject.test <- read.table(subtest.file)
activity.labels <- read.table(actLabels.file)
features.labels <- read.table(features.file)

## Merge train and test data sets: subject, activity code, measurements
X <- rbind(X.train, X.test)
activity <- rbind(activity.train, activity.test)
subject <- rbind(subject.train, subject.test)
total.data <- cbind(subject, activity, X)

## Find features corresponding to mean or std values.
## angle/meanFreq features are omitted (the [^F] excludes "meanFreq").
idx <- grep("mean[^F]|std", features.labels[, 2])
total.data <- total.data[, c(1, 2, 2 + idx)]

## Assign descriptive names to the columns of the data set by omitting
## parentheses and converting '-' to '_' and ',' to '.' in the labels.
features.cleanlabels <- features.labels[idx, ]
features.cleanlabels[, 2] <- gsub("[()]", "", features.cleanlabels[, 2])
features.cleanlabels[, 2] <- gsub("[-]", "_", features.cleanlabels[, 2])
features.cleanlabels[, 2] <- gsub("[,]", ".", features.cleanlabels[, 2])
colnames(total.data) <- c("subject", "activity",
                          as.character(features.cleanlabels[, 2]))

## Replace numeric activity codes with descriptive activity labels
total.data$activity <- activity.labels[total.data$activity, 2]

## Find the average of each measurement over every subject-activity pair
data.avg <- aggregate(total.data[, 3:ncol(total.data)],
                      by = list(subject = total.data$subject,
                                activity = total.data$activity),
                      mean)

## Export the tidy data set as a .csv file
write.csv(data.avg, "UCI_HAR_tidy.csv", row.names = FALSE)
print(paste0("Success! Tidy dataset saved as: ",getwd(),"/UCI_HAR_tidy.csv")) | /run_analysis.R | no_license | pcharala/GettingAndCleaningDataAssignment | R | false | false | 2,464 | r | ## ckeck if necessary files exist
## Verify that every required raw data file is present before doing any work.
dat.dir <- "UCI HAR Dataset/"
required.files <- paste0(dat.dir,
                         c("activity_labels.txt",
                           "features.txt",
                           "train/X_train.txt",
                           "train/y_train.txt",
                           "train/subject_train.txt",
                           "test/X_test.txt",
                           "test/y_test.txt",
                           "test/subject_test.txt"))
if (!all(file.exists(required.files))) {
  stop("Necessary files do not exist!")
} else {
  print("All necessary files exist")
}

## Read the raw tables (indices match the order of required.files above).
act.labels <- read.table(required.files[1])
feat.labels <- read.table(required.files[2])
x.train <- read.table(required.files[3])
y.train <- read.table(required.files[4])
subj.train <- read.table(required.files[5])
x.test <- read.table(required.files[6])
y.test <- read.table(required.files[7])
subj.test <- read.table(required.files[8])

## Stack train on top of test, then bind subject, activity, measurements.
total.data <- cbind(rbind(subj.train, subj.test),
                    rbind(y.train, y.test),
                    rbind(x.train, x.test))

## Select only mean/std measurements; angle/meanFreq features are omitted.
idx <- grep("mean[^F]|std", feat.labels[, 2])
total.data <- total.data[, c(1, 2, 2 + idx)]

## Derive clean column names: strip parentheses, '-' -> '_', ',' -> '.'
clean.names <- as.character(feat.labels[idx, 2])
clean.names <- gsub("[()]", "", clean.names)
clean.names <- gsub("[-]", "_", clean.names)
clean.names <- gsub("[,]", ".", clean.names)
colnames(total.data) <- c("subject", "activity", clean.names)

## Translate numeric activity codes into descriptive labels.
total.data$activity <- act.labels[total.data$activity, 2]

## Compute the mean of every measurement per subject-activity pair.
data.avg <- aggregate(total.data[, 3:length(total.data)],
                      by = list(subject = total.data$subject,
                                activity = total.data$activity),
                      mean)

## Write out the tidy data set.
write.csv(data.avg, "UCI_HAR_tidy.csv", row.names = FALSE)
print(paste0("Success! Tidy dataset saved as: ",getwd(),"/UCI_HAR_tidy.csv")) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_data.R
\name{generate_scrambled_data}
\alias{generate_scrambled_data}
\title{Permute the data within samples, maintaining relative abundances but
scrambling patterns of variation within taxa}
\usage{
generate_scrambled_data(
tax_level = "ASV",
host_sample_min = 75,
count_threshold = 1,
sample_threshold = 0.2
)
}
\arguments{
\item{tax_level}{taxonomic level at which to agglomerate data}
\item{host_sample_min}{minimum sample number for host inclusion in the
filtered data set}
\item{count_threshold}{minimum count for taxon inclusion in the filtered data
set}
\item{sample_threshold}{minimum proportion of samples within each host at
which a taxon must be observed at or above count_threshold}
}
\value{
a named list of count table, taxonomy, and metadata components
}
\description{
Permute the data within samples, maintaining relative abundances but
scrambling patterns of variation within taxa
}
| /man/generate_scrambled_data.Rd | no_license | kimberlyroche/rulesoflife | R | false | true | 996 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_data.R
\name{generate_scrambled_data}
\alias{generate_scrambled_data}
\title{Permute the data within samples, maintaining relative abundances but
scrambling patterns of variation within taxa}
\usage{
generate_scrambled_data(
tax_level = "ASV",
host_sample_min = 75,
count_threshold = 1,
sample_threshold = 0.2
)
}
\arguments{
\item{tax_level}{taxonomic level at which to agglomerate data}
\item{host_sample_min}{minimum sample number for host inclusion in the
filtered data set}
\item{count_threshold}{minimum count for taxon inclusion in the filtered data
set}
\item{sample_threshold}{minimum proportion of samples within each host at
which a taxon must be observed at or above count_threshold}
}
\value{
a named list of count table, taxonomy, and metadata components
}
\description{
Permute the data within samples, maintaining relative abundances but
scrambling patterns of variation within taxa
}
|
% Please edit documentation in R/tidy.R
\name{tidy_source}
\alias{tidy_source}
\title{Reformat R code while preserving blank lines and comments}
\usage{
tidy_source(source = "clipboard", comment = getOption("formatR.comment",
TRUE), blank = getOption("formatR.blank", TRUE), arrow = getOption("formatR.arrow",
FALSE), brace.newline = getOption("formatR.brace.newline", FALSE),
indent = getOption("formatR.indent", 4), output = TRUE, text = NULL,
width.cutoff = getOption("width"), ...)
}
\arguments{
\item{source}{a character string: location of the source code (default to be
the clipboard; this means we can copy the code to clipboard and use
\code{tidy_source()} without specifying the argument \code{source})}
\item{comment}{whether to keep comments (\code{TRUE} by default)}
\item{blank}{whether to keep blank lines (\code{TRUE} by default)}
\item{arrow}{whether to replace the assign operator \code{=} with \code{<-}}
\item{brace.newline}{whether to put the left brace \code{\{} to a new line
(default \code{FALSE})}
\item{indent}{number of spaces to indent the code (default 4)}
\item{output}{output to the console or a file using \code{\link{cat}}?}
\item{text}{an alternative way to specify the input: if it is \code{NULL},
the function will read the source code from the \code{source} argument;
alternatively, if \code{text} is a character vector containing the source
code, it will be used as the input and the \code{source} argument will be
ignored}
\item{width.cutoff}{passed to \code{\link{deparse}}: integer in [20, 500]
determining the cutoff at which line-breaking is tried (default to be
\code{getOption("width")})}
\item{...}{other arguments passed to \code{\link{cat}}, e.g. \code{file}
(this can be useful for batch-processing R scripts, e.g.
\code{tidy_source(source = 'input.R', file = 'output.R')})}
}
\value{
A list with components \item{text.tidy}{the reformatted code as a
character vector} \item{text.mask}{the code containing comments, which are
masked in assignments or with the weird operator}
}
\description{
This function returns reformatted source code; it tries to preserve blank
lines and comments, which is different from \code{\link{parse}} and
\code{\link{deparse}}. It can also replace \code{=} with \code{<-} where
\code{=} means assignments, and reindent code by a specified number of spaces
(default is 4).
}
\note{
Be sure to read the reference to know other limitations.
}
\examples{
library(formatR)
## a messy R script
messy = system.file("format", "messy.R", package = "formatR")
tidy_source(messy)
## use the 'text' argument
src = readLines(messy)
## source code
cat(src, sep = "\\n")
## the formatted version
tidy_source(text = src)
## preserve blank lines
tidy_source(text = src, blank = TRUE)
## indent with 2 spaces
tidy_source(text = src, indent = 2)
## discard comments!
tidy_source(text = src, comment = FALSE)
## wanna see the gory truth??
tidy_source(text = src, output = FALSE)$text.mask
## tidy up the source code of image demo
x = file.path(system.file(package = "graphics"), "demo", "image.R")
# to console
tidy_source(x)
# to a file
f = tempfile()
tidy_source(x, blank = TRUE, file = f)
## check the original code here and see the difference
file.show(x)
file.show(f)
## use global options
options(comment = TRUE, blank = FALSE)
tidy_source(x)
## if you've copied R code into the clipboard
if (interactive()) {
tidy_source("clipboard")
## write into clipboard again
tidy_source("clipboard", file = "clipboard")
}
## the if-else structure
tidy_source(text = c("{if(TRUE)1 else 2; if(FALSE){1+1", "## comments", "} else 2}"))
}
\author{
Yihui Xie <\url{http://yihui.name}> with substantial contribution
from Yixuan Qiu <\url{http://yixuan.cos.name}>
}
\references{
\url{http://yihui.name/formatR} (an introduction to this package,
with examples and further notes)
}
\seealso{
\code{\link{parse}}, \code{\link{deparse}}
}
| /man/tidy_source.Rd | no_license | hudsonchaves/formatR | R | false | false | 3,947 | rd | % Please edit documentation in R/tidy.R
\name{tidy_source}
\alias{tidy_source}
\title{Reformat R code while preserving blank lines and comments}
\usage{
tidy_source(source = "clipboard", comment = getOption("formatR.comment",
TRUE), blank = getOption("formatR.blank", TRUE), arrow = getOption("formatR.arrow",
FALSE), brace.newline = getOption("formatR.brace.newline", FALSE),
indent = getOption("formatR.indent", 4), output = TRUE, text = NULL,
width.cutoff = getOption("width"), ...)
}
\arguments{
\item{source}{a character string: location of the source code (default to be
the clipboard; this means we can copy the code to clipboard and use
\code{tidy_source()} without specifying the argument \code{source})}
\item{comment}{whether to keep comments (\code{TRUE} by default)}
\item{blank}{whether to keep blank lines (\code{TRUE} by default)}
\item{arrow}{whether to replace the assign operator \code{=} with \code{<-}}
\item{brace.newline}{whether to put the left brace \code{\{} to a new line
(default \code{FALSE})}
\item{indent}{number of spaces to indent the code (default 4)}
\item{output}{output to the console or a file using \code{\link{cat}}?}
\item{text}{an alternative way to specify the input: if it is \code{NULL},
the function will read the source code from the \code{source} argument;
alternatively, if \code{text} is a character vector containing the source
code, it will be used as the input and the \code{source} argument will be
ignored}
\item{width.cutoff}{passed to \code{\link{deparse}}: integer in [20, 500]
determining the cutoff at which line-breaking is tried (default to be
\code{getOption("width")})}
\item{...}{other arguments passed to \code{\link{cat}}, e.g. \code{file}
(this can be useful for batch-processing R scripts, e.g.
\code{tidy_source(source = 'input.R', file = 'output.R')})}
}
\value{
A list with components \item{text.tidy}{the reformatted code as a
character vector} \item{text.mask}{the code containing comments, which are
masked in assignments or with the weird operator}
}
\description{
This function returns reformatted source code; it tries to preserve blank
lines and comments, which is different from \code{\link{parse}} and
\code{\link{deparse}}. It can also replace \code{=} with \code{<-} where
\code{=} means assignments, and reindent code by a specified number of spaces
(default is 4).
}
\note{
Be sure to read the reference to know other limitations.
}
\examples{
library(formatR)
## a messy R script
messy = system.file("format", "messy.R", package = "formatR")
tidy_source(messy)
## use the 'text' argument
src = readLines(messy)
## source code
cat(src, sep = "\\n")
## the formatted version
tidy_source(text = src)
## preserve blank lines
tidy_source(text = src, blank = TRUE)
## indent with 2 spaces
tidy_source(text = src, indent = 2)
## discard comments!
tidy_source(text = src, comment = FALSE)
## wanna see the gory truth??
tidy_source(text = src, output = FALSE)$text.mask
## tidy up the source code of image demo
x = file.path(system.file(package = "graphics"), "demo", "image.R")
# to console
tidy_source(x)
# to a file
f = tempfile()
tidy_source(x, blank = TRUE, file = f)
## check the original code here and see the difference
file.show(x)
file.show(f)
## use global options
options(comment = TRUE, blank = FALSE)
tidy_source(x)
## if you've copied R code into the clipboard
if (interactive()) {
tidy_source("clipboard")
## write into clipboard again
tidy_source("clipboard", file = "clipboard")
}
## the if-else structure
tidy_source(text = c("{if(TRUE)1 else 2; if(FALSE){1+1", "## comments", "} else 2}"))
}
\author{
Yihui Xie <\url{http://yihui.name}> with substantial contribution
from Yixuan Qiu <\url{http://yixuan.cos.name}>
}
\references{
\url{http://yihui.name/formatR} (an introduction to this package,
with examples and further notes)
}
\seealso{
\code{\link{parse}}, \code{\link{deparse}}
}
|
# Extracted example script for lessR's LineChart()/lc() functions
# (from the package's LineChart.Rd examples). Requires the lessR package.
library(lessR)
### Name: LineChart
### Title: Line Chart such as a Run Chart or Time-Series Chart
### Aliases: LineChart lc
### Keywords: plot line chart run chart time series chart
### ** Examples
# create data frame, d, to mimic reading data with the Read function;
# d contains both numeric and non-numeric data
d <- data.frame(rnorm(50), rnorm(50), rnorm(50), rep(c("A","B"),25))
names(d) <- c("X","Y","Z","C")
# default run chart; NOTE(review): the bare name Y is resolved by lessR's
# non-standard evaluation (presumably against data frame d) -- confirm
LineChart(Y)
# short name: lc is an alias for LineChart
lc(Y)
# save run chart to a pdf file
LineChart(Y, pdf=TRUE)
# LineChart in gray scale, then back to the default theme
style("gray")
LineChart(Y)
style()
# customize run chart with LineChart options
style(panel.fill="mintcream", color="sienna3")
LineChart(Y, line.width=2, area="slategray3", center.line="median")
style() # reset style
# customize run chart with R par parameters
# 24 is the R plotting-symbol (pch) value for a half-triangle pointing up
lc(Y, xlab="My xaxis", ylab="My yaxis", main="My Best Title",
cex.main=1.5, font.main=3, ylim=c(-4,4), shape.pts=24)
# generate steadily increasing values;
# creates a variable named A in the user workspace
A <- sort(rexp(50))
# default line chart
LineChart(A)
# line chart with a border around the plotted values
LineChart(A, area="off")
# time series chart, i.e., with dates, and a filled area,
# with option label for the x-axis
LineChart(A, time.start="2000/09/01", time.by="3 months")
# time series chart from a time series object
y.ts <- ts(A, start=c(2000, 9), frequency=4)
LineChart(y.ts)
# LineChart with a built-in data set
LineChart(breaks, data=warpbreaks)
# Line charts for all numeric variables in a data frame
LineChart()
# Line charts for all specified numeric variables in a list of variables,
# e.g., use the combine or c function to specify a list of variables
LineChart(c(X,Y))
| /data/genthat_extracted_code/lessR/examples/LineChart.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,788 | r | library(lessR)
# Extracted example script for lessR's LineChart()/lc() functions
# (from the package's LineChart.Rd examples). Requires the lessR package.
### Name: LineChart
### Title: Line Chart such as a Run Chart or Time-Series Chart
### Aliases: LineChart lc
### Keywords: plot line chart run chart time series chart
### ** Examples
# create data frame, d, to mimic reading data with the Read function;
# d contains both numeric and non-numeric data
d <- data.frame(rnorm(50), rnorm(50), rnorm(50), rep(c("A","B"),25))
names(d) <- c("X","Y","Z","C")
# default run chart; NOTE(review): the bare name Y is resolved by lessR's
# non-standard evaluation (presumably against data frame d) -- confirm
LineChart(Y)
# short name: lc is an alias for LineChart
lc(Y)
# save run chart to a pdf file
LineChart(Y, pdf=TRUE)
# LineChart in gray scale, then back to the default theme
style("gray")
LineChart(Y)
style()
# customize run chart with LineChart options
style(panel.fill="mintcream", color="sienna3")
LineChart(Y, line.width=2, area="slategray3", center.line="median")
style() # reset style
# customize run chart with R par parameters
# 24 is the R plotting-symbol (pch) value for a half-triangle pointing up
lc(Y, xlab="My xaxis", ylab="My yaxis", main="My Best Title",
cex.main=1.5, font.main=3, ylim=c(-4,4), shape.pts=24)
# generate steadily increasing values;
# creates a variable named A in the user workspace
A <- sort(rexp(50))
# default line chart
LineChart(A)
# line chart with a border around the plotted values
LineChart(A, area="off")
# time series chart, i.e., with dates, and a filled area,
# with option label for the x-axis
LineChart(A, time.start="2000/09/01", time.by="3 months")
# time series chart from a time series object
y.ts <- ts(A, start=c(2000, 9), frequency=4)
LineChart(y.ts)
# LineChart with a built-in data set
LineChart(breaks, data=warpbreaks)
# Line charts for all numeric variables in a data frame
LineChart()
# Line charts for all specified numeric variables in a list of variables,
# e.g., use the combine or c function to specify a list of variables
LineChart(c(X,Y))
|
############ Retrieves soil data from gSSURGO
#' This function queries the gSSURGO database for a series of map unit keys
#'
#' @param mukeys map unit key(s) from gSSURGO
#' @param fields a character vector of the fields to be extracted. See details and the default argument to find out how to define fields.
#'
#' @return a dataframe with soil properties. Units can be looked up from database documentation
#' @export
#'
#' @details
#' Full documentation of available tables and their relationships can be found here \url{www.sdmdataaccess.nrcs.usda.gov/QueryHelp.aspx}
#' There have been occasions where NRCS made some minor changes to the structure of the API; this code is where those changes need
#' to be implemented.
#' Fields need to be defined with their associated tables. For example, sandtotal is a field in the chorizon table which needs to be defined as chorizon.sandtotal_(r/l/h), where
#' r stands for the representative value, l stands for low and h stands for high. At the moment fields from the mapunit, component, muaggatt, and chorizon tables can be extracted.
#'
#' @examples
#' \dontrun{
#' PEcAn.data.land::gSSURGO.Query(
#'   fields = c(
#'     "chorizon.cec7_r", "chorizon.sandtotal_r",
#'     "chorizon.silttotal_r","chorizon.claytotal_r",
#'     "chorizon.om_r","chorizon.hzdept_r","chorizon.frag3to10_r",
#'     "chorizon.dbovendry_r","chorizon.ph1to1h2o_r",
#'     "chorizon.cokey","chorizon.chkey"))
#' }
gSSURGO.Query <- function(mukeys=2747727,
                          fields=c("chorizon.sandtotal_r",
                                   "chorizon.silttotal_r",
                                   "chorizon.claytotal_r")){
  # HTTP headers for the SOAP request against the NRCS SDM Tabular Service.
  headerFields <-
    c(Accept = "text/xml",
      Accept = "multipart/*",
      'Content-Type' = "text/xml; charset=utf-8",
      SOAPAction = "http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx/RunQuery")
  # SOAP envelope embedding a SQL query that joins mapunit -> muaggatt,
  # component and chorizon, restricted to the requested map unit keys.
  # NOTE(review): `fields` and `mukeys` are pasted directly into the SQL
  # string, so callers must supply well-formed table.field names.
  body <- paste('<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<RunQuery xmlns="http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx">
<Query>
SELECT mapunit.mukey, component.cokey, component.mukey, component.comppct_r, ',paste(fields, collapse = ", "),',
muaggatt.aws050wta from mapunit
join muaggatt on mapunit.mukey=muaggatt.mukey
join component on mapunit.mukey=component.mukey
join chorizon on component.cokey=chorizon.cokey
where mapunit.mukey in (', paste(mukeys,collapse = ", "),');
</Query>
</RunQuery>
</soap:Body>
</soap:Envelope>')
  # Gather the raw XML response body in memory via the text-gatherer callback.
  reader <- RCurl::basicTextGatherer()
  # Perform the POST; the return value (a curl status code) is not used.
  out <- RCurl::curlPerform(url = "https://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx",
                            httpheader = headerFields, postfields = body,
                            writefunction = reader$update
  )
  # Parse the SOAP response and descend to the NewDataSet node that holds
  # the result rows; XML parser warnings/messages are silenced.
  suppressWarnings(
    suppressMessages({
      xml_doc <- XML::xmlTreeParse(reader$value())
      xmltop <- XML::xmlRoot(xml_doc)
      tablesxml <- (xmltop[[1]]["RunQueryResponse"][[1]]["RunQueryResult"][[1]]["diffgram"][[1]]["NewDataSet"][[1]])
    })
  )
  # Parse each <Table> row into a one-row data frame and stack them;
  # any failure in parsing returns NULL after printing the condition.
  tryCatch({
    suppressMessages(
      suppressWarnings({
        tables <- XML::getNodeSet(tablesxml,"//Table")
        ##### All data tables below NewDataSet.
        # This method leaves out variables that are all NAs, so we cannot
        # assume a fixed naming scheme for the resulting data frame.
        dfs <- tables %>%
          purrr::map_dfr(function(child){
            # convert the XML node into a named list of field values
            allfields <- XML::xmlToList(child)
            # drop the XML attribute entry; everything else is a data field
            remov <- names(allfields) %in% c(".attrs")
            names(allfields)[!remov] %>%
              purrr::map_dfc(function(nfield){
                # coerce each field to numeric; empty nodes become NA
                outv <- allfields[[nfield]] %>% unlist() %>% as.numeric
                ifelse(length(outv) > 0, outv, NA)
              })%>%
              as.data.frame() %>%
              `colnames<-`(names(allfields)[!remov])
          })%>%
          # keep the column range comppct_r through mukey and drop the
          # aws050wta helper column pulled in by the SQL above
          dplyr::select(comppct_r:mukey) %>%
          dplyr::select(-aws050wta)
      })
    )
    return(dfs)
  },
  error=function(cond) {
    print(cond)
    return(NULL)
  })
}
| /modules/data.land/R/gSSURGO_Query.R | permissive | robkooper/pecan | R | false | false | 4,529 | r | ############ Retrives soil data from gssurgo
#' This function queries the gSSURGO database for a series of map unit keys
#'
#' @param mukeys map unit key from gssurgo
#' @param fields a character vector of the fields to be extracted. See details and the default argument to find out how to define fields.
#'
#' @return a dataframe with soil properties. units can be looked up from database documentation
#' @export
#'
#' @details
#' Full documention of available tables and their relationships can be found here \url{www.sdmdataaccess.nrcs.usda.gov/QueryHelp.aspx}
#' There have been occasions where NRCS made some minor changes to the structure of the API which this code is where those changes need
#' to be implemneted here.
#' Fields need to be defined with their associate tables. For example, sandtotal is a field in chorizon table which needs to be defined as chorizon.sandotal_(r/l/h), where
#' r stands for the representative value, l stand for low and h stands for high. At the momeent fields from mapunit, component, muaggatt, and chorizon tables can be extracted.
#'
#' @examples
#' \dontrun{
#' PEcAn.data.land::gSSURGO.Query(
#' fields = c(
#' "chorizon.cec7_r", "chorizon.sandtotal_r",
#' "chorizon.silttotal_r","chorizon.claytotal_r",
#' "chorizon.om_r","chorizon.hzdept_r","chorizon.frag3to10_r",
#' "chorizon.dbovendry_r","chorizon.ph1to1h2o_r",
#' "chorizon.cokey","chorizon.chkey"))
#' }
gSSURGO.Query <- function(mukeys=2747727,
fields=c("chorizon.sandtotal_r",
"chorizon.silttotal_r",
"chorizon.claytotal_r")){
#browser()
# ,
######### Reteiv soil
headerFields <-
c(Accept = "text/xml",
Accept = "multipart/*",
'Content-Type' = "text/xml; charset=utf-8",
SOAPAction = "http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx/RunQuery")
body <- paste('<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body>
<RunQuery xmlns="http://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx">
<Query>
SELECT mapunit.mukey, component.cokey, component.mukey, component.comppct_r, ',paste(fields, collapse = ", "),',
muaggatt.aws050wta from mapunit
join muaggatt on mapunit.mukey=muaggatt.mukey
join component on mapunit.mukey=component.mukey
join chorizon on component.cokey=chorizon.cokey
where mapunit.mukey in (', paste(mukeys,collapse = ", "),');
</Query>
</RunQuery>
</soap:Body>
</soap:Envelope>')
reader <- RCurl::basicTextGatherer()
out <- RCurl::curlPerform(url = "https://SDMDataAccess.nrcs.usda.gov/Tabular/SDMTabularService.asmx",
httpheader = headerFields, postfields = body,
writefunction = reader$update
)
suppressWarnings(
suppressMessages({
xml_doc <- XML::xmlTreeParse(reader$value())
xmltop <- XML::xmlRoot(xml_doc)
tablesxml <- (xmltop[[1]]["RunQueryResponse"][[1]]["RunQueryResult"][[1]]["diffgram"][[1]]["NewDataSet"][[1]])
})
)
#parsing the table
tryCatch({
suppressMessages(
suppressWarnings({
tables <- XML::getNodeSet(tablesxml,"//Table")
##### All datatables below newdataset
# This method leaves out the variables are all NAs - so we can't have a fixed naming scheme for this df
dfs <- tables %>%
purrr::map_dfr(function(child){
#converting the xml obj to list
allfields <- XML::xmlToList(child)
remov <- names(allfields) %in% c(".attrs")
#browser()
names(allfields)[!remov] %>%
purrr::map_dfc(function(nfield){
#browser()
outv <- allfields[[nfield]] %>% unlist() %>% as.numeric
ifelse(length(outv) > 0, outv, NA)
})%>%
as.data.frame() %>%
`colnames<-`(names(allfields)[!remov])
})%>%
dplyr::select(comppct_r:mukey) %>%
dplyr::select(-aws050wta)
})
)
return(dfs)
},
error=function(cond) {
print(cond)
return(NULL)
})
}
|
\name{ca630}
\alias{ca630}
\docType{data}
\title{Soil Data from the Central Sierra Nevada Region of California}
\description{Site and laboratory data from soils sampled in the central Sierra Nevada Region of California.}
\note{These data are out of date. Pending some new data + documentation. Use with caution}
\usage{data(ca630)}
\format{
List containing:
$site : A data frame containing site information.
\describe{
\item{\code{user_site_id}}{national user site id}
\item{\code{mlra}}{the MLRA}
\item{\code{county}}{the county}
\item{\code{ssa}}{soil survey area}
\item{\code{lon}}{longitude, WGS84}
\item{\code{lat}}{latitude, WGS84}
\item{\code{pedon_key}}{national soil profile id}
\item{\code{user_pedon_id}}{local soil profile id}
\item{\code{cntrl_depth_to_top}}{control section top depth (cm)}
\item{\code{cntrl_depth_to_bot}}{control section bottom depth (cm)}
\item{\code{sampled_taxon_name}}{soil series name}
}
$lab : A data frame containing horizon information.
\describe{
\item{\code{pedon_key}}{national soil profile id}
\item{\code{layer_key}}{national horizon id}
\item{\code{layer_sequence}}{horizon sequence number}
\item{\code{hzn_top}}{horizon top (cm)}
\item{\code{hzn_bot}}{horizon bottom (cm)}
\item{\code{hzn_desgn}}{horizon name}
\item{\code{texture_description}}{USDA soil texture}
\item{\code{nh4_sum_bases}}{sum of bases extracted by ammonium acetate (pH 7)}
\item{\code{ex_acid}}{exchangeable acidity [method ?]}
\item{\code{CEC8.2}}{cation exchange capacity by sum of cations method (pH 8.2)}
\item{\code{CEC7}}{cation exchange capacity by ammonium acetate (pH 7)}
\item{\code{bs_8.2}}{base saturation by sum of cations method (pH 8.2)}
\item{\code{bs_7}}{base saturation by ammonium acetate (pH 7)}
}
}
\details{These data were extracted from the NSSL database. `ca630` is a list composed of site and lab data, each stored as dataframes. These data are modeled by a 1:many (site:lab) relation, with the `pedon_key` acting as the primary key in the `site` table and as the foreign key in the `lab` table.}
\source{\url{https://ncsslabdatamart.sc.egov.usda.gov/}}
\examples{
\dontrun{
library(plyr)
library(lattice)
library(Hmisc)
library(maps)
library(sp)
# check the data out:
data(ca630)
str(ca630)
# note that pedon_key is the link between the two tables
# make a copy of the horizon data
ca <- ca630$lab
# promote to a SoilProfileCollection class object
depths(ca) <- pedon_key ~ hzn_top + hzn_bot
# add site data, based on pedon_key
site(ca) <- ca630$site
# ID data missing coordinates: '|' is a logical OR
(missing.coords.idx <- which(is.na(ca$lat) | is.na(ca$lon)))
# remove missing coordinates by safely subsetting
if(length(missing.coords.idx) > 0)
ca <- ca[-missing.coords.idx, ]
# register spatial data
coordinates(ca) <- ~ lon + lat
# assign a coordinate reference system
proj4string(ca) <- '+proj=longlat +datum=NAD83'
# check the result
print(ca)
# map the data (several ways to do this, here is a simple way)
map(database='county', region='california')
points(coordinates(ca), col='red', cex=0.5)
# aggregate \%BS 7 for all profiles into 1 cm slices
a <- slab(ca, fm= ~ bs_7)
# plot median & IQR by 1 cm slice
xyplot(
top ~ p.q50, data=a, lower=a$p.q25, upper=a$p.q75,
ylim=c(160,-5), alpha=0.5, scales=list(alternating=1, y=list(tick.num=7)),
panel=panel.depth_function, prepanel=prepanel.depth_function,
ylab='Depth (cm)', xlab='Base Saturation at pH 7',
par.settings=list(superpose.line=list(col='black', lwd=2))
)
# aggregate \%BS at pH 8.2 for all profiles by MLRA, along 1 cm slices
# note that mlra is stored in @site
a <- slab(ca, mlra ~ bs_8.2)
# keep only MLRA 18 and 22
a <- subset(a, subset=mlra \%in\% c('18', '22'))
# plot median & IQR by 1 cm slice, using different colors for each MLRA
xyplot(
top ~ p.q50, groups=mlra , data=a, lower=a$p.q25, upper=a$p.q75,
ylim=c(160,-5), alpha=0.5, scales=list(y=list(tick.num=7, alternating=3), x=list(alternating=1)),
panel=panel.depth_function, prepanel=prepanel.depth_function,
ylab='Depth (cm)', xlab='Base Saturation at pH 8.2',
par.settings=list(superpose.line=list(col=c('black','blue'), lty=c(1,2), lwd=2)),
auto.key=list(columns=2, title='MLRA', points=FALSE, lines=TRUE)
)
# safely compute hz-thickness weighted mean CEC (pH 7)
# using data.frame objects
head(lab.agg.cec_7 <- ddply(ca630$lab, .(pedon_key),
.fun=summarise, CEC_7=wtd.mean(bs_7, weights=hzn_bot-hzn_top)))
# extract a SPDF with horizon data along a slice at 25 cm
s.25 <- slice(ca, fm=25 ~ bs_7 + CEC7 + ex_acid)
spplot(s.25, zcol=c('bs_7','CEC7','ex_acid'))
# note that the ordering is preserved:
all.equal(s.25$pedon_key, profile_id(ca))
# extract a data.frame with horizon data at 10, 20, and 50 cm
s.multiple <- slice(ca, fm=c(10,20,50) ~ bs_7 + CEC7 + ex_acid)
# Extract the 2nd horizon from all profiles as SPDF
ca.2 <- ca[, 2]
# subset profiles 1 through 10
ca.1.to.10 <- ca[1:10, ]
# basic plot method: profile plot
plot(ca.1.to.10, name='hzn_desgn')
}
}
\keyword{datasets}
| /man/ca630.Rd | no_license | rsbivand/aqp | R | false | false | 5,095 | rd | \name{ca630}
\alias{ca630}
\docType{data}
\title{Soil Data from the Central Sierra Nevada Region of California}
\description{Site and laboratory data from soils sampled in the central Sierra Nevada Region of California.}
\note{These data are out of date. Pending some new data + documentation. Use with caution}
\usage{data(ca630)}
\format{
List containing:
$site : A data frame containing site information.
\describe{
\item{\code{user_site_id}}{national user site id}
\item{\code{mlra}}{the MLRA}
\item{\code{county}}{the county}
\item{\code{ssa}}{soil survey area}
\item{\code{lon}}{longitude, WGS84}
\item{\code{lat}}{latitude, WGS84}
\item{\code{pedon_key}}{national soil profile id}
\item{\code{user_pedon_id}}{local soil profile id}
\item{\code{cntrl_depth_to_top}}{control section top depth (cm)}
\item{\code{cntrl_depth_to_bot}}{control section bottom depth (cm)}
\item{\code{sampled_taxon_name}}{soil series name}
}
$lab : A data frame containing horizon information.
\describe{
\item{\code{pedon_key}}{national soil profile id}
\item{\code{layer_key}}{national horizon id}
\item{\code{layer_sequence}}{horizon sequence number}
\item{\code{hzn_top}}{horizon top (cm)}
\item{\code{hzn_bot}}{horizon bottom (cm)}
\item{\code{hzn_desgn}}{horizon name}
\item{\code{texture_description}}{USDA soil texture}
\item{\code{nh4_sum_bases}}{sum of bases extracted by ammonium acetate (pH 7)}
\item{\code{ex_acid}}{exchangeable acidity [method ?]}
\item{\code{CEC8.2}}{cation exchange capacity by sum of cations method (pH 8.2)}
\item{\code{CEC7}}{cation exchange capacity by ammonium acetate (pH 7)}
\item{\code{bs_8.2}}{base saturation by sum of cations method (pH 8.2)}
\item{\code{bs_7}}{base saturation by ammonium acetate (pH 7)}
}
}
\details{These data were extracted from the NSSL database. `ca630` is a list composed of site and lab data, each stored as dataframes. These data are modeled by a 1:many (site:lab) relation, with the `pedon_key` acting as the primary key in the `site` table and as the foreign key in the `lab` table.}
\source{\url{https://ncsslabdatamart.sc.egov.usda.gov/}}
\examples{
\dontrun{
library(plyr)
library(lattice)
library(Hmisc)
library(maps)
library(sp)
# check the data out:
data(ca630)
str(ca630)
# note that pedon_key is the link between the two tables
# make a copy of the horizon data
ca <- ca630$lab
# promote to a SoilProfileCollection class object
depths(ca) <- pedon_key ~ hzn_top + hzn_bot
# add site data, based on pedon_key
site(ca) <- ca630$site
# ID data missing coordinates: '|' is a logical OR
(missing.coords.idx <- which(is.na(ca$lat) | is.na(ca$lon)))
# remove missing coordinates by safely subsetting
if(length(missing.coords.idx) > 0)
ca <- ca[-missing.coords.idx, ]
# register spatial data
coordinates(ca) <- ~ lon + lat
# assign a coordinate reference system
proj4string(ca) <- '+proj=longlat +datum=NAD83'
# check the result
print(ca)
# map the data (several ways to do this, here is a simple way)
map(database='county', region='california')
points(coordinates(ca), col='red', cex=0.5)
# aggregate \%BS 7 for all profiles into 1 cm slices
a <- slab(ca, fm= ~ bs_7)
# plot median & IQR by 1 cm slice
xyplot(
top ~ p.q50, data=a, lower=a$p.q25, upper=a$p.q75,
ylim=c(160,-5), alpha=0.5, scales=list(alternating=1, y=list(tick.num=7)),
panel=panel.depth_function, prepanel=prepanel.depth_function,
ylab='Depth (cm)', xlab='Base Saturation at pH 7',
par.settings=list(superpose.line=list(col='black', lwd=2))
)
# aggregate \%BS at pH 8.2 for all profiles by MLRA, along 1 cm slices
# note that mlra is stored in @site
a <- slab(ca, mlra ~ bs_8.2)
# keep only MLRA 18 and 22
a <- subset(a, subset=mlra \%in\% c('18', '22'))
# plot median & IQR by 1 cm slice, using different colors for each MLRA
xyplot(
top ~ p.q50, groups=mlra , data=a, lower=a$p.q25, upper=a$p.q75,
ylim=c(160,-5), alpha=0.5, scales=list(y=list(tick.num=7, alternating=3), x=list(alternating=1)),
panel=panel.depth_function, prepanel=prepanel.depth_function,
ylab='Depth (cm)', xlab='Base Saturation at pH 8.2',
par.settings=list(superpose.line=list(col=c('black','blue'), lty=c(1,2), lwd=2)),
auto.key=list(columns=2, title='MLRA', points=FALSE, lines=TRUE)
)
# safely compute hz-thickness weighted mean CEC (pH 7)
# using data.frame objects
head(lab.agg.cec_7 <- ddply(ca630$lab, .(pedon_key),
.fun=summarise, CEC_7=wtd.mean(bs_7, weights=hzn_bot-hzn_top)))
# extract a SPDF with horizon data along a slice at 25 cm
s.25 <- slice(ca, fm=25 ~ bs_7 + CEC7 + ex_acid)
spplot(s.25, zcol=c('bs_7','CEC7','ex_acid'))
# note that the ordering is preserved:
all.equal(s.25$pedon_key, profile_id(ca))
# extract a data.frame with horizon data at 10, 20, and 50 cm
s.multiple <- slice(ca, fm=c(10,20,50) ~ bs_7 + CEC7 + ex_acid)
# Extract the 2nd horizon from all profiles as SPDF
ca.2 <- ca[, 2]
# subset profiles 1 through 10
ca.1.to.10 <- ca[1:10, ]
# basic plot method: profile plot
plot(ca.1.to.10, name='hzn_desgn')
}
}
\keyword{datasets}
|
#' @import chron
#' @import httr
#' @import rjson
# Package-level mutable state shared by the interana_* functions.
# Connection and query settings are stored here so the query can be
# configured incrementally across several calls before interana_get_data().
pkg.env <- new.env()
pkg.env$interana_host <- ""      # cluster host, e.g. "<company>.interana.com"
pkg.env$auth_token <- ""         # API authorization token
pkg.env$dataset <- ""            # dataset name to query
pkg.env$days_prior <- 0          # look-back window in days (end time = now)
pkg.env$query_type <- ""         # e.g. "single_measurement"
pkg.env$aggregator_type <- ""    # e.g. "count_star"
pkg.env$aggregator_column <- ""  # column for the aggregator, if required
pkg.env$filter_expression <- ""  # URL-escaped filter expression, if any
pkg.env$group_by_column <- ""    # optional group-by column
#' Establish the Interana Client Connection
#'
#' This function allows you to establish the Interana Client Connection
#' @param cluster_host Specify the cluster domain. For eg. https://<cluster-domain>/login.html. This is usually the name of your company or POC with a suffix of .interana.com.
#' @param token Provide the authorization token to validate access to the cluster above.
#' @keywords interana
#' @export
#' @examples
#' interana_client()
interana_client <- function(cluster_host="",token=""){
  # Record the cluster host and auth token in the package environment so that
  # later calls (interana_query()/interana_get_data()) can reuse them.
  # Security fix: only the last 4 characters of the token are echoed, so the
  # credential is not leaked into console logs.
  masked <- if (nchar(token) > 4) {
    paste0(strrep("*", nchar(token) - 4),
           substr(token, nchar(token) - 3, nchar(token)))
  } else {
    token
  }
  print(paste("You have provided the cluster host as:", cluster_host,
              "& authorization token:", masked))
  pkg.env$interana_host <- cluster_host
  pkg.env$auth_token <- token
}
#' Formulate the Interana Query
#'
#' This function allows you to define the Interana Query
#' @param dataset Specify the Interana dataset to query against
#' @param days_prior Specify the interval to search against. In other words: end_time: now, start_time: now-days_prior
#' @keywords interana
#' @export
#' @examples
#' interana_query()
interana_query <- function(dataset="",days_prior=7){
  # Store the dataset name and look-back window (in days) for later use by
  # interana_get_data(); the query interval is [now - days_prior, now].
  print(paste("Defining Query for Dataset:",dataset,"for Days Prior (to now): ",days_prior))
  assign("dataset", dataset, envir = pkg.env)
  assign("days_prior", days_prior, envir = pkg.env)
}
#' Add Interana Query Params
#'
#' This function allows you to further specify Interana Query Params
#' @param query_type Specify the Interana query type. Default=single_measurement. More details at TBD
#' @param agg_type Specify the Interana aggregator type. Default=count_star. More details at TBD
#' @param agg_column Specify the aggregator column, if any. This is dependent on the aggregator type. Default=null. More details at TBD
#' @param filter_expr Specify the filter criteria, if any. This is an URL-escaped value (example: TBD) Default=null. More details at TBD
#' @param group_by_column Specify the group by column. Default=null. More details at TBD
#' @keywords interana
#' @export
#' @examples
#' interana_add_query_params()
interana_add_query_params <- function(query_type="single_measurement",agg_type="count_star",agg_column="",filter_expr="",group_by_column=""){
  # Persist the query shape (type, aggregation, filter, grouping) in the
  # package environment for interana_get_data() to assemble into a request.
  print(paste("Adding Query Params ..."))
  settings <- list(query_type = query_type,
                   aggregator_type = agg_type,
                   aggregator_column = agg_column,
                   filter_expression = filter_expr,
                   group_by_column = group_by_column)
  for (nm in names(settings)) {
    assign(nm, settings[[nm]], envir = pkg.env)
  }
  print(paste("Completed Adding Query Params ."))
}
#' Gets the parameters that are set so far
#'
#' This function shows all the parameters set so far.
#' @keywords get_params
#' @export
#' @examples
#' interana_get_params()
interana_get_params <- function(){
  # Print every stored connection/query parameter for debugging.
  # Security fix: the auth token is partially masked so parameter dumps can
  # be shared (e.g. in bug reports) without leaking the credential.
  token <- pkg.env$auth_token
  masked <- if (is.character(token) && nchar(token) > 4) {
    paste0(strrep("*", nchar(token) - 4),
           substr(token, nchar(token) - 3, nchar(token)))
  } else {
    token
  }
  print("----------Dumping Query Client Params ------")
  print(paste("Interana cluster:",pkg.env$interana_host))
  print(paste("Authorization Token:",masked))
  print(paste("Dataset:",pkg.env$dataset))
  print(paste("Days Prior:",pkg.env$days_prior))
  print(paste("Query Type:",pkg.env$query_type))
  print(paste("Aggregator Type:",pkg.env$aggregator_type))
  print(paste("Aggregator Column:",pkg.env$aggregator_column))
  print(paste("Filter Expression:",pkg.env$filter_expression))
  print(paste("Group By Column:",pkg.env$group_by_column))
  print("----------End Params Dump ------")
}
#' Retrieve the Interana Results from previously formulated data
#'
#' This function allows you to retrieve data from previously defined query
#' @keywords interana
#' @export
#' @examples
#' interana_get_data()
interana_get_data <- function(){
  # Execute the query previously configured via interana_client(),
  # interana_query() and interana_add_query_params() against the Interana
  # /api/v1/query endpoint, then print the decoded response as a data frame.
  print("Retrieving the Interana Data ...")
  # Query window: [now - days_prior, now], built with chron date arithmetic.
  end_time <- as.chron(Sys.time())
  start_time <- end_time - pkg.env$days_prior
  query_item = list("type" = pkg.env$query_type,
                    "measure"=list("aggregator" = pkg.env$aggregator_type,
                                   "column" = pkg.env$aggregator_column))
  if ( pkg.env$filter_expression != "" ){
    # Attach the filter criteria when one was configured.
    query_item$filter <- pkg.env$filter_expression
  }
  else{
    query_item$filter <- ''
  }
  # Start/end are sent as epoch milliseconds.
  # NOTE(review): tz = "UTC" is being passed to as.integer() (which ignores
  # it), not to as.POSIXct() -- confirm the intended timezone handling.
  params <- list("end"=as.integer( as.POSIXct( end_time ),
                                   tz = "UTC" )*1000,
                 "start" = as.integer( as.POSIXct( start_time ),
                                       tz = "UTC" )*1000,
                 "dataset" = pkg.env$dataset,
                 "queries" = list(query_item))
  if (pkg.env$group_by_column != ""){
    # Optional single group-by column, wrapped in a list as the API expects.
    params$group_by <- list(pkg.env$group_by_column)
  }
  cluster_url <- paste("https://",pkg.env$interana_host,"/api/v1/query",sep = "")
  auth_token = paste("Token",pkg.env$auth_token)
  # The whole JSON query document is URL-encoded into a single "query="
  # parameter; the token is sent in the Authorization header.
  r <- GET(cluster_url, accept_json(),
           add_headers('Authorization' = auth_token),
           query = paste("query=",URLencode(toJSON(params)),sep = ""))
  response <- fromJSON(content(r,as = "text"))
  print(as.data.frame(response))
  print("Retrieved the Interana Data !")
}
| /query_sdk/R/interana.query.client/R/get_interana_data.R | permissive | Interana/interana-sdk | R | false | false | 5,325 | r | #' @import chron
#' @import httr
#' @import rjson
# Global Variables
# Package-level mutable state shared by the interana_* functions; settings
# are configured incrementally before interana_get_data() runs the query.
pkg.env <- new.env()
pkg.env$interana_host <- ""      # cluster host name
pkg.env$auth_token <- ""         # API authorization token
pkg.env$dataset <- ""            # dataset name to query
pkg.env$days_prior <- 0          # look-back window in days (end time = now)
pkg.env$query_type <- ""         # e.g. "single_measurement"
pkg.env$aggregator_type <- ""    # e.g. "count_star"
pkg.env$aggregator_column <- ""  # column for the aggregator, if required
pkg.env$filter_expression <- ""  # URL-escaped filter expression, if any
pkg.env$group_by_column <- ""    # optional group-by column
#' Establish the Interana Client Connection
#'
#' This function allows you to establish the Interana Client Connection
#' @param cluster_host Specify the cluster domain. For eg. https://<cluster-domain>/login.html. This is usually the name of your company or POC with a suffix of .interana.com.
#' @param token Provide the authorization token to validate access to the cluster above.
#' @keywords interana
#' @export
#' @examples
#' interana_client()
interana_client <- function(cluster_host="",token=""){
  # Persist the connection details for the other interana_* helpers.
  print(paste("You have provided the cluster host as:",cluster_host,"& authorization token:",token))
  assign("interana_host", cluster_host, envir = pkg.env)
  assign("auth_token", token, envir = pkg.env)
}
#' Formulate the Interana Query
#'
#' This function allows you to define the Interana Query
#' @param dataset Specify the Interana dataset to query against
#' @param days_prior Specify the interval to search against. In other words: end_time: now, start_time: now-days_prior
#' @keywords interana
#' @export
#' @examples
#' interana_query()
interana_query <- function(dataset="",days_prior=7){
  # Record which dataset to query and how many days back from now to search.
  print(paste("Defining Query for Dataset:",dataset,"for Days Prior (to now): ",days_prior))
  list2env(list(dataset = dataset, days_prior = days_prior), envir = pkg.env)
  invisible(days_prior)
}
#' Add Interana Query Params
#'
#' This function allows you to further specify Interana Query Params
#' @param query_type Specify the Interana query type. Default=single_measurement. More details at TBD
#' @param agg_type Specify the Interana aggregator type. Default=count_star. More details at TBD
#' @param agg_column Specify the aggregator column, if any. This is dependent on the aggregator type. Default=null. More details at TBD
#' @param filter_expr Specify the filter criteria, if any. This is an URL-escaped value (example: TBD) Default=null. More details at TBD
#' @param group_by_column Specify the group by column. Default=null. More details at TBD
#' @keywords interana
#' @export
#' @examples
#' interana_add_query_params()
interana_add_query_params <- function(query_type="single_measurement",agg_type="count_star",agg_column="",filter_expr="",group_by_column=""){
  # Store the query-construction parameters in the package environment; they
  # are consumed later by interana_get_data().
  print(paste("Adding Query Params ..."))
  fields <- list(query_type = query_type,
                 aggregator_type = agg_type,
                 aggregator_column = agg_column,
                 filter_expression = filter_expr,
                 group_by_column = group_by_column)
  for (k in names(fields)) {
    pkg.env[[k]] <- fields[[k]]
  }
  print(paste("Completed Adding Query Params ."))
}
#' Gets the parameters that are set so far
#'
#' This function shows all the parameters set so far.
#' @keywords get_params
#' @export
#' @examples
#' interana_get_params()
interana_get_params <- function(){
  # Dump every stored client/query parameter for inspection, one per line.
  print("----------Dumping Query Client Params ------")
  labels <- c("Interana cluster:", "Authorization Token:", "Dataset:",
              "Days Prior:", "Query Type:", "Aggregator Type:",
              "Aggregator Column:", "Filter Expression:", "Group By Column:")
  values <- list(pkg.env$interana_host, pkg.env$auth_token, pkg.env$dataset,
                 pkg.env$days_prior, pkg.env$query_type,
                 pkg.env$aggregator_type, pkg.env$aggregator_column,
                 pkg.env$filter_expression, pkg.env$group_by_column)
  for (i in seq_along(labels)) {
    print(paste(labels[[i]], values[[i]]))
  }
  print("----------End Params Dump ------")
}
#' Retrieve the Interana Results from previously formulated data
#'
#' This function allows you to retrieve data from previously defined query
#' @keywords interana
#' @export
#' @examples
#' interana_get_data()
interana_get_data <- function(){
  # Run the previously configured query against the Interana /api/v1/query
  # endpoint and print the decoded response as a data frame.
  print("Retrieving the Interana Data ...")
  # Query window: [now - days_prior, now] via chron date arithmetic.
  end_time <- as.chron(Sys.time())
  start_time <- end_time - pkg.env$days_prior
  query_item = list("type" = pkg.env$query_type,
                    "measure"=list("aggregator" = pkg.env$aggregator_type,
                                   "column" = pkg.env$aggregator_column))
  if ( pkg.env$filter_expression != "" ){
    # Attach the filter criteria when one was configured.
    query_item$filter <- pkg.env$filter_expression
  }
  else{
    query_item$filter <- ''
  }
  # Start/end are epoch milliseconds.
  # NOTE(review): tz = "UTC" is an argument to as.integer() here (ignored),
  # not to as.POSIXct() -- confirm the intended timezone handling.
  params <- list("end"=as.integer( as.POSIXct( end_time ),
                                   tz = "UTC" )*1000,
                 "start" = as.integer( as.POSIXct( start_time ),
                                       tz = "UTC" )*1000,
                 "dataset" = pkg.env$dataset,
                 "queries" = list(query_item))
  if (pkg.env$group_by_column != ""){
    # Optional single group-by column, wrapped in a list as the API expects.
    params$group_by <- list(pkg.env$group_by_column)
  }
  cluster_url <- paste("https://",pkg.env$interana_host,"/api/v1/query",sep = "")
  auth_token = paste("Token",pkg.env$auth_token)
  # The JSON query document is URL-encoded into a single "query=" parameter;
  # the token travels in the Authorization header.
  r <- GET(cluster_url, accept_json(),
           add_headers('Authorization' = auth_token),
           query = paste("query=",URLencode(toJSON(params)),sep = ""))
  response <- fromJSON(content(r,as = "text"))
  print(as.data.frame(response))
  print("Retrieved the Interana Data !")
}
|
#' Title
#'
#' @param results.object Simulation results from \code{sim.ADDT.test.plan}
#'   (one row per simulated test); its attributes supply the plan, plan
#'   values, and defaults for several other arguments.
#' @param x.of.interest Accelerating-variable (use) condition at which to
#'   evaluate degradation; defaults to the \code{"use.condition"} attribute
#'   of \code{results.object}.
#' @param FailLevel Degradation level defining failure; defaults to the
#'   \code{"FailLevel"} attribute of \code{results.object}.
#' @param plan.values.string Descriptive string for the plan values, used in
#'   the default plot title; defaults to the corresponding attribute.
#' @param plan.string Descriptive string for the test plan, used in the
#'   default plot title; defaults to the corresponding attribute.
#' @param quantile Degradation distribution quantile to plot (default 0.5).
#' @param ylim Response-axis limits; \code{NA} components are filled in from
#'   the simulated curves.
#' @param xlim Time-axis limits; \code{NA} components are filled in from the
#'   test plan's time range.
#' @param xlab x-axis label; defaults to the plan's time units.
#' @param ylab y-axis label; defaults to the plan's response units.
#' @param my.title Plot title; a descriptive default is built when
#'   \code{NULL}.
#' @param title.option Title display option (see \code{GetSMRDDefault}).
#' @param grids Logical; if \code{TRUE}, draw grid lines.
#' @param numplotsim Maximum number of simulated curves to draw.
#' @param nxpoints Number of time points at which each curve is evaluated.
#' @param cex Character expansion used for plotting.
#'
#' @return NULL
#' @export
#'
#' @examples
#' \dontrun{
#'
#' InsulationBrkdwn.ADDTplan <- get.allocation.matrix(list(DegreesC = c(180,225,250,275)),
#' times = c(1,2,4,8,16,32,48,64),
#' time.units = "Weeks",
#' reps = 4)
#'
#' plot(InsulationBrkdwn.ADDTplan)
#'
#' InsulationBrkdwn.ADDTpv <- get.ADDT.plan.values(distribution = "normal",
#' transformation.x = "Arrhenius",
#' transformation.Response = "log",
#' transformation.time = "linear",
#' beta0 = 2.58850162033243,
#' beta1 = -476873415881.376,
#' beta2 = 1.41806367703643,
#' sigma = 0.172609,
#' time.units = "Weeks",
#' response.units = "Volts",
#' FailLevel = 10,
#' use.condition = 100)
#'
#' print(InsulationBrkdwn.ADDTpv)
#'
#' InsulationBrkdwn.vADDTplan <- hframe.to.vframe(InsulationBrkdwn.ADDTplan)
#' sum(allocation(InsulationBrkdwn.vADDTplan))
#'
#' names(InsulationBrkdwn.ADDTpv)
#'
#' InsulationBrkdwn.plan.sim.out <- sim.ADDT.test.plan(ADDT.test.plan = InsulationBrkdwn.ADDTplan,
#' ADDT.plan.values = InsulationBrkdwn.ADDTpv,
#' number.sim = 5)
#'
#' ADDT.plot.time.v.x(InsulationBrkdwn.plan.sim.out)
#'
#' ADDT.plot.Deg.v.Time(InsulationBrkdwn.plan.sim.out)
#' ADDT.plot.FracFail.v.Time(InsulationBrkdwn.plan.sim.out)
#'
#' ADDT.vcv(ADDT.plan.values = InsulationBrkdwn.ADDTpv,
#' ADDT.test.plan = hframe.to.vframe(InsulationBrkdwn.ADDTplan))
#'
#'
#' }
ADDT.plot.Deg.v.Time <-
function (results.object, x.of.interest = NULL, FailLevel = NULL,
    plan.values.string = NULL, plan.string = NULL, quantile = 0.5,
    ylim = c(NA, NA), xlim = c(NA, NA), xlab = NULL, ylab = NULL,
    my.title = NULL, title.option = GetSMRDDefault("SMRD.TitleOption"), grids = F, numplotsim = 50,
    nxpoints = 50, cex = 1)
{
    # Plot a degradation quantile versus time at a chosen use condition,
    # overlaying the true (planning-value) curve with the curves implied by
    # each simulated parameter estimate in `results.object`.
    use.condition <- x.of.interest
    number.sim <- nrow(results.object)  # one simulation per row
    # Fall back to metadata stored as attributes on the simulation results.
    if (is.null(use.condition))
        use.condition <- attr(results.object, "use.condition")
    if (is.null(use.condition))
        stop("Use condition not specified")
    if (is.character(use.condition))
        use.condition <- string.to.frame(use.condition)
    if (is.null(FailLevel))
        FailLevel <- attr(results.object, "FailLevel")
    ADDT.plan.values <- attr(results.object, "plan.values")
    ADDT.test.plan <- attr(results.object, "plan")
    if (is.null(plan.string))
        plan.string <- attr(results.object, "plan.string")
    if (is.null(plan.values.string))
        plan.values.string <- attr(results.object, "plan.values.string")
    # NOTE(review): FailLevelDef is computed but never used below -- confirm
    # whether it was meant to appear in the title.
    FailLevelDef <- paste(FailLevel, get.response.units(ADDT.plan.values))
    if (is.null(xlab))
        xlab <- get.time.units(ADDT.plan.values)
    if (is.null(ylab))
        ylab <- get.response.units(ADDT.plan.values)
    # Unpack the assumed degradation model from the plan values.
    distribution <- ADDT.plan.values$distribution
    transformation.x <- ADDT.plan.values$transformation.x
    transformation.time <- ADDT.plan.values$transformation.time
    x.axis <- transformation.time
    transformation.response <- ADDT.plan.values$transformation.response
    y.axis <- transformation.response
    # Compact model description shown in the default title.
    model.string <- paste("Resp:", transformation.response, ",Time:",
        transformation.time, ",x:", paste(ADDT.plan.values$transformation.x,
            collapse = ","), ", Dist:", distribution, sep = "")
    if (is.null(my.title))
        my.title <- paste("Accelerated destructive degradation test simulation based on\n",
            plan.string, plan.values.string, "\n", quantile,
            "quantile of degradation versus", xlab, "at", paste(use.condition,
                ADDT.plan.values$accelvar.units, collapse = ","),
            "\n", model.string)
    transformation.x <- fix.inverse.relationship(transformation.x)
    # NOTE(review): slope.name is extracted but never used below.
    slope.name <- attr(transformation.x, "slope.name")
    # NOTE(review): the response axis is forced to "log" here, overriding the
    # transformation.response-derived value assigned above -- confirm intended.
    y.axis <- "log"
    numplotsim <- min(number.sim, numplotsim)  # never draw more curves than sims
    the.model <- list(distribution = distribution, transformation.x = transformation.x,
        transformation.time = transformation.time, transformation.response = transformation.response)
    # Wrap the planning-value parameters in a dummy "gmle.out" object so the
    # downstream quantile machinery can treat them like fitted estimates.
    Dummy.Dest.Degrad.out <- list(dummy = T, origparam = ADDT.plan.values$theta.vec,
        origparamvcv = diag(length(ADDT.plan.values$theta.vec)),
        model = the.model)
    oldClass(Dummy.Dest.Degrad.out) <- "gmle.out"
    # Fill unspecified xlim components from the test plan's time range.
    derived.time.range <- range(times(ADDT.test.plan))
    xrna <- is.na(xlim)
    if (any(xrna))
        xlim[xrna] <- derived.time.range[xrna]
    time.vec <- seq(xlim[1], xlim[2], length = nxpoints)
    # True degradation quantile curve implied by the planning values.
    degradation.true <- fx.ADDT.degradation.quantile(theta.hat = Dummy.Dest.Degrad.out$origparam,
        p = quantile, time.vec = time.vec, distribution = distribution,
        xuse = use.condition, transformation.x = transformation.x,
        transformation.time = transformation.time)
    # Coerce the results to a plain matrix (drops data-frame semantics).
    uber.results.object <- matrix(results.object[1:nrow(results.object),
        1:ncol(results.object), drop = FALSE], ncol = ncol(results.object),
        nrow = nrow(results.object), byrow = F)
    # One quantile curve per simulated parameter vector; the first
    # length(theta.vec) columns of each row are the estimated parameters.
    degradation.mat <- (apply(uber.results.object[, 1:length(ADDT.plan.values$theta.vec),
        drop = F], 1, fx.ADDT.degradation.quantile, p = quantile,
        time.vec = time.vec, distribution = distribution, xuse = use.condition,
        transformation.x = transformation.x, transformation.time = transformation.time))
    # Fill unspecified ylim components from the simulated curves, mapped back
    # to the original response scale.
    derived.ylim <- f.relationshipinv(range(degradation.mat),
        transformation.response)
    yrna <- is.na(ylim)
    trans.time.vec <- f.relationship(time.vec, x.axis)
    if (any(yrna))
        ylim[yrna] <- derived.ylim[yrna]
    # Set up the (possibly transformed) plotting canvas, then draw the true
    # curve (thick solid) and up to numplotsim simulated curves (dashed).
    plot.paper(ylim = ylim, xlim = xlim, x.axis = x.axis,
        y.axis = y.axis, my.title = "", title.option = title.option,
        cex = cex, xlab = xlab, ylab = ylab, grids = grids, cex.title = 0.8)
    # Trim the first/last two time points from the simulated curves.
    take.out <- c(1, 2, length(time.vec) - 1, length(time.vec))
    lines(trans.time.vec, degradation.true, col = 1, lwd = 4,
        lty = 1)
    matlines(trans.time.vec[-take.out], degradation.mat[-take.out,
        1:numplotsim], col = 1, lty = 2)
    mtext(text = my.title, line = 0.5, side = 3)
    invisible()
}
| /R/ADDT.plot.Deg.v.Time.R | no_license | anhnguyendepocen/SMRD | R | false | false | 7,049 | r | #' Title
#'
#' @param results.object
#' @param x.of.interest
#' @param FailLevel
#' @param plan.values.string
#' @param plan.string
#' @param quantile
#' @param ylim
#' @param xlim
#' @param xlab
#' @param ylab
#' @param my.title
#' @param title.option
#' @param grids
#' @param numplotsim
#' @param nxpoints
#' @param cex
#'
#' @return NULL
#' @export
#'
#' @examples
#' \dontrun{
#'
#' InsulationBrkdwn.ADDTplan <- get.allocation.matrix(list(DegreesC = c(180,225,250,275)),
#' times = c(1,2,4,8,16,32,48,64),
#' time.units = "Weeks",
#' reps = 4)
#'
#' plot(InsulationBrkdwn.ADDTplan)
#'
#' InsulationBrkdwn.ADDTpv <- get.ADDT.plan.values(distribution = "normal",
#' transformation.x = "Arrhenius",
#' transformation.Response = "log",
#' transformation.time = "linear",
#' beta0 = 2.58850162033243,
#' beta1 = -476873415881.376,
#' beta2 = 1.41806367703643,
#' sigma = 0.172609,
#' time.units = "Weeks",
#' response.units = "Volts",
#' FailLevel = 10,
#' use.condition = 100)
#'
#' print(InsulationBrkdwn.ADDTpv)
#'
#' InsulationBrkdwn.vADDTplan <- hframe.to.vframe(InsulationBrkdwn.ADDTplan)
#' sum(allocation(InsulationBrkdwn.vADDTplan))
#'
#' names(InsulationBrkdwn.ADDTpv)
#'
#' InsulationBrkdwn.plan.sim.out <- sim.ADDT.test.plan(ADDT.test.plan = InsulationBrkdwn.ADDTplan,
#' ADDT.plan.values = InsulationBrkdwn.ADDTpv,
#' number.sim = 5)
#'
#' ADDT.plot.time.v.x(InsulationBrkdwn.plan.sim.out)
#'
#' ADDT.plot.Deg.v.Time(InsulationBrkdwn.plan.sim.out)
#' ADDT.plot.FracFail.v.Time(InsulationBrkdwn.plan.sim.out)
#'
#' ADDT.vcv(ADDT.plan.values = InsulationBrkdwn.ADDTpv,
#' ADDT.test.plan = hframe.to.vframe(InsulationBrkdwn.ADDTplan))
#'
#'
#' }
ADDT.plot.Deg.v.Time <-
function (results.object, x.of.interest = NULL, FailLevel = NULL,
plan.values.string = NULL, plan.string = NULL, quantile = 0.5,
ylim = c(NA, NA), xlim = c(NA, NA), xlab = NULL, ylab = NULL,
my.title = NULL, title.option = GetSMRDDefault("SMRD.TitleOption"), grids = F, numplotsim = 50,
nxpoints = 50, cex = 1)
{
# Resolve the use condition: an explicit argument wins; otherwise fall
# back to the "use.condition" attribute attached by the simulation routine.
use.condition <- x.of.interest
number.sim <- nrow(results.object)
if (is.null(use.condition))
use.condition <- attr(results.object, "use.condition")
if (is.null(use.condition))
stop("Use condition not specified")
if (is.character(use.condition))
use.condition <- string.to.frame(use.condition)
# Pull remaining plan metadata from the results object's attributes when
# the caller did not supply it.
if (is.null(FailLevel))
FailLevel <- attr(results.object, "FailLevel")
ADDT.plan.values <- attr(results.object, "plan.values")
ADDT.test.plan <- attr(results.object, "plan")
if (is.null(plan.string))
plan.string <- attr(results.object, "plan.string")
if (is.null(plan.values.string))
plan.values.string <- attr(results.object, "plan.values.string")
# NOTE(review): FailLevelDef is computed but never referenced again in
# this function.
FailLevelDef <- paste(FailLevel, get.response.units(ADDT.plan.values))
# Default axis labels come from the plan's time/response units.
if (is.null(xlab))
xlab <- get.time.units(ADDT.plan.values)
if (is.null(ylab))
ylab <- get.response.units(ADDT.plan.values)
distribution <- ADDT.plan.values$distribution
transformation.x <- ADDT.plan.values$transformation.x
transformation.time <- ADDT.plan.values$transformation.time
x.axis <- transformation.time
transformation.response <- ADDT.plan.values$transformation.response
y.axis <- transformation.response
# Assemble a compact model-description string for the default title.
model.string <- paste("Resp:", transformation.response, ",Time:",
transformation.time, ",x:", paste(ADDT.plan.values$transformation.x,
collapse = ","), ", Dist:", distribution, sep = "")
if (is.null(my.title))
my.title <- paste("Accelerated destructive degradation test simulation based on\n",
plan.string, plan.values.string, "\n", quantile,
"quantile of degradation versus", xlab, "at", paste(use.condition,
ADDT.plan.values$accelvar.units, collapse = ","),
"\n", model.string)
transformation.x <- fix.inverse.relationship(transformation.x)
slope.name <- attr(transformation.x, "slope.name")
# NOTE: this overrides the transformation-derived y.axis set above; the
# response axis is always drawn on a log scale.
y.axis <- "log"
# Never plot more curves than there are simulation replicates.
numplotsim <- min(number.sim, numplotsim)
# Build a dummy "gmle.out" object carrying the true (plan-value) parameter
# vector so the quantile routine can be driven with known parameters.
the.model <- list(distribution = distribution, transformation.x = transformation.x,
transformation.time = transformation.time, transformation.response = transformation.response)
Dummy.Dest.Degrad.out <- list(dummy = T, origparam = ADDT.plan.values$theta.vec,
origparamvcv = diag(length(ADDT.plan.values$theta.vec)),
model = the.model)
oldClass(Dummy.Dest.Degrad.out) <- "gmle.out"
# Fill any NA x-limits from the range of the test-plan inspection times,
# then lay out the evaluation grid along the time axis.
derived.time.range <- range(times(ADDT.test.plan))
xrna <- is.na(xlim)
if (any(xrna))
xlim[xrna] <- derived.time.range[xrna]
time.vec <- seq(xlim[1], xlim[2], length = nxpoints)
# True degradation quantile curve evaluated at the plan-value parameters.
degradation.true <- fx.ADDT.degradation.quantile(theta.hat = Dummy.Dest.Degrad.out$origparam,
p = quantile, time.vec = time.vec, distribution = distribution,
xuse = use.condition, transformation.x = transformation.x,
transformation.time = transformation.time)
uber.results.object <- matrix(results.object[1:nrow(results.object),
1:ncol(results.object), drop = FALSE], ncol = ncol(results.object),
nrow = nrow(results.object), byrow = F)
# One quantile curve per simulation replicate: apply the quantile routine
# row-wise over the estimated parameter vectors (first length(theta.vec)
# columns of the results matrix).
degradation.mat <- (apply(uber.results.object[, 1:length(ADDT.plan.values$theta.vec),
drop = F], 1, fx.ADDT.degradation.quantile, p = quantile,
time.vec = time.vec, distribution = distribution, xuse = use.condition,
transformation.x = transformation.x, transformation.time = transformation.time))
# Fill any NA y-limits from the simulated curves (back-transformed to the
# response scale).
derived.ylim <- f.relationshipinv(range(degradation.mat),
transformation.response)
yrna <- is.na(ylim)
trans.time.vec <- f.relationship(time.vec, x.axis)
if (any(yrna))
ylim[yrna] <- derived.ylim[yrna]
# Draw the axes/paper, then the true curve (thick solid) and the
# simulated curves (dashed), trimming the two outermost points at each
# end of the simulated curves.
plot.paper(ylim = ylim, xlim = xlim, x.axis = x.axis,
y.axis = y.axis, my.title = "", title.option = title.option,
cex = cex, xlab = xlab, ylab = ylab, grids = grids, cex.title = 0.8)
take.out <- c(1, 2, length(time.vec) - 1, length(time.vec))
lines(trans.time.vec, degradation.true, col = 1, lwd = 4,
lty = 1)
matlines(trans.time.vec[-take.out], degradation.mat[-take.out,
1:numplotsim], col = 1, lty = 2)
mtext(text = my.title, line = 0.5, side = 3)
# Called for its plotting side effect; return nothing visibly.
invisible()
}
|
# Script setup. NOTE(review): rm(list = ls()) and a hard-coded setwd() make
# this script destructive to the session and machine-specific; fine for a
# personal analysis script, but do not source() it from other code.
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
# header.R is assumed to define shared paths such as exportdir and localdir
# used below -- TODO confirm.
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(zoo)
library(ggrepel)
library(tidyverse)
# Output directory for this analysis' charts (created if missing).
folder_name <- "xxxx_btc_dd_trend"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
# NOTE(review): n_day_trend is never referenced later in this script --
# confirm before removing.
n_day_trend <- 20

# Load daily Bitcoin prices, sorted newest-first.
df <- readRDS(paste0(localdir, "0027_quandl_bitcoin.Rds")) %>%
arrange(desc(date))

# Find the low watermark: for each day, the lowest price observed on that
# day or any later day. With rows sorted newest-first this is simply a
# running minimum, so the original row-by-row loop (which grew df one cell
# at a time and tracked the minimum against a 10^8 sentinel) reduces to a
# single vectorized cummin(). pmin() keeps the original 10^8 cap for exact
# equivalence in the (practically impossible) case of a price above it.
df <- df %>%
mutate(low_watermark = cummin(pmin(index_btc, 10^8)))

# Express the future low relative to the current price (a non-positive
# percentage), and flag the days whose price is itself the future low.
df <- df %>%
arrange(date) %>%
mutate(watermark_over_index = low_watermark/index_btc - 1,
index_equal_watermark = ifelse(watermark_over_index == 0, 1, 0))
# Attach drawdown statistics (drawdown_path() is a project helper defined in
# header.R -- TODO confirm) to the price series.
btc_dd <- drawdown_path(df, dd_counts = 1)
df <- df %>%
left_join(btc_dd)
to_plot <- df
# Chart 1: future decline (watermark over index) through time.
file_path <- paste0(out_path, "/btc_watermark_over_index.jpeg")
source_string <- paste0("Source: Quandl")
plot <- ggplot(to_plot, aes(x=date, y=watermark_over_index)) +
geom_line() +
scale_y_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
ggtitle(paste0("BTC Future Decline")) +
labs(x = "Date" , y = "Watermark over Index",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Chart 2: density of the (sign-flipped, so positive) future-decline
# percentage; the y ticks/labels are hidden since only the shape matters.
file_path <- paste0(out_path, "/dist_btc_watermark_over_index.jpeg")
source_string <- paste0("Source: Quandl")
plot <- ggplot(to_plot, aes(-watermark_over_index)) +
geom_density() +
scale_x_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
theme(axis.ticks.y = element_blank(),
axis.text.y = element_blank()) +
ggtitle(paste0("BTC Future Decline Distribution")) +
labs(x = "Future Decline Percentage" , y = "Frequency",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Chart 3: price vs its low watermark on a log scale. NOTE(review): df was
# already joined with btc_dd above, so this second left_join(btc_dd) looks
# redundant (the selected columns are unaffected) -- confirm before removing.
# gather() is superseded by tidyr::pivot_longer() but is kept here as-is.
to_plot <- df %>%
left_join(btc_dd) %>%
select(date, low_watermark, index_btc) %>%
rename(`Low Watermark` = low_watermark,
`Bitcoin` = index_btc) %>%
gather(-date, key=key, value=value)
file_path <- paste0(out_path, "/btc_index_watermark.jpeg")
source_string <- paste0("Source: Quandl")
plot <- ggplot(to_plot, aes(x=date, y=value, col = key)) +
geom_line() +
scale_color_manual(values = c("black", "red")) +
scale_y_continuous(label = dollar, trans = log10_trans()) +
of_dollars_and_data_theme +
theme(legend.position = "bottom",
legend.title = element_blank()) +
ggtitle(paste0("BTC Price and Low Watermark")) +
labs(x = "Date" , y = "Price (Log Scale)",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Console summaries. NOTE(review): df$pct is presumably a column added by
# drawdown_path() via the join above -- verify it exists, otherwise this
# prints NA.
print(mean(df$pct, na.rm = TRUE))
print(mean(df$index_equal_watermark, na.rm = TRUE))
print(mean(df$watermark_over_index, na.rm = TRUE))
# ############################ End ################################## # | /analysis/xxxx_btc_dd_trend.R | no_license | nmaggiulli/of-dollars-and-data | R | false | false | 3,411 | r | cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(zoo)
library(ggrepel)
library(tidyverse)
folder_name <- "xxxx_btc_dd_trend"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
n_day_trend <- 20
df <- readRDS(paste0(localdir, "0027_quandl_bitcoin.Rds")) %>%
arrange(desc(date))
# Find low matermark
absolute_minumum <- 10^8
for(i in 1:nrow(df)){
current_p <- df[i, "index_btc"]
if (current_p < absolute_minumum){
df[i, "low_watermark"] <- current_p
absolute_minumum <- current_p
} else{
df[i, "low_watermark"] <- absolute_minumum
}
}
df <- df %>%
arrange(date) %>%
mutate(watermark_over_index = low_watermark/index_btc - 1,
index_equal_watermark = ifelse(watermark_over_index == 0, 1, 0))
btc_dd <- drawdown_path(df, dd_counts = 1)
df <- df %>%
left_join(btc_dd)
to_plot <- df
file_path <- paste0(out_path, "/btc_watermark_over_index.jpeg")
source_string <- paste0("Source: Quandl")
plot <- ggplot(to_plot, aes(x=date, y=watermark_over_index)) +
geom_line() +
scale_y_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
ggtitle(paste0("BTC Future Decline")) +
labs(x = "Date" , y = "Watermark over Index",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
file_path <- paste0(out_path, "/dist_btc_watermark_over_index.jpeg")
source_string <- paste0("Source: Quandl")
plot <- ggplot(to_plot, aes(-watermark_over_index)) +
geom_density() +
scale_x_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
theme(axis.ticks.y = element_blank(),
axis.text.y = element_blank()) +
ggtitle(paste0("BTC Future Decline Distribution")) +
labs(x = "Future Decline Percentage" , y = "Frequency",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
to_plot <- df %>%
left_join(btc_dd) %>%
select(date, low_watermark, index_btc) %>%
rename(`Low Watermark` = low_watermark,
`Bitcoin` = index_btc) %>%
gather(-date, key=key, value=value)
file_path <- paste0(out_path, "/btc_index_watermark.jpeg")
source_string <- paste0("Source: Quandl")
plot <- ggplot(to_plot, aes(x=date, y=value, col = key)) +
geom_line() +
scale_color_manual(values = c("black", "red")) +
scale_y_continuous(label = dollar, trans = log10_trans()) +
of_dollars_and_data_theme +
theme(legend.position = "bottom",
legend.title = element_blank()) +
ggtitle(paste0("BTC Price and Low Watermark")) +
labs(x = "Date" , y = "Price (Log Scale)",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
print(mean(df$pct, na.rm = TRUE))
print(mean(df$index_equal_watermark, na.rm = TRUE))
print(mean(df$watermark_over_index, na.rm = TRUE))
# ############################ End ################################## # |
# A sequence.
1:4
# Store it.
x <- c(1:4)
# Is it a vector?
is.vector(x)
# A vector of numbers.
x <- c(4, 6, 8)
# A vector of strings, i.e. characters, i.e. text.
y <- c("alea", "jacta", "est")
# A 'mixed' vector is coerced to a single common type (here: character,
# giving c("4", "a")).
z <- c(4, "a")
# Also a vector.
y <- "This is a vector."
# Check.
is.vector(y)
# Also a vector.
z <- 1
# Check.
is.vector(z)
# What about...
m <- cbind(1:3, 4:6)
# What type (or class) is that?
class(m)
# Create vector of random exam grades.
exam <- c(7, 13, 19, 8, 12)
# Check result.
exam
# Compute mean of exam vector.
mean(exam)
# Compute median of exam vector.
median(exam)
# Descriptive statistics for exam vector.
summary(exam)
# Recall the exam object.
exam
# It's a vector.
is.vector(exam)
# Not a very long one.
length(exam)
# Select its first element; in context: show grade of first student.
exam[1]
# Select more than one element by listing which values you want.
exam[1:3]
# Select a vector of values: show grades of students no. 1, 2, 3 and 5.
exam[c(1:3, 5)]
# In context, it makes more sense to simply exclude student no. 4's grade.
exam[-4]
# Vectors can be logical.
exam >= 10
# Select a logical vector of values.
exam[exam >= 10]
# Create a logical vector.
is_under_average <- exam < 10
# Get grades under average.
exam[is_under_average]
# Get grades above average.
exam[!is_under_average]
# The exam object is not a matrix.
is.matrix(exam)
# If you make it into a matrix, the vector becomes a column of values.
as.matrix(exam)
# Show length of grades vector.
length(exam)
# Create a random grades vector of same length.
# (No set.seed() here, so the values differ on each run.)
essay <- as.integer(20 * runif(5))
# Check result.
essay
# Form a matrix.
grades <- cbind(exam, essay)
# Check result.
grades
# Compute student average.
final <- rowMeans(grades)
# Combine to grades matrix.
grades <- cbind(grades, final)
# Check result.
grades
# Compute the mean of each column (exam, essay and final).
colMeans(grades)
# How many rows?
nrow(grades)
# How many columns?
ncol(grades)
# Do they have names?
dimnames(grades)
# Create a student id sequence 1, 2, 3, ...
# (seq_len(nrow(grades)) is the safer idiom when the row count could be 0.)
id <- c(1:nrow(grades))
# Check result.
id
# Assign it to row names.
rownames(grades) <- id
# Check result.
grades
# First row, second cell.
grades[1,2]
# First row.
grades[1, ]
# First two rows.
grades[1:2, ]
# Third column.
grades[, 3]
# Descriptive statistics for final grades.
summary(grades[, 3])
# Descriptive statistics for all matrix columns.
summary(grades)
# Create a text object based on a logical condition.
pass <- ifelse(grades[, 3] >= 10, "Pass", "Fail")
# Check result.
pass
# Binding a character vector coerces the whole matrix to character.
cbind(grades, pass)
# Understand what happens when you factor a string (text) variable.
factor(pass)
# The numeric matrix is preserved if 'pass' is added as a factor
# (the factor is stored as its underlying integer codes).
cbind(grades, factor(pass))
# Final operation. The '=' assignment gives a name to the column.
grades <- cbind(grades, pass = factor(pass))
# Marvel at your work.
grades
| /code/021_vectors.R | no_license | briatte/ida | R | false | false | 2,929 | r |
# A sequence.
1:4
# Store it.
x <- c(1:4)
# Is it a vector?
is.vector(x)
# A vector of numbers.
x <- c(4, 6, 8)
# A vector of strings, i.e. characters, i.e. text.
y <- c("alea", "jacta", "est")
# A 'mixed' vector will be automatically recycled.
z <- c(4, "a")
# Also a vector.
y <- "This is a vector."
# Check.
is.vector(y)
# Also a vector.
z <- 1
# Check.
is.vector(z)
# What about...
m <- cbind(1:3, 4:6)
# What type (or class) is that?
class(m)
# Create vector of random exam grades.
exam <- c(7, 13, 19, 8, 12)
# Check result.
exam
# Compute mean of exam vector.
mean(exam)
# Compute median of exam vector.
median(exam)
# Descriptive statistics for exam vector.
summary(exam)
# Recall the exam object.
exam
# It's a vector.
is.vector(exam)
# Not a very long one.
length(exam)
# Select its first element; in context: show grade of first student.
exam[1]
# Select more than one element by listing which values you want.
exam[1:3]
# Select a vector of values: show grades of students no. 1, 2, 3 and 5.
exam[c(1:3, 5)]
# In context, it does makes more sense to simply exclude student no. 4's grade.
exam[-4]
# Vectors can be logical.
exam >= 10
# Select a logical vector of values.
exam[exam >= 10]
# Create a logical vector.
is_under_average <- exam < 10
# Get grades under average.
exam[is_under_average]
# Get grades above average.
exam[!is_under_average]
# The exam object is not a matrix.
is.matrix(exam)
# If you make it into a matrix, the vector becomes a column of values.
as.matrix(exam)
# Show length of grades vector.
length(exam)
# Create a random grades vector of same length.
essay <- as.integer(20 * runif(5))
# Check result.
essay
# Form a matrix.
grades <- cbind(exam, essay)
# Check result.
grades
# Compute student average.
final <- rowMeans(grades)
# Combine to grades matrix.
grades <- cbind(grades, final)
# Check result.
grades
# Compute mean exam and essay grades.
colMeans(grades)
# How many rows?
nrow(grades)
# How many columns?
ncol(grades)
# Do they have names?
dimnames(grades)
# Create a student id sequence 1, 2, 3, ...
id <- c(1:nrow(grades))
# Check result.
id
# Assign it to row names.
rownames(grades) <- id
# Check result.
grades
# First row, second cell.
grades[1,2]
# First row.
grades[1, ]
# First two rows.
grades[1:2, ]
# Third column.
grades[, 3]
# Descriptive statistics for final grades.
summary(grades[, 3])
# Descriptive statistics for all matrix columns.
summary(grades)
# Create a text object based on a logical condition.
pass <- ifelse(grades[, 3] >= 10, "Pass", "Fail")
# Check result.
pass
cbind(grades, pass)
# Understand what happens when you factor a string (text) variable.
factor(pass)
# The numeric matrix is preserved if 'pass' is added as a factor.
cbind(grades, factor(pass))
# Final operation. The '=' assignment gives a name to the column.
grades <- cbind(grades, pass = factor(pass))
# Marvel at your work.
grades
|
library(glmnet)
# Load the training set: column 1 is the response, columns 4+ are features.
# header = TRUE is spelled out -- the original `head = T` relied on partial
# argument matching and the reassignable T shorthand.
mydata <- read.table("./TrainingSet/AvgRank/breast.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
# Fix the RNG seed so the 10-fold CV split is reproducible.
set.seed(123)
# Elastic-net regression (alpha = 0.01, i.e. nearly ridge), tuned by
# 10-fold cross-validated mean squared error.
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.01,
                 family = "gaussian", standardize = TRUE)
# Append the fitted coefficient path to the model log file.
sink('./Model/EN/AvgRank/breast/breast_005.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/breast/breast_005.R | no_license | leon1003/QSMART | R | false | false | 352 | r | library(glmnet)
mydata = read.table("./TrainingSet/AvgRank/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.01,family="gaussian",standardize=TRUE)
sink('./Model/EN/AvgRank/breast/breast_005.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
### Adam Blanchard
### Choose Your Own (CYO) Capstone Project
### HarvardX: PH125.9x - Capstone Project
### https://github.com/blanchard123/CYO_Project
###########################################################
############## Choose Your Own Project Code ###############
###########################################################
# Heart failure prediction data:
# https://www.kaggle.com/andrewmvd/heart-failure-clinical-data
###########################################################
##################### LOAD LIBRARIES ######################
###########################################################
# Install and load libraries as needed
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(caretEnsemble)) install.packages("caretEnsemble", repos = "http://cran.us.r-project.org")
if(!require(corrplot)) install.packages("corrplot", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(gam)) install.packages("gam", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
if(!require(ggthemes)) install.packages("ggthemes", repos = "http://cran.us.r-project.org")
if(!require(gridExtra)) install.packages("gridExtra", repos = "http://cran.us.r-project.org")
if(!require(gtsummary)) install.packages("gtsummary", repos = "http://cran.us.r-project.org")
if(!require(kableExtra)) install.packages("kableExtra", repos = "http://cran.us.r-project.org")
if(!require(knitr)) install.packages("knitr", repos = "http://cran.us.r-project.org")
if(!require(markdown)) install.packages("markdown", repos = "http://cran.us.r-project.org")
if(!require(party)) install.packages("party", repos = "http://cran.us.r-project.org")
if(!require(randomForest)) install.packages("randomForest", repos = "http://cran.us.r-project.org")
if(!require(rcompanion)) install.packages("rcompanion", repos = "http://cran.us.r-project.org")
if(!require(rpart)) install.packages("rpart", repos = "http://cran.us.r-project.org")
if(!require(rstatix)) install.packages("rstatix", repos = "http://cran.us.r-project.org")
if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org")
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
library(caret)
library(caretEnsemble)
library(corrplot)
library(data.table)
library(gam)
library(ggplot2)
library(ggthemes)
library(gridExtra)
library(gtsummary)
library(knitr)
library(kableExtra)
library(markdown)
library(party)
library(randomForest)
library(rcompanion)
library(rpart)
library(rstatix)
library(scales)
library(tidyverse)
library(dplyr)
# set printed digits to 3 and stop scientific notation
options(digits = 3)
options(scipen = 999)
###########################################################
#################### DATA DESCRIPTION #####################
###########################################################
# create a dataframe describing the data
# (one row per variable in the heart-failure data set, used only to render
# the descriptive table below)
heart_variables <-
data.frame(Feature = c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
"Ejection Fraction", "High Blood Pressure", "Platelets",
"Serum Creatinine", "Serum Sodium", "Sex", "Smoking",
"Time", "Death Event"),
Description = c("Age of patients in years",
"Decrease in red blood cells",
"Level of CPK in the blood",
"Presence of diabetes",
"Percentage of blood leaving heart",
"Presence of hypertension",
"Level of platelets in the blood",
"Level of creatinine in the blood",
"Level of sodium in the blood",
"Biological sex - man or woman",
"Presence of smoking",
"Number of days to follow-up",
"Death of patient during follow-up"),
Measurement = c("Numeric - years", "Boolean", "Numeric - mcg/L", "Boolean",
"Boolean", "Numeric - percentage", "Numeric - kp/mL",
"Numeric - mg/dL", "Numeric - mEq/L", "Binary","Boolean",
"Numeric - days", "Boolean"),
Range = c("40 - 95", "0, 1", "23 - 7,861", "0, 1", "14 - 80", "0, 1",
"25.01 - 850.00", "0.50 - 9.40", "114 - 148", "0, 1", "0, 1",
"4 - 285", "0, 1"))
# convert the dataframe to a table describing the data
# (kableExtra styling; a citation and unit glossary are added as a footnote)
heart_variables %>%
kbl(caption = "Variable Description, Measurement, and Range", align = "llcc") %>%
row_spec(0, bold = T) %>%
column_spec(1, bold = T) %>%
kable_classic(full_width = F) %>%
kable_styling(latex_options = "hold_position", font_size = 10) %>%
kable_styling(latex_options = "scale_down") %>%
footnote(general = c("Adapted from Chicco & Jurman, 2020",
"mcg/L = micrograms per liter; kp/mL = kiloplatelets/microliter;
mEq/L = milliequivalents per litre"),
general_title = "")
###########################################################
##################### DATA WRANGLING ######################
###########################################################
# Note: this process could take a minute
# data stored on my github account
data_url <- "https://raw.githubusercontent.com/blanchard123/CYO_Project/main/heart_failure_clinical_records_dataset.csv"
# download the csv file from github
download.file(data_url, "heart_data.csv")
# read the file into R and create numeric and logical variables (as appropriate)
# heart_data_l: logical encoding of the 0/1 columns (used for modelling /
# correlation work where logicals behave like numerics)
heart_data_l <- read_csv("heart_data.csv", col_types = cols(
age = col_double(),
anaemia = col_logical(),
creatinine_phosphokinase = col_double(),
diabetes = col_logical(),
ejection_fraction = col_double(),
high_blood_pressure = col_logical(),
platelets = col_double(),
serum_creatinine = col_double(),
serum_sodium = col_double(),
sex = col_logical(),
smoking = col_logical(),
time = col_integer(),
DEATH_EVENT = col_logical()))
# create a separate file with numeric and factor variables (as appropriate)
# converting from logical to factor variables can create issues, so two datasets are created
# read the file into R and correct the column types
# heart_data_f: factor encoding of the 0/1 columns (used for plotting and
# summary tables)
heart_data_f <- read_csv("heart_data.csv", col_types = cols(
age = col_double(),
anaemia = col_factor(),
creatinine_phosphokinase = col_double(),
diabetes = col_factor(),
ejection_fraction = col_double(),
high_blood_pressure = col_factor(),
platelets = col_double(),
serum_creatinine = col_double(),
serum_sodium = col_double(),
sex = col_factor(),
smoking = col_factor(),
time = col_integer(),
DEATH_EVENT = col_factor()))
# add value labels to the factor variables
# note: only run this section once per fresh read -- re-running maps the
# already-relabelled levels against c(0,1) and turns every value into NA
heart_data_f$anaemia <- factor(heart_data_f$anaemia, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$diabetes <- factor(heart_data_f$diabetes, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$high_blood_pressure <- factor(heart_data_f$high_blood_pressure, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$sex <- factor(heart_data_f$sex, levels = c(0,1), labels = c("Female","Male"))
heart_data_f$smoking <- factor(heart_data_f$smoking, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$DEATH_EVENT <- factor(heart_data_f$DEATH_EVENT, levels = c(0,1), labels = c("No","Yes"))
# convert age to integer variable in both datasets
heart_data_f <- heart_data_f %>% mutate(age = as.integer(age))
heart_data_l <- heart_data_l %>% mutate(age = as.integer(age))
# remove unnecessary object from environment
rm(data_url)
# Save the datasets (RData snapshots in the working directory)
save(heart_data_l, file = "heart_data_l.RData")
save(heart_data_f, file = "heart_data_f.RData")
###########################################################
##################### DATA INSPECTION #####################
###########################################################
# check for NA values in all variables at once instead of thirteen separate
# any(is.na(...)) calls; returns a named logical vector with one entry per
# column (all FALSE when the data set is complete)
vapply(heart_data_f, anyNA, logical(1))
# another check for complete data - identify rows with any missing values
# (returns zero rows when every row is complete)
heart_data_f[!complete.cases(heart_data_f),]
# number of rows and columns
dim(heart_data_f)
# basic identification of data and variable types
# (both encodings are shown: logical and factor versions of the same data)
str(heart_data_l, strict.width="cut")
str(heart_data_f, strict.width="cut")
# examine the heart dataset as a table (first 10 rows, styled with
# kableExtra and human-readable column names)
as_tibble(heart_data_f) %>%
slice(1:10) %>%
kbl(caption = "Examination of the Heart Data Structure", align = "c",
col.names = c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
"Ejection Fraction", "High Blood Pressure", "Platelets",
"Serum Creatinine", "Serum Sodium", "Sex", "Smoking", "Time",
"Death Event")) %>%
row_spec(0, bold = T) %>%
kable_classic(full_width = F) %>%
kable_styling(latex_options = "hold_position", font_size = 10) %>%
kable_styling(latex_options = "scale_down")
# basic descriptive information about the variables
summary(heart_data_f)
###########################################################
################# BASIC DATA EXPLORATION ##################
###########################################################
### basic frequency distributions of binary variables ###
### all plots stored as separate objects ###

# Helper: bar chart of one dichotomous column of heart_data_f, with
# percentage labels inside the bars. The column is passed unquoted and
# forwarded to aes() with the {{ }} (curly-curly) tidy-eval operator.
# Replaces six near-identical copy-pasted ggplot blocks.
plot_binary_freq <- function(var, xlab_text) {
heart_data_f %>%
ggplot(aes({{ var }})) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab(xlab_text) +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
}

# one plot object per dichotomous variable
fd_anm <- plot_binary_freq(anaemia, "Anaemia")
fd_db <- plot_binary_freq(diabetes, "Diabetes")
fd_hbp <- plot_binary_freq(high_blood_pressure, "High Blood Pressure")
fd_sex <- plot_binary_freq(sex, "Sex")
fd_smk <- plot_binary_freq(smoking, "Smoking")
fd_death <- plot_binary_freq(DEATH_EVENT, "Death")

# arrange plots together in a grid for presentation
grid.arrange(fd_death, fd_anm, fd_db, fd_hbp, fd_sex, fd_smk,
ncol = 2,
top = "Figure 1: Frequency Distributions of the Dichotomous Variables",
left = "Frequency")
### basic frequency distributions of continuous variables ###
### all plots stored as separate objects ###
# frequency distribution of age
fd_age <- heart_data_f %>%
ggplot(aes(age)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$age), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_continuous(breaks = pretty_breaks(n = 10)) +
xlab("Age") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of creatinine phosphokinase levels
fd_cp <- heart_data_f %>%
ggplot(aes(creatinine_phosphokinase)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$creatinine_phosphokinase), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_log10(labels = comma) +
xlab("Creatinine Phosphokinase (log)") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# Frequency distribution of ejection fraction (histogram with mean marker).
fd_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction)) +
  geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
  geom_vline(xintercept = mean(heart_data_f$ejection_fraction), color = "black") +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Frequency distribution of platelet counts (histogram with mean marker).
fd_pl <- heart_data_f %>%
  ggplot(aes(platelets)) +
  geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
  geom_vline(xintercept = mean(heart_data_f$platelets), color = "black") +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  xlab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Frequency distribution of serum creatinine; log10 x-axis because the
# variable is right-skewed.
fd_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine)) +
  geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
  geom_vline(xintercept = mean(heart_data_f$serum_creatinine), color = "black") +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_log10() +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Frequency distribution of serum sodium (histogram with mean marker).
fd_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium)) +
  geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
  geom_vline(xintercept = mean(heart_data_f$serum_sodium), color = "black") +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Frequency distribution of follow-up time (histogram with mean marker).
fd_tm <- heart_data_f %>%
  ggplot(aes(time)) +
  geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
  geom_vline(xintercept = mean(heart_data_f$time), color = "black") +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Assemble the seven frequency-distribution panels into Figure 2.
# Panel 7 is centred on the last row via NA cells in the layout matrix.
fd_layout <- rbind(c(1, 1, 2, 2),
                   c(3, 3, 4, 4),
                   c(5, 5, 6, 6),
                   c(NA, 7, 7, NA))
grid.arrange(fd_age, fd_cp, fd_ef, fd_pl, fd_sc, fd_ss, fd_tm,
             ncol = 2,
             layout_matrix = fd_layout,
             top = "Figure 2: Frequency Distributions of the Continuous Variables",
             left = "Frequency")
### bar graphs by death event ###
### all plots stored as separate objects ###
# Each plot shows the within-group proportion of deaths, with raw counts
# overlaid as labels. Legends are suppressed here; a shared legend is
# extracted from a dummy plot further below.
# FIX: legend.position = "blank" is not a documented ggplot2 value; the
# documented way to suppress a legend is legend.position = "none".
# (aes(label = ..count..) is superseded by after_stat(count) in ggplot2 >= 3.4;
# left unchanged to stay compatible with the version this script was built on.)
# bar graph of anaemia by death event
bg_anm <- heart_data_f %>%
  ggplot(aes(anaemia, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Anaemia") +
  theme_light() +
  theme(axis.title.x = element_text(),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# bar graph of diabetes by death event
bg_db <- heart_data_f %>%
  ggplot(aes(diabetes, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Diabetes") +
  theme_light() +
  theme(axis.title.x = element_text(),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# bar graph of high blood pressure by death event
bg_hbp <- heart_data_f %>%
  ggplot(aes(high_blood_pressure, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("High Blood Pressure") +
  theme_light() +
  theme(axis.title.x = element_text(),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# bar graph of sex by death event
bg_sex <- heart_data_f %>%
  ggplot(aes(sex, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Sex") +
  theme_light() +
  theme(axis.title.x = element_text(),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# bar graph of smoking by death event
bg_smk <- heart_data_f %>%
  ggplot(aes(smoking, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Smoking") +
  theme_light() +
  theme(axis.title.x = element_text(),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Build a duplicate of the smoking bar graph whose only purpose is to carry a
# visible legend ("right"); the legend grob is later extracted and shared by
# all bar-graph panels.
bg_smk_legend <- heart_data_f %>%
  ggplot(aes(smoking, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Smoking") +
  theme_light() +
  theme(axis.title.x = element_text(),
        axis.title.y = element_blank(),
        legend.position = "right",
        plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
# Extract the legend grob ("guide-box") from a ggplot object so it can be
# placed as its own cell in a grid.arrange() layout.
#
# my_ggp: a ggplot object that renders with a visible legend.
# Returns: the legend grob.
# Stops with a clear error if the plot has no legend (the original code
# produced an opaque subscript error in that case); uses vapply() instead of
# type-unstable sapply().
extract_legend <- function(my_ggp) {
  built_table <- ggplot_gtable(ggplot_build(my_ggp))
  grob_names <- vapply(built_table$grobs, function(g) g$name, character(1))
  legend_idx <- which(grob_names == "guide-box")
  if (length(legend_idx) == 0) {
    stop("extract_legend(): plot has no legend ('guide-box' grob not found)",
         call. = FALSE)
  }
  built_table$grobs[[legend_idx[1]]]
}
# Pull the shared legend out of the dummy plot.
shared_legend_3 <- extract_legend(bg_smk_legend)
# Assemble the five bar graphs plus the shared legend into Figure 3; the NA
# cell centres the second row.
bg_layout <- rbind(c(1, 1, 2, 2, 3, 3),
                   c(NA, 4, 4, 5, 5, 6))
grid.arrange(bg_anm, bg_db, bg_hbp, bg_smk, bg_sex, shared_legend_3,
             ncol = 3,
             layout_matrix = bg_layout,
             top = "Figure 3: Bar Graphs of the Dichotomous Variables by Patient Death",
             left = "Proportion")
# Table of descriptive statistics of the dichotomous variables, overall and
# split by death event, with chi-square p-values (gtsummary -> kableExtra).
# FIX: spelled out TRUE/FALSE instead of the reassignable shorthands T/F.
heart_data_f %>% select(anaemia, high_blood_pressure, diabetes, sex, smoking, DEATH_EVENT) %>%
  tbl_summary(by = DEATH_EVENT,
              type = all_categorical() ~ "categorical",
              digits = all_categorical() ~ 2,
              label = list(anaemia ~ "Anaemia", high_blood_pressure ~ "High Blood Pressure",
                           diabetes ~ "Diabetes", sex ~ "Sex", smoking ~ "Smoking")) %>%
  add_p(pvalue_fun = ~style_pvalue(.x, digits = 2)) %>%
  add_overall() %>%
  as_kable_extra(caption = "Descriptive Statistics for the Dichotomous Variables: Overall and by Death Event", align = "lcccc") %>%
  add_header_above(c("", "", "Death Event" = 3), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
### boxplots of continuous variables by death event ###
### all plots stored as separate objects ###
# Boxplots with jittered raw points overlaid. Legends are suppressed; a
# shared legend is extracted from a dummy plot below.
# FIX: legend.position = "blank" is not a documented ggplot2 value; the
# documented way to suppress a legend is legend.position = "none".
# boxplot of age by death event
bp_age <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, age, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Age") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# boxplot of creatinine phosphokinase levels by death event (log y-axis,
# as the variable is heavily right-skewed)
bp_cp <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, creatinine_phosphokinase, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_log10(labels = comma) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Creatinine Phosph. (log)") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# boxplot of ejection fraction levels by death event
bp_ef <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, ejection_fraction, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# boxplot of platelet levels by death event
# FIX (applies to all four plots below): legend.position = "blank" is not a
# documented ggplot2 value; "none" is the documented way to hide a legend.
bp_pl <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, platelets, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# boxplot of serum creatinine levels by death event (log y-axis)
bp_sc <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, serum_creatinine, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_log10() +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# boxplot of serum sodium levels by death event
bp_ss <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, serum_sodium, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# boxplot of follow-up time by death event
# (note: xlab() is set but hidden by axis.title.x = element_blank(),
# matching the other panels in the grid)
bp_tm <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, time, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Death Event") +
  ylab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "none",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# Duplicate of the follow-up-time boxplot whose only purpose is to carry a
# visible legend ("right") for extraction.
bp_tm_legend <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, time, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Death Event") +
  ylab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(angle = 90, vjust = 2),
        legend.position = "right",
        plot.margin = unit(c(0.5, 0.5, 0.25, 0.5), "cm"))
# Pull the shared legend out of the dummy plot.
shared_legend_4 <- extract_legend(bp_tm_legend)
# Assemble the seven boxplots plus the shared legend into Figure 4.
bp_layout <- rbind(c(1, 2, 3),
                   c(4, 5, 6),
                   c(NA, 7, 8))
grid.arrange(bp_age, bp_cp, bp_ef, bp_pl, bp_sc, bp_ss, bp_tm, shared_legend_4,
             ncol = 3,
             layout_matrix = bp_layout,
             top = "Figure 4: Boxplots of the Continuous Variables by Patient Death")
# Table of descriptive statistics of the continuous variables, overall and by
# death event: mean (SD) and median (IQR) per variable, Mann-Whitney p-values.
# FIX: spelled out TRUE/FALSE instead of the reassignable shorthands T/F.
heart_data_f %>% select(age, creatinine_phosphokinase, ejection_fraction, platelets,
                        serum_creatinine, serum_sodium, time, DEATH_EVENT) %>%
  tbl_summary(by = DEATH_EVENT,
              type = all_continuous() ~ "continuous2",
              statistic = all_continuous() ~ c("{mean} ({sd})", "{median} ({p25}, {p75})"),
              digits = all_continuous() ~ 2,
              label = list(age ~ "Age", creatinine_phosphokinase ~ "Creatinine Phosphokinase",
                           ejection_fraction ~ "Ejection Fraction", platelets ~ "Platelets",
                           serum_creatinine ~ "Serum Creatinine", serum_sodium ~ "Serum Sodium",
                           time ~ "Length of Follow-up")) %>%
  add_p(pvalue_fun = ~style_pvalue(.x, digits = 2)) %>%
  add_overall() %>%
  modify_footnote(all_stat_cols() ~ "Mean (SD) or Median (IQR)") %>%
  as_kable_extra(caption = "Descriptive Statistics for the Continuous Variables: Overall and by Death Event", align = "lcccc") %>%
  add_header_above(c("", "", "Death Event" = 3), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
###########################################################
################## INFERENTIAL ANALYSIS ###################
###########################################################
### some analyses already presented in tables above ###
# more details about the chi-square tests presented in table above
chisq_test(heart_data_f$anaemia, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$high_blood_pressure, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$diabetes, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$sex, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$smoking, heart_data_f$DEATH_EVENT)
# more details about the Mann-Whitney U tests in table above
# (rewritten with the formula + data interface for consistency with the
# t-tests below; results are identical to the $-extraction form)
wilcox.test(age ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(creatinine_phosphokinase ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(ejection_fraction ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(platelets ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(serum_creatinine ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(serum_sodium ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(time ~ DEATH_EVENT, data = heart_data_f)
# alternative analysis (to the Mann-Whitney) using Welch t-tests
t.test(age ~ DEATH_EVENT, data = heart_data_f)
t.test(creatinine_phosphokinase ~ DEATH_EVENT, data = heart_data_f)
t.test(ejection_fraction ~ DEATH_EVENT, data = heart_data_f)
t.test(platelets ~ DEATH_EVENT, data = heart_data_f)
t.test(serum_creatinine ~ DEATH_EVENT, data = heart_data_f)
t.test(serum_sodium ~ DEATH_EVENT, data = heart_data_f)
t.test(time ~ DEATH_EVENT, data = heart_data_f)
### correlations ###
# Human-readable labels for the 13 columns of heart_data_l; defined once and
# used for both dimensions of the matrix (the original duplicated this
# 13-element vector for colnames and rownames).
heart_var_labels <- c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                      "Ejection Fraction", "High Blood Pressure", "Platelets",
                      "Serum Creatinine", "Serum Sodium", "Sex", "Smoking",
                      "Time", "Death Event")
# Correlation matrix across all (numerically coded) variables.
cmat <- cor(heart_data_l)
dimnames(cmat) <- list(heart_var_labels, heart_var_labels)
# Pairwise significance tests for the correlations (95% confidence level).
res1 <- cor.mtest(heart_data_l, conf.level = 0.95)
# Heatmap of the correlation matrix; significant cells are flagged at the
# .001 / .01 / .05 levels.
corrplot::corrplot(cmat,
                   type = "lower",
                   method = "square",
                   tl.col = "black",
                   tl.cex = 0.7,
                   title = "Figure 5: Correlation Matrix",
                   p.mat = res1$p,
                   insig = "label_sig",
                   sig.level = c(.001, .01, .05),
                   pch.cex = 0.9,
                   pch.col = "white",
                   mar = c(1,1,3,1))
# Correlations between death event (column 13 of heart_data_l) and every
# variable, with r-squared and the p-values computed by cor.mtest above.
correlations <- cor(heart_data_l, heart_data_l$DEATH_EVENT)
r_square <- correlations^2
temp_data <- data.frame(r_square = r_square, cor = correlations, p = res1$p[,13])
# Drop row 13 (DEATH_EVENT itself; its self-correlation is uninformative).
temp_data <- temp_data[-13, ]
rownames(temp_data) <- c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                         "Ejection Fraction", "High Blood Pressure", "Platelets",
                         "Serum Creatinine", "Serum Sodium", "Sex", "Smoking", "Time")
# Table of the correlations, strongest association (largest r-squared) first.
# FIX: spelled out TRUE instead of the reassignable shorthand T.
temp_data[order(temp_data$r_square, decreasing = TRUE), ] %>%
  kbl(caption = "Correlations with Death Event",
      col.names = c("r squared", "r", "p-value"),
      align = "lccc",
      digits = 3) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10)
### logistic regressions ###
# Logistic regression predicting death from all features, including follow-up
# time (family = "binomial" makes this logistic rather than linear).
fit_glm <- glm(DEATH_EVENT ~ ., data = heart_data_l, family = "binomial")
# Results of the logistic regression.
summary(fit_glm)
# Overall model fit: pseudo R-squared values (rcompanion::nagelkerke).
fit_glm_r2 <- nagelkerke(fit_glm)$Pseudo.R.squared
colnames(fit_glm_r2) <- c("All Features")
# FIX throughout this table chain: TRUE/FALSE instead of the reassignable
# shorthands T/F.
fit_glm_r2 %>%
  kbl(caption = "Logistic Regression - Overall Model Fit", align = "c", col.names = "Pseudo R Squared") %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# Coefficients table with readable predictor names; rows 2/6/9/13 (Age,
# Ejection Fraction, Serum Creatinine, Time) are bolded for emphasis.
fit_glm_co <- summary(fit_glm)$coefficients
rownames(fit_glm_co) <- c("(Intercept)", "Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                          "Ejection Fraction", "High Blood Pressure", "Platelets",
                          "Serum Creatinine", "Serum Sodium", "Sex", "Smoking", "Time")
fit_glm_co %>%
  kbl(caption = "Logistic Regression Coefficients - All Features",
      align = "lccc", col.names = c("Estimate", "Std. Error", "Z", "p")) %>%
  row_spec(0, bold = TRUE) %>%
  row_spec(2, bold = TRUE) %>%
  row_spec(6, bold = TRUE) %>%
  row_spec(9, bold = TRUE) %>%
  row_spec(13, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# Odds ratios (exponentiated coefficients).
exp(coef(fit_glm))
# Logistic regression without the TIME variable.
# FIX: the original call omitted family = "binomial", so glm() silently fit a
# Gaussian (linear probability) model even though the comments, the coefficient
# table ("Z", "p") and the odds-ratio step all assume logistic regression —
# and the all-features model fit_glm above does use family = "binomial".
fit_glm2 <- glm(DEATH_EVENT ~ age + anaemia + creatinine_phosphokinase + diabetes +
                  ejection_fraction + high_blood_pressure + platelets + serum_creatinine +
                  serum_sodium + sex + smoking,
                data = heart_data_l, family = "binomial")
# Results of the logistic regression.
summary(fit_glm2)
# Overall model fit: append this model's pseudo R-squared to the table built
# for the all-features model, for side-by-side comparison.
fit_glm_r2 <- fit_glm_r2 %>% cbind(nagelkerke(fit_glm2)$Pseudo.R.squared)
colnames(fit_glm_r2) <- c("All Features", "No Time Feature")
fit_glm_r2 %>%
  kbl(caption = "Logistic Regression - Overall Model Fit", align = "cc") %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# Coefficients table; rows 2/6/9 (Age, Ejection Fraction, Serum Creatinine)
# are bolded for emphasis.
fit_glm_co2 <- summary(fit_glm2)$coefficients
rownames(fit_glm_co2) <- c("(Intercept)", "Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                           "Ejection Fraction", "High Blood Pressure", "Platelets",
                           "Serum Creatinine", "Serum Sodium", "Sex", "Smoking")
fit_glm_co2 %>%
  kbl(caption = "Logistic Regression Coefficients - No Time Feature",
      align = "lccc", col.names = c("Estimate", "Std. Error", "Z", "p")) %>%
  row_spec(0, bold = TRUE) %>%
  row_spec(2, bold = TRUE) %>%
  row_spec(6, bold = TRUE) %>%
  row_spec(9, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# Odds ratios (exponentiated coefficients).
exp(coef(fit_glm2))
###########################################################
################### MODELING APPROACHES ###################
###########################################################
########## Data preparation for algorithm training ##########
# partition the heart dataset into training (80%) and test (20%) datasets,
# stratified on the outcome by createDataPartition
# (sample.kind = "Rounding" reproduces pre-R-3.6 RNG behaviour)
set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(y = heart_data_f$DEATH_EVENT,
times = 1, p = 0.2, list = FALSE)
heart_train <- heart_data_f[-test_index,]
heart_test <- heart_data_f[test_index,]
# save the training and test datasets to the working directory
save(heart_train, file = "heart_train.RData")
save(heart_test, file = "heart_test.RData")
# examine frequency of outcome in both datasets (sanity check on the split)
table(heart_train$DEATH_EVENT)
table(heart_test$DEATH_EVENT)
# create data frame describing the accuracy metrics reported later by
# caret::confusionMatrix; Feature and Description are parallel vectors, and
# entries marked "*" are footnoted below as using caret's more complex
# definitions
model_acc <-
data.frame(Feature = c("Accuracy", "Kappa", "Sensitivity", "Specificity", "PPV", "NPV", "Precision", "Recall",
"F1", "Prevalence", "Detection Rate", "Detection Prevalence", "Balanced Accuracy"),
Description = c("Proportion of true positives and true negatives over all instances",
"Measure of agreement accounting for random chance*",
"Proportion of true positives over actual positives",
"Proportion of true negatives over actual negatives",
"Proportion of true positives over predicted positives*",
"Proportion of true negatives over predicted negatives*",
"Proportion of true positives over predicted positives",
"Proportion of true positives over actual positives",
"Harmonic average of precision and recall*",
"Proportion of actual positives over total",
"Proportion of true positives over total",
"Proportion of predicted positives over total",
"(sensitivity + specificity)/2"))
# render the dataframe as a reference table of the accuracy metrics
model_acc %>%
kbl(caption = "Model Accuracy Metrics", align = "ll") %>%
row_spec(0, bold = T) %>%
column_spec(1, bold = T) %>%
kable_classic(full_width = F) %>%
kable_styling(latex_options = "hold_position", font_size = 10) %>%
kable_styling(latex_options = "scale_down") %>%
footnote(general = c("Adapted from Irizarry (2019) and the caret package description."),
symbol = c("These metrics are calculated using more complex definitions in the caret package."),
general_title = "")
# examine information about the models (algorithm details and tunable
# parameters) for each method used below - cforest
getModelInfo("cforest")
modelLookup("cforest")
# examine information about the models - knn
getModelInfo("knn")
modelLookup("knn")
# examine information about the models - glm
getModelInfo("glm")
modelLookup("glm")
# examine information about the models - gamLoess
getModelInfo("gamLoess")
modelLookup("gamLoess")
# examine information about the models - rf
getModelInfo("rf")
modelLookup("rf")
# examine information about the models - rpart
getModelInfo("rpart")
modelLookup("rpart")
# set the seed for reproducibility (pre-R-3.6 RNG behaviour)
set.seed(1, sample.kind = "Rounding")
# resampling control shared by all models
# NOTE(review): because `index` supplies 10 explicit resamples via
# createResample(), caret uses those 10 bootstrap samples and the
# method = "cv" / number = 100 settings are effectively overridden — the
# original comment "cross-validation with 100 samples" does not match what
# runs; confirm which scheme was intended
my_control <- trainControl(method = "cv", number = 100, p = .9,
savePredictions = "all",
classProbs = TRUE,
allowParallel = TRUE,
index = createResample(heart_train$DEATH_EVENT, 10))
# set the seed for reproducibility
set.seed(1, sample.kind = "Rounding")
# train the six models at the same time under the shared control - all
# features except follow-up time; predictors centred and scaled
train_models <- caretList(DEATH_EVENT ~ age + anaemia + creatinine_phosphokinase + diabetes +
ejection_fraction + high_blood_pressure + platelets + serum_creatinine +
serum_sodium + sex + smoking,
data = heart_train,
trControl = my_control,
methodList = c("cforest", "glm", "knn", "gamLoess", "rf", "rpart"),
continue_on_fail = FALSE,
preProcess = c("center", "scale"))
# set the seed for reproducibility
set.seed(1, sample.kind = "Rounding")
# train the same six models on select features only (age, ejection fraction,
# and serum creatinine)
train_models2 <- caretList(DEATH_EVENT ~ age + ejection_fraction + serum_creatinine,
data = heart_train,
trControl = my_control,
methodList = c("cforest", "glm", "knn", "gamLoess", "rf", "rpart"),
continue_on_fail = FALSE,
preProcess = c("center", "scale"))
# report model results in the training dataset - cforest
# ($results holds one row per tuning value; $bestTune the chosen value)
train_models$cforest$results
train_models$cforest$bestTune
# report model results in the training dataset - glm (no tuning parameter)
train_models$glm$results
# report model results in the training dataset - knn
train_models$knn$results
train_models$knn$bestTune
# report model results in the training dataset - gamLoess
train_models$gamLoess$results
# report model results in the training dataset - rf
train_models$rf$results
train_models$rf$bestTune
# report model results in the training dataset - rpart
train_models$rpart$results
train_models$rpart$bestTune
# plot the accuracy of the models in the training set cross-validation - all features
resamples <- resamples(train_models)
dotplot(resamples, metric = "Accuracy",
main = "Figure 6: Accuracy across Models - All Features (Cross-Validation)")
# plot the accuracy of the models in the training set cross-validation - select features
resamples2 <- resamples(train_models2)
dotplot(resamples2, metric = "Accuracy",
main = "Figure 8: Accuracy across Models - Select Features (Cross-Validation)")
# Dataframe of the resampling results of all models (all-features run and
# select-features run side by side). The original spelled out 48 nearly
# identical max()/min() calls; this extracts the repetition into a helper.
# Internal keys of the six models inside each caretList object.
model_keys <- c("cforest", "glm", "knn", "gamLoess", "rf", "rpart")
# Helper: summarise one column of each model's $results with `fn`
# (max for the best Accuracy/Kappa, min for the smallest SD), in model_keys
# order, as a plain numeric vector.
# NOTE(review): as in the original, min(SD) is taken independently of
# max(Accuracy), so for tuned models the SD may come from a different tuning
# row than the reported best accuracy — confirm this is intended.
pull_metric <- function(models, column, fn) {
  vapply(model_keys,
         function(m) fn(models[[m]]$results[[column]]),
         numeric(1), USE.NAMES = FALSE)
}
train_results <- data.frame(
  Model = c("cForest", "GLM", "KNN", "Loess", "RF", "rpart"),
  Accuracy1 = pull_metric(train_models, "Accuracy", max),
  AccuracySD1 = pull_metric(train_models, "AccuracySD", min),
  Kappa1 = pull_metric(train_models, "Kappa", max),
  KappaSD1 = pull_metric(train_models, "KappaSD", min),
  Accuracy2 = pull_metric(train_models2, "Accuracy", max),
  AccuracySD2 = pull_metric(train_models2, "AccuracySD", min),
  Kappa2 = pull_metric(train_models2, "Kappa", max),
  KappaSD2 = pull_metric(train_models2, "KappaSD", min))
# Table of accuracy and kappa in the cross-validation for all models, with the
# all-features and select-features runs grouped under separate headers.
# FIX: comment typo ("accruacy") and TRUE instead of the shorthand T.
train_results %>%
  kbl(caption = "Accuracy across Models (Cross-Validation)",
      align = "lclclclcl", col.names = c("Model", "Accuracy", "(SD)", "Kappa", "(SD)", "Accuracy", "(SD)", "Kappa", "(SD)")) %>%
  add_header_above(c("", "All Features" = 4, "Select Features" = 4), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# Tuning-parameter plots (accuracy vs. tuning value, best value highlighted).
# plot of tuning parameters - cforest (tunes mtry: predictors sampled per split)
tuning_cf <- ggplot(train_models$cforest, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Number of Randomly Selected Predictors") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("cforest") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_text(vjust = 2),
        plot.title = element_text(size = 12, vjust = 2, hjust = 0.5),
        plot.margin = unit(c(0.5, 0.25, 0.5, 0.5), "cm"))
# plot of tuning parameters - knn (tunes k)
tuning_knn <- ggplot(train_models$knn, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Number of Neighbours") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("KNN") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_text(vjust = 2),
        plot.title = element_text(size = 12, vjust = 2, hjust = 0.5),
        plot.margin = unit(c(0.5, 0.25, 0.5, 0.5), "cm"))
# plot of tuning parameters - rf (tunes mtry)
tuning_rf <- ggplot(train_models$rf, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Number of Randomly Selected Predictors") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("Random Forest") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_text(vjust = 2),
        plot.title = element_text(size = 12, vjust = 2, hjust = 0.5),
        plot.margin = unit(c(0.5, 0.25, 0.5, 0.5), "cm"))
# plot of tuning parameters - rpart
# FIX: rpart's tuning parameter is the complexity parameter cp (see
# modelLookup("rpart") above), not the number of randomly selected
# predictors; the x-axis label was copy-pasted from the forest plots.
tuning_rpart <- ggplot(train_models$rpart, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Complexity Parameter (cp)") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("rpart") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_text(vjust = 2),
        plot.title = element_text(size = 12, vjust = 2, hjust = 0.5),
        plot.margin = unit(c(0.5, 0.25, 0.5, 0.5), "cm"))
# arrange tuning plots together in a grid for presentation
grid.arrange(tuning_cf, tuning_knn, tuning_rf, tuning_rpart,
             ncol = 2,
             top = "Figure A8: Tuning Parameters across Models - All Features")
# plot variable importance for each model that reports it (knn is excluded;
# caret's varImp has no model-specific method for it)
imp1 <- plot(varImp(train_models$cforest), xlab = "cforest")
imp2 <- plot(varImp(train_models$glm), xlab = "GLM")
imp3 <- plot(varImp(train_models$gamLoess), xlab = "gamLoess")
imp4 <- plot(varImp(train_models$rf), xlab = "Random Forest")
imp5 <- plot(varImp(train_models$rpart), xlab = "rpart")
# arrange variable importance plots together in a grid for presentation;
# the NA cells centre the fifth panel on the last row
grid.arrange(imp1, imp2, imp3, imp4, imp5,
ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(NA,5,5,NA)),
top = "Figure 7: Variable Importance across Models (Cross-Validation)")
###########################################################
################## RESULTS IN TEST DATASET ################
###########################################################
### using all features
# predict in the test datatset - cforest
pred_cforest <- predict(train_models$cforest, heart_test, type = "raw")
# predict in the test datatset - loess
pred_gamLoess <- predict(train_models$gamLoess, heart_test, type = "raw")
# predict in the test datatset - random forest
pred_rf <- predict(train_models$rf, heart_test, type = "raw")
# predict in the test datatset - random forest
pred_rpart <- predict(train_models$rpart, heart_test, type = "raw")
### using select features
# predict in the test datatset - cforest
pred_cforest2 <- predict(train_models2$cforest, heart_test, type = "raw")
# predict in the test datatset - loess
pred_gamLoess2 <- predict(train_models2$gamLoess, heart_test, type = "raw")
# predict in the test datatset - random forest
pred_rf2 <- predict(train_models2$rf, heart_test, type = "raw")
# predict in the test datatset - random forest
pred_rpart2 <- predict(train_models2$rpart, heart_test, type = "raw")
# examine the performance of the models in the test set
# Collect the per-class performance metrics (Sensitivity, Specificity, ...)
# for every model's test-set predictions into one data frame:
# one column per model, one row per metric.
# A named list + lapply replaces eight copy-pasted confusionMatrix()/bind_cols()
# pairs; do.call(cbind, ...) also keeps the metric names as row names, which
# dplyr::bind_cols silently drops but the results table below displays.
test_preds <- list(
  cForest  = pred_cforest,
  Loess    = pred_gamLoess,
  RF       = pred_rf,
  rpart    = pred_rpart,
  cForest2 = pred_cforest2,
  Loess2   = pred_gamLoess2,
  RF2      = pred_rf2,
  rpart2   = pred_rpart2
)
mat_results <- as.data.frame(
  do.call(cbind, lapply(test_preds, function(p) {
    # byClass is a named numeric vector of per-class metrics,
    # with "Yes" (death) treated as the positive class
    confusionMatrix(p, heart_test$DEATH_EVENT, positive = "Yes")$byClass
  }))
)
# examine results in a table
# kable table of test-set metrics; the header row groups the four models
# trained on all features vs the four trained on the selected features.
# TRUE/FALSE are spelled out (T/F are ordinary reassignable variables),
# and the two kable_styling calls are merged into one equivalent call.
mat_results %>%
  kbl(caption = "Model Results in the Test Dataset", align = "lcccccccc",
      col.names = c("cForest", "Loess", "RF", "rpart", "cForest", "Loess", "RF", "rpart")) %>%
  add_header_above(c("", "All Features" = 4, "Select Features" = 4), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = c("hold_position", "scale_down"), font_size = 10)
###########################################################
######################## APPENDIX A #######################
###########################################################
### SUPPLEMENTAL FIGURES AND TABLES ###
### examine continuous variables across ANAEMIA ###
### all plots stored as separate objects ###
# Helper: jittered strip plot of a continuous variable (x) against a binary
# factor (y), coloured/shaped by death event, with black group-mean markers.
# x_scale picks the x-axis: "pretty" (linear, ~10 breaks), "pretty_comma"
# (linear, comma labels), "log" (log10), "log_comma" (log10, comma labels).
# rotate_y rotates the y tick labels 90 degrees (used in the sex plots).
# NOTE: the legend is hidden with legend.position = "none" -- "blank" is not
# a documented value and errors in current ggplot2.
plot_strip <- function(df, xvar, yvar, x_lab, x_scale = "pretty", rotate_y = FALSE) {
  x_axis <- switch(x_scale,
                   pretty = scale_x_continuous(breaks = pretty_breaks(n = 10)),
                   pretty_comma = scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)),
                   log = scale_x_log10(),
                   log_comma = scale_x_log10(labels = comma))
  p <- df %>%
    ggplot(aes({{ xvar }}, {{ yvar }}, color = DEATH_EVENT, shape = DEATH_EVENT)) +
    geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
    x_axis +
    scale_color_brewer(name = "Death", palette = "Paired") +
    scale_shape_manual(name = "Death", values = c(15, 17)) +
    stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
    xlab(x_lab) +
    theme_light() +
    theme(axis.title.x = element_text(vjust = -1)) +
    theme(axis.title.y = element_blank()) +
    theme(legend.position = "none") +
    theme(plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
  if (rotate_y) {
    p <- p + theme(axis.text.y = element_text(angle = 90, hjust = 0.5))
  }
  p
}
# one plot per continuous variable across anaemia by death event
pp_anm_age <- plot_strip(heart_data_f, age, anaemia, "Age")
pp_anm_cp <- plot_strip(heart_data_f, creatinine_phosphokinase, anaemia,
                        "Creatinine Phosphokinase (log)", "log_comma")
pp_anm_ef <- plot_strip(heart_data_f, ejection_fraction, anaemia, "Ejection Fraction")
pp_anm_pl <- plot_strip(heart_data_f, platelets, anaemia, "Platelets", "pretty_comma")
pp_anm_sc <- plot_strip(heart_data_f, serum_creatinine, anaemia, "Serum Creatinine (log)", "log")
pp_anm_ss <- plot_strip(heart_data_f, serum_sodium, anaemia, "Serum Sodium")
pp_anm_tm <- plot_strip(heart_data_f, time, anaemia, "Length of Follow-up")
# create a common legend for the plots
# dummy plot with a visible legend (larger points, no mean markers) whose
# legend grob is extracted and shared by Figures A1-A5
pp_anm_tm_legend <- heart_data_f %>%
  ggplot(aes(time, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 3, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -3)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "right") +
  theme(plot.margin = unit(c(0.5, 0.25, 0.5, 0.25), "cm"))
# extract the legend as an object
shared_legend_5 <- extract_legend(pp_anm_tm_legend)
# arrange plots together in a grid for presentation
grid.arrange(pp_anm_age, pp_anm_cp, pp_anm_ef, pp_anm_pl, pp_anm_sc, pp_anm_ss, pp_anm_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A1: Plots of the Continuous Variables by Anaemia and Patient Death",
             left = "Anaemia")
### examine continuous variables across DIABETES ###
### all plots stored as separate objects ###
# Helper: jittered strip plot of a continuous variable (x) against a binary
# factor (y), coloured/shaped by death event, with black group-mean markers.
# (Re)defined here so this section runs standalone; identical in each section.
# x_scale picks the x-axis: "pretty" (linear, ~10 breaks), "pretty_comma"
# (linear, comma labels), "log" (log10), "log_comma" (log10, comma labels).
# NOTE: the legend is hidden with legend.position = "none" -- "blank" is not
# a documented value and errors in current ggplot2.
plot_strip <- function(df, xvar, yvar, x_lab, x_scale = "pretty", rotate_y = FALSE) {
  x_axis <- switch(x_scale,
                   pretty = scale_x_continuous(breaks = pretty_breaks(n = 10)),
                   pretty_comma = scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)),
                   log = scale_x_log10(),
                   log_comma = scale_x_log10(labels = comma))
  p <- df %>%
    ggplot(aes({{ xvar }}, {{ yvar }}, color = DEATH_EVENT, shape = DEATH_EVENT)) +
    geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
    x_axis +
    scale_color_brewer(name = "Death", palette = "Paired") +
    scale_shape_manual(name = "Death", values = c(15, 17)) +
    stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
    xlab(x_lab) +
    theme_light() +
    theme(axis.title.x = element_text(vjust = -1)) +
    theme(axis.title.y = element_blank()) +
    theme(legend.position = "none") +
    theme(plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
  if (rotate_y) {
    p <- p + theme(axis.text.y = element_text(angle = 90, hjust = 0.5))
  }
  p
}
# one plot per continuous variable across diabetes by death event
pp_db_age <- plot_strip(heart_data_f, age, diabetes, "Age")
pp_db_cp <- plot_strip(heart_data_f, creatinine_phosphokinase, diabetes,
                       "Creatinine Phosphokinase (log)", "log_comma")
pp_db_ef <- plot_strip(heart_data_f, ejection_fraction, diabetes, "Ejection Fraction")
pp_db_pl <- plot_strip(heart_data_f, platelets, diabetes, "Platelets", "pretty_comma")
pp_db_sc <- plot_strip(heart_data_f, serum_creatinine, diabetes, "Serum Creatinine (log)", "log")
pp_db_ss <- plot_strip(heart_data_f, serum_sodium, diabetes, "Serum Sodium")
pp_db_tm <- plot_strip(heart_data_f, time, diabetes, "Length of Follow-up")
# arrange plots together in a grid for presentation
# (shared_legend_5 was extracted in the anaemia section above)
grid.arrange(pp_db_age, pp_db_cp, pp_db_ef, pp_db_pl, pp_db_sc, pp_db_ss, pp_db_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A2: Plots of the Continuous Variables by Diabetes and Patient Death",
             left = "Diabetes")
### examine continuous variables across HIGH BLOOD PRESSURE ###
### all plots stored as separate objects ###
# Helper: jittered strip plot of a continuous variable (x) against a binary
# factor (y), coloured/shaped by death event, with black group-mean markers.
# (Re)defined here so this section runs standalone; identical in each section.
# x_scale picks the x-axis: "pretty" (linear, ~10 breaks), "pretty_comma"
# (linear, comma labels), "log" (log10), "log_comma" (log10, comma labels).
# NOTE: the legend is hidden with legend.position = "none" -- "blank" is not
# a documented value and errors in current ggplot2.
plot_strip <- function(df, xvar, yvar, x_lab, x_scale = "pretty", rotate_y = FALSE) {
  x_axis <- switch(x_scale,
                   pretty = scale_x_continuous(breaks = pretty_breaks(n = 10)),
                   pretty_comma = scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)),
                   log = scale_x_log10(),
                   log_comma = scale_x_log10(labels = comma))
  p <- df %>%
    ggplot(aes({{ xvar }}, {{ yvar }}, color = DEATH_EVENT, shape = DEATH_EVENT)) +
    geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
    x_axis +
    scale_color_brewer(name = "Death", palette = "Paired") +
    scale_shape_manual(name = "Death", values = c(15, 17)) +
    stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
    xlab(x_lab) +
    theme_light() +
    theme(axis.title.x = element_text(vjust = -1)) +
    theme(axis.title.y = element_blank()) +
    theme(legend.position = "none") +
    theme(plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
  if (rotate_y) {
    p <- p + theme(axis.text.y = element_text(angle = 90, hjust = 0.5))
  }
  p
}
# one plot per continuous variable across high blood pressure by death event
pp_hbp_age <- plot_strip(heart_data_f, age, high_blood_pressure, "Age")
pp_hbp_cp <- plot_strip(heart_data_f, creatinine_phosphokinase, high_blood_pressure,
                        "Creatinine Phosphokinase (log)", "log_comma")
pp_hbp_ef <- plot_strip(heart_data_f, ejection_fraction, high_blood_pressure, "Ejection Fraction")
pp_hbp_pl <- plot_strip(heart_data_f, platelets, high_blood_pressure, "Platelets", "pretty_comma")
pp_hbp_sc <- plot_strip(heart_data_f, serum_creatinine, high_blood_pressure, "Serum Creatinine (log)", "log")
pp_hbp_ss <- plot_strip(heart_data_f, serum_sodium, high_blood_pressure, "Serum Sodium")
pp_hbp_tm <- plot_strip(heart_data_f, time, high_blood_pressure, "Length of Follow-up")
# arrange plots together in a grid for presentation
# (shared_legend_5 was extracted in the anaemia section above;
#  stray trailing space removed from the "High Blood Pressure" label)
grid.arrange(pp_hbp_age, pp_hbp_cp, pp_hbp_ef, pp_hbp_pl, pp_hbp_sc, pp_hbp_ss, pp_hbp_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A3: Plots of the Continuous Variables by High Blood Pressure and Patient Death",
             left = "High Blood Pressure")
### examine continuous variables across SEX ###
### all plots stored as separate objects ###
# Helper: jittered strip plot of a continuous variable (x) against a binary
# factor (y), coloured/shaped by death event, with black group-mean markers.
# (Re)defined here so this section runs standalone; identical in each section.
# x_scale picks the x-axis: "pretty" (linear, ~10 breaks), "pretty_comma"
# (linear, comma labels), "log" (log10), "log_comma" (log10, comma labels).
# rotate_y = TRUE rotates the y tick labels 90 degrees, as the original sex
# plots did (the Male/Female labels are long).
# NOTE: the legend is hidden with legend.position = "none" -- "blank" is not
# a documented value and errors in current ggplot2.
plot_strip <- function(df, xvar, yvar, x_lab, x_scale = "pretty", rotate_y = FALSE) {
  x_axis <- switch(x_scale,
                   pretty = scale_x_continuous(breaks = pretty_breaks(n = 10)),
                   pretty_comma = scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)),
                   log = scale_x_log10(),
                   log_comma = scale_x_log10(labels = comma))
  p <- df %>%
    ggplot(aes({{ xvar }}, {{ yvar }}, color = DEATH_EVENT, shape = DEATH_EVENT)) +
    geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
    x_axis +
    scale_color_brewer(name = "Death", palette = "Paired") +
    scale_shape_manual(name = "Death", values = c(15, 17)) +
    stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
    xlab(x_lab) +
    theme_light() +
    theme(axis.title.x = element_text(vjust = -1)) +
    theme(axis.title.y = element_blank()) +
    theme(legend.position = "none") +
    theme(plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
  if (rotate_y) {
    p <- p + theme(axis.text.y = element_text(angle = 90, hjust = 0.5))
  }
  p
}
# one plot per continuous variable across sex by death event
pp_sex_age <- plot_strip(heart_data_f, age, sex, "Age", rotate_y = TRUE)
pp_sex_cp <- plot_strip(heart_data_f, creatinine_phosphokinase, sex,
                        "Creatinine Phosphokinase (log)", "log_comma", rotate_y = TRUE)
pp_sex_ef <- plot_strip(heart_data_f, ejection_fraction, sex, "Ejection Fraction", rotate_y = TRUE)
pp_sex_pl <- plot_strip(heart_data_f, platelets, sex, "Platelets", "pretty_comma", rotate_y = TRUE)
pp_sex_sc <- plot_strip(heart_data_f, serum_creatinine, sex, "Serum Creatinine (log)", "log", rotate_y = TRUE)
pp_sex_ss <- plot_strip(heart_data_f, serum_sodium, sex, "Serum Sodium", rotate_y = TRUE)
pp_sex_tm <- plot_strip(heart_data_f, time, sex, "Length of Follow-up", rotate_y = TRUE)
# arrange plots together in a grid for presentation
# (shared_legend_5 was extracted in the anaemia section above)
grid.arrange(pp_sex_age, pp_sex_cp, pp_sex_ef, pp_sex_pl, pp_sex_sc, pp_sex_ss, pp_sex_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A4: Plots of the Continuous Variables by Sex and Patient Death",
             left = "Sex")
### examine continuous variables across SMOKING ###
### all plots stored as separate objects ###
# Helper: jittered strip plot of a continuous variable (x) against a binary
# factor (y), coloured/shaped by death event, with black group-mean markers.
# (Re)defined here so this section runs standalone; identical in each section.
# x_scale picks the x-axis: "pretty" (linear, ~10 breaks), "pretty_comma"
# (linear, comma labels), "log" (log10), "log_comma" (log10, comma labels).
# NOTE: the legend is hidden with legend.position = "none" -- "blank" is not
# a documented value and errors in current ggplot2.
plot_strip <- function(df, xvar, yvar, x_lab, x_scale = "pretty", rotate_y = FALSE) {
  x_axis <- switch(x_scale,
                   pretty = scale_x_continuous(breaks = pretty_breaks(n = 10)),
                   pretty_comma = scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)),
                   log = scale_x_log10(),
                   log_comma = scale_x_log10(labels = comma))
  p <- df %>%
    ggplot(aes({{ xvar }}, {{ yvar }}, color = DEATH_EVENT, shape = DEATH_EVENT)) +
    geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
    x_axis +
    scale_color_brewer(name = "Death", palette = "Paired") +
    scale_shape_manual(name = "Death", values = c(15, 17)) +
    stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
    xlab(x_lab) +
    theme_light() +
    theme(axis.title.x = element_text(vjust = -1)) +
    theme(axis.title.y = element_blank()) +
    theme(legend.position = "none") +
    theme(plot.margin = unit(c(0.5, 0.25, 0.25, 0.25), "cm"))
  if (rotate_y) {
    p <- p + theme(axis.text.y = element_text(angle = 90, hjust = 0.5))
  }
  p
}
# one plot per continuous variable across smoking by death event
pp_smk_age <- plot_strip(heart_data_f, age, smoking, "Age")
pp_smk_cp <- plot_strip(heart_data_f, creatinine_phosphokinase, smoking,
                        "Creatinine Phosphokinase (log)", "log_comma")
pp_smk_ef <- plot_strip(heart_data_f, ejection_fraction, smoking, "Ejection Fraction")
pp_smk_pl <- plot_strip(heart_data_f, platelets, smoking, "Platelets", "pretty_comma")
pp_smk_sc <- plot_strip(heart_data_f, serum_creatinine, smoking, "Serum Creatinine (log)", "log")
pp_smk_ss <- plot_strip(heart_data_f, serum_sodium, smoking, "Serum Sodium")
pp_smk_tm <- plot_strip(heart_data_f, time, smoking, "Length of Follow-up")
# arrange plots together in a grid for presentation
# (shared_legend_5 was extracted in the anaemia section above)
grid.arrange(pp_smk_age, pp_smk_cp, pp_smk_ef, pp_smk_pl, pp_smk_sc, pp_smk_ss, pp_smk_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A5: Plots of the Continuous Variables by Smoking and Patient Death",
             left = "Smoking")
### looking at interactions between continuous variables - TIME ###
### all plots stored as separate objects ###
# Each panel: one continuous predictor (x) vs days to follow-up (y), coloured
# and shaped by DEATH_EVENT, with a per-group confidence ellipse.
# Panel legends are hidden because a shared legend is extracted from a dummy
# plot and added to the assembled grid separately.
# plot of age and time by death event
pp_tm_age <- heart_data_f %>%
  ggplot(aes(age, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  # height/width spelled out in full to avoid partial argument matching (h/w)
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  # "none" is the documented value for hiding the legend; the original "blank"
  # is not a valid legend.position value and errors under ggplot2 >= 3.5.0
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction and time by death event
pp_tm_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine and time by death event
# serum creatinine is right-skewed, so its axis is log10-scaled
pp_tm_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_log10() +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium and time by death event
pp_tm_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# create a common legend for the plots
# need to create an entire dummy plot (identical to pp_tm_ss but with its
# legend shown at the top) just so the legend grob can be harvested from it
pp_tm_ss_legend <- heart_data_f %>%
ggplot(aes(serum_sodium, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
stat_ellipse(lwd = 1) +
scale_x_continuous(breaks = pretty_breaks(n = 10)) +
scale_y_continuous(breaks = pretty_breaks(n = 10)) +
scale_color_brewer(name = "Death", palette = "Paired") +
scale_shape_manual(name = "Death", values = c(15, 17)) +
xlab("Serum Sodium") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "top") +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# extract the legend as an object
shared_legend_10 <- extract_legend(pp_tm_ss_legend)
# arrange plots together in a grid for presentation
# heights = c(1,20) gives the legend a thin strip above the 2x2 plot grid
grid.arrange(shared_legend_10,
arrangeGrob(pp_tm_age, pp_tm_ef, pp_tm_sc, pp_tm_ss,
ncol = 2),
nrow = 2, heights = c(1,20),
top = "Figure A6: Plots of the Continuous Variables by Days to Follow-up and Patient Death",
left = "Length of Follow-up")
###################################################################################################
### looking at interactions between continuous variables - MIXED ###
### all plots stored as separate objects ###
# Each panel: a pair of continuous predictors, coloured/shaped by DEATH_EVENT,
# with per-group confidence ellipses.  Right-skewed serum creatinine always
# gets a log10 axis.  Panel legends are hidden ("none"); the shared legend
# (shared_legend_10) is reused when the grid is assembled below.
# plot of ejection fraction and age by death event
pp_age_ef <- heart_data_f %>%
  ggplot(aes(age, ejection_fraction, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  # height/width spelled out in full to avoid partial argument matching (h/w)
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  ylab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  # "none" is the documented value for hiding the legend; the original "blank"
  # is not a valid legend.position value and errors under ggplot2 >= 3.5.0
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine and age by death event
pp_age_sc <- heart_data_f %>%
  ggplot(aes(age, serum_creatinine, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium and age by death event
pp_age_ss <- heart_data_f %>%
  ggplot(aes(age, serum_sodium, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  ylab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction and serum creatinine by death event
pp_ef_sc <- heart_data_f %>%
  ggplot(aes(ejection_fraction, serum_creatinine, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Ejection Fraction") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction and serum sodium by death event
pp_ef_ss <- heart_data_f %>%
  ggplot(aes(ejection_fraction, serum_sodium, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Ejection Fraction") +
  ylab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine against serum sodium by death event
pp_ss_sc <- heart_data_f %>%
  ggplot(aes(serum_sodium, serum_creatinine, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Sodium") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
# reuses shared_legend_10 (extracted for Figure A6) above a 2-column grid of
# the six bivariate plots; heights = c(1,20) keeps the legend strip thin
grid.arrange(shared_legend_10,
arrangeGrob(pp_age_ef, pp_age_sc, pp_age_ss, pp_ef_sc, pp_ef_ss, pp_ss_sc,
ncol = 2),
nrow = 2, heights = c(1,20),
top = "Figure A7: Plots of Bivariate Continuous Variables by Patient Death")
###########################################################
######################## APPENDIX B #######################
###########################################################
### ENVIRONMENT ###
# print operating system and R version (documents the environment for the report)
version
# /CYO_script_ab.R | no_license | blanchard123/CYO_Project | R | 92,509 bytes
### Adam Blanchard
### Choose Your Own (CYO) Capstone Project
### HarvardX: PH125.9x - Capstone Project
### https://github.com/blanchard123/CYO_Project
###########################################################
############## Choose Your Own Project Code ###############
###########################################################
# Heart failure prediction data:
# https://www.kaggle.com/andrewmvd/heart-failure-clinical-data
###########################################################
##################### LOAD LIBRARIES ######################
###########################################################
# Install and load libraries as needed
# if(!require(...)) attempts to attach the package and installs it from CRAN
# only when the attach fails (require() returns FALSE instead of erroring)
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(caretEnsemble)) install.packages("caretEnsemble", repos = "http://cran.us.r-project.org")
if(!require(corrplot)) install.packages("corrplot", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(gam)) install.packages("gam", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
if(!require(ggthemes)) install.packages("ggthemes", repos = "http://cran.us.r-project.org")
if(!require(gridExtra)) install.packages("gridExtra", repos = "http://cran.us.r-project.org")
if(!require(gtsummary)) install.packages("gtsummary", repos = "http://cran.us.r-project.org")
if(!require(kableExtra)) install.packages("kableExtra", repos = "http://cran.us.r-project.org")
if(!require(knitr)) install.packages("knitr", repos = "http://cran.us.r-project.org")
if(!require(markdown)) install.packages("markdown", repos = "http://cran.us.r-project.org")
if(!require(party)) install.packages("party", repos = "http://cran.us.r-project.org")
if(!require(randomForest)) install.packages("randomForest", repos = "http://cran.us.r-project.org")
if(!require(rcompanion)) install.packages("rcompanion", repos = "http://cran.us.r-project.org")
if(!require(rpart)) install.packages("rpart", repos = "http://cran.us.r-project.org")
if(!require(rstatix)) install.packages("rstatix", repos = "http://cran.us.r-project.org")
if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org")
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
# explicit library() calls so every dependency is attached even if already installed
# NOTE(review): tidyverse already attaches ggplot2/dplyr, so those calls are
# presumably redundant but harmless - confirm against the tidyverse meta-package
library(caret)
library(caretEnsemble)
library(corrplot)
library(data.table)
library(gam)
library(ggplot2)
library(ggthemes)
library(gridExtra)
library(gtsummary)
library(knitr)
library(kableExtra)
library(markdown)
library(party)
library(randomForest)
library(rcompanion)
library(rpart)
library(rstatix)
library(scales)
library(tidyverse)
library(dplyr)
# print numbers with 3 significant digits and stop scientific notation
options(digits = 3)
options(scipen = 999)
###########################################################
#################### DATA DESCRIPTION #####################
###########################################################
# create a dataframe describing the data: one row per variable in the
# heart failure dataset (name, meaning, measurement type, observed range)
heart_variables <-
  data.frame(Feature = c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                         "Ejection Fraction", "High Blood Pressure", "Platelets",
                         "Serum Creatinine", "Serum Sodium", "Sex", "Smoking",
                         "Time", "Death Event"),
             Description = c("Age of patients in years",
                             "Decrease in red blood cells",
                             "Level of CPK in the blood",
                             "Presence of diabetes",
                             "Percentage of blood leaving heart",
                             "Presence of hypertension",
                             "Level of platelets in the blood",
                             "Level of creatinine in the blood",
                             "Level of sodium in the blood",
                             "Biological sex - man or woman",
                             "Presence of smoking",
                             "Number of days to follow-up",
                             "Death of patient during follow-up"),
             # Fix: the entries for Ejection Fraction and High Blood Pressure
             # were transposed in the original (ejection fraction is the
             # percentage; high blood pressure is the Boolean) - the Range
             # column ("14 - 80" vs "0, 1") confirms the correct order.
             Measurement = c("Numeric - years", "Boolean", "Numeric - mcg/L", "Boolean",
                             "Numeric - percentage", "Boolean", "Numeric - kp/mL",
                             "Numeric - mg/dL", "Numeric - mEq/L", "Binary", "Boolean",
                             "Numeric - days", "Boolean"),
             Range = c("40 - 95", "0, 1", "23 - 7,861", "0, 1", "14 - 80", "0, 1",
                       "25.01 - 850.00", "0.50 - 9.40", "114 - 148", "0, 1", "0, 1",
                       "4 - 285", "0, 1"))
# convert the dataframe to a presentation table describing the data
# (TRUE/FALSE written in full rather than the reassignable T/F shorthand)
heart_variables %>%
  kbl(caption = "Variable Description, Measurement, and Range", align = "llcc") %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down") %>%
  footnote(general = c("Adapted from Chicco & Jurman, 2020",
                       "mcg/L = micrograms per liter; kp/mL = kiloplatelets/microliter;
mEq/L = milliequivalents per litre"),
           general_title = "")
###########################################################
##################### DATA WRANGLING ######################
###########################################################
# Note: this process could take a minute
# data stored on my github account
data_url <- "https://raw.githubusercontent.com/blanchard123/CYO_Project/main/heart_failure_clinical_records_dataset.csv"
# download the csv file from github into the working directory
download.file(data_url, "heart_data.csv")
# read the file into R and create numeric and logical variables (as appropriate)
# "_l" dataset: dichotomous variables stored as logical (for modelling)
heart_data_l <- read_csv("heart_data.csv", col_types = cols(
age = col_double(),
anaemia = col_logical(),
creatinine_phosphokinase = col_double(),
diabetes = col_logical(),
ejection_fraction = col_double(),
high_blood_pressure = col_logical(),
platelets = col_double(),
serum_creatinine = col_double(),
serum_sodium = col_double(),
sex = col_logical(),
smoking = col_logical(),
time = col_integer(),
DEATH_EVENT = col_logical()))
# create a separate file with numeric and factor variables (as appropriate)
# converting from logical to factor variables can create issues, so two datasets are created
# "_f" dataset: dichotomous variables stored as factors (for plotting/tables)
heart_data_f <- read_csv("heart_data.csv", col_types = cols(
age = col_double(),
anaemia = col_factor(),
creatinine_phosphokinase = col_double(),
diabetes = col_factor(),
ejection_fraction = col_double(),
high_blood_pressure = col_factor(),
platelets = col_double(),
serum_creatinine = col_double(),
serum_sodium = col_double(),
sex = col_factor(),
smoking = col_factor(),
time = col_integer(),
DEATH_EVENT = col_factor()))
# add value labels to the factor variables
# NOTE: run this only once - applying factor() a second time (when the values
# are already "No"/"Yes") finds no 0/1 levels and turns every entry into NA
heart_data_f$anaemia <- factor(heart_data_f$anaemia, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$diabetes <- factor(heart_data_f$diabetes, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$high_blood_pressure <- factor(heart_data_f$high_blood_pressure, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$sex <- factor(heart_data_f$sex, levels = c(0,1), labels = c("Female","Male"))
heart_data_f$smoking <- factor(heart_data_f$smoking, levels = c(0,1), labels = c("No","Yes"))
heart_data_f$DEATH_EVENT <- factor(heart_data_f$DEATH_EVENT, levels = c(0,1), labels = c("No","Yes"))
# convert age to integer variable in both datasets
heart_data_f <- heart_data_f %>% mutate(age = as.integer(age))
heart_data_l <- heart_data_l %>% mutate(age = as.integer(age))
# remove unnecessary object from environment
rm(data_url)
# Save the datasets to disk so later sessions can load() them directly
save(heart_data_l, file = "heart_data_l.RData")
save(heart_data_f, file = "heart_data_f.RData")
###########################################################
##################### DATA INSPECTION #####################
###########################################################
# check for NA values in every variable at once
# vapply() over the columns replaces thirteen copy-pasted any(is.na(...))
# calls and returns a named logical vector (one entry per variable)
vapply(heart_data_f, anyNA, logical(1))
# another check for complete data - identify rows with any missing values
heart_data_f[!complete.cases(heart_data_f), ]
# number of rows and columns
dim(heart_data_f)
# basic identification of data and variable types for both datasets
str(heart_data_l, strict.width="cut")
str(heart_data_f, strict.width="cut")
# examine the first ten rows of the heart dataset as a formatted table
# (TRUE/FALSE written in full rather than the reassignable T/F shorthand)
as_tibble(heart_data_f) %>%
  slice(1:10) %>%
  kbl(caption = "Examination of the Heart Data Structure", align = "c",
      col.names = c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                    "Ejection Fraction", "High Blood Pressure", "Platelets",
                    "Serum Creatinine", "Serum Sodium", "Sex", "Smoking", "Time",
                    "Death Event")) %>%
  row_spec(0, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# basic descriptive information about the variables
summary(heart_data_f)
###########################################################
################# BASIC DATA EXPLORATION ##################
###########################################################
### basic frequency distributions of binary variables ###
### all plots stored as separate objects ###
# each bar chart shows raw counts with the percentage printed inside the bars
# frequency distribution of anaemia
fd_anm <- heart_data_f %>%
ggplot(aes(anaemia)) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab("Anaemia") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of diabetes
fd_db <- heart_data_f %>%
ggplot(aes(diabetes)) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab("Diabetes") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of high blood pressure
fd_hbp <- heart_data_f %>%
ggplot(aes(high_blood_pressure)) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab("High Blood Pressure") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of sex
fd_sex <- heart_data_f %>%
ggplot(aes(sex)) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab("Sex") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of smoking
fd_smk <- heart_data_f %>%
ggplot(aes(smoking)) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab("Smoking") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of death events (the outcome variable)
fd_death <- heart_data_f %>%
ggplot(aes(DEATH_EVENT)) +
geom_bar(color = "Black", fill = "#a6cee3") +
geom_text(aes(label = percent((..count..)/sum(..count..))),
stat = "count", position = position_stack(vjust = 0.5)) +
xlab("Death") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
grid.arrange(fd_death, fd_anm, fd_db, fd_hbp, fd_sex, fd_smk,
ncol = 2,
top = "Figure 1: Frequency Distributions of the Dichotomous Variables",
left = "Frequency")
### basic frequency distributions of continuous variables ###
### all plots stored as separate objects ###
# each histogram has a vertical line at the variable's arithmetic mean
# frequency distribution of age
fd_age <- heart_data_f %>%
ggplot(aes(age)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$age), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_continuous(breaks = pretty_breaks(n = 10)) +
xlab("Age") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of creatinine phosphokinase levels (log10 x axis;
# note the vline still marks the arithmetic mean, drawn on the log scale)
fd_cp <- heart_data_f %>%
ggplot(aes(creatinine_phosphokinase)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$creatinine_phosphokinase), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_log10(labels = comma) +
xlab("Creatinine Phosphokinase (log)") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of ejection fraction levels
fd_ef <- heart_data_f %>%
ggplot(aes(ejection_fraction)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$ejection_fraction), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_continuous(breaks = pretty_breaks(n = 10)) +
xlab("Ejection Fraction") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of platelet levels
fd_pl <- heart_data_f %>%
ggplot(aes(platelets)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$platelets), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
xlab("Platelets") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of serum creatinine levels (log10 x axis)
fd_sc <- heart_data_f %>%
ggplot(aes(serum_creatinine)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$serum_creatinine), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_log10() +
xlab("Serum Creatinine (log)") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of serum sodium levels
fd_ss <- heart_data_f %>%
ggplot(aes(serum_sodium)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$serum_sodium), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_continuous(breaks = pretty_breaks(n = 10)) +
xlab("Serum Sodium") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# frequency distribution of follow-up time
fd_tm <- heart_data_f %>%
ggplot(aes(time)) +
geom_histogram(bins = 20, color = "Black", fill = "#a6cee3") +
geom_vline(xintercept = mean(heart_data_f$time), color = "black") +
scale_y_continuous(breaks = pretty_breaks(n = 5)) +
scale_x_continuous(breaks = pretty_breaks(n = 10)) +
xlab("Length of Follow-up") +
theme_light() +
theme(axis.title.x = element_text(vjust = -1)) +
theme(axis.title.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
# layout_matrix: each plot spans two columns; NA cells centre the last plot
grid.arrange(fd_age, fd_cp, fd_ef, fd_pl, fd_sc, fd_ss, fd_tm,
ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,NA)),
top = "Figure 2: Frequency Distributions of the Continuous Variables",
left = "Frequency")
### bar graphs by death event ###
### all plots stored as separate objects ###
# Each bar graph shows the within-level proportion of deaths for one
# dichotomous variable, with raw counts printed inside the stacked segments.
# Panel legends are hidden in favour of a shared legend added to the grid.
# bar graph of anaemia by death event
bg_anm <- heart_data_f %>%
  ggplot(aes(anaemia, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Anaemia") +
  theme_light() +
  theme(axis.title.x = element_text()) +
  theme(axis.title.y = element_blank()) +
  # "none" is the documented value for hiding the legend; the original "blank"
  # is not a valid legend.position value and errors under ggplot2 >= 3.5.0
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# bar graph of diabetes by death event
bg_db <- heart_data_f %>%
  ggplot(aes(diabetes, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Diabetes") +
  theme_light() +
  theme(axis.title.x = element_text()) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# bar graph of high blood pressure by death event
bg_hbp <- heart_data_f %>%
  ggplot(aes(high_blood_pressure, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("High Blood Pressure") +
  theme_light() +
  theme(axis.title.x = element_text()) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# bar graph of sex by death event
bg_sex <- heart_data_f %>%
  ggplot(aes(sex, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Sex") +
  theme_light() +
  theme(axis.title.x = element_text()) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# bar graph of smoking by death event
bg_smk <- heart_data_f %>%
  ggplot(aes(smoking, fill = DEATH_EVENT)) +
  geom_bar(position = "fill", colour = "black") +
  geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  xlab("Smoking") +
  theme_light() +
  theme(axis.title.x = element_text()) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# create a common legend for the plots
# need to create an entire dummy plot (a copy of bg_smk with its legend shown
# on the right) purely so the legend grob can be extracted below
bg_smk_legend <- heart_data_f %>%
ggplot(aes(smoking, fill = DEATH_EVENT)) +
geom_bar(position = "fill", colour = "black") +
geom_text(aes(label = ..count..), stat = "count", position = position_fill(.5)) +
scale_y_continuous(breaks = pretty_breaks(n = 10)) +
scale_fill_brewer(name = "Death", palette = "Paired") +
xlab("Smoking") +
theme_light() +
theme(axis.title.x = element_text()) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "right") +
theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# pull the legend grob out of a ggplot object
# builds the plot into a gtable, locates the grob named "guide-box"
# (the legend box), and returns it so the legend can be drawn on its
# own (e.g. shared across a grid of plots)
extract_legend <- function(my_ggp) {
  built <- ggplot_gtable(ggplot_build(my_ggp))
  legend_idx <- which(vapply(built$grobs, function(g) g$name, character(1)) == "guide-box")
  built$grobs[[legend_idx]]
}
# extract the legend as an object
shared_legend_3 <- extract_legend(bg_smk_legend)
# arrange plots together in a grid for presentation
# layout_matrix: five bar graphs plus the shared legend (cell 6); NA pads the
# second row so the plots stay aligned
grid.arrange(bg_anm, bg_db, bg_hbp, bg_smk, bg_sex, shared_legend_3,
ncol = 3, layout_matrix = rbind(c(1,1,2,2,3,3), c(NA,4,4,5,5,6)),
top = "Figure 3: Bar Graphs of the Dichotomous Variables by Patient Death",
left = "Proportion")
# table of descriptive statistics of the dichotomous variables by death event
# counts/percentages overall and split by death event, with p-values added by
# add_p(); TRUE written in full rather than the reassignable T shorthand
heart_data_f %>% select(anaemia, high_blood_pressure, diabetes, sex, smoking, DEATH_EVENT) %>%
  tbl_summary(by = DEATH_EVENT,
              type = all_categorical() ~ "categorical",
              digits = all_categorical() ~ 2,
              label = list(anaemia ~ "Anaemia", high_blood_pressure ~ "High Blood Pressure",
                           diabetes ~ "Diabetes", sex ~ "Sex", smoking ~ "Smoking")) %>%
  add_p(pvalue_fun = ~style_pvalue(.x, digits = 2)) %>%
  add_overall() %>%
  as_kable_extra(caption = "Descriptive Statistics for the Dichotomous Variables: Overall and by Death Event", align = "lcccc") %>%
  add_header_above(c("", "", "Death Event" = 3), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
### boxplots of continuous variables by death event ###
### all plots stored as separate objects ###
# Each panel: a boxplot of one continuous variable split by death event, with
# jittered raw points overlaid.  Panel legends are hidden ("none" - the
# documented value; the original "blank" is invalid and errors under
# ggplot2 >= 3.5.0); a shared legend is extracted from a dummy plot below.
# boxplot of age by death event
bp_age <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, age, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  # height/width spelled out in full to avoid partial argument matching (h/w)
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Age") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# boxplot of creatinine phosphokinase levels by death event (log10 y axis)
bp_cp <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, creatinine_phosphokinase, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_log10(labels = comma) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Creatinine Phosph. (log)") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# boxplot of ejection fraction levels by death event
bp_ef <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, ejection_fraction, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# boxplot of platelet levels by death event
bp_pl <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, platelets, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# boxplot of serum creatinine levels by death event (log10 y axis)
bp_sc <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, serum_creatinine, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_log10() +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# boxplot of serum sodium levels by death event
bp_ss <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, serum_sodium, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  ylab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# boxplot of follow-up time by death event
bp_tm <- heart_data_f %>%
  ggplot(aes(DEATH_EVENT, time, fill = DEATH_EVENT)) +
  geom_boxplot(color = "Black") +
  geom_point(position = position_jitter(height = 0.25, width = 0.25), size = 2.5, alpha = 0.5) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_fill_brewer(name = "Death", palette = "Paired") +
  # NOTE(review): this xlab is overridden by axis.title.x = element_blank()
  # below, so no x-axis title is shown; kept for parity with the original
  xlab("Death Event") +
  ylab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_blank()) +
  theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# create a common legend for the plots
# need to create an entire dummy plot (a copy of bp_tm with its legend shown
# on the right) purely so the legend grob can be extracted below
bp_tm_legend <- heart_data_f %>%
ggplot(aes(DEATH_EVENT, time, fill = DEATH_EVENT)) +
geom_boxplot(color = "Black") +
geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2.5, alpha = 0.5) +
scale_y_continuous(breaks = pretty_breaks(n = 10)) +
scale_fill_brewer(name = "Death", palette = "Paired") +
xlab("Death Event") +
ylab("Length of Follow-up") +
theme_light() +
theme(axis.title.x = element_blank()) +
theme(axis.title.y = element_text(angle = 90, vjust = 2)) +
theme(legend.position = "right") +
theme(plot.margin = unit(c(0.5,0.5,0.25,0.5), "cm"))
# extract the legend as an object
shared_legend_4 <- extract_legend(bp_tm_legend)
# arrange plots together in a grid for presentation
# layout_matrix: seven boxplots plus the shared legend (cell 8); NA pads the
# last row so it stays centred
grid.arrange(bp_age, bp_cp, bp_ef, bp_pl, bp_sc, bp_ss, bp_tm, shared_legend_4,
ncol = 3, layout_matrix = rbind(c(1,2,3), c(4,5,6), c(NA,7,8)),
top = "Figure 4: Boxplots of the Continuous Variables by Patient Death")
# table of descriptive statistics of the continuous variables by death event
# mean (SD) and median (IQR) overall and split by death event, with p-values
# added by add_p(); TRUE written in full rather than the reassignable T shorthand
heart_data_f %>% select(age, creatinine_phosphokinase, ejection_fraction, platelets,
                        serum_creatinine, serum_sodium, time, DEATH_EVENT) %>%
  tbl_summary(by = DEATH_EVENT,
              type = all_continuous() ~ "continuous2",
              statistic = all_continuous() ~ c("{mean} ({sd})", "{median} ({p25}, {p75})"),
              digits = all_continuous() ~ 2,
              label = list(age ~ "Age", creatinine_phosphokinase ~ "Creatinine Phosphokinase",
                           ejection_fraction ~ "Ejection Fraction", platelets ~ "Platelets",
                           serum_creatinine ~ "Serum Creatinine", serum_sodium ~ "Serum Sodium",
                           time ~ "Length of Follow-up")) %>%
  add_p(pvalue_fun = ~style_pvalue(.x, digits = 2)) %>%
  add_overall() %>%
  modify_footnote(all_stat_cols() ~ "Mean (SD) or Median (IQR)") %>%
  as_kable_extra(caption = "Descriptive Statistics for the Continuous Variables: Overall and by Death Event", align = "lcccc") %>%
  add_header_above(c("", "", "Death Event" = 3), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
###########################################################
################## INFERENTIAL ANALYSIS ###################
###########################################################
### some analyses already presented in tables above ###
# more details about the chi-square tests presented in table above
# (each test compares one binary feature against the death outcome)
chisq_test(heart_data_f$anaemia, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$high_blood_pressure, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$diabetes, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$sex, heart_data_f$DEATH_EVENT)
chisq_test(heart_data_f$smoking, heart_data_f$DEATH_EVENT)
# more details about the Mann-Whitney U tests in table above
# CONSISTENCY: use the formula + data interface (as the t-tests below do)
# instead of repeating heart_data_f$ on both sides; the fitted tests are
# identical, the call is just easier to read and to keep in sync
wilcox.test(age ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(creatinine_phosphokinase ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(ejection_fraction ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(platelets ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(serum_creatinine ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(serum_sodium ~ DEATH_EVENT, data = heart_data_f)
wilcox.test(time ~ DEATH_EVENT, data = heart_data_f)
# alternative analysis (to the Mann-Whitney) using t-tests
t.test(age ~ DEATH_EVENT, data = heart_data_f)
t.test(creatinine_phosphokinase ~ DEATH_EVENT, data = heart_data_f)
t.test(ejection_fraction ~ DEATH_EVENT, data = heart_data_f)
t.test(platelets ~ DEATH_EVENT, data = heart_data_f)
t.test(serum_creatinine ~ DEATH_EVENT, data = heart_data_f)
t.test(serum_sodium ~ DEATH_EVENT, data = heart_data_f)
t.test(time ~ DEATH_EVENT, data = heart_data_f)
### correlations ###
# row/column labels for the correlation matrix -- defined once and reused,
# since the matrix is square (the original duplicated this 13-element vector)
cor_labels <- c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                "Ejection Fraction", "High Blood Pressure", "Platelets",
                "Serum Creatinine", "Serum Sodium", "Sex", "Smoking",
                "Time", "Death Event")
# create correlation matrix (heart_data_l is the numeric coding of the data)
cmat <- cor(heart_data_l)
colnames(cmat) <- cor_labels
rownames(cmat) <- cor_labels
# create correlation test data (p-values / confidence intervals per pair)
res1 <- cor.mtest(heart_data_l, conf.level = 0.95)
# create heatmap of correlation matrix between all variables with extras;
# significance labels are drawn from res1$p at the three given levels
corrplot::corrplot(cmat,
                   type = "lower",
                   method = "square",
                   tl.col = "black",
                   tl.cex = 0.7,
                   title = "Figure 5: Correlation Matrix",
                   p.mat = res1$p,
                   insig = "label_sig",
                   sig.level = c(.001, .01, .05),
                   pch.cex = 0.9,
                   pch.col = "white",
                   mar = c(1,1,3,1))
# correlations between death event and other variables - dataframe
correlations <- cor(heart_data_l, heart_data_l$DEATH_EVENT)
# squared correlation = proportion of variance shared with the outcome
r_square <- (correlations)^2
# res1$p[, 13]: p-values of the correlations against DEATH_EVENT
# NOTE(review): assumes DEATH_EVENT is column 13 of heart_data_l -- confirm
temp_data <- data.frame(r_square = r_square, cor = correlations, p = res1$p[,13])
# drop the outcome's (perfect) self-correlation row by NAME rather than by
# the magic index 13, so the code survives column reordering
# (cor() sets the row names from colnames(heart_data_l))
temp_data <- temp_data[rownames(temp_data) != "DEATH_EVENT", ]
rownames(temp_data) <- c("Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                         "Ejection Fraction", "High Blood Pressure", "Platelets",
                         "Serum Creatinine", "Serum Sodium", "Sex", "Smoking", "Time")
# create table of correlations data, strongest association first
temp_data[order(temp_data$r_square, decreasing = TRUE),] %>%
  kbl(caption = "Correlations with Death Event",
      col.names = c("r squared", "r", "p-value"),
      align = "lccc",
      digits = 3) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10)
### logistic regressions ###
# logistic regression with all features including time
# (family = "binomial" fits a logit model of DEATH_EVENT on all other columns
# of the numeric-coded data frame)
fit_glm <- glm(DEATH_EVENT ~ ., data = heart_data_l, family = "binomial")
# results of logistic regression
summary(fit_glm)
# more results of logistic regression - overall model fit
# nagelkerke() returns a table of pseudo R-squared measures
# (presumably rcompanion::nagelkerke -- confirm against the library() calls)
fit_glm_r2 <- nagelkerke(fit_glm)$Pseudo.R.squared
colnames(fit_glm_r2) <- c("All Features")
fit_glm_r2 %>%
kbl(caption = "Logistic Regression - Overall Model Fit", align = "c", col.names = "Pseudo R Squared") %>%
row_spec(0, bold = T) %>%
column_spec(1, bold = T) %>%
kable_classic(full_width = F) %>%
kable_styling(latex_options = "hold_position", font_size = 10) %>%
kable_styling(latex_options = "scale_down")
# more results of logistic regression - coefficients table
fit_glm_co <- summary(fit_glm)$coefficients
rownames(fit_glm_co) <- c("(Intercept)", "Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
"Ejection Fraction", "High Blood Pressure", "Platelets",
"Serum Creatinine", "Serum Sodium", "Sex", "Smoking", "Time")
fit_glm_co %>%
kbl(caption = "Logistic Regression Coefficients - All Features",
align = "lccc", col.names = c("Estimate", "Std. Error", "Z", "p")) %>%
row_spec(0, bold = T) %>%
# rows 2, 6, 9, 13 = Age, Ejection Fraction, Serum Creatinine, Time
# (per the rownames assigned above) are emphasised in bold
row_spec(2, bold = T) %>%
row_spec(6, bold = T) %>%
row_spec(9, bold = T) %>%
row_spec(13, bold = T) %>%
kable_classic(full_width = F) %>%
kable_styling(latex_options = "hold_position", font_size = 10) %>%
kable_styling(latex_options = "scale_down")
# more results of logistic regression - odds ratios
# (exponentiated logit coefficients)
exp(coef(fit_glm))
# logistic regression without the TIME variable
# FIX: the original call omitted family = "binomial", so glm() silently fit
# a gaussian (linear) model while the output below was labelled and
# interpreted as logistic (z statistics, odds ratios via exp(coef))
fit_glm2 <- glm(DEATH_EVENT ~ age + anaemia + creatinine_phosphokinase + diabetes +
                  ejection_fraction + high_blood_pressure + platelets + serum_creatinine +
                  serum_sodium + sex + smoking,
                data = heart_data_l, family = "binomial")
# results of logistic regression
summary(fit_glm2)
# more results of logistic regression - overall model fit - comparing both models
fit_glm_r2 <- fit_glm_r2 %>% cbind(nagelkerke(fit_glm2)$Pseudo.R.squared)
colnames(fit_glm_r2) <- c("All Features", "No Time Feature")
fit_glm_r2 %>%
  kbl(caption = "Logistic Regression - Overall Model Fit", align = "cc") %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# more results of logistic regression - coefficients table
fit_glm_co2 <- summary(fit_glm2)$coefficients
rownames(fit_glm_co2) <- c("(Intercept)", "Age", "Anaemia", "Creatinine Phosphokinase", "Diabetes",
                           "Ejection Fraction", "High Blood Pressure", "Platelets",
                           "Serum Creatinine", "Serum Sodium", "Sex", "Smoking")
fit_glm_co2 %>%
  kbl(caption = "Logistic Regression Coefficients - No Time Feature",
      align = "lccc", col.names = c("Estimate", "Std. Error", "Z", "p")) %>%
  row_spec(0, bold = TRUE) %>%
  # rows 2, 6, 9 = Age, Ejection Fraction, Serum Creatinine (per rownames above)
  row_spec(2, bold = TRUE) %>%
  row_spec(6, bold = TRUE) %>%
  row_spec(9, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# more results of logistic regression - odds ratios
exp(coef(fit_glm2))
###########################################################
################### MODELING APPROACHES ###################
###########################################################
########## Data preparation for algorithm training ##########
# partition the heart dataset into training (80%) and test (20%) datasets
# sample.kind = "Rounding" reproduces the pre-R-3.6 sampling algorithm so the
# split matches earlier runs
set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(y = heart_data_f$DEATH_EVENT,
times = 1, p = 0.2, list = FALSE)
heart_train <- heart_data_f[-test_index,]
heart_test <- heart_data_f[test_index,]
# save the training and test datasets
save(heart_train, file = "heart_train.RData")
save(heart_test, file = "heart_test.RData")
# examine frequency of outcome in both datasets
table(heart_train$DEATH_EVENT)
table(heart_test$DEATH_EVENT)
# accuracy metric names and their plain-language descriptions, kept as two
# parallel vectors and then assembled into a reference data frame
acc_metric <- c("Accuracy", "Kappa", "Sensitivity", "Specificity", "PPV", "NPV",
                "Precision", "Recall", "F1", "Prevalence", "Detection Rate",
                "Detection Prevalence", "Balanced Accuracy")
acc_description <- c("Proportion of true positives and true negatives over all instances",
                     "Measure of agreement accounting for random chance*",
                     "Proportion of true positives over actual positives",
                     "Proportion of true negatives over actual negatives",
                     "Proportion of true positives over predicted positives*",
                     "Proportion of true negatives over predicted negatives*",
                     "Proportion of true positives over predicted positives",
                     "Proportion of true positives over actual positives",
                     "Harmonic average of precision and recall*",
                     "Proportion of actual positives over total",
                     "Proportion of true positives over total",
                     "Proportion of predicted positives over total",
                     "(sensitivity + specificity)/2")
model_acc <- data.frame(Feature = acc_metric, Description = acc_description)
# render the reference data frame as a formatted table
model_acc %>%
  kbl(caption = "Model Accuracy Metrics", align = "ll") %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down") %>%
  footnote(general = c("Adapted from Irizarry (2019) and the caret package description."),
           symbol = c("These metrics are calculated using more complex definitions in the caret package."),
           general_title = "")
# examine information about the models - cforest
getModelInfo("cforest")
modelLookup("cforest")
# examine information about the models - knn
getModelInfo("knn")
modelLookup("knn")
# examine information about the models - glm (comment was mislabelled "knn")
getModelInfo("glm")
modelLookup("glm")
# examine information about the models - gamLoess
getModelInfo("gamLoess")
modelLookup("gamLoess")
# examine information about the models - rf (comment was mislabelled "gamLoess")
getModelInfo("rf")
modelLookup("rf")
# examine information about the models - rpart (comment was mislabelled "gamLoess")
getModelInfo("rpart")
modelLookup("rpart")
# set the seed for reproducibility
set.seed(1, sample.kind = "Rounding")
# resampling control shared by all models
# NOTE(review): method = "cv" with number = 100 is requested, but the
# explicit index = createResample(..., 10) supplies 10 resamples, which
# takes precedence over number -- confirm which scheme was intended
my_control <- trainControl(method = "cv", number = 100, p = .9,
savePredictions = "all",
classProbs = TRUE,
allowParallel = TRUE,
index = createResample(heart_train$DEATH_EVENT, 10))
# set the seed for reproducibility
set.seed(1, sample.kind = "Rounding")
# train multiple models at the same time - all features (the formula
# includes every predictor except time)
train_models <- caretList(DEATH_EVENT ~ age + anaemia + creatinine_phosphokinase + diabetes +
ejection_fraction + high_blood_pressure + platelets + serum_creatinine +
serum_sodium + sex + smoking,
data = heart_train,
trControl = my_control,
methodList = c("cforest", "glm", "knn", "gamLoess", "rf", "rpart"),
continue_on_fail = FALSE,
preProcess = c("center", "scale"))
# set the seed for reproducibility
set.seed(1, sample.kind = "Rounding")
# train multiple models at the same time - select features (age, ejection fraction, and serum creatinine)
train_models2 <- caretList(DEATH_EVENT ~ age + ejection_fraction + serum_creatinine,
data = heart_train,
trControl = my_control,
methodList = c("cforest", "glm", "knn", "gamLoess", "rf", "rpart"),
continue_on_fail = FALSE,
preProcess = c("center", "scale"))
# report model results in the training dataset - cforest
# ($results = per-tuning-value resampled metrics; $bestTune = selected row)
train_models$cforest$results
train_models$cforest$bestTune
# report model results in the training dataset - glm
train_models$glm$results
# report model results in the training dataset - knn
train_models$knn$results
train_models$knn$bestTune
# report model results in the training dataset - gamLoess
train_models$gamLoess$results
# report model results in the training dataset - rf
train_models$rf$results
train_models$rf$bestTune
# report model results in the training dataset - rpart
train_models$rpart$results
train_models$rpart$bestTune
# plot the accuracy of the models in the training set cross-validation - all features
resamples <- resamples(train_models)
dotplot(resamples, metric = "Accuracy",
main = "Figure 6: Accuracy across Models - All Features (Cross-Validation)")
# plot the accuracy of the models in the training set cross-validation - select features
# NOTE(review): figure captions jump from 6 to 8 here (7 is the variable
# importance figure below) -- confirm the numbering is intentional
resamples2 <- resamples(train_models2)
dotplot(resamples2, metric = "Accuracy",
main = "Figure 8: Accuracy across Models - Select Features (Cross-Validation)")
# dataframe of results of all models in the training cross-validation
# helper: accuracy/kappa statistics taken from a model's best tuning row.
# FIX: the original paired max(Accuracy) with min(AccuracySD) (and likewise
# for Kappa), which can mix values from different tuning rows; here all four
# statistics come from the single row with the highest cross-validated accuracy
cv_best <- function(model) {
  res <- model$results
  best <- which.max(res$Accuracy)
  c(res$Accuracy[best], res$AccuracySD[best],
    res$Kappa[best], res$KappaSD[best])
}
model_keys <- c("cforest", "glm", "knn", "gamLoess", "rf", "rpart")
stats_all <- sapply(train_models[model_keys], cv_best)   # all features
stats_sel <- sapply(train_models2[model_keys], cv_best)  # select features
train_results <- data.frame(
  Model = c("cForest", "GLM", "KNN", "Loess", "RF", "rpart"),
  Accuracy1 = stats_all[1, ], AccuracySD1 = stats_all[2, ],
  Kappa1 = stats_all[3, ], KappaSD1 = stats_all[4, ],
  Accuracy2 = stats_sel[1, ], AccuracySD2 = stats_sel[2, ],
  Kappa2 = stats_sel[3, ], KappaSD2 = stats_sel[4, ],
  row.names = NULL)
# table of accuracy and kappa in the cross-validation - all models
train_results %>%
  kbl(caption = "Accuracy across Models (Cross-Validation)",
      align = "lclclclcl", col.names = c("Model", "Accuracy", "(SD)", "Kappa", "(SD)", "Accuracy", "(SD)", "Kappa", "(SD)")) %>%
  add_header_above(c("", "All Features" = 4, "Select Features" = 4), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
# plot of tuning parameters - cforest (tuned over randomly selected predictors)
tuning_cf <- ggplot(train_models$cforest, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Number of Randomly Selected Predictors") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("cforest") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(plot.title = element_text(size = 12, vjust = 2, hjust = 0.5)) +
  theme(plot.margin = unit(c(0.5,0.25,0.5,0.5), "cm"))
# plot of tuning parameters - knn (tuned over the number of neighbours)
tuning_knn <- ggplot(train_models$knn, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Number of Neighbours") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("KNN") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(plot.title = element_text(size = 12, vjust = 2, hjust = 0.5)) +
  theme(plot.margin = unit(c(0.5,0.25,0.5,0.5), "cm"))
# plot of tuning parameters - rf (tuned over randomly selected predictors)
tuning_rf <- ggplot(train_models$rf, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  xlab("Number of Randomly Selected Predictors") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("Random Forest") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(plot.title = element_text(size = 12, vjust = 2, hjust = 0.5)) +
  theme(plot.margin = unit(c(0.5,0.25,0.5,0.5), "cm"))
# plot of tuning parameters - rpart
tuning_rpart <- ggplot(train_models$rpart, highlight = TRUE) +
  scale_y_continuous(breaks = pretty_breaks(n = 5)) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  # FIX: rpart is tuned over the complexity parameter (cp), not mtry; the
  # original axis label was copy-pasted from the mtry-based models
  xlab("Complexity Parameter (cp)") +
  ylab("Accuracy in Cross-Validation") +
  ggtitle("rpart") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(plot.title = element_text(size = 12, vjust = 2, hjust = 0.5)) +
  theme(plot.margin = unit(c(0.5,0.25,0.5,0.5), "cm"))
# arrange tuning plots together in a grid for presentation
grid.arrange(tuning_cf, tuning_knn, tuning_rf, tuning_rpart,
             ncol = 2,
             top = "Figure A8: Tuning Parameters across Models - All Features")
# plot variable importance for each relevant model
# (knn is omitted; the remaining five models expose varImp())
imp1 <- plot(varImp(train_models$cforest), xlab = "cforest")
imp2 <- plot(varImp(train_models$glm), xlab = "GLM")
imp3 <- plot(varImp(train_models$gamLoess), xlab = "gamLoess")
imp4 <- plot(varImp(train_models$rf), xlab = "Random Forest")
imp5 <- plot(varImp(train_models$rpart), xlab = "rpart")
# arrange variable importance plots together in a grid for presentation
# (layout_matrix: two plots per row; the last plot is centred via NA cells)
grid.arrange(imp1, imp2, imp3, imp4, imp5,
ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(NA,5,5,NA)),
top = "Figure 7: Variable Importance across Models (Cross-Validation)")
###########################################################
################## RESULTS IN TEST DATASET ################
###########################################################
### using all features
# predict in the test dataset - cforest
pred_cforest <- predict(train_models$cforest, heart_test, type = "raw")
# predict in the test dataset - gamLoess
pred_gamLoess <- predict(train_models$gamLoess, heart_test, type = "raw")
# predict in the test dataset - random forest
pred_rf <- predict(train_models$rf, heart_test, type = "raw")
# predict in the test dataset - rpart (comment was mislabelled "random forest")
pred_rpart <- predict(train_models$rpart, heart_test, type = "raw")
### using select features
# predict in the test dataset - cforest
pred_cforest2 <- predict(train_models2$cforest, heart_test, type = "raw")
# predict in the test dataset - gamLoess
pred_gamLoess2 <- predict(train_models2$gamLoess, heart_test, type = "raw")
# predict in the test dataset - random forest
pred_rf2 <- predict(train_models2$rf, heart_test, type = "raw")
# predict in the test dataset - rpart (comment was mislabelled "random forest")
pred_rpart2 <- predict(train_models2$rpart, heart_test, type = "raw")
# examine the performance of the models in the test set
# refactor: replace eight copy-pasted confusionMatrix()/bind_cols() blocks
# with one sapply over a named list; the list names become the data frame's
# column names and the byClass metric names are kept as row names, which
# the table below relies on to label its rows
test_preds <- list(cForest = pred_cforest, Loess = pred_gamLoess,
                   RF = pred_rf, rpart = pred_rpart,
                   cForest2 = pred_cforest2, Loess2 = pred_gamLoess2,
                   RF2 = pred_rf2, rpart2 = pred_rpart2)
mat_results <- as.data.frame(
  sapply(test_preds,
         function(pred) confusionMatrix(pred, heart_test$DEATH_EVENT,
                                        positive = "Yes")$byClass))
# examine results in a table
mat_results %>%
  kbl(caption = "Model Results in the Test Dataset", align = "lcccccccc",
      col.names = c("cForest", "Loess", "RF", "rpart", "cForest", "Loess", "RF", "rpart")) %>%
  add_header_above(c("", "All Features" = 4, "Select Features" = 4), bold = TRUE) %>%
  row_spec(0, bold = TRUE) %>%
  column_spec(1, bold = TRUE) %>%
  kable_classic(full_width = FALSE) %>%
  kable_styling(latex_options = "hold_position", font_size = 10) %>%
  kable_styling(latex_options = "scale_down")
###########################################################
######################## APPENDIX A #######################
###########################################################
### SUPPLEMENTAL FIGURES AND TABLES ###
### examine continuous variables across ANAEMIA ###
### all plots stored as separate objects ###
# plot of age across anaemia by death event
# (black points are the per-group means added by stat_summary)
# FIX applied throughout this section: legend.position = "none" replaces the
# invalid "blank"; jitter args spelled out instead of partial-matched h=/w=
pp_anm_age <- heart_data_f %>%
  ggplot(aes(age, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of creatinine phosphokinase levels across anaemia by death event
pp_anm_cp <- heart_data_f %>%
  ggplot(aes(creatinine_phosphokinase, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_log10(labels = comma) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Creatinine Phosphokinase (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction levels across anaemia by death event
pp_anm_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of platelet levels across anaemia by death event
pp_anm_pl <- heart_data_f %>%
  ggplot(aes(platelets, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine levels across anaemia by death event
# FIX applied throughout: legend.position = "none" replaces the invalid
# "blank"; jitter args spelled out instead of partial-matched h=/w=
pp_anm_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium levels across anaemia by death event
pp_anm_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of time across anaemia by death event
pp_anm_tm <- heart_data_f %>%
  ggplot(aes(time, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# create a common legend for the plots
# need to create an entire dummy plot (legend shown on the right so it can
# be extracted and shared by the whole grid)
pp_anm_tm_legend <- heart_data_f %>%
  ggplot(aes(time, anaemia, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 3, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -3)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "right") +
  theme(plot.margin = unit(c(0.5,0.25,0.5,0.25), "cm"))
# extract the legend as an object
shared_legend_5 <- extract_legend(pp_anm_tm_legend)
# arrange plots together in a grid for presentation
# (layout_matrix cell numbers index the grobs; NA leaves an empty cell)
grid.arrange(pp_anm_age, pp_anm_cp, pp_anm_ef, pp_anm_pl, pp_anm_sc, pp_anm_ss, pp_anm_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A1: Plots of the Continuous Variables by Anaemia and Patient Death",
             left = "Anaemia")
### examine continuous variables across DIABETES ###
### all plots stored as separate objects ###
# plot of age across diabetes by death event
# (black points are the per-group means added by stat_summary)
# FIX applied throughout this section: legend.position = "none" replaces the
# invalid "blank"; jitter args spelled out instead of partial-matched h=/w=
pp_db_age <- heart_data_f %>%
  ggplot(aes(age, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of creatinine phosphokinase levels across diabetes by death event
pp_db_cp <- heart_data_f %>%
  ggplot(aes(creatinine_phosphokinase, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_log10(labels = comma) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Creatinine Phosphokinase (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction levels across diabetes by death event
pp_db_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of platelet levels across diabetes by death event
pp_db_pl <- heart_data_f %>%
  ggplot(aes(platelets, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine levels across diabetes by death event
# FIX applied throughout: legend.position = "none" replaces the invalid
# "blank"; jitter args spelled out instead of partial-matched h=/w=
pp_db_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium levels across diabetes by death event
pp_db_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of time across diabetes by death event
pp_db_tm <- heart_data_f %>%
  ggplot(aes(time, diabetes, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(height = 0.25, width = 0.5),
             size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point",
               size = 3, colour = "black") +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1),
        axis.title.y = element_blank(),
        legend.position = "none",
        plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
# (re-uses shared_legend_5 extracted in the anaemia section above)
grid.arrange(pp_db_age, pp_db_cp, pp_db_ef, pp_db_pl, pp_db_sc, pp_db_ss, pp_db_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A2: Plots of the Continuous Variables by Diabetes and Patient Death",
             left = "Diabetes")
### examine continuous variables across HIGH BLOOD PRESSURE ###
### all plots stored as separate objects ###
# NOTE: legend.position changed from the invalid "blank" to the documented
# "none" (ggplot2 >= 3.5 rejects unrecognized position strings)
# plot of age across high blood pressure by death event
pp_hbp_age <- heart_data_f %>%
  ggplot(aes(age, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  # black points mark the per-group mean
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of creatinine phosphokinase levels across high blood pressure by death event
pp_hbp_cp <- heart_data_f %>%
  ggplot(aes(creatinine_phosphokinase, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_log10(labels = comma) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Creatinine Phosphokinase (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction levels across high blood pressure by death event
pp_hbp_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of platelet levels across high blood pressure by death event
pp_hbp_pl <- heart_data_f %>%
  ggplot(aes(platelets, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine levels across high blood pressure by death event
pp_hbp_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium levels across high blood pressure by death event
pp_hbp_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of time across high blood pressure by death event
pp_hbp_tm <- heart_data_f %>%
  ggplot(aes(time, high_blood_pressure, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
grid.arrange(pp_hbp_age, pp_hbp_cp, pp_hbp_ef, pp_hbp_pl, pp_hbp_sc, pp_hbp_ss, pp_hbp_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A3: Plots of the Continuous Variables by High Blood Pressure and Patient Death",
             left = "High Blood Pressure ")
### examine continuous variables across SEX ###
### all plots stored as separate objects ###
# NOTE: legend.position changed from the invalid "blank" to the documented
# "none" (ggplot2 >= 3.5 rejects unrecognized position strings)
# plot of age across sex by death event
pp_sex_age <- heart_data_f %>%
  ggplot(aes(age, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  # black points mark the per-group mean
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  # rotate the (longer) sex labels so they fit the narrow y axis
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of creatinine phosphokinase levels across sex by death event
pp_sex_cp <- heart_data_f %>%
  ggplot(aes(creatinine_phosphokinase, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_log10(labels = comma) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Creatinine Phosphokinase (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction levels across sex by death event
pp_sex_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of platelet levels across sex by death event
pp_sex_pl <- heart_data_f %>%
  ggplot(aes(platelets, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine levels across sex by death event
pp_sex_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium levels across sex by death event
pp_sex_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of time across sex by death event
pp_sex_tm <- heart_data_f %>%
  ggplot(aes(time, sex, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(axis.text.y = element_text(angle = 90, hjust = 0.5)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
grid.arrange(pp_sex_age, pp_sex_cp, pp_sex_ef, pp_sex_pl, pp_sex_sc, pp_sex_ss, pp_sex_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A4: Plots of the Continuous Variables by Sex and Patient Death",
             left = "Sex")
### examine continuous variables across SMOKING ###
### all plots stored as separate objects ###
# NOTE: legend.position changed from the invalid "blank" to the documented
# "none" (ggplot2 >= 3.5 rejects unrecognized position strings)
# plot of age across smoking by death event
pp_smk_age <- heart_data_f %>%
  ggplot(aes(age, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  # black points mark the per-group mean
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of creatinine phosphokinase levels across smoking by death event
pp_smk_cp <- heart_data_f %>%
  ggplot(aes(creatinine_phosphokinase, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_log10(labels = comma) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Creatinine Phosphokinase (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction levels across smoking by death event
pp_smk_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of platelet levels across smoking by death event
pp_smk_pl <- heart_data_f %>%
  ggplot(aes(platelets, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(labels = comma, breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Platelets") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine levels across smoking by death event
pp_smk_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium levels across smoking by death event
pp_smk_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of time across smoking by death event
pp_smk_tm <- heart_data_f %>%
  ggplot(aes(time, smoking, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.5), size = 2, alpha = 0.75) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  stat_summary(aes(group = DEATH_EVENT), fun = "mean", geom = "point", size = 3, colour = "black") +
  xlab("Length of Follow-up") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
grid.arrange(pp_smk_age, pp_smk_cp, pp_smk_ef, pp_smk_pl, pp_smk_sc, pp_smk_ss, pp_smk_tm, shared_legend_5,
             ncol = 2, layout_matrix = rbind(c(1,1,2,2), c(3,3,4,4), c(5,5,6,6), c(NA,7,7,8)),
             top = "Figure A5: Plots of the Continuous Variables by Smoking and Patient Death",
             left = "Smoking")
### looking at interactions between continuous variables - TIME ###
### all plots stored as separate objects ###
# NOTE: legend.position changed from the invalid "blank" to the documented
# "none" (ggplot2 >= 3.5 rejects unrecognized position strings); the dummy
# legend plot below keeps its valid "top" position
# plot of age and time by death event
pp_tm_age <- heart_data_f %>%
  ggplot(aes(age, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  # normal-theory ellipse summarizing the spread of each death-event group
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction and time by death event
pp_tm_ef <- heart_data_f %>%
  ggplot(aes(ejection_fraction, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine and time by death event
pp_tm_sc <- heart_data_f %>%
  ggplot(aes(serum_creatinine, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_log10() +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium and time by death event
pp_tm_ss <- heart_data_f %>%
  ggplot(aes(serum_sodium, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# create a common legend for the plots
# need to create an entire dummy plot (only its legend is used)
pp_tm_ss_legend <- heart_data_f %>%
  ggplot(aes(serum_sodium, time, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_blank()) +
  theme(legend.position = "top") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# extract the legend as an object (extract_legend defined earlier in script)
shared_legend_10 <- extract_legend(pp_tm_ss_legend)
# arrange plots together in a grid for presentation
grid.arrange(shared_legend_10,
             arrangeGrob(pp_tm_age, pp_tm_ef, pp_tm_sc, pp_tm_ss,
                         ncol = 2),
             nrow = 2, heights = c(1,20),
             top = "Figure A6: Plots of the Continuous Variables by Days to Follow-up and Patient Death",
             left = "Length of Follow-up")
###################################################################################################
### looking at interactions between continuous variables - MIXED ###
### all plots stored as separate objects ###
# NOTE: legend.position changed from the invalid "blank" to the documented
# "none" (ggplot2 >= 3.5 rejects unrecognized position strings);
# "secrum" comment typos corrected to "serum"
# plot of ejection fraction and age by death event
pp_age_ef <- heart_data_f %>%
  ggplot(aes(age, ejection_fraction, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  # normal-theory ellipse summarizing the spread of each death-event group
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  ylab("Ejection Fraction") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum creatinine and age by death event
pp_age_sc <- heart_data_f %>%
  ggplot(aes(age, serum_creatinine, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium and age by death event
pp_age_ss <- heart_data_f %>%
  ggplot(aes(age, serum_sodium, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Age") +
  ylab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction and serum creatinine by death event
pp_ef_sc <- heart_data_f %>%
  ggplot(aes(ejection_fraction, serum_creatinine, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Ejection Fraction") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of ejection fraction and serum sodium by death event
pp_ef_ss <- heart_data_f %>%
  ggplot(aes(ejection_fraction, serum_sodium, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = pretty_breaks(n = 10)) +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Ejection Fraction") +
  ylab("Serum Sodium") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# plot of serum sodium and serum creatinine by death event
pp_ss_sc <- heart_data_f %>%
  ggplot(aes(serum_sodium, serum_creatinine, color = DEATH_EVENT, shape = DEATH_EVENT)) +
  geom_point(position = position_jitter(h = 0.25, w = 0.25), size = 2, alpha = 0.75) +
  stat_ellipse(lwd = 1) +
  scale_x_continuous(breaks = pretty_breaks(n = 10)) +
  scale_y_log10() +
  scale_color_brewer(name = "Death", palette = "Paired") +
  scale_shape_manual(name = "Death", values = c(15, 17)) +
  xlab("Serum Sodium") +
  ylab("Serum Creatinine (log)") +
  theme_light() +
  theme(axis.title.x = element_text(vjust = -1)) +
  theme(axis.title.y = element_text(vjust = 2)) +
  theme(legend.position = "none") +
  theme(plot.margin = unit(c(0.5,0.25,0.25,0.25), "cm"))
# arrange plots together in a grid for presentation
grid.arrange(shared_legend_10,
             arrangeGrob(pp_age_ef, pp_age_sc, pp_age_ss, pp_ef_sc, pp_ef_ss, pp_ss_sc,
                         ncol = 2),
             nrow = 2, heights = c(1,20),
             top = "Figure A7: Plots of Bivariate Continuous Variables by Patient Death")
###########################################################
######################## APPENDIX B #######################
###########################################################
### ENVIRONMENT ###
# print operating system and R version
# (`version` is the base R built-in variable; auto-printing it records the
# platform, OS, and R release used for this analysis, for reproducibility)
version
|
adlearn <-
function(u, label, nset=16){
  ## Adaptively learn an "informer" set of nset compounds (rows of u).
  ##
  ## Args:
  ##   u:     feature matrix / data frame of compounds (rows = compounds).
  ##   label: cluster/class labels for the rows of u.
  ##   nset:  total number of informers to return (default 16).
  ##
  ## Returns: vector of informer indices of length >= nset, grown adaptively
  ##   by adpstep() (defined elsewhere in this package) from an initial
  ##   lasso-selected seed set.
  ##
  ## requireNamespace() instead of require(): require() only returns FALSE on
  ## failure, which would lead to a confusing error later; this fails fast.
  if (!requireNamespace("glmnet", quietly = TRUE)) {
    stop("adlearn() requires the 'glmnet' package", call. = FALSE)
  }
  # find initial informers as nset/2 compounds predictive of input clustering,
  # via a grouped (multinomial) lasso fit along glmnet's lambda path
  fit.cur <- glmnet::glmnet(as.matrix(u), as.factor(label), family="multinomial",
                            alpha=1, dfmax=20, type.multinomial="grouped")
  lambd <- fit.cur$lambda
  jj <- length(lambd)
  lamd.cur <- lambd[jj]
  tmp_coeffs <- coef(fit.cur, s=lamd.cur)
  # @i holds the row indices of the nonzero entries of the sparse coefficient
  # matrix for the first class; [-1] drops the intercept entry (as in original)
  infor.og <- tmp_coeffs[[1]]@i[-1]
  # step toward larger lambda values (fewer selected features) until at most
  # nset/2 features remain selected
  while(length(infor.og) > round(nset/2)){
    jj <- jj - 1
    if (jj < 1) {
      # original code would fail here with an obscure subscript error
      stop("adlearn(): no lambda on the path selects <= nset/2 features",
           call. = FALSE)
    }
    lamd.cur <- lambd[jj]
    tmp_coeffs <- coef(fit.cur, s = lamd.cur)
    infor.og <- tmp_coeffs[[1]]@i[-1]
  }
  ## build remaining informer set to predict non-informers adaptively
  infor.tmp <- infor.og
  while(length(infor.tmp) < nset){
    infor.tmp <- adpstep(u, infor.tmp)
  }
  infor.tmp
}
| /informRset/R/adlearn.R | no_license | wiscstatman/esdd | R | false | false | 827 | r | adlearn <-
function(u, label, nset=16){
# Adaptively learn an "informer" set of nset compounds (rows of u).
#   u:     feature matrix / data frame of compounds (rows = compounds)
#   label: cluster/class labels for the rows of u
#   nset:  total number of informers to return (default 16)
# Returns a vector of informer indices, grown by adpstep() (defined
# elsewhere in this package) from a lasso-selected seed set.
# NOTE(review): require() only returns FALSE when glmnet is missing;
# requireNamespace() + an explicit stop() would fail faster and clearer.
require(glmnet)
# find initial informers as nset/2 compounds predictive of input clustering
# (grouped multinomial lasso along glmnet's lambda path)
fit.cur <- glmnet(as.matrix(u), as.factor(label), family="multinomial", alpha=1, dfmax=20, type.multinomial="grouped")
lambd <- fit.cur$lambda
# start at the last (smallest) lambda on the path
jj <- length(lambd)
lamd.cur <- lambd[jj]
tmp_coeffs <- coef(fit.cur, s=lamd.cur)
# @i: row indices of the nonzero entries of the sparse coefficient matrix
# for the first class; [-1] drops the intercept entry
infor.og <- tmp_coeffs[[1]]@i[-1]
# walk to larger lambdas (fewer selected features) until <= nset/2 remain;
# NOTE(review): jj can reach 0 if no lambda is sparse enough -- TODO guard
while(length(infor.og) > round(nset/2) ){
jj <- jj-1
lamd.cur <- lambd[jj]
tmp_coeffs <- coef(fit.cur, s = lamd.cur ) ## 86 104 125 137 253 313 349 357
infor.og <- tmp_coeffs[[1]]@i[-1]
}
## build remaining informer set to predict non-informers adaptively
infor.tmp <- infor.og
while(length(infor.tmp) < nset)
{
infor.tmp <- adpstep(u, infor.tmp)
}
return(infor.tmp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CT_Skull_Strip.R
\name{CT_Skull_Strip}
\alias{CT_Skull_Strip}
\title{CT Skull Stripping within R}
\usage{
CT_Skull_Strip(img, outfile = NULL, keepmask = TRUE, maskfile = NULL,
inskull_mesh = FALSE, retimg = TRUE, reorient = FALSE,
intern = TRUE, betcmd = "bet2", opts = "-f 0.01 -v",
presmooth = TRUE, remask = TRUE, refill = FALSE,
refill.thresh = 0.75, sigma = 1, lthresh = 0, uthresh = 100,
verbose = TRUE, ...)
}
\arguments{
\item{img}{(character) File to be skull stripped or object of class
nifti}
\item{outfile}{(character) output filename}
\item{keepmask}{(logical) Should we keep the mask?}
\item{maskfile}{(character) Filename for mask (if \code{keepmask = TRUE}).
If \code{NULL}, then will do \code{paste0(outfile, "_Mask")}.}
\item{inskull_mesh}{(logical) Create inskull_mesh file from bet?
(Warning - will take longer)
This is an exterior surface of the brain. (experimental)
Also, if \code{outfile} is \code{NULL}, then this will be created in
a temporary directory and not be retrieved.}
\item{retimg}{(logical) return image of class nifti}
\item{reorient}{(logical) If retimg, should file be reoriented when read in?
Passed to \code{\link{readNIfTI}}.}
\item{intern}{(logical) pass to \code{\link{system}}}
\item{betcmd}{(character) bet command to be used, see \code{\link{fslbet}}}
\item{opts}{(character) additional options to \code{\link{fslbet}}}
\item{presmooth}{(logical) indicator if pre-smoothing should be
done before BET}
\item{remask}{(logical) Mask the smoothed image with HU mask from initial
step?}
\item{refill}{(logical) indicator to post-smooth mask and then fill}
\item{refill.thresh}{(numeric) Value to threshold post-smoothed mask}
\item{sigma}{(integer) size of Gaussian kernel passed to
\code{\link{fslsmooth}} if \code{presmooth} is \code{TRUE}}
\item{lthresh}{(default: 0) Lower value to threshold CT
\code{\link{fslthresh}}}
\item{uthresh}{(default: 100) Upper value to threshold CT
\code{\link{fslthresh}}}
\item{verbose}{(logical) Should diagnostic output be printed?}
\item{...}{additional arguments passed to \code{\link{fslbet}}.}
}
\value{
character or logical depending on intern
}
\description{
Skull Stripping (using FSL's BET) a CT file using \code{fslr}
functions
}
| /man/CT_Skull_Strip.Rd | no_license | doctoryfx/ichseg | R | false | true | 2,321 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CT_Skull_Strip.R
\name{CT_Skull_Strip}
\alias{CT_Skull_Strip}
\title{CT Skull Stripping within R}
\usage{
CT_Skull_Strip(img, outfile = NULL, keepmask = TRUE, maskfile = NULL,
inskull_mesh = FALSE, retimg = TRUE, reorient = FALSE,
intern = TRUE, betcmd = "bet2", opts = "-f 0.01 -v",
presmooth = TRUE, remask = TRUE, refill = FALSE,
refill.thresh = 0.75, sigma = 1, lthresh = 0, uthresh = 100,
verbose = TRUE, ...)
}
\arguments{
\item{img}{(character) File to be skull stripped or object of class
nifti}
\item{outfile}{(character) output filename}
\item{keepmask}{(logical) Should we keep the mask?}
\item{maskfile}{(character) Filename for mask (if \code{keepmask = TRUE}).
If \code{NULL}, then will do \code{paste0(outfile, "_Mask")}.}
\item{inskull_mesh}{(logical) Create inskull_mesh file from bet?
(Warning - will take longer)
This is an exterior surface of the brain. (experimental)
Also, if \code{outfile} is \code{NULL}, then this will be created in
a temporary directory and not be retrieved.}
\item{retimg}{(logical) return image of class nifti}
\item{reorient}{(logical) If retimg, should file be reoriented when read in?
Passed to \code{\link{readNIfTI}}.}
\item{intern}{(logical) pass to \code{\link{system}}}
\item{betcmd}{(character) bet command to be used, see \code{\link{fslbet}}}
\item{opts}{(character) additional options to \code{\link{fslbet}}}
\item{presmooth}{(logical) indicator if pre-smoothing should be
done before BET}
\item{remask}{(logical) Mask the smoothed image with HU mask from initial
step?}
\item{refill}{(logical) indicator to post-smooth mask and then fill}
\item{refill.thresh}{(numeric) Value to threshold post-smoothed mask}
\item{sigma}{(integer) size of Gaussian kernel passed to
\code{\link{fslsmooth}} if \code{presmooth} is \code{TRUE}}
\item{lthresh}{(default: 0) Lower value to threshold CT
\code{\link{fslthresh}}}
\item{uthresh}{(default: 100) Upper value to threshold CT
\code{\link{fslthresh}}}
\item{verbose}{(logical) Should diagnostic output be printed?}
\item{...}{additional arguments passed to \code{\link{fslbet}}.}
}
\value{
character or logical depending on intern
}
\description{
Skull Stripping (using FSL's BET) a CT file using \code{fslr}
functions
}
|
#
# In this notebook we are going to explore a number of distributions that
# naturally arise when doing estimation: the normal, the standard normal,
# the chi-squared, Student's t, and the F distribution.
#
rm(list = ls())
library(dplyr)
library(ggplot2)
source("common.R")

M <- 1e4  # number of simulated draws per distribution

# Normal distribution ------------------------------------------------------
# Generate M random numbers from N(mu, s^2).
mu <- 5
s <- 1
x <- rnorm(n = M, mean = mu, sd = s)
df <- data.frame(x = x)
df %>%
  ggplot(aes(x = x)) + geom_histogram(fill = "lightgray") +
  geom_vline(aes(xintercept = mean(x)), col = "blue") +
  theme_light() +
  labs(title = "Histogram of 10000 i.i.d. x ~ N(5, 1)", x = "x")

# Z -------------------------------------------------------------------------
# The standard normal arises when we standardize a normal distribution by
# removing the mean and dividing by s.
z <- (x - mu) / s
df <- cbind(df, z = z)
df %>%
  ggplot(aes(x = x)) +
  geom_histogram(fill = "gray", alpha = .5) +
  geom_vline(aes(xintercept = mean(x)), col = "blue") +
  geom_histogram(fill = "blue", alpha = .5, aes(x = z)) +
  geom_vline(aes(xintercept = mean(z)), col = "blue") +
  theme_light() +
  labs(title = "Histogram of 10000 i.i.d. x ~ N(5, 1), z ~ N(0, 1)", x = "x")

# We can always recover the normal from the standardized r.v. by multiplying
# by s and adding the mean component back.
zx <- z * s + mu
df <- cbind(df, zx = zx)
df %>%
  ggplot(aes(x = x)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(x)), col = "blue") +
  geom_histogram(fill = "blue", alpha = .5, aes(x = z)) +
  geom_vline(aes(xintercept = mean(z)), col = "blue") +
  geom_histogram(col = "green", fill = "green", alpha = .2, aes(x = zx)) +
  # Bug fix: mark the mean of zx here (this line previously re-drew mean(z)).
  geom_vline(aes(xintercept = mean(zx)), col = "blue") +
  theme_light() +
  labs(title = "Histogram of 10000 i.i.d. x ~ N(5, 1)", x = "x")

# Chi-squared ---------------------------------------------------------------
# The chi-squared distribution with v degrees of freedom arises as the sum of
# v squared standard normal random variables.
degree_of_freedom <- 10
chi_2 <- rep(0, M)
for (i in seq_len(degree_of_freedom)) {
  z_i <- rnorm(n = M, mean = 0, sd = 1)
  chi_2 <- chi_2 + z_i^2
}
df <- cbind(df, chi2 = chi_2)
# Compare against R's built-in generator.
df <- cbind(df, chi2r = rchisq(n = M, df = degree_of_freedom))
# The means almost perfectly overlap; the remaining small difference is due
# to sampling randomness. A replication study could average this out, but
# for now we are good.
df %>%
  ggplot(aes(x = chi2)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(chi2)), col = "blue") +
  geom_histogram(aes(x = chi2r), fill = "blue", alpha = .3) +
  geom_vline(aes(xintercept = mean(chi2r)), col = "red") +
  theme_light() +
  labs(title = "Chi2(10)",
       subtitle = "Comparing 10000 generated Chi2(10) against R's built-in function", x = "x")

# t ---------------------------------------------------------------------
# Given a chi-squared r.v. with v degrees of freedom and an independent
# standard normal z, a t-distributed r.v. arises as z / sqrt(chi2 / v).
# (Renamed from `c`, which shadowed the base concatenation function.)
chi2_draws <- chi_2
v <- degree_of_freedom
z <- rnorm(n = M, mean = 0, sd = 1)
l <- z / sqrt(chi2_draws / v)
df <- cbind(df, t = l)
# Compare against R's built-in generator.
df <- cbind(df, tr = rt(n = M, df = v))
# As in the chi-squared case both distributions behave almost identically;
# the t distribution looks like a standard normal with wider/fatter tails.
df %>%
  ggplot(aes(x = t)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(t)), col = "blue") +
  geom_histogram(aes(x = tr), fill = "green", alpha = .3) +
  geom_vline(aes(xintercept = mean(tr)), col = "red") +
  theme_light() +
  labs(title = "t(10)",
       subtitle = "Comparing 10000 generated t(10) against R's built-in function", x = "x")

# F ---------------------------------------------------------------------
# The F distribution arises as the ratio of two independent chi-squared
# r.v.s, each divided by its own degrees of freedom.
dof_1 <- 10
dof_2 <- 15
c1 <- sim_chi2(degree_of_freedom = dof_1, M = M)
c2 <- sim_chi2(degree_of_freedom = dof_2, M = M)
f <- (c1 / dof_1) / (c2 / dof_2)
df <- cbind(df, f = f)
# Compare against R's built-in generator.
df <- cbind(df, fr = rf(n = M, df1 = dof_1, df2 = dof_2))
df %>%
  ggplot(aes(x = f)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(f)), col = "blue") +
  geom_histogram(aes(x = fr), fill = "green", alpha = .3) +
  geom_vline(aes(xintercept = mean(fr)), col = "red") +
  theme_light() +
  # Bug fix: this plot shows F(10, 15), not t(10).
  labs(title = "F(10, 15)",
       subtitle = "Comparing 10000 generated F(10, 15) against R's built-in function", x = "x")
| /types_of_probability_distributions.R | no_license | anhnguyendepocen/Von_Auer_-_Econometry | R | false | false | 4,599 | r | #
# In this notebook we are going to explore a number of distributions that
# naturally arise when doing estimation: the normal, the standard normal,
# the chi-squared, Student's t, and the F distribution.
#
rm(list = ls())
library(dplyr)
library(ggplot2)
source("common.R")

M <- 1e4  # number of simulated draws per distribution

# Normal distribution ------------------------------------------------------
# Generate M random numbers from N(mu, s^2).
mu <- 5
s <- 1
x <- rnorm(n = M, mean = mu, sd = s)
df <- data.frame(x = x)
df %>%
  ggplot(aes(x = x)) + geom_histogram(fill = "lightgray") +
  geom_vline(aes(xintercept = mean(x)), col = "blue") +
  theme_light() +
  labs(title = "Histogram of 10000 i.i.d. x ~ N(5, 1)", x = "x")

# Z -------------------------------------------------------------------------
# The standard normal arises when we standardize a normal distribution by
# removing the mean and dividing by s.
z <- (x - mu) / s
df <- cbind(df, z = z)
df %>%
  ggplot(aes(x = x)) +
  geom_histogram(fill = "gray", alpha = .5) +
  geom_vline(aes(xintercept = mean(x)), col = "blue") +
  geom_histogram(fill = "blue", alpha = .5, aes(x = z)) +
  geom_vline(aes(xintercept = mean(z)), col = "blue") +
  theme_light() +
  labs(title = "Histogram of 10000 i.i.d. x ~ N(5, 1), z ~ N(0, 1)", x = "x")

# We can always recover the normal from the standardized r.v. by multiplying
# by s and adding the mean component back.
zx <- z * s + mu
df <- cbind(df, zx = zx)
df %>%
  ggplot(aes(x = x)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(x)), col = "blue") +
  geom_histogram(fill = "blue", alpha = .5, aes(x = z)) +
  geom_vline(aes(xintercept = mean(z)), col = "blue") +
  geom_histogram(col = "green", fill = "green", alpha = .2, aes(x = zx)) +
  # Bug fix: mark the mean of zx here (this line previously re-drew mean(z)).
  geom_vline(aes(xintercept = mean(zx)), col = "blue") +
  theme_light() +
  labs(title = "Histogram of 10000 i.i.d. x ~ N(5, 1)", x = "x")

# Chi-squared ---------------------------------------------------------------
# The chi-squared distribution with v degrees of freedom arises as the sum of
# v squared standard normal random variables.
degree_of_freedom <- 10
chi_2 <- rep(0, M)
for (i in seq_len(degree_of_freedom)) {
  z_i <- rnorm(n = M, mean = 0, sd = 1)
  chi_2 <- chi_2 + z_i^2
}
df <- cbind(df, chi2 = chi_2)
# Compare against R's built-in generator.
df <- cbind(df, chi2r = rchisq(n = M, df = degree_of_freedom))
# The means almost perfectly overlap; the remaining small difference is due
# to sampling randomness. A replication study could average this out, but
# for now we are good.
df %>%
  ggplot(aes(x = chi2)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(chi2)), col = "blue") +
  geom_histogram(aes(x = chi2r), fill = "blue", alpha = .3) +
  geom_vline(aes(xintercept = mean(chi2r)), col = "red") +
  theme_light() +
  labs(title = "Chi2(10)",
       subtitle = "Comparing 10000 generated Chi2(10) against R's built-in function", x = "x")

# t ---------------------------------------------------------------------
# Given a chi-squared r.v. with v degrees of freedom and an independent
# standard normal z, a t-distributed r.v. arises as z / sqrt(chi2 / v).
# (Renamed from `c`, which shadowed the base concatenation function.)
chi2_draws <- chi_2
v <- degree_of_freedom
z <- rnorm(n = M, mean = 0, sd = 1)
l <- z / sqrt(chi2_draws / v)
df <- cbind(df, t = l)
# Compare against R's built-in generator.
df <- cbind(df, tr = rt(n = M, df = v))
# As in the chi-squared case both distributions behave almost identically;
# the t distribution looks like a standard normal with wider/fatter tails.
df %>%
  ggplot(aes(x = t)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(t)), col = "blue") +
  geom_histogram(aes(x = tr), fill = "green", alpha = .3) +
  geom_vline(aes(xintercept = mean(tr)), col = "red") +
  theme_light() +
  labs(title = "t(10)",
       subtitle = "Comparing 10000 generated t(10) against R's built-in function", x = "x")

# F ---------------------------------------------------------------------
# The F distribution arises as the ratio of two independent chi-squared
# r.v.s, each divided by its own degrees of freedom.
dof_1 <- 10
dof_2 <- 15
c1 <- sim_chi2(degree_of_freedom = dof_1, M = M)
c2 <- sim_chi2(degree_of_freedom = dof_2, M = M)
f <- (c1 / dof_1) / (c2 / dof_2)
df <- cbind(df, f = f)
# Compare against R's built-in generator.
df <- cbind(df, fr = rf(n = M, df1 = dof_1, df2 = dof_2))
df %>%
  ggplot(aes(x = f)) +
  geom_histogram(fill = "gray", alpha = .8) +
  geom_vline(aes(xintercept = mean(f)), col = "blue") +
  geom_histogram(aes(x = fr), fill = "green", alpha = .3) +
  geom_vline(aes(xintercept = mean(fr)), col = "red") +
  theme_light() +
  # Bug fix: this plot shows F(10, 15), not t(10).
  labs(title = "F(10, 15)",
       subtitle = "Comparing 10000 generated F(10, 15) against R's built-in function", x = "x")
|
# data <- as.data.frame(read.csv("data.csv"))
library(stringr)
#-----------------------------------------------
# Importation / Exportation of a country
#-----------------------------------------------

#-------------------
# Initialization
#-------------------
country <- "Canada"
# Regular expression splitting a category path of the form
# "/top/second/third" into its three components.
category_regex <- "/(.*)/(.*)/(.*)"

#---------------------------
# Analysis - Exportation
#---------------------------
# Rows where the country is the origin -> products exported by the country.
matching_vector <- str_detect(data[, "origin"], country)
country_cat <- data[matching_vector, "category"]
cat_parts <- str_match(country_cat, category_regex)
# Count occurrences of the second-level category (match column 3), sort with
# the biggest first, and keep the 10 most important ones. head() is safe
# even when fewer than 10 categories exist (the old `[1:10]` produced NA
# rows in that case).
tab_exp <- table(cat_parts[, 3])
tab_exp <- sort(tab_exp, decreasing = TRUE)
tab_exp <- head(tab_exp, 10)

#---------------------------
# Analysis - Importation
#---------------------------
# Rows where the country is the destination -> products imported by the
# country; same treatment as above.
matching_vector <- str_detect(data[, "destination"], country)
country_cat <- data[matching_vector, "category"]
cat_parts <- str_match(country_cat, category_regex)
tab_imp <- table(cat_parts[, 3])
tab_imp <- sort(tab_imp, decreasing = TRUE)
tab_imp <- head(tab_imp, 10)

#-------------------------
# Analysis - Fusion
#-------------------------
tab_exp <- as.data.frame(tab_exp)
tab_imp <- as.data.frame(tab_imp)
# Outer merge so both data frames share the same category labels.
tab <- merge(tab_exp, tab_imp, by.x = "Var1", by.y = "Var1", all = TRUE)
# A category present on only one side yields NA counts -> substitute 0.
tab[is.na(tab)] <- 0

#---------------------------
# Pie Chart - Exportation
#---------------------------
# Plot the two pie charts side by side on the same device.
par(mfrow = c(1, 2))
# 1 - Labels: percentage per category (one digit after the comma); hide the
#     label entirely when the share is 0 so the chart stays readable.
piepercent <- round(100 * tab[, 2] / sum(tab[, 2]), 1)
lab <- ifelse(piepercent == 0, "", paste(piepercent, "%", sep = " "))
# 2 - Colors: one per category, shared by both charts and the legend.
#     (Renamed from `c`, which shadowed the base concatenation function.)
palette_cols <- rainbow(length(tab[, 1]))
# 3 - Plot:
pie(piepercent, labels = lab, main = "Exportation", col = palette_cols)

#---------------------------
# Pie Chart - Importation
#---------------------------
# 1 - Labels (same scheme as above, on the importation counts):
piepercent <- round(100 * tab[, 3] / sum(tab[, 3]), 1)
lab <- ifelse(piepercent == 0, "", paste(piepercent, "%", sep = " "))
# 2 - Plot:
pie(piepercent, labels = lab, main = "Importation", col = palette_cols)

#------------------
# General - Plot
#------------------
par(oma = c(0, 0, 1.8, 0))
title(country, outer = TRUE)
legend(x = -4.5, y = -1, tab[, 1], cex = 0.8, fill = palette_cols, ncol = 3,
       border = NA, xpd = NA)
| /Stats/Country_Import+Export.R | no_license | SimonDele/Enlighten-DarkWeb-Markets-with-Data-Mining | R | false | false | 3,564 | r |
#data <- as.data.frame(read.csv("data.csv"))
library(stringr)
#-----------------------------------------------
# Importation / Exportation of a country
#-----------------------------------------------
#-------------------
# Initialization
#-------------------
country <- "Canada"
#---------------------------
# Analysis - Exportation
#---------------------------
# Country as origin
matching_vector <- str_detect(data[,"origin"], country)
# list of the categories (among the line that have "Country" as origin)
# -> Products (categories) exporting by the country
country_cat <- data[matching_vector,"category"]
# Handling of this categories
# Regular expression for spliting the categories
regex <- "/(.*)/(.*)/(.*)"
cat <- str_match(country_cat, regex)
# Counting this categories
tab_exp <- table(cat[,3]) #cat[,3] : 2nd category
tab_exp <- sort(tab_exp, decreasing = TRUE) # Sorting (biggest in first)
tab_exp <- tab_exp[1:10] # Taking only the most important
#---------------------------
# Analysis - Importation
#---------------------------
# Country as destination
matching_vector <- str_detect(data[,"destination"], country)
# list of the categories (among the line that have "Country" as destination)
# -> Products (categories) importing by the country
country_cat <- data[matching_vector,"category"]
# Handling of this categories
# Regular expression for spliting the categories
regex <- "/(.*)/(.*)/(.*)"
cat <- str_match(country_cat, regex)
# Counting this categories
tab_imp <- table(cat[,3]) #cat[,3] : 2nd category
tab_imp <- sort(tab_imp, decreasing = TRUE) # Sorting (biggest in first)
tab_imp <- tab_imp[1:10] # Taking only the most important
#-------------------------
# Analysis - Fusion
#-------------------------
# Transformation in data frame
tab_exp <- as.data.frame(tab_exp)
tab_imp <- as.data.frame(tab_imp)
# Merger of the 2 data frame in order to have the same labels
tab <- merge(tab_exp,tab_imp,by.x="Var1",by.y="Var1",all = TRUE)
# Handling of the "NA" value (substitution by 0)
for (j in 2:3) {
for(i in 1:length(tab[,j])){
if(is.na(tab[i,j])) {tab[i,j] <-0}
}
}
#---------------------------
# Pie Chart - Exporation
#---------------------------
# ploting 2 graphics om the same picture
par(mfrow = c(1,2))
# 1- Labels :
# Calculation in percentage
piepercent <- round(100*tab[,2]/sum(tab[,2]), 1)
# round(a,1) : one digit after the comma
lab <- c()
for(i in 1:length(piepercent)) {
if(piepercent[[i]] == 0) {lab[i] <- ""}
else {lab[i] <- paste(piepercent[[i]], "%", sep=" ")}
}
# 2- Colors :
c <- rainbow(length(tab[,1]))
# 3- Plot :
pie(piepercent,labels=lab,main="Exportation",col=c)
#---------------------------
# Pie Chart - Importation
#---------------------------
# 1- Labels :
# Calculation in percentage
piepercent <- round(100*tab[,3]/sum(tab[,3]), 1)
# round(a,1) : one digit after the comma
lab <- c()
for(i in 1:length(piepercent)) {
if(piepercent[[i]] == 0) {lab[i] <- ""}
else {lab[i] <- paste(piepercent[[i]], "%", sep=" ")}
}
# 2- Plot :
pie(piepercent,labels=lab,main="Importation",col=c)
#------------------
# General - Plot
#-----------------
par(oma=c(0,0,1.8,0))
title(country,outer=TRUE)
legend(x=-4.5,y=-1,tab[,1], cex = 0.8, fill=c,ncol=3,border=NA, xpd=NA)
|
#combine train and test data
y_raw <- rbind(yTest, yTrain)
# note: in activities, col #1 is id, col #2 is name
# replace activity id in Y with activity name
y_raw[, 1] <- activities[y_raw[, 1], 2]
y <- y_raw
names(y) <- "activity"
| /prepare_y.R | no_license | DolphinWorld/coursera_cleardata | R | false | false | 233 | r | #combine train and test data
y_raw <- rbind(yTest, yTrain)
# note: in activities, col #1 is id, col #2 is name
# replace activity id in Y with activity name
y_raw[, 1] <- activities[y_raw[, 1], 2]
y <- y_raw
names(y) <- "activity"
|
#' Visualization routine for Sedona spatial RDD.
#'
#' Generate a visual representation of geometrical object(s) within a Sedona
#' spatial RDD.
#'
#' @param rdd A Sedona spatial RDD.
#' @param resolution_x Resolution on the x-axis.
#' @param resolution_y Resolution on the y-axis.
#' @param output_location Location of the output image. This should be the
#' desired path of the image file excluding extension in its file name.
#' @param output_format File format of the output image. Currently "png",
#' "gif", and "svg" formats are supported (default: "png").
#' @param boundary Only render data within the given rectangular boundary.
#' The `boundary` parameter can be set to either a numeric vector of
#'   c(min_x, max_x, min_y, max_y) values, or with a bounding box object
#'   e.g., new_bounding_box(sc, min_x, max_x, min_y, max_y), or NULL
#' (the default). If `boundary` is NULL, then the minimum bounding box of the
#' input spatial RDD will be computed and used as boundary for rendering.
#' @param browse Whether to open the rendered image in a browser (default:
#' interactive()).
#' @param color_of_variation Which color channel will vary depending on values
#' of data points. Must be one of "red", "green", or "blue". Default: red.
#' @param base_color Color of any data point with value 0. Must be a numeric
#' vector of length 3 specifying values for red, green, and blue channels.
#' Default: c(0, 0, 0).
#' @param shade Whether data point with larger magnitude will be displayed with
#' darker color. Default: TRUE.
#'
#' @name sedona_visualization_routines
NULL
#' Visualize a Sedona spatial RDD using a heatmap.
#'
#' Generate a heatmap of geometrical object(s) within a Sedona spatial RDD.
#'
#' @inheritParams sedona_visualization_routines
#' @param blur_radius Controls the radius of a Gaussian blur in the resulting
#'   heatmap.
#'
#' @family Sedona visualization routines
#' @export
sedona_render_heatmap <- function(
    rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    blur_radius = 10L,
    browse = interactive()) {
  fmt <- match.arg(output_format)
  conn <- spark_connection(rdd$.jobj)
  render_boundary <- validate_boundary(rdd, boundary)
  # Build the JVM-side heat map effect for the requested canvas size.
  heatmap_op <- invoke_new(
    conn,
    "org.apache.sedona.viz.extension.visualizationEffect.HeatMap",
    as.integer(resolution_x),
    as.integer(resolution_y),
    render_boundary$.jobj,
    FALSE,
    as.integer(blur_radius)
  )
  # Render the RDD through the effect and write the raster to disk.
  gen_raster_image(
    rdd,
    viz_op = heatmap_op,
    output_location = output_location,
    output_format = fmt
  )
  if (browse) {
    browseURL(paste0(output_location, ".", tolower(fmt)))
  }
  invisible(NULL)
}
#' Visualize a Sedona spatial RDD using a scatter plot.
#'
#' Generate a scatter plot of geometrical object(s) within a Sedona spatial RDD.
#'
#' @inheritParams sedona_visualization_routines
#' @param reverse_coords Whether to reverse spatial coordinates in the plot
#'   (default: FALSE).
#'
#' @family Sedona visualization routines
#' @export
sedona_render_scatter_plot <- function(
    rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    color_of_variation = c("red", "green", "blue"),
    base_color = c(0, 0, 0),
    shade = TRUE,
    reverse_coords = FALSE,
    browse = interactive()) {
  # Thin wrapper: delegate everything to the shared rendering routine with
  # the visualization effect fixed to "ScatterPlot".
  sedona_render_viz_effect(
    "ScatterPlot",
    rdd = rdd,
    resolution_x = resolution_x,
    resolution_y = resolution_y,
    output_location = output_location,
    output_format = output_format,
    boundary = boundary,
    color_of_variation = color_of_variation,
    base_color = base_color,
    shade = shade,
    reverse_coords = reverse_coords,
    browse = browse
  )
}
#' Visualize a Sedona spatial RDD using a choropleth map.
#'
#' Generate a choropleth map of a pair RDD assigning integral values to
#' polygons.
#'
#' @inheritParams sedona_visualization_routines
#' @param pair_rdd A pair RDD with Sedona Polygon objects being keys and
#'   java.lang.Long being values.
#' @param reverse_coords Whether to reverse spatial coordinates in the plot
#'   (default: FALSE).
#'
#' @family Sedona visualization routines
#' @export
sedona_render_choropleth_map <- function(
    pair_rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    color_of_variation = c("red", "green", "blue"),
    base_color = c(0, 0, 0),
    shade = TRUE,
    reverse_coords = FALSE,
    browse = interactive()) {
  # Thin wrapper: delegate everything to the shared rendering routine with
  # the visualization effect fixed to "ChoroplethMap".
  sedona_render_viz_effect(
    "ChoroplethMap",
    rdd = pair_rdd,
    resolution_x = resolution_x,
    resolution_y = resolution_y,
    output_location = output_location,
    output_format = output_format,
    boundary = boundary,
    color_of_variation = color_of_variation,
    base_color = base_color,
    shade = shade,
    reverse_coords = reverse_coords,
    browse = browse
  )
}
# Shared implementation behind sedona_render_scatter_plot() and
# sedona_render_choropleth_map(): constructs the requested JVM visualization
# effect, renders `rdd` through it, and writes the raster to
# `output_location`. `viz_effect_name` is the simple class name of the
# effect (e.g. "ScatterPlot"); all other arguments mirror the exported
# wrappers.
sedona_render_viz_effect <- function(
    viz_effect_name,
    rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    color_of_variation = c("red", "green", "blue"),
    base_color = c(0, 0, 0),
    shade = TRUE,
    reverse_coords = FALSE,
    browse = interactive()) {
  # Bug fix: the default for `shade` used to be the self-referential
  # `shade = shade`, which errors ("promise already under evaluation") if
  # the argument is ever omitted. TRUE matches the documented default of
  # the exported wrappers.
  sc <- spark_connection(rdd$.jobj)
  output_format <- match.arg(output_format)
  color_of_variation <- match.arg(color_of_variation)
  validate_base_color(base_color)
  boundary <- validate_boundary(rdd, boundary)
  # Instantiate the JVM-side effect for the requested canvas size.
  viz_op <- invoke_new(
    sc,
    paste0("org.apache.sedona.viz.extension.visualizationEffect.", viz_effect_name),
    as.integer(resolution_x),
    as.integer(resolution_y),
    boundary$.jobj,
    reverse_coords
  )
  rdd %>%
    gen_raster_image(
      viz_op = viz_op,
      output_location = output_location,
      output_format = output_format,
      color_settings = list(
        color_of_variation = color_of_variation,
        base_color = base_color,
        shade = shade
      )
    )
  if (browse) {
    browseURL(paste0(output_location, ".", tolower(output_format)))
  }
  invisible(NULL)
}
# Abort unless `base_color` is a length-3 numeric (red, green, blue) vector.
# Returns NULL invisibly when the input is valid.
validate_base_color <- function(base_color) {
  is_valid <- is.numeric(base_color) && length(base_color) == 3
  if (!is_valid) {
    stop("Base color (`base_color`) must be a numeric vector of length 3 ",
      "specifying values for red, green, and blue channels ",
      "(e.g., c(0, 0, 0)).")
  }
}
# Normalize the user-supplied `boundary` into a bounding box object.
# NULL -> the RDD's own minimum bounding box; an existing bounding_box is
# used as-is; a numeric c(min_x, max_x, min_y, max_y) vector is converted.
validate_boundary <- function(rdd, boundary) {
  sc <- spark_connection(rdd$.jobj)
  if (is.null(boundary)) {
    return(minimum_bounding_box(rdd))
  }
  if (inherits(boundary, "bounding_box")) {
    return(boundary)
  }
  if (is.numeric(boundary)) {
    if (length(boundary) != 4) {
      stop("Boundary specification with numeric vector must consist of ",
        "exactly 4 values: c(min_x, max_x, min_y, max_y).")
    }
    return(do.call(new_bounding_box, c(list(sc), as.list(boundary))))
  }
  stop("Boundary specification must be either NULL, a numeric vector of ",
    "c(min_x, max_x, min_y, max_y) values, or a bounding box object")
}
# Internal helper: render `rdd` through a prepared JVM visualization effect
# (`viz_op`) and save the resulting raster image to disk.
#
# `color_settings`, when non-NULL, is a list with entries `base_color`
# (length-3 RGB vector), `color_of_variation` (AWT color channel name), and
# `shade`; it is forwarded to the effect's CustomizeColor method before
# rendering. Called for its side effect of writing
# "<output_location>.<extension>".
gen_raster_image <- function(
    rdd,
    viz_op,
    output_location,
    output_format,
    color_settings = NULL) {
  sc <- spark_connection(rdd$.jobj)
  # JVM-side generator that serializes rendered rasters to local files.
  image_generator <- invoke_new(
    sc,
    "org.apache.sedona.viz.core.ImageGenerator"
  )
  if (!is.null(color_settings)) {
    # Assemble the reflective call viz_op$CustomizeColor(r, g, b, gamma,
    # channel, shade); argument order must match the JVM signature.
    customize_color_params <- list(viz_op, "CustomizeColor") %>%
      append(as.list(as.integer(unlist(color_settings$base_color)))) %>%
      append(
        list(
          255L, # gamma
          sc$state$enums$awt_color[[color_settings$color_of_variation]],
          color_settings$shade
        )
      )
    do.call(invoke, customize_color_params)
  }
  # Render the RDD into the effect's internal raster ...
  invoke(viz_op, "Visualize", java_context(sc), rdd$.jobj)
  # ... then write that raster out in the requested image format.
  invoke(
    image_generator,
    "SaveRasterImageAsLocalFile",
    invoke(viz_op, "rasterImage"),
    output_location,
    sc$state$enums$image_types[[output_format]]
  )
}
| /R/viz.R | permissive | lorenzwalthert/sparklyr.sedona | R | false | false | 9,508 | r | #' Visualization routine for Sedona spatial RDD.
#'
#' Generate a visual representation of geometrical object(s) within a Sedona
#' spatial RDD.
#'
#' @param rdd A Sedona spatial RDD.
#' @param resolution_x Resolution on the x-axis.
#' @param resolution_y Resolution on the y-axis.
#' @param output_location Location of the output image. This should be the
#' desired path of the image file excluding extension in its file name.
#' @param output_format File format of the output image. Currently "png",
#' "gif", and "svg" formats are supported (default: "png").
#' @param boundary Only render data within the given rectangular boundary.
#' The `boundary` parameter can be set to either a numeric vector of
#'   c(min_x, max_x, min_y, max_y) values, or with a bounding box object
#'   e.g., new_bounding_box(sc, min_x, max_x, min_y, max_y), or NULL
#' (the default). If `boundary` is NULL, then the minimum bounding box of the
#' input spatial RDD will be computed and used as boundary for rendering.
#' @param browse Whether to open the rendered image in a browser (default:
#' interactive()).
#' @param color_of_variation Which color channel will vary depending on values
#' of data points. Must be one of "red", "green", or "blue". Default: red.
#' @param base_color Color of any data point with value 0. Must be a numeric
#' vector of length 3 specifying values for red, green, and blue channels.
#' Default: c(0, 0, 0).
#' @param shade Whether data point with larger magnitude will be displayed with
#' darker color. Default: TRUE.
#'
#' @name sedona_visualization_routines
NULL
#' Visualize a Sedona spatial RDD using a heatmap.
#'
#' Generate a heatmap of geometrical object(s) within a Sedona spatial RDD.
#'
#' @inheritParams sedona_visualization_routines
#' @param blur_radius Controls the radius of a Gaussian blur in the resulting
#'   heatmap.
#'
#' @family Sedona visualization routines
#' @export
sedona_render_heatmap <- function(
    rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    blur_radius = 10L,
    browse = interactive()) {
  fmt <- match.arg(output_format)
  conn <- spark_connection(rdd$.jobj)
  render_boundary <- validate_boundary(rdd, boundary)
  # Build the JVM-side heat map effect for the requested canvas size.
  heatmap_op <- invoke_new(
    conn,
    "org.apache.sedona.viz.extension.visualizationEffect.HeatMap",
    as.integer(resolution_x),
    as.integer(resolution_y),
    render_boundary$.jobj,
    FALSE,
    as.integer(blur_radius)
  )
  # Render the RDD through the effect and write the raster to disk.
  gen_raster_image(
    rdd,
    viz_op = heatmap_op,
    output_location = output_location,
    output_format = fmt
  )
  if (browse) {
    browseURL(paste0(output_location, ".", tolower(fmt)))
  }
  invisible(NULL)
}
#' Visualize a Sedona spatial RDD using a scatter plot.
#'
#' Generate a scatter plot of geometrical object(s) within a Sedona spatial RDD.
#'
#' @inheritParams sedona_visualization_routines
#' @param reverse_coords Whether to reverse spatial coordinates in the plot
#'   (default: FALSE).
#'
#' @family Sedona visualization routines
#' @export
sedona_render_scatter_plot <- function(
    rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    color_of_variation = c("red", "green", "blue"),
    base_color = c(0, 0, 0),
    shade = TRUE,
    reverse_coords = FALSE,
    browse = interactive()) {
  # Thin wrapper: delegate everything to the shared rendering routine with
  # the visualization effect fixed to "ScatterPlot".
  sedona_render_viz_effect(
    "ScatterPlot",
    rdd = rdd,
    resolution_x = resolution_x,
    resolution_y = resolution_y,
    output_location = output_location,
    output_format = output_format,
    boundary = boundary,
    color_of_variation = color_of_variation,
    base_color = base_color,
    shade = shade,
    reverse_coords = reverse_coords,
    browse = browse
  )
}
#' Visualize a Sedona spatial RDD using a choropleth map.
#'
#' Generate a choropleth map of a pair RDD assigning integral values to
#' polygons.
#'
#' @inheritParams sedona_visualization_routines
#' @param pair_rdd A pair RDD with Sedona Polygon objects being keys and
#'   java.lang.Long being values.
#' @param reverse_coords Whether to reverse spatial coordinates in the plot
#'   (default: FALSE).
#'
#' @family Sedona visualization routines
#' @export
sedona_render_choropleth_map <- function(
    pair_rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    color_of_variation = c("red", "green", "blue"),
    base_color = c(0, 0, 0),
    shade = TRUE,
    reverse_coords = FALSE,
    browse = interactive()) {
  # Thin wrapper: delegate everything to the shared rendering routine with
  # the visualization effect fixed to "ChoroplethMap".
  sedona_render_viz_effect(
    "ChoroplethMap",
    rdd = pair_rdd,
    resolution_x = resolution_x,
    resolution_y = resolution_y,
    output_location = output_location,
    output_format = output_format,
    boundary = boundary,
    color_of_variation = color_of_variation,
    base_color = base_color,
    shade = shade,
    reverse_coords = reverse_coords,
    browse = browse
  )
}
# Shared implementation behind sedona_render_scatter_plot() and
# sedona_render_choropleth_map(): constructs the requested JVM visualization
# effect, renders `rdd` through it, and writes the raster to
# `output_location`. `viz_effect_name` is the simple class name of the
# effect (e.g. "ScatterPlot"); all other arguments mirror the exported
# wrappers.
sedona_render_viz_effect <- function(
    viz_effect_name,
    rdd,
    resolution_x,
    resolution_y,
    output_location,
    output_format = c("png", "gif", "svg"),
    boundary = NULL,
    color_of_variation = c("red", "green", "blue"),
    base_color = c(0, 0, 0),
    shade = TRUE,
    reverse_coords = FALSE,
    browse = interactive()) {
  # Bug fix: the default for `shade` used to be the self-referential
  # `shade = shade`, which errors ("promise already under evaluation") if
  # the argument is ever omitted. TRUE matches the documented default of
  # the exported wrappers.
  sc <- spark_connection(rdd$.jobj)
  output_format <- match.arg(output_format)
  color_of_variation <- match.arg(color_of_variation)
  validate_base_color(base_color)
  boundary <- validate_boundary(rdd, boundary)
  # Instantiate the JVM-side effect for the requested canvas size.
  viz_op <- invoke_new(
    sc,
    paste0("org.apache.sedona.viz.extension.visualizationEffect.", viz_effect_name),
    as.integer(resolution_x),
    as.integer(resolution_y),
    boundary$.jobj,
    reverse_coords
  )
  rdd %>%
    gen_raster_image(
      viz_op = viz_op,
      output_location = output_location,
      output_format = output_format,
      color_settings = list(
        color_of_variation = color_of_variation,
        base_color = base_color,
        shade = shade
      )
    )
  if (browse) {
    browseURL(paste0(output_location, ".", tolower(output_format)))
  }
  invisible(NULL)
}
# Abort unless `base_color` is a length-3 numeric (red, green, blue) vector.
# Returns NULL invisibly when the input is valid.
validate_base_color <- function(base_color) {
  is_valid <- is.numeric(base_color) && length(base_color) == 3
  if (!is_valid) {
    stop("Base color (`base_color`) must be a numeric vector of length 3 ",
      "specifying values for red, green, and blue channels ",
      "(e.g., c(0, 0, 0)).")
  }
}
# Normalize the user-supplied `boundary` into a bounding box object.
# NULL -> the RDD's own minimum bounding box; an existing bounding_box is
# used as-is; a numeric c(min_x, max_x, min_y, max_y) vector is converted.
validate_boundary <- function(rdd, boundary) {
  sc <- spark_connection(rdd$.jobj)
  if (is.null(boundary)) {
    return(minimum_bounding_box(rdd))
  }
  if (inherits(boundary, "bounding_box")) {
    return(boundary)
  }
  if (is.numeric(boundary)) {
    if (length(boundary) != 4) {
      stop("Boundary specification with numeric vector must consist of ",
        "exactly 4 values: c(min_x, max_x, min_y, max_y).")
    }
    return(do.call(new_bounding_box, c(list(sc), as.list(boundary))))
  }
  stop("Boundary specification must be either NULL, a numeric vector of ",
    "c(min_x, max_x, min_y, max_y) values, or a bounding box object")
}
# Internal helper: render `rdd` through a prepared JVM visualization effect
# (`viz_op`) and save the resulting raster image to disk.
#
# `color_settings`, when non-NULL, is a list with entries `base_color`
# (length-3 RGB vector), `color_of_variation` (AWT color channel name), and
# `shade`; it is forwarded to the effect's CustomizeColor method before
# rendering. Called for its side effect of writing
# "<output_location>.<extension>".
gen_raster_image <- function(
    rdd,
    viz_op,
    output_location,
    output_format,
    color_settings = NULL) {
  sc <- spark_connection(rdd$.jobj)
  # JVM-side generator that serializes rendered rasters to local files.
  image_generator <- invoke_new(
    sc,
    "org.apache.sedona.viz.core.ImageGenerator"
  )
  if (!is.null(color_settings)) {
    # Assemble the reflective call viz_op$CustomizeColor(r, g, b, gamma,
    # channel, shade); argument order must match the JVM signature.
    customize_color_params <- list(viz_op, "CustomizeColor") %>%
      append(as.list(as.integer(unlist(color_settings$base_color)))) %>%
      append(
        list(
          255L, # gamma
          sc$state$enums$awt_color[[color_settings$color_of_variation]],
          color_settings$shade
        )
      )
    do.call(invoke, customize_color_params)
  }
  # Render the RDD into the effect's internal raster ...
  invoke(viz_op, "Visualize", java_context(sc), rdd$.jobj)
  # ... then write that raster out in the requested image format.
  invoke(
    image_generator,
    "SaveRasterImageAsLocalFile",
    invoke(viz_op, "rasterImage"),
    output_location,
    sc$state$enums$image_types[[output_format]]
  )
}
|
## Q1
#Consider the mtcars data set. Fit a model with mpg as the outcome that includes number of cylinders as a factor variable and weight as confounder. Give the adjusted estimate for the expected change in mpg comparing 8 cylinders to 4.
#* -6.071 <-
#* 33.991
#* -4.256
#* -3.206
data(mtcars)
df <- mtcars
# Treat cylinder count as a categorical predictor so the model estimates a
# separate level effect for 6 and 8 cylinders relative to the 4-cylinder
# baseline.
df$cyl <- as.factor(df$cyl)
fit <- lm(mpg ~ cyl + wt, data = df)
summary(fit)
# expected change in mpg if increase of 4 for cyl
# Coefficient 3 is `cyl8`: the weight-adjusted change in mpg for 8 vs 4
# cylinders.
fit$coefficients[3]
# Answer (value of the cyl8 coefficient):
-6.071
## Q2
# Consider the mtcars data set. Fit a model with mpg as the outcome that includes number of cylinders as a factor variable and weight as confounder. Compare the adjusted by weight effect of 8 cylinders as compared to 4 the unadjusted. What can be said about the effect?.
# * Including or excluding weight does not appear to change anything regarding the estimated impact of number of cylinders on mpg.
# * Within a given weight, 8 cylinder vehicles have an expected 12 mpg drop in fuel efficiency.
# * Holding weight constant, cylinder appears to have more of an impact on mpg than if weight is disregarded.
# * Holding weight constant, cylinder appears to have less of an impact on mpg than if weight is disregarded. <--
# Compare the weight-adjusted model against the unadjusted one to see how
# the estimated cylinder effect changes.
fit <- lm(mpg ~ cyl + wt, data = df)
fitUW <- lm(mpg ~ cyl, data = df)
summary(fit)
summary(fitUW)
# Holding weight constant, cylinder appears to have less of an impact on mpg than if weight is disregarded.
## Q3
# Consider the mtcars data set. Fit a model with mpg as the outcome that considers number of cylinders as a factor variable and weight as confounder. Now fit a second model with mpg as the outcome model that considers the interaction between number of cylinders (as a factor variable) and weight. Give the P-value for the likelihood ratio test comparing the two models and suggest a model using 0.05 as a type I error rate significance benchmark.
# * The P-value is small (less than 0.05). Thus it is surely true that there is no interaction term in the true model.
# * The P-value is small (less than 0.05). Thus it is surely true that there is an interaction term in the true model.
# * The P-value is larger than 0.05. So, according to our criterion, we would fail to reject, which suggests that the interaction terms may not be necessary. <--
# * The P-value is small (less than 0.05). So, according to our criterion, we reject, which suggests that the interaction term is necessary
# * The P-value is small (less than 0.05). So, according to our criterion, we reject, which suggests that the interaction term is not necessary.
# * The P-value is larger than 0.05. So, according to our criterion, we would fail to reject, which suggests that the interaction terms is necessary.
library("lmtest")
# `fit` (main effects only) is nested inside `fitU` (adds the cyl:wt
# interaction), so a likelihood ratio test is a valid comparison.
fit <- lm(mpg ~ cyl + wt, data = df)
fitU <- lm(mpg ~ cyl*wt, data = df)
lrtest(fit, fitU)
# The likelihood-ratio test is appropriate only if the two models you are comparing are nested, i. e., if one can be retrieved from the other, e. g., by fixing parameters (e. g., to zero). Models with more parameters will always fit better, the question the LR test answers is whether the increase in fit is defensible given the amount of added parameters. To compare non-nested models, you may use information criteria such as AIC or BIC (the smaller the better).
## Q4
# Consider the mtcars data set. Fit a model with mpg as the outcome that includes number of cylinders as a factor variable and weight included in the model as
#
# lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)
# How is the wt coefficient interpreted?
#
# * The estimated expected change in MPG per half ton increase in weight.
# * The estimated expected change in MPG per half ton increase in weight for the average number of cylinders.
# * The estimated expected change in MPG per one ton increase in weight.
# * The estimated expected change in MPG per half ton increase in weight for a specific number of cylinders (4, 6, 8).
# * The estimated expected change in MPG per one ton increase in weight for a specific number of cylinders (4, 6, 8). <--
# mtcars `wt` is recorded in units of 1000 lb, so one unit of I(wt * 0.5)
# corresponds to a 2000 lb (one ton) increase in weight, with cylinders
# held fixed.
fit <- lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)
summary(fit)
# The estimated expected change in MPG per one ton increase in weight for a specific number of cylinders (4, 6, 8).
## Q5
# Consider the following data set
# x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
# y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
# Give the hat diagonal for the most influential point
# * 0.2025
# * 0.2804
# * 0.9946
# * 0.2287
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit <- lm(y ~ x)
# `hat` gives each observation's leverage; x = 11.72 lies far from the
# other x values, so it dominates the fit.
lm.influence(fit)
max(lm.influence(fit)$hat)
# Answer (largest hat diagonal):
0.9946
## Q6
# Consider the following data set
# x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
# y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
#
# Give the slope dfbeta for the point with the highest hat value.
# * 0.673
# * -.00134
# * -134
# * -0.378
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit <- lm(y ~ x)
# Wrapping the assignment in () also prints the influence measures.
(imf <- influence.measures(fit))
# NOTE(review): reuses the name `df`, clobbering the mtcars copy created
# for Q1/Q2 above.
df <- as.data.frame(imf$infmat)
# dfb.x is the dfbeta of the slope; select the row with the largest hat
# (leverage) value.
df[which(df$hat == max(df$hat)),'dfb.x']
# Answer:
-133.8226
## Q7
# Consider a regression relationship between Y and X with and without adjustment for a third variable Z. Which of the following is true about comparing the regression coefficient between Y and X with and without adjustment for Z.
# It is possible for the coefficient to reverse sign after adjustment. For example, it can be strongly significant and positive before adjustment and strongly significant and negative after adjustment.
| /Week3/regression models quiz 3.R | no_license | jmacarter/Regression-Models | R | false | false | 5,542 | r | ## Q1
#Consider the mtcars data set. Fit a model with mpg as the outcome that includes number of cylinders as a factor variable and weight as confounder. Give the adjusted estimate for the expected change in mpg comparing 8 cylinders to 4.
#* -6.071 <-
#* 33.991
#* -4.256
#* -3.206
data(mtcars)
df <- mtcars
df$cyl <- as.factor(df$cyl)
fit <- lm(mpg ~ cyl + wt, data = df)
summary(fit)
# expected change in mpg if increase of 4 for cyl
fit$coefficients[3]
-6.071
## Q2
# Consider the mtcars data set. Fit a model with mpg as the outcome that includes number of cylinders as a factor variable and weight as confounder. Compare the adjusted by weight effect of 8 cylinders as compared to 4 the unadjusted. What can be said about the effect?.
# * Including or excluding weight does not appear to change anything regarding the estimated impact of number of cylinders on mpg.
# * Within a given weight, 8 cylinder vehicles have an expected 12 mpg drop in fuel efficiency.
# * Holding weight constant, cylinder appears to have more of an impact on mpg than if weight is disregarded.
# * Holding weight constant, cylinder appears to have less of an impact on mpg than if weight is disregarded. <--
fit <- lm(mpg ~ cyl + wt, data = df)
fitUW <- lm(mpg ~ cyl, data = df)
summary(fit)
summary(fitUW)
# Holding weight constant, cylinder appears to have less of an impact on mpg than if weight is disregarded.
## Q3
# Consider the mtcars data set. Fit a model with mpg as the outcome that considers number of cylinders as a factor variable and weight as confounder. Now fit a second model with mpg as the outcome model that considers the interaction between number of cylinders (as a factor variable) and weight. Give the P-value for the likelihood ratio test comparing the two models and suggest a model using 0.05 as a type I error rate significance benchmark.
# * The P-value is small (less than 0.05). Thus it is surely true that there is no interaction term in the true model.
# * The P-value is small (less than 0.05). Thus it is surely true that there is an interaction term in the true model.
# * The P-value is larger than 0.05. So, according to our criterion, we would fail to reject, which suggests that the interaction terms may not be necessary. <--
# * The P-value is small (less than 0.05). So, according to our criterion, we reject, which suggests that the interaction term is necessary
# * The P-value is small (less than 0.05). So, according to our criterion, we reject, which suggests that the interaction term is not necessary.
# * The P-value is larger than 0.05. So, according to our criterion, we would fail to reject, which suggests that the interaction terms is necessary.
library("lmtest")
fit <- lm(mpg ~ cyl + wt, data = df)
fitU <- lm(mpg ~ cyl*wt, data = df)
lrtest(fit, fitU)
# The likelihood-ratio test is appropriate only if the two models you are comparing are nested, i. e., if one can be retrieved from the other, e. g., by fixing parameters (e. g., to zero). Models with more parameters will always fit better, the question the LR test answers is whether the increase in fit is defensible given the amount of added parameters. To compare non-nested models, you may use information criteria such as AIC or BIC (the smaller the better).
## Q4
# Consider the mtcars data set. Fit a model with mpg as the outcome that includes number of cylinders as a factor variable and weight included in the model as
#
# lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)
# How is the wt coefficient interpreted?
#
# * The estimated expected change in MPG per half ton increase in weight.
# * The estimated expected change in MPG per half ton increase in weight for the average number of cylinders.
# * The estimated expected change in MPG per one ton increase in weight.
# * The estimated expected change in MPG per half ton increase in weight for a specific number of cylinders (4, 6, 8).
# * The estimated expected change in MPG per one ton increase in weight for a specific number of cylinders (4, 6, 8). <--
fit <- lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)
summary(fit)
# The estimated expected change in MPG per one ton increase in weight for a specific number of cylinders (4, 6, 8).
## Q5
# Consider the following data set
# x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
# y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
# Give the hat diagonal for the most influential point
# * 0.2025
# * 0.2804
# * 0.9946
# * 0.2287
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit <- lm(y ~ x)
lm.influence(fit)
max(lm.influence(fit)$hat)
0.9946
## Q6
# Consider the following data set
# x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
# y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
#
# Give the slope dfbeta for the point with the highest hat value.
# * 0.673
# * -.00134
# * -134
# * -0.378
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit <- lm(y ~ x)
(imf <- influence.measures(fit))
df <- as.data.frame(imf$infmat)
df[which(df$hat == max(df$hat)),'dfb.x']
-133.8226
## Q7
# Consider a regression relationship between Y and X with and without adjustment for a third variable Z. Which of the following is true about comparing the regression coefficient between Y and X with and without adjustment for Z.
# It is possible for the coefficient to reverse sign after adjustment. For example, it can be strongly significant and positive before adjustment and strongly significant and negative after adjustment.
|
#This R file includes a function for Bayesian estimation of the MESS error model (with splines).
#It also contains code for an application to house price data.
require(R.matlab)
require(matrixcalc)
require(expm)
require(MHadaptive)
require(graphics)
require(coda)
require(MCMCpack)
require(spdep)
require(CARBayes)
require(splines)
require(Rcpp)
require(RcppArmadillo)
Sys.setenv("PKG_CXXFLAGS"="-std=c++11")
sourceCpp("MESSGibbsExtMod.cpp")
# Sample from the marginal posterior distribution of the spatial parameter
# rho in the MESS error model (with or without splines). If splines are
# used, they are assumed to be included as columns of X.
#
# input:  X    predictor matrix (n x k)
#         y    outcome vector (length n)
#         D    spatial weights matrix (n x n)
#         iter number of posterior samples to draw (after thinning by 10)
# output: mcmc object holding draws from the marginal posterior of rho;
#         a density plot and an autocorrelation plot are drawn as a side
#         effect.
#
# Fixes relative to the previous version: removed a stray incomplete
# assignment (`Htilde=`) that silently merged with the following line,
# a redundant recomputation of M inside logPdf, and an unused local
# (`kappa`).
ExtendModMargRho <- function(X, y, D, iter) {
  # Hyperparameters of the inverse-gamma prior on sigma^2 and the diffuse
  # normal prior on beta.
  kappa0 <- 0.001
  theta0 <- 0.001
  kdim <- dim(X)[2]
  Cinv <- 10^(-4) * diag(kdim)
  nn <- length(y)
  # Log marginal posterior density of rho (up to an additive constant),
  # with beta and sigma^2 integrated out analytically.
  logPdf <- function(rho) {
    expon <- expm(rho * D)
    M <- expon %*% X
    MtM <- t(M) %*% M
    H <- diag(nn) - M %*% chol2inv(chol(MtM)) %*% t(M)
    bhat <- chol2inv(chol(MtM)) %*% t(M) %*% expon %*% y
    A <- MtM + Cinv
    Ainv <- chol2inv(chol(A))
    btilde <- Ainv %*% (MtM %*% bhat)
    Q1 <- t(btilde) %*% Cinv %*% btilde
    Q2 <- t(bhat - btilde) %*% MtM %*% (bhat - btilde)
    factor <- H %*% expon %*% y
    PP <- t(factor) %*% factor
    (-0.5 * (nn + 2 * kappa0)) * log(2 * theta0 + PP + Q1 + Q2) + log(det(Ainv))
  }
  # Residual sum of squares as a function of rho; its minimizer serves as
  # the starting value for the Metropolis-Hastings sampler.
  E <- function(rho) {
    expon <- expm(rho * D)
    M <- expon %*% X
    H <- diag(nn) - M %*% chol2inv(chol(t(M) %*% M)) %*% t(M)
    factor <- H %*% expon %*% y
    t(factor) %*% factor
  }
  x <- optimize(E, interval = c(-5, 1), maximum = FALSE)
  # Adaptive Metropolis-Hastings; draw enough iterations that thinning by
  # 10 leaves `iter` post-burn-in samples.
  samples <- Metro_Hastings(li_func = logPdf, pars = x$minimum, prop_sigma = NULL,
                            par_names = NULL, iterations = 5000 + 10 * iter, burn_in = 5000,
                            adapt_par = c(100, 20, 0.5, 0.75), quiet = FALSE)
  samples <- mcmc_thin(samples, thin = 10)
  samples <- mcmc(samples$trace)
  densplot(samples)
  autocorr.plot(samples)
  return(samples)
}
##############
ExtendModGibbs<-function(X,y,D,iter,iter1,samples)
{
#This function draws from the joint posterior of beta and sigma^2 via Gibbs
#sampling, conditional on the supplied draws of rho, and computes the DIC.
#input: X=independent variables, y=outcome, D=weights matrix, iter=number of samples of rho used in the Gibbs sampler
#iter1=burn-in of Gibbs sampler, samples=samples from the marginal posterior of rho
#output: samples from the joint posterior of beta and sigma^2, DIC
# Prior hyperparameters (should match those used in ExtendModMargRho).
kappa0=0.001
theta0=0.001
kdim<-dim(X)[2]
samples<-samples[1:iter]
Cinv=10^(-4)*diag(kdim)
nn=length(y)
kappa<-kappa0+nn/2
# Containers for the draws: one column of beta and one sigma^2 per rho draw.
MESSExt<-c()
MESSExt$beta<-array(data=NA,dim=c(kdim,iter))
MESSExt$sigma2<-matrix(data=NA,nrow=1,ncol=iter)
for(i in seq(1,iter,by=1))
{
print(i)
# Pre-multiplying by exp(rho*D) turns the MESS model into an ordinary
# linear regression in the transformed variables.
expo<-expm(samples[i]*D)
expoX<-expo%*%X
expoY<-expo%*%y
covas<-chol2inv(chol(t(expoX)%*%expoX+Cinv))
betaMeanTemp=covas%*%t(expoX)%*%expoY
# Run the compiled Gibbs sampler (MESSGibbsExtMod.cpp) for iter1 burn-in
# steps plus one and keep only the final row, i.e. a single draw of
# (beta, sigma^2) per rho draw.
AA<-gibbsMessExtMod(iter1+1,betaMeanTemp,covas,expo,kappa,X,y,kdim,theta0)
MESSExt$beta[, i]<-AA[(iter1+1),1:kdim]
MESSExt$sigma2[i]<-AA[(iter1+1),kdim+1]}
MESSExt$rho<-samples[1:(iter-1)]
#### DIC
# Deviance (-2 * log-likelihood) of the MESS model at the parameter
# vector pars = c(rho, beta, sigma^2).
MESSLikeE<-function(pars)
{
lp=length(pars)
beta<-matrix(data=pars[2:(lp-1)],nrow=lp-2,ncol=1)
# expAtv computes exp(rho*D) %*% v without forming the full matrix
# exponential.
A<-expAtv(D,y-X%*%beta,pars[1])$eAtv
output<-(-2)*(-nn/2*log(2*pi)-nn/2*log(pars[lp])-1/(2*pars[lp])*t(A)%*%A)
return(output)
}
vec1<-matrix(data=MESSExt$rho,nrow=1,ncol=iter-1)
# NOTE(review): `(1:iter-1)` parses as `(1:iter) - 1`, i.e. 0:(iter-1);
# the 0 index is silently dropped when subsetting, so this selects
# elements 1..(iter-1), which appears to be the intent -- `1:(iter-1)`
# would express it explicitly.
helpMat<-rbind(vec1,MESSExt$beta[,1:(iter-1)],matrix(data=MESSExt$sigma2[(1:iter-1)],nrow=1,ncol=iter-1))
helpMatrix<-apply(helpMat,2,MESSLikeE)
DIC.av<-mean(helpMatrix)
rho.mean<-mean(vec1)
beta.mean<-rowMeans(MESSExt$beta[,1:(iter-1)])
dim(beta.mean)<-c(1,kdim)
sigma2.mean<-mean(MESSExt$sigma2[(1:iter-1)])
# pD = effective number of parameters; DIC = pD + mean deviance.
MESSExt$pD<-DIC.av-MESSLikeE(cbind(rho.mean,beta.mean,sigma2.mean))
MESSExt$DIC<-MESSExt$pD+DIC.av
# pV-based alternative: half the posterior variance of the deviance.
MESSExt$pV<-0.5*var(helpMatrix)
MESSExt$DICV<-MESSExt$pV+DIC.av
return(MESSExt)
}
#####House price example including splines
load("spatialhousedata.rda")
log.house.price<-log(spatialhousedata@data$price)
house.rooms<-spatialhousedata@data$rooms
house.crime.log<-log(spatialhousedata@data$crime)
house.sales<-spatialhousedata@data$sales
house.driveshop.log<-log(spatialhousedata@data$driveshop)
house.type<-spatialhousedata@data$type
house.type.semi<-as.numeric(house.type=="semi")
house.type.flat<-as.numeric(house.type=="flat")
house.type.terrace<-as.numeric(house.type=="terrace")
intercept<-matrix(data=1,nrow=length(log.house.price), ncol=1)
house.X<-c(intercept,house.rooms,house.crime.log,house.sales,house.driveshop.log,house.type.semi,house.type.flat,house.type.terrace)
dim(house.X)<-c(270,8)
dim(log.house.price)<-c(270,1)
M.nb <- poly2nb(spatialhousedata, row.names = rownames(spatialhousedata@data))
M.list <- nb2listw(M.nb, style = "B")
M.mat <- nb2mat(M.nb, style = "W")
#exporting the housing data to Matlab
writeMat("houseX.mat",A=house.X)
writeMat("houseMatrix.mat",W=M.mat)
writeMat("log.house.price.mat",B=log.house.price)
#saving as R data files
save(house.X,file="house.X")
save(log.house.price,file="log.house.price")
save(M.mat,file="houseMatrix")
# Build spatial spline basis columns from the polygon centroid coordinates:
# 3 natural-spline columns per coordinate plus all 9 pairwise (tensor
# product) interactions, for 15 extra columns in total.
cc<-coordinates(spatialhousedata)
spline1<-ns(cc[,1],df=3)
spline2<-ns(cc[,2],df=3)
X.extra<-matrix(data=NA,nrow=270,ncol=15)
X.extra[,1:3]<-spline1
X.extra[,4:6]<-spline2
k=7
# Columns 7..15 hold the tensor product spline1[,i] * spline2[,j].
for (i in seq(1,3,1))
{
for(j in seq(1,3,1))
{
X.extra[,k]=spline1[,i]*spline2[,j]
k=k+1
}
}
# Append the spline columns to the covariate matrix.
house.XX<-cbind(house.X,X.extra)
SplinesRho1<-ExtendModMargRho(house.XX,log.house.price,M.mat,5000)
SplinesVersion1<-ExtendModGibbs(house.XX,log.house.price,M.mat,5000,4000,SplinesRho1)
### only with individual splines, excluding the tensor product
house.X2<-house.XX[,1:14]
spline1<-ns(cc[,1],df=5)
spline2<-ns(cc[,2],df=5)
X.extra<-matrix(data=NA,nrow=270,ncol=10)
X.extra[,1:5]<-spline1
X.extra[,6:10]<-spline2
house.X3=cbind(house.X,X.extra)
SplinesRho3<-ExtendModMargRho(house.X3,log.house.price,M.mat,5000)
SplinesVersion3<-ExtendModGibbs(house.X3,log.house.price,M.mat,5000,4000,SplinesRho3)
| /ExtendMod.R | no_license | MagdaStrauss/MESS-model | R | false | false | 6,123 | r | #This R file includes a function for Bayesian estimation of the MESS error model (with splines).
#It also contains code for an application to house price data.
require(R.matlab)
require(matrixcalc)
require(expm)
require(MHadaptive)
require(graphics)
require(coda)
require(MCMCpack)
require(spdep)
require(CARBayes)
require(splines)
require(Rcpp)
require(RcppArmadillo)
Sys.setenv("PKG_CXXFLAGS"="-std=c++11")
sourceCpp("MESSGibbsExtMod.cpp")
# Sample from the marginal posterior distribution of the spatial parameter
# rho in the MESS error model (with or without splines). If splines are
# used, they are assumed to be included as columns of X.
#
# input:  X    predictor matrix (n x k)
#         y    outcome vector (length n)
#         D    spatial weights matrix (n x n)
#         iter number of posterior samples to draw (after thinning by 10)
# output: mcmc object holding draws from the marginal posterior of rho;
#         a density plot and an autocorrelation plot are drawn as a side
#         effect.
#
# Fixes relative to the previous version: removed a stray incomplete
# assignment (`Htilde=`) that silently merged with the following line,
# a redundant recomputation of M inside logPdf, and an unused local
# (`kappa`).
ExtendModMargRho <- function(X, y, D, iter) {
  # Hyperparameters of the inverse-gamma prior on sigma^2 and the diffuse
  # normal prior on beta.
  kappa0 <- 0.001
  theta0 <- 0.001
  kdim <- dim(X)[2]
  Cinv <- 10^(-4) * diag(kdim)
  nn <- length(y)
  # Log marginal posterior density of rho (up to an additive constant),
  # with beta and sigma^2 integrated out analytically.
  logPdf <- function(rho) {
    expon <- expm(rho * D)
    M <- expon %*% X
    MtM <- t(M) %*% M
    H <- diag(nn) - M %*% chol2inv(chol(MtM)) %*% t(M)
    bhat <- chol2inv(chol(MtM)) %*% t(M) %*% expon %*% y
    A <- MtM + Cinv
    Ainv <- chol2inv(chol(A))
    btilde <- Ainv %*% (MtM %*% bhat)
    Q1 <- t(btilde) %*% Cinv %*% btilde
    Q2 <- t(bhat - btilde) %*% MtM %*% (bhat - btilde)
    factor <- H %*% expon %*% y
    PP <- t(factor) %*% factor
    (-0.5 * (nn + 2 * kappa0)) * log(2 * theta0 + PP + Q1 + Q2) + log(det(Ainv))
  }
  # Residual sum of squares as a function of rho; its minimizer serves as
  # the starting value for the Metropolis-Hastings sampler.
  E <- function(rho) {
    expon <- expm(rho * D)
    M <- expon %*% X
    H <- diag(nn) - M %*% chol2inv(chol(t(M) %*% M)) %*% t(M)
    factor <- H %*% expon %*% y
    t(factor) %*% factor
  }
  x <- optimize(E, interval = c(-5, 1), maximum = FALSE)
  # Adaptive Metropolis-Hastings; draw enough iterations that thinning by
  # 10 leaves `iter` post-burn-in samples.
  samples <- Metro_Hastings(li_func = logPdf, pars = x$minimum, prop_sigma = NULL,
                            par_names = NULL, iterations = 5000 + 10 * iter, burn_in = 5000,
                            adapt_par = c(100, 20, 0.5, 0.75), quiet = FALSE)
  samples <- mcmc_thin(samples, thin = 10)
  samples <- mcmc(samples$trace)
  densplot(samples)
  autocorr.plot(samples)
  return(samples)
}
##############
ExtendModGibbs<-function(X,y,D,iter,iter1,samples)
{
#This function draws from the joint posterior of beta and sigma^2 via Gibbs
#sampling, conditional on the supplied draws of rho, and computes the DIC.
#input: X=independent variables, y=outcome, D=weights matrix, iter=number of samples of rho used in the Gibbs sampler
#iter1=burn-in of Gibbs sampler, samples=samples from the marginal posterior of rho
#output: samples from the joint posterior of beta and sigma^2, DIC
# Prior hyperparameters (should match those used in ExtendModMargRho).
kappa0=0.001
theta0=0.001
kdim<-dim(X)[2]
samples<-samples[1:iter]
Cinv=10^(-4)*diag(kdim)
nn=length(y)
kappa<-kappa0+nn/2
# Containers for the draws: one column of beta and one sigma^2 per rho draw.
MESSExt<-c()
MESSExt$beta<-array(data=NA,dim=c(kdim,iter))
MESSExt$sigma2<-matrix(data=NA,nrow=1,ncol=iter)
for(i in seq(1,iter,by=1))
{
print(i)
# Pre-multiplying by exp(rho*D) turns the MESS model into an ordinary
# linear regression in the transformed variables.
expo<-expm(samples[i]*D)
expoX<-expo%*%X
expoY<-expo%*%y
covas<-chol2inv(chol(t(expoX)%*%expoX+Cinv))
betaMeanTemp=covas%*%t(expoX)%*%expoY
# Run the compiled Gibbs sampler (MESSGibbsExtMod.cpp) for iter1 burn-in
# steps plus one and keep only the final row, i.e. a single draw of
# (beta, sigma^2) per rho draw.
AA<-gibbsMessExtMod(iter1+1,betaMeanTemp,covas,expo,kappa,X,y,kdim,theta0)
MESSExt$beta[, i]<-AA[(iter1+1),1:kdim]
MESSExt$sigma2[i]<-AA[(iter1+1),kdim+1]}
MESSExt$rho<-samples[1:(iter-1)]
#### DIC
# Deviance (-2 * log-likelihood) of the MESS model at the parameter
# vector pars = c(rho, beta, sigma^2).
MESSLikeE<-function(pars)
{
lp=length(pars)
beta<-matrix(data=pars[2:(lp-1)],nrow=lp-2,ncol=1)
# expAtv computes exp(rho*D) %*% v without forming the full matrix
# exponential.
A<-expAtv(D,y-X%*%beta,pars[1])$eAtv
output<-(-2)*(-nn/2*log(2*pi)-nn/2*log(pars[lp])-1/(2*pars[lp])*t(A)%*%A)
return(output)
}
vec1<-matrix(data=MESSExt$rho,nrow=1,ncol=iter-1)
# NOTE(review): `(1:iter-1)` parses as `(1:iter) - 1`, i.e. 0:(iter-1);
# the 0 index is silently dropped when subsetting, so this selects
# elements 1..(iter-1), which appears to be the intent -- `1:(iter-1)`
# would express it explicitly.
helpMat<-rbind(vec1,MESSExt$beta[,1:(iter-1)],matrix(data=MESSExt$sigma2[(1:iter-1)],nrow=1,ncol=iter-1))
helpMatrix<-apply(helpMat,2,MESSLikeE)
DIC.av<-mean(helpMatrix)
rho.mean<-mean(vec1)
beta.mean<-rowMeans(MESSExt$beta[,1:(iter-1)])
dim(beta.mean)<-c(1,kdim)
sigma2.mean<-mean(MESSExt$sigma2[(1:iter-1)])
# pD = effective number of parameters; DIC = pD + mean deviance.
MESSExt$pD<-DIC.av-MESSLikeE(cbind(rho.mean,beta.mean,sigma2.mean))
MESSExt$DIC<-MESSExt$pD+DIC.av
# pV-based alternative: half the posterior variance of the deviance.
MESSExt$pV<-0.5*var(helpMatrix)
MESSExt$DICV<-MESSExt$pV+DIC.av
return(MESSExt)
}
#####House price example including splines
load("spatialhousedata.rda")
log.house.price<-log(spatialhousedata@data$price)
house.rooms<-spatialhousedata@data$rooms
house.crime.log<-log(spatialhousedata@data$crime)
house.sales<-spatialhousedata@data$sales
house.driveshop.log<-log(spatialhousedata@data$driveshop)
house.type<-spatialhousedata@data$type
house.type.semi<-as.numeric(house.type=="semi")
house.type.flat<-as.numeric(house.type=="flat")
house.type.terrace<-as.numeric(house.type=="terrace")
intercept<-matrix(data=1,nrow=length(log.house.price), ncol=1)
house.X<-c(intercept,house.rooms,house.crime.log,house.sales,house.driveshop.log,house.type.semi,house.type.flat,house.type.terrace)
dim(house.X)<-c(270,8)
dim(log.house.price)<-c(270,1)
M.nb <- poly2nb(spatialhousedata, row.names = rownames(spatialhousedata@data))
M.list <- nb2listw(M.nb, style = "B")
M.mat <- nb2mat(M.nb, style = "W")
#exporting the housing data to Matlab
writeMat("houseX.mat",A=house.X)
writeMat("houseMatrix.mat",W=M.mat)
writeMat("log.house.price.mat",B=log.house.price)
#saving as R data files
save(house.X,file="house.X")
save(log.house.price,file="log.house.price")
save(M.mat,file="houseMatrix")
cc<-coordinates(spatialhousedata)
spline1<-ns(cc[,1],df=3)
spline2<-ns(cc[,2],df=3)
X.extra<-matrix(data=NA,nrow=270,ncol=15)
X.extra[,1:3]<-spline1
X.extra[,4:6]<-spline2
k=7
for (i in seq(1,3,1))
{
for(j in seq(1,3,1))
{
X.extra[,k]=spline1[,i]*spline2[,j]
k=k+1
}
}
house.XX<-cbind(house.X,X.extra)
SplinesRho1<-ExtendModMargRho(house.XX,log.house.price,M.mat,5000)
SplinesVersion1<-ExtendModGibbs(house.XX,log.house.price,M.mat,5000,4000,SplinesRho1)
### only with individual splines, excluding the tensor product
house.X2<-house.XX[,1:14]
spline1<-ns(cc[,1],df=5)
spline2<-ns(cc[,2],df=5)
X.extra<-matrix(data=NA,nrow=270,ncol=10)
X.extra[,1:5]<-spline1
X.extra[,6:10]<-spline2
house.X3=cbind(house.X,X.extra)
SplinesRho3<-ExtendModMargRho(house.X3,log.house.price,M.mat,5000)
SplinesVersion3<-ExtendModGibbs(house.X3,log.house.price,M.mat,5000,4000,SplinesRho3)
|
#' Find the ancestors of specified nodes
#'
#' \code{findAncestor} finds the ancestor in the nth generation above
#' specified nodes.
#'
#' @param tree A phylo object
#' @param node A vector of node numbers or node labels
#' @param level A vector of numbers to define nth generation before the
#' specified nodes
#' @param use.alias A logical value, TRUE or FALSE. The default is FALSE, and
#' the node label would be used to name the output; otherwise, the alias of
#' node label would be used to name the output. The alias of node label is
#' created by adding a prefix \code{"alias_"} to the node number.
#' @export
#' @return A vector of nodes. The numeric value is the node number, and the
#' vector name is the corresponding node label. If a node has no label, it
#' would have NA as name when \code{use.alias = FALSE}, and have the alias of
#' node label as name when \code{use.alias = TRUE}.
#' @author Ruizhu Huang
#'
#' @examples
#' library(ggtree)
#' data(tinyTree)
#' ggtree(tinyTree, branch.length = 'none') +
#' geom_text2(aes(label = label), color = "darkorange",
#' hjust = -0.1, vjust = -0.7) +
#' geom_text2(aes(label = node), color = "darkblue",
#' hjust = -0.5, vjust = 0.7)
#'
#' findAncestor(tree = tinyTree, node = c(18, 13), level = 1)
# Find the ancestor `level` generations above each specified node.
# (See the roxygen block above for the full contract.)
#
# Improvements: a node that cannot be located in the path matrix now raises
# an explicit error instead of silently contributing nothing to the result,
# and the redundant `level <- level` branch was removed.
findAncestor <- function(tree, node, level,
                         use.alias = FALSE) {
    if (!inherits(tree, "phylo")) {
        stop("tree: should be a phylo object")
    }
    if (!length(node)) {
        stop("Please provide at least one node on the tree.")
    }
    # convert a tree to a matrix
    # each row is a path connecting the root and a leaf
    treeMat <- matTree(tree)
    if (is.character(node)) {
        node <- convertNode(tree = tree, node = node,
                            use.alias = TRUE,
                            message = FALSE)
    }
    # Recycle a scalar level; otherwise require one level per node.
    if (length(level) == 1) {
        level <- rep(level, length(node))
    } else if (length(level) != length(node)) {
        stop("the length of level is not equal to the length of node")
    }
    selNod <- lapply(seq_along(node), FUN = function(x) {
        # the node
        nod.x <- node[x]
        # where does the node occur in the path matrix
        ind <- which(treeMat == nod.x, arr.ind = TRUE)
        if (nrow(ind) == 0) {
            stop("Node ", nod.x, " can't be found on the tree.")
        }
        # shift each occurrence `level.x` columns toward the root
        level.x <- level[x]
        ind.x <- ind
        ind.x[, "col"] <- ind[, "col"] + level.x
        if (any(ind.x[, "col"] > ncol(treeMat))) {
            stop("Exceed the root; try a lower level.")
        }
        vv <- treeMat[ind.x]
        uv <- unique(as.vector(vv))
        # all paths through the node must agree on a single ancestor
        if (length(uv) > 1) {
            stop("More than one node are found.")
        }
        uv
    })
    out <- unlist(selNod)
    # return a vector of the found node (the node number of the node)
    # name the vector with the node label
    names(out) <- convertNode(tree = tree, node = out,
                              use.alias = use.alias,
                              message = FALSE)
    return(out)
}
| /R/tree_findAncestor.R | no_license | FelixErnst/TreeSummarizedExperiment | R | false | false | 3,060 | r | #' Find the ancestors of specified nodes
#'
#' \code{findAncestor} finds the ancestor in the nth generation above
#' specified nodes.
#'
#' @param tree A phylo object
#' @param node A vector of node numbers or node labels
#' @param level A vector of numbers to define nth generation before the
#' specified nodes
#' @param use.alias A logical value, TRUE or FALSE. The default is FALSE, and
#' the node label would be used to name the output; otherwise, the alias of
#' node label would be used to name the output. The alias of node label is
#' created by adding a prefix \code{"alias_"} to the node number.
#' @export
#' @return A vector of nodes. The numeric value is the node number, and the
#' vector name is the corresponding node label. If a node has no label, it
#' would have NA as name when \code{use.alias = FALSE}, and have the alias of
#' node label as name when \code{use.alias = TRUE}.
#' @author Ruizhu Huang
#'
#' @examples
#' library(ggtree)
#' data(tinyTree)
#' ggtree(tinyTree, branch.length = 'none') +
#' geom_text2(aes(label = label), color = "darkorange",
#' hjust = -0.1, vjust = -0.7) +
#' geom_text2(aes(label = node), color = "darkblue",
#' hjust = -0.5, vjust = 0.7)
#'
#' findAncestor(tree = tinyTree, node = c(18, 13), level = 1)
# Find the ancestor `level` generations above each specified node.
# (See the roxygen block above for the full contract.)
#
# Improvements: a node that cannot be located in the path matrix now raises
# an explicit error instead of silently contributing nothing to the result,
# and the redundant `level <- level` branch was removed.
findAncestor <- function(tree, node, level,
                         use.alias = FALSE) {
    if (!inherits(tree, "phylo")) {
        stop("tree: should be a phylo object")
    }
    if (!length(node)) {
        stop("Please provide at least one node on the tree.")
    }
    # convert a tree to a matrix
    # each row is a path connecting the root and a leaf
    treeMat <- matTree(tree)
    if (is.character(node)) {
        node <- convertNode(tree = tree, node = node,
                            use.alias = TRUE,
                            message = FALSE)
    }
    # Recycle a scalar level; otherwise require one level per node.
    if (length(level) == 1) {
        level <- rep(level, length(node))
    } else if (length(level) != length(node)) {
        stop("the length of level is not equal to the length of node")
    }
    selNod <- lapply(seq_along(node), FUN = function(x) {
        # the node
        nod.x <- node[x]
        # where does the node occur in the path matrix
        ind <- which(treeMat == nod.x, arr.ind = TRUE)
        if (nrow(ind) == 0) {
            stop("Node ", nod.x, " can't be found on the tree.")
        }
        # shift each occurrence `level.x` columns toward the root
        level.x <- level[x]
        ind.x <- ind
        ind.x[, "col"] <- ind[, "col"] + level.x
        if (any(ind.x[, "col"] > ncol(treeMat))) {
            stop("Exceed the root; try a lower level.")
        }
        vv <- treeMat[ind.x]
        uv <- unique(as.vector(vv))
        # all paths through the node must agree on a single ancestor
        if (length(uv) > 1) {
            stop("More than one node are found.")
        }
        uv
    })
    out <- unlist(selNod)
    # return a vector of the found node (the node number of the node)
    # name the vector with the node label
    names(out) <- convertNode(tree = tree, node = out,
                              use.alias = use.alias,
                              message = FALSE)
    return(out)
}
|
setwd("C:/Users/HP/Documents/datos 1")
datos <- read.csv("datos 2.csv")
plot(x=datos$x, y=datos$y)
# Evaluate, at x, the straight line passing through the points (x1, y1)
# and (x2, y2), written in the Lagrange two-point form. Vectorized over x.
hola <- function(x, x1, x2, y1, y2) {
  weight1 <- (x - x2) / (x1 - x2)
  weight2 <- (x - x1) / (x2 - x1)
  y1 * weight1 + y2 * weight2
}
nuevosdatos <- 1:dim(datos)[1]
nuevosdatos
for(i in 1:dim(datos)[1]){
nuevosdatos[i] <- hola(datos$x[i], datos$x[2], datos$x[5],
datos$y[2], datos$y[5])
}
lines(x=datos$x, y=nuevosdatos, col='red')
logaritmo1 <- log(datos$x)
logaritmo2 <- log(datos$y)
lines(x=datos$x,y=logaritmo2)
plot(x=logaritmo1, y=logaritmo2)
| /problema 2.R | no_license | MorenoAmarillo45/R-files | R | false | false | 595 | r | setwd("C:/Users/HP/Documents/datos 1")
datos <- read.csv("datos 2.csv")
plot(x=datos$x, y=datos$y)
hola <- function(x, x1, x2,y1,y2){
variable1 <- y1*((x-x2)/(x1-x2)) + y2*((x-x1)/(x2-x1))
return(variable1)
}
nuevosdatos <- 1:dim(datos)[1]
nuevosdatos
for(i in 1:dim(datos)[1]){
nuevosdatos[i] <- hola(datos$x[i], datos$x[2], datos$x[5],
datos$y[2], datos$y[5])
}
lines(x=datos$x, y=nuevosdatos, col='red')
logaritmo1 <- log(datos$x)
logaritmo2 <- log(datos$y)
lines(x=datos$x,y=logaritmo2)
plot(x=logaritmo1, y=logaritmo2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimize.portfolio.R
\name{optimize.portfolio.rebalancing}
\alias{optimize.portfolio.rebalancing}
\alias{optimize.portfolio.rebalancing_v1}
\title{Portfolio Optimization with Rebalancing Periods}
\usage{
optimize.portfolio.rebalancing_v1(
R,
constraints,
optimize_method = c("DEoptim", "random", "ROI"),
search_size = 20000,
trace = FALSE,
...,
rp = NULL,
rebalance_on = NULL,
training_period = NULL,
rolling_window = NULL
)
optimize.portfolio.rebalancing(
R,
portfolio = NULL,
constraints = NULL,
objectives = NULL,
optimize_method = c("DEoptim", "random", "ROI"),
search_size = 20000,
trace = FALSE,
...,
rp = NULL,
rebalance_on = NULL,
training_period = NULL,
rolling_window = NULL
)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns}
\item{constraints}{default NULL, a list of constraint objects}
\item{optimize_method}{one of "DEoptim", "random", "pso", "GenSA", or "ROI"}
\item{search_size}{integer, how many portfolios to test, default 20,000}
\item{trace}{TRUE/FALSE if TRUE will attempt to return additional
information on the path or portfolios searched}
\item{\dots}{any other passthru parameters to \code{\link{optimize.portfolio}}}
\item{rp}{a set of random portfolios passed into the function to prevent recalculation}
\item{rebalance_on}{character string of period to rebalance on. See
\code{\link[xts]{endpoints}} for valid names.}
\item{training_period}{an integer of the number of periods to use as
a training data in the front of the returns data}
\item{rolling_window}{an integer of the width (i.e. number of periods)
of the rolling window, the default of NULL will run the optimization
using the data from inception.}
\item{portfolio}{an object of type "portfolio" specifying the constraints
and objectives for the optimization}
\item{objectives}{default NULL, a list of objective objects}
}
\value{
a list containing the following elements
\itemize{
\item{\code{portfolio}:}{ The portfolio object.}
\item{\code{R}:}{ The asset returns.}
\item{\code{call}:}{ The function call.}
\item{\code{elapsed_time:}}{ The amount of time that elapses while the
optimization is run.}
\item{\code{opt_rebalancing:}}{ A list of \code{optimize.portfolio}
objects computed at each rebalancing period.}
}
}
\description{
Portfolio optimization with support for rebalancing periods for
out-of-sample testing (i.e. backtesting)
}
\details{
Run portfolio optimization with periodic rebalancing at specified time periods.
Running the portfolio optimization with periodic rebalancing can help
refine the constraints and objectives by evaluating the out of sample
performance of the portfolio based on historical data.
If both \code{training_period} and \code{rolling_window} are \code{NULL},
then \code{training_period} is set to a default value of 36.
If \code{training_period} is \code{NULL} and a \code{rolling_window} is
specified, then \code{training_period} is set to the value of
\code{rolling_window}.
The user should be aware of the following behavior when both
\code{training_period} and \code{rolling_window} are specified and have
different values
\itemize{
\item{\code{training_period < rolling_window}: }{For example, if you have
\code{rolling_window=60}, \code{training_period=50}, and the periodicity
of the data is the same as the rebalance frequency (i.e. monthly data with
\code{rebalance_on="months"}) then the returns data used in the optimization
at each iteration are as follows:
\itemize{
\item{1: R[1:50,]}
\item{2: R[1:51,]}
\item{...}
\item{11: R[1:60,]}
\item{12: R[1:61,]}
\item{13: R[2:62,]}
\item{...}
}
This results in a growing window for several optimizations initially while
the endpoint iterator (i.e. \code{[50, 51, ...]}) is less than the
rolling window width.}
\item{\code{training_period > rolling_window}: }{The data used in the initial
optimization is \code{R[(training_period - rolling_window):training_period,]}.
This results in some of the data being "thrown away", i.e. periods 1 to
\code{(training_period - rolling_window - 1)} are not used in the optimization.}
}
This function is essentially a wrapper around \code{optimize.portfolio}
and thus the discussion in the Details section of the
\code{\link{optimize.portfolio}} help file is valid here as well.
This function is massively parallel and requires the 'foreach' package. It
is suggested to register a parallel backend.
}
\examples{
\dontrun{
data(edhec)
R <- edhec[,1:4]
funds <- colnames(R)
portf <- portfolio.spec(funds)
portf <- add.constraint(portf, type="full_investment")
portf <- add.constraint(portf, type="long_only")
portf <- add.objective(portf, type="risk", name="StdDev")
# Quarterly rebalancing with 5 year training period
bt.opt1 <- optimize.portfolio.rebalancing(R, portf,
optimize_method="ROI",
rebalance_on="quarters",
training_period=60)
# Monthly rebalancing with 5 year training period and 4 year rolling window
bt.opt2 <- optimize.portfolio.rebalancing(R, portf,
optimize_method="ROI",
rebalance_on="months",
training_period=60,
rolling_window=48)
}
}
\seealso{
\code{\link{portfolio.spec}} \code{\link{optimize.portfolio}}
}
\author{
Kris Boudt, Peter Carl, Brian G. Peterson
}
| /man/optimize.portfolio.rebalancing.Rd | no_license | GreenGrassBlueOcean/PortfolioAnalytics | R | false | true | 5,382 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimize.portfolio.R
\name{optimize.portfolio.rebalancing}
\alias{optimize.portfolio.rebalancing}
\alias{optimize.portfolio.rebalancing_v1}
\title{Portfolio Optimization with Rebalancing Periods}
\usage{
optimize.portfolio.rebalancing_v1(
R,
constraints,
optimize_method = c("DEoptim", "random", "ROI"),
search_size = 20000,
trace = FALSE,
...,
rp = NULL,
rebalance_on = NULL,
training_period = NULL,
rolling_window = NULL
)
optimize.portfolio.rebalancing(
R,
portfolio = NULL,
constraints = NULL,
objectives = NULL,
optimize_method = c("DEoptim", "random", "ROI"),
search_size = 20000,
trace = FALSE,
...,
rp = NULL,
rebalance_on = NULL,
training_period = NULL,
rolling_window = NULL
)
}
\arguments{
\item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns}
\item{constraints}{default NULL, a list of constraint objects}
\item{optimize_method}{one of "DEoptim", "random", "pso", "GenSA", or "ROI"}
\item{search_size}{integer, how many portfolios to test, default 20,000}
\item{trace}{TRUE/FALSE if TRUE will attempt to return additional
information on the path or portfolios searched}
\item{\dots}{any other passthru parameters to \code{\link{optimize.portfolio}}}
\item{rp}{a set of random portfolios passed into the function to prevent recalculation}
\item{rebalance_on}{character string of period to rebalance on. See
\code{\link[xts]{endpoints}} for valid names.}
\item{training_period}{an integer of the number of periods to use as
training data at the front of the returns data}
\item{rolling_window}{an integer of the width (i.e. number of periods)
of the rolling window, the default of NULL will run the optimization
using the data from inception.}
\item{portfolio}{an object of type "portfolio" specifying the constraints
and objectives for the optimization}
\item{objectives}{default NULL, a list of objective objects}
}
\value{
a list containing the following elements
\itemize{
\item{\code{portfolio}:}{ The portfolio object.}
\item{\code{R}:}{ The asset returns.}
\item{\code{call}:}{ The function call.}
\item{\code{elapsed_time:}}{ The amount of time that elapses while the
optimization is run.}
\item{\code{opt_rebalancing:}}{ A list of \code{optimize.portfolio}
objects computed at each rebalancing period.}
}
}
\description{
Portfolio optimization with support for rebalancing periods for
out-of-sample testing (i.e. backtesting)
}
\details{
Run portfolio optimization with periodic rebalancing at specified time periods.
Running the portfolio optimization with periodic rebalancing can help
refine the constraints and objectives by evaluating the out of sample
performance of the portfolio based on historical data.
If both \code{training_period} and \code{rolling_window} are \code{NULL},
then \code{training_period} is set to a default value of 36.
If \code{training_period} is \code{NULL} and a \code{rolling_window} is
specified, then \code{training_period} is set to the value of
\code{rolling_window}.
The user should be aware of the following behavior when both
\code{training_period} and \code{rolling_window} are specified and have
different values
\itemize{
\item{\code{training_period < rolling_window}: }{For example, if you have
\code{rolling_window=60}, \code{training_period=50}, and the periodicity
of the data is the same as the rebalance frequency (i.e. monthly data with
\code{rebalance_on="months"}) then the returns data used in the optimization
at each iteration are as follows:
\itemize{
\item{1: R[1:50,]}
\item{2: R[1:51,]}
\item{...}
\item{11: R[1:60,]}
\item{12: R[1:61,]}
\item{13: R[2:62,]}
\item{...}
}
This results in a growing window for several optimizations initially while
the endpoint iterator (i.e. \code{[50, 51, ...]}) is less than the
rolling window width.}
\item{\code{training_period > rolling_window}: }{The data used in the initial
optimization is \code{R[(training_period - rolling_window):training_period,]}.
This results in some of the data being "thrown away", i.e. periods 1 to
\code{(training_period - rolling_window - 1)} are not used in the optimization.}
}
This function is essentially a wrapper around \code{optimize.portfolio}
and thus the discussion in the Details section of the
\code{\link{optimize.portfolio}} help file is valid here as well.
This function is massively parallel and requires the 'foreach' package. It
is suggested to register a parallel backend.
}
\examples{
\dontrun{
data(edhec)
R <- edhec[,1:4]
funds <- colnames(R)
portf <- portfolio.spec(funds)
portf <- add.constraint(portf, type="full_investment")
portf <- add.constraint(portf, type="long_only")
portf <- add.objective(portf, type="risk", name="StdDev")
# Quarterly rebalancing with 5 year training period
bt.opt1 <- optimize.portfolio.rebalancing(R, portf,
optimize_method="ROI",
rebalance_on="quarters",
training_period=60)
# Monthly rebalancing with 5 year training period and 4 year rolling window
bt.opt2 <- optimize.portfolio.rebalancing(R, portf,
optimize_method="ROI",
rebalance_on="months",
training_period=60,
rolling_window=48)
}
}
\seealso{
\code{\link{portfolio.spec}} \code{\link{optimize.portfolio}}
}
\author{
Kris Boudt, Peter Carl, Brian G. Peterson
}
|
library(GenomicRanges)
qcExpt <- function(expt, opt) {
  # QC the experimental data: no duplicate experiments, and the positive-label
  # column must be logical (TRUE/FALSE) or binary (0/1).
  #
  # expt: experimental data (data.frame or data.table), one row per experiment
  # opt:  options list; opt$experimentalPositiveColumn names the label column
  #
  # Stops with an error on QC failure; otherwise only prints status messages.
  print("Running QC on experimental data")
  expt <- subset(expt, IncludeInModel)

  # A duplicate experiment = same cell type, gene, and perturbed region.
  dupe <- any(duplicated(expt[, c("CellType", "GeneSymbol", "chrPerturbationTarget",
                                  "startPerturbationTarget", "endPerturbationTarget")]))
  if (dupe) {
    stop("The experimental data file contains duplicate experiments!", call. = FALSE)
  }

  # The positive column must contain only TRUE/FALSE (or only 0/1).
  # [[ ]] works for both data.frame and data.table, unlike the original
  # data.table-only `expt[, get(...)]` form.
  reg.vals <- sort(unique(expt[[opt$experimentalPositiveColumn]]))
  if (!(all(reg.vals %in% c(FALSE, TRUE)) | all(reg.vals %in% c(0, 1)))) {
    stop("The experimental data column must contain TRUE/FALSE", call. = FALSE)
  }
  if (length(reg.vals) == 1) {
    print("Note: all values are either positives or negatives. Plotting code will fail, but merged prediction/experimental table will be output.")
  }
}
qcPrediction <- function(pred.list, pred.config) {
  # QC the prediction datasets: the configured fill value for every scored
  # column must sit at (or beyond) the extreme end of the column's observed
  # range, so filled-in missing predictions rank as least confident.
  #
  # pred.list:   named list of prediction tables
  # pred.config: data.frame with columns pred.col, fill.val, lowerIsMoreConfident
  print("Running QC on predictions")

  # Validate one column. For inverted scores (lower = more confident) the fill
  # value must be >= the observed maximum; otherwise it must be <= the minimum.
  # NOTE(review): the original compared against pmin(colData) in both branches,
  # which with a single argument is an elementwise no-op rather than a range
  # extreme (hence the suppressWarnings around a vector-valued `if`).
  qcCol <- function(col.name, col.data, fill.val, is.inverted) {
    print(col.name)
    col.data <- col.data[!is.na(col.data)]
    if (length(col.data) == 0) return(invisible(NULL))
    is.bad <- if (is.inverted) fill.val < max(col.data) else fill.val > min(col.data)
    if (is.bad) {
      stop(paste0("Fill val for column ", col.name,
                  " is not at the extreme of its range!", fill.val),
           call. = FALSE)
    }
  }

  doOnePred <- function(pred, config) {
    # Only check configured columns that actually appear in this dataset.
    this.cols <- intersect(colnames(pred), config$pred.col)
    lapply(this.cols, function(s) {
      cfg <- subset(config, pred.col == s)
      qcCol(s, pred[[s]], cfg$fill.val, cfg$lowerIsMoreConfident)
    })
  }

  dummy <- lapply(pred.list, function(s) doOnePred(s, config = pred.config))
  invisible(dummy)
}
checkExistenceOfExperimentalGenesInPredictions <- function(expt, pred.list, outdir) {
  # For each prediction dataset, record which experimentally tested genes
  # appear in it at all; write the gene-by-dataset logical table to disk.
  experimentalGenes <- unique(expt$GeneSymbol)
  present <- sapply(pred.list, function(pred.df) {
    experimentalGenes %in% unique(pred.df$GeneSymbol)
  })
  gene.table <- cbind(experimentalGenes, as.data.table(present))
  write.table(gene.table,
              file.path(outdir, "ExperimentalGenesAppearingInPredictions.txt"),
              sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE)
}
# Overlap the experimental data with every prediction dataset (via
# combineSingleExptPred) and merge the per-predictor tables into one wide
# table keyed on the experiment-identifying columns. Writes the merged table
# to <outdir>/expt.pred.txt and returns it.
#
# expt:         experimental data table
# pred.list:    named list of prediction tables (names label the predictors)
# config:       per-prediction-column config (fill values, aggregation funcs)
# cellMapping:  CellType renaming table applied to predictions
# outdir:       output directory
# fill.missing: if TRUE, tested elements lacking a prediction are kept and
#               filled with configured fill values
combineAllExptPred <- function(expt, pred.list, config, cellMapping, outdir, fill.missing) {
merged.list <- lapply(names(pred.list), function(s) combineSingleExptPred(expt = expt,
pred = pred.list[[s]],
pred.name = s,
config = config,
cellMapping = cellMapping,
outdir = outdir,
fill.missing = fill.missing))
# Columns identifying one experiment; a full outer merge on these keys
# aligns all predictors for the same experiment on one row (predictor score
# columns are already suffixed per dataset and so never collide).
merge.by.cols <- c('chrPerturbationTarget', 'startPerturbationTarget',
'endPerturbationTarget', 'GeneSymbol', "startTSS","endTSS", 'CellType', 'Significant', 'Regulated', 'EffectSize','IncludeInModel')
if ('class' %in% colnames(expt)) merge.by.cols <- c(merge.by.cols, "class")
merged <- Reduce(function(x, y) merge(x, y, by = merge.by.cols, all=TRUE), merged.list)
write.table(merged, file.path(outdir, "expt.pred.txt"), sep = "\t", quote = F, col.names = T, row.names = F)
return(merged)
}
# Overlap one prediction dataset with the experimental data.
# Matches perturbed regions to predicted elements by genomic overlap within
# the same CellType and GeneSymbol (encoded into the GRanges seqnames), then
# collapses multi-overlaps and optionally fills in experiments that have no
# overlapping prediction.
#
# Returns the merged table with the prediction columns renamed to
# "<pred.name>.<pred.col>".
combineSingleExptPred <- function(expt, pred, pred.name, config, cellMapping, outdir, fill.missing=TRUE) {
#Subset config to columns that actually appear. Otherwise code will fail
print(paste0("Overlapping predictions for predictor: ", pred.name))
config <- subset(config, pred.col %in% colnames(pred))
# NOTE(review): reads the global `opt` rather than a parameter — this
# function only works when called from the script that defines `opt`.
if (opt$cellNameMapping != "") pred <- applyCellTypeNameMapping(pred, cellMapping)
# pred <- subset(pred, CellType %in% c("K562","BLD.K562.CNCR"))
# pred$CellType <- "K562"
# Encode CellType:chr:Gene into the seqnames so findOverlaps only pairs
# intervals from the same cell type, chromosome and gene.
pred.gr <- with(pred, GRanges(paste0(CellType,":",chrElement,":",GeneSymbol), IRanges(startElement, endElement)))
expt.gr <- with(expt, GRanges(paste0(CellType,":",chrPerturbationTarget,":",GeneSymbol), IRanges(startPerturbationTarget, endPerturbationTarget)))
ovl <- GenomicRanges::findOverlaps(expt.gr, pred.gr)
#Merge predictions with experimental data
merged <- cbind(expt[queryHits(ovl)], pred[subjectHits(ovl), config$pred.col, with = F])
#Sometimes a perturbed element will overlap multiple model elements (eg in the case of a large deletion)
#In these cases need to summarize, Eg sum ABC.Score across model elements overlapping the deletion
#This requires a config file describing how each prediction column should be aggregated
agg.cols <- c("chrPerturbationTarget","startPerturbationTarget","endPerturbationTarget","GeneSymbol","startTSS","endTSS","CellType","Significant","Regulated","EffectSize",'IncludeInModel') #"class",
merged <- collapseEnhancersOverlappingMultiplePredictions(merged, config, agg.cols)
#Experimental data missing predictions
#A tested enhancer element may not have a prediction
#For ABC this is typically the case if the tested element does not overlap a DHS peak.
#In this case we need to fill the predictions table
expt.missing.predictions <- expt[setdiff(seq(nrow(expt)), queryHits(ovl)),]
dir.create(file.path(outdir, "experimentalDataMissingPredictions", pred.name), recursive = TRUE)
write.table(expt.missing.predictions, file.path(outdir, "experimentalDataMissingPredictions", pred.name, "expt.missing.predictions.txt"), sep="\t", quote=F, col.names=T, row.names=F)
#print("The following experimental data is not present in predictions file: ")
#print(expt.missing.predictions[, ..agg.cols])
if (fill.missing) {
expt.missing.predictions <- fillMissingPredictions(expt.missing.predictions, config, agg.cols)
cols.we.want <- c(agg.cols, config$pred.col) #'class'
merged <- rbind(merged, expt.missing.predictions[, ..cols.we.want], fill=TRUE)
print("Experimental data missing predictions filled. Will be considered in PR curve!")
print(expt.missing.predictions[, ..cols.we.want])
} else {
print("Experimental data missing predictions ignored. Will not be considered in PR curve!")
print(expt.missing.predictions)
}
#Rename new columns based on prediction dataset name
colnames(merged)[colnames(merged) %in% config$pred.col] <- paste0(pred.name, ".", colnames(merged)[colnames(merged) %in% config$pred.col])
return(merged)
}
fillMissingPredictions <- function(df, config, agg.cols) {
  # Fill prediction columns for tested elements that had no overlapping
  # prediction, using the per-column fill values from the config.
  #
  # df:       experimental rows lacking predictions
  # config:   data.frame with pred.col / fill.val columns
  # agg.cols: experiment-identifying columns expected downstream
  for (ii in seq_len(nrow(config))) {
    df[, config$pred.col[[ii]]] <- config$fill.val[ii]
  }
  # Flag any expected identifying columns the table lacks.
  # NOTE: the original excluded `config$pred.cols` (trailing "s"), a
  # nonexistent column — harmlessly NULL, but a typo; the prediction columns
  # were just added above so they are in colnames(df) either way.
  unk.cols <- setdiff(c('class', agg.cols), unique(c(colnames(df), config$pred.col)))
  if (length(unk.cols) > 0) {
    df[, unk.cols] <- "Merge:UNKNOWN"
  }
  return(df)
}
# Collapse rows where one perturbed element overlapped multiple predicted
# elements (e.g. a large deletion spanning several model enhancers): each
# prediction column is aggregated per experiment using the function named in
# the config (config$agg.func), and the 'class' column (if present) is
# reduced to a single label by priority.
collapseEnhancersOverlappingMultiplePredictions <- function(df, config, agg.cols) {
#Summarize columns as defined in config
list.for.agg <- as.list(df[, ..agg.cols])
all.list <- mapply(function(pred.col, agg.func) aggregate(df[, ..pred.col], by = list.for.agg, FUN = agg.func),
config$pred.col, config$agg.func, SIMPLIFY=F)
#Special handling for aggregating the class column
# Pick the highest-priority class label present among the overlapped
# elements (promoter > tss > genic > distal > intergenic).
class.agg <- function(x) {
if ("promoter" %in% x) {
return("promoter")
} else if ("tss" %in% x) {
return("tss")
} else if ("genic" %in% x) {
return("genic")
} else if ("distal" %in% x) {
return("distal")
} else if ("intergenic" %in% x) {
return("intergenic")
} else {
return("UNKNOWN")
}
}
if ('class' %in% colnames(df)) {
class.temp <- aggregate(df$class, by = list.for.agg, FUN = class.agg)
colnames(class.temp)[colnames(class.temp) == 'x'] <- 'class'
all.list$class <- class.temp
}
#Merge all the aggregates together to make collapsed dataframe
full.result <- Reduce(function(df1, df2) merge(df1, df2, by = agg.cols), all.list)
return(full.result)
}
prepForPlotting <- function(df) {
  # Assign a scatter-plot color class per experiment: significant positives
  # are "Activating", other significant effects "Repressive", the rest
  # "Not Significant".
  labels <- ifelse(df$Significant, "Repressive", "Not Significant")
  labels <- ifelse(df$Regulated, "Activating", labels)
  df$scatterplot.color <- labels
  return(df)
}
# Drive all evaluation output: scatter plots, precision-recall objects,
# the PR-curve table, the PR summary (AUC/cutoff/maxF1), TP/TN/FP/FN labels,
# and per-config PR-curve plots.
#
# inverse.predictors: merged-table columns where lower = more confident;
#                     they are sign-flipped for PR computation and flipped
#                     back afterwards (so statement order matters here).
# pos.col:            name of the boolean positive-label column.
makePlots <- function(merged, config, inverse.predictors, pos.col, outdir, min.sensitivity = .7) {
#config <- fread(config)
# config$pred.cols holds comma-separated column lists per plot; flatten.
pred.cols <- unique(unlist(lapply(config$pred.cols, function(s) {strsplit(s,",")[[1]]})))
pred.cols <- intersect(pred.cols, colnames(merged))
#Make scatter plots
lapply(pred.cols, function(s) {makeScatterPlot(merged, s, "EffectSize", outdir)})
merged <- as.data.table(merged) #seems to be necessary to run in interactive session on compute node
#Hack for predictors where lower values are more confident (eg genomic distance, pvalue)
#Multiply these by -1
inverse.predictors <- intersect(inverse.predictors, colnames(merged))
if (length(inverse.predictors) > 0) merged[, inverse.predictors] <- -1*merged[, ..inverse.predictors]
#Compute performance objects
pr <- sapply(pred.cols,
function(s) {performance(prediction(as.numeric(unlist(merged[, ..s])),
unlist(merged[, ..pos.col])),
measure="prec", x.measure="rec")})
# Undo the sign flip so the annotated output table has the original values.
if (length(inverse.predictors) > 0) merged[, inverse.predictors] <- -1*merged[, ..inverse.predictors]
pr.df <- pr2df(pr)
pr.df$F1 <- with(pr.df, 2 / ((1/precision) + (1/recall)))
write.table(pr.df, file.path(outdir, "pr.curve.txt"), sep = "\t", quote = F, col.names = T, row.names = F)
#write PR summary table (AUC, cutoff, etc)
perf.summary <- makePRSummaryTable(pr, min.sensitivity, outdir)
#Assign prediction class and write output
merged <- addPredictionClassLabels(merged, perf.summary)
write.table(merged, file.path(outdir, "expt.pred.annotated.txt"), sep = "\t", quote = F, col.names = T, row.names = F)
#Make PR curve plots
# Baseline precision = fraction of positives; drawn as a dashed line.
pct.pos <- sum(unlist(merged[, ..pos.col]))/nrow(merged)
for (ii in seq(nrow(config))) {
makePRCurvePlot(pr.df, config$plot.name[[ii]], config$pred.cols[[ii]], outdir, pct.pos = pct.pos)
}
}
makeScatterPlot <- function(df, x.col, y.col, outdir) {
  # Scatter plot of one predictor column against the effect size, colored by
  # the class assigned in prepForPlotting(); saved as both PDF and EPS.
  point.colors <- c("Activating" = "red",
                    "Repressive" = "blue",
                    "Not Significant" = "gray")
  g <- ggplot(df, aes(x = get(x.col), y = get(y.col), color = scatterplot.color)) +
    geom_point() +
    scale_color_manual(values = point.colors) +
    labs(x = x.col, y = y.col, color = "")
  base.name <- paste0(x.col, ".", y.col, ".scatter")
  ggsave(file.path(outdir, paste0(base.name, ".pdf")), g, device = "pdf")
  ggsave(file.path(outdir, paste0(base.name, ".eps")), g, device = "eps")
}
# Plot precision-recall curves for one configured group of predictors.
# Continuous predictors are drawn as lines; boolean predictors (those whose
# cutoff values are exactly {Inf, 1, 0}) are drawn as single points at
# alpha == 1. A dashed horizontal line marks the baseline precision
# (fraction of positives).
makePRCurvePlot <- function(pr.df, plot.name, col.list, outdir, pct.pos) {
# col.list is a comma-separated string of predictor column names.
col.list <- strsplit(as.character(col.list), ",")[[1]]
pr.df <- subset(pr.df, pred.col %in% col.list)
#separate boolean predictors from continuous predictors
pr.cutoff <- by(pr.df, pr.df$pred.col, function(df) unique(df$alpha))
boolean.predictors <- names(pr.cutoff)[unlist(lapply(pr.cutoff, function(s) identical(s, c(Inf, 1, 0))), use.names=F)]
cont.pred <- subset(pr.df, !(pred.col %in% boolean.predictors))
bool.pred <- subset(pr.df, pred.col %in% boolean.predictors)
bool.pred <- subset(bool.pred, alpha == 1)
g <- ggplot(cont.pred,
aes(x = recall,
y = precision,
color = pred.col)) +
geom_line() +
labs(title = plot.name, color = "") +
coord_cartesian(xlim = c(0,1), ylim = c(0,1)) +
geom_hline(yintercept = pct.pos, linetype = 2, color = 'black')
if (nrow(bool.pred) > 0) {
g <- g + geom_point(data = bool.pred,
size = 3)
}
ggsave(file.path(outdir, paste0(plot.name, ".pr.pdf")), g, device = "pdf")
ggsave(file.path(outdir, paste0(plot.name, ".pr.eps")), g, device = "eps")
}
pr2df <- function(pr) {
  # Flatten a named list of ROCR performance objects into one long table with
  # columns alpha (score cutoff), precision, recall and pred.col (list name).
  extractOne <- function(perf) {
    as.data.frame(list(
      alpha = perf@alpha.values[[1]],
      precision = perf@y.values[[1]],
      recall = perf@x.values[[1]]
    ))
  }
  per.pred <- lapply(pr, extractOne)
  for (ii in seq_along(per.pred)) {
    per.pred[[ii]]$pred.col <- names(per.pred)[ii]
  }
  return(rbindlist(per.pred))
}
# Summarize each predictor's PR performance into one table (predictor,
# min.sensitivity, cutoff, AUPRC, maxF1) and write it to
# <outdir>/pr.summary.txt.
makePRSummaryTable <- function(pr, min.sensitivity = .7, outdir) {
#compute AUC
#the head() calls here remove the last element of the vector.
#The point is that performance objects produced by ROCR always include a Recall=100% point even if the predictor cannot achieve a recall of 100%
#This results in a straight line ending at (1,0) on the PR curve. This should not be included in the AUC computation.
auc <- lapply(pr, function(s) computeAUC(head(s@x.values[[1]], -1),
head(s@y.values[[1]], -1)))
cutoff <- lapply(pr, function(s) computeCutoffGivenDesiredSensitivity(s, min.sensitivity))
# Best F1 over all cutoffs (harmonic mean of recall and precision).
max.F1 <- lapply(pr, function(s) max(2 / ((1/s@x.values[[1]]) + (1/s@y.values[[1]])), na.rm = T))
# Build rows then transpose so each predictor becomes one row.
perf.summary <- rbindlist(list(cutoff = as.list(as.numeric(cutoff)),
AUC = as.list(as.numeric(auc)),
maxF1 = as.list(as.numeric(max.F1))))
perf.summary <- t(perf.summary)
# NOTE: cbind with character names coerces every column to character;
# downstream callers convert cutoff back with as.numeric().
perf.summary <- as.data.table(cbind(names(pr), min.sensitivity, perf.summary))
colnames(perf.summary) <- c("predictor", "min.sensitivity", "cutoff", "AUPRC","maxF1")
write.table(perf.summary, file.path(outdir, "pr.summary.txt"), sep='\t', quote = F, row.names = F, col.names = T)
return(perf.summary)
}
computeAUC <- function(x.vals, y.vals) {
  # Trapezoidal area under the curve, skipping points where either
  # coordinate is NA. trapz() comes from an attached package (pracma/caTools).
  keep <- !is.na(x.vals) & !is.na(y.vals)
  trapz(x.vals[keep], y.vals[keep])
}
computeCutoffGivenDesiredSensitivity <- function(pr, min.sensitivity) {
  # Find the score cutoff that achieves at least `min.sensitivity` recall:
  # take the smallest recall >= min.sensitivity, and among points at exactly
  # that recall pick the one with the best precision; return its alpha.
  #
  # pr: a ROCR performance object (precision vs. recall)
  # Returns NA if the desired sensitivity is unattainable.
  sens <- pr@x.values[[1]]
  prec <- pr@y.values[[1]]
  eligible <- sens[sens >= min.sensitivity]
  if (length(eligible) == 0) return(NA_real_)  # target recall unattainable
  cutoff.sensitivity <- min(eligible)
  # All points at the chosen recall. (The original used which.max(), which
  # returns only the first match and made the precision tie-break below a
  # no-op.)
  idx <- which(sens == cutoff.sensitivity)
  idx2 <- idx[which.max(prec[idx])]
  pr@alpha.values[[1]][idx2]
}
addPredictionClassLabels <- function(merged, perf.summary) {
  # Add a TP/TN/FP/FN label column for every predictor, using the
  # per-predictor cutoffs computed by makePRSummaryTable().
  for (row.idx in seq_len(nrow(perf.summary))) {
    this.cutoff <- as.numeric(perf.summary$cutoff[[row.idx]])
    merged <- addOneLabel(merged, this.cutoff, perf.summary$predictor[[row.idx]])
  }
  return(merged)
}
# Label each row TP/TN/FP/FN for one predictor: score > cutoff counts as a
# positive call, compared against the boolean column `pos.col`.
# The new column is "<score.col>.pred.class"; rows with a missing score keep
# the literal string "NA" (not an NA value) so it prints as-is in the output
# table. `df[, ..col]` is data.table syntax — df must be a data.table.
addOneLabel <- function(df, cutoff, score.col, pos.col = "Regulated") {
label.name <- paste0(score.col, ".pred.class")
df[, label.name] <- "NA"
#browser()
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] > cutoff & df[, ..pos.col]), label.name] <- "TP"
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] <= cutoff & !df[, ..pos.col]), label.name] <- "TN"
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] > cutoff & !df[, ..pos.col]), label.name] <- "FP"
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] <= cutoff & df[, ..pos.col]), label.name] <- "FN"
return(df)
}
getInversePredictors <- function(pred.list, pred.config) {
  # Return the merged-table column names ("<dataset>.<pred.col>") for
  # predictors where lower values mean higher confidence (e.g. distance,
  # p-value), so callers can sign-flip them before computing PR curves.
  #
  # NOTE(review): the original body read the globals `predConfig` and
  # `pred.table` and ignored its own arguments; this uses the parameters.
  # names(pred.list) equals pred.table$name (set in loadPredictions).
  inv.pred <- subset(pred.config, lowerIsMoreConfident)$pred.col
  inv.cols <- with(expand.grid(names(pred.list), inv.pred), paste0(Var1, ".", Var2))
  return(inv.cols)
}
applyCellTypeNameMapping <- function(df, cellMapping) {
  # Rename prediction CellType values so they match the experimental data.
  # Each row of cellMapping maps a comma-separated set of source names
  # (`from`) onto a single target name (`to`).
  for (row.idx in seq_len(nrow(cellMapping))) {
    source.names <- strsplit(cellMapping$from[row.idx], split = ",")[[1]]
    target.name <- cellMapping$to[row.idx]
    df$CellType[df$CellType %in% source.names] <- target.name
  }
  return(df)
}
writeExptSummary <- function(df, outdir) {
  # Write a one-row tab-delimited summary of the experimental data: total
  # connections, how many are used in the model, and how many of those are
  # regulated positives.
  summary.row <- data.frame(
    numConnections = nrow(df),
    numIncludeInModel = sum(df$IncludeInModel),
    numIncludeInModelRegulated = sum(df$IncludeInModel & df$Regulated)
  )
  write.table(summary.row, file.path(outdir, "expt.summary.txt"),
              sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE)
}
fread_ignore_empty <- function(f, ...) {
  # Best-effort fread(): returns NULL (after printing a message) instead of
  # erroring when the file is missing, unreadable, or empty.
  result <- tryCatch(
    fread(f, fill = TRUE, ...),
    error = function(e) {
      print(paste0("Could not open file: ", f))
      NULL
    }
  )
  return(result)
}
fread_gz_ignore_empty <- function(f, ...) {
  # Best-effort fread() for gzipped input, streaming through an external
  # gunzip; returns NULL (after printing a message) on failure.
  result <- tryCatch(
    fread(paste0("gunzip -c ", f), ...),
    error = function(e) {
      print(paste0("Could not open file: ", f))
      NULL
    }
  )
  return(result)
}
smart_fread <- function(f, ...) {
  # Read a delimited file with fread(), transparently handling gzip input.
  # file() sniffs the magic bytes, so compression is detected by content,
  # not by file extension.
  con <- file(f)
  # Close only the connection we opened. The original leaked this connection
  # and then called closeAllConnections(), which also tears down unrelated
  # user connections — a hidden side effect for callers.
  on.exit(close(con), add = TRUE)
  if (summary(con)$class == "gzfile") {
    out <- fread_gz_ignore_empty(f, ...)
  } else {
    out <- fread_ignore_empty(f, ...)
  }
  return(out)
}
loadDelimited <- function(file.list) {
  # Read every file in file.list and stack the results into one table,
  # filling columns that are absent from some files.
  tables <- lapply(file.list, smart_fread)
  return(rbindlist(tables, fill = TRUE))
}
loadFileString <- function(file.str, delim = ",") {
  # Split a delimited string of paths (e.g. "a.txt,b.txt") and load all of
  # them as one stacked table.
  paths <- strsplit(file.str, split = delim)[[1]]
  return(loadDelimited(paths))
}
loadPredictions <- function(pred.table) {
  # Load every prediction dataset listed in pred.table (columns: name, path;
  # path may itself be a comma-separated list of files). Returns a named
  # list of tables keyed by dataset name.
  pred.list <- lapply(pred.table$path, function(path.str) {
    print(paste0("Loading dataset: ", path.str))
    dataset <- loadFileString(path.str)
    print(paste0("Dataset loaded with ", nrow(dataset), " rows"))
    dataset
  })
  names(pred.list) <- pred.table$name
  return(pred.list)
}
| /comparePredictorsToCRISPRData/code/comparison.R | no_license | andy3nieto/ABC-GWAS-Paper | R | false | false | 17,874 | r | library(GenomicRanges)
qcExpt <- function(expt, opt) {
  # QC the experimental data: no duplicate experiments, and the positive-label
  # column must be logical (TRUE/FALSE) or binary (0/1).
  #
  # expt: experimental data (data.frame or data.table), one row per experiment
  # opt:  options list; opt$experimentalPositiveColumn names the label column
  #
  # Stops with an error on QC failure; otherwise only prints status messages.
  print("Running QC on experimental data")
  expt <- subset(expt, IncludeInModel)

  # A duplicate experiment = same cell type, gene, and perturbed region.
  dupe <- any(duplicated(expt[, c("CellType", "GeneSymbol", "chrPerturbationTarget",
                                  "startPerturbationTarget", "endPerturbationTarget")]))
  if (dupe) {
    stop("The experimental data file contains duplicate experiments!", call. = FALSE)
  }

  # The positive column must contain only TRUE/FALSE (or only 0/1).
  # [[ ]] works for both data.frame and data.table, unlike the original
  # data.table-only `expt[, get(...)]` form.
  reg.vals <- sort(unique(expt[[opt$experimentalPositiveColumn]]))
  if (!(all(reg.vals %in% c(FALSE, TRUE)) | all(reg.vals %in% c(0, 1)))) {
    stop("The experimental data column must contain TRUE/FALSE", call. = FALSE)
  }
  if (length(reg.vals) == 1) {
    print("Note: all values are either positives or negatives. Plotting code will fail, but merged prediction/experimental table will be output.")
  }
}
qcPrediction <- function(pred.list, pred.config) {
  # QC the prediction datasets: the configured fill value for every scored
  # column must sit at (or beyond) the extreme end of the column's observed
  # range, so filled-in missing predictions rank as least confident.
  #
  # pred.list:   named list of prediction tables
  # pred.config: data.frame with columns pred.col, fill.val, lowerIsMoreConfident
  print("Running QC on predictions")

  # Validate one column. For inverted scores (lower = more confident) the fill
  # value must be >= the observed maximum; otherwise it must be <= the minimum.
  # NOTE(review): the original compared against pmin(colData) in both branches,
  # which with a single argument is an elementwise no-op rather than a range
  # extreme (hence the suppressWarnings around a vector-valued `if`).
  qcCol <- function(col.name, col.data, fill.val, is.inverted) {
    print(col.name)
    col.data <- col.data[!is.na(col.data)]
    if (length(col.data) == 0) return(invisible(NULL))
    is.bad <- if (is.inverted) fill.val < max(col.data) else fill.val > min(col.data)
    if (is.bad) {
      stop(paste0("Fill val for column ", col.name,
                  " is not at the extreme of its range!", fill.val),
           call. = FALSE)
    }
  }

  doOnePred <- function(pred, config) {
    # Only check configured columns that actually appear in this dataset.
    this.cols <- intersect(colnames(pred), config$pred.col)
    lapply(this.cols, function(s) {
      cfg <- subset(config, pred.col == s)
      qcCol(s, pred[[s]], cfg$fill.val, cfg$lowerIsMoreConfident)
    })
  }

  dummy <- lapply(pred.list, function(s) doOnePred(s, config = pred.config))
  invisible(dummy)
}
checkExistenceOfExperimentalGenesInPredictions <- function(expt, pred.list, outdir) {
  # For each prediction dataset, record which experimentally tested genes
  # appear in it at all; write the gene-by-dataset logical table to disk.
  experimentalGenes <- unique(expt$GeneSymbol)
  present <- sapply(pred.list, function(pred.df) {
    experimentalGenes %in% unique(pred.df$GeneSymbol)
  })
  gene.table <- cbind(experimentalGenes, as.data.table(present))
  write.table(gene.table,
              file.path(outdir, "ExperimentalGenesAppearingInPredictions.txt"),
              sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE)
}
# Overlap the experimental data with every prediction dataset (via
# combineSingleExptPred) and merge the per-predictor tables into one wide
# table keyed on the experiment-identifying columns. Writes the merged table
# to <outdir>/expt.pred.txt and returns it.
#
# expt:         experimental data table
# pred.list:    named list of prediction tables (names label the predictors)
# config:       per-prediction-column config (fill values, aggregation funcs)
# cellMapping:  CellType renaming table applied to predictions
# outdir:       output directory
# fill.missing: if TRUE, tested elements lacking a prediction are kept and
#               filled with configured fill values
combineAllExptPred <- function(expt, pred.list, config, cellMapping, outdir, fill.missing) {
merged.list <- lapply(names(pred.list), function(s) combineSingleExptPred(expt = expt,
pred = pred.list[[s]],
pred.name = s,
config = config,
cellMapping = cellMapping,
outdir = outdir,
fill.missing = fill.missing))
# Columns identifying one experiment; a full outer merge on these keys
# aligns all predictors for the same experiment on one row (predictor score
# columns are already suffixed per dataset and so never collide).
merge.by.cols <- c('chrPerturbationTarget', 'startPerturbationTarget',
'endPerturbationTarget', 'GeneSymbol', "startTSS","endTSS", 'CellType', 'Significant', 'Regulated', 'EffectSize','IncludeInModel')
if ('class' %in% colnames(expt)) merge.by.cols <- c(merge.by.cols, "class")
merged <- Reduce(function(x, y) merge(x, y, by = merge.by.cols, all=TRUE), merged.list)
write.table(merged, file.path(outdir, "expt.pred.txt"), sep = "\t", quote = F, col.names = T, row.names = F)
return(merged)
}
# Overlap one prediction dataset with the experimental data.
# Matches perturbed regions to predicted elements by genomic overlap within
# the same CellType and GeneSymbol (encoded into the GRanges seqnames), then
# collapses multi-overlaps and optionally fills in experiments that have no
# overlapping prediction.
#
# Returns the merged table with the prediction columns renamed to
# "<pred.name>.<pred.col>".
combineSingleExptPred <- function(expt, pred, pred.name, config, cellMapping, outdir, fill.missing=TRUE) {
#Subset config to columns that actually appear. Otherwise code will fail
print(paste0("Overlapping predictions for predictor: ", pred.name))
config <- subset(config, pred.col %in% colnames(pred))
# NOTE(review): reads the global `opt` rather than a parameter — this
# function only works when called from the script that defines `opt`.
if (opt$cellNameMapping != "") pred <- applyCellTypeNameMapping(pred, cellMapping)
# pred <- subset(pred, CellType %in% c("K562","BLD.K562.CNCR"))
# pred$CellType <- "K562"
# Encode CellType:chr:Gene into the seqnames so findOverlaps only pairs
# intervals from the same cell type, chromosome and gene.
pred.gr <- with(pred, GRanges(paste0(CellType,":",chrElement,":",GeneSymbol), IRanges(startElement, endElement)))
expt.gr <- with(expt, GRanges(paste0(CellType,":",chrPerturbationTarget,":",GeneSymbol), IRanges(startPerturbationTarget, endPerturbationTarget)))
ovl <- GenomicRanges::findOverlaps(expt.gr, pred.gr)
#Merge predictions with experimental data
merged <- cbind(expt[queryHits(ovl)], pred[subjectHits(ovl), config$pred.col, with = F])
#Sometimes a perturbed element will overlap multiple model elements (eg in the case of a large deletion)
#In these cases need to summarize, Eg sum ABC.Score across model elements overlapping the deletion
#This requires a config file describing how each prediction column should be aggregated
agg.cols <- c("chrPerturbationTarget","startPerturbationTarget","endPerturbationTarget","GeneSymbol","startTSS","endTSS","CellType","Significant","Regulated","EffectSize",'IncludeInModel') #"class",
merged <- collapseEnhancersOverlappingMultiplePredictions(merged, config, agg.cols)
#Experimental data missing predictions
#A tested enhancer element may not have a prediction
#For ABC this is typically the case if the tested element does not overlap a DHS peak.
#In this case we need to fill the predictions table
expt.missing.predictions <- expt[setdiff(seq(nrow(expt)), queryHits(ovl)),]
dir.create(file.path(outdir, "experimentalDataMissingPredictions", pred.name), recursive = TRUE)
write.table(expt.missing.predictions, file.path(outdir, "experimentalDataMissingPredictions", pred.name, "expt.missing.predictions.txt"), sep="\t", quote=F, col.names=T, row.names=F)
#print("The following experimental data is not present in predictions file: ")
#print(expt.missing.predictions[, ..agg.cols])
if (fill.missing) {
expt.missing.predictions <- fillMissingPredictions(expt.missing.predictions, config, agg.cols)
cols.we.want <- c(agg.cols, config$pred.col) #'class'
merged <- rbind(merged, expt.missing.predictions[, ..cols.we.want], fill=TRUE)
print("Experimental data missing predictions filled. Will be considered in PR curve!")
print(expt.missing.predictions[, ..cols.we.want])
} else {
print("Experimental data missing predictions ignored. Will not be considered in PR curve!")
print(expt.missing.predictions)
}
#Rename new columns based on prediction dataset name
colnames(merged)[colnames(merged) %in% config$pred.col] <- paste0(pred.name, ".", colnames(merged)[colnames(merged) %in% config$pred.col])
return(merged)
}
fillMissingPredictions <- function(df, config, agg.cols) {
  # Fill prediction columns for tested elements that had no overlapping
  # prediction, using the per-column fill values from the config.
  #
  # df:       experimental rows lacking predictions
  # config:   data.frame with pred.col / fill.val columns
  # agg.cols: experiment-identifying columns expected downstream
  for (ii in seq_len(nrow(config))) {
    df[, config$pred.col[[ii]]] <- config$fill.val[ii]
  }
  # Flag any expected identifying columns the table lacks.
  # NOTE: the original excluded `config$pred.cols` (trailing "s"), a
  # nonexistent column — harmlessly NULL, but a typo; the prediction columns
  # were just added above so they are in colnames(df) either way.
  unk.cols <- setdiff(c('class', agg.cols), unique(c(colnames(df), config$pred.col)))
  if (length(unk.cols) > 0) {
    df[, unk.cols] <- "Merge:UNKNOWN"
  }
  return(df)
}
# Collapse rows where one perturbed element overlapped multiple predicted
# elements (e.g. a large deletion spanning several model enhancers): each
# prediction column is aggregated per experiment using the function named in
# the config (config$agg.func), and the 'class' column (if present) is
# reduced to a single label by priority.
collapseEnhancersOverlappingMultiplePredictions <- function(df, config, agg.cols) {
#Summarize columns as defined in config
list.for.agg <- as.list(df[, ..agg.cols])
all.list <- mapply(function(pred.col, agg.func) aggregate(df[, ..pred.col], by = list.for.agg, FUN = agg.func),
config$pred.col, config$agg.func, SIMPLIFY=F)
#Special handling for aggregating the class column
# Pick the highest-priority class label present among the overlapped
# elements (promoter > tss > genic > distal > intergenic).
class.agg <- function(x) {
if ("promoter" %in% x) {
return("promoter")
} else if ("tss" %in% x) {
return("tss")
} else if ("genic" %in% x) {
return("genic")
} else if ("distal" %in% x) {
return("distal")
} else if ("intergenic" %in% x) {
return("intergenic")
} else {
return("UNKNOWN")
}
}
if ('class' %in% colnames(df)) {
class.temp <- aggregate(df$class, by = list.for.agg, FUN = class.agg)
colnames(class.temp)[colnames(class.temp) == 'x'] <- 'class'
all.list$class <- class.temp
}
#Merge all the aggregates together to make collapsed dataframe
full.result <- Reduce(function(df1, df2) merge(df1, df2, by = agg.cols), all.list)
return(full.result)
}
prepForPlotting <- function(df) {
  # Assign each element a display category for the scatter plots:
  # regulated -> "Activating"; significant but not regulated -> "Repressive";
  # everything else -> "Not Significant".
  is.reg <- df$Regulated
  is.sig <- df$Significant
  df$scatterplot.color <- ifelse(is.reg, "Activating",
                                 ifelse(is.sig, "Repressive", "Not Significant"))
  return(df)
}
makePlots <- function(merged, config, inverse.predictors, pos.col, outdir, min.sensitivity = .7) {
# Compute precision-recall performance for every prediction column named in
# `config`, write the PR curve / summary / annotated tables to `outdir`, and
# produce scatter and PR-curve plots. `pos.col` names the boolean column
# marking true positives (e.g. "Regulated").
#config <- fread(config)
# config$pred.cols holds comma-separated column lists; flatten and keep only
# the columns actually present in `merged`.
pred.cols <- unique(unlist(lapply(config$pred.cols, function(s) {strsplit(s,",")[[1]]})))
pred.cols <- intersect(pred.cols, colnames(merged))
#Make scatter plots
lapply(pred.cols, function(s) {makeScatterPlot(merged, s, "EffectSize", outdir)})
merged <- as.data.table(merged) #seems to be necessary to run in interactive session on compute node
#Hack for predictors where lower values are more confident (eg genomic distance, pvalue)
#Multiply these by -1
inverse.predictors <- intersect(inverse.predictors, colnames(merged))
if (length(inverse.predictors) > 0) merged[, inverse.predictors] <- -1*merged[, ..inverse.predictors]
#Compute performance objects
# One ROCR performance object (precision vs recall) per prediction column.
pr <- sapply(pred.cols,
function(s) {performance(prediction(as.numeric(unlist(merged[, ..s])),
unlist(merged[, ..pos.col])),
measure="prec", x.measure="rec")})
# Undo the negation above so the written tables show the original values.
if (length(inverse.predictors) > 0) merged[, inverse.predictors] <- -1*merged[, ..inverse.predictors]
pr.df <- pr2df(pr)
# F1 is the harmonic mean of precision and recall.
pr.df$F1 <- with(pr.df, 2 / ((1/precision) + (1/recall)))
write.table(pr.df, file.path(outdir, "pr.curve.txt"), sep = "\t", quote = F, col.names = T, row.names = F)
#write PR summary table (AUC, cutoff, etc)
perf.summary <- makePRSummaryTable(pr, min.sensitivity, outdir)
#Assign prediction class and write output
merged <- addPredictionClassLabels(merged, perf.summary)
write.table(merged, file.path(outdir, "expt.pred.annotated.txt"), sep = "\t", quote = F, col.names = T, row.names = F)
#Make PR curve plots
# Fraction of positives: the precision of a random classifier, drawn as a
# horizontal baseline on the PR plots.
pct.pos <- sum(unlist(merged[, ..pos.col]))/nrow(merged)
for (ii in seq(nrow(config))) {
makePRCurvePlot(pr.df, config$plot.name[[ii]], config$pred.cols[[ii]], outdir, pct.pos = pct.pos)
}
}
makeScatterPlot <- function(df, x.col, y.col, outdir) {
  # Scatter plot of two columns of `df`, colored by the regulatory category
  # stored in `scatterplot.color` (see prepForPlotting). Saved to `outdir`
  # as both PDF and EPS.
  category.colors <- c("Activating" = "red",
                       "Repressive" = "blue",
                       "Not Significant" = "gray")
  plt <- ggplot(df, aes(x = get(x.col), y = get(y.col), color = scatterplot.color))
  plt <- plt + geom_point()
  plt <- plt + scale_color_manual(values = category.colors)
  plt <- plt + labs(x = x.col, y = y.col, color = "")
  base.name <- paste0(x.col, ".", y.col, ".scatter")
  ggsave(file.path(outdir, paste0(base.name, ".pdf")), plt, device = "pdf")
  ggsave(file.path(outdir, paste0(base.name, ".eps")), plt, device = "eps")
}
makePRCurvePlot <- function(pr.df, plot.name, col.list, outdir, pct.pos) {
# Plot precision-recall curves for the predictors listed (comma-separated)
# in `col.list`, saving PDF and EPS to `outdir`. `pct.pos` is the fraction
# of positives, drawn as the random-classifier baseline.
col.list <- strsplit(as.character(col.list), ",")[[1]]
pr.df <- subset(pr.df, pred.col %in% col.list)
#separate boolean predictors from continuous predictors
pr.cutoff <- by(pr.df, pr.df$pred.col, function(df) unique(df$alpha))
# A 0/1 predictor yields exactly the cutoff sequence (Inf, 1, 0) from ROCR;
# such predictors are drawn as a single point rather than a curve.
boolean.predictors <- names(pr.cutoff)[unlist(lapply(pr.cutoff, function(s) identical(s, c(Inf, 1, 0))), use.names=F)]
cont.pred <- subset(pr.df, !(pred.col %in% boolean.predictors))
bool.pred <- subset(pr.df, pred.col %in% boolean.predictors)
# Keep only the alpha == 1 point: the performance of calling every 1 a positive.
bool.pred <- subset(bool.pred, alpha == 1)
g <- ggplot(cont.pred,
aes(x = recall,
y = precision,
color = pred.col)) +
geom_line() +
labs(title = plot.name, color = "") +
coord_cartesian(xlim = c(0,1), ylim = c(0,1)) +
geom_hline(yintercept = pct.pos, linetype = 2, color = 'black')
if (nrow(bool.pred) > 0) {
g <- g + geom_point(data = bool.pred,
size = 3)
}
ggsave(file.path(outdir, paste0(plot.name, ".pr.pdf")), g, device = "pdf")
ggsave(file.path(outdir, paste0(plot.name, ".pr.eps")), g, device = "eps")
}
pr2df <- function(pr) {
  # Convert a named list of ROCR performance objects into one long dataframe
  # with columns alpha (cutoff), precision, recall, and pred.col (the name of
  # the predictor the row came from).
  per.predictor <- mapply(function(perf, nm) {
    out <- data.frame(
      alpha = perf@alpha.values[[1]],
      precision = perf@y.values[[1]],
      recall = perf@x.values[[1]]
    )
    out$pred.col <- nm
    out
  }, pr, names(pr), SIMPLIFY = FALSE)
  return(rbindlist(per.predictor))
}
makePRSummaryTable <- function(pr, min.sensitivity = .7, outdir) {
# Build and write a per-predictor summary table with the AUPRC, the score
# cutoff achieving `min.sensitivity`, and the best attainable F1.
#compute AUC
#the head() calls here remove the last element of the vector.
#The point is that performance objects produced by ROCR always include a Recall=100% point even if the predictor cannot achieve a recall of 100%
#This results in a straight line ending at (1,0) on the PR curve. This should not be included in the AUC computation.
auc <- lapply(pr, function(s) computeAUC(head(s@x.values[[1]], -1),
head(s@y.values[[1]], -1)))
cutoff <- lapply(pr, function(s) computeCutoffGivenDesiredSensitivity(s, min.sensitivity))
# Best F1 across all cutoffs (harmonic mean of precision and recall).
max.F1 <- lapply(pr, function(s) max(2 / ((1/s@x.values[[1]]) + (1/s@y.values[[1]])), na.rm = T))
# Assemble metrics as rows, then transpose so each predictor becomes a row.
perf.summary <- rbindlist(list(cutoff = as.list(as.numeric(cutoff)),
AUC = as.list(as.numeric(auc)),
maxF1 = as.list(as.numeric(max.F1))))
perf.summary <- t(perf.summary)
perf.summary <- as.data.table(cbind(names(pr), min.sensitivity, perf.summary))
colnames(perf.summary) <- c("predictor", "min.sensitivity", "cutoff", "AUPRC","maxF1")
write.table(perf.summary, file.path(outdir, "pr.summary.txt"), sep='\t', quote = F, row.names = F, col.names = T)
return(perf.summary)
}
computeAUC <- function(x.vals, y.vals) {
  # Trapezoidal area under the curve, restricted to the points where both
  # coordinates are defined.
  keep <- !is.na(x.vals) & !is.na(y.vals)
  return(trapz(x.vals[keep], y.vals[keep]))
}
computeCutoffGivenDesiredSensitivity <- function(pr, min.sensitivity) {
  # Choose the score cutoff achieving at least `min.sensitivity` recall.
  # Among the cutoffs at the smallest qualifying sensitivity, the one with
  # the highest precision wins.
  sens <- pr@x.values[[1]]
  prec <- pr@y.values[[1]]
  qualifying <- sens[sens >= min.sensitivity]
  if (length(qualifying) == 0) {
    # Previously this fell through with min(numeric(0)) == Inf and silently
    # returned the first cutoff; make the failure explicit instead.
    warning("No cutoff achieves sensitivity >= ", min.sensitivity, call. = FALSE)
    return(NA_real_)
  }
  cutoff.sensitivity <- min(qualifying)
  # Bug fix: which.max() on the logical vector returned only the FIRST index
  # at the target sensitivity, making the precision tie-break below a no-op.
  # which() returns all such indices so the max-precision point is selected.
  idx <- which(sens == cutoff.sensitivity)
  idx2 <- idx[which.max(prec[idx])]
  cutoff <- pr@alpha.values[[1]][idx2]
  return(cutoff)
}
addPredictionClassLabels <- function(merged, perf.summary) {
  # Annotate `merged` with a TP/TN/FP/FN class column for every predictor,
  # using the per-predictor cutoff chosen in `perf.summary`.
  for (row in seq_len(nrow(perf.summary))) {
    this.cutoff <- as.numeric(perf.summary$cutoff[[row]])
    this.predictor <- perf.summary$predictor[[row]]
    merged <- addOneLabel(merged, this.cutoff, this.predictor)
  }
  return(merged)
}
addOneLabel <- function(df, cutoff, score.col, pos.col = "Regulated") {
# Add a confusion-matrix label column (<score.col>.pred.class) classifying
# each row as TP/TN/FP/FN given the score `cutoff`: scores above the cutoff
# are predicted positive, and `pos.col` holds the ground truth.
# NOTE(review): the `..col` selection syntax assumes `df` is a data.table --
# confirm at the call site.
label.name <- paste0(score.col, ".pred.class")
# Rows with a missing score keep the literal string "NA".
df[, label.name] <- "NA"
#browser()
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] > cutoff & df[, ..pos.col]), label.name] <- "TP"
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] <= cutoff & !df[, ..pos.col]), label.name] <- "TN"
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] > cutoff & !df[, ..pos.col]), label.name] <- "FP"
df[which(!is.na(df[, ..score.col]) & df[, ..score.col] <= cutoff & df[, ..pos.col]), label.name] <- "FN"
return(df)
}
getInversePredictors <- function(pred.list, pred.config) {
  # Build the full set of per-dataset column names whose scores rank
  # "lower is more confident" (e.g. genomic distance, p-value), so callers
  # can negate them before computing PR curves. Column names follow the
  # "<dataset name>.<pred.col>" convention used when predictions are merged.
  #
  # Bug fix: the body previously referenced the undefined globals `predConfig`
  # and `pred.table` instead of the function's own arguments.
  inv.pred <- subset(pred.config, lowerIsMoreConfident)$pred.col
  inv.cols <- with(expand.grid(pred.list$name, inv.pred), paste0(Var1, ".", Var2))
  return(inv.cols)
}
applyCellTypeNameMapping <- function(df, cellMapping) {
  #Map CellType in predictions file to match experimental data file
  # Each row of `cellMapping` lists comma-separated source names ("from")
  # and the single target name ("to") they should all be renamed to.
  for (row in seq_len(nrow(cellMapping))) {
    sources <- unlist(strsplit(cellMapping$from[row], split = ","))
    target <- cellMapping$to[row]
    needs.rename <- df$CellType %in% sources
    df$CellType[needs.rename] <- target
  }
  return(df)
}
writeExptSummary <- function(df, outdir) {
  # Summarize the experimental dataset (total connections, how many are used
  # for modeling, how many of those are regulated) and persist the counts as
  # a one-row TSV in `outdir`.
  summary.row <- data.frame(
    numConnections = nrow(df),
    numIncludeInModel = sum(df$IncludeInModel),
    numIncludeInModelRegulated = sum(df$IncludeInModel & df$Regulated)
  )
  write.table(summary.row, file.path(outdir, "expt.summary.txt"),
              sep = "\t", quote = F, col.names = T, row.names = F)
}
fread_ignore_empty <- function(f, ...) {
  # Read a delimited file with fread; on failure, report the path and return
  # NULL instead of raising, so callers can skip empty or unreadable files.
  tryCatch(
    fread(f, fill = TRUE, ...),
    error = function(e) {
      print(paste0("Could not open file: ", f))
      NULL
    }
  )
}
fread_gz_ignore_empty <- function(f, ...) {
  # Read a gzip-compressed delimited file by piping it through gunzip into
  # fread; on failure, report the path and return NULL instead of raising.
  tryCatch(
    fread(paste0("gunzip -c ", f), ...),
    error = function(e) {
      print(paste0("Could not open file: ", f))
      NULL
    }
  )
}
smart_fread <- function(f, ...) {
  # Read `f` with fread, transparently handling gzip-compressed input.
  # file() sniffs the file's magic bytes, so the connection class reports
  # "gzfile" for gzip-compressed files.
  con <- file(f)
  # Fix: the sniffing connection was previously never closed, and
  # closeAllConnections() was called to compensate -- which also destroyed
  # any unrelated connections held by the caller. Close only our own,
  # tolerating a connection that fread may already have consumed.
  on.exit(tryCatch(close(con), error = function(e) invisible(NULL)), add = TRUE)
  if (summary(con)$class == "gzfile") {
    out <- fread_gz_ignore_empty(f, ...)
  } else {
    out <- fread_ignore_empty(f, ...)
  }
  return(out)
}
loadDelimited <- function(file.list) {
  # Read every file in `file.list` and stack the results into one data.table,
  # padding columns missing from some files with NA (fill = TRUE).
  per.file <- lapply(file.list, smart_fread)
  return(rbindlist(per.file, fill = TRUE))
}
loadFileString <- function(file.str, delim = ",") {
  # Split a delimiter-separated string of file paths and load them all into
  # a single stacked table.
  paths <- strsplit(file.str, split = delim)[[1]]
  return(loadDelimited(paths))
}
loadPredictions <- function(pred.table) {
  # Load every prediction dataset listed in `pred.table` (one row per dataset,
  # with a `name` and a `path` that may itself be a comma-separated file list).
  # Returns a list of tables keyed by dataset name.
  pred.list <- lapply(pred.table$path, function(path.str) {
    print(paste0("Loading dataset: ", path.str))
    dataset <- loadFileString(path.str)
    print(paste0("Dataset loaded with ", nrow(dataset), " rows"))
    dataset
  })
  names(pred.list) <- pred.table$name
  return(pred.list)
}
|
\name{GFA}
\alias{GFA}
\alias{CCA}
\concept{Group factor analysis}
\concept{Canonical correlation analysis}
\alias{GFAexperiment}
\alias{CCAexperiment}
\title{
Estimate a Bayesian IBFA/CCA/GFA model
}
\description{
Estimates the parameters of a Bayesian group factor analysis (GFA),
canonical correlation analysis (BCCA), or inter-battery factor
analysis (BIBFA).
GFA is a latent variable model for explaining relationships between
multiple data matrices with co-occurring samples. The model finds linear factors
that explain dependencies between these matrices, similarly to how
factor analysis explains dependencies between individual variables.
BIBFA is a special case of GFA for two data matrices. It finds
factors explaining the relationship between them, as well as factors
explaining the residual variation in each matrix. The solution of
BIBFA equals that of CCA, with additional factors for explaining the
data-specific noise.
}
\usage{
CCA(Y, K, opts)
GFA(Y, K, opts)
CCAexperiment(Y, K, opts, Nrep=10)
GFAexperiment(Y, K, opts, Nrep=10)
}
\arguments{
\item{Y}{
A list containing matrices with N rows (samples) and D[m] columns (features). Must have exactly two matrices for \code{CCA}
and any number of co-occurring matrices for \code{GFA}.
}
\item{K}{
The number of components.
}
\item{opts}{
A list of parameters and options to be used when learning the model.
See \code{\link{getDefaultOpts}}.
}
\item{Nrep}{
The number of random initializations used for learning the model; only
used for \code{CCAexperiment} and \code{GFAexperiment}.
}
}
\details{
The recommended strategy is to use \code{GFAexperiment} for
learning a Bayesian group factor analysis model. It simply calls
\code{GFA} \code{Nrep} times and returns the model with the best
variational lower bound for the marginal likelihood.
\code{CCAexperiment} and \code{CCA} are simple wrappers for the
corresponding GFA functions, to be used for the case of M=2
data sets. CCA is a special case of GFA with exactly two co-occurring
matrices, and these functions are provided for convenience only.
}
\value{
The methods return a list that contains all the model parameters
and other details.
\item{Z}{The mean of the latent variables; N times K matrix}
\item{covZ}{The covariance of the latent variables; K times K matrix}
\item{ZZ}{The second moments Z^TZ; K times K matrix}
\item{W}{List of the mean projections; D_i times K matrices}
\item{covW}{List of the covariances of the projections; K times K matrices}
\item{WW}{List of the second moments W^TW; K times K matrices}
\item{tau}{The mean precisions (inverse variance, so 1/tau gives the variances denoted by sigma in the paper); M-element vector}
\item{alpha}{The mean precisions of the projection weights, used in the ARD prior; M times K matrix}
\item{cost}{Vector collecting the variational lower bounds for each iteration}
\item{D}{Data dimensionalities; M-element vector}
\item{K}{The number of latent factors}
\item{datavar}{The total variance in the data sets, needed for \code{\link{GFAtrim}}}
\item{R}{The rank of alpha}
\item{U}{The group factor loadings; M times R matrix}
\item{V}{The latent group factors; K times R matrix}
\item{u.mu}{The mean of group factor loadings U; M-element vector}
\item{v.mu}{The mean of latent group factors V; K-element vector}
}
\references{
Virtanen, S. and Klami, A., and Kaski, S.: Bayesian CCA via group-wise
sparsity. In \emph{Proceedings of the 28th International Conference on Machine
Learning (ICML)}, pages 457-464, 2011.
Virtanen, S. and Klami, A., and Khan, S.A. and Kaski, S.: Bayesian
group factor analysis. In \emph{Proceedings of the 15th International
Conference on Artificial Intelligence and Statistics (AISTATS)}, volume 22 of JMLR W&CP, pages 1269-1277, 2012.
Klami, A. and Virtanen, S., and Kaski, S.:Bayesian Canonical Correlation
Analysis. \emph{Journal of Machine Learning Research}, 2013.
Klami, A. and Virtanen, S., Leppaaho, E., and Kaski, S.:Group Factor Analysis. \emph{Submitted to a journal}, 2014.
}
\author{
Seppo Virtanen, Eemeli Leppaaho and Arto Klami
}
\seealso{
\code{\link{getDefaultOpts}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
#
# Create simple random data
#
N <- 50; D <- c(4,6) # 50 samples with 4 and 6 dimensions
tau <- c(3,3) # residual noise precision
K <- 3 # K real components (1 shared, 1+1 private)
Z <- matrix(rnorm(N*K,0,1),N,K) # drawn from the prior
alpha <- matrix(c(1,1,1e6,1,1e6,1),2,3)
Y <- vector("list",length=2)
W <- vector("list",length=2)
for(view in 1:2) {
W[[view]] <- matrix(0,D[view],K)
for(k in 1:K) {
W[[view]][,k] <- rnorm(D[view],0,1/sqrt(alpha[view,k]))
}
Y[[view]] <- Z \%*\% t(W[[view]]) +
matrix(rnorm(N*D[view],0,1/sqrt(tau[view])),N,D[view])
}
#
# Run the model
#
opts <- getDefaultOpts()
opts$iter.max <- 10 # Terminate early for fast testing
# Only tries two random initializations for faster testing
model <- CCAexperiment(Y,K,opts,Nrep=2)
}
| /man/GFA.Rd | no_license | cran/CCAGFA | R | false | false | 5,043 | rd | \name{GFA}
\alias{GFA}
\alias{CCA}
\concept{Group factor analysis}
\concept{Canonical correlation analysis}
\alias{GFAexperiment}
\alias{CCAexperiment}
\title{
Estimate a Bayesian IBFA/CCA/GFA model
}
\description{
Estimates the parameters of a Bayesian group factor analysis (GFA),
canonical correlation analysis (BCCA), or inter-battery factor
analysis (BIBFA).
GFA is a latent variable model for explaining relationships between
multiple data matrices with co-occurring samples. The model finds linear factors
that explain dependencies between these matrices, similarly to how
factor analysis explains dependencies between individual variables.
BIBFA is a special case of GFA for two data matrices. It finds
factors explaining the relationship between them, as well as factors
explaining the residual variation in each matrix. The solution of
BIBFA equals that of CCA, with additional factors for explaining the
data-specific noise.
}
\usage{
CCA(Y, K, opts)
GFA(Y, K, opts)
CCAexperiment(Y, K, opts, Nrep=10)
GFAexperiment(Y, K, opts, Nrep=10)
}
\arguments{
\item{Y}{
A list containing matrices with N rows (samples) and D[m] columns (features). Must have exactly two matrices for \code{CCA}
and any number of co-occurring matrices for \code{GFA}.
}
\item{K}{
The number of components.
}
\item{opts}{
A list of parameters and options to be used when learning the model.
See \code{\link{getDefaultOpts}}.
}
\item{Nrep}{
The number of random initializations used for learning the model; only
used for \code{CCAexperiment} and \code{GFAexperiment}.
}
}
\details{
The recommended strategy is to use \code{GFAexperiment} for
learning a Bayesian group factor analysis model. It simply calls
\code{GFA} \code{Nrep} times and returns the model with the best
variational lower bound for the marginal likelihood.
\code{CCAexperiment} and \code{CCA} are simple wrappers for the
corresponding GFA functions, to be used for the case of M=2
data sets. CCA is a special case of GFA with exactly two co-occurring
matrices, and these functions are provided for convenience only.
}
\value{
The methods return a list that contains all the model parameters
and other details.
\item{Z}{The mean of the latent variables; N times K matrix}
\item{covZ}{The covariance of the latent variables; K times K matrix}
\item{ZZ}{The second moments Z^TZ; K times K matrix}
\item{W}{List of the mean projections; D_i times K matrices}
\item{covW}{List of the covariances of the projections; K times K matrices}
\item{WW}{List of the second moments W^TW; K times K matrices}
\item{tau}{The mean precisions (inverse variance, so 1/tau gives the variances denoted by sigma in the paper); M-element vector}
\item{alpha}{The mean precisions of the projection weights, used in the ARD prior; M times K matrix}
\item{cost}{Vector collecting the variational lower bounds for each iteration}
\item{D}{Data dimensionalities; M-element vector}
\item{K}{The number of latent factors}
\item{datavar}{The total variance in the data sets, needed for \code{\link{GFAtrim}}}
\item{R}{The rank of alpha}
\item{U}{The group factor loadings; M times R matrix}
\item{V}{The latent group factors; K times R matrix}
\item{u.mu}{The mean of group factor loadings U; M-element vector}
\item{v.mu}{The mean of latent group factors V; K-element vector}
}
\references{
Virtanen, S. and Klami, A., and Kaski, S.: Bayesian CCA via group-wise
sparsity. In \emph{Proceedings of the 28th International Conference on Machine
Learning (ICML)}, pages 457-464, 2011.
Virtanen, S. and Klami, A., and Khan, S.A. and Kaski, S.: Bayesian
group factor analysis. In \emph{Proceedings of the 15th International
Conference on Artificial Intelligence and Statistics (AISTATS)}, volume 22 of JMLR W&CP, pages 1269-1277, 2012.
Klami, A. and Virtanen, S., and Kaski, S.:Bayesian Canonical Correlation
Analysis. \emph{Journal of Machine Learning Research}, 2013.
Klami, A. and Virtanen, S., Leppaaho, E., and Kaski, S.:Group Factor Analysis. \emph{Submitted to a journal}, 2014.
}
\author{
Seppo Virtanen, Eemeli Leppaaho and Arto Klami
}
\seealso{
\code{\link{getDefaultOpts}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
#
# Create simple random data
#
N <- 50; D <- c(4,6) # 50 samples with 4 and 6 dimensions
tau <- c(3,3) # residual noise precision
K <- 3 # K real components (1 shared, 1+1 private)
Z <- matrix(rnorm(N*K,0,1),N,K) # drawn from the prior
alpha <- matrix(c(1,1,1e6,1,1e6,1),2,3)
Y <- vector("list",length=2)
W <- vector("list",length=2)
for(view in 1:2) {
W[[view]] <- matrix(0,D[view],K)
for(k in 1:K) {
W[[view]][,k] <- rnorm(D[view],0,1/sqrt(alpha[view,k]))
}
Y[[view]] <- Z \%*\% t(W[[view]]) +
matrix(rnorm(N*D[view],0,1/sqrt(tau[view])),N,D[view])
}
#
# Run the model
#
opts <- getDefaultOpts()
opts$iter.max <- 10 # Terminate early for fast testing
# Only tries two random initializations for faster testing
model <- CCAexperiment(Y,K,opts,Nrep=2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/codeml.R, R/paml_rst.R
\docType{methods}
\name{get.tipseq}
\alias{get.tipseq}
\alias{get.tipseq,codeml-method}
\alias{get.tipseq,paml_rst-method}
\title{get.tipseq method}
\usage{
get.tipseq(object, ...)
\S4method{get.tipseq}{codeml}(object, ...)
\S4method{get.tipseq}{paml_rst}(object, ...)
}
\arguments{
\item{object}{one of paml_rst or codeml object}
\item{...}{additional parameter}
}
\value{
character
}
\description{
get tipseq
}
| /man/get.tipseq-methods.Rd | no_license | nemochina2008/treeio | R | false | true | 535 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/codeml.R, R/paml_rst.R
\docType{methods}
\name{get.tipseq}
\alias{get.tipseq}
\alias{get.tipseq,codeml-method}
\alias{get.tipseq,paml_rst-method}
\title{get.tipseq method}
\usage{
get.tipseq(object, ...)
\S4method{get.tipseq}{codeml}(object, ...)
\S4method{get.tipseq}{paml_rst}(object, ...)
}
\arguments{
\item{object}{one of paml_rst or codeml object}
\item{...}{additional parameter}
}
\value{
character
}
\description{
get tipseq
}
|
###############################################
#
# Topic Modeling program
#
# Reference: https://eight2late.wordpress.com/2015/09/29/a-gentle-introduction-to-topic-modeling-using-r/
#
#
#
#
###############################################
library(tm)
library(topicmodels)
library(SnowballC)
#load data from previous scripit
load("data.RData")
#Format Data
docs <- Corpus(VectorSource(paste(df$desc,df$overview,sep=" ")))
docs <-tm_map(docs,content_transformer(tolower))
#Replace useless characters
toSpace <- content_transformer(function(x, pattern) { return (gsub(pattern, " ", x))})
docs <- tm_map(docs, toSpace, "-")
docs <- tm_map(docs, toSpace, "’")
docs <- tm_map(docs, toSpace, "‘")
docs <- tm_map(docs, toSpace, "•")
docs <- tm_map(docs, toSpace, "”")
docs <- tm_map(docs, toSpace, "“")
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("english"))
docs <- tm_map(docs, stripWhitespace)
docs <- tm_map(docs,stemDocument)
#More Formatting
docs <- tm_map(docs, content_transformer(gsub),
pattern = "organiz", replacement = "organ")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "organis", replacement = "organ")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "andgovern", replacement = "govern")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "inenterpris", replacement = "enterpris")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "team-", replacement = "team")
#define and eliminate all custom stopwords
myStopwords <- c("can", "say","one","way","use",
"also","howev","tell","will",
"much","need","take","tend","even",
"like","particular","rather","said",
"get","well","make","ask","come","end",
"first","two","help","often","may",
"might","see","someth","thing","point",
"post","look","right","now","think","‘ve ",
"‘re ","anoth","put","set","new","good",
"want","sure","kind","larg","yes,","day","etc",
"quit","sinc","attemp","lack","seen","awar",
"littl","ever","moreov","though","found","abl",
"enough","far","earli","away","achiev","draw",
"last","never","brief","bit","entir","brief",
"great","lot","inform","pend")
docs <- tm_map(docs, removeWords, myStopwords)
dtm <- DocumentTermMatrix(docs)
rownames(dtm) <- df$id
freq <- colSums(as.matrix(dtm))
ord <- order(freq,decreasing=TRUE)
#LDA Gibbs topic modeling information
burnin <- 2000
iter <- 1000
thin <- 500
seed <-list(2003,5,63,100001,765) # important for getting consistant results
nstart <- 5
best <- TRUE
k <- 52 #Number of topics
keep <- 50
#Run the LDA Gibbs algorithm 99 times using 2 -> 100 topics
seqk <- seq(2, 100, 1)
rowTotals <- apply(dtm , 1, sum)
dtm <- dtm[rowTotals> 0, ]
harmonicMean <- function(logLikelihoods, precision = 2000L) {
# Harmonic mean of the sampled likelihoods, in log space. The median is
# subtracted before exponentiation and added back afterwards, and the sum is
# taken in `precision`-bit arithmetic, to avoid overflow/underflow.
# NOTE(review): mpfr() comes from the Rmpfr package, which this script never
# loads -- confirm library(Rmpfr) is attached upstream.
llMed <- median(logLikelihoods)
as.double(llMed - log(mean(exp(-mpfr(logLikelihoods,
prec = precision) + llMed))))
}
#Ignore these lines two lines
#ldaOut <-LDA(dtm,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin))
#ldaOut.topics <- as.matrix(topics(ldaOut))
#Run LDA Gibbs algorithm and get harmonic mean
system.time(fitted_many <- lapply(seqk,function(k) LDA(dtm,k=k,method="Gibbs",control=list(burnin=burnin,iter=iter,keep=keep))))
logLiks_many <- lapply(fitted_many, function(L) L@logLiks[-c(1:(burnin/keep))])
hm_many <- sapply(logLiks_many, function(h) harmonicMean(h))
ldaplot <- ggplot(data.frame(seqk, hm_many), aes(x=seqk, y=hm_many)) + geom_path(lwd=1.5) +
theme(text = element_text(family= NULL),
axis.title.y=element_text(vjust=1, size=16),
axis.title.x=element_text(vjust=-.5, size=16),
axis.text=element_text(size=16),
plot.title=element_text(size=20)) +
xlab('Number of Topics') +
ylab('Harmonic Mean') +
annotate("text", x = 25, y = -80000, label = paste("The optimal number of topics is", seqk[which.max(hm_many)])) +
ggtitle(expression(atop("Latent Dirichlet Allocation Analysis of NEN LLIS", atop(italic("How many distinct topics in the abstracts?"), ""))))
ldaplot | /R/HarmonicMean.R | permissive | davidmeza1/KA_Interns | R | false | false | 4,539 | r | ###############################################
#
# Topic Modeling program
#
# Reference: https://eight2late.wordpress.com/2015/09/29/a-gentle-introduction-to-topic-modeling-using-r/
#
#
#
#
###############################################
library(tm)
library(topicmodels)
library(SnowballC)
#load data from previous scripit
load("data.RData")
#Format Data
docs <- Corpus(VectorSource(paste(df$desc,df$overview,sep=" ")))
docs <-tm_map(docs,content_transformer(tolower))
#Replace useless characters
toSpace <- content_transformer(function(x, pattern) { return (gsub(pattern, " ", x))})
docs <- tm_map(docs, toSpace, "-")
docs <- tm_map(docs, toSpace, "’")
docs <- tm_map(docs, toSpace, "‘")
docs <- tm_map(docs, toSpace, "•")
docs <- tm_map(docs, toSpace, "”")
docs <- tm_map(docs, toSpace, "“")
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("english"))
docs <- tm_map(docs, stripWhitespace)
docs <- tm_map(docs,stemDocument)
#More Formatting
docs <- tm_map(docs, content_transformer(gsub),
pattern = "organiz", replacement = "organ")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "organis", replacement = "organ")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "andgovern", replacement = "govern")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "inenterpris", replacement = "enterpris")
docs <- tm_map(docs, content_transformer(gsub),
pattern = "team-", replacement = "team")
#define and eliminate all custom stopwords
myStopwords <- c("can", "say","one","way","use",
"also","howev","tell","will",
"much","need","take","tend","even",
"like","particular","rather","said",
"get","well","make","ask","come","end",
"first","two","help","often","may",
"might","see","someth","thing","point",
"post","look","right","now","think","‘ve ",
"‘re ","anoth","put","set","new","good",
"want","sure","kind","larg","yes,","day","etc",
"quit","sinc","attemp","lack","seen","awar",
"littl","ever","moreov","though","found","abl",
"enough","far","earli","away","achiev","draw",
"last","never","brief","bit","entir","brief",
"great","lot","inform","pend")
docs <- tm_map(docs, removeWords, myStopwords)
dtm <- DocumentTermMatrix(docs)
rownames(dtm) <- df$id
freq <- colSums(as.matrix(dtm))
ord <- order(freq,decreasing=TRUE)
#LDA Gibbs topic modeling information
burnin <- 2000
iter <- 1000
thin <- 500
seed <-list(2003,5,63,100001,765) # important for getting consistant results
nstart <- 5
best <- TRUE
k <- 52 #Number of topics
keep <- 50
#Run the LDA Gibbs algorithm 99 times using 2 -> 100 topics
seqk <- seq(2, 100, 1)
rowTotals <- apply(dtm , 1, sum)
dtm <- dtm[rowTotals> 0, ]
harmonicMean <- function(logLikelihoods, precision = 2000L) {
# Harmonic mean of the sampled likelihoods, in log space. The median is
# subtracted before exponentiation and added back afterwards, and the sum is
# taken in `precision`-bit arithmetic, to avoid overflow/underflow.
# NOTE(review): mpfr() comes from the Rmpfr package, which this script never
# loads -- confirm library(Rmpfr) is attached upstream.
llMed <- median(logLikelihoods)
as.double(llMed - log(mean(exp(-mpfr(logLikelihoods,
prec = precision) + llMed))))
}
#Ignore these lines two lines
#ldaOut <-LDA(dtm,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin))
#ldaOut.topics <- as.matrix(topics(ldaOut))
#Run LDA Gibbs algorithm and get harmonic mean
system.time(fitted_many <- lapply(seqk,function(k) LDA(dtm,k=k,method="Gibbs",control=list(burnin=burnin,iter=iter,keep=keep))))
logLiks_many <- lapply(fitted_many, function(L) L@logLiks[-c(1:(burnin/keep))])
hm_many <- sapply(logLiks_many, function(h) harmonicMean(h))
ldaplot <- ggplot(data.frame(seqk, hm_many), aes(x=seqk, y=hm_many)) + geom_path(lwd=1.5) +
theme(text = element_text(family= NULL),
axis.title.y=element_text(vjust=1, size=16),
axis.title.x=element_text(vjust=-.5, size=16),
axis.text=element_text(size=16),
plot.title=element_text(size=20)) +
xlab('Number of Topics') +
ylab('Harmonic Mean') +
annotate("text", x = 25, y = -80000, label = paste("The optimal number of topics is", seqk[which.max(hm_many)])) +
ggtitle(expression(atop("Latent Dirichlet Allocation Analysis of NEN LLIS", atop(italic("How many distinct topics in the abstracts?"), ""))))
ldaplot |
source("create_enrichment_table.R")
source("get_region_read_count_ratio.R")
args <- commandArgs(trailingOnly = TRUE)
sam <- args[1]
con <- args[2]
outname <- args[3]
gff <- "../Genomic_Features/saccharomyces_cerevisiae_R64-2-1_20150113.gff"
ratio <- get_region_read_count_ratio(sam, con, gff)
sambam <- open(BamFile(sam))
conbam <- open(BamFile(con))
create_enrichment_table(sambam, conbam, outname) | /create-bam-table/from_bam_to_enrichmentTable.R | no_license | yoona3877/Chip-Seq-analysis | R | false | false | 402 | r | source("create_enrichment_table.R")
source("get_region_read_count_ratio.R")
args <- commandArgs(trailingOnly = TRUE)
sam <- args[1]
con <- args[2]
outname <- args[3]
gff <- "../Genomic_Features/saccharomyces_cerevisiae_R64-2-1_20150113.gff"
ratio <- get_region_read_count_ratio(sam, con, gff)
sambam <- open(BamFile(sam))
conbam <- open(BamFile(con))
create_enrichment_table(sambam, conbam, outname) |
##' Return the currently running AnalysisPage app
##'
##' Return the currently running AnalysisPage app. The way this is done
##' is to first try to chase up the call stack and find the first environment which is
##' an \code{AnalysisPageRApacheApp}, and return that. If that fails
##' then it looks for \code{app} in the GlobalEnv and returns that. IF that
##' also fails then it reutrns NULL.
##' @return Current \code{AnalysisPageRApacheApp}, or NULL
##' if it can't be found.
##' @author Brad Friedman
##' @export
current.app <- function() {
envs <- lapply(sys.parents(), function(i.frame) {
## -2 to skip lapply and function(i.frame) calls
environment(sys.function(i.frame - 2))
})
app <- Find(function(x) is(x, "AnalysisPageRApacheApp"), envs)
if(is.null(app) && exists("app", .GlobalEnv))
app <- get("app", .GlobalEnv)
return(app)
}
##' Retrieve an Analysis page from the current app
##'
##' If the current app (as returned by (\code{\link{current.app}})
##' has a page of the given name then it is returned. If
##' the current app can't be found, or if it does not have such a
##' page, then NULL is returned.
##' @param page String, name of desired page.
##' @return AnalysisPage, or NULl
##' @author Brad Friedman
##' @export
analysis.page.of.current.app <- function(page) {
app <- current.app()
reg <- app$registry
if(is(reg, "AnalysisPageRegistry")) {
## (This branch should always be executed---you'd have to go far
## out of your way to build an app without a registry. I'm just
## trying to avoid throwing errors!
if(has.page(reg, page)) {
return(get.page(reg, page))
}
}
return(NULL)
}
| /R/current.R | no_license | apomatix/AnalysisPageServer | R | false | false | 1,673 | r |
##' Return the currently running AnalysisPage app
##'
##' Return the currently running AnalysisPage app. The way this is done
##' is to first try to chase up the call stack and find the first environment which is
##' an \code{AnalysisPageRApacheApp}, and return that. If that fails
##' then it looks for \code{app} in the GlobalEnv and returns that. IF that
##' also fails then it reutrns NULL.
##' @return Current \code{AnalysisPageRApacheApp}, or NULL
##' if it can't be found.
##' @author Brad Friedman
##' @export
current.app <- function() {
envs <- lapply(sys.parents(), function(i.frame) {
## -2 to skip lapply and function(i.frame) calls
environment(sys.function(i.frame - 2))
})
app <- Find(function(x) is(x, "AnalysisPageRApacheApp"), envs)
if(is.null(app) && exists("app", .GlobalEnv))
app <- get("app", .GlobalEnv)
return(app)
}
##' Retrieve an Analysis page from the current app
##'
##' If the current app (as returned by (\code{\link{current.app}})
##' has a page of the given name then it is returned. If
##' the current app can't be found, or if it does not have such a
##' page, then NULL is returned.
##' @param page String, name of desired page.
##' @return AnalysisPage, or NULl
##' @author Brad Friedman
##' @export
analysis.page.of.current.app <- function(page) {
app <- current.app()
reg <- app$registry
if(is(reg, "AnalysisPageRegistry")) {
## (This branch should always be executed---you'd have to go far
## out of your way to build an app without a registry. I'm just
## trying to avoid throwing errors!
if(has.page(reg, page)) {
return(get.page(reg, page))
}
}
return(NULL)
}
|
## Strategy : Candle Stick Bars ### V - patterns
## Check for large Green Bar Filled near Valley/ below ma3 line
computeIndicators2<-function(dt){
rollingAvgShort = 20 ; rollingAvgLong = 175
proximity = 0.1 # tolerance: distance threshold betwee long and short averages
slopeSigF = 0.001 # Slope cannot be less than this number to be considered
holdDays = c(1:10) ; evalDays = c(-4:-1)
dt = cbind(dt[,.(index,Open,High,Close),by=Symbol],dt[,(1+ClCl(.SD)),by=Symbol][,V1])
colnames(dt)<- c('Symbol' ,'index','Open', 'High','Close','ClCl')
cumRet<-function(x){prod(1+ROC(x,type = 'discrete'),na.rm = T)}
dt = dt[,ma3 := SMA(Close,3),by=Symbol]
dt = dt[,ma3_sl := ROC(ma3),by=Symbol]
dt = dt[,ma20 := SMA(Close,20),by=Symbol]
dt = dt[,OpCl := .(Close/Open),by=Symbol]
dt = dt[,greenBar := .(OpCl>=1),by=Symbol]
dt = dt[,openHigher := .(ClCl/OpCl>1),by=Symbol]
# dt = dt[,greenBarK := seq_len(.N),by= c('Symbol',rleid('greenBar'))]
dt = dt[, relativeChange := OpCl/SMA(OpCl,60) ,by=Symbol]
dt = dt[,HiCl := .(Close/High),by=Symbol]
dt = dt[,closedHigh := .(HiCl>0.998),by=Symbol]
dt = dt[,preReturns:=rollapply(Close,width= list(evalDays),FUN = mean,fill=NA,align = 'right'),by=Symbol] ## mean returns
dt = dt[,postReturns:=rollapply(Close,width= list(holdDays),FUN = max,fill=NA,align = 'left'),by=Symbol]
dt$move = ifelse(dt$postReturns/dt$Close >1,1,-1)
setkey(dt,Symbol)
return(dt)
return(dt[,.(dates,symbol=toupper(symbol),close,returns,preReturns,postReturns,highPrice,high_to_close,lowPrice)])
}
ind = computeIndicators2(na.omit(stkdata[Symbol %in% uss$symbol])) ## only uss symbols / how about Volume / or near yearly lows
ind[ greenBar==T & (ma3>ma20) & ma3_sl>0 & (Close<ma3) &relativeChange>1.04 & Close>10]
# ind = computeIndicators2(stkdata[stkdata[,nrow(.SD)>100,by=Symbol],on='Symbol'][V1==T]) ## removing some symbols
table(ind[ greenBar==T & (ma3>ma20) & ma3_sl>0 & (Close<ma3) &relativeChange>1.04 & Close>10,move])
## Best Ratio 15/1 : Large Green Bar on slope up on + regime @Close below ma3 line
table(ind[ greenBar==T & (Close<ma3) &relativeChange>1.04 & Close>10,move])
## Ratio 188/42 - try shaping V
##
ind[Symbol %in% unique(earn[cap_mm>10000]$symbol) ,.(index,Symbol,ClCl,OpCl,greenBar,barK=seq_len(.N),postReturns,move),by=.(Symbol,rleid(greenBar))]
plotScatter(ind[ closedHigh==T & greenBar==T],x='openHigher','move',other_aes = 'Close')
ind1 = ind[Symbol %in% unique(earn[cap_mm>10000]$symbol) ,
.(index,Symbol,ClCl,OpCl,closedHigh,openHigher,greenBar,barK=seq_len(.N),postReturns,move),by=.(Symbol,rleid(greenBar))]
table(ind1[ openHigher==T & barK==2 ]$greenBar , ind1[openHigher==T & barK==2 ]$move) ## nothing
table(ind1[ greenBar==T & barK==2 ]$closedHigh , ind1[ greenBar==T & barK==2 ]$move) ## best of now
ctrl = trainControl(method = "repeatedcv", repeats = 1, number = 5,allowParallel = F)
mod = train(move ~ ., data = na.omit(ind1[,.(move=as.factor(move),greenBar,barK,closedHigh)]), method = "xgbTree",metric='Kappa' ,importance=T, tuneLength = 10,trControl = ctrl)
print(getTrainPerf(mod)) ## NO Luck but barK is everything
varImp(mod,scale = F)
earn[, cat :=cut(earn$cap_mm,c(0,500,2000,6000,15000,Inf),labels = c('tiny','small','mid','large','mega'))]
stkdata = rbindlist(lapply(unique(earn$symbol), function(st) {
tryCatch (prepareData(st,startDate = '2017-01-01'), error = function(e) data.table(symbol=st)) }),fill = T)
earn = filterTickers(na.omit(earn),cap = 1000) ## should check small caps too
print(paste('Number of tickers: ',nrow(earn)))
indicators = lapply(earn$symbol, function(st) {print(st)
tryCatch (merge(earn,prepareData(st),by='symbol')[dates == earnDate],
error = function(e) data.table(symbol=st)) })
plotScatter(rbindlist(indicators,fill = T)[cap_mm<500],'preReturns','postReturns')
| /Strategy_candle stick.R | no_license | mksaraf/finance-stockStrategy | R | false | false | 3,996 | r | ## Strategy : Candle Stick Bars ### V - patterns
## Check for large Green Bar Filled near Valley/ below ma3 line
computeIndicators2<-function(dt){
rollingAvgShort = 20 ; rollingAvgLong = 175
proximity = 0.1 # tolerance: distance threshold betwee long and short averages
slopeSigF = 0.001 # Slope cannot be less than this number to be considered
holdDays = c(1:10) ; evalDays = c(-4:-1)
dt = cbind(dt[,.(index,Open,High,Close),by=Symbol],dt[,(1+ClCl(.SD)),by=Symbol][,V1])
colnames(dt)<- c('Symbol' ,'index','Open', 'High','Close','ClCl')
cumRet<-function(x){prod(1+ROC(x,type = 'discrete'),na.rm = T)}
dt = dt[,ma3 := SMA(Close,3),by=Symbol]
dt = dt[,ma3_sl := ROC(ma3),by=Symbol]
dt = dt[,ma20 := SMA(Close,20),by=Symbol]
dt = dt[,OpCl := .(Close/Open),by=Symbol]
dt = dt[,greenBar := .(OpCl>=1),by=Symbol]
dt = dt[,openHigher := .(ClCl/OpCl>1),by=Symbol]
# dt = dt[,greenBarK := seq_len(.N),by= c('Symbol',rleid('greenBar'))]
dt = dt[, relativeChange := OpCl/SMA(OpCl,60) ,by=Symbol]
dt = dt[,HiCl := .(Close/High),by=Symbol]
dt = dt[,closedHigh := .(HiCl>0.998),by=Symbol]
dt = dt[,preReturns:=rollapply(Close,width= list(evalDays),FUN = mean,fill=NA,align = 'right'),by=Symbol] ## mean returns
dt = dt[,postReturns:=rollapply(Close,width= list(holdDays),FUN = max,fill=NA,align = 'left'),by=Symbol]
dt$move = ifelse(dt$postReturns/dt$Close >1,1,-1)
setkey(dt,Symbol)
return(dt)
return(dt[,.(dates,symbol=toupper(symbol),close,returns,preReturns,postReturns,highPrice,high_to_close,lowPrice)])
}
ind = computeIndicators2(na.omit(stkdata[Symbol %in% uss$symbol])) ## only uss symbols / how about Volume / or near yearly lows
ind[ greenBar==T & (ma3>ma20) & ma3_sl>0 & (Close<ma3) &relativeChange>1.04 & Close>10]
# ind = computeIndicators2(stkdata[stkdata[,nrow(.SD)>100,by=Symbol],on='Symbol'][V1==T]) ## removing some symbols
table(ind[ greenBar==T & (ma3>ma20) & ma3_sl>0 & (Close<ma3) &relativeChange>1.04 & Close>10,move])
## Best Ratio 15/1 : Large Green Bar on slope up on + regime @Close below ma3 line
table(ind[ greenBar==T & (Close<ma3) &relativeChange>1.04 & Close>10,move])
## Ratio 188/42 - try shaping V
##
ind[Symbol %in% unique(earn[cap_mm>10000]$symbol) ,.(index,Symbol,ClCl,OpCl,greenBar,barK=seq_len(.N),postReturns,move),by=.(Symbol,rleid(greenBar))]
plotScatter(ind[ closedHigh==T & greenBar==T],x='openHigher','move',other_aes = 'Close')
ind1 = ind[Symbol %in% unique(earn[cap_mm>10000]$symbol) ,
.(index,Symbol,ClCl,OpCl,closedHigh,openHigher,greenBar,barK=seq_len(.N),postReturns,move),by=.(Symbol,rleid(greenBar))]
table(ind1[ openHigher==T & barK==2 ]$greenBar , ind1[openHigher==T & barK==2 ]$move) ## nothing
table(ind1[ greenBar==T & barK==2 ]$closedHigh , ind1[ greenBar==T & barK==2 ]$move) ## best of now
ctrl = trainControl(method = "repeatedcv", repeats = 1, number = 5,allowParallel = F)
mod = train(move ~ ., data = na.omit(ind1[,.(move=as.factor(move),greenBar,barK,closedHigh)]), method = "xgbTree",metric='Kappa' ,importance=T, tuneLength = 10,trControl = ctrl)
print(getTrainPerf(mod)) ## NO Luck but barK is everything
varImp(mod,scale = F)
earn[, cat :=cut(earn$cap_mm,c(0,500,2000,6000,15000,Inf),labels = c('tiny','small','mid','large','mega'))]
stkdata = rbindlist(lapply(unique(earn$symbol), function(st) {
tryCatch (prepareData(st,startDate = '2017-01-01'), error = function(e) data.table(symbol=st)) }),fill = T)
earn = filterTickers(na.omit(earn),cap = 1000) ## should check small caps too
print(paste('Number of tickers: ',nrow(earn)))
indicators = lapply(earn$symbol, function(st) {print(st)
tryCatch (merge(earn,prepareData(st),by='symbol')[dates == earnDate],
error = function(e) data.table(symbol=st)) })
plotScatter(rbindlist(indicators,fill = T)[cap_mm<500],'preReturns','postReturns')
|
#name = c("a", "b", "c", "d", "e", "f", "g", "h")
value = c(102, 300, 102, 100, 205, 105, 71 , 92)
#d = data.frame(name, value)
#kruskal.test(value ~ name, data = d)
chisq.test(value) | /HW5/HW5-1.R | no_license | praal/data_analysis_course | R | false | false | 190 | r |
#name = c("a", "b", "c", "d", "e", "f", "g", "h")
value = c(102, 300, 102, 100, 205, 105, 71 , 92)
#d = data.frame(name, value)
#kruskal.test(value ~ name, data = d)
chisq.test(value) |
#'@title: Buying Probabilites EDA Flex
#'@return: list with parameters for product Flex and DOW
Distributions_flex<-list()
#' Fitting for a day and a product
#' @details: Monday, Flex
Mon_Flex<-BookingData_DOW[[2]]$PRICE[which(BookingData_DOW[[2]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Mon_Flex, "gamma")
fw <- fitdist(Mon_Flex, "weibull")
fn <- fitdist(Mon_Flex, "norm")
fl <- fitdist(Mon_Flex, "lnorm")
fitted_distr<-list(fg,fw,fn,fl)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "Weibull","normal","lognormal")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4","hotpink"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fw)
Distributions<-append(Distributions,list("Monday","Flex","Weibull",fw$estimate))
#' @details: Tuesday, Flex
Tues_Flex<-BookingData_DOW[[3]]$PRICE[which(BookingData_DOW[[3]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Tues_Flex, "gamma")
fn <- fitdist(Tues_Flex, "norm")
fw <- fitdist(Tues_Flex, "weibull")
fu <- fitdist(Tues_Flex, "unif")
fitted_distr<-list(fg,fn,fw,fu)
par(mfrow = c(2, 2))
fitnames <- c("gamma","norm","Weibull", "uniform")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4","hotpink"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fw)
Distributions<-append(Distributions,list("Tuesday","Flex","Weibull",fw$estimate))
#' @details: Wednesday, Flex
Wednesday_Flex<-BookingData_DOW[[4]]$PRICE[which(BookingData_DOW[[4]]$PRODUCT_TYPE=="Flex")]
fn <- fitdist(Wednesday_Flex, "norm")
fu <- fitdist(Wednesday_Flex, "unif")
fg <- fitdist(Wednesday_Flex, "gamma")
fitted_distr<-list(fn,fu,fg)
par(mfrow = c(1, 2))
fitnames <- c("normal", "unif","gamma")
denscomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend=-0.1,ylegend=0.0065)
cdfcomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend="topleft")
gofstat(fitted_distr,fitnames=fitnames)
summary(fu)
Distributions<-append(Distributions,list("Wednesday","Flex","Uniform",fu$estimate))
#' @details: Thursday, Flex
Thurs_Flex<-BookingData_DOW[[5]]$PRICE[which(BookingData_DOW[[5]]$PRODUCT_TYPE=="Flex")]
fn <- fitdist(Thurs_Flex, "norm")
fu <- fitdist(Thurs_Flex, "unif")
fw <- fitdist(Thurs_Flex, "weibull")
fitted_distr<-list(fn,fu,fw)
par(mfrow = c(1, 2))
fitnames <- c("normal", "unif","Weibull")
denscomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend=-0.1,ylegend=0.0065)
cdfcomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend="topleft")
gofstat(fitted_distr,fitnames=fitnames)
summary(fw)
Distributions<-append(Distributions,list("Thursday","Flex","Weibull",fw$estimate))
#' @details: Friday, Flex
Fri_Flex<-BookingData_DOW[[6]]$PRICE[which(BookingData_DOW[[6]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Fri_Flex, "gamma")
fw <- fitdist(Fri_Flex, "weibull")
fn <- fitdist(Fri_Flex, "norm")
fitted_distr<-list(fg,fw,fn)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "Weibull","normal")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fg)
Distributions<-append(Distributions,list("Friday","Flex","Gamma",fg$estimate))
#' @details: Satudary, Flex
Sat_Flex<-BookingData_DOW[[7]]$PRICE[which(BookingData_DOW[[7]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Sat_Flex, "gamma")
fu <- fitdist(Sat_Flex, "unif")
fn <- fitdist(Sat_Flex, "norm")
fitted_distr<-list(fg,fu,fn)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "uniform","normal")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fu)
Distributions<-append(Distributions,list("Satudray","Flex","Uniform",fu$estimate))
#' @details: Sunday, Flex
Sun_Flex<-BookingData_DOW[[1]]$PRICE[which(BookingData_DOW[[1]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Sun_Flex, "gamma")
fe <- fitdist(Sun_Flex, "exp")
fw <- fitdist(Sun_Flex, "weibull")
fitted_distr<-list(fg,fe,fw)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "exponential","Weibull")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fg)
Distributions<-append(Distributions,list("Sunday","Flex","Gamma",fg$estimate))
| /GUIDE_FIT_Distr_Buying_Flex.R | no_license | RajeshKN02/dynPricingPrototype | R | false | false | 6,713 | r | #'@title: Buying Probabilites EDA Flex
#'@return: list with parameters for product Flex and DOW
Distributions_flex<-list()
#' Fitting for a day and a product
#' @details: Monday, Flex
Mon_Flex<-BookingData_DOW[[2]]$PRICE[which(BookingData_DOW[[2]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Mon_Flex, "gamma")
fw <- fitdist(Mon_Flex, "weibull")
fn <- fitdist(Mon_Flex, "norm")
fl <- fitdist(Mon_Flex, "lnorm")
fitted_distr<-list(fg,fw,fn,fl)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "Weibull","normal","lognormal")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4","hotpink"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fw)
Distributions<-append(Distributions,list("Monday","Flex","Weibull",fw$estimate))
#' @details: Tuesday, Flex
Tues_Flex<-BookingData_DOW[[3]]$PRICE[which(BookingData_DOW[[3]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Tues_Flex, "gamma")
fn <- fitdist(Tues_Flex, "norm")
fw <- fitdist(Tues_Flex, "weibull")
fu <- fitdist(Tues_Flex, "unif")
fitted_distr<-list(fg,fn,fw,fu)
par(mfrow = c(2, 2))
fitnames <- c("gamma","norm","Weibull", "uniform")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6,6),fitcol = c("lightcoral", "blue","palegreen4","hotpink"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4","hotpink"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fw)
Distributions<-append(Distributions,list("Tuesday","Flex","Weibull",fw$estimate))
#' @details: Wednesday, Flex
Wednesday_Flex<-BookingData_DOW[[4]]$PRICE[which(BookingData_DOW[[4]]$PRODUCT_TYPE=="Flex")]
fn <- fitdist(Wednesday_Flex, "norm")
fu <- fitdist(Wednesday_Flex, "unif")
fg <- fitdist(Wednesday_Flex, "gamma")
fitted_distr<-list(fn,fu,fg)
par(mfrow = c(1, 2))
fitnames <- c("normal", "unif","gamma")
denscomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend=-0.1,ylegend=0.0065)
cdfcomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend="topleft")
gofstat(fitted_distr,fitnames=fitnames)
summary(fu)
Distributions<-append(Distributions,list("Wednesday","Flex","Uniform",fu$estimate))
#' @details: Thursday, Flex
Thurs_Flex<-BookingData_DOW[[5]]$PRICE[which(BookingData_DOW[[5]]$PRODUCT_TYPE=="Flex")]
fn <- fitdist(Thurs_Flex, "norm")
fu <- fitdist(Thurs_Flex, "unif")
fw <- fitdist(Thurs_Flex, "weibull")
fitted_distr<-list(fn,fu,fw)
par(mfrow = c(1, 2))
fitnames <- c("normal", "unif","Weibull")
denscomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend=-0.1,ylegend=0.0065)
cdfcomp(fitted_distr, legendtext = fitnames,
xlab="Price", xlegend="topleft")
gofstat(fitted_distr,fitnames=fitnames)
summary(fw)
Distributions<-append(Distributions,list("Thursday","Flex","Weibull",fw$estimate))
#' @details: Friday, Flex
Fri_Flex<-BookingData_DOW[[6]]$PRICE[which(BookingData_DOW[[6]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Fri_Flex, "gamma")
fw <- fitdist(Fri_Flex, "weibull")
fn <- fitdist(Fri_Flex, "norm")
fitted_distr<-list(fg,fw,fn)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "Weibull","normal")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fg)
Distributions<-append(Distributions,list("Friday","Flex","Gamma",fg$estimate))
#' @details: Satudary, Flex
Sat_Flex<-BookingData_DOW[[7]]$PRICE[which(BookingData_DOW[[7]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Sat_Flex, "gamma")
fu <- fitdist(Sat_Flex, "unif")
fn <- fitdist(Sat_Flex, "norm")
fitted_distr<-list(fg,fu,fn)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "uniform","normal")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fu)
Distributions<-append(Distributions,list("Satudray","Flex","Uniform",fu$estimate))
#' @details: Sunday, Flex
Sun_Flex<-BookingData_DOW[[1]]$PRICE[which(BookingData_DOW[[1]]$PRODUCT_TYPE=="Flex")]
fg <- fitdist(Sun_Flex, "gamma")
fe <- fitdist(Sun_Flex, "exp")
fw <- fitdist(Sun_Flex, "weibull")
fitted_distr<-list(fg,fe,fw)
par(mfrow = c(2, 2))
fitnames <- c("gamma", "exponential","Weibull")
denscomp(fitted_distr, xlegend="topright",fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),
#demp=TRUE, dempcol = "lightblue",
addlegend=TRUE,legendtext=fitnames, datacol="papayawhip",
xlab="Price")
qqcomp(fitted_distr,fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames)
cdfcomp(fitted_distr, fitlty=c(1,5,6),fitcol = c("lightcoral", "blue","palegreen4"),legendtext = fitnames,
xlab="Price",xlegend="right")
ppcomp(fitted_distr,fitcol = c("lightcoral", "blue", "palegreen4"),legendtext = fitnames)
gofstat(fitted_distr,fitnames=fitnames)
summary(fg)
Distributions<-append(Distributions,list("Sunday","Flex","Gamma",fg$estimate))
|
\name{getOmega}
\alias{getOmega}
\title{
Function to return the terminal age of a life table.
}
\description{
This function returns the \eqn{\omega} value of a life table object, that is,
the last attainable age within a life table.
}
\usage{
getOmega(object)
}
\arguments{
\item{object}{A life table object.}
}
\value{
A numeric value representing the \eqn{\omega} value of a life table object}
\references{
Actuarial Mathematics (Second Edition), 1997, by Bowers, N.L., Gerber, H.U., Hickman, J.C.,
Jones, D.A. and Nesbitt, C.J.
}
\author{
Giorgio A. Spedicato
}
\section{Warning }{
The function is provided as is, without any guarantee regarding the accuracy of calculation. We disclaim any liability for eventual
losses arising from direct or indirect use of this software.
}
\seealso{
\code{\linkS4class{actuarialtable}}
}
\examples{
#assumes SOA example life table to be load
data(soaLt)
soa08=with(soaLt, new("lifetable",
x=x,lx=Ix,name="SOA2008"))
#the last attainable age under SOA life table is
getOmega(soa08)
}
| /lifecontingencies/man/getOmega.Rd | no_license | akhikolla/InformationHouse | R | false | false | 1,091 | rd | \name{getOmega}
\alias{getOmega}
\title{
Function to return the terminal age of a life table.
}
\description{
This function returns the \eqn{\omega} value of a life table object, that is,
the last attainable age within a life table.
}
\usage{
getOmega(object)
}
\arguments{
\item{object}{A life table object.}
}
\value{
A numeric value representing the \eqn{\omega} value of a life table object}
\references{
Actuarial Mathematics (Second Edition), 1997, by Bowers, N.L., Gerber, H.U., Hickman, J.C.,
Jones, D.A. and Nesbitt, C.J.
}
\author{
Giorgio A. Spedicato
}
\section{Warning }{
The function is provided as is, without any guarantee regarding the accuracy of calculation. We disclaim any liability for eventual
losses arising from direct or indirect use of this software.
}
\seealso{
\code{\linkS4class{actuarialtable}}
}
\examples{
#assumes SOA example life table to be load
data(soaLt)
soa08=with(soaLt, new("lifetable",
x=x,lx=Ix,name="SOA2008"))
#the last attainable age under SOA life table is
getOmega(soa08)
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480099e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615833879-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480099e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
library(rvest)
library(nbastatR)
library(tidyverse)
#### Free Agency Data ####
# Selecting years used for data set
years <- as.character(2017:2020)
# Looping over years to create urls to pull data from
base_url <- "https://www.basketball-reference.com/friv/free_agents.cgi?year="
urls <- map(years,
~ paste0(base_url, .))
# Looping over urls to pull free agency data
raw_data <- map2_df(urls, years,
~ read_html(.x) %>%
html_node('#free_agents') %>%
html_table() %>%
mutate(year = .y))
# Cleaning up free agency data
# Extracting length and salary from Terms field
# Keep the contract terms and derive the contract length (in years) and the
# total salary (in $M) from the free-form "Terms" string.
free_agency <- raw_data %>%
select(Player, year, Terms) %>%
# First grab the "<digit><sep>yr" token (e.g. "3-yr") from the raw terms...
mutate(length = str_extract(Terms, "([0-9])(['-])(yr+)")) %>%
# ...then keep only the digit and coerce it to a numeric year count.
mutate(length = as.numeric(str_extract(length, "[0-9]"))) %>%
# Total salary is the number between "$" and "M", e.g. "$189.9M" -> 189.9.
mutate(total_salary = parse_number(str_extract(Terms,"\\$(.*?)M"))) %>%
# Fixing edge cases
# NOTE(review): hard-coded correction for one known-bad source row.
mutate(total_salary = ifelse(Player == "Klay Thompson" &
year == "2019", 189.9, total_salary))
# There are some players who signed a contract but don't have a salary
# Mostly made up of those who just have a minimum deal
# Filtering for these
no_salary <- free_agency %>%
filter(is.na(total_salary) & !(is.na(length)))
# Pulling player bios (includes salary info)
player_bios <- bref_bios(players = no_salary$Player)
# Extracting salary info for each of these missing players
# NOTE: Going to remove anyone who had multiple salaries in one year
salaries_for_missing <- player_bios %>%
filter(nameTable %in% c("Salaries", "Contracts")) %>%
unnest(dataTable) %>%
mutate(year = str_extract(slugSeason, "^[0-9]{4}")) %>%
group_by(namePlayerBREF, year) %>%
add_count() %>%
filter(n == 1) %>%
select(Player = namePlayerBREF, year, avg_salary = amountSalary)
# Joining this info to missing player tibble
# NOTE: This is taking the first year of the contract into account only
no_salary <- no_salary %>%
select(-total_salary) %>%
inner_join(salaries_for_missing)
# Calculating average salary for those who have total salary
free_agency <- free_agency %>%
filter(!is.na(total_salary)) %>%
mutate(avg_salary = (total_salary / length) * 1000000) %>%
select(Player, year, length, avg_salary)
# Combining free agency tibble with missing players
free_agency <- no_salary %>%
select(Player, year, length, avg_salary) %>%
bind_rows(free_agency)
# Calculating salary cap at different offseasons
salary_cap <- tibble(
year = as.character(2017:2020),
cap = c(99093000, 101869000, 109140000, 109140000))
free_agency <- free_agency %>%
left_join(salary_cap) %>%
mutate(poc = avg_salary / cap)
#### Stats Data ####
# Going to pull traditional and advanced stats from bref
# Creating per game and advanced urls
per_game_urls <- map(
years,
~ paste0("https://www.basketball-reference.com/leagues/NBA_", .x,
"_per_game.html"))
advanced_urls <- map(
years,
~ paste0("https://www.basketball-reference.com/leagues/NBA_", .x,
"_advanced.html"))
# Pulling data and putting it into data frames
raw_per_game <- map2_df(per_game_urls, years,
~ read_html(.x) %>%
html_node('#per_game_stats') %>%
html_table() %>%
setNames(make.names(names(.),
unique = TRUE)) %>%
mutate(year = .y))
raw_advanced <- map2_df(advanced_urls, years,
~ read_html(.x) %>%
html_node('#advanced_stats') %>%
html_table() %>%
setNames(make.names(names(.),
unique = TRUE)) %>%
mutate(year = .y))
# Selecting a specific set of columns
fields_per_game <- c("year", "Player", "Pos", "Age", "Tm", "G",
"FG", "FGA", "FG.", "X3P", "X3PA", "X3P.",
"FT", "FTA", "FT.", "ORB", "DRB", "TRB",
"AST", "STL", "BLK", "TOV", "PTS")
fields_advanced <- c("year", "Player", "Tm", "TS.", "X3PAr",
"FTr", "USG.", "OWS", "DWS", "OBPM", "DBPM")
# Joining data
stats <- raw_per_game %>%
select(all_of(fields_per_game)) %>%
inner_join(raw_advanced %>%
select(all_of(fields_advanced))) %>%
group_by(Player, year) %>%
add_count() %>%
filter(n == 1 | Tm == "TOT") %>%
select(-n)
combo <- free_agency %>%
left_join(stats) %>%
mutate_at(.vars = vars(Age, G:DBPM), .funs = as.numeric)
write_csv(combo, "data/raw_data.csv")
| /scripts/01_data_pull.R | no_license | jcampbellsjci/free_agency_model | R | false | false | 4,661 | r | library(rvest)
library(nbastatR)
library(tidyverse)
#### Free Agency Data ####
# Selecting years used for data set
years <- as.character(2017:2020)
# Looping over years to create urls to pull data from
base_url <- "https://www.basketball-reference.com/friv/free_agents.cgi?year="
urls <- map(years,
~ paste0(base_url, .))
# Looping over urls to pull free agency data
raw_data <- map2_df(urls, years,
~ read_html(.x) %>%
html_node('#free_agents') %>%
html_table() %>%
mutate(year = .y))
# Cleaning up free agency data
# Extracting length and salary from Terms field
free_agency <- raw_data %>%
select(Player, year, Terms) %>%
mutate(length = str_extract(Terms, "([0-9])(['-])(yr+)")) %>%
mutate(length = as.numeric(str_extract(length, "[0-9]"))) %>%
mutate(total_salary = parse_number(str_extract(Terms,"\\$(.*?)M"))) %>%
# Fixing edge cases
mutate(total_salary = ifelse(Player == "Klay Thompson" &
year == "2019", 189.9, total_salary))
# There are some players who signed a contract but don't have a salary
# Mostly made up of those who just have a minimum deal
# Filtering for these
no_salary <- free_agency %>%
filter(is.na(total_salary) & !(is.na(length)))
# Pulling player bios (includes salary info)
player_bios <- bref_bios(players = no_salary$Player)
# Extracting salary info for each of these missing players
# NOTE: Going to remove anyone who had multiple salaries in one year
salaries_for_missing <- player_bios %>%
filter(nameTable %in% c("Salaries", "Contracts")) %>%
unnest(dataTable) %>%
mutate(year = str_extract(slugSeason, "^[0-9]{4}")) %>%
group_by(namePlayerBREF, year) %>%
add_count() %>%
filter(n == 1) %>%
select(Player = namePlayerBREF, year, avg_salary = amountSalary)
# Joining this info to missing player tibble
# NOTE: This is taking the first year of the contract into account only
no_salary <- no_salary %>%
select(-total_salary) %>%
inner_join(salaries_for_missing)
# Calculating average salary for those who have total salary
free_agency <- free_agency %>%
filter(!is.na(total_salary)) %>%
mutate(avg_salary = (total_salary / length) * 1000000) %>%
select(Player, year, length, avg_salary)
# Combining free agency tibble with missing players
free_agency <- no_salary %>%
select(Player, year, length, avg_salary) %>%
bind_rows(free_agency)
# Calculating salary cap at different offseasons
salary_cap <- tibble(
year = as.character(2017:2020),
cap = c(99093000, 101869000, 109140000, 109140000))
free_agency <- free_agency %>%
left_join(salary_cap) %>%
mutate(poc = avg_salary / cap)
#### Stats Data ####
# Going to pull traditional and advanced stats from bref
# Creating per game and advanced urls
per_game_urls <- map(
years,
~ paste0("https://www.basketball-reference.com/leagues/NBA_", .x,
"_per_game.html"))
advanced_urls <- map(
years,
~ paste0("https://www.basketball-reference.com/leagues/NBA_", .x,
"_advanced.html"))
# Pulling data and putting it into data frames
raw_per_game <- map2_df(per_game_urls, years,
~ read_html(.x) %>%
html_node('#per_game_stats') %>%
html_table() %>%
setNames(make.names(names(.),
unique = TRUE)) %>%
mutate(year = .y))
raw_advanced <- map2_df(advanced_urls, years,
~ read_html(.x) %>%
html_node('#advanced_stats') %>%
html_table() %>%
setNames(make.names(names(.),
unique = TRUE)) %>%
mutate(year = .y))
# Selecting a specific set of columns
fields_per_game <- c("year", "Player", "Pos", "Age", "Tm", "G",
"FG", "FGA", "FG.", "X3P", "X3PA", "X3P.",
"FT", "FTA", "FT.", "ORB", "DRB", "TRB",
"AST", "STL", "BLK", "TOV", "PTS")
fields_advanced <- c("year", "Player", "Tm", "TS.", "X3PAr",
"FTr", "USG.", "OWS", "DWS", "OBPM", "DBPM")
# Joining data
stats <- raw_per_game %>%
select(all_of(fields_per_game)) %>%
inner_join(raw_advanced %>%
select(all_of(fields_advanced))) %>%
group_by(Player, year) %>%
add_count() %>%
filter(n == 1 | Tm == "TOT") %>%
select(-n)
combo <- free_agency %>%
left_join(stats) %>%
mutate_at(.vars = vars(Age, G:DBPM), .funs = as.numeric)
write_csv(combo, "data/raw_data.csv")
|
# Discover and run every test script shipped in the package's "cec_tests"
# directory.  Each script is sourced into a fresh environment; functions
# named "test.*" defined there are treated as test cases, and a "setup"
# function (if present) is evaluated first.  Stops if any test case failed.
run.cec.tests <- function()
{
errors <- 0
# test files
tests <- list.files(system.file("cec_tests", package="CEC"))
for (test in tests)
{
# NOTE(review): the dot in ".R" is an unescaped regex metacharacter, so this
# matches any name containing <any char> followed by "R" -- presumably meant
# as an extension check; confirm before tightening.
if (grepl(".R", test, perl=T))
{
# Fresh environment per test file so scripts cannot leak state into each other.
testenv <- new.env()
local(
{
# just to trick R CMD check...
# (pre-declare the names every test script is expected to define)
testname <- NULL
setup <- NULL
},
testenv
)
source(system.file("cec_tests", test, package="CEC"), local=testenv)
# The whole runner body below is evaluated *inside* testenv, so lsf.str()
# lists the functions the sourced script defined there.
errors <- errors + local(
{
local.errors <- 0
cat(paste("Test:",testname, "\n"))
fs <- lsf.str()
# execute setup function if exists
if ("setup" %in% fs) eval(expr=body(setup), envir=testenv)
for (fn in fs)
{
# test cases
if (grepl("test.", fn))
{
cat(paste("---- ", fn))
# Extract the test function's body so it can be evaluated in testenv.
fbody = body(eval(parse(text=fn)))
# evaluate test case function and catch (and count) errors
local.errors <- local.errors + tryCatch(
{
eval(expr=fbody, envir=testenv)
cat(": OK\n")
0
},
error = function(er) {
# A failing expectation surfaces as a warning; counting continues.
cat(": FAILED\n")
warning(er$message, immediate.=T, call.=F)
1
}
)
}
}
local.errors
},
envir=testenv)
}
}
if (errors > 0)
{
stop("One or more tests failed.")
}
}
printmsg <- function(msg)
{
  # Build the prefix used by the check* helpers when composing failure
  # messages: "<msg> :" when a caller supplied a message, otherwise "".
  if (is.null(msg)) {
    return("")
  }
  paste(msg, ":")
}
checkNumericVectorEquals <- function(ex, ac, msg=NULL, tolerance = .Machine$double.eps ^ 0.5)
{
  # Element-wise numeric comparison of two vectors within `tolerance`.
  # Stops with an informative message on the first differing index;
  # returns NULL invisibly when all elements match.
  if (length(ex) != length(ac)) stop (paste(printmsg(msg),"Vectors have different length."))
  # seq_along() instead of seq(1, length(ex)): with zero-length vectors the
  # latter produces the bogus index sequence c(1, 0).
  for (i in seq_along(ex))
    if (!isTRUE(all.equal.numeric(ex[i], ac[i], tolerance=tolerance)))
      stop (paste(printmsg(msg), "Vectors differ at index:", i, ", expected:", ex[i], ", actuall:",ac[i]))
}
checkNumericEquals <- function(ex, ac, msg = NULL, tolerance = .Machine$double.eps ^ 0.5)
{
  # Both operands must be numeric before a tolerance-based comparison is
  # meaningful; `ex` is validated first so the error order is deterministic.
  for (value in list(ex, ac)) {
    if (!is.numeric(value)) {
      stop(paste(printmsg(msg), "Expression:", value, "is not numeric type."))
    }
  }
  # all.equal.numeric() tolerates tiny floating-point differences.
  if (!isTRUE(all.equal.numeric(ex, ac, tolerance = tolerance))) {
    stop(paste(printmsg(msg), "Numeric values are different: expected:", ex, ", actuall:", ac, ", difference:", abs(ex - ac)))
  }
}
checkEquals <- function(ex, ac, msg=NULL)
{
  # Strict equality: identical() compares values, types and attributes.
  same <- identical(ex, ac)
  if (same) {
    return(invisible(NULL))
  }
  stop(paste(printmsg(msg), "Values are not identical: expected:", ex, ", actuall:", ac))
}
checkTrue <- function(exp, msg = NULL)
{
  # Passes only for exactly the scalar TRUE; two-stage check, type first.
  failure <- NULL
  if (!is.logical(exp)) {
    failure <- "Expression has not logical type."
  } else if (!isTRUE(exp)) {
    failure <- "Expression is not TRUE."
  }
  if (!is.null(failure)) {
    stop(paste(printmsg(msg), failure))
  }
  invisible(NULL)
}
checkNumericMatrixEquals <- function(ex, ac, msg=NULL, tolerance = .Machine$double.eps ^ 0.5)
{
  # Cell-wise numeric comparison of two matrices within `tolerance`.
  # Stops on mismatched dimensions or the first differing cell; returns
  # NULL invisibly when the matrices agree.
  if (nrow(ex) != nrow(ac)) stop (paste(printmsg(msg),"Matrices have different dimensions."))
  if (ncol(ex) != ncol(ac)) stop (paste(printmsg(msg),"Matrices have different dimensions."))
  # seq_len() instead of seq(1, n): with a zero-row (or zero-column) matrix
  # seq(1, 0) yields c(1, 0) and ex[1, j] raised "subscript out of bounds"
  # even though two empty matrices of equal shape are trivially equal.
  for (i in seq_len(nrow(ex)))
    for (j in seq_len(ncol(ex)))
      if (!isTRUE(all.equal.numeric(ex[i, j], ac[i, j], tolerance=tolerance)))
        stop (paste(printmsg(msg), "Matrices differ at row:", i, " col:", j, ": expected:", ex[i, j], ", actuall:",ac[i, j]))
}
# Maximum likelihood estimate of covariance matrix.
cov.mle <- function(M)
{
  # MLE of the covariance matrix of the rows of M: divides by n rather than
  # n - 1, so it is the biased estimator (stats::cov divides by n - 1).
  # Vectorised with crossprod() on the centred data instead of the original
  # row loop of rank-one outer-product updates (which was O(n) matrix
  # additions and also used the unsafe seq(1, nrow(M)) pattern).
  M <- as.matrix(M)
  centered <- sweep(M, 2, colMeans(M), "-")
  crossprod(centered) / nrow(M)
}
| /CEC/R/tests.R | no_license | ingted/R-Examples | R | false | false | 4,643 | r | run.cec.tests <- function()
{
errors <- 0
# test files
tests <- list.files(system.file("cec_tests", package="CEC"))
for (test in tests)
{
if (grepl(".R", test, perl=T))
{
testenv <- new.env()
local(
{
# just to trick R CMD check...
testname <- NULL
setup <- NULL
},
testenv
)
source(system.file("cec_tests", test, package="CEC"), local=testenv)
errors <- errors + local(
{
local.errors <- 0
cat(paste("Test:",testname, "\n"))
fs <- lsf.str()
# execute setup function if exists
if ("setup" %in% fs) eval(expr=body(setup), envir=testenv)
for (fn in fs)
{
# test cases
if (grepl("test.", fn))
{
cat(paste("---- ", fn))
fbody = body(eval(parse(text=fn)))
# evaluate test case function and catch (and count) errors
local.errors <- local.errors + tryCatch(
{
eval(expr=fbody, envir=testenv)
cat(": OK\n")
0
},
error = function(er) {
cat(": FAILED\n")
warning(er$message, immediate.=T, call.=F)
1
}
)
}
}
local.errors
},
envir=testenv)
}
}
if (errors > 0)
{
stop("One or more tests failed.")
}
}
printmsg <- function(msg)
{
if (!is.null(msg))
paste(msg, ":")
else ""
}
checkNumericVectorEquals <- function(ex, ac, msg=NULL, tolerance = .Machine$double.eps ^ 0.5)
{
if (length(ex) != length(ac)) stop (paste(printmsg(msg),"Vectors have different length."))
for (i in seq(1, length(ex)))
if (!isTRUE(all.equal.numeric(ex[i], ac[i], tolerance=tolerance)))
stop (paste(printmsg(msg), "Vectors differ at index:", i, ", expected:", ex[i], ", actuall:",ac[i]))
}
checkNumericEquals <- function(ex, ac, msg=NULL, tolerance = .Machine$double.eps ^ 0.5)
{
if(!is.numeric(ex)) stop(paste(printmsg(msg), "Expression:",ex,"is not numeric type."))
if(!is.numeric(ac)) stop(paste(printmsg(msg), "Expression:",ac,"is not numeric type."))
if (!isTRUE(all.equal.numeric(ex, ac, tolerance=tolerance)))
stop (paste(printmsg(msg), "Numeric values are different: expected:", ex, ", actuall:",ac, ", difference:", abs(ex - ac)))
}
checkEquals <- function(ex, ac, msg=NULL)
{
if (!isTRUE(identical(ex, ac)))
stop (paste(printmsg(msg), "Values are not identical: expected:", ex, ", actuall:",ac))
}
checkTrue <- function(exp, msg=NULL)
{
if (!is.logical(exp))
{
stop(paste(printmsg(msg), "Expression has not logical type."))
}
if (!isTRUE(exp))
{
stop(paste(printmsg(msg), "Expression is not TRUE."))
}
}
checkNumericMatrixEquals <- function(ex, ac, msg=NULL, tolerance = .Machine$double.eps ^ 0.5)
{
  # Cell-wise numeric comparison of two matrices within `tolerance`.
  # Stops on mismatched dimensions or the first differing cell; returns
  # NULL invisibly when the matrices agree.
  if (nrow(ex) != nrow(ac)) stop (paste(printmsg(msg),"Matrices have different dimensions."))
  if (ncol(ex) != ncol(ac)) stop (paste(printmsg(msg),"Matrices have different dimensions."))
  # seq_len() instead of seq(1, n): with a zero-row (or zero-column) matrix
  # seq(1, 0) yields c(1, 0) and ex[1, j] raised "subscript out of bounds"
  # even though two empty matrices of equal shape are trivially equal.
  for (i in seq_len(nrow(ex)))
    for (j in seq_len(ncol(ex)))
      if (!isTRUE(all.equal.numeric(ex[i, j], ac[i, j], tolerance=tolerance)))
        stop (paste(printmsg(msg), "Matrices differ at row:", i, " col:", j, ": expected:", ex[i, j], ", actuall:",ac[i, j]))
}
# Maximum likelihood estimate of covariance matrix.
cov.mle <- function(M)
{
  # MLE of the covariance matrix of the rows of M: divides by n rather than
  # n - 1, so it is the biased estimator (stats::cov divides by n - 1).
  # Vectorised with crossprod() on the centred data instead of the original
  # row loop of rank-one outer-product updates (which was O(n) matrix
  # additions and also used the unsafe seq(1, nrow(M)) pattern).
  M <- as.matrix(M)
  centered <- sweep(M, 2, colMeans(M), "-")
  crossprod(centered) / nrow(M)
}
|
library(dplyr)
longData <- read.csv("Working/MergedData.csv", stringsAsFactors = FALSE)
national2ppData <- read.csv("PollingData/National2ppData.csv", stringsAsFactors = FALSE, na.strings="#N/A")
nationalData <- read.csv("PollingData/NationalData.csv", stringsAsFactors = FALSE, na.strings="#N/A")
stateData <- read.csv("PollingData/StateData.csv", stringsAsFactors = FALSE, na.strings="#N/A")
pollingUrls <- read.csv("PollingData/PollingURLs.csv", stringsAsFactors=FALSE, na.strings="#N/A")
parseDate <- function(s){
  # Parse a single date string, trying formats in order of specificity:
  # "15 Jan 2019", then "15 Jan" (current year), then R's default formats.
  # Returns NA when no format matches.
  shortDateWithYear <- function(q) as.Date(toupper(q), format="%d %b %Y")
  shortDate <- function(q) as.Date(toupper(q), format="%d %b")
  default <- as.Date
  parsingFunctions <- c(shortDateWithYear, shortDate, default)
  for(f in parsingFunctions){
    # try() absorbs the error as.Date() raises on unparseable input.
    result <- try(f(s), silent=TRUE)
    # Check the class first (inherits, not class() ==) so is.na() is never
    # applied to a try-error object; reuse the parsed value instead of
    # calling f(s) a second time as the original did.
    if(inherits(result, "Date") && !is.na(result)){
      return(result)
    }
  }
  return(NA)
}
pollsterCombos <- longData %>% filter(Pollster != "Election") %>%
filter(PollEndDate > as.Date("2014-01-01")) %>%
select(Pollster, Party, Electorate) %>% unique()
pollster <- readline(prompt="Pollster > ")
knownPollsters <- pollsterCombos %>% select(Pollster) %>% unique() %>% .[[1]]
if(! pollster %in% knownPollsters) {
print(knownPollsters)
stop("Unknown pollster")
}
expectedElectorates <- pollsterCombos %>% filter(Pollster == pollster) %>% select(Electorate) %>% unique() %>% .[[1]]
if(length(expectedElectorates) > 1) {
warning("Multi-electorates not implemented yet")
}
electorate <- "AUS"
url <- readline(prompt="URL > ")
pollEndDate <- parseDate(readline(prompt="End Date > "))
if(is.na(pollEndDate)) {
stop("Invalid date")
}
if(pollEndDate > as.Date(Sys.time())){
stop("Future date")
}
if(pollEndDate < (as.Date(Sys.time()) - 30)) {
stop("Old date")
}
alp2pp <- readline(prompt = "2PP ALP > ")
twoPPdataRow <- data.frame(date = format(pollEndDate, "%d/%m/%y"),
Labor2ppAUS = as.numeric(alp2pp), Pollster=pollster)
# Build a new national poll row: copy the first row of the existing data to
# inherit its column structure, then blank every field out.
nationalDataRow <- nationalData[1,]
for(i in names(nationalDataRow)){
nationalDataRow[[i]] <- NA
}
nationalDataRow[["PollEndDate"]] <- format(pollEndDate, "%d/%m/%y")
nationalDataRow$Pollster <- pollster
# Prompt interactively for each party's primary-vote share.  Columns 1-2 are
# PollEndDate/Pollster, so party columns start at index 3.
for(party in names(nationalDataRow)[3:ncol(nationalDataRow)]){
# Parties no longer reported by pollsters are skipped and left NA.
if(party %in% c("DEM","FFP","PHON")){
next
}
partyResult <- readline(prompt = paste0(party, " > "))
# An empty response means "not reported" rather than zero.
if(partyResult == ""){
partyResult <- NA
}
nationalDataRow[[party]] <- as.numeric(partyResult)
}
# Sanity check: reported shares should sum to ~100% (2-point slack for
# rounding and unlisted minor parties).
if(abs(sum(nationalDataRow[3:ncol(nationalDataRow)], na.rm=TRUE) - 100) > 2) {
stop("Doesn't sum to 100.")
}
urlDataRow <- data.frame(PollEndDate = as.character(format(pollEndDate, "%d/%m/%y")),
URL = url,
Pollster = pollster)
print("Data to be written: ")
print(urlDataRow)
print(nationalDataRow)
print(twoPPdataRow)
write.csv(rbind(urlDataRow, pollingUrls), "PollingData/PollingURLs.csv", row.names=FALSE, quote=FALSE, na="#N/A")
write.csv(rbind(nationalDataRow, nationalData), "PollingData/NationalData.csv", row.names=FALSE, quote=FALSE, na="#N/A")
write.csv(rbind(national2ppData, twoPPdataRow), "PollingData/National2ppData.csv", row.names=FALSE, quote=FALSE, na="#N/A")
| /InputData.R | no_license | PhantomTrend/ptcode | R | false | false | 3,198 | r | library(dplyr)
longData <- read.csv("Working/MergedData.csv", stringsAsFactors = FALSE)
national2ppData <- read.csv("PollingData/National2ppData.csv", stringsAsFactors = FALSE, na.strings="#N/A")
nationalData <- read.csv("PollingData/NationalData.csv", stringsAsFactors = FALSE, na.strings="#N/A")
stateData <- read.csv("PollingData/StateData.csv", stringsAsFactors = FALSE, na.strings="#N/A")
pollingUrls <- read.csv("PollingData/PollingURLs.csv", stringsAsFactors=FALSE, na.strings="#N/A")
parseDate <- function(s){
  # Parse a single date string, trying formats in order of specificity:
  # "15 Jan 2019", then "15 Jan" (current year), then R's default formats.
  # Returns NA when no format matches.
  shortDateWithYear <- function(q) as.Date(toupper(q), format="%d %b %Y")
  shortDate <- function(q) as.Date(toupper(q), format="%d %b")
  default <- as.Date
  parsingFunctions <- c(shortDateWithYear, shortDate, default)
  for(f in parsingFunctions){
    # try() absorbs the error as.Date() raises on unparseable input.
    result <- try(f(s), silent=TRUE)
    # Check the class first (inherits, not class() ==) so is.na() is never
    # applied to a try-error object; reuse the parsed value instead of
    # calling f(s) a second time as the original did.
    if(inherits(result, "Date") && !is.na(result)){
      return(result)
    }
  }
  return(NA)
}
pollsterCombos <- longData %>% filter(Pollster != "Election") %>%
filter(PollEndDate > as.Date("2014-01-01")) %>%
select(Pollster, Party, Electorate) %>% unique()
pollster <- readline(prompt="Pollster > ")
knownPollsters <- pollsterCombos %>% select(Pollster) %>% unique() %>% .[[1]]
if(! pollster %in% knownPollsters) {
print(knownPollsters)
stop("Unknown pollster")
}
expectedElectorates <- pollsterCombos %>% filter(Pollster == pollster) %>% select(Electorate) %>% unique() %>% .[[1]]
if(length(expectedElectorates) > 1) {
warning("Multi-electorates not implemented yet")
}
electorate <- "AUS"
url <- readline(prompt="URL > ")
pollEndDate <- parseDate(readline(prompt="End Date > "))
if(is.na(pollEndDate)) {
stop("Invalid date")
}
if(pollEndDate > as.Date(Sys.time())){
stop("Future date")
}
if(pollEndDate < (as.Date(Sys.time()) - 30)) {
stop("Old date")
}
alp2pp <- readline(prompt = "2PP ALP > ")
twoPPdataRow <- data.frame(date = format(pollEndDate, "%d/%m/%y"),
Labor2ppAUS = as.numeric(alp2pp), Pollster=pollster)
nationalDataRow <- nationalData[1,]
for(i in names(nationalDataRow)){
nationalDataRow[[i]] <- NA
}
nationalDataRow[["PollEndDate"]] <- format(pollEndDate, "%d/%m/%y")
nationalDataRow$Pollster <- pollster
for(party in names(nationalDataRow)[3:ncol(nationalDataRow)]){
if(party %in% c("DEM","FFP","PHON")){
next
}
partyResult <- readline(prompt = paste0(party, " > "))
if(partyResult == ""){
partyResult <- NA
}
nationalDataRow[[party]] <- as.numeric(partyResult)
}
if(abs(sum(nationalDataRow[3:ncol(nationalDataRow)], na.rm=TRUE) - 100) > 2) {
stop("Doesn't sum to 100.")
}
urlDataRow <- data.frame(PollEndDate = as.character(format(pollEndDate, "%d/%m/%y")),
URL = url,
Pollster = pollster)
print("Data to be written: ")
print(urlDataRow)
print(nationalDataRow)
print(twoPPdataRow)
write.csv(rbind(urlDataRow, pollingUrls), "PollingData/PollingURLs.csv", row.names=FALSE, quote=FALSE, na="#N/A")
write.csv(rbind(nationalDataRow, nationalData), "PollingData/NationalData.csv", row.names=FALSE, quote=FALSE, na="#N/A")
write.csv(rbind(national2ppData, twoPPdataRow), "PollingData/National2ppData.csv", row.names=FALSE, quote=FALSE, na="#N/A")
|
library(selectiveInference)
### Name: estimateSigma
### Title: Estimate the noise standard deviation in regression
### Aliases: estimateSigma
### ** Examples
# Simulate a sparse linear model: n = 50 observations, p = 10 predictors,
# only the first two coefficients nonzero, unit noise.
set.seed(33)
n = 50
p = 10
sigma = 1
x = matrix(rnorm(n*p),n,p)
beta = c(3,2,rep(0,p-2))
y = x%*%beta + sigma*rnorm(n)
# run forward stepwise
fsfit = fs(x,y)
# estimate sigma
sigmahat = estimateSigma(x,y)$sigmahat
# run sequential inference with estimated sigma
out = fsInf(fsfit,sigma=sigmahat)
out
| /data/genthat_extracted_code/selectiveInference/examples/estimateSigma.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 468 | r | library(selectiveInference)
### Name: estimateSigma
### Title: Estimate the noise standard deviation in regression
### Aliases: estimateSigma
### ** Examples
set.seed(33)
n = 50
p = 10
sigma = 1
x = matrix(rnorm(n*p),n,p)
beta = c(3,2,rep(0,p-2))
y = x%*%beta + sigma*rnorm(n)
# run forward stepwise
fsfit = fs(x,y)
# estimate sigma
sigmahat = estimateSigma(x,y)$sigmahat
# run sequential inference with estimated sigma
out = fsInf(fsfit,sigma=sigmahat)
out
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tasks_functions.R
\name{is.NullOb}
\alias{is.NullOb}
\title{A helper function that tests whether an object is either NULL _or_
a list of NULLs}
\usage{
is.NullOb(x)
}
\description{
A helper function that tests whether an object is either NULL _or_
a list of NULLs
}
\keyword{internal}
| /googletasksv1.auto/man/is.NullOb.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 363 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tasks_functions.R
\name{is.NullOb}
\alias{is.NullOb}
\title{A helper function that tests whether an object is either NULL _or_
a list of NULLs}
\usage{
is.NullOb(x)
}
\description{
A helper function that tests whether an object is either NULL _or_
a list of NULLs
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Litwsd.R
\name{WSD.Lit}
\alias{WSD.Lit}
\title{Literature Woody Stem Density}
\usage{
WSD.Lit(x)
}
\arguments{
\item{x}{From King (1998): The mean number of woody stems >1m tall per 200 square meters.}
}
\value{
Returns the relative HSI value
}
\description{
Calculates the partial HSI given the Woody Stem Density.
}
\author{
Dominic LaRoche
}
| /MBQhsi/man/WSD.Lit.Rd | no_license | rocrat/MBQ_Package | R | false | true | 424 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Litwsd.R
\name{WSD.Lit}
\alias{WSD.Lit}
\title{Literature Woody Stem Density}
\usage{
WSD.Lit(x)
}
\arguments{
\item{x}{From King (1998): The mean number of woody stems >1m tall per 200 square meters.}
}
\value{
Returns the relative HSI value
}
\description{
Calculates the partial HSI given the Woody Stem Density.
}
\author{
Dominic LaRoche
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{multiThresh}
\alias{multiThresh}
\title{Resolution level thresholds for hard thresholded wavelet deconvolution estimator}
\usage{
multiThresh(Y, G = directBlur(nrow(as.matrix(Y)), ncol(as.matrix(Y))),
alpha = rep(1, dim(as.matrix(Y))[2]),
resolution = resolutionMethod(detectBlur(G)), j0 = 3L,
j1 = NA_integer_, eta = NA_real_, deg = 3L)
}
\arguments{
\item{Y}{An input signal either an n by m matrix containing the multichannel signal to be analysed or single vector of n elements for the single channel. In the multichannel case, each of the m columns represents a channel of n observations.}
\item{G}{The input multichannel blur matrix/vector (needs to be the same dimension/length as the signal input which is a matrix or vector for the multichannel or single channel case respectively). This argument dictates the form of blur present in each of the channels.}
\item{alpha}{A numeric vector, with m elements, specifying the level of long memory for the noise process within each channel of the form alpha = 2 - 2H, where H is the Hurst parameter. If alpha is a single element, that same element is repeated across all required channels.}
\item{resolution}{A character string describing which resolution selection method is to be applied.\itemize{
\item 'smooth': Smooth stopping rule in Fourier domain applied piecewise in each channel and maximum selected which is appropriate if blurring kernel is of regular smooth blur type or direct model (no convolution).
\item 'block': Blockwise variance selection method is used which is appropriate for box car type.}
The default choice uses the detectBlur function to identify what type of blur matrix, G, is input and then maps that identification to the resolution type via a simple switch statement in the hidden \code{resolutionMethod} function, whereby, identified 'smooth' and 'direct' blur use the smooth resolution selection while box.car uses the blockwise resolution selection method.}
\item{j0}{The coarsest resolution level for the wavelet expansion.}
\item{j1}{The finest resolution level for the wavelet expansion. If unspecified, the function will compute all thresholds up to the maximum possible resolution level at j1 = log2(n) - 1.}
\item{eta}{The smoothing parameter. The default level is \eqn{2\sqrt(\alpha^*)} where \eqn{\alpha^*} is an optimal level depending on the type of blur. (see Kulik, Sapatinas and Wishart (2014) for details and justification)}
\item{deg}{The degree of the auxiliary polynomial used in the Meyer wavelet.}
}
\value{
A numeric vector of the resolution level thresholds for the hard-thresholding nonlinear wavelet estimator from the multichannel model.
}
\description{
Computes the estimated resolution level thresholds for the hard-thresholding wavelet deconvolution estimate of the desired signal in the multichannel signals.
}
\details{
Given an input matrix of a multichannel signal (n rows and n columns) with m channels and n observations per channel, the function returns the required thresholds for the hard-thresholding estimator of the underlying function, f.
}
\examples{
library(mwaved)
# Simulate the multichannel doppler signal.
m <- 3
n <- 2^10
signal <- makeDoppler(n)
# Noise levels per channel
e <- rnorm(m * n)
# Create Gamma blur
shape <- seq(from = 0.5, to = 1, length = m)
scale <- rep(0.25, m)
G <- gammaBlur(n, shape = shape, scale = scale)
# Convolve the signal
X <- blurSignal(signal, G)
# Create error with custom signal to noise ratio
SNR <- c(10, 15, 20)
sigma <- sigmaSNR(X, SNR)
alpha <- c(0.75, 0.8, 1)
E <- multiNoise(n, sigma, alpha)
# Create noisy & blurred multichannel signal
Y <- X + E
# Determine thresholds blur = 'smooth'
thresh <- multiThresh(Y, G)
}
\references{
Kulik, R., Sapatinas, T. and Wishart, J.R. (2014) \emph{Multichannel wavelet deconvolution with long range dependence. Upper bounds on the L_p risk} Appl. Comput. Harmon. Anal. (to appear in).
\url{http://dx.doi.org/10.1016/j.acha.2014.04.004}
}
| /mwaved/man/multiThresh.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 4,128 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{multiThresh}
\alias{multiThresh}
\title{Resolution level thresholds for hard thresholded wavelet deconvolution estimator}
\usage{
multiThresh(Y, G = directBlur(nrow(as.matrix(Y)), ncol(as.matrix(Y))),
alpha = rep(1, dim(as.matrix(Y))[2]),
resolution = resolutionMethod(detectBlur(G)), j0 = 3L,
j1 = NA_integer_, eta = NA_real_, deg = 3L)
}
\arguments{
\item{Y}{An input signal either an n by m matrix containing the multichannel signal to be analysed or single vector of n elements for the single channel. In the multichannel case, each of the m columns represents a channel of n observations.}
\item{G}{The input multichannel blur matrix/vector (needs to be the same dimension/length as the signal input which is a matrix or vector for the multichannel or single channel case respectively). This argument dictates the form of blur present in each of the channels.}
\item{alpha}{A numeric vector, with m elements, specifying the level of long memory for the noise process within each channel of the form alpha = 2 - 2H, where H is the Hurst parameter. If alpha is a single element, that same element is repeated across all required channels.}
\item{resolution}{A character string describing which resolution selection method is to be applied.\itemize{
\item 'smooth': Smooth stopping rule in Fourier domain applied piecewise in each channel and maximum selected which is appropriate if blurring kernel is of regular smooth blur type or direct model (no convolution).
\item 'block': Blockwise variance selection method is used which is appropriate for box car type.}
The default choice uses the detectBlur function to identify what type of blur matrix, G, is input and then maps that identification to the resolution type via a simple switch statement in the hidden \code{resolutionMethod} function, whereby, identified 'smooth' and 'direct' blur use the smooth resolution selection while box.car uses the blockwise resolution selection method.}
\item{j0}{The coarsest resolution level for the wavelet expansion.}
\item{j1}{The finest resolution level for the wavelet expansion. If unspecified, the function will compute all thresholds up to the maximum possible resolution level at j1 = log2(n) - 1.}
\item{eta}{The smoothing parameter. The default level is \eqn{2\sqrt(\alpha^*)} where \eqn{\alpha^*} is an optimal level depending on the type of blur. (see Kulik, Sapatinas and Wishart (2014) for details and justification)}
\item{deg}{The degree of the auxiliary polynomial used in the Meyer wavelet.}
}
\value{
A numeric vector of the resolution level thresholds for the hard-thresholding nonlinear wavelet estimator from the multichannel model.
}
\description{
Computes the estimated resolution level thresholds for the hard-thresholding wavelet deconvolution estimate of the desired signal in the multichannel signals.
}
\details{
Given an input matrix of a multichannel signal (n rows and m columns) with m channels and n observations per channel, the function returns the required thresholds for the hard-thresholding estimator of the underlying function, f.
}
\examples{
library(mwaved)
# Simulate the multichannel doppler signal.
m <- 3
n <- 2^10
signal <- makeDoppler(n)
# Noise levels per channel
e <- rnorm(m * n)
# Create Gamma blur
shape <- seq(from = 0.5, to = 1, length = m)
scale <- rep(0.25, m)
G <- gammaBlur(n, shape = shape, scale = scale)
# Convolve the signal
X <- blurSignal(signal, G)
# Create error with custom signal to noise ratio
SNR <- c(10, 15, 20)
sigma <- sigmaSNR(X, SNR)
alpha <- c(0.75, 0.8, 1)
E <- multiNoise(n, sigma, alpha)
# Create noisy & blurred multichannel signal
Y <- X + E
# Determine thresholds blur = 'smooth'
thresh <- multiThresh(Y, G)
}
\references{
Kulik, R., Sapatinas, T. and Wishart, J.R. (2014) \emph{Multichannel wavelet deconvolution with long range dependence. Upper bounds on the L_p risk} Appl. Comput. Harmon. Anal. (to appear in).
\url{http://dx.doi.org/10.1016/j.acha.2014.04.004}
}
|
## Create a new tab on the right-panel notebook.
## tab.title: label for the tab; when NULL a default "Tab <n>" label is
##            generated from infoTabs().
## Returns list(tab, ftab): the outer ttk frame added to the notebook and
## the inner sunken white frame that tab content should be gridded into.
addNewTab <- function(tab.title = NULL){
  notebook <- .cdtEnv$tcl$main$tknotes
  if(is.null(tab.title)) tab.title <- paste('Tab', infoTabs(), " ")
  tab <- ttkframe(notebook)
  tkadd(notebook, tab, text = paste(tab.title, " "))
  tkgrid.rowconfigure(tab, 0, weight = 1)
  tkgrid.columnconfigure(tab, 0, weight = 1)
  inner <- tkframe(tab, bd = 2, relief = 'sunken', bg = 'white')
  tkgrid(inner, row = 0, column = 0, sticky = 'nswe')
  tkgrid.rowconfigure(inner, 0, weight = 1)
  tkgrid.columnconfigure(inner, 0, weight = 1)
  list(tab, inner)
}
## Derive the next tab number from the Tk widget names of the notebook's
## children: take the trailing numeric component of the last child's
## widget path and add 1.
infoTabs <- function(){
  open.tabs <- unlist(strsplit(tclvalue(tkwinfo("children", .cdtEnv$tcl$main$tknotes)), " "))
  end.tabs <- as.integer(unlist(strsplit(open.tabs[length(open.tabs)], "\\.")))
  id.tabs <- end.tabs[length(end.tabs)]
  # Two failure modes: no children yet (length 0), or a non-numeric
  # widget-path component (as.integer() -> NA). The original only
  # guarded the length-0 case, so an NA could leak into the tab title.
  if(length(id.tabs) == 0 || is.na(id.tabs)) id.tabs <- 0
  return(id.tabs + 1)
}
########################################################################
## Display a data.frame in a table on a new notebook tab.
## data.df  : data.frame to show
## title    : tab title
## colwidth : column width forwarded to displayTable (default 8)
## Returns list(onglet, table1). On failure, table1 degrades to a list
## holding NULLs so the caller can still reference/close the tab.
Display_data.frame_Table <- function(data.df, title, colwidth = 8){
onglet <- addNewTab(title)
# Conversion to a Tcl array can fail on unusual inputs; trap it instead
# of aborting the whole GUI action.
dtab <- try(tclArrayVar(data.df), silent = TRUE)
if(inherits(dtab, "try-error")){
Insert.Messages.Out("Unable to create tclArrayVar", format = TRUE)
# dtab[1] carries the captured error text; strip newlines for the log.
Insert.Messages.Out(gsub('[\r\n]', '', dtab[1]), format = TRUE)
return(list(onglet, list(NULL, NULL)))
}
# Rendering the table widget may also fail; keep the tclArray so the
# data itself is not lost.
table1 <- try(displayTable(onglet[[2]], tclArray = dtab, colwidth = colwidth), silent = TRUE)
if(inherits(table1, "try-error")){
Insert.Messages.Out("Unable to display table", format = TRUE)
Insert.Messages.Out(gsub('[\r\n]', '', table1[1]), format = TRUE)
table1 <- list(NULL, dtab)
}
return(list(onglet, table1))
}
## Display homogenization output info
## ???replace by Display_data.frame_Table (colwidth = 24)
# DisplayHomInfo <- function(parent, homInfo, title, colwidth = '24'){
# onglet <- addNewTab(parent, tab.title = title)
# dtab <- tclArrayVar(homInfo)
# table1 <- displayTable(onglet[[2]], tclArray = dtab, colwidth = colwidth)
# return(list(onglet, table1))
# }
########
## Display output QC/HOMOGE in a table
## old name: DisplayQcHom(parent, outqchom, title)
## data: ReturnExecResults$action, GeneralParameters$action
## Display QC/homogenization output (first element of out.qc.hom) in a
## table on a new tab; column width depends on the current action.
## Returns list(onglet, table1, <remaining elements of out.qc.hom>).
## NOTE(review): reads the globals ReturnExecResults and GeneralParameters.
Display_QcHom_Output <- function(out.qc.hom, title){
  onglet <- addNewTab(title)
  dtab <- tclArrayVar(out.qc.hom[[1]])
  # Scalar condition: use the short-circuit `||` (the original used the
  # vectorized `|`, which only worked because both sides are length 1).
  col <- if(ReturnExecResults$action == 'homog' || GeneralParameters$action == "rhtests") '15' else '10'
  table1 <- displayTable(onglet[[2]], tclArray = dtab, colwidth = col)
  return(list(onglet, table1, out.qc.hom[-1]))
}
## ???replace by Display_data.frame_Table
# data.df <- out.qc.hom[[1]]
# col <- if(ReturnExecResults$action == 'homog' | GeneralParameters$action == "rhtests") '15' else '10'
# tab.array <- Display_data.frame_Table(data.df, title, col)
# tab.array <- c(tab.array, out.qc.hom[-1])
#######
## Display interpolation data in a table
## old name: DisplayInterpData(parent, data, title, colwidth = '15')
## Show interpolation data (element 2 of `data`) in a table on a new tab.
## Returns list(onglet, table1, <remaining elements of data>).
DisplayInterpData <- function(data, title, colwidth = '15'){
  new.tab <- addNewTab(title)
  interp.array <- tclArrayVar(data[[2]])
  tbl <- displayTable(new.tab[[2]], tclArray = interp.array, colwidth = colwidth)
  list(new.tab, tbl, data[-2])
}
## ???replace by Display_data.frame_Table
# data.df <- data[[2]]
# tab.array <- Display_data.frame_Table(data.df, title, colwidth = 15)
# tab.array <- c(tab.array, data[-2])
########################################################################
## Deja remplacer
# ## Display data in a table from open files list
# displayInTable <- function(){
# id.active <- as.integer(tclvalue(tkcurselection(.cdtEnv$tcl$main$Openfiles))) + 1
# onglet <- addNewTab(.cdtData$OpenFiles$Data[[id.active]][[1]])
# dat.file <- .cdtData$OpenFiles$Data[[id.active]][[2]]
# dtab <- tclArrayVar(dat.file)
# table1 <- displayTable(onglet[[2]], tclArray = dtab)
# return(list(onglet, table1, .cdtData$OpenFiles$Data[[id.active]][[3]]))
# }
# id.active <- as.integer(tclvalue(tkcurselection(.cdtEnv$tcl$main$Openfiles))) + 1
# title <- .cdtData$OpenFiles$Data[[id.active]][[1]]
# data.df <- .cdtData$OpenFiles$Data[[id.active]][[2]]
# data.out <- Display_data.frame_Table(data.df, title)
# tab.array <- c(data.out, .cdtData$OpenFiles$Data[[id.active]][[3]])
########################################################################
## Open and display table on new tab
## old name: displayArrayTab(parent.win, parent)
## Prompt for a file, register it in the open-files list and display it
## as a table on a new tab.
## Returns c(tab, table, <file spec>) or NULL when the user cancels.
Display_Array_Tab <- function(parent){
# Restore the normal cursor on any exit path (including errors).
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
# Busy cursor while reading and rendering the file.
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
data.file <- getOpenFiles(parent)
# User cancelled the file dialog.
if(is.null(data.file)) return(NULL)
update.OpenFiles('ascii', data.file)
# data.file: [[1]] title, [[2]] data.frame, [[3]] file specification.
tab.array <- Display_data.frame_Table(data.file[[2]], data.file[[1]])
tab.array <- c(tab.array, data.file[[3]])
return(tab.array)
}
########################################################################
## Display output console inner frame
## old name: displayConsOutput(parent, out2disp, rhtests = FALSE)
## Build a scrollable text console inside `parent` and fill it with
## `out2disp`.
## - rhtests = FALSE: `out2disp` is an R object; its print() output is
##   captured (at width 160) and inserted line by line.
## - rhtests = TRUE : `out2disp` is already text and is inserted verbatim.
Display_Output_Console <- function(parent, out2disp, rhtests = FALSE){
  xscr <- tkscrollbar(parent, repeatinterval = 5, orient = "horizontal",
                      command = function(...) tkxview(txta, ...))
  yscr <- tkscrollbar(parent, repeatinterval = 5,
                      command = function(...) tkyview(txta, ...))
  txta <- tktext(parent, bg = "white", font = "courier", wrap = "none",
                 xscrollcommand = function(...) tkset(xscr, ...),
                 yscrollcommand = function(...) tkset(yscr, ...))
  tkgrid(txta, yscr)
  tkgrid(xscr)
  tkgrid.configure(yscr, sticky = "ns")
  tkgrid.configure(xscr, sticky = "ew")
  tkgrid.configure(txta, sticky = 'nswe')
  tkgrid.rowconfigure(txta, 0, weight = 1)
  tkgrid.columnconfigure(txta, 0, weight = 1)
  if(!rhtests){
    # Capture print() output through a temp file. Fixes vs. original:
    # don't shadow base::tempfile() with a local of the same name;
    # always undo the sink and the width option even if print() fails;
    # iterate over the lines directly (1:length() misbehaves when the
    # captured output is empty).
    tmpf <- tempfile()
    on.exit(unlink(tmpf), add = TRUE)
    op <- options(width = 160)
    sink(tmpf)
    tryCatch(print(out2disp),
             finally = {
               sink()
               options(op)
             })
    rdL <- readLines(tmpf, warn = FALSE)
    for(ln in rdL) tkinsert(txta, "end", paste(ln, "\n"))
  }else tkinsert(txta, "end", out2disp)
}
## Display output console on tab
## old name: displayConsOutputTabs(parent, out2disp, title, rhtests = FALSE)
## Open a new notebook tab and render console-style output inside it.
## Returns the tab pair produced by addNewTab().
Display_Output_Console_Tab <- function(out2disp, title, rhtests = FALSE){
  tab <- addNewTab(title)
  Display_Output_Console(tab[[2]], out2disp, rhtests)
  tab
}
########################################################################
## Update Open Tab data
## Append a tab entry (type tag + payload) to the open-tabs registry
## kept in .cdtData$OpenTab.
## Returns the registry length *before* the insertion.
update.OpenTabs <- function(type, data){
  pos <- length(.cdtData$OpenTab$Type) + 1L
  .cdtData$OpenTab$Type[[pos]] <- type
  .cdtData$OpenTab$Data[[pos]] <- data
  pos - 1L
}
########################################################################
## Close tab
## Close (destroy) the notebook tab at the given 0-based notebook index
## and drop its entry from the open-tabs registry.
## Widget location differs by tab type:
##  - table-like tabs ("arr", "chkcrds", "falsezero", "outqc", "outhom"):
##    the widget is stored at Data[[tabid]][[1]][[1]]
##  - console text tabs ("ctxt"): widget at Data[[tabid]][[1]]
##  - image tabs ("img"): destroy the stored child [[1]][[2]] before its
##    container [[1]][[1]]
## Unknown types (and an NA index) are left untouched.
Close_Notebook_Tab <- function(index)
{
tabid <- as.integer(index) + 1
if(!is.na(tabid)){
arrTypes <- c("arr", "chkcrds", "falsezero", "outqc", "outhom")
if(.cdtData$OpenTab$Type[[tabid]] %in% arrTypes){
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]][[1]])
}else if(.cdtData$OpenTab$Type[[tabid]] == "ctxt"){
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]])
}else if(.cdtData$OpenTab$Type[[tabid]] == "img"){
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]][[2]])
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]][[1]])
}else return(NULL)
# Assigning NULL with single-bracket indexing removes the list slots,
# shifting later entries down to match the notebook's re-indexing.
.cdtData$OpenTab$Data[tabid] <- NULL
.cdtData$OpenTab$Type[tabid] <- NULL
}else return(NULL)
}
########################################################################
## Save table As
## "Save As" for the currently selected notebook tab's table.
## Asks for a destination file, converts the tab's tclArray back to a
## data.frame and writes it. If the tab originated from a file that is
## still listed in the open-files registry, its original format spec
## (header / missing value / separator) is reused for the output.
Save_Table_As <- function(){
# Restore the normal cursor on every exit path.
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
# 0-based Tk index of the selected tab -> 1-based R index.
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(!is.na(tabid)){
Objarray <- .cdtData$OpenTab$Data[[tabid]][[2]]
# Only tabs backed by a tclArrayVar are savable here.
if(!inherits(Objarray[[2]], "tclArrayVar")){
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['6']], format = TRUE)
return(NULL)
}
tryCatch(
{
file.to.save <- tk_get_SaveFile(filetypes = .cdtEnv$tcl$data$filetypes2)
dat2sav <- tclArray2dataframe(Objarray)
file.spec <- NULL
# A third element on the tab entry means the table came from a file
# on disk; try to recover that file's format specification.
if(length(.cdtData$OpenTab$Data[[tabid]]) > 2){
file.disk <- .cdtData$OpenTab$Data[[tabid]][[3]]
if(file.exists(file.disk)){
nopfs <- length(.cdtData$OpenFiles$Type)
if(nopfs > 0){
listOpenFiles <- sapply(1:nopfs, function(j) .cdtData$OpenFiles$Data[[j]][[1]])
if(basename(file.disk) %in% listOpenFiles){
nopf <- which(listOpenFiles %in% basename(file.disk))
file.spec <- .cdtData$OpenFiles$Data[[nopf]][[4]]
}
}
}
}
if(!is.null(file.spec)){
# Reuse the source file's header/NA/separator conventions.
writeFiles(dat2sav, file.to.save, col.names = file.spec$header,
na = file.spec$miss.val, sep = file.spec$sepr)
}else{
writeFiles(dat2sav, file.to.save, col.names = Objarray[[2]]$col.names)
}
# Success message.
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['3']])
},
warning = function(w){
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['2']], format = TRUE)
warningFun(w)
},
error = function(e){
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['2']], format = TRUE)
errorFun(e)
}
)
}
invisible()
}
########################################################################
## Save table type "arr"
## Save a table of type "arr" to disk.
## dat2sav  : data.frame extracted from the tab's tclArray
## tabid    : index of the tab in .cdtData$OpenTab
## Objarray : the tab's tclArray object; defaults to the one stored on
##            the tab entry. (Previously this name was referenced in the
##            fallback write path but never defined inside the function,
##            so that branch errored with "object 'Objarray' not found".)
saveTable.arr.type <- function(dat2sav, tabid,
                               Objarray = .cdtData$OpenTab$Data[[tabid]][[2]]){
  if(is.null(dat2sav)){
    Insert.Messages.Out("No data to save", format = TRUE)
    return(NULL)
  }
  if(length(.cdtData$OpenTab$Data[[tabid]]) == 2){
    # No source file recorded: ask the user where to save.
    filetosave <- tk_get_SaveFile(filetypes = .cdtEnv$tcl$data$filetypes2)
    writeFiles(dat2sav, filetosave, col.names = TRUE)
  }else{
    # Overwrite the original file, reusing its format spec when that
    # file is still present in the open-files registry.
    filetosave <- .cdtData$OpenTab$Data[[tabid]][[3]]
    file.spec <- NULL
    nopfs <- length(.cdtData$OpenFiles$Type)
    if(nopfs > 0){
      listOpenFiles <- sapply(seq_len(nopfs), function(j) .cdtData$OpenFiles$Data[[j]][[1]])
      if(basename(filetosave) %in% listOpenFiles){
        nopf <- which(listOpenFiles %in% basename(filetosave))
        file.spec <- .cdtData$OpenFiles$Data[[nopf]][[4]]
      }
    }
    if(!is.null(file.spec)){
      writeFiles(dat2sav, filetosave, col.names = file.spec$header,
                 na = file.spec$miss.val, sep = file.spec$sepr)
    }else{
      writeFiles(dat2sav, filetosave,
                 col.names = Objarray[[2]]$col.names,
                 row.names = Objarray[[2]]$row.names)
    }
  }
}
########################################################################
## Save the currently selected notebook tab's table, dispatching on the
## tab type to the matching save routine.
## Returns 0 after dispatch, NULL when there is nothing to save.
Save_Notebook_Tab_Array <- function(){
  on.exit({
    tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
    tcl('update')
  })
  # Busy cursor while collecting and writing the data.
  tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
  tcl('update')
  tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
  # Guard a missing selection (NA index), as Save_Table_As already does,
  # in addition to the empty-registry check; the original only checked
  # the registry length and would error on an NA subscript.
  if(is.na(tabid) || length(.cdtData$OpenTab$Type) == 0) return(NULL)
  Objarray <- .cdtData$OpenTab$Data[[tabid]][[2]]
  dat2sav <- tclArray2dataframe(Objarray)
  switch(.cdtData$OpenTab$Type[[tabid]],
         "arr" = saveTable.arr.type(dat2sav, tabid),
         "chkcrds" = .cdtData$EnvData$StnChkCoords$SaveEdit(dat2sav),
         "falsezero" = .cdtData$EnvData$qcRRZeroCheck$SaveEdit(dat2sav),
         "outqc" = .cdtData$EnvData$QC$SaveEdit(dat2sav),
         "outhom" = .cdtData$EnvData$HomTest$SaveEdit(dat2sav),
         NULL)
  return(0)
}
| /R/cdtTabs_functions.R | no_license | heureux1985/CDT | R | false | false | 11,150 | r |
## Add new tab on the right panel
addNewTab <- function(tab.title = NULL){
if(is.null(tab.title)) tab.title <- paste('Tab', infoTabs(), " ")
tab <- ttkframe(.cdtEnv$tcl$main$tknotes)
tkadd(.cdtEnv$tcl$main$tknotes, tab, text = paste(tab.title, " "))
tkgrid.columnconfigure(tab, 0, weight = 1)
tkgrid.rowconfigure(tab, 0, weight = 1)
ftab <- tkframe(tab, bd = 2, relief = 'sunken', bg = 'white')
tkgrid(ftab, row = 0, column = 0, sticky = 'nswe')
tkgrid.columnconfigure(ftab, 0, weight = 1)
tkgrid.rowconfigure(ftab, 0, weight = 1)
return(list(tab, ftab))
}
## Count created tabs
infoTabs <- function(){
open.tabs <- unlist(strsplit(tclvalue(tkwinfo("children", .cdtEnv$tcl$main$tknotes)), " "))
end.tabs <- as.integer(unlist(strsplit(open.tabs[length(open.tabs)], "\\.")))
id.tabs <- end.tabs[length(end.tabs)]
if(length(id.tabs) == 0) id.tabs <- 0
return(id.tabs + 1)
}
########################################################################
## Display opened data.frame on Tab in a table
Display_data.frame_Table <- function(data.df, title, colwidth = 8){
onglet <- addNewTab(title)
dtab <- try(tclArrayVar(data.df), silent = TRUE)
if(inherits(dtab, "try-error")){
Insert.Messages.Out("Unable to create tclArrayVar", format = TRUE)
Insert.Messages.Out(gsub('[\r\n]', '', dtab[1]), format = TRUE)
return(list(onglet, list(NULL, NULL)))
}
table1 <- try(displayTable(onglet[[2]], tclArray = dtab, colwidth = colwidth), silent = TRUE)
if(inherits(table1, "try-error")){
Insert.Messages.Out("Unable to display table", format = TRUE)
Insert.Messages.Out(gsub('[\r\n]', '', table1[1]), format = TRUE)
table1 <- list(NULL, dtab)
}
return(list(onglet, table1))
}
## Display homogenization output info
## ???replace by Display_data.frame_Table (colwidth = 24)
# DisplayHomInfo <- function(parent, homInfo, title, colwidth = '24'){
# onglet <- addNewTab(parent, tab.title = title)
# dtab <- tclArrayVar(homInfo)
# table1 <- displayTable(onglet[[2]], tclArray = dtab, colwidth = colwidth)
# return(list(onglet, table1))
# }
########
## Display output QC/HOMOGE in a table
## old name: DisplayQcHom(parent, outqchom, title)
## data: ReturnExecResults$action, GeneralParameters$action
Display_QcHom_Output <- function(out.qc.hom, title){
onglet <- addNewTab(title)
dtab <- tclArrayVar(out.qc.hom[[1]])
col <- if(ReturnExecResults$action == 'homog' | GeneralParameters$action == "rhtests") '15' else '10'
table1 <- displayTable(onglet[[2]], tclArray = dtab, colwidth = col)
return(list(onglet, table1, out.qc.hom[-1]))
}
## ???replace by Display_data.frame_Table
# data.df <- out.qc.hom[[1]]
# col <- if(ReturnExecResults$action == 'homog' | GeneralParameters$action == "rhtests") '15' else '10'
# tab.array <- Display_data.frame_Table(data.df, title, col)
# tab.array <- c(tab.array, out.qc.hom[-1])
#######
## Display interpolation data in a table
## old name: DisplayInterpData(parent, data, title, colwidth = '15')
DisplayInterpData <- function(data, title, colwidth = '15'){
onglet <- addNewTab(title)
dtab <- tclArrayVar(data[[2]])
table1 <- displayTable(onglet[[2]], tclArray = dtab, colwidth = colwidth)
return(list(onglet, table1, data[-2]))
}
## ???replace by Display_data.frame_Table
# data.df <- data[[2]]
# tab.array <- Display_data.frame_Table(data.df, title, colwidth = 15)
# tab.array <- c(tab.array, data[-2])
########################################################################
## Deja remplacer
# ## Display data in a table from open files list
# displayInTable <- function(){
# id.active <- as.integer(tclvalue(tkcurselection(.cdtEnv$tcl$main$Openfiles))) + 1
# onglet <- addNewTab(.cdtData$OpenFiles$Data[[id.active]][[1]])
# dat.file <- .cdtData$OpenFiles$Data[[id.active]][[2]]
# dtab <- tclArrayVar(dat.file)
# table1 <- displayTable(onglet[[2]], tclArray = dtab)
# return(list(onglet, table1, .cdtData$OpenFiles$Data[[id.active]][[3]]))
# }
# id.active <- as.integer(tclvalue(tkcurselection(.cdtEnv$tcl$main$Openfiles))) + 1
# title <- .cdtData$OpenFiles$Data[[id.active]][[1]]
# data.df <- .cdtData$OpenFiles$Data[[id.active]][[2]]
# data.out <- Display_data.frame_Table(data.df, title)
# tab.array <- c(data.out, .cdtData$OpenFiles$Data[[id.active]][[3]])
########################################################################
## Open and display table on new tab
## old name: displayArrayTab(parent.win, parent)
Display_Array_Tab <- function(parent){
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
data.file <- getOpenFiles(parent)
if(is.null(data.file)) return(NULL)
update.OpenFiles('ascii', data.file)
tab.array <- Display_data.frame_Table(data.file[[2]], data.file[[1]])
tab.array <- c(tab.array, data.file[[3]])
return(tab.array)
}
########################################################################
## Display output console inner frame
## old name: displayConsOutput(parent, out2disp, rhtests = FALSE)
Display_Output_Console <- function(parent, out2disp, rhtests = FALSE){
xscr <- tkscrollbar(parent, repeatinterval = 5, orient = "horizontal",
command = function(...) tkxview(txta, ...))
yscr <- tkscrollbar(parent, repeatinterval = 5,
command = function(...) tkyview(txta, ...))
txta <- tktext(parent, bg = "white", font = "courier", wrap = "none",
xscrollcommand = function(...) tkset(xscr, ...),
yscrollcommand = function(...) tkset(yscr, ...))
tkgrid(txta, yscr)
tkgrid(xscr)
tkgrid.configure(yscr, sticky = "ns")
tkgrid.configure(xscr, sticky = "ew")
tkgrid.configure(txta, sticky = 'nswe')
tkgrid.rowconfigure(txta, 0, weight = 1)
tkgrid.columnconfigure(txta, 0, weight = 1)
if(!rhtests){
tempfile <- tempfile()
sink(tempfile)
op <- options()
options(width = 160)
print(out2disp)
options(op)
sink()
rdL <- readLines(tempfile, warn = FALSE)
for(i in 1:length(rdL)) tkinsert(txta, "end", paste(rdL[i], "\n"))
unlink(tempfile)
}else tkinsert(txta, "end", out2disp)
}
## Display output console on tab
## old name: displayConsOutputTabs(parent, out2disp, title, rhtests = FALSE)
Display_Output_Console_Tab <- function(out2disp, title, rhtests = FALSE){
onglet <- addNewTab(title)
Display_Output_Console(onglet[[2]], out2disp, rhtests)
return(onglet)
}
########################################################################
## Update Open Tab data
update.OpenTabs <- function(type, data){
ntab <- length(.cdtData$OpenTab$Type)
.cdtData$OpenTab$Type[[ntab + 1]] <- type
.cdtData$OpenTab$Data[[ntab + 1]] <- data
return(ntab)
}
########################################################################
## Close tab
Close_Notebook_Tab <- function(index)
{
tabid <- as.integer(index) + 1
if(!is.na(tabid)){
arrTypes <- c("arr", "chkcrds", "falsezero", "outqc", "outhom")
if(.cdtData$OpenTab$Type[[tabid]] %in% arrTypes){
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]][[1]])
}else if(.cdtData$OpenTab$Type[[tabid]] == "ctxt"){
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]])
}else if(.cdtData$OpenTab$Type[[tabid]] == "img"){
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]][[2]])
tkdestroy(.cdtData$OpenTab$Data[[tabid]][[1]][[1]])
}else return(NULL)
.cdtData$OpenTab$Data[tabid] <- NULL
.cdtData$OpenTab$Type[tabid] <- NULL
}else return(NULL)
}
########################################################################
## Save table As
Save_Table_As <- function(){
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(!is.na(tabid)){
Objarray <- .cdtData$OpenTab$Data[[tabid]][[2]]
if(!inherits(Objarray[[2]], "tclArrayVar")){
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['6']], format = TRUE)
return(NULL)
}
tryCatch(
{
file.to.save <- tk_get_SaveFile(filetypes = .cdtEnv$tcl$data$filetypes2)
dat2sav <- tclArray2dataframe(Objarray)
file.spec <- NULL
if(length(.cdtData$OpenTab$Data[[tabid]]) > 2){
file.disk <- .cdtData$OpenTab$Data[[tabid]][[3]]
if(file.exists(file.disk)){
nopfs <- length(.cdtData$OpenFiles$Type)
if(nopfs > 0){
listOpenFiles <- sapply(1:nopfs, function(j) .cdtData$OpenFiles$Data[[j]][[1]])
if(basename(file.disk) %in% listOpenFiles){
nopf <- which(listOpenFiles %in% basename(file.disk))
file.spec <- .cdtData$OpenFiles$Data[[nopf]][[4]]
}
}
}
}
if(!is.null(file.spec)){
writeFiles(dat2sav, file.to.save, col.names = file.spec$header,
na = file.spec$miss.val, sep = file.spec$sepr)
}else{
writeFiles(dat2sav, file.to.save, col.names = Objarray[[2]]$col.names)
}
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['3']])
},
warning = function(w){
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['2']], format = TRUE)
warningFun(w)
},
error = function(e){
Insert.Messages.Out(.cdtEnv$tcl$lang$global[['message']][['2']], format = TRUE)
errorFun(e)
}
)
}
invisible()
}
########################################################################
## Save table type "arr"
saveTable.arr.type <- function(dat2sav, tabid){
if(is.null(dat2sav)){
Insert.Messages.Out("No data to save", format = TRUE)
return(NULL)
}
if(length(.cdtData$OpenTab$Data[[tabid]]) == 2){
filetosave <- tk_get_SaveFile(filetypes = .cdtEnv$tcl$data$filetypes2)
writeFiles(dat2sav, filetosave, col.names = TRUE)
}else{
filetosave <- .cdtData$OpenTab$Data[[tabid]][[3]]
file.spec <- NULL
nopfs <- length(.cdtData$OpenFiles$Type)
if(nopfs > 0){
listOpenFiles <- sapply(1:nopfs, function(j) .cdtData$OpenFiles$Data[[j]][[1]])
if(basename(filetosave) %in% listOpenFiles){
nopf <- which(listOpenFiles %in% basename(filetosave))
file.spec <- .cdtData$OpenFiles$Data[[nopf]][[4]]
}
}
if(!is.null(file.spec)){
writeFiles(dat2sav, filetosave, col.names = file.spec$header,
na = file.spec$miss.val, sep = file.spec$sepr)
}else{
writeFiles(dat2sav, filetosave,
col.names = Objarray[[2]]$col.names,
row.names = Objarray[[2]]$row.names)
}
}
}
########################################################################
Save_Notebook_Tab_Array <- function(){
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0){
Objarray <- .cdtData$OpenTab$Data[[tabid]][[2]]
dat2sav <- tclArray2dataframe(Objarray)
switch(.cdtData$OpenTab$Type[[tabid]],
"arr" = saveTable.arr.type(dat2sav, tabid),
"chkcrds" = .cdtData$EnvData$StnChkCoords$SaveEdit(dat2sav),
"falsezero" = .cdtData$EnvData$qcRRZeroCheck$SaveEdit(dat2sav),
"outqc" = .cdtData$EnvData$QC$SaveEdit(dat2sav),
"outhom" = .cdtData$EnvData$HomTest$SaveEdit(dat2sav),
NULL)
}else return(NULL)
return(0)
}
|
# Arrival-rate analysis: read per-day order statistics and render a set
# of diagnostic plots into arrival_rate.pdf.
ar <- read.csv('arrival_rate.csv')
pdf(file='arrival_rate.pdf')
plot(ar$arrival_count, xlab = 'Day of year', ylab = 'Count of Orders (log scale)', log='y', main='Count of Orders per Day', type='l')
plot(ar$arrival_median, xlab = 'Day of year', ylab = 'Median Work Minutes (log scale)', log='y', main='Median Order Work-Minutes by Day', type='l')
plot(ar$arrival_minutes, xlab = 'Day of year', ylab = 'Total Work Minutes (log scale)', log='y', main='Total Work-Minutes per Day', type='l')
# as.numeric() before cumsum()/sum() avoids integer overflow on large totals.
plot(100*cumsum(as.numeric(ar$arrival_count))/sum(as.numeric(ar$arrival_count)),
     xlab = 'Day of year', ylab = '% of Total Orders', log='', main='Cumulative % of Total Orders, by Day', type='l')
plot(100*cumsum(as.numeric(ar$arrival_minutes))/sum(as.numeric(ar$arrival_minutes)),
     xlab = 'Day of year', ylab = '% of Total Work Minutes', log='', main='Cumulative % of Total Work-Minutes, by Day', type='l')
# Close the PDF device so the file is flushed and finalized (the
# original script never called dev.off(), leaving the device open).
dev.off()
| /HelpingSantasHelpers/analysis/arrival_rate.R | no_license | chrishefele/kaggle-sample-code | R | false | false | 934 | r |
ar <- read.csv('arrival_rate.csv')
pdf(file='arrival_rate.pdf')
plot(ar$arrival_count, xlab = 'Day of year', ylab = 'Count of Orders (log scale)', log='y', main='Count of Orders per Day', type='l')
plot(ar$arrival_median, xlab = 'Day of year', ylab = 'Median Work Minutes (log scale)', log='y', main='Median Order Work-Minutes by Day', type='l')
plot(ar$arrival_minutes, xlab = 'Day of year', ylab = 'Total Work Minutes (log scale)', log='y', main='Total Work-Minutes per Day', type='l')
plot(100*cumsum(as.numeric(ar$arrival_count))/sum(as.numeric(ar$arrival_count)),
xlab = 'Day of year', ylab = '% of Total Orders', log='', main='Cumulative % of Total Orders, by Day', type='l')
plot(100*cumsum(as.numeric(ar$arrival_minutes))/sum(as.numeric(ar$arrival_minutes)),
xlab = 'Day of year', ylab = '% of Total Work Minutes', log='', main='Cumulative % of Total Work-Minutes, by Day', type='l')
|
# chap04_2_Function
# 1. 사용자 정의함수
# 형식)
# 함수명 <- function([인수]){
# 실행문
# 실행문
# [return 값]
# }
# 1) 매개변수없는 함수
# 1) A function that takes no parameters: prints a fixed label.
f1 <- function() {
  cat('f1 함수')
}
f1() # 함수 호출
# 2) 매개변수 있는 함수
# 2) A function with one parameter: prints the square of its argument.
f2 <- function(x){
  squared <- x^2
  cat('x2 =', squared)
}
f2(10) # 실인수
# 3) 리턴있는 함수
# 3) A function with a return value: the sum of its two arguments.
#    (The last expression is returned; no explicit return() needed.)
f3 <- function(x, y){
  x + y
}
# 함수 호출 -> 반환값
add_re <- f3(10, 5)
add_re # 15
num <- 1:10
# Sum helper used below to compute an average from a total.
tot_func <- function(x){
  sum(x)
}
tot_re <- tot_func(num)
avg <- tot_re / length(num)
avg # 5.5
# 문) calc 함수를 정의하기
#100 + 20 = 120
#100 - 20 = 80
#100 * 20 = 2000
#100 / 20 = 5
# Print the four basic arithmetic operations on x and y, one per line,
# and return them as a one-row data.frame(add, sub, mul, div).
# NOTE: return(add, sub, mul, div) would be an error in R — return()
# takes a single value, so the results are bundled in a data.frame.
calc <- function(x, y){
  add <- x + y
  sub <- x - y
  mul <- x * y
  div <- x / y
  results <- list('+' = add, '-' = sub, '*' = mul, '/' = div)
  for(sym in names(results)){
    cat(x, sym, y, '=', results[[sym]], '\n')
  }
  data.frame(add, sub, mul, div)
}
# 함수 호출
df <- calc(100, 20)
df
# 구구단의 단을 인수 받아서 구구단 출력하기
# Print the multiplication ("gugudan") table for the given column `dan`.
gugu <- function(dan){
  cat('***', dan, '단 ***\n')
  for(mult in 1:9){
    cat(dan, '*', mult, '=', dan * mult, '\n')
  }
}
gugu(2)
gugu(5)
# Dispatch a summary statistic by name: "SUM", "AVG", "VAR" or "SD".
# An unknown name yields NULL invisibly, matching switch()'s no-match
# behavior in the original.
state <- function(fname, data){
  stat.fun <- switch(fname,
                     SUM = sum,
                     AVG = mean,
                     VAR = var,
                     SD = sd)
  if(!is.null(stat.fun)) stat.fun(data)
}
data <- 1:10
state("SUM", data) # 55
state("AVG", data) # 5.5
state("VAR", data) # 9.166667
state("SD", data) # 3.02765
# 결측치(NA) 처리 함수
# Demonstrate three NA-handling strategies on a numeric vector, printing
# each cleaned vector and its mean:
#   x1: drop NAs; x2: replace NAs with the observed mean; x3: NAs -> 0.
# (Called for its printed output only.)
na <- function(x){
  # 1. Drop the NAs entirely.
  x1 <- na.omit(x)
  cat('x1 =', x1, '\n')
  cat('x1 = ', mean(x1), '\n')
  # 2. Replace NAs with the mean of the observed values.
  #    (TRUE spelled out — the original used T, a reassignable alias.)
  x2 <- ifelse(is.na(x), mean(x, na.rm = TRUE), x)
  cat('x2=', x2, '\n')
  cat('x2 =', mean(x2), '\n')
  # 3. Replace NAs with zero.
  x3 <- ifelse(is.na(x), 0, x)
  cat('x3=', x3, '\n')
  cat('x3 = ', mean(x3), '\n')
}
x <- c(10,5,NA,4.2,6.3,NA,7.5,8,10)
x
length(x) # 9
mean(x, na.rm = T) # 7.285714
# 함수 호출
na(x)
###################################
### 몬테카를로 시뮬레이션
###################################
# 현실적으로 불가능한 문제의 해답을 얻기 위해서 난수의 확률분포를 이용하여
# 모의시험으로 근사적 해를 구하는 기법
# 동전 앞/뒤 난수 확률분포 함수
# Simulate n coin tosses: draw n uniforms on [0, 1] and map values
# <= 0.5 to 0 ("heads") and > 0.5 to 1 ("tails").
# Vectorized: the original grew `result` element by element inside a
# loop, which copies the vector on every iteration. The single runif(n)
# call consumes the same RNG stream, so results are unchanged for a
# given seed.
coin <- function(n){
  r <- runif(n, min = 0, max = 1)
  as.numeric(r > 0.5)
}
# 몬테카를로 시뮬레이션
# Monte Carlo estimate of the probability of "tails": toss the coin n
# times and return the observed frequency of 1s.
# mean(coin(n)) draws the same RNG stream as n separate coin(1) calls,
# so the result matches the original accumulation loop for a given
# seed, without growing a scalar sum one call at a time.
montaCoin <- function(n){
  mean(coin(n))
}
montaCoin(5) # 0.6
montaCoin(1000) # 0.504
montaCoin(10000) # 0.501
# 중심극한정리
# 2. R의 주요 내장함수
# 1) 기술통계함수
vec <- 1:10
min(vec) # 최소값
max(vec) # 최대값
range(vec) # 범위
mean(vec) # 평균
median(vec) # 중위수
sum(vec) # 합계
prod(vec) # 데이터의 곱
1*2*3*4*5*6*7*8*9*10
summary(vec) # 요약통계량
n <- rnorm(100) # mean=0, sd=1
mean(n) # 0.08993331
sd(n) # 1.094573
sd(rnorm(100)) # 표준편차 구하기
factorial(5) # 팩토리얼=120
1*2*3*4*5
sqrt(49) # 루트
install.packages('RSADBE')
library(RSADBE)
library(help="RSADBE")
data(Bug_Metrics_Software)
str(Bug_Metrics_Software)
# num [1:5, 1:5, 1:2]
Bug_Metrics_Software
# 소프트웨어 발표 전 버그 수
Bug_Metrics_Software[,,1] # Before
# 행 단위 합계 : 소프트웨어 별 버그 수 합계
rowSums(Bug_Metrics_Software[,,1])
# 열 단위 합계 : 버그 별 합계
colSums(Bug_Metrics_Software[,,1])
# 행 단위 평균
rowMeans(Bug_Metrics_Software[,,1])
# 열 단위 평균
colMeans(Bug_Metrics_Software[,,1])
# 소프트웨어 발표 후 버그 수
Bug_Metrics_Software[,,2] # After
# 2) 반올림 관련 함수
x <- c(1.5, 2.5, -1.3, 2.5)
round(mean(x)) # 1.3 -> 1
ceiling(mean(x)) # x보다 큰 정수
floor(mean(x)) # x보다 작은 정수
# 3) 난수 생성과 확률분포
# (1) 정규분포를 따르는 난수 - 연속확률분포(실수형)
# 형식) rnorm(n, mean=0, sd = 1)
n <- 1000
r <- rnorm(n, mean = 0, sd = 1) # 표준정규분포
r
mean(r) # 0.02144221
sd(r) # 0.9890079
hist(r) # 대칭성
# (2) 균등분포를 따르는 난수 - 연속확률분포(실수형)
# 형식) runif(n, min=, max=)
r2 <- runif(n, min=0, max=1)
r2
hist(r2)
# (3) 이항분포를 따르는 난수 - 이산확률분포(정수형)
set.seed(123) # seed값 같으면 -> 동일한 난수
n <- 10
r3 <- rbinom(n, 1, 0.5) # 1/2
r3 # 0 1 0 1 1 0 1 1 1 0
r3 <- rbinom(n, 1, 0.25) # 1/4
r3 # 1 0 0 0 0 1 0 0 0 1
# (4) sample
sample(10:20, 5) # 17 19 16 11 10
sample(c(10:20, 50:100), 10)
# 홀드아웃방식
# train(70%)/test(30%) 데이터셋
dim(iris) # 150 5
idx <- sample(nrow(iris), nrow(iris)*0.7)
range(idx) # 4 150
idx # 행번호
length(idx) # 105
train <- iris[idx, ] # 학습용
test <- iris[-idx, ] # 검정용
dim(train) # 105 5
dim(test) # 45 5
# 4) 행렬연산 내장함수
x <- matrix(1:9, nrow = 3, byrow = T)
dim(x) # 3 3
y <- matrix(1:3, nrow = 3)
dim(y) # 3 1
x;y
z <- x %*% y # 두 행렬의 곱
z
# 행렬곱의 전제조건
# 1. x,y 모두 행렬
# 2. x(열) = y(행) 일치 : 수일치
| /chap04_2_Function.R | no_license | yangmyongho/2_Rwork | R | false | false | 5,893 | r | # chap04_2_Function
# 1. 사용자 정의함수
# 형식)
# 함수명 <- function([인수]){
# 실행문
# 실행문
# [return 값]
# }
# 1) 매개변수없는 함수
f1 <- function(){
cat('f1 함수')
}
f1() # 함수 호출
# 2) 매개변수 있는 함수
f2 <- function(x){ # 가인수=매개변수
x2 <- x^2
cat('x2 =', x2)
}
f2(10) # 실인수
# 3) 리턴있는 함수
f3 <- function(x, y){
add <- x + y
return(add) # add 반환
}
# 함수 호출 -> 반환값
add_re <- f3(10, 5)
add_re # 15
num <- 1:10
tot_func <- function(x){
tot <- sum(x)
return(tot)
}
tot_re <- tot_func(num)
avg <- tot_re / length(num)
avg # 5.5
# 문) calc 함수를 정의하기
#100 + 20 = 120
#100 - 20 = 80
#100 * 20 = 2000
#100 / 20 = 5
# Print the four basic arithmetic results for x and y, one line each
# ("x op y = result"), and return them as a one-row data frame with
# columns add, sub, mul and div.  (A single return value is required:
# return(a, b, ...) with several arguments is an error in R.)
calc <- function(x, y) {
  results <- list("+" = x + y, "-" = x - y, "*" = x * y, "/" = x / y)
  # One output line per operator, in fixed +, -, *, / order.
  for (op in names(results)) {
    cat(x, op, y, '=', results[[op]], '\n')
  }
  data.frame(add = results[["+"]], sub = results[["-"]],
             mul = results[["*"]], div = results[["/"]])
}
# 함수 호출
df <- calc(100, 20)
df
# 구구단의 단을 인수 받아서 구구단 출력하기
# Print the multiplication ("gugudan") table for the given dan:
# a header line followed by "dan * 1 = ..." through "dan * 9 = ...".
# Output only; returns NULL invisibly.
gugu <- function(dan){
  cat('***',dan,'단 ***\n')
  for(i in 1:9){
    cat(dan, '*', i, '=', dan*i, '\n')
  }
}
gugu(2)
gugu(5)
# Apply a summary statistic chosen by name to `data`.
# Recognised names: "SUM", "AVG" (mean), "VAR", "SD".
# An unrecognised fname yields NULL, matching switch()'s fall-through.
state <- function(fname, data) {
  dispatch <- list(SUM = sum, AVG = mean, VAR = var, SD = sd)
  stat_fun <- dispatch[[fname]]  # exact name lookup; NULL when unknown
  if (!is.null(stat_fun)) stat_fun(data)
}
data <- 1:10
state("SUM", data) # 55
state("AVG", data) # 5.5
state("VAR", data) # 9.166667
state("SD", data) # 3.02765
# 결측치(NA) 처리 함수
# Demonstrate three NA-handling strategies on a numeric vector, printing
# the cleaned data and its mean for each.  Output only; returns NULL
# invisibly (the result of the final cat()).
na <- function(x){
  # 1. Drop NA values entirely (the vector gets shorter).
  x1 <- na.omit(x)
  cat('x1 =', x1, '\n')
  cat('x1 = ', mean(x1), '\n')
  # 2. Replace each NA with the mean of the non-missing values
  #    (length preserved; overall mean is unchanged).
  x2 <- ifelse(is.na(x), mean(x, na.rm=T), x)
  cat('x2=', x2, '\n')
  cat('x2 =', mean(x2), '\n')
  # 3. Replace each NA with 0 (length preserved; pulls the mean down).
  x3 <- ifelse(is.na(x), 0, x)
  cat('x3=', x3, '\n')
  cat('x3 = ', mean(x3), '\n')
}
x <- c(10,5,NA,4.2,6.3,NA,7.5,8,10)
x
length(x) # 9
mean(x, na.rm = T) # 7.285714
# 함수 호출
na(x)
###################################
### 몬테카를로 시뮬레이션
###################################
# 현실적으로 불가능한 문제의 해답을 얻기 위해서 난수의 확률분포를 이용하여
# 모의시험으로 근사적 해를 구하는 기법
# 동전 앞/뒤 난수 확률분포 함수
# Simulate n coin tosses: draw n uniform(0, 1) values in one call and
# map each to 0 (heads) when <= 0.5, or 1 (tails) otherwise.  Consumes
# exactly n random numbers, like the original loop version.
coin <- function(n) {
  draws <- runif(n, min = 0, max = 1)
  # Vectorised threshold test replaces the element-wise for/if loop.
  ifelse(draws <= 0.5, 0, 1)
}
# 몬테카를로 시뮬레이션
# Monte Carlo estimate of P(tails): toss the coin n times -- one call
# to coin(1) per toss, preserving the original RNG consumption order --
# and return the proportion of 1s observed.  By the law of large
# numbers this approaches 0.5 as n grows.
montaCoin <- function(n) {
  tosses <- vapply(seq_len(n), function(k) coin(1), numeric(1))
  sum(tosses) / n
}
montaCoin(5) # 0.6
montaCoin(1000) # 0.504
montaCoin(10000) # 0.501
# 중심극한정리
# 2. R의 주요 내장함수
# 1) 기술통계함수
vec <- 1:10
min(vec) # 최소값
max(vec) # 최대값
range(vec) # 범위
mean(vec) # 평균
median(vec) # 중위수
sum(vec) # 합계
prod(vec) # 데이터의 곱
1*2*3*4*5*6*7*8*9*10
summary(vec) # 요약통계량
n <- rnorm(100) # mean=0, sd=1
mean(n) # 0.08993331
sd(n) # 1.094573
sd(rnorm(100)) # 표준편차 구하기
factorial(5) # 팩토리얼=120
1*2*3*4*5
sqrt(49) # 루트
install.packages('RSADBE')
library(RSADBE)
library(help="RSADBE")
data(Bug_Metrics_Software)
str(Bug_Metrics_Software)
# num [1:5, 1:5, 1:2]
Bug_Metrics_Software
# 소프트웨어 발표 전 버그 수
Bug_Metrics_Software[,,1] # Before
# 행 단위 합계 : 소프트웨어 별 버그 수 합계
rowSums(Bug_Metrics_Software[,,1])
# 열 단위 합계 : 버그 별 합계
colSums(Bug_Metrics_Software[,,1])
# 행 단위 평균
rowMeans(Bug_Metrics_Software[,,1])
# 열 단위 평균
colMeans(Bug_Metrics_Software[,,1])
# 소프트웨어 발표 후 버그 수
Bug_Metrics_Software[,,2] # After
# 2) 반올림 관련 함수
x <- c(1.5, 2.5, -1.3, 2.5)
round(mean(x)) # 1.3 -> 1
ceiling(mean(x)) # x보다 큰 정수
floor(mean(x)) # x보다 작은 정수
# 3) 난수 생성과 확률분포
# (1) 정규분포를 따르는 난수 - 연속확률분포(실수형)
# 형식) rnorm(n, mean=0, sd = 1)
n <- 1000
r <- rnorm(n, mean = 0, sd = 1) # 표준정규분포
r
mean(r) # 0.02144221
sd(r) # 0.9890079
hist(r) # 대칭성
# (2) 균등분포를 따르는 난수 - 연속확률분포(실수형)
# 형식) runif(n, min=, max=)
r2 <- runif(n, min=0, max=1)
r2
hist(r2)
# (3) 이항분포를 따르는 난수 - 이산확률분포(정수형)
set.seed(123) # seed값 같으면 -> 동일한 난수
n <- 10
r3 <- rbinom(n, 1, 0.5) # 1/2
r3 # 0 1 0 1 1 0 1 1 1 0
r3 <- rbinom(n, 1, 0.25) # 1/4
r3 # 1 0 0 0 0 1 0 0 0 1
# (4) sample
sample(10:20, 5) # 17 19 16 11 10
sample(c(10:20, 50:100), 10)
# 홀드아웃방식
# train(70%)/test(30%) 데이터셋
dim(iris) # 150 5
idx <- sample(nrow(iris), nrow(iris)*0.7)
range(idx) # 4 150
idx # 행번호
length(idx) # 105
train <- iris[idx, ] # 학습용
test <- iris[-idx, ] # 검정용
dim(train) # 105 5
dim(test) # 45 5
# 4) 행렬연산 내장함수
x <- matrix(1:9, nrow = 3, byrow = T)
dim(x) # 3 3
y <- matrix(1:3, nrow = 3)
dim(y) # 3 1
x;y
z <- x %*% y # 두 행렬의 곱
z
# 행렬곱의 전제조건
# 1. x,y 모두 행렬
# 2. x(열) = y(행) 일치 : 수일치
|
getwd()
setwd("Wk2")
getwd()
list.files()
complete <- function(directory, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Returns a data frame with one row per requested monitor:
  ##   id   - the monitor ID number
  ##   nobs - the number of complete cases (rows without NA) in its file
  # Remap any path mentioning "specdata" to the local ./specdata/ folder.
  # Fixed: grepl() returns FALSE (not integer(0)) when there is no match,
  # so the if() is always valid -- the original `grep(...) == 1` test
  # raised "argument is of length zero" for paths without "specdata".
  if (grepl("specdata", directory)) {
    directory <- "./specdata/"
  }
  # Pre-allocate the result vector, one slot per requested monitor.
  complete_data <- rep(0, length(id))
  # Build full paths for every file in the directory; list.files() sorts
  # names, so file i is assumed to belong to monitor i (001.csv, ...).
  all_files <- as.character(list.files(directory))
  file_paths <- paste(directory, all_files, sep = "/")
  j <- 1
  for (i in id) {
    current_file <- read.csv(file_paths[i], header = TRUE, sep = ",")
    complete_data[j] <- sum(complete.cases(current_file))
    j <- j + 1
  }
  data.frame(id = id, nobs = complete_data)
}
# check to match Coursera results
complete("specdata", 1) == 117
complete("specdata", c(2, 4, 8, 10, 12))
==
id nobs
1 2 1041
2 4 474
3 8 192
4 10 148
5 12 96
complete("specdata", 30:25)
==
id nobs
1 30 932
2 29 711
3 28 475
4 27 338
5 26 586
6 25 463
complete("specdata", 3)
id nobs
1 3 243 | /complete.R | no_license | Gmturner1981/datasciencecoursera | R | false | false | 1,364 | r | getwd()
setwd("Wk2")
getwd()
list.files()
complete <- function(directory, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
# set wd
if(grep("specdata", directory) == 1) {
directory <- ("./specdata/")
}
# calculate the length
id_len <- length(id)
complete_data <- rep(0, id_len)
# find files
all_files <- as.character( list.files(directory) )
file_paths <- paste(directory, all_files, sep="/")
j <- 1
for (i in id) {
current_file <- read.csv(file_paths[i], header=T, sep=",")
complete_data[j] <- sum(complete.cases(current_file))
j <- j + 1
}
result <- data.frame(id = id, nobs = complete_data)
return(result)
}
# check to match Coursera results
complete("specdata", 1) == 117
complete("specdata", c(2, 4, 8, 10, 12))
==
id nobs
1 2 1041
2 4 474
3 8 192
4 10 148
5 12 96
complete("specdata", 30:25)
==
id nobs
1 30 932
2 29 711
3 28 475
4 27 338
5 26 586
6 25 463
complete("specdata", 3)
id nobs
1 3 243 |
library(TSDT)
### Name: reset_factor_levels
### Title: reset_factor_levels
### Aliases: reset_factor_levels
### ** Examples
ex1 = as.factor( c( rep('A', 3), rep('B',3), rep('C',3) ) )
## The levels associated with the factor variable include the letters A, B, C
ex1 # Levels are A, B, C
## If the last three observations are dropped the value C no longer occurs
## in the data, but the list of associated factor levels still contains C.
## This mismatch between the data and the list of factor levels may cause
## problems, particularly for algorithms that iterate over the factor levels.
ex1 <- ex1[1:6]
ex1 # Levels are still A, B, C, but the data contains only A and B
## If the factor levels are reset the data and list of levels will once again
## be consistent
ex1 <- reset_factor_levels( ex1 )
ex1 # Levels now contain only A and B, which is consistent with data
| /data/genthat_extracted_code/TSDT/examples/reset_factor_levels.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 882 | r | library(TSDT)
### Name: reset_factor_levels
### Title: reset_factor_levels
### Aliases: reset_factor_levels
### ** Examples
ex1 = as.factor( c( rep('A', 3), rep('B',3), rep('C',3) ) )
## The levels associated with the factor variable include the letters A, B, C
ex1 # Levels are A, B, C
## If the last three observations are dropped the value C no longer occurs
## in the data, but the list of associated factor levels still contains C.
## This mismatch between the data and the list of factor levels may cause
## problems, particularly for algorithms that iterate over the factor levels.
ex1 <- ex1[1:6]
ex1 # Levels are still A, B, C, but the data contains only A and B
## If the factor levels are reset the data and list of levels will once again
## be consistent
ex1 <- reset_factor_levels( ex1 )
ex1 # Levels now contain only A and B, which is consistent with data
|
#' In-memory cache of parsed CRAN metadata files, keyed by MD5 checksum.
#'
#' `get(path)` returns the parsed form of a `PACKAGES.gz` or archive
#' `.rds` file, parsing each distinct file content at most once: the
#' cache key is the file's MD5 sum, so an unchanged re-download still
#' hits the cache.  Only the "gz" and "rds" extensions are handled; any
#' other extension makes `insert_file()` (and hence a cache miss in
#' `get()`) return NULL invisibly.
#' @importFrom tools md5sum file_ext
CRANMetadataCache <- R6Class(
  "CRANMetadataCache",
  public = list(
    # Create the backing store: an environment used as a hash map
    # from MD5 string to parsed metadata.
    initialize = function() {
      private$data <- new.env(parent = emptyenv())
      invisible(self)
    },
    # Return cached metadata for `path`, parsing and caching on a miss.
    get = function(path) {
      path <- normalizePath(path)
      md5 <- md5sum(path)
      if (! is.null(res <- private$data[[md5]])) {
        res
      } else {
        private$insert_file(path, md5)
      }
    }
  ),
  private = list(
    data = NULL,  # environment mapping md5 -> parsed metadata
    # Dispatch on file extension; returns the object it caches.
    insert_file = function(path, md5) {
      ext <- file_ext(path)
      if (ext == "gz") {
        private$insert_file_gz(path, md5)
      } else if (ext == "rds") {
        private$insert_file_rds(path, md5)
      }
    },
    # PACKAGES.gz: parse DCF, store pkgs tibble + dependency table.
    insert_file_gz = function(path, md5) {
      pkgs <- format_packages_gz(read.dcf.gz(path))
      private$data[[md5]] <- pkgs
      pkgs
    },
    # archive .rds: reshape into a per-tarball tibble and store it.
    insert_file_rds = function(path, md5) {
      obj <- format_archive_rds(readRDS(path))
      private$data[[md5]] <- obj
      obj
    }
  )
)
#' Convert a raw PACKAGES matrix into the cached metadata form.
#'
#' @param pkgs Matrix/data as read from a PACKAGES(.gz) file.
#' @return A list with `pkgs` (the input as a tibble) and `deps`
#'   (the dependency table produced by `fast_parse_deps()`).
#' @importFrom tibble as_tibble
format_packages_gz <- function(pkgs) {
  pkgs_tbl <- as_tibble(pkgs)
  list(pkgs = pkgs_tbl, deps = fast_parse_deps(pkgs_tbl))
}
# Flatten an archive.rds structure (a named list with one data frame
# per package, whose row names are "<package>/<tarball>" paths) into a
# single tibble with one row per archived tarball: package, file,
# version (parsed out of the tarball name) and size.
format_archive_rds <- function(ards) {
  # Strip the leading "<package>/" prefix from every row name.
  tarballs <- sub("^[^/]+/", "", unlist(lapply(ards, rownames)))
  # Repeat each package name once per row of its data frame.
  pkg_names <- rep(names(ards), viapply(ards, nrow))
  sizes <- unlist(unname(lapply(ards, "[[", "size")))
  tibble(
    package = pkg_names,
    file = tarballs,
    # "pkg_1.2.3.tar.gz" -> "1.2.3"
    version = sub("^[^_]+_([-\\.0-9]+)\\.tar\\.gz$", "\\1", tarballs),
    size = sizes
  )
}
# Refresh the on-disk CRAN metadata cache for every configured
# platform / R-version combination, reading all settings from `config`
# and forwarding the shared progress bar to the worker.
update_crandata_cache <- function(config, progress_bar) {
  type_cran_update_cache(
    rootdir = config$metadata_cache_dir,
    platforms = config$platforms,
    rversions = config$`r-versions`,
    mirror = config$`cran-mirror`,
    progress_bar
  )
}
# Bioconductor counterpart of update_crandata_cache(); note that no
# mirror argument is passed to the worker here.
update_biocdata_cache <- function(config, progress_bar) {
  type_bioc_update_cache(
    rootdir = config$metadata_cache_dir,
    platforms = config$platforms,
    rversions = config$`r-versions`,
    progress_bar
  )
}
| /R/metadata-cache.R | permissive | slopp/pkgdepends | R | false | false | 1,948 | r |
#' @importFrom tools md5sum file_ext
CRANMetadataCache <- R6Class(
"CRANMetadataCache",
public = list(
initialize = function() {
private$data <- new.env(parent = emptyenv())
invisible(self)
},
get = function(path) {
path <- normalizePath(path)
md5 <- md5sum(path)
if (! is.null(res <- private$data[[md5]])) {
res
} else {
private$insert_file(path, md5)
}
}
),
private = list(
data = NULL,
insert_file = function(path, md5) {
ext <- file_ext(path)
if (ext == "gz") {
private$insert_file_gz(path, md5)
} else if (ext == "rds") {
private$insert_file_rds(path, md5)
}
},
insert_file_gz = function(path, md5) {
pkgs <- format_packages_gz(read.dcf.gz(path))
private$data[[md5]] <- pkgs
pkgs
},
insert_file_rds = function(path, md5) {
obj <- format_archive_rds(readRDS(path))
private$data[[md5]] <- obj
obj
}
)
)
#' @importFrom tibble as_tibble
format_packages_gz <- function(pkgs) {
pkgs <- as_tibble(pkgs)
list(pkgs = pkgs, deps = fast_parse_deps(pkgs))
}
format_archive_rds <- function(ards) {
files <- sub("^[^/]+/", "", unlist(lapply(ards, rownames)))
tibble(
package = rep(names(ards), viapply(ards, nrow)),
file = files,
version = sub("^[^_]+_([-\\.0-9]+)\\.tar\\.gz$", "\\1", files),
size = unlist(unname(lapply(ards, "[[", "size")))
)
}
update_crandata_cache <- function(config, progress_bar) {
type_cran_update_cache(
rootdir = config$metadata_cache_dir,
platforms = config$platforms,
rversions = config$`r-versions`,
mirror = config$`cran-mirror`,
progress_bar
)
}
update_biocdata_cache <- function(config, progress_bar) {
type_bioc_update_cache(
rootdir = config$metadata_cache_dir,
platforms = config$platforms,
rversions = config$`r-versions`,
progress_bar
)
}
|
# Server definition: renders a dot plot of fuel consumption per model,
# a regression scatter against a user-chosen predictor, and the raw
# data table.  Uses `mtcars` (with a `modelo` column) and the `regresor`
# input defined in the UI.
shinyServer(function(input, output) {
  # Dot plot: car models ordered by their mpg value.
  output$dotplot <- renderPlot({
    qplot(mpg,reorder(modelo,mpg),data=mtcars,xlab="consumo (en mpg)",ylab="modelos")
  })
  # mpg vs. the selected regressor with a linear fit; aes_string() is
  # used because the regressor name arrives as a character string.
  output$regresion <- renderPlot({
    ggplot(mtcars,aes_string(input$regresor,"mpg"))+
      geom_text(aes(label=modelo),angle=10,check_overlap=TRUE)+
      geom_smooth(method='lm')
  })
  output$datos <- renderDataTable(mtcars)  # interactive data table
})
| /09. RMarkdown y Shiny/2. Aplicaciones/interfaz2/server.R | no_license | 1789291/Master-Data-Science | R | false | false | 429 | r | # Definición de la parte server
shinyServer(function(input, output) {
output$dotplot <- renderPlot({
qplot(mpg,reorder(modelo,mpg),data=mtcars,xlab="consumo (en mpg)",ylab="modelos")
})
output$regresion <- renderPlot({
ggplot(mtcars,aes_string(input$regresor,"mpg"))+
geom_text(aes(label=modelo),angle=10,check_overlap=TRUE)+
geom_smooth(method='lm')
})
output$datos <- renderDataTable(mtcars)
})
|
library(bootLR)
### Name: BayesianLR.test
### Title: Compute the (positive/negative) likelihood ratio with
### appropriate, bootstrapped confidence intervals
### Aliases: BayesianLR.test
### ** Examples
## Not run:
##D blrt <- BayesianLR.test( truePos=100, totalDzPos=100, trueNeg=60, totalDzNeg=100 )
##D blrt
##D summary(blrt)
##D
##D BayesianLR.test( truePos=98, totalDzPos=100, trueNeg=60, totalDzNeg=100 )
##D BayesianLR.test( truePos=60, totalDzPos=100, trueNeg=100, totalDzNeg=100 )
##D BayesianLR.test( truePos=60, totalDzPos=100, trueNeg=99, totalDzNeg=100 )
##D
##D # Note the argument names are not necessary if you specify them in the proper order:
##D BayesianLR.test( 60, 100, 50, 50 )
##D
##D # You can specify R= to increase/decrease the number of bootstrap replications
##D BayesianLR.test( 60, 100, 50, 50, R=10000 )
##D
##D # You can change the number of digits that are printed
##D print.lrtest( BayesianLR.test( 500, 500, 300, 500 ), digits = 4 )
##D
##D # Or extract the results yourself
##D model.blrt1 <- BayesianLR.test( 500, 500, 300, 500 )
##D unclass( model.blrt1 )
##D model.blrt1$statistics
##D model.blrt1$posLR.ci
##D
##D # If the model doesn't converge, you can alter the search parameters
##D BayesianLR.test( 500, 500, 300, 500, parameters=list(shrink=4,tol=.001,nEach=150), maxTries = 50 )
##D
##D ### Statistician-only options
##D # These change the way the model works.
##D # It is not recommended to alter these, as this will alter the statistical properties of the test
##D # in ways that have not been validated.
##D # Change number of bootstrap replications
##D BayesianLR.test( 500, 500, 300, 500, R = 5*10^4 )
##D # Change number of times to average the confidence interval limits at the end
##D BayesianLR.test( 500, 500, 300, 500, nBSave = 100 )
##D # Change the criteria from median being consistent 0 or 1 to some other quantile
##D BayesianLR.test( 500, 500, 300, 500, consistentQuantile = .53 )
## End(Not run)
| /data/genthat_extracted_code/bootLR/examples/BayesianLR.test.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,978 | r | library(bootLR)
### Name: BayesianLR.test
### Title: Compute the (positive/negative) likelihood ratio with
### appropriate, bootstrapped confidence intervals
### Aliases: BayesianLR.test
### ** Examples
## Not run:
##D blrt <- BayesianLR.test( truePos=100, totalDzPos=100, trueNeg=60, totalDzNeg=100 )
##D blrt
##D summary(blrt)
##D
##D BayesianLR.test( truePos=98, totalDzPos=100, trueNeg=60, totalDzNeg=100 )
##D BayesianLR.test( truePos=60, totalDzPos=100, trueNeg=100, totalDzNeg=100 )
##D BayesianLR.test( truePos=60, totalDzPos=100, trueNeg=99, totalDzNeg=100 )
##D
##D # Note the argument names are not necessary if you specify them in the proper order:
##D BayesianLR.test( 60, 100, 50, 50 )
##D
##D # You can specify R= to increase/decrease the number of bootstrap replications
##D BayesianLR.test( 60, 100, 50, 50, R=10000 )
##D
##D # You can change the number of digits that are printed
##D print.lrtest( BayesianLR.test( 500, 500, 300, 500 ), digits = 4 )
##D
##D # Or extract the results yourself
##D model.blrt1 <- BayesianLR.test( 500, 500, 300, 500 )
##D unclass( model.blrt1 )
##D model.blrt1$statistics
##D model.blrt1$posLR.ci
##D
##D # If the model doesn't converge, you can alter the search parameters
##D BayesianLR.test( 500, 500, 300, 500, parameters=list(shrink=4,tol=.001,nEach=150), maxTries = 50 )
##D
##D ### Statistician-only options
##D # These change the way the model works.
##D # It is not recommended to alter these, as this will alter the statistical properties of the test
##D # in ways that have not been validated.
##D # Change number of bootstrap replications
##D BayesianLR.test( 500, 500, 300, 500, R = 5*10^4 )
##D # Change number of times to average the confidence interval limits at the end
##D BayesianLR.test( 500, 500, 300, 500, nBSave = 100 )
##D # Change the criteria from median being consistent 0 or 1 to some other quantile
##D BayesianLR.test( 500, 500, 300, 500, consistentQuantile = .53 )
## End(Not run)
|
\name{lyon}
\docType{data}
\alias{lyon}
\title{Contour des arrondissements de Lyon}
\description{
Contour des 9 arrondissements de Lyon pour représentation cartographique
}
\usage{data(lyon)}
\format{Objet de classe SpatialPolygonsDataFrame}
\keyword{datasets} | /man/lyon.Rd | no_license | juba/rgrs | R | false | false | 263 | rd | \name{lyon}
\docType{data}
\alias{lyon}
\title{Contour des arrondissements de Lyon}
\description{
Contour des 9 arrondissements de Lyon pour représentation cartographique
}
\usage{data(lyon)}
\format{Objet de classe SpatialPolygonsDataFrame}
\keyword{datasets} |
testlist <- list(type = 1L, z = 4.64186808026047e-315)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609893514-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 1L, z = 4.64186808026047e-315)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
# Medical-expense prediction (regression) with a Random Forest.
# Load packages
library(tidyverse)
library(randomForest)
library(ModelMetrics)
search()
# Prepare the data
url <- 'https://github.com/JakeOh/202105_itw_bd26/raw/main/datasets/insurance.csv'
insurance_df <- read.csv(url, stringsAsFactors = TRUE)
str(insurance_df)
# Split into train / test sets
# NOTE(review): this is a sequential 80/20 split (first rows train,
# last rows test), not a random one -- if the CSV rows are ordered,
# the two sets may not be comparable.  Confirm this is intended.
n <- nrow(insurance_df) # total number of samples
tr_size <- round(n * 0.8) # number of training samples
train_set <- insurance_df[1:tr_size, ] # training set
test_set <- insurance_df[(tr_size + 1):n, ] # test set
# Train the random forest model
forest_reg <- randomForest(formula = expenses ~ .,
                           data = train_set)
forest_reg
#> Mean of squared residuals: 23027150
#> MSE estimated from the OOB (out-of-bag) samples
# Evaluate on the training set
train_predictions <- predict(forest_reg, train_set)
head(train_predictions, n = 5)
head(train_set$expenses, n = 5)
# mean squared error
mse(train_set$expenses, train_predictions) #> 9813250
rmse(train_set$expenses, train_predictions) #> 3132.611
# Evaluate on the test set (error is larger than on the training set,
# as expected for a model evaluated on unseen data)
test_predictions <- predict(forest_reg, test_set)
head(test_predictions, n = 5)
head(test_set$expenses, n = 5)
mse(test_set$expenses, test_predictions) #> 21884111
rmse(test_set$expenses, test_predictions) #> 4678.046
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{transpose_community}
\alias{transpose_community}
\title{Convert from a long form abundance dataframe to a time by species dataframe.}
\usage{
transpose_community(df, time.var, species.var, abundance.var)
}
\arguments{
\item{df}{A dataframe containing time.var, species.var and abundance.var columns}
\item{time.var}{The name of the time column from df}
\item{species.var}{The name of the species column from df}
\item{abundance.var}{The name of the abundance column from df}
}
\value{
A dataframe of species abundances x time
}
\description{
Convert from a long form abundance dataframe to a time by species dataframe.
}
| /man/transpose_community.Rd | permissive | TankMermaid/codyn | R | false | true | 720 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{transpose_community}
\alias{transpose_community}
\title{Convert from a long form abundance dataframe to a time by species dataframe.}
\usage{
transpose_community(df, time.var, species.var, abundance.var)
}
\arguments{
\item{df}{A dataframe containing time.var, species.var and abundance.var columns}
\item{time.var}{The name of the time column from df}
\item{species.var}{The name of the species column from df}
\item{abundance.var}{The name of the abundance column from df}
}
\value{
A dataframe of species abundances x time
}
\description{
Convert from a long form abundance dataframe to a time by species dataframe.
}
|
#importing data file
data=read.csv("C:/Users/hp/Desktop/hackathon/train (1).csv")
names(data)
data <- data[-c(1236),]
str(data)
input<- data[,c("subscriber","Trend_day_count","Tag_count","comment_count","Trend_tag_count","likes","dislike","views")]
names(input)
#converting variables from object to integer
#1
input$subscriber=as.integer(input$subscriber)
is.integer(input$subscriber)
#2
input$Trend_day_count=as.integer(input$Trend_day_count)
is.integer(input$Trend_day_count)
#3
input$Tag_count=as.integer(input$Tag_count)
is.integer(input$Tag_count)
#4
input$comment_count=as.integer(input$comment_count)
is.integer(input$comment_count)
#5
input$Trend_tag_count=as.integer(input$Trend_tag_count)
is.integer(input$Trend_tag_count)
#6
input$likes=as.integer(input$likes)
is.integer(input$likes)
#7
input$dislike=as.integer(input$dislike)
is.integer(input$dislike)
#8
input$views=as.integer(input$views)
is.integer(input$views)
str(input)
#identifying missing values
sapply(input,function(x) sum(is.na(x)))
#replacing missing values with mean
input$likes[is.na(input$likes)] <- round(mean(input$likes, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$comment_count[is.na(input$comment_count)] <- round(mean(input$comment_count, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$subscriber[is.na(input$subscriber)] <- round(mean(input$subscriber, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$comment_count[is.na(input$comment_count)] <- round(mean(input$comment_count, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$Trend_day_count[is.na(input$Trend_day_count)] <- round(mean(input$Trend_day_count, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
##checking outliers and treating them
# 1) subscriber: winsorise values outside Tukey-style fences.
boxplot(input$subscriber)
summary(input$subscriber)
# Upper fence: Q3 (3.825e+06, copied from the summary() output above)
# plus 1.5 * IQR.  NOTE(review): the quartiles are hard-coded from a
# printed summary; quantile(input$subscriber, 0.75) would stay correct
# if the data change.
upper <- 3.825e+06 + 1.5*IQR(input$subscriber)
upper
# Lower fence: Q1 (2.429e+05) minus 1.5 * IQR.
lower <- 2.429e+05 - 1.5*IQR(input$subscriber)
lower
# Cap values above the upper fence.
input$subscriber[input$subscriber > upper] <- upper
summary(input$subscriber)
boxplot(input$subscriber)
# Cap values below the lower fence.  Fixed: the original assigned to a
# misspelled column "subscribe", so the lower-bound winsorisation never
# touched "subscriber".
input$subscriber[input$subscriber < lower] <- lower
summary(input$subscriber)
#2
boxplot(input$Trend_day_count)
summary(input$Trend_day_count)
#creating upper limit value
upper<-10.000+1.5*IQR(input$Trend_day_count)
upper
#creating lower limit value
lower<-4.000-1.5*IQR(input$Trend_day_count)
lower
# upper limit replacement
input$Trend_day_count[input$Trend_day_count > upper]<-upper
summary(input$Trend_day_count)
boxplot(input$Trend_day_count)
# lower limit replacement
input$Trend_day_count[input$Trend_day_count < lower]<-lower
summary(input$Trend_day_count)
#3
boxplot(input$Tag_count)
#4
boxplot(input$comment_count)
summary(input$comment_count)
#creating upper limit value
upper<-203240+1.5*IQR(input$comment_count)
upper
#creating lower limit value
lower<-126760-1.5*IQR(input$comment_count)
lower
# upper limit replacement
input$comment_count[input$comment_count > upper]<-upper
summary(input$comment_count)
boxplot(input$comment_count)
# lower limit replacement
input$comment_count[input$comment_count < lower]<-lower
summary(input$comment_count)
#5
boxplot(input$Trend_tag_count)
#6
boxplot(input$likes)
summary(input$likes)
#creating upper limit value
upper<-10.000+1.5*IQR(input$likes)
upper
#creating lower limit value
lower<-4.000-1.5*IQR(input$likes)
lower
# upper limit replacement
input$likes[input$likes > upper]<-upper
summary(input$likes)
boxplot(input$likes)
# lower limit replacement
input$likes[input$likes < lower]<-lower
summary(input$likes)
#7
boxplot(input$dislike)
#8
boxplot(input$views)
#correlation
plot(input)
par(mfrow=c(2,2))
plot(views~.,data=input)
cor(input)
# Correlation Matrix & plot(Scatter plot) , co-linearity & multi colinearity
attach(input)
cor(input)
plot(views,likes)
plot(views,dislike)
plot(views,Trend_tag_count)
plot(views,comment_count)
plot(views,Tag_count)
plot(views,Trend_day_count)
plot(views,subscriber)
# Model Building
##Multiple Logistic Regression
set.seed(12)
library(caret)
Train<-createDataPartition(input$views,p=0.7,list=FALSE)
training<-input[Train,]
testing<-input[-Train,]
#Enter method
# Model Creation -we reject Ho (pvalue<alpha){we acceptH1}
model<-lm(views~subscriber+Trend_day_count+Tag_count+comment_count+Trend_tag_count+likes+dislike,data = training)
summary(model)
#variance inflation factor
library(car)
vif(model)
# forward method
model1<-step(lm(views~.,data=training),
direction="forward")
summary(model1)
library(car)
vif(model1)
# backward method
model2<-step(lm(views~.,data = training)
,direction = "backward")
summary(model2)
library(car)
vif(model2)
# both method
model2<-step(lm(views~.,data = training)
,direction = "both")
summary(model2)
library(car)
vif(model2)
exp(coef(model2))
#Adjusted r square - better model as greater than 70%
# assumption
par(mfrow=c(2,2))
plot(model2)
library(lmtest)
dwtest(model2)
ncvTest(model2)
## Sanity-check the fitted model: for an OLS fit the residuals sum to ~0.
training$Fitted_value <- model2$fitted.values
training$Residual <- model2$residuals
sum(training$Residual)
## Prediction on test data.  Fixed: `Predict` (capital P) is not a base
## R function and raised "could not find function" -- use stats::predict.
testing$Predicted <- predict(model2, testing)
testing$Residual <- testing$views - testing$Predicted
sum(testing$Residual)
################################### TEST DATA ########################################
#importing data file
data1=read.csv("C:/Users/hp/Desktop/hackathon/test (1).csv")
names(data1)
data1 <- data1[-c(1236),]
str(data1)
input1<- data1[,c("subscriber","Trend_day_count","Tag_count","comment_count","Trend_tag_count","likes","dislike")]
names(input1)
#converting variables from object to integer
#1
input1$subscriber=as.integer(input1$subscriber)
is.integer(input1$subscriber)
#2
input1$Trend_day_count=as.integer(input1$Trend_day_count)
is.integer(input1$Trend_day_count)
#3
input1$Tag_count=as.integer(input1$Tag_count)
is.integer(input1$Tag_count)
#4
input1$comment_count=as.integer(input1$comment_count)
is.integer(input1$comment_count)
#5
input1$Trend_tag_count=as.integer(input1$Trend_tag_count)
is.integer(input1$Trend_tag_count)
#6
input1$likes=as.integer(input1$likes)
is.integer(input1$likes)
#7
input1$dislike=as.integer(input1$dislike)
is.integer(input1$dislike)
str(input1)
#identifying missing values
sapply(input1,function(x) sum(is.na(x)))
#replacing missing values with mean
input1$likes[is.na(input1$likes)] <- round(mean(input1$likes, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$comment_count[is.na(input1$comment_count)] <- round(mean(input1$comment_count, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$subscriber[is.na(input1$subscriber)] <- round(mean(input1$subscriber, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$comment_count[is.na(input1$comment_count)] <- round(mean(input1$comment_count, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$Trend_day_count[is.na(input1$Trend_day_count)] <- round(mean(input1$Trend_day_count, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
##checking outliers and treating them
# 1) subscriber (test set): winsorise values outside Tukey-style fences.
boxplot(input1$subscriber)
summary(input1$subscriber)
# Fixed: the fences were hard-coded from a printed summary() and the
# pasted constants were mangled ("3181914+06" evaluates to 3181914 + 6,
# "252756+05" to 252756 + 5).  Compute the quartiles directly instead,
# which also keeps the fences correct if the data change.
upper <- quantile(input1$subscriber, 0.75, na.rm = TRUE) + 1.5*IQR(input1$subscriber)
upper
lower <- quantile(input1$subscriber, 0.25, na.rm = TRUE) - 1.5*IQR(input1$subscriber)
lower
# Cap values above the upper fence.
input1$subscriber[input1$subscriber > upper] <- upper
summary(input1$subscriber)
boxplot(input1$subscriber)
# Cap values below the lower fence.  Fixed: the original assigned to a
# misspelled column "subscribe", leaving "subscriber" un-winsorised.
input1$subscriber[input1$subscriber < lower] <- lower
summary(input1$subscriber)
#2
boxplot(input1$Trend_day_count)
summary(input1$Trend_day_count)
#3
boxplot(input1$Tag_count)
#4
boxplot(input1$comment_count)
#5
boxplot(input1$Trend_tag_count)
#6
boxplot(input1$likes)
#7
boxplot(input1$dislike)
# Score the cleaned test data with the trained regression model.
model3 <- predict(model2, input1)
# Fixed: the original bare line `input1$Y_p` referenced a column that
# was never created (it just printed NULL); store the predictions there
# instead, which appears to be the intent.
input1$Y_p <- model3
df <- data.frame(model3)
# Export the predictions.  NOTE(review): absolute Windows path -- this
# only works on the original author's machine.
write.csv(df, file = "C:/Users/hp/Desktop/hackathon/1234.csv")
| /youtube views prediction .R | no_license | shreyaskuthe/projects | R | false | false | 8,595 | r | #importing data file
data=read.csv("C:/Users/hp/Desktop/hackathon/train (1).csv")
names(data)
data <- data[-c(1236),]
str(data)
input<- data[,c("subscriber","Trend_day_count","Tag_count","comment_count","Trend_tag_count","likes","dislike","views")]
names(input)
#converting variables from object to integer
#1
input$subscriber=as.integer(input$subscriber)
is.integer(input$subscriber)
#2
input$Trend_day_count=as.integer(input$Trend_day_count)
is.integer(input$Trend_day_count)
#3
input$Tag_count=as.integer(input$Tag_count)
is.integer(input$Tag_count)
#4
input$comment_count=as.integer(input$comment_count)
is.integer(input$comment_count)
#5
input$Trend_tag_count=as.integer(input$Trend_tag_count)
is.integer(input$Trend_tag_count)
#6
input$likes=as.integer(input$likes)
is.integer(input$likes)
#7
input$dislike=as.integer(input$dislike)
is.integer(input$dislike)
#8
input$views=as.integer(input$views)
is.integer(input$views)
str(input)
#identifying missing values
sapply(input,function(x) sum(is.na(x)))
#replacing missing values with mean
input$likes[is.na(input$likes)] <- round(mean(input$likes, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$comment_count[is.na(input$comment_count)] <- round(mean(input$comment_count, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$subscriber[is.na(input$subscriber)] <- round(mean(input$subscriber, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$comment_count[is.na(input$comment_count)] <- round(mean(input$comment_count, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
## replacing missing values with mean
input$Trend_day_count[is.na(input$Trend_day_count)] <- round(mean(input$Trend_day_count, na.rm = TRUE))
sapply(input,function(x) sum(is.na(x)))
##checking outliers and treating them
#1
boxplot(input$subscriber)
summary(input$subscriber)
#creating upper limit value
upper<-3.825e+06+1.5*IQR(input$subscriber)
upper
#creating lower limit value
lower<-2.429e+05-1.5*IQR(input$subscriber)
lower
# upper limit replacement
input$subscriber[input$subscriber > upper]<-upper
summary(input$subscriber)
boxplot(input$subscriber)
# lower limit replacement
input$subscribe[input$subscribe < lower]<-lower
summary(input$subscriber)
# Winsorize the remaining training-set variables to Tukey fences.
# The quartiles are hard-coded from the summary() output rather than computed.
#2
boxplot(input$Trend_day_count)
summary(input$Trend_day_count)
#creating upper limit value
upper<-10.000+1.5*IQR(input$Trend_day_count)
upper
#creating lower limit value
lower<-4.000-1.5*IQR(input$Trend_day_count)
lower
# upper limit replacement
input$Trend_day_count[input$Trend_day_count > upper]<-upper
summary(input$Trend_day_count)
boxplot(input$Trend_day_count)
# lower limit replacement
input$Trend_day_count[input$Trend_day_count < lower]<-lower
summary(input$Trend_day_count)
#3 (inspection only; no capping applied)
boxplot(input$Tag_count)
#4
boxplot(input$comment_count)
summary(input$comment_count)
#creating upper limit value
upper<-203240+1.5*IQR(input$comment_count)
upper
#creating lower limit value
lower<-126760-1.5*IQR(input$comment_count)
lower
# upper limit replacement
input$comment_count[input$comment_count > upper]<-upper
summary(input$comment_count)
boxplot(input$comment_count)
# lower limit replacement
input$comment_count[input$comment_count < lower]<-lower
summary(input$comment_count)
#5 (inspection only)
boxplot(input$Trend_tag_count)
#6
boxplot(input$likes)
summary(input$likes)
#creating upper limit value
# NOTE(review): 10.000 and 4.000 are the same quartiles used for
# Trend_day_count above -- presumably copy/pasted; confirm against
# summary(input$likes) before trusting these fences.
upper<-10.000+1.5*IQR(input$likes)
upper
#creating lower limit value
lower<-4.000-1.5*IQR(input$likes)
lower
# upper limit replacement
input$likes[input$likes > upper]<-upper
summary(input$likes)
boxplot(input$likes)
# lower limit replacement
input$likes[input$likes < lower]<-lower
summary(input$likes)
#7 (inspection only)
boxplot(input$dislike)
#8 (inspection only)
boxplot(input$views)
#correlation: pairwise scatterplot matrix and correlation matrix of all columns
plot(input)
par(mfrow=c(2,2))
plot(views~.,data=input)
cor(input)
# Correlation Matrix & plot(Scatter plot) , co-linearity & multi colinearity
# NOTE(review): attach() puts the columns on the search path and is discouraged
# (masking risk); with(input, ...) or input$col would be safer.
attach(input)
cor(input)
# Scatterplots of the response (views) against each predictor.
plot(views,likes)
plot(views,dislike)
plot(views,Trend_tag_count)
plot(views,comment_count)
plot(views,Tag_count)
plot(views,Trend_day_count)
plot(views,subscriber)
# Model Building
##Multiple Logistic Regression
## NOTE(review): despite the header, these are ordinary least-squares linear
## models (lm), not logistic regression.
set.seed(12)
library(caret)
# 70/30 train/test split stratified on the response.
Train<-createDataPartition(input$views,p=0.7,list=FALSE)
training<-input[Train,]
testing<-input[-Train,]
#Enter method
# Model Creation -we reject Ho (pvalue<alpha){we acceptH1}
model<-lm(views~subscriber+Trend_day_count+Tag_count+comment_count+Trend_tag_count+likes+dislike,data = training)
summary(model)
#variance inflation factor (multicollinearity check)
library(car)
vif(model)
# forward method
# NOTE(review): step() with direction="forward" starting FROM the full model
# and no `scope` has nothing to add, so model1 is just the full model.
model1<-step(lm(views~.,data=training),
direction="forward")
summary(model1)
library(car)
vif(model1)
# backward method (drops terms by AIC)
model2<-step(lm(views~.,data = training)
,direction = "backward")
summary(model2)
library(car)
vif(model2)
# both method
# NOTE(review): this overwrites the backward-selection model2 above; only the
# "both"-direction model survives for the later prediction steps.
model2<-step(lm(views~.,data = training)
,direction = "both")
summary(model2)
library(car)
vif(model2)
exp(coef(model2))
#Adjusted r square - better model as greater than 70%
# assumption checks: residual diagnostics, autocorrelation, heteroskedasticity
par(mfrow=c(2,2))
plot(model2)
library(lmtest)
dwtest(model2)
ncvTest(model2)
##JUST TO CHECK MATHEMATICALLY of linear Model
# Residuals of a least-squares fit sum to ~0 on the training data.
training$Fitted_value<-model2$fitted.values
training$Residual<-model2$residuals
sum(training$Residual)
##Prediction on test data
# BUG FIX: Predict() (capital P) is not a base-R function -- it belongs to the
# rms package, which is never loaded here, so this line errored at runtime.
# Use stats::predict() on the fitted lm object instead.
testing$Predicted<-predict(model2, newdata = testing)
testing$Residual<-testing$views-testing$Predicted
sum(testing$Residual)
################################### TEST DATA ########################################
#importing data file (hold-out set for final scoring)
data1=read.csv("C:/Users/hp/Desktop/hackathon/test (1).csv")
names(data1)
# Drop row 1236 (presumably a known bad record -- TODO confirm why).
data1 <- data1[-c(1236),]
str(data1)
# Keep only the predictors used by the trained model.
input1<- data1[,c("subscriber","Trend_day_count","Tag_count","comment_count","Trend_tag_count","likes","dislike")]
names(input1)
#converting variables from object to integer (mirrors the training-set prep)
#1
input1$subscriber=as.integer(input1$subscriber)
is.integer(input1$subscriber)
#2
input1$Trend_day_count=as.integer(input1$Trend_day_count)
is.integer(input1$Trend_day_count)
#3
input1$Tag_count=as.integer(input1$Tag_count)
is.integer(input1$Tag_count)
#4
input1$comment_count=as.integer(input1$comment_count)
is.integer(input1$comment_count)
#5
input1$Trend_tag_count=as.integer(input1$Trend_tag_count)
is.integer(input1$Trend_tag_count)
#6
input1$likes=as.integer(input1$likes)
is.integer(input1$likes)
#7
input1$dislike=as.integer(input1$dislike)
is.integer(input1$dislike)
str(input1)
#identifying missing values
sapply(input1,function(x) sum(is.na(x)))
#replacing missing values with mean
input1$likes[is.na(input1$likes)] <- round(mean(input1$likes, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$comment_count[is.na(input1$comment_count)] <- round(mean(input1$comment_count, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$subscriber[is.na(input1$subscriber)] <- round(mean(input1$subscriber, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
## NOTE(review): comment_count is imputed a second time here -- a no-op leftover.
input1$comment_count[is.na(input1$comment_count)] <- round(mean(input1$comment_count, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
## replacing missing values with mean
input1$Trend_day_count[is.na(input1$Trend_day_count)] <- round(mean(input1$Trend_day_count, na.rm = TRUE))
sapply(input1,function(x) sum(is.na(x)))
##checking outliers and treating them (test set)
#1
boxplot(input1$subscriber)
summary(input1$subscriber)
#creating upper limit value (Q3 + 1.5*IQR; quartile hard-coded from summary())
# BUG FIX: original read "3181914+06", which adds the integer 6 instead of
# forming scientific notation -- presumably 3.181914e+06 was intended; confirm
# against summary(input1$subscriber).
upper<-3.181914e+06+1.5*IQR(input1$subscriber)
upper
#creating lower limit value
# BUG FIX: original read "252756+05" (adds 5); presumably 2.52756e+05.
lower<-2.52756e+05-1.5*IQR(input1$subscriber)
lower
# upper limit replacement
input1$subscriber[input1$subscriber > upper]<-upper
summary(input1$subscriber)
boxplot(input1$subscriber)
# lower limit replacement
# BUG FIX: was `input1$subscribe` (typo) on the left-hand side, which silently
# created a new column instead of winsorizing input1$subscriber.
input1$subscriber[input1$subscriber < lower]<-lower
summary(input1$subscriber)
# Boxplot-only inspection of the remaining test-set variables (no capping applied).
#2
boxplot(input1$Trend_day_count)
summary(input1$Trend_day_count)
#3
boxplot(input1$Tag_count)
#4
boxplot(input1$comment_count)
#5
boxplot(input1$Trend_tag_count)
#6
boxplot(input1$likes)
#7
boxplot(input1$dislike)
# Score the hold-out file with the stepwise model selected earlier.
model3 <- predict(model2, input1)
# NOTE(review): input1$Y_p is never created anywhere, so this just prints
# NULL -- likely a leftover; verify whether predictions were meant to be
# stored in that column.
input1$Y_p
df<-data.frame(model3)
# Write the predictions out for submission.
write.csv(df,file = ("C:/Users/hp/Desktop/hackathon/1234.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival-functions.R
\name{survtest}
\alias{survtest}
\title{Tests for time-to-event outcomes}
\usage{
survtest(
time,
status,
treat,
tau = NULL,
rho = 0,
gam = 0,
eta = 1,
var_est = "Unpooled"
)
}
\arguments{
\item{time}{The observed time.}
\item{status}{The status indicator, normally 0=alive, 1=dead.}
\item{treat}{The treatment-group indicator, normally 0=control, 1=intervention.}
\item{tau}{Follow-up time. Defaults to NULL, denoting the last time at which both groups still had patients at risk.}
\item{rho}{A scalar parameter that controls the type of test (see Weights).}
\item{gam}{A scalar parameter that controls the type of test (see Weights).}
\item{eta}{A scalar parameter that controls the type of test (see Weights).}
\item{var_est}{indicates the variance estimate to use ('Pooled' or 'Unpooled')}
}
\value{
List: standardized statistic, statistic and variance.
}
\description{
Performs a test for right-censored data. It uses the Weighted Kaplan-Meier family of statistics to test for differences between two survival curves.
}
\author{
Marta Bofill Roig
}
| /man/survtest.Rd | no_license | MartaBofillRoig/SurvBin | R | false | true | 1,159 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival-functions.R
\name{survtest}
\alias{survtest}
\title{Tests for time-to-event outcomes}
\usage{
survtest(
time,
status,
treat,
tau = NULL,
rho = 0,
gam = 0,
eta = 1,
var_est = "Unpooled"
)
}
\arguments{
\item{time}{The observed time.}
\item{status}{The status indicator, normally 0=alive, 1=dead.}
\item{treat}{The treatment-group indicator, normally 0=control, 1=intervention.}
\item{tau}{follow-up. Default NULL denoting the last time in which both groups had patients at risk.}
\item{rho}{A scalar parameter that controls the type of test (see Weights).}
\item{gam}{A scalar parameter that controls the type of test (see Weights).}
\item{eta}{A scalar parameter that controls the type of test (see Weights).}
\item{var_est}{indicates the variance estimate to use ('Pooled' or 'Unpooled')}
}
\value{
List: standardized statistic, statistic and variance.
}
\description{
performs a test for right-censored data. It uses the Weighted Kaplan-Meier family of statistics for testing the differences of two survival curves.
}
\author{
Marta Bofill Roig
}
|
library(dplyr)
library(tidyr)
library(lubridate)
library(readxl)
library(purrr)
library(data.table)
library(stringr)
library(openxlsx)
##-- Note: if your data sits in the sub-folder "ffromall raw" under the
##-- directory below, the script should run without any code changes.
# setwd("//eu.boehringer.com//users//sha//users2//zhengli//Desktop//DATA//IMS DDD process//IMS DDD Data House")
setwd("//eu.boehringer.com//users//sha//users2//zhengli//Desktop//DATA//IMS DDD process//IMS DDD Data House")
##-- read in the raw ddd data
# ddd <- read.xlsx("./ffromall raw/ffromall raw.xlsx")
ddd <- read.csv("./final data/ffromall raw.csv",
header = TRUE, stringsAsFactors = F,
check.names = FALSE)
##-- brand rename
##-- Place "Brand Rename.xlsx" in the same folder as "ffromall raw.xlsx",
##-- i.e. inside the "final data" folder.
new_brand_data <- read.xlsx("./final data/Brand Rename.xlsx",
check.names = FALSE) %>%
mutate(PACK = as.character(PACK)) %>%
select("PACK", "File.Name", "new_brand" = "New.Brand_CN")
# Overwrite Brand_CN with the renamed brand where a mapping exists
# (matched on the Main file name and PACK code).
ddd <- ddd %>%
left_join(new_brand_data, by = c("Main" = "File.Name", "PACK")) %>%
mutate(Brand_CN = ifelse(!is.na(new_brand),
new_brand,
Brand_CN)) %>%
select(-new_brand)
##-- Coerce every period column (name beginning with "20") that is not
##-- already numeric to numeric, leaving all other columns untouched.
for (j in seq_along(ddd)) {
  if (startsWith(colnames(ddd)[j], "20") && !is.numeric(ddd[[j]])) {
    ddd[[j]] <- as.numeric(ddd[[j]])
  }
}
##-- transpose the data to get the csv data
# Period columns are the ones named "20xx..."; normalize their names to two
# decimal places (e.g. "2019.01") so dcast's value.var matches consistently.
select_var <- colnames(ddd)[startsWith(colnames(ddd), "20")]
select_var <- format(as.numeric(select_var), nsmall = 2)
colnames(ddd)[startsWith(colnames(ddd), "20")] <- select_var
# lm1/lm2: the 12th and 24th period columns -- presumably the latest month of
# each of the two years in the data; TODO confirm the column ordering.
lm1 <- select_var[12]
lm2 <- select_var[24]
# Wide pivot: one row per dimension combination, one column per
# Period x Measurement x period-name, summing values (NAs dropped).
ddd_m <- data.table::dcast(setDT(ddd),
# ATC.1.Code + ATC.2.Code + ATC.3.Code + ATC.4.Code +
# Molecule.code + Molecule + Product + APP1 + Form1 + Manufactory +
# Corporation + Pack.Molecule + Main + Category + Category_CN +
# Category.type + Sub.category + Molecule_CN + Brand + Brand_CN +
# MANU_CN + Region + Province + Province_CN + City + City_CN +
# Veeva.code + Veeva.name + Decile + Note ~ Period + Measurement,
Region + Province + Province_CN + City + City_CN + Veeva.code + Veeva.name +
Decile + Note + ATC.3.Code + Main + Category + Category_CN + Sub.category +
Molecule + Molecule_CN + Brand + Brand_CN + Corporation + MANU_CN + APP1 +
Form1 + Package ~
Period + Measurement,
value.var = select_var,
fun = sum,
na.rm = TRUE)
# dcast produced names like "2019.01_mth_RMB"; rearrange each to
# "mth_RMB_2019.01" (period_measurement_date -> lowercase period, upper
# measurement, date last).
colnames_tmp <-
str_split(colnames(ddd_m)[startsWith(colnames(ddd_m), "20")],
"_", simplify = TRUE)
colnames_tmp <- paste(tolower(colnames_tmp[, 2]),
toupper(colnames_tmp[, 3]),
colnames_tmp[, 1],
sep = "_")
colnames(ddd_m)[startsWith(colnames(ddd_m), "20")] <- colnames_tmp
##-- format the csv file: group value columns as mth, qtr, then ytd/mat for
##-- the two latest-month periods only (lm1/lm2).
mth_colnames <- c(colnames(ddd_m)[grep("mth_RMB", colnames(ddd_m))],
colnames(ddd_m)[grep("mth_DOT", colnames(ddd_m))],
colnames(ddd_m)[grep("mth_UNIT", colnames(ddd_m))])
qtr_colnames <- c(colnames(ddd_m)[grep("qtr_RMB", colnames(ddd_m))],
colnames(ddd_m)[grep("qtr_DOT", colnames(ddd_m))],
colnames(ddd_m)[grep("qtr_UNIT", colnames(ddd_m))])
# ytd_colnames <- c(colnames(ddd_m)[grep("ytd_RMB", colnames(ddd_m))],
# colnames(ddd_m)[grep("ytd_DOT", colnames(ddd_m))],
# colnames(ddd_m)[grep("ytd_UNIT", colnames(ddd_m))])
ytd_colnames <- c(paste("ytd_RMB", lm1, sep = "_"),
paste("ytd_RMB", lm2, sep = "_"),
paste("ytd_DOT", lm1, sep = "_"),
paste("ytd_DOT", lm2, sep = "_"),
paste("ytd_UNIT", lm1, sep = "_"),
paste("ytd_UNIT", lm2, sep = "_"))
# mat_colnames <- c(colnames(ddd_m)[grep("mat_RMB", colnames(ddd_m))],
# colnames(ddd_m)[grep("mat_DOT", colnames(ddd_m))],
# colnames(ddd_m)[grep("mat_UNIT", colnames(ddd_m))])
mat_colnames <- c(paste("mat_RMB", lm1, sep = "_"),
paste("mat_RMB", lm2, sep = "_"),
paste("mat_DOT", lm1, sep = "_"),
paste("mat_DOT", lm2, sep = "_"),
paste("mat_UNIT", lm1, sep = "_"),
paste("mat_UNIT", lm2, sep = "_"))
ddd_m <- setDF(ddd_m)
# Keep non-value columns first, then the value columns in the order above.
# NOTE(review): grepl("20", ...) would also match any dimension column whose
# name happens to contain "20" -- brittle; verify no such column exists.
ddd_m <- ddd_m[,c(colnames(ddd_m)[!grepl("20", colnames(ddd_m))],
c(mth_colnames, qtr_colnames, ytd_colnames, mat_colnames))]
##-- output the csv file: one file per source file ("Main") in ./final data/.
output <-
lapply(unique(ddd_m$Main),
function(x) {
tmp <- ddd_m %>%
filter(Main == x)
write.csv(tmp, paste("./final data/ddd_", x, ".csv", sep = ""),
row.names = FALSE)
print(paste(x, " finished!"))
invisible()
})
| /DDD by category.R | no_license | Zaphiroth/BI_DDD | R | false | false | 5,508 | r | library(dplyr)
library(tidyr)
library(lubridate)
library(readxl)
library(purrr)
library(data.table)
library(stringr)
library(openxlsx)
##-- 注意如果确认你的数据时放在下面这个文件下的子文件夹:ffromall raw
##-- 下,那么你不需要修改任何代码就可以运行程序???
# setwd("//eu.boehringer.com//users//sha//users2//zhengli//Desktop//DATA//IMS DDD process//IMS DDD Data House")
setwd("//eu.boehringer.com//users//sha//users2//zhengli//Desktop//DATA//IMS DDD process//IMS DDD Data House")
##-- read in the raw ddd data
# ddd <- read.xlsx("./ffromall raw/ffromall raw.xlsx")
ddd <- read.csv("./final data/ffromall raw.csv",
header = TRUE, stringsAsFactors = F,
check.names = FALSE)
##-- brand rename
##-- 请将 "Brand Rename.xlsx" 文件与 "ffromall raw.xlsx" 文件放在同一文件夹路径下, 即 ”final data" 文件夹内
new_brand_data <- read.xlsx("./final data/Brand Rename.xlsx",
check.names = FALSE) %>%
mutate(PACK = as.character(PACK)) %>%
select("PACK", "File.Name", "new_brand" = "New.Brand_CN")
ddd <- ddd %>%
left_join(new_brand_data, by = c("Main" = "File.Name", "PACK")) %>%
mutate(Brand_CN = ifelse(!is.na(new_brand),
new_brand,
Brand_CN)) %>%
select(-new_brand)
##--
ddd[] <- Map(
function(x, y) {
if (startsWith(y, "20") && !is.numeric(x)) {
as.numeric(x)
} else {
x
}
},
ddd, colnames(ddd))
##-- transpose the data to get the csv data
select_var <- colnames(ddd)[startsWith(colnames(ddd), "20")]
select_var <- format(as.numeric(select_var), nsmall = 2)
colnames(ddd)[startsWith(colnames(ddd), "20")] <- select_var
lm1 <- select_var[12]
lm2 <- select_var[24]
ddd_m <- data.table::dcast(setDT(ddd),
# ATC.1.Code + ATC.2.Code + ATC.3.Code + ATC.4.Code +
# Molecule.code + Molecule + Product + APP1 + Form1 + Manufactory +
# Corporation + Pack.Molecule + Main + Category + Category_CN +
# Category.type + Sub.category + Molecule_CN + Brand + Brand_CN +
# MANU_CN + Region + Province + Province_CN + City + City_CN +
# Veeva.code + Veeva.name + Decile + Note ~ Period + Measurement,
Region + Province + Province_CN + City + City_CN + Veeva.code + Veeva.name +
Decile + Note + ATC.3.Code + Main + Category + Category_CN + Sub.category +
Molecule + Molecule_CN + Brand + Brand_CN + Corporation + MANU_CN + APP1 +
Form1 + Package ~
Period + Measurement,
value.var = select_var,
fun = sum,
na.rm = TRUE)
colnames_tmp <-
str_split(colnames(ddd_m)[startsWith(colnames(ddd_m), "20")],
"_", simplify = TRUE)
colnames_tmp <- paste(tolower(colnames_tmp[, 2]),
toupper(colnames_tmp[, 3]),
colnames_tmp[, 1],
sep = "_")
colnames(ddd_m)[startsWith(colnames(ddd_m), "20")] <- colnames_tmp
##-- format the csv file
mth_colnames <- c(colnames(ddd_m)[grep("mth_RMB", colnames(ddd_m))],
colnames(ddd_m)[grep("mth_DOT", colnames(ddd_m))],
colnames(ddd_m)[grep("mth_UNIT", colnames(ddd_m))])
qtr_colnames <- c(colnames(ddd_m)[grep("qtr_RMB", colnames(ddd_m))],
colnames(ddd_m)[grep("qtr_DOT", colnames(ddd_m))],
colnames(ddd_m)[grep("qtr_UNIT", colnames(ddd_m))])
# ytd_colnames <- c(colnames(ddd_m)[grep("ytd_RMB", colnames(ddd_m))],
# colnames(ddd_m)[grep("ytd_DOT", colnames(ddd_m))],
# colnames(ddd_m)[grep("ytd_UNIT", colnames(ddd_m))])
ytd_colnames <- c(paste("ytd_RMB", lm1, sep = "_"),
paste("ytd_RMB", lm2, sep = "_"),
paste("ytd_DOT", lm1, sep = "_"),
paste("ytd_DOT", lm2, sep = "_"),
paste("ytd_UNIT", lm1, sep = "_"),
paste("ytd_UNIT", lm2, sep = "_"))
# mat_colnames <- c(colnames(ddd_m)[grep("mat_RMB", colnames(ddd_m))],
# colnames(ddd_m)[grep("mat_DOT", colnames(ddd_m))],
# colnames(ddd_m)[grep("mat_UNIT", colnames(ddd_m))])
mat_colnames <- c(paste("mat_RMB", lm1, sep = "_"),
paste("mat_RMB", lm2, sep = "_"),
paste("mat_DOT", lm1, sep = "_"),
paste("mat_DOT", lm2, sep = "_"),
paste("mat_UNIT", lm1, sep = "_"),
paste("mat_UNIT", lm2, sep = "_"))
ddd_m <- setDF(ddd_m)
ddd_m <- ddd_m[,c(colnames(ddd_m)[!grepl("20", colnames(ddd_m))],
c(mth_colnames, qtr_colnames, ytd_colnames, mat_colnames))]
##-- output the csv file
output <-
lapply(unique(ddd_m$Main),
function(x) {
tmp <- ddd_m %>%
filter(Main == x)
write.csv(tmp, paste("./final data/ddd_", x, ".csv", sep = ""),
row.names = FALSE)
print(paste(x, " finished!"))
invisible()
})
|
library(lintr)
# Obesity V.S. Confirmed
# What is the relationship between obesity and the chance of COVID-19 infection?
## Obesity vs. confirmed COVID-19 cases: builds a column chart with one bar
## per region (height = % obesity, fill = % confirmed out of the region's
## population), bars ordered by descending % confirmed.
chart_3 <- function(data) {
  # Convert the percentage columns to absolute counts and collapse to one row
  # per region; NA regions become an explicit factor level so they aggregate.
  region_summary <- data %>%
    mutate(
      confirmed_numb = Confirmed / 100 * Population,
      obesity_numb = Obesity / 100 * Population,
      Region = forcats::fct_explicit_na(Region)
    ) %>%
    group_by(Region) %>%
    select(Region, obesity_numb, confirmed_numb, Population) %>%
    summarise(
      obesity_numb = sum(obesity_numb),
      confirmed_numb = sum(confirmed_numb),
      Population = sum(Population)
    ) %>%
    drop_na(obesity_numb, confirmed_numb) %>%
    mutate(
      percent_obesity = round(obesity_numb / Population * 100, 2),
      percent_confirmed = round(confirmed_numb / Population * 100, 2)
    )

  ggplot(region_summary) +
    geom_col(aes(
      x = reorder(Region, -percent_confirmed),
      y = percent_obesity,
      fill = percent_confirmed
    )) +
    labs(
      title = "Obesity V.S. Confirmed",
      x = "Region",
      y = "% of Obesity",
      fill = "% of COVID confirmed out of Region population"
    ) +
    theme(axis.text.x = element_text(angle = 65, hjust = 1))
}
| /scripts/chart3.R | permissive | yufeiz6/Covid-19-and-nutrition-data-analysis | R | false | false | 1,299 | r | library(lintr)
# Obesity V.S. Confirmed
# What is the relationship between obesity and the chance of COVID-19 infection?
chart_3 <- function(data) {
obesity_confirmed <- data %>%
mutate(
confirmed_numb = Confirmed / 100 * Population,
obesity_numb = Obesity / 100 * Population
) %>%
mutate(Region = forcats::fct_explicit_na(Region)) %>%
group_by(Region) %>%
select(Region, obesity_numb, confirmed_numb, Population) %>%
summarise(obesity_numb = sum(obesity_numb),
confirmed_numb = sum(confirmed_numb),
Population = sum(Population)) %>%
drop_na(obesity_numb, confirmed_numb) %>%
mutate(percent_obesity = round(obesity_numb / Population * 100, 2),
percent_confirmed = round(confirmed_numb / Population * 100, 2))
col_chart_obesity_confirmed <- ggplot(obesity_confirmed) +
geom_col(mapping = aes(x = reorder(Region, -percent_confirmed),
y = percent_obesity,
fill = percent_confirmed)) +
labs(
title = "Obesity V.S. Confirmed",
x = "Region",
y = "% of Obesity",
fill = "% of COVID confirmed out of Region population"
) +
theme(axis.text.x = element_text(angle = 65, hjust = 1))
return(col_chart_obesity_confirmed)
}
|
## Use each model to estimate a DF with the tags
## at the end, compare the regression model
## for each version
rm(list=ls())
## Install (if missing) and quietly attach every package named in `toLoad`.
loadPkg = function(toLoad) {
  for (pkg in toLoad) {
    if (!pkg %in% installed.packages()[, 1]) {
      install.packages(pkg, repos = 'http://cran.rstudio.com/')
    }
    suppressMessages(library(pkg, character.only = TRUE))
  }
}
packs <- c('tidyr',
'quanteda',
'dplyr',
'tidyverse',
"readxl",
'textrecipes',
'rsample',
"discrim")
engines <- c('glmnet',
"tidymodels",
"naivebayes",
"kernlab",
"ranger")
loadPkg(c(packs, engines))
#############################
## Load csv of frame tags
#############################
# Loads `meta` and `out` (de-duplicated corpus objects) into the workspace.
load("~/Dropbox/WTO-Data/rdatas/processedTextforSTMDeDeup.RData")
tags <- read.csv("../parasTaggedFrames500.csv") ## 487 x 8
## Make sure that none of the tagged paragraphs were
## removed in the de-dup:
ls()
length(meta$pid) ## 8456
length(tags$PID)
# Fix a hand-entered PID ("case 193" -> 1385) so the whole column is numeric.
tags[which(tags$PID== "case 193"), "PID"] <- 1385
tags$PID <- as.numeric(tags$PID)
## Keep only the intersection:
## (This makes it easier to go back and add
## (more if needed)
tags <- tags[which(tags$PID %in% meta$pid),]
dim(tags) ## 478
tagged.pids <- tags$PID
length(tagged.pids) ##478
## Prep for Prediction: untagged = paragraphs with no hand label.
colnames(meta)[which(colnames(meta)=="pid")] <- "PID"
untagged <- meta[!(meta$PID %in% tagged.pids),]
dim(untagged) ##7978 x 16
## Merge tags and meta:
## NOTE(review): this merges against out$meta while the checks above used
## `meta` -- presumably the same table; confirm they have not diverged.
tagged <- merge(tags,
out$meta,
by.x="PID",
by.y="pid",
all.x=TRUE)
dim(tagged) ## 487 x 23
## not-needed:
tagged$numdate <- NULL
tagged$X.y <- NULL
tagged$X.x <- NULL
## Group the frame clusters into a single 3-level outcome:
## FrameReciprocator: donor preferences + reciprocator
## FrameRedist: recipient preferences + redistributor
tagged$Frame <- "Unknown"
tagged[tagged$dprefs==1 |
tagged$recip==1,"Frame"] <- "Recip"
tagged[tagged$rprefs==1 |
tagged$redist==1,"Frame"] <- "Redist"
table(tagged$Frame) ## 84 reciprocator; 178 redist; 216 unknown
tagged$Frame <- as.factor(tagged$Frame)
## Training -test split (70/30, stratified on the outcome)
set.seed(2322)
tagged.split <- initial_split(data = tagged,
strata = Frame,
prop = .7)
tagged.split
tagged.train <- training(tagged.split)
tagged.test <- testing(tagged.split)
##%%%%%%%%%%%%%%%%%%%%%%%
### Prep global settings for models
##%%%%%%%%%%%%%%%%%%%%%%%
## ID the columns for analysis + ID
## Shared preprocessing recipe: Frame predicted from the paragraph text, with
## PID and year carried along as identifiers (not predictors).
wto.rec <- recipe(Frame ~ cleanedtext + PID + year,
data = tagged.train) %>%
update_role(PID, year,
new_role = "ID") ## ID fields
## Clean and convert to dtm: tokenize, drop stopwords, keep the default
## top tokens, then tf-idf weight.
wto.rec <- wto.rec %>%
step_tokenize(cleanedtext) %>%
step_stopwords(cleanedtext) %>%
step_tokenfilter(cleanedtext) %>%
step_tfidf(cleanedtext)
summary(wto.rec)
##%%%%%%%%%%%%%%%%%%
## Naive Bayes
##%%%%%%%%%%%%%%%%%
set.seed(2322)
nb.spec <- naive_Bayes() %>%
set_mode("classification") %>%
set_engine("naivebayes")
nb.spec
# Workflow = shared recipe + model spec; fit on the training split.
nb.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(nb.spec)
nb.fit <- nb.workflow %>%
fit(data = tagged.train)
nb.pred <- predict(nb.fit, tagged.test)
# augment() binds .pred_* columns onto the test set for evaluation.
wto.nb.aug <- augment(nb.fit, tagged.test)
wto.nb.aug$Frame <- as.factor(wto.nb.aug$Frame)
##%%%%%%%%%%%%%%%%%%
## Random Forest
##%%%%%%%%%%%%%%%%%%
## Structure: default ranger forest, same recipe as the other models.
set.seed(2322)
rf.spec <- rand_forest() %>%
set_mode("classification") %>%
set_engine("ranger")
rf.spec
rf.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(rf.spec)
## train:
rf.fit <- rf.workflow %>%
fit(data = tagged.train)
## predict:
rf.pred <- predict(rf.fit, tagged.test)
## Map into df (test set + .pred_* columns)
wto.rf.aug <- augment(rf.fit, tagged.test)
wto.rf.aug$Frame <- as.factor(wto.rf.aug$Frame)
## RF: dominant model, so will predict using it:
##%%%%%%%%%%%%%%%%%%
## SVM
##%%%%%%%%%%%%%%%%%%
## structure: polynomial-kernel SVM via kernlab, same recipe.
set.seed(2322)
svm.spec <- svm_poly() %>%
set_mode("classification") %>%
set_engine("kernlab")
svm.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(svm.spec)
svm.fit <- svm.workflow %>%
fit(data = tagged.train)
## predict:
svm.pred <- predict(svm.fit, tagged.test)
wto.svm.aug <- augment(svm.fit, tagged.test)
wto.svm.aug$Frame <- as.factor(wto.svm.aug$Frame)
##%%%%%%%%%%%%%%%%%%
## Logistic Reg with LASSO
##%%%%%%%%%%%%%%%%%%
## structure:
set.seed(2322)
## penalty: mixture =1 is LASSO
## mixture =0 is ridge
## NOTE(review): mixture=double(1) evaluates to 0, i.e. RIDGE, and penalty=0
## means effectively no regularization -- this contradicts the "LASSO" header.
## Confirm whether mixture=1 with a tuned penalty was intended.
glm.spec <- multinom_reg(mixture=double(1),
mode="classification",
engine= "glmnet",
penalty=0)
glm.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(glm.spec)
glm.fit <- glm.workflow %>%
fit(data = tagged.train)
## predict:
glm.pred <- predict(glm.fit, tagged.test)
wto.glm.aug <- augment(glm.fit, tagged.test)
wto.glm.aug$Frame <- as.factor(wto.glm.aug$Frame)
##%%%%%%%%%%%%%%%%%%%%%%%%%%
## Speaker-delegations
## From hand-coded
##%%%%%%%%%%%%%%%%%%%%%%%%%%
## Baseline "model": predict a paragraph's frame from the frequency with
## which its speaking delegation (firstent) used each frame in the training
## set. Predicting based on delegations in training set, applied to test set.
## Identify speakers in tagged Redistributor paragraphs
redists <- as.data.frame(table(tagged.train[which(
tagged.train$Frame=="Redist"),"firstent"]))
colnames(redists) <- c("Deleg", "Freq.Redist")
## Speakers in Reciprocator paragraphs
recip <- as.data.frame(table(tagged.train[which(
tagged.train$Frame=="Recip"),"firstent"]))
colnames(recip) <- c("Deleg", "Freq.Recip")
unknown <- as.data.frame(table(tagged.train[which(
tagged.train$Frame=="Unknown"),"firstent"]))
colnames(unknown) <- c("Deleg", "Freq.unknown")
## union in redistributor vs reciprocator:
intersect(redists$Deleg, recip$Deleg) ## Overlap:
## China, EU, US
## Merge together; "Pred probability" as % in dominant category
delegations.twoway <- merge(recip,
redists,
by="Deleg",
all=TRUE)
delegations.twoway[is.na(delegations.twoway)] <- 0
## Predict in test set based on top speaker in test set:
## Design "probabilities": Given that a paragraph is from a
## speaker in the reciprocator or redistributor pool, what is the likelihood that the paragraph was a redist paragraph?
## (note, I'm not taking into account non-tagged paragraphs)
## so we know that it will be an over-estimate
allparas <- delegations.twoway$Freq.Recip +
delegations.twoway$Freq.Redist
delegations.twoway$.pred_Redist <- round(
delegations.twoway$Freq.Redist/allparas, 3)
delegations.twoway$.pred_Recip <- round(
delegations.twoway$Freq.Recip/allparas, 3)
## Predict Class: majority frame per delegation (ties remain "Unknown").
delegations.twoway$.pred_class <- "Unknown"
delegations.twoway[which(delegations.twoway$.pred_Recip >
delegations.twoway$.pred_Redist),
".pred_class"] <- "Recip"
delegations.twoway[which(delegations.twoway$.pred_Recip <
delegations.twoway$.pred_Redist),
".pred_class"] <- "Redist"
### Merge into the test set:
wto.key.aug <- tagged.test
head(wto.key.aug)
cols <- c("Deleg", ".pred_Redist",
".pred_Recip", ".pred_class")
tst <- merge(wto.key.aug,
delegations.twoway[,cols],
by.x="firstent",
by.y="Deleg",
all.x=TRUE)
##also the training set, for later:
tst2 <- merge(tagged.train,
delegations.twoway[,cols],
by.x="firstent",
by.y="Deleg",
all.x=TRUE)
## And the full set for model comparisons:
wto.hand <- merge(meta,
delegations.twoway[,cols],
by.x="firstent",
by.y="Deleg",
all.x=TRUE)
# Delegations never seen in training get class "Unknown".
tst[is.na(tst$.pred_class), ".pred_class"] <- "Unknown"
tst2[is.na(tst2$.pred_class), ".pred_class"] <- "Unknown"
wto.hand[is.na(wto.hand$.pred_class), ".pred_class"] <- "Unknown"
## Expected probability of "unknown" = 100% if not in the
## recip/redist delegate groups. B/c this is the residual
## NOTE(review): the next line repeats the tst NA-fill just above -- harmless
## duplicate.
tst[is.na(tst$.pred_class), ".pred_class"] <- "Unknown"
tst$.pred_Unknown <-0
tst[which(tst$.pred_class=="Unknown"),
".pred_Unknown"] <- 1
tst[is.na(tst$.pred_Redist), ".pred_Redist"] <- 0
tst[is.na(tst$.pred_Recip), ".pred_Recip"] <- 0
tst[,c("firstent", "Frame", ".pred_class",
".pred_Redist", ".pred_Recip",
".pred_Unknown")]
## The training set:
## Create predicted unknown:
tst2[is.na(tst2$.pred_class), ".pred_class"] <- "Unknown"
tst2$.pred_Unknown <-0
tst2[which(tst2$.pred_class=="Unknown"),
".pred_Unknown"] <- 1
tst2[is.na(tst2$.pred_Redist), ".pred_Redist"] <- 0
tst2[is.na(tst2$.pred_Recip), ".pred_Recip"] <- 0
## Scale to full data:
wto.hand[is.na(wto.hand$.pred_class),
".pred_class"] <- "Unknown"
wto.hand$.pred_Unknown <-0
wto.hand[which(wto.hand$.pred_class=="Unknown"),
".pred_Unknown"] <- 1
wto.hand[is.na(wto.hand$.pred_Redist),
".pred_Redist"] <- 0
wto.hand[is.na(wto.hand$.pred_Recip),
".pred_Recip"] <- 0
## write entire hand-tagged + predicted (test + training stacked):
cols2 <- c("PID",
"Frame",
".pred_class",
".pred_Redist",
".pred_Recip",
".pred_Unknown")
tst.out <- rbind(tst[,cols2],
tst2[, cols2])
##%%%%%%%%%%%%%%%%%%%%
## Scale Predictions
#%%%%%%%%%%%%%%%%%%%
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Score every untagged paragraph with each fitted model.
## NOTE(review): these augment() calls OVERWRITE the test-set wto.*.aug
## objects created earlier with full-data versions; only the full-data
## versions are saved below -- confirm that is intended.
## Whole data:
## RF
rf.pred.all <- predict(rf.fit, untagged)
wto.rf.aug <- augment(rf.fit, untagged)
dim(wto.rf.aug) ## 7978 x 20 ## => 8.4k - the tagged
## GLM (glm.fit)
glm.pred.all <- predict(glm.fit, untagged)
wto.glm.aug <- augment(glm.fit, untagged)
## NB
nb.pred.all <- predict(nb.fit, untagged)
wto.nb.aug <- augment(nb.fit, untagged)
## SVM
svm.pred.all <- predict(svm.fit, untagged)
wto.svm.aug <- augment(svm.fit, untagged)
## By delegations -> wto.hand above
# Persist all model outputs plus the hand-coded set for downstream comparison.
save(wto.glm.aug,
wto.nb.aug,
wto.rf.aug,
wto.svm.aug,
wto.hand,
tagged,
file="predicted-models500Repl.Rdata")
| /dataanalysis/analysis/topic-modeling/replicate/05frameClassificationAnalysisRepl.R | no_license | margaretfoster/WTO | R | false | false | 10,118 | r | ## Use each model to estimate a DF with the tags
## at the end, compare the regression model
## for each version
rm(list=ls())
loadPkg=function(toLoad){
for(lib in toLoad){
if(! lib %in% installed.packages()[,1])
{install.packages(lib,
repos='http://cran.rstudio.com/')}
suppressMessages(library(lib,
character.only=TRUE))}}
packs <- c('tidyr',
'quanteda',
'dplyr',
'tidyverse',
"readxl",
'textrecipes',
'rsample',
"discrim")
engines <- c('glmnet',
"tidymodels",
"naivebayes",
"kernlab",
"ranger")
loadPkg(c(packs, engines))
#############################
## Load csv of frame tags
#############################
load("~/Dropbox/WTO-Data/rdatas/processedTextforSTMDeDeup.RData")
tags <- read.csv("../parasTaggedFrames500.csv") ## 487 x 8
## Make sure that none of the tagged paragrpahs were
## removed in the de-dup:
ls()
length(meta$pid) ## 8456
length(tags$PID)
tags[which(tags$PID== "case 193"), "PID"] <- 1385
tags$PID <- as.numeric(tags$PID)
## Keep only the intersection:
## (This makes it easier to go back and add
## (more if needed)
tags <- tags[which(tags$PID %in% meta$pid),]
dim(tags) ## 478
tagged.pids <- tags$PID
length(tagged.pids) ##478
## Prep for Prediction:
colnames(meta)[which(colnames(meta)=="pid")] <- "PID"
untagged <- meta[!(meta$PID %in% tagged.pids),]
dim(untagged) ##7978 x 16
## Merge tags and meta:
tagged <- merge(tags,
out$meta,
by.x="PID",
by.y="pid",
all.x=TRUE)
dim(tagged) ## 487 x 23
## not-needed:
tagged$numdate <- NULL
tagged$X.y <- NULL
tagged$X.x <- NULL
## Group the frame clusters:
## FrameReciprocator: donor preferences + reciprocator
## FrameRedist: recipient preferences + redistributor
tagged$Frame <- "Unknown"
tagged[tagged$dprefs==1 |
tagged$recip==1,"Frame"] <- "Recip"
tagged[tagged$rprefs==1 |
tagged$redist==1,"Frame"] <- "Redist"
table(tagged$Frame) ## 84 reciprocator; 178 redist; 216 unknown
tagged$Frame <- as.factor(tagged$Frame)
## Training -test split
set.seed(2322)
tagged.split <- initial_split(data = tagged,
strata = Frame,
prop = .7)
tagged.split
tagged.train <- training(tagged.split)
tagged.test <- testing(tagged.split)
##%%%%%%%%%%%%%%%%%%%%%%%
### Prep global settings for models
##%%%%%%%%%%%%%%%%%%%%%%%
## ID the columns for analysis + ID
wto.rec <- recipe(Frame ~ cleanedtext + PID + year,
data = tagged.train) %>%
update_role(PID, year,
new_role = "ID") ## ID fields
## Clean and convert to dtm
wto.rec <- wto.rec %>%
step_tokenize(cleanedtext) %>%
step_stopwords(cleanedtext) %>%
step_tokenfilter(cleanedtext) %>%
step_tfidf(cleanedtext)
summary(wto.rec)
##%%%%%%%%%%%%%%%%%%
## Naive Bayes
##%%%%%%%%%%%%%%%%%
set.seed(2322)
nb.spec <- naive_Bayes() %>%
set_mode("classification") %>%
set_engine("naivebayes")
nb.spec
nb.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(nb.spec)
nb.fit <- nb.workflow %>%
fit(data = tagged.train)
nb.pred <- predict(nb.fit, tagged.test)
wto.nb.aug <- augment(nb.fit, tagged.test)
wto.nb.aug$Frame <- as.factor(wto.nb.aug$Frame)
##%%%%%%%%%%%%%%%%%%
## Random Forest
##%%%%%%%%%%%%%%%%%%
## Structure:
set.seed(2322)
rf.spec <- rand_forest() %>%
set_mode("classification") %>%
set_engine("ranger")
rf.spec
rf.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(rf.spec)
## train:
rf.fit <- rf.workflow %>%
fit(data = tagged.train)
## predict:
rf.pred <- predict(rf.fit, tagged.test)
## Map into df
wto.rf.aug <- augment(rf.fit, tagged.test)
wto.rf.aug$Frame <- as.factor(wto.rf.aug$Frame)
## RF: dominant model, so will predict using it:
##%%%%%%%%%%%%%%%%%%
## SVM
##%%%%%%%%%%%%%%%%%%
## structure:
set.seed(2322)
svm.spec <- svm_poly() %>%
set_mode("classification") %>%
set_engine("kernlab")
svm.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(svm.spec)
svm.fit <- svm.workflow %>%
fit(data = tagged.train)
## predict:
svm.pred <- predict(svm.fit, tagged.test)
wto.svm.aug <- augment(svm.fit, tagged.test)
wto.svm.aug$Frame <- as.factor(wto.svm.aug$Frame)
##%%%%%%%%%%%%%%%%%%
## Logistic Reg with LASSO
##%%%%%%%%%%%%%%%%%%
## structure:
set.seed(2322)
## penalty: mixture =1 is LASSO
## mixture =0 is ridge
glm.spec <- multinom_reg(mixture=double(1),
mode="classification",
engine= "glmnet",
penalty=0)
glm.workflow <- workflow() %>%
add_recipe(wto.rec) %>%
add_model(glm.spec)
glm.fit <- glm.workflow %>%
fit(data = tagged.train)
## predict:
glm.pred <- predict(glm.fit, tagged.test)
wto.glm.aug <- augment(glm.fit, tagged.test)
wto.glm.aug$Frame <- as.factor(wto.glm.aug$Frame)
##%%%%%%%%%%%%%%%%%%%%%%%%%%
## Speaker-delegations
## From hand-coded
##%%%%%%%%%%%%%%%%%%%%%%%%%%
## Predicting based on delegations in training set
## on testing set
## Identify speakers in tagged Redistributor paragraphs
## (frequency table of first-mentioned entity per frame category)
redists <- as.data.frame(table(tagged.train[which(
tagged.train$Frame=="Redist"),"firstent"]))
colnames(redists) <- c("Deleg", "Freq.Redist")
## Speakers in Reciprocator paragraphs
recip <- as.data.frame(table(tagged.train[which(
tagged.train$Frame=="Recip"),"firstent"]))
colnames(recip) <- c("Deleg", "Freq.Recip")
unknown <- as.data.frame(table(tagged.train[which(
tagged.train$Frame=="Unknown"),"firstent"]))
colnames(unknown) <- c("Deleg", "Freq.unknown")
## union in redistributor vs reciprocator:
intersect(redists$Deleg, recip$Deleg) ## Overlap:
## China, EU, US
## Merge together; "Pred probability" as % in dominant category
delegations.twoway <- merge(recip,
redists,
by="Deleg",
all=TRUE)
## Delegations seen in only one category get frequency 0 in the other
delegations.twoway[is.na(delegations.twoway)] <- 0
## Predict in test set based on top speaker in test set:
## Design "probabilities": Given that a paragraph is from a
## speaker in the reciprocator or redistributor pool, what is the likelihood that the paragraph was a redist paragraph?
## (note, I'm not taking into account non-tagged paragraphs)
## so we know that it will be an over-estimate
allparas <- delegations.twoway$Freq.Recip +
delegations.twoway$Freq.Redist
delegations.twoway$.pred_Redist <- round(
delegations.twoway$Freq.Redist/allparas, 3)
delegations.twoway$.pred_Recip <- round(
delegations.twoway$Freq.Recip/allparas, 3)
## Predict Class
## Default is "Unknown"; ties (equal shares) therefore stay "Unknown"
delegations.twoway$.pred_class <- "Unknown"
delegations.twoway[which(delegations.twoway$.pred_Recip >
delegations.twoway$.pred_Redist),
".pred_class"] <- "Recip"
delegations.twoway[which(delegations.twoway$.pred_Recip <
delegations.twoway$.pred_Redist),
".pred_class"] <- "Redist"
### Merge into the test set:
## Attach the delegation-based "model" predictions to each paragraph by
## matching its first-mentioned entity against the delegation table.
wto.key.aug <- tagged.test
head(wto.key.aug)
cols <- c("Deleg", ".pred_Redist",
".pred_Recip", ".pred_class")
tst <- merge(wto.key.aug,
delegations.twoway[,cols],
by.x="firstent",
by.y="Deleg",
all.x=TRUE)
## also the training set, for later:
tst2 <- merge(tagged.train,
delegations.twoway[,cols],
by.x="firstent",
by.y="Deleg",
all.x=TRUE)
## And the full set for model comparisons:
wto.hand <- merge(meta,
delegations.twoway[,cols],
by.x="firstent",
by.y="Deleg",
all.x=TRUE)
## Speakers absent from the delegation table get class "Unknown"
tst[is.na(tst$.pred_class), ".pred_class"] <- "Unknown"
tst2[is.na(tst2$.pred_class), ".pred_class"] <- "Unknown"
wto.hand[is.na(wto.hand$.pred_class), ".pred_class"] <- "Unknown"
## Expected probability of "unknown" = 100% if not in the
## recip/redist delegate groups. B/c this is the residual
tst[is.na(tst$.pred_class), ".pred_class"] <- "Unknown"
tst$.pred_Unknown <-0
tst[which(tst$.pred_class=="Unknown"),
".pred_Unknown"] <- 1
tst[is.na(tst$.pred_Redist), ".pred_Redist"] <- 0
tst[is.na(tst$.pred_Recip), ".pred_Recip"] <- 0
## Quick visual check of the merged prediction columns
tst[,c("firstent", "Frame", ".pred_class",
".pred_Redist", ".pred_Recip",
".pred_Unknown")]
## The training set:
## Create predicted unknown:
## Same NA -> "Unknown"/0 clean-up as for the test set above.
tst2[is.na(tst2$.pred_class), ".pred_class"] <- "Unknown"
tst2$.pred_Unknown <-0
tst2[which(tst2$.pred_class=="Unknown"),
".pred_Unknown"] <- 1
tst2[is.na(tst2$.pred_Redist), ".pred_Redist"] <- 0
tst2[is.na(tst2$.pred_Recip), ".pred_Recip"] <- 0
## Scale to full data:
wto.hand[is.na(wto.hand$.pred_class),
".pred_class"] <- "Unknown"
wto.hand$.pred_Unknown <-0
wto.hand[which(wto.hand$.pred_class=="Unknown"),
".pred_Unknown"] <- 1
wto.hand[is.na(wto.hand$.pred_Redist),
".pred_Redist"] <- 0
wto.hand[is.na(wto.hand$.pred_Recip),
".pred_Recip"] <- 0
## write entire hand-tagged + predicted:
## Stack test + training rows into one frame keyed by paragraph id (PID)
cols2 <- c("PID",
"Frame",
".pred_class",
".pred_Redist",
".pred_Recip",
".pred_Unknown")
tst.out <- rbind(tst[,cols2],
tst2[, cols2])
##%%%%%%%%%%%%%%%%%%%%
## Scale Predictions
#%%%%%%%%%%%%%%%%%%%
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Scale prediction from RF Model:
## Whole data:
## NOTE(review): the wto.*.aug objects below are re-assigned from the
## *untagged* corpus, overwriting the test-set versions created earlier --
## presumably intentional (full-corpus scaling), but verify nothing
## downstream still expects the test-set augments.
## RF
rf.pred.all <- predict(rf.fit, untagged)
wto.rf.aug <- augment(rf.fit, untagged)
dim(wto.rf.aug) ## 7978 x 20 ## => 8.4k - the tagged
## GLM (glm.fit)
glm.pred.all <- predict(glm.fit, untagged)
wto.glm.aug <- augment(glm.fit, untagged)
## NB
nb.pred.all <- predict(nb.fit, untagged)
wto.nb.aug <- augment(nb.fit, untagged)
## SVM
svm.pred.all <- predict(svm.fit, untagged)
wto.svm.aug <- augment(svm.fit, untagged)
## By delegations -> wto.hand above
## Persist all model outputs for the comparison/plotting scripts
save(wto.glm.aug,
wto.nb.aug,
wto.rf.aug,
wto.svm.aug,
wto.hand,
tagged,
file="predicted-models500Repl.Rdata")
|
## Create methods
## S3 print method for "ship" objects.
## Emits a five-line summary (name, size, position, total hits, sunk
## status) as a character vector; print() returns it invisibly.
print.ship <- function(ship) {
  summary_lines <- c(
    paste("Name:", ship$name),
    paste("Size:", ship$size),
    paste("Position:", ship$position),
    paste("Number of hits on ship:", sum(ship$hits)),
    paste("Is the ship sunk?", as.logical(ship$sunk))
  )
  print(summary_lines)
}
## S3 print method for "fleet" objects: prints the admiral, the ocean
## dimensions, and one line per ship showing that ship's first five fields.
##
## Improvement: the original hard-coded five copy-pasted ship lines (and
## errored on fleets of any other size); a loop produces identical output
## for the five-ship case and generalizes to any number of ships.
print.fleet <- function(fleet) {
  name <- paste("Admiral:", fleet$admiral)
  ocean <- paste("Ocean Size:", fleet$ocean[1], fleet$ocean[2])
  ship_lines <- vapply(seq_along(fleet$ships), function(i) {
    s <- fleet$ships[[i]]
    # Reproduces the original format: "Ship <i>: <f1> <f2> <f3> <f4> <f5>"
    paste(paste0("Ship ", i, ":"), s[1], s[2], s[3], s[4], s[5])
  }, character(1))
  print(c(name, ocean, ship_lines))
}
## S3 print method for "battleship" objects: prints both fleets.
##
## Fix: the original used single-bracket indexing (battleship$fleets[1]),
## which yields a one-element *list* wrapping the fleet; inside
## print.fleet() every fleet$<field> then resolves to NULL and the output
## is blank. Double brackets extract the fleet itself.
print.battleship <- function(battleship) {
  print.fleet(battleship$fleets[[1]])
  print.fleet(battleship$fleets[[2]])
}
| /battleship_methods.R | no_license | mayaklee/battleship-project | R | false | false | 1,479 | r | ## Create methods
## S3 print method for "ship" objects.
## Builds one labelled line per field and prints them as a character
## vector; print() returns that vector invisibly.
print.ship <- function(ship){
name <- paste("Name:",ship$name, sep = " ")
size <- paste("Size:", ship$size, sep = " ")
position <- paste("Position:", ship$position, sep = " ")
# Total damage taken; assumes ship$hits is numeric/logical -- TODO confirm
hits <- paste("Number of hits on ship:", sum(ship$hits), sep = " ")
sunk <- paste("Is the ship sunk?", as.logical(ship$sunk), sep = " ")
print(c(name, size, position, hits, sunk))
}
## S3 print method for "fleet" objects: admiral, ocean size, and the
## first five fields of each of exactly five ships.
## NOTE(review): the five ship lines are copy-paste duplicates and the
## fleet size is hard-coded; a loop over fleet$ships would generalize.
print.fleet <- function(fleet){
#print.ship(fleet$ships[1])
name <- paste("Admiral:", fleet$admiral, sep = " ")
ocean <- paste("Ocean Size:", fleet$ocean[1], fleet$ocean[2], sep = " ")
ship1 <- paste("Ship 1:", fleet$ships[[1]][1], fleet$ships[[1]][2], fleet$ships[[1]][3], fleet$ships[[1]][4], fleet$ships[[1]][5], sep = " ")
ship2 <- paste("Ship 2:", fleet$ships[[2]][1], fleet$ships[[2]][2], fleet$ships[[2]][3], fleet$ships[[2]][4], fleet$ships[[2]][5], sep = " ")
ship3 <- paste("Ship 3:", fleet$ships[[3]][1], fleet$ships[[3]][2], fleet$ships[[3]][3], fleet$ships[[3]][4], fleet$ships[[3]][5], sep = " ")
ship4 <- paste("Ship 4:", fleet$ships[[4]][1], fleet$ships[[4]][2], fleet$ships[[4]][3], fleet$ships[[4]][4], fleet$ships[[4]][5], sep = " ")
ship5 <- paste("Ship 5:", fleet$ships[[5]][1], fleet$ships[[5]][2], fleet$ships[[5]][3], fleet$ships[[5]][4], fleet$ships[[5]][5], sep = " ")
print(c(name, ocean, ship1, ship2, ship3, ship4, ship5))
}
## S3 print method for "battleship" objects: prints both fleets.
## NOTE(review): single-bracket indexing (fleets[1]) passes a one-element
## list rather than the fleet itself, so fleet$<field> inside
## print.fleet() resolves to NULL; this likely wants fleets[[1]]/[[2]].
print.battleship <- function(battleship){
print.fleet(battleship$fleets[1])
print.fleet(battleship$fleets[2])
}
|
# Draw two samples of 100 normal random numbers
# (note: rnorm() gives N(mean, sd) draws, not values between 0 and 1)
x <- 1:100
y <- rnorm(x, 0, 1)   # rnorm(n, ...) uses length(x) when given a vector
y1 <- rnorm(x, 1, 1)  # same size, mean shifted to 1
# Plot the kernel density estimate of each sample on one figure
plot(density(y))
# lines() always overlays the current plot; the original's add=T was not a
# real argument and only triggered a '"add" is not a graphical parameter'
# warning
lines(density(y1), col = "red", lwd = 2)
| /Dataandcode/generating random numbers.R | no_license | vratchaudhary/Bayesian_Example | R | false | false | 169 | r | #gnerate random numbers between 0 and 1
x=1:100
y=rnorm(x,0,1)
y1= rnorm(x,1,1)
#plotting pdf of the points
plot(density(y))
lines(density(y1), add=T, col='red', lwd=2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model_lineargaussian.R
\name{get_model_lineargaussian}
\alias{get_model_lineargaussian}
\title{get_model_lineargaussian}
\usage{
get_model_lineargaussian()
}
\description{
Univariate linear Gaussian model with 4 unknown parameters
(\code{phi}, \code{psi}, \code{sigmaW2}, \code{sigmaV2}).
Latent states: X[t] = \code{phi}*X[t-1] + N(0,\code{sigmaW2}).
Observations: Y[t] = \code{psi}*X[t] + N(0,\code{sigmaV2}).
}
| /man/get_model_lineargaussian.Rd | no_license | pierrejacob/bayeshscore | R | false | true | 496 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_model_lineargaussian.R
\name{get_model_lineargaussian}
\alias{get_model_lineargaussian}
\title{get_model_lineargaussian}
\usage{
get_model_lineargaussian()
}
\description{
Univariate linear Gaussian model with 4 unknown parameters
(\code{phi}, \code{psi}, \code{sigmaW2}, \code{sigmaV2}).
Latent states: X[t] = \code{phi}*X[t-1] + N(0,\code{sigmaW2}).
Observations: Y[t] = \code{psi}*X[t] + N(0,\code{sigmaV2}).
}
|
## Put comments here that give an overall description of what your
## functions do
## This is a collection of function that together offer a way to speedup
## repeated inversions of a matrix. Use makeCacheMatrix to create a cached matrix
## from a matrix and cacheSolve to get its inverse. makeCacheMatrix assumes that the matrix
## is invertible.
## matrices
## Write a short comment describing this function
## Creates a cached matrix from a matrix that caches its inverse.
## The matrix is assumed to be invertible. Inverse is created lazily
## when called the first time (lazy evaluation in R)
## Wrap an invertible matrix in a list of accessors that cache its inverse.
##
## Returns a list with:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse
##   getinverse()  -- return the cached inverse, or NULL if not computed
##
## Bug fix: set() previously did `m <<- NULL`, but the cache variable is
## named `inverse`; that assignment leaked into the global environment and
## the stale inverse survived a set(), so cacheSolve() could return the
## inverse of the *old* matrix.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  set <- function(y) {
    x <<- y
    inverse <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() x
  setinverse <- function(solve) inverse <<- solve
  getinverse <- function() inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Returns the inverse of a matrix. The matrix must be invertible.
## Uses the cached value if it exists, computes and caches it if it does not.
## Return the inverse of the matrix wrapped by a makeCacheMatrix object.
## Reuses the cached inverse when one exists; otherwise computes it with
## solve(), stores it back through x$setinverse(), and returns it. The
## wrapped matrix must be invertible.
cacheSolve <- function(x) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember for next call, return
    computed <- solve(x$get())
    x$setinverse(computed)
    message("Returning computed inverse.")
    return(computed)
  }
  message("Returning cached inverse.")
  cached
}
## matrices
| /cachematrix.R | no_license | kulkarnij/ProgrammingAssignment2 | R | false | false | 1,531 | r | ## Put comments here that give an overall description of what your
## functions do
## This is a collection of function that together offer a way to speedup
## repeated inversions of a matrix. Use makeCacheMatrix to create a cached matrix
## from a matrix and cacheSolve to get its inverse. makeCacheMatrix assumes that the matrix
## is invertible.
## matrices
## Write a short comment describing this function
## Creates a cached matrix from a matrix that caches its inverse.
## The matrix is assumed to be invertible. Inverse is created lazily
## when called the first time (lazy evaluation in R)
## Build a list of accessors around matrix `x`, caching its inverse.
## set/get replace/return the matrix; setinverse/getinverse store and
## fetch the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
# NOTE(review): the cache variable is `inverse`, not `m`; this `<<-`
# escapes to the global environment, so the cached inverse is never
# invalidated when a new matrix is set.
m <<- NULL
}
get <- function() x
setinverse <- function(solve) inverse <<- solve
getinverse <- function() inverse
list (set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Write a short comment describing this function
## Returns the inverse of a matrix. The matrix must be invertible.
## Uses the cached value if it exists, computes and caches it if it does not.
## Return the inverse of the matrix wrapped by a makeCacheMatrix object,
## computing and caching it on first use. Matrix must be invertible.
cacheSolve <- function(x) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getinverse()
# Cache hit: reuse the previously computed inverse
if(!is.null(inverse)) {
message("Returning cached inverse.")
return(inverse)
}
# Cache miss: compute, store for next time, then return
matrix <- x$get()
inverse <- solve(matrix)
x$setinverse(inverse)
message("Returning computed inverse.")
return(inverse)
}
## matrices
|
options(stringsAsFactors = FALSE)
suppressMessages(library("argparse"))
suppressMessages(library("pander"))
#' For those reviewing the code below, the following is a small style guide
#' outlining the various formats for the code.
#'
#' Names with "_": objects, including data.frames, GRanges, vectors, ...
#' Names in caMel format: functions or components of objects (i.e. columns
#' within a data.frame).
#' Names with ".": arguments / options for functions
# Directory containing this script, recovered from the Rscript --file= flag
code_dir <- dirname(
sub("--file=", "",
grep("--file=", commandArgs(trailingOnly = FALSE), value = TRUE)))
# Set up and gather command line arguments -------------------------------------
parser <- ArgumentParser(
description = "R-based tool for filtering a list of reads form original sequencing files.")
parser$add_argument(
"-r", "--root", nargs = "+", type = "character", default = NULL,
help = "Files with original or root sequences (.fastq / .fastq.gz / .fasta / ...) to filter. Can select multiple files. Must be fasta or fastq format.")
parser$add_argument(
"-b", "--branch", nargs = 1, type = "character", default = NULL,
help = "File to filter extract filtered read ids, a subset of root sequences.")
parser$add_argument(
"-i", "--ids", nargs = 1, type = "character", default = NULL,
help = "Text-based file with list of read names to filter from root sequences. Header included.")
parser$add_argument(
"-o", "--outputDir", nargs = 1, type = "character", default = "filteredData",
help = "Output directory.")
parser$add_argument(
"--filePattern", nargs = 1, type = "character", default = NULL,
help = "Pattern to include in output file name. ie. filtered.[filePattern].R1.fastq.")
parser$add_argument(
"--readTypes", nargs = "+", type = "character", default = "R1 R2 I1 I2",
help = "Read types (R1, R2, I1, I2) identifiers for root seqs. Default: R1 R2 I1 I2. Identifiers need to be present in same format on root sequencing files.")
parser$add_argument(
"--readNamePattern", nargs = 1, type = "character", default = "[\\w:-]+",
help = "Regular expression for pattern matching read names. Should not contain R1/R2/I1/I2 specific components. Default is [\\w:-]+")
parser$add_argument(
"-c", "--cores", nargs = 1, type = "integer", default = 0,
help = "Parallel processing option with r-parallel. Specify number of cores to use, number of cores will not be more than machine can provide or number of root files to filter, which ever is smaller.")
parser$add_argument(
"--compress", action = "store_true", help = "Compress output with gzip.")
args <- parser$parse_args(commandArgs(trailingOnly = TRUE))
# Argument Conditionals
# Either a branch file or an id list must be supplied.
# NOTE(review): scalar `if` conditions conventionally use && rather than &
if(is.null(args$branch) & is.null(args$ids)){
stop("Please specify file for branch or file with read ids.")}
# Split the space-separated read-type string into a character vector
args$readTypes <- unlist(strsplit(args$readTypes, " "))
# Print Inputs to terminal
# Render a two-column (name, value) table of all parsed arguments so the
# run configuration is recorded in the log output.
input_table <- data.frame(
"Variables" = paste0(names(args), " :"),
"Values" = sapply(1:length(args), function(i){
paste(args[[i]], collapse = ", ")}))
# Fix the display order of the argument rows
input_table <- input_table[
match(c("root :", "branch :", "ids :", "outputDir :", "filePattern :",
"readTypes :", "readNamePattern :", "cores :", "compress :"),
input_table$Variables),]
pandoc.title("filterRootSeqReads Inputs")
pandoc.table(data.frame(input_table, row.names = NULL),
justify = c("left", "left"),
split.tables = Inf,
style = "simple")
# Load additional R-packages for analysis and processing
# (stringr for read-name regexes, ShortRead for fasta/fastq I/O)
add_packs <- c("stringr", "ShortRead")
add_packs_loaded <- suppressMessages(
sapply(add_packs, require, character.only = TRUE))
# Abort with a load-status table if any dependency failed to attach
if(!all(add_packs_loaded)){
pandoc.table(data.frame(
"R-Packages" = names(add_packs_loaded),
"Loaded" = add_packs_loaded,
row.names = NULL))
stop("Check dependancies.")
}
# Load id file for filtering ---------------------------------------------------
# Read names to keep come either from a branch sequence file (fasta/fastq)
# or from a plain-text id list; names are normalized via readNamePattern.
if (!is.null(args$branch)) {
  if (grepl("fastq", args$branch)) {
    branchData <- readFastq(args$branch)
  } else {
    branchData <- readFasta(args$branch)
  }
  ids <- str_extract(as.character(id(branchData)), args$readNamePattern)
} else if (!is.null(args$ids)) {
  ids <- read.delim(args$ids, header = TRUE)
  ids <- str_extract(as.character(ids[, 1]), args$readNamePattern)
} else {
  stop("Please provide either a branch file or list of ids.")
}
## Make system folder of output ================================================
# dir.create() replaces the former shell call system(paste0("mkdir ", ...)),
# which was non-portable and unsafe for paths containing spaces.
if (!dir.exists(args$outputDir)) {
  dir.create(args$outputDir, recursive = TRUE)
  if (!dir.exists(args$outputDir)) stop("Cannot create output directory.")
}
# Filter root sequence files ---------------------------------------------------
# Serial (cores == 0) or parallel path; each root file is read, subset to
# the reads named in `ids`, and written to outputDir with a name built from
# filePattern + read type + format.
# NOTE(review): the serial and parallel worker functions are copy-paste
# duplicates; extracting one shared filter function would avoid drift.
if(args$cores == 0){
null <- lapply(args$root, function(root, args, ids){
# Crude format sniffing from the file name
rootFormat <- ifelse(grepl("fastq", root), "fastq", "fasta")
if(rootFormat == "fastq"){
rootReads <- readFastq(root)
}else{
rootReads <- readFasta(root)
}
pander(paste0("\nReads from ", root, " loaded, totaling ", length(rootReads), " reads."))
# Normalize read names, then keep reads in the same order as `ids`
rootIDs <- str_extract(as.character(id(rootReads)), args$readNamePattern)
filteredReads <- rootReads[match(ids, rootIDs)]
pander(paste0("\nRoot sequences filtered to ", length(filteredReads), " reads."))
# Exactly one read-type identifier (R1/R2/I1/I2) must match the file name
rootType <- args$readTypes[sapply(args$readTypes, grepl, x = root)]
if(length(rootType) > 1 | length(rootType) == 0){
stop("Root seq files do not contain read type identifiers. Change file names or identifiers.")
}
fileName <- paste0(
"filteredData.", args$filePattern, ".", rootType, ".", rootFormat)
if(args$compress) fileName <- paste0(fileName, ".gz")
if(rootFormat == "fastq"){
writeFastq(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}else{
writeFasta(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}
}, args = args, ids = ids)
}else{
parLoaded <- suppressMessages(require(parallel))
if(!parLoaded) stop("R-parallel not loaded, check to see if package is installed.")
# Never ask for more workers than cores available or files to process
buster <- makeCluster(min(detectCores(), args$cores, length(args$root)))
null <- parLapply(buster, args$root, function(root, args, ids){
# Workers are fresh R sessions, so packages must be attached per worker
library(ShortRead)
library(stringr)
library(pander)
rootFormat <- ifelse(grepl("fastq", root), "fastq", "fasta")
if(rootFormat == "fastq"){
rootReads <- readFastq(root)
}else{
rootReads <- readFasta(root)
}
pander(paste0("\nReads from ", root, " loaded, totaling ", length(rootReads), " reads."))
rootIDs <- str_extract(as.character(id(rootReads)), args$readNamePattern)
filteredReads <- rootReads[match(ids, rootIDs)]
pander(paste0("\nRoot sequences filtered to ", length(filteredReads), " reads."))
rootType <- args$readTypes[sapply(args$readTypes, grepl, x = root)]
if(length(rootType) > 1 | length(rootType) == 0){
stop("Root seq files do not contain read type identifiers. Change file names or identifiers.")
}
fileName <- paste0(
"filteredData.", args$filePattern, ".", rootType, ".", rootFormat)
if(args$compress) fileName <- paste0(fileName, ".gz")
if(rootFormat == "fastq"){
writeFastq(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}else{
writeFasta(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}
}, args = args, ids = ids)
stopCluster(buster)
}
| /filterRootSeqReads.R | no_license | cnobles/filterRootSeqReads | R | false | false | 7,525 | r | options(stringsAsFactors = FALSE)
## Dependencies and command-line interface for the read-filtering tool.
suppressMessages(library("argparse"))
suppressMessages(library("pander"))
#' For those reviewing the code below, the following is a small style guide
#' outlining the various formats for the code.
#'
#' Names with "_": objects, including data.frames, GRanges, vectors, ...
#' Names in caMel format: functions or components of objects (i.e. columns
#' within a data.frame).
#' Names with ".": arguments / options for functions
# Directory containing this script, recovered from the Rscript --file= flag
code_dir <- dirname(
sub("--file=", "",
grep("--file=", commandArgs(trailingOnly = FALSE), value = TRUE)))
# Set up and gather command line arguments -------------------------------------
parser <- ArgumentParser(
description = "R-based tool for filtering a list of reads form original sequencing files.")
parser$add_argument(
"-r", "--root", nargs = "+", type = "character", default = NULL,
help = "Files with original or root sequences (.fastq / .fastq.gz / .fasta / ...) to filter. Can select multiple files. Must be fasta or fastq format.")
parser$add_argument(
"-b", "--branch", nargs = 1, type = "character", default = NULL,
help = "File to filter extract filtered read ids, a subset of root sequences.")
parser$add_argument(
"-i", "--ids", nargs = 1, type = "character", default = NULL,
help = "Text-based file with list of read names to filter from root sequences. Header included.")
parser$add_argument(
"-o", "--outputDir", nargs = 1, type = "character", default = "filteredData",
help = "Output directory.")
parser$add_argument(
"--filePattern", nargs = 1, type = "character", default = NULL,
help = "Pattern to include in output file name. ie. filtered.[filePattern].R1.fastq.")
parser$add_argument(
"--readTypes", nargs = "+", type = "character", default = "R1 R2 I1 I2",
help = "Read types (R1, R2, I1, I2) identifiers for root seqs. Default: R1 R2 I1 I2. Identifiers need to be present in same format on root sequencing files.")
parser$add_argument(
"--readNamePattern", nargs = 1, type = "character", default = "[\\w:-]+",
help = "Regular expression for pattern matching read names. Should not contain R1/R2/I1/I2 specific components. Default is [\\w:-]+")
parser$add_argument(
"-c", "--cores", nargs = 1, type = "integer", default = 0,
help = "Parallel processing option with r-parallel. Specify number of cores to use, number of cores will not be more than machine can provide or number of root files to filter, which ever is smaller.")
parser$add_argument(
"--compress", action = "store_true", help = "Compress output with gzip.")
args <- parser$parse_args(commandArgs(trailingOnly = TRUE))
# Argument Conditionals
# Either a branch file or an id list must be supplied
if(is.null(args$branch) & is.null(args$ids)){
stop("Please specify file for branch or file with read ids.")}
args$readTypes <- unlist(strsplit(args$readTypes, " "))
# Print Inputs to terminal
# Log the parsed run configuration as a two-column pandoc table.
input_table <- data.frame(
"Variables" = paste0(names(args), " :"),
"Values" = sapply(1:length(args), function(i){
paste(args[[i]], collapse = ", ")}))
input_table <- input_table[
match(c("root :", "branch :", "ids :", "outputDir :", "filePattern :",
"readTypes :", "readNamePattern :", "cores :", "compress :"),
input_table$Variables),]
pandoc.title("filterRootSeqReads Inputs")
pandoc.table(data.frame(input_table, row.names = NULL),
justify = c("left", "left"),
split.tables = Inf,
style = "simple")
# Load additional R-packages for analysis and processing
add_packs <- c("stringr", "ShortRead")
add_packs_loaded <- suppressMessages(
sapply(add_packs, require, character.only = TRUE))
# Abort with a load-status table if any dependency failed to attach
if(!all(add_packs_loaded)){
pandoc.table(data.frame(
"R-Packages" = names(add_packs_loaded),
"Loaded" = add_packs_loaded,
row.names = NULL))
stop("Check dependancies.")
}
# Load id file for filtering ---------------------------------------------------
# Read names to keep come either from a branch sequence file (fasta/fastq)
# or from a plain-text id list; names are normalized via readNamePattern.
if(!is.null(args$branch)){
if(grepl("fastq", args$branch)){
branchData <- readFastq(args$branch)
}else{
branchData <- readFasta(args$branch)
}
ids <- str_extract(as.character(id(branchData)), args$readNamePattern)
}else if(!is.null(args$ids)){
ids <- read.delim(args$ids, header = TRUE)
ids <- str_extract(as.character(ids[,1]), args$readNamePattern)
}else{
stop("Please provided either a branch file or list of ids.")
}
## Make system folder of output ================================================
# NOTE(review): shelling out to `mkdir` is non-portable and unsafe for
# paths with spaces; dir.create(args$outputDir, recursive = TRUE) is the
# R-native equivalent. Error message typo: "Cannont" -> "Cannot".
if(!dir.exists(args$outputDir)){
system(paste0("mkdir ", args$outputDir))
if(!dir.exists(args$outputDir)) stop("Cannont create output directory.")
}
# Filter root sequence files ---------------------------------------------------
# Serial (cores == 0) or parallel path; each root file is read, subset to
# the reads named in `ids`, and written to outputDir.
# NOTE(review): the serial and parallel worker functions are copy-paste
# duplicates; extracting one shared filter function would avoid drift.
if(args$cores == 0){
null <- lapply(args$root, function(root, args, ids){
# Crude format sniffing from the file name
rootFormat <- ifelse(grepl("fastq", root), "fastq", "fasta")
if(rootFormat == "fastq"){
rootReads <- readFastq(root)
}else{
rootReads <- readFasta(root)
}
pander(paste0("\nReads from ", root, " loaded, totaling ", length(rootReads), " reads."))
# Normalize read names, then keep reads in the same order as `ids`
rootIDs <- str_extract(as.character(id(rootReads)), args$readNamePattern)
filteredReads <- rootReads[match(ids, rootIDs)]
pander(paste0("\nRoot sequences filtered to ", length(filteredReads), " reads."))
# Exactly one read-type identifier (R1/R2/I1/I2) must match the file name
rootType <- args$readTypes[sapply(args$readTypes, grepl, x = root)]
if(length(rootType) > 1 | length(rootType) == 0){
stop("Root seq files do not contain read type identifiers. Change file names or identifiers.")
}
fileName <- paste0(
"filteredData.", args$filePattern, ".", rootType, ".", rootFormat)
if(args$compress) fileName <- paste0(fileName, ".gz")
if(rootFormat == "fastq"){
writeFastq(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}else{
writeFasta(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}
}, args = args, ids = ids)
}else{
parLoaded <- suppressMessages(require(parallel))
if(!parLoaded) stop("R-parallel not loaded, check to see if package is installed.")
# Never ask for more workers than cores available or files to process
buster <- makeCluster(min(detectCores(), args$cores, length(args$root)))
null <- parLapply(buster, args$root, function(root, args, ids){
# Workers are fresh R sessions, so packages must be attached per worker
library(ShortRead)
library(stringr)
library(pander)
rootFormat <- ifelse(grepl("fastq", root), "fastq", "fasta")
if(rootFormat == "fastq"){
rootReads <- readFastq(root)
}else{
rootReads <- readFasta(root)
}
pander(paste0("\nReads from ", root, " loaded, totaling ", length(rootReads), " reads."))
rootIDs <- str_extract(as.character(id(rootReads)), args$readNamePattern)
filteredReads <- rootReads[match(ids, rootIDs)]
pander(paste0("\nRoot sequences filtered to ", length(filteredReads), " reads."))
rootType <- args$readTypes[sapply(args$readTypes, grepl, x = root)]
if(length(rootType) > 1 | length(rootType) == 0){
stop("Root seq files do not contain read type identifiers. Change file names or identifiers.")
}
fileName <- paste0(
"filteredData.", args$filePattern, ".", rootType, ".", rootFormat)
if(args$compress) fileName <- paste0(fileName, ".gz")
if(rootFormat == "fastq"){
writeFastq(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}else{
writeFasta(
filteredReads,
file = file.path(args$outputDir, fileName),
compress = args$compress)
}
}, args = args, ids = ids)
stopCluster(buster)
}
|
## Run all scheduled (conditional) independence tests on every data set in
## D and return the pooled results (plus the FCI skeleton / ASP file path
## when an "fci" schedule is used).
test_indeps <- function(D, testConfig, verbose=0) {
# Function to compute all the independence tests from the different datasets in D.
# - D : list of data matrices, one for each experimental setting. Columns are variables and each row is a sample.
# - testConfig$test: type of test used to generate data, outputs a value between 0 and 1?
# -"classic" classic correlation test (p-value = 0.05)
# -"oracle" independence facts determined by Patrik's oracle
# -"oracle+" independence facts determined by Patrik's oracle, plus ancestral relations from perfect interventions from oracle
# -"BIC" BIC-based calculation
# -"bayes" integrating over the parameters introduced in the paper
# -"bayes+" integrating over the parameters introduced in the paper, plus ancestral relations from perfect interventions from oracle
# - testConfig$schedule: maximum conditioning set.
# -if a number T it means all tests up to intervention tests size T
# (can be Inf, default = 3)
# - testConfig$weight: how to determine the weights from the dependencies:
# -"log" take log of the probability
# -"constant" the most likely option gets weight 1
# -"simulate_greedy" changes the weights to 2**rank of the (in)dependence to simulate the greedy algorithm.
# - testConfig$p: for bayes and BIC tests the apriori probability of
# -for classic test using algorithms the p-value threshold
# -for BIC-based score based learning the prior parameter
# - testConfig$alpha: for bayes test the eq. sample size
# - testConfig$conditioning_vars: NULL by default, otherwise it means that instead of conditioning on all vars, we use only some.
# The second option shouldn't be used yet, because it requires a bit more work for making sure we encode the sets correctly in ASP.
schedule <- testConfig$schedule
test <- testConfig$test
if (verbose) {
cat(" - Conducting independence tests: schedule/cmax=", schedule,", test=", test,".\n",sep='')
}
tested_independences <- list()
test_data<-list()
skel <- NULL
jindex <- 0
for (data in D) {
# Number of observed variables; fall back to the model graph for
# oracle-style data sets that carry no data matrix.
n <- ncol(data$data)
if (is.null(n)) {
n <- ncol(data$M$G)
}
# Preparing for writing indep constraints.
jindex<-jindex+1
# jset encodes the intervened-variable set as a single integer
test_data$jset <- bin.to.dec(rev(1*(data$e==1)))
test_data$J <- which(data$e==1)
test_data$names <- colnames(data$data)
test_data$indPath <- NULL
# Putting in the test data as it should be.
# NOTE(review): an unrecognized testConfig$test falls through silently,
# leaving test_function from the previous loop iteration (or unset on
# the first one); an explicit `else stop(...)` would be safer.
if (test == "classic") {
test_data$Cx<-cov(data$data)
test_data$N<-data$N
test_data$p_threshold<-testConfig$p
test_function<-test.classic
} else if (test == "oracle") {
test_data$M<-data$M #should take out the Js here
test_data$N<-Inf
# Dummy threshold, mostly used for FCI schedule.
test_data$p_threshold<-0.5 # independent vars have p-value 1, dependent 0.
test_function<-test.oracle
# Delete all the edges with intervened vars.
test_data$M$G[test_data$J,]<-0
test_data$M$Ge[test_data$J,]<-0
test_data$M$Ge[,test_data$J]<-0
} else if (test == "BIC") {
test_data$X<-data$data
test_data$p_threshold<-testConfig$p
test_function<-test.BIC
} else if (test == "bayes") {
test_data$X<-data$data
test_data$p_threshold<-testConfig$p # prior probability of ind.
test_function<-test.bayes
test_data$alpha<-testConfig$alpha # eq. sample size for the prior
test_data$discrete<-testConfig$discrete
} else if (test == "logp") {
test_data$Cx<-cov(data$data)
test_data$N<-data$N
test_data$p_threshold<-testConfig$p # significance level.
test_function<-test.logp
}
# FCI-style schedule: let pcalg::skeleton drive which tests are run,
# writing the (in)dependence facts to an ASP file as a side effect.
if (any(grepl("fci", schedule)> 0) ) {
test_data$test_function <- test_function
test_data$tested_independences <- list()
test_data$indPath <- file.path(testConfig$currentDir, paste("fci_indeps.ind", sep=""))
test_data$n <- n
# Optional second schedule element caps the conditioning-set size
if (length(schedule) > 1){
m.max = as.numeric(schedule[2])
} else {
m.max = Inf
}
skel <- pcalg::skeleton(suffStat=test_data, indepTest=test.wrapper, alpha=testConfig$alpha, labels = as.character(seq_len(n)),
fixedGaps = NULL, fixedEdges = NULL,
NAdelete = TRUE, m.max = m.max, verbose = FALSE, method = "stable")
# Deduplicate the facts, then rebuild the file with the ASP header.
# NOTE(review): `cat | sort -u` shell-out is POSIX-only; consider doing
# the dedup in R for portability.
system(paste("cat ", test_data$indPath, " | sort -u > ", test_data$indPath, ".sorted.txt", sep=""))
indFile <- file(test_data$indPath, "w")
cat('node(1..', n, ').\n', sep='', file = indFile)
cat('%independences and dependences\n', file = indFile)
close(indFile)
system(paste("cat ", test_data$indPath, ".sorted.txt >> ", test_data$indPath, sep=""))
parsed_indeps <- parse_asp_indeps(test_data$indPath)
tested_independences <- parsed_indeps$tested_independences
} else {
# Exhaustive schedule: test every pair against every conditioning set
# up to size `schedule`.
tested_independences <- test_indeps.loop(test_function, test_data, maxcset=schedule, n=n,
tested_independences=tested_independences, conditioning_vars=testConfig$conditioning_vars)
}
}
list(tested_independences=tested_independences, indPath=test_data$indPath, skel=skel)
}
# If there is another strategy for getting conditional independences instead of all possible subsets of a given size,
# it is just necessary to write a similar function to the one underneath.
## Run every pairwise (conditional) independence test for one data set.
##
## Iterates conditioning-set sizes from 0 up to `maxcset` (simple tests
## first); for each unordered variable pair (i, j) it delegates to
## test_indeps.parallel.loop(), which enumerates all conditioning sets of
## the current size. Every individual test result is appended to (and
## returned in) `tested_independences`.
##
## Fix: removed a leftover `if (i==1) browser()` debugging call, which
## dropped every interactive run into the debugger on the first variable;
## the dead commented-out foreach variant was removed as well.
test_indeps.loop <- function(test_function, test_data, maxcset=Inf, n, tested_independences, conditioning_vars=NULL) {
  for (csetsize in index(0, maxcset)) { # go from simpler to more complicated tests
    for (i in 1:(n - 1)) {
      tested_independences_j <- list()
      for (j in (i + 1):n) {
        t <- test_indeps.parallel.loop(test_function, test_data, n, i, j, csetsize, conditioning_vars)
        tested_independences_j[[length(tested_independences_j) + 1]] <- t
      } # for j
      # Flatten: each pair contributes one list of results, one entry per
      # conditioning set of the current size.
      for (test_result_list in tested_independences_j) {
        for (test_result in test_result_list) {
          tested_independences[[length(tested_independences) + 1]] <- test_result
        }
      }
    } # for i
  } # for csetsize
  tested_independences
}
## Test one variable pair (i, j) against every conditioning set of size
## `csetsize`, returning a list of per-set test results. Conditioning-set
## candidates are enumerated as colexicographic 0/1 vectors via
## next_colex_comb(); sets containing i or j themselves are skipped.
test_indeps.parallel.loop <- function(test_function, test_data, n, i, j, csetsize, conditioning_vars=NULL) {
#start with empty set
# The candidate pool is all n variables, or an explicit subset
if (is.null(conditioning_vars)) {
csetvec <- rep(0, n)
} else {
csetvec <- rep(0, length(conditioning_vars))
}
csetvec[index(1,csetsize)]<-1
tested_independences <- list()
# next_colex_comb() returns NA when the enumeration is exhausted
while ( !any(is.na(csetvec) ) ) {
if (is.null(conditioning_vars)) {
runTest <- csetvec[i]==0 && csetvec[j] == 0
cond_vars <- which(csetvec==1)
# Integer encoding of the conditioning set (for the ASP encoding)
cset<-bin.to.dec(rev(csetvec))
} else {
cond_vars <- conditioning_vars[which(csetvec==1)]
runTest <- !(i %in% cond_vars || j %in% cond_vars)
cset <- rep(0, n)
cset[cond_vars] <- 1
cset<-bin.to.dec(rev(cset))
}
if (runTest) { #only if neither x and y are cond.
# NOTE(review): unconditional debug print; gate on a verbose flag?
cat(i, " ", j , "|", cond_vars, "\n")
#calling the test function
test_result<-test_function(c(i,j), cond_vars, test_data)
#put some parameters right
test_result$J<-test_data$J
test_result$jset<-test_data$jset
test_result$cset<-cset
# M = marginalized variables: everything outside {i, j} and the cond. set
test_result$M<-setdiff((1:n),c(test_result$vars,test_result$C))
test_result$mset <- getm( test_result$vars, test_result$C, n=n)
#cat(paste(test_result$M,collapse=','),'=',test_result$mset,'\n')
#adding the test result also to tested_independences vector
tested_independences[[length(tested_independences) + 1]] <- test_result
} #if x and y are not in the conditioning set
#consider next csetvec given by the following function
csetvec<-next_colex_comb(csetvec)
} #while csetvec != NA
tested_independences
}
test_indeps._test1 <- function() {
# Smoke test: Bayes independence test on simulated data set 1,
# with conditioning sets up to size 2 (schedule=2).
MD <- pipeline.simulate_data._test1()
testConfig <- list(test="bayes", schedule=2, p=0.5, alpha=1.15, weight="log", conditioning_vars=NULL)
test_indeps(D=MD$D, testConfig=testConfig, verbose=0)
}
test_indeps._test2 <- function() {
# Smoke test: Bayes independence test on simulated data set 2,
# with conditioning sets up to size 2 (schedule=2).
MD <- pipeline.simulate_data._test2()
testConfig <- list(test="bayes", schedule=2, p=0.5, alpha=1.15, weight="log", conditioning_vars=NULL)
test_indeps(D=MD$D, testConfig=testConfig, verbose=0)
}
test_indeps._test3 <- function() {
MD <- pipeline.simulate_data._test3()
testConfig <- list(test="oracle", schedule=2, p=0.5, alpha=1.15, weight="log", conditioning_vars=NULL)
test_indeps(D=MD$D, testConfig=testConfig, verbose=0)
} | /R/test_indeps/test_indeps.R | permissive | regenworm/aci | R | false | false | 8,957 | r | test_indeps <- function(D, testConfig, verbose=0) {
# Function to compute all the independence tests from the different datasets in D.
# - D : list of data matrices, one for each experimental setting. Columns are variables and each row is a sample.
# - testConfig$test: type of test used to generate data, outputs a value between 0 and 1?
# -"classic" classic correlation test (p-value = 0.05)
# -"oracle" independence facts determined by Patrik's oracle
# -"oracle+" independence facts determined by Patrik's oracle, plus ancestral relations from perfect interventions from oracle
# -"BIC" BIC-based calculation
# -"bayes" integrating over the parameters introduced in the paper
# -"bayes+" integrating over the parameters introduced in the paper, plus ancestral relations from perfect interventions from oracle
# - testConfig$schedule: maximum conditioning set.
# -if a number T it means all tests up to intervention tests size T
# (can be Inf, default = 3)
# - testConfig$weight: how to determine the weights from the dependencies:
# -"log" take log of the probability
# -"constant" the most likely option gets weight 1
# -"simulate_greedy" changes the weights to 2**rank of the (in)dependence to simulate the greedy algorithm.
# - testConfig$p: for bayes and BIC tests the apriori probability of
# -for classic test using algorithms the p-value threshold
# -for BIC-based score based learning the prior parameter
# - testConfig$alpha: for bayes test the eq. sample size
# - testConfig$conditioning_vars: NULL by default, otherwise it means that instead of conditioning on all vars, we use only some.
# The second option shouldn't be used yet, because it requires a bit more work for making sure we encode the sets correctly in ASP.
schedule <- testConfig$schedule
test <- testConfig$test
if (verbose) {
cat(" - Conducting independence tests: schedule/cmax=", schedule,", test=", test,".\n",sep='')
}
tested_independences <- list()
test_data<-list()
skel <- NULL
jindex <- 0
for (data in D) {
n <- ncol(data$data)
if (is.null(n)) {
n <- ncol(data$M$G)
}
# Preparing for writing indep constraints.
jindex<-jindex+1
test_data$jset <- bin.to.dec(rev(1*(data$e==1)))
test_data$J <- which(data$e==1)
test_data$names <- colnames(data$data)
test_data$indPath <- NULL
# Putting in the test data as it should be.
if (test == "classic") {
test_data$Cx<-cov(data$data)
test_data$N<-data$N
test_data$p_threshold<-testConfig$p
test_function<-test.classic
} else if (test == "oracle") {
test_data$M<-data$M #should take out the Js here
test_data$N<-Inf
# Dummy threshold, mostly used for FCI schedule.
test_data$p_threshold<-0.5 # independent vars have p-value 1, dependent 0.
test_function<-test.oracle
# Delete all the edges with intervened vars.
test_data$M$G[test_data$J,]<-0
test_data$M$Ge[test_data$J,]<-0
test_data$M$Ge[,test_data$J]<-0
} else if (test == "BIC") {
test_data$X<-data$data
test_data$p_threshold<-testConfig$p
test_function<-test.BIC
} else if (test == "bayes") {
test_data$X<-data$data
test_data$p_threshold<-testConfig$p # prior probability of ind.
test_function<-test.bayes
test_data$alpha<-testConfig$alpha # eq. sample size for the prior
test_data$discrete<-testConfig$discrete
} else if (test == "logp") {
test_data$Cx<-cov(data$data)
test_data$N<-data$N
test_data$p_threshold<-testConfig$p # significance level.
test_function<-test.logp
}
if (any(grepl("fci", schedule)> 0) ) {
test_data$test_function <- test_function
test_data$tested_independences <- list()
test_data$indPath <- file.path(testConfig$currentDir, paste("fci_indeps.ind", sep=""))
test_data$n <- n
if (length(schedule) > 1){
m.max = as.numeric(schedule[2])
} else {
m.max = Inf
}
skel <- pcalg::skeleton(suffStat=test_data, indepTest=test.wrapper, alpha=testConfig$alpha, labels = as.character(seq_len(n)),
fixedGaps = NULL, fixedEdges = NULL,
NAdelete = TRUE, m.max = m.max, verbose = FALSE, method = "stable")
system(paste("cat ", test_data$indPath, " | sort -u > ", test_data$indPath, ".sorted.txt", sep=""))
indFile <- file(test_data$indPath, "w")
cat('node(1..', n, ').\n', sep='', file = indFile)
cat('%independences and dependences\n', file = indFile)
close(indFile)
system(paste("cat ", test_data$indPath, ".sorted.txt >> ", test_data$indPath, sep=""))
parsed_indeps <- parse_asp_indeps(test_data$indPath)
tested_independences <- parsed_indeps$tested_independences
} else {
tested_independences <- test_indeps.loop(test_function, test_data, maxcset=schedule, n=n,
tested_independences=tested_independences, conditioning_vars=testConfig$conditioning_vars)
}
}
list(tested_independences=tested_independences, indPath=test_data$indPath, skel=skel)
}
# If there is another strategy for getting conditional independences instead of all possible subsets of a given size,
# it is just necessary to write a similar function to the one underneath.
test_indeps.loop <- function(test_function, test_data, maxcset = Inf, n, tested_independences, conditioning_vars = NULL) {
  # Conduct all pairwise (conditional) independence tests for one data set.
  #
  # Args:
  #   test_function: function(vars, cond_vars, test_data) performing one test.
  #   test_data: list with the data and test configuration for this data set.
  #   maxcset: maximum conditioning-set size (may be Inf).
  #   n: number of variables.
  #   tested_independences: list of previously accumulated test results;
  #     new results from this data set are appended to it.
  #   conditioning_vars: optional subset of variables allowed in the
  #     conditioning sets (NULL means all variables).
  #
  # Returns:
  #   The 'tested_independences' list with all new test results appended.
  #
  # Fix: removed a leftover 'if (i==1) browser()' debugging breakpoint that
  # dropped every run into the interactive debugger on the first variable.
  for (csetsize in index(0, maxcset)) { # go from simpler to more complicated tests
    for (i in 1:(n - 1)) {
      tested_independences_j <- list()
      for (j in (i + 1):n) {
        t <- test_indeps.parallel.loop(test_function, test_data, n, i, j, csetsize, conditioning_vars)
        tested_independences_j[[length(tested_independences_j) + 1]] <- t
      } # for j
      # Flatten: each element of tested_independences_j is itself a list
      # of test results (one per conditioning set).
      for (test_result_list in tested_independences_j) {
        for (test_result in test_result_list) {
          tested_independences[[length(tested_independences) + 1]] <- test_result
        }
      }
    } # for i
  } # for csetsize
  tested_independences
}
test_indeps.parallel.loop <- function(test_function, test_data, n, i, j, csetsize, conditioning_vars=NULL) {
# Run all independence tests for the variable pair (i, j) over every
# conditioning set of size 'csetsize', enumerated in colexicographic
# order via next_colex_comb().
#
# Args:
#   test_function: function(vars, cond_vars, test_data) running one test.
#   test_data: data and configuration for the current data set.
#   n: number of variables.
#   i, j: the pair of variables being tested (i < j).
#   csetsize: size of the conditioning sets to enumerate.
#   conditioning_vars: optional subset of variables allowed in the
#     conditioning sets (NULL means all n variables).
#
# Returns:
#   A list of test-result objects, one per conditioning set that does
#   not contain i or j.
#start with empty set
if (is.null(conditioning_vars)) {
csetvec <- rep(0, n)
} else {
csetvec <- rep(0, length(conditioning_vars))
}
# Indicator vector of the current conditioning set: start from the
# first combination of size csetsize.
csetvec[index(1,csetsize)]<-1
tested_independences <- list()
# next_colex_comb() returns NA once all combinations are exhausted.
while ( !any(is.na(csetvec) ) ) {
if (is.null(conditioning_vars)) {
runTest <- csetvec[i]==0 && csetvec[j] == 0
cond_vars <- which(csetvec==1)
cset<-bin.to.dec(rev(csetvec))
} else {
# Map indicator positions back to actual variable indices.
cond_vars <- conditioning_vars[which(csetvec==1)]
runTest <- !(i %in% cond_vars || j %in% cond_vars)
cset <- rep(0, n)
cset[cond_vars] <- 1
cset<-bin.to.dec(rev(cset))
}
if (runTest) { #only if neither x and y are cond.
cat(i, " ", j , "|", cond_vars, "\n")
#calling the test function
test_result<-test_function(c(i,j), cond_vars, test_data)
#put some parameters right
test_result$J<-test_data$J
test_result$jset<-test_data$jset
test_result$cset<-cset
# M: variables that are neither tested nor conditioned on.
test_result$M<-setdiff((1:n),c(test_result$vars,test_result$C))
test_result$mset <- getm( test_result$vars, test_result$C, n=n)
#cat(paste(test_result$M,collapse=','),'=',test_result$mset,'\n')
#adding the test result also to tested_independences vector
tested_independences[[length(tested_independences) + 1]] <- test_result
} #if x and y are not in the conditioning set
#consider next csetvec given by the following function
csetvec<-next_colex_comb(csetvec)
} #while csetvec != NA
tested_independences
}
test_indeps._test1 <- function() {
# Smoke test: Bayes independence test on simulated data set 1,
# with conditioning sets up to size 2 (schedule=2).
MD <- pipeline.simulate_data._test1()
testConfig <- list(test="bayes", schedule=2, p=0.5, alpha=1.15, weight="log", conditioning_vars=NULL)
test_indeps(D=MD$D, testConfig=testConfig, verbose=0)
}
test_indeps._test2 <- function() {
# Smoke test: Bayes independence test on simulated data set 2.
MD <- pipeline.simulate_data._test2()
testConfig <- list(test="bayes", schedule=2, p=0.5, alpha=1.15, weight="log", conditioning_vars=NULL)
test_indeps(D=MD$D, testConfig=testConfig, verbose=0)
}
test_indeps._test3 <- function() {
# Smoke test: oracle-based independence facts on simulated data set 3.
MD <- pipeline.simulate_data._test3()
testConfig <- list(test="oracle", schedule=2, p=0.5, alpha=1.15, weight="log", conditioning_vars=NULL)
test_indeps(D=MD$D, testConfig=testConfig, verbose=0)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimsum__error_model_qqplot.R
\name{dimsum__error_model_qqplot}
\alias{dimsum__error_model_qqplot}
\title{dimsum__error_model_qqplot}
\usage{
dimsum__error_model_qqplot(
dimsum_meta,
input_dt,
all_reps,
norm_dt,
error_dt,
report_outpath = NULL
)
}
\arguments{
\item{dimsum_meta}{an experiment metadata object (required)}
\item{input_dt}{input data.table (required)}
\item{all_reps}{list of replicates to retain (required)}
\item{norm_dt}{data.table of normalisation parameters (required)}
\item{error_dt}{data.table of error model parameters (required)}
\item{report_outpath}{fitness report output path (default: NULL)}
}
\value{
Nothing
}
\description{
Perform leave one out cross validation on replicates and generate QQ plot
}
| /man/dimsum__error_model_qqplot.Rd | permissive | lehner-lab/DiMSum | R | false | true | 823 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimsum__error_model_qqplot.R
\name{dimsum__error_model_qqplot}
\alias{dimsum__error_model_qqplot}
\title{dimsum__error_model_qqplot}
\usage{
dimsum__error_model_qqplot(
dimsum_meta,
input_dt,
all_reps,
norm_dt,
error_dt,
report_outpath = NULL
)
}
\arguments{
\item{dimsum_meta}{an experiment metadata object (required)}
\item{input_dt}{input data.table (required)}
\item{all_reps}{list of replicates to retain (required)}
\item{norm_dt}{data.table of normalisation parameters (required)}
\item{error_dt}{data.table of error model parameters (required)}
\item{report_outpath}{fitness report output path (default: NULL)}
}
\value{
Nothing
}
\description{
Perform leave one out cross validation on replicates and generate QQ plot
}
|
library(demogR)
### Name: odiag
### Title: odiag
### Aliases: odiag
### Keywords: array algebra
### ** Examples
## Construct a matrix from a vector
## random survival probabilities with mean 0.9 and variance 0.0082
y <- rbeta(4,9,1)
# Place y on the off-diagonal at offset -1 (subdiagonal) of a new matrix.
A <- odiag(y,-1)
## add fertilities
# NOTE(review): 'F' shadows the built-in alias for FALSE in this session.
F <- c(0,rep(1,4))
A[1,] <- F
## Extract a vector from a matrix
A <- matrix(rnorm(25), nr=5, nc=5)
# Return the off-diagonal at offset 2 of A as a vector.
odiag(A,2)
| /data/genthat_extracted_code/demogR/examples/odiag.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 390 | r | library(demogR)
### Name: odiag
### Title: odiag
### Aliases: odiag
### Keywords: array algebra
### ** Examples
## Construct a matrix from a vector
## random survival probabilities with mean 0.9 and variance 0.0082
y <- rbeta(4,9,1)
# Place y on the off-diagonal at offset -1 (subdiagonal) of a new matrix.
A <- odiag(y,-1)
## add fertilities
# NOTE(review): 'F' shadows the built-in alias for FALSE in this session.
F <- c(0,rep(1,4))
A[1,] <- F
## Extract a vector from a matrix
A <- matrix(rnorm(25), nr=5, nc=5)
# Return the off-diagonal at offset 2 of A as a vector.
odiag(A,2)
|
# add predictor data to the EURO 2016 matches
# Tuomo Nieminen 2016
# Load project helpers (provides make_teams(), among others).
source("em_functions.R")
# Match schedule: date, time and the two teams of every game.
matches <- read.csv2("data/matches2016.csv",header=T)
colnames(matches) <- c("date","time","hometeam","awayteam")
teams2016 <- unique(matches$hometeam)
# Add uefa data
# UEFA coefficients: keep only team name and coefficient columns.
uefa_2016 <- read.table("data/em2016_uefa.txt",header=T,sep="\t")[,1:2]
colnames(uefa_2016) <- c("team","uefa")
# Duplicate the coefficient table with "home"/"away" prefixed column
# names so it can be merged once for each side of a match.
home_uefa <- uefa_2016
away_uefa <- uefa_2016
colnames(home_uefa) <- paste0("home",names(uefa_2016))
colnames(away_uefa) <- paste0("away",names(uefa_2016))
matches1 <- merge(home_uefa,matches, by=("hometeam"),all.x=T)
matches2 <- merge(away_uefa,matches1,by="awayteam",all.x=T)
# Add shots data
# Average shots per game from the qualifiers (columns 1 and 3 of the file).
shots <- read.table("data/q_shots2016.txt", header=T, sep="\t")[,c(1,3)]
colnames(shots) <- c("team","avrg_shots")
shots$team <- make_teams(shots$team)
#exclude teams that didn't qualify
shots <- shots[shots$team %in% teams2016,]
# shots <- rbind(shots,data.frame(team="France",avrg_shots=NA))
# impute the number of shots for france (who didn't play qualifiers)
# Fit a linear model of shots on UEFA coefficient using all other teams.
# NOTE(review): assumes 'shots' and 'uef' contain the same teams in the
# same order after sorting -- verify against the input files.
uef <- uefa_2016[order(uefa_2016$team),]
uef <- uef[uef$team != "France",]
shots <- shots[order(shots$team),]
france_uef <- c(1,uefa_2016[uefa_2016$team=="France",]$uefa)
shotmodel <- lm(shots$avrg_shots~uef$uefa)
# Predicted shots for France: intercept + slope * UEFA coefficient.
france_shots <- (france_uef%*%shotmodel$coefficients)[1]
france <- data.frame(team="France",avrg_shots=france_shots)
shots <- rbind(shots,france)
# predictors <- merge(uefa_2016, shots, by="team")
homeshots <- shots
awayshots <- shots
colnames(homeshots) <- paste0("home",names(shots))
colnames(awayshots) <- paste0("away", names(shots))
matches3 <- merge(homeshots, matches2, by="hometeam",all.x=T)
matches4<- merge(awayshots, matches3, by="awayteam", all.x=T)
# Ratio predictors: home value relative to away value.
matches4$shot_ratio <- matches4$homeavrg_shots/matches4$awayavrg_shots
matches4$uefa_ratio <- matches4$homeuefa/matches4$awayuefa
names(matches4)
#add group info
# Tournament groups: one column per group, teams as values.
groups <- data.frame("A"=c("France","Romania","Albania","Switzerland"),
"B"=c("England","Russia","Slovakia","Wales"),
"C"=c("Germany","Northern Ireland","Poland","Ukraine"),
"D"=c("Croatia","Czech Republic","Spain","Turkey"),
"E"=c("Belgium","Italy","Republic of Ireland","Sweden"),
"F"=c("Austria","Hungary","Iceland","Portugal"))
# Return the EURO 2016 group letter ("A".."F") of the given country.
#
# Looks 'country' up in the global 'groups' data frame (one column per
# group, teams as values) and returns the matching column name.
# Returns NA when the country is not found.
find_group <- function(country) {
  # arr.ind = TRUE yields a (row, col) pair; element 2 is the column
  # index. TRUE is used instead of the reassignable alias 'T'.
  names(groups)[which(groups == country, arr.ind = TRUE)[2]]
}
# Assign each home team its tournament group and persist the final table.
matches4$group <- sapply(matches4$hometeam,find_group)
save(file = "data/matches2016.Rda", matches4)
| /data/create_matches2016.R | no_license | TuomoNieminen/EURO2016 | R | false | false | 2,513 | r | # add predictor data to the EURO 2016 matches
# Tuomo Nieminen 2016
source("em_functions.R")
matches <- read.csv2("data/matches2016.csv",header=T)
colnames(matches) <- c("date","time","hometeam","awayteam")
teams2016 <- unique(matches$hometeam)
# Add uefa data
uefa_2016 <- read.table("data/em2016_uefa.txt",header=T,sep="\t")[,1:2]
colnames(uefa_2016) <- c("team","uefa")
home_uefa <- uefa_2016
away_uefa <- uefa_2016
colnames(home_uefa) <- paste0("home",names(uefa_2016))
colnames(away_uefa) <- paste0("away",names(uefa_2016))
matches1 <- merge(home_uefa,matches, by=("hometeam"),all.x=T)
matches2 <- merge(away_uefa,matches1,by="awayteam",all.x=T)
# Add shots data
shots <- read.table("data/q_shots2016.txt", header=T, sep="\t")[,c(1,3)]
colnames(shots) <- c("team","avrg_shots")
shots$team <- make_teams(shots$team)
#exclude teams that didn't qualify
shots <- shots[shots$team %in% teams2016,]
# shots <- rbind(shots,data.frame(team="France",avrg_shots=NA))
# impute the number of shots for france (who didn't play qualifiers)
uef <- uefa_2016[order(uefa_2016$team),]
uef <- uef[uef$team != "France",]
shots <- shots[order(shots$team),]
france_uef <- c(1,uefa_2016[uefa_2016$team=="France",]$uefa)
shotmodel <- lm(shots$avrg_shots~uef$uefa)
france_shots <- (france_uef%*%shotmodel$coefficients)[1]
france <- data.frame(team="France",avrg_shots=france_shots)
shots <- rbind(shots,france)
# predictors <- merge(uefa_2016, shots, by="team")
homeshots <- shots
awayshots <- shots
colnames(homeshots) <- paste0("home",names(shots))
colnames(awayshots) <- paste0("away", names(shots))
matches3 <- merge(homeshots, matches2, by="hometeam",all.x=T)
matches4<- merge(awayshots, matches3, by="awayteam", all.x=T)
matches4$shot_ratio <- matches4$homeavrg_shots/matches4$awayavrg_shots
matches4$uefa_ratio <- matches4$homeuefa/matches4$awayuefa
names(matches4)
#add group info
groups <- data.frame("A"=c("France","Romania","Albania","Switzerland"),
"B"=c("England","Russia","Slovakia","Wales"),
"C"=c("Germany","Northern Ireland","Poland","Ukraine"),
"D"=c("Croatia","Czech Republic","Spain","Turkey"),
"E"=c("Belgium","Italy","Republic of Ireland","Sweden"),
"F"=c("Austria","Hungary","Iceland","Portugal"))
# Return the EURO 2016 group letter ("A".."F") of the given country.
#
# Looks 'country' up in the global 'groups' data frame (one column per
# group, teams as values) and returns the matching column name.
# Returns NA when the country is not found.
find_group <- function(country) {
  # arr.ind = TRUE yields a (row, col) pair; element 2 is the column
  # index. TRUE is used instead of the reassignable alias 'T'.
  names(groups)[which(groups == country, arr.ind = TRUE)[2]]
}
matches4$group <- sapply(matches4$hometeam,find_group)
save(file = "data/matches2016.Rda", matches4)
|
##makeCacheMatrix is a function that creates
## a special object that stores a matriz and
## catches its inverse
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the stored
## matrix 'x' and its cached inverse:
##   set(y)        -- replace the stored matrix and drop the cached inverse
##   get()         -- return the stored matrix
##   setInverse(i) -- store the inverse in the cache
##   getInverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Storing a new matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve will get the inverse of the matrix created in the previous
## function (makeCacheMatrix)
## Compute (or retrieve) the inverse of the special "matrix" object
## created by makeCacheMatrix().
##
## If the inverse has already been cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and
## returned. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
## a special object that stores a matriz and
## catches its inverse
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the stored
## matrix 'x' and its cached inverse:
##   set(y)        -- replace the stored matrix and drop the cached inverse
##   get()         -- return the stored matrix
##   setInverse(i) -- store the inverse in the cache
##   getInverse()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Storing a new matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve will get the inverse of the matrix created in the previous
## function (makeCacheMatrix)
## Compute (or retrieve) the inverse of the special "matrix" object
## created by makeCacheMatrix().
##
## If the inverse has already been cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and
## returned. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
# 1 - gen.gc -> garde article
# 2 - gen.gcplus -> garde article
# 3 - gen.completeness -> garde article
# 4 - gen.completenessVar -> garde article
# 5 - gen.branching -> garde article
# 6 - gen.children -> garde article
# 7 - gen.meangendepth -> garde article
# 8 - gen.entropyMeanVar -> garde article
# 9 - gen.f -> garde article
# 11 - gen.fmean -> garde article
# 12 - gen.founder -> garde article
# 13 - gen.half.founder -> garde article
# 14 - gen.sibship -> garde article
# 16 - gen.genealogy -> garde article
# 17 - gen.lineages -> garde article
# 17-1 gen.genout -> garde article
# 19 - gen.implex -> garde article
# 20 - gen.implexVar -> garde article
# 21 - gen.max -> garde article
# 23 - gen.min -> garde article
# 24 - gen.mean -> garde article
# 25 - gen.nochildren -> garde article
# 26 - gen.nowomen -> garde article
# 27 - gen.nomen -> garde article
# 28 - gen.noind -> garde article
# 32 - gen.occ -> garde article
# 33 - gen.parent -> garde article
# 34 - gen.phi -> garde article
# 35 - gen.phiOver -> garde article
# 37 - gen.phiMean -> garde article
# 41 - gen.depth -> garde article
# 42 - gen.pro -> garde article
# 43 - gen.rec -> garde article
# 44 - gen.meangendepthVar -> garde article
#gen.gc = function(gen, pro = 0, ancestors = 0, typeCG = "IND", named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
#
# retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, print.it = print.it, named = named, typeCG = typeCG,
# check = c(3, 5, 11, 34, 18, 10))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# ancestors = retour$ancestors
# typeCG = retour$typeCG
## print.it = retour$print.it
# named = retour$named
#
# if(typeCG == "IND") {
# if(is(pro, "GLgroup")) {
# CG = GLPrivCG(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, print.it = FALSE, named = named)
# return(GLPrivCGgroup(CG, grppro = pro))
# }
# else return(GLPrivCG(gen = gen, pro = pro, ancestors = ancestors, print.it = FALSE, named = named))
# }
# else {
# if(is(pro, "GLgroup")) {
# CG = GLPrivCG(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, print.it = FALSE, named = named)
# CG = GLPrivCGgroup(CG, grppro = pro)
# if(typeCG == "MEAN")
# return(GLPrivCGmoyen(CG = CG, named = named))
# if(typeCG == "CUMUL")
# stop("CUMUL is not available per group")
# if(typeCG == "TOTAL")
# return(GLPrivCGtotal(CG = CG, named = named))
# if(typeCG == "PRODUCT")
# return(GLPrivCGproduit(CG = CG, named = named))
# }
# else {
# CG = GLPrivCG(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, print.it = FALSE, named =
# named)
# if(typeCG == "MEAN")
# return(GLPrivCGmoyen(CG = CG, named = named))
# if(typeCG == "CUMUL")
# return(GLPrivCGcumul(CG = CG, named = named))
# if(typeCG == "TOTAL")
# return(GLPrivCGtotal(CG = CG, named = named))
# if(typeCG == "PRODUCT")
# return(GLPrivCGproduit(CG = CG, named = named))
# }
# }
#}
gen.gc = function(gen, pro = 0, ancestors = 0, vctProb = c(0.5, 0.5, 0.5, 0.5), typeCG = "IND") #, check = 1)#named = T,
{
# Dispatch wrapper computing the "CG" statistic (presumably the genetic
# contribution of ancestors to probands -- TODO confirm against the
# GLPrivCG* helper definitions) and aggregating it as requested.
#
# Args:
#   gen: genealogy object; validated/normalised by gen.detectionErreur.
#   pro: proband ids (0 = let gen.detectionErreur pick defaults), or a
#        GLgroup object for per-group results.
#   ancestors: ancestor ids (0 = defaults via gen.detectionErreur).
#   vctProb: vector of four probabilities forwarded to GLPrivCGPLUS
#            (default 0.5 each; exact semantics are project-defined).
#   typeCG: "IND" for per-individual values, or one of "MEAN", "CUMUL",
#           "TOTAL", "PRODUCT" for the corresponding aggregation.
#           "CUMUL" is not available when pro is a GLgroup.
#
# Returns:
#   The value produced by the matching GLPrivCG* helper.
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# Project-level input validation and normalisation.
retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, print.it = FALSE, named = TRUE, typeCG = typeCG, check = c(3, 5, 11, 34, 18, 10))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
ancestors = retour$ancestors
typeCG = retour$typeCG
# print.it = retour$print.it
named = retour$named
if(typeCG == "IND") {
if(is(pro, "GLgroup")) {
CG = GLPrivCGPLUS(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named)
return(GLPrivCGgroup(CG, grppro = pro))
}
# NOTE(review): vctProb is passed positionally here while the other
# arguments are named -- confirm it binds to the vctProb formal of
# GLPrivCGPLUS.
else return(GLPrivCGPLUS(gen = gen, pro = pro, ancestors = ancestors, vctProb, print.it = FALSE, named = named))
}
else {
if(is(pro, "GLgroup")) {
# Per-group aggregation: compute individual CG first, regroup, then
# aggregate according to typeCG.
CG = GLPrivCGPLUS(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named)
CG = GLPrivCGgroup(CG, grppro = pro)
if(typeCG == "MEAN")
return(GLPrivCGmoyen(CG = CG, named = named))
if(typeCG == "CUMUL")
stop("CUMUL is not available per group")
if(typeCG == "TOTAL")
return(GLPrivCGtotal(CG = CG, named = named))
if(typeCG == "PRODUCT")
return(GLPrivCGproduit(CG = CG, named = named))
}
else {
# Ungrouped aggregation over all probands.
CG = GLPrivCGPLUS(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named)
if(typeCG == "MEAN")
return(GLPrivCGmoyen(CG = CG, named = named))
if(typeCG == "CUMUL")
return(GLPrivCGcumul(CG = CG, named = named))
if(typeCG == "TOTAL")
return(GLPrivCGtotal(CG = CG, named = named))
if(typeCG == "PRODUCT")
return(GLPrivCGproduit(CG = CG, named = named))
}
}
}
gen.completeness = function(gen, pro = 0, genNo = -1, type = "MEAN", ...)#, check = 1)#named = T,
{
# Compute the genealogical completeness index per generation, via the
# project helper GLPriv.completeness3V.
#
# Args:
#   gen: genealogy object, or ind vector (then 'father' and 'mother'
#        must be supplied by name in ...).
#   pro: proband ids (0 = defaults via gen.detectionErreur).
#   genNo: generation numbers to report (-1 = default handled by
#          gen.detectionErreur).
#   type: "IND" for one column per proband; "MEAN" for a single value
#         pooled over all probands.
#
# Returns:
#   For "IND": a data.frame with one row per generation (row names) and
#   one column per proband ("Ind <id>"). For "MEAN": whatever
#   GLPriv.completeness3V returns for the pooled probands.
#Parameter validation
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
if( type != "IND" )# | type != "MOYSUJETS"
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
#if(check == 1) {
retour <- gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, typecomp = type, named = TRUE, check = c(1, 5, 16, 171, 10))
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen <- retour$gen
pro <- retour$pro
genNo <- retour$genNo
type <- retour$typecomp
named <- retour$named
#}
#Compute completeness separately for each proband
if(type == "IND"){ # | type == "MOYSUJETS") {
tableau = sapply(pro, function(x, gen, genNo, named)
{
GLPriv.completeness3V(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named)
}
, gen = gen, genNo = genNo, named = named)
# A single generation yields a plain vector; coerce to a 1-row matrix
# so the dimnames assignments below are valid.
if(is.null(dim(tableau))) tableau <- t(as.matrix(tableau))
# if(type == "MOYSUJETS")
# tableau <- data.frame(apply(tableau, 1, mean))
#Take the mean
#if(named == T)
if(type == "IND")
dimnames(tableau)[[2]] <- as.character(paste("Ind", as.character(pro)))
dimnames(tableau)[[1]] <- as.character(genNo)
#Generation numbers used as row names
return(data.frame(tableau))
}
else if(type == "MEAN") {
#For MEAN, compute completeness with all probands pooled at once
return(GLPriv.completeness3V(gen$ind, gen$father, gen$mother, pro = pro, genNo = genNo, named = named))
}
}
gen.completenessVar = function(gen, pro = 0, genNo = -1, ...) #, check = 1, ...)#named = T,
{
# Compute, for each generation, the variance across probands of the
# completeness index produced by GLPriv.completeness3V.
#
# Args:
#   gen: genealogy object, or ind vector (then 'father' and 'mother'
#        must be supplied by name in ...).
#   pro: proband ids (0 = defaults via gen.detectionErreur).
#   genNo: generation numbers to report (-1 = default).
#
# Returns:
#   A one-column data.frame "completeness.var" with one row per
#   generation (generation numbers as row names).
#Parameter validation
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# if(bcorrFactor == T)
# if(sum(N) == 0) #Le facteur de correction doit avoir une valeur numerique N taille de la population
# stop("Correction factor must have a numerical population size value N")
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, named = TRUE, check = c(1, 5, 16, 10))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
genNo = retour$genNo
named = retour$named
#The correction-factor machinery below is disabled; the factor is
#fixed at 1 (see commented-out code).
# if(typeCorpus == "ECH") corrFactor = length(pro)/(length(pro) - 1) else if(typeCorpus == "POP")
# corrFactor = 1
# if(corrFactor == T)
# corrFactor = (corrFactor * (N - length(pro)))/N
corrFactor = 1
#Variance of the completeness index across probands, per generation
tab = sapply(pro, function(x, gen, genNo, named)
GLPriv.completeness3V(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named),
gen = gen, genNo = genNo, named = named)
# A single generation yields a plain vector; coerce to a 1-row matrix.
if(is.null(dim(tab))) tab <- t(as.matrix(tab))
tab = data.frame(apply(tab, 1, var) * corrFactor)
dimnames(tab)[[1]] <- as.character(genNo)
dimnames(tab)[[2]] <- "completeness.var"
return(tab)
}
gen.branching = function(gen, pro = 0, ancestors = gen.founder(gen), bflag = 0)#, check = 1)
{
# Extract ("branch") the sub-genealogy linking the given probands to the
# given ancestors, delegating the heavy lifting to the compiled routine
# "SPLUSebranche".
#
# Args:
#   gen: GLgen genealogy object.
#   pro: proband ids (0 = all probands of 'gen').
#   ancestors: ancestor ids (default: all founders of 'gen').
#   bflag: internal recursion flag; the bflag == 0 entry call first
#     branches with all probands to obtain the relevant founder set,
#     then branches again with the requested probands.
#
# Returns:
#   A validated GLgen object for the branched sub-genealogy. Stops when
#   every proband is a founder (nothing left to branch).
if(sum(as.numeric(pro)) == 0)
pro = gen.pro(gen)
if(bflag == 0) {
pro.segment = gen.pro(gen)
ancestors = gen.founder(gen.branching(gen, pro.segment, ancestors, bflag = 1))
}
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
#if(check == 1) {
retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, check = c(3, 36, 37))
if(retour$erreur)
stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
ancestors = retour$ancestors
#}
#Buffers needed to hold the result of the compiled (dll) routine
#print(paste("taille alloue:",length(gen@.Data)))
tmpgen <- integer(length(gen@.Data))
tmpNelem <- integer(1)
#print(".C(SPLUSebranche,.. commence")
.Call("SPLUSebranche", gen@.Data, pro, length(pro), ancestors, length(ancestors), tmpgen, tmpNelem, specialsok = T)
#print(".C(SPLUSebranche,.. fait:")
#print(paste(length(tmpgen),tmpgen[1],tmpgen[2],tmpgen[3] ))
#print(paste(length(gen@.Data),gen@.Data[1],gen@.Data[2],gen@.Data[3]))
# Truncate the buffer to the number of elements actually filled in.
length(tmpgen) <- tmpNelem
tmpNelem <- length(tmpgen)
#print(length(tmpgen))
ebranche = new("GLgen", .Data = tmpgen, Date = date())
#print("1")
ebranche.asc = gen.genout(ebranche)
sexeAbsent=FALSE
# Drop the sex column when it contains values other than 1/2/"H"/"F".
if(length(setdiff(unique(ebranche.asc[,"sex"]), c(1,2,"H","F")))>0)
{
diff = setdiff(unique(ebranche.asc[,"sex"]), c(1,2,"H","F"))
ebranche.asc=data.frame(ind=ebranche.asc$ind,father=ebranche.asc$father,mother=ebranche.asc$mother) #*****
sexeAbsent=TRUE
#warning(paste("la colonne \"sexe\" contient des valeurs non valide:",diff,"\n Elle ne sera pas consideree pour le reste des calculs."))
warning(paste("The \"sex\" column contains invalid values:",diff,
"\nThe column won't be considered for further calculations."))
}
#print("2")
#print(ebranche.asc[1,])
pro.ebranche = gen.pro(ebranche)
#print("3")
# Remove probands introduced by the branching that were not requested.
pro.enTrop = setdiff(pro.ebranche, pro)
#print(paste(length(pro.ebranche),length(pro)))
#print(pro.enTrop)
if(sum(as.numeric(pro.enTrop)) != 0) {
ebranche.asc = ebranche.asc[(!(ebranche.asc$ind %in% pro.enTrop)), ]
#ebranche.asc=data.frame(ind=ebranche.asc$ind,father=ebranche.asc$father,mother=ebranche.asc$mother) #*****
ebranche = gen.genealogy(ebranche.asc)
#print(ebranche.asc)
pro.ebranche = gen.pro(ebranche)
}
#print("4")
fond.ebranche = gen.founder(ebranche)
#print("5")
# Probands that are also founders carry no ancestry and are dropped.
pro.quiSontFond = pro.ebranche[pro.ebranche %in% fond.ebranche]
#print(paste("6", dim(ebranche.asc)))
ebranche.asc = ebranche.asc[(!(ebranche.asc$ind %in% pro.quiSontFond)), ]
#print(paste("7", dim(ebranche.asc)))
#ebranche.asc=data.frame(ind=ebranche.asc$ind,father=ebranche.asc$father,mother=ebranche.asc$mother)#*****
if(dim(ebranche.asc)[1]==0) stop("No branching possible, all probands are founders.")
else gen = gen.genealogy(ebranche.asc)
#print("8")
gen.validationAsc(gen)
#print("9")
return(gen)
}
# Return the children of the given individuals in genealogy 'gen'.
#
# Args:
#   gen: genealogy object, or an ind vector (then the 'father' and
#        'mother' vectors must be supplied by name in ...).
#   individuals: ids of the individuals whose children are requested.
#   ...: forwarded to gen.detectionErreur (carries father/mother when
#        'gen' is a plain vector).
#
# Returns:
#   A vector with the ids of all distinct children found.
gen.children = function(gen, individuals, ...)
{
  # When the genealogy is passed as plain vectors, the father and mother
  # columns are mandatory named arguments in '...'.
  if (is(gen, "vector") && length(list(...)) != 2) {
    stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
  }
  # Project-level input validation and normalisation.
  checked <- gen.detectionErreur(gen = gen, individuals = individuals, check = c(1, 13), ...)
  if (checked$erreur == TRUE) {
    stop(checked$messageErreur)
  }
  gen <- checked$gen
  individuals <- checked$individuals
  # An individual is a child when its mother or father is among 'individuals'.
  children_via_mother <- gen$ind[gen$mother %in% individuals]
  children_via_father <- gen$ind[gen$father %in% individuals]
  unique(c(children_via_mother, children_via_father))
}
gen.meangendepth = function(gen, pro = 0, type = "MEAN", ...)#, check = 1)#named = T,
{
# Compute the expected genealogical depth (via the project helper
# GLPriv.entropie3V) either per proband or pooled over all probands.
#
# Args:
#   gen: genealogy object, or ind vector (then 'father' and 'mother'
#        must be supplied by name in ...).
#   pro: proband ids (0 = defaults via gen.detectionErreur).
#   type: "IND" for one row per proband; "MEAN" for a single pooled value.
#
# Returns:
#   For "IND": a one-column data.frame "Exp.Gen.Depth" with one row per
#   proband ("Ind <id>"). For "MEAN": the pooled value returned by
#   GLPriv.entropie3V.
#Parameter validation
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
#stop("Param\350tre '...' invalide : indication du nom des param\350tres 'pere' et 'mere' est obligatoire")
#if(check == 1) {
retour <- gen.detectionErreur(gen = gen, pro = pro, typecomp = type, check = c(1, 5, 17))
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen <- retour$gen
pro <- retour$pro
type <- retour$typecomp
#}
if(type == "IND") {# | type == "MOYSUJETS") {
# One expected-depth value per proband.
tableau <- sapply(pro, function(x, gen)
GLPriv.entropie3V(gen$ind, gen$father, gen$mother, pro = x), gen = gen)
tableau <- data.frame(tableau)
# if(type == "MOYSUJETS") {
# tableau = data.frame(apply(tableau, 2, mean))
# dimnames(tableau)[[1]] <- "Mean.Exp.Gen.Depth"
# }
#if(named == T)
if(type == "IND")
dimnames(tableau)[[1]] <- as.character(paste("Ind", as.character(pro)))
dimnames(tableau)[[2]] <- "Exp.Gen.Depth"
return(tableau)
}
else if(type == "MEAN")
return(GLPriv.entropie3V(gen$ind, gen$father, gen$mother, pro = pro))
}
#gen.entropyMeanVar = function(gen, pro = 0, check = 1, ...) #typeCorpus = "ECH", bfacteurCorr = F, N = NULL,
#{
# #Validations des parametres
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
## if(bfacteurCorr == T)
# if(sum(N) == 0)
## stop("Correction factor must have a numerical population size value N")
# if(is(gen, "vector"))
# if(length(list(...)) != 2)
# stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
#
# retour = gen.detectionErreur(gen = gen, pro = pro, check = c(1, 5))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
#
# tableau = sapply(pro, function(x, gen)
# GLPriv.entropie3V(gen$ind, gen$father, gen$mother, pro = x), gen = gen)
## if(typeCorpus == "ECH")
## facteurCorr = length(pro)/(length(pro) - 1)
## else if(typeCorpus == "POP")
## facteurCorr = 1
## if(bfacteurCorr == T)
## facteurCorr = (facteurCorr * (N - length(pro)))/N
#
# facteurCorr = 1
# tableau = data.frame(tableau)
# tableau = data.frame(apply(tableau, 2, var) * facteurCorr)
# dimnames(tableau)[[1]] <- "Mean.Exp.Gen.Depth.Var"
# dimnames(tableau)[[2]] <- "Exp.Gen.Depth"
# return(tableau)
#}
#gen.f = function(gen, pro = 0, nbgenerations = 0, named = T, check = 1)
#{
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# retour = gen.detectionErreur(gen = gen, pro = pro, nbgenerations = nbgenerations, print.it = FALSE, named = named, check = c(3, 5, 19, 18, 10))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# nbgenerations = retour$nbgenerations
## print.it = retour$print.it
# named = retour$named
#
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# tmp <- double(length(pro))
#
# #Call de la fonction en C
# .Call("SPLUSF", gen@.Data, pro, length(pro), nbgenerations, tmp, FALSE, specialsok = T)
# names(tmp) <- pro
## if(print.it) {
## base <- c(deparse(substitute(gen)), deparse(substitute(pro)), nbgenerations)
## header.txt <- paste("\n\t*** Calls : gen.F (", base[1], ",", base[2], ",", base[3], ") ***\n\n")
## cat(header.txt)
## }
# return(invisible(tmp))
#}
#gen.fmean = function(vectF, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# #if(check == 1) {
# retour = gen.detectionErreur(vectF = vectF, named = named, check = c(33, 10))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# vectF = retour$vectF
# named = retour$named
# #}
# #Test pour accelerer la procedure
# return(GLapplyF(vectF, mean, named = named))
#}
# Founders of the genealogy: individuals with neither a known father nor a
# known mother (both parent ids equal to 0).
#
# gen: GLgen genealogy object, or a vector form in which case the 'father'
#      and 'mother' components must be passed by name through '...'.
# Returns the vector of founder individual ids.
gen.founder = function(gen, ...)#, check = 1)
{
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen = gen, ..., check = 1)
# Fix: signal the validation failure with stop() instead of returning the
# message string as a value (consistent with gen.sibship, gen.meangendepth
# and the other gen.* functions).
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen = retour$gen
return(gen$ind[gen$father == 0 & gen$mother == 0])
}
# Half-founders of the genealogy: individuals with exactly one known parent
# (father known and mother unknown, or the reverse; 0 marks an unknown parent).
#
# gen: GLgen genealogy object, or a vector form in which case the 'father'
#      and 'mother' components must be passed by name through '...'.
# Returns the vector of half-founder individual ids.
gen.half.founder = function(gen, ...)#, check = 1)
{
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen = gen, ..., check = 1)
# Fix: signal the validation failure with stop() instead of returning the
# message string as a value (consistent with the other gen.* functions).
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen = retour$gen
return(gen$ind[(gen$father != 0 & gen$mother == 0) | (gen$father == 0 & gen$mother != 0)])
}
# Siblings of the given individuals.
#
# gen:          GLgen object, or a vector form ('father'/'mother' must be
#               passed by name through '...').
# individuals:  proband ids whose siblings are requested.
# halfSibling:  TRUE  -> every child sharing at least one parent with a
#                        proband (half-siblings included);
#               FALSE -> only full siblings (same father AND same mother).
# The probands themselves are removed from the returned vector of ids.
gen.sibship = function(gen, individuals, halfSibling = TRUE, ...)#, check = 1)
{
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
#stop("Param\350tre '...' invalide : indication du nom des param\350tres 'pere' et 'mere' est obligatoire")
#if(check == 1) {
retour = gen.detectionErreur(gen = gen, individuals = individuals, halfSibling = halfSibling, check = c(1, 13, 14), ...)
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
individuals = retour$individuals
halfSibling = retour$halfSibling
#}
if(halfSibling == TRUE) {
PositionProband = match(individuals, gen$ind)
# Find the probands' mothers and fathers
Meres <- gen$mother[PositionProband]
Peres <- gen$father[PositionProband]
# Dividing by the logical mask maps an unknown parent (id 0) to NaN
# (0/FALSE = 0/0), which the is.na() filter removes; known ids pass
# through unchanged (x/TRUE = x).
MaskMere <- Meres != 0
MaskPere <- Peres != 0
Meres <- (Meres/MaskMere)[!is.na(Meres/MaskMere)]
Peres <- (Peres/MaskPere)[!is.na(Peres/MaskPere)]
# Find every child of those parents
sibshipMo <- gen.children(gen, individuals = Meres)#, check = 0)
sibshipFa <- gen.children(gen, individuals = Peres)#, check = 0)
# Vector of all children, probands included
sibshipAndProband <- unique(c(sibshipMo, sibshipFa))
# Now remove the probands themselves
temp <- match(sibshipAndProband, individuals)
sibship <- sibshipAndProband[(1:length(temp))[is.na(temp)]]
return(sibship)
}
else {
PositionProband = match(individuals, gen$ind)
# Find the probands' mothers and fathers
Meres <- gen$mother[PositionProband]
Peres <- gen$father[PositionProband]
# Same mask-division trick as above: drop unknown (id 0) parents.
MaskMere <- Meres != 0
MaskPere <- Peres != 0
Meres <- (Meres/MaskMere)[!is.na(Meres/MaskMere)]
Peres <- (Peres/MaskPere)[!is.na(Peres/MaskPere)]
# Full siblings: the product of the two match() results is non-NA only
# when BOTH the mother and the father are among the probands' parents.
temp1 <- match(gen$mother, Meres)
temp2 <- match(gen$father, Peres)
PositionsibshipAndProband <- temp1 * temp2
# The sibship, probands included
sibshipSameFaMoAndProband <- gen$ind[(1:length(PositionsibshipAndProband))[!is.na(PositionsibshipAndProband)]]
# Now remove the probands themselves
temp <- match(sibshipSameFaMoAndProband, individuals)
sibship <- sibshipSameFaMoAndProband[(1:length(temp))[is.na(temp)]]
return(sibship)
}
}
# F coefficients of probands per depth level (presumably inbreeding
# coefficients, per the function name -- confirm against the package manual).
#
# gen:      GLgen genealogy object.
# pro:      probands (defaults to gen.pro(gen) when missing).
# depthmin, depthmax: generation-depth bounds; both default to
#           gen.depth(gen) - 1, i.e. the full depth.
# Returns (invisibly) a GLmulti object wrapping a probands x depth matrix
# filled in by the C routine SPLUSFS.
gen.f = function(gen, pro, depthmin= (gen.depth(gen)-1), depthmax= (gen.depth(gen)-1)) #, check = 1)#named = T,
{
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
if(missing(pro)) pro = gen.pro(gen)
retour = gen.detectionErreur(gen = gen, pro = pro, depthmin = depthmin, depthmax = depthmax, print.it = FALSE, named = TRUE,
check = c(3, 5, 20, 18, 10))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
depthmin = retour$depthmin
depthmax = retour$depthmax
named = retour$named
# Buffer receiving the result of the C routine: one double per proband per
# depth level, filled in place by the .Call below.
ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
tmp <- double(length(pro) * ecart)
# Call into the compiled code
.Call("SPLUSFS", gen@.Data, pro, length(pro), depthmin, depthmax, tmp, FALSE, specialsok = TRUE)
# Reshape the flat buffer into a probands x depth matrix (drop() collapses
# the depth dimension when a single level was requested).
dim(tmp) <- c(length(pro), ecart)
dimnames(tmp) <- list(pro, NULL)
tmp = drop(tmp)
return(invisible(GLmulti(tmp, depth = as.integer(depthmin:depthmax))))
}
# Build a GLgen genealogy object from a pedigree table.
#
# ped: a pedigree table with columns 'ind', 'father', 'mother' and 'sex'
#      (a 4-column table still named X1..X4 is assumed to already be in that
#      order), or an object accepted directly by gen.detectionErreur.
# autoComplete: when TRUE, fathers/mothers that are referenced but absent
#      from the 'ind' column are appended as founders (both parents 0), the
#      added fathers with sex 1 and the added mothers with sex 2.
# Returns a new GLgen object wrapping the handle created by the C routine
# SPLUSCALLCreerObjetGenealogie, stamped with the creation date.
gen.genealogy = function(ped, autoComplete=FALSE, ...)#, check = 1)
{
if(!(is(ped, "GLgen"))) {
# Default X1..X4 headers: assume the conventional column order.
if(dim(ped)[2]==4 && sum(colnames(ped)==c("X1","X2","X3","X4"))==4) {
print("No column names given. Assuming <ind>, <father>, <mother> and <sex>")
colnames(ped) <- c("ind", "father", "mother", "sex")
}
if(sum(c("ind","father","mother","sex") %in% colnames(ped)) < 4){
stop(paste(paste(c("ind","father","mother","sex")[grep(FALSE,c("ind","father","mother","sex") %in% colnames(ped))]),
"not in table columns.",collapse=""))
}
# Append referenced-but-missing fathers as male founders.
if(autoComplete & !all(is.element(ped[ped[,"father"]!=0,"father"], ped[,"ind"]))) {
pereManquant <- unique(ped[grep(FALSE, is.element(ped[,"father"], ped[,"ind"])),"father"])
pereManquant <- pereManquant[-grep("^0$",pereManquant)]
ajout <- matrix(c(pereManquant, rep(0, (2*length(pereManquant))), rep(1,length(pereManquant))), byrow=FALSE, ncol=4)
colnames(ajout) <- colnames(ped)
ped <- rbind(ped, ajout)
}
# Append referenced-but-missing mothers as female founders.
if(autoComplete & !all(is.element(ped[ped[,"mother"]!=0,"mother"], ped[,"ind"]))) {
mereManquante <- unique(ped[grep(FALSE, is.element(ped[,"mother"], ped[,"ind"])),"mother"])
mereManquante <- mereManquante[-grep("^0$",mereManquante)]
ajout <- matrix(c(mereManquante, rep(0, (2*length(mereManquante))), rep(2,length(mereManquante))), byrow=FALSE, ncol=4)
colnames(ajout) <- colnames(ped)
ped <- rbind(ped, ajout)
}
}
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
retour = gen.detectionErreur(gen = ped, check = 1, ...)
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
tmp2 <- NULL
# Normalise sex codes: factor levels "H","h",1 become integer codes 1..3
# and are collapsed to 1 (men); "F","f",2 become codes 4..6, collapsed to 2
# (women). Anything else yields NA through the factor conversion.
if(!is.null(gen$sex)) {
tmp <- factor(gen$sex, levels = c("H", "h", 1, "F", "f", 2))
tmp2 <- as(tmp, "integer")
tmp2[tmp2 == 2 | tmp2 == 3] <- 1
tmp2[tmp2 == 4 | tmp2 == 5 | tmp2 == 6] <- 2
}
n <- .Call("SPLUSCALLCreerObjetGenealogie", gen$ind, gen$father, gen$mother, tmp2)
# Build the Genealogie object
return(new("GLgen", .Data = n, Date = date()))
}
# Build a GLgen object restricted to maternal (or paternal) lineages.
#
# ped:      pedigree table accepted by gen.genealogy.
# pro:      starting probands (0 -> individuals without children, via
#           gen.pro).
# maternal: TRUE keeps mother lines (all fathers zeroed out before the
#           ascent), FALSE keeps father lines.
# Returns a GLgen object containing only the individuals encountered while
# walking up the chosen parental line from the probands.
gen.lineages = function(ped, pro = 0, maternal = TRUE, ...)#, check = 1
{
# Build a GLgen object containing the complete ascendance
gen = gen.genealogy(ped, ...) #check = check,
# Validate the gen and proband parameters
retour = gen.detectionErreur(gen = gen, pro = pro, check = c(3, 36))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
# If no subjects are forced, default to the individuals without children
if(sum(pro == 0)) data.ind = gen.pro(gen) else data.ind = pro
# For maternal lineages every father is set to 0, otherwise every mother
if(maternal == TRUE) {
ped$father = rep(0, length(ped$father))
# output = "M"
}
else {
ped$mother = rep(0, length(ped$mother))
# output = "F"
}
# GLgen object with the fathers (or mothers) zeroed out
genMouP = gen.genealogy(ped, ...) #, check = check
lig.parent.lst = c(data.ind)
# Walk up one generation at a time, collecting the parents found at every
# depth of the genealogy
for(i in 1:gen.depth(gen)) {
data.ind = unlist(gen.parent(genMouP, data.ind))
lig.parent.lst = c(lig.parent.lst, data.ind)
}
# Keep only the individuals of the ascendance table that were collected
gen = gen.genealogy(ped[(ped$ind %in% lig.parent.lst), ], ...) #, check = check
# Return the GLgen lineage object
return(gen)
}
# Export a GLgen object back to a plain pedigree data.frame.
#
# gen:    GLgen genealogy object.
# sorted: passed to the C routine; presumably requests sorted output
#         (see the 'mustsort' argument in the C prototype below).
# Returns (invisibly) a data.frame with columns ind, father, mother, sex.
gen.genout = function(gen, sorted = FALSE)#, check = 1)
{
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
#if(check == 1) {
retour = gen.detectionErreur(gen = gen, sorted = sorted, check = c(3, 4))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
sorted = retour$sorted
#}
# Buffers receiving the result of the C routine, one slot per individual.
#print(paste(" ? ",gen@.Data[9]))
taille <- gen.noind(gen)
v <- list(ind = integer(taille), father = integer(taille), mother = integer(taille), sex = integer(taille))
#extern "C" void SPLUSOutgen
#(long* genealogie, long* plRetIndividu,long* plRetPere,long* plRetMere,long* mustsort)
#param <- list(Data=gen@.Data, ind=v$ind, father=v$father, mother=v$mother, sex=v$sex, sorted=sorted)
#param = .Call("SPLUSOutgen", param, NAOK = T)
param = .Call("SPLUSOutgen", gen@.Data, v$ind, v$father, v$mother, v$sex, sorted)
# Rebuild the column list from the values returned by the C routine.
v <- list(ind = param$ind, father = param$father, mother = param$mother, sex = param$sex)
# If the numeric sex codes of the individuals were present, they could be
# relabelled "H"/"F" (disabled legacy behaviour below).
#if(v$sex[1] == -1) v <- v[1:3]
#else v[[4]] <- factor(v[[4]], labels = c("H", "F"))
return(invisible(data.frame(v)))
}
# Implex (number of distinct ancestors) of probands per generation.
#
# gen:        GLgen object, or a vector form ('father'/'mother' must be
#             passed by name through '...').
# pro:        proband ids (0 -> defaults chosen by gen.detectionErreur).
# genNo:      generation numbers to report (-1 -> default handled by the
#             validation routine).
# type:       "IND"  -> one column per proband;
#             "MEAN" -> per-generation mean over the probands;
#             "ALL"  -> raw result of the helper for all probands at once.
# onlyNewAnc: FALSE counts distinct ancestors per generation
#             (GLPriv.implex3V); TRUE counts only ancestors appearing for
#             the first time (gen.implex3V).
gen.implex = function(gen, pro = 0, genNo = -1, type = "MEAN", onlyNewAnc = FALSE, ...)#, check = 1 named = T,
{
# Parameter validation
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
#stop("Param\350tre '...' invalide : indication du nom des param\350tres 'pere' et 'mere' est obligatoire")
#if(check == 1) {
retour <- gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, typecomp = type, named = TRUE, check = c(1, 5, 16, 17, 10))
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen <- retour$gen
pro <- retour$pro
genNo <- retour$genNo
named <- retour$named
type <- retour$typecomp
#}
# Ancestors recur over several generations; whether they are counted per
# generation or only at their first appearance selects a different helper.
if(onlyNewAnc == FALSE) fctApp <- GLPriv.implex3V else fctApp <- gen.implex3V
# Per-proband implex, one column per proband (rows are generations).
if(type == "IND" | type == "MEAN") {
tableau = sapply(pro, function(x, gen, genNo, fctApp, named)
{
fctApp(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named)
}
, gen = gen, genNo = genNo, fctApp = fctApp, named = named)
# A single generation yields a plain vector; keep a matrix shape.
if(is.null(dim(tableau))) tableau <- t(as.matrix(tableau))
# For "MEAN", average over the probands (row-wise).
if(type == "MEAN") tableau = data.frame(apply(tableau, 1, mean))
#if(named == T)
#dimnames(tableau)[[2]] <- "implex"
names(tableau) <- "implex"
if(type == "IND") dimnames(tableau)[[2]] <- as.character(paste("Ind", as.character(pro)))
dimnames(tableau)[[1]] <- as.character(genNo)
return(data.frame(tableau))
}
# "ALL": hand every proband to the helper in a single call.
else if(type == "ALL")
return(fctApp(gen$ind, gen$father, gen$mother, pro = pro, genNo = genNo, named = named))
}
# Variance of the implex across probands, per generation.
#
# gen:        GLgen object, or a vector form ('father'/'mother' must be
#             passed by name through '...').
# pro:        proband ids (0 -> defaults chosen by gen.detectionErreur).
# onlyNewAnc: FALSE counts distinct ancestors per generation
#             (GLPriv.implex3V); TRUE counts only first appearances
#             (gen.implex3V).
# genNo:      generation numbers to report (-1 -> validation default).
# Returns a data.frame with one row per generation, column "implex.var".
gen.implexVar = function(gen, pro = 0, onlyNewAnc = FALSE, genNo = -1, ...)# check = 1,named = T,
{
# Parameter validation
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# if(bfacteurCorr == T)
# if(sum(N) == 0)
# stop("Correction factor must have a numerical population size value N")
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, named = TRUE, check = c(1, 5, 16, 10))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
genNo = retour$genNo
named = retour$named
# Whether ancestors are counted per generation or only at their first
# appearance selects a different helper.
if(onlyNewAnc == FALSE) fctApp <- GLPriv.implex3V else fctApp <- gen.implex3V
# Legacy sample/population correction factor, currently fixed to 1
# (disabled code kept above for reference).
# if(typeCorpus == "ECH") facteurCorr = length(pro)/(length(pro) - 1) else if(typeCorpus == "POP")
# facteurCorr = 1
# if(bfacteurCorr == T)
# facteurCorr = (facteurCorr * (N - length(pro)))/N
facteurCorr = 1
tableau = sapply(pro, function(x, gen, fctApp, genNo, named) {
fctApp(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named)
}, gen = gen, fctApp = fctApp, genNo = genNo, named = named)
# A single generation yields a plain vector; keep a matrix shape.
if(is.null(dim(tableau))) tableau <- t(as.matrix(tableau))
# Row-wise variance across probands, one row per generation.
tableau = data.frame(apply(tableau, 1, var) * facteurCorr)
dimnames(tableau)[[1]] <- as.character(genNo)
dimnames(tableau)[[2]] <- "implex.var"
return(tableau)
}
#gen.max = function(gen, individuals, named = T, check = 1)
#{
# #On appel la fonction qui permet d'avoir
# #le numero de generation de tout les individuals
# dfData.numgen = gen.generation(gen, as.integer(individuals))
# dfResult = as.data.frame(as.numeric(names(dfData.numgen))) #named.index.rowcol( dfData.numgen, "numeric")
# dfResult[, 2] = dfData.numgen
# dimnames(dfResult)[[2]] <- c("ind", "numgen")
# return(dfResult)
#}
# Generation number of each requested individual, computed by the C routine
# SPLUSnumeroGen (presumably the maximum generation number, by analogy with
# gen.min/gen.mean -- confirm against the package manual).
#
# gen:         GLgen genealogy object.
# individuals: individual ids to evaluate.
# Returns an integer vector named by individual id.
gen.max = function(gen, individuals)#, check = 1) #, ancestors=0)named = T,
{
# Run the shared validator and abort on any reported problem.
checked = gen.detectionErreur(gen = gen, individuals = individuals, ancestors = 0, named = TRUE, check = c(3, 13, 10))
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
individuals = checked$individuals
named = checked$named
# Output buffer filled in place by the compiled routine.
howMany = length(individuals)
result = integer(howMany)
#extern "C" void SPLUSnumeroGen(long* Genealogie, long* lpProband, NProband, retour)
.Call("SPLUSnumeroGen", gen@.Data, as.integer(individuals), howMany, result)
names(result) <- individuals
result
}
# Minimum generation number of each requested individual, computed by the
# C routine SPLUSnumGenMin.
#
# gen:         GLgen genealogy object.
# individuals: individual ids to evaluate.
# Returns an integer vector named by individual id.
gen.min = function(gen, individuals)#, check = 1) #, ancestors=0)named = T,
{
# Run the shared validator and abort on any reported problem.
checked = gen.detectionErreur(gen = gen, individuals = individuals, ancestors = 0, named = TRUE, check = c(3,13,10))
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
individuals = checked$individuals
named = checked$named
# Output buffer filled in place by the compiled routine.
howMany = length(individuals)
result = integer(howMany)
#extern "C" void SPLUSnumeroGen(long* Genealogie, long* lpProband, NProband, retour)
.Call("SPLUSnumGenMin", gen@.Data, as.integer(individuals), howMany, result)
names(result) <- individuals
result
}
# Mean generation number of each requested individual, computed by the
# C routine SPLUSnumGenMoy.
#
# gen:         GLgen genealogy object.
# individuals: individual ids to evaluate.
# Returns a double vector named by individual id.
gen.mean = function(gen, individuals)#, check = 1) #, ancestors=0)named = T,
{
# Run the shared validator and abort on any reported problem.
checked = gen.detectionErreur(gen = gen, individuals = individuals, ancestors = 0, named = TRUE, check = c(3,13,10))
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
individuals = checked$individuals
named = checked$named
# Output buffer (doubles, since means are fractional) filled in place by
# the compiled routine.
howMany = length(individuals)
result = double(howMany)
#extern "C" void SPLUSnumeroGen(long* Genealogie, long* lpProband, NProband, retour)
.Call("SPLUSnumGenMoy", gen@.Data, as.integer(individuals), howMany, result)
names(result) <- individuals
result
}
# Number of children of each requested individual, computed by the C routine
# SPLUSChild.
#
# gen:         GLgen genealogy object.
# individuals: individual ids to evaluate.
# Returns an integer vector named by individual id.
gen.nochildren = function(gen, individuals)#, check = 1)#named = T,
{
# Run the shared validator and abort on any reported problem.
checked = gen.detectionErreur(gen = gen, individuals = individuals, named = TRUE, check = c(3, 13, 10))
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
individuals = checked$individuals
named = checked$named
# Output buffer filled in place by the compiled routine.
counts <- integer(length(individuals))
#extern "C" void SPLUSChild(long* Genealogie, long* plProband,long* lNProband, long* retour)
.Call("SPLUSChild", gen@.Data, individuals, length(individuals), counts, specialsok = TRUE)
names(counts) <- individuals
counts
}
# Number of women in the genealogy: total individuals (slot 9 of the GLgen
# handle) minus the number of men (slot 12). Returns NA when sex
# information is absent (slot 12 holds -1).
gen.nowomen = function(gen)#, check = 1)
{
checked = gen.detectionErreur(gen = gen, check = 3)
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
# -1 in slot 12 flags missing sex information.
if(gen@.Data[12] == -1)
return(NA)
gen@.Data[9] - gen@.Data[12]
}
# Number of men in the genealogy, stored in slot 12 of the GLgen handle.
# Returns NA when sex information is absent (slot 12 holds -1).
gen.nomen = function(gen)#, check = 1)
{
checked = gen.detectionErreur(gen = gen, check = 3)
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
# -1 in slot 12 flags missing sex information.
if(gen@.Data[12] == -1)
return(NA)
gen@.Data[12]
}
# Number of individuals in the genealogy, stored in slot 9 of the GLgen
# handle.
gen.noind = function(gen)#, check = 1)
{
checked = gen.detectionErreur(gen = gen, check = c(3))
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
gen@.Data[9]
}
# Occurrence counts of ancestors in the ascending genealogy of probands.
#
# gen:       GLgen object, or a vector form ('father'/'mother' must be
#            passed by name through '...').
# pro:       probands; a plain vector gives one column per proband, a
#            GLgroup one column per group (typeOcc is ignored for groups).
# ancestors: ancestor ids whose occurrences are counted (one row each).
# typeOcc:   "IND" returns the per-proband matrix; "TOTAL" returns the row
#            sums as a one-column data.frame named "nb.occ".
# Counting is delegated to GLPrivOcc, one proband (or group) at a time.
gen.occ = function(gen, pro = 0, ancestors = 0, typeOcc = "IND", ...) # check = 1,
{
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen, pro = pro, ancestors = ancestors, check = c(1, 5, 11), ...)
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
ancestors = retour$ancestors
# Probands divided into groups: one column of counts per group.
if(is(pro, "GLgroup")) {
occurences <- matrix(0, nrow = length(ancestors), ncol = length(pro))
for(i in seq_along(pro))
occurences[, i] <- GLPrivOcc(gen, pro = pro[[i]], ancestors = ancestors)
dimnames(occurences) <- list(ancestors, names(pro))
return(occurences)
}
# Probands considered individually: one column per proband.
occurences <- matrix(0, nrow = length(ancestors), ncol = length(pro))
for(i in seq_along(pro))
occurences[, i] <- GLPrivOcc(gen, pro = pro[i], ancestors = ancestors)
dimnames(occurences) <- list(ancestors, pro)
if(typeOcc == "IND")
return(occurences)
if(typeOcc == "TOTAL") {
# Row sums over the probands, labelled like the matrix rows.
dfResult.occtot = data.sum(as.data.frame(occurences))
dimnames(dfResult.occtot)[[1]] <- dimnames(occurences)[[1]]
dimnames(dfResult.occtot)[[2]] <- c("nb.occ")
return(dfResult.occtot)
}
# Fix: signal an invalid typeOcc with stop() instead of print()ing a hint
# and invisibly returning the printed string.
stop("Please choose between \"IND\" and \"TOTAL\" for the variable typeOcc.")
}
# Parents of the given individuals.
#
# gen:         GLgen object, or a vector form ('father'/'mother' must be
#              passed by name through '...').
# individuals: ids whose parents are requested.
# output:      "FaMo" -> list with Fathers and Mothers components,
#              "Fa"   -> fathers only,
#              "Mo"   -> mothers only.
# Unknown parents (id 0) are excluded and each parent is reported once.
gen.parent = function(gen, individuals, output = "FaMo", ...)#, check = 1
{
# Vector form: '...' must carry exactly the named 'father' and 'mother' vectors.
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
checked = gen.detectionErreur(gen = gen, individuals = individuals, output = output, check = c(1, 13, 15), ...)
if(checked$erreur == TRUE)
stop(checked$messageErreur)
gen = checked$gen
individuals = checked$individuals
output = checked$output
# Look up each requested individual, then drop unmatched ids (NA from
# match) and duplicates.
where = match(individuals, gen$ind)
mothers <- gen$mother[where]
fathers <- gen$father[where]
mothers <- unique(mothers[!is.na(mothers)])
fathers <- unique(fathers[!is.na(fathers)])
# Id 0 stands for an unknown parent and is filtered from every output.
switch(output,
FaMo = list(Fathers = fathers[fathers > 0], Mothers = mothers[mothers > 0]),
Fa = fathers[fathers > 0],
Mo = mothers[mothers > 0])
}
#gen.phi = function(gen, pro = 0, nbgenerations = 0, print.it = F, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, nbgenerations = nbgenerations, print.it = print.it, named =
# named, check = c(3, 5, 19, 18, 10))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# if(length(retour$pro) < 2)
# stop("Invalid 'pro' parameter: must be a numerical vector of at least 2 proband")
# #stop("Param\350tre 'prop' invalide: doit \352tre un vecteur num\351rique de 2 proposants minimum")
# gen = retour$gen
# pro = retour$pro
# nbgenerations = retour$nbgenerations
# print.it = retour$print.it
# named = retour$named
# #}
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# tmp <- double(length(pro) * length(pro))
# #extern "C" void SPLUSPhiMatrix(long* Genealogie,long* proband, long *NProband,long *Niveau,double* pdRetour, long *printit)
# #Call de la fonction en C
# .Call("SPLUSPhiMatrix", gen@.Data, pro, length(pro), as.integer(nbgenerations), tmp, print.it, specialsok = T)
# dim(tmp) <- c(length(pro), length(pro))
# #if(named)
# dimnames(tmp) <- list(pro, pro)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), nbgenerations)
# header.txt <- paste("*** Calls : gen.phi (", base[1], ",", base[2], ",", base[3], ") ***")
# cat(header.txt, "\n")
# }
# return(invisible(tmp))
#}
# List the pairs of probands whose kinship coefficient reaches 'threshold'.
#
# phiMatrix: square kinship matrix, optionally with proband ids as dimnames.
# threshold: minimum kinship value to report.
# Values >= 0.5 (the self-kinship diagonal and above) and the lower triangle
# are zeroed first, so each unordered pair appears at most once.
# Returns a data.frame with the matrix coordinates (line/column), the
# proband ids (pro1/pro2) and the kinship value; a non-matrix input yields
# an error string (legacy behaviour kept for backward compatibility).
gen.phiOver = function(phiMatrix, threshold)
{
if(!is.matrix(phiMatrix))
return("erreur on doit avoir une matrice")
size = nrow(phiMatrix)
# Discard self-kinship (and anything >= 0.5) plus the lower triangle so
# only one cell per pair can be selected.
phiMatrix[phiMatrix >= 0.5] <- 0
phiMatrix[lower.tri(phiMatrix)] <- 0
# Proband labels: dimnames when present, otherwise plain indices.
labels = dimnames(phiMatrix)[[1]]
labels = if(is.null(labels)) 1:size else as.numeric(labels)
# Cell [i, j] of colIdx holds j; its transpose holds i.
colIdx = matrix(rep(1:size, each = size), size, size)
keep = phiMatrix >= threshold
firstSel = colIdx[keep]
secondSel = t(colIdx)[keep]
data.frame(line = firstSel, column = secondSel, pro1 = labels[firstSel], pro2 = labels[secondSel], kinship = phiMatrix[keep])
}
# Mean kinship of a kinship matrix, excluding entries >= 0.5 (the
# self-kinship diagonal and above). Non-matrix inputs are handled through
# GLapplyPhi with the same per-element filter.
gen.phiMean = function(phiMatrix)#, check = 1)#named = T,
{
checked = gen.detectionErreur(matricephi = phiMatrix, named = TRUE, check = c(28, 10))
if(checked$erreur == TRUE)
stop(checked$messageErreur)
phiMatrix = checked$matricephi
named = checked$named
# Fast path for a plain matrix; otherwise delegate to GLapplyPhi.
if(inherits(phiMatrix, "matrix"))
mean(phiMatrix[phiMatrix < 0.5])
else
GLapplyPhi(phiMatrix, function(v) mean(v[v < 0.5]), named = named)
}
#gen.phiMT = function(gen, pro = 0, nbgenerations = 0, print.it = F, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, nbgenerations = nbgenerations, print.it = print.it, named = named, check = c(3, 5, 19, 18, 10))
# if(retour$erreur == T)
# return(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# nbgenerations = retour$nbgenerations
# print.it = retour$print.it
# named = retour$named
# #}
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# tmp <- double(length(pro) * length(pro))
# #extern "C" void SPLUSPhiMatrixMT(long* Genealogie,long* proband,long *NProband,long *Niveau,double* pdRetour, long *printit)
# .Call("SPLUSPhiMatrixMT", gen@.Data, pro, length(pro), as.integer(nbgenerations), tmp, print.it, specialsok = T)
# dim(tmp) <- c(length(pro), length(pro))
# #if(named)
# dimnames(tmp) <- list(pro, pro)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), nbgenerations)
# header.txt <- paste("*** Calls : gen.phiMT (", base[1], ",", base[2], ",", base[3], ") ***")
# cat(header.txt, "\n")
# }
# return(invisible(tmp))
#}
# Kinship coefficient matrices between probands, one matrix per depth level.
#
# gen:      GLgen genealogy object.
# pro:      probands (defaults to gen.pro(gen) when missing).
# depthmin, depthmax: generation-depth bounds, both defaulting to
#           gen.depth(gen) - 1 and validated to stay in [0, depth-1].
# MT:       TRUE uses the multithreaded C routine SPLUSPhisMT in a single
#           call; FALSE calls SPLUSPhis once per depth level and stitches
#           the results together.
# Returns (invisibly) a GLmulti object wrapping a pro x pro (x depth) array.
gen.phi = function(gen, pro, depthmin = (gen.depth(gen)-1), depthmax = (gen.depth(gen)-1), MT = FALSE)#, check = 1)#named = T,
{
#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
if(missing(pro)) pro = gen.pro(gen)
if( depthmin<0 | depthmin>(gen.depth(gen)-1) | depthmax<0 | depthmax>(gen.depth(gen)-1) )
stop("depthmin and depthmax must be between 0 and (gen.depth(gen)-1)")
retour = gen.detectionErreur( gen=gen, pro=pro, depthmin=depthmin, depthmax=depthmax, print.it=FALSE, named=TRUE, check=c(3,5,20,18,10))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
depthmin = retour$depthmin
depthmax = retour$depthmax
# print.it = retour$print.it
named = retour$named
# TODO: revisit later (original note: "a faire un peu plus tard")
if(MT) {
# Single multithreaded call covering the whole depth range.
ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
np <- length(pro)
npp <- length(pro) * length(pro)
# Buffers receiving the result of the C routine, filled in place.
rmatrix <- double(ecart * npp)
moyenne <- double(ecart)
.Call("SPLUSPhisMT", gen@.Data, pro, length(pro), as.integer(depthmin), as.integer(depthmax), moyenne, rmatrix, FALSE, specialsok=TRUE)
}
else {
# depthmaxtmp = depthmax
# depthmintmp = depthmin
# One C call per depth level; each pro x pro matrix is collected in
# 'liste' and flattened afterwards.
liste = list()
j = 1
for(i in depthmin:depthmax) {
depthmintmp = i
depthmaxtmp = i
ecart <- as.integer(depthmaxtmp) - as.integer(depthmintmp) + 1
np <- length(pro)
# Buffers receiving the result of the C routine, filled in place.
npp <- length(pro) * length(pro)
rmatrix <- double(ecart * npp)
moyenne <- double(ecart)
print.it=FALSE
.Call("SPLUSPhis", gen@.Data, pro, length(pro), depthmintmp, depthmaxtmp, moyenne, rmatrix, print.it, specialsok = TRUE)
dim(rmatrix) <- c(np, np, ecart)
dimnames(rmatrix) <- list(pro, pro, NULL)
rmatrix <- drop(rmatrix)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), depthmintmp, depthmaxtmp)
# header.txt <- paste("*** Calls : gen.phis (", base[1], ",", base[2], ",", base[3], ",", base[4], ") ***")
# cat(header.txt, "\n")
# }
liste[[j]] = rmatrix
j = j + 1
}
# Flatten the per-depth matrices into one vector.
# NOTE(review): grows the vector on every iteration (O(n^2) copying);
# could preallocate, but kept as-is to preserve exact behaviour.
sortie.lst = c()
for(i in 1:length(liste)) sortie.lst = c(sortie.lst, liste[[i]])
ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
np <- length(pro)
# The preallocated buffer is immediately replaced by the collected values.
npp <- length(pro) * length(pro)
rmatrix <- double(ecart * npp)
rmatrix <- sortie.lst
}
# Reshape to pro x pro x depth; drop() collapses a single depth level.
dim(rmatrix) <- c(np, np, ecart)
dimnames(rmatrix) <- list(pro, pro, NULL)
rmatrix <- drop(rmatrix)
return(invisible(GLmulti(rmatrix, depth = as.integer(depthmin:depthmax))))
}
# print.it = F,
#gen.phis = function(gen, depthmin, depthmax, pro, named = T, check = 1)
#{
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# if(missing(pro)) pro = gen.pro(gen)
#
# retour = gen.detectionErreur(gen=gen,pro=pro,depthmin=depthmin,depthmax=depthmax,print.it=FALSE,named=named,check=c(3,5,20,18,10))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# depthmin = retour$depthmin
# depthmax = retour$depthmax
## print.it = retour$print.it
# named = retour$named
#
# #a faire un peu plus tard
# depthmaxtmp = depthmax
# depthmintmp = depthmin
# liste = list()
# j = 1
# for(i in depthmintmp:depthmaxtmp) {
# depthmin = i
# depthmax = i
# ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
# np <- length(pro)
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# npp <- length(pro) * length(pro)
# rmatrix <- double(ecart * npp)
# moyenne <- double(ecart)
# .Call("SPLUSPhis", gen@.Data, pro, length(pro), depthmin, depthmax, moyenne, rmatrix, print.it, specialsok = T)
# dim(rmatrix) <- c(np, np, ecart)
# dimnames(rmatrix) <- list(pro, pro, NULL)
# rmatrix <- drop(rmatrix)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), depthmin, depthmax)
# header.txt <- paste("*** Calls : gen.phis (", base[1], ",", base[2], ",", base[3], ",", base[4], ") ***")
# cat(header.txt, "\n")
# }
# liste[[j]] = rmatrix
# j = j + 1
# }
# sortie.lst = c()
# for(i in 1:length(liste))
# sortie.lst = c(sortie.lst, liste[[i]])
# ecart <- as.integer(depthmaxtmp) - as.integer(depthmintmp) + 1
# np <- length(pro)
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# npp <- length(pro) * length(pro)
# rmatrix <- double(ecart * npp)
# rmatrix <- sortie.lst
# dim(rmatrix) <- c(np, np, ecart)
# #if(named)
# dimnames(rmatrix) <- list(pro, pro, NULL)
# rmatrix <- drop(rmatrix)
# return(invisible(GLmulti(rmatrix, depth = as.integer(depthmintmp:depthmaxtmp))))
#}
#gen.phisMT = function(gen, depthmin, depthmax, pro, print.it = F, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# if(missing(pro))
# pro = gen.pro(gen)
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, depthmin = depthmin, depthmax = depthmax, print.it = print.it,
# named = named, check = c(3, 5, 20, 18, 10))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# depthmin = retour$depthmin
# depthmax = retour$depthmax
# print.it = retour$print.it
# named = retour$named
# #}
# #a faire un peu plus tard
# ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
# np <- length(pro)
# npp <- length(pro) * length(pro)
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# rmatrix <- double(ecart * npp)
# moyenne <- double(ecart)
# #extern "C" void SPLUSPhis(long* Genealogie,long* proband, long *NProband,long *NiveauMin,long *NiveauMax,double* pdRetour, double *MatrixArray, long *printit)
# .Call("SPLUSPhisMT", gen@.Data, pro, length(pro), as.integer(depthmin), as.integer(depthmax), moyenne, rmatrix, print.it, specialsok = T)
# dim(rmatrix) <- c(np, np, ecart)
# #if(named)
# dimnames(rmatrix) <- list(pro, pro, NULL)
# rmatrix <- drop(rmatrix)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), depthmin, depthmax)
# header.txt <- paste("*** Calls : gen.phis (", base[1], ",", base[2], ",", base[3], ",", base[4], ") ***")
# cat(header.txt, "\n")
# }
# return(invisible(GLmulti(rmatrix, depth = as.integer(depthmin:depthmax))))
#}
# Return the generational depth of a genealogy.
# Thin wrapper delegating to the depth() generic for GLgen objects.
gen.depth = function(gen)
{
	depth(gen)
}
# Identify the probands of a genealogy: individuals present in 'ind' that
# never appear as anyone's father or mother (i.e. subjects without children).
#
# gen : a GLgen object or raw vectors; in the latter case the 'father' and
#       'mother' arguments must be supplied by name through '...'.
#
# Returns the sorted vector of proband ids.
gen.pro = function(gen, ...) #, check = 1
{
	if(is(gen, "vector"))
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	retour = gen.detectionErreur(gen = gen, check = 1, ...)
	# Fail loudly on validation errors, consistent with gen.rec() and
	# gen.sibship(). The previous code returned the message string, which
	# callers (e.g. gen.branching) could mistake for a valid id vector.
	if(retour$erreur)
		stop(retour$messageErreur)
	gen = retour$gen
	# Probands = individuals never referenced as a parent.
	return(sort(gen$ind[is.na(match(gen$ind, c(gen$father, gen$mother)))]))
}
# Compute ancestor coverage ("recouvrement"): for each ancestor, the number
# of probands that receive a non-zero genetic contribution from it.
#
# gen       : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# pro       : proband ids, or a GLgroup of proband groups (0 = auto-detect).
# ancestors : ancestor ids to evaluate (0 = auto-detect).
#
# Returns a matrix (ancestors x groups) when 'pro' is a GLgroup, otherwise a
# one-column matrix with one count per ancestor.
gen.rec = function(gen, pro = 0, ancestors = 0, ...) #, check = 1
{
	if(is(gen, "vector"))
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, check = c(1, 5, 11), ...)
	if(retour$erreur == TRUE)
		stop(retour$messageErreur)
	gen = gen.genealogy(retour$gen)#, check = 0)
	pro = retour$pro
	ancestors = retour$ancestors
	if(is(pro, "GLgroup")) {
		nombreAncetre <- length(ancestors)
		nombreGroupe <- length(pro)
		rec <- matrix(0, nrow = nombreAncetre, ncol = nombreGroupe)
		# One column of counts per proband group. seq_len() is safe for an
		# empty group list (1:0 would iterate over c(1, 0)).
		for(i in seq_len(nombreGroupe)) {
			contr <- t(gen.gc(gen, pro[[i]], ancestors))
			# Per ancestor (row), count probands with a positive contribution.
			rec[, i] <- (contr > 0) %*% rep(1, dim(contr)[2])
		}
		dimnames(rec) <- list(ancestors, names(pro))
		return(rec)
	}
	else {
		contr <- t(gen.gc(gen, pro, ancestors))
		recouv <- (contr > 0) %*% rep(1, dim(contr)[2])
		return(recouv)
	}
}
# Variance-related genealogical-depth statistic per proband.
#
# gen  : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# pro  : proband ids (0 = auto-detect).
# type : "IND" for one value per proband (data.frame, rows "Ind <id>"),
#        "MEAN" for a single value computed over all probands at once.
gen.meangendepthVar = function(gen, pro = 0, type = "MEAN", ...)#, check = 1, named = T,
{
	# Parameter validation
	if(is(gen, "vector"))
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	retour <- gen.detectionErreur(gen = gen, pro = pro, typecomp = type, check = c(1, 5, 17))
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	gen <- retour$gen
	pro <- retour$pro
	type <- retour$typecomp
	if(type == "IND") {
		# One value per proband. The helper previously declared unused
		# arguments 'pro', 'genNo' and 'T' (the last masking TRUE); they are
		# removed here, matching the form used in gen.meangendepth().
		tableau <- sapply(pro, function(x, gen)
			GLPriv.variance3V(gen$ind, gen$father, gen$mother, pro = x), gen = gen)
		tableau <- data.frame(tableau)
		dimnames(tableau)[[1]] <- as.character(paste("Ind", as.character(pro)))
		dimnames(tableau)[[2]] <- "Mean.Gen.Depth"
		return(tableau)
	}
	else if(type == "MEAN")
		return(GLPriv.variance3V(gen$ind, gen$father, gen$mother, pro = pro))
}
#gen.entropyVar2 = function(gen, pro = 0, typeCorpus = "ECH", bfacteurCorr = F, N = NULL, check = 1, ...)
#{
# #Validation des parametres
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# if(bfacteurCorr == T)
# if(sum(N) == 0)
# stop("Correction factor must have a numerical population size value N")
# #stop("Le facteur de correction doit avoir une valeur num\351rique N taille de la population")
# if(is(gen, "vector"))
# if(length(list(...)) != 2)
# stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
# #stop("Param\350tre '...' invalide : indication du nom des param\350tres 'pere' et 'mere' est obligatoire")
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, check = c(1, 5))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# #}
# tableau = sapply(pro, function(x, gen)
# GLPriv.variance3V(gen$ind, gen$father, gen$mother, pro = x), gen = gen)
# if(typeCorpus == "ECH")
# facteurCorr = length(pro)/(length(pro) - 1)
# else if(typeCorpus == "POP")
# facteurCorr = 1
# if(bfacteurCorr == T)
# facteurCorr = (facteurCorr * (N - length(pro)))/N
# tableau = data.frame(tableau)
# tableau = data.frame(apply(tableau, 2, var) * facteurCorr)
# dimnames(tableau)[[1]] <- "Prof.varEntropie.var"
# dimnames(tableau)[[2]] <- "Prof.varEntropie.var"
# return(tableau)
#}
# 1 - gen.gc -> garde article
# 2 - gen.gcplus -> garde article
# 3 - gen.completeness -> garde article
# 4 - gen.completenessVar -> garde article
# 5 - gen.branching -> garde article
# 6 - gen.children -> garde article
# 7 - gen.meangendepth -> garde article
# 8 - gen.entropyMeanVar -> garde article
# 9 - gen.f -> garde article
# 11 - gen.fmean -> garde article
# 12 - gen.founder -> garde article
# 13 - gen.half.founder -> garde article
# 14 - gen.sibship -> garde article
# 16 - gen.genealogy -> garde article
# 17 - gen.lineages -> garde article
# 17-1 gen.genout -> garde article
# 19 - gen.implex -> garde article
# 20 - gen.implexVar -> garde article
# 21 - gen.max -> garde article
# 23 - gen.min -> garde article
# 24 - gen.mean -> garde article
# 25 - gen.nochildren -> garde article
# 26 - gen.nowomen -> garde article
# 27 - gen.nomen -> garde article
# 28 - gen.noind -> garde article
# 32 - gen.occ -> garde article
# 33 - gen.parent -> garde article
# 34 - gen.phi -> garde article
# 35 - gen.phiOver -> garde article
# 37 - gen.phiMean -> garde article
# 41 - gen.depth -> garde article
# 42 - gen.pro -> garde article
# 43 - gen.rec -> garde article
# 44 - gen.meangendepthVar -> garde article
#gen.gc = function(gen, pro = 0, ancestors = 0, typeCG = "IND", named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
#
# retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, print.it = print.it, named = named, typeCG = typeCG,
# check = c(3, 5, 11, 34, 18, 10))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# ancestors = retour$ancestors
# typeCG = retour$typeCG
## print.it = retour$print.it
# named = retour$named
#
# if(typeCG == "IND") {
# if(is(pro, "GLgroup")) {
# CG = GLPrivCG(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, print.it = FALSE, named = named)
# return(GLPrivCGgroup(CG, grppro = pro))
# }
# else return(GLPrivCG(gen = gen, pro = pro, ancestors = ancestors, print.it = FALSE, named = named))
# }
# else {
# if(is(pro, "GLgroup")) {
# CG = GLPrivCG(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, print.it = FALSE, named = named)
# CG = GLPrivCGgroup(CG, grppro = pro)
# if(typeCG == "MEAN")
# return(GLPrivCGmoyen(CG = CG, named = named))
# if(typeCG == "CUMUL")
# stop("CUMUL is not available per group")
# if(typeCG == "TOTAL")
# return(GLPrivCGtotal(CG = CG, named = named))
# if(typeCG == "PRODUCT")
# return(GLPrivCGproduit(CG = CG, named = named))
# }
# else {
# CG = GLPrivCG(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, print.it = FALSE, named =
# named)
# if(typeCG == "MEAN")
# return(GLPrivCGmoyen(CG = CG, named = named))
# if(typeCG == "CUMUL")
# return(GLPrivCGcumul(CG = CG, named = named))
# if(typeCG == "TOTAL")
# return(GLPrivCGtotal(CG = CG, named = named))
# if(typeCG == "PRODUCT")
# return(GLPrivCGproduit(CG = CG, named = named))
# }
# }
#}
# Compute genetic contributions (GC) of ancestors to probands.
#
# gen       : GLgen object.
# pro       : proband ids, or a GLgroup of proband groups (0 = auto-detect).
# ancestors : ancestor ids (0 = auto-detect).
# vctProb   : transmission probabilities forwarded to GLPrivCGPLUS.
# typeCG    : "IND" for the raw per-proband result, or an aggregation:
#             "MEAN", "CUMUL" (not available per group), "TOTAL", "PRODUCT".
gen.gc = function(gen, pro = 0, ancestors = 0, vctProb = c(0.5, 0.5, 0.5, 0.5), typeCG = "IND") #, check = 1)#named = T,
{
	retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, print.it = FALSE, named = TRUE, typeCG = typeCG, check = c(3, 5, 11, 34, 18, 10))
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	# Use the validated values returned by the checker.
	gen = retour$gen
	pro = retour$pro
	ancestors = retour$ancestors
	typeCG = retour$typeCG
	named = retour$named
	if(typeCG == "IND") {
		if(is(pro, "GLgroup")) {
			CG = GLPrivCGPLUS(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named)
			return(GLPrivCGgroup(CG, grppro = pro))
		}
		# 'vctProb' is now passed by name; it used to be positional here,
		# silently depending on the argument order of GLPrivCGPLUS.
		else return(GLPrivCGPLUS(gen = gen, pro = pro, ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named))
	}
	else {
		if(is(pro, "GLgroup")) {
			CG = GLPrivCGPLUS(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named)
			CG = GLPrivCGgroup(CG, grppro = pro)
			if(typeCG == "CUMUL")
				stop("CUMUL is not available per group")
		}
		else {
			CG = GLPrivCGPLUS(gen = gen, pro = as.numeric(unlist(pro)), ancestors = ancestors, vctProb = vctProb, print.it = FALSE, named = named)
			if(typeCG == "CUMUL")
				return(GLPrivCGcumul(CG = CG, named = named))
		}
		# Aggregations shared by the grouped and ungrouped paths.
		if(typeCG == "MEAN")
			return(GLPrivCGmoyen(CG = CG, named = named))
		if(typeCG == "TOTAL")
			return(GLPrivCGtotal(CG = CG, named = named))
		if(typeCG == "PRODUCT")
			return(GLPrivCGproduit(CG = CG, named = named))
	}
}
# Compute the completeness index of a genealogy: per generation, the
# proportion of known ancestors.
#
# gen   : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# pro   : proband ids (0 = auto-detect).
# genNo : generation numbers to report (-1 = all generations).
# type  : "MEAN" for one column averaged over all probands,
#         "IND" for one column per proband.
#
# Returns a data.frame with one row per requested generation.
gen.completeness = function(gen, pro = 0, genNo = -1, type = "MEAN", ...)#, check = 1)#named = T,
{
	# Parameter validation
	#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
	if( type != "IND" )# | type != "MOYSUJETS"
		if(is(gen, "vector"))
			if(length(list(...)) != 2)
				stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	#if(check == 1) {
	retour <- gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, typecomp = type, named = TRUE, check = c(1, 5, 16, 171, 10))
	if(retour$erreur == TRUE)
		stop(retour$messageErreur)
	# Use the validated/normalized values returned by the checker.
	gen <- retour$gen
	pro <- retour$pro
	genNo <- retour$genNo
	type <- retour$typecomp
	named <- retour$named
	#}
	# Completeness computed per proband (one column per subject)
	if(type == "IND"){ # | type == "MOYSUJETS") {
		tableau = sapply(pro, function(x, gen, genNo, named)
		{
			GLPriv.completeness3V(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named)
		}
		, gen = gen, genNo = genNo, named = named)
		# sapply() returns a plain vector when a single generation is
		# requested; coerce back to a one-row matrix so the dimnames
		# assignments below stay valid.
		if(is.null(dim(tableau))) tableau <- t(as.matrix(tableau))
	#	if(type == "MOYSUJETS") {
	#		tableau <- data.frame(apply(tableau, 1, mean))
	#		dimnames(tableau)[[1]] <- "Mean.Exp.Gen.Depth"
	#	}
		# Column names identify probands; always true inside this branch.
		if(type == "IND")
			dimnames(tableau)[[2]] <- as.character(paste("Ind", as.character(pro)))
		dimnames(tableau)[[1]] <- as.character(genNo)
		# Generation numbers become the row names
		return(data.frame(tableau))
	}
	else if(type == "MEAN") {
		# For "MEAN", completeness is computed over all probands at once
		return(GLPriv.completeness3V(gen$ind, gen$father, gen$mother, pro = pro, genNo = genNo, named = named))
	}
}
# Variance of the completeness index across probands, per generation.
#
# gen   : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# pro   : proband ids (0 = auto-detect).
# genNo : generation numbers to report (-1 = all generations).
#
# Returns a one-column data.frame ("completeness.var") with one row per
# generation.
gen.completenessVar = function(gen, pro = 0, genNo = -1, ...) #, check = 1, ...)#named = T,
{
	if(is(gen, "vector")) {
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	}
	verif = gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, named = TRUE, check = c(1, 5, 16, 10))
	if(verif$erreur == TRUE)
		stop(verif$messageErreur)
	gen = verif$gen
	pro = verif$pro
	genNo = verif$genNo
	named = verif$named
	# Correction factor fixed at 1 (sample/population corrections disabled).
	corrFactor = 1
	# One column of completeness values per proband.
	parSujet = sapply(pro, function(p, gen, genNo, named)
		GLPriv.completeness3V(gen$ind, gen$father, gen$mother, pro = p, genNo = genNo, named = named),
		gen = gen, genNo = genNo, named = named)
	# A single requested generation yields a plain vector; reshape it into a
	# one-row matrix before applying row-wise operations.
	if(is.null(dim(parSujet)))
		parSujet <- t(as.matrix(parSujet))
	# Row-wise variance across probands: one value per generation.
	resultat = data.frame(apply(parSujet, 1, var) * corrFactor)
	dimnames(resultat)[[1]] <- as.character(genNo)
	dimnames(resultat)[[2]] <- "completeness.var"
	return(resultat)
}
# Prune ("branch") a genealogy down to the lines connecting the given
# probands to the given ancestors.
#
# gen       : GLgen object.
# pro       : proband ids (0 = auto-detect via gen.pro()).
# ancestors : ancestor ids to keep (default: all founders).
# bflag     : internal recursion flag; bflag == 0 triggers a first pass that
#             restricts 'ancestors' to founders reachable from all probands.
#
# Returns a new, validated GLgen object. Stops if every proband is a founder.
gen.branching = function(gen, pro = 0, ancestors = gen.founder(gen), bflag = 0)#, check = 1)
{
	if(sum(as.numeric(pro)) == 0)
		pro = gen.pro(gen)
	if(bflag == 0) {
		# First pass (recursive call with bflag = 1): reduce 'ancestors' to
		# founders of the genealogy branched over all probands.
		pro.segment = gen.pro(gen)
		ancestors = gen.founder(gen.branching(gen, pro.segment, ancestors, bflag = 1))
	}
	#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
	#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
	#if(check == 1) {
	retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, check = c(3, 36, 37))
	if(retour$erreur)
		stop(retour$messageErreur)
	gen = retour$gen
	pro = retour$pro
	ancestors = retour$ancestors
	#}
	# Buffers that receive the branched genealogy from the compiled routine;
	# tmpNelem gets the number of slots actually used.
	#print(paste("taille alloue:",length(gen@.Data)))
	tmpgen <- integer(length(gen@.Data))
	tmpNelem <- integer(1)
	#print(".C(SPLUSebranche,.. commence")
	.Call("SPLUSebranche", gen@.Data, pro, length(pro), ancestors, length(ancestors), tmpgen, tmpNelem, specialsok = T)
	#print(".C(SPLUSebranche,.. fait:")
	#print(paste(length(tmpgen),tmpgen[1],tmpgen[2],tmpgen[3] ))
	#print(paste(length(gen@.Data),gen@.Data[1],gen@.Data[2],gen@.Data[3]))
	# Truncate the buffer to the filled portion.
	length(tmpgen) <- tmpNelem
	tmpNelem <- length(tmpgen)
	#print(length(tmpgen))
	ebranche = new("GLgen", .Data = tmpgen, Date = date())
	#print("1")
	# Export back to a pedigree table to post-process the result.
	ebranche.asc = gen.genout(ebranche)
	sexeAbsent=FALSE
	# Drop the sex column when it contains codes other than 1/2/"H"/"F".
	if(length(setdiff(unique(ebranche.asc[,"sex"]), c(1,2,"H","F")))>0)
	{
		diff = setdiff(unique(ebranche.asc[,"sex"]), c(1,2,"H","F"))
		ebranche.asc=data.frame(ind=ebranche.asc$ind,father=ebranche.asc$father,mother=ebranche.asc$mother) #*****
		sexeAbsent=TRUE
		#warning(paste("la colonne \"sexe\" contient des valeurs non valide:",diff,"\n Elle ne sera pas consideree pour le reste des calculs."))
		warning(paste("The \"sex\" column contains invalid values:",diff,
				"\nThe column won't be considered for further calculations."))
	}
	#print("2")
	#print(ebranche.asc[1,])
	pro.ebranche = gen.pro(ebranche)
	#print("3")
	# Remove childless individuals that were not requested as probands.
	pro.enTrop = setdiff(pro.ebranche, pro)
	#print(paste(length(pro.ebranche),length(pro)))
	#print(pro.enTrop)
	if(sum(as.numeric(pro.enTrop)) != 0) {
		ebranche.asc = ebranche.asc[(!(ebranche.asc$ind %in% pro.enTrop)), ]
		#ebranche.asc=data.frame(ind=ebranche.asc$ind,father=ebranche.asc$father,mother=ebranche.asc$mother) #*****
		ebranche = gen.genealogy(ebranche.asc)
		#print(ebranche.asc)
		pro.ebranche = gen.pro(ebranche)
	}
	#print("4")
	# Also remove probands that ended up as founders (isolated individuals).
	fond.ebranche = gen.founder(ebranche)
	#print("5")
	pro.quiSontFond = pro.ebranche[pro.ebranche %in% fond.ebranche]
	#print(paste("6", dim(ebranche.asc)))
	ebranche.asc = ebranche.asc[(!(ebranche.asc$ind %in% pro.quiSontFond)), ]
	#print(paste("7", dim(ebranche.asc)))
	#ebranche.asc=data.frame(ind=ebranche.asc$ind,father=ebranche.asc$father,mother=ebranche.asc$mother)#*****
	if(dim(ebranche.asc)[1]==0) stop("No branching possible, all probands are founders.")
	else gen = gen.genealogy(ebranche.asc)
	#print("8")
	gen.validationAsc(gen)
	#print("9")
	return(gen)
}
# Return the ids of all children of the given individuals.
#
# gen         : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# individuals : ids whose children are requested.
#
# Returns the unique ids of every individual whose mother or father belongs
# to 'individuals'.
gen.children = function(gen, individuals, ...)#, check = 1)
{
	if(is(gen, "vector")) {
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	}
	verif = gen.detectionErreur(gen = gen, individuals = individuals, check = c(1, 13), ...)
	if(verif$erreur == TRUE)
		stop(verif$messageErreur)
	gen = verif$gen
	individuals = verif$individuals
	# An individual is a child when one of its parents is in 'individuals'.
	enfantsParMere <- gen$ind[gen$mother %in% individuals]
	enfantsParPere <- gen$ind[gen$father %in% individuals]
	return(unique(c(enfantsParMere, enfantsParPere)))
}
# Expected (mean) genealogical depth of probands.
#
# gen  : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# pro  : proband ids (0 = auto-detect).
# type : "IND" for one value per proband (rows named "Ind <id>"),
#        "MEAN" for a single value computed over all probands.
gen.meangendepth = function(gen, pro = 0, type = "MEAN", ...)#, check = 1)
{
	# Parameter validation
	if(is(gen, "vector")) {
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	}
	verif <- gen.detectionErreur(gen = gen, pro = pro, typecomp = type, check = c(1, 5, 17))
	if(verif$erreur == TRUE)
		stop(verif$messageErreur)
	gen <- verif$gen
	pro <- verif$pro
	type <- verif$typecomp
	if(type == "IND") {
		# One expected depth per proband.
		profondeurs <- sapply(pro, function(p, gen)
			GLPriv.entropie3V(gen$ind, gen$father, gen$mother, pro = p), gen = gen)
		resultat <- data.frame(profondeurs)
		dimnames(resultat)[[1]] <- as.character(paste("Ind", as.character(pro)))
		dimnames(resultat)[[2]] <- "Exp.Gen.Depth"
		return(resultat)
	}
	else if(type == "MEAN")
		return(GLPriv.entropie3V(gen$ind, gen$father, gen$mother, pro = pro))
}
#gen.entropyMeanVar = function(gen, pro = 0, check = 1, ...) #typeCorpus = "ECH", bfacteurCorr = F, N = NULL,
#{
# #Validations des parametres
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
## if(bfacteurCorr == T)
# if(sum(N) == 0)
## stop("Correction factor must have a numerical population size value N")
# if(is(gen, "vector"))
# if(length(list(...)) != 2)
# stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
#
# retour = gen.detectionErreur(gen = gen, pro = pro, check = c(1, 5))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
#
# tableau = sapply(pro, function(x, gen)
# GLPriv.entropie3V(gen$ind, gen$father, gen$mother, pro = x), gen = gen)
## if(typeCorpus == "ECH")
## facteurCorr = length(pro)/(length(pro) - 1)
## else if(typeCorpus == "POP")
## facteurCorr = 1
## if(bfacteurCorr == T)
## facteurCorr = (facteurCorr * (N - length(pro)))/N
#
# facteurCorr = 1
# tableau = data.frame(tableau)
# tableau = data.frame(apply(tableau, 2, var) * facteurCorr)
# dimnames(tableau)[[1]] <- "Mean.Exp.Gen.Depth.Var"
# dimnames(tableau)[[2]] <- "Exp.Gen.Depth"
# return(tableau)
#}
#gen.f = function(gen, pro = 0, nbgenerations = 0, named = T, check = 1)
#{
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# retour = gen.detectionErreur(gen = gen, pro = pro, nbgenerations = nbgenerations, print.it = FALSE, named = named, check = c(3, 5, 19, 18, 10))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# nbgenerations = retour$nbgenerations
## print.it = retour$print.it
# named = retour$named
#
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# tmp <- double(length(pro))
#
# #Call de la fonction en C
# .Call("SPLUSF", gen@.Data, pro, length(pro), nbgenerations, tmp, FALSE, specialsok = T)
# names(tmp) <- pro
## if(print.it) {
## base <- c(deparse(substitute(gen)), deparse(substitute(pro)), nbgenerations)
## header.txt <- paste("\n\t*** Calls : gen.F (", base[1], ",", base[2], ",", base[3], ") ***\n\n")
## cat(header.txt)
## }
# return(invisible(tmp))
#}
#gen.fmean = function(vectF, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# #if(check == 1) {
# retour = gen.detectionErreur(vectF = vectF, named = named, check = c(33, 10))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# vectF = retour$vectF
# named = retour$named
# #}
# #Test pour accelerer la procedure
# return(GLapplyF(vectF, mean, named = named))
#}
# Return the founders of a genealogy: individuals with neither a known
# father nor a known mother (both parent ids equal 0).
#
# NOTE: on a validation failure this function returns the error message
# (historical behavior) rather than stopping.
gen.founder = function(gen, ...)#, check = 1)
{
	if(is(gen, "vector")) {
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	}
	verif = gen.detectionErreur(gen = gen, ..., check = 1)
	if(verif$erreur == TRUE)
		return(verif$messageErreur)
	gen = verif$gen
	# Founder = no known parent at all.
	sansParent = (gen$father == 0) & (gen$mother == 0)
	return(gen$ind[sansParent])
}
# Return the half-founders of a genealogy: individuals with exactly one
# known parent (father or mother id is 0, but not both).
#
# NOTE: on a validation failure this function returns the error message
# (historical behavior) rather than stopping.
gen.half.founder = function(gen, ...)#, check = 1)
{
	if(is(gen, "vector")) {
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	}
	verif = gen.detectionErreur(gen = gen, ..., check = 1)
	if(verif$erreur == TRUE)
		return(verif$messageErreur)
	gen = verif$gen
	# Exactly one of the two parents is known.
	unSeulParent = xor(gen$father != 0, gen$mother != 0)
	return(gen$ind[unSeulParent])
}
# Return the siblings of the given individuals.
#
# gen         : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# individuals : proband ids whose siblings are requested.
# halfSibling : if TRUE, individuals sharing only one parent (half-siblings)
#               are included; if FALSE, both parents must match.
#
# Returns sibling ids with the probands themselves removed.
gen.sibship = function(gen, individuals, halfSibling = TRUE, ...)#, check = 1)
{
	if(is(gen, "vector"))
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	retour = gen.detectionErreur(gen = gen, individuals = individuals, halfSibling = halfSibling, check = c(1, 13, 14), ...)
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	gen = retour$gen
	individuals = retour$individuals
	halfSibling = retour$halfSibling
	# Known (non-zero, non-NA) mothers and fathers of the probands. This
	# replaces the old (Meres/MaskMere)[!is.na(Meres/MaskMere)] trick, which
	# filtered via 0/0 == NaN and coerced the ids to double as a side effect.
	PositionProband = match(individuals, gen$ind)
	Meres <- gen$mother[PositionProband]
	Peres <- gen$father[PositionProband]
	Meres <- Meres[!is.na(Meres) & Meres != 0]
	Peres <- Peres[!is.na(Peres) & Peres != 0]
	if(halfSibling == TRUE) {
		# Any child of one of these mothers OR fathers is a (half-)sibling.
		sibshipMo <- gen.children(gen, individuals = Meres)
		sibshipFa <- gen.children(gen, individuals = Peres)
		sibshipAndProband <- unique(c(sibshipMo, sibshipFa))
	}
	else {
		# Full siblings only: the mother AND the father must both belong to
		# the probands' parent sets (NA * anything stays NA, so a miss on
		# either side excludes the individual).
		posMere <- match(gen$mother, Meres)
		posPere <- match(gen$father, Peres)
		sibshipAndProband <- gen$ind[!is.na(posMere * posPere)]
	}
	# Remove the probands themselves from the result.
	sibship <- sibshipAndProband[is.na(match(sibshipAndProband, individuals))]
	return(sibship)
}
# Compute inbreeding coefficients (F) for probands over a range of depths.
#
# gen      : GLgen object.
# pro      : proband ids; defaults to gen.pro(gen) when missing.
# depthmin : shallowest genealogical depth considered (default: full depth - 1).
# depthmax : deepest genealogical depth considered (default: full depth - 1).
#
# Returns (invisibly) a GLmulti object with one value per proband and depth.
gen.f = function(gen, pro, depthmin= (gen.depth(gen)-1), depthmax= (gen.depth(gen)-1)) #, check = 1)#named = T,
{
	#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
	if(missing(pro)) pro = gen.pro(gen)
	retour = gen.detectionErreur(gen = gen, pro = pro, depthmin = depthmin, depthmax = depthmax, print.it = FALSE, named = TRUE, 
		check = c(3, 5, 20, 18, 10))
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	# Use the validated values returned by the checker.
	gen = retour$gen
	pro = retour$pro
	depthmin = retour$depthmin
	depthmax = retour$depthmax
	named = retour$named
	# Output buffer for the compiled routine: one value per proband for each
	# of the 'ecart' requested depths.
	ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
	tmp <- double(length(pro) * ecart)
	# Call the C routine
	.Call("SPLUSFS", gen@.Data, pro, length(pro), depthmin, depthmax, tmp, FALSE, specialsok = TRUE)
	# Build the return matrix (probands x depths); drop() collapses it to a
	# plain vector when a single depth is requested.
	dim(tmp) <- c(length(pro), ecart)
	dimnames(tmp) <- list(pro, NULL)
	tmp = drop(tmp)
	return(invisible(GLmulti(tmp, depth = as.integer(depthmin:depthmax))))
}
# Build a GLgen genealogy object from a pedigree table.
#
# ped          : data.frame/matrix with columns ind, father, mother, sex
#                (an unnamed 4-column table is assumed to be in that order),
#                or an already-built GLgen object (revalidated and rewrapped).
# autoComplete : if TRUE, parents referenced in 'father'/'mother' but absent
#                from 'ind' are appended as founders (sex 1 for fathers,
#                sex 2 for mothers).
# ...          : forwarded to gen.detectionErreur().
#
# Returns a new GLgen object.
gen.genealogy = function(ped, autoComplete=FALSE, ...)#, check = 1)
{
	if(!(is(ped, "GLgen"))) {
		if(dim(ped)[2]==4 && sum(colnames(ped)==c("X1","X2","X3","X4"))==4) {
			print("No column names given. Assuming <ind>, <father>, <mother> and <sex>")
			colnames(ped) <- c("ind", "father", "mother", "sex")
		}
		if(sum(c("ind","father","mother","sex") %in% colnames(ped)) < 4){
			stop(paste(paste(c("ind","father","mother","sex")[grep(FALSE,c("ind","father","mother","sex") %in% colnames(ped))]),
				"not in table columns.",collapse=""))
		}
		# Append missing fathers as founders. '&&' (scalar) replaces the old
		# elementwise '&'; both operands are length-1 here.
		if(autoComplete && !all(is.element(ped[ped[,"father"]!=0,"father"], ped[,"ind"]))) {
			pereManquant <- unique(ped[which(!is.element(ped[,"father"], ped[,"ind"])),"father"])
			# setdiff() drops the 0 placeholder safely. The previous form,
			# x[-grep("^0$", x)], emptied the whole vector when no 0 was
			# present (x[-integer(0)] selects nothing), silently disabling
			# autoComplete.
			pereManquant <- setdiff(pereManquant, 0)
			ajout <- matrix(c(pereManquant, rep(0, (2*length(pereManquant))), rep(1,length(pereManquant))), byrow=FALSE, ncol=4)
			colnames(ajout) <- colnames(ped)
			ped <- rbind(ped, ajout)
		}
		# Same handling for missing mothers (sex code 2).
		if(autoComplete && !all(is.element(ped[ped[,"mother"]!=0,"mother"], ped[,"ind"]))) {
			mereManquante <- unique(ped[which(!is.element(ped[,"mother"], ped[,"ind"])),"mother"])
			mereManquante <- setdiff(mereManquante, 0)
			ajout <- matrix(c(mereManquante, rep(0, (2*length(mereManquante))), rep(2,length(mereManquante))), byrow=FALSE, ncol=4)
			colnames(ajout) <- colnames(ped)
			ped <- rbind(ped, ajout)
		}
	}
	retour = gen.detectionErreur(gen = ped, check = 1, ...)
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	gen = retour$gen
	tmp2 <- NULL
	if(!is.null(gen$sex)) {
		# Normalize sex codes: "H"/"h"/1 -> 1, "F"/"f"/2 -> 2.
		tmp <- factor(gen$sex, levels = c("H", "h", 1, "F", "f", 2))
		tmp2 <- as(tmp, "integer")
		tmp2[tmp2 == 2 | tmp2 == 3] <- 1
		tmp2[tmp2 == 4 | tmp2 == 5 | tmp2 == 6] <- 2
	}
	n <- .Call("SPLUSCALLCreerObjetGenealogie", gen$ind, gen$father, gen$mother, tmp2)
	# Wrap the C-level structure in a GLgen S4 object.
	return(new("GLgen", .Data = n, Date = date()))
}
# Extract the maternal (or paternal) lineages of a genealogy.
#
# ped      : pedigree table (ind/father/mother/sex).
# pro      : proband ids (0 = auto-detect individuals without children).
# maternal : if TRUE keep mother-lines (fathers zeroed), otherwise
#            father-lines (mothers zeroed).
#
# Returns a GLgen object restricted to the uniparental lines of the probands.
gen.lineages = function(ped, pro = 0, maternal = TRUE, ...)#, check = 1
{
	# Build a GLgen object holding the full ascendance
	gen = gen.genealogy(ped, ...) #check = check,
	# Validate the 'gen' and 'pro' parameters
	retour = gen.detectionErreur(gen = gen, pro = pro, check = c(3, 36))
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	gen = retour$gen
	pro = retour$pro
	# If no subjects were forced, default to individuals without children
	if(sum(pro == 0)) data.ind = gen.pro(gen) else data.ind = pro
	# For maternal lineages all fathers are zeroed out; otherwise the mothers
	if(maternal == TRUE) {
		ped$father = rep(0, length(ped$father))
	#	output = "M"
	}
	else {
		ped$mother = rep(0, length(ped$mother))
	#	output = "F"
	}
	# Build a GLgen object with the fathers (or mothers) set to 0
	genMouP = gen.genealogy(ped, ...) #, check = check
	lig.parent.lst = c(data.ind)
	# Walk up one generation at a time from the subjects, collecting the
	# uniparental ancestors at every depth
	for(i in 1:gen.depth(gen)) {
		data.ind = unlist(gen.parent(genMouP, data.ind))
		lig.parent.lst = c(lig.parent.lst, data.ind)
	}
	# Keep only the individuals of the ascendance table that were collected
	gen = gen.genealogy(ped[(ped$ind %in% lig.parent.lst), ], ...) #, check = check
	# Return the GLgen lineage object
	return(gen)
}
# Export a GLgen object back to a pedigree data.frame.
#
# gen    : GLgen object.
# sorted : if TRUE, the C routine returns the rows sorted.
#
# Returns (invisibly) a data.frame with columns ind, father, mother, sex.
gen.genout = function(gen, sorted = FALSE)#, check = 1)
{
	#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
	#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
	#if(check == 1) {
	retour = gen.detectionErreur(gen = gen, sorted = sorted, check = c(3, 4))
	if(retour$erreur == TRUE) stop(retour$messageErreur)
	gen = retour$gen
	sorted = retour$sorted
	#}
	# Buffers that receive the result from the compiled routine
	#print(paste(" ? ",gen@.Data[9]))
	taille <- gen.noind(gen)
	v <- list(ind = integer(taille), father = integer(taille), mother = integer(taille), sex = integer(taille))
	#extern "C" void SPLUSOutgen
	#(long* genealogie, long* plRetIndividu,long* plRetPere,long* plRetMere,long* mustsort)
	#param <- list(Data=gen@.Data, ind=v$ind, father=v$father, mother=v$mother, sex=v$sex, sorted=sorted)
	#param = .Call("SPLUSOutgen", param, NAOK = T)
	param = .Call("SPLUSOutgen", gen@.Data, v$ind, v$father, v$mother, v$sex, sorted)
	# Rebuild the list from the values returned by the call
	v <- list(ind = param$ind, father = param$father, mother = param$mother, sex = param$sex)
	# If numeric sex codes are present they could be mapped to "H"/"F"
	#if(v$sex[1] == -1) v <- v[1:3]
	#else v[[4]] <- factor(v[[4]], labels = c("H", "F"))
	return(invisible(data.frame(v)))
}
# Compute the implex index: the number of distinct ancestors per generation.
#
# gen        : GLgen object (or raw vectors with 'father'/'mother' named in ...).
# pro        : proband ids (0 = auto-detect).
# genNo      : generation numbers to report (-1 = all generations).
# type       : "IND" (one column per proband), "MEAN" (average over probands)
#              or "ALL" (all probands pooled in a single computation).
# onlyNewAnc : if TRUE, ancestors are only counted at their first appearance
#              (gen.implex3V); otherwise they are counted in every generation
#              where they occur (GLPriv.implex3V).
#
# Returns a data.frame with one row per generation.
gen.implex = function(gen, pro = 0, genNo = -1, type = "MEAN", onlyNewAnc = FALSE, ...)#, check = 1 named = T,
{
	# Parameter validation
	#if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
	#stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
	if(is(gen, "vector"))
		if(length(list(...)) != 2)
			stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
	#stop("Param\350tre '...' invalide : indication du nom des param\350tres 'pere' et 'mere' est obligatoire")
	#if(check == 1) {
	retour <- gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, typecomp = type, named = TRUE, check = c(1, 5, 16, 17, 10))
	if(retour$erreur == TRUE)
		stop(retour$messageErreur)
	gen <- retour$gen
	pro <- retour$pro
	genNo <- retour$genNo
	named <- retour$named
	type <- retour$typecomp
	#}
	# Ancestors can reappear across generations; pick the helper depending on
	# whether distinct ancestors are counted everywhere or only when new.
	if(onlyNewAnc == FALSE) fctApp <- GLPriv.implex3V else fctApp <- gen.implex3V
	# Compute the implex per subject
	if(type == "IND" | type == "MEAN") {
		tableau = sapply(pro, function(x, gen, genNo, fctApp, named)
		{
			fctApp(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named)
		}
		, gen = gen, genNo = genNo, fctApp = fctApp, named = named)
		# A single requested generation yields a plain vector; make it a
		# one-row matrix so the dimnames assignments below stay valid.
		if(is.null(dim(tableau))) tableau <- t(as.matrix(tableau))
		# Average over probands (per generation) when "MEAN" was requested
		if(type == "MEAN") tableau = data.frame(apply(tableau, 1, mean))
		#if(named == T)
		#dimnames(tableau)[[2]] <- "implex"
		# NOTE(review): for type == "IND" 'tableau' is a matrix here, so this
		# sets a names attribute padded with NA; it only names the single
		# column meaningfully in the "MEAN" case. Preserved as-is.
		names(tableau) <- "implex"
		if(type == "IND") dimnames(tableau)[[2]] <- as.character(paste("Ind", as.character(pro)))
		dimnames(tableau)[[1]] <- as.character(genNo)
		return(data.frame(tableau))
	}
	else if(type == "ALL")
		return(fctApp(gen$ind, gen$father, gen$mother, pro = pro, genNo = genNo, named = named))
}
gen.implexVar = function(gen, pro = 0, onlyNewAnc = FALSE, genNo = -1, ...)
{
# Variance of the implex across probands, per generation.
#
# Args:
#   gen:        GLgen object, or an ind vector with 'father' and 'mother'
#               supplied through '...'.
#   pro:        proband ids (0 = defaults resolved by gen.detectionErreur).
#   onlyNewAnc: if TRUE, count only ancestors at their first appearance
#               (gen.implex3V) instead of all distinct ancestors.
#   genNo:      generation numbers to report (-1 presumably means all --
#               TODO confirm).
#
# Returns: one-column data.frame "implex.var", one row per generation.
#Parameter validation
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen = gen, pro = pro, genNo = genNo, named = TRUE, check = c(1, 5, 16, 10))
if(retour$erreur == TRUE) stop(retour$messageErreur)
gen = retour$gen
pro = retour$pro
genNo = retour$genNo
named = retour$named
# Choose the worker: all distinct ancestors vs. new-only per generation.
if(onlyNewAnc == FALSE) fctApp <- GLPriv.implex3V else fctApp <- gen.implex3V
# Correction factor: historical sample/population correction is disabled;
# kept as a constant 1 multiplier.
facteurCorr = 1
# One column per proband; rows are generations.
tableau = sapply(pro, function(x, gen, fctApp, genNo, named) {
fctApp(gen$ind, gen$father, gen$mother, pro = x, genNo = genNo, named = named)
}, gen = gen, fctApp = fctApp, genNo = genNo, named = named)
# A single generation yields a plain vector; coerce back to a 1-row matrix.
if(is.null(dim(tableau))) tableau <- t(as.matrix(tableau))
# Variance across probands for each generation.  NOTE(review): var() returns
# NA when length(pro) == 1.
tableau = data.frame(apply(tableau, 1, var) * facteurCorr)
dimnames(tableau)[[1]] <- as.character(genNo)
dimnames(tableau)[[2]] <- "implex.var"
return(tableau)
}
#gen.max = function(gen, individuals, named = T, check = 1)
#{
# #On appel la fonction qui permet d'avoir
# #le numero de generation de tout les individuals
# dfData.numgen = gen.generation(gen, as.integer(individuals))
# dfResult = as.data.frame(as.numeric(names(dfData.numgen))) #named.index.rowcol( dfData.numgen, "numeric")
# dfResult[, 2] = dfData.numgen
# dimnames(dfResult)[[2]] <- c("ind", "numgen")
# return(dfResult)
#}
gen.max = function(gen, individuals)
{
    # Maximum generation number reached by each requested individual, computed
    # by the compiled routine SPLUSnumeroGen.  Returns a named integer vector
    # (names are the individual ids).
    checked <- gen.detectionErreur(gen = gen, individuals = individuals, ancestors = 0, named = TRUE, check = c(3, 13, 10))
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    individuals <- checked$individuals
    # Preallocate the buffer filled in by the compiled routine.
    n <- length(individuals)
    result <- integer(n)
    # extern "C" void SPLUSnumeroGen(long* Genealogie, long* lpProband, NProband, retour)
    .Call("SPLUSnumeroGen", gen@.Data, as.integer(individuals), n, result)
    names(result) <- individuals
    result
}
gen.min = function(gen, individuals)
{
    # Minimum generation number reached by each requested individual, computed
    # by the compiled routine SPLUSnumGenMin.  Returns a named integer vector
    # (names are the individual ids).
    checked <- gen.detectionErreur(gen = gen, individuals = individuals, ancestors = 0, named = TRUE, check = c(3, 13, 10))
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    individuals <- checked$individuals
    # Preallocate the buffer filled in by the compiled routine.
    n <- length(individuals)
    result <- integer(n)
    # extern "C" void SPLUSnumGenMin(long* Genealogie, long* lpProband, NProband, retour)
    .Call("SPLUSnumGenMin", gen@.Data, as.integer(individuals), n, result)
    names(result) <- individuals
    result
}
gen.mean = function(gen, individuals)
{
    # Mean generation number of each requested individual, computed by the
    # compiled routine SPLUSnumGenMoy.  Returns a named numeric vector
    # (names are the individual ids).
    checked <- gen.detectionErreur(gen = gen, individuals = individuals, ancestors = 0, named = TRUE, check = c(3, 13, 10))
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    individuals <- checked$individuals
    # Preallocate the buffer filled in by the compiled routine (double, since
    # a mean is fractional).
    n <- length(individuals)
    result <- double(n)
    # extern "C" void SPLUSnumGenMoy(long* Genealogie, long* lpProband, NProband, retour)
    .Call("SPLUSnumGenMoy", gen@.Data, as.integer(individuals), n, result)
    names(result) <- individuals
    result
}
gen.nochildren = function(gen, individuals)
{
    # Number of children of each requested individual, computed by the
    # compiled routine SPLUSChild.  Returns a named integer vector
    # (names are the individual ids).
    checked <- gen.detectionErreur(gen = gen, individuals = individuals, named = TRUE, check = c(3, 13, 10))
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    individuals <- checked$individuals
    # Preallocate the buffer filled in by the compiled routine.
    counts <- integer(length(individuals))
    # extern "C" void SPLUSChild(long* Genealogie, long* plProband, long* lNProband, long* retour)
    .Call("SPLUSChild", gen@.Data, individuals, length(individuals), counts, specialsok = TRUE)
    names(counts) <- individuals
    counts
}
gen.nowomen = function(gen)
{
    # Number of women in the genealogy.  Slot 9 of the internal data vector
    # holds the total individual count and slot 12 presumably the number of
    # men (-1 when sex information is absent) -- derived from gen.noind /
    # gen.nomen; TODO confirm against the internal layout.
    checked <- gen.detectionErreur(gen = gen, check = 3)
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    if (gen@.Data[12] == -1) NA else gen@.Data[9] - gen@.Data[12]
}
gen.nomen = function(gen)
{
    # Number of men in the genealogy.  Slot 12 of the internal data vector
    # presumably holds the male count; -1 signals that sex information is
    # absent, in which case NA is returned -- TODO confirm internal layout.
    checked <- gen.detectionErreur(gen = gen, check = 3)
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    if (gen@.Data[12] == -1) NA else gen@.Data[12]
}
gen.noind = function(gen)
{
    # Total number of individuals in the genealogy (slot 9 of the internal
    # data vector of the GLgen object).
    checked <- gen.detectionErreur(gen = gen, check = c(3))
    if (checked$erreur) stop(checked$messageErreur)
    checked$gen@.Data[9]
}
gen.occ = function(gen, pro = 0, ancestors = 0, typeOcc = "IND", ...)
{
    # Number of occurrences of each ancestor in the ascending genealogies of
    # the probands.
    #
    # Args:
    #   gen:       GLgen object, or an ind vector with 'father' and 'mother'
    #              supplied through '...'.
    #   pro:       proband ids, or a GLgroup of proband id vectors.
    #   ancestors: ancestor ids whose occurrences are counted.
    #   typeOcc:   "IND" = one column of counts per proband; "TOTAL" = summed
    #              counts.  NOTE: ignored when 'pro' is a GLgroup (that branch
    #              always returns one column per group, as before).
    #
    # Returns: a matrix (ancestors x probands/groups) of occurrence counts,
    # or a one-column data.frame "nb.occ" when typeOcc == "TOTAL".
    if(is(gen, "vector"))
        if(length(list(...)) != 2)
            stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
    retour = gen.detectionErreur(gen, pro = pro, ancestors = ancestors, check = c(1, 5, 11), ...)
    if(retour$erreur == TRUE)
        stop(retour$messageErreur)
    gen = retour$gen
    pro = retour$pro
    ancestors = retour$ancestors
    if(is(pro, "GLgroup")) {
        # Probands split into groups: one column of counts per group.
        occurences <- matrix(0, nrow = length(ancestors), ncol = length(pro))
        for(i in seq_along(pro))
            occurences[, i] <- GLPrivOcc(gen, pro = pro[[i]], ancestors = ancestors)
        dimnames(occurences) <- list(ancestors, names(pro))
        return(occurences)
    }
    else {
        # Probands considered individually: one column per proband.
        occurences <- matrix(0, nrow = length(ancestors), ncol = length(pro))
        for(i in seq_along(pro))
            occurences[, i] <- GLPrivOcc(gen, pro = pro[i], ancestors = ancestors)
        dimnames(occurences) <- list(ancestors, pro)
        if(typeOcc == "IND")
            return(occurences)
        else if(typeOcc == "TOTAL") {
            dfResult.occtot = data.sum(as.data.frame(occurences))
            dimnames(dfResult.occtot)[[1]] <- dimnames(occurences)[[1]]
            dimnames(dfResult.occtot)[[2]] <- c("nb.occ")
            return(dfResult.occtot)
        }
        else
            # Fix: signal an error on an invalid 'typeOcc' instead of printing
            # a message and silently returning NULL.
            stop("Please choose between \"IND\" and \"TOTAL\" for the variable typeOcc.")
    }
}
gen.parent = function(gen, individuals, output = "FaMo", ...)
{
    # Fathers and/or mothers of the given individuals.
    #
    # output: "FaMo" -> list(Fathers = ..., Mothers = ...);
    #         "Fa"   -> fathers only;  "Mo" -> mothers only.
    # Unknown parents (NA or non-positive ids) are excluded; each parent id
    # appears at most once.
    if (is(gen, "vector"))
        if (length(list(...)) != 2)
            stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
    checked <- gen.detectionErreur(gen = gen, individuals = individuals, output = output, check = c(1, 13, 15), ...)
    if (checked$erreur) stop(checked$messageErreur)
    gen <- checked$gen
    individuals <- checked$individuals
    output <- checked$output
    # Look up each requested individual, then collect its parents.
    pos <- match(individuals, gen$ind)
    fathers <- gen$father[pos]
    mothers <- gen$mother[pos]
    fathers <- unique(fathers[!is.na(fathers)])
    mothers <- unique(mothers[!is.na(mothers)])
    if (output == "FaMo")
        return(list(Fathers = fathers[fathers > 0], Mothers = mothers[mothers > 0]))
    if (output == "Fa")
        return(fathers[fathers > 0])
    if (output == "Mo")
        return(mothers[mothers > 0])
}
#gen.phi = function(gen, pro = 0, nbgenerations = 0, print.it = F, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, nbgenerations = nbgenerations, print.it = print.it, named =
# named, check = c(3, 5, 19, 18, 10))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# if(length(retour$pro) < 2)
# stop("Invalid 'pro' parameter: must be a numerical vector of at least 2 proband")
# #stop("Param\350tre 'prop' invalide: doit \352tre un vecteur num\351rique de 2 proposants minimum")
# gen = retour$gen
# pro = retour$pro
# nbgenerations = retour$nbgenerations
# print.it = retour$print.it
# named = retour$named
# #}
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# tmp <- double(length(pro) * length(pro))
# #extern "C" void SPLUSPhiMatrix(long* Genealogie,long* proband, long *NProband,long *Niveau,double* pdRetour, long *printit)
# #Call de la fonction en C
# .Call("SPLUSPhiMatrix", gen@.Data, pro, length(pro), as.integer(nbgenerations), tmp, print.it, specialsok = T)
# dim(tmp) <- c(length(pro), length(pro))
# #if(named)
# dimnames(tmp) <- list(pro, pro)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), nbgenerations)
# header.txt <- paste("*** Calls : gen.phi (", base[1], ",", base[2], ",", base[3], ") ***")
# cat(header.txt, "\n")
# }
# return(invisible(tmp))
#}
gen.phiOver = function(phiMatrix, threshold)
{
    # Lists the pairs of probands whose kinship coefficient is >= threshold.
    #
    # Args:
    #   phiMatrix: square kinship matrix (as produced by gen.phi); row/column
    #              dimnames, when present, are taken as numeric proband ids.
    #   threshold: minimum kinship value to report.
    #
    # Returns: data.frame(line, column, pro1, pro2, kinship), one row per
    # retained pair.  Self-kinship entries (>= 0.5, i.e. the diagonal) and the
    # redundant lower triangle are discarded first.
    # NOTE(review): 'line' carries the column index and 'column' the row index
    # of each retained cell (the index matrix is column-constant); for a
    # symmetric kinship matrix pro1/pro2 are still a correct pair -- preserved
    # as-is pending confirmation of the intended labelling.
    if(!is.matrix(phiMatrix))
        # Fix: signal an error instead of returning the message as a value.
        stop("'phiMatrix' must be a kinship matrix")
    n = dim(phiMatrix)[1]
    # Drop self-kinship (diagonal, >= 0.5) and the lower triangle so each
    # pair is reported once.
    phiMatrix[phiMatrix >= 0.5] = 0
    phiMatrix[lower.tri(phiMatrix)] = 0
    ind = dimnames(phiMatrix)[[1]]
    indices = matrix(rep(1:n, each = n), n, n)
    ran = indices[phiMatrix >= threshold]
    col = t(indices)[phiMatrix >= threshold]
    # Without dimnames, proband ids default to 1..n.
    if(is.null(ind))
        ind = 1:n
    else ind = as.numeric(ind)
    data.frame(line = ran, column = col, pro1 = ind[ran], pro2 = ind[col], kinship = phiMatrix[phiMatrix >= threshold])
}
gen.phiMean = function(phiMatrix)
{
    # Mean kinship over all pairs of probands, excluding self-kinship entries
    # (values >= 0.5, i.e. the diagonal).
    checked <- gen.detectionErreur(matricephi = phiMatrix, named = TRUE, check = c(28, 10))
    if (checked$erreur) stop(checked$messageErreur)
    phiMatrix <- checked$matricephi
    named <- checked$named
    # Fast path for a plain matrix; otherwise delegate to the per-depth helper.
    if (inherits(phiMatrix, "matrix"))
        mean(phiMatrix[phiMatrix < 0.5])
    else
        GLapplyPhi(phiMatrix, function(values) mean(values[values < 0.5]), named = named)
}
#gen.phiMT = function(gen, pro = 0, nbgenerations = 0, print.it = F, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, nbgenerations = nbgenerations, print.it = print.it, named = named, check = c(3, 5, 19, 18, 10))
# if(retour$erreur == T)
# return(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# nbgenerations = retour$nbgenerations
# print.it = retour$print.it
# named = retour$named
# #}
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# tmp <- double(length(pro) * length(pro))
# #extern "C" void SPLUSPhiMatrixMT(long* Genealogie,long* proband,long *NProband,long *Niveau,double* pdRetour, long *printit)
# .Call("SPLUSPhiMatrixMT", gen@.Data, pro, length(pro), as.integer(nbgenerations), tmp, print.it, specialsok = T)
# dim(tmp) <- c(length(pro), length(pro))
# #if(named)
# dimnames(tmp) <- list(pro, pro)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), nbgenerations)
# header.txt <- paste("*** Calls : gen.phiMT (", base[1], ",", base[2], ",", base[3], ") ***")
# cat(header.txt, "\n")
# }
# return(invisible(tmp))
#}
gen.phi = function(gen, pro, depthmin = (gen.depth(gen)-1), depthmax = (gen.depth(gen)-1), MT = FALSE)
{
    # Kinship coefficient matrices between probands, computed at each
    # genealogical depth from depthmin to depthmax.
    #
    # Args:
    #   gen:      GLgen genealogy object.
    #   pro:      proband ids; defaults to gen.pro(gen).
    #   depthmin,
    #   depthmax: depth range, each within [0, gen.depth(gen) - 1].
    #   MT:       if TRUE use the multithreaded compiled routine (one call for
    #             the whole depth range) instead of one call per depth.
    #
    # Returns: (invisibly) a GLmulti object wrapping a pro x pro (x depth)
    # array of kinship coefficients.
    if(missing(pro)) pro = gen.pro(gen)
    if( depthmin<0 | depthmin>(gen.depth(gen)-1) | depthmax<0 | depthmax>(gen.depth(gen)-1) )
        stop("depthmin and depthmax must be between 0 and (gen.depth(gen)-1)")
    retour = gen.detectionErreur( gen=gen, pro=pro, depthmin=depthmin, depthmax=depthmax, print.it=FALSE, named=TRUE, check=c(3,5,20,18,10))
    if(retour$erreur == TRUE) stop(retour$messageErreur)
    gen = retour$gen
    pro = retour$pro
    depthmin = retour$depthmin
    depthmax = retour$depthmax
    # Common dimensions for both computation paths.
    np <- length(pro)
    npp <- np * np
    ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
    if(MT) {
        # One compiled call computes every depth at once.
        rmatrix <- double(ecart * npp)
        moyenne <- double(ecart)
        .Call("SPLUSPhisMT", gen@.Data, pro, length(pro), as.integer(depthmin), as.integer(depthmax), moyenne, rmatrix, FALSE, specialsok=TRUE)
    }
    else {
        # One compiled call per depth.  Fix: results are collected in a
        # preallocated list and flattened once with unlist() instead of being
        # grown with c() in a loop; the dead double() allocation that was
        # immediately overwritten, and the per-iteration dim/dimnames/drop
        # work (whose attributes were stripped by the concatenation anyway),
        # are removed.
        liste = vector("list", ecart)
        j = 1
        for(i in depthmin:depthmax) {
            onedepth <- double(npp)
            moyenne <- double(1)
            .Call("SPLUSPhis", gen@.Data, pro, length(pro), i, i, moyenne, onedepth, FALSE, specialsok = TRUE)
            liste[[j]] = onedepth
            j = j + 1
        }
        rmatrix <- unlist(liste, use.names = FALSE)
    }
    # Shape the flat result into a pro x pro (x depth) array; drop the depth
    # dimension when only one depth was requested.
    dim(rmatrix) <- c(np, np, ecart)
    dimnames(rmatrix) <- list(pro, pro, NULL)
    rmatrix <- drop(rmatrix)
    return(invisible(GLmulti(rmatrix, depth = as.integer(depthmin:depthmax))))
}
# print.it = F,
#gen.phis = function(gen, depthmin, depthmax, pro, named = T, check = 1)
#{
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# if(missing(pro)) pro = gen.pro(gen)
#
# retour = gen.detectionErreur(gen=gen,pro=pro,depthmin=depthmin,depthmax=depthmax,print.it=FALSE,named=named,check=c(3,5,20,18,10))
# if(retour$erreur == T) stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# depthmin = retour$depthmin
# depthmax = retour$depthmax
## print.it = retour$print.it
# named = retour$named
#
# #a faire un peu plus tard
# depthmaxtmp = depthmax
# depthmintmp = depthmin
# liste = list()
# j = 1
# for(i in depthmintmp:depthmaxtmp) {
# depthmin = i
# depthmax = i
# ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
# np <- length(pro)
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# npp <- length(pro) * length(pro)
# rmatrix <- double(ecart * npp)
# moyenne <- double(ecart)
# .Call("SPLUSPhis", gen@.Data, pro, length(pro), depthmin, depthmax, moyenne, rmatrix, print.it, specialsok = T)
# dim(rmatrix) <- c(np, np, ecart)
# dimnames(rmatrix) <- list(pro, pro, NULL)
# rmatrix <- drop(rmatrix)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), depthmin, depthmax)
# header.txt <- paste("*** Calls : gen.phis (", base[1], ",", base[2], ",", base[3], ",", base[4], ") ***")
# cat(header.txt, "\n")
# }
# liste[[j]] = rmatrix
# j = j + 1
# }
# sortie.lst = c()
# for(i in 1:length(liste))
# sortie.lst = c(sortie.lst, liste[[i]])
# ecart <- as.integer(depthmaxtmp) - as.integer(depthmintmp) + 1
# np <- length(pro)
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# npp <- length(pro) * length(pro)
# rmatrix <- double(ecart * npp)
# rmatrix <- sortie.lst
# dim(rmatrix) <- c(np, np, ecart)
# #if(named)
# dimnames(rmatrix) <- list(pro, pro, NULL)
# rmatrix <- drop(rmatrix)
# return(invisible(GLmulti(rmatrix, depth = as.integer(depthmintmp:depthmaxtmp))))
#}
#gen.phisMT = function(gen, depthmin, depthmax, pro, print.it = F, named = T, check = 1)
#{
# if(length(check) != 1)
# stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# if(missing(pro))
# pro = gen.pro(gen)
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, depthmin = depthmin, depthmax = depthmax, print.it = print.it,
# named = named, check = c(3, 5, 20, 18, 10))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# depthmin = retour$depthmin
# depthmax = retour$depthmax
# print.it = retour$print.it
# named = retour$named
# #}
# #a faire un peu plus tard
# ecart <- as.integer(depthmax) - as.integer(depthmin) + 1
# np <- length(pro)
# npp <- length(pro) * length(pro)
# #Structure necessaire pour emmagasiner le resultat la fonction de la dll
# rmatrix <- double(ecart * npp)
# moyenne <- double(ecart)
# #extern "C" void SPLUSPhis(long* Genealogie,long* proband, long *NProband,long *NiveauMin,long *NiveauMax,double* pdRetour, double *MatrixArray, long *printit)
# .Call("SPLUSPhisMT", gen@.Data, pro, length(pro), as.integer(depthmin), as.integer(depthmax), moyenne, rmatrix, print.it, specialsok = T)
# dim(rmatrix) <- c(np, np, ecart)
# #if(named)
# dimnames(rmatrix) <- list(pro, pro, NULL)
# rmatrix <- drop(rmatrix)
# if(print.it) {
# base <- c(deparse(substitute(gen)), deparse(substitute(pro)), depthmin, depthmax)
# header.txt <- paste("*** Calls : gen.phis (", base[1], ",", base[2], ",", base[3], ",", base[4], ") ***")
# cat(header.txt, "\n")
# }
# return(invisible(GLmulti(rmatrix, depth = as.integer(depthmin:depthmax))))
#}
gen.depth = function(gen)
{
    # Depth (number of generations) of the genealogy; thin wrapper around the
    # depth() generic.
    depth(gen)
}
gen.pro = function(gen, ...)
{
    # Sorted ids of the probands: individuals that never appear as a father
    # or a mother in the genealogy.
    #
    # Args:
    #   gen: GLgen object, or an ind vector with 'father' and 'mother'
    #        supplied through '...'.
    #
    # Returns: sorted vector of proband ids.
    if(is(gen, "vector"))
        if(length(list(...)) != 2)
            stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
    retour = gen.detectionErreur(gen = gen, check = 1, ...)
    # Fix: signal an error on invalid input instead of returning the error
    # message as the function's value (consistent with the other gen.*
    # functions).
    if(retour$erreur)
        stop(retour$messageErreur)
    gen = retour$gen
    # Probands have no match among the father and mother columns.
    return(sort(gen$ind[is.na(match(gen$ind, c(gen$father, gen$mother)))]))
}
gen.rec = function(gen, pro = 0, ancestors = 0, ...)
{
# Coverage of each ancestor: for every ancestor, the number of probands to
# which it has a positive genetic contribution (per gen.gc -- TODO confirm
# gen.gc semantics).
#
# Args:
#   gen:       GLgen object, or an ind vector with 'father' and 'mother'
#              supplied through '...'.
#   pro:       proband ids, or a GLgroup of proband id vectors.
#   ancestors: ancestor ids to evaluate.
#
# Returns: a matrix (ancestors x groups) when 'pro' is a GLgroup, otherwise a
# one-column matrix of counts (one row per ancestor).
if(is(gen, "vector"))
if(length(list(...)) != 2)
stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
retour = gen.detectionErreur(gen = gen, pro = pro, ancestors = ancestors, check = c(1, 5, 11), ...)
if(retour$erreur == TRUE)
stop(retour$messageErreur)
gen = gen.genealogy(retour$gen)
pro = retour$pro
ancestors = retour$ancestors
if(is(pro, "GLgroup")) {
nombreAncetre <- length(ancestors)
nombreGroupe <- length(pro)
rec <- matrix(0, nrow = nombreAncetre, ncol = nombreGroupe)
for(i in 1:nombreGroupe) {
# Genetic contributions, transposed to ancestors x probands; counting the
# positive entries per row gives the number of probands covered.
contr <- t(gen.gc(gen, pro[[i]], ancestors))
rec[, i] <- (contr > 0) %*% rep(1, dim(contr)[2])
}
dimnames(rec) <- list(ancestors, names(pro))
return(rec)
}
else {
# Same computation for a single set of probands.
contr <- t(gen.gc(gen, pro, ancestors))
recouv <- (contr > 0) %*% rep(1, dim(contr)[2])
return(recouv)
}
}
gen.meangendepthVar = function(gen, pro = 0, type = "MEAN", ...)
{
    # Variance of the genealogical depth, per proband or over all probands.
    #
    # Args:
    #   gen:  GLgen object, or an ind vector with 'father' and 'mother'
    #         supplied through '...'.
    #   pro:  proband ids (0 = defaults resolved by gen.detectionErreur).
    #   type: "IND" = one value per proband; "MEAN" = a single value computed
    #         over all probands together.
    #
    # Returns: a one-column data.frame for "IND"; a single value for "MEAN".
    if(is(gen, "vector"))
        if(length(list(...)) != 2)
            stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
    retour <- gen.detectionErreur(gen = gen, pro = pro, typecomp = type, check = c(1, 5, 17))
    if(retour$erreur == TRUE) stop(retour$messageErreur)
    gen <- retour$gen
    pro <- retour$pro
    type <- retour$typecomp
    if(type == "IND") {
        # One variance per proband.  Fix: the anonymous function previously
        # declared unused parameters (gen, pro, genNo and one named 'T', which
        # shadowed TRUE); it now simply closes over 'gen'.
        tableau <- sapply(pro, function(x)
            GLPriv.variance3V(gen$ind, gen$father, gen$mother, pro = x))
        tableau <- data.frame(tableau)
        dimnames(tableau)[[1]] <- as.character(paste("Ind", as.character(pro)))
        dimnames(tableau)[[2]] <- "Mean.Gen.Depth"
        return(tableau)
    }
    else if(type == "MEAN")
        return(GLPriv.variance3V(gen$ind, gen$father, gen$mother, pro = pro))
}
#gen.entropyVar2 = function(gen, pro = 0, typeCorpus = "ECH", bfacteurCorr = F, N = NULL, check = 1, ...)
#{
# #Validation des parametres
# if(length(check) != 1) stop("Invalid 'check' parameter: choices are 0 or 1")
# #stop("Param\350tre 'check' invalide: les choix disponibles sont 0 et 1")
# if(bfacteurCorr == T)
# if(sum(N) == 0)
# stop("Correction factor must have a numerical population size value N")
# #stop("Le facteur de correction doit avoir une valeur num\351rique N taille de la population")
# if(is(gen, "vector"))
# if(length(list(...)) != 2)
# stop("Invalid '...' parameter : 'father' and 'mother' parameter names are obligatory")
# #stop("Param\350tre '...' invalide : indication du nom des param\350tres 'pere' et 'mere' est obligatoire")
# #if(check == 1) {
# retour = gen.detectionErreur(gen = gen, pro = pro, check = c(1, 5))
# if(retour$erreur == T)
# stop(retour$messageErreur)
# gen = retour$gen
# pro = retour$pro
# #}
# tableau = sapply(pro, function(x, gen)
# GLPriv.variance3V(gen$ind, gen$father, gen$mother, pro = x), gen = gen)
# if(typeCorpus == "ECH")
# facteurCorr = length(pro)/(length(pro) - 1)
# else if(typeCorpus == "POP")
# facteurCorr = 1
# if(bfacteurCorr == T)
# facteurCorr = (facteurCorr * (N - length(pro)))/N
# tableau = data.frame(tableau)
# tableau = data.frame(apply(tableau, 2, var) * facteurCorr)
# dimnames(tableau)[[1]] <- "Prof.varEntropie.var"
# dimnames(tableau)[[2]] <- "Prof.varEntropie.var"
# return(tableau)
#}
|
library(seriation)
### Name: permute
### Title: Permute the Order in Various Objects
### Aliases: permute permute.dist permute.numeric permute.list
###   permute.matrix permute.array permute.data.frame permute.hclust
###   permute.dendrogram
### Keywords: manip
### ** Examples
# Example script exercising seriation::permute on several object types.
## permute matrix
m <- matrix(rnorm(10), 5, 2, dimnames = list(1:5, 1:2))
m
## permute rows and columns
permute(m, ser_permutation(5:1, 2:1))
## permute only columns (NA leaves the row order unchanged)
permute(m, ser_permutation(NA, 2:1))
## permute objects in a dist object
d <- dist(m)
d
permute(d, ser_permutation(c(3,2,1,4,5)))
## permute a list
l <- list(a=1:5, b=letters[1:3], c=0)
l
permute(l, c(2,3,1))
## permute a dendrogram (plots the reversed leaf order)
hc <- hclust(d)
plot(hc)
plot(permute(hc, 5:1))
| /data/genthat_extracted_code/seriation/examples/permute.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 746 | r | library(seriation)
# NOTE(review): second copy of the seriation::permute example script; its
# leading library(seriation) call was fused into the preceding metadata line
# by the extraction process.
### Name: permute
### Title: Permute the Order in Various Objects
### Aliases: permute permute.dist permute.numeric permute.list
###   permute.matrix permute.array permute.data.frame permute.hclust
###   permute.dendrogram
### Keywords: manip
### ** Examples
## permute matrix
m <- matrix(rnorm(10), 5, 2, dimnames = list(1:5, 1:2))
m
## permute rows and columns
permute(m, ser_permutation(5:1, 2:1))
## permute only columns (NA leaves the row order unchanged)
permute(m, ser_permutation(NA, 2:1))
## permute objects in a dist object
d <- dist(m)
d
permute(d, ser_permutation(c(3,2,1,4,5)))
## permute a list
l <- list(a=1:5, b=letters[1:3], c=0)
l
permute(l, c(2,3,1))
## permute a dendrogram (plots the reversed leaf order)
hc <- hclust(d)
plot(hc)
plot(permute(hc, 5:1))
|
# Tutorial/exploration script: data-frame basics and qplot on a demographic
# dataset.  Interactive, with machine-specific absolute paths.
getwd()
paste(getwd(), "\\R_Fundamentals\\")
paste(getwd(), "/R_Fundamentals/Section5-Homework-Data.csv", sep = "")
# NOTE(review): this read.csv call is broken -- getwd() is passed as the file
# argument and the path as 'header'; it is superseded two lines below.
stats <- read.csv(getwd(), "\\R_Fundamentals\\Section5-Homework-Data.csv", sep = "")
print(getwd())
stats <- read.csv("E:\\Users\\Shiv\\R\\R_Dev\\R_Udemy\\R_Fundamentals\\DemographicData.csv")
#----------Explore the data----------------
stats
nrow(stats)
ncol(stats)
head(stats)
head(stats, n=10)
tail(stats)
tail(stats, n=9)
str(stats)
summary(stats)
# NOTE(review): runif() expects a count 'n'; calling it on a data.frame
# errors -- presumably a stray experiment.
runif(stats)
#---------------$ symbol usage--------------
stats
head(stats)
stats[3,3]
stats[3, "Region"]
stats$Fertility.Rate
stats$Fertility.Rate[3]
stats[,"Fertility.Rate"]
levels(stats$Region)
#---------------Basic Operations with a DF--------------
stats[1:10,] #subsetting
stats[3:9,]
stats[c(4,100),]
#Remember how the [] work:
is.data.frame(stats[1,])
is.data.frame(stats[,1])
is.data.frame(stats[,1,drop=F])
#multiply columns
head(stats)
stats$Year * stats$Fertility.Rate
stats$Year + stats$Fertility.Rate
stats <- read.csv("E:\\Users\\Shiv\\R\\R_Dev\\R_Udemy\\R_Fundamentals\\DemographicData.csv")
#add column
head(stats)
stats$MyCalc <- stats$Birth.rate * stats$Internet.users
stats$xyz <- 1:5
head(stats, n=36)
#remove a column
stats$MyCalc <- NULL
stats$xyz <- NULL
head(stats)
#-----------Filtering Data Frames
head(stats)
filter<-stats$Internet.users < 2
filter #filter gives a vector with true or false depending on the test
stats[filter,] #This line returns only those rows where filter value is true for that row
stats[stats$Birth.rate > 40, ]
stats[stats$Birth.rate > 40 & stats$Internet.users < 2,]
stats[stats$Income.Group == "High income",]
levels(stats$Income.Group)
stats[stats$Country.Name == "Malta",]
#----------Intro to QpLOT
library(ggplot2)
install.packages("ggplot2")
library(ggplot2)
?qplot
??qplot
# NOTE(review): 'dev.On' is not a function in R -- evaluating this name
# errors; probably a typo (dev.off?).
dev.On
qplot(data=stats, x = Internet.users)
qplot(data=stats, x=Income.Group, y=Birth.rate)
qplot(data=stats, x=Income.Group, y=Birth.rate, size=I(3))
qplot(data=stats, x=Income.Group, y=Birth.rate, size=I(3), color=I("blue"))
qplot(data=stats, x=Income.Group, y=Birth.rate, geom="boxplot")
stats[stats$Income.Group == "Low income" & stats$Birth.rate== min(stats$Birth.rate),]
#-------------Challenge
qplot(data=stats, x=Internet.users)
qplot(data=stats, x=Internet.users, y=Birth.rate)
qplot(data=stats, x=Internet.users, y=Birth.rate, size=I(3))
qplot(data=stats, x=Internet.users, y=Birth.rate, size=I(3), color=I("red"))
qplot(data=stats, x=Internet.users, y=Birth.rate, size=I(3), color=Income.Group)
qplot(data=stats, x=Internet.users, y=Birth.rate, size=Birth.rate, color=Income.Group)
| /R_25_QPlot.R | no_license | ShivKumarBS/R_Udemy | R | false | false | 2,747 | r | getwd()
# NOTE(review): second copy of the same qplot tutorial script; its leading
# getwd() call was fused into the preceding metadata line by the extraction
# process.  Same caveats apply: machine-specific paths, and the broken
# read.csv / runif(stats) / dev.On lines flagged below.
paste(getwd(), "\\R_Fundamentals\\")
paste(getwd(), "/R_Fundamentals/Section5-Homework-Data.csv", sep = "")
# NOTE(review): broken call -- getwd() is passed as the file argument;
# superseded two lines below.
stats <- read.csv(getwd(), "\\R_Fundamentals\\Section5-Homework-Data.csv", sep = "")
print(getwd())
stats <- read.csv("E:\\Users\\Shiv\\R\\R_Dev\\R_Udemy\\R_Fundamentals\\DemographicData.csv")
#----------Explore the data----------------
stats
nrow(stats)
ncol(stats)
head(stats)
head(stats, n=10)
tail(stats)
tail(stats, n=9)
str(stats)
summary(stats)
# NOTE(review): runif() on a data.frame errors.
runif(stats)
#---------------$ symbol usage--------------
stats
head(stats)
stats[3,3]
stats[3, "Region"]
stats$Fertility.Rate
stats$Fertility.Rate[3]
stats[,"Fertility.Rate"]
levels(stats$Region)
#---------------Basic Operations with a DF--------------
stats[1:10,] #subsetting
stats[3:9,]
stats[c(4,100),]
#Remember how the [] work:
is.data.frame(stats[1,])
is.data.frame(stats[,1])
is.data.frame(stats[,1,drop=F])
#multiply columns
head(stats)
stats$Year * stats$Fertility.Rate
stats$Year + stats$Fertility.Rate
stats <- read.csv("E:\\Users\\Shiv\\R\\R_Dev\\R_Udemy\\R_Fundamentals\\DemographicData.csv")
#add column
head(stats)
stats$MyCalc <- stats$Birth.rate * stats$Internet.users
stats$xyz <- 1:5
head(stats, n=36)
#remove a column
stats$MyCalc <- NULL
stats$xyz <- NULL
head(stats)
#-----------Filtering Data Frames
head(stats)
filter<-stats$Internet.users < 2
filter #filter gives a vector with true or false depending on the test
stats[filter,] #This line returns only those rows where filter value is true for that row
stats[stats$Birth.rate > 40, ]
stats[stats$Birth.rate > 40 & stats$Internet.users < 2,]
stats[stats$Income.Group == "High income",]
levels(stats$Income.Group)
stats[stats$Country.Name == "Malta",]
#----------Intro to QpLOT
library(ggplot2)
install.packages("ggplot2")
library(ggplot2)
?qplot
??qplot
# NOTE(review): 'dev.On' is not an R object -- evaluating it errors.
dev.On
qplot(data=stats, x = Internet.users)
qplot(data=stats, x=Income.Group, y=Birth.rate)
qplot(data=stats, x=Income.Group, y=Birth.rate, size=I(3))
qplot(data=stats, x=Income.Group, y=Birth.rate, size=I(3), color=I("blue"))
qplot(data=stats, x=Income.Group, y=Birth.rate, geom="boxplot")
stats[stats$Income.Group == "Low income" & stats$Birth.rate== min(stats$Birth.rate),]
#-------------Challenge
qplot(data=stats, x=Internet.users)
qplot(data=stats, x=Internet.users, y=Birth.rate)
qplot(data=stats, x=Internet.users, y=Birth.rate, size=I(3))
qplot(data=stats, x=Internet.users, y=Birth.rate, size=I(3), color=I("red"))
qplot(data=stats, x=Internet.users, y=Birth.rate, size=I(3), color=Income.Group)
qplot(data=stats, x=Internet.users, y=Birth.rate, size=Birth.rate, color=Income.Group)
|
# --- One-off package installation (normally run once, interactively) ----
install.packages("RColorBrewer")
install.packages("Rfacebook")
install.packages("httpuv")
install.packages("RCurl")
install.packages("rjson")
install.packages("httr")
#Profile Analytics
library(Rfacebook)
library(httpuv)
library(RColorBrewer)
# SECURITY NOTE(review): Facebook Graph API access tokens are committed
# in source below (one commented out, one live).  Tokens must never be
# stored in version control -- read them from an environment variable or
# a git-ignored local file, and revoke the ones already leaked.
#"EAACEdEose0cBAPe6eGPv0DLV3yZBdAwU6NG4yjkgZAMPNQgBhE79ZBhwn6bDHhc4ezap7pieqPIg6pTzxMOWLzKDTQQsbQGLaY4VoxFtTXfzMF8MIGSbjZASGQckNw9YpSYMZCf3cCMSu0rxS4yWL1xOA2d3kL8J8ooJFXeMwhWTqsqcPRryXJTMRVnifmw4ZD"
acess_token="EAACEdEose0cBAE8WIwmjQyaW7CGZCpy1iFnZCa95ZBZAWqADUL2IANC0g9FHpVprSdPndsRVPeWTSynt4Lm9yv8y6ZC1DIusiwUAj8bhZCZCxdaE6mueIViExCt4sDq9YvrKVe7yBq5WpBSA15sFrzCx3CtrN7CevoNEApmflVDU3QNpMEB7nuZA4SGF2ZCy6zaYZD"
# Point RCurl at a bundled CA certificate and disable peer verification
# (legacy Rfacebook/RCurl HTTPS setup; disabling verification is itself
# a security risk).
options(RCurlOptions=list(verbose=FALSE,capath=system.file("CurlSLL","cacert.pem",package = "RCurl"),ssl.verifypeer=FALSE))
# Fetch the authenticated user's profile and full friend list.
me<-getUsers("me",token = acess_token)
myFriends<-getFriends(acess_token,simplify = FALSE)
table(myFriends)
View(myFriends)
# Pie-chart breakdowns of friend attributes.  NOTE(review): pie() over
# id/name (values unique per friend) produces an unreadable chart.
pie(table(myFriends$gender))
pie(table(myFriends$birthday))
pie(table(myFriends$location))
pie(table(myFriends$hometown))
pie(table(myFriends$id))
ids<-c(myFriends$id)
print(ids)
print(myFriends$picture)
print(myFriends$likes)
pie(table(myFriends$name))
View(myFriends$likes_count)
head(myFriends,n=10)
#Business Analytics
library(Rfacebook)
#token<-"EAACEdEose0cBADKAFDiDq9CPYgOTB2E5uBpJprxWqlWbudnZB2ULVf1WW8B9qgY9kmS8PgxotLDEO2DUhPdWzuEUC8ujzNK8L3Rj6ATmRAQhZBukx065PTN43BzZB9TCA6KpS2u3x7DjaLibDuE9J825Uh7HKYm2SLOrrZAgDZCAjFQbw5L1aYpRuNJZCLTuNjZAgG0qojiXwZDZD"
#"EAACEdEose0cBAPe6eGPv0DLV3yZBdAwU6NG4yjkgZAMPNQgBhE79ZBhwn6bDHhc4ezap7pieqPIg6pTzxMOWLzKDTQQsbQGLaY4VoxFtTXfzMF8MIGSbjZASGQckNw9YpSYMZCf3cCMSu0rxS4yWL1xOA2d3kL8J8ooJFXeMwhWTqsqcPRryXJTMRVnifmw4ZD"
token<-"EAACEdEose0cBAE8WIwmjQyaW7CGZCpy1iFnZCa95ZBZAWqADUL2IANC0g9FHpVprSdPndsRVPeWTSynt4Lm9yv8y6ZC1DIusiwUAj8bhZCZCxdaE6mueIViExCt4sDq9YvrKVe7yBq5WpBSA15sFrzCx3CtrN7CevoNEApmflVDU3QNpMEB7nuZA4SGF2ZCy6zaYZD"
# private_info = T also pulls fields such as hometown.  (Style note:
# spell out TRUE instead of T in new code.)
me<-getUsers("me",token,private_info = T)
me$name
me$hometown
# Pull recent posts from the public "Intel" page.
intel<-getPage("Intel",token)
head(intel$likes_count)
head(intel$message)
intel$created_time
pie(table(intel$created_time))
pie(table(intel$comments_count))
pie(table((intel$shares_count)))
View(intel)
# Bar chart of comment counts per post, labelled by creation time.
# (c() around an existing vector is a no-op, kept as written.)
comcount<-intel$comments_count
H<-c(comcount)
barplot(H)
datecre<-intel$created_time
M<-c(datecre)
# Plot the bar chart
barplot(H,names.arg=M,xlab="Dates",ylab="CommentsCount",col="blue",
main="ActivityChart",border="red")
my_friends<-getFriends(token)
head(my_friends)
# Reactions on the most recent post of the "facebook" page (Graph API
# v2.8 -- long deprecated; presumably this script no longer runs).
fb_page<-getPage(page = "facebook",token)
post_reaction<-getReactions(post = fb_page$id[1],token,api="v2.8")
post_reaction$likes_count
post_reaction$id
| /facebookmining.R | no_license | SumanthPai/Data-Analytics-R- | R | false | false | 2,592 | r | install.packages("RColorBrewer")
# --- One-off package installation (normally run once, interactively) ----
install.packages("Rfacebook")
install.packages("httpuv")
install.packages("RCurl")
install.packages("rjson")
install.packages("httr")
#Profile Analytics
library(Rfacebook)
library(httpuv)
library(RColorBrewer)
# SECURITY NOTE(review): hard-coded Graph API tokens committed below;
# never store tokens in source -- use environment variables and revoke
# the leaked ones.
#"EAACEdEose0cBAPe6eGPv0DLV3yZBdAwU6NG4yjkgZAMPNQgBhE79ZBhwn6bDHhc4ezap7pieqPIg6pTzxMOWLzKDTQQsbQGLaY4VoxFtTXfzMF8MIGSbjZASGQckNw9YpSYMZCf3cCMSu0rxS4yWL1xOA2d3kL8J8ooJFXeMwhWTqsqcPRryXJTMRVnifmw4ZD"
acess_token="EAACEdEose0cBAE8WIwmjQyaW7CGZCpy1iFnZCa95ZBZAWqADUL2IANC0g9FHpVprSdPndsRVPeWTSynt4Lm9yv8y6ZC1DIusiwUAj8bhZCZCxdaE6mueIViExCt4sDq9YvrKVe7yBq5WpBSA15sFrzCx3CtrN7CevoNEApmflVDU3QNpMEB7nuZA4SGF2ZCy6zaYZD"
# Legacy RCurl HTTPS setup: bundled CA path, peer verification disabled
# (itself a security risk).
options(RCurlOptions=list(verbose=FALSE,capath=system.file("CurlSLL","cacert.pem",package = "RCurl"),ssl.verifypeer=FALSE))
# Authenticated user's profile and friend list.
me<-getUsers("me",token = acess_token)
myFriends<-getFriends(acess_token,simplify = FALSE)
table(myFriends)
View(myFriends)
# Attribute breakdowns; pie() over unique ids/names is unreadable.
pie(table(myFriends$gender))
pie(table(myFriends$birthday))
pie(table(myFriends$location))
pie(table(myFriends$hometown))
pie(table(myFriends$id))
ids<-c(myFriends$id)
print(ids)
print(myFriends$picture)
print(myFriends$likes)
pie(table(myFriends$name))
View(myFriends$likes_count)
head(myFriends,n=10)
#Business Analytics
library(Rfacebook)
#token<-"EAACEdEose0cBADKAFDiDq9CPYgOTB2E5uBpJprxWqlWbudnZB2ULVf1WW8B9qgY9kmS8PgxotLDEO2DUhPdWzuEUC8ujzNK8L3Rj6ATmRAQhZBukx065PTN43BzZB9TCA6KpS2u3x7DjaLibDuE9J825Uh7HKYm2SLOrrZAgDZCAjFQbw5L1aYpRuNJZCLTuNjZAgG0qojiXwZDZD"
#"EAACEdEose0cBAPe6eGPv0DLV3yZBdAwU6NG4yjkgZAMPNQgBhE79ZBhwn6bDHhc4ezap7pieqPIg6pTzxMOWLzKDTQQsbQGLaY4VoxFtTXfzMF8MIGSbjZASGQckNw9YpSYMZCf3cCMSu0rxS4yWL1xOA2d3kL8J8ooJFXeMwhWTqsqcPRryXJTMRVnifmw4ZD"
token<-"EAACEdEose0cBAE8WIwmjQyaW7CGZCpy1iFnZCa95ZBZAWqADUL2IANC0g9FHpVprSdPndsRVPeWTSynt4Lm9yv8y6ZC1DIusiwUAj8bhZCZCxdaE6mueIViExCt4sDq9YvrKVe7yBq5WpBSA15sFrzCx3CtrN7CevoNEApmflVDU3QNpMEB7nuZA4SGF2ZCy6zaYZD"
# private_info = T pulls extra profile fields (T == TRUE).
me<-getUsers("me",token,private_info = T)
me$name
me$hometown
# Recent posts from the public "Intel" page.
intel<-getPage("Intel",token)
head(intel$likes_count)
head(intel$message)
intel$created_time
pie(table(intel$created_time))
pie(table(intel$comments_count))
pie(table((intel$shares_count)))
View(intel)
# Comment count per post, labelled by creation time.
comcount<-intel$comments_count
H<-c(comcount)
barplot(H)
datecre<-intel$created_time
M<-c(datecre)
# Plot the bar chart
barplot(H,names.arg=M,xlab="Dates",ylab="CommentsCount",col="blue",
main="ActivityChart",border="red")
my_friends<-getFriends(token)
head(my_friends)
# Reactions on the newest post of the "facebook" page (API v2.8 is
# long deprecated).
fb_page<-getPage(page = "facebook",token)
post_reaction<-getReactions(post = fb_page$id[1],token,api="v2.8")
post_reaction$likes_count
post_reaction$id
|
# ------------------------------------------------------------------------------
#
# SetPythonObjects
#
# ------------------------------------------------------------------------------
# ---------------------------------------------------------
# pySet
# =====
#' @title assigns R objects to Python
#'
#' @description The function pySet allows to assign R objects to the Python
#' namespace, the conversion from R to Python is done automatically.
#' @param key a string specifying the name of the Python object.
#' @param value an R object which is assigned to Python.
#' @param namespace a string specifying where the key should be located.
#' If the namespace is set to "__main__" the key will be
#' set to the global namespace. But it is also possible to
#' set attributes of objects e.g. the attribute name of
#' the object 'os'.
#' @param useSetPoly an optional logical, giving if pySetPoly should be used
#' to transform R objects into Python objects. For example if
#' useSetPoly is TRUE unnamed vectors are transformed to
#' Python objects of type PrVector else to lists.
#' @param useNumpy an optional logical, default is FALSE, to control if numpy
#' should be used for the type conversion of matrices.
#' @param usePandas an optional logical, default is FALSE, to control if pandas
#' should be used for the type conversion of data frames.
#' @details More information about the type conversion can be found in the README
#' file or at \url{http://pythoninr.bitbucket.org/}.
#' @examples
#' \dontshow{PythonEmbedInR:::pyCranConnect()}
#' pySet("x", 3)
#' pySet("M", diag(1,3))
#' pyImport("os")
#' pySet("name", "Hello os!", namespace="os")
#' ## In some situations it can be beneficial to convert R lists or vectors
#' ## to Python tuple instead of lists. One way to accomplish this is to change
#' ## the class of the vector to "tuple".
#' y <- c(1, 2, 3)
#' class(y) <- "tuple"
#' pySet("y", y)
#' ## pySet can also be used to change values of objects or dictionaries.
#' asTuple <- function(x) {
#' class(x) <- "tuple"
#' return(x)
#' }
#' pyExec("d = dict()")
#' pySet("myTuple", asTuple(1:10), namespace="d")
#' pySet("myList", as.list(1:5), namespace="d")
# ---------------------------------------------------------
pySet <- function(key, value, namespace = "__main__",
                  useSetPoly = TRUE,
                  useNumpy = pyOptions("useNumpy"),
                  usePandas = pyOptions("usePandas")) {
  # Assign the R object `value` to the Python namespace `namespace`
  # under the name `key`, converting it to a Python representation.
  # Returns (invisibly) whatever the underlying setter returns.
  if ( pyConnectionCheck() ) return(invisible(NULL))
  check_string(key)
  # Retag matrices / data.frames so pySetPoly dispatches to the
  # numpy / pandas conversion methods when those options are enabled.
  # BUGFIX: since R 4.0 a matrix has implicit class c("matrix", "array"),
  # so the former test all(class(value) == "matrix") was always FALSE
  # there and useNumpy never took effect; inherits() works on every R
  # version.  && / || are used because both operands are scalars.
  if (all(useNumpy) && inherits(value, "matrix")) {
    class(value) <- "ndarray"
  } else if (all(usePandas) && inherits(value, "data.frame")) {
    class(value) <- "DataFrame"
  }
  # Basic types (or an explicit opt-out via useSetPoly = FALSE) bypass
  # the polymorphic conversion layer and go straight to the C setter.
  if ( isBasic(value) || (!useSetPoly) ) {
    returnValue <- pySetSimple(key, value, namespace)
  } else {
    returnValue <- pySetPoly(key, value, namespace)
  }
  invisible(returnValue)
}
# pySetSimple is a wrapper over the C function that users can
# ===========
# create new generic functions by using the function PythonEmbedInR:::pySetSimple
# Directly assigns `value` to `key` inside `namespace` through the
# C-level setter; no R-side type massaging happens here.
pySetSimple <- function(key, value, namespace="__main__"){
.Call("r_set_py", namespace, key, value, PACKAGE="PythonEmbedInR")
}
# pySetPoly is a polymorphic function
# =========
# The goal is to provide a part which can easily modified by the user.
# Default method: behave exactly like pySetSimple.  The S4 methods
# registered below specialize the conversion for vectors, matrices and
# data frames.
pySetPoly <- function(key, value, namespace="__main__"){
pySetSimple(key, value, namespace)
}
setGeneric("pySetPoly")
# ----------------------------------------------------------
# vector
# ----------------------------------------------------------
# Ships an R atomic vector to Python as a dict {vector, names, rClass},
# then rebuilds it there as an __R__.PrVector so the names and original
# R class survive the round trip.
pySetVector <- function(key, value, namespace="__main__"){
# NOTE(review): the intermediate dict is always written to "__main__",
# ignoring the `namespace` argument, while the PrVector below is
# created inside `namespace` -- inconsistent with the matrix and
# data.frame methods, and it leaves a temporary global named `key`
# behind.  Confirm against the C setter before changing.
success <- pySetSimple(key,
list(vector=unname(value), names=names(value), rClass=class(value)),
namespace="__main__")
# Work out how to address `key` in the target: bare global name,
# dict entry, or object attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = __R__.PrVector(%s['vector'], %s['names'], %s['rClass'])",
nam1, key, nam2, key, key, key)
pyExec(cmd)
}
# All four atomic vector types share the PrVector conversion.
# logical
setMethod("pySetPoly", signature(key="character", value = "logical"),
function(key, value, namespace) pySetVector(key, value, namespace))
# integer
setMethod("pySetPoly", signature(key="character", value = "integer"),
function(key, value, namespace) pySetVector(key, value, namespace))
# numeric
setMethod("pySetPoly", signature(key="character", value = "numeric"),
function(key, value, namespace) pySetVector(key, value, namespace))
# character
setMethod("pySetPoly", signature(key="character", value = "character"),
function(key, value, namespace) pySetVector(key, value, namespace))
# ----------------------------------------------------------
# matrix
# ----------------------------------------------------------
# PrMatrix (a pretty reduced matrix class)
# ========
# Converts an R matrix to a Python __R__.PrMatrix: values are sent as a
# row-wise list of lists plus dimnames/dim metadata, then reassembled
# on the Python side.
setMethod("pySetPoly", signature(key="character", value = "matrix"),
function(key, value, namespace){
# Stash dimnames/dim before stripping them for serialization.
rnam <- rownames(value)
cnam <- colnames(value)
xdim <- dim(value)
rownames(value) <- NULL
colnames(value) <- NULL
# apply(.., 1, ..) yields one list per matrix row.
value <- apply(value, 1, function(x) as.list(x))
value <- list(matrix=value, rownames=rnam, colnames=cnam, dim=xdim)
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, a dict entry, or an attribute,
# depending on what `namespace` refers to.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
# NOTE(review): the PrMatrix constructor reads from the bare name
# `key` (i.e. __main__) while pySetSimple above wrote into
# `namespace` -- verify this is intended for non-global namespaces.
cmd <- sprintf("%s%s%s = __R__.PrMatrix(%s['matrix'], %s['rownames'], %s['colnames'], %s['dim'])",
nam1, key, nam2, key, key, key, key)
pyExec(cmd)
})
# numpy.ndarray
# =============
# "ndarray" is a tag class applied by pySet() when useNumpy is enabled:
# the matrix is sent as a row-wise list of lists and wrapped in
# numpy.array() on the Python side (module alias taken from
# pyOptions("numpyAlias")).
setClass("ndarray")
setMethod("pySetPoly", signature(key="character", value = "ndarray"),
function(key, value, namespace){
# numpy arrays carry no dimnames, so drop them before shipping.
rownames(value) <- NULL
colnames(value) <- NULL
value <- apply(value, 1, function(x) as.list(x))
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, dict entry, or attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = %s.array(%s)",
nam1, key, nam2, pyOptions("numpyAlias"), key)
pyExec(cmd)
})
# ----------------------------------------------------------
# data.frame
# ----------------------------------------------------------
# PrDataFrame
# ===========
# Converts an R data.frame to a Python __R__.PrDataFrame: columns are
# sent as a dict of vectors plus rownames/colnames/dim metadata.
setMethod("pySetPoly", signature(key="character", value = "data.frame"),
function(key, value, namespace){
rnam <- rownames(value)
cnam <- colnames(value)
xdim <- dim(value)
rownames(value) <- NULL
# lapply(value, "[") extracts each column unchanged, i.e. turns the
# data.frame into a plain named list of column vectors.
value <- list(data.frame=lapply(value, "["), rownames=rnam, colnames=cnam, dim=xdim)
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, dict entry, or attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = __R__.PrDataFrame(%s['data.frame'], %s['rownames'], %s['colnames'], %s['dim'])",
nam1, key, nam2, key, key, key, key)
pyExec(cmd)
})
# pandas.DataFrame
# ================
# "DataFrame" is a tag class applied by pySet() when usePandas is
# enabled: the columns are shipped as a dict and rebuilt with
# pandas.DataFrame() (module alias from pyOptions("pandasAlias")),
# preserving rownames as the index.
setClass("DataFrame")
setMethod("pySetPoly", signature(key="character", value = "DataFrame"),
function(key, value, namespace){
rnam <- rownames(value)
# NOTE(review): xdim is computed but never used in this method.
xdim <- dim(value)
rownames(value) <- NULL
value <- list(data.frame=lapply(value, "["), rownames=rnam)
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, dict entry, or attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = %s.DataFrame(%s['data.frame'], index=%s['rownames'])",
nam1, key, nam2, pyOptions("pandasAlias"), key, key)
pyExec(cmd)
})
| /R/PySet.R | no_license | Sage-Bionetworks/PythonEmbedInR | R | false | false | 8,704 | r | # ------------------------------------------------------------------------------
#
# SetPythonObjects
#
# ------------------------------------------------------------------------------
# ---------------------------------------------------------
# pySet
# =====
#' @title assigns R objects to Python
#'
#' @description The function pySet allows to assign R objects to the Python
#' namespace, the conversion from R to Python is done automatically.
#' @param key a string specifying the name of the Python object.
#' @param value an R object which is assigned to Python.
#' @param namespace a string specifying where the key should be located.
#' If the namespace is set to "__main__" the key will be
#' set to the global namespace. But it is also possible to
#' set attributes of objects e.g. the attribute name of
#' the object 'os'.
#' @param useSetPoly an optional logical, giving if pySetPoly should be used
#' to transform R objects into Python objects. For example if
#' useSetPoly is TRUE unnamed vectors are transformed to
#' Python objects of type PrVector else to lists.
#' @param useNumpy an optional logical, default is FALSE, to control if numpy
#' should be used for the type conversion of matrices.
#' @param usePandas an optional logical, default is FALSE, to control if pandas
#' should be used for the type conversion of data frames.
#' @details More information about the type conversion can be found in the README
#' file or at \url{http://pythoninr.bitbucket.org/}.
#' @examples
#' \dontshow{PythonEmbedInR:::pyCranConnect()}
#' pySet("x", 3)
#' pySet("M", diag(1,3))
#' pyImport("os")
#' pySet("name", "Hello os!", namespace="os")
#' ## In some situations it can be beneficial to convert R lists or vectors
#' ## to Python tuple instead of lists. One way to accomplish this is to change
#' ## the class of the vector to "tuple".
#' y <- c(1, 2, 3)
#' class(y) <- "tuple"
#' pySet("y", y)
#' ## pySet can also be used to change values of objects or dictionaries.
#' asTuple <- function(x) {
#' class(x) <- "tuple"
#' return(x)
#' }
#' pyExec("d = dict()")
#' pySet("myTuple", asTuple(1:10), namespace="d")
#' pySet("myList", as.list(1:5), namespace="d")
# ---------------------------------------------------------
pySet <- function(key, value, namespace = "__main__",
                  useSetPoly = TRUE,
                  useNumpy = pyOptions("useNumpy"),
                  usePandas = pyOptions("usePandas")) {
  # Assign the R object `value` to the Python namespace `namespace`
  # under the name `key`, converting it to a Python representation.
  # Returns (invisibly) whatever the underlying setter returns.
  if ( pyConnectionCheck() ) return(invisible(NULL))
  check_string(key)
  # Retag matrices / data.frames so pySetPoly dispatches to the
  # numpy / pandas conversion methods when those options are enabled.
  # BUGFIX: since R 4.0 a matrix has implicit class c("matrix", "array"),
  # so the former test all(class(value) == "matrix") was always FALSE
  # there and useNumpy never took effect; inherits() works on every R
  # version.  && / || are used because both operands are scalars.
  if (all(useNumpy) && inherits(value, "matrix")) {
    class(value) <- "ndarray"
  } else if (all(usePandas) && inherits(value, "data.frame")) {
    class(value) <- "DataFrame"
  }
  # Basic types (or an explicit opt-out via useSetPoly = FALSE) bypass
  # the polymorphic conversion layer and go straight to the C setter.
  if ( isBasic(value) || (!useSetPoly) ) {
    returnValue <- pySetSimple(key, value, namespace)
  } else {
    returnValue <- pySetPoly(key, value, namespace)
  }
  invisible(returnValue)
}
# pySetSimple is a wrapper over the C function that users can
# ===========
# create new generic functions by using the function PythonEmbedInR:::pySetSimple
# Directly assigns `value` to `key` inside `namespace` through the
# C-level setter; no R-side type massaging happens here.
pySetSimple <- function(key, value, namespace="__main__"){
.Call("r_set_py", namespace, key, value, PACKAGE="PythonEmbedInR")
}
# pySetPoly is a polymorphic function
# =========
# The goal is to provide a part which can easily modified by the user.
# Default method: behave exactly like pySetSimple.  The S4 methods
# registered below specialize the conversion for vectors, matrices and
# data frames.
pySetPoly <- function(key, value, namespace="__main__"){
pySetSimple(key, value, namespace)
}
setGeneric("pySetPoly")
# ----------------------------------------------------------
# vector
# ----------------------------------------------------------
# Ships an R atomic vector to Python as a dict {vector, names, rClass},
# then rebuilds it there as an __R__.PrVector so the names and original
# R class survive the round trip.
pySetVector <- function(key, value, namespace="__main__"){
# NOTE(review): the intermediate dict is always written to "__main__",
# ignoring the `namespace` argument, while the PrVector below is
# created inside `namespace` -- inconsistent with the matrix and
# data.frame methods, and it leaves a temporary global named `key`
# behind.  Confirm against the C setter before changing.
success <- pySetSimple(key,
list(vector=unname(value), names=names(value), rClass=class(value)),
namespace="__main__")
# Work out how to address `key` in the target: bare global name,
# dict entry, or object attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = __R__.PrVector(%s['vector'], %s['names'], %s['rClass'])",
nam1, key, nam2, key, key, key)
pyExec(cmd)
}
# All four atomic vector types share the PrVector conversion.
# logical
setMethod("pySetPoly", signature(key="character", value = "logical"),
function(key, value, namespace) pySetVector(key, value, namespace))
# integer
setMethod("pySetPoly", signature(key="character", value = "integer"),
function(key, value, namespace) pySetVector(key, value, namespace))
# numeric
setMethod("pySetPoly", signature(key="character", value = "numeric"),
function(key, value, namespace) pySetVector(key, value, namespace))
# character
setMethod("pySetPoly", signature(key="character", value = "character"),
function(key, value, namespace) pySetVector(key, value, namespace))
# ----------------------------------------------------------
# matrix
# ----------------------------------------------------------
# PrMatrix (a pretty reduced matrix class)
# ========
# Converts an R matrix to a Python __R__.PrMatrix: values are sent as a
# row-wise list of lists plus dimnames/dim metadata, then reassembled
# on the Python side.
setMethod("pySetPoly", signature(key="character", value = "matrix"),
function(key, value, namespace){
# Stash dimnames/dim before stripping them for serialization.
rnam <- rownames(value)
cnam <- colnames(value)
xdim <- dim(value)
rownames(value) <- NULL
colnames(value) <- NULL
# apply(.., 1, ..) yields one list per matrix row.
value <- apply(value, 1, function(x) as.list(x))
value <- list(matrix=value, rownames=rnam, colnames=cnam, dim=xdim)
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, a dict entry, or an attribute,
# depending on what `namespace` refers to.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
# NOTE(review): the PrMatrix constructor reads from the bare name
# `key` (i.e. __main__) while pySetSimple above wrote into
# `namespace` -- verify this is intended for non-global namespaces.
cmd <- sprintf("%s%s%s = __R__.PrMatrix(%s['matrix'], %s['rownames'], %s['colnames'], %s['dim'])",
nam1, key, nam2, key, key, key, key)
pyExec(cmd)
})
# numpy.ndarray
# =============
# "ndarray" is a tag class applied by pySet() when useNumpy is enabled:
# the matrix is sent as a row-wise list of lists and wrapped in
# numpy.array() on the Python side (module alias taken from
# pyOptions("numpyAlias")).
setClass("ndarray")
setMethod("pySetPoly", signature(key="character", value = "ndarray"),
function(key, value, namespace){
# numpy arrays carry no dimnames, so drop them before shipping.
rownames(value) <- NULL
colnames(value) <- NULL
value <- apply(value, 1, function(x) as.list(x))
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, dict entry, or attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = %s.array(%s)",
nam1, key, nam2, pyOptions("numpyAlias"), key)
pyExec(cmd)
})
# ----------------------------------------------------------
# data.frame
# ----------------------------------------------------------
# PrDataFrame
# ===========
# Converts an R data.frame to a Python __R__.PrDataFrame: columns are
# sent as a dict of vectors plus rownames/colnames/dim metadata.
setMethod("pySetPoly", signature(key="character", value = "data.frame"),
function(key, value, namespace){
rnam <- rownames(value)
cnam <- colnames(value)
xdim <- dim(value)
rownames(value) <- NULL
# lapply(value, "[") extracts each column unchanged, i.e. turns the
# data.frame into a plain named list of column vectors.
value <- list(data.frame=lapply(value, "["), rownames=rnam, colnames=cnam, dim=xdim)
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, dict entry, or attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = __R__.PrDataFrame(%s['data.frame'], %s['rownames'], %s['colnames'], %s['dim'])",
nam1, key, nam2, key, key, key, key)
pyExec(cmd)
})
# pandas.DataFrame
# ================
# "DataFrame" is a tag class applied by pySet() when usePandas is
# enabled: the columns are shipped as a dict and rebuilt with
# pandas.DataFrame() (module alias from pyOptions("pandasAlias")),
# preserving rownames as the index.
setClass("DataFrame")
setMethod("pySetPoly", signature(key="character", value = "DataFrame"),
function(key, value, namespace){
rnam <- rownames(value)
# NOTE(review): xdim is computed but never used in this method.
xdim <- dim(value)
rownames(value) <- NULL
value <- list(data.frame=lapply(value, "["), rownames=rnam)
success <- pySetSimple(key, value, namespace)
# Address `key` as a global, dict entry, or attribute.
if ( namespace == "__main__" ) {
nam1 <- nam2 <- ""
} else if ( pyGet(sprintf("isinstance(%s, dict)", namespace)) ) {
nam1 <- sprintf("%s['", namespace)
nam2 <- "']"
} else {
nam1 <- sprintf("%s.", namespace)
nam2 <- ""
}
cmd <- sprintf("%s%s%s = %s.DataFrame(%s['data.frame'], index=%s['rownames'])",
nam1, key, nam2, pyOptions("pandasAlias"), key, key)
pyExec(cmd)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.