content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
rankall <- function(outcome, num = "best") {
  ## Rank hospitals in every state by 30-day mortality for one outcome.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia".
  ##   num: "best", "worst", or a numeric rank (smaller is better).
  ## Returns:
  ##   A named list (one entry per state abbreviation) holding the hospital
  ##   name of the requested rank, or NA when the state has fewer ranked
  ##   hospitals than `num`.
  ##
  ## Read outcome data; all columns come in as character, and the outcome
  ## column is converted to numeric below ("Not Available" becomes NA).
  outcomedata <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Map each valid outcome name to its column number in the csv.
  outcomes <- list(n = c("heart attack", "heart failure", "pneumonia"),
                   c = c(11, 17, 23))

  ## Invalid outcome -> error with the exact message "invalid outcome".
  if (!(outcome %in% outcomes$n)) {
    stop("invalid outcome")
  }

  ## Validate `num`: "best", "worst", or a number.
  ## FIX: is.numeric() accepts both integer (5L) and double input, whereas
  ## the original `class(num) != "numeric"` wrongly rejected integers.
  if (identical(num, "best")) {
    num <- 1
  } else if (identical(num, "worst")) {
    ## keep the string marker; it is resolved per state below
  } else if (is.numeric(num)) {
    num <- round(num, 0)
  } else {
    stop("invalid num")
  }

  ## Convert the requested outcome column to numeric; suppress the expected
  ## coercion warning for "Not Available" entries.
  colnum <- outcomes$c[outcome == outcomes$n]
  outcomedata[, colnum] <- suppressWarnings(as.numeric(outcomedata[, colnum]))

  ## Rank hospitals within each state.
  statesoutcomes <- split(outcomedata, outcomedata$State)
  rank_one_state <- function(dataframe) {
    ## Hospitals without data on this outcome are excluded from the ranking.
    numstatehosp <- sum(!is.na(dataframe[, colnum]))
    rank <- num                       # local copy; never mutate the closure var
    if (identical(rank, "worst")) {
      rank <- numstatehosp
    } else if (rank > numstatehosp) {
      ## Rank beyond the number of ranked hospitals -> NA.
      return(NA)
    }
    ## order() places NAs last, so positions 1..numstatehosp are all valid.
    ## Ties on the outcome are broken alphabetically by hospital name (col 2).
    ordered <- dataframe[order(dataframe[, colnum], dataframe[, 2]), c(2, colnum)]
    ordered[rank, 1]
  }
  ## Named list: one hospital name (or NA) per state abbreviation.
  lapply(statesoutcomes, rank_one_state)
}
## Read outcome data
outcomedata <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that outcome and num are valid
states <- levels(as.factor(outcomedata$State)) ## what if csv doesn't contain all states?
outcomes <- list(n = c("heart attack", "heart failure", "pneumonia"), c = c(11, 17, 23)) # also save column nums
## If an invalid outcome value is passed to best, the function should throw an error via the stop function with the exact message "invalid outcome".
if(!(outcome %in% outcomes$n)){
stop("invalid outcome")
}
##The num argument can take values "best", "worst", or an integer indicating the ranking (smaller numbers are better)
if(num == "best"){
num <- 1
} else if(num == "worst"){
} else if(class(num) != "numeric"){
stop("invalid num")
} else{
num <- round(num, 0)
}
## read outcome values
colnum <- outcomes$c[outcome == outcomes$n] ## find column name for requested outcome
outcomedata[, colnum] <- as.numeric(outcomedata[, colnum])
statesoutcomes <- split(outcomedata, outcomedata$State)
myfun <- function(dataframe){
##If the number given by num is larger than the number of hospitals in that state, then the function should return NA.
##Hospitals that do not have data on a particular outcome should be excluded from the set of hospitals when deciding the rankings.
numstatehosp <- sum(!is.na(dataframe[,colnum]))
if(num == "worst"){
num <- numstatehosp
} else if(num > numstatehosp){
return(NA)
}
## For each state, find the hospital of the given rank
orderedstateoutcomes <- dataframe[order(dataframe[, colnum], dataframe[, 2]), c(2,colnum)]
orderedstateoutcomes[num, 1]
}
## Return a data frame with the hospital names and the (abbreviated) state name
lapply(statesoutcomes, myfun)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stan_hierarchical_response_thall.R
\name{stan_hierarchical_response_thall}
\alias{stan_hierarchical_response_thall}
\title{Fit the hierarchical response model described by
Thall \emph{et al.} (2003).}
\usage{
stan_hierarchical_response_thall(
group_responses,
group_sizes,
mu_mean,
mu_sd,
tau_alpha,
tau_beta,
...
)
}
\arguments{
\item{group_responses}{vector of integers, number of responses in each group}
\item{group_sizes}{vector of integers, number of patients in each group}
\item{mu_mean}{mean parameter of normal prior distribution on mu. See details.}
\item{mu_sd}{standard deviation parameter of normal prior distribution on mu.
See details.}
\item{tau_alpha}{parameter alpha of inverse gamma prior distribution on tau. See
details.}
\item{tau_beta}{beta parameter of inverse gamma prior distribution on tau. See
details.}
\item{...}{Extra parameters are passed to \code{rstan::sampling}. Commonly
used options are \code{iter}, \code{chains}, \code{warmup}, \code{cores}, and
\code{control}.}
}
\value{
Object of class \code{\link[rstan:stanfit]{rstan::stanfit}} returned
by \code{\link[rstan:sampling]{rstan::sampling}}
}
\description{
Fit the hierarchical response model to exchangeable groups described by
Thall \emph{et al.} (2003).
}
\details{
Thall \emph{et al.} (2003) describe hierarchical methods for analysing
treatment effects of a common intervention in several sub-types of a disease.
The treatment effects are assumed to be different but exchangeable and
correlated. Observing efficacy in one cohort, for example, increases one's
expectations of efficacy in others.
They demonstrate the hierarchical approach in a trial with binary response
outcomes and in another with time-to-event outcomes.
This function fits their model for binary response outcomes.
Let the probability of response in group \eqn{i} be \eqn{\pi_{i}} for
\eqn{i = 1,...,N}.
They assume a logistic model so that
\eqn{\theta_{i} = \log{\pi_{i} / (1 - \pi_{i})}}
is the log-odds of response in group \eqn{i}.
They assume that \eqn{\theta_{i} \sim N(\mu, \sigma^2)}.
The authors implemented their model in BUGS.
As is the convention in BUGS, the authors define normal distributions by a
precision parameter \eqn{\tau} as opposed to the standard deviation parameter
\eqn{\sigma} used here. We have re-specified their model to comply with the
Stan convention of using standard deviation. The authors use a normal
prior on \eqn{\mu}, and a gamma prior on \eqn{\tau}, equivalent to
an inverse gamma prior on \eqn{\tau^{-1} = \sigma^2}.
The authors provide WinBUGS code in their publication.
We implement their model here in Stan.
}
\examples{
\dontrun{
# Example from p.778 of Thall et al. (2003)
mod0 <- stan_hierarchical_response_thall(
group_responses = c(0, 0, 1, 3, 5, 0, 1, 2, 0, 0),
group_sizes = c(0, 2 ,1, 7, 5, 0, 2, 3, 1, 0),
mu_mean = -1.3863,
mu_sd = sqrt(1 / 0.1),
tau_alpha = 2,
tau_beta = 20)
}
}
\references{
Thall, Wathen, Bekele, Champlin, Baker, and Benjamin. 2003.
“Hierarchical Bayesian approaches to phase II trials in diseases with
multiple subtypes.” Statistics in Medicine 22 (5): 763–80.
https://doi.org/10.1002/sim.1399.
}
\seealso{
\code{\link[rstan:stanfit]{rstan::stanfit}},
\code{\link[rstan:sampling]{rstan::sampling}}
}
| /man/stan_hierarchical_response_thall.Rd | no_license | brockk/trialr | R | false | true | 3,361 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stan_hierarchical_response_thall.R
\name{stan_hierarchical_response_thall}
\alias{stan_hierarchical_response_thall}
\title{Fit the hierarchical response model described by
Thall \emph{et al.} (2003).}
\usage{
stan_hierarchical_response_thall(
group_responses,
group_sizes,
mu_mean,
mu_sd,
tau_alpha,
tau_beta,
...
)
}
\arguments{
\item{group_responses}{vector of integers, number of responses in each group}
\item{group_sizes}{vector of integers, number of patients in each group}
\item{mu_mean}{mean parameter of normal prior distribution on mu. See details.}
\item{mu_sd}{standard deviation parameter of normal prior distribution on mu.
See details.}
\item{tau_alpha}{parameter alpha of inverse gamma prior distribution on tau. See
details.}
\item{tau_beta}{beta parameter of inverse gamma prior distribution on tau. See
details.}
\item{...}{Extra parameters are passed to \code{rstan::sampling}. Commonly
used options are \code{iter}, \code{chains}, \code{warmup}, \code{cores}, and
\code{control}.}
}
\value{
Object of class \code{\link[rstan:stanfit]{rstan::stanfit}} returned
by \code{\link[rstan:sampling]{rstan::sampling}}
}
\description{
Fit the hierarchical response model to exchangeable groups described by
Thall \emph{et al.} (2003).
}
\details{
Thall \emph{et al.} (2003) describe hierarchical methods for analysing
treatment effects of a common intervention in several sub-types of a disease.
The treatment effects are assumed to be different but exchangeable and
correlated. Observing efficacy in one cohort, for example, increases one's
expectations of efficacy in others.
They demonstrate the hierarchical approach in a trial with binary response
outcomes and in another with time-to-event outcomes.
This function fits their model for binary response outcomes.
Let the probability of response in group \eqn{i} be \eqn{\pi[i]} for
\eqn{i = 1,...,N}.
They assume a logistic model so that
\eqn{\theta_{i} = \log{\pi_{i} / (1 - \pi_{i})}}
is the log-odds of response in group \eqn{i}.
They assume that \eqn{\theta_{i} \sim N(\mu, \sigma^2)}.
The authors implemented their model in BUGS.
As is the convention in BUGS, the authors define normal distributions by a
precision parameter \eqn{\tau} as opposed to the standard deviation parameter
\eqn{\sigma} used here. We have re-specified their model to comply with the
Stan convention of using standard deviation. The authors use a normal
prior on \eqn{\mu}, and a gamma prior on \eqn{\tau}, equivalent to
an inverse gamma prior on \eqn{\tau^{-1} = \sigma^2}.
The authors provide WinBUGS code in their publication.
We implement their model here in Stan.
}
\examples{
\dontrun{
# Example from p.778 of Thall et al. (2003)
mod0 <- stan_hierarchical_response_thall(
group_responses = c(0, 0, 1, 3, 5, 0, 1, 2, 0, 0),
group_sizes = c(0, 2 ,1, 7, 5, 0, 2, 3, 1, 0),
mu_mean = -1.3863,
mu_sd = sqrt(1 / 0.1),
tau_alpha = 2,
tau_beta = 20)
}
}
\references{
Thall, Wathen, Bekele, Champlin, Baker, and Benjamin. 2003.
“Hierarchical Bayesian approaches to phase II trials in diseases with
multiple subtypes.” Statistics in Medicine 22 (5): 763–80.
https://doi.org/10.1002/sim.1399.
}
\seealso{
\code{\link[rstan:stanfit]{rstan::stanfit}},
\code{\link[rstan:sampling]{rstan::sampling}}
}
|
#The data linked are data collected from the accelerometers from the Samsung Galaxy S smartphone.
#A full description is available at the site where the data was obtained:
#http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
#This R script does the following:
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set.
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
library(plyr)

# ---- Merge training and test datasets ----
# Read data (paths assume the UCI HAR Dataset has been unzipped under data/)
message("reading X_train.txt")
training.x <- read.table("data/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
training.y <- read.table("data/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
training.subject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
test.x <- read.table("data/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
test.y <- read.table("data/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
test.subject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")

# Merge x, y, subject data sets (training rows first, then test rows)
merged.x <- rbind(training.x, test.x)
merged.y <- rbind(training.y, test.y)
merged.subject <- rbind(training.subject, test.subject)

# Read features (feature number in column 1, feature name in column 2)
features <- read.table("data/UCI HAR Dataset/features.txt")

# Index only the data on mean and std. dev. The pattern is matched BEFORE
# the name clean-up below, while "-mean()" / "-std()" are still present.
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])

# Make the feature names better suited for R with some substitutions
features[, 2] <- gsub('-mean', 'Mean', features[, 2])
features[, 2] <- gsub('-std', 'Std', features[, 2])
features[, 2] <- gsub('[-()]', '', features[, 2])

# Add the column names (features) to data
colnames(merged.subject) <- "subject"
colnames(merged.x) <- features[, 2]
colnames(merged.y) <- "activity"

# First reduce the merged.x table to the mean/std columns we want
merged.x <- merged.x[, mean_and_std_features]

# Merge the three data sets column-wise
df <- cbind(merged.x, merged.y, merged.subject)

# Use descriptive activity names to name the activities in the dataset.
# FIX: use the activity_labels.txt lookup table (previously read but never
# used) instead of six hard-coded assignments; the labels are identical.
activityLabels <- read.table("data/UCI HAR Dataset/activity_labels.txt")
df$activity <- as.character(activityLabels[match(df$activity, activityLabels[, 1]), 2])

# Appropriately label the data set with descriptive variable names.
names(df) <- gsub("^t", "time", names(df))
names(df) <- gsub("^f", "frequency", names(df))
names(df) <- gsub("Acc", "Accelerometer", names(df))
names(df) <- gsub("Gyro", "Gyroscope", names(df))
names(df) <- gsub("Mag", "Magnitude", names(df))
names(df) <- gsub("BodyBody", "Body", names(df))

df$activity <- as.factor(df$activity)
df$subject <- as.factor(df$subject)

# Tidy data set: the average of each variable for each (subject, activity)
tidy <- aggregate(. ~ subject + activity, df, mean)
tidy <- tidy[order(tidy$subject, tidy$activity), ]
# FIX: the argument name is row.names (the original spelled it "row.name"
# and relied on partial argument matching).
write.table(tidy, file = "tidydata.txt", row.names = FALSE)
| /run_analysis.R | no_license | yanghs/Getting-and-Cleaning-Data-Project | R | false | false | 3,308 | r | #The data linked are data collected from the accelerometers from the Samsung Galaxy S smartphone.
#A full description is available at the site where the data was obtained:
#http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#The following R script does the following.
#Merges the training and the test sets to create one data set.
#Extracts only the measurements on the mean and standard deviation for each measurement.
#Uses descriptive activity names to name the activities in the data set
#Appropriately labels the data set with descriptive variable names.
#From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(plyr)
"Merge training and test datasets"
# Read data
message("reading X_train.txt")
training.x <- read.table("data/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
training.y <- read.table("data/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
training.subject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
test.x <- read.table("data/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
test.y <- read.table("data/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
test.subject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
# Merge x, y, subject data sets
merged.x <- rbind(training.x, test.x)
merged.y <- rbind(training.y, test.y)
merged.subject <- rbind(training.subject, test.subject)
# Read features
features = read.table("data/UCI HAR Dataset/features.txt")
# Index only the data on mean and std. dev.
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
# Make the feature names better suited for R with some substitutions
features[,2] = gsub('-mean', 'Mean', features[,2])
features[,2] = gsub('-std', 'Std', features[,2])
features[,2] = gsub('[-()]', '', features[,2])
# Add the column names (features) to data
colnames(merged.subject) = "subject";
colnames(merged.x) = features[,2];
colnames(merged.y) = "activity";
# First reduce the merged.x table to what we want
merged.x <- merged.x[,mean_and_std_features]
#marge the three data sets
df <- cbind(merged.x, merged.y, merged.subject)
# Use descriptive activity names to name the activities in the dataset
activityLabels = read.table("data/UCI HAR Dataset/activity_labels.txt")
df$activity[df$activity == 1] = "WALKING"
df$activity[df$activity == 2] = "WALKING_UPSTAIRS"
df$activity[df$activity == 3] = "WALKING_DOWNSTAIRS"
df$activity[df$activity == 4] = "SITTING"
df$activity[df$activity == 5] = "STANDING"
df$activity[df$activity == 6] = "LAYING"
#Appropriately labels the data set with descriptive variable names.
names(df)<-gsub("^t", "time", names(df))
names(df)<-gsub("^f", "frequency", names(df))
names(df)<-gsub("Acc", "Accelerometer", names(df))
names(df)<-gsub("Gyro", "Gyroscope", names(df))
names(df)<-gsub("Mag", "Magnitude", names(df))
names(df)<-gsub("BodyBody", "Body", names(df))
df$activity <- as.factor(df$activity)
df$subject <- as.factor(df$subject)
tidy <-aggregate(. ~subject + activity, df, mean)
tidy <-tidy[order(tidy$subject,tidy$activity),]
write.table(tidy, file = "tidydata.txt",row.name=FALSE)
|
# Example usage of geom_jitter() on the built-in mpg data set.
# NOTE(review): this first assignment is immediately overwritten below and
# has no effect; presumably kept to mirror the geom_jitter() help-page
# example it was extracted from.
p <- ggplot(mpg, aes(cyl, hwy))
# Scatter plot of city vs highway mpg with jittered points to reduce
# overplotting of the integer-valued mpg data.
p <-
ggplot(mpg, aes(cty, hwy)) +
geom_jitter()
| /ggplot2/Layers/Geoms/geom_jitter/example6.R | no_license | plotly/ssim_baselines | R | false | false | 87 | r | p <- ggplot(mpg, aes(cyl, hwy))
p <-
ggplot(mpg, aes(cty, hwy)) +
geom_jitter()
|
# Auto-generated fuzz/valgrind regression input for the unexported routine
# meteor:::Photoperiod. `doy` holds extreme double values produced by the
# fuzzer; `latitude` is mostly zero.
# NOTE(review): the two vectors have different lengths (21 vs 26) --
# presumably the C++ routine recycles or truncates; confirm against the
# meteor package source before relying on this behavior.
testlist <- list(doy = c(4.62595082430429e-312, 3.64097969159734e-277, -0.427429395729092, 2.09887675795476e-104, 1.55322770574539e-47, 2.90948430752029e+135, 3.56441595774554e+114, -2.64525441665141e+303, -9.52682579807939e+139, -3.98397314603138e+183, -1.77863325536183e+126, 6.42851301544252e-310, 1.66013830765329e-307, 0.000234804018098546, 1.91570942562165e+206, 365.687522888184, 2.07029838648818e+24, -7.21048519616203e+198, 3.51433879412484e+125, -7.92665263616256e+107, 1.17913068623545e+75 ), latitude = c(-2.61899946856284e-59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Spread the list as arguments and print the structure of whatever comes back
# (the test passes if the call completes without crashing).
result <- do.call(meteor:::Photoperiod,testlist)
str(result)
result <- do.call(meteor:::Photoperiod,testlist)
str(result) |
# Plot 2 of the Exploratory Data Analysis course project: Global Active
# Power over two days, written to plot2.png.

# Read the variable names from the header row of the raw data file.
cnames<-as.vector(t(read.table("../household_power_consumption.txt", sep=";",
                   stringsAsFactors=FALSE,nrows=1)))
# Column classes: Date and Time as character, the 7 measurements as numeric.
classes<-c("character", "character", "numeric", "numeric", "numeric",
           "numeric", "numeric", "numeric", "numeric")
# record starts from 16/12/2006;17:24:00, sample rate every minute,
# so to get to 1/2/2007, we need to skip lines:
# 1440 mins/day * (15+31) days ## from 17/12/06 to 31/01/07 total: (15+31) days
# + 6 hours * 60 mins/hour ## from 18:00 to 00:00 total 6 hours
# + (59-24+1) mins ## from 17:24:00 to 17:59:00 total (59-24+1) minutes
# + 1 first line ## first line with variable names
# = 66637 lines
#########
# read in lines:
# 2 days * 1440 mins/day = 2880 lines
powdata<-read.table("../household_power_consumption.txt", sep=";",
                    stringsAsFactors=FALSE,na.strings="?", colClasses=classes,
                    col.names=cnames,skip=66637,nrows=2880)
# Combine date & time and parse into a date-time value.
dt<-strptime(paste(powdata$Date, powdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# FIX: store as POSIXct -- strptime() returns POSIXlt, which is a list of
# components and is a poor fit for a data-frame column; POSIXct is the
# recommended date-time class for data frames and plots identically.
powdata$DateTime <- as.POSIXct(dt)
# Open the png graphics device (480x480 px, transparent background).
png(filename="plot2.png", width=480, height=480, units="px", bg="transparent")
# Line chart of global active power over time; x-axis label intentionally blank.
with(powdata, plot(DateTime, Global_active_power, type="l", xlab="",
                   ylab="Global Active Power (kilowatts)"))
# Close the device so the file is flushed to disk.
dev.off()
| /plot2.R | no_license | ds-2015/ExData_Plotting1 | R | false | false | 1,397 | r | cnames<-as.vector(t(read.table("../household_power_consumption.txt", sep=";",
stringsAsFactors=FALSE,nrows=1)))
classes<-c("character", "character", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric", "numeric")
# record starts from 16/12/2006;17:24:00, sample rate every minutes,
# so to get 1/2/2007, we need to skip lines:
# 1440 mins/day * (15+31) days ## from 17/12/06 to 31/01/07 total: (15+31) days
# + 6 hours * 60 mins/hour ## from 18:00 to 00:00 total 6 hours
# + (59-24+1) mins ## from 17:24:00 to 17:59:00 total (59-24+1) minutes
# + 1 first line ## first line with variable names
# = 66637 lines
#########
# read in lines:
# 2 days * 1440 mins/day = 2880 lines
powdata<-read.table("../household_power_consumption.txt", sep=";",
stringsAsFactors=FALSE,na.strings="?", colClasses=classes,
col.names=cnames,skip=66637,nrows=2880)
# combine date & time and convert to time format
dt<-strptime(paste(powdata$Date, powdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
powdata$DateTime <- dt
#open png graphic device
png(filename="plot2.png", width=480, height=480, units="px", bg="transparent")
#plot
with(powdata, plot(DateTime, Global_active_power, type="l", xlab="",
ylab="Global Active Power (kilowatts)"))
# close file
dev.off()
|
library(ggmap)

# Base map of Los Angeles.
# FIX: the geocode query was misspelled as 'Los Anageles', which could
# geocode to the wrong location or fail outright.
laMap <- qmap('Los Angeles', zoom = 11, maptype = 'hybrid')

# m1: one point per incident for the most common crime types, coloured by
# crime description.
# NOTE(review): assumes LAcrime (with columns Long, Lat, `Crm Cd Desc`) and
# commonCrimes$crime are created by an earlier script, and that dplyr is
# attached for %>% / filter -- confirm against the project's load order.
m1 <- laMap +
  geom_point(aes(x = Long, y = Lat, colour = `Crm Cd Desc`), alpha = .3,
             data = LAcrime %>% filter(`Crm Cd Desc` %in% commonCrimes$crime))

# m2: 2D kernel-density ("heat map") layer over all incidents.
m2 <- laMap +
  stat_density2d(
    aes(x = Long, y = Lat, fill = ..level.., alpha = ..level..),
    data = LAcrime
  )
| /R/explore2_maps.R | no_license | manifolded/NewMetDataSummer2016 | R | false | false | 370 | r | library(ggmap)
laMap <- qmap('Los Anageles', zoom = 11, maptype = 'hybrid')
m1 <- laMap +
geom_point(aes(x = Long, y = Lat, colour = `Crm Cd Desc`), alpha = .3,
data = LAcrime %>% filter(`Crm Cd Desc` %in% commonCrimes$crime))
m2 <- laMap +
stat_density2d(
aes(x = Long, y = Lat, fill = ..level.., alpha = ..level..),
data = LAcrime
)
|
# Auto-generated fuzz/valgrind regression input for the unexported routine
# blorr:::blr_pairs_cpp. Edge-case input: x is all zeros and y is a
# zero-length numeric vector.
testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# Spread the list as arguments and print the structure of whatever comes back
# (the test passes if the call completes without crashing).
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result)
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
## Created 06 / February / 2017
## Marina Costa Rillo
##
## Code to find NOAA sea depth for Buckley Collection coordinates
##
## Uses "BuckleyCollection_DataPortal.csv" & "marmap_coord_-180;-90;180;90_res_5.csv"
##
## Updates "BuckleyCollection_DataPortal.csv"
##
rm(list=ls())

library(marmap)

# NOTE(review): the hard-coded absolute path + setwd() ties this script to
# one machine; kept unchanged because all relative paths below depend on it.
setwd("/Users/marinacostarillo/Google Drive/DOUTORADO/R_data")

buckley_table <- read.csv("Buckley_Collection/BuckleyCollection_DataPortal.csv", header = TRUE, stringsAsFactors=FALSE)

### Adding NOAA SEA DEPTH to coord_table

## Code to get sea depth given coordinates (uses marmap and NOAA data)
## If marmap_buckley_NOAA_depth.csv files already exist, no need to run this out-commented part
# Getting NOAA database from the internet - TAKES QUITE SOME TIME!!
# OBS: resolution = 5 minutes ; keep = TRUE will save data automatically as marmap_coord.csv)
# sea_depth = getNOAA.bathy(180, -180, 90, -90, resolution = 2, keep=TRUE, antimeridian=FALSE)
# sea_depth is already of class bathy

# Getting sea depths from already saved table marmap_coord_-180;-90;180;90_res_5.csv (in folder Ocean_Environment)
sea_depth <- read.csv("/Users/marinacostarillo/Google Drive/DOUTORADO/R_data/Ocean_Environment/marmap_coord_-180;-90;180;90_res_5.csv")
sea_depth <- as.bathy(sea_depth)
is.bathy(sea_depth)

# Unique, non-missing coordinate pairs. marmap's get.depth() takes
# x = longitude and y = latitude.
NOAA_coord = unique(na.omit(buckley_table[,c("Lat.decimal","Long.decimal")]))
NOAA_depth_table = get.depth(sea_depth, x=NOAA_coord[,"Long.decimal"], y=NOAA_coord[,"Lat.decimal"], locator=FALSE, distance=FALSE)
write.csv(NOAA_depth_table, file = "Buckley_Collection/marmap_buckley_depths.csv",row.names=FALSE)
NOAA_depth_table = read.csv("Buckley_Collection/marmap_buckley_depths.csv", header = TRUE)

# Copy the looked-up depth into the main table for every matching lat/long pair.
for (i in 1 : length(buckley_table[,1])){
  same_latlong = c()
  same_latlong = c(same_latlong, which(buckley_table[i,"Lat.decimal"] == NOAA_depth_table[,"lat"] &
                                         buckley_table[i,"Long.decimal"] == NOAA_depth_table[,"lon"]) )
  if (length(same_latlong) > 0){
    buckley_table[i, "Sea.Depth..m....modern.methods"] = unique(NOAA_depth_table[same_latlong,"depth"])
  }
}

# Fixing the "land" and depth > 0
unique(buckley_table[which(buckley_table[,"Sample.depth.MIN..cm."]==c("land") | buckley_table[,"Sea.Depth..m....modern.methods"]>0 ),
                     c("Lat.decimal","Long.decimal","Sample.depth.MIN..cm.","Sea.Depth..m....modern.methods")])

# 2 Samples are wrong
# 1st Sample
buckley_table[which(buckley_table[,"Lat.decimal"]== 18.23300 & buckley_table[,"Long.decimal"] == -76.60000),]
print(buckley_table[which(buckley_table[,"Lat.decimal"]== 18.23300 & buckley_table[,"Long.decimal"] == -76.60000),"Collection.info"])
# "1/2 mile E of Buff Bay, Jamaica" ~ Lat 18.250622 Long -76.544666 (I looked on a map)
get.depth(sea_depth, x=-76.544666, y=18.250622, locator=FALSE, distance=FALSE)
buckley_table[which(buckley_table[,"Lat.decimal"]== 18.23300 & buckley_table[,"Long.decimal"] == -76.60000),"Sea.Depth..m....modern.methods"] <- -606

# 2nd Sample
buckley_table[which(buckley_table[,"Lat.decimal"]== 15.21700 & buckley_table[,"Long.decimal"] == 145.76700),]
# Saipan Island ~ Lat 15.191158 Long 145.744684 (I looked on a map)
# FIX: the original call passed the latitude for both arguments
# (x=15.191158, y=15.191158); get.depth() expects x = longitude.
get.depth(sea_depth, x=145.744684, y=15.191158, locator=FALSE, distance=FALSE)
buckley_table[which(buckley_table[,"Lat.decimal"]== 15.21700 & buckley_table[,"Long.decimal"] == 145.76700),"Sea.Depth..m....modern.methods"] <- c(23,23)

# Checking NAs - the NAs are all from samples without Lat and Long info
buckley_table[which(is.na(buckley_table[,"Sea.Depth..m....modern.methods"])),
              c("Lat.decimal","Long.decimal","Sample.depth.MIN..cm.","Sea.Depth..m....modern.methods")]

# Saving buckley_table
write.csv(buckley_table, file = "Buckley_Collection/BuckleyCollection_DataPortal.csv",row.names=FALSE)
## Marina Costa Rillo
##
## Code to find NOAA sea depth for Buckley Collection coordinates
##
## Uses "BuckleyCollection_DataPortal.csv" & "marmap_coord_-180;-90;180;90_res_5.csv"
##
## Updates "BuckleyCollection_DataPortal.csv"
##
rm(list=ls())
library(marmap)
setwd("/Users/marinacostarillo/Google Drive/DOUTORADO/R_data")
buckley_table <- read.csv("Buckley_Collection/BuckleyCollection_DataPortal.csv", header = TRUE, stringsAsFactors=FALSE)
### Adding NOAA SEA DEPTH to coord_table
## Code to get sea depth given coordinates (uses marmap and NOAA data)
## If marmap_buckley_NOAA_depth.csv files already exist, no need to run this out-commented part
# Getting NOAA database from the internet - TAKES QUITE SOME TIME!!
# OBS: resolution = 5 minutes ; keep = TRUE will save data automatically as marmap_coord.csv)
# sea_depth = getNOAA.bathy(180, -180, 90, -90, resolution = 2, keep=TRUE, antimeridian=FALSE)
# sea_depth is already of class bathy
# Getting sea depths from already saved table marmap_coord_-180;-90;180;90_res_5.csv (in folder Ocean_Environment)
sea_depth <- read.csv("/Users/marinacostarillo/Google Drive/DOUTORADO/R_data/Ocean_Environment/marmap_coord_-180;-90;180;90_res_5.csv")
sea_depth <- as.bathy(sea_depth)
is.bathy(sea_depth)
NOAA_coord = unique(na.omit(buckley_table[,c("Lat.decimal","Long.decimal")]))
NOAA_depth_table = get.depth(sea_depth, x=NOAA_coord[,"Long.decimal"], y=NOAA_coord[,"Lat.decimal"], locator=FALSE, distance=FALSE)
write.csv(NOAA_depth_table, file = "Buckley_Collection/marmap_buckley_depths.csv",row.names=FALSE)
NOAA_depth_table = read.csv("Buckley_Collection/marmap_buckley_depths.csv", header = TRUE)
for (i in 1 : length(buckley_table[,1])){
same_latlong = c()
same_latlong = c(same_latlong, which(buckley_table[i,"Lat.decimal"] == NOAA_depth_table[,"lat"] &
buckley_table[i,"Long.decimal"] == NOAA_depth_table[,"lon"]) )
if (length(same_latlong) > 0){
buckley_table[i, "Sea.Depth..m....modern.methods"] = unique(NOAA_depth_table[same_latlong,"depth"])
}
}
# Fixing the "land" and depth > 0
unique(buckley_table[which(buckley_table[,"Sample.depth.MIN..cm."]==c("land") | buckley_table[,"Sea.Depth..m....modern.methods"]>0 ),
c("Lat.decimal","Long.decimal","Sample.depth.MIN..cm.","Sea.Depth..m....modern.methods")])
# 2 Samples are wrong
# 1st Sample
buckley_table[which(buckley_table[,"Lat.decimal"]== 18.23300 & buckley_table[,"Long.decimal"] == -76.60000),]
print(buckley_table[which(buckley_table[,"Lat.decimal"]== 18.23300 & buckley_table[,"Long.decimal"] == -76.60000),"Collection.info"])
# "1/2 mile E of Buff Bay, Jamaica" ~ Lat 18.250622 Long -76.544666 (I looked on a map)
get.depth(sea_depth, x=-76.544666, y=18.250622, locator=FALSE, distance=FALSE)
buckley_table[which(buckley_table[,"Lat.decimal"]== 18.23300 & buckley_table[,"Long.decimal"] == -76.60000),"Sea.Depth..m....modern.methods"] <- -606
# 2nd Sample
buckley_table[which(buckley_table[,"Lat.decimal"]== 15.21700 & buckley_table[,"Long.decimal"] == 145.76700),]
# Saipan Island ~ Lat 18.250622 Long 145.744684 (I looked on a map)
get.depth(sea_depth, x=15.191158, y=15.191158, locator=FALSE, distance=FALSE)
buckley_table[which(buckley_table[,"Lat.decimal"]== 15.21700 & buckley_table[,"Long.decimal"] == 145.76700),"Sea.Depth..m....modern.methods"] <- c(23,23)
# Checking NAs - the NAs are all from samples without Lat and Long info
buckley_table[which(is.na(buckley_table[,"Sea.Depth..m....modern.methods"])),
c("Lat.decimal","Long.decimal","Sample.depth.MIN..cm.","Sea.Depth..m....modern.methods")]
# Saving buckley_table
write.csv(buckley_table, file = "Buckley_Collection/BuckleyCollection_DataPortal.csv",row.names=FALSE)
|
# Author : Romain Louvet
# Date   : 08/10/2015
#
# Create spatially randomized points with the same values as the "fires"
# data. In other words, it randomizes fire locations.
# Then, it joins by location the points for each scale level from
# list_scales.

########################
# create random points #
########################
fires2 <- data.frame(SURFM2=fires$Surface_parcourue_m2)

# call "points_random" function, create 100 spatially randomized points within
# "map" extent and with fires2 values (takes +- 25 min to generate).
# Cached on disk after the first run.
if(file.exists("./2-outputdata/pointrandom100.Rdata")){
  load("./2-outputdata/pointrandom100.Rdata")
}else{
  randomliste <- points_random(map,fires2,100,"random")
  save(randomliste,file="./2-outputdata/pointrandom100.Rdata")
}

######################
# join random points #
######################
# (takes +- 60 min to generate). Cached on disk after the first run.
# BUG FIX: the condition previously tested pointrandom100.Rdata, so the join
# cache was loaded -- or the join skipped -- based on the wrong file: once the
# points existed the join was never computed, and load() of a missing
# joinrandom100.Rdata would error.
if(file.exists("./2-outputdata/joinrandom100.Rdata")){
  load("./2-outputdata/joinrandom100.Rdata")
}else{
  joinrandom <- over_join_random(randomliste,list_scales,scalenames,"SURFM2")
  save(joinrandom,file="./2-outputdata/joinrandom100.Rdata")
}
# Date : 08/10/2015
#
# Create spatially randomized points with same values as "fires" data.
# In other words, it randomizes fire locations.
# Then, it joins by location the points for each scale level from list_scales.
########################
# create random points #
########################
# Keep only the burnt-area attribute; this is what gets attached to the
# randomized locations.
fires2 <- data.frame(SURFM2=fires$Surface_parcourue_m2)
# call "points_random" function, create 100 spatially randomized points within "map"
# extent and with fires2 values (take +- 25 min to generate)
# Result is cached on disk so re-runs skip the expensive generation step.
if(file.exists("./2-outputdata/pointrandom100.Rdata")){
  load("./2-outputdata/pointrandom100.Rdata")
}else{
  randomliste <- points_random(map,fires2,100,"random")
  save(randomliste,file="./2-outputdata/pointrandom100.Rdata")
}
######################
# join random points #
######################
# (take +- 60 min to generate)
# FIX: the cache check previously tested pointrandom100.Rdata (guaranteed to
# exist after the step above), so over_join_random() was never run on a fresh
# start and load("joinrandom100.Rdata") failed. Test the file that is actually
# loaded/saved here.
if(file.exists("./2-outputdata/joinrandom100.Rdata")){
  load("./2-outputdata/joinrandom100.Rdata")
}else{
  joinrandom <- over_join_random(randomliste,list_scales,scalenames,"SURFM2")
  save(joinrandom,file="./2-outputdata/joinrandom100.Rdata")
}
# Supported Living Payment (SLP): compute each recipient's unabated annual
# benefit amount.
#
# `Data` is a data.table and is modified by reference (`:=`); the function is
# called for its side effect of adding the P_Benefits_SLP_Amount_Unabated
# column. The applicable rate is selected by *assignment order*: every
# recipient first gets the Single rate, which is then overwritten for
# 17-year-olds (SingleYoung), sole parents, couples, and -- from model year
# 2017 -- couples with dependents.
#
# Args:
#   Data                   data.table of person/family records
#   Benefits_SLP_Rate_*    weekly SLP rates per household situation
#   modelyear              model year; CoupleParent rate applies from 2017
#   Periods                unused in this body -- NOTE(review): confirm
#                          whether per-period apportioning was intended
SLP <- function(
  Data,
  Benefits_SLP_Rate_Couple,
  Benefits_SLP_Rate_SoleParent,
  Benefits_SLP_Rate_SingleYoung,
  Benefits_SLP_Rate_Single,
  Benefits_SLP_Rate_CoupleParent,
  modelyear = 2015,
  Periods = 1){
  # Weekly rates are annualised using 52.2 weeks per year.
  weeks_in_year = 52.2
  # SLP recipients must be older than 15; anything else is a data error.
  if (nrow(Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
                (P_Attributes_Age <= 15),]) != 0) {
    stop("errSLPAge")
  }
  Data[, P_Benefits_SLP_Amount_Unabated := 0]
  # Default: Single adult rate for every recipient.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_Single]
  # Youth rate. NOTE(review): the bracket (> 16 & < 18) only captures
  # 17-year-olds, so a 16-year-old keeps the Single rate -- confirm intended.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (P_Attributes_Age > 16) &
       (P_Attributes_Age < 18),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_SingleYoung]
  # Sole-parent rate overrides the single rates when there are dependents.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (F_Counts_Dependents > 0),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_SoleParent]
  # Couple rate overrides all of the above.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (F_Attributes_IsCouple == 1),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_Couple]
  # Couple-with-children rate, introduced in model year 2017.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (F_Attributes_IsCouple == 1) &
       (F_Counts_Dependents > 0) &
       (modelyear >= 2017),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_CoupleParent]
}
# Metadata presumably consumed by the model driver: the column this procedure
# writes ...
attr(SLP, "output") <-
  c("P_Benefits_SLP_Amount_Unabated")
# ... and the columns it expects to find in Data.
attr(SLP, "input") <-
  c("P_Benefits_Eligibility_SLP_RecipientByPeriod",
    "F_Attributes_IsCouple",
    "P_Attributes_Age",
    "F_Counts_Dependents"
  )
| /procedures/SLP.R | permissive | kevinxperese/TAWA_microsimulation_model | R | false | false | 1,946 | r |
# Supported Living Payment (SLP): compute each recipient's unabated annual
# benefit amount.
#
# `Data` is a data.table and is modified by reference (`:=`); the function is
# called for its side effect of adding the P_Benefits_SLP_Amount_Unabated
# column. The applicable rate is selected by *assignment order*: every
# recipient first gets the Single rate, which is then overwritten for
# 17-year-olds (SingleYoung), sole parents, couples, and -- from model year
# 2017 -- couples with dependents.
#
# Args:
#   Data                   data.table of person/family records
#   Benefits_SLP_Rate_*    weekly SLP rates per household situation
#   modelyear              model year; CoupleParent rate applies from 2017
#   Periods                unused in this body -- NOTE(review): confirm
#                          whether per-period apportioning was intended
SLP <- function(
  Data,
  Benefits_SLP_Rate_Couple,
  Benefits_SLP_Rate_SoleParent,
  Benefits_SLP_Rate_SingleYoung,
  Benefits_SLP_Rate_Single,
  Benefits_SLP_Rate_CoupleParent,
  modelyear = 2015,
  Periods = 1){
  # Weekly rates are annualised using 52.2 weeks per year.
  weeks_in_year = 52.2
  # SLP recipients must be older than 15; anything else is a data error.
  if (nrow(Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
                (P_Attributes_Age <= 15),]) != 0) {
    stop("errSLPAge")
  }
  Data[, P_Benefits_SLP_Amount_Unabated := 0]
  # Default: Single adult rate for every recipient.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_Single]
  # Youth rate. NOTE(review): the bracket (> 16 & < 18) only captures
  # 17-year-olds, so a 16-year-old keeps the Single rate -- confirm intended.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (P_Attributes_Age > 16) &
       (P_Attributes_Age < 18),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_SingleYoung]
  # Sole-parent rate overrides the single rates when there are dependents.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (F_Counts_Dependents > 0),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_SoleParent]
  # Couple rate overrides all of the above.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (F_Attributes_IsCouple == 1),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_Couple]
  # Couple-with-children rate, introduced in model year 2017.
  Data[(P_Benefits_Eligibility_SLP_RecipientByPeriod == 1) &
       (F_Attributes_IsCouple == 1) &
       (F_Counts_Dependents > 0) &
       (modelyear >= 2017),
       P_Benefits_SLP_Amount_Unabated := weeks_in_year *
         Benefits_SLP_Rate_CoupleParent]
}
# Metadata presumably consumed by the model driver: the column this procedure
# writes ...
attr(SLP, "output") <-
  c("P_Benefits_SLP_Amount_Unabated")
# ... and the columns it expects to find in Data.
attr(SLP, "input") <-
  c("P_Benefits_Eligibility_SLP_RecipientByPeriod",
    "F_Attributes_IsCouple",
    "P_Attributes_Age",
    "F_Counts_Dependents"
  )
|
# Copyright 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Plot daily streamflow data with their symbols
#'
#' @description Plots data symbols for a daily data set. A column of symbols is required, default \code{symbols = 'Symbol'}.
#' For HYDAT data, symbols include: 'E' Estimate, 'A' Partial Day, 'B' Ice Conditions, 'D' Dry, and 'R' Revised.
#' Other symbols or categories may be used to colour points of plot.
#' Returns a list of plots.
#'
#' @inheritParams plot_flow_data
#' @param symbols Name of column in \code{data} that contains symbols. Only required if symbols column name is not
#' 'Symbol' (default). Leave blank or set to \code{NULL} if using \code{station_number} argument.
#'
#' @return A list of ggplot2 objects with the following for each station provided:
#' \item{Flow_Data_Symbols}{a plot that contains the flow data with symbol categories}
#'
#' @examples
#' # Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
#' if (file.exists(tidyhydat::hy_downloaded_db())) {
#'
#' # Plot data and symbols from a data frame and data argument
#' flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116")
#' plot_flow_data_symbols(data = flow_data)
#'
#' # Plot data and symbols using station_number argument with defaults
#' plot_flow_data_symbols(station_number = "08NM116")
#'
#' }
#' @export
plot_flow_data_symbols <- function(data,
                                   dates = Date,
                                   values = Value,
                                   groups = STATION_NUMBER,
                                   symbols = Symbol,
                                   station_number,
                                   water_year_start = 1,
                                   start_year,
                                   end_year,
                                   exclude_years,
                                   months = 1:12,
                                   start_date,
                                   end_date,
                                   log_discharge = FALSE,
                                   include_title = FALSE){

  ## ARGUMENT CHECKS
  ## ---------------
  # Replace missing optional arguments with permissive sentinels so the
  # year/date filters below become no-ops.
  if (missing(data)) {
    data <- NULL
  }
  if (missing(station_number)) {
    station_number <- NULL
  }
  if (missing(start_year)) {
    start_year <- 0
  }
  if (missing(end_year)) {
    end_year <- 9999
  }
  if (missing(exclude_years)) {
    exclude_years <- NULL
  }
  if (missing(start_date)) {
    start_date <- "0000-01-01"
  }
  if (missing(end_date)) {
    end_date <- "3000-12-31"
  }

  water_year_checks(water_year_start)
  # FIX: validate the user-supplied exclude_years; this was previously
  # hard-coded to exclude_years = NULL, which silently skipped the check even
  # though exclude_years is used for filtering further down.
  years_checks(start_year, end_year, exclude_years)
  logical_arg_check(log_discharge)
  logical_arg_check(include_title)
  months_checks(months)

  if (start_date >= end_date) stop("start_date must be less than end_date.", call. = FALSE)

  ## FLOW DATA CHECKS AND FORMATTING
  ## -------------------------------

  # Check if data is provided and import it
  flow_data <- flowdata_import(data = data, station_number = station_number)

  # Check and rename columns
  flow_data <- format_all_cols(data = flow_data,
                               dates = as.character(substitute(dates)),
                               values = as.character(substitute(values)),
                               groups = as.character(substitute(groups)),
                               symbols = as.character(substitute(symbols)),
                               rm_other_cols = TRUE,
                               keep_symbols = TRUE)

  flow_data <- analysis_prep(data = flow_data,
                             water_year_start = water_year_start)

  # Filter for the selected years (remove excluded years after)
  flow_data <- dplyr::filter(flow_data, WaterYear >= start_year & WaterYear <= end_year)

  # Filter for specific dates, if selected
  flow_data <- dplyr::filter(flow_data, Date >= start_date)
  flow_data <- dplyr::filter(flow_data, Date <= end_date)

  # Blank out (rather than drop) excluded years and unselected months so the
  # x-axis keeps its full extent and the gaps are visible.
  flow_data <- dplyr::mutate(flow_data, Value = replace(Value, WaterYear %in% exclude_years, NA))
  flow_data <- dplyr::mutate(flow_data, Value = replace(Value, !Month %in% months, NA))

  if (anyNA(flow_data$Value))
    message(paste0("Note: Did not plot ", sum(is.na(flow_data$Value)),
                   " missing or excluded values between ", min(flow_data$Date), " and ", max(flow_data$Date),"."))

  # Label unflagged days explicitly and make "No Symbol" the first factor
  # level so it takes the first colour in the legend.
  flow_data <- dplyr::mutate(flow_data,
                             Symbol = ifelse(is.na(Symbol), "No Symbol", Symbol),
                             Symbol = factor(Symbol, levels = c("No Symbol",unique(Symbol)[which(unique(Symbol) != "No Symbol")])))

  # Plot the data: build one ggplot per station over nested data.
  sym_plots <- dplyr::group_by(flow_data, STATION_NUMBER)
  sym_plots <- tidyr::nest(sym_plots)
  sym_plots <- dplyr::mutate(
    sym_plots,
    plot = purrr::map2(
      data, STATION_NUMBER,
      ~ggplot2::ggplot(data = ., ggplot2::aes(x = Date, y = Value)) +
        ggplot2::geom_line(colour = "dodgerblue4", size = 0.2, na.rm = TRUE) +
        ggplot2::geom_point(ggplot2::aes(color = Symbol), size = 1.5, na.rm = TRUE)+
        ggplot2::ylab("Daily Discharge (cms)") +
        {if(!log_discharge) ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(n = 8), expand = c(0, 0))} +
        {if(log_discharge) ggplot2::scale_y_log10(expand = c(0, 0), breaks = scales::log_breaks(n = 8, base = 10))} +
        {if(!log_discharge) ggplot2::expand_limits(y = c(0, max(.$Value) * 1.05))} +
        {if(log_discharge) ggplot2::expand_limits(y = c(min(.$Value) * .95, max(.$Value) * 1.05))} +
        {if (include_title & .y != "XXXXXXX") ggplot2::ggtitle(.y) } +
        ggplot2::scale_color_viridis_d()+
        ggplot2::theme_bw() +
        ggplot2::labs(color = 'Symbol') +
        ggplot2::theme(panel.border = ggplot2::element_rect(colour = "black", fill = NA, size = 1),
                       legend.position = "right",
                       legend.spacing = ggplot2::unit(0, "cm"),
                       legend.text = ggplot2::element_text(size = 9),
                       panel.grid = ggplot2::element_line(size = .2),
                       axis.title = ggplot2::element_text(size = 12),
                       axis.text = ggplot2::element_text(size = 10),
                       plot.title = ggplot2::element_text(hjust = 1, size = 9, colour = "grey25"))
    ))

  # Return a named list of plots extracted from the tibble
  plots <- sym_plots$plot
  if (nrow(sym_plots) == 1) {
    names(plots) <- "Flow_Data_Symbols"
  } else {
    names(plots) <- paste0(sym_plots$STATION_NUMBER, "_Flow_Data_Symbols")
  }

  plots
}
| /R/plot_flow_data_symbols.R | no_license | cran/fasstr | R | false | false | 7,346 | r | # Copyright 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Plot daily streamflow data with their symbols
#'
#' @description Plots data symbols for a daily data set. A column of symbols is required, default \code{symbols = 'Symbol'}.
#' For HYDAT data, symbols include: 'E' Estimate, 'A' Partial Day, 'B' Ice Conditions, 'D' Dry, and 'R' Revised.
#' Other symbols or categories may be used to colour points of plot.
#' Returns a list of plots.
#'
#' @inheritParams plot_flow_data
#' @param symbols Name of column in \code{data} that contains symbols. Only required if symbols column name is not
#' 'Symbol' (default). Leave blank or set to \code{NULL} if using \code{station_number} argument.
#'
#' @return A list of ggplot2 objects with the following for each station provided:
#' \item{Flow_Data_Symbols}{a plot that contains the flow data with symbol categories}
#'
#' @examples
#' # Run if HYDAT database has been downloaded (using tidyhydat::download_hydat())
#' if (file.exists(tidyhydat::hy_downloaded_db())) {
#'
#' # Plot data and symbols from a data frame and data argument
#' flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116")
#' plot_flow_data_symbols(data = flow_data)
#'
#' # Plot data and symbols using station_number argument with defaults
#' plot_flow_data_symbols(station_number = "08NM116")
#'
#' }
#' @export
plot_flow_data_symbols <- function(data,
                                   dates = Date,
                                   values = Value,
                                   groups = STATION_NUMBER,
                                   symbols = Symbol,
                                   station_number,
                                   water_year_start = 1,
                                   start_year,
                                   end_year,
                                   exclude_years,
                                   months = 1:12,
                                   start_date,
                                   end_date,
                                   log_discharge = FALSE,
                                   include_title = FALSE){

  ## ARGUMENT CHECKS
  ## ---------------
  # Replace missing optional arguments with permissive sentinels so the
  # year/date filters below become no-ops.
  if (missing(data)) {
    data <- NULL
  }
  if (missing(station_number)) {
    station_number <- NULL
  }
  if (missing(start_year)) {
    start_year <- 0
  }
  if (missing(end_year)) {
    end_year <- 9999
  }
  if (missing(exclude_years)) {
    exclude_years <- NULL
  }
  if (missing(start_date)) {
    start_date <- "0000-01-01"
  }
  if (missing(end_date)) {
    end_date <- "3000-12-31"
  }

  water_year_checks(water_year_start)
  # FIX: validate the user-supplied exclude_years; this was previously
  # hard-coded to exclude_years = NULL, which silently skipped the check even
  # though exclude_years is used for filtering further down.
  years_checks(start_year, end_year, exclude_years)
  logical_arg_check(log_discharge)
  logical_arg_check(include_title)
  months_checks(months)

  if (start_date >= end_date) stop("start_date must be less than end_date.", call. = FALSE)

  ## FLOW DATA CHECKS AND FORMATTING
  ## -------------------------------

  # Check if data is provided and import it
  flow_data <- flowdata_import(data = data, station_number = station_number)

  # Check and rename columns
  flow_data <- format_all_cols(data = flow_data,
                               dates = as.character(substitute(dates)),
                               values = as.character(substitute(values)),
                               groups = as.character(substitute(groups)),
                               symbols = as.character(substitute(symbols)),
                               rm_other_cols = TRUE,
                               keep_symbols = TRUE)

  flow_data <- analysis_prep(data = flow_data,
                             water_year_start = water_year_start)

  # Filter for the selected years (remove excluded years after)
  flow_data <- dplyr::filter(flow_data, WaterYear >= start_year & WaterYear <= end_year)

  # Filter for specific dates, if selected
  flow_data <- dplyr::filter(flow_data, Date >= start_date)
  flow_data <- dplyr::filter(flow_data, Date <= end_date)

  # Blank out (rather than drop) excluded years and unselected months so the
  # x-axis keeps its full extent and the gaps are visible.
  flow_data <- dplyr::mutate(flow_data, Value = replace(Value, WaterYear %in% exclude_years, NA))
  flow_data <- dplyr::mutate(flow_data, Value = replace(Value, !Month %in% months, NA))

  if (anyNA(flow_data$Value))
    message(paste0("Note: Did not plot ", sum(is.na(flow_data$Value)),
                   " missing or excluded values between ", min(flow_data$Date), " and ", max(flow_data$Date),"."))

  # Label unflagged days explicitly and make "No Symbol" the first factor
  # level so it takes the first colour in the legend.
  flow_data <- dplyr::mutate(flow_data,
                             Symbol = ifelse(is.na(Symbol), "No Symbol", Symbol),
                             Symbol = factor(Symbol, levels = c("No Symbol",unique(Symbol)[which(unique(Symbol) != "No Symbol")])))

  # Plot the data: build one ggplot per station over nested data.
  sym_plots <- dplyr::group_by(flow_data, STATION_NUMBER)
  sym_plots <- tidyr::nest(sym_plots)
  sym_plots <- dplyr::mutate(
    sym_plots,
    plot = purrr::map2(
      data, STATION_NUMBER,
      ~ggplot2::ggplot(data = ., ggplot2::aes(x = Date, y = Value)) +
        ggplot2::geom_line(colour = "dodgerblue4", size = 0.2, na.rm = TRUE) +
        ggplot2::geom_point(ggplot2::aes(color = Symbol), size = 1.5, na.rm = TRUE)+
        ggplot2::ylab("Daily Discharge (cms)") +
        {if(!log_discharge) ggplot2::scale_y_continuous(breaks = scales::pretty_breaks(n = 8), expand = c(0, 0))} +
        {if(log_discharge) ggplot2::scale_y_log10(expand = c(0, 0), breaks = scales::log_breaks(n = 8, base = 10))} +
        {if(!log_discharge) ggplot2::expand_limits(y = c(0, max(.$Value) * 1.05))} +
        {if(log_discharge) ggplot2::expand_limits(y = c(min(.$Value) * .95, max(.$Value) * 1.05))} +
        {if (include_title & .y != "XXXXXXX") ggplot2::ggtitle(.y) } +
        ggplot2::scale_color_viridis_d()+
        ggplot2::theme_bw() +
        ggplot2::labs(color = 'Symbol') +
        ggplot2::theme(panel.border = ggplot2::element_rect(colour = "black", fill = NA, size = 1),
                       legend.position = "right",
                       legend.spacing = ggplot2::unit(0, "cm"),
                       legend.text = ggplot2::element_text(size = 9),
                       panel.grid = ggplot2::element_line(size = .2),
                       axis.title = ggplot2::element_text(size = 12),
                       axis.text = ggplot2::element_text(size = 10),
                       plot.title = ggplot2::element_text(hjust = 1, size = 9, colour = "grey25"))
    ))

  # Return a named list of plots extracted from the tibble
  plots <- sym_plots$plot
  if (nrow(sym_plots) == 1) {
    names(plots) <- "Flow_Data_Symbols"
  } else {
    names(plots) <- paste0(sym_plots$STATION_NUMBER, "_Flow_Data_Symbols")
  }

  plots
}
|
# Chapter 9, example 4: exercise the Ant class defined in antActivity.R.
source('antActivity.R')
# Construct an Ant with a negative Length -- presumably to demonstrate the
# class's validity checking (TODO confirm against antActivity.R).
ant4 <- Ant(Length=-1.0,Position=c(3.0,2.0,1.0))
# Auto-print the object at top level.
ant4
| /book/packt/R.Object-oriented.Programming/6682OS_09_Codes/chapter9/chapter_9_ex4.R | no_license | xenron/sandbox-da-r | R | false | false | 85 | r | source('antActivity.R')
ant4 <- Ant(Length=-1.0,Position=c(3.0,2.0,1.0))
ant4
|
#setwd('C:/Users/Ivan.Liuyanfeng/Desktop/Data_Mining_Work_Space/FICO/Helping-Santas-Helpers')
#gc(); rm(list=ls()); source('R code/Functions.R');
require(Rcpp)
# Compiled greedy scheduler; presumably exposes create_elves(), solution_Elf()
# and convert_to_chardate() to R -- confirm against the .cpp source.
sourceCpp('Latest Stable Models/greedy algorithm/main_greedy_ver2.1.cpp')
### Segmentation Elf ###
NUM_ELVES <- 900
myelves <- create_elves(NUM_ELVES)
myelves_rate <- myelves[,'current_rating']
# Chunk boundaries within toys_0; `a` is assumed to be a vector of chunk sizes
# built by earlier preprocessing (not visible here) -- TODO confirm.
toys_0_class <- c(sum(a[1:30])-1, sum(a[1:60])-1, sum(a[1:90])-1, sum(a[1:120])-1)
toys_0_row <- c(0,sum(a[1:30]), sum(a[1:60]), sum(a[1:90]))
### main loop ###
# Greedy assignment of all toy batches (toys_0..toys_17) to the 900 elves.
# Result columns (by position): 1 ToyId, 2 ElfId, 3 StartTime, 4 Duration.
submissions <- solution_Elf(toys_0,toys_1,toys_2,toys_3,toys_4,toys_5,toys_6,toys_7,toys_8,
                            toys_9,toys_10,toys_11,toys_12,toys_13,toys_14,toys_15,toys_16,
                            toys_17,myelves,myelves_rate, toys_0_class, toys_0_row)
# Competition score: completion time of the last-started job * log(elves + 1).
(submissions[which.max(submissions[,3]),3]+submissions[which.max(submissions[,3]), 4])*log(901)
# Sanity check: number of distinct toys and elves actually scheduled.
length(table(submissions[,1]));length(table(submissions[,2]))
submissions_output <- data.frame(ToyId = as.integer(submissions[,1]),
                                 ElfId = as.integer(submissions[,2]),
                                 StartTime = convert_to_chardate(submissions[,3]),
                                 Duration = as.integer(submissions[,4]), stringsAsFactors = FALSE)
write.csv(submissions_output, 'toys_submission_greedy_algorithm_1_2.csv', row.names = FALSE)
# Score history from previous runs:
# 1695631955
# 1695641820
# 1695633200
# 1695613055
# 1690830993
# Per-elf toy lists; saved as the starting solution for simulated annealing.
x_all <- list()
for (i in 1:900){
  x_all[[i]] <- submissions[which(submissions[,2]==i), 1]
}
save(x_all, file='Latest Stable Models/simulated_annealing/greedy_algorithm_solution.RData')
## search toys_0 | chunk of toys_0 = train1-14
## delay = 51274116 | 3% | 1644365377
| /Latest Stable Models/greedy algorithm/old2/1/Greedy_algorithm_ver2.1.R | no_license | ivanliu1989/Helping-Santas-Helpers | R | false | false | 1,706 | r | #setwd('C:/Users/Ivan.Liuyanfeng/Desktop/Data_Mining_Work_Space/FICO/Helping-Santas-Helpers')
#gc(); rm(list=ls()); source('R code/Functions.R');
require(Rcpp)
sourceCpp('Latest Stable Models/greedy algorithm/main_greedy_ver2.1.cpp')
### Segmentation Elf ###
NUM_ELVES <- 900
myelves <- create_elves(NUM_ELVES)
myelves_rate <- myelves[,'current_rating']
toys_0_class <- c(sum(a[1:30])-1, sum(a[1:60])-1, sum(a[1:90])-1, sum(a[1:120])-1)
toys_0_row <- c(0,sum(a[1:30]), sum(a[1:60]), sum(a[1:90]))
### main loop ###
submissions <- solution_Elf(toys_0,toys_1,toys_2,toys_3,toys_4,toys_5,toys_6,toys_7,toys_8,
toys_9,toys_10,toys_11,toys_12,toys_13,toys_14,toys_15,toys_16,
toys_17,myelves,myelves_rate, toys_0_class, toys_0_row)
(submissions[which.max(submissions[,3]),3]+submissions[which.max(submissions[,3]), 4])*log(901)
length(table(submissions[,1]));length(table(submissions[,2]))
submissions_output <- data.frame(ToyId = as.integer(submissions[,1]),
ElfId = as.integer(submissions[,2]),
StartTime = convert_to_chardate(submissions[,3]),
Duration = as.integer(submissions[,4]), stringsAsFactors = FALSE)
write.csv(submissions_output, 'toys_submission_greedy_algorithm_1_2.csv', row.names = FALSE)
# 1695631955
# 1695641820
# 1695633200
# 1695613055
# 1690830993
x_all <- list()
for (i in 1:900){
x_all[[i]] <- submissions[which(submissions[,2]==i), 1]
}
save(x_all, file='Latest Stable Models/simulated_annealing/greedy_algorithm_solution.RData')
## search toys_0 | chunk of toys_0 = train1-14
## delay = 51274116 | 3% | 1644365377
|
library(GenomicRanges)

### Description: Average together the total excision repair from BOTH strands ###
## Strategy:
# Step 1: Load data sets containing repair signal in chromatin states for IMR90
#         (both plus and minus strand repair datasets)
# Step 2: Average the plus- and minus-strand repair rates per region and save
#         one GRanges object per time point.
# The original script repeated the same loop verbatim for 64PP and CPD; the
# shared logic is factored into one helper, called once per assay.

out<-"output_directory/"

# Average plus/minus strand XR-seq repair signal for one assay across all of
# its time points. Writes one .RData file (object: Mean_strand_repair) per
# time point; output filenames match the original script exactly.
#   file_prefix: prefix of the input/output .RData files ("XR64PP" / "XRCPD")
#   col_prefix:  prefix of the mean-signal columns in the loaded data frames
#                ("XR64" / "XRCPD")
#   time_points: character vector of time-point labels
#   path:        directory holding the merged-replicate input files
#   out:         output directory
average_strand_repair <- function(file_prefix, col_prefix, time_points, path, out) {
  for (tp in time_points) {
    # Each load() brings Pooled_plus_Chrom15 / Pooled_minus_Chrom15 into the
    # function environment.
    load(file = paste0(path, file_prefix, "_", tp, "_R1_R2_PLUS_core15.RData"))
    Pooled_plus_Chrom15 <- as.data.frame(Pooled_plus_Chrom15)
    load(file = paste0(path, file_prefix, "_", tp, "_R1_R2_MINUS_core15.RData"))
    Pooled_minus_Chrom15 <- as.data.frame(Pooled_minus_Chrom15)
    # Columns 1:6 carry the region coordinates/annotations; append the two
    # per-strand means (columns 7:8) and their average (column 9).
    Mean_strand_repair <- Pooled_plus_Chrom15[, 1:6]
    Mean_strand_repair[[paste0(col_prefix, "_plus_mean")]] <-
      Pooled_plus_Chrom15[[paste0(col_prefix, "_plus_mean")]]
    Mean_strand_repair[[paste0(col_prefix, "_minus_mean")]] <-
      Pooled_minus_Chrom15[[paste0(col_prefix, "_minus_mean")]]
    Mean_strand_repair[[paste0(col_prefix, "_strand_mean_", tp)]] <-
      rowMeans(Mean_strand_repair[, 7:8])
    Mean_strand_repair <- makeGRangesFromDataFrame(Mean_strand_repair, keep.extra.columns = TRUE)
    save(Mean_strand_repair,
         file = paste0(out, file_prefix, "_Strand_mean_", tp, "_core15.RData"))
  }
}

path<-"path_to_merged_replicate_XRseq_files/"

######
## 64PP repair time points
average_strand_repair("XR64PP", "XR64", c("5min","20min","1h","2h","4h"), path, out)

######
## CPD repair
average_strand_repair("XRCPD", "XRCPD", c("1h","4h","8h","16h","1d","2d"), path, out)

### END ###
| /Nucleotide excision repair scripts/Chromatin repair analysis/2 Average repair signal from both Plus & Minus strands.R | no_license | BrianP-13/Genomic_Profiles_of_UV_Induced_Lesion_Susceptibilities_and_Associations_with_Melanoma_Mutations | R | false | false | 2,384 | r | library(GenomicRanges)
### Description: Average together the total excision repair from BOTH strands ###
## Strategy:
# Step 1: Load data sets containing repair signal in chromatin states for IMR90 (both plus and minus strand repair datasets)
# Step 2: Prepare forloop script to average repair rates from both strands.
out<-"output_directory/"
######
## 64PP repair time points
path<-"path_to_merged_replicate_XRseq_files/"
PatternList<-c("5min","20min","1h","2h","4h")
for (i in 1:5) {
load(file = paste0(path, "XR64PP_", PatternList[i], "_R1_R2_PLUS_core15.RData"))
Pooled_plus_Chrom15<-as.data.frame(Pooled_plus_Chrom15)
load(file = paste0(path, "XR64PP_", PatternList[i], "_R1_R2_MINUS_core15.RData"))
Pooled_minus_Chrom15<-as.data.frame(Pooled_minus_Chrom15)
Mean_strand_repair<-Pooled_plus_Chrom15[,1:6]
Mean_strand_repair$XR64_plus_mean<-Pooled_plus_Chrom15$XR64_plus_mean
Mean_strand_repair$XR64_minus_mean<-Pooled_minus_Chrom15$XR64_minus_mean
Mean_strand_repair$XR64_strand_mean<-rowMeans(Mean_strand_repair[,7:8])
colnames(Mean_strand_repair)[9]<-paste0('XR64_strand_mean_', PatternList[i])
Mean_strand_repair<-makeGRangesFromDataFrame(Mean_strand_repair, keep.extra.columns = T)
save(Mean_strand_repair,file = paste0(out, "XR64PP_Strand_mean_", PatternList[i], "_core15.RData"))
}
######
## CPD repair
path<-"path_to_merged_replicate_XRseq_files/"
PatternList<-c("1h","4h","8h","16h","1d","2d")
for (i in 1:6) {
load(file = paste0(path, "XRCPD_", PatternList[i], "_R1_R2_PLUS_core15.RData"))
Pooled_plus_Chrom15<-as.data.frame(Pooled_plus_Chrom15)
load(file = paste0(path, "XRCPD_", PatternList[i], "_R1_R2_MINUS_core15.RData"))
Pooled_minus_Chrom15<-as.data.frame(Pooled_minus_Chrom15)
Mean_strand_repair<-Pooled_plus_Chrom15[,1:6]
Mean_strand_repair$XRCPD_plus_mean<-Pooled_plus_Chrom15$XRCPD_plus_mean
Mean_strand_repair$XRCPD_minus_mean<-Pooled_minus_Chrom15$XRCPD_minus_mean
Mean_strand_repair$XRCPD_strand_mean<-rowMeans(Mean_strand_repair[,7:8])
colnames(Mean_strand_repair)[9]<-paste0('XRCPD_strand_mean_', PatternList[i])
Mean_strand_repair<-makeGRangesFromDataFrame(Mean_strand_repair, keep.extra.columns = T)
save(Mean_strand_repair,file = paste0(out, "XRCPD_Strand_mean_", PatternList[i], "_core15.RData"))
}
### END ###
|
# Modified 15 May: rearrange each matrix so that the focal plot is the first row of the resulting matrix.
# Correction 22 Aug: use workspace 2, not old workspace
# Modified 23 Oct: change to use unfuzzed coordinates
# Modified 13 Dec: change input data to entire USA.
radii <- c(5,10,20,50,75,100,150,200,300) * 1000
task <- as.numeric(Sys.getenv('PBS_ARRAYID'))
r <- radii[task]
load('/mnt/research/nasabio/data/fia/fiaworkspace_nospatial_wholeusa.r')
plotmetadata <- read.csv('/mnt/research/nasabio/data/fia/fianocoords_wholeusa.csv', stringsAsFactors = FALSE)
library(dplyr)
area_by_sp <- function(dat, sppids) {
areas <- numeric(length(sppids))
for (i in 1:length(sppids)) {
areas[i] <- sum(dat$basalarea[dat$SPCD == sppids[i]])
}
areas
}
all_mats <- list()
for (p in 1:length(fianhb_r)) {
if (class(fianhb_r[[p]]) == 'data.frame') {
if (any(fianhb_r[[p]]$dist <= r)) {
# Subset out the data frame with the nearest neighbors
neighbs <- subset(fianhb_r[[p]], dist <= r)
# Subset out the data frame with the nearest neighbors
plotcns <- plotmetadata[c(p, neighbs$idx), ]$PLT_CN
dat_p <- subset(fiasums_plot, PLT_CN %in% plotcns)
# Convert into a site x species matrix
sppids <- sort(unique(dat_p$SPCD))
mat_p <- dat_p %>% group_by(PLT_CN) %>% do(x = area_by_sp(., sppids))
# Sort the output so that the focal plot will be the first row of the resulting matrix.
focal_idx <- which(mat_p$PLT_CN == plotmetadata$PLT_CN[p])
mat_p <- mat_p[c(focal_idx, (1:nrow(mat_p))[-focal_idx]), ]
mat_p <- do.call('rbind', mat_p$x)
sppnames <- fiataxa$binomial_forphylo[match(sppids, fiataxa$FIA.Code)]
if (inherits(mat_p, 'matrix')) {
dimnames(mat_p) <- list(1:nrow(mat_p), sppnames)
all_mats[[length(all_mats) + 1]] <- mat_p
}
else {
all_mats[[length(all_mats) + 1]] <- NA
}
}
else {
all_mats[[length(all_mats) + 1]] <- NA
}
}
else {
all_mats[[length(all_mats) + 1]] <- NA
}
if (p%%1000 == 0) print(p)
}
save(all_mats, file = paste0('/mnt/research/nasabio/data/fia/mats/usamat_',as.character(as.integer(r)),'.r'))
| /prep_diversity_files/deprecated/fia_getidx.r | no_license | qdread/nasabio | R | false | false | 2,122 | r | # Modified 15 May: rearrange each matrix so that the focal plot is the first row of the resulting matrix.
# Correction 22 Aug: use workspace 2, not old workspace
# Modified 23 Oct: change to use unfuzzed coordinates
# Modified 13 Dec: change input data to entire USA.
radii <- c(5,10,20,50,75,100,150,200,300) * 1000
task <- as.numeric(Sys.getenv('PBS_ARRAYID'))
r <- radii[task]
load('/mnt/research/nasabio/data/fia/fiaworkspace_nospatial_wholeusa.r')
plotmetadata <- read.csv('/mnt/research/nasabio/data/fia/fianocoords_wholeusa.csv', stringsAsFactors = FALSE)
library(dplyr)
area_by_sp <- function(dat, sppids) {
areas <- numeric(length(sppids))
for (i in 1:length(sppids)) {
areas[i] <- sum(dat$basalarea[dat$SPCD == sppids[i]])
}
areas
}
all_mats <- list()
for (p in 1:length(fianhb_r)) {
if (class(fianhb_r[[p]]) == 'data.frame') {
if (any(fianhb_r[[p]]$dist <= r)) {
# Subset out the data frame with the nearest neighbors
neighbs <- subset(fianhb_r[[p]], dist <= r)
# Subset out the data frame with the nearest neighbors
plotcns <- plotmetadata[c(p, neighbs$idx), ]$PLT_CN
dat_p <- subset(fiasums_plot, PLT_CN %in% plotcns)
# Convert into a site x species matrix
sppids <- sort(unique(dat_p$SPCD))
mat_p <- dat_p %>% group_by(PLT_CN) %>% do(x = area_by_sp(., sppids))
# Sort the output so that the focal plot will be the first row of the resulting matrix.
focal_idx <- which(mat_p$PLT_CN == plotmetadata$PLT_CN[p])
mat_p <- mat_p[c(focal_idx, (1:nrow(mat_p))[-focal_idx]), ]
mat_p <- do.call('rbind', mat_p$x)
sppnames <- fiataxa$binomial_forphylo[match(sppids, fiataxa$FIA.Code)]
if (inherits(mat_p, 'matrix')) {
dimnames(mat_p) <- list(1:nrow(mat_p), sppnames)
all_mats[[length(all_mats) + 1]] <- mat_p
}
else {
all_mats[[length(all_mats) + 1]] <- NA
}
}
else {
all_mats[[length(all_mats) + 1]] <- NA
}
}
else {
all_mats[[length(all_mats) + 1]] <- NA
}
if (p%%1000 == 0) print(p)
}
save(all_mats, file = paste0('/mnt/research/nasabio/data/fia/mats/usamat_',as.character(as.integer(r)),'.r'))
|
# make test patterns
library(rstrauss)
# Simulate a Strauss (inhibition) point process on the unit square centred at
# the origin; perfect=T requests perfect (CFTP-style) sampling, capped at 1e5
# iterations. NOTE(review): the positional arguments (500 points?, 0.1 / 0.03
# interaction parameters?) follow rstrauss()'s signature -- confirm against
# the package documentation.
z <- rstrauss(500, 0.1, 0.03, bbox=cbind(0:1, 0:1)-.5, perfect=T, iter=1e5)
|
# ellipses and inverses
# demonstrate orthogonality of ellipses for A and A^{-1}
#source("c:/R/functions/car-ellipse.R")

# Centre and shape matrices: A is a correlation-like 2x2 matrix; the figure
# also draws 2*A and the inverses of both.
m <- c(0,0)
A <- matrix(c(1, .5, .5, 1), 2, 2)
A2 <- 2*A
Ainv <- solve(A)
Ainv2 <- solve(A2)

## from heplot.R
# Place a text label just outside an ellipse boundary: at its topmost point
# when the boundary points have non-negative x-y correlation (positively
# oriented ellipse), otherwise at its bottommost point.
#   ellipse: 2-column matrix of boundary points
#   label:   text or plotmath expression to draw
#   col:     label colour
#   adj:     optional text justification; a direction-appropriate default is
#            chosen when missing
#   ...:     passed through to text()
label.ellipse <- function(ellipse, label, col, adj, ...){
  if (cor(ellipse)[1,2] >= 0){
    index <- which.max(ellipse[,2])
    # Offset by half the label size so the text clears the boundary.
    x <- ellipse[index, 1] + 0.5 * strwidth(label) # was: "A"
    y <- ellipse[index, 2] + 0.5 *strheight("A")
    if (missing(adj)) adj <- c(1, 0)
  }
  else {
    index <- which.min(ellipse[,2])
    x <- ellipse[index, 1] - 0.5 * strwidth(label) # was: "A"
    y <- ellipse[index, 2] - 0.5 * strheight("A")
    if (missing(adj)) adj <- c(0, 1)
  }
  text(x, y, label, adj=adj, xpd=TRUE, col=col, ...)
}

#library(car)
# NOTE(review): cd() is not base R (presumably a setwd() wrapper), and the
# ellipse() used below comes from the commented-out car-ellipse.R / heplots --
# confirm both are in scope when this script runs.
cd("c:/sasuser/datavis/manova/ellipses/fig")
#pdf(file="inverse.pdf", width=7, height=7)
png(file="inverse.png", width=7, height=7, res=200, units="in")
op <- par(mar=c(3, 3, 1, 1) + 0.1)
# Draw C and 2C (filled) in red, the +-45-degree reference diagonals, then
# the inverse ellipses in blue.
E11 <- ellipse(m, A, radius=1, add=FALSE, asp=1, xlim=c(-2,2), ylim=c(-2,2), fill=TRUE)
E12 <- ellipse(m, A2, radius=1, fill=TRUE, fill.alpha=0.2)
r <- 1.4
lines(matrix(c(-r, r, -r, r), 2, 2), col="black")
lines(matrix(c(-r, r, r, -r), 2, 2), col="black")
E21 <- ellipse(m, Ainv, radius=1, col="blue")
E22 <- ellipse(m, Ainv2, radius=1, col="blue", fill=TRUE, fill.alpha=0.2)
#E23 <- ellipse(m, Ainv, radius=0.5, col="blue")
label.ellipse(E11, "C", "red", cex=1.3)
label.ellipse(E12, "2C", "red", cex=1.5)
label.ellipse(E21, expression(C^-1), "blue", cex=1.5, adj=c(-1,0.5))
label.ellipse(E22, expression((2 * C)^-1), "blue", cex=1.3, adj=c(-0.5,0.5))
par(op)
dev.off()
| /R/inverse.R | no_license | jcmunozmora/ellipses_paper | R | false | false | 1,685 | r | # ellipses and inverses
# demonstrate orthogonality of ellipses for A and A^{-1}
#source("c:/R/functions/car-ellipse.R")
m <- c(0,0)
A <- matrix(c(1, .5, .5, 1), 2, 2)
A2 <- 2*A
Ainv <- solve(A)
Ainv2 <- solve(A2)
## from heplot.R
label.ellipse <- function(ellipse, label, col, adj, ...){
if (cor(ellipse)[1,2] >= 0){
index <- which.max(ellipse[,2])
x <- ellipse[index, 1] + 0.5 * strwidth(label) # was: "A"
y <- ellipse[index, 2] + 0.5 *strheight("A")
if (missing(adj)) adj <- c(1, 0)
}
else {
index <- which.min(ellipse[,2])
x <- ellipse[index, 1] - 0.5 * strwidth(label) # was: "A"
y <- ellipse[index, 2] - 0.5 * strheight("A")
if (missing(adj)) adj <- c(0, 1)
}
text(x, y, label, adj=adj, xpd=TRUE, col=col, ...)
}
#library(car)
cd("c:/sasuser/datavis/manova/ellipses/fig")
#pdf(file="inverse.pdf", width=7, height=7)
png(file="inverse.png", width=7, height=7, res=200, units="in")
op <- par(mar=c(3, 3, 1, 1) + 0.1)
E11 <- ellipse(m, A, radius=1, add=FALSE, asp=1, xlim=c(-2,2), ylim=c(-2,2), fill=TRUE)
E12 <- ellipse(m, A2, radius=1, fill=TRUE, fill.alpha=0.2)
r <- 1.4
lines(matrix(c(-r, r, -r, r), 2, 2), col="black")
lines(matrix(c(-r, r, r, -r), 2, 2), col="black")
E21 <- ellipse(m, Ainv, radius=1, col="blue")
E22 <- ellipse(m, Ainv2, radius=1, col="blue", fill=TRUE, fill.alpha=0.2)
#E23 <- ellipse(m, Ainv, radius=0.5, col="blue")
label.ellipse(E11, "C", "red", cex=1.3)
label.ellipse(E12, "2C", "red", cex=1.5)
label.ellipse(E21, expression(C^-1), "blue", cex=1.5, adj=c(-1,0.5))
label.ellipse(E22, expression((2 * C)^-1), "blue", cex=1.3, adj=c(-0.5,0.5))
par(op)
dev.off()
|
| pc = 0xc003 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc006 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc007 | a = 0x02 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc008 | a = 0x04 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc009 | a = 0x08 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00a | a = 0x10 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00b | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00c | a = 0x40 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00d | a = 0x80 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc00e | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110111 |
| pc = 0xc00f | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc010 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc011 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc012 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| /emulator/res/asl_acc.r | no_license | uatach/mc861-nes | R | false | false | 1,320 | r | | pc = 0xc003 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc006 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc007 | a = 0x02 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc008 | a = 0x04 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc009 | a = 0x08 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00a | a = 0x10 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00b | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00c | a = 0x40 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00d | a = 0x80 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc00e | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110111 |
| pc = 0xc00f | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc010 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc011 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc012 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
|
#' @export
print.lmperm <- function(x,...){
print(x$table,...)
} | /R/print.lmperm.R | no_license | cran/permuco | R | false | false | 69 | r | #' @export
# S3 print method for "lmperm" objects: printing the fitted permutation
# object reduces to printing its stored results table; `...` is forwarded
# to the table's own print method.
print.lmperm <- function(x,...){
print(x$table,...)
} |
test_that("roxygen code examples are written to cache as both individual expressions and as whole text", {
  # Use a throw-away cache so this test cannot pollute (or be polluted by)
  # other tests' cache state.
  on.exit(clear_testthat_cache())
  fresh_testthat_cache()
  more_specs <- cache_more_specs_default()
  text <- c(
    "#' Comment",
    "#'",
    "#' Stuff",
    "#' @examples",
    "#' 1 + 1",
    "#' f(x )",
    "NULL",
    "103"
  )
  styled <- style_text(text)
  expect_equal(cache_info()$n, 6)
  # Expected cache entries after styling:
  # 1 whole input (with comments)
  # 1 code whole
  # 1 code by expression
  # 1 roxygen example whole
  # 2 roxygen example expressions (cached individually)
  # total: 6
  expect_true(
    is_cached(as.character(styled), tidyverse_style(), more_specs = more_specs)
  )
  expect_true(
    is_cached(c("1 + 1", "f(x)"), tidyverse_style(), more_specs = more_specs)
  )
  expect_true(
    is_cached(c("1 + 1"), tidyverse_style(), more_specs = more_specs)
  )
  expect_true(
    is_cached(c("f(x)"), tidyverse_style(), more_specs = more_specs)
  )
  expect_true(
    is_cached(c("NULL"), tidyverse_style(), more_specs = more_specs)
  )
  expect_true(
    is_cached(c("103"), tidyverse_style(), more_specs = more_specs)
  )
  # The unstyled form "f(x )" was transformed to "f(x)", so only the styled
  # form may appear in the cache.
  expect_false(
    is_cached(c("f(x )"), tidyverse_style(), more_specs = more_specs)
  )
})
test_that("roxygen code examples written to cache as whole expressions bring speed gain", {
  # Timing-based test: too flaky for CRAN machines.
  skip_on_cran()
  on.exit(clear_testthat_cache())
  fresh_testthat_cache()
  text <- readLines(test_path("cache-with-r-cache/roxygen-cache-1.R"))
  first <- system.time(styled <- style_text(text))
  # Invalidate only the whole-input cache entry (not the roxygen-example
  # entries) by altering a comment line, then re-style.
  styled[1] <- "#' This is a nother text"
  second <- system.time(style_text(styled))
  # With the roxygen example entries hitting the cache, the second run must
  # be substantially faster than the cold run.
  expect_gt(first["elapsed"], 6 * second["elapsed"])
})
test_that("cache is deactivated at end of caching related testthat file", {
# Guard: earlier tests in this file activate the styler cache; make sure
# none of them leaked an activated cache into subsequent test files.
expect_false(cache_is_activated())
})
# consider dropping transformer text from cache key to speed up.
| /tests/testthat/test-cache-interaction-roxygen-code-examples.R | no_license | oranwutang/styler | R | false | false | 1,834 | r | test_that("roxzgen code examples are written to cache as both individual expressions and as whole text", {
on.exit(clear_testthat_cache())
fresh_testthat_cache()
more_specs <- cache_more_specs_default()
text <- c(
"#' Comment",
"#'",
"#' Stuff",
"#' @examples",
"#' 1 + 1",
"#' f(x )",
"NULL",
"103"
)
styled <- style_text(text)
expect_equal(cache_info()$n, 6)
# 1 whole (with comments)
# 1code whole
# 1 code by expr
# 1 roxzgen whole
# 2 roxzgen individula
# total: 6
expect_true(
is_cached(as.character(styled), tidyverse_style(), more_specs = more_specs)
)
expect_true(
is_cached(c("1 + 1", "f(x)"), tidyverse_style(), more_specs = more_specs)
)
expect_true(
is_cached(c("1 + 1"), tidyverse_style(), more_specs = more_specs)
)
expect_true(
is_cached(c("f(x)"), tidyverse_style(), more_specs = more_specs)
)
expect_true(
is_cached(c("NULL"), tidyverse_style(), more_specs = more_specs)
)
expect_true(
is_cached(c("103"), tidyverse_style(), more_specs = more_specs)
)
expect_false(
is_cached(c("f(x )"), tidyverse_style(), more_specs = more_specs)
)
})
test_that("roxzgen code examples are written to cache as whole expressions bring speedgain", {
skip_on_cran()
on.exit(clear_testthat_cache())
fresh_testthat_cache()
text <- readLines(test_path("cache-with-r-cache/roxygen-cache-1.R"))
first <- system.time(styled <- style_text(text))
# don't use full cache, only roxygen cache
styled[1] <- "#' This is a nother text"
second <- system.time(style_text(styled))
expect_gt(first["elapsed"], 6 * second["elapsed"])
})
test_that("cache is deactivated at end of caching related testthat file", {
expect_false(cache_is_activated())
})
# consider dropping transformer text from cache key to speed up.
|
################################################################################################################################################
## Author: Noshad Hosseini ##
## Date: 06-12-2019 ##
## Description: We are going to first plot the pca of our tile-coverage data, then we will find it's principle components, using linear ##
## Algebra to remove those from data (removing the variation, which is caused by the noise, it is a denoising thing) ##
################################################################################################################################################
#PREPRATION-------------------------------------------------------------------------------------------------------------------------------------
#Libraries:
library(GenomicRanges)
library(cnvex) #this is a local library, might not work on other machines
library(tidyverse)
library(rlist)
library(gplots)
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
library(mclust)
library(gridExtra)
library(corrr)
library(plotly)
library(rlang)
library(limma) #bioconductor lib
library(gridExtra)
library(ggfortify)
set.seed(1024)
#Data load: #we will use data loaded in tiles_Correlation_Analysis
base::load('/home/noshadh/Codes/Germline_variation_detection/K-means_Multimodal_fitting_data_gcNormlized.RData')
#TILES_PCA PLOT---------------------------------------------------------------------------------------------------------------------------------
#PLOTING TILES PCA USING GC-NORMALIZED COVERAGE DATA FOR NORMAL PATIENTS
autoplot(prcomp(t(sex_removed_tile_cov_gc_blacklist_newMask)), loadings = FALSE)+theme_minimal()
#analysi the PC information
pca_data = prcomp(t(tile_cov_gc_normalized))
pc_variance = as.data.frame((pca_data$sdev)^2/sum((pca_data$sdev)^2))
pc_cummulative_variation = cumsum(pc_variance)
pc_variance = pc_variance %>% mutate(cumsum = pc_cummulative_variation$pc_variance)
colnames(pc_variance) = c("variance", "cum_variance")
rm(pc_cummulative_variation)
#plot these values:
pc_variance %>% ggplot()+
geom_point(aes(y = variance, x = 1:dim(pc_variance)[1]))+theme_minimal()
#By looking at these values, I decided to remove three of PC's
num_PC = 6
#REMOVING THE COMPONENTS FROM NORMAL DATA-------------------------------------------------------------------------------------------------------
#first method: find eigen vslues, rotate data to that space, make the value zero, rotate back
#eigen_vectors = eigen(cov(tile_cov_gc_normalized))$vectors
eigen_vectors = prcomp((sex_removed_tile_cov_gc_blacklist_newMask), center = TRUE)$rotation
rotated_tile_cov_gc_normalized = as.matrix((sex_removed_tile_cov_gc_blacklist_newMask)) %*% eigen_vectors
rotated_tile_cov_gc_normalized[, 1:num_PC] = 0
purified_tile_cov_gc_normalized = rotated_tile_cov_gc_normalized %*% t(eigen_vectors) # transpose of orthogonal matrix = inverse
#(OPTIONAL) Analysing the effect of principle reduction
sum(purified_tile_cov_gc_normalized)
#analysing column changes from pc1:3 removed compare to pc1:4 removed (expect to be pseudo uniformily)
col_sum3 = as.data.frame(purified_tile_cov_gc_normalized) %>% summarise_all(funs(sum)) #sum of columns #change num_pc to 4 and try again for col_sum3
col_changes34 = abs(col_sum3) - abs(col_sum4) #sum of changes from removing pc4 from pc3removed case
ggplot() + geom_point(aes(x = 1:dim(col_changes34)[2],y = t(col_changes34)))+theme_minimal()
#analysing row changes from pc1:3 removed compare to pc1:4 removed (expect to be non-uniformly)
row_sum3 = rowSums(x = purified_tile_cov_gc_normalized, dims = 1)
row_changes34 = abs(row_sum2) - abs(row_sum3)
ggplot() + geom_point(aes(x = 1:length(row_changes34),y = (row_changes34)))+theme_minimal()+ggtitle("2 to 3")
#QC: turn the space into pc subspace and plot
QC_matrix = as.matrix(t(sex_removed_tile_cov_gc_blacklist)) %*% eigen_vectors #rotate it to the pca components
ggplot()+geom_point(aes(x = QC_matrix[,1], y = QC_matrix[,5]))
#second method: map unto the subplane spaned by those PC's, map each vector onto that subplane, remove the values from the vector
####SVD----------------------------------------------------------------------------------------------------------------------------------------
#remove sex chromosomes:
sex_removed_tile_cov_gc = tile_cov_gc_normalized[1:287509,]
sex_removed_tile_cov_gc = sex_removed_tile_cov_gc %>% mutate(blacklist = blacklist$blacklist[1:287509])
sex_removed_tile_cov_gc = sex_removed_tile_cov_gc %>% mutate(tile = row_number()) #to keep track of what tiles will remain
sex_removed_tile_cov_gc_blacklist = sex_removed_tile_cov_gc %>% filter(blacklist == 0)
sex_removed_tile_cov_gc_blacklist = sex_removed_tile_cov_gc_blacklist %>% select(-blacklist)
blacklist_removed_tile_list = sex_removed_tile_cov_gc_blacklist %>% select(tile)
blacklist_removed_tile_list = blacklist_removed_tile_list %>% mutate(new_row = row_number())
sex_removed_tile_cov_gc_blacklist = sex_removed_tile_cov_gc_blacklist %>% select(-tile)
#ADDING NEW BLACKLIST MASK (THESE ARE THE REGIONS SELECTED BY VARIANCE ANALYSIS, THE REGIONS PC'S EFFECT THE MOST)
#sex_removed_tile_cov_gc_blacklist_newMask = (sex_removed_tile_cov_gc_blacklist %>% mutate(tile = blacklist_removed_tile_list$tile)) #first add the ORIGINAL ROW's to the matrix
#now remove the tiles(rows) we want to be removed
#NEW_MASK = c(14320:14327,278577:278603,28188,96148,267422) #it has the tile numbers
#sex_removed_tile_cov_gc_blacklist_newMask = sex_removed_tile_cov_gc_blacklist_newMask %>% filter(!(tile %in% NEW_MASK))
#blacklist_removed_tile_list_newMask = sex_removed_tile_cov_gc_blacklist_newMask %>% select(tile)
#blacklist_removed_tile_list_newMask = blacklist_removed_tile_list_newMask %>% mutate(new_row = row_number())
#sex_removed_tile_cov_gc_blacklist_newMask = sex_removed_tile_cov_gc_blacklist_newMask %>% select(-tile)
#our matrix should have 110 rows (we want each point to be a patient and not a tile)
svd = svd(sex_removed_tile_cov_gc_blacklist_newMask_227)
#scree plot
as.data.frame(svd$d) %>% ggplot()+
geom_point(aes(y = svd$d, x = 1:length(svd$d)))+theme_minimal()+geom_line(aes(y = svd$d, x = 1:length(svd$d)))
#we choose the number of sc to be deleted
sc_num = 8
svd$d[1:sc_num] = 0
svd$d = diag(svd$d)
purified_tile_cov_gc_normalized_227 = svd$u %*% tcrossprod(svd$d,svd$v)
#look at the distribution of data before and after normalization
# Scatter-plot one sample's per-tile coverage values over tile positions
# [start, end], colour-coded by blacklist status.
#   data        - matrix/data frame, one row per sample, one column per tile
#                 (header comment suggests 110 samples); `sample` picks the row
#   start, end  - first/last tile column to display (default spans all tiles)
# NOTE(review): relies on a global `file` object (file$tile$blacklist) and on
# the hard-coded tile index 257422 being flagged as 'selected' -- confirm both
# match the currently loaded tiling before reusing this helper.
plotDist = function(data, sample,start = 1,end = 308837){ #it should have 110 row and ...
temp = as.data.frame((data[sample,])) %>% mutate(gr = 0) #we use this to just show the selected region on plot
temp = temp %>% mutate(bl = if_else(file$tile$blacklist > 0,'blacklist','normal')) #bl is to show blacklisted regions
temp$bl[257422:257422] = 'selected' #this shows the tile we are looking at
row = as.data.frame((data[sample,start:end]))
colnames(row) = "val"
#row =row %>% filter(val < 1 & val > -1)
# One point per tile; colour encodes blacklist/normal/selected status.
ggplot()+geom_point(aes(x = 1:dim(row)[1],y = row$val,color = as.factor(temp[start:end,]$bl)),size = 0.8)+theme_linedraw()
#+ylim(-10,45)
#geom_point(aes(x = 1:dim(row)[1],y = row$val),size = 0.3)+theme_minimal()+ylim(0,10)
#+geom_vline(xintercept = 287509,linetype = "dashed",size = 0.3)+
#geom_vline(xintercept = 303114,linetype = "dashed",size = 0.3)+ylim(0,100)
}
# Scatter-plot one sample's per-tile values over tile positions [start, end]
# as a plain point cloud (no blacklist colouring), y-axis clipped to [-10, 45].
#   data       - matrix with one row per sample, one column per tile;
#                `sample` selects the row to display
#   start, end - first/last tile column to show (default spans all tiles)
plotDistPure = function(data, sample, start = 1, end = 308837) {
  # Restrict the chosen sample row to the requested tile window.
  vals <- as.data.frame(data[sample, start:end])
  colnames(vals) <- "val"
  ggplot() +
    geom_point(aes(x = seq_len(dim(vals)[1]), y = vals$val), size = 0.8) +
    theme_linedraw() +
    ylim(-10, 45)
}
plotDist(t(tile_cov_gc_normalized),65,1,12341)
plotDistPure(t(purified_tile_cov_gc_normalized),sample,1,31804)
#LOOK AT ONE CHROMOSOME LIKE THE THING WE HAVE ABOVE
#chr1 long arm is from 12509 to 24896
#chr21: short arm is from tile 277757 to 278857
#how to find arms of chromosome tiles:
#temp = (as.data.frame(file$tile)) %>% mutate(tile = row_number())
#(temp %>% group_by(arm) %>% slice(1))[34:37,]
#analysis commands for a sample tile (14324 here)
#variant_tiles_bl_8_9
#file$tile[14324]
#temp = (as.data.frame(file$tile)) %>% mutate(tile = row_number())
#(temp %>% group_by(arm) %>% slice(1))[35:38,]
#blacklist_removed_tile_list_newMask %>% filter(tile >= 120430) %>% slice(1)
#blacklist_removed_tile_list_newMask %>% filter(new_row == 120430)
| /Principle_Component_Analysis.R | no_license | NoshadHo/Germline_variation_detection | R | false | false | 9,778 | r | ################################################################################################################################################
## Author: Noshad Hosseini ##
## Date: 06-12-2019 ##
## Description: We are going to first plot the pca of our tile-coverage data, then we will find it's principle components, using linear ##
## Algebra to remove those from data (removing the variation, which is caused by the noise, it is a denoising thing) ##
################################################################################################################################################
#PREPRATION-------------------------------------------------------------------------------------------------------------------------------------
#Libraries:
library(GenomicRanges)
library(cnvex) #this is a local library, might not work on other machines
library(tidyverse)
library(rlist)
library(gplots)
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
library(mclust)
library(gridExtra)
library(corrr)
library(plotly)
library(rlang)
library(limma) #bioconductor lib
library(gridExtra)
library(ggfortify)
set.seed(1024)
#Data load: #we will use data loaded in tiles_Correlation_Analysis
base::load('/home/noshadh/Codes/Germline_variation_detection/K-means_Multimodal_fitting_data_gcNormlized.RData')
#TILES_PCA PLOT---------------------------------------------------------------------------------------------------------------------------------
#PLOTING TILES PCA USING GC-NORMALIZED COVERAGE DATA FOR NORMAL PATIENTS
autoplot(prcomp(t(sex_removed_tile_cov_gc_blacklist_newMask)), loadings = FALSE)+theme_minimal()
#analysi the PC information
pca_data = prcomp(t(tile_cov_gc_normalized))
pc_variance = as.data.frame((pca_data$sdev)^2/sum((pca_data$sdev)^2))
pc_cummulative_variation = cumsum(pc_variance)
pc_variance = pc_variance %>% mutate(cumsum = pc_cummulative_variation$pc_variance)
colnames(pc_variance) = c("variance", "cum_variance")
rm(pc_cummulative_variation)
#plot these values:
pc_variance %>% ggplot()+
geom_point(aes(y = variance, x = 1:dim(pc_variance)[1]))+theme_minimal()
#By looking at these values, I decided to remove three of PC's
num_PC = 6
#REMOVING THE COMPONENTS FROM NORMAL DATA-------------------------------------------------------------------------------------------------------
#first method: find eigen vslues, rotate data to that space, make the value zero, rotate back
#eigen_vectors = eigen(cov(tile_cov_gc_normalized))$vectors
eigen_vectors = prcomp((sex_removed_tile_cov_gc_blacklist_newMask), center = TRUE)$rotation
rotated_tile_cov_gc_normalized = as.matrix((sex_removed_tile_cov_gc_blacklist_newMask)) %*% eigen_vectors
rotated_tile_cov_gc_normalized[, 1:num_PC] = 0
purified_tile_cov_gc_normalized = rotated_tile_cov_gc_normalized %*% t(eigen_vectors) # transpose of orthogonal matrix = inverse
#(OPTIONAL) Analysing the effect of principle reduction
sum(purified_tile_cov_gc_normalized)
#analysing column changes from pc1:3 removed compare to pc1:4 removed (expect to be pseudo uniformily)
col_sum3 = as.data.frame(purified_tile_cov_gc_normalized) %>% summarise_all(funs(sum)) #sum of columns #change num_pc to 4 and try again for col_sum3
col_changes34 = abs(col_sum3) - abs(col_sum4) #sum of changes from removing pc4 from pc3removed case
ggplot() + geom_point(aes(x = 1:dim(col_changes34)[2],y = t(col_changes34)))+theme_minimal()
#analysing row changes from pc1:3 removed compare to pc1:4 removed (expect to be non-uniformly)
row_sum3 = rowSums(x = purified_tile_cov_gc_normalized, dims = 1)
row_changes34 = abs(row_sum2) - abs(row_sum3)
ggplot() + geom_point(aes(x = 1:length(row_changes34),y = (row_changes34)))+theme_minimal()+ggtitle("2 to 3")
#QC: turn the space into pc subspace and plot
QC_matrix = as.matrix(t(sex_removed_tile_cov_gc_blacklist)) %*% eigen_vectors #rotate it to the pca components
ggplot()+geom_point(aes(x = QC_matrix[,1], y = QC_matrix[,5]))
#second method: map unto the subplane spaned by those PC's, map each vector onto that subplane, remove the values from the vector
####SVD----------------------------------------------------------------------------------------------------------------------------------------
#remove sex chromosomes:
sex_removed_tile_cov_gc = tile_cov_gc_normalized[1:287509,]
sex_removed_tile_cov_gc = sex_removed_tile_cov_gc %>% mutate(blacklist = blacklist$blacklist[1:287509])
sex_removed_tile_cov_gc = sex_removed_tile_cov_gc %>% mutate(tile = row_number()) #to keep track of what tiles will remain
sex_removed_tile_cov_gc_blacklist = sex_removed_tile_cov_gc %>% filter(blacklist == 0)
sex_removed_tile_cov_gc_blacklist = sex_removed_tile_cov_gc_blacklist %>% select(-blacklist)
blacklist_removed_tile_list = sex_removed_tile_cov_gc_blacklist %>% select(tile)
blacklist_removed_tile_list = blacklist_removed_tile_list %>% mutate(new_row = row_number())
sex_removed_tile_cov_gc_blacklist = sex_removed_tile_cov_gc_blacklist %>% select(-tile)
#ADDING NEW BLACKLIST MASK (THESE ARE THE REGIONS SELECTED BY VARIANCE ANALYSIS, THE REGIONS PC'S EFFECT THE MOST)
#sex_removed_tile_cov_gc_blacklist_newMask = (sex_removed_tile_cov_gc_blacklist %>% mutate(tile = blacklist_removed_tile_list$tile)) #first add the ORIGINAL ROW's to the matrix
#now remove the tiles(rows) we want to be removed
#NEW_MASK = c(14320:14327,278577:278603,28188,96148,267422) #it has the tile numbers
#sex_removed_tile_cov_gc_blacklist_newMask = sex_removed_tile_cov_gc_blacklist_newMask %>% filter(!(tile %in% NEW_MASK))
#blacklist_removed_tile_list_newMask = sex_removed_tile_cov_gc_blacklist_newMask %>% select(tile)
#blacklist_removed_tile_list_newMask = blacklist_removed_tile_list_newMask %>% mutate(new_row = row_number())
#sex_removed_tile_cov_gc_blacklist_newMask = sex_removed_tile_cov_gc_blacklist_newMask %>% select(-tile)
#our matrix should have 110 rows (we want each point to be a patient and not a tile)
svd = svd(sex_removed_tile_cov_gc_blacklist_newMask_227)
#scree plot
as.data.frame(svd$d) %>% ggplot()+
geom_point(aes(y = svd$d, x = 1:length(svd$d)))+theme_minimal()+geom_line(aes(y = svd$d, x = 1:length(svd$d)))
#we choose the number of sc to be deleted
sc_num = 8
svd$d[1:sc_num] = 0
svd$d = diag(svd$d)
purified_tile_cov_gc_normalized_227 = svd$u %*% tcrossprod(svd$d,svd$v)
#look at the distribution of data before and after normalization
plotDist = function(data, sample,start = 1,end = 308837){ #it should have 110 row and ...
temp = as.data.frame((data[sample,])) %>% mutate(gr = 0) #we use this to just show the selected region on plot
temp = temp %>% mutate(bl = if_else(file$tile$blacklist > 0,'blacklist','normal')) #bl is to show blacklisted regions
temp$bl[257422:257422] = 'selected' #this shows the tile we are looking at
row = as.data.frame((data[sample,start:end]))
colnames(row) = "val"
#row =row %>% filter(val < 1 & val > -1)
ggplot()+geom_point(aes(x = 1:dim(row)[1],y = row$val,color = as.factor(temp[start:end,]$bl)),size = 0.8)+theme_linedraw()
#+ylim(-10,45)
#geom_point(aes(x = 1:dim(row)[1],y = row$val),size = 0.3)+theme_minimal()+ylim(0,10)
#+geom_vline(xintercept = 287509,linetype = "dashed",size = 0.3)+
#geom_vline(xintercept = 303114,linetype = "dashed",size = 0.3)+ylim(0,100)
}
plotDistPure = function(data, sample,start = 1,end = 308837){ #it should have 110 row and ...
#temp = as.data.frame((data[sample,])) %>% mutate(gr = 0) #we use this to just show the selected region on plot
#temp = temp %>% mutate(bl = if_else(file$tile$blacklist > 0,'blacklist','normal')) #bl is to show blacklisted regions
#temp$bl[24952:24952] = 'selected' #this shows the tile we are looking at
row = as.data.frame((data[sample,start:end]))
colnames(row) = "val"
#row =row %>% filter(val < 1 & val > -1)
ggplot()+geom_point(aes(x = 1:dim(row)[1],y = row$val),size = 0.8)+theme_linedraw()+ylim(-10,45)
#geom_point(aes(x = 1:dim(row)[1],y = row$val),size = 0.3)+theme_minimal()+ylim(0,10)
#+geom_vline(xintercept = 287509,linetype = "dashed",size = 0.3)+
#geom_vline(xintercept = 303114,linetype = "dashed",size = 0.3)+ylim(0,100)
}
plotDist(t(tile_cov_gc_normalized),65,1,12341)
plotDistPure(t(purified_tile_cov_gc_normalized),sample,1,31804)
#LOOK AT ONE CHROMOSOME LIKE THE THING WE HAVE ABOVE
#chr1 long arm is from 12509 to 24896
#chr21: short arm is from tile 277757 to 278857
#how to find arms of chromosome tiles:
#temp = (as.data.frame(file$tile)) %>% mutate(tile = row_number())
#(temp %>% group_by(arm) %>% slice(1))[34:37,]
#analysis commands for a sample tile (14324 here)
#variant_tiles_bl_8_9
#file$tile[14324]
#temp = (as.data.frame(file$tile)) %>% mutate(tile = row_number())
#(temp %>% group_by(arm) %>% slice(1))[35:38,]
#blacklist_removed_tile_list_newMask %>% filter(tile >= 120430) %>% slice(1)
#blacklist_removed_tile_list_newMask %>% filter(new_row == 120430)
|
# Building a Prod-Ready, Robust Shiny Application.
#
# Each step is optional.
#
# NOTE: this is golem's interactive dev scaffold -- run lines one at a time
# from the IDE; do not source() the whole file.
# 2. All along your project
## 2.1 Add modules
##
golem::add_module( name = "my_first_module" ) # Name of the module
golem::add_module( name = "my_other_module" ) # Name of the module
## 2.2 Add dependencies
usethis::use_package( "thinkr" ) # To call each time you need a new package
## 2.3 Add tests
usethis::use_test( "app" )
## 2.4 Add a browser button
golem::browser_button()
## 2.5 Add external files
golem::add_js_file( "script" )
golem::add_js_handler( "handlers" )
golem::add_css_file( "custom" )
# 3. Documentation
## 3.1 Vignette
usethis::use_vignette("locate")
devtools::build_vignettes()
## 3.2 Code coverage
## You'll need GitHub there
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()
# You're now set!
# go to dev/03_deploy.R
rstudioapi::navigateToFile("dev/03_deploy.R")
| /dev/02_dev.R | no_license | VincentGuyader/locate | R | false | false | 949 | r | # Building a Prod-Ready, Robust Shiny Application.
#
# Each step is optional.
#
# 2. All along your project
## 2.1 Add modules
##
golem::add_module( name = "my_first_module" ) # Name of the module
golem::add_module( name = "my_other_module" ) # Name of the module
## 2.2 Add dependencies
usethis::use_package( "thinkr" ) # To call each time you need a new package
## 2.3 Add tests
usethis::use_test( "app" )
## 2.4 Add a browser button
golem::browser_button()
## 2.5 Add external files
golem::add_js_file( "script" )
golem::add_js_handler( "handlers" )
golem::add_css_file( "custom" )
# 3. Documentation
## 3.1 Vignette
usethis::use_vignette("locate")
devtools::build_vignettes()
## 3.2 Code coverage
## You'll need GitHub there
usethis::use_github()
usethis::use_travis()
usethis::use_appveyor()
# You're now set!
# go to dev/03_deploy.R
rstudioapi::navigateToFile("dev/03_deploy.R")
|
# Collect model-fit indices for eight fitted lavaan mediation models (loaded
# from the saved workspace) into one overview table:
# 10 fit measures x (8 models x 2 columns: raw value, verbal rating).
library(lavaan)
# Workspace provides the fitted model objects fimlfit311 ... fimlfit412c.
load("/data/pt_life/ResearchProjects/LLammer/si_update/Results_mediation/Workspace/workspace_fiml.RData")
overview <- data.frame(matrix(nrow = 10, ncol = 16))
# Column names are intentionally duplicated: each model gets a value column
# and a rating column under the same model label.
colnames(overview) <- c("311","311","312","312","411a","411a","411b","411b","411c","411c","412a","412a","412b","412b","412c","412c")
rownames(overview) <- c("chisq", "df", "pvalue", "chisq/df", "rmsea", "rmsea_lower", "rmsea_upper", "srmr", "nnfi", "cfi")
# Extract fit measures from a fitted lavaan model and rate each against
# conventional SEM cut-offs ("good fit" / "acceptable fit" / "unacceptable fit").
#
# fit: a list whose `fit` component holds a fitted lavaan model (as produced
#      by the mediation scripts that built the loaded workspace).
# Returns a list of two length-10 vectors ordered like the `overview` rows
# (chisq, df, pvalue, chisq/df, rmsea, rmsea CI lower/upper, srmr, nnfi, cfi):
#   [[1]] the numeric values,
#   [[2]] the verbal ratings ("" for measures reported but not rated:
#         chisq, df and the rmsea CI bounds).
# Cut-offs follow common guidelines (e.g. Schermelleh-Engel et al., 2003).
fit_check <- function(fit){
  xx <- as.data.frame(fitMeasures(fit$fit))
  values <- c(xx["chisq",], xx["df",], xx["pvalue",], xx["chisq",]/xx["df",],
              xx["rmsea",], xx["rmsea.ci.lower",], xx["rmsea.ci.upper",],
              xx["srmr",], xx["nnfi",], xx["cfi",])

  # Rate one index: conditions are checked in order, so `acceptable` only
  # needs to hold where `good` has already failed.
  rate <- function(good, acceptable) {
    if (good) {
      "good fit"
    } else if (acceptable) {
      "acceptable fit"
    } else {
      "unacceptable fit"
    }
  }

  p <- xx["pvalue",]
  ratio <- xx["chisq",]/xx["df",]
  # p > .05 good; .01 <= p <= .05 acceptable.  (The original code compared
  # against 0.5 instead of 0.05 here -- a typo that was masked by the
  # preceding `good` branch, so behavior is unchanged by the fix.)
  pvalue <- rate(p > 0.05, p >= 0.01)
  chisq_df <- rate(ratio <= 2, ratio <= 3)
  rmsea <- rate(xx["rmsea",] <= 0.05, xx["rmsea",] <= 0.08)
  srmr <- rate(xx["srmr",] <= 0.05, xx["srmr",] <= 0.1)
  # Incremental indices must not exceed 1; values above 1 are unacceptable.
  nnfi <- rate(0.97 <= xx["nnfi",] && xx["nnfi",] <= 1,
               0.95 <= xx["nnfi",] && xx["nnfi",] <= 1)
  cfi <- rate(0.97 <= xx["cfi",] && xx["cfi",] <= 1,
              0.95 <= xx["cfi",] && xx["cfi",] <= 1)

  # chisq, df and the rmsea CI bounds are reported without a rating.
  evaluations <- c("", "", pvalue, chisq_df, rmsea, "", "", srmr, nnfi, cfi)
  list(values, evaluations)
}
# Apply fit_check() to each fitted model and fill the overview table:
# model i occupies columns 2i-1 (raw values) and 2i (verbal ratings).
# (Replaces eight copy-pasted assignment triples with one loop.)
fits <- list(fimlfit311, fimlfit312,
             fimlfit411a, fimlfit411b, fimlfit411c,
             fimlfit412a, fimlfit412b, fimlfit412c)
for (i in seq_along(fits)) {
  res <- fit_check(fits[[i]])
  overview[, 2 * i - 1] <- res[[1]]
  overview[, 2 * i] <- res[[2]]
}
write.csv(overview, "/data/pt_life/ResearchProjects/LLammer/si_update/Results_mediation/fit_indices/overview_fiml.csv")
| /result_evaluation/fit_indices/index_overview.R | no_license | LaurenzLammer/socialisolation | R | false | false | 2,832 | r | library(lavaan)
load("/data/pt_life/ResearchProjects/LLammer/si_update/Results_mediation/Workspace/workspace_fiml.RData")
overview <- data.frame(matrix(nrow = 10, ncol = 16))
colnames(overview) <- c("311","311","312","312","411a","411a","411b","411b","411c","411c","412a","412a","412b","412b","412c","412c")
rownames(overview) <- c("chisq", "df", "pvalue", "chisq/df", "rmsea", "rmsea_lower", "rmsea_upper", "srmr", "nnfi", "cfi")
fit_check <- function(fit){
xx <- as.data.frame(fitMeasures(fit$fit))
values <- c(xx["chisq",], xx["df",], xx["pvalue",], xx["chisq",]/xx["df",], xx["rmsea",], xx["rmsea.ci.lower",], xx["rmsea.ci.upper",], xx["srmr",], xx["nnfi",], xx["cfi",])
chisq <- ""
df <- ""
if(xx["pvalue",] > 0.05){
pvalue <- "good fit"
}
else if(0.5 >= xx["pvalue",] & xx["pvalue",] >= 0.01){
pvalue <- "acceptable fit"
}
else{
pvalue <- "unacceptable fit"
}
if(xx["chisq",]/xx["df",] <= 2){
chisq_df <- "good fit"
}
else if(3 >= xx["chisq",]/xx["df",] & xx["chisq",]/xx["df",] >= 2){
chisq_df <- "acceptable fit"
}
else{
chisq_df <- "unacceptable fit"
}
if(xx["rmsea",] <= 0.05){
rmsea <- "good fit"
}
else if(0.08 >= xx["rmsea",] & xx["rmsea",] > 0.05){
rmsea <- "acceptable fit"
}
else{
rmsea <- "unacceptable fit"
}
rmsea_l <- ""
rmsea_u <- ""
if(xx["srmr",] <= 0.05){
srmr <- "good fit"
}
else if(0.1 >= xx["srmr",] & xx["srmr",] > 0.05){
srmr <- "acceptable fit"
}
else{
srmr <- "unacceptable fit"
}
if(0.97 <= xx["nnfi",] & xx["nnfi",] <= 1){
nnfi <- "good fit"
}
else if(0.95 <= xx["nnfi",] & xx["nnfi",] < 0.97){
nnfi <- "acceptable fit"
}
else{
nnfi <- "unacceptable fit"
}
if(0.97 <= xx["cfi",] & xx["cfi",] <= 1){
cfi <- "good fit"
}
else if(0.95 <= xx["cfi",] & xx["cfi",] < 0.97){
cfi <- "acceptable fit"
}
else{
cfi <- "unacceptable fit"
}
evaluations <- c(chisq, df, pvalue, chisq_df, rmsea, rmsea_l, rmsea_u, srmr, nnfi, cfi)
res <- list(values, evaluations)
return(res)
}
res <- fit_check(fimlfit311)
overview[,1] <- res[[1]]
overview[,2] <- res[[2]]
res <- fit_check(fimlfit312)
overview[,3] <- res[[1]]
overview[,4] <- res[[2]]
res <- fit_check(fimlfit411a)
overview[,5] <- res[[1]]
overview[,6] <- res[[2]]
res <- fit_check(fimlfit411b)
overview[,7] <- res[[1]]
overview[,8] <- res[[2]]
res <- fit_check(fimlfit411c)
overview[,9] <- res[[1]]
overview[,10] <- res[[2]]
res <- fit_check(fimlfit412a)
overview[,11] <- res[[1]]
overview[,12] <- res[[2]]
res <- fit_check(fimlfit412b)
overview[,13] <- res[[1]]
overview[,14] <- res[[2]]
res <- fit_check(fimlfit412c)
overview[,15] <- res[[1]]
overview[,16] <- res[[2]]
write.csv(overview, "/data/pt_life/ResearchProjects/LLammer/si_update/Results_mediation/fit_indices/overview_fiml.csv")
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom vegawidget %>%
#' @usage lhs \%>\% rhs
NULL
| /R/utils-pipe.R | permissive | vegawidget/virgo | R | false | false | 209 | r | #' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom vegawidget %>%
#' @usage lhs \%>\% rhs
NULL
|
# MKarp94
# Coursera - Exploratory Data Analysis - Data Science Specialization
# Proj2 - plot6.R: motor-vehicle (ON-ROAD) PM2.5 emissions over time,
# Baltimore City (fips 24510) vs Los Angeles County (fips 06037).

# NOTE(review): the hard-coded setwd() makes this script machine-specific;
# kept for compatibility, but prefer running from the data directory.
setwd("/Users/MKarp/datasciencecoursera/Exploratory Data Analysis/exdata-data-NEI_data")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Treat year and source type as categorical for plotting/filtering.
NEI$year <- factor(NEI$year)
NEI$type <- factor(NEI$type)
library(ggplot2)
# Keep only motor-vehicle (ON-ROAD) sources in the two counties of interest.
balt.la.NEI <- NEI[(NEI$fips == "24510" | NEI$fips == "06037") & NEI$type == "ON-ROAD",]
plot <- ggplot(balt.la.NEI, aes(year, Emissions)) +
  geom_point() + facet_grid(. ~ fips)
# Pass the plot object explicitly: the plot is never printed, so ggsave()'s
# default of last_plot() would fail in a non-interactive Rscript session.
ggsave(file="plot6.png", plot = plot)
| /Exploratory Data Analysis/plot6.R | no_license | mkarp94/datasciencecoursera | R | false | false | 555 | r | # MKarp94
# Coursera - Exploratory Data Analysis - Data Science Specialization
# Proj2 - plot6.R
setwd("/Users/MKarp/datasciencecoursera/Exploratory Data Analysis/exdata-data-NEI_data")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
NEI$year <- factor(NEI$year)
NEI$type <- factor(NEI$type)
library(ggplot2)
balt.la.NEI <- NEI[(NEI$fips == "24510" | NEI$fips == "06037") & NEI$type == "ON-ROAD",]
plot <- ggplot(balt.la.NEI, aes(year, Emissions)) +
geom_point() + facet_grid(. ~ fips)
ggsave(file="plot6.png")
|
#data frames
x<-sample(c("small", "medium", "large"), 30, replace=TRUE)
set.seed(1000)
x<-sample(c("small", "medium", "large"), 30, replace=TRUE)
set.seed(1000)
x<-sample(c("small", "medium", "large"), 30, replace=TRUE)
x<-factor(x,levels=c("small", "medium", "large"), ordered=TRUE)
str(x)
lst <-list(
c(1,2,3),
matrix(1:9, nrow=3, ncol=3),
list(1:2, c(TRUE, FALSE), c("a","b"))
)
lst | /dataframes.R | no_license | EdinZecevic/dataanalysis | R | false | false | 395 | r | #data frames
x<-sample(c("small", "medium", "large"), 30, replace=TRUE)
set.seed(1000)
x<-sample(c("small", "medium", "large"), 30, replace=TRUE)
set.seed(1000)
x<-sample(c("small", "medium", "large"), 30, replace=TRUE)
x<-factor(x,levels=c("small", "medium", "large"), ordered=TRUE)
str(x)
lst <-list(
c(1,2,3),
matrix(1:9, nrow=3, ncol=3),
list(1:2, c(TRUE, FALSE), c("a","b"))
)
lst |
##graph tune results
#load library
library(ggplot2)
#qualified results
#load data
tune.results <- read.csv("./res/tune.results.csv", stringsAsFactors = FALSE, header = TRUE)
#create ggplot
graph <- ggplot() +
geom_line(data = tune.results, aes(x = ntree.per.trial, y = average.n.unique.elem.per.param, color = "black"), linetype = "solid", size = 1) +
geom_line(data = tune.results, aes(x = ntree.per.trial, y = average.hamming.dist.per.param, color = "blue"), linetype = "solid", size = 1) +
geom_ribbon(data = tune.results, aes(x = ntree.per.trial, ymin = average.n.unique.elem.per.param - sd.n.unique.elem.per.param, ymax = average.n.unique.elem.per.param + sd.n.unique.elem.per.param), fill = "grey") +
geom_ribbon(data = tune.results, aes(x = ntree.per.trial, ymin = average.hamming.dist.per.param - sd.hamming.dist.per.param, ymax = average.hamming.dist.per.param + sd.hamming.dist.per.param), fill = "blue") +
geom_hline(yintercept = 0) +
geom_vline(xintercept = 345000) +
labs(x = "ntree", y = "", color = "") +
scale_color_manual(labels = c("Average Number of Pairwise Unique Elements", "Average Pairwise Hamming Distance"), values = c("black", "blue")) +
theme_classic(base_size = 18) +
theme(plot.title = element_text(hjust = 0.5, face = "bold"), legend.position = "top", axis.text = element_text(color = "black"))
graph
ggsave("./fig/tune.results.eps", plot = graph)
#refined results
#load data
tune.results.refined <- read.csv("./res/tune.results.refined.csv", header = TRUE)
graph.refined <- ggplot() +
geom_line(data = tune.results.refined, aes(x = ntree, y = mean), linetype = "solid", size = 1) +
labs(x = "ntree", y = "Estimated Out-of-bag Error Rate", color = "black") +
scale_color_manual(labels = c("Estimated Out-of-bag Error Rate"), values = c("black")) +
theme_classic(base_size = 18) +
theme(plot.title = element_text(hjust = 0.5, face = "bold"), legend.position = "top", axis.text = element_text(color = "black"))
graph.refined
ggsave("./fig/refined.tune.results.eps", plot = graph.refined)
| /plot_tune_results.R | no_license | NolanLabNYU/PEDF-a-pleiotropic | R | false | false | 2,058 | r | ##graph tune results
#load library
library(ggplot2)
#qualified results
#load data
tune.results <- read.csv("./res/tune.results.csv", stringsAsFactors = FALSE, header = TRUE)
#create ggplot
graph <- ggplot() +
geom_line(data = tune.results, aes(x = ntree.per.trial, y = average.n.unique.elem.per.param, color = "black"), linetype = "solid", size = 1) +
geom_line(data = tune.results, aes(x = ntree.per.trial, y = average.hamming.dist.per.param, color = "blue"), linetype = "solid", size = 1) +
geom_ribbon(data = tune.results, aes(x = ntree.per.trial, ymin = average.n.unique.elem.per.param - sd.n.unique.elem.per.param, ymax = average.n.unique.elem.per.param + sd.n.unique.elem.per.param), fill = "grey") +
geom_ribbon(data = tune.results, aes(x = ntree.per.trial, ymin = average.hamming.dist.per.param - sd.hamming.dist.per.param, ymax = average.hamming.dist.per.param + sd.hamming.dist.per.param), fill = "blue") +
geom_hline(yintercept = 0) +
geom_vline(xintercept = 345000) +
labs(x = "ntree", y = "", color = "") +
scale_color_manual(labels = c("Average Number of Pairwise Unique Elements", "Average Pairwise Hamming Distance"), values = c("black", "blue")) +
theme_classic(base_size = 18) +
theme(plot.title = element_text(hjust = 0.5, face = "bold"), legend.position = "top", axis.text = element_text(color = "black"))
graph
ggsave("./fig/tune.results.eps", plot = graph)
#refined results
#load data
tune.results.refined <- read.csv("./res/tune.results.refined.csv", header = TRUE)
graph.refined <- ggplot() +
geom_line(data = tune.results.refined, aes(x = ntree, y = mean), linetype = "solid", size = 1) +
labs(x = "ntree", y = "Estimated Out-of-bag Error Rate", color = "black") +
scale_color_manual(labels = c("Estimated Out-of-bag Error Rate"), values = c("black")) +
theme_classic(base_size = 18) +
theme(plot.title = element_text(hjust = 0.5, face = "bold"), legend.position = "top", axis.text = element_text(color = "black"))
graph.refined
ggsave("./fig/refined.tune.results.eps", plot = graph.refined)
|
### This script produces the survival analysis presented in Vasconcellos et al. (2014)
### We estimate survival curves for four experimental groups using a Weibull regression model
### The objective is to estimates the curves and test for differences in survival among
### infected and uninfected adult Oncopeltus males and females
## Copyleft (or the one to blame): Carvalho, LMF (2014)
## last updated: 09/11/2014
library(survival);library(ggplot2)
source("weibull-reg_aux.R")
survdata <- data.frame(read.table("data/oncosurv.txt", header = TRUE))
OSurv <- Surv(survdata$TIME)
Osurvfit <- survfit(OSurv~GROUP, data = survdata)
mWei <- survreg(OSurv ~ as.factor(GROUP), dist = 'weibull', data = survdata)
Times <- 1:100
bands <- conf.band.weibull(mWei, .05, Times)
bigdt.surv <- data.frame(day = rep(Times, 4), rbind(bands[[1]], bands[[2]], bands[[3]], bands[[4]]),
GROUP = factor(c(rep("Uninfected females", length(Times)),
rep("Infected females", length(Times)),
rep("Uninfected males", length(Times)),
rep("Infected males", length(Times)))
))
##
pdf(file = "figs/survival.pdf")
ggplot(bigdt.surv, aes(x = day, y = Mean)) +
geom_ribbon(aes(ymin = Lwr, ymax = Upr, fill = GROUP), alpha = .2) +
scale_x_continuous("Time (days)") +
scale_y_continuous("Survival proportion") +
guides(fill = FALSE)+
geom_line(aes(colour = GROUP),size = 1)
dev.off() | /OncoLeptoModeling/oncosurvival_weibull.R | no_license | Brandon-Stark/CODE | R | false | false | 1,499 | r | ### This script produces the survival analysis presented in Vasconcellos et al. (2014)
### We estimate survival curves for four experimental groups using a Weibull regression model
### The objective is to estimates the curves and test for differences in survival among
### infected and uninfected adult Oncopeltus males and females
## Copyleft (or the one to blame): Carvalho, LMF (2014)
## last updated: 09/11/2014
library(survival);library(ggplot2)
source("weibull-reg_aux.R")
survdata <- data.frame(read.table("data/oncosurv.txt", header = TRUE))
OSurv <- Surv(survdata$TIME)
Osurvfit <- survfit(OSurv~GROUP, data = survdata)
mWei <- survreg(OSurv ~ as.factor(GROUP), dist = 'weibull', data = survdata)
Times <- 1:100
bands <- conf.band.weibull(mWei, .05, Times)
bigdt.surv <- data.frame(day = rep(Times, 4), rbind(bands[[1]], bands[[2]], bands[[3]], bands[[4]]),
GROUP = factor(c(rep("Uninfected females", length(Times)),
rep("Infected females", length(Times)),
rep("Uninfected males", length(Times)),
rep("Infected males", length(Times)))
))
##
pdf(file = "figs/survival.pdf")
ggplot(bigdt.surv, aes(x = day, y = Mean)) +
geom_ribbon(aes(ymin = Lwr, ymax = Upr, fill = GROUP), alpha = .2) +
scale_x_continuous("Time (days)") +
scale_y_continuous("Survival proportion") +
guides(fill = FALSE)+
geom_line(aes(colour = GROUP),size = 1)
dev.off() |
library(tidyverse)
library(RJDBC)
library(RPresto)
library(feather)
library(modeltime)
library(timetk)
library(janitor)
library(dbplyr)
library(tidymodels)
library(lubridate)
setwd("C:/Users/e394102/Documents/GitHub/time_series/R")
source("hive_connection.R")
df <- snaps %>%
#select(`Employee ID`) %>%
select(`TTOC Descr Current`, `Month End Date`, `Headcount`, `Hire Count`, `Term Count`, `Alt Dept Descr`, `Job Level`, `Job Discipline`,
`Job Discipline Descr Current`, `Direct Indirect`, `Job Function Descr`, `Job Function`, `Full Part Time`,
`Casual to Full/Part - Req Count`, `Transfer In - Req Count`, `Work Loc City`, `Work Loc State`) %>%
as_tibble() %>%
clean_headers()
dbDisconnect(conn)
rm(conn)
library(remotes)
remotes::install_github("business-science/modeltime.gluonts")
devtools::install_github("business-science/modeltime.gluonts")
devtools::install_github("hadley/dplyr")
library(reticulate)
reticulate::conda_version()
#reticulate::py_module_available("gluonts")
library(dplyr)
my_gluonts_env_python_path <- reticulate::conda_list() %>%
filter(name == "my_gluonts_env") %>%
pull(python)
my_gluonts_env_python_path
Sys.setenv(GLUONTS_PYTHON = my_gluonts_env_python_path)
# verify it's been set
Sys.getenv("GLUONTS_PYTHON")
#> "/Users/mdancho/Library/r-miniconda/envs/my_gluonts_env/bin/python"
######################################################################################################
#### Start Here ---------------------------------------
rms_tbl <- df %>% filter(full_part_time != "C") %>%
## LRP hiring definition
select(month_end_date, hire_count, casual_to_full_part_req_count, transfer_in_req_count) %>%
mutate(sum = rowSums(.[2:4])) %>%
group_by(month_end_date) %>%
summarise(hires = sum(sum) )
#rms_tbl <- df
### Start the forecasting process here
################
rms_tbl %>%
plot_time_series(month_end_date, hires)
### visualize
rms_tbl %>%
plot_acf_diagnostics(month_end_date, log(hires + 1), .lags = 1000)
#
fourier_periods <- c(12)
fourier_order <- 1
horizon <- 12
# Data Transformation
hires_trans_tbl <- rms_tbl %>%
# Preprocess Target---- yeo johnson - review
mutate(hires_trans = log1p(hires)) %>%
# standarization -- centering and scaling which tranformas to mean = 0 and sd = 1
mutate(hires_trans = standardize_vec(hires_trans))
## create functions to hold the standard vec values -- for later use when transforming back
standardize_vec_mean_val <- function(x) {
if (!is.numeric(x)) rlang::abort("Non-numeric data detected. 'x' must be numeric.")
{m <- round(mean(x, na.rm = T), digits = 6) }
m
}
standardize_vec_sd_val <- function(x) {
if (!is.numeric(x)) rlang::abort("Non-numeric data detected. 'x' must be numeric.")
{s <- stats::sd(x, na.rm = T) }
s
}
mean_val <- rms_tbl %>% mutate(hires = log(hires)) %>% summarise(mean = standardize_vec_mean_val(hires))
std_sd <- rms_tbl %>% mutate(hires = log(hires)) %>% summarise(sd = standardize_vec_sd_val(hires))
################################
horizon <- 12 # months
lag_period <- c(12)
rolling_periods <- c(12, 24, 36)
hires_prepared_full_tbl <- hires_trans_tbl %>%
select(-hires) %>%
# add future window
bind_rows(
future_frame(.data = ., .date_var = month_end_date, .length_out = horizon)
) %>%
# add autocorrelated Lags
# TIP when adding lags - visualize them with the pivot longer step below
tk_augment_lags(hires_trans, .lags = lag_period) %>%
# ADD FUTURE ROLLING LAGS features - use slidify for any rolling or window calculations to a datafame
tk_augment_slidify(
# use lag 12 --- connected with the horizon / monthly values
.value = hires_trans_lag12,
.f = mean, .period = rolling_periods, # creates new columns - with a rolling avgs
.align = "center",
.partial = TRUE
) %>% # when you just add the above it's best to visualize using the below option and you'll notice that the lag stop at the value of forward periods
## use if adding events or other xregs
# left_join(df_two, by = c("date_time" = "event_date")) %>%
# mutate(event_date = ifelse(is.na(event_date), 0, event_date)) %>% ## adds zero or 1 where there's an event
## tip format the column names into groups that start with the same code -- e.g. lag for lagged features which makes it easier to select them when modeling
# rename(event_type = event_date) %>%
rename_with(.cols = contains("lag"), .fn = ~ str_c("lag_", .))
###########################
#SEPARATE INTO MODELING & FORECAST DATA ----
hires_data_prepared_tbl <- hires_prepared_full_tbl %>%
# this will remove the NAs for the future years
filter(!is.na(hires_trans)) %>%
# drop any lingering NAs in any other columns
replace(., is.na(.), 0)
hires_data_prepared_tbl
summary(hires_data_prepared_tbl) # check to ensure no NAs are in the lag columns
# here we have our future lag features but the hires_trans values are missing
hires_forecast_tbl <- hires_prepared_full_tbl %>%
filter(is.na(hires_trans))
hires_forecast_tbl
# * Create Future Data ----
#hires_future_tbl <- hires_trans_tbl%>%
# future_frame(.date_var = month_end_date, .length_out = "12 months") %>%
# mutate(hires_trans = as.double(NA)) %>%
## taken from above
# tk_augment_fourier(month_end_date, .periods = fourier_periods)
#hires_future_tbl
###### Train test split ###################################
hires_data_prepared_tbl %>% mutate_if(is.numeric, list(~na_if(., Inf))) %>%
mutate_if(is.numeric, list(~na_if(., -Inf)))
# train test split
splits_hires <- hires_data_prepared_tbl %>%
time_series_split(assess = "12 months",
# initial = "1 year 1 months"
cumulative = TRUE)
# visualize
splits_hires %>%
tk_time_series_cv_plan() %>%
plot_time_series_cv_plan(month_end_date, hires_trans)
## create a training cleaned from outliers
train_cleaned <- training(splits_hires) %>%
# group_by(pagePath) %>%
# ts_clean_vec --- REMOVES OUTLIERS and replaces missing values
mutate(hires_trans = ts_clean_vec(hires_trans, period = 12)) # %>% # period = 7, working with daily series so set to weekly season
#ungroup()
###################################################
## Recipe Spec Base
hires_recipe_spec_base <- recipe(hires_trans ~ ., data = training(splits_hires)) %>%
#
# Time series signature --- adds a preprocessing step to generate the time series signature -- utilizes a date time column
step_timeseries_signature(month_end_date) %>%
# Removing some features - columns that are not important for a monthly series
step_rm(matches("(iso)|(xts)|(hour)|(minute)|(second)|(am.pm)")) %>% # regex () is used to create multi - regex search patterns
## normally with larger features we should standardize
# Standardize - those features like time of year etc...
# step normalize is equivalent to standardize vec function step range is equivalent to normalize_vec function
step_normalize(matches("(index.num)|(year)|(yday)")) %>%
## NEXT STEP One HOT ENCODING
# will focus on anthing that is a text feature
step_dummy(all_nominal(), one_hot = TRUE) %>%
## Interaction
# this will add some additional features -- takes weeks two and multiplies it by wday.lbl - you'll see this in the glimpse below
step_interact(~ matches("week2") * matches("wday.lbl") ) %>%
## Last Step --- Add the fourier series features -- takes a date time --- we can add periods
step_fourier(month_end_date, period = c(12, 24, 36), K =2) #%>%
# step_rm("month_end_date")
# Look at how your preprocessing steps are being applied
hires_recipe_spec_base %>%
# prepares the dataset - it's being trained
prep() %>%
# juice returns the training data with the prepared recipe applied
juice() %>%
glimpse()
############################################################################
# xgboost
model_spec_boost <- boost_tree(
mode = "regression",
mtry = 30,
trees = 1000,
min_n = 1,
tree_depth = 15,
learn_rate = 0.013,
loss_reduction = 0.1
) %>%
set_engine("xgboost")
model_spec_rf <- rand_forest(
mode = "regression",
mtry = 50,
trees = 1000,
min_n = 5
) %>%
set_engine("randomForest")
model_spec_nnet <- mlp(
mode = "regression",
hidden_units = 10,
penalty = 2,
epochs = 50
) %>%
set_engine("nnet")
model_spec_svm_rbf <- svm_rbf(
mode = "regression",
cost = 1,
rbf_sigma = 0.01,
margin = 0.1
) %>%
set_engine("kernlab")
model_spec_nnetar <- nnetar_reg(
non_seasonal_ar = 2,
seasonal_ar = 12, # uses seasonal period which is auto detected in the series (weekly, monthly in our case)
hidden_units = 10,
penalty = 10,
num_networks = 10,
epochs = 50
) %>%
set_engine("nnetar")
# * Workflow ----
set.seed(123)
wflow_fit_svm_poly <- workflow() %>%
add_model(model_spec_svm_rbf) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
set.seed(123)
wflw_fit_xgboost <- workflow() %>%
add_model(model_spec_boost) %>%
#add_recipe(hires_recipe_spec_base) %>%
# we have to remove the date var when using xgboost so we can update role or step_rm here
add_recipe(hires_recipe_spec_base %>% update_role(month_end_date, new_role = "indicator")) %>%
fit(training(splits_hires)) # %>% step_naomit()) ## trying to omit nas
set.seed(123)
wflw_fit_rf <- workflow() %>%
add_model(model_spec_rf) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
set.seed(123)
wflw_fit_nnet <- workflow() %>%
add_model(model_spec_nnet) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
set.seed(123)
wflw_fit_nnet_ar <- workflow() %>%
add_model(model_spec_nnetar) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
wflw_fit_nnet %>% pull_workflow_fit() %>% pluck("fit") %>% summary()
# * Compare with Modeltime -----
calibration_tbl <- modeltime_table(wflw_fit_rf,
wflw_fit_nnet,
wflw_fit_nnet_ar,
wflw_fit_xgboost
#wflow_fit_svm_poly
# model_fit_2_arima_sarimax
) %>%
modeltime_calibrate(new_data = training(splits_hires))
calibration_tbl %>% modeltime_forecast(new_data = testing(splits_hires), actual_data = hires_data_prepared_tbl) %>%
plot_modeltime_forecast()
calibration_tbl %>% modeltime_accuracy() %>% arrange(rmse)
##########################################
# Model 1: auto_arima ----
model_fit_arima_no_boost <- arima_reg() %>%
set_engine(engine = "auto_arima") %>%
fit(hires_trans ~ month_end_date, data = training(splits_hires))
# Model 2: arima_boost ----
model_fit_arima_boosted <- arima_boost(
min_n = 2,
learn_rate = 0.015
) %>%
set_engine(engine = "auto_arima_xgboost") %>%
fit(hires_trans ~ month_end_date +
as.numeric(month_end_date) +
factor(month(month_end_date, label = TRUE), ordered = F),
data = training(splits_hires))
#> frequency = 12 observations per 1 year
# Model 3: ets ----
model_fit_ets <- exp_smoothing() %>%
set_engine(engine = "ets") %>%
fit(hires_trans ~ month_end_date, data = training(splits_hires))
#> frequency = 12 observations per 1 year
# Model 4: prophet ----
model_fit_prophet <- prophet_reg() %>%
set_engine(engine = "prophet") %>%
fit(hires_trans ~ month_end_date, data = training(splits_hires))
#> Disabling weekly seasonality. Run prophet with weekly.seasonality=TRUE to override this.
#> Disabling daily seasonality. Run prophet with daily.seasonality=TRUE to override this.
model_fit_lm <- linear_reg() %>%
set_engine("lm") %>%
fit(hires_trans ~ as.numeric(month_end_date) +
factor(month(month_end_date, label = TRUE), ordered = FALSE),
data = training(splits_hires))
model_spec_mars <- mars(mode = "regression") %>%
set_engine("earth")
wflw_fit_mars <- workflow() %>%
add_recipe(hires_recipe_spec_base) %>%
add_model(model_spec_mars) %>%
fit(train_cleaned)
model_spec_arima_boost <- arima_boost(
# typically we don't use seasonality here and let the xgboost take care of season
#seasonal_period = 12,
# non_seasonal_ar = 2,
# non_seasonal_differences =1,
# non_seasonal_ma = 1,
# seasonal_ar = 0,
# seasonal_differences = 0,
# seasonal_ma = 1,
mtry = 2, ## note setting to zero shuts off xgboost effect --- by setting mtry = 0
min_n = 20,
tree_depth = 20,
learn_rate = 0.012,
loss_reduction = 0.15,
trees = 1000
) %>%
#set_engine("arima_xgboost")
set_engine("auto_arima_xgboost")
set.seed(456)
wflw_fit_arima_boost <- workflow() %>%
add_model(model_spec_arima_boost) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
###################
model_spec_prophet_boost <- prophet_boost(
# prophet params
#changepoint_num = 25,
#changepoint_range = 0.8,
# seasonality_yearly = TRUE,
# seasonality_weekly = FALSE,
# seasonality_daily = FALSE,
#xgboost parmas
# when set high it forces xgboost to shut down (foreces algorithum to not make any splits which essentially
#predicts the mean of the residuals) - reduced effect of xgboost to
#adding a constant to the prophet results that constand it the avg residuals
min_n = 55, #
tree_depth = 25,
learn_rate = 0.199, ## note on above you can also shut off xgboost effect by setting mtry = 0
loss_reduction = 0.115,
trees = 1000
) %>%
set_engine("prophet_xgboost")
# Workflow
set.seed(456)
wflw_fit_prophet_boost <- workflow() %>%
add_model(model_spec_prophet_boost) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
# * LIGHTGBM ----
#library(lightgbm)
#library(treesnip)
model_spec_light_gbm <- boost_tree(
mode = "regression",
min_n = 55, #
tree_depth = 40,
learn_rate = 0.199,
loss_reduction = 0.115,
trees = 1000
) %>% set_engine("lightgbm")
set.seed(456)
wflw_fit_light_gbm_parms <- workflow() %>%
add_model(model_spec_light_gbm) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
wflw_lightgbm_defaults <- workflow() %>%
add_model(
boost_tree(mode = "regression") %>%
set_engine("lightgbm")
) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
########################### View above models
######################################################################################################
models_tbl <- modeltime_table(
model_fit_arima_no_boost,
wflw_fit_arima_boost,
wflw_fit_prophet_boost,
model_fit_prophet,
model_fit_lm,
wflw_fit_rf,
wflw_fit_nnet,
wflw_fit_mars,
wflw_fit_nnet_ar,
wflw_fit_xgboost
# wflw_lightgbm_defaults,
# wflw_fit_light_gbm_parms
)
calibration_tbl <- models_tbl %>%
modeltime_calibrate(new_data = testing(splits_hires))
calibration_tbl %>% modeltime_accuracy() %>% arrange(rmse)
calibration_tbl %>%
modeltime_forecast(
new_data = testing(splits_hires),
actual_data = hires_data_prepared_tbl
) %>%
plot_modeltime_forecast()
################## Finalize
# refit here
refit_tbl <- calibration_tbl %>%
modeltime_refit(hires_data_prepared_tbl)
refit_tbl
#fcast_invert <-
# refit_tbl %>%
# refit_ensemble_superlearner_tbl
# modeltime_forecast(
# new_data = hires_forecast_tbl,
#hires_future_tbl,
# actual_data = hires_data_prepared_tbl
# ) %>%
# mutate(across(.value:.conf_hi, .fns = expm1))
### transform back
fcast_invert <- refit_tbl %>%
modeltime_forecast(new_data = hires_forecast_tbl,
actual_data = hires_data_prepared_tbl) %>%
# Invert Transformation
mutate(across(.value:.conf_hi, .fns = ~ standardize_inv_vec(
x = .,
mean = mean_val$mean,
sd = std_sd$sd
# intervert out of log
))) %>%
mutate(across(.value:.conf_hi, .fns = expm1))
fcast_invert %>%
plot_modeltime_forecast()
fcast_invert %>% mutate( year = as.factor(year(.index) )) %>%
dplyr::mutate(.model_id = replace_na(.model_id, 0)) %>%
filter(.model_id == 1 | .model_id == 0) %>%
group_by(year) %>%
summarise(val = sum(.value))
#summarise_by_time(.index, .by = "year", .value)
######################################################################################################
library(modeltime.ensemble)
# ensemble step starts here
###################### # GET TOP Performing Models ######
# get top models and/or diverstiy of models here for the ensemble
model_id_selection <- calibration_tbl %>%
modeltime_accuracy() %>%
arrange(rmse) %>%
filter(.model_id == 6 | .model_id == 3 | .model_id == 10 | .model_id == 9) %>% #|
#.model_id == 9 | .model_id == 3 |
# .model_id == 1
#| .model_id == 4
# ) %>%
# dplyr::slice(1:3) %>%
pull(.model_id)
model_id_selection
# do the actual subset or filter of the models
submodels_tbl <- calibration_tbl %>%
# filter(.model_id == 5 | .model_id == 2)
filter(.model_id %in% model_id_selection)
############################# SIMPLE ENSEMBLE #### NO STACKING
ensemble_fit <- submodels_tbl %>%
ensemble_average(type = "median")
ensemble_calibration_tbl <- modeltime_table(
ensemble_fit
) %>%
modeltime_calibrate(testing(splits_hires))
# Forecast vs Test Set
ensemble_calibration_tbl %>%
modeltime_forecast(
new_data = testing(splits_hires),
actual_data = hires_data_prepared_tbl
) %>%
plot_modeltime_forecast()
ensemble_calibration_tbl %>% modeltime_accuracy()
################## Finalize
# CODE HERE
refit_tbl <- ensemble_calibration_tbl %>%
modeltime_refit(hires_data_prepared_tbl)
refit_tbl
# invert here
### transform back
fcast_invert <- refit_tbl %>%
modeltime_forecast(new_data = hires_forecast_tbl,
actual_data = hires_data_prepared_tbl) %>%
# Invert Transformation
mutate(across(.value:.conf_hi, .fns = ~ standardize_inv_vec(
x = .,
mean = mean_val$mean,
sd = std_sd$sd
# invert out of log
))) %>%
mutate(across(.value:.conf_hi, .fns = expm1))
fcast_invert %>%
plot_modeltime_forecast()
fcast_invert %>% mutate( year = as.factor(year(.index) )) %>%
dplyr::mutate(.model_id = replace_na(.model_id, 0)) %>%
filter(.model_id == 1 | .model_id == 0) %>%
group_by(year) %>%
summarise(val = sum(.value), .conf_lo = sum(.conf_lo, na.rm = TRUE), .conf_hi = sum(.conf_hi, na.rm = TRUE))
fcast_invert %>%
dplyr::mutate(.model_id = replace_na(.model_id, 0)) %>%
# just use the prophet xgboost model
#filter(.model_id == 2 | .model_id == 0) %>%
write_rds("/mnt/Forecasts/Forecast_Data/rms_forecast.rds")
| /R/modeltime_forecasting_pipeline_hires.R | no_license | jam1245/time_series | R | false | false | 18,772 | r | library(tidyverse)
library(RJDBC)
library(RPresto)
library(feather)
library(modeltime)
library(timetk)
library(janitor)
library(dbplyr)
library(tidymodels)
library(lubridate)
setwd("C:/Users/e394102/Documents/GitHub/time_series/R")
source("hive_connection.R")
df <- snaps %>%
#select(`Employee ID`) %>%
select(`TTOC Descr Current`, `Month End Date`, `Headcount`, `Hire Count`, `Term Count`, `Alt Dept Descr`, `Job Level`, `Job Discipline`,
`Job Discipline Descr Current`, `Direct Indirect`, `Job Function Descr`, `Job Function`, `Full Part Time`,
`Casual to Full/Part - Req Count`, `Transfer In - Req Count`, `Work Loc City`, `Work Loc State`) %>%
as_tibble() %>%
clean_headers()
dbDisconnect(conn)
rm(conn)
library(remotes)
remotes::install_github("business-science/modeltime.gluonts")
devtools::install_github("business-science/modeltime.gluonts")
devtools::install_github("hadley/dplyr")
library(reticulate)
reticulate::conda_version()
#reticulate::py_module_available("gluonts")
library(dplyr)
my_gluonts_env_python_path <- reticulate::conda_list() %>%
filter(name == "my_gluonts_env") %>%
pull(python)
my_gluonts_env_python_path
Sys.setenv(GLUONTS_PYTHON = my_gluonts_env_python_path)
# verify it's been set
Sys.getenv("GLUONTS_PYTHON")
#> "/Users/mdancho/Library/r-miniconda/envs/my_gluonts_env/bin/python"
######################################################################################################
#### Start Here ---------------------------------------
rms_tbl <- df %>% filter(full_part_time != "C") %>%
## LRP hiring definition
select(month_end_date, hire_count, casual_to_full_part_req_count, transfer_in_req_count) %>%
mutate(sum = rowSums(.[2:4])) %>%
group_by(month_end_date) %>%
summarise(hires = sum(sum) )
#rms_tbl <- df
### Start the forecasting process here
################
rms_tbl %>%
plot_time_series(month_end_date, hires)
### visualize
rms_tbl %>%
plot_acf_diagnostics(month_end_date, log(hires + 1), .lags = 1000)
#
fourier_periods <- c(12)
fourier_order <- 1
horizon <- 12
# Data Transformation
hires_trans_tbl <- rms_tbl %>%
# Preprocess Target---- yeo johnson - review
mutate(hires_trans = log1p(hires)) %>%
# standarization -- centering and scaling which tranformas to mean = 0 and sd = 1
mutate(hires_trans = standardize_vec(hires_trans))
## create functions to hold the standard vec values -- for later use when transforming back
standardize_vec_mean_val <- function(x) {
if (!is.numeric(x)) rlang::abort("Non-numeric data detected. 'x' must be numeric.")
{m <- round(mean(x, na.rm = T), digits = 6) }
m
}
standardize_vec_sd_val <- function(x) {
if (!is.numeric(x)) rlang::abort("Non-numeric data detected. 'x' must be numeric.")
{s <- stats::sd(x, na.rm = T) }
s
}
mean_val <- rms_tbl %>% mutate(hires = log(hires)) %>% summarise(mean = standardize_vec_mean_val(hires))
std_sd <- rms_tbl %>% mutate(hires = log(hires)) %>% summarise(sd = standardize_vec_sd_val(hires))
################################
horizon <- 12 # months
lag_period <- c(12)
rolling_periods <- c(12, 24, 36)
hires_prepared_full_tbl <- hires_trans_tbl %>%
select(-hires) %>%
# add future window
bind_rows(
future_frame(.data = ., .date_var = month_end_date, .length_out = horizon)
) %>%
# add autocorrelated Lags
# TIP when adding lags - visualize them with the pivot longer step below
tk_augment_lags(hires_trans, .lags = lag_period) %>%
# ADD FUTURE ROLLING LAGS features - use slidify for any rolling or window calculations to a datafame
tk_augment_slidify(
# use lag 12 --- connected with the horizon / monthly values
.value = hires_trans_lag12,
.f = mean, .period = rolling_periods, # creates new columns - with a rolling avgs
.align = "center",
.partial = TRUE
) %>% # when you just add the above it's best to visualize using the below option and you'll notice that the lag stop at the value of forward periods
## use if adding events or other xregs
# left_join(df_two, by = c("date_time" = "event_date")) %>%
# mutate(event_date = ifelse(is.na(event_date), 0, event_date)) %>% ## adds zero or 1 where there's an event
## tip format the column names into groups that start with the same code -- e.g. lag for lagged features which makes it easier to select them when modeling
# rename(event_type = event_date) %>%
rename_with(.cols = contains("lag"), .fn = ~ str_c("lag_", .))
###########################
#SEPARATE INTO MODELING & FORECAST DATA ----
hires_data_prepared_tbl <- hires_prepared_full_tbl %>%
# this will remove the NAs for the future years
filter(!is.na(hires_trans)) %>%
# drop any lingering NAs in any other columns
replace(., is.na(.), 0)
hires_data_prepared_tbl
summary(hires_data_prepared_tbl) # check to ensure no NAs are in the lag columns
# here we have our future lag features but the hires_trans values are missing
hires_forecast_tbl <- hires_prepared_full_tbl %>%
filter(is.na(hires_trans))
hires_forecast_tbl
# * Create Future Data ----
#hires_future_tbl <- hires_trans_tbl%>%
# future_frame(.date_var = month_end_date, .length_out = "12 months") %>%
# mutate(hires_trans = as.double(NA)) %>%
## taken from above
# tk_augment_fourier(month_end_date, .periods = fourier_periods)
#hires_future_tbl
###### Train test split ###################################
hires_data_prepared_tbl %>% mutate_if(is.numeric, list(~na_if(., Inf))) %>%
mutate_if(is.numeric, list(~na_if(., -Inf)))
# * Train / test split ----
# Hold out the last 12 months as the assessment (test) set; with
# cumulative = TRUE all remaining history becomes the training set.
splits_hires <- hires_data_prepared_tbl %>%
time_series_split(assess = "12 months",
# initial = "1 year 1 months"
cumulative = TRUE)
# Visualize the CV plan to sanity-check the split boundary
splits_hires %>%
tk_time_series_cv_plan() %>%
plot_time_series_cv_plan(month_end_date, hires_trans)
## Training set with outliers cleaned; ts_clean_vec also imputes missing values
train_cleaned <- training(splits_hires) %>%
# group_by(pagePath) %>%
# period = 12: monthly series with yearly seasonality
mutate(hires_trans = ts_clean_vec(hires_trans, period = 12)) # %>% # period = 7, working with daily series so set to weekly season
#ungroup()
###################################################
## Recipe Spec Base ----
# Base preprocessing recipe shared by all workflow models below.
hires_recipe_spec_base <- recipe(hires_trans ~ ., data = training(splits_hires)) %>%
#
# Time series signature: expands the date column into calendar features
step_timeseries_signature(month_end_date) %>%
# Drop signature columns that are meaningless for a monthly series
# (several regex patterns combined with "|")
step_rm(matches("(iso)|(xts)|(hour)|(minute)|(second)|(am.pm)")) %>%
# Standardize trend/seasonal numeric features
# (step_normalize ~ standardize_vec; step_range ~ normalize_vec)
step_normalize(matches("(index.num)|(year)|(yday)")) %>%
# One-hot encode every nominal (text/factor) feature
step_dummy(all_nominal(), one_hot = TRUE) %>%
# Interaction features: week2 columns crossed with weekday-label dummies
step_interact(~ matches("week2") * matches("wday.lbl") ) %>%
# Fourier terms on the date index (periods in months, K = 2 sin/cos pairs each)
step_fourier(month_end_date, period = c(12, 24, 36), K =2) #%>%
# step_rm("month_end_date")
# Inspect how the preprocessing steps are applied to the training data
hires_recipe_spec_base %>%
# prep() trains the recipe on its data
prep() %>%
# juice() returns the training data with the prepared recipe applied
juice() %>%
glimpse()
############################################################################
# Machine-learning model specifications ----
# Each spec pairs a parsnip model definition with its computational engine.

# Gradient-boosted trees (xgboost engine)
model_spec_boost <- set_engine(
  boost_tree(
    mode = "regression",
    trees = 1000,
    mtry = 30,
    tree_depth = 15,
    min_n = 1,
    learn_rate = 0.013,
    loss_reduction = 0.1
  ),
  "xgboost"
)

# Random forest (randomForest engine)
model_spec_rf <- set_engine(
  rand_forest(
    mode = "regression",
    trees = 1000,
    mtry = 50,
    min_n = 5
  ),
  "randomForest"
)

# Single-hidden-layer neural network (nnet engine)
model_spec_nnet <- set_engine(
  mlp(
    mode = "regression",
    epochs = 50,
    hidden_units = 10,
    penalty = 2
  ),
  "nnet"
)

# Radial-basis-function support vector machine (kernlab engine)
model_spec_svm_rbf <- set_engine(
  svm_rbf(
    mode = "regression",
    margin = 0.1,
    cost = 1,
    rbf_sigma = 0.01
  ),
  "kernlab"
)

# Autoregressive neural network; the seasonal period is auto-detected from the
# series (monthly here)
model_spec_nnetar <- set_engine(
  nnetar_reg(
    non_seasonal_ar = 2,
    seasonal_ar = 12,
    hidden_units = 10,
    num_networks = 10,
    penalty = 10,
    epochs = 50
  ),
  "nnetar"
)
# * Workflow ----
# Fit each model spec with the shared base recipe on the training split;
# set.seed() before every fit keeps the stochastic models reproducible.
# NOTE(review): this name uses the "wflow_" prefix while the rest use "wflw_";
# kept as-is for compatibility, but consider unifying.
set.seed(123)
wflow_fit_svm_poly <- workflow() %>%
add_model(model_spec_svm_rbf) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
set.seed(123)
wflw_fit_xgboost <- workflow() %>%
add_model(model_spec_boost) %>%
#add_recipe(hires_recipe_spec_base) %>%
# xgboost cannot consume a raw date column, so demote month_end_date from
# predictor to an "indicator" role (alternative: step_rm it in the recipe)
add_recipe(hires_recipe_spec_base %>% update_role(month_end_date, new_role = "indicator")) %>%
fit(training(splits_hires)) # %>% step_naomit()) ## trying to omit nas
set.seed(123)
wflw_fit_rf <- workflow() %>%
add_model(model_spec_rf) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
set.seed(123)
wflw_fit_nnet <- workflow() %>%
add_model(model_spec_nnet) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
set.seed(123)
wflw_fit_nnet_ar <- workflow() %>%
add_model(model_spec_nnetar) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
# Inspect the underlying nnet fit object.
# NOTE(review): pull_workflow_fit() is deprecated in workflows >= 0.2.3 in
# favor of extract_fit_parsnip() -- confirm installed version before changing.
wflw_fit_nnet %>% pull_workflow_fit() %>% pluck("fit") %>% summary()
# * Compare with Modeltime -----
# Calibrate the fitted ML workflows on the TEST split so accuracy metrics and
# confidence intervals reflect out-of-sample residuals.
# BUG FIX: this previously calibrated on training(splits_hires) -- in-sample,
# overly optimistic accuracy -- while the forecast below is plotted against
# the test period. Now consistent with the later calibration step.
calibration_tbl <- modeltime_table(wflw_fit_rf,
                                   wflw_fit_nnet,
                                   wflw_fit_nnet_ar,
                                   wflw_fit_xgboost
                                   #wflow_fit_svm_poly
) %>%
  modeltime_calibrate(new_data = testing(splits_hires))
# Visualize test-period forecasts against the actuals
calibration_tbl %>% modeltime_forecast(new_data = testing(splits_hires), actual_data = hires_data_prepared_tbl) %>%
  plot_modeltime_forecast()
# Rank models by out-of-sample RMSE (best first)
calibration_tbl %>% modeltime_accuracy() %>% arrange(rmse)
##########################################
# Classical / statistical baseline models ----
# Model 1: auto_arima ----
model_fit_arima_no_boost <- arima_reg() %>%
set_engine(engine = "auto_arima") %>%
fit(hires_trans ~ month_end_date, data = training(splits_hires))
# Model 2: arima_boost ----
# ARIMA with xgboost modeling the residuals; numeric trend and month dummies
# are supplied as exogenous regressors on the formula RHS
model_fit_arima_boosted <- arima_boost(
min_n = 2,
learn_rate = 0.015
) %>%
set_engine(engine = "auto_arima_xgboost") %>%
fit(hires_trans ~ month_end_date +
as.numeric(month_end_date) +
factor(month(month_end_date, label = TRUE), ordered = F),
data = training(splits_hires))
#> frequency = 12 observations per 1 year
# Model 3: ets ----
model_fit_ets <- exp_smoothing() %>%
set_engine(engine = "ets") %>%
fit(hires_trans ~ month_end_date, data = training(splits_hires))
#> frequency = 12 observations per 1 year
# Model 4: prophet ----
model_fit_prophet <- prophet_reg() %>%
set_engine(engine = "prophet") %>%
fit(hires_trans ~ month_end_date, data = training(splits_hires))
#> Disabling weekly seasonality. Run prophet with weekly.seasonality=TRUE to override this.
#> Disabling daily seasonality. Run prophet with daily.seasonality=TRUE to override this.
# Model 5: linear regression on numeric trend + month dummies ----
model_fit_lm <- linear_reg() %>%
set_engine("lm") %>%
fit(hires_trans ~ as.numeric(month_end_date) +
factor(month(month_end_date, label = TRUE), ordered = FALSE),
data = training(splits_hires))
# Model 6: MARS (earth engine)
model_spec_mars <- mars(mode = "regression") %>%
set_engine("earth")
# NOTE(review): MARS is fitted on the outlier-cleaned training data while the
# other models use the raw training split -- confirm this is intentional.
wflw_fit_mars <- workflow() %>%
add_recipe(hires_recipe_spec_base) %>%
add_model(model_spec_mars) %>%
fit(train_cleaned)
# Tuned ARIMA + xgboost hybrid specification ----
model_spec_arima_boost <- arima_boost(
# seasonality is typically left to auto-arima / the xgboost residual model
# rather than fixed here
#seasonal_period = 12,
# non_seasonal_ar = 2,
# non_seasonal_differences =1,
# non_seasonal_ma = 1,
# seasonal_ar = 0,
# seasonal_differences = 0,
# seasonal_ma = 1,
mtry = 2, ## NOTE: setting mtry = 0 shuts off the xgboost effect entirely
min_n = 20,
tree_depth = 20,
learn_rate = 0.012,
loss_reduction = 0.15,
trees = 1000
) %>%
#set_engine("arima_xgboost")
set_engine("auto_arima_xgboost")
set.seed(456)
wflw_fit_arima_boost <- workflow() %>%
add_model(model_spec_arima_boost) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
###################
# Prophet + xgboost hybrid: prophet fits trend/seasonality and xgboost models
# the residuals using the recipe features.
model_spec_prophet_boost <- prophet_boost(
# prophet params
#changepoint_num = 25,
#changepoint_range = 0.8,
# seasonality_yearly = TRUE,
# seasonality_weekly = FALSE,
# seasonality_daily = FALSE,
# xgboost params: a very large min_n forces xgboost to make no splits
# (it then only predicts the mean residual), reducing its effect to adding
# a constant -- the average residual -- to the prophet forecast
min_n = 55, #
tree_depth = 25,
learn_rate = 0.199, ## NOTE: mtry = 0 would also shut off the xgboost effect
loss_reduction = 0.115,
trees = 1000
) %>%
set_engine("prophet_xgboost")
# Workflow
set.seed(456)
wflw_fit_prophet_boost <- workflow() %>%
add_model(model_spec_prophet_boost) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
# * LIGHTGBM ----
# NOTE(review): the "lightgbm" engine requires an engine-registration package
# (the library() calls below are commented out) -- confirm it is loaded
# elsewhere before running this section.
#library(lightgbm)
#library(treesnip)
# Hand-tuned lightgbm spec (hyperparameters mirror the prophet_boost tuning)
model_spec_light_gbm <- boost_tree(
mode = "regression",
min_n = 55, #
tree_depth = 40,
learn_rate = 0.199,
loss_reduction = 0.115,
trees = 1000
) %>% set_engine("lightgbm")
set.seed(456)
wflw_fit_light_gbm_parms <- workflow() %>%
add_model(model_spec_light_gbm) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
# Same engine with all-default hyperparameters, for comparison
wflw_lightgbm_defaults <- workflow() %>%
add_model(
boost_tree(mode = "regression") %>%
set_engine("lightgbm")
) %>%
add_recipe(hires_recipe_spec_base) %>%
fit(training(splits_hires))
########################### Compare all candidate models
######################################################################################################
# Collect every fitted model into one modeltime table (lightgbm entries are
# currently excluded -- see the commented lines).
models_tbl <- modeltime_table(
model_fit_arima_no_boost,
wflw_fit_arima_boost,
wflw_fit_prophet_boost,
model_fit_prophet,
model_fit_lm,
wflw_fit_rf,
wflw_fit_nnet,
wflw_fit_mars,
wflw_fit_nnet_ar,
wflw_fit_xgboost
# wflw_lightgbm_defaults,
# wflw_fit_light_gbm_parms
)
# Calibrate on the held-out test split (out-of-sample residuals)
calibration_tbl <- models_tbl %>%
modeltime_calibrate(new_data = testing(splits_hires))
# Accuracy table, best RMSE first
calibration_tbl %>% modeltime_accuracy() %>% arrange(rmse)
# Test-period forecasts vs actuals
calibration_tbl %>%
modeltime_forecast(
new_data = testing(splits_hires),
actual_data = hires_data_prepared_tbl
) %>%
plot_modeltime_forecast()
################## Finalize ----
# Refit every calibrated model on the FULL prepared dataset before forecasting
# the future horizon.
refit_tbl <- calibration_tbl %>%
modeltime_refit(hires_data_prepared_tbl)
refit_tbl
# (superseded ensemble-forecast draft, kept for reference)
#fcast_invert <-
# refit_tbl %>%
# refit_ensemble_superlearner_tbl
# modeltime_forecast(
# new_data = hires_forecast_tbl,
#hires_future_tbl,
# actual_data = hires_data_prepared_tbl
# ) %>%
# mutate(across(.value:.conf_hi, .fns = expm1))
### Forecast the future horizon and invert the target transformations
fcast_invert <- refit_tbl %>%
modeltime_forecast(new_data = hires_forecast_tbl,
actual_data = hires_data_prepared_tbl) %>%
# Undo the standardization.
# NOTE(review): mean_val$mean and std_sd$sd must be the parameters captured
# when the series was standardized earlier in the script -- verify upstream.
mutate(across(.value:.conf_hi, .fns = ~ standardize_inv_vec(
x = .,
mean = mean_val$mean,
sd = std_sd$sd
))) %>%
# Undo the log1p transform applied upstream
mutate(across(.value:.conf_hi, .fns = expm1))
fcast_invert %>%
plot_modeltime_forecast()
# Yearly totals for model id 1 plus the actual rows (.model_id NA -> 0 marks
# actual-data rows in the forecast output)
fcast_invert %>% mutate( year = as.factor(year(.index) )) %>%
dplyr::mutate(.model_id = replace_na(.model_id, 0)) %>%
filter(.model_id == 1 | .model_id == 0) %>%
group_by(year) %>%
summarise(val = sum(.value))
#summarise_by_time(.index, .by = "year", .value)
######################################################################################################
library(modeltime.ensemble)
# Ensemble: select a diverse subset of strong models ----
# IMPROVED: the chained `.model_id == a | .model_id == b | ...` conditions are
# replaced with the idiomatic `%in%` membership test (same set, same result).
model_id_selection <- calibration_tbl %>%
  modeltime_accuracy() %>%
  arrange(rmse) %>%
  # hand-picked model ids: top RMSE performers plus model-family diversity
  filter(.model_id %in% c(6, 3, 10, 9)) %>%
  pull(.model_id)
model_id_selection
# Subset the calibration table to the selected sub-models
submodels_tbl <- calibration_tbl %>%
  filter(.model_id %in% model_id_selection)
############################# SIMPLE ENSEMBLE #### NO STACKING
# Median-average ensemble of the selected sub-models
ensemble_fit <- submodels_tbl %>%
ensemble_average(type = "median")
# Calibrate the ensemble on the held-out test split
ensemble_calibration_tbl <- modeltime_table(
ensemble_fit
) %>%
modeltime_calibrate(testing(splits_hires))
# Forecast vs Test Set
ensemble_calibration_tbl %>%
modeltime_forecast(
new_data = testing(splits_hires),
actual_data = hires_data_prepared_tbl
) %>%
plot_modeltime_forecast()
# Out-of-sample accuracy of the ensemble
ensemble_calibration_tbl %>% modeltime_accuracy()
################## Finalize ensemble ----
# Refit the ensemble on the full dataset, forecast the future horizon, invert
# the target transformations, and persist the result.
refit_tbl <- ensemble_calibration_tbl %>%
modeltime_refit(hires_data_prepared_tbl)
refit_tbl
### Invert transformations (same procedure as the per-model forecast above)
fcast_invert <- refit_tbl %>%
modeltime_forecast(new_data = hires_forecast_tbl,
actual_data = hires_data_prepared_tbl) %>%
# Undo the standardization.
# NOTE(review): mean_val$mean and std_sd$sd must be the parameters captured
# when the series was standardized earlier in the script -- verify upstream.
mutate(across(.value:.conf_hi, .fns = ~ standardize_inv_vec(
x = .,
mean = mean_val$mean,
sd = std_sd$sd
))) %>%
# Undo the log1p transform
mutate(across(.value:.conf_hi, .fns = expm1))
fcast_invert %>%
plot_modeltime_forecast()
# Yearly totals: ensemble (id 1) plus actual rows (NA id recoded to 0),
# with summed confidence bounds
fcast_invert %>% mutate( year = as.factor(year(.index) )) %>%
dplyr::mutate(.model_id = replace_na(.model_id, 0)) %>%
filter(.model_id == 1 | .model_id == 0) %>%
group_by(year) %>%
summarise(val = sum(.value), .conf_lo = sum(.conf_lo, na.rm = TRUE), .conf_hi = sum(.conf_hi, na.rm = TRUE))
# Persist the forecast (actuals + ensemble) for downstream consumers
fcast_invert %>%
dplyr::mutate(.model_id = replace_na(.model_id, 0)) %>%
# just use the prophet xgboost model
#filter(.model_id == 2 | .model_id == 0) %>%
write_rds("/mnt/Forecasts/Forecast_Data/rms_forecast.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aov_car.R
\encoding{UTF-8}
\name{aov_car}
\alias{aov_car}
\alias{aov_4}
\alias{aov_ez}
\title{Convenient ANOVA estimation for factorial designs}
\usage{
aov_car(
formula,
data,
fun_aggregate = NULL,
type = afex_options("type"),
factorize = afex_options("factorize"),
check_contrasts = afex_options("check_contrasts"),
observed = NULL,
anova_table = list(),
include_aov = afex_options("include_aov"),
return = afex_options("return_aov"),
...
)
aov_4(
formula,
data,
observed = NULL,
fun_aggregate = NULL,
type = afex_options("type"),
factorize = afex_options("factorize"),
check_contrasts = afex_options("check_contrasts"),
return = afex_options("return_aov"),
anova_table = list(),
include_aov = afex_options("include_aov"),
...,
print.formula = FALSE
)
aov_ez(
id,
dv,
data,
between = NULL,
within = NULL,
covariate = NULL,
observed = NULL,
fun_aggregate = NULL,
transformation,
type = afex_options("type"),
factorize = afex_options("factorize"),
check_contrasts = afex_options("check_contrasts"),
return = afex_options("return_aov"),
anova_table = list(),
include_aov = afex_options("include_aov"),
...,
print.formula = FALSE
)
}
\arguments{
\item{formula}{A formula specifying the ANOVA model similar to
\code{\link{aov}} (for \code{aov_car} or similar to \code{lme4:lmer} for
\code{aov_4}). Must include an error term (i.e., \code{Error(id/...)} for
\code{aov_car} or \code{(...|id)} for \code{aov_4}). Note that the
within-subject factors do not need to be outside the Error term (this
contrasts with \code{aov}). See Details.}
\item{data}{A \code{data.frame} containing the data. Mandatory.}
\item{fun_aggregate}{The function for aggregating the data before running the
ANOVA if there is more than one observation per individual and cell of the
design. The default \code{NULL} issues a warning if aggregation is
necessary and uses \code{\link{mean}}. Pass \code{mean} directly to avoid
the warning.}
\item{type}{The type of sums of squares for the ANOVA. The default is given
by \code{afex_options("type")}, which is \strong{initially set to 3}.
Passed to \code{\link[car]{Anova}}. Possible values are \code{"II"},
\code{"III"}, \code{2}, or \code{3}.}
\item{factorize}{logical. Should between subject factors be factorized (with
note) before running the analysis. The default is given by
\code{afex_options("factorize")}, which is initially \code{TRUE}. If one
wants to run an ANCOVA, this needs to be set to \code{FALSE} (in which case
centering on 0 is checked on numeric variables).}
\item{check_contrasts}{\code{logical}. Should contrasts for between-subject
factors be checked and (if necessary) changed to be \code{"contr.sum"}. See
details. The default is given by \code{afex_options("check_contrasts")},
which is initially \code{TRUE}.}
\item{observed}{\code{character} vector indicating which of the variables are
observed (i.e., measured) as compared to experimentally manipulated. The
default effect size reported (generalized eta-squared) requires correct
specification of the observed (in contrast to manipulated) variables.}
\item{anova_table}{\code{list} of further arguments passed to function
producing the ANOVA table. Arguments such as \code{es} (effect size) or
\code{correction} are passed to either \code{anova.afex_aov} or
\code{nice}. Note that those settings can also be changed once an object of
class \code{afex_aov} is created by invoking the \code{anova} method
directly.}
\item{include_aov}{Boolean. Allows suppressing the calculation of the aov
object, which is per default part of the returned \code{afex_aov} object.
\code{FALSE} prevents this potentially costly calculation. Especially for
designs with larger N and within-subjects factors, this is highly
advisable. Follow-up analyses using \pkg{emmeans} are then always based on
the multivariate or \code{lm} model.}
\item{return}{What should be returned? The default is given by
\code{afex_options("return_aov")}, which is initially \code{"afex_aov"},
returning an S3 object of class \code{afex_aov} for which various
\link[=afex_aov-methods]{methods} exist (see there and below for more
details). Other values are currently still supported for backward
compatibility.}
\item{...}{Further arguments passed to \code{fun_aggregate}.}
\item{print.formula}{\code{aov_ez} and \code{aov_4} are wrapper for
\code{aov_car}. This boolean argument indicates whether the formula in the
call to \code{aov_car} should be printed.}
\item{id}{\code{character} vector (of length 1) indicating the subject
identifier column in \code{data}.}
\item{dv}{\code{character} vector (of length 1) indicating the column
containing the \strong{dependent variable} in \code{data}.}
\item{between}{\code{character} vector indicating the
\strong{between}-subject(s) factor(s)/column(s) in \code{data}. Default is
\code{NULL} indicating no between-subjects factors.}
\item{within}{\code{character} vector indicating the
\strong{within}-subject(s)(or repeated-measures) factor(s)/column(s) in
\code{data}. Default is \code{NULL} indicating no within-subjects factors.}
\item{covariate}{\code{character} vector indicating the between-subject(s)
covariate(s) (i.e., column(s)) in \code{data}. Default is \code{NULL}
indicating no covariates. Please note that \code{factorize} needs to be set
to \code{FALSE} in case the covariate is numeric and should be treated as
such.}
\item{transformation}{In \code{aov_ez}, a \code{character} vector (of length
1) indicating the name of a transformation to apply to \code{dv} before
fitting the model. If missing, no transformation is applied. In
\code{aov_car} and \code{aov_4}, a response transformation may be
incorporated in the left-hand side of \code{formula}.}
}
\value{
\code{aov_car}, \code{aov_4}, and \code{aov_ez} are wrappers for
\code{\link[car]{Anova}} and \code{\link{aov}}, the return value is
dependent on the \code{return} argument. Per default, an S3 object of class
\code{"afex_aov"} is returned containing the following slots:
\describe{
\item{\code{"anova_table"}}{An ANOVA table of class \code{c("anova",
"data.frame")}.}
\item{\code{"aov"}}{\code{aov} object returned from \code{\link{aov}}
(should not be used to evaluate significance of effects, but can be passed
to \code{emmeans} for post-hoc tests).}
\item{\code{"Anova"}}{object returned from \code{\link[car]{Anova}}, an
object of class \code{"Anova.mlm"} (if within-subjects factors are present)
or of class \code{c("anova", "data.frame")}.}
\item{\code{"lm"}}{the object fitted with \code{lm} and passed to
\code{Anova} (i.e., an object of class \code{"lm"} or \code{"mlm"}). Also
returned if \code{return = "lm"}.}
\item{\code{"data"}}{a list containing: (1) \code{long} (the possibly
aggregated data in long format used for \code{aov}), \code{wide} (the data
used to fit the \code{lm} object), and \code{idata} (if within-subject
factors are present, the \code{idata} argument passed to
\code{car::Anova}). Also returned if \code{return = "data"}.}
}
In addition, the object has the following attributes: \code{"dv"},
\code{"id"}, \code{"within"}, \code{"between"}, and \code{"type"}.
The \link[=afex_aov-methods]{print} method for \code{afex_aov} objects
(invisibly) returns (and prints) the same as if \code{return} is
\code{"nice"}: a nice ANOVA table (produced by \code{\link{nice}}) with the
following columns: \code{Effect}, \code{df}, \code{MSE} (mean-squared
errors), \code{F} (potentially with significant symbols), \code{ges}
(generalized eta-squared), \code{p}.
}
\description{
These functions allow convenient specification of any type of ANOVAs (i.e.,
purely within-subjects ANOVAs, purely between-subjects ANOVAs, and mixed
between-within or split-plot ANOVAs) for data in the \strong{long} format
(i.e., one observation per row). If the data has more than one observation
per individual and cell of the design (e.g., multiple responses per
condition), the data will be automatically aggregated. The default settings
reproduce results from commercial statistical packages such as SPSS or SAS.
\code{aov_ez} is called specifying the factors as character vectors,
\code{aov_car} is called using a formula similar to \code{\link{aov}}
specifying an error strata for the within-subject factor(s), and \code{aov_4}
is called with a \pkg{lme4}-like formula (all ANOVA functions return
identical results). The returned object contains the ANOVA also fitted via
base R's \code{\link{aov}} which can be passed to e.g., \pkg{emmeans} for
further analysis (e.g., follow-up tests, contrasts, plotting, etc.). These
functions employ \code{\link[car]{Anova}} (from the \pkg{car} package) to
provide test of effects avoiding the somewhat unhandy format of
\code{car::Anova}.
}
\details{
\subsection{Details of ANOVA Specification}{ \code{aov_ez} will concatenate
all between-subject factors using \code{*} (i.e., producing all main effects
and interactions) and all covariates by \code{+} (i.e., adding only the main
effects to the existing between-subject factors). The within-subject factors
do fully interact with all between-subject factors and covariates. This is
essentially identical to the behavior of SPSS's \code{glm} function.
The \code{formula}s for \code{aov_car} or \code{aov_4} must contain a single
\code{Error} term specifying the \code{ID} column and potential
within-subject factors (you can use \code{\link{mixed}} for running
mixed-effects models with multiple error terms). Factors outside the
\code{Error} term are treated as between-subject factors (the within-subject
factors specified in the \code{Error} term are ignored outside the
\code{Error} term; in other words, it is not necessary to specify them
outside the \code{Error} term, see Examples).\cr Suppressing the intercept
(i.e, via \code{0 +} or \code{- 1}) is ignored. Specific specifications of
effects (e.g., excluding terms with \code{-} or using \code{^}) could be okay
but is not tested. Using the \code{\link{I}} or \code{\link{poly}} function
within the formula is not tested and not supported!
To run an ANCOVA you need to set \code{factorize = FALSE} and make sure that
all variables have the correct type (i.e., factors are factors and numeric
variables are numeric and centered).
Note that the default behavior is to include calculation of the effect size
generalized eta-squared for which \strong{all non-manipulated (i.e.,
observed)} variables need to be specified via the \code{observed} argument to
obtain correct results. When changing the effect size to \code{"pes"}
(partial eta-squared) or \code{"none"} via \code{anova_table} this becomes
unnecessary.
If \code{check_contrasts = TRUE}, contrasts will be set to \code{"contr.sum"}
for all between-subject factors if default contrasts are not equal to
\code{"contr.sum"} or \code{attr(factor, "contrasts") != "contr.sum"}.
(within-subject factors are hard-coded \code{"contr.sum"}.) }
\subsection{Statistical Issues}{ \strong{Type 3 sums of squares are default
in \pkg{afex}.} While some authors argue that so-called type 3 sums of
squares are dangerous and/or problematic (most notably Venables, 2000), they
are the default in many commercial statistical application such as SPSS or
SAS. Furthermore, statisticians with an applied perspective recommend type 3
tests (e.g., Maxwell and Delaney, 2004). Consequently, they are the default
for the ANOVA functions described here. For some more discussion on this
issue see \href{https://stats.stackexchange.com/q/6208/442}{here}.
Note that lower order effects (e.g., main effects) in type 3 ANOVAs are only
meaningful with
\href{https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-effect-coding/}{effects
coding}. That is, contrasts should be set to \code{\link{contr.sum}} to
obtain meaningful results. This is imposed automatically for the functions
discussed here as long as \code{check_contrasts} is \code{TRUE} (the
default). I nevertheless recommend to set the contrasts globally to
\code{contr.sum} via running \code{\link{set_sum_contrasts}}. For a
discussion of the other (non-recommended) coding schemes see
\href{https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/}{here}. }
\subsection{Follow-Up Contrasts and Post-Hoc Tests}{ The S3 object returned
per default can be directly passed to \code{emmeans::emmeans} for further
analysis. This allows to test any type of contrasts that might be of interest
independent of whether or not this contrast involves between-subject
variables, within-subject variables, or a combination thereof. The general
procedure to run those contrasts is the following (see Examples for a full
example):
\enumerate{
\item Estimate an \code{afex_aov} object with the function returned here. For example: \code{x <- aov_car(dv ~ a*b + (id/c), d)}
\item Obtain a \code{\link[emmeans]{emmGrid-class}} object by running \code{\link[emmeans]{emmeans}} on the \code{afex_aov} object from step 1 using the factors involved in the contrast. For example: \code{r <- emmeans(x, ~a:c)}
\item Create a list containing the desired contrasts on the reference grid object from step 2. For example: \code{con1 <- list(a_x = c(-1, 1, 0, 0, 0, 0), b_x = c(0, 0, -0.5, -0.5, 0, 1))}
\item Test the contrast on the reference grid using \code{\link[emmeans]{contrast}}. For example: \code{contrast(r, con1)}
\item To control for multiple testing p-value adjustments can be specified. For example the Bonferroni-Holm correction: \code{contrast(r, con1, adjust = "holm")}
}
Note that \pkg{emmeans} allows for a variety of advanced settings and
simplifications, for example: all pairwise comparison of a single factor
using one command (e.g., \code{emmeans(x, "a", contr = "pairwise")}) or
advanced control for multiple testing by passing objects to \pkg{multcomp}.
A comprehensive overview of the functionality is provided in the
accompanying vignettes (see
\href{https://CRAN.R-project.org/package=emmeans}{here}).
A caveat regarding the use of \pkg{emmeans} concerns the assumption of
sphericity for ANOVAs including within-subjects/repeated-measures factors
(with more than two levels). The current default for follow-up tests uses a
univariate model (\code{model = "univariate"} in the call to
\code{emmeans}), which does not adequately control for violations of
sphericity. This may result in anti-conservative tests and contrasts
somewhat with the default ANOVA table which reports results based on the
Greenhouse-Geisser correction. An alternative is to use a multivariate
model (\code{model = "multivariate"} in the call to \code{emmeans}) which
should handle violations of sphericity better. The default will likely
change to multivariate tests in one of the next versions of the package.
Starting with \pkg{afex} version 0.22, \pkg{emmeans} is \emph{not}
loaded/attached automatically when loading \pkg{afex}. Therefore,
\pkg{emmeans} now needs to be loaded by the user via
\code{library("emmeans")} or \code{require("emmeans")}.
}
\subsection{Methods for \code{afex_aov} Objects}{ A full overview over the
methods provided for \code{afex_aov} objects is provided in the corresponding
help page: \code{\link{afex_aov-methods}}. The probably most important ones
for end-users are \code{summary}, \code{anova}, and \code{\link{nice}}.
The \code{summary} method returns, for ANOVAs containing within-subject
(repeated-measures) factors with more than two levels, the complete
univariate analysis: Results without df-correction, the Greenhouse-Geisser
corrected results, the Huynh-Feldt corrected results, and the results of the
Mauchly test for sphericity.
The \code{anova} method returns a \code{data.frame} of class \code{"anova"}
containing the ANOVA table in numeric form (i.e., the one in slot
\code{anova_table} of a \code{afex_aov}). This method has arguments such as
\code{correction} and \code{es} and can be used to obtain an ANOVA table with
different correction than the one initially specified.
The \code{\link{nice}} method also returns a \code{data.frame}, but rounds
most values and transforms them into characters for nice printing. Also has
arguments like \code{correction} and \code{es} which can be used to obtain an
ANOVA table with different correction than the one initially specified. }
}
\section{Functions}{
\itemize{
\item \code{aov_4}: Allows definition of ANOVA-model using
\code{lme4::lmer}-like Syntax (but still fits a standard ANOVA).
\item \code{aov_ez}: Allows definition of ANOVA-model using character strings.
}}
\note{
Calculation of ANOVA models via \code{aov} (which is done per default)
can be comparatively slow and produce comparatively large objects for
ANOVAs with many within-subjects factors or levels. To avoid this
calculation set \code{include_aov = FALSE}. You can also disable this
globally with: \code{afex_options(include_aov = FALSE)}
The id variable and variables entered as within-subjects (i.e.,
repeated-measures) factors are silently converted to factors. Levels of
within-subject factors are converted to valid variable names using
\code{\link{make.names}(...,unique=TRUE)}. Unused factor levels are
silently dropped on all variables.
Contrasts attached to a factor as an attribute are probably not preserved
and not supported.
The workhorse is \code{aov_car}. \code{aov_4} and \code{aov_ez} only
construct and pass an appropriate formula to \code{aov_car}. Use
\code{print.formula = TRUE} to view this formula.
In contrast to \code{\link{aov}} \code{aov_car} assumes that all factors to
the right of \code{/} in the \code{Error} term are belonging together.
Consequently, \code{Error(id/(a*b))} and \code{Error(id/a*b)} are identical
(which is not true for \code{\link{aov}}).
}
\examples{
##########################
## 1: Specifying ANOVAs ##
##########################
# Example using a purely within-subjects design
# (Maxwell & Delaney, 2004, Chapter 12, Table 12.5, p. 578):
data(md_12.1)
aov_ez("id", "rt", md_12.1, within = c("angle", "noise"),
anova_table=list(correction = "none", es = "none"))
# Default output
aov_ez("id", "rt", md_12.1, within = c("angle", "noise"))
# examples using obk.long (see ?obk.long), a long version of the OBrienKaiser dataset (car package).
# Data is a split-plot or mixed design: contains both within- and between-subjects factors.
data(obk.long, package = "afex")
# estimate mixed ANOVA on the full design:
aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long, observed = "gender")
aov_4(value ~ treatment * gender + (phase*hour|id),
data = obk.long, observed = "gender")
aov_ez("id", "value", obk.long, between = c("treatment", "gender"),
within = c("phase", "hour"), observed = "gender")
# the three calls return the same ANOVA table:
# Anova Table (Type 3 tests)
#
# Response: value
# Effect df MSE F ges p.value
# 1 treatment 2, 10 22.81 3.94 + .198 .055
# 2 gender 1, 10 22.81 3.66 + .115 .085
# 3 treatment:gender 2, 10 22.81 2.86 .179 .104
# 4 phase 1.60, 15.99 5.02 16.13 *** .151 <.001
# 5 treatment:phase 3.20, 15.99 5.02 4.85 * .097 .013
# 6 gender:phase 1.60, 15.99 5.02 0.28 .003 .709
# 7 treatment:gender:phase 3.20, 15.99 5.02 0.64 .014 .612
# 8 hour 1.84, 18.41 3.39 16.69 *** .125 <.001
# 9 treatment:hour 3.68, 18.41 3.39 0.09 .002 .979
# 10 gender:hour 1.84, 18.41 3.39 0.45 .004 .628
# 11 treatment:gender:hour 3.68, 18.41 3.39 0.62 .011 .641
# 12 phase:hour 3.60, 35.96 2.67 1.18 .015 .335
# 13 treatment:phase:hour 7.19, 35.96 2.67 0.35 .009 .930
# 14 gender:phase:hour 3.60, 35.96 2.67 0.93 .012 .449
# 15 treatment:gender:phase:hour 7.19, 35.96 2.67 0.74 .019 .646
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘+’ 0.1 ‘ ’ 1
#
# Sphericity correction method: GG
# "numeric" variables are per default converted to factors (as long as factorize = TRUE):
obk.long$hour2 <- as.numeric(as.character(obk.long$hour))
# gives same results as calls before
aov_car(value ~ treatment * gender + Error(id/phase*hour2),
data = obk.long, observed = c("gender"))
# ANCOVA: adding a covariate (necessary to set factorize = FALSE)
aov_car(value ~ treatment * gender + age + Error(id/(phase*hour)),
data = obk.long, observed = c("gender", "age"), factorize = FALSE)
aov_4(value ~ treatment * gender + age + (phase*hour|id),
data = obk.long, observed = c("gender", "age"), factorize = FALSE)
aov_ez("id", "value", obk.long, between = c("treatment", "gender"),
within = c("phase", "hour"), covariate = "age",
observed = c("gender", "age"), factorize = FALSE)
# aggregating over one within-subjects factor (phase), with warning:
aov_car(value ~ treatment * gender + Error(id/hour), data = obk.long, observed = "gender")
aov_ez("id", "value", obk.long, c("treatment", "gender"), "hour", observed = "gender")
# aggregating over both within-subjects factors (again with warning),
# only between-subjects factors:
aov_car(value ~ treatment * gender + Error(id), data = obk.long, observed = c("gender"))
aov_4(value ~ treatment * gender + (1|id), data = obk.long, observed = c("gender"))
aov_ez("id", "value", obk.long, between = c("treatment", "gender"), observed = "gender")
# only within-subject factors (ignoring between-subjects factors)
aov_car(value ~ Error(id/(phase*hour)), data = obk.long)
aov_4(value ~ (phase*hour|id), data = obk.long)
aov_ez("id", "value", obk.long, within = c("phase", "hour"))
### changing defaults of ANOVA table:
# no df-correction & partial eta-squared:
aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long, anova_table = list(correction = "none", es = "pes"))
# no df-correction and no MSE
aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long,observed = "gender",
anova_table = list(correction = "none", MSE = FALSE))
# add p-value adjustment for all effects (see Cramer et al., 2015, PB&R)
aov_ez("id", "value", obk.long, between = "treatment",
within = c("phase", "hour"),
anova_table = list(p_adjust_method = "holm"))
###########################
## 2: Follow-up Analysis ##
###########################
# use data as above
data(obk.long, package = "afex")
# 1. obtain afex_aov object:
a1 <- aov_ez("id", "value", obk.long, between = c("treatment", "gender"),
within = c("phase", "hour"), observed = "gender")
if (requireNamespace("ggplot2") & requireNamespace("emmeans")) {
# 1b. plot data using afex_plot function, for more see:
## vignette("afex_plot_introduction", package = "afex")
## default plot uses univariate model-based CIs
afex_plot(a1, "hour", "gender", c("treatment", "phase"))
## you can use the multivariate model and CIs
afex_plot(a1, "hour", "gender", c("treatment", "phase"),
emmeans_arg = list(model = "multivariate"))
## in a mixed between-within design, no error-bars might be preferable:
afex_plot(a1, "hour", "gender", c("treatment", "phase"), error = "none")
}
if (requireNamespace("emmeans")) {
library("emmeans") # package emmeans needs to be attached for follow-up tests.
# 2. obtain reference grid object (default uses univariate model):
r1 <- emmeans(a1, ~treatment +phase)
r1
# multivariate model may be more appropriate
r1 <- emmeans(a1, ~treatment +phase, model = "multivariate")
r1
# 3. create list of contrasts on the reference grid:
c1 <- list(
A_B_pre = c(rep(0, 6), 0, -1, 1), # A versus B for pretest
A_B_comb = c(-0.5, 0.5, 0, -0.5, 0.5, 0, 0, 0, 0), # A vs. B for post and follow-up combined
effect_post = c(0, 0, 0, -1, 0.5, 0.5, 0, 0, 0), # control versus A&B post
effect_fup = c(-1, 0.5, 0.5, 0, 0, 0, 0, 0, 0), # control versus A&B follow-up
effect_comb = c(-0.5, 0.25, 0.25, -0.5, 0.25, 0.25, 0, 0, 0) # control versus A&B combined
)
# 4. test contrasts on reference grid:
contrast(r1, c1)
# same as before, but using Bonferroni-Holm correction for multiple testing:
contrast(r1, c1, adjust = "holm")
# 2. (alternative): all pairwise comparisons of treatment:
emmeans(a1, "treatment", contr = "pairwise", model = "multivariate")
## set multivariate models globally:
# afex_options(emmeans_model = "multivariate")
}
#######################
## 3: Other examples ##
#######################
data(obk.long, package = "afex")
# replicating ?Anova using aov_car:
obk_anova <- aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long, type = 2)
# in contrast to aov you do not need the within-subject factors outside Error()
str(obk_anova, 1, give.attr = FALSE)
# List of 5
# $ anova_table:Classes 'anova' and 'data.frame': 15 obs. of 6 variables:
# $ aov :List of 5
# $ Anova :List of 14
# $ lm :List of 13
# $ data :List of 3
obk_anova$Anova
# Type II Repeated Measures MANOVA Tests: Pillai test statistic
# Df test stat approx F num Df den Df Pr(>F)
# (Intercept) 1 0.96954 318.34 1 10 6.532e-09 ***
# treatment 2 0.48092 4.63 2 10 0.0376868 *
# gender 1 0.20356 2.56 1 10 0.1409735
# treatment:gender 2 0.36350 2.86 2 10 0.1044692
# phase 1 0.85052 25.61 2 9 0.0001930 ***
# treatment:phase 2 0.68518 2.61 4 20 0.0667354 .
# gender:phase 1 0.04314 0.20 2 9 0.8199968
# treatment:gender:phase 2 0.31060 0.92 4 20 0.4721498
# hour 1 0.93468 25.04 4 7 0.0003043 ***
# treatment:hour 2 0.30144 0.35 8 16 0.9295212
# gender:hour 1 0.29274 0.72 4 7 0.6023742
# treatment:gender:hour 2 0.57022 0.80 8 16 0.6131884
# phase:hour 1 0.54958 0.46 8 3 0.8324517
# treatment:phase:hour 2 0.66367 0.25 16 8 0.9914415
# gender:phase:hour 1 0.69505 0.85 8 3 0.6202076
# treatment:gender:phase:hour 2 0.79277 0.33 16 8 0.9723693
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
}
\references{
Cramer, A. O. J., van Ravenzwaaij, D., Matzke, D., Steingroever,
H., Wetzels, R., Grasman, R. P. P. P., ... Wagenmakers, E.-J. (2015).
Hidden multiplicity in exploratory multiway ANOVA: Prevalence and remedies.
\emph{Psychonomic Bulletin & Review}, 1-8.
doi:\href{https://doi.org/10.3758/s13423-015-0913-5}{10.3758/s13423-015-0913-5}
Maxwell, S. E., & Delaney, H. D. (2004). \emph{Designing Experiments and
Analyzing Data: A Model-Comparisons Perspective}. Mahwah, N.J.: Lawrence
Erlbaum Associates.
Venables, W.N. (2000). \emph{Exegeses on linear models}. Paper presented to
the S-Plus User's Conference, Washington DC, 8-9 October 1998, Washington,
DC. Available from: \url{http://www.stats.ox.ac.uk/pub/MASS3/Exegeses.pdf}
}
\seealso{
Various methods for objects of class \code{afex_aov} are available:
\code{\link{afex_aov-methods}}
\code{\link{nice}} creates the nice ANOVA tables which is by default printed.
See also there for a slightly longer discussion of the available effect
sizes.
\code{\link{mixed}} provides a (formula) interface for obtaining p-values for
mixed-models via \pkg{lme4}. The functions presented here do not estimate
mixed models.
}
\author{
Henrik Singmann
The design of these functions was influenced by \code{\link[ez]{ezANOVA}}
from package \pkg{ez}.
}
| /man/aov_car.Rd | no_license | mattansb/afex | R | false | true | 28,239 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aov_car.R
\encoding{UTF-8}
\name{aov_car}
\alias{aov_car}
\alias{aov_4}
\alias{aov_ez}
\title{Convenient ANOVA estimation for factorial designs}
\usage{
aov_car(
formula,
data,
fun_aggregate = NULL,
type = afex_options("type"),
factorize = afex_options("factorize"),
check_contrasts = afex_options("check_contrasts"),
observed = NULL,
anova_table = list(),
include_aov = afex_options("include_aov"),
return = afex_options("return_aov"),
...
)
aov_4(
formula,
data,
observed = NULL,
fun_aggregate = NULL,
type = afex_options("type"),
factorize = afex_options("factorize"),
check_contrasts = afex_options("check_contrasts"),
return = afex_options("return_aov"),
anova_table = list(),
include_aov = afex_options("include_aov"),
...,
print.formula = FALSE
)
aov_ez(
id,
dv,
data,
between = NULL,
within = NULL,
covariate = NULL,
observed = NULL,
fun_aggregate = NULL,
transformation,
type = afex_options("type"),
factorize = afex_options("factorize"),
check_contrasts = afex_options("check_contrasts"),
return = afex_options("return_aov"),
anova_table = list(),
include_aov = afex_options("include_aov"),
...,
print.formula = FALSE
)
}
\arguments{
\item{formula}{A formula specifying the ANOVA model similar to
\code{\link{aov}} (for \code{aov_car} or similar to \code{lme4::lmer} for
\code{aov_4}). Must include an error term (i.e., \code{Error(id/...)} for
\code{aov_car} or \code{(...|id)} for \code{aov_4}). Note that the
within-subject factors do not need to be outside the Error term (this
contrasts with \code{aov}). See Details.}
\item{data}{A \code{data.frame} containing the data. Mandatory.}
\item{fun_aggregate}{The function for aggregating the data before running the
ANOVA if there is more than one observation per individual and cell of the
design. The default \code{NULL} issues a warning if aggregation is
necessary and uses \code{\link{mean}}. Pass \code{mean} directly to avoid
the warning.}
\item{type}{The type of sums of squares for the ANOVA. The default is given
by \code{afex_options("type")}, which is \strong{initially set to 3}.
Passed to \code{\link[car]{Anova}}. Possible values are \code{"II"},
\code{"III"}, \code{2}, or \code{3}.}
\item{factorize}{logical. Should between subject factors be factorized (with
note) before running the analysis. The default is given by
\code{afex_options("factorize")}, which is initially \code{TRUE}. If one
wants to run an ANCOVA, this needs to be set to \code{FALSE} (in which case
centering on 0 is checked on numeric variables).}
\item{check_contrasts}{\code{logical}. Should contrasts for between-subject
factors be checked and (if necessary) changed to be \code{"contr.sum"}. See
details. The default is given by \code{afex_options("check_contrasts")},
which is initially \code{TRUE}.}
\item{observed}{\code{character} vector indicating which of the variables are
observed (i.e, measured) as compared to experimentally manipulated. The
default effect size reported (generalized eta-squared) requires correct
specification of the observed (in contrast to manipulated) variables.}
\item{anova_table}{\code{list} of further arguments passed to function
producing the ANOVA table. Arguments such as \code{es} (effect size) or
\code{correction} are passed to either \code{anova.afex_aov} or
\code{nice}. Note that those settings can also be changed once an object of
class \code{afex_aov} is created by invoking the \code{anova} method
directly.}
\item{include_aov}{Boolean. Allows suppressing the calculation of the aov
object, which is per default part of the returned \code{afex_aov} object.
\code{FALSE} prevents this potentially costly calculation. Especially for
designs with larger N and within-subjects factors, this is highly
advisable. Follow-up analyses using \pkg{emmeans} are then always based on
the multivariate or \code{lm} model.}
\item{return}{What should be returned? The default is given by
\code{afex_options("return_aov")}, which is initially \code{"afex_aov"},
returning an S3 object of class \code{afex_aov} for which various
\link[=afex_aov-methods]{methods} exist (see there and below for more
details). Other values are currently still supported for backward
compatibility.}
\item{...}{Further arguments passed to \code{fun_aggregate}.}
\item{print.formula}{\code{aov_ez} and \code{aov_4} are wrapper for
\code{aov_car}. This boolean argument indicates whether the formula in the
call to \code{car.aov} should be printed.}
\item{id}{\code{character} vector (of length 1) indicating the subject
identifier column in \code{data}.}
\item{dv}{\code{character} vector (of length 1) indicating the column
containing the \strong{dependent variable} in \code{data}.}
\item{between}{\code{character} vector indicating the
\strong{between}-subject(s) factor(s)/column(s) in \code{data}. Default is
\code{NULL} indicating no between-subjects factors.}
\item{within}{\code{character} vector indicating the
\strong{within}-subject(s)(or repeated-measures) factor(s)/column(s) in
\code{data}. Default is \code{NULL} indicating no within-subjects factors.}
\item{covariate}{\code{character} vector indicating the between-subject(s)
covariate(s) (i.e., column(s)) in \code{data}. Default is \code{NULL}
indicating no covariates. Please note that \code{factorize} needs to be set
to \code{FALSE} in case the covariate is numeric and should be treated as
such.}
\item{transformation}{In \code{aov_ez}, a \code{character} vector (of length
1) indicating the name of a transformation to apply to \code{dv} before
fitting the model. If missing, no transformation is applied. In
\code{aov_car} and \code{aov_4}, a response transformation may be
incorporated in the left-hand side of \code{formula}.}
}
\value{
\code{aov_car}, \code{aov_4}, and \code{aov_ez} are wrappers for
\code{\link[car]{Anova}} and \code{\link{aov}}, the return value is
dependent on the \code{return} argument. Per default, an S3 object of class
\code{"afex_aov"} is returned containing the following slots:
\describe{
\item{\code{"anova_table"}}{An ANOVA table of class \code{c("anova",
"data.frame")}.}
\item{\code{"aov"}}{\code{aov} object returned from \code{\link{aov}}
(should not be used to evaluate significance of effects, but can be passed
to \code{emmeans} for post-hoc tests).}
\item{\code{"Anova"}}{object returned from \code{\link[car]{Anova}}, an
object of class \code{"Anova.mlm"} (if within-subjects factors are present)
or of class \code{c("anova", "data.frame")}.}
\item{\code{"lm"}}{the object fitted with \code{lm} and passed to
\code{Anova} (i.e., an object of class \code{"lm"} or \code{"mlm"}). Also
returned if \code{return = "lm"}.}
\item{\code{"data"}}{a list containing: (1) \code{long} (the possibly
aggregated data in long format used for \code{aov}), \code{wide} (the data
used to fit the \code{lm} object), and \code{idata} (if within-subject
factors are present, the \code{idata} argument passed to
\code{car::Anova}). Also returned if \code{return = "data"}.}
}
In addition, the object has the following attributes: \code{"dv"},
\code{"id"}, \code{"within"}, \code{"between"}, and \code{"type"}.
The \link[=afex_aov-methods]{print} method for \code{afex_aov} objects
(invisibly) returns (and prints) the same as if \code{return} is
\code{"nice"}: a nice ANOVA table (produced by \code{\link{nice}}) with the
following columns: \code{Effect}, \code{df}, \code{MSE} (mean-squared
errors), \code{F} (potentially with significant symbols), \code{ges}
(generalized eta-squared), \code{p}.
}
\description{
These functions allow convenient specification of any type of ANOVAs (i.e.,
purely within-subjects ANOVAs, purely between-subjects ANOVAs, and mixed
between-within or split-plot ANOVAs) for data in the \strong{long} format
(i.e., one observation per row). If the data has more than one observation
per individual and cell of the design (e.g., multiple responses per
condition), the data will be automatically aggregated. The default settings
reproduce results from commercial statistical packages such as SPSS or SAS.
\code{aov_ez} is called specifying the factors as character vectors,
\code{aov_car} is called using a formula similar to \code{\link{aov}}
specifying an error strata for the within-subject factor(s), and \code{aov_4}
is called with a \pkg{lme4}-like formula (all ANOVA functions return
identical results). The returned object contains the ANOVA also fitted via
base R's \code{\link{aov}} which can be passed to e.g., \pkg{emmeans} for
further analysis (e.g., follow-up tests, contrasts, plotting, etc.). These
functions employ \code{\link[car]{Anova}} (from the \pkg{car} package) to
provide test of effects avoiding the somewhat unhandy format of
\code{car::Anova}.
}
\details{
\subsection{Details of ANOVA Specification}{ \code{aov_ez} will concatenate
all between-subject factors using \code{*} (i.e., producing all main effects
and interactions) and all covariates by \code{+} (i.e., adding only the main
effects to the existing between-subject factors). The within-subject factors
do fully interact with all between-subject factors and covariates. This is
essentially identical to the behavior of SPSS's \code{glm} function.
The \code{formula}s for \code{aov_car} or \code{aov_4} must contain a single
\code{Error} term specifying the \code{ID} column and potential
within-subject factors (you can use \code{\link{mixed}} for running
mixed-effects models with multiple error terms). Factors outside the
\code{Error} term are treated as between-subject factors (the within-subject
factors specified in the \code{Error} term are ignored outside the
\code{Error} term; in other words, it is not necessary to specify them
outside the \code{Error} term, see Examples).\cr Suppressing the intercept
(i.e, via \code{0 +} or \code{- 1}) is ignored. Specific specifications of
effects (e.g., excluding terms with \code{-} or using \code{^}) could be okay
but is not tested. Using the \code{\link{I}} or \code{\link{poly}} function
within the formula is not tested and not supported!
To run an ANCOVA you need to set \code{factorize = FALSE} and make sure that
all variables have the correct type (i.e., factors are factors and numeric
variables are numeric and centered).
Note that the default behavior is to include calculation of the effect size
generalized eta-squared for which \strong{all non-manipulated (i.e.,
observed)} variables need to be specified via the \code{observed} argument to
obtain correct results. When changing the effect size to \code{"pes"}
(partial eta-squared) or \code{"none"} via \code{anova_table} this becomes
unnecessary.
If \code{check_contrasts = TRUE}, contrasts will be set to \code{"contr.sum"}
for all between-subject factors if default contrasts are not equal to
\code{"contr.sum"} or \code{attr(factor, "contrasts") != "contr.sum"}.
(within-subject factors are hard-coded \code{"contr.sum"}.) }
\subsection{Statistical Issues}{ \strong{Type 3 sums of squares are default
in \pkg{afex}.} While some authors argue that so-called type 3 sums of
squares are dangerous and/or problematic (most notably Venables, 2000), they
are the default in many commercial statistical application such as SPSS or
SAS. Furthermore, statisticians with an applied perspective recommend type 3
tests (e.g., Maxwell and Delaney, 2004). Consequently, they are the default
for the ANOVA functions described here. For some more discussion on this
issue see \href{https://stats.stackexchange.com/q/6208/442}{here}.
Note that lower order effects (e.g., main effects) in type 3 ANOVAs are only
meaningful with
\href{https://stats.idre.ucla.edu/other/mult-pkg/faq/general/faqwhat-is-effect-coding/}{effects
coding}. That is, contrasts should be set to \code{\link{contr.sum}} to
obtain meaningful results. This is imposed automatically for the functions
discussed here as long as \code{check_contrasts} is \code{TRUE} (the
default). I nevertheless recommend to set the contrasts globally to
\code{contr.sum} via running \code{\link{set_sum_contrasts}}. For a
discussion of the other (non-recommended) coding schemes see
\href{https://stats.idre.ucla.edu/r/library/r-library-contrast-coding-systems-for-categorical-variables/}{here}. }
\subsection{Follow-Up Contrasts and Post-Hoc Tests}{ The S3 object returned
per default can be directly passed to \code{emmeans::emmeans} for further
analysis. This allows to test any type of contrasts that might be of interest
independent of whether or not this contrast involves between-subject
variables, within-subject variables, or a combination thereof. The general
procedure to run those contrasts is the following (see Examples for a full
example):
\enumerate{
\item Estimate an \code{afex_aov} object with the function returned here. For example: \code{x <- aov_car(dv ~ a*b + (id/c), d)}
\item Obtain a \code{\link[emmeans]{emmGrid-class}} object by running \code{\link[emmeans]{emmeans}} on the \code{afex_aov} object from step 1 using the factors involved in the contrast. For example: \code{r <- emmeans(x, ~a:c)}
\item Create a list containing the desired contrasts on the reference grid object from step 2. For example: \code{con1 <- list(a_x = c(-1, 1, 0, 0, 0, 0), b_x = c(0, 0, -0.5, -0.5, 0, 1))}
\item Test the contrast on the reference grid using \code{\link[emmeans]{contrast}}. For example: \code{contrast(r, con1)}
\item To control for multiple testing p-value adjustments can be specified. For example the Bonferroni-Holm correction: \code{contrast(r, con1, adjust = "holm")}
}
Note that \pkg{emmeans} allows for a variety of advanced settings and
simplifications, for example: all pairwise comparisons of a single factor
using one command (e.g., \code{emmeans(x, "a", contr = "pairwise")}) or
advanced control for multiple testing by passing objects to \pkg{multcomp}.
A comprehensive overview of the functionality is provided in the
accompanying vignettes (see
\href{https://CRAN.R-project.org/package=emmeans}{here}).
A caveat regarding the use of \pkg{emmeans} concerns the assumption of
sphericity for ANOVAs including within-subjects/repeated-measures factors
(with more than two levels). The current default for follow-up tests uses a
univariate model (\code{model = "univariate"} in the call to
\code{emmeans}), which does not adequately control for violations of
sphericity. This may result in anti-conservative tests and contrasts
somewhat with the default ANOVA table which reports results based on the
Greenhouse-Geisser correction. An alternative is to use a multivariate
model (\code{model = "multivariate"} in the call to \code{emmeans}) which
should handle violations of sphericity better. The default will likely
change to multivariate tests in one of the next versions of the package.
Starting with \pkg{afex} version 0.22, \pkg{emmeans} is \emph{not}
loaded/attached automatically when loading \pkg{afex}. Therefore,
\pkg{emmeans} now needs to be loaded by the user via
\code{library("emmeans")} or \code{require("emmeans")}.
}
\subsection{Methods for \code{afex_aov} Objects}{ A full overview over the
methods provided for \code{afex_aov} objects is provided in the corresponding
help page: \code{\link{afex_aov-methods}}. The probably most important ones
for end-users are \code{summary}, \code{anova}, and \code{\link{nice}}.
The \code{summary} method returns, for ANOVAs containing within-subject
(repeated-measures) factors with more than two levels, the complete
univariate analysis: Results without df-correction, the Greenhouse-Geisser
corrected results, the Huynh-Feldt corrected results, and the results of the
Mauchly test for sphericity.
The \code{anova} method returns a \code{data.frame} of class \code{"anova"}
containing the ANOVA table in numeric form (i.e., the one in slot
\code{anova_table} of a \code{afex_aov}). This method has arguments such as
\code{correction} and \code{es} and can be used to obtain an ANOVA table with
different correction than the one initially specified.
The \code{\link{nice}} method also returns a \code{data.frame}, but rounds
most values and transforms them into characters for nice printing. Also has
arguments like \code{correction} and \code{es} which can be used to obtain an
ANOVA table with different correction than the one initially specified. }
}
\section{Functions}{
\itemize{
\item \code{aov_4}: Allows definition of ANOVA-model using
\code{lme4::lmer}-like Syntax (but still fits a standard ANOVA).
\item \code{aov_ez}: Allows definition of ANOVA-model using character strings.
}}
\note{
Calculation of ANOVA models via \code{aov} (which is done per default)
can be comparatively slow and produce comparatively large objects for
ANOVAs with many within-subjects factors or levels. To avoid this
calculation set \code{include_aov = FALSE}. You can also disable this
globally with: \code{afex_options(include_aov = FALSE)}
The id variable and variables entered as within-subjects (i.e.,
repeated-measures) factors are silently converted to factors. Levels of
within-subject factors are converted to valid variable names using
\code{\link{make.names}(...,unique=TRUE)}. Unused factor levels are
silently dropped on all variables.
Contrasts attached to a factor as an attribute are probably not preserved
and not supported.
The workhorse is \code{aov_car}. \code{aov_4} and \code{aov_ez} only
construct and pass an appropriate formula to \code{aov_car}. Use
\code{print.formula = TRUE} to view this formula.
In contrast to \code{\link{aov}} \code{aov_car} assumes that all factors to
the right of \code{/} in the \code{Error} term are belonging together.
Consequently, \code{Error(id/(a*b))} and \code{Error(id/a*b)} are identical
(which is not true for \code{\link{aov}}).
}
\examples{
##########################
## 1: Specifying ANOVAs ##
##########################
# Example using a purely within-subjects design
# (Maxwell & Delaney, 2004, Chapter 12, Table 12.5, p. 578):
data(md_12.1)
aov_ez("id", "rt", md_12.1, within = c("angle", "noise"),
anova_table=list(correction = "none", es = "none"))
# Default output
aov_ez("id", "rt", md_12.1, within = c("angle", "noise"))
# examples using obk.long (see ?obk.long), a long version of the OBrienKaiser dataset (car package).
# Data is a split-plot or mixed design: contains both within- and between-subjects factors.
data(obk.long, package = "afex")
# estimate mixed ANOVA on the full design:
aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long, observed = "gender")
aov_4(value ~ treatment * gender + (phase*hour|id),
data = obk.long, observed = "gender")
aov_ez("id", "value", obk.long, between = c("treatment", "gender"),
within = c("phase", "hour"), observed = "gender")
# the three calls return the same ANOVA table:
# Anova Table (Type 3 tests)
#
# Response: value
# Effect df MSE F ges p.value
# 1 treatment 2, 10 22.81 3.94 + .198 .055
# 2 gender 1, 10 22.81 3.66 + .115 .085
# 3 treatment:gender 2, 10 22.81 2.86 .179 .104
# 4 phase 1.60, 15.99 5.02 16.13 *** .151 <.001
# 5 treatment:phase 3.20, 15.99 5.02 4.85 * .097 .013
# 6 gender:phase 1.60, 15.99 5.02 0.28 .003 .709
# 7 treatment:gender:phase 3.20, 15.99 5.02 0.64 .014 .612
# 8 hour 1.84, 18.41 3.39 16.69 *** .125 <.001
# 9 treatment:hour 3.68, 18.41 3.39 0.09 .002 .979
# 10 gender:hour 1.84, 18.41 3.39 0.45 .004 .628
# 11 treatment:gender:hour 3.68, 18.41 3.39 0.62 .011 .641
# 12 phase:hour 3.60, 35.96 2.67 1.18 .015 .335
# 13 treatment:phase:hour 7.19, 35.96 2.67 0.35 .009 .930
# 14 gender:phase:hour 3.60, 35.96 2.67 0.93 .012 .449
# 15 treatment:gender:phase:hour 7.19, 35.96 2.67 0.74 .019 .646
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘+’ 0.1 ‘ ’ 1
#
# Sphericity correction method: GG
# "numeric" variables are per default converted to factors (as long as factorize = TRUE):
obk.long$hour2 <- as.numeric(as.character(obk.long$hour))
# gives same results as calls before
aov_car(value ~ treatment * gender + Error(id/phase*hour2),
data = obk.long, observed = c("gender"))
# ANCOVA: adding a covariate (necessary to set factorize = FALSE)
aov_car(value ~ treatment * gender + age + Error(id/(phase*hour)),
data = obk.long, observed = c("gender", "age"), factorize = FALSE)
aov_4(value ~ treatment * gender + age + (phase*hour|id),
data = obk.long, observed = c("gender", "age"), factorize = FALSE)
aov_ez("id", "value", obk.long, between = c("treatment", "gender"),
within = c("phase", "hour"), covariate = "age",
observed = c("gender", "age"), factorize = FALSE)
# aggregating over one within-subjects factor (phase), with warning:
aov_car(value ~ treatment * gender + Error(id/hour), data = obk.long, observed = "gender")
aov_ez("id", "value", obk.long, c("treatment", "gender"), "hour", observed = "gender")
# aggregating over both within-subjects factors (again with warning),
# only between-subjects factors:
aov_car(value ~ treatment * gender + Error(id), data = obk.long, observed = c("gender"))
aov_4(value ~ treatment * gender + (1|id), data = obk.long, observed = c("gender"))
aov_ez("id", "value", obk.long, between = c("treatment", "gender"), observed = "gender")
# only within-subject factors (ignoring between-subjects factors)
aov_car(value ~ Error(id/(phase*hour)), data = obk.long)
aov_4(value ~ (phase*hour|id), data = obk.long)
aov_ez("id", "value", obk.long, within = c("phase", "hour"))
### changing defaults of ANOVA table:
# no df-correction & partial eta-squared:
aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long, anova_table = list(correction = "none", es = "pes"))
# no df-correction and no MSE
aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long,observed = "gender",
anova_table = list(correction = "none", MSE = FALSE))
# add p-value adjustment for all effects (see Cramer et al., 2015, PB&R)
aov_ez("id", "value", obk.long, between = "treatment",
within = c("phase", "hour"),
anova_table = list(p_adjust_method = "holm"))
###########################
## 2: Follow-up Analysis ##
###########################
# use data as above
data(obk.long, package = "afex")
# 1. obtain afex_aov object:
a1 <- aov_ez("id", "value", obk.long, between = c("treatment", "gender"),
within = c("phase", "hour"), observed = "gender")
if (requireNamespace("ggplot2") & requireNamespace("emmeans")) {
# 1b. plot data using afex_plot function, for more see:
## vignette("afex_plot_introduction", package = "afex")
## default plot uses univariate model-based CIs
afex_plot(a1, "hour", "gender", c("treatment", "phase"))
## you can use the multivariate model and CIs
afex_plot(a1, "hour", "gender", c("treatment", "phase"),
emmeans_arg = list(model = "multivariate"))
## in a mixed between-within design, no error-bars might be preferable:
afex_plot(a1, "hour", "gender", c("treatment", "phase"), error = "none")
}
if (requireNamespace("emmeans")) {
library("emmeans") # package emmeans needs to be attached for follow-up tests.
# 2. obtain reference grid object (default uses univariate model):
r1 <- emmeans(a1, ~treatment +phase)
r1
# multivariate model may be more appropriate
r1 <- emmeans(a1, ~treatment +phase, model = "multivariate")
r1
# 3. create list of contrasts on the reference grid:
c1 <- list(
A_B_pre = c(rep(0, 6), 0, -1, 1), # A versus B for pretest
A_B_comb = c(-0.5, 0.5, 0, -0.5, 0.5, 0, 0, 0, 0), # A vs. B for post and follow-up combined
effect_post = c(0, 0, 0, -1, 0.5, 0.5, 0, 0, 0), # control versus A&B post
effect_fup = c(-1, 0.5, 0.5, 0, 0, 0, 0, 0, 0), # control versus A&B follow-up
effect_comb = c(-0.5, 0.25, 0.25, -0.5, 0.25, 0.25, 0, 0, 0) # control versus A&B combined
)
# 4. test contrasts on reference grid:
contrast(r1, c1)
# same as before, but using Bonferroni-Holm correction for multiple testing:
contrast(r1, c1, adjust = "holm")
# 2. (alternative): all pairwise comparisons of treatment:
emmeans(a1, "treatment", contr = "pairwise", model = "multivariate")
## set multivariate models globally:
# afex_options(emmeans_model = "multivariate")
}
#######################
## 3: Other examples ##
#######################
data(obk.long, package = "afex")
# replicating ?Anova using aov_car:
obk_anova <- aov_car(value ~ treatment * gender + Error(id/(phase*hour)),
data = obk.long, type = 2)
# in contrast to aov you do not need the within-subject factors outside Error()
str(obk_anova, 1, give.attr = FALSE)
# List of 5
# $ anova_table:Classes 'anova' and 'data.frame': 15 obs. of 6 variables:
# $ aov :List of 5
# $ Anova :List of 14
# $ lm :List of 13
# $ data :List of 3
obk_anova$Anova
# Type II Repeated Measures MANOVA Tests: Pillai test statistic
# Df test stat approx F num Df den Df Pr(>F)
# (Intercept) 1 0.96954 318.34 1 10 6.532e-09 ***
# treatment 2 0.48092 4.63 2 10 0.0376868 *
# gender 1 0.20356 2.56 1 10 0.1409735
# treatment:gender 2 0.36350 2.86 2 10 0.1044692
# phase 1 0.85052 25.61 2 9 0.0001930 ***
# treatment:phase 2 0.68518 2.61 4 20 0.0667354 .
# gender:phase 1 0.04314 0.20 2 9 0.8199968
# treatment:gender:phase 2 0.31060 0.92 4 20 0.4721498
# hour 1 0.93468 25.04 4 7 0.0003043 ***
# treatment:hour 2 0.30144 0.35 8 16 0.9295212
# gender:hour 1 0.29274 0.72 4 7 0.6023742
# treatment:gender:hour 2 0.57022 0.80 8 16 0.6131884
# phase:hour 1 0.54958 0.46 8 3 0.8324517
# treatment:phase:hour 2 0.66367 0.25 16 8 0.9914415
# gender:phase:hour 1 0.69505 0.85 8 3 0.6202076
# treatment:gender:phase:hour 2 0.79277 0.33 16 8 0.9723693
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
}
\references{
Cramer, A. O. J., van Ravenzwaaij, D., Matzke, D., Steingroever,
H., Wetzels, R., Grasman, R. P. P. P., ... Wagenmakers, E.-J. (2015).
Hidden multiplicity in exploratory multiway ANOVA: Prevalence and remedies.
\emph{Psychonomic Bulletin & Review}, 1-8.
doi:\href{https://doi.org/10.3758/s13423-015-0913-5}{10.3758/s13423-015-0913-5}
Maxwell, S. E., & Delaney, H. D. (2004). \emph{Designing Experiments and
Analyzing Data: A Model-Comparisons Perspective}. Mahwah, N.J.: Lawrence
Erlbaum Associates.
Venables, W.N. (2000). \emph{Exegeses on linear models}. Paper presented to
the S-Plus User's Conference, Washington DC, 8-9 October 1998, Washington,
DC. Available from: \url{http://www.stats.ox.ac.uk/pub/MASS3/Exegeses.pdf}
}
\seealso{
Various methods for objects of class \code{afex_aov} are available:
\code{\link{afex_aov-methods}}
\code{\link{nice}} creates the nice ANOVA tables which is by default printed.
See also there for a slightly longer discussion of the available effect
sizes.
\code{\link{mixed}} provides a (formula) interface for obtaining p-values for
mixed-models via \pkg{lme4}. The functions presented here do not estimate
mixed models.
}
\author{
Henrik Singmann
The design of these functions was influenced by \code{\link[ez]{ezANOVA}}
from package \pkg{ez}.
}
|
# Export the best Python model's predictions as a submission file.
# Variant 1: reuse the time stamps from the provided sample submission.
library(tidyverse)
path.dir <- './ignore_data/'
answer <- read_csv(paste0(path.dir, 'sample_submission.csv'))
# Print the time stamps with exactly 4 decimal places.
answer$time <- format(answer$time, nsmall = 4)
# The second column of the Python output holds the predicted open_channels.
answer$open_channels <- read_csv('best py result.csv')[[2]]
write_csv(answer, paste0(path.dir, 'bestpy.csv'))
# Variant 2: rebuild the time axis directly as 2,000,000 evenly spaced
# stamps between 500.0001 and 700.0000 and write to the working directory.
library(tidyverse)
answer <- tibble(
  time = format(seq(500.0001, 700.0000, length.out = 2000000), nsmall = 4),
  open_channels = read_csv('best py result.csv')[[2]]
)
write_csv(answer, 'bestpy.csv')
| /Ion Switching/COMPLETE PY RESULTS.R | no_license | PasaOpasen/Competitions | R | false | false | 460 | r |
# Export the best Python model's predictions as a submission file.
# Variant 1: reuse the time stamps from the provided sample submission.
library(tidyverse)
path.dir <- './ignore_data/'
answer <- read_csv(paste0(path.dir, 'sample_submission.csv'))
# Print the time stamps with exactly 4 decimal places.
answer$time <- format(answer$time, nsmall = 4)
# The second column of the Python output holds the predicted open_channels.
answer$open_channels <- read_csv('best py result.csv')[[2]]
write_csv(answer, paste0(path.dir, 'bestpy.csv'))
# Variant 2: rebuild the time axis directly as 2,000,000 evenly spaced
# stamps between 500.0001 and 700.0000 and write to the working directory.
library(tidyverse)
answer <- tibble(
  time = format(seq(500.0001, 700.0000, length.out = 2000000), nsmall = 4),
  open_channels = read_csv('best py result.csv')[[2]]
)
write_csv(answer, 'bestpy.csv')
|
# Walkthrough: authenticate against the GitHub REST API with OAuth2 via
# httr, then fetch a user's repository list with the resulting token.
library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. Register an application at https://github.com/settings/applications
# Insert your values below - if secret is omitted, it will look it up in
# the GITHUB_CONSUMER_SECRET environmental variable.
#
# Use http://localhost:1410 as the callback url
myapp <- oauth_app("github", "5368a09e7a2784cae808")
# 3. Get OAuth credentials
# (interactive: launches the OAuth authorization step in a browser;
# see ?oauth2.0_token for details and on-disk token caching)
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
# Authenticated request for jtleek's repositories.
req <- GET("https://api.github.com/users/jtleek/repos", config(token = github_token))
# Convert any HTTP error status into an R error before using the body.
stop_for_status(req)
content(req) | /GithubAPI.R | no_license | reevaedd/datasciencecoursera | R | false | false | 659 | r | library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. Register an application at https://github.com/settings/applications
# Insert your values below - if secret is omitted, it will look it up in
# the GITHUB_CONSUMER_SECRET environmental variable.
#
# Use http://localhost:1410 as the callback url
myapp <- oauth_app("github", "5368a09e7a2784cae808")
# 3. Get OAuth credentials
# (interactive: launches the OAuth authorization step in a browser;
# see ?oauth2.0_token for details and on-disk token caching)
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
# Authenticated request for jtleek's repositories.
req <- GET("https://api.github.com/users/jtleek/repos", config(token = github_token))
# Convert any HTTP error status into an R error before using the body.
stop_for_status(req)
# Parse the response body (repository metadata).
content(req)
# Author: Begum Topcuoglu
# Date: 2018-02-12
#
######################################################################
# This script plots Figure 2:
# 1. Y axis: mean cvAUC of 100 datasplits
# 2. X axis: different hyper-parameters tested in cv(hp)
######################################################################
######################################################################
# Load in needed functions and libraries
source('code/learning/functions.R')
# detach("package:randomForest", unload=TRUE) to run
######################################################################
#----------------- Read in necessary libraries -------------------#
######################################################################
# Install any dependency that is missing from the local library, then
# attach all of them for the plotting code below.
deps <- c("scales", "cowplot", "ggplot2", "knitr", "rmarkdown", "vegan",
          "gtools", "tidyverse")
for (dep in deps) {
  # Negate the membership test directly instead of comparing `== FALSE`.
  if (!(dep %in% installed.packages()[, "Package"])) {
    install.packages(as.character(dep), quiet = TRUE)
  }
  library(dep, verbose = FALSE, character.only = TRUE)
}
######################################################################
######################################################################
# Load hyperparameter-tuning data generated with the modeling pipeline
# for the non-linear models plotted below (decision tree, random
# forest, SVM with RBF kernel, and XGBoost).
######################################################################
# Read in the results of trained model of 100 data-splits.
# NOTE(review): models are picked out of the sorted file listing by
# position; confirm these indices still match if files are added to or
# removed from data/process.
all_files <- list.files(path= 'data/process', pattern='combined_all_hp.*', full.names = TRUE)
rbf_all <- read_files(all_files[6])
rf_all <- read_files(all_files[5])
dt_all <- read_files(all_files[1])
xgboost_all <- read_files(all_files[7])
######################################################################
#Plot the mean AUC values for hyper parameters tested #
######################################################################
# Define the base plot for all the modeling methods
# Build the shared scaffold used by the single-hyperparameter tuning
# plots: a line-and-point trace of mean cvAUROC over the tuned value,
# a dashed reference line at AUROC = 0.5, and a compact theme.
#
# Args:
#   data:   data frame holding the summarised tuning results.
#   x_axis: values to place on the x axis (the hyperparameter).
#   y_axis: values to place on the y axis (mean cvAUROC).
# Returns: a ggplot object that callers extend with scales/error bars.
base_plot <- function(data, x_axis, y_axis){
  compact_theme <- theme(legend.text = element_text(size = 6),
                         legend.title = element_text(size = 7),
                         panel.grid.major = element_blank(),
                         panel.grid.minor = element_blank(),
                         panel.background = element_blank(),
                         text = element_text(size = 6),
                         axis.text.x = element_text(size = 6, colour = 'black'),
                         axis.text.y = element_text(size = 6, colour = 'black'),
                         axis.title.y = element_text(size = 7),
                         axis.title.x = element_text(size = 7))
  ggplot(data, aes(x_axis, y_axis)) +
    geom_line() +
    geom_point() +
    theme_bw() +
    geom_hline(yintercept = 0.5, linetype = "dashed") +
    compact_theme
}
# Decision tree: summarise mean and sd of cvAUROC over the 100 data
# splits for each maximum tree depth that was tuned over.
dt <- dt_all %>%
group_by(maxdepth) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
dt_plot <- base_plot(dt, dt$maxdepth, dt$mean_AUC) +
scale_x_continuous(name="Maximum depth of tree") +
scale_y_continuous(name="Decision tree
mean cvAUROC",
limits = c(0.30, 1),
breaks = seq(0.3, 1, 0.1)) +
geom_errorbar(aes(ymin=mean_AUC-sd_AUC, ymax=mean_AUC+sd_AUC), width=.001)
# Random forest: same summary, tuned over the number of candidate
# features per split (mtry).
rf <- rf_all %>%
group_by(mtry) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
rf_plot <- base_plot(rf, rf$mtry, rf$mean_AUC) +
scale_x_continuous(name="Number of features (mtry)",
breaks=seq(0, 1500, 250), limits = c(0, 1500)) +
scale_y_continuous(name="Random forest
mean cvAUROC",
limits = c(0.30, 1),
breaks = seq(0.3, 1, 0.1)) +
geom_errorbar(aes(ymin=mean_AUC-sd_AUC, ymax=mean_AUC+sd_AUC), width=1)
# Start plotting models with 2 hyper-parameters individually
# (heatmaps of mean cvAUROC over the 2-D tuning grid; the starred point
# marks the hyper-parameter pair annotated below as the best one).
rbf_data <- rbf_all %>%
group_by(sigma, C) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
rbf_plot <- ggplot(rbf_data, aes(x = sigma, y = C, fill = mean_AUC)) +
geom_tile() +
scale_fill_gradient(name= "SVM RBF mean cvAUROC",
low = "#FFFFFF",
high = "#012345") +
annotate("point", # best hp for rbf svm - highest mean cv AUROC
x = 0.000001,
y = 0.01,
colour = "#FC4E07",
size = 3,
shape = 8) +
#coord_fixed(ratio = 0.5) +
#coord_equal() +
scale_y_log10(name="Regularization penalty
(C)",
breaks = c(0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10),
expand = c(0, 0),
labels=trans_format('log10',math_format(10^.x))) +
scale_x_log10(name = "The reach of a single training instance (sigma)",
breaks = c(0.00000001, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1),
expand = c(0, 0),
labels=trans_format('log10',math_format(10^.x))) +
theme(legend.background = element_rect(size=0.5, linetype="solid", color="black"),
legend.box.margin=margin(c(1,1,1,1)),
legend.text=element_text(size=5),
legend.title=element_text(size=5),
legend.position="bottom",
axis.title = element_text(size=6),
axis.text = element_text(size=6),
panel.border = element_rect(colour = "black", fill=NA, size=3),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.x=element_text(size = 6, colour='black'),
axis.text.y=element_text(size = 6, colour='black'))
# XGBoost: heatmap over learning rate (eta) x subsample ratio.
xgboost_data <- xgboost_all %>%
group_by(eta, subsample) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
xgboost_plot <- ggplot(xgboost_data, aes(x = eta, y = subsample, fill = mean_AUC)) +
geom_tile() +
#coord_fixed(ratio = 5) +
scale_fill_gradient(name= "XGBoost mean cvAUROC",
low = "#FFFFFF",
high = "#012345") +
annotate("point", # best hp for xgboost - highest mean cv AUROC
x = 0.01,
y = 0.5,
colour = "#FC4E07",
size = 3,
shape = 8) +
scale_y_continuous(name="Ratio of the training data
(subsample)",
breaks = c(0.4, 0.5, 0.6, 0.7),
expand=c(0,0)) +
scale_x_log10(name = "Learning rate (eta)",
breaks = c(0.001, 0.01, 0.1, 1),
expand = c(0, 0),
labels=trans_format('log10',math_format(10^.x))) +
guides(fill=guide_colourbar(barwidth = 8, barheight = 1)) +
theme(axis.title = element_text(size=6),
axis.text = element_text(size=6),
panel.border = element_rect(colour = "black", fill=NA, size=3),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.x=element_text(size = 6, colour='black'),
axis.text.y=element_text(size = 6, colour='black'),
legend.background = element_rect(size=0.5, linetype="solid", color="black"),
legend.box.margin=margin(c(1,1,1,1)),
legend.text=element_text(size=5),
legend.title=element_text(size=5), legend.position="bottom")
# Arrange the four panels into a single labelled 2x2 figure.
non_linear_models <- plot_grid(dt_plot, rf_plot, rbf_plot, xgboost_plot, labels = c("A", "B", "C", "D"), ncol=2)
######################################################################
#--------------------- Save figure as .tiff ------------------------ #
######################################################################
ggsave("Figure_S3.tiff", plot = non_linear_models, device = 'tiff', path = 'submission', width = 6, height = 5, dpi=300)
| /code/learning/FigureS2.R | permissive | SchlossLab/Topcuoglu_ML_mBio_2020 | R | false | false | 7,296 | r | # Author: Begum Topcuoglu
# Date: 2018-02-12
#
######################################################################
# This script plots Figure 2:
# 1. Y axis: mean cvAUC of 100 datasplits
# 2. X axis: different hyper-parameters tested in cv(hp)
######################################################################
######################################################################
# Load in needed functions and libraries
source('code/learning/functions.R')
# detach("package:randomForest", unload=TRUE) to run
######################################################################
#----------------- Read in necessary libraries -------------------#
######################################################################
# Install any dependency that is missing from the local library, then
# attach all of them for the plotting code below.
deps <- c("scales", "cowplot", "ggplot2", "knitr", "rmarkdown", "vegan",
          "gtools", "tidyverse")
for (dep in deps) {
  # Negate the membership test directly instead of comparing `== FALSE`.
  if (!(dep %in% installed.packages()[, "Package"])) {
    install.packages(as.character(dep), quiet = TRUE)
  }
  library(dep, verbose = FALSE, character.only = TRUE)
}
######################################################################
######################################################################
# Load hyperparameter-tuning data generated with the modeling pipeline
# for the non-linear models plotted below (decision tree, random
# forest, SVM with RBF kernel, and XGBoost).
######################################################################
# Read in the results of trained model of 100 data-splits.
# NOTE(review): models are picked out of the sorted file listing by
# position; confirm these indices still match if files are added to or
# removed from data/process.
all_files <- list.files(path= 'data/process', pattern='combined_all_hp.*', full.names = TRUE)
rbf_all <- read_files(all_files[6])
rf_all <- read_files(all_files[5])
dt_all <- read_files(all_files[1])
xgboost_all <- read_files(all_files[7])
######################################################################
#Plot the mean AUC values for hyper parameters tested #
######################################################################
# Define the base plot for all the modeling methods
# Build the shared scaffold used by the single-hyperparameter tuning
# plots: a line-and-point trace of mean cvAUROC over the tuned value,
# a dashed reference line at AUROC = 0.5, and a compact theme.
#
# Args:
#   data:   data frame holding the summarised tuning results.
#   x_axis: values to place on the x axis (the hyperparameter).
#   y_axis: values to place on the y axis (mean cvAUROC).
# Returns: a ggplot object that callers extend with scales/error bars.
base_plot <- function(data, x_axis, y_axis){
  compact_theme <- theme(legend.text = element_text(size = 6),
                         legend.title = element_text(size = 7),
                         panel.grid.major = element_blank(),
                         panel.grid.minor = element_blank(),
                         panel.background = element_blank(),
                         text = element_text(size = 6),
                         axis.text.x = element_text(size = 6, colour = 'black'),
                         axis.text.y = element_text(size = 6, colour = 'black'),
                         axis.title.y = element_text(size = 7),
                         axis.title.x = element_text(size = 7))
  ggplot(data, aes(x_axis, y_axis)) +
    geom_line() +
    geom_point() +
    theme_bw() +
    geom_hline(yintercept = 0.5, linetype = "dashed") +
    compact_theme
}
# Decision tree: summarise mean and sd of cvAUROC over the 100 data
# splits for each maximum tree depth that was tuned over.
dt <- dt_all %>%
group_by(maxdepth) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
dt_plot <- base_plot(dt, dt$maxdepth, dt$mean_AUC) +
scale_x_continuous(name="Maximum depth of tree") +
scale_y_continuous(name="Decision tree
mean cvAUROC",
limits = c(0.30, 1),
breaks = seq(0.3, 1, 0.1)) +
geom_errorbar(aes(ymin=mean_AUC-sd_AUC, ymax=mean_AUC+sd_AUC), width=.001)
# Random forest: same summary, tuned over the number of candidate
# features per split (mtry).
rf <- rf_all %>%
group_by(mtry) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
rf_plot <- base_plot(rf, rf$mtry, rf$mean_AUC) +
scale_x_continuous(name="Number of features (mtry)",
breaks=seq(0, 1500, 250), limits = c(0, 1500)) +
scale_y_continuous(name="Random forest
mean cvAUROC",
limits = c(0.30, 1),
breaks = seq(0.3, 1, 0.1)) +
geom_errorbar(aes(ymin=mean_AUC-sd_AUC, ymax=mean_AUC+sd_AUC), width=1)
# Start plotting models with 2 hyper-parameters individually
# (heatmaps of mean cvAUROC over the 2-D tuning grid; the starred point
# marks the hyper-parameter pair annotated below as the best one).
rbf_data <- rbf_all %>%
group_by(sigma, C) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
rbf_plot <- ggplot(rbf_data, aes(x = sigma, y = C, fill = mean_AUC)) +
geom_tile() +
scale_fill_gradient(name= "SVM RBF mean cvAUROC",
low = "#FFFFFF",
high = "#012345") +
annotate("point", # best hp for rbf svm - highest mean cv AUROC
x = 0.000001,
y = 0.01,
colour = "#FC4E07",
size = 3,
shape = 8) +
#coord_fixed(ratio = 0.5) +
#coord_equal() +
scale_y_log10(name="Regularization penalty
(C)",
breaks = c(0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10),
expand = c(0, 0),
labels=trans_format('log10',math_format(10^.x))) +
scale_x_log10(name = "The reach of a single training instance (sigma)",
breaks = c(0.00000001, 0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1),
expand = c(0, 0),
labels=trans_format('log10',math_format(10^.x))) +
theme(legend.background = element_rect(size=0.5, linetype="solid", color="black"),
legend.box.margin=margin(c(1,1,1,1)),
legend.text=element_text(size=5),
legend.title=element_text(size=5),
legend.position="bottom",
axis.title = element_text(size=6),
axis.text = element_text(size=6),
panel.border = element_rect(colour = "black", fill=NA, size=3),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.x=element_text(size = 6, colour='black'),
axis.text.y=element_text(size = 6, colour='black'))
# XGBoost: heatmap over learning rate (eta) x subsample ratio.
xgboost_data <- xgboost_all %>%
group_by(eta, subsample) %>%
summarise(mean_AUC = mean(ROC), sd_AUC = sd(ROC))
xgboost_plot <- ggplot(xgboost_data, aes(x = eta, y = subsample, fill = mean_AUC)) +
geom_tile() +
#coord_fixed(ratio = 5) +
scale_fill_gradient(name= "XGBoost mean cvAUROC",
low = "#FFFFFF",
high = "#012345") +
annotate("point", # best hp for xgboost - highest mean cv AUROC
x = 0.01,
y = 0.5,
colour = "#FC4E07",
size = 3,
shape = 8) +
scale_y_continuous(name="Ratio of the training data
(subsample)",
breaks = c(0.4, 0.5, 0.6, 0.7),
expand=c(0,0)) +
scale_x_log10(name = "Learning rate (eta)",
breaks = c(0.001, 0.01, 0.1, 1),
expand = c(0, 0),
labels=trans_format('log10',math_format(10^.x))) +
guides(fill=guide_colourbar(barwidth = 8, barheight = 1)) +
theme(axis.title = element_text(size=6),
axis.text = element_text(size=6),
panel.border = element_rect(colour = "black", fill=NA, size=3),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.x=element_text(size = 6, colour='black'),
axis.text.y=element_text(size = 6, colour='black'),
legend.background = element_rect(size=0.5, linetype="solid", color="black"),
legend.box.margin=margin(c(1,1,1,1)),
legend.text=element_text(size=5),
legend.title=element_text(size=5), legend.position="bottom")
# Arrange the four panels into a single labelled 2x2 figure.
non_linear_models <- plot_grid(dt_plot, rf_plot, rbf_plot, xgboost_plot, labels = c("A", "B", "C", "D"), ncol=2)
######################################################################
#--------------------- Save figure as .tiff ------------------------ #
######################################################################
ggsave("Figure_S3.tiff", plot = non_linear_models, device = 'tiff', path = 'submission', width = 6, height = 5, dpi=300)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2008
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1994
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1994
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Scholl-Becker/QBF-Hardness/arbiter-07-comp-error01-qbf-hardness-depth-4.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 794
c no.of clauses 2008
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1994
c
c QBFLIB/Miller-Scholl-Becker/QBF-Hardness/arbiter-07-comp-error01-qbf-hardness-depth-4.qdimacs 794 2008 E1 [113 114 115 116 117 118 119] 0 42 630 1994 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Scholl-Becker/QBF-Hardness/arbiter-07-comp-error01-qbf-hardness-depth-4/arbiter-07-comp-error01-qbf-hardness-depth-4.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 827 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2008
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1994
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1994
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Scholl-Becker/QBF-Hardness/arbiter-07-comp-error01-qbf-hardness-depth-4.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 794
c no.of clauses 2008
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1994
c
c QBFLIB/Miller-Scholl-Becker/QBF-Hardness/arbiter-07-comp-error01-qbf-hardness-depth-4.qdimacs 794 2008 E1 [113 114 115 116 117 118 119] 0 42 630 1994 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcmc_posterior.R
\name{mq_r0}
\alias{mq_r0}
\title{Calculates median and quantiles for basic reproduction number}
\usage{
mq_r0(data.gp, data.msm, times)
}
\arguments{
\item{data.gp}{matrix for basic reproduction number for general population}
\item{data.msm}{matrix for basic reproduction number for msm}
\item{times}{vector of time points}
}
\value{
A data frame containing the posterior median and the upper and lower quantile bounds.
}
\description{
Calculates median and quantiles for basic reproduction number
}
\details{
In both matrices (\code{data.gp} and \code{data.msm}), each column
represents a different replicate and each row represents a time point.
}
\examples{
#TO DO
}
| /man/mq_r0.Rd | no_license | thednainus/senegalHIVmodel | R | false | true | 737 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcmc_posterior.R
\name{mq_r0}
\alias{mq_r0}
\title{Calculates median and quantiles for basic reproduction number}
\usage{
mq_r0(data.gp, data.msm, times)
}
\arguments{
\item{data.gp}{matrix for basic reproduction number for general population}
\item{data.msm}{matrix for basic reproduction number for msm}
\item{times}{vector of time points}
}
\value{
A data frame containing the posterior median and the upper and lower quantile bounds.
}
\description{
Calculates median and quantiles for basic reproduction number
}
\details{
In both matrices (\code{data.gp} and \code{data.msm}), each column
represents a different replicate and each row represents a time point.
}
\examples{
#TO DO
}
|
library(protr)
### Name: getUniProt
### Title: Retrieve Protein Sequences from UniProt by Protein ID
### Aliases: getUniProt
### Keywords: UniProt
### ** Examples
## Not run:
##D # Network latency may slow down this example
##D # Only test this when your connection is fast enough
##D ids = c("P00750", "P00751", "P00752")
##D getUniProt(ids)
## End(Not run)
| /data/genthat_extracted_code/protr/examples/getUniProt.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 367 | r | library(protr)
### Name: getUniProt
### Title: Retrieve Protein Sequences from UniProt by Protein ID
### Aliases: getUniProt
### Keywords: UniProt
### ** Examples
## Not run:
##D # Network latency may slow down this example
##D # Only test this when your connection is fast enough
##D ids = c("P00750", "P00751", "P00752")
##D getUniProt(ids)
## End(Not run)
|
## Nonparametric estimation of the age-at-onset distribution under
## partially recalled (exactly recalled / interval / censored) data.
## Rsolnp provides solnp(), the constrained optimiser used below.
library(Rsolnp)
mydata <- read.csv("Data1.csv")
##############Data Format###############
#Column 1: Exact Age (Years)
#Column 2: mens onset days min (Days)
#Column 3: mens onset days max (Days)
#Column 4: Recall Type code
#Column 5: Recall Type
###################################################################################
names(mydata)
## Convert onset-time bounds from days to years.
mydata[, 2] <- (mydata[, 2]) / 365.25
mydata[, 3] <- (mydata[, 3]) / 365.25
fact <- factor(mydata[, 5])
## S: age at interview; T1/T2: lower/upper bound on the onset time.
S <- mydata[, 1]
T1 <- mydata[, 2]
T2 <- mydata[, 3]
## d: recall-type label per subject ("Not happened", "No recall",
## "Day recall", "Month recall", "Year recall").
ind <- mydata[, 5]
d <- ind
v <- factor(ind)
levels(v)
## k recall age intervals with cut points Xvec (years before interview).
k = 4
Xvec <- c(0, 3, 6, 9, 12)
## delta: event indicator (0 = onset had not happened by the interview).
delta <- ifelse(d == "Not happened", 0, 1)
## Empirical proportion of each recall type.
NHp <- (length(d[d == "Not happened"])) / (length(d))
NOp <- (length(d[d == "No recall"])) / (length(d))
EXp <- (length(d[d == "Day recall"])) / (length(d))
MOp <- (length(d[d == "Month recall"])) / (length(d))
YRp <- (length(d[d == "Year recall"])) / (length(d))
## n2 exactly recalled onset times: the NPMLE places point masses on them.
n2 <- length(d[d == "Day recall"])
Tex <- T1[d == "Day recall"]
T_mass <- sort(Tex, decreasing = FALSE)
n <- length(S)
## Parameter-vector lengths: r for the full model (12 recall
## probabilities + n2 masses), ro for the reduced model (4 + n2).
r <- 12 + n2
ro <- 4 + n2
####################Nonparametric Likelihood for Partial Recall#####################
## Negative log-likelihood of the nonparametric partial-recall model.
##
## lamda : parameter vector c(b0[1:4], b2[1:4], b3[1:4], q[1:n2]) where
##         b0/b2/b3 are the no-/month-/year-recall probabilities for the
##         four recall age intervals, b1 = 1 - (b0 + b2 + b3) the exact
##         (day) recall probability, and q the point masses placed on the
##         exactly recalled onset times.
## S     : age at interview per subject.
## T1,T2 : lower/upper bound on the onset time per subject.
## d     : recall-type label per subject ("Not happened", "Day recall",
##         "No recall", "Month recall", "Year recall").
## Xvec  : cut points (years before interview) defining the k intervals.
##
## Returns the negative log-likelihood, or the penalty 999999 when the
## implied day-recall probabilities b1 are not all strictly positive.
AMLEneglog.likelihood <- function(lamda, S, T1, T2, d, Xvec)
{
  b0 <- lamda[1:4]
  b2 <- lamda[5:8]
  b3 <- lamda[9:12]
  b1 <- 1 - (b0 + b2 + b3)
  ## Infeasible point: penalise.  (The original set loglike = -999999
  ## when min(b1) < 0 but then fell through without a return, yielding
  ## NULL; min(b1) == 0 was not handled at all.)
  if (min(b1) <= 0) {
    return(999999)
  }
  n2 <- length(d[d == "Day recall"])
  r <- 12 + n2
  q <- lamda[13:r]
  n <- length(S)
  T_mass <- sort(T1[d == "Day recall"], decreasing = FALSE)
  k <- length(b0)                    # number of recall age intervals
  ## a[i, j]: weight with which mass point j contributes to subject i.
  a <- matrix(0, nrow = n, ncol = n2)
  for (i in seq_len(n)) {
    for (j in seq_len(n2)) {
      if (d[i] == "Not happened") {
        ## Right censored: onset lies beyond the interview age.
        a[i, j] <- ifelse(T_mass[j] > S[i], 1, 0)
      }
      if (d[i] == "Day recall") {
        ## Exact recall: mass point must coincide with the reported time,
        ## weighted by the interval-specific day-recall probability b1.
        conl <- c()
        for (l in seq_len(k - 1)) {  # fixed: original `1:k - 1` started at l = 0
          conl[l] <- b1[l] * ifelse(T_mass[j] == T1[i], 1, 0) *
            ifelse(T1[i] > (S[i] - Xvec[l + 1]) && T1[i] <= (S[i] - Xvec[l]), 1, 0)
        }
        conl[k] <- b1[k] * ifelse(T_mass[j] == T1[i], 1, 0) *
          ifelse(T1[i] <= (S[i] - Xvec[k]), 1, 0)
        a[i, j] <- sum(conl)
      }
      if (d[i] == "No recall") {
        conl <- c()
        for (l in seq_len(k - 1)) {
          conl[l] <- b0[l] * ifelse(T_mass[j] > (S[i] - Xvec[l + 1]) &&
                                      T_mass[j] <= (S[i] - Xvec[l]), 1, 0)
        }
        conl[k] <- b0[k] * ifelse(T_mass[j] <= (S[i] - Xvec[k]), 1, 0)
        a[i, j] <- sum(conl)
      }
      if (d[i] == "Month recall") {
        ## Interval recall: mass point must fall in both the recall age
        ## interval and the reported [T1, T2] window.
        conl <- c()
        for (l in seq_len(k - 1)) {
          conl[l] <- b2[l] * ifelse(T_mass[j] > max(S[i] - Xvec[l + 1], T1[i]) &&
                                      T_mass[j] <= min(S[i] - Xvec[l], T2[i]), 1, 0)
        }
        conl[k] <- b2[k] * ifelse(T_mass[j] <= min(S[i] - Xvec[k], T2[i]) &&
                                    T_mass[j] > T1[i], 1, 0)
        a[i, j] <- sum(conl)
      }
      if (d[i] == "Year recall") {
        conl <- c()
        for (l in seq_len(k - 1)) {
          conl[l] <- b3[l] * ifelse(T_mass[j] > max(S[i] - Xvec[l + 1], T1[i]) &&
                                      T_mass[j] <= min(S[i] - Xvec[l], T2[i]), 1, 0)
        }
        conl[k] <- b3[k] * ifelse(T_mass[j] <= min(S[i] - Xvec[k], T2[i]) &&
                                    T_mass[j] > T1[i], 1, 0)
        a[i, j] <- sum(conl)
      }
    }
  }
  ## Likelihood contribution per subject: g[i] = sum_j a[i, j] * q[j].
  g <- as.vector(a %*% q)
  ## As in the original, zero/NA contributions are logged as 0 (dropped).
  l <- ifelse(!is.na(g) & g != 0, log(g), 0)
  l <- l[is.finite(l)]
  loglike <- sum(l)
  if (!is.finite(loglike)) {
    loglike <- -999999
  }
  return(-loglike)
}
####################Nonparametric Likelihood for Exact Recall Only#####################
## Negative log-likelihood of the reduced model in which month- and
## year-recall observations are pooled with "No recall".
##
## lamda1 : parameter vector c(b0[1:4], q[1:n2]); b0 is the probability
##          of not recalling exactly in each age interval, b1 = 1 - b0
##          the day-recall probability, q the point masses on the
##          exactly recalled onset times.
## S, T1, T2, d, Xvec : as in AMLEneglog.likelihood.
AMLEExneglog.likelihood <- function(lamda1, S, T1, T2, d, Xvec)
{
  b0 <- lamda1[1:4]
  b1 <- 1 - b0
  n2 <- length(d[d == "Day recall"])
  ro <- 4 + n2
  ## Fixed: the original indexed the *global* `lamda` here instead of
  ## the argument `lamda1`.
  q <- lamda1[5:ro]
  n <- length(S)
  T_mass <- sort(T1[d == "Day recall"], decreasing = FALSE)
  k <- length(b0)
  ## a[i, j]: weight with which mass point j contributes to subject i.
  a <- matrix(0, nrow = n, ncol = n2)
  for (i in seq_len(n)) {
    for (j in seq_len(n2)) {
      if (d[i] == "Not happened") {
        a[i, j] <- ifelse(T_mass[j] > S[i], 1, 0)
      }
      if (d[i] == "Day recall") {
        conl <- c()
        for (l in seq_len(k - 1)) {  # fixed: original `1:k - 1` started at l = 0
          conl[l] <- b1[l] * ifelse(T_mass[j] == T1[i], 1, 0) *
            ifelse(T1[i] > (S[i] - Xvec[l + 1]) && T1[i] <= (S[i] - Xvec[l]), 1, 0)
        }
        conl[k] <- b1[k] * ifelse(T_mass[j] == T1[i], 1, 0) *
          ifelse(T1[i] <= (S[i] - Xvec[k]), 1, 0)
        a[i, j] <- sum(conl)
      }
      if (d[i] == "No recall" || d[i] == "Month recall" || d[i] == "Year recall") {
        ## All non-exact recall types share the "no recall" probabilities b0;
        ## the reported [T1, T2] window is ignored in this reduced model.
        conl <- c()
        for (l in seq_len(k - 1)) {
          conl[l] <- b0[l] * ifelse(T_mass[j] > (S[i] - Xvec[l + 1]) &&
                                      T_mass[j] <= (S[i] - Xvec[l]), 1, 0)
        }
        conl[k] <- b0[k] * ifelse(T_mass[j] <= (S[i] - Xvec[k]), 1, 0)
        a[i, j] <- sum(conl)
      }
    }
  }
  g <- as.vector(a %*% q)
  l <- ifelse(!is.na(g) & g != 0, log(g), 0)
  l <- l[is.finite(l)]
  loglike <- sum(l)
  if (!is.finite(loglike)) {
    loglike <- -999999
  }
  ## Fixed: the original had a stray closing brace before this return,
  ## leaving the return statement outside the function (parse error).
  return(-loglike)
}
###########Setting initial Values##########
## Random simplex start for the point masses q.
## NOTE(review): no set.seed() call here, so runs are not reproducible.
u <- runif(n2, 0, 1)
qq <- u / sum(u)
## Starting values for the no-/month-/year-recall probabilities per age
## interval; b11 is the implied day-recall probability.
b00 <- c(.08, .39, .49, .62)
b22 <- c(.18, .25, .14, .13)
b33 <- c(.09, .18, .16, .17)
b11 <- (1 - (b00 + b22 + b33))
## Full-model and reduced-model starting parameter vectors.
lamda <- c(b00, b22, b33, qq)
lamda1 <- c(b00, qq)
r <- 12 + n2
ro <- 4 + n2
##########Setting constraints###############
## Equality constraint for solnp: the point masses (elements 13..r of
## the parameter vector) must sum to one.  `r` (= 12 + n2) is resolved
## from the enclosing (global) environment.
equal <- function(lamda) {
  masses <- lamda[13:r]
  sum(masses)
}
## Interval-wise totals b0 + b2 + b3 for the four recall age intervals;
## intended as an inequality constraint keeping each implied day-recall
## probability b1 = 1 - (b0 + b2 + b3) nonnegative.
inequal <- function(lamda) {
  lamda[1:4] + lamda[5:8] + lamda[9:12]
}
## Objective wrapper for solnp: full-model negative log-likelihood as a
## function of the parameter vector only; the data (S, T1, T2, d, Xvec)
## are taken from the global environment.
func1 <- function(lamda)
{
AMLEneglog.likelihood(lamda, S, T1, T2, d, Xvec)
}
## Equality constraint for the reduced model: the point masses occupy
## elements 5..ro of the parameter vector and must sum to one.  `ro`
## (= 4 + n2) is resolved from the enclosing (global) environment.
equal1 <- function(lamda1) {
  masses <- lamda1[5:ro]
  sum(masses)
}
## Objective wrapper for solnp: reduced-model negative log-likelihood as
## a function of the parameter vector only; the data (S, T1, T2, d, Xvec)
## are taken from the global environment.
func2 <- function(lamda1)
{
AMLEExneglog.likelihood(lamda1, S, T1, T2, d, Xvec)
}
#########Performing Optimization###############
## Full model: maximise the partial-recall likelihood subject to the
## point masses summing to one, with all parameters in [0, 1].
## NOTE(review): inequal() is defined above but never passed to solnp,
## so b0 + b2 + b3 <= 1 is only enforced via the likelihood's penalty --
## confirm this is intended.
m <- solnp(
lamda,
func1,
eqfun = equal,
eqB = 1,
LB = rep(0, r),
UB = rep(1, r)
)
## Reduced (exact-recall-only) model.
## NOTE(review): this assignment overwrites `n`, which previously held
## the sample size length(S).
n <- solnp(
lamda1,
func2,
eqfun = equal1,
eqB = 1,
LB = rep(0, ro),
UB = rep(1, ro)
)
## Fitted point masses for the full (qopt) and reduced (qopt1) models.
qopt <- m$par[13:r]
qopt1 <- n$par[5:ro]
Tkaplan <- T1[delta == 1]
Tkap <- T1
Tmat <- T_mass
#######Calculating the NP distribution function#################
## Nonparametric CDF of the fitted full model at time t: total mass q
## placed on the recalled onset times T_mass that do not exceed t.
## Reads T_mass, qopt and n2 from the global environment.
FN <- function(t)
{
  contrib <- numeric(n2)
  for (j in seq_len(n2)) {
    contrib[j] <- if (T_mass[j] <= t) qopt[j] else 0
  }
  sum(contrib)
}
## Vectorized wrapper so FN can be evaluated on a grid of time points.
FN1 <- Vectorize(FN)
## Nonparametric CDF of the reduced model at time t, built from the
## fitted masses qopt1.  Reads T_mass, qopt1 and n2 from the global
## environment.
FN0 <- function(t)
{
  below <- numeric(n2)
  for (idx in seq_len(n2)) {
    below[idx] <- if (T_mass[idx] <= t) qopt1[idx] else 0
  }
  sum(below)
}
## Vectorized wrapper for evaluation on a grid of time points.
FN2 <- Vectorize(FN0)
###================ For NP distribution function plot ===========
## Same full-model CDF as above, but iterating over the full length of
## T_mass rather than n2 (used when the fitted masses are reloaded for
## plotting).  Reads T_mass and qopt from the global environment.
FN <- function(t)
{
  acc <- 0
  for (j in seq_along(T_mass)) {
    if (T_mass[j] <= t) {
      acc <- acc + qopt[j]
    }
  }
  acc
}
## Vectorized wrapper for evaluation on a grid of time points.
FN1 <- Vectorize(FN)
## NOTE(review): `qdata` is not defined anywhere in this script; this
## presumably reloads previously saved reduced-model masses -- confirm
## the intended source before running.
qopt1 <- qdata$qopt1
## Reduced-model CDF over the full set of mass points (plot version).
FN0 <- function(t)
{
temp <- c()
for (j in 1:length(T_mass))
{
if (T_mass[j] <= t)
{
temp[j] = qopt1[j]
}
if (T_mass[j] > t)
{
temp[j] = 0
}
}
sum(temp)
}
FN2 <- Vectorize(FN0)
## Overlay the two fitted CDFs on a grid of ages 4..16 years:
## full model in green, reduced model in red.
t <- seq(4, 16, l = 100)
plot(t, FN1(t), type = "l", col = "green")
lines(t, FN2(t), type = "l", col = "red")
#################################################################
| /Real-data1-NP.R | no_license | rahulfrodo/PartialRecall | R | false | false | 8,882 | r | library(Rsolnp)
mydata <- read.csv("Data1.csv")
##############Data Format###############
#Column 1: Exact Age (Years)
#Column 2: mens onset days min (Days)
#Column 3: mens onset days max (Days)
#Column 4: Recall Type code
#Column 5: Recall Type
###################################################################################
names(mydata)
mydata[, 2] <- (mydata[, 2]) / 365.25
mydata[, 3] <- (mydata[, 3]) / 365.25
fact <- factor(mydata[, 5])
S <- mydata[, 1]
T1 <- mydata[, 2]
T2 <- mydata[, 3]
ind <- mydata[, 5]
d <- ind
v <- factor(ind)
levels(v)
k = 4
Xvec <- c(0, 3, 6, 9, 12)
delta <- ifelse(d == "Not happened", 0, 1)
NHp <- (length(d[d == "Not happened"])) / (length(d))
NOp <- (length(d[d == "No recall"])) / (length(d))
EXp <- (length(d[d == "Day recall"])) / (length(d))
MOp <- (length(d[d == "Month recall"])) / (length(d))
YRp <- (length(d[d == "Year recall"])) / (length(d))
n2 <- length(d[d == "Day recall"])
Tex <- T1[d == "Day recall"]
T_mass <- sort(Tex, decreasing = FALSE)
n <- length(S)
r <- 12 + n2
ro <- 4 + n2
####################Nonparametric Likelihood for Partial Recall#####################
AMLEneglog.likelihood <- function(lamda, S, T1, T2, d, Xvec)
{
b0 <- lamda[1:4]
b2 <- lamda[5:8]
b3 <- lamda[9:12]
b1 <- (1 - (b0 + b2 + b3))
if(min(b1)< 0)
{loglike = -999999}
if(min(b1)>0){
n2 <- length(d[d == "Day recall"])
r <- (12 + n2)
q <- lamda[13:r]
Tex <- T1[d == "Day recall"]
n <- length(S)
T_mass <- sort(Tex, decreasing = FALSE)
a <- mat.or.vec(nr = n, nc = n2)
for (i in 1:n)
{
for (j in 1:n2)
{
if (d[i] == "Not happened")
{
a[i, j] = ifelse(T_mass[j] > S[i], 1, 0)
}
if (d[i] == "Day recall")
{
k <- length(b1)
conl <- c()
for (l in 1:k - 1)
{
conl[l] = b1[l] * ifelse(T_mass[j] == T1[i], 1, 0) * ifelse(T1[i] > (S[i] -
Xvec[l + 1]) && T1[i] <= (S[i] - Xvec[l]), 1, 0)
}
conl[k] = b1[k] * ifelse(T_mass[j] == T1[i], 1, 0) * ifelse(T1[i] <=
(S[i] - Xvec[k]), 1, 0)
a[i, j] = sum(conl)
}
if (d[i] == "No recall")
{
k <- length(b0)
conl <- c()
for (l in 1:k - 1)
{
conl[l] = b0[l] * ifelse(T_mass[j] > (S[i] - Xvec[l + 1]) &&
T_mass[j] <= (S[i] - Xvec[l]), 1, 0)
}
conl[k] = b0[k] * ifelse(T_mass[j] <= (S[i] - Xvec[k]), 1, 0)
a[i, j] = sum(conl)
}
if (d[i] == "Month recall")
{
k <- length(b2)
conl <- c()
for (l in 1:k - 1)
{
conl[l] = b2[l] * ifelse(T_mass[j] > max((S[i] - Xvec[l + 1]), (T1[i])) &&
T_mass[j] <= min((S[i] - Xvec[l]), (T2[i])), 1, 0)
}
conl[k] = b2[k] * ifelse(T_mass[j] <= min((S[i] - Xvec[k]), (T2[i])) &&
T_mass[j] > (T1[i]) , 1, 0)
a[i, j] = sum(conl)
}
if (d[i] == "Year recall")
{
k <- length(b3)
conl <- c()
for (l in 1:k - 1)
{
conl[l] = b3[l] * ifelse(T_mass[j] > max((S[i] - Xvec[l + 1]), (T1[i])) &&
T_mass[j] <= min((S[i] - Xvec[l]), (T2[i])), 1, 0)
}
conl[k] = b3[k] * ifelse(T_mass[j] <= min((S[i] - Xvec[k]), (T2[i])) &&
T_mass[j] > (T1[i]) , 1, 0)
a[i, j] = sum(conl)
}
}
}
g <- c()
for (i in 1:n)
{
p <- c()
for (j in 1:n2)
{
p[j] <- a[i, j] * q[j]
}
g[i] = sum(p)
}
l <- c()
for (i in 1:n)
{
if (!is.na(g[i]) && g[i] != 0)
{
l[i] <- log(g[i])
} else{
l[i] <- 0
}
}
l <- l[is.finite(l)]
loglike <- sum(l)
if (any(is.finite(loglike)))
{
loglike = loglike
} else{
loglike <- -999999
}
return(-loglike)
}
####################Nonparametric Likelihood for Partial Recall#####################
AMLEExneglog.likelihood <- function(lamda1, S, T1, T2, d, Xvec)
{
b0 <- lamda1[1:4]
b1 <- (1 - b0)
n2 <- length(d[d == "Day recall"])
ro <- (4 + n2)
q <- lamda[5:ro]
Tex <- T1[d == "Day recall"]
n <- length(S)
T_mass <- sort(Tex, decreasing = FALSE)
a <- mat.or.vec(nr = n, nc = n2)
for (i in 1:n)
{
for (j in 1:n2)
{
if (d[i] == "Not happened")
{
a[i, j] = ifelse(T_mass[j] > S[i], 1, 0)
}
if (d[i] == "Day recall")
{
k <- length(b1)
conl <- c()
for (l in 1:k - 1)
{
conl[l] = b1[l] * ifelse(T_mass[j] == T1[i], 1, 0) * ifelse(T1[i] > (S[i] -
Xvec[l + 1]) && T1[i] <= (S[i] - Xvec[l]), 1, 0)
}
conl[k] = b1[k] * ifelse(T_mass[j] == T1[i], 1, 0) * ifelse(T1[i] <=
(S[i] - Xvec[k]), 1, 0)
a[i, j] = sum(conl)
}
if (d[i] == "No recall" ||
d[i] == "Month recall" || d[i] == "Year recall")
{
k <- length(b0)
conl <- c()
for (l in 1:k - 1)
{
conl[l] = b0[l] * ifelse(T_mass[j] > (S[i] - Xvec[l + 1]) &&
T_mass[j] <= (S[i] - Xvec[l]), 1, 0)
}
conl[k] = b0[k] * ifelse(T_mass[j] <= (S[i] - Xvec[k]), 1, 0)
a[i, j] = sum(conl)
}
}
}
g <- c()
for (i in 1:n)
{
p <- c()
for (j in 1:n2)
{
p[j] <- a[i, j] * q[j]
}
g[i] = sum(p)
}
l <- c()
for (i in 1:n)
{
if (!is.na(g[i]) && g[i] != 0)
{
l[i] <- log(g[i])
} else{
l[i] <- 0
}
}
l <- l[is.finite(l)]
loglike <- sum(l)
if (any(is.finite(loglike)))
{
loglike = loglike
} else{
loglike <- -999999
}
}
return(-loglike)
}
###########Setting initial Values##########
u <- runif(n2, 0, 1)
qq <- u / sum(u)
b00 <- c(.08, .39, .49, .62)
b22 <- c(.18, .25, .14, .13)
b33 <- c(.09, .18, .16, .17)
b11 <- (1 - (b00 + b22 + b33))
lamda <- c(b00, b22, b33, qq)
lamda1 <- c(b00, qq)
r <- 12 + n2
ro <- 4 + n2
##########Setting constraints###############
equal <- function(lamda) {
sum(lamda[13:r])
}
inequal <- function(lamda) {
s1 <- lamda[1] + lamda[5] + lamda[9]
s2 <- lamda[2] + lamda[6] + lamda[10]
s3 <- lamda[3] + lamda[7] + lamda[11]
s4 <- lamda[4] + lamda[8] + lamda[12]
c(s1, s2, s3, s4)
}
func1 <- function(lamda)
{
AMLEneglog.likelihood(lamda, S, T1, T2, d, Xvec)
}
equal1 <- function(lamda1) {
sum(lamda1[5:ro])
}
func2 <- function(lamda1)
{
AMLEExneglog.likelihood(lamda1, S, T1, T2, d, Xvec)
}
#########Performing Optimization###############
m <- solnp(
lamda,
func1,
eqfun = equal,
eqB = 1,
LB = rep(0, r),
UB = rep(1, r)
)
n <- solnp(
lamda1,
func2,
eqfun = equal1,
eqB = 1,
LB = rep(0, ro),
UB = rep(1, ro)
)
qopt <- m$par[13:r]
qopt1 <- n$par[5:ro]
Tkaplan <- T1[delta == 1]
Tkap <- T1
Tmat <- T_mass
#######Calculating the NP distribution function#################
FN <- function(t)
{
temp <- c()
for (j in 1:n2)
{
if (T_mass[j] <= t)
{
temp[j] = qopt[j]
}
if (T_mass[j] > t)
{
temp[j] = 0
}
}
sum(temp)
}
FN1 <- Vectorize(FN)
FN0 <- function(t)
{
temp <- c()
for (j in 1:n2)
{
if (T_mass[j] <= t)
{
temp[j] = qopt1[j]
}
if (T_mass[j] > t)
{
temp[j] = 0
}
}
sum(temp)
}
FN2 <- Vectorize(FN0)
###================ For NP distribution function plot ===========
FN <- function(t)
{
temp <- c()
for (j in 1:length(T_mass))
{
if (T_mass[j] <= t)
{
temp[j] = qopt[j]
}
if (T_mass[j] > t)
{
temp[j] = 0
}
}
sum(temp)
}
FN1 <- Vectorize(FN)
qopt1 <- qdata$qopt1
FN0 <- function(t)
{
temp <- c()
for (j in 1:length(T_mass))
{
if (T_mass[j] <= t)
{
temp[j] = qopt1[j]
}
if (T_mass[j] > t)
{
temp[j] = 0
}
}
sum(temp)
}
FN2 <- Vectorize(FN0)
t <- seq(4, 16, l = 100)
plot(t, FN1(t), type = "l", col = "green")
lines(t, FN2(t), type = "l", col = "red")
#################################################################
|
## Recursive feature elimination (caret) on the GIM dataset.
# ensure the results are repeatable
set.seed(7)
# load the library
library(mlbench)
library(caret)
library(data.table)
library(mlr)
library(h2o)
library(readr)
library(MLmetrics)
## NOTE(review): readr is loaded twice.
library(readr)
rf.GIM_Dataset <- read_csv("Desktop/PGDBA/1st Semester/TVS credit/original dataset/5dbabd3a3e7e8_GIM_Dataset/GIM_Dataset.csv")
names(rf.GIM_Dataset)
## V6 becomes the leading two digits of the 6-digit PIN code.
rf.GIM_Dataset$V6 <- floor(rf.GIM_Dataset$V6/10000)
## Map PIN-code prefixes to state abbreviations.
## NOTE(review): the first assignment coerces V6 to character, so the
## later >=/<= comparisons are lexicographic string comparisons; this
## only works because all remaining codes are two-digit strings.
## NOTE(review): `V6==11 | V6==11` repeats the same condition -- confirm
## the intended second value.
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6==11 | rf.GIM_Dataset$V6==11]="DL"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6==12 | rf.GIM_Dataset$V6==13]="Ha"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=14 & rf.GIM_Dataset$V6<=16]="PU"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6==17]="HI"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=18 & rf.GIM_Dataset$V6<=19]="JK"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=20 & rf.GIM_Dataset$V6<=28]="UP"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=30 & rf.GIM_Dataset$V6<=34]="RJ"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=36 & rf.GIM_Dataset$V6<=39]="GJ"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=40 & rf.GIM_Dataset$V6<=44]="MH"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=45 & rf.GIM_Dataset$V6<=49]="MPC"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=50 & rf.GIM_Dataset$V6<=53]="APT"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=56 & rf.GIM_Dataset$V6<=59]="KT"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=60 & rf.GIM_Dataset$V6<=64]="TN"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=67 & rf.GIM_Dataset$V6<=69]="KL"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=70 & rf.GIM_Dataset$V6<=74]="WB"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=75 & rf.GIM_Dataset$V6<=77]="OR"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=78 & rf.GIM_Dataset$V6<=78]="AS"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=79 & rf.GIM_Dataset$V6<=79]="NE"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=80 & rf.GIM_Dataset$V6<=85]="BJ"
## NOTE(review): 90-99 maps to "JK", same as 18-19 -- confirm intended.
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=90 & rf.GIM_Dataset$V6<=99]="JK"
## Encode education (V18) as an ordered factor, then as integer ranks.
rf.GIM_Dataset$V18 <- as.character(rf.GIM_Dataset$V18)
rf.GIM_Dataset$V18 <- factor(rf.GIM_Dataset$V18,levels= c("OTHERS","12TH", "SSC", "UNDER GRADUATE", "GRADUATE", "POST-GRADUATE" ,"PROFESSIONAL"),labels =1:7,ordered=T)
rf.GIM_Dataset$V17 <- as.factor(rf.GIM_Dataset$V17)
rf.GIM_Dataset$V17 <- as.numeric(rf.GIM_Dataset$V17)
rf.GIM_Dataset$V18 <- as.numeric(rf.GIM_Dataset$V18)
## Drop the ID column and create dummy columns for the categoricals.
rf.GIM_Dataset1 <- rf.GIM_Dataset[,-1]
rf.GIM_Dataset1 <- fastDummies::dummy_cols(rf.GIM_Dataset[,-1])
## Sanitise generated column names (spaces, slashes, hyphens).
colnames(rf.GIM_Dataset1) <- gsub(" ","_",colnames(rf.GIM_Dataset1))
colnames(rf.GIM_Dataset1) <- gsub("/","_",colnames(rf.GIM_Dataset1))
colnames(rf.GIM_Dataset1) <- gsub("-","_",colnames(rf.GIM_Dataset1))
write_csv(rf.GIM_Dataset1,"rf.GIM_Dataset1.csv")
## Keep only numeric columns; V27 is used as the target below.
rf.GIM_Dataset1 <- rf.GIM_Dataset1[,sapply(rf.GIM_Dataset1, is.numeric)]
rf.GIM_Dataset1$V27 <- as.factor(rf.GIM_Dataset1$V27)
rf.GIM_Dataset1$V27 <- as.numeric(rf.GIM_Dataset1$V27)
labels <- rf.GIM_Dataset1["V27"]
df_train <- rf.GIM_Dataset1[-grep('V27',colnames(rf.GIM_Dataset1))]
# define the control using a random forest selection function
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
# run the RFE algorithm
## NOTE(review): RFE over subset sizes 1..45 with 10-fold CV is
## computationally heavy on a dataset of this size.
results <- rfe(as.matrix(df_train), as.matrix(labels), sizes=c(1:45), rfeControl=control)
# summarize the results
print(results)
# list the chosen features
predictors(results)
# plot the results
plot(results, type=c("g", "o"))
set.seed(7)
# load the library
library(mlbench)
library(caret)
library(data.table)
library(mlr)
library(h2o)
library(readr)
library(MLmetrics)
library(readr)
rf.GIM_Dataset <- read_csv("Desktop/PGDBA/1st Semester/TVS credit/original dataset/5dbabd3a3e7e8_GIM_Dataset/GIM_Dataset.csv")
names(rf.GIM_Dataset)
rf.GIM_Dataset$V6 <- floor(rf.GIM_Dataset$V6/10000)
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6==11 | rf.GIM_Dataset$V6==11]="DL"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6==12 | rf.GIM_Dataset$V6==13]="Ha"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=14 & rf.GIM_Dataset$V6<=16]="PU"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6==17]="HI"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=18 & rf.GIM_Dataset$V6<=19]="JK"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=20 & rf.GIM_Dataset$V6<=28]="UP"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=30 & rf.GIM_Dataset$V6<=34]="RJ"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=36 & rf.GIM_Dataset$V6<=39]="GJ"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=40 & rf.GIM_Dataset$V6<=44]="MH"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=45 & rf.GIM_Dataset$V6<=49]="MPC"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=50 & rf.GIM_Dataset$V6<=53]="APT"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=56 & rf.GIM_Dataset$V6<=59]="KT"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=60 & rf.GIM_Dataset$V6<=64]="TN"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=67 & rf.GIM_Dataset$V6<=69]="KL"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=70 & rf.GIM_Dataset$V6<=74]="WB"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=75 & rf.GIM_Dataset$V6<=77]="OR"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=78 & rf.GIM_Dataset$V6<=78]="AS"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=79 & rf.GIM_Dataset$V6<=79]="NE"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=80 & rf.GIM_Dataset$V6<=85]="BJ"
rf.GIM_Dataset$V6[rf.GIM_Dataset$V6>=90 & rf.GIM_Dataset$V6<=99]="JK"
rf.GIM_Dataset$V18 <- as.character(rf.GIM_Dataset$V18)
rf.GIM_Dataset$V18 <- factor(rf.GIM_Dataset$V18,levels= c("OTHERS","12TH", "SSC", "UNDER GRADUATE", "GRADUATE", "POST-GRADUATE" ,"PROFESSIONAL"),labels =1:7,ordered=T)
rf.GIM_Dataset$V17 <- as.factor(rf.GIM_Dataset$V17)
rf.GIM_Dataset$V17 <- as.numeric(rf.GIM_Dataset$V17)
rf.GIM_Dataset$V18 <- as.numeric(rf.GIM_Dataset$V18)
rf.GIM_Dataset1 <- rf.GIM_Dataset[,-1]
rf.GIM_Dataset1 <- fastDummies::dummy_cols(rf.GIM_Dataset[,-1])
colnames(rf.GIM_Dataset1) <- gsub(" ","_",colnames(rf.GIM_Dataset1))
colnames(rf.GIM_Dataset1) <- gsub("/","_",colnames(rf.GIM_Dataset1))
colnames(rf.GIM_Dataset1) <- gsub("-","_",colnames(rf.GIM_Dataset1))
write_csv(rf.GIM_Dataset1,"rf.GIM_Dataset1.csv")
rf.GIM_Dataset1 <- rf.GIM_Dataset1[,sapply(rf.GIM_Dataset1, is.numeric)]
rf.GIM_Dataset1$V27 <- as.factor(rf.GIM_Dataset1$V27)
rf.GIM_Dataset1$V27 <- as.numeric(rf.GIM_Dataset1$V27)
labels <- rf.GIM_Dataset1["V27"]
df_train <- rf.GIM_Dataset1[-grep('V27',colnames(rf.GIM_Dataset1))]
# define the control using a random forest selection function
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
# run the RFE algorithm
results <- rfe(as.matrix(df_train), as.matrix(labels), sizes=c(1:45), rfeControl=control)
# summarize the results
print(results)
# list the chosen features
predictors(results)
# plot the results
plot(results, type=c("g", "o")) |
## Download the UCI household power consumption data into ./data and
## draw the energy sub-metering plot as plot3.png.
## Create a folder named "data" in the working directory, download the
## archive, and unzip it.
if(!file.exists("data"))
{dir.create("data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
dest <- "./data/Electricpower.zip"
download.file(fileUrl, dest)
unzip(dest, exdir = "./data")
## Read the data and save it into object x ("?" is treated as NA).
x <- read.table("./data/household_power_consumption.txt", sep = ";",
header = TRUE, na.strings= "?")
## Subset the data: Only Measurements with Date = 1/2/2007 or 2/2/2007
data <- x[x$Date %in% c("1/2/2007","2/2/2007") ,]
## Add a new date-variable as a combination from "Date" and "Time"
data$datetime <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## Change the language/locale to "C" so date axis labels come out in
## English (necessary for Step 2 - 4)
Sys.setenv(LANG = "en")
Sys.setlocale("LC_TIME", "C")
## Create and save Plot 3 as .png (480x480)
png("plot3.png", width=480, height=480)
## NOTE(review): the two locale calls below repeat the ones above and
## are redundant.
Sys.setenv(LANG = "en")
Sys.setlocale("LC_TIME", "C")
## Draw the three sub-metering series on one plot and close the device.
with(data, plot(datetime, Sub_metering_1, type="l", xlab = "",
ylab="Energy sub metering"))
with(data, points(datetime, Sub_metering_2, col = "red", type = "l"))
with(data, points(datetime, Sub_metering_3, col = "blue", type = "l"))
legend("topright", lwd = 1 , col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | Gwx1/ExData_Plotting1 | R | false | false | 1,398 | r | ## Create a folder named "data in WD, Download Files, unzip them
if(!file.exists("data"))
{dir.create("data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
dest <- "./data/Electricpower.zip"
download.file(fileUrl, dest)
unzip(dest, exdir = "./data")
## Read the data and save it into object x
x <- read.table("./data/household_power_consumption.txt", sep = ";",
header = TRUE, na.strings= "?")
## Subset the data: Only Measurements with Date = 1/2/2007 or 2/2/2007
data <- x[x$Date %in% c("1/2/2007","2/2/2007") ,]
## Add a new date-variable as a combination from "Date" and "Time"
data$datetime <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## Change the language to "English" (necessary for Step 2 - 4)
Sys.setenv(LANG = "en")
Sys.setlocale("LC_TIME", "C")
## Create and save Plot 3 as .png (480x480)
png("plot3.png", width=480, height=480)
Sys.setenv(LANG = "en")
Sys.setlocale("LC_TIME", "C")
with(data, plot(datetime, Sub_metering_1, type="l", xlab = "",
ylab="Energy sub metering"))
with(data, points(datetime, Sub_metering_2, col = "red", type = "l"))
with(data, points(datetime, Sub_metering_3, col = "blue", type = "l"))
legend("topright", lwd = 1 , col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{ram}
\alias{ram}
\title{AWS Resource Access Manager}
\usage{
ram(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Use AWS Resource Access Manager to share AWS resources between AWS
accounts. To share a resource, you create a resource share, associate
the resource with the resource share, and specify the principals that
can access the resources associated with the resource share. The
following principals are supported: AWS accounts, organizational units
(OU) from AWS Organizations, and organizations from AWS Organizations.
For more information, see the \href{https://docs.aws.amazon.com/ram/latest/userguide/}{AWS Resource Access Manager User Guide}.
}
\section{Service syntax}{
\preformatted{svc <- ram(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=ram_accept_resource_share_invitation]{accept_resource_share_invitation} \tab Accepts an invitation to a resource share from another AWS account \cr
\link[=ram_associate_resource_share]{associate_resource_share} \tab Associates the specified resource share with the specified principals and resources \cr
\link[=ram_associate_resource_share_permission]{associate_resource_share_permission} \tab Associates a permission with a resource share \cr
\link[=ram_create_resource_share]{create_resource_share} \tab Creates a resource share \cr
\link[=ram_delete_resource_share]{delete_resource_share} \tab Deletes the specified resource share \cr
\link[=ram_disassociate_resource_share]{disassociate_resource_share} \tab Disassociates the specified principals or resources from the specified resource share \cr
\link[=ram_disassociate_resource_share_permission]{disassociate_resource_share_permission} \tab Disassociates an AWS RAM permission from a resource share \cr
\link[=ram_enable_sharing_with_aws_organization]{enable_sharing_with_aws_organization} \tab Enables resource sharing within your AWS Organization \cr
\link[=ram_get_permission]{get_permission} \tab Gets the contents of an AWS RAM permission in JSON format \cr
\link[=ram_get_resource_policies]{get_resource_policies} \tab Gets the policies for the specified resources that you own and have shared \cr
\link[=ram_get_resource_share_associations]{get_resource_share_associations} \tab Gets the resources or principals for the resource shares that you own \cr
\link[=ram_get_resource_share_invitations]{get_resource_share_invitations} \tab Gets the invitations for resource sharing that you've received \cr
\link[=ram_get_resource_shares]{get_resource_shares} \tab Gets the resource shares that you own or the resource shares that are shared with you \cr
\link[=ram_list_pending_invitation_resources]{list_pending_invitation_resources} \tab Lists the resources in a resource share that is shared with you but that the invitation is still pending for \cr
\link[=ram_list_permissions]{list_permissions} \tab Lists the AWS RAM permissions \cr
\link[=ram_list_principals]{list_principals} \tab Lists the principals that you have shared resources with or that have shared resources with you \cr
\link[=ram_list_resources]{list_resources} \tab Lists the resources that you added to a resource shares or the resources that are shared with you \cr
\link[=ram_list_resource_share_permissions]{list_resource_share_permissions} \tab Lists the AWS RAM permissions that are associated with a resource share \cr
\link[=ram_promote_resource_share_created_from_policy]{promote_resource_share_created_from_policy} \tab Resource shares that were created by attaching a policy to a resource are visible only to the resource share owner, and the resource share cannot be modified in AWS RAM\cr
\link[=ram_reject_resource_share_invitation]{reject_resource_share_invitation} \tab Rejects an invitation to a resource share from another AWS account \cr
\link[=ram_tag_resource]{tag_resource} \tab Adds the specified tags to the specified resource share that you own \cr
\link[=ram_untag_resource]{untag_resource} \tab Removes the specified tags from the specified resource share that you own \cr
\link[=ram_update_resource_share]{update_resource_share} \tab Updates the specified resource share that you own
}
}
\examples{
\dontrun{
svc <- ram()
svc$accept_resource_share_invitation(
Foo = 123
)
}
}
| /cran/paws/man/ram.Rd | permissive | johnnytommy/paws | R | false | true | 4,625 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{ram}
\alias{ram}
\title{AWS Resource Access Manager}
\usage{
ram(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
Use AWS Resource Access Manager to share AWS resources between AWS
accounts. To share a resource, you create a resource share, associate
the resource with the resource share, and specify the principals that
can access the resources associated with the resource share. The
following principals are supported: AWS accounts, organizational units
(OU) from AWS Organizations, and organizations from AWS Organizations.
For more information, see the \href{https://docs.aws.amazon.com/ram/latest/userguide/}{AWS Resource Access Manager User Guide}.
}
\section{Service syntax}{
\preformatted{svc <- ram(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=ram_accept_resource_share_invitation]{accept_resource_share_invitation} \tab Accepts an invitation to a resource share from another AWS account \cr
\link[=ram_associate_resource_share]{associate_resource_share} \tab Associates the specified resource share with the specified principals and resources \cr
\link[=ram_associate_resource_share_permission]{associate_resource_share_permission} \tab Associates a permission with a resource share \cr
\link[=ram_create_resource_share]{create_resource_share} \tab Creates a resource share \cr
\link[=ram_delete_resource_share]{delete_resource_share} \tab Deletes the specified resource share \cr
\link[=ram_disassociate_resource_share]{disassociate_resource_share} \tab Disassociates the specified principals or resources from the specified resource share \cr
\link[=ram_disassociate_resource_share_permission]{disassociate_resource_share_permission} \tab Disassociates an AWS RAM permission from a resource share \cr
\link[=ram_enable_sharing_with_aws_organization]{enable_sharing_with_aws_organization} \tab Enables resource sharing within your AWS Organization \cr
\link[=ram_get_permission]{get_permission} \tab Gets the contents of an AWS RAM permission in JSON format \cr
\link[=ram_get_resource_policies]{get_resource_policies} \tab Gets the policies for the specified resources that you own and have shared \cr
\link[=ram_get_resource_share_associations]{get_resource_share_associations} \tab Gets the resources or principals for the resource shares that you own \cr
\link[=ram_get_resource_share_invitations]{get_resource_share_invitations} \tab Gets the invitations for resource sharing that you've received \cr
\link[=ram_get_resource_shares]{get_resource_shares} \tab Gets the resource shares that you own or the resource shares that are shared with you \cr
\link[=ram_list_pending_invitation_resources]{list_pending_invitation_resources} \tab Lists the resources in a resource share that is shared with you but that the invitation is still pending for \cr
\link[=ram_list_permissions]{list_permissions} \tab Lists the AWS RAM permissions \cr
\link[=ram_list_principals]{list_principals} \tab Lists the principals that you have shared resources with or that have shared resources with you \cr
\link[=ram_list_resources]{list_resources} \tab Lists the resources that you added to a resource shares or the resources that are shared with you \cr
\link[=ram_list_resource_share_permissions]{list_resource_share_permissions} \tab Lists the AWS RAM permissions that are associated with a resource share \cr
\link[=ram_promote_resource_share_created_from_policy]{promote_resource_share_created_from_policy} \tab Resource shares that were created by attaching a policy to a resource are visible only to the resource share owner, and the resource share cannot be modified in AWS RAM\cr
\link[=ram_reject_resource_share_invitation]{reject_resource_share_invitation} \tab Rejects an invitation to a resource share from another AWS account \cr
\link[=ram_tag_resource]{tag_resource} \tab Adds the specified tags to the specified resource share that you own \cr
\link[=ram_untag_resource]{untag_resource} \tab Removes the specified tags from the specified resource share that you own \cr
\link[=ram_update_resource_share]{update_resource_share} \tab Updates the specified resource share that you own
}
}
\examples{
\dontrun{
svc <- ram()
svc$accept_resource_share_invitation(
Foo = 123
)
}
}
|
testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 1.52478221711266e+245, 6.36967296041789e+178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result) | /myTAI/inst/testfiles/cpp_TAI/AFL_cpp_TAI/cpp_TAI_valgrind_files/1615763513-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 334 | r | testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 1.52478221711266e+245, 6.36967296041789e+178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result) |
# Association rule mining on the built-in Groceries transaction data.
# Two apriori passes at different confidence thresholds; each rule set is
# ordered by lift (descending) before printing.
library(arules)
data(Groceries)
# Stricter rules: at least 50% confidence and 1% support
fit <- sort(apriori(Groceries, parameter = list(support = 0.01, confidence = 0.5)), by = "lift")
inspect(fit)
# Looser rules: confidence threshold relaxed to 20%
fit2 <- sort(apriori(Groceries, parameter = list(support = 0.01, confidence = 0.2)), by = "lift")
inspect(fit2)
# Sample of the top output:
# lhs rhs support confidence lift
#[1] {citrus fruit,root vegetables} => {other vegetables} 0.01037112 0.5862069 3.029608
#[2] {tropical fruit,root vegetables} => {other vegetables} 0.01230300 0.5845411 3.020999
#count - number of transactions in which the rule has occurred.
| /associationRules.R | no_license | alexmillea/R-Association-Rules | R | false | false | 679 | r | #groceries - association rules
library(arules)
data(Groceries)
fit <- apriori(Groceries, parameter = list(confidence = 0.5, support = 0.01))
fit <- sort(fit, by = "lift")
inspect(fit)
fit2 <- apriori(Groceries, parameter = list(confidence = 0.2, support = 0.01))
fit2 <- sort(fit2, by = "lift")
inspect(fit2)
# lhs rhs support confidence lift
#[1] {citrus fruit,root vegetables} => {other vegetables} 0.01037112 0.5862069 3.029608
#[2] {tropical fruit,root vegetables} => {other vegetables} 0.01230300 0.5845411 3.020999
#count - num of times the transactions have occured.
|
# Build the Wisconsin voter analysis file: load the L2 voter file, attach
# 2018 general-election turnout, estimate race via wru, join tract-level
# census covariates and uncontested-race flags, then save complete cases.
# Read demographic fields from the L2 Wisconsin voter file, dropping rows
# with no census tract (tract '0'). read.csv.sql filters in SQLite before
# the data reaches R, keeping memory use down on this large file.
wi <- read.csv.sql("D:/national/04-11-2019-delivery/voter-files-by-state/VM2--WI--2019-02-01/VM2--WI--2019-02-01-DEMOGRAPHIC.tab",
                   sep = "\t",
                   sql = "select LALVOTERID, Voters_Active,
                   Voters_StateVoterID, Voters_CountyVoterID,
                   Voters_LastName,
                   Residence_Addresses_CensusTract,
                   Residence_Addresses_CensusBlockGroup,
                   Residence_Addresses_CensusBlock,
                   Residence_Addresses_Latitude,
                   Residence_Addresses_Longitude,
                   Voters_Gender, Voters_Age, Voters_BirthDate,
                   DateConfidence_Description, Parties_Description,
                   EthnicGroups_EthnicGroup1Desc, Voters_CalculatedRegDate,
                   Voters_OfficialRegDate, US_Congressional_District,
                   State_Senate_District, State_House_District,
                   County, Voters_FIPS
                   from file where Residence_Addresses_CensusTract != '0'")
# Voters with a ballot record (absentee or in person) in the 2018 general.
wi_history <- read.csv.sql("D:/national/04-11-2019-delivery/voter-files-by-state/VM2--WI--2019-02-01/VM2--WI--2019-02-01-VOTEHISTORY.tab",
                           sep = "\t",
                           sql = "select LALVOTERID from file
                           where BallotType_General_2018_11_06 in ('Absentee', 'Poll Vote')")
# Turnout flag: voter id appears in the filtered vote-history extract.
wi$voted_general <- wi$LALVOTERID %in% wi_history$LALVOTERID
rm(wi_history)
####
wi <- rename(wi, cd = US_Congressional_District, assembly = State_House_District)
## use wru to come up with race estimates
# wi_census <- get_census_data(key = api_key, state = "WI", census.geo = "tract")
# saveRDS(wi_census, "./temp/wru_census_wi.RDS")
wi_census <- readRDS("./temp/wru_census_wi.RDS")
# Build the 11-digit census tract GEOID: state FIPS "55" + 3-digit county
# FIPS + 6-digit tract, then the pieces wru::predict_race expects.
wi <- wi %>%
  mutate(tract_full = paste0("55", str_pad(as.character(Voters_FIPS), side = "left",
                                           width = 3, pad = "0"),
                             str_pad(as.character(Residence_Addresses_CensusTract), side = "left",
                                     width = 6, pad = "0")),
         state_code = substring(tract_full, 1, 2),
         county = substring(tract_full, 3, 5),
         tract = substring(tract_full, 6, 11),
         state = "WI") %>%
  rename(surname = Voters_LastName) %>%
  # Strip punctuation/spaces so surnames match the census surname list.
  mutate(surname = str_replace_all(surname, "[^[:alnum:]]", ""))
# Adds pred.whi / pred.bla / pred.his etc. posterior race probabilities.
wi <- predict_race(wi, census.geo = "tract", census.key = api_key, retry = 999, census.data = wi_census)
### pull down census data from tidycensus
wi_census_data <- get_basic_census_stats(geo = "tract", state = "WI", year = 2017)
wi <- left_join(wi, wi_census_data, by = c("tract_full" = "GEOID"))
### pull in uncontested races
uc <- fread("./raw_data/wi_assembly_uncontested.csv") %>%
  select(assembly = district, uncontested)
wi <- left_join(wi, uc, by = "assembly")
## clean up, only keep complete cases for matching procedure
# NOTE(review): mutate_at()/funs() are deprecated in current dplyr; kept
# as-is here since the script pins older tidyverse behavior.
# yob takes the last four characters of Voters_BirthDate as the birth year.
wi <- wi %>%
  mutate_at(vars(voted_general), funs(ifelse(is.na(.), 0, .))) %>%
  mutate(gender = Voters_Gender == "F",
         dem = Parties_Description == "Democratic",
         rep = Parties_Description == "Republican",
         yob = as.integer(substring(Voters_BirthDate, nchar(Voters_BirthDate) - 3))) %>%
  select(uncontested, LALVOTERID, voted_general, gender, dem, rep, median_age,
         pred.whi, pred.bla, pred.his, median_income, some_college, assembly) %>%
  filter(!is.na(LALVOTERID))
wi <- wi[complete.cases(wi), ]
saveRDS(wi, "./temp/wisconsin_race_census.RDS")
| /code/archive/wisconsin/01_code_wi.R | no_license | ktmorris/uncontested | R | false | false | 3,514 | r | wi <- read.csv.sql("D:/national/04-11-2019-delivery/voter-files-by-state/VM2--WI--2019-02-01/VM2--WI--2019-02-01-DEMOGRAPHIC.tab",
sep = "\t",
sql = "select LALVOTERID, Voters_Active,
Voters_StateVoterID, Voters_CountyVoterID,
Voters_LastName,
Residence_Addresses_CensusTract,
Residence_Addresses_CensusBlockGroup,
Residence_Addresses_CensusBlock,
Residence_Addresses_Latitude,
Residence_Addresses_Longitude,
Voters_Gender, Voters_Age, Voters_BirthDate,
DateConfidence_Description, Parties_Description,
EthnicGroups_EthnicGroup1Desc, Voters_CalculatedRegDate,
Voters_OfficialRegDate, US_Congressional_District,
State_Senate_District, State_House_District,
County, Voters_FIPS
from file where Residence_Addresses_CensusTract != '0'")
wi_history <- read.csv.sql("D:/national/04-11-2019-delivery/voter-files-by-state/VM2--WI--2019-02-01/VM2--WI--2019-02-01-VOTEHISTORY.tab",
sep = "\t",
sql = "select LALVOTERID from file
where BallotType_General_2018_11_06 in ('Absentee', 'Poll Vote')")
wi$voted_general <- wi$LALVOTERID %in% wi_history$LALVOTERID
rm(wi_history)
####
wi <- rename(wi, cd = US_Congressional_District, assembly = State_House_District)
## use wru to come up with race estimates
# wi_census <- get_census_data(key = api_key, state = "WI", census.geo = "tract")
# saveRDS(wi_census, "./temp/wru_census_wi.RDS")
wi_census <- readRDS("./temp/wru_census_wi.RDS")
wi <- wi %>%
mutate(tract_full = paste0("55", str_pad(as.character(Voters_FIPS), side = "left",
width = 3, pad = "0"),
str_pad(as.character(Residence_Addresses_CensusTract), side = "left",
width = 6, pad = "0")),
state_code = substring(tract_full, 1, 2),
county = substring(tract_full, 3, 5),
tract = substring(tract_full, 6, 11),
state = "WI") %>%
rename(surname = Voters_LastName) %>%
mutate(surname = str_replace_all(surname, "[^[:alnum:]]", ""))
wi <- predict_race(wi, census.geo = "tract", census.key = api_key, retry = 999, census.data = wi_census)
### pull down census data from tidycensus
wi_census_data <- get_basic_census_stats(geo = "tract", state = "WI", year = 2017)
wi <- left_join(wi, wi_census_data, by = c("tract_full" = "GEOID"))
### pull in uncontested races
uc <- fread("./raw_data/wi_assembly_uncontested.csv") %>%
select(assembly = district, uncontested)
wi <- left_join(wi, uc, by = "assembly")
## clean up, only keep complete cases for matching procedure
wi <- wi %>%
mutate_at(vars(voted_general), funs(ifelse(is.na(.), 0, .))) %>%
mutate(gender = Voters_Gender == "F",
dem = Parties_Description == "Democratic",
rep = Parties_Description == "Republican",
yob = as.integer(substring(Voters_BirthDate, nchar(Voters_BirthDate) - 3))) %>%
select(uncontested, LALVOTERID, voted_general, gender, dem, rep, median_age,
pred.whi, pred.bla, pred.his, median_income, some_college, assembly) %>%
filter(!is.na(LALVOTERID))
wi <- wi[complete.cases(wi), ]
saveRDS(wi, "./temp/wisconsin_race_census.RDS")
|
# Generic: standard errors of parameter estimates.
se <- function(object, ...) UseMethod("se")
# Standard errors for a fitted tsglm model.
# If B is missing: normal approximation from the diagonal of vcov().
# Otherwise: parametric bootstrap with B replications (B >= 2), optionally
# parallelised via a previously registered default cluster (parSapply(cl=NULL)).
# Returns a list with elements est, se, type ("normapprox" or "bootstrap")
# and, for the bootstrap, B.
se.tsglm <- function(object, B, parallel=FALSE, ...){
  tsglm.check(object)
  # Point estimates; sigmasq is only present for non-Poisson distributions.
  est <- c(coef(object), sigmasq=if(object$distr=="poisson") NULL else object$sigmasq)
  if(missing(B)){
    vcov <- vcov(object)
    var <- diag(vcov)
    # No normal-approximation SE is available for sigmasq, hence NA.
    stderrors <- c(sqrt(var), sigmasq=if(object$distr=="poisson") NULL else NA)
    result <- list(est=est, se=stderrors, type="normapprox")
  }else{
    stopifnot(B>=2, B%%1==0)
    # Simulate one series from the fitted model and refit; returns the
    # refitted coefficients (plus sigmasq for non-Poisson models).
    simfit <- function(seed, fit, ...){
      set.seed(seed)
      ts_sim <- tsglm.sim(fit=fit)$ts
      fit_sim <- tsglm(ts=ts_sim, model=fit$model, xreg=fit$xreg, link=fit$link, distr=fit$distr, score=FALSE, info="none", ...)
      result <- c(coef(fit_sim), sigmasq=if(object$distr=="poisson") NULL else fit_sim$sigmasq)
      return(result)
    }
    # Independent seeds make each replication reproducible under parallelism.
    seeds <- sample(1e+9, size=B)
    if(parallel){
      Sapply <- function(X, FUN, ...) parSapply(cl=NULL, X=X, FUN=FUN, ...)
    }else{
      Sapply <- sapply
    }
    bootstrap_coefs <- Sapply(seeds, simfit, fit=object, ..., simplify=TRUE)
    if(object$distr!="poisson" && anyNA(bootstrap_coefs["sigmasq",])) warning(paste("The overdispersion coefficient 'sigmasq' could not be estimated\nin", sum(is.na(bootstrap_coefs["sigmasq",])), "of the", B, "replications. It is set to zero for these\nreplications. This might to some extent result in an overestimation\nof its true variability."))
    # Bootstrap SE: row-wise standard deviation across replications.
    stderrors <- apply(bootstrap_coefs, 1, sd, na.rm=TRUE)
    result <- list(est=est, se=stderrors, type="bootstrap", B=B)
  }
  return(result)
}
| /tscount/R/se.tsglm.r | no_license | ingted/R-Examples | R | false | false | 1,577 | r | se <- function(object, ...) UseMethod("se")
se.tsglm <- function(object, B, parallel=FALSE, ...){
tsglm.check(object)
est <- c(coef(object), sigmasq=if(object$distr=="poisson") NULL else object$sigmasq)
if(missing(B)){
vcov <- vcov(object)
var <- diag(vcov)
stderrors <- c(sqrt(var), sigmasq=if(object$distr=="poisson") NULL else NA)
result <- list(est=est, se=stderrors, type="normapprox")
}else{
stopifnot(B>=2, B%%1==0)
simfit <- function(seed, fit, ...){
set.seed(seed)
ts_sim <- tsglm.sim(fit=fit)$ts
fit_sim <- tsglm(ts=ts_sim, model=fit$model, xreg=fit$xreg, link=fit$link, distr=fit$distr, score=FALSE, info="none", ...)
result <- c(coef(fit_sim), sigmasq=if(object$distr=="poisson") NULL else fit_sim$sigmasq)
return(result)
}
seeds <- sample(1e+9, size=B)
if(parallel){
Sapply <- function(X, FUN, ...) parSapply(cl=NULL, X=X, FUN=FUN, ...)
}else{
Sapply <- sapply
}
bootstrap_coefs <- Sapply(seeds, simfit, fit=object, ..., simplify=TRUE)
if(object$distr!="poisson" && anyNA(bootstrap_coefs["sigmasq",])) warning(paste("The overdispersion coefficient 'sigmasq' could not be estimated\nin", sum(is.na(bootstrap_coefs["sigmasq",])), "of the", B, "replications. It is set to zero for these\nreplications. This might to some extent result in an overestimation\nof its true variability."))
stderrors <- apply(bootstrap_coefs, 1, sd, na.rm=TRUE)
result <- list(est=est, se=stderrors, type="bootstrap", B=B)
}
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/large_seg_utils.R
\name{load.perms.bygene}
\alias{load.perms.bygene}
\title{Load gene-based permutation data}
\usage{
load.perms.bygene(perm.res.in, subset_to_regions = NULL)
}
\arguments{
\item{perm.res.in}{path to .tsv of all permutation results}
\item{subset_to_regions}{optional vector of region IDs to keep [default: keep all regions]}
}
\value{
data.table() of permutation results
}
\description{
Load all permuted segments matched on the number of genes
}
| /source/rCNV2/man/load.perms.bygene.Rd | permissive | talkowski-lab/rCNV2 | R | false | true | 542 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/large_seg_utils.R
\name{load.perms.bygene}
\alias{load.perms.bygene}
\title{Load gene-based permutation data}
\usage{
load.perms.bygene(perm.res.in, subset_to_regions = NULL)
}
\arguments{
\item{perm.res.in}{path to .tsv of all permutation results}
\item{subset_to_regions}{optional vector of region IDs to keep [default: keep all regions]}
}
\value{
data.table() of permutation results
}
\description{
Load all permuted segments matched on the number of genes
}
|
# Exploratory Data Analysis project, plot 1.
# The raw data file was downloaded and saved locally as "project1.txt".
# Read the full data set; '?' marks missing values in this file.
power <- read.table("project1.txt", header=T, sep=';', na.strings="?",
                    nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
# BUG FIX: the Date column carries 4-digit years, so the format must be
# "%d/%m/%Y" (not "%y", which mis-parsed every date). The filter must also
# combine the two bounds with '&' and compare against Date values -- the
# original 'Date >= "1/2/2007" | Date <= "2/2/2007"' kept every row.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
data <- subset(power, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Convert the Date and Time variables to a combined POSIXct timestamp
# (not needed for plot 1 itself, kept for parity with the other plots).
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Draw plot 1: histogram of global active power over the two-day window.
# BUG FIX: dropped the stray 'type = "1"' argument -- hist() has no such
# parameter (it was copied from a line-plot call).
png("plot1.png", width=480, height=480, units="px")
hist(data$Global_active_power, main = "Global Active Power", xlab = "Global Active Power(kilowatts)", ylab = "Frequency", col = "red")
dev.off()
| /plot1.R | no_license | punadsmile/Exploratory_Data_Analysis | R | false | false | 838 | r | #I have already downloaded the data set file and changed its name into "Project1.txt"
#Read the data set
power <- read.table("project1.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
#We will only be using data from the dates 2007-02-01 and 2007-02-02.
power$Date <- as.Date(power$Date, format = "%d/%m/%y")
data <- subset(power, Date >= "1/2/2007" | Date <= "2/2/2007")
#Convert the Date and Time variables to Date/Time classes
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
##Draw the plot1
png("plot1.png", width=480, height=480, units="px")
hist(data$Global_active_power, type = "1", main = "Global Active Power", xlab = "Global Active Power(kilowatts)", ylab = "Frequency", col = "red")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metatrial.R
\name{metatrial}
\alias{metatrial}
\title{Generate meta-analysis data and calculate estimator}
\usage{
metatrial(
measure = "median",
measure_spread = "iqr",
tau_sq = 0.6,
effect_ratio = 1.2,
rdist = "norm",
parameters = list(mean = 50, sd = 0.2),
n_df = sim_n(k = 3),
knha = TRUE,
true_effect = 50,
test = "knha"
)
}
\arguments{
\item{effect_ratio}{ratio of population effects intervention / control}
\item{rdist}{string indicating distribution, "norm", "lnorm", "exp", or "pareto"}
\item{n_df}{\code{data.frame} of sample sizes,
such as output by \code{\link{meta_n}}.}
\item{true_effect}{The value of the control population median.}
\item{test}{"knha" or "z" for \link[metafor:rma]{metafor::rma}.}
}
\description{
Simulate data based on simulation parameters and meta-analyse.
}
\details{
NB: bias is effect - true effect.
}
\seealso{
Other neet_test_one One neet test has been written:
\code{\link{beta_par}()},
\code{\link{default_parameters}},
\code{\link{density_fn}()},
\code{\link{dist_name}()},
\code{\link{intervention_proportion}()},
\code{\link{lr_se}()},
\code{\link{metamodel}()},
\code{\link{metasims}()},
\code{\link{metasim}()},
\code{\link{plots}},
\code{\link{sim_df}()},
\code{\link{sim_dist}()},
\code{\link{sim_n}()}
Other simulation Functions that contribute to simulation pipeline.:
\code{\link{beta_par}()},
\code{\link{default_parameters}},
\code{\link{density_fn}()},
\code{\link{intervention_proportion}()},
\code{\link{lr_se}()},
\code{\link{metamodel}()},
\code{\link{metasims}()},
\code{\link{metasim}()},
\code{\link{sim_df}()},
\code{\link{sim_n}()},
\code{\link{sim_sample}()}
}
\concept{neet_test_one One neet test has been written}
\concept{simulation Functions that contribute to simulation pipeline.}
| /man/metatrial.Rd | permissive | kylehamilton/simeta | R | false | true | 1,857 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metatrial.R
\name{metatrial}
\alias{metatrial}
\title{Generate meta-analysis data and calculate estimator}
\usage{
metatrial(
measure = "median",
measure_spread = "iqr",
tau_sq = 0.6,
effect_ratio = 1.2,
rdist = "norm",
parameters = list(mean = 50, sd = 0.2),
n_df = sim_n(k = 3),
knha = TRUE,
true_effect = 50,
test = "knha"
)
}
\arguments{
\item{effect_ratio}{ratio of population effects intervention / control}
\item{rdist}{string indicating distribution, "norm", "lnorm", "exp", or "pareto"}
\item{n_df}{\code{data.frame} of sample sizes,
such as output by \code{\link{meta_n}}.}
\item{true_effect}{The value of the control population median.}
\item{test}{"knha" or "z" for \link[metafor:rma]{metafor::rma}.}
}
\description{
Simulate data based on simulation parameters and meta-analyse.
}
\details{
NB: bias is effect - true effect.
}
\seealso{
Other neet_test_one One neet test has been written:
\code{\link{beta_par}()},
\code{\link{default_parameters}},
\code{\link{density_fn}()},
\code{\link{dist_name}()},
\code{\link{intervention_proportion}()},
\code{\link{lr_se}()},
\code{\link{metamodel}()},
\code{\link{metasims}()},
\code{\link{metasim}()},
\code{\link{plots}},
\code{\link{sim_df}()},
\code{\link{sim_dist}()},
\code{\link{sim_n}()}
Other simulation Functions that contribute to simulation pipeline.:
\code{\link{beta_par}()},
\code{\link{default_parameters}},
\code{\link{density_fn}()},
\code{\link{intervention_proportion}()},
\code{\link{lr_se}()},
\code{\link{metamodel}()},
\code{\link{metasims}()},
\code{\link{metasim}()},
\code{\link{sim_df}()},
\code{\link{sim_n}()},
\code{\link{sim_sample}()}
}
\concept{neet_test_one One neet test has been written}
\concept{simulation Functions that contribute to simulation pipeline.}
|
# Non-exported utility functions
#' Unpack and return a SciDB query expression as a data frame
#' @param db scidb database connection object
#' @param query A SciDB query expression or scidb object
#' @param ... optional extra arguments (see below)
#' @note optional extra arguments
#' \itemize{
#' \item{binary}{ optional logical value, if \code{FALSE} use iquery text transfer, otherwise binary transfer, defaults \code{TRUE}}
#' \item{buffer}{ integer initial parse buffer size in bytes, adaptively resized as needed: larger buffers can be faster but consume more memory, default size is 100000L.}
#' \item{only_attributes}{ optional logical value, \code{TRUE} means don't retrieve dimension coordinates, only return attribute values; defaults to \code{FALSE}.}
#' \item{schema}{ optional result schema string, only applies when \code{query} is not a SciDB object. Supplying this avoids one extra metadata query to determine result schema. Defaults to \code{schema(query)}.}
#' }
#' @keywords internal
#' @importFrom curl new_handle handle_setheaders handle_setopt curl_fetch_memory handle_setform form_file
#' @importFrom data.table data.table
#' @import bit64
scidb_unpack_to_dataframe = function(db, query, ...)
{
  DEBUG = FALSE
  # Whether to map SciDB int64 to R bit64::integer64 (set at connect time).
  INT64 = attr(db, "connection")$int64
  DEBUG = getOption("scidb.debug", FALSE)
  # Initial parse buffer size in bytes; adaptively re-estimated below.
  buffer = 100000L
  args = list(...)
  if (is.null(args$only_attributes)) args$only_attributes = FALSE
  if (is.null(args$binary)) args$binary = TRUE
  if (!is.null(args$buffer))
  {
    argsbuf = tryCatch(as.integer(args$buffer), warning=function(e) NA)
    if (!is.na(argsbuf) && argsbuf <= 1e9) buffer = as.integer(argsbuf)
  }
  if (!inherits(query, "scidb"))
  {
    # make a scidb object out of the query, optionally using a supplied schema to skip metadata query
    if (is.null(args$schema)) query = scidb(db, query)
    else query = scidb(db, query, schema=args$schema)
  }
  attributes = schema(query, "attributes")
  dimensions = schema(query, "dimensions")
  query = query@name
  # Fall back to plain text transfer via iquery when binary=FALSE.
  if(! args$binary) return(iquery(db, query, binary=FALSE, `return`=TRUE))
  if (args$only_attributes)
  {
    internal_attributes = attributes
    internal_query = query
  } else
  {
    # Dimensions are requested too: turn them into attributes so a single
    # binary unpack returns coordinates alongside values.
    dim_names = dimensions$name
    attr_names = attributes$name
    all_names = c(dim_names, attr_names)
    internal_query = query
    if (length(all_names) != length(unique(all_names)))
    {
      # Cast to completely unique names to be safe:
      cast_dim_names = make.names_(dim_names)
      cast_attr_names = make.unique_(cast_dim_names, make.names_(attributes$name))
      cast_schema = sprintf("<%s>[%s]", paste(paste(cast_attr_names, attributes$type, sep=":"), collapse=","), paste(cast_dim_names, collapse=","))
      internal_query = sprintf("cast(%s, %s)", internal_query, cast_schema)
      all_names = c(cast_dim_names, cast_attr_names)
      dim_names = cast_dim_names
    }
    # Apply dimensions as attributes, using unique names. Manually construct the list of resulting attributes:
    dimensional_attributes = data.frame(name=dimensions$name, type="int64", nullable=FALSE) # original dimension names (used below)
    internal_attributes = rbind(attributes, dimensional_attributes)
    dim_apply = paste(dim_names, dim_names, sep=",", collapse=",")
    internal_query = sprintf("apply(%s, %s)", internal_query, dim_apply)
  }
  # Build the shim binary save format string, e.g. "(double null,int32)".
  ns = rep("", length(internal_attributes$nullable))
  ns[internal_attributes$nullable] = "null"
  format_string = paste(paste(internal_attributes$type, ns), collapse=",")
  format_string = sprintf("(%s)", format_string)
  if (DEBUG) message("Data query ", internal_query)
  if (DEBUG) message("Format ", format_string)
  # release=0 keeps the shim session alive until we explicitly release it.
  sessionid = scidbquery(db, internal_query, save=format_string, release=0)
  on.exit( SGET(db, "/release_session", list(id=sessionid), err=FALSE), add=TRUE)
  dt2 = proc.time()
  # Fetch the entire binary result (n=0 means "all bytes") in one request.
  uri = URI(db, "/read_bytes", list(id=sessionid, n=0))
  h = new_handle()
  handle_setheaders(h, .list=list(`Authorization`=digest_auth(db, "GET", uri)))
  handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
               ssl_verifypeer=0))
  resp = curl_fetch_memory(uri, h)
  if (resp$status_code > 299) stop("HTTP error", resp$status_code)
  # Explicitly reap the handle to avoid short-term build up of socket descriptors
  rm(h)
  gc()
  if (DEBUG) message("Data transfer time ", (proc.time() - dt2)[3])
  dt1 = proc.time()
  len = length(resp$content)
  p = 0            # byte offset into resp$content consumed so far
  ans = c()        # accumulated result (data.table chunks rbind'ed together)
  cnames = c(internal_attributes$name, "lines", "p") # we are unpacking to a SciDB array, ignore dims
  n = nrow(internal_attributes)
  rnames = c()
  typediff = setdiff(internal_attributes$type, names(.scidbtypes))
  if(length(typediff) > 0)
  {
    stop(typediff, " SciDB type not supported. Try converting to string in SciDB or use a binary=FALSE data transfer")
  }
  # Incrementally parse the binary payload with the C routine; each call
  # returns up to 'buffer' bytes worth of rows plus the new offset 'p'.
  while (p < len)
  {
    dt2 = proc.time()
    tmp = .Call("scidb_parse", as.integer(buffer), internal_attributes$type,
                internal_attributes$nullable, resp$content, as.double(p), as.integer(INT64), PACKAGE="scidb")
    names(tmp) = cnames
    lines = tmp[[n+1]]   # number of complete rows parsed this pass
    p_old = p
    p = tmp[[n+2]]       # updated byte offset
    if (DEBUG) message("  R buffer ", p, "/", len, " bytes parsing time ", round( (proc.time() - dt2)[3], 4))
    dt2 = proc.time()
    if (lines > 0)
    {
      if ("binary" %in% internal_attributes$type)
      {
        # binary-typed attributes are returned as a plain list of columns
        if (DEBUG) message("  R rbind/df assembly time ", round( (proc.time() - dt2)[3], 4))
        return(lapply(1:n, function(j) tmp[[j]][1:lines])) # XXX issue 33
      }
      len_out = length(tmp[[1]])
      if (lines < len_out) tmp = lapply(tmp[1:n], function(x) x[1:lines])
      # adaptively re-estimate a buffer size
      avg_bytes_per_line = ceiling( (p - p_old) / lines)
      buffer = min(getOption("scidb.buffer_size"), ceiling(1.3 * (len - p) / avg_bytes_per_line)) # Engineering factors
      # Assemble the data frame
      if (is.null(ans)) ans = data.table::data.table(data.frame(tmp[1:n], stringsAsFactors=FALSE, check.names=FALSE))
      else ans = rbind(ans, data.table::data.table(data.frame(tmp[1:n], stringsAsFactors=FALSE, check.names=FALSE)))
    }
    if (DEBUG) message("  R rbind/df assembly time ", round( (proc.time() - dt2)[3], 4))
  }
  if (is.null(ans))
  {
    # Empty result: return a zero-row data frame with the right column names.
    xa = attributes$name
    if (args$only_attributes) # permute cols, see issue #125
      xd = c()
    else
      xd = dimensions$name
    n = length(xd) + length(xa)
    ans = vector(mode="list", length=n)
    names(ans) = make.names_(c(xd, xa))
    class(ans) = "data.frame"
    return(ans)
  }
  if (DEBUG) message("Total R parsing time ", round( (proc.time() - dt1)[3], 4))
  ans = as.data.frame(ans, check.names=FALSE)
  if (INT64)
  {
    # Tag int64 columns with the bit64 integer64 class.
    for (i64 in which(internal_attributes$type %in% "int64")) oldClass(ans[, i64]) = "integer64"
  }
  # Handle datetime (integer POSIX time)
  for (idx in which(internal_attributes$type %in% "datetime")) ans[, idx] = as.POSIXct(ans[, idx], origin="1970-1-1")
  if (args$only_attributes) # permute cols, see issue #125
  {
    colnames(ans) = make.names_(attributes$name)
  }
  else
  {
    # Dimensions were appended after attributes above; move them back to the
    # front so the result reads (dimensions..., attributes...).
    nd = length(dimensions$name)
    i = ncol(ans) - nd
    ans = ans[, c( (i+1):ncol(ans), 1:i)]
    colnames(ans) = make.names_(c(dimensions$name, attributes$name))
  }
  gc()
  ans
}
#' Convenience function for digest authentication.
#' @param db a scidb database connection object
#' @param method digest method
#' @param uri uri
#' @param realm realm
#' @param nonce nonce
#' @keywords internal
#' @importFrom digest digest
digest_auth = function(db, method, uri, realm="", nonce="123456")
{
  .scidbenv = attr(db, "connection")
  # Only digest-type authentication gets a header; anything else returns "".
  if (!is.null(.scidbenv$authtype))
  {
    if (.scidbenv$authtype != "digest") return("")
  }
  # RFC 2617 wants the request path, not the full URL.
  uri = gsub(".*/", "/", uri)
  userpwd = .scidbenv$digest
  if (is.null(userpwd)) userpwd=":"
  up = strsplit(userpwd, ":")[[1]]
  user = up[1]
  pwd = up[2]
  if (is.na(pwd)) pwd=""
  # BUG FIX: algo="md5" was previously passed (and silently ignored) inside
  # the sprintf() calls rather than to digest(); the code only worked because
  # md5 happens to be digest()'s default algorithm. Pass it explicitly.
  ha1 = digest(sprintf("%s:%s:%s", user, realm, pwd), algo="md5", serialize=FALSE)
  ha2 = digest(sprintf("%s:%s", method, uri), algo="md5", serialize=FALSE)
  # Fixed client nonce/counter: shim does not verify nc bookkeeping.
  cnonce="MDc1YmFhOWFkY2M0YWY2MDAwMDBlY2JhMDAwMmYxNTI="
  nc="00000001"
  qop="auth"
  response=digest(sprintf("%s:%s:%s:%s:%s:%s", ha1, nonce, nc, cnonce, qop, ha2), algo="md5", serialize=FALSE)
  sprintf('Digest username="%s", realm=%s, nonce="%s", uri="%s", cnonce="%s", nc=%s, qop=%s, response="%s"', user, realm, nonce, uri, cnonce, nc, qop, response)
}
# Internal one-shot warning helper: warnonce("key") emits the message
# registered under "key" the first time it is called and is silent on
# subsequent calls. Implemented as a closure over 'state' so the pending
# messages persist across calls without touching the global environment.
warnonce = (function() {
  state = list(
    count="Use the AFL op_count macro operator for an exact count of data rows.",
    nonum="Note: The R sparse Matrix package does not support certain value types like\ncharacter strings"
  )
  function(warn) {
    if (!is.null(state[warn][[1]])) {
      message(state[warn])
      # BUG FIX: the original used 's <<- state', which created/overwrote a
      # variable named 's' in the global environment as a side effect.
      # A plain local copy is all that is needed before updating the
      # closed-over 'state'.
      s = state
      s[warn] = NULL   # drop the key so the message fires only once
      state <<- s
    }
  }
}) ()
# Percent-encoding fallback (some versions of RCurl ship a broken URLencode).
# Characters outside the unreserved set are replaced by %-escapes built from
# their raw byte values (lowercase hex); with reserved=TRUE the URI
# delimiters ;/?:@=& are escaped as well.
oldURLencode = function (URL, reserved = FALSE)
{
  keep = paste0("[^-ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                "abcdefghijklmnopqrstuvwxyz0123456789$_.+!*'(),",
                if (!reserved) ";/?:@=&",
                "]")
  chars = strsplit(URL, "")[[1L]]
  # One character may expand to several bytes (UTF-8), hence the collapse.
  escape_one = function(ch) paste0("%", as.character(charToRaw(ch)), collapse = "")
  hits = grep(keep, chars)
  for (i in hits) chars[i] = escape_one(chars[i])
  paste(chars, collapse = "")
}
# Internal function: create a SciDB TEMP array named 'name' with the given
# schema string on the connection 'db'.
create_temp_array = function(db, name, schema)
{
  # SciDB temporary array syntax varies with SciDB version
  # (>= 14.12 takes a boolean flag; older versions take the string 'TEMP').
  TEMP = "'TEMP'"
  if (at_least(attr(db, "connection")$scidb.version, "14.12")) TEMP="true"
  query = sprintf("create_array(%s, %s, %s)", name, schema, TEMP)
  iquery(db, query, `return`=FALSE)
}
#' An important internal convenience function that returns a scidb object. If
#' eval=TRUE, a new SciDB array is created the returned scidb object refers to
#' that. Otherwise, the returned scidb object represents a SciDB array promise.
#'
#' @param db scidb connection object
#' @param expr (character) A SciDB expression or array name
#' @param eval (logical) If TRUE evaluate expression and assign to new SciDB array.
#' If FALSE, infer output schema but don't evaluate.
#' @param name (optional character) If supplied, name for stored array when eval=TRUE
#' @param gc (optional logical) If TRUE, tie SciDB object to garbage collector.
#' @param depend (optional list) An optional list of other scidb objects
#' that this expression depends on (preventing their garbage collection
#' if other references to them go away).
#' @param schema (optional) used to create SciDB temp arrays
#' (requires scidb >= 14.8)
#' @param temp (optional) used to create SciDB temp arrays
#' (requires scidb >= 14.8)
#' @return A \code{scidb} array object
#' @note Only AFL supported.
`.scidbeval` = function(db, expr, eval=FALSE, name, gc=TRUE, depend, schema, temp)
{
  ans = c()
  if (missing(depend)) depend = c()
  if (missing(schema)) schema = ""
  if (missing(temp)) temp = FALSE
  if (!is.list(depend)) depend = list(depend)
  # Address bug #45. Try to cheaply determine if expr refers to a named array
  # or an AFL expression. If it's a named array, then eval must be set TRUE.
  if (!grepl("\\(", expr, perl=TRUE)) eval = TRUE
  if (`eval`)
  {
    # Materialize: store the expression into a new (possibly temp) array.
    if (missing(name) || is.null(name))
    {
      newarray = tmpnam(db)
      if (temp) create_temp_array(db, newarray, schema)
    }
    else newarray = name
    query = sprintf("store(%s,%s)", expr, newarray)
    scidbquery(db, query, stream=0L)
    ans = scidb(db, newarray, gc=gc)
    if (temp) ans@meta$temp = TRUE
  } else
  {
    # Lazy promise: wrap the expression without evaluating it.
    ans = scidb(db, expr, gc=gc)
    # Assign dependencies so referenced arrays are not garbage collected.
    if (length(depend) > 0)
    {
      assign("depend", depend, envir=ans@meta)
    }
  }
  ans
}
# Build syntactically valid, unique R names from x, then swap the dots that
# make.names() introduces for underscores (SciDB-friendly identifiers).
make.names_ = function(x)
{
  valid = make.names(x, unique = TRUE)
  gsub(".", "_", valid, fixed = TRUE)
}
# x: vector of existing names, y: vector of proposed new names.
# Returns y, renamed where necessary so that the combined set c(x, y)
# contains no duplicate names (underscore-style, e.g. "a" -> "a_1").
make.unique_ = function(x, y)
{
  # Work in make.names()'s dot notation, then translate back to underscores.
  combined = gsub("_", ".", c(x, y))
  deduped = make.names(combined, unique = TRUE)
  fresh = deduped[seq(to = length(deduped), length.out = length(y))]
  gsub(".", "_", fresh, fixed = TRUE)
}
# Return the unique SciDB identifier stored on the connection attribute of
# db, failing with a clear error when no live connection is present.
getuid = function(db)
{
  con = attributes(db)$connection
  uid = con$id
  if (is.null(uid)) stop("Not connected...try scidbconnect")
  uid
}
# Generate a collision-resistant SciDB array name: a tempfile-derived salt
# (which embeds the prefix) followed by this connection's unique id.
tmpnam = function(db, prefix="R_array")
{
  stopifnot(inherits(db, "afl"))
  salt = basename(tempfile(pattern = prefix))
  paste0(salt, getuid(db))
}
# Request a new shim HTTP session id; strips CR/LF from the reply and
# errors when the server returns nothing.
getSession = function(db)
{
  raw_id = SGET(db, "/new_session")
  if (length(raw_id) < 1) stop("SciDB http session error; are you connecting to a valid SciDB host?")
  gsub("[\r\n]", "", raw_id)
}
# Assemble the base SciDB/shim URI from the host, port and auth parameters
# stored in the "connection" environment of the db object. Every function
# that talks to the shim interface should build its URL through here.
# Arguments:
#  db       scidb database connection object
#  resource (string): a path identifying the requested service
#  args     (list): named query parameters
URI = function(db, resource="", args=list())
{
  con = attr(db, "connection")
  if (is.null(con$host)) stop("Not connected...try scidbconnect")
  # Fold stored credentials into the query parameters.
  if (!is.null(con$auth)) args = c(args, list(auth=con$auth))
  if (!is.null(con$password)) args = c(args, list(password=con$password))
  if (!is.null(con$username)) args = c(args, list(user=con$username))
  # Credentials in the query string force TLS regardless of configuration.
  if (any(c("password", "auth") %in% names(args)))
  {
    scheme = "https://"
  } else
  {
    scheme = paste(con$protocol, "//", sep=":")
  }
  out = paste0(scheme, con$host, ":", con$port, "/", resource)
  if (length(args) > 0)
  {
    qs = paste(paste(names(args), args, sep="="), collapse="&")
    out = paste0(out, "?", qs)
  }
  out
}
# HTTP GET against the shim service.
#  db       scidb connection object
#  resource shim endpoint path (a leading "/" is added if missing)
#  args     named query parameters
#  err      if TRUE, stop() on HTTP status > 299
#  binary   if TRUE return the raw response body, else a character string
SGET = function(db, resource, args=list(), err=TRUE, binary=FALSE)
{
  if (!(substr(resource, 1, 1)=="/")) resource = paste("/", resource, sep="")
  uri = URI(db, resource, args)
  uri = oldURLencode(uri)
  # '+' survives URL encoding above but must be escaped for shim.
  uri = gsub("\\+", "%2B", uri, perl=TRUE)
  h = new_handle()
  handle_setheaders(h, .list=list(Authorization=digest_auth(db, "GET", uri)))
  handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
               ssl_verifypeer=0))
  ans = curl_fetch_memory(uri, h)
  if (ans$status_code > 299 && err)
  {
    msg = sprintf("HTTP error %s", ans$status_code)
    # Server errors include the response body (usually a SciDB error text).
    if (ans$status_code >= 500) msg = sprintf("%s\n%s", msg, rawToChar(ans$content))
    stop(msg)
  }
  if (binary) return(ans$content)
  rawToChar(ans$content)
}
# HTTP POST of data to the shim service.
# Normally called with raw data and args=list(id=whatever).
# Returns the response body as a character string (usually the name of the
# uploaded server-side file).
POST = function(db, data, args=list(), err=TRUE)
{
  # check for new shim simple post option (/upload), otherwise use
  # multipart/file upload (/upload_file)
  # NOTE(review): "[A-z]" also matches the punctuation between 'Z' and 'a';
  # here it is only used to strip non-digits from version components, which
  # works, but "[^0-9]" would be the precise pattern.
  shimspl = strsplit(attr(db, "connection")$scidb.version, "\\.")[[1]]
  shim_yr = tryCatch(as.integer(gsub("[A-z]", "", shimspl[1])), error=function(e) 16, warning=function(e) 8)
  shim_mo = tryCatch(as.integer(gsub("[A-z]", "", shimspl[2])), error=function(e) 16, warning=function(e) 8)
  # Unparseable versions fall back to defaults that select multipart upload.
  if (is.na(shim_yr)) shim_yr = 16
  if (is.na(shim_mo)) shim_mo = 8
  simple = (shim_yr >= 15 && shim_mo >= 7) || shim_yr >= 16
  if (simple)
  {
    # Newer shim: stream the bytes directly in the POST body.
    uri = URI(db, "/upload", args)
    uri = oldURLencode(uri)
    uri = gsub("\\+", "%2B", uri, perl=TRUE)
    h = new_handle()
    handle_setheaders(h, .list=list(Authorization=digest_auth(db, "POST", uri)))
    handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
              ssl_verifypeer=0, post=TRUE, postfieldsize=length(data), postfields=data))
    ans = curl_fetch_memory(uri, h)
    if (ans$status_code > 299 && err) stop("HTTP error ", ans$status_code)
    return(rawToChar(ans$content))
  }
  # Older shim: write the payload to a temp file and send it as a
  # multipart/form-data file upload.
  uri = URI(db, "/upload_file", args)
  uri = oldURLencode(uri)
  uri = gsub("\\+", "%2B", uri, perl=TRUE)
  h = new_handle()
  handle_setheaders(h, .list=list(Authorization=digest_auth(db, "POST", uri)))
  handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
            ssl_verifypeer=0))
  tmpf = tempfile()
  if (is.character(data)) data = charToRaw(data)
  writeBin(data, tmpf)
  handle_setform(h, file=form_file(tmpf))
  ans = curl_fetch_memory(uri, h)
  unlink(tmpf)
  if (ans$status_code > 299 && err) stop("HTTP error", ans$status_code)
  return(rawToChar(ans$content))
}
# Basic low-level query. Returns the shim session id. Internal function.
# db: scidb database connection object
# query: a character query string
# save: save-format query string, or NULL for no save.
#       Example values: "dcsv", "csv+", "(double NULL, int32)"
# release: set to zero to preserve the web session until release_session
#          is called manually
# session: an existing SciDB http session to reuse, otherwise NULL
# resp (logical): if TRUE also return the http response
# stream: set to 0L or 1L to control streaming (NOT USED -- see note below)
# prefix: optional AFL statement(s) to prefix the query in the same
#         connection context.
#
# Returns the HTTP session in each case (or list(session, response) when
# resp=TRUE).
scidbquery = function(db, query, save=NULL, release=1, session=NULL, resp=FALSE, stream, prefix=attributes(db)$connection$prefix)
{
  DEBUG = FALSE
  STREAM = 0L
  DEBUG = getOption("scidb.debug", FALSE)
  # NOTE(review): STREAM is computed here but never used below; the request
  # always sends stream=0L.
  if (missing(stream))
  {
    STREAM = 0L
  } else STREAM = as.integer(stream)
  sessionid = session
  if (is.null(session))
  {
    # Obtain a session from shim
    sessionid = getSession(db)
  }
  if (is.null(save)) save=""
  if (DEBUG)
  {
    message(query, "\n")
    t1 = proc.time()
  }
  # On any error or user interrupt, cancel the query and release the
  # session so the server does not leak resources.
  ans = tryCatch(
    {
      args = list(id=sessionid, afl=0L, query=query, stream=0L)
      args$release = release
      # Combine the global option prefix with the connection prefix.
      args$prefix = c(getOption("scidb.prefix"), prefix)
      if (!is.null(args$prefix)) args$prefix = paste(args$prefix, collapse=";")
      args$save = save
      args = list(db=db, resource="/execute_query", args=args)
      do.call("SGET", args=args)
    }, error=function(e)
    {
      # User cancel?
      SGET(db, "/cancel", list(id=sessionid), err=FALSE)
      SGET(db, "/release_session", list(id=sessionid), err=FALSE)
      stop(as.character(e))
    }, interrupt=function(e)
    {
      SGET(db, "/cancel", list(id=sessionid), err=FALSE)
      SGET(db, "/release_session", list(id=sessionid), err=FALSE)
      stop("cancelled")
    }, warning=invisible)
  if (DEBUG) message("Query time ", round( (proc.time() - t1)[3], 4))
  if (resp) return(list(session=sessionid, response=ans))
  sessionid
}
# Upload a sparse double-precision Matrix (dgCMatrix) X into a new 2-d SciDB
# array 'name'. The matrix is shipped in COO triplet form (i, j, val) and
# redimensioned server-side into the matrix schema.
#  rowChunkSize/colChunkSize: SciDB chunk sizes for the target schema
#  start: integer starting coordinates (origin) of the target array
.Matrix2scidb = function(db, X, name, rowChunkSize=1000, colChunkSize=1000, start=c(0, 0), gc=TRUE, ...)
{
  # NOTE(review): D is assigned but never used below.
  D = dim(X)
  rowOverlap = 0L
  colOverlap = 0L
  # Normalize 'start' into exactly two integer coordinates.
  if (missing(start)) start=c(0, 0)
  if (length(start) < 1) stop ("Invalid starting coordinates")
  if (length(start) > 2) start = start[1:2]
  if (length(start) < 2) start = c(start, 0)
  start = as.integer(start)
  type = .scidbtypes[[typeof(X@x)]]
  if (is.null(type)) {
    stop(paste("Unupported data type. The package presently supports: ",
       paste(.scidbtypes, collapse=" "), ".", sep=""))
  }
  if (type != "double") stop("Sorry, the package only supports double-precision sparse matrices right now.")
  # Target 2-d schema and the 1-d COO staging schema used for the upload.
  schema = sprintf(
      "< val : %s null> [i=%.0f:%.0f,%.0f,%.0f, j=%.0f:%.0f,%.0f,%.0f]", type, start[[1]],
      nrow(X)-1+start[[1]], min(nrow(X), rowChunkSize), rowOverlap, start[[2]], ncol(X)-1+start[[2]],
      min(ncol(X), colChunkSize), colOverlap)
  schema1d = sprintf("<i:int64 null, j:int64 null, val : %s null>[idx=0:*,100000,0]", type)
  # Obtain a session from shim for the upload process
  session = getSession(db)
  if (length(session)<1) stop("SciDB http session error")
  on.exit(SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  # Compute the indices and assemble message to SciDB in the form
  # double, double, double for indices i, j and data val.
  # Column indices come from expanding the dgCMatrix column-pointer slot @p.
  dp = diff(X@p)
  j  = rep(seq_along(dp), dp) - 1
  # Upload the data: rows of (i, j, val) serialized as doubles.
  bytes = .Call("scidb_raw", as.vector(t(matrix(c(X@i + start[[1]], j + start[[2]], X@x), length(X@x)))), PACKAGE="scidb")
  ans = POST(db, bytes, list(id=session))
  ans = gsub("\n", "", gsub("\r", "", ans))
  # redimension into a matrix
  query = sprintf("store(redimension(input(%s,'%s',-2,'(double null,double null,double null)'),%s),%s)", schema1d, ans, schema, name)
  iquery(db, query)
  scidb(db, name, gc=gc)
}
# raw value to special 1-element SciDB array
raw2scidb = function(db, X, name, gc=TRUE, ...)
{
  ## Store a single R raw vector as a 1-element SciDB array
  ## <val:binary>[i=0:0]. Optional argument temp=TRUE creates the target
  ## as a SciDB temporary array first.
  if (!is.raw(X)) stop("X must be a raw value")
  args = list(...)
  # Obtain a session from shim for the upload process
  session = getSession(db)
  if (length(session)<1) stop("SciDB http session error")
  on.exit(SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  bytes = .Call("scidb_raw", X, PACKAGE="scidb")
  ans = POST(db, bytes, list(id=session))
  # shim returns the server-side name of the uploaded data; strip CR/LF
  ans = gsub("\n", "", gsub("\r", "", ans))
  schema = "<val:binary null>[i=0:0,1,0]"
  if (!is.null(args$temp))
  {
    if (args$temp) create_temp_array(db, name, schema)
  }
  query = sprintf("store(input(%s,'%s',-2,'(binary null)'),%s)", schema, ans, name)
  iquery(db, query)
  scidb(db, name, gc=gc)
}
# Internal utility function used to format numbers for embedding in AFL
# text without scientific notation. Infinite values render as "*" (SciDB's
# unbounded-dimension marker); character input passes through unchanged.
noE = function(w)
{
  sapply(w, function(v) {
    if (is.infinite(v)) "*"
    else if (is.character(v)) v
    else sprintf("%.0f", v)
  })
}
#' Returns TRUE if version string x is greater than or equal to than version y
#' @param x version string like "12.1", "15.12", etc. (non-numeric ignored)
#' @param y version string like "12.1", "15.12", etc. (non-numeric ignored)
#' @return logical TRUE if x is greater than or equal to y
at_least = function(x, y)
{
  # Reduce a version string to major + minor/100, discarding any letters
  # or "-suffix" parts (e.g. "15.12-beta") and components beyond the minor.
  vnum = function(v)
  {
    parts = strsplit(sprintf("%s.0", v), "\\.")[[1]]
    parts = as.numeric(gsub("-.*", "", gsub("[A-z].*", "", parts)))
    parts[1] + parts[2] / 100
  }
  vnum(x) >= vnum(y)
}
# Used in delayed assignment of scidb object schema
lazyeval = function(db, name)
{
  ## Resolve the schema of a SciDB expression/array by running the show()
  ## operator server-side; used for delayed (lazy) schema resolution.
  # Escape single quotes so `name` can be embedded in show()'s quoted argument
  escape = gsub("'", "\\\\'", name, perl=TRUE)
  query = iquery(db, sprintf("show('filter(%s, true)', 'afl')", escape), `return`=TRUE, binary=FALSE)
  # NOTE that we need binary=FALSE here to avoid a terrible recursion
  # Strip everything before the attribute list so only "<...>[...]" remains
  list(schema = gsub("^.*<", "<", query$schema, perl=TRUE))
}
#' Internal function to upload an R data frame to SciDB
#' @param db scidb database connection
#' @param X a data frame
#' @param name SciDB array name
#' @param chunk_size optional value passed to the aio_input operator see https://github.com/Paradigm4/accelerated_io_tools
#' @param types SciDB attribute types
#' @param gc set to \code{TRUE} to connect SciDB array to R's garbage collector
#' @return a \code{\link{scidb}} object, or a character schema string if \code{schema_only=TRUE}.
#' @keywords internal
df2scidb = function(db, X,
                    name=tmpnam(db),
                    types=NULL,
                    chunk_size,
                    gc)
{
  ## Upload data frame X to SciDB array `name`: map R column types to SciDB
  ## attribute types, serialize to TSV, upload via shim, then load with
  ## aio_input (when available) or plain input().
  .scidbenv = attr(db, "connection")
  if (!is.data.frame(X)) stop("X must be a data frame")
  if (missing(gc)) gc = FALSE
  nullable = TRUE
  # Build syntactically valid, unique attribute names (dots -> underscores)
  anames = make.names(names(X), unique=TRUE)
  anames = gsub("\\.", "_", anames, perl=TRUE)
  if (length(anames) != ncol(X)) anames = make.names(1:ncol(X))
  if (!all(anames == names(X))) warning("Attribute names have been changed")
  # Default type is string
  typ = rep("string", ncol(X))
  dcast = anames
  if (!is.null(types)) {
    for (j in 1:ncol(X)) typ[j] = types[j]
  } else {
    # Infer a SciDB type per column and convert values to their text form,
    # rewriting R's NA representation as SciDB's "null".
    for (j in 1:ncol(X)) {
      if ("numeric" %in% class(X[, j]))
      {
        typ[j] = "double"
        X[, j] = gsub("NA", "null", sprintf("%.16f", X[, j]))
      }
      else if ("integer" %in% class(X[, j]))
      {
        typ[j] = "int32"
        X[, j] = gsub("NA", "null", sprintf("%d", X[, j]))
      }
      else if ("integer64" %in% class(X[, j]))
      {
        typ[j] = "int64"
        X[, j] = gsub("NA", "null", as.character(X[, j]))
      }
      else if ("logical" %in% class(X[, j]))
      {
        typ[j] = "bool"
        X[, j] = gsub("na", "null", tolower(sprintf("%s", X[, j])))
      }
      else if ("character" %in% class(X[, j]))
      {
        typ[j] = "string"
        X[is.na(X[, j]), j] = "null"
      }
      else if ("factor" %in% class(X[, j]))
      {
        # Factors are transferred as their level labels
        typ[j] = "string"
        isna = is.na(X[, j])
        X[, j] = sprintf("%s", X[, j])
        if (any(isna)) X[isna, j] = "null"
      }
      else if ("POSIXct" %in% class(X[, j]))
      {
        warning("Converting R POSIXct to SciDB datetime as UTC time. Subsecond times rounded to seconds.")
        X[, j] = format(X[, j], tz="UTC")
        X[is.na(X[, j]), j] = "null"
        typ[j] = "datetime"
      }
    }
  }
  # Build per-column cast expressions used by the aio_input load path
  for (j in 1:ncol(X))
  {
    if (typ[j] == "datetime") dcast[j] = sprintf("%s, datetime(a%d)", anames[j], j - 1)
    else if (typ[j] == "string") dcast[j] = sprintf("%s, a%d", anames[j], j - 1)
    else dcast[j] = sprintf("%s, dcast(a%d, %s(null))", anames[j], j - 1, typ[j])
  }
  args = sprintf("<%s>", paste(anames, ":", typ, " null", collapse=","))
  # Obtain a session from the SciDB http service for the upload process
  session = getSession(db)
  on.exit(SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  ncolX = ncol(X)
  nrowX = nrow(X)
  # fwrite(file=return) returns the serialized table as one character string
  X = charToRaw(fwrite(X, file=return))
  tmp = POST(db, X, list(id=session))
  tmp = gsub("\n", "", gsub("\r", "", tmp))
  # Generate a load_tools query
  # Prefer accelerated_io_tools' aio_input operator when the server has it
  aio = length(grep("aio_input", names(db))) > 0
  atts = paste(dcast, collapse=",")
  if (aio)
  {
    if (missing(chunk_size))
      LOAD = sprintf("project(apply(aio_input('%s','num_attributes=%d'),%s),%s)", tmp,
                     ncolX, atts, paste(anames, collapse=","))
    else
      LOAD = sprintf("project(apply(aio_input('%s','num_attributes=%d','chunk_size=%.0f'),%s),%s)", tmp,
                     ncolX, chunk_size, atts, paste(anames, collapse=","))
  } else
  {
    if (missing(chunk_size))
      LOAD = sprintf("input(%s, '%s', -2, 'tsv')", dfschema(anames, typ, nrowX), tmp)
    else
      LOAD = sprintf("input(%s, '%s', -2, 'tsv')", dfschema(anames, typ, nrowX, chunk_size), tmp)
  }
  query = sprintf("store(%s,%s)", LOAD, name)
  scidbquery(db, query, release=1, session=session, stream=0L)
  scidb(db, name, gc=gc)
}
#' Fast write.table/textConnection substitute
#'
#' Conversions are vectorized and the entire output is buffered in memory and written in
#' one shot. Great option for replacing writing to a textConnection (much much faster).
#' Not such a great option for writing to files, marginal difference from write.table and
#' obviously much greater memory use.
#' @param x a data frame
#' @param file a connection or \code{return} to return character output directly (fast)
#' @param sep column separator
#' @param format optional fprint-style column format specifyer
#' @return Use for the side effect of writing to the connection returning \code{NULL}, or
#' return a character value when \code{file=return}.
#' @importFrom utils write.table
#' @keywords internal
fwrite = function(x, file=stdout(), sep="\t", format=paste(rep("%s", ncol(x)), collapse=sep))
{
  ## Fast write.table substitute: format all rows with a single vectorized
  ## sprintf call and write (or return) the result in one shot.
  foo = NULL
  rm(list="foo") # avoid package R CMD check warnings of undeclared variable
  if (!is.data.frame(x)) stop("x must be a data.frame")
  # Fall back to write.table for NULL files or wide tables -- presumably
  # because sprintf caps the number of arguments at 100 (format + 97 columns
  # leaves margin); TODO confirm the exact limit motivating 97.
  if (is.null(file) || ncol(x) > 97) # use slow write.table method
  {
    # textConnection("foo", open="w") captures output into the variable `foo`
    tc = textConnection("foo", open="w")
    write.table(x, sep=sep, col.names=FALSE, row.names=FALSE, file=tc, quote=FALSE)
    close(tc)
    return(paste(foo, collapse="\n"))
  }
  # Passing file=return (any function) returns the character output directly
  if (is.function(file)) return(paste(do.call("sprintf", args=c(format, as.list(x))), collapse="\n"))
  write(paste(do.call("sprintf", args=c(format, as.list(x))), collapse="\n"), file=file)
  invisible()
}
matvec2scidb = function(db, X,
                        name=tmpnam(db),
                        start,
                        gc=TRUE, ...)
{
  ## Upload a dense R matrix or vector X into SciDB array `name`.
  ## Hidden ... arguments: attr (attribute name), reshape (logical),
  ## type (force a SciDB type for the stored attribute).
  # Check for a bunch of optional hidden arguments
  args = list(...)
  attr_name = "val"
  if (!is.null(args$attr)) attr_name = as.character(args$attr) # attribute name
  do_reshape = TRUE
  type = force_type = .Rtypes[[typeof(X)]]
  if (class(X) %in% "integer64") type = force_type = "int64"
  if (is.null(type)) {
    stop(paste("Unupported data type. The package presently supports: ",
       paste(unique(names(.Rtypes)), collapse=" "), ".", sep=""))
  }
  if (!is.null(args$reshape)) do_reshape = as.logical(args$reshape) # control reshape
  if (!is.null(args$type)) force_type = as.character(args$type) # limited type conversion
  # Chunk sizes capped at 1000 per dimension (or the data extent if smaller)
  chunkSize = c(min(1000L, nrow(X)), min(1000L, ncol(X)))
  chunkSize = as.numeric(chunkSize)
  if (length(chunkSize) == 1) chunkSize = c(chunkSize, chunkSize)
  overlap = c(0, 0)
  if (missing(start)) start = c(0, 0)
  start = as.numeric(start)
  if (length(start) ==1) start = c(start, start)
  D = dim(X)
  start = as.integer(start)
  overlap = as.integer(overlap)
  # Pick a dimension name that cannot collide with the attribute name
  dimname = make.unique_(attr_name, "i")
  if (is.null(D))
  {
    # X is a vector
    if (!is.vector(X) && !(type =="int64")) stop ("Unsupported object") # XXX bit64/integer64 bug?
    do_reshape = FALSE
    chunkSize = min(chunkSize[[1]], length(X))
    X = as.matrix(X)
    schema = sprintf(
      "< %s : %s null> [%s=%.0f:%.0f,%.0f,%.0f]", attr_name, force_type, dimname, start[[1]],
      nrow(X) - 1 + start[[1]], min(nrow(X), chunkSize), overlap[[1]])
    load_schema = schema
  } else if (length(D) > 2)
  {
    # X is an n-d array
    stop("not supported yet") # XXX WRITE ME
  } else {
    # X is a matrix: upload as a 1-d stream, then reshape into 2-d
    schema = sprintf(
      "< %s : %s null> [i=%.0f:%.0f,%.0f,%.0f, j=%.0f:%.0f,%.0f,%.0f]", attr_name, force_type, start[[1]],
      nrow(X) - 1 + start[[1]], chunkSize[[1]], overlap[[1]], start[[2]], ncol(X) - 1 + start[[2]],
      chunkSize[[2]], overlap[[2]])
    load_schema = sprintf("<%s:%s null>[__row=1:%.0f,1000000,0]", attr_name, force_type, length(X))
  }
  if (!is.matrix(X)) stop ("X must be a matrix or a vector")
  DEBUG = getOption("scidb.debug", FALSE)
  td1 = proc.time()
  # Obtain a session from shim for the upload process
  session = getSession(db)
  on.exit( SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  # Upload the data; transpose so bytes stream in row-major order
  bytes = .Call("scidb_raw", as.vector(t(X)), PACKAGE="scidb")
  ans = POST(db, bytes, list(id=session))
  # shim returns the server-side name of the uploaded data; strip CR/LF
  ans = gsub("\n", "", gsub("\r", "", ans))
  if (DEBUG)
  {
    message("Data upload time ", (proc.time() - td1)[3], "\n")
  }
  # Load query
  if (do_reshape)
  {
    query = sprintf("store(reshape(input(%s,'%s', -2, '(%s null)'),%s),%s)", load_schema, ans, type, schema, name)
  } else
  {
    query = sprintf("store(input(%s,'%s', -2, '(%s null)'),%s)", load_schema, ans, type, name)
  }
  iquery(db, query)
  scidb(db, name, gc=gc)
}
# Non-exported utility functions
#' Unpack and return a SciDB query expression as a data frame
#' @param db scidb database connection object
#' @param query A SciDB query expression or scidb object
#' @param ... optional extra arguments (see below)
#' @note option extra arguments
#' \itemize{
#' \item{binary}{ optional logical value, if \code{FALSE} use iquery text transfer, otherwise binary transfer; defaults to \code{TRUE}}
#' \item{buffer}{ integer initial parse buffer size in bytes, adaptively resized as needed: larger buffers can be faster but comsume more memory, default size is 100000L.}
#' \item{only_attributes}{ optional logical value, \code{TRUE} means don't retrieve dimension coordinates, only return attribute values; defaults to \code{FALSE}.}
#' \item{schema}{ optional result schema string, only applies when \code{query} is not a SciDB object. Supplying this avoids one extra metadata query to determine result schema. Defaults to \code{schema(query)}.}
#' }
#' @keywords internal
#' @importFrom curl new_handle handle_setheaders handle_setopt curl_fetch_memory handle_setform form_file
#' @importFrom data.table data.table
#' @import bit64
scidb_unpack_to_dataframe = function(db, query, ...)
{
  ## Run `query`, save the result in SciDB binary format, stream the bytes
  ## back through shim's /read_bytes, and parse them into a data frame with
  ## the compiled scidb_parse routine.
  DEBUG = FALSE
  INT64 = attr(db, "connection")$int64
  DEBUG = getOption("scidb.debug", FALSE)
  buffer = 100000L
  args = list(...)
  if (is.null(args$only_attributes)) args$only_attributes = FALSE
  if (is.null(args$binary)) args$binary = TRUE
  if (!is.null(args$buffer))
  {
    # Accept a user-supplied initial parse buffer size, capped at 1e9 bytes
    argsbuf = tryCatch(as.integer(args$buffer), warning=function(e) NA)
    if (!is.na(argsbuf) && argsbuf <= 1e9) buffer = as.integer(argsbuf)
  }
  if (!inherits(query, "scidb"))
  {
    # make a scidb object out of the query, optionally using a supplied schema to skip metadata query
    if (is.null(args$schema)) query = scidb(db, query)
    else query = scidb(db, query, schema=args$schema)
  }
  attributes = schema(query, "attributes")
  dimensions = schema(query, "dimensions")
  query = query@name
  # Text transfer path: delegate entirely to iquery
  if(! args$binary) return(iquery(db, query, binary=FALSE, `return`=TRUE))
  if (args$only_attributes)
  {
    internal_attributes = attributes
    internal_query = query
  } else
  {
    # Dimensions must come back too: turn them into attributes via apply(),
    # casting first if attribute/dimension names collide.
    dim_names = dimensions$name
    attr_names = attributes$name
    all_names = c(dim_names, attr_names)
    internal_query = query
    if (length(all_names) != length(unique(all_names)))
    {
      # Cast to completeley unique names to be safe:
      cast_dim_names = make.names_(dim_names)
      cast_attr_names = make.unique_(cast_dim_names, make.names_(attributes$name))
      cast_schema = sprintf("<%s>[%s]", paste(paste(cast_attr_names, attributes$type, sep=":"), collapse=","), paste(cast_dim_names, collapse=","))
      internal_query = sprintf("cast(%s, %s)", internal_query, cast_schema)
      all_names = c(cast_dim_names, cast_attr_names)
      dim_names = cast_dim_names
    }
    # Apply dimensions as attributes, using unique names. Manually construct the list of resulting attributes:
    dimensional_attributes = data.frame(name=dimensions$name, type="int64", nullable=FALSE) # original dimension names (used below)
    internal_attributes = rbind(attributes, dimensional_attributes)
    dim_apply = paste(dim_names, dim_names, sep=",", collapse=",")
    internal_query = sprintf("apply(%s, %s)", internal_query, dim_apply)
  }
  # Build the binary save format string, e.g. "(double null,int32)"
  ns = rep("", length(internal_attributes$nullable))
  ns[internal_attributes$nullable] = "null"
  format_string = paste(paste(internal_attributes$type, ns), collapse=",")
  format_string = sprintf("(%s)", format_string)
  if (DEBUG) message("Data query ", internal_query)
  if (DEBUG) message("Format ", format_string)
  # Execute with release=0 so the session stays open for /read_bytes
  sessionid = scidbquery(db, internal_query, save=format_string, release=0)
  on.exit( SGET(db, "/release_session", list(id=sessionid), err=FALSE), add=TRUE)
  dt2 = proc.time()
  uri = URI(db, "/read_bytes", list(id=sessionid, n=0))
  h = new_handle()
  handle_setheaders(h, .list=list(`Authorization`=digest_auth(db, "GET", uri)))
  handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
                 ssl_verifypeer=0))
  resp = curl_fetch_memory(uri, h)
  if (resp$status_code > 299) stop("HTTP error", resp$status_code)
  # Explicitly reap the handle to avoid short-term build up of socket descriptors
  rm(h)
  gc()
  if (DEBUG) message("Data transfer time ", (proc.time() - dt2)[3])
  dt1 = proc.time()
  len = length(resp$content)
  p = 0
  ans = c()
  cnames = c(internal_attributes$name, "lines", "p") # we are unpacking to a SciDB array, ignore dims
  n = nrow(internal_attributes)
  rnames = c()
  typediff = setdiff(internal_attributes$type, names(.scidbtypes))
  if(length(typediff) > 0)
  {
    stop(typediff, " SciDB type not supported. Try converting to string in SciDB or use a binary=FALSE data transfer")
  }
  # Parse the byte stream in chunks; scidb_parse returns the parsed columns
  # plus the number of lines consumed and the new byte offset p.
  while (p < len)
  {
    dt2 = proc.time()
    tmp = .Call("scidb_parse", as.integer(buffer), internal_attributes$type,
                internal_attributes$nullable, resp$content, as.double(p), as.integer(INT64), PACKAGE="scidb")
    names(tmp) = cnames
    lines = tmp[[n+1]]
    p_old = p
    p = tmp[[n+2]]
    if (DEBUG) message("  R buffer ", p, "/", len, " bytes parsing time ", round( (proc.time() - dt2)[3], 4))
    dt2 = proc.time()
    if (lines > 0)
    {
      if ("binary" %in% internal_attributes$type)
      {
        # binary attributes are returned as a plain list of columns
        if (DEBUG) message("  R rbind/df assembly time ", round( (proc.time() - dt2)[3], 4))
        return(lapply(1:n, function(j) tmp[[j]][1:lines])) # XXX issue 33
      }
      len_out = length(tmp[[1]])
      if (lines < len_out) tmp = lapply(tmp[1:n], function(x) x[1:lines])
      # adaptively re-estimate a buffer size
      avg_bytes_per_line = ceiling( (p - p_old) / lines)
      buffer = min(getOption("scidb.buffer_size"), ceiling(1.3 * (len - p) / avg_bytes_per_line)) # Engineering factors
      # Assemble the data frame
      if (is.null(ans)) ans = data.table::data.table(data.frame(tmp[1:n], stringsAsFactors=FALSE, check.names=FALSE))
      else ans = rbind(ans, data.table::data.table(data.frame(tmp[1:n], stringsAsFactors=FALSE, check.names=FALSE)))
    }
    if (DEBUG) message("  R rbind/df assembly time ", round( (proc.time() - dt2)[3], 4))
  }
  if (is.null(ans))
  {
    # Empty result: return a zero-row data frame with the right column names
    xa = attributes$name
    if (args$only_attributes)   # permute cols, see issue #125
      xd = c()
    else
      xd = dimensions$name
    n = length(xd) + length(xa)
    ans = vector(mode="list", length=n)
    names(ans) = make.names_(c(xd, xa))
    class(ans) = "data.frame"
    return(ans)
  }
  if (DEBUG) message("Total R parsing time ", round( (proc.time() - dt1)[3], 4))
  ans = as.data.frame(ans, check.names=FALSE)
  if (INT64)
  {
    # Tag int64 columns with the bit64 integer64 class
    for (i64 in which(internal_attributes$type %in% "int64")) oldClass(ans[, i64]) = "integer64"
  }
  # Handle datetime (integer POSIX time)
  for (idx in which(internal_attributes$type %in% "datetime")) ans[, idx] = as.POSIXct(ans[, idx], origin="1970-1-1")
  if (args$only_attributes)   # permute cols, see issue #125
  {
    colnames(ans) = make.names_(attributes$name)
  }
  else
  {
    # Move the dimension columns (appended last by apply) to the front
    nd = length(dimensions$name)
    i = ncol(ans) - nd
    ans = ans[, c( (i+1):ncol(ans), 1:i)]
    colnames(ans) = make.names_(c(dimensions$name, attributes$name))
  }
  gc()
  ans
}
#' Convenience function for digest authentication.
#' @param db a scidb database connection object
#' @param method digest method
#' @param uri uri
#' @param realm realm
#' @param nonce nonce
#' @keywords internal
#' @importFrom digest digest
digest_auth = function(db, method, uri, realm="", nonce="123456")
{
  ## Build an RFC 2617 HTTP Digest Authorization header value for shim.
  ## Returns "" when the connection does not use digest authentication.
  .scidbenv = attr(db, "connection")
  if (!is.null(.scidbenv$authtype))
  {
    if (.scidbenv$authtype != "digest") return("")
  }
  # Reduce the full URI to its final path component for the digest hash
  uri = gsub(".*/", "/", uri)
  userpwd = .scidbenv$digest
  if (is.null(userpwd)) userpwd=":"
  up = strsplit(userpwd, ":")[[1]]
  user = up[1]
  pwd = up[2]
  if (is.na(pwd)) pwd=""
  # HA1 = MD5(user:realm:password), HA2 = MD5(method:uri).
  # FIX: algo="md5" was previously passed inside sprintf() (where it was
  # silently ignored) instead of to digest(); digest() defaults to MD5, so
  # observable behavior is unchanged but the argument now lands correctly.
  ha1 = digest(sprintf("%s:%s:%s", user, realm, pwd), algo="md5", serialize=FALSE)
  ha2 = digest(sprintf("%s:%s", method, uri), algo="md5", serialize=FALSE)
  cnonce = "MDc1YmFhOWFkY2M0YWY2MDAwMDBlY2JhMDAwMmYxNTI=" # fixed client nonce
  nc = "00000001"
  qop = "auth"
  response = digest(sprintf("%s:%s:%s:%s:%s:%s", ha1, nonce, nc, cnonce, qop, ha2), algo="md5", serialize=FALSE)
  # NOTE(review): RFC 2617 quotes the realm value; shim evidently accepts the
  # unquoted form used here, so the header format is preserved as-is.
  sprintf('Digest username="%s", realm=%s, nonce="%s", uri="%s", cnonce="%s", nc=%s, qop=%s, response="%s"', user, realm, nonce, uri, cnonce, nc, qop, response)
}
# Internal one-shot warning messages: each key in `state` is emitted at most
# once per R session, then removed so later calls with the same key are silent.
warnonce = (function() {
  state = list(
    count="Use the AFL op_count macro operator for an exact count of data rows.",
    nonum="Note: The R sparse Matrix package does not support certain value types like\ncharacter strings"
  )
  function(warn) {
    msg = state[[warn]]
    if (!is.null(msg)) {
      message(msg)
      # FIX: remove the key inside the closure environment; the previous code
      # used `s <<- state`, leaking a temporary variable `s` into the global
      # environment (and failing outright in a locked package namespace).
      state[[warn]] <<- NULL
    }
  }
}) ()
# Percent-encode characters that fall outside the URL-safe set.
# Kept locally because some versions of RCurl shipped a broken URLencode.
# reserved=TRUE additionally encodes the reserved characters ;/?:@=&.
oldURLencode = function (URL, reserved = FALSE)
{
  allowed = paste0("[^-ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "abcdefghijklmnopqrstuvwxyz0123456789$_.+!*'(),",
    if (!reserved) ";/?:@=&", "]")
  chars = strsplit(URL, "")[[1L]]
  bad = grep(allowed, chars)
  for (i in bad) {
    # Encode every byte of the character (multi-byte UTF-8 safe)
    chars[i] = paste0("%", as.character(charToRaw(chars[i])), collapse = "")
  }
  paste(chars, collapse = "")
}
# Internal function
create_temp_array = function(db, name, schema)
{
  ## Create a SciDB temporary (non-persisted) array `name` with the given
  ## schema string.
  # SciDB temporary array syntax varies with SciDB version:
  # 14.12+ takes a boolean flag, earlier versions the literal string 'TEMP'.
  TEMP = "'TEMP'"
  if (at_least(attr(db, "connection")$scidb.version, "14.12")) TEMP="true"
  query = sprintf("create_array(%s, %s, %s)", name, schema, TEMP)
  iquery(db, query, `return`=FALSE)
}
#' An important internal convenience function that returns a scidb object. If
#' eval=TRUE, a new SciDB array is created the returned scidb object refers to
#' that. Otherwise, the returned scidb object represents a SciDB array promise.
#'
#' @param db scidb connection object
#' @param expr (character) A SciDB expression or array name
#' @param eval (logical) If TRUE evaluate expression and assign to new SciDB array.
#' If FALSE, infer output schema but don't evaluate.
#' @param name (optional character) If supplied, name for stored array when eval=TRUE
#' @param gc (optional logical) If TRUE, tie SciDB object to garbage collector.
#' @param depend (optional list) An optional list of other scidb objects
#' that this expression depends on (preventing their garbage collection
#' if other references to them go away).
#' @param schema (optional) used to create SciDB temp arrays
#' (requires scidb >= 14.8)
#' @param temp (optional) used to create SciDB temp arrays
#' (requires scidb >= 14.8)
#' @return A \code{scidb} array object
#' @note Only AFL supported.
`.scidbeval` = function(db, expr, eval=FALSE, name, gc=TRUE, depend, schema, temp)
{
  ans = c()
  if (missing(depend)) depend = c()
  if (missing(schema)) schema = ""
  if (missing(temp)) temp = FALSE
  if (!is.list(depend)) depend = list(depend)
  # Address bug #45. Try to cheaply determine if expr refers to a named array
  # or an AFL expression. If it's a named array, then eval must be set TRUE.
  if (!grepl("\\(", expr, perl=TRUE)) eval = TRUE
  if (`eval`)
  {
    # Evaluate now: store the expression into a new (possibly temp) array
    if (missing(name) || is.null(name))
    {
      newarray = tmpnam(db)
      if (temp) create_temp_array(db, newarray, schema)
    }
    else newarray = name
    query = sprintf("store(%s,%s)", expr, newarray)
    scidbquery(db, query, stream=0L)
    ans = scidb(db, newarray, gc=gc)
    if (temp) ans@meta$temp = TRUE
  } else
  {
    # Lazy: wrap the unevaluated expression in a scidb promise object
    ans = scidb(db, expr, gc=gc)
    # Assign dependencies
    # (keeps upstream scidb objects alive so R's GC can't remove their arrays)
    if (length(depend) > 0)
    {
      assign("depend", depend, envir=ans@meta)
    }
  }
  ans
}
# Produce syntactically valid, unique names with underscores in place of the
# dots that make.names() introduces.
make.names_ = function(x)
{
  valid = make.names(x, unique=TRUE)
  gsub("\\.", "_", valid, perl=TRUE)
}
# Deconflict proposed names against existing ones.
# x: vector of existing names; y: vector of proposed new names.
# Returns length(y) underscored names guaranteed not to collide with x.
make.unique_ = function(x, y)
{
  combined = make.names(gsub("_", ".", c(x, y)), unique=TRUE)
  fresh = utils::tail(combined, length(y))
  gsub("\\.", "_", fresh)
}
# Make a name from a prefix and a unique SciDB identifier.
# Return the unique connection identifier stored on the db object's
# "connection" attribute; error if the object was never connected.
getuid = function(db)
{
  con = attributes(db)$connection
  if (is.null(con$id)) stop("Not connected...try scidbconnect")
  con$id
}
# Generate a unique SciDB array name: a random salt derived from tempfile()
# plus the connection id, so names from different sessions cannot collide.
tmpnam = function(db, prefix="R_array")
{
  stopifnot(inherits(db, "afl"))
  salt = basename(tempfile(pattern=prefix))
  paste0(salt, getuid(db))
}
# Return a shim session ID or error
# Request a fresh HTTP session id from shim, stripping any CR/LF characters
# from the response; error when no session is returned.
getSession = function(db)
{
  session = SGET(db, "/new_session")
  if (length(session) < 1) stop("SciDB http session error; are you connecting to a valid SciDB host?")
  gsub("[\r\n]", "", session)
}
# Supply the base SciDB URI from the global host, port and auth
# parameters stored in the "connection" environment in the db object
# Every function that needs to talk to the shim interface should use
# this function to supply the URI.
# Arguments:
# db scidb database connection object
# resource (string): A URI identifying the requested service
# args (list): A list of named query parameters
URI = function(db, resource="", args=list())
{
  con = attr(db, "connection")
  if (is.null(con$host)) stop("Not connected...try scidbconnect")
  # Append authentication query parameters when present (order matters for
  # reproducible URIs: auth, then password, then user)
  if (!is.null(con$auth)) args = c(args, list(auth=con$auth))
  if (!is.null(con$password)) args = c(args, list(password=con$password))
  if (!is.null(con$username)) args = c(args, list(user=con$username))
  # Force TLS whenever credentials travel in the query string
  proto = if ("password" %in% names(args) || "auth" %in% names(args)) "https://"
          else paste(con$protocol, "//", sep=":")
  out = paste0(proto, con$host, ":", con$port, "/", resource)
  if (length(args) > 0)
  {
    qs = paste(paste(names(args), args, sep="="), collapse="&")
    out = paste(out, qs, sep="?")
  }
  out
}
SGET = function(db, resource, args=list(), err=TRUE, binary=FALSE)
{
  ## HTTP GET against the shim web service.
  ## err: stop() on HTTP status > 299; binary: return the raw vector
  ## instead of decoded text.
  if (!(substr(resource, 1, 1)=="/")) resource = paste("/", resource, sep="")
  uri = URI(db, resource, args)
  uri = oldURLencode(uri)
  # '+' is in oldURLencode's allowed set, so encode it separately here
  uri = gsub("\\+", "%2B", uri, perl=TRUE)
  h = new_handle()
  handle_setheaders(h, .list=list(Authorization=digest_auth(db, "GET", uri)))
  handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
                 ssl_verifypeer=0))
  ans = curl_fetch_memory(uri, h)
  if (ans$status_code > 299 && err)
  {
    msg = sprintf("HTTP error %s", ans$status_code)
    # Include the server's error text for 5xx responses
    if (ans$status_code >= 500) msg = sprintf("%s\n%s", msg, rawToChar(ans$content))
    stop(msg)
  }
  if (binary) return(ans$content)
  rawToChar(ans$content)
}
# Normally called with raw data and args=list(id=whatever)
POST = function(db, data, args=list(), err=TRUE)
{
  ## Upload `data` (raw vector or character) to shim; returns the server's
  ## response text (the remote file name of the uploaded data).
  # check for new shim simple post option (/upload), otherwise use
  # multipart/file upload (/upload_file)
  shimspl = strsplit(attr(db, "connection")$scidb.version, "\\.")[[1]]
  # Parse year/month from the version; fall back to permissive defaults when
  # the version string is malformed
  shim_yr = tryCatch(as.integer(gsub("[A-z]", "", shimspl[1])), error=function(e) 16, warning=function(e) 8)
  shim_mo = tryCatch(as.integer(gsub("[A-z]", "", shimspl[2])), error=function(e) 16, warning=function(e) 8)
  if (is.na(shim_yr)) shim_yr = 16
  if (is.na(shim_mo)) shim_mo = 8
  # Simple /upload exists in shim versions 15.7 and later
  simple = (shim_yr >= 15 && shim_mo >= 7) || shim_yr >= 16
  if (simple)
  {
    # Direct POST of the bytes as the request body
    uri = URI(db, "/upload", args)
    uri = oldURLencode(uri)
    uri = gsub("\\+", "%2B", uri, perl=TRUE)
    h = new_handle()
    handle_setheaders(h, .list=list(Authorization=digest_auth(db, "POST", uri)))
    handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
                   ssl_verifypeer=0, post=TRUE, postfieldsize=length(data), postfields=data))
    ans = curl_fetch_memory(uri, h)
    if (ans$status_code > 299 && err) stop("HTTP error ", ans$status_code)
    return(rawToChar(ans$content))
  }
  # Legacy path: write the data to a temporary file and send it as a
  # multipart/form-data file upload
  uri = URI(db, "/upload_file", args)
  uri = oldURLencode(uri)
  uri = gsub("\\+", "%2B", uri, perl=TRUE)
  h = new_handle()
  handle_setheaders(h, .list=list(Authorization=digest_auth(db, "POST", uri)))
  handle_setopt(h, .list=list(ssl_verifyhost=as.integer(getOption("scidb.verifyhost", FALSE)),
                 ssl_verifypeer=0))
  tmpf = tempfile()
  if (is.character(data)) data = charToRaw(data)
  writeBin(data, tmpf)
  handle_setform(h, file=form_file(tmpf))
  ans = curl_fetch_memory(uri, h)
  unlink(tmpf)
  if (ans$status_code > 299 && err) stop("HTTP error", ans$status_code)
  return(rawToChar(ans$content))
}
# Basic low-level query. Returns query id. This is an internal function.
# db: scidb database connection object
# query: a character query string
# save: Save format query string or NULL.
# release: Set to zero preserve web session until manually calling release_session
# session: if you already have a SciDB http session, set this to it, otherwise NULL
# resp(logical): return http response
# stream: Set to 0L or 1L to control streaming (NOT USED)
# prefix: optional AFL statement to prefix query in the same connection context.
# Example values of save: "dcsv", "csv+", "(double NULL, int32)"
#
# Returns the HTTP session in each case
scidbquery = function(db, query, save=NULL, release=1, session=NULL, resp=FALSE, stream, prefix=attributes(db)$connection$prefix)
{
  ## Submit an AFL query to shim's /execute_query endpoint.
  ## Returns the HTTP session id, or (when resp=TRUE) a list with the
  ## session id and the raw HTTP response text.
  DEBUG = FALSE
  STREAM = 0L
  DEBUG = getOption("scidb.debug", FALSE)
  if (missing(stream))
  {
    STREAM = 0L
  } else STREAM = as.integer(stream)
  # NOTE(review): STREAM is computed but never used below; the request always
  # sends stream=0L, consistent with the "NOT USED" remark in the header.
  sessionid = session
  if (is.null(session))
  {
    # Obtain a session from shim
    sessionid = getSession(db)
  }
  if (is.null(save)) save=""
  if (DEBUG)
  {
    message(query, "\n")
    t1 = proc.time()
  }
  ans = tryCatch(
    {
      # Assemble /execute_query parameters: session id, query text, optional
      # save format, release flag, and prefix statements joined with ";".
      args = list(id=sessionid, afl=0L, query=query, stream=0L)
      args$release = release
      args$prefix = c(getOption("scidb.prefix"), prefix)
      if (!is.null(args$prefix)) args$prefix = paste(args$prefix, collapse=";")
      args$save = save
      args = list(db=db, resource="/execute_query", args=args)
      do.call("SGET", args=args)
    }, error=function(e)
    {
      # User cancel?
      # Best-effort cleanup: cancel the query and release the session
      # before re-raising the error.
      SGET(db, "/cancel", list(id=sessionid), err=FALSE)
      SGET(db, "/release_session", list(id=sessionid), err=FALSE)
      stop(as.character(e))
    }, interrupt=function(e)
    {
      # Same cleanup on keyboard interrupt
      SGET(db, "/cancel", list(id=sessionid), err=FALSE)
      SGET(db, "/release_session", list(id=sessionid), err=FALSE)
      stop("cancelled")
    }, warning=invisible) # warnings are deliberately suppressed
  if (DEBUG) message("Query time ", round( (proc.time() - t1)[3], 4))
  if (resp) return(list(session=sessionid, response=ans))
  sessionid
}
.Matrix2scidb = function(db, X, name, rowChunkSize=1000, colChunkSize=1000, start=c(0, 0), gc=TRUE, ...)
{
  ## Upload a sparse double-precision matrix X into a SciDB array `name`.
  ## Assumes X is a column-compressed sparse Matrix object with slots
  ## @p, @i, @x (e.g. dgCMatrix) -- TODO confirm callers guarantee this.
  D = dim(X)
  rowOverlap = 0L
  colOverlap = 0L
  if (missing(start)) start=c(0, 0)
  # Normalize `start` to exactly two integer coordinates
  if (length(start) < 1) stop ("Invalid starting coordinates")
  if (length(start) > 2) start = start[1:2]
  if (length(start) < 2) start = c(start, 0)
  start = as.integer(start)
  type = .scidbtypes[[typeof(X@x)]]
  if (is.null(type)) {
    stop(paste("Unupported data type. The package presently supports: ",
       paste(.scidbtypes, collapse=" "), ".", sep=""))
  }
  if (type != "double") stop("Sorry, the package only supports double-precision sparse matrices right now.")
  # Target 2-d schema, plus a 1-d (i, j, val) staging schema used for upload
  schema = sprintf(
    "< val : %s null> [i=%.0f:%.0f,%.0f,%.0f, j=%.0f:%.0f,%.0f,%.0f]", type, start[[1]],
    nrow(X)-1+start[[1]], min(nrow(X), rowChunkSize), rowOverlap, start[[2]], ncol(X)-1+start[[2]],
    min(ncol(X), colChunkSize), colOverlap)
  schema1d = sprintf("<i:int64 null, j:int64 null, val : %s null>[idx=0:*,100000,0]", type)
  # Obtain a session from shim for the upload process
  session = getSession(db)
  if (length(session)<1) stop("SciDB http session error")
  on.exit(SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  # Compute the indices and assemble message to SciDB in the form
  # double, double, double for indices i, j and data val.
  dp = diff(X@p)
  # Expand column pointers into 0-based column indices, one per nonzero
  j = rep(seq_along(dp), dp) - 1
  # Upload the data
  bytes = .Call("scidb_raw", as.vector(t(matrix(c(X@i + start[[1]], j + start[[2]], X@x), length(X@x)))), PACKAGE="scidb")
  ans = POST(db, bytes, list(id=session))
  # shim returns the server-side name of the uploaded data; strip CR/LF
  ans = gsub("\n", "", gsub("\r", "", ans))
  # redimension into a matrix
  query = sprintf("store(redimension(input(%s,'%s',-2,'(double null,double null,double null)'),%s),%s)", schema1d, ans, schema, name)
  iquery(db, query)
  scidb(db, name, gc=gc)
}
# raw value to special 1-element SciDB array
# Stores a single R raw vector as a one-element SciDB array `name` with a
# nullable binary attribute.  Pass temp=TRUE through ... to create the
# target as a temporary array before storing.
raw2scidb = function(db, X, name, gc=TRUE, ...)
{
  if (!is.raw(X)) stop("X must be a raw value")
  args = list(...)
  # Obtain a session from shim for the upload process
  session = getSession(db)
  if (length(session)<1) stop("SciDB http session error")
  on.exit(SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  bytes = .Call("scidb_raw", X, PACKAGE="scidb")
  ans = POST(db, bytes, list(id=session))
  ans = gsub("\n", "", gsub("\r", "", ans))   # server replies with the uploaded file name
  schema = "<val:binary null>[i=0:0,1,0]"
  if (!is.null(args$temp))
  {
    if (args$temp) create_temp_array(db, name, schema)
  }
  query = sprintf("store(input(%s,'%s',-2,'(binary null)'),%s)", schema, ans, name)
  iquery(db, query)
  scidb(db, name, gc=gc)
}
# Internal utility function used to format numbers
#
# Renders each element of `w` without scientific ("E") notation for use
# inside AFL query/schema strings:
#   * infinite values become "*" (SciDB's unbounded-dimension marker),
#   * character values pass through unchanged,
#   * anything else is formatted as a plain integral decimal ("%.0f").
#
# Uses vapply() instead of sapply() so the result is always a character
# vector -- in particular, zero-length input yields character(0) rather
# than sapply()'s type-unstable list().
noE = function(w) vapply(w,
  function(x)
  {
    if (is.infinite(x)) return("*")
    if (is.character(x)) return(x)
    sprintf("%.0f", x)
  }, character(1))
#' Returns TRUE if version string x is greater than or equal to version y
#' @param x version string like "12.1", "15.12", etc. (non-numeric ignored)
#' @param y version string like "12.1", "15.12", etc. (non-numeric ignored)
#' @return logical TRUE if x is greater than or equal to y
at_least = function(x, y)
{
  # Reduce a version string to a single comparable number: major + minor/100.
  # A ".0" is appended first so one-component versions like "15" still
  # yield a minor part; alphabetic decoration and "-suffix" text are
  # stripped from each component (e.g. "15.12-beta" -> 15.12).
  score = function(v)
  {
    parts = strsplit(sprintf("%s.0", v), "\\.")[[1]]
    parts = gsub("[A-z].*", "", parts)
    parts = gsub("-.*", "", parts)
    nums = as.numeric(parts)
    nums[1] + nums[2] / 100
  }
  score(x) >= score(y)
}
# Used in delayed assignment of scidb object schema
# Looks up the schema of the SciDB expression `name` by wrapping it in a
# no-op filter(..., true) and asking the server to show() the result.
# Returns list(schema=<schema string starting at "<">).
lazyeval = function(db, name)
{
  # Escape single quotes so `name` survives embedding in the AFL string literal.
  escape = gsub("'", "\\\\'", name, perl=TRUE)
  query = iquery(db, sprintf("show('filter(%s, true)', 'afl')", escape), `return`=TRUE, binary=FALSE)
  # NOTE that we need binary=FALSE here to avoid a terrible recursion
  list(schema = gsub("^.*<", "<", query$schema, perl=TRUE))
}
#' Internal function to upload an R data frame to SciDB
#'
#' Each column is converted to its text (TSV) representation with the
#' literal string "null" standing in for missing values, uploaded through
#' a shim session, then parsed server-side either with the
#' accelerated_io_tools aio_input operator (when available) or the plain
#' input operator.
#' @param db scidb database connection
#' @param X a data frame
#' @param name SciDB array name
#' @param chunk_size optional value passed to the aio_input operator see https://github.com/Paradigm4/accelerated_io_tools
#' @param types SciDB attribute types (one per column; inferred from the column classes when NULL)
#' @param gc set to \code{TRUE} to connect SciDB array to R's garbage collector
#' @return a \code{\link{scidb}} object, or a character schema string if \code{schema_only=TRUE}.
#' @keywords internal
df2scidb = function(db, X,
                    name=tmpnam(db),
                    types=NULL,
                    chunk_size,
                    gc)
{
  .scidbenv = attr(db, "connection")
  if (!is.data.frame(X)) stop("X must be a data frame")
  if (missing(gc)) gc = FALSE
  nullable = TRUE   # NOTE(review): appears unused below -- confirm before removing
  # Derive syntactically valid, unique SciDB attribute names from the
  # data frame's column names (dots are replaced since they are not
  # usable in attribute names).
  anames = make.names(names(X), unique=TRUE)
  anames = gsub("\\.", "_", anames, perl=TRUE)
  if (length(anames) != ncol(X)) anames = make.names(1:ncol(X))
  if (!all(anames == names(X))) warning("Attribute names have been changed")
  # Default type is string
  typ = rep("string", ncol(X))
  dcast = anames
  if (!is.null(types)) {
    for (j in 1:ncol(X)) typ[j] = types[j]
  } else {
    # Infer a SciDB type per column and rewrite the column as text with
    # the literal "null" for missing values:
    #   numeric -> double, integer -> int32, integer64 -> int64,
    #   logical -> bool, character/factor -> string, POSIXct -> datetime.
    for (j in 1:ncol(X)) {
      if ("numeric" %in% class(X[, j]))
      {
        typ[j] = "double"
        X[, j] = gsub("NA", "null", sprintf("%.16f", X[, j]))
      }
      else if ("integer" %in% class(X[, j]))
      {
        typ[j] = "int32"
        X[, j] = gsub("NA", "null", sprintf("%d", X[, j]))
      }
      else if ("integer64" %in% class(X[, j]))
      {
        typ[j] = "int64"
        X[, j] = gsub("NA", "null", as.character(X[, j]))
      }
      else if ("logical" %in% class(X[, j]))
      {
        typ[j] = "bool"
        X[, j] = gsub("na", "null", tolower(sprintf("%s", X[, j])))
      }
      else if ("character" %in% class(X[, j]))
      {
        typ[j] = "string"
        X[is.na(X[, j]), j] = "null"
      }
      else if ("factor" %in% class(X[, j]))
      {
        typ[j] = "string"
        isna = is.na(X[, j])
        X[, j] = sprintf("%s", X[, j])
        if (any(isna)) X[isna, j] = "null"
      }
      else if ("POSIXct" %in% class(X[, j]))
      {
        warning("Converting R POSIXct to SciDB datetime as UTC time. Subsecond times rounded to seconds.")
        X[, j] = format(X[, j], tz="UTC")
        X[is.na(X[, j]), j] = "null"
        typ[j] = "datetime"
      }
    }
  }
  # Per-attribute cast expressions for the aio_input path, which reads
  # every field as string attributes a0, a1, ...
  for (j in 1:ncol(X))
  {
    if (typ[j] == "datetime") dcast[j] = sprintf("%s, datetime(a%d)", anames[j], j - 1)
    else if (typ[j] == "string") dcast[j] = sprintf("%s, a%d", anames[j], j - 1)
    else dcast[j] = sprintf("%s, dcast(a%d, %s(null))", anames[j], j - 1, typ[j])
  }
  # NOTE(review): `args` also appears unused in this function -- confirm.
  args = sprintf("<%s>", paste(anames, ":", typ, " null", collapse=","))
  # Obtain a session from the SciDB http service for the upload process
  session = getSession(db)
  on.exit(SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  ncolX = ncol(X)
  nrowX = nrow(X)
  # Serialize the frame as TSV text (fwrite(file=return) returns the
  # string directly), upload it, and keep the server's reply -- the name
  # of the uploaded file -- with newline characters stripped.
  X = charToRaw(fwrite(X, file=return))
  tmp = POST(db, X, list(id=session))
  tmp = gsub("\n", "", gsub("\r", "", tmp))
  # Generate a load_tools query
  aio = length(grep("aio_input", names(db))) > 0
  atts = paste(dcast, collapse=",")
  if (aio)
  {
    if (missing(chunk_size))
      LOAD = sprintf("project(apply(aio_input('%s','num_attributes=%d'),%s),%s)", tmp,
                     ncolX, atts, paste(anames, collapse=","))
    else
      LOAD = sprintf("project(apply(aio_input('%s','num_attributes=%d','chunk_size=%.0f'),%s),%s)", tmp,
                     ncolX, chunk_size, atts, paste(anames, collapse=","))
  } else
  {
    if (missing(chunk_size))
      LOAD = sprintf("input(%s, '%s', -2, 'tsv')", dfschema(anames, typ, nrowX), tmp)
    else
      LOAD = sprintf("input(%s, '%s', -2, 'tsv')", dfschema(anames, typ, nrowX, chunk_size), tmp)
  }
  query = sprintf("store(%s,%s)", LOAD, name)
  # Reuse the upload session for the store so the uploaded file is visible.
  scidbquery(db, query, release=1, session=session, stream=0L)
  scidb(db, name, gc=gc)
}
#' Fast write.table/textConnection substitute
#'
#' All conversions are vectorized and the whole output is assembled in
#' memory and emitted in one shot, which makes this a much faster
#' replacement for writing through a textConnection.  It is not a great
#' choice for writing large files: the speed gain over write.table is
#' marginal there while memory use is much higher.
#' @param x a data frame
#' @param file a connection or \code{return} to return character output directly (fast)
#' @param sep column separator
#' @param format optional sprintf-style column format specifier
#' @return Use for the side effect of writing to the connection returning \code{NULL}, or
#' return a character value when \code{file=return}.
#' @importFrom utils write.table
#' @keywords internal
fwrite = function(x, file=stdout(), sep="\t", format=paste(rep("%s", ncol(x)), collapse=sep))
{
  # Declared then removed so R CMD check does not flag the variable that
  # textConnection() creates by side effect below.
  foo = NULL
  rm(list="foo")
  if (!is.data.frame(x)) stop("x must be a data.frame")
  # sprintf() accepts a limited number of arguments, so very wide frames
  # (and a NULL file) take the slower write.table() route instead.
  if (is.null(file) || ncol(x) > 97)
  {
    sink = textConnection("foo", open="w")
    write.table(x, sep=sep, col.names=FALSE, row.names=FALSE, file=sink, quote=FALSE)
    close(sink)
    return(paste(foo, collapse="\n"))
  }
  # Fast path: one sprintf() call per row vectorized over all rows.
  out = paste(do.call("sprintf", args=c(format, as.list(x))), collapse="\n")
  if (is.function(file)) return(out)
  write(out, file=file)
  invisible()
}
# Upload an R vector or dense matrix X to a new SciDB array `name`.
# Optional hidden arguments accepted through `...`:
#   attr    - attribute name (default "val")
#   reshape - logical; FALSE skips the 1-d -> 2-d reshape for matrices
#   type    - override the stored attribute type (limited type conversion)
matvec2scidb = function(db, X,
                        name=tmpnam(db),
                        start,
                        gc=TRUE, ...)
{
  # Check for a bunch of optional hidden arguments
  args = list(...)
  attr_name = "val"
  if (!is.null(args$attr)) attr_name = as.character(args$attr) # attribute name
  do_reshape = TRUE
  type = force_type = .Rtypes[[typeof(X)]]
  if (class(X) %in% "integer64") type = force_type = "int64"
  if (is.null(type)) {
    stop(paste("Unupported data type. The package presently supports: ",
        paste(unique(names(.Rtypes)), collapse=" "), ".", sep=""))
  }
  if (!is.null(args$reshape)) do_reshape = as.logical(args$reshape) # control reshape
  if (!is.null(args$type)) force_type = as.character(args$type) # limited type conversion
  # Chunk sizes are capped at 1000 per dimension; overlap is always zero.
  chunkSize = c(min(1000L, nrow(X)), min(1000L, ncol(X)))
  chunkSize = as.numeric(chunkSize)
  if (length(chunkSize) == 1) chunkSize = c(chunkSize, chunkSize)
  overlap = c(0, 0)
  # Normalize the dimension origin to two integer coordinates.
  if (missing(start)) start = c(0, 0)
  start = as.numeric(start)
  if (length(start) ==1) start = c(start, start)
  D = dim(X)
  start = as.integer(start)
  overlap = as.integer(overlap)
  # Choose a dimension name that does not collide with the attribute name.
  dimname = make.unique_(attr_name, "i")
  if (is.null(D))
  {
    # X is a vector
    if (!is.vector(X) && !(type =="int64")) stop ("Unsupported object") # XXX bit64/integer64 bug?
    do_reshape = FALSE
    chunkSize = min(chunkSize[[1]], length(X))
    X = as.matrix(X)   # single-column matrix; loaded directly, no reshape
    schema = sprintf(
        "< %s : %s null> [%s=%.0f:%.0f,%.0f,%.0f]", attr_name, force_type, dimname, start[[1]],
        nrow(X) - 1 + start[[1]], min(nrow(X), chunkSize), overlap[[1]])
    load_schema = schema
  } else if (length(D) > 2)
  {
    # X is an n-d array
    stop("not supported yet") # XXX WRITE ME
  } else {
    # X is a matrix: load through a 1-d staging schema (__row order) and
    # reshape into the 2-d target schema in the store query below.
    schema = sprintf(
        "< %s : %s null> [i=%.0f:%.0f,%.0f,%.0f, j=%.0f:%.0f,%.0f,%.0f]", attr_name, force_type, start[[1]],
        nrow(X) - 1 + start[[1]], chunkSize[[1]], overlap[[1]], start[[2]], ncol(X) - 1 + start[[2]],
        chunkSize[[2]], overlap[[2]])
    load_schema = sprintf("<%s:%s null>[__row=1:%.0f,1000000,0]", attr_name, force_type, length(X))
  }
  if (!is.matrix(X)) stop ("X must be a matrix or a vector")
  DEBUG = getOption("scidb.debug", FALSE)
  td1 = proc.time()
  # Obtain a session from shim for the upload process
  session = getSession(db)
  on.exit( SGET(db, "/release_session", list(id=session), err=FALSE), add=TRUE)
  # Upload the data
  # t(X) because R stores matrices column-major but the 1-d load order is row-major.
  bytes = .Call("scidb_raw", as.vector(t(X)), PACKAGE="scidb")
  ans = POST(db, bytes, list(id=session))
  ans = gsub("\n", "", gsub("\r", "", ans))   # server replies with the uploaded file name
  if (DEBUG)
  {
    message("Data upload time ", (proc.time() - td1)[3], "\n")
  }
  # Load query
  if (do_reshape)
  {
    query = sprintf("store(reshape(input(%s,'%s', -2, '(%s null)'),%s),%s)", load_schema, ans, type, schema, name)
  } else
  {
    query = sprintf("store(input(%s,'%s', -2, '(%s null)'),%s)", load_schema, ans, type, name)
  }
  iquery(db, query)
  scidb(db, name, gc=gc)
}
|
# Basic tests for the IndexedRelations class.
# library(testthat); library(IndexedRelations); source("setup.R"); source("test-basic.R")

# Shared fixtures: three feature sets plus N index vectors into them.
# random_ranges() and expect_as_if() are test helpers defined in setup.R.
# The fixed seed makes the fixtures -- and the RNG draws made inside
# individual tests -- reproducible; do not reorder these calls.
set.seed(19999)
r1 <- random_ranges(20)
r2 <- random_ranges(10)
r3 <- random_ranges(40)

N <- 100
i1 <- sample(length(r1), N, replace=TRUE)
i2 <- sample(length(r2), N, replace=TRUE)
i3 <- sample(length(r3), N, replace=TRUE)

################
# Constructors #
################

test_that("basic construction works correctly", {
    IR <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
    expect_identical(length(IR), as.integer(N))

    # Unnamed partners get default X.1, X.2, ... column names.
    expected <- DataFrame(i1, i2, i3)
    colnames(expected) <- paste0("X.", 1:3)
    expect_identical(partners(IR), expected)
    expect_identical(unname(featureSets(IR)), List(r1, r2, r3))

    # Preserves names.
    IRn <- IndexedRelations(list(X=i1, Y=i2, Z=i3), list(r1, r2, r3))
    expect_identical(partnerNames(IRn), c("X", "Y", "Z"))
    IRn <- IndexedRelations(list(X=i1, Y=i2, Z=i3), list(A=r1, B=r2, C=r3))
    expect_identical(partnerNames(IRn), c("X", "Y", "Z"))

    # Handles zero-length inputs.
    ir0 <- IndexedRelations(list(integer(0), integer(0), integer(0)), list(r1, r2, r3))
    expect_identical(length(ir0), 0L)
    expect_identical(featureSets(ir0), featureSets(IR))
    ir0 <- IndexedRelations(list(integer(0), integer(0), integer(0)), list(r1[0], r2[0], r3[0]))
    expect_identical(length(ir0), 0L)
})

test_that("alternative construction works correctly", {
    ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
    ir2 <- IndexedRelations(list(r1[i1], r2[i2], r3[i3]))
    expect_as_if(ir, ir2)

    # The constructor deduplicates the supplied features per partner.
    expect_identical(featureSets(ir2)[[1]], r1[unique(i1)])
    expect_identical(featureSets(ir2)[[2]], r2[unique(i2)])
    expect_identical(featureSets(ir2)[[3]], r3[unique(i3)])

    # Preserves names.
    ir3 <- IndexedRelations(list(A=r1[i1], B=r2[i2], C=r3[i3]))
    expect_identical(partnerNames(ir3), c("A", "B", "C"))

    # Handles zero-length inputs.
    ir0 <- IndexedRelations(list(r1[0], r2[0], r3[0]))
    expect_identical(length(ir0), 0L)
})

test_that("constructors fail with invalid inputs", {
    expect_error(IndexedRelations(list(-1L, i2, i3), list(r1, r2, r3)), "out-of-bounds")
    expect_error(IndexedRelations(list(10000L, i2, i3), list(r1, r2, r3)), "out-of-bounds")
    expect_error(IndexedRelations(list(NA, i2, i3), list(r1, r2, r3)), "out-of-bounds")
    expect_error(IndexedRelations(list(i1, i2, i3), list(r1)), "should be the same")
    # expect_error(..., NA) asserts that NO error is raised.
    expect_error(IndexedRelations(list(), list()), NA)
})

########################
# Name-related methods #
########################

test_that("names getting and setting works", {
    IR <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
    expect_identical(names(IR), NULL)

    all.inters <- sprintf("LINK.%i", seq_along(IR))
    names(IR) <- all.inters
    expect_identical(names(IR), all.inters)
    names(IR) <- NULL
    expect_identical(names(IR), NULL)

    IR <- IndexedRelations(list(A=i1, B=i2, C=i3), list(r1, r2, r3))
    expect_identical(partnerNames(IR), c("A", "B", "C"))
    partnerNames(IR) <- c("C", "B", "A")
    expect_identical(partnerNames(IR), c("C", "B", "A"))
})

###################
# Partner getters #
###################

test_that("partner getter works correctly", {
    ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
    expect_identical(partnerFeatures(ir, 1), r1[i1])
    expect_identical(partnerFeatures(ir, 2), r2[i2])
    expect_identical(partnerFeatures(ir, 3), r3[i3])

    expect_identical(partners(ir)[,1], i1)
    expect_identical(partners(ir)[,2], i2)
    expect_identical(partners(ir)[,3], i3)

    # Works by name.
    ir <- IndexedRelations(list(A=i1, B=i2, C=i3), list(r1, r2, r3))
    expect_identical(partners(ir)[,"A"], i1)
    expect_identical(partners(ir)[,"B"], i2)
    expect_identical(partners(ir)[,"C"], i3)

    expect_identical(partnerFeatures(ir, "A"), r1[i1])
    expect_identical(partnerFeatures(ir, "B"), r2[i2])
    expect_identical(partnerFeatures(ir, "C"), r3[i3])

    # Works with empty inputs.
    expect_identical(partners(ir[0]), partners(ir)[0,])
    expect_identical(partnerFeatures(ir[0], 1), partnerFeatures(ir, 1)[0,])
})

test_that("partner setter works correctly (same feature set)", {
    ir.0 <- ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))

    # Re-pointing indices must not touch the feature sets themselves.
    alt <- rev(i2)
    partners(ir)[,2] <- alt
    expect_identical(partnerFeatures(ir, 2), r2[alt])
    expect_identical(partners(ir)[,2], alt)
    expect_identical(featureSets(ir), featureSets(ir.0))

    alt <- rev(r1[i1])
    partnerFeatures(ir, 1) <- alt
    expect_identical(partnerFeatures(ir, 1), alt)
    expect_identical(featureSets(ir)[[1]], unique(alt))

    # Works by name.
    ir <- IndexedRelations(list(A=i1, B=i2, C=i3), list(r1, r2, r3))
    alt <- rev(i2)
    partners(ir)[,"B"] <- alt
    expect_identical(partnerFeatures(ir, "B"), r2[alt])
    expect_identical(partners(ir)[,"B"], alt)
    expect_identical(unname(featureSets(ir)), unname(featureSets(ir.0)))

    alt <- rev(r1[i1])
    partnerFeatures(ir, "A") <- alt
    expect_identical(partnerFeatures(ir, "A"), alt)
    expect_identical(featureSets(ir)[[1]], unique(alt))
})

test_that("partner setter works correctly (different feature set)", {
    ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))

    # Guarantee uniqueness from 'r1', for easier testing.
    new.ranges <- sample(random_ranges(15), length(ir), replace=TRUE)
    width(new.ranges) <- max(width(r1))+1
    partnerFeatures(ir, 1) <- new.ranges
    expect_identical(partnerFeatures(ir, 1), new.ranges)
    expect_identical(featureSets(ir)[[1]], unique(new.ranges))

    # Partially unique from 'r2'.
    chosen <- sample(length(ir), 10)
    new.ranges <- random_ranges(10)
    width(new.ranges) <- max(width(r2))+1
    partnerFeatures(ir, 2)[chosen] <- new.ranges
    expect_identical(partnerFeatures(ir, 2)[-chosen], r2[i2[-chosen]])
    expect_identical(partnerFeatures(ir, 2)[chosen], new.ranges)

    # Handles metadata.
    new.ranges <- sample(random_ranges(15), length(ir), replace=TRUE)
    mcols(new.ranges)$blah <- runif(length(new.ranges))
    # expect_warning(..., NA) asserts that NO warning is raised.
    expect_warning(partnerFeatures(ir, 1) <- new.ranges, NA)
    expect_identical(colnames(mcols(featureSets(ir)[[1]])), "blah")
    expect_warning(partnerFeatures(ir, 3)[chosen] <- new.ranges[chosen], NA)
})

test_that("partner setter handles odd inputs correctly", {
    ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))

    # Complains with invalid values.
    alt <- ir
    expect_error(partners(alt)[,1] <- -1L, "out-of-bounds")
    expect_error(partners(alt)[,1] <- 1000L, "out-of-bounds")
    expect_error(partners(alt)[,1] <- NA_integer_, "out-of-bounds")

    # Works with empty inputs.
    alt <- ir[0]
    partners(alt[0])[0,] <- integer(0)
    expect_identical(alt, ir[0])
    partnerFeatures(alt[0], 2) <- IRanges()
    expect_identical(alt, ir[0])

    # Fails if you try to give it another class.
    library(GenomicRanges)
    expect_error(partnerFeatures(alt[0], 2) <- GRanges(), "failed to coerce")
})

###################
# Feature getters #
###################

test_that("feature getter works correctly", {
    ir <- IndexedRelations(list(i1, i2, i3), list(A=r1, B=r2, C=r3))
    expect_identical(featureSets(ir)[[1]], r1)
    expect_identical(featureSets(ir)[[2]], r2)
    expect_identical(featureSets(ir)[[3]], r3)

    # Respects names.
    partnerNames(ir) <- c("X", "Y", "Z")
    expect_identical(names(featureSets(ir)), c("X", "Y", "Z"))
})

test_that("feature setter works correctly", {
    ir <- IndexedRelations(list(i1, i2, i3), list(A=r1, B=r2, C=r3))
    featureSets(ir)[[1]] <- resize(featureSets(ir)[[1]], width=100)
    expect_identical(partnerFeatures(ir, 1), resize(r1[i1], width=100))

    # Respects being set with metadata.
    X <- runif(length(r2))
    mcols(featureSets(ir)[[2]])$blah <- X
    expect_identical(mcols(partnerFeatures(ir, 2))$blah, X[i2])

    stuff <- rnorm(length(r3))
    mcols(featureSets(ir)$X.3)$stuff <- stuff
    expect_identical(mcols(featureSets(ir)$X.3)$stuff, stuff)

    # Complains with invalid values.
    expect_error(featureSets(ir) <- featureSets(ir)[1:2], "invalid class")
    expect_error(featureSets(ir) <- endoapply(featureSets(ir), "[", 0), "out-of-bounds")
})

#################
# Other getters #
#################

test_that("metadata getting and setting works correctly", {
    ir <- IndexedRelations(list(i1, i2, i3), list(A=r1, B=r2, C=r3))
    X <- runif(length(i1))
    mcols(ir)$stuff <- X
    expect_identical(ir$stuff, X)
    ir$stuff <- X+1
    expect_identical(ir$stuff, X+1)
    ir$stuff <- NULL
    expect_identical(ir$stuff, NULL)
})

############################
# Subsetting and combining #
############################

test_that("subsetting works correctly", {
    original <- list(i1, i2, i3)
    ir <- IndexedRelations(original, list(A=r1, B=r2, C=r3))
    expect_identical(ir[1:10], IndexedRelations(lapply(original, "[", 1:10), list(A=r1, B=r2, C=r3)))
    expect_identical(ir[10:1], IndexedRelations(lapply(original, "[", 10:1), list(A=r1, B=r2, C=r3)))
})

test_that("combining works correctly", {
    # Same features.
    original <- list(i1, i2, i3)
    ir <- IndexedRelations(original, list(A=r1, B=r2, C=r3))
    ir2 <- c(ir, ir[100:80])
    modified <- mapply(c, original, lapply(original, "[", 100:80), SIMPLIFY=FALSE)
    expect_identical(ir2, IndexedRelations(modified, list(A=r1, B=r2, C=r3)))

    # Different features.
    irx <- IndexedRelations(original[c(3,1,2)], list(A=r3, B=r1, C=r2))
    ir3 <- c(ir, irx)
    for (i in 1:3) {
        expect_identical(partnerFeatures(ir3, i), c(partnerFeatures(ir, i), partnerFeatures(irx, i)))
    }
    expect_identical(featureSets(ir3)[[1]], unique(c(r1, r3)))
    expect_identical(featureSets(ir3)[[2]], unique(c(r2, r1)))
    expect_identical(featureSets(ir3)[[3]], unique(c(r3, r2)))

    # Crashes with incompatible features.
    expect_error(c(ir, IndexedRelations(original[1:2], list(A=r1, B=r2))), "feature sets")
    expect_error(c(ir, IndexedRelations(original[1:2], list(A=r1, B=r2, C=r3))), "feature sets")
})

test_that("combining works correctly with names and metadata", {
    original <- list(i1, i2, i3)
    ir <- IndexedRelations(original, list(r1, r2, r3))

    # Checking warnings upon naming.
    ir2a <- ir
    ir2b <- c(ir, ir[100:80])
    names(featureSets(ir2a)[[1]]) <- paste("REGION_", seq_along(r1))
    expect_warning(out <- c(ir2a, ir2b), "potential modification of names")
    expect_identical(names(featureSets(out)[[1]]), names(featureSets(ir2a)[[1]]))

    names(featureSets(out)[[1]]) <- NULL
    names(featureSets(out)) <- NULL
    expect_identical(out, c(ir, ir2b))

    # Equivalent warnings for metadata.
    ir2a <- ir
    mcols(featureSets(ir2a)[[1]])$X <- runif(length(r1))
    expect_warning(out <- c(ir2a, ir2b), "potential modification of metadata")
    expect_identical(mcols(featureSets(out)[[1]])$X, mcols(featureSets(ir2a)[[1]])$X)

    mcols(featureSets(out)[[1]]) <- NULL
    names(featureSets(out)) <- NULL
    expect_identical(out, c(ir, ir2b))
})

test_that("subset assignment works correctly", {
    # Same features.
    original <- list(i1, i2, i3)
    ir <- ir2 <- IndexedRelations(original, list(A=r1, B=r2, C=r3))
    ir2[21:30] <- ir[1:10]
    modified <- lapply(original, "[", c(1:20, 1:10, 31:100))
    expect_identical(ir2, IndexedRelations(modified, list(A=r1, B=r2, C=r3)))

    # Different features.
    irx <- IndexedRelations(original[c(3,1,2)], list(A=r3, B=r1, C=r2))
    ir3 <- ir
    ir3[21:30] <- irx[1:10]
    for (i in 1:3) {
        ref <- partnerFeatures(ir, i)
        ref[21:30] <- partnerFeatures(irx, i)[1:10]
        expect_identical(partnerFeatures(ir3, i), ref)
    }
    expect_identical(featureSets(ir3)[[1]], unique(c(r1, r3)))
    expect_identical(featureSets(ir3)[[2]], unique(c(r2, r1)))
    expect_identical(featureSets(ir3)[[3]], unique(c(r3, r2)))

    # Crashes with incompatible features.
    expect_error(ir[1:100] <- IndexedRelations(original[1:2], list(A=r1, B=r2)), "feature sets")
    expect_error(ir[1:100] <- IndexedRelations(original[1:2], list(A=r1, B=r2, C=r3)), "feature sets")
})
| /tests/testthat/test-basic.R | no_license | LTLA/IndexedRelations | R | false | false | 12,219 | r | # Basic tests for the IndexedRelations class.
# library(testthat); library(IndexedRelations); source("setup.R"); source("test-basic.R")
set.seed(19999)
r1 <- random_ranges(20)
r2 <- random_ranges(10)
r3 <- random_ranges(40)
N <- 100
i1 <- sample(length(r1), N, replace=TRUE)
i2 <- sample(length(r2), N, replace=TRUE)
i3 <- sample(length(r3), N, replace=TRUE)
################
# Constructors #
################
test_that("basic construction works correctly", {
IR <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
expect_identical(length(IR), as.integer(N))
expected <- DataFrame(i1, i2, i3)
colnames(expected) <- paste0("X.", 1:3)
expect_identical(partners(IR), expected)
expect_identical(unname(featureSets(IR)), List(r1, r2, r3))
# Preserves names.
IRn <- IndexedRelations(list(X=i1, Y=i2, Z=i3), list(r1, r2, r3))
expect_identical(partnerNames(IRn), c("X", "Y", "Z"))
IRn <- IndexedRelations(list(X=i1, Y=i2, Z=i3), list(A=r1, B=r2, C=r3))
expect_identical(partnerNames(IRn), c("X", "Y", "Z"))
# Handles zero-length inputs.
ir0 <- IndexedRelations(list(integer(0), integer(0), integer(0)), list(r1, r2, r3))
expect_identical(length(ir0), 0L)
expect_identical(featureSets(ir0), featureSets(IR))
ir0 <- IndexedRelations(list(integer(0), integer(0), integer(0)), list(r1[0], r2[0], r3[0]))
expect_identical(length(ir0), 0L)
})
test_that("alternative construction works correctly", {
ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
ir2 <- IndexedRelations(list(r1[i1], r2[i2], r3[i3]))
expect_as_if(ir, ir2)
expect_identical(featureSets(ir2)[[1]], r1[unique(i1)])
expect_identical(featureSets(ir2)[[2]], r2[unique(i2)])
expect_identical(featureSets(ir2)[[3]], r3[unique(i3)])
# Preserves names.
ir3 <- IndexedRelations(list(A=r1[i1], B=r2[i2], C=r3[i3]))
expect_identical(partnerNames(ir3), c("A", "B", "C"))
# Handles zero-length inputs.
ir0 <- IndexedRelations(list(r1[0], r2[0], r3[0]))
expect_identical(length(ir0), 0L)
})
test_that("constructors fail with invalid inputs", {
expect_error(IndexedRelations(list(-1L, i2, i3), list(r1, r2, r3)), "out-of-bounds")
expect_error(IndexedRelations(list(10000L, i2, i3), list(r1, r2, r3)), "out-of-bounds")
expect_error(IndexedRelations(list(NA, i2, i3), list(r1, r2, r3)), "out-of-bounds")
expect_error(IndexedRelations(list(i1, i2, i3), list(r1)), "should be the same")
expect_error(IndexedRelations(list(), list()), NA)
})
########################
# Name-related methods #
########################
test_that("names getting and setting works", {
IR <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
expect_identical(names(IR), NULL)
all.inters <- sprintf("LINK.%i", seq_along(IR))
names(IR) <- all.inters
expect_identical(names(IR), all.inters)
names(IR) <- NULL
expect_identical(names(IR), NULL)
IR <- IndexedRelations(list(A=i1, B=i2, C=i3), list(r1, r2, r3))
expect_identical(partnerNames(IR), c("A", "B", "C"))
partnerNames(IR) <- c("C", "B", "A")
expect_identical(partnerNames(IR), c("C", "B", "A"))
})
###################
# Partner getters #
###################
test_that("partner getter works correctly", {
ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
expect_identical(partnerFeatures(ir, 1), r1[i1])
expect_identical(partnerFeatures(ir, 2), r2[i2])
expect_identical(partnerFeatures(ir, 3), r3[i3])
expect_identical(partners(ir)[,1], i1)
expect_identical(partners(ir)[,2], i2)
expect_identical(partners(ir)[,3], i3)
# Works by name.
ir <- IndexedRelations(list(A=i1, B=i2, C=i3), list(r1, r2, r3))
expect_identical(partners(ir)[,"A"], i1)
expect_identical(partners(ir)[,"B"], i2)
expect_identical(partners(ir)[,"C"], i3)
expect_identical(partnerFeatures(ir, "A"), r1[i1])
expect_identical(partnerFeatures(ir, "B"), r2[i2])
expect_identical(partnerFeatures(ir, "C"), r3[i3])
# Works with empty inputs.
expect_identical(partners(ir[0]), partners(ir)[0,])
expect_identical(partnerFeatures(ir[0], 1), partnerFeatures(ir, 1)[0,])
})
test_that("partner setter works correctly (same feature set)", {
ir.0 <- ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
alt <- rev(i2)
partners(ir)[,2] <- alt
expect_identical(partnerFeatures(ir, 2), r2[alt])
expect_identical(partners(ir)[,2], alt)
expect_identical(featureSets(ir), featureSets(ir.0))
alt <- rev(r1[i1])
partnerFeatures(ir, 1) <- alt
expect_identical(partnerFeatures(ir, 1), alt)
expect_identical(featureSets(ir)[[1]], unique(alt))
# Works by name.
ir <- IndexedRelations(list(A=i1, B=i2, C=i3), list(r1, r2, r3))
alt <- rev(i2)
partners(ir)[,"B"] <- alt
expect_identical(partnerFeatures(ir, "B"), r2[alt])
expect_identical(partners(ir)[,"B"], alt)
expect_identical(unname(featureSets(ir)), unname(featureSets(ir.0)))
alt <- rev(r1[i1])
partnerFeatures(ir, "A") <- alt
expect_identical(partnerFeatures(ir, "A"), alt)
expect_identical(featureSets(ir)[[1]], unique(alt))
})
test_that("partner setter works correctly (different feature set)", {
ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
# Guarantee uniqueness from 'r1', for easier testing.
new.ranges <- sample(random_ranges(15), length(ir), replace=TRUE)
width(new.ranges) <- max(width(r1))+1
partnerFeatures(ir, 1) <- new.ranges
expect_identical(partnerFeatures(ir, 1), new.ranges)
expect_identical(featureSets(ir)[[1]], unique(new.ranges))
# Partially unique from 'r2'.
chosen <- sample(length(ir), 10)
new.ranges <- random_ranges(10)
width(new.ranges) <- max(width(r2))+1
partnerFeatures(ir, 2)[chosen] <- new.ranges
expect_identical(partnerFeatures(ir, 2)[-chosen], r2[i2[-chosen]])
expect_identical(partnerFeatures(ir, 2)[chosen], new.ranges)
# Handles metadata.
new.ranges <- sample(random_ranges(15), length(ir), replace=TRUE)
mcols(new.ranges)$blah <- runif(length(new.ranges))
expect_warning(partnerFeatures(ir, 1) <- new.ranges, NA)
expect_identical(colnames(mcols(featureSets(ir)[[1]])), "blah")
expect_warning(partnerFeatures(ir, 3)[chosen] <- new.ranges[chosen], NA)
})
test_that("partner setter handles odd inputs correctly", {
ir <- IndexedRelations(list(i1, i2, i3), list(r1, r2, r3))
# Complains with invalid values.
alt <- ir
expect_error(partners(alt)[,1] <- -1L, "out-of-bounds")
expect_error(partners(alt)[,1] <- 1000L, "out-of-bounds")
expect_error(partners(alt)[,1] <- NA_integer_, "out-of-bounds")
# Works with empty inputs.
alt <- ir[0]
partners(alt[0])[0,] <- integer(0)
expect_identical(alt, ir[0])
partnerFeatures(alt[0], 2) <- IRanges()
expect_identical(alt, ir[0])
# Fails if you try to give it another class.
library(GenomicRanges)
expect_error(partnerFeatures(alt[0], 2) <- GRanges(), "failed to coerce")
})
###################
# Feature getters #
###################
test_that("feature getter works correctly", {
ir <- IndexedRelations(list(i1, i2, i3), list(A=r1, B=r2, C=r3))
expect_identical(featureSets(ir)[[1]], r1)
expect_identical(featureSets(ir)[[2]], r2)
expect_identical(featureSets(ir)[[3]], r3)
# Respects names.
partnerNames(ir) <- c("X", "Y", "Z")
expect_identical(names(featureSets(ir)), c("X", "Y", "Z"))
})
test_that("feature setter works correctly", {
ir <- IndexedRelations(list(i1, i2, i3), list(A=r1, B=r2, C=r3))
featureSets(ir)[[1]] <- resize(featureSets(ir)[[1]], width=100)
expect_identical(partnerFeatures(ir, 1), resize(r1[i1], width=100))
# Respects being set with metadata.
X <- runif(length(r2))
mcols(featureSets(ir)[[2]])$blah <- X
expect_identical(mcols(partnerFeatures(ir, 2))$blah, X[i2])
stuff <- rnorm(length(r3))
mcols(featureSets(ir)$X.3)$stuff <- stuff
expect_identical(mcols(featureSets(ir)$X.3)$stuff, stuff)
# Complains with invalid values.
expect_error(featureSets(ir) <- featureSets(ir)[1:2], "invalid class")
expect_error(featureSets(ir) <- endoapply(featureSets(ir), "[", 0), "out-of-bounds")
})
#################
# Other getters #
#################
test_that("metadata getting and setting works correctly", {
ir <- IndexedRelations(list(i1, i2, i3), list(A=r1, B=r2, C=r3))
X <- runif(length(i1))
mcols(ir)$stuff <- X
expect_identical(ir$stuff, X)
ir$stuff <- X+1
expect_identical(ir$stuff, X+1)
ir$stuff <- NULL
expect_identical(ir$stuff, NULL)
})
############################
# Subsetting and combining #
############################
test_that("subsetting works correctly", {
original <- list(i1, i2, i3)
ir <- IndexedRelations(original, list(A=r1, B=r2, C=r3))
expect_identical(ir[1:10], IndexedRelations(lapply(original, "[", 1:10), list(A=r1, B=r2, C=r3)))
expect_identical(ir[10:1], IndexedRelations(lapply(original, "[", 10:1), list(A=r1, B=r2, C=r3)))
})
test_that("combining works correctly", {
# Same features.
original <- list(i1, i2, i3)
ir <- IndexedRelations(original, list(A=r1, B=r2, C=r3))
ir2 <- c(ir, ir[100:80])
modified <- mapply(c, original, lapply(original, "[", 100:80), SIMPLIFY=FALSE)
expect_identical(ir2, IndexedRelations(modified, list(A=r1, B=r2, C=r3)))
# Different features.
irx <- IndexedRelations(original[c(3,1,2)], list(A=r3, B=r1, C=r2))
ir3 <- c(ir, irx)
for (i in 1:3) {
expect_identical(partnerFeatures(ir3, i), c(partnerFeatures(ir, i), partnerFeatures(irx, i)))
}
expect_identical(featureSets(ir3)[[1]], unique(c(r1, r3)))
expect_identical(featureSets(ir3)[[2]], unique(c(r2, r1)))
expect_identical(featureSets(ir3)[[3]], unique(c(r3, r2)))
# Crashes with incompatible features.
expect_error(c(ir, IndexedRelations(original[1:2], list(A=r1, B=r2))), "feature sets")
expect_error(c(ir, IndexedRelations(original[1:2], list(A=r1, B=r2, C=r3))), "feature sets")
})
# Combining should warn when merging feature sets would clobber per-feature
# names or mcols metadata, but the first object's decorations must survive.
test_that("combining works correctly with names and metadata", {
original <- list(i1, i2, i3)
ir <- IndexedRelations(original, list(r1, r2, r3))
# Checking warnings upon naming.
ir2a <- ir
ir2b <- c(ir, ir[100:80])
names(featureSets(ir2a)[[1]]) <- paste("REGION_", seq_along(r1))
expect_warning(out <- c(ir2a, ir2b), "potential modification of names")
expect_identical(names(featureSets(out)[[1]]), names(featureSets(ir2a)[[1]]))
# After stripping the decorations, the result should equal a plain combine.
names(featureSets(out)[[1]]) <- NULL
names(featureSets(out)) <- NULL
expect_identical(out, c(ir, ir2b))
# Equivalent warnings for metadata.
ir2a <- ir
mcols(featureSets(ir2a)[[1]])$X <- runif(length(r1))
expect_warning(out <- c(ir2a, ir2b), "potential modification of metadata")
expect_identical(mcols(featureSets(out)[[1]])$X, mcols(featureSets(ir2a)[[1]])$X)
mcols(featureSets(out)[[1]]) <- NULL
# NOTE(review): clearing names(featureSets(out)) here mirrors the cleanup in
# the names branch above; confirm combining also names the list in this path.
names(featureSets(out)) <- NULL
expect_identical(out, c(ir, ir2b))
})
# Subset assignment (ir[i] <- value) should splice the replacement relations
# into place, merging feature sets when the two objects use different ones.
test_that("subset assignment works correctly", {
# Same features: indices 21:30 are overwritten by relations 1:10.
original <- list(i1, i2, i3)
ir <- ir2 <- IndexedRelations(original, list(A=r1, B=r2, C=r3))
ir2[21:30] <- ir[1:10]
modified <- lapply(original, "[", c(1:20, 1:10, 31:100))
expect_identical(ir2, IndexedRelations(modified, list(A=r1, B=r2, C=r3)))
# Different features: compare via the resolved partner features, since the
# raw indices get remapped into the merged feature sets.
irx <- IndexedRelations(original[c(3,1,2)], list(A=r3, B=r1, C=r2))
ir3 <- ir
ir3[21:30] <- irx[1:10]
for (i in 1:3) {
ref <- partnerFeatures(ir, i)
ref[21:30] <- partnerFeatures(irx, i)[1:10]
expect_identical(partnerFeatures(ir3, i), ref)
}
expect_identical(featureSets(ir3)[[1]], unique(c(r1, r3)))
expect_identical(featureSets(ir3)[[2]], unique(c(r2, r1)))
expect_identical(featureSets(ir3)[[3]], unique(c(r3, r2)))
# Crashes with incompatible feature sets, as for c().
expect_error(ir[1:100] <- IndexedRelations(original[1:2], list(A=r1, B=r2)), "feature sets")
expect_error(ir[1:100] <- IndexedRelations(original[1:2], list(A=r1, B=r2, C=r3)), "feature sets")
})
|
# Quick data overview: load the cleaned swap datasets (assumes the .RData
# files live in the working directory) and eyeball reaction times and the
# set of subjects in each version.
library(tidyverse)
load("swap_v1_clean.RData")
load("swap_v2_clean.RData")
# Distribution of reaction times per experiment version.
hist(swap_v1_clean$rt)
hist(swap_v2_clean$rt)
# Create fixations and expdata files
unique(swap_v1_clean$subject)
unique(swap_v2_clean$subject)
| /3_experiment/3_3_data_analysis_md/R/.ipynb_checkpoints/data_overview-checkpoint.R | no_license | danieljwilson/MADE | R | false | false | 223 | r |
# Quick data overview: load the cleaned swap datasets (assumes the .RData
# files live in the working directory) and eyeball reaction times and the
# set of subjects in each version.
library(tidyverse)
load("swap_v1_clean.RData")
load("swap_v2_clean.RData")
# Distribution of reaction times per experiment version.
hist(swap_v1_clean$rt)
hist(swap_v2_clean$rt)
# Create fixations and expdata files
unique(swap_v1_clean$subject)
unique(swap_v2_clean$subject)
|
#' Lake locations available for summarization
#'
#' Reads the depth-data summary table and returns the unique site
#' identifiers found in it.
#'
#' NOTE(review): the original description said this "returns the lake
#' locations as a geom", but the function actually returns ids, not a geom;
#' presumably callers feed these ids to stencil_from_id(). Confirm.
#'
#' @return a one-column data.frame of the unique values of `id`.
lake_summary_locations <- function(){
sites <- read.csv('data/depth_data_linked/depth_data_summary.csv', stringsAsFactors = FALSE)
# `sites[c('id')]` keeps a data.frame (unlike `sites$id`), so unique()
# de-duplicates rows and the result stays a data.frame.
return(unique(sites[c('id')]))
}
#' Build a geoknife stencil from a set of lake ids
#'
#' Looks up the centroid longitude/latitude of each requested id in the NHD
#' centroid table and wraps the coordinates in a geoknife simplegeom.
#'
#' @param ids vector of lake ids; each should appear in
#'   data/NHD_summ/nhd_centroids.csv.
#' @return a simplegeom whose columns are c(lon, lat), one per id, named by id.
stencil_from_id <- function(ids){
  sites <- read.csv('data/NHD_summ/nhd_centroids.csv', stringsAsFactors = FALSE)
  # Collect one c(lon, lat) column per requested id up front; building the
  # full list and converting once avoids the O(n^2) cbind() growth of the
  # original loop and also behaves correctly for zero-length `ids`.
  coords <- lapply(ids, function(id) {
    site <- sites[sites$id == id, ]
    c(site$lon, site$lat)
  })
  # optional = TRUE keeps the ids as column names verbatim (no make.names),
  # matching the original `names(df) <- ids[i]` behaviour.
  geom <- as.data.frame(setNames(coords, ids), optional = TRUE)
  simplegeom(geom)
} | /scripts/lake_summary_locations.R | permissive | USGS-R/necsc-lake-modeling | R | false | false | 668 | r | #' this returns the lake locations as a geom that can be used by geoknife.
#' Lake locations available for summarization
#'
#' Reads the depth-data summary table and returns the unique site
#' identifiers found in it. NOTE(review): despite the surrounding roxygen
#' text about returning "a geom", this returns ids, not a geom; presumably
#' callers feed these ids to stencil_from_id(). Confirm.
#'
#' @return a one-column data.frame of the unique values of `id`.
lake_summary_locations <- function(){
sites <- read.csv('data/depth_data_linked/depth_data_summary.csv', stringsAsFactors = FALSE)
# `sites[c('id')]` keeps a data.frame (unlike `sites$id`), so unique()
# de-duplicates rows and the result stays a data.frame.
return(unique(sites[c('id')]))
}
#' Build a geoknife stencil from a set of lake ids
#'
#' Looks up the centroid longitude/latitude of each requested id in the NHD
#' centroid table and wraps the coordinates in a geoknife simplegeom.
#'
#' @param ids vector of lake ids; each should appear in
#'   data/NHD_summ/nhd_centroids.csv.
#' @return a simplegeom whose columns are c(lon, lat), one per id, named by id.
stencil_from_id <- function(ids){
  sites <- read.csv('data/NHD_summ/nhd_centroids.csv', stringsAsFactors = FALSE)
  # Collect one c(lon, lat) column per requested id up front; building the
  # full list and converting once avoids the O(n^2) cbind() growth of the
  # original loop and also behaves correctly for zero-length `ids`.
  coords <- lapply(ids, function(id) {
    site <- sites[sites$id == id, ]
    c(site$lon, site$lat)
  })
  # optional = TRUE keeps the ids as column names verbatim (no make.names),
  # matching the original `names(df) <- ids[i]` behaviour.
  geom <- as.data.frame(setNames(coords, ids), optional = TRUE)
  simplegeom(geom)
}
library(lidR)
rm(list = ls(globalenv()))
# ======================================
# OTHER LASFUNCTIONS
# ======================================
# We have already seen several function families:
#
# - io: to read and write LAS object in las/laz files
# - filter_*: to select points of interest (return LAS objects)
# - clip_*: to select regions of interest (return LAS objects)
# - grid_*: to rasterize the point cloud (return Raster* objects)
#
# We now introduce the other functions that return LAS objects
las = readLAS("data/MixedEucaNat_normalized.laz")
# A. merge_spatial: merge geographic data with the point cloud
# ==========================================================
# 1. With a shapefile of polygons
# -------------------------------
# Load a shapefile
eucalyptus = shapefile("data/shapefiles/MixedEucaNat.shp")
# Merge with the point cloud: adds a logical "in_plantation" attribute that is
# TRUE for points falling inside a polygon.
# NOTE(review): lasmergespatial is the pre-3.0 name; merge_spatial is used
# below for the raster case — confirm which lidR version this targets.
lasc = lasmergespatial(las, eucalyptus, "in_plantation")
lasc
# Visualize
plot(lasc, color = "in_plantation")
# Do something: here, for the example, we simply filter the points. You can imagine any application
not_plantation = filter_poi(lasc, in_plantation == FALSE)
plot(not_plantation)
# 2. With a raster
# ----------------------------
# /!\ NOTICE /!\
# In the past it was possible to get easy access to the google map API via R to get satellite
# images. The former example consisted of RGB colorization of the point cloud. It is no longer
# possible to access the google API without a registration key, so the RGB colorization was
# replaced by a less nice example.
# Make a raster. Here a CHM
chm = grid_canopy(las, 1, p2r())
plot(chm, col = height.colors(50))
# Merge with the point cloud: each point gets the CHM value at its location
# as a new "hchm" attribute.
lasc = merge_spatial(las, chm, "hchm")
lasc
# Do something. Here for the example we simply filter a layer below the canopy.
# You can imagine any application. RGB colorization was one of them.
layer = filter_poi(lasc, Z > hchm - 1)
plot(layer)
not_layer = filter_poi(lasc, Z < hchm - 1)
plot(not_layer)
# Former example: this works only with a google map API key.
# ------------------------------------------------------------------
library(dismo)
bbox <- extent(las)
proj4 <- proj4string(las)
r <- raster()
extent(r) <- bbox
proj4string(r) <- proj4
gm <- gmap(x = r, type = "satellite", scale = 2, rgb = TRUE)
plotRGB(gm)
gm <- projectRaster(gm, crs = proj4)
las <- merge_spatial(las, gm)
plot(las, color = "RGB")
las_check(las)
# B. Memory usage consideration
# ===============================
# Point data is shared between objects, so the combined footprint is smaller
# than the sum of the individual sizes.
pryr::object_size(las)
pryr::object_size(lasc)
pryr::object_size(las, lasc)
# This is true for any function that does not change the number of points, i.e.
# almost all the functions except filter_*, clip_*
# C. smooth_height: point-cloud-based smoothing
# =========================================
# Smooth the point cloud
lass = smooth_height(las, 4)
plot(lass)
# It is not really useful on the raw cloud. It may become interesting combined
# with filter_surfacepoints (formerly lasfiltersurfacepoints).
lassp = filter_surfacepoints(las, 0.5)
lass = smooth_height(lassp, 2)
plot(lassp)
plot(lass)
# D. add_attribute: add data to a LAS object
# ========================================
A <- runif(nrow(las@data), 10, 100)
# Forbidden: assigning a new attribute with $<- errors by design...
las$Amplitude <- A
# ...the reason is to force the user to read the documentation of lasadddata
?add_attribute
# add_attribute does what you might expect using <-
# (NOTE(review): "Amplitute" is misspelled in the original workshop code;
# kept as-is since the name is only used for this throwaway example.)
las_new = add_attribute(las, A, "Amplitute")
# But the header is not updated
las_new@header
# add_lasattribute actually adds data in a way that enables the data to be written in las files
las_new = add_lasattribute(las, A, "Amplitude", "Pulse amplitude")
# The header has been updated
las_new@header
# E: classify_ground: segment ground points
# =======================================
las = readLAS("data/MixedEucaNat.laz")
plot(las)
# The original file contains an outlier
hist(las$Z, n = 30)
range(las$Z)
# Read the file skipping the outlier (streaming filter applied at read time)
las = readLAS("data/MixedEucaNat.laz", filter = "-drop_z_below 740")
plot(las)
# The file is already classified. For the purpose of the example we can clear this classification
las$Classification = 0 # Error, explain why.
plot(las, color = "Classification")
# Segment the ground points with classify_ground using the cloth simulation
# filter (csf) algorithm
las = classify_ground(las, csf(rigidness = 2.5))
plot(las, color = "Classification")
ground = filter_ground(las)
plot(ground)
# F. normalize_height: remove topography
# ===================================
# 1. With a DTM
# -------------
dtm = grid_terrain(las, 1, tin())
plot(dtm, col = height.colors(50))
plot_dtm3d(dtm)
# Subtracting a raster DTM normalizes each point by its cell's elevation
lasn = las - dtm
plot(lasn)
las_check(lasn)
# 2. Without a DTM
# -----------------
# Interpolates the ground at each point's exact location instead of per cell
lasn = normalize_height(las, tin())
plot(lasn)
las_check(lasn)
# Explain the difference between the two methods
# G. Other functions
# ========================
# find_trees (see next section)
# segment_trees (see next section)
# segment_snags
# segment_shapes
# unsmooth_height
# unormalize_height
# classify_noise
# decimate_points
# ...
| /2 days/Day 1/1-7 lasf.r | no_license | paulterinho/lidRworkshop | R | false | false | 4,978 | r | library(lidR)
rm(list = ls(globalenv()))
# ======================================
# OTHER LASFUNCTIONS
# ======================================
# We have already seen several function families:
#
# - io: to read and write LAS object in las/laz files
# - filter_*: to select points of interest (return LAS objects)
# - clip_*: to select regions of interest (return LAS objects)
# - grid_*: to rasterize the point cloud (return Raster* objects)
#
# We now introduce the other functions that return LAS objects
las = readLAS("data/MixedEucaNat_normalized.laz")
# A. merge_spatial: merge geographic data with the point cloud
# ==========================================================
# 1. With a shapefile of polygons
# -------------------------------
# Load a shapefile
eucalyptus = shapefile("data/shapefiles/MixedEucaNat.shp")
# Merge with the point cloud: adds a logical "in_plantation" attribute that is
# TRUE for points falling inside a polygon.
# NOTE(review): lasmergespatial is the pre-3.0 name; merge_spatial is used
# below for the raster case — confirm which lidR version this targets.
lasc = lasmergespatial(las, eucalyptus, "in_plantation")
lasc
# Visualize
plot(lasc, color = "in_plantation")
# Do something: here, for the example, we simply filter the points. You can imagine any application
not_plantation = filter_poi(lasc, in_plantation == FALSE)
plot(not_plantation)
# 2. With a raster
# ----------------------------
# /!\ NOTICE /!\
# In the past it was possible to get easy access to the google map API via R to get satellite
# images. The former example consisted of RGB colorization of the point cloud. It is no longer
# possible to access the google API without a registration key, so the RGB colorization was
# replaced by a less nice example.
# Make a raster. Here a CHM
chm = grid_canopy(las, 1, p2r())
plot(chm, col = height.colors(50))
# Merge with the point cloud: each point gets the CHM value at its location
# as a new "hchm" attribute.
lasc = merge_spatial(las, chm, "hchm")
lasc
# Do something. Here for the example we simply filter a layer below the canopy.
# You can imagine any application. RGB colorization was one of them.
layer = filter_poi(lasc, Z > hchm - 1)
plot(layer)
not_layer = filter_poi(lasc, Z < hchm - 1)
plot(not_layer)
# Former example: this works only with a google map API key.
# ------------------------------------------------------------------
library(dismo)
bbox <- extent(las)
proj4 <- proj4string(las)
r <- raster()
extent(r) <- bbox
proj4string(r) <- proj4
gm <- gmap(x = r, type = "satellite", scale = 2, rgb = TRUE)
plotRGB(gm)
gm <- projectRaster(gm, crs = proj4)
las <- merge_spatial(las, gm)
plot(las, color = "RGB")
las_check(las)
# B. Memory usage consideration
# ===============================
# Point data is shared between objects, so the combined footprint is smaller
# than the sum of the individual sizes.
pryr::object_size(las)
pryr::object_size(lasc)
pryr::object_size(las, lasc)
# This is true for any function that does not change the number of points, i.e.
# almost all the functions except filter_*, clip_*
# C. smooth_height: point-cloud-based smoothing
# =========================================
# Smooth the point cloud
lass = smooth_height(las, 4)
plot(lass)
# It is not really useful on the raw cloud. It may become interesting combined
# with filter_surfacepoints (formerly lasfiltersurfacepoints).
lassp = filter_surfacepoints(las, 0.5)
lass = smooth_height(lassp, 2)
plot(lassp)
plot(lass)
# D. add_attribute: add data to a LAS object
# ========================================
A <- runif(nrow(las@data), 10, 100)
# Forbidden: assigning a new attribute with $<- errors by design...
las$Amplitude <- A
# ...the reason is to force the user to read the documentation of lasadddata
?add_attribute
# add_attribute does what you might expect using <-
# (NOTE(review): "Amplitute" is misspelled in the original workshop code;
# kept as-is since the name is only used for this throwaway example.)
las_new = add_attribute(las, A, "Amplitute")
# But the header is not updated
las_new@header
# add_lasattribute actually adds data in a way that enables the data to be written in las files
las_new = add_lasattribute(las, A, "Amplitude", "Pulse amplitude")
# The header has been updated
las_new@header
# E: classify_ground: segment ground points
# =======================================
las = readLAS("data/MixedEucaNat.laz")
plot(las)
# The original file contains an outlier
hist(las$Z, n = 30)
range(las$Z)
# Read the file skipping the outlier (streaming filter applied at read time)
las = readLAS("data/MixedEucaNat.laz", filter = "-drop_z_below 740")
plot(las)
# The file is already classified. For the purpose of the example we can clear this classification
las$Classification = 0 # Error, explain why.
plot(las, color = "Classification")
# Segment the ground points with classify_ground using the cloth simulation
# filter (csf) algorithm
las = classify_ground(las, csf(rigidness = 2.5))
plot(las, color = "Classification")
ground = filter_ground(las)
plot(ground)
# F. normalize_height: remove topography
# ===================================
# 1. With a DTM
# -------------
dtm = grid_terrain(las, 1, tin())
plot(dtm, col = height.colors(50))
plot_dtm3d(dtm)
# Subtracting a raster DTM normalizes each point by its cell's elevation
lasn = las - dtm
plot(lasn)
las_check(lasn)
# 2. Without a DTM
# -----------------
# Interpolates the ground at each point's exact location instead of per cell
lasn = normalize_height(las, tin())
plot(lasn)
las_check(lasn)
# Explain the difference between the two methods
# G. Other functions
# ========================
# find_trees (see next section)
# segment_trees (see next section)
# segment_snags
# segment_shapes
# unsmooth_height
# unormalize_height
# classify_noise
# decimate_points
# ...
|
# Simulation: compare how well the median and Huber's M-estimator recover the
# true mean of contaminated normal samples, across sample sizes (n) and
# coefficients of variation (cv), then plot the central 95% of the relative
# deviation against the estimated relative standard error.
# NOTE(review): this script also relies on plyr (ldply/rdply), dplyr/magrittr,
# ggplot2, scales::percent, getMufromAlgA, theme_Publication and base.dir,
# which must be provided elsewhere in the project — confirm load order.
library(forcats)
funcs <- list('median' = median,
'huberM' = getMufromAlgA)
# NOTE(review): set.seed() expects an integer; a character seed errors on
# current R — confirm this still runs as intended.
set.seed('0304')
res <- ldply(4:100, function(n){
d <- ldply(seq.int(0.01, 0.1, by=.001), function(cv){
# 200 replicates per (n, cv) cell.
r <- rdply(200, {
mean <- runif(1, 1, 50)
sd <- mean*cv
# 10-20% of n additional outliers, placed far from the mean on either side.
nOut <- round(n*runif(1, 0.1, 0.2))
samples <- rnorm(n, mean=mean, sd=sd)
samples <- c(samples,
runif(nOut, 3, 5)*mean*sd*(round(runif(nOut,0,1))*2-1))
# Relative deviation of each estimator from the true mean.
ldply(funcs, function(f){
data.frame(diff = (mean-f(samples))/mean)
}, .id='method')
}, .id = NULL)
r$cv <- cv
r
})
d$n <- n
d
}, .progress = 'text')
# 1.253 = sqrt(pi/2): standard-error inflation factor of the median relative
# to the mean, used here as the x-axis "estimated relative standard error".
resAnalysed <- res %>%
mutate(e=1.253*round(cv/sqrt(n), 3)) %>%
group_by(e, method) %>%
summarise(p975 = quantile(diff, .975),
p025 = quantile(diff, .025), n=n())
ggplot(resAnalysed) +
geom_ribbon(aes(x=e, ymin=p025, ymax=p975, fill=method),
alpha=.5) +
theme_Publication() +
scale_y_continuous(label=percent) +
xlab('estimated relative standard error') +
ylab("relative deviation from true mean\n (central 95%)")
ggsave(paste0(base.dir, 'fig/stdError.png'),
dpi = 600, width = 85, height= 100, units='mm')
| /R/sim.R | no_license | acnb/Glucose-EQA | R | false | false | 1,207 | r | library(forcats)
# Simulation: compare how well the median and Huber's M-estimator recover the
# true mean of contaminated normal samples, across sample sizes (n) and
# coefficients of variation (cv), then plot the central 95% of the relative
# deviation against the estimated relative standard error.
# NOTE(review): this script also relies on plyr (ldply/rdply), dplyr/magrittr,
# ggplot2, scales::percent, getMufromAlgA, theme_Publication and base.dir,
# which must be provided elsewhere in the project — confirm load order.
funcs <- list('median' = median,
'huberM' = getMufromAlgA)
# NOTE(review): set.seed() expects an integer; a character seed errors on
# current R — confirm this still runs as intended.
set.seed('0304')
res <- ldply(4:100, function(n){
d <- ldply(seq.int(0.01, 0.1, by=.001), function(cv){
# 200 replicates per (n, cv) cell.
r <- rdply(200, {
mean <- runif(1, 1, 50)
sd <- mean*cv
# 10-20% of n additional outliers, placed far from the mean on either side.
nOut <- round(n*runif(1, 0.1, 0.2))
samples <- rnorm(n, mean=mean, sd=sd)
samples <- c(samples,
runif(nOut, 3, 5)*mean*sd*(round(runif(nOut,0,1))*2-1))
# Relative deviation of each estimator from the true mean.
ldply(funcs, function(f){
data.frame(diff = (mean-f(samples))/mean)
}, .id='method')
}, .id = NULL)
r$cv <- cv
r
})
d$n <- n
d
}, .progress = 'text')
# 1.253 = sqrt(pi/2): standard-error inflation factor of the median relative
# to the mean, used here as the x-axis "estimated relative standard error".
resAnalysed <- res %>%
mutate(e=1.253*round(cv/sqrt(n), 3)) %>%
group_by(e, method) %>%
summarise(p975 = quantile(diff, .975),
p025 = quantile(diff, .025), n=n())
ggplot(resAnalysed) +
geom_ribbon(aes(x=e, ymin=p025, ymax=p975, fill=method),
alpha=.5) +
theme_Publication() +
scale_y_continuous(label=percent) +
xlab('estimated relative standard error') +
ylab("relative deviation from true mean\n (central 95%)")
ggsave(paste0(base.dir, 'fig/stdError.png'),
dpi = 600, width = 85, height= 100, units='mm')
|
# Assignment: ASSIGNMENT 6
# Name: Stoneburner, Kurt
# Date: 2020-06-29
## Set the working directory to the root of your DSC 520 directory
setwd("C:\\Users\\newcomb\\DSCProjects\\dsc520_github")
#setwd("L:\\stonk\\projects\\DSC\\dsc520")
## Load the `data/r4ds/heights.csv` data set
heights_df <- read.csv("data/r4ds/heights.csv")
head(heights_df)
## Load the ggplot2 library
library(ggplot2)
## Fit a linear model using the `age` variable as the predictor and `earn` as the outcome
age_lm <- lm(earn ~ age, data=heights_df)
## View the summary of your model using `summary()`
summary(age_lm)
## R-squared is 0.006561; its sqrt() gives a correlation coefficient of .081,
## showing very little correlation between age and earnings.
## The p-value (0.923) is far above any usual significance level, so there is
## no evidence against the null hypothesis. (Note: a p-value is NOT "the
## chance the null hypothesis is true" — it is the probability of data at
## least this extreme given the null.)
## The null hypothesis here is that age has no linear effect on earnings.
## Creating predictions using `predict()`
age_predict_df <- data.frame(earn = predict(age_lm, newdata=heights_df), age=heights_df$age)
head(age_predict_df) ## remove
summary(age_predict_df) ## Remove
## Plot the predictions (fitted line) against the original data
ggplot(data = heights_df, aes(y = earn, x = age)) +
geom_point(color='blue') +
geom_line(color='red',data = age_predict_df, aes(y=earn, x=age))
mean_earn <- mean(heights_df$earn)
## Corrected Sum of Squares Total (SST)
## mean_earn - each value of earn measures the distance from the mean (the
## expected value under the null model). The difference is squared (so
## negatives cannot cancel positives) and summed over all data points.
## A large value means many points lie far from the mean.
sst <- sum((mean_earn - heights_df$earn)^2)
## Corrected Sum of Squares for Model (SSM)
## Sum of squared distances between the mean and the model's fitted values:
## the amount of variation the regression explains.
ssm <- sum((mean_earn - age_predict_df$earn)^2)
## Residuals
## Difference between the observed data and the model's fitted values,
## one per observation.
residuals <- heights_df$earn - age_predict_df$earn
## Sum of Squares for Error (SSE)
## Total squared residual error. Here SSE is close to SST: the model barely
## improves on just using the mean.
sse <- sum(residuals^2)
## R Squared: R^2 = SSM / SST
## Fraction of total variation explained by the model; closer to 1 means a
## stronger fit. This value is tiny, so the model explains almost nothing —
## consistent with the scatterplot.
r_squared <- ssm / sst
## Number of observations: derive from the data instead of hard-coding 1192,
## so the calculation stays correct if the CSV changes.
n <- nrow(heights_df)
## Number of regression parameters (intercept + age)
p <- 2
## Corrected Degrees of Freedom for Model (p - 1)
dfm <- p - 1
## Degrees of Freedom for Error (n - p)
dfe <- n - p
## Corrected Degrees of Freedom Total: DFT = n - 1
dft <- n - 1
## Mean of Squares for Model: MSM = SSM / DFM
msm <- ssm / dfm
## Mean of Squares for Error: MSE = SSE / DFE
mse <- sse / dfe
## Mean of Squares Total: MST = SST / DFT
mst <- sst / dft
## F Statistic: F = MSM / MSE
f_score <- msm / mse
f_score
## Adjusted R Squared: R2_adj = 1 - (1 - R2) * (n - 1) / (n - p)
adjusted_r_squared <- 1 - (1 - r_squared) * dft / dfe
adjusted_r_squared
## Calculate the p-value from the F distribution.
## The denominator degrees of freedom are the ERROR df (n - p), not the
## total df (n - 1) that the original code passed; with dfe this matches
## the p-value reported by summary(age_lm).
p_value <- pf(f_score, dfm, dfe, lower.tail = FALSE)
p_value
| /DSC520/wk05/assignment_06_StoneburnerKurt.R | no_license | RachelONelson/DSC | R | false | false | 3,696 | r | # Assignment: ASSIGNMENT 6
# Name: Stoneburner, Kurt
# Date: 2020-06-29
## Set the working directory to the root of your DSC 520 directory
setwd("C:\\Users\\newcomb\\DSCProjects\\dsc520_github")
#setwd("L:\\stonk\\projects\\DSC\\dsc520")
## Load the `data/r4ds/heights.csv` data set
heights_df <- read.csv("data/r4ds/heights.csv")
head(heights_df)
## Load the ggplot2 library
library(ggplot2)
## Fit a linear model using the `age` variable as the predictor and `earn` as the outcome
age_lm <- lm(earn ~ age, data=heights_df)
## View the summary of your model using `summary()`
summary(age_lm)
## R-squared is 0.006561; its sqrt() gives a correlation coefficient of .081,
## showing very little correlation between age and earnings.
## The p-value (0.923) is far above any usual significance level, so there is
## no evidence against the null hypothesis. (Note: a p-value is NOT "the
## chance the null hypothesis is true" — it is the probability of data at
## least this extreme given the null.)
## The null hypothesis here is that age has no linear effect on earnings.
## Creating predictions using `predict()`
age_predict_df <- data.frame(earn = predict(age_lm, newdata=heights_df), age=heights_df$age)
head(age_predict_df) ## remove
summary(age_predict_df) ## Remove
## Plot the predictions (fitted line) against the original data
ggplot(data = heights_df, aes(y = earn, x = age)) +
geom_point(color='blue') +
geom_line(color='red',data = age_predict_df, aes(y=earn, x=age))
mean_earn <- mean(heights_df$earn)
## Corrected Sum of Squares Total (SST)
## mean_earn - each value of earn measures the distance from the mean (the
## expected value under the null model). The difference is squared (so
## negatives cannot cancel positives) and summed over all data points.
## A large value means many points lie far from the mean.
sst <- sum((mean_earn - heights_df$earn)^2)
## Corrected Sum of Squares for Model (SSM)
## Sum of squared distances between the mean and the model's fitted values:
## the amount of variation the regression explains.
ssm <- sum((mean_earn - age_predict_df$earn)^2)
## Residuals
## Difference between the observed data and the model's fitted values,
## one per observation.
residuals <- heights_df$earn - age_predict_df$earn
## Sum of Squares for Error (SSE)
## Total squared residual error. Here SSE is close to SST: the model barely
## improves on just using the mean.
sse <- sum(residuals^2)
## R Squared: R^2 = SSM / SST
## Fraction of total variation explained by the model; closer to 1 means a
## stronger fit. This value is tiny, so the model explains almost nothing —
## consistent with the scatterplot.
r_squared <- ssm / sst
## Number of observations: derive from the data instead of hard-coding 1192,
## so the calculation stays correct if the CSV changes.
n <- nrow(heights_df)
## Number of regression parameters (intercept + age)
p <- 2
## Corrected Degrees of Freedom for Model (p - 1)
dfm <- p - 1
## Degrees of Freedom for Error (n - p)
dfe <- n - p
## Corrected Degrees of Freedom Total: DFT = n - 1
dft <- n - 1
## Mean of Squares for Model: MSM = SSM / DFM
msm <- ssm / dfm
## Mean of Squares for Error: MSE = SSE / DFE
mse <- sse / dfe
## Mean of Squares Total: MST = SST / DFT
mst <- sst / dft
## F Statistic: F = MSM / MSE
f_score <- msm / mse
f_score
## Adjusted R Squared: R2_adj = 1 - (1 - R2) * (n - 1) / (n - p)
adjusted_r_squared <- 1 - (1 - r_squared) * dft / dfe
adjusted_r_squared
## Calculate the p-value from the F distribution.
## The denominator degrees of freedom are the ERROR df (n - p), not the
## total df (n - 1) that the original code passed; with dfe this matches
## the p-value reported by summary(age_lm).
p_value <- pf(f_score, dfm, dfe, lower.tail = FALSE)
p_value
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barmekko.R
\name{barmekko}
\alias{barmekko}
\title{Create a bar mekko plot.}
\usage{
barmekko(data, x, y, width, values = FALSE)
}
\arguments{
\item{data}{A data frame.}
\item{x}{A categorical variable defining the width categories.}
\item{y}{A numeric variable defining the bar height.}
\item{width}{A numeric variable defining the bar widths}
\item{values}{A boolean indicating whether to show value labels in bars}
}
\value{
A bar mekko constructed with ggplot2.
}
\description{
A smarter bar chart.
}
\examples{
library(ggplot2)
df <- data.frame(
region = c('Northeast', 'Southeast', 'Central', 'West'),
sales = c(1200, 800, 450, 900),
avg_margin = c(3.2, -1.4, 0.1, 2.1)
)
barmekko(df, region, avg_margin, sales)
barmekko(df, region, avg_margin, sales) + labs(title = 'Margins by Region')
barmekko(df[order(-df$sales),], region, avg_margin, sales)
barmekko(df[order(-df$avg_margin),], region, avg_margin, sales)
}
| /man/barmekko.Rd | no_license | Julius-V/mekko | R | false | true | 1,010 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barmekko.R
\name{barmekko}
\alias{barmekko}
\title{Create a bar mekko plot.}
\usage{
barmekko(data, x, y, width, values = FALSE)
}
\arguments{
\item{data}{A data frame.}
\item{x}{A categorical variable defining the width categories.}
\item{y}{A numeric variable defining the bar height.}
\item{width}{A numeric variable defining the bar widths}
\item{values}{A boolean indicating whether to show value labels in bars}
}
\value{
A bar mekko constructed with ggplot2.
}
\description{
A smarter bar chart.
}
\examples{
library(ggplot2)
df <- data.frame(
region = c('Northeast', 'Southeast', 'Central', 'West'),
sales = c(1200, 800, 450, 900),
avg_margin = c(3.2, -1.4, 0.1, 2.1)
)
barmekko(df, region, avg_margin, sales)
barmekko(df, region, avg_margin, sales) + labs(title = 'Margins by Region')
barmekko(df[order(-df$sales),], region, avg_margin, sales)
barmekko(df[order(-df$avg_margin),], region, avg_margin, sales)
}
|
# Package-level documentation and the default display-option values that
# tibble_opt() (defined below in this file) falls back to.
#' @useDynLib tibble, .registration = TRUE
#' @importFrom utils head tail
#' @import rlang
#' @aliases NULL tibble-package
#' @details The S3 class `tbl_df` wraps a local data frame. The main
#' advantage to using a `tbl_df` over a regular data frame is the printing:
#' tbl objects only print a few rows and all the columns that fit on one screen,
#' describing the rest of it as text.
#'
#' @section Methods:
#'
#' `tbl_df` implements four important base methods:
#'
#' \describe{
#' \item{print}{By default only prints the first 10 rows (at most 20), and the
#' columns that fit on screen; see [print.tbl()]}
#' \item{\code{[}}{Does not simplify (drop) by default, returns a data frame}
#' \item{\code{[[}, `$`}{Calls [.subset2()] directly,
#' so is considerably faster. Returns `NULL` if column does not exist,
#' `$` warns.}
#' }
#' @section Important functions:
#' [tibble()] and [tribble()] for construction,
#' [as_tibble()] for coercion,
#' and [print.tbl()] and [glimpse()] for display.
#' @examples
#' tibble(a = 1:26, b = letters)
#' as_tibble(iris)
"_PACKAGE"
#' Package options
#'
#' Display options for `tbl_df`, used by [trunc_mat()] and
#' (indirectly) by [print.tbl()].
#'
#' @name tibble-options
#' @inheritSection pillar::`pillar-package` Package options
#' @section Package options:
(op.tibble <- list(
#' - `tibble.print_max`: Row number threshold: Maximum number of rows
#' printed. Set to `Inf` to always print all rows. Default: 20.
tibble.print_max = 20L,
#' - `tibble.print_min`: Number of rows printed if row number
#' threshold is exceeded. Default: 10.
tibble.print_min = 10L,
#' - `tibble.width`: Output width. Default: `NULL` (use
#' `width` option).
tibble.width = NULL,
#' - `tibble.max_extra_cols`: Number of extra columns
#' printed in reduced form. Default: 100.
tibble.max_extra_cols = 100L
))
# Resolve a display option by bare name (e.g. "print_max"): a user-set
# "tibble.<x>" option wins, then a "dplyr.<x>" option, and finally the
# package default stored in `op.tibble`.
tibble_opt <- function(x) {
  for (prefix in c("tibble.", "dplyr.")) {
    value <- getOption(paste0(prefix, x))
    if (!is.null(value)) {
      return(value)
    }
  }
  op.tibble[[paste0("tibble.", x)]]
}
| /R/tibble-package.R | no_license | jeffreyhanson/tibble | R | false | false | 2,151 | r | #' @useDynLib tibble, .registration = TRUE
# Package-level documentation and the default display-option values that
# tibble_opt() (defined below in this file) falls back to.
#' @importFrom utils head tail
#' @import rlang
#' @aliases NULL tibble-package
#' @details The S3 class `tbl_df` wraps a local data frame. The main
#' advantage to using a `tbl_df` over a regular data frame is the printing:
#' tbl objects only print a few rows and all the columns that fit on one screen,
#' describing the rest of it as text.
#'
#' @section Methods:
#'
#' `tbl_df` implements four important base methods:
#'
#' \describe{
#' \item{print}{By default only prints the first 10 rows (at most 20), and the
#' columns that fit on screen; see [print.tbl()]}
#' \item{\code{[}}{Does not simplify (drop) by default, returns a data frame}
#' \item{\code{[[}, `$`}{Calls [.subset2()] directly,
#' so is considerably faster. Returns `NULL` if column does not exist,
#' `$` warns.}
#' }
#' @section Important functions:
#' [tibble()] and [tribble()] for construction,
#' [as_tibble()] for coercion,
#' and [print.tbl()] and [glimpse()] for display.
#' @examples
#' tibble(a = 1:26, b = letters)
#' as_tibble(iris)
"_PACKAGE"
#' Package options
#'
#' Display options for `tbl_df`, used by [trunc_mat()] and
#' (indirectly) by [print.tbl()].
#'
#' @name tibble-options
#' @inheritSection pillar::`pillar-package` Package options
#' @section Package options:
(op.tibble <- list(
#' - `tibble.print_max`: Row number threshold: Maximum number of rows
#' printed. Set to `Inf` to always print all rows. Default: 20.
tibble.print_max = 20L,
#' - `tibble.print_min`: Number of rows printed if row number
#' threshold is exceeded. Default: 10.
tibble.print_min = 10L,
#' - `tibble.width`: Output width. Default: `NULL` (use
#' `width` option).
tibble.width = NULL,
#' - `tibble.max_extra_cols`: Number of extra columns
#' printed in reduced form. Default: 100.
tibble.max_extra_cols = 100L
))
# Resolve a display option by bare name (e.g. "print_max"): a user-set
# "tibble.<x>" option wins, then a "dplyr.<x>" option, and finally the
# package default stored in `op.tibble`.
tibble_opt <- function(x) {
  for (prefix in c("tibble.", "dplyr.")) {
    value <- getOption(paste0(prefix, x))
    if (!is.null(value)) {
      return(value)
    }
  }
  op.tibble[[paste0("tibble.", x)]]
}
|
# Tests for webmockr::wi_th(), which attaches matching conditions (headers,
# query, body) to a stub created by stub_request().
context("wi_th")
test_that("wi_th: with just headers", {
aa <- stub_request("get", "https://httpbin.org/get") %>%
wi_th(headers = list('User-Agent' = 'R'))
# Only the headers slot should be populated; everything else stays NULL.
expect_is(aa, "StubbedRequest")
expect_null(aa$body)
expect_null(aa$host)
expect_null(aa$query)
expect_is(aa$request_headers, "list")
expect_null(aa$response)
expect_null(aa$response_headers)
expect_null(aa$responses_sequences)
expect_is(aa$method, "character")
expect_equal(aa$method, "get")
expect_is(aa$uri, "character")
expect_equal(aa$uri, "https://httpbin.org/get")
expect_equal(aa$request_headers, list('User-Agent' = 'R'))
})
test_that("wi_th: with headers and query", {
aa <- stub_request("get", "https://httpbin.org/get") %>%
wi_th(
query = list(hello = "world"),
headers = list('User-Agent' = 'R'))
# Both slots populated, and both rendered in the stub's print method.
expect_is(aa$query, "list")
expect_is(aa$request_headers, "list")
expect_output(print(aa), "hello=world")
expect_output(print(aa), "User-Agent=R")
})
test_that("wi_th fails well", {
# Calling without a stub should error on the missing .data argument.
expect_error(wi_th(), "argument \".data\" is missing")
})
| /tests/testthat/test-wi_th.R | permissive | ktargows/webmockr | R | false | false | 1,073 | r | context("wi_th")
# Tests for webmockr::wi_th(), which attaches matching conditions (headers,
# query, body) to a stub created by stub_request().
test_that("wi_th: with just headers", {
aa <- stub_request("get", "https://httpbin.org/get") %>%
wi_th(headers = list('User-Agent' = 'R'))
# Only the headers slot should be populated; everything else stays NULL.
expect_is(aa, "StubbedRequest")
expect_null(aa$body)
expect_null(aa$host)
expect_null(aa$query)
expect_is(aa$request_headers, "list")
expect_null(aa$response)
expect_null(aa$response_headers)
expect_null(aa$responses_sequences)
expect_is(aa$method, "character")
expect_equal(aa$method, "get")
expect_is(aa$uri, "character")
expect_equal(aa$uri, "https://httpbin.org/get")
expect_equal(aa$request_headers, list('User-Agent' = 'R'))
})
test_that("wi_th: with headers and query", {
aa <- stub_request("get", "https://httpbin.org/get") %>%
wi_th(
query = list(hello = "world"),
headers = list('User-Agent' = 'R'))
# Both slots populated, and both rendered in the stub's print method.
expect_is(aa$query, "list")
expect_is(aa$request_headers, "list")
expect_output(print(aa), "hello=world")
expect_output(print(aa), "User-Agent=R")
})
test_that("wi_th fails well", {
# Calling without a stub should error on the missing .data argument.
expect_error(wi_th(), "argument \".data\" is missing")
})
# Theoretical (normal-approximation) confidence interval for a single
# proportion. Optionally prints variable-type info, summary statistics and
# the interval, and draws an EDA bar plot of the sample.
#   y              vector of categorical observations
#   success        the level of y counted as a "success"
#   conf_level     confidence level in (0, 1), e.g. 0.95
#   y_name         label for the x-axis of the EDA plot
#   show_*         logical flags controlling printed output and plots
# Returns a list with the standard error (SE), margin of error (ME) and the
# two-element confidence interval (CI), each rounded to 4 decimals.
ci_single_prop_theo <- function(y, success, conf_level, y_name,
show_var_types, show_summ_stats, show_res,
show_eda_plot, show_inf_plot){
# calculate sample size
n <- length(y)
# calculate p-hat, the sample proportion of successes
p_hat <- sum(y == success) / n
# find percentile associated with critical value, e.g. 0.975 for a 95% CI
perc_crit_value <- conf_level + ((1 - conf_level) / 2)
# find critical value z* from the standard normal
z_star <- qnorm(perc_crit_value)
# calculate SE = sqrt(p(1-p)/n)
se <- sqrt(p_hat * (1 - p_hat) / n)
# calculate margin of error ME = z* x SE
me <- z_star * se
# calculate CI = p-hat +/- ME
ci <- p_hat + c(-1, 1) * me
# print variable types
if(show_var_types == TRUE){
cat(paste0("Single categorical variable, success: ", success,"\n"))
}
# print summary statistics
if(show_summ_stats == TRUE){
cat(paste0("n = ", n, ", p-hat = ", round(p_hat, 4), "\n"))
}
# print results
if(show_res == TRUE){
conf_level_perc = conf_level * 100
cat(paste0(conf_level_perc, "% CI: (", round(ci[1], 4), " , ", round(ci[2], 4), ")\n"))
}
# eda_plot: bar chart of the raw categorical sample
d_eda <- data.frame(y = y)
eda_plot <- ggplot2::ggplot(data = d_eda, ggplot2::aes(x = y), environment = environment()) +
ggplot2::geom_bar(fill = "#8FDEE1") +
ggplot2::xlab(y_name) +
ggplot2::ylab("") +
ggplot2::ggtitle("Sample Distribution")
# print plots (no inference plot exists for the theoretical method)
if(show_eda_plot){ print(eda_plot) }
if(show_inf_plot){ warning("No inference plot available.") }
# return
return(list(SE = round(se, 4), ME = round(me, 4), CI = round(ci, 4)))
} | /R/ci_single_prop_theo.R | no_license | aaronbaggett/labs4316 | R | false | false | 1,553 | r | ci_single_prop_theo <- function(y, success, conf_level, y_name,
show_var_types, show_summ_stats, show_res,
show_eda_plot, show_inf_plot){
# calculate sample size
n <- length(y)
# calculate p-hat
p_hat <- sum(y == success) / n
# find percentile associated with critical value
perc_crit_value <- conf_level + ((1 - conf_level) / 2)
# find critical value
z_star <- qnorm(perc_crit_value)
# calculate SE
se <- sqrt(p_hat * (1 - p_hat) / n)
# calculate ME
me <- z_star * se
# calculate CI
ci <- p_hat + c(-1, 1) * me
# print variable types
if(show_var_types == TRUE){
cat(paste0("Single categorical variable, success: ", success,"\n"))
}
# print summary statistics
if(show_summ_stats == TRUE){
cat(paste0("n = ", n, ", p-hat = ", round(p_hat, 4), "\n"))
}
# print results
if(show_res == TRUE){
conf_level_perc = conf_level * 100
cat(paste0(conf_level_perc, "% CI: (", round(ci[1], 4), " , ", round(ci[2], 4), ")\n"))
}
# eda_plot
d_eda <- data.frame(y = y)
eda_plot <- ggplot2::ggplot(data = d_eda, ggplot2::aes(x = y), environment = environment()) +
ggplot2::geom_bar(fill = "#8FDEE1") +
ggplot2::xlab(y_name) +
ggplot2::ylab("") +
ggplot2::ggtitle("Sample Distribution")
# print plots
if(show_eda_plot){ print(eda_plot) }
if(show_inf_plot){ warning("No inference plot available.") }
# return
return(list(SE = round(se, 4), ME = round(me, 4), CI = round(ci, 4)))
} |
testlist <- list(lims = structure(0, .Dim = c(1L, 1L)), points = structure(c(2.06427662810466e-310, 1.21367431719399e+132, 1.20027270135455e+132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 8L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result) | /palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987786-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 463 | r | testlist <- list(lims = structure(0, .Dim = c(1L, 1L)), points = structure(c(2.06427662810466e-310, 1.21367431719399e+132, 1.20027270135455e+132, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 8L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_mfast.R
\name{run_mfast}
\alias{run_mfast}
\title{Run splitting measurement}
\usage{
run_mfast(path, name, filtlist)
}
\arguments{
\item{path}{Path to folder}
\item{name}{Name of event}
\item{filtlist}{A dataframe of the best filters to be used (output of writesac_filt)}
}
\description{
Runs shear wave splitting measurements on a set of filtered SAC files.
}
\examples{
# Run shear wave splitting measurements on event 2002.054.09.47.lhor2
pathto <- "~/mfast/sample_data/raw_data"
write_sample(pathto)
event <- "2002.054.09.47.lhor2"
triplet <- readtriplet(event,path=pathto)
bestfilt <- filter_spread(triplet)
maxfreq <- createini(pathto,triplet,bestfilt,event)
f <- writesac_filt(pathto,triplet,event,bestfilt)
run_mfast(pathto,event,f)
}
| /man/run_mfast.Rd | no_license | shearwavesplitter/MFASTR | R | false | true | 824 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fn_mfast.R
\name{run_mfast}
\alias{run_mfast}
\title{Run splitting measurement}
\usage{
run_mfast(path, name, filtlist)
}
\arguments{
\item{path}{Path to folder}
\item{name}{Name of event}
\item{filtlist}{A dataframe of the best filters to be used (output of writesac_filt)}
}
\description{
Runs shearwave splitting measurements on a set of filtered SAC files
}
\examples{
# Run shear wave splitting measurements on event 2002.054.09.47.lhor2
pathto <- "~/mfast/sample_data/raw_data"
write_sample(pathto)
event <- "2002.054.09.47.lhor2"
triplet <- readtriplet(event,path=pathto)
bestfilt <- filter_spread(triplet)
maxfreq <- createini(pathto,triplet,bestfilt,event)
f <- writesac_filt(pathto,triplet,event,bestfilt)
run_mfast(pathto,event,f)
}
|
#' @export
makeRLearner.classif.geoDA = function() {
  # Learner registration for DiscriMiner::geoDA (geometric predictive
  # discriminant analysis). Only the 'validation' strategy is exposed as a
  # hyperparameter; it is marked non-tunable because it only controls
  # DiscriMiner's internal validation diagnostics, not the fitted model.
  makeRLearnerClassif(
    cl = "classif.geoDA",
    package = "DiscriMiner",
    par.set = makeParamSet(
      # NOTE: the "NULL" level must be given as a *string* name; a bare
      # `NULL = NULL` is a parse error in R because NULL is a reserved word.
      makeDiscreteLearnerParam(id = "validation", values = list(crossval = "crossval", learntest = "learntest", "NULL" = NULL), default = NULL, tunable = FALSE)
    ),
    par.vals = list(validation = NULL),
    properties = c("twoclass", "multiclass", "numerics"),
    name = "Geometric Predictive Discriminant Analysis",
    short.name = "geoda"
  )
}
#' @export
trainLearner.classif.geoDA = function(.learner, .task, .subset, .weights = NULL, ...) {
  # Extract features and target separately; dropping unused factor levels in
  # the target ensures geoDA never sees empty classes.
  task_data = getTaskData(.task, .subset, target.extra = TRUE, recode.target = "drop.levels")
  DiscriMiner::geoDA(variables = task_data$data, group = task_data$target, ...)
}
#' @export
predictLearner.classif.geoDA = function(.learner, .model, .newdata, ...) {
  # Classify new observations with the fitted geoDA model and return only
  # the hard class labels. classify() also computes $scores, but those are
  # discarded here because mlr expects a plain response vector.
  fitted_model = .model$learner.model
  prediction = DiscriMiner::classify(fitted_model, newdata = .newdata)
  prediction$pred_class
}
| /R/RLearner_classif_geoDA.R | no_license | jimhester/mlr | R | false | false | 985 | r | #' @export
makeRLearner.classif.geoDA = function() {
makeRLearnerClassif(
cl = "classif.geoDA",
package = "DiscriMiner",
par.set = makeParamSet(
makeDiscreteLearnerParam(id = "validation", values = list(crossval = "crossval", learntest = "learntest", NULL = NULL), default = NULL, tunable = FALSE)
),
par.vals = list(validation = NULL),
properties = c("twoclass", "multiclass", "numerics"),
name = "Geometric Predictive Discriminant Analysis",
short.name = "geoda"
)
}
#' @export
trainLearner.classif.geoDA = function(.learner, .task, .subset, .weights = NULL, ...) {
d = getTaskData(.task, .subset, target.extra = TRUE, recode.target = "drop.levels")
DiscriMiner::geoDA(variables = d$data, group = d$target, ...)
}
#' @export
predictLearner.classif.geoDA = function(.learner, .model, .newdata, ...) {
m = .model$learner.model
p = DiscriMiner::classify(m, newdata = .newdata)
#p$scores #we loose this information
p$pred_class
}
|
#' The "population" variance
#'
#' Calculates the variance with n (rather than n - 1) as the denominator.
#'
#' @param x A vector for which a variance can be calculated
#' @param y NULL (default) or a vector, matrix or data frame
#' with compatible dimensions to x. The default is equivalent to y = x (but more efficient).
#' Currently accepted only for signature compatibility with \code{stats::var}; unused.
#' @param na.rm logical. Should missing values be removed?
#' @param use an optional character string giving a method for computing covariances in the presence of
#' missing values. This must be (an abbreviation of) one of the strings "everything", "all.obs",
#' "complete.obs", "na.or.complete", or "pairwise.complete.obs".
#' Currently accepted only for signature compatibility with \code{stats::var}; unused.
#' @return The population variance of \code{x}, or the string
#' "All values are missing" when \code{x} has no non-missing values.
#' @keywords variance, population
#' @export
varp <- function(x, y = NULL, na.rm = FALSE, use){
  # n counts usable (non-missing) observations; the (n - 1)/n rescaling below
  # converts the sample variance into the population variance.
  n <- sum(!is.na(x))
  if (n == 0) {
    # Preserve the historical sentinel return value for all-NA input.
    return("All values are missing")
  }
  # Honour na.rm (it was previously accepted but ignored). With the default
  # na.rm = FALSE and missing values present, var() propagates NA, which
  # matches the old behaviour.
  var(x, na.rm = na.rm) * (n - 1) / n
}
| /R/varp.R | no_license | lehmansociology/lehmansociology | R | false | false | 861 | r | #' The "population" variance
#'
#' Calculates the variance with n as the denominator.
#'
#' @param x A vector for which a variance can be calculated
#' @param y NULL (default) or a vector, matrix or data frame
#' with compatible dimensions to x. The default is equivalent to y = x (but more efficient).
#' @param na.rm logical. Should missing values be removed?
#' @param use an optional character string giving a method for computing covariances in the presence of
#' missing values. This must be (an abbreviation of) one of the strings "everything", "all.obs",
#' "complete.obs", "na.or.complete", or "pairwise.complete.obs".
#' @keywords variance, population
#' @export
varp<-function(x, y = NULL, na.rm = FALSE, use){
n <- sum(!is.na(x))
varp <- ifelse(n != 0, var(x)*(n-1)/n, return("All values are missing"))
varp
}
|
\name{legendg}
\alias{legendg}
\title{Legend with grouped bars, lines or symbols}
\description{Displays a legend with more than one rectangle, symbol or line.}
\usage{
legendg(x,y=NULL,legend,fill=NULL,col=par("col"),
border=list("black"),lty,lwd,pch=NULL,angle=45,density=NULL,
bty="o",bg=par("bg"),box.lwd=par("lwd"),box.lty=par("lty"),
box.col=par("fg"),pt.bg=NA,cex=1,pt.cex=cex,pt.lwd=lwd,
pt.space=1,xjust=0,yjust=1,x.intersp=1,y.intersp=1,
adj=c(0,0.5),text.width=NULL,text.col=par("col"),merge=FALSE,
trace=FALSE,plot=TRUE,ncol=1,horiz=FALSE,title=NULL,
inset=0,xpd,title.col=text.col)
}
\arguments{
\item{x,y}{Position of the legend as in \samp{legend}.}
\item{legend}{Labels for the legend as in \samp{legend}.}
\item{fill}{List of fill colors for the rectangles.}
\item{col}{Color(s), perhaps as a list, for the symbols.}
\item{border}{Border color(s) for the rectangles.}
\item{lty}{Line type, currently ignored and set to 1.}
\item{lwd}{Line width, currently ignored.}
\item{pch}{List of symbols for the legend.}
\item{angle,density}{Currently ignored.}
\item{bty}{Legend box type to be displayed.}
\item{bg}{Background color for the legend.}
\item{box.lwd,box.lty,box.col}{Line width, type and color
for the surrounding box.}
\item{cex}{Character expansion for text.}
\item{pt.bg,pt.cex,pt.lwd}{Background color, character
expansion and line width for the symbols.}
\item{pt.space}{Spacing for the symbols as a multiplier
for \samp{strwidth("O")}.}
\item{xjust,yjust}{Justification for the legend.}
\item{x.intersp,y.intersp}{x and y character spacing for
the legend text.}
\item{adj}{Text adjustment.}
\item{text.width,text.col}{Width and color of the legend text.}
\item{merge}{Whether to merge points and lines.}
\item{trace}{Show how the legend is calculated.}
\item{plot}{Whether to plot the legend.}
\item{ncol}{Number of columns in the legend.}
\item{horiz}{Whether to display a horizontal legend.}
\item{title}{Title for the legend.}
\item{inset}{Inset distances for use with keywords.}
\item{xpd}{An optional value for \samp{par(xpd=)}.}
\item{title.col}{Color for the legend title.}
}
\value{
The value returned by \samp{legend} returned invisibly.
}
\details{
\samp{legendg} calls \samp{legend} to display a legend with a
blank space to the left of the labels. It then attempts to display
groups of colored rectangles or symbols in that space depending
upon the contents of either \samp{fill} or \samp{pch}. These
should be in the form of a list with the number of elements equal
to the number of labels, and one or more fills or symbols for each
label. \samp{legendg} will display up to four fills or symbols
next to each label, allowing the user to label a group of these
rather than just one per label.
}
\author{Jim Lemon}
\seealso{\link{legend}}
\examples{
plot(0.5,0.5,xlim=c(0,1),ylim=c(0,1),type="n",
main="Test of grouped legend function")
legendg(0.5,0.8,c("one","two","three"),pch=list(1,2:3,4:6),
col=list(1,2:3,4:6),pt.space=1.5)
legendg(0.5,0.5,c("one","two","three"),fill=list(1,2:3,4:6))
# fake a line/point with text points
legendg(0.2,0.25,c("letter","number"),
pch=list(c("-","A","-"),c("-","1","-")),
col=list(rep(2,3),rep(3,3)))
}
\keyword{misc}
| /man/legendg.Rd | no_license | plotrix/plotrix | R | false | false | 3,279 | rd | \name{legendg}
\alias{legendg}
\title{Legend with grouped bars, lines or symbols}
\description{Displays a legend with more than one rectangle, symbol or line.}
\usage{
legendg(x,y=NULL,legend,fill=NULL,col=par("col"),
border=list("black"),lty,lwd,pch=NULL,angle=45,density=NULL,
bty="o",bg=par("bg"),box.lwd=par("lwd"),box.lty=par("lty"),
box.col=par("fg"),pt.bg=NA,cex=1,pt.cex=cex,pt.lwd=lwd,
pt.space=1,xjust=0,yjust=1,x.intersp=1,y.intersp=1,
adj=c(0,0.5),text.width=NULL,text.col=par("col"),merge=FALSE,
trace=FALSE,plot=TRUE,ncol=1,horiz=FALSE,title=NULL,
inset=0,xpd,title.col=text.col)
}
\arguments{
\item{x,y}{Position of the legend as in \samp{legend}.}
\item{legend}{Labels for the legend as in \samp{legend}.}
\item{fill}{List of fill colors for the rectangles.}
\item{col}{Color(s), perhaps as a list, for the symbols.}
\item{border}{Border color(s) for the rectangles.}
\item{lty}{Line type, currently ignored and set to 1.}
\item{lwd}{Line width, currently ignored.}
\item{pch}{List of symbols for the legend.}
\item{angle,density}{Currently ignored.}
\item{bty}{Legend box type to be displayed.}
\item{bg}{Background color for the legend.}
\item{box.lwd,box.lty,box.col}{Line width, type and color
for the surrounding box.}
\item{cex}{Character expansion for text.}
\item{pt.bg,pt.cex,pt.lwd}{Background color, character
expansion and line width for the symbols.}
\item{pt.space}{Spacing for the symbols as a multiplier
for \samp{strwidth("O")}.}
\item{xjust,yjust}{Justification for the legend.}
\item{x.intersp,y.intersp}{x and y character spacing for
the legend text.}
\item{adj}{Text adjustment.}
\item{text.width,text.col}{Width and color of the legend text.}
\item{merge}{Whether to merge points and lines.}
\item{trace}{Show how the legend is calculated.}
\item{plot}{Whether to plot the legend.}
\item{ncol}{Number of columns in the legend.}
\item{horiz}{Whether to display a horizontal legend.}
\item{title}{Title for the legend.}
\item{inset}{Inset distances for use with keywords.}
\item{xpd}{An optional value for \samp{par(xpd=)}.}
\item{title.col}{Color for the legend title.}
}
\value{
The value returned by \samp{legend} returned invisibly.
}
\details{
\samp{legendg} calls \samp{legend} to display a legend with a
blank space to the left of the labels. It then attempts to display
groups of colored rectangles or symbols in that space depending
upon the contents of either \samp{fill} or \samp{pch}. These
should be in the form of a list with the number of elements equal
to the number of labels, and one or more fills or symbols for each
label. \samp{legendg} will display up to four fills or symbols
next to each label, allowing the user to label a group of these
rather than just one per label.
}
\author{Jim Lemon}
\seealso{\link{legend}}
\examples{
plot(0.5,0.5,xlim=c(0,1),ylim=c(0,1),type="n",
main="Test of grouped legend function")
legendg(0.5,0.8,c("one","two","three"),pch=list(1,2:3,4:6),
col=list(1,2:3,4:6),pt.space=1.5)
legendg(0.5,0.5,c("one","two","three"),fill=list(1,2:3,4:6))
# fake a line/point with text points
legendg(0.2,0.25,c("letter","number"),
pch=list(c("-","A","-"),c("-","1","-")),
col=list(rep(2,3),rep(3,3)))
}
\keyword{misc}
|
# Figure S6: for the cluster selected by `ff`, draw a histogram of
# log2-abundance of that cluster's dominant genus (selected cluster vs. all
# other samples) with the overall density curve overlaid on a secondary axis.
setwd("E:\\project\\urine\\new\\hclust-barplot\\hcluster.count.barplot")
library(reshape2)
library(RColorBrewer)
library(ggplot2)
# Plots accumulate in `p` across interactive runs (one entry per genus), so
# only initialise the container when it does not exist yet. (Previously `p`
# was never initialised -- the commented-out `p<-c()` -- so the assignment
# further below failed with "object 'p' not found" in a fresh session.)
if (!exists("p")) p <- list()
ff = 6
#inp=c("g__Prevotella","g__Escherichia-Shigella","g__Gardnerella","g__Streptococcus","g__Veillonella","g__Lactobacillus")[ff]
# Genus-level abundance table (genera x samples) and the sample->cluster map.
D <- read.table(file = "barL6.txt",sep = "\t",row.names = 1,header = TRUE)
map <- read.table(file = "hclust-mapping1.txt",sep = "\t",row.names = 1,header = TRUE)
# Columns 4-5 pair each cluster id with its dominant ("main") genus; the last
# row is dropped (presumably a catch-all entry -- confirm against the file).
inp <- unique(map[,c(4,5)])
inp <- inp[-nrow(inp),]
inp$main.genus <- as.character(inp$main.genus)
# Cluster 7 is excluded from the analysis.
map <- map[which(map$data.cluster != 7),]
# Keep the first six genera and orient the table as samples x genera.
D <- as.data.frame(t(D[1:6, row.names(map)]))
# Two-level grouping: the selected cluster vs. everything else.
m <- map$data.cluster
m[map$data.cluster != inp$data.cluster[ff]] <- 'others'
m <- factor(m, c(as.character(ff), 'others'))
# Build throwaway plots purely to extract ggplot's binned histogram counts
# and the kernel density estimate of the log2 abundances.
fig1 <- ggplot() +
  geom_histogram(aes(log2(D[,inp$main.genus[ff]]), fill = m), alpha = 0.5)
f <- ggplot_build(fig1)
dat1 <- f$data[[1]]
fig2 <- ggplot() +
  geom_density(aes(log2(D[,inp$main.genus[ff]])), position = 'stack')
f <- ggplot_build(fig2)
dat2 <- f$data[[1]]
# Map ggplot's default two-colour fill back to readable group labels.
# (The original `l=unique(dat$fill)` referenced an undefined object `dat`
# and its result was never used, so that line has been removed.)
dat1$group[dat1$fill == '#00BFC4'] <- 'others'
dat1$group[dat1$fill == '#F8766D'] <- as.character(ff)
# Scale factor so the density curve spans ~80% of the histogram's height.
cutoff <- max(dat1$count) / max(dat2$density) * 0.8
p[[inp$main.genus[ff]]] <- ggplot(dat1) +
  geom_bar(aes(x = x, y = count, fill = group), position = 'stack', stat = 'identity', alpha = 0.8) +
  geom_line(data = dat2, aes(x = x, y = density * cutoff), color = 'steelblue') +
  labs(title = '', x = paste('log2(', inp$main.genus[ff], ')'), y = 'Count') +
  guides(fill = guide_legend(title = 'Group')) +
  scale_y_continuous(
    sec.axis = sec_axis(~ . / cutoff,  # undo the scaling so secondary-axis tick labels show true density
                        name = "Density")  # secondary-axis title (typo "Dentity" fixed)
  ) +
  theme_classic() +
  theme(
    plot.title = element_text(hjust = 0.5, size = 10),
    panel.border = element_rect(
      color = 'black',
      fill = NA
    ),
    axis.title = element_text(size = 8),
    axis.text = element_text(size = 6),
    axis.line = element_blank()
  )
pdf(paste('E:/Users/Desktop/TIANC/', inp$main.genus[ff], '.pdf', sep = ''), width = 4.5, height = 3)
# print() is required for the plot to be written when this script is source()d;
# a bare expression is only auto-printed at the interactive top level.
print(p[[inp$main.genus[ff]]])
dev.off()
#cowplot::plot_grid(plotlist = p)
| /Figure_S6.R | no_license | RChGO/UrinaryMicrobiota | R | false | false | 2,170 | r | setwd("E:\\project\\urine\\new\\hclust-barplot\\hcluster.count.barplot")
library(reshape2)
library(RColorBrewer)
library(ggplot2)
#p<-c()
ff = 6
#inp=c("g__Prevotella","g__Escherichia-Shigella","g__Gardnerella","g__Streptococcus","g__Veillonella","g__Lactobacillus")[ff]
D <- read.table(file = "barL6.txt",sep = "\t",row.names = 1,header = T)
map <- read.table(file = "hclust-mapping1.txt",sep = "\t",row.names = 1,header = T)
inp <- unique(map[,c(4,5)])
inp = inp[-nrow(inp),]
inp$main.genus <- as.character(inp$main.genus)
map<-map[which(map$data.cluster!=7),]
D <- as.data.frame(t(D[1:6,row.names(map)]))
m <- map$data.cluster
m[map$data.cluster!=inp$data.cluster[ff]] <-'others'
m <- factor(m,c(as.character(ff),'others'))
fig1<-ggplot()+
geom_histogram(aes(log2(D[,inp$main.genus[ff]]),fill=m),alpha=0.5)
f=ggplot_build(fig1)
dat1 <- f$data[[1]]
fig2<-ggplot()+
geom_density(aes(log2(D[,inp$main.genus[ff]])),position='stack')
f=ggplot_build(fig2)
dat2 <- f$data[[1]]
l=unique(dat$fill)
dat1$group[dat1$fill=='#00BFC4']='others'
dat1$group[dat1$fill=='#F8766D']=as.character(ff)
cutoff<-max(dat1$count)/max(dat2$density)*0.8
p[[inp$main.genus[ff]]]<-ggplot(dat1)+
geom_bar(aes(x=x,y=count,fill=group),position = 'stack',stat = 'identity',alpha=0.8)+
geom_line(data=dat2,aes(x=x,y=density*cutoff),color='steelblue')+
labs(title='',x=paste('log2(',inp$main.genus[ff],')'),y='Count')+
guides(fill=guide_legend(title = 'Group'))+
scale_y_continuous(
sec.axis = sec_axis( ~./cutoff, #对次坐标轴刻度标签的二次映射(极差范围指定真实极差即可)
name = "Dentity") #次坐标名
)+
theme_classic()+
theme(
plot.title = element_text(hjust = 0.5,size = 10),
panel.border = element_rect(
color = 'black',
fill=NA
),
axis.title = element_text(size=8),
axis.text = element_text(size = 6),
axis.line = element_blank()
)
pdf(paste('E:/Users/Desktop/TIANC/',inp$main.genus[ff],'.pdf',sep=''),width = 4.5,height = 3)
p[[inp$main.genus[ff]]]
dev.off()
#cowplot::plot_grid(plotlist = p)
|
# Auto-extracted example script for datarobot::DeletePredictJob.
library(datarobot)
### Name: DeletePredictJob
### Title: Function to delete one predict job from the DataRobot queue
### Aliases: DeletePredictJob
### ** Examples
## Not run: 
##D projectId <- "59a5af20c80891534e3c2bde"
##D initialJobs <- GetPredictJobs(project)
### NOTE(review): the line above references `project`, which is never
### defined in this example; the intended variable is presumably
### `projectId` -- confirm against the datarobot package documentation.
##D job <- initialJobs[[1]]
##D predictJobId <- job$predictJobId
##D DeletePredictJob(projectId, predictJobId)
## End(Not run)
| /data/genthat_extracted_code/datarobot/examples/DeletePredictJob.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 407 | r | library(datarobot)
### Name: DeletePredictJob
### Title: Function to delete one predict job from the DataRobot queue
### Aliases: DeletePredictJob
### ** Examples
## Not run:
##D projectId <- "59a5af20c80891534e3c2bde"
##D initialJobs <- GetPredictJobs(project)
##D job <- initialJobs[[1]]
##D predictJobId <- job$predictJobId
##D DeletePredictJob(projectId, predictJobId)
## End(Not run)
|
# Shiny UI definition: a themed page with four tabs (market overview,
# most profitable games, critic-vs-user ratings, and the raw data table).
# Widget IDs, labels and layout are identical to the original definition;
# only formatting and comments differ.
ui <- fluidPage(
  theme = shinytheme("united"),
  titlePanel(tags$h2(tags$b("Games Sales 2000 - 2016"))),
  tabsetPanel(
    # --- Tab 1: market overview ------------------------------------------
    tabPanel(
      tags$h4("Overview of the market"),
      tags$br(),
      fluidRow(
        column(
          2,
          checkboxGroupInput(
            "year",
            label = tags$h4("Year"),
            choices = unique(games_sales_added_company$year_of_release),
            selected = c(2000:2016),
            inline = TRUE
          ),
          tags$br(),
          actionButton("year_button", label = "Display Plots")
        ),
        column(10, plotOutput("plot_1"))
      ),
      tags$br(),
      fluidRow(
        column(6, plotOutput("plot_3")),
        column(6, plotOutput("plot_2"))
      ),
      # NOTE(review): the trailing comma below creates an empty trailing
      # argument, preserved from the original (modern shiny drops it).
      tags$br(),
    ),
    # --- Tab 2: most profitable games ------------------------------------
    tabPanel(
      tags$h4("Most profitable games"),
      tags$br(),
      fluidRow(
        column(
          2,
          checkboxGroupInput(
            "year_tab_2",
            label = tags$h4("Year"),
            choices = unique(games_sales_added_company$year_of_release),
            selected = c(2000:2016),
            inline = TRUE
          ),
          tags$br(),
          checkboxGroupInput(
            "company",
            label = tags$h4("Company box"),
            choices = unique(games_sales_added_company$company),
            selected = unique(games_sales_added_company$company)
          ),
          actionButton("tab_2_button", label = "Display Plots")
        ),
        column(10, plotOutput("plot_1_tab_2"))
      ),
    ),
    # --- Tab 3: ratings, critics vs users --------------------------------
    tabPanel(
      tags$h4("Ratings critics vs users"),
      tags$br(),
      fluidRow(
        column(
          2,
          sliderInput(
            "slider_ratings",
            label = tags$h4("Slider Range"),
            min = 2000,
            max = 2016,
            value = c(2010, 2016)
          ),
          tags$br(),
          checkboxGroupInput(
            "company_ratings",
            label = tags$h4("Company
                            "),
            choices = unique(games_sales_added_company$company),
            selected = unique(games_sales_added_company$company)
          ),
          actionButton("ratings_button", label = "Display Plots")
        ),
        column(10, plotOutput("plot_1_ratings"))
      )
    ),
    # --- Tab 4: raw dataset ----------------------------------------------
    tabPanel(
      tags$h4("Used dataset"),
      tags$br(),
      fluidPage(
        DT::dataTableOutput("data")
      )
    )
  )
)
| /ui.R | no_license | kuba1016/games_shiny_app | R | false | false | 3,855 | r |
ui <- fluidPage( theme = shinytheme("united"),
titlePanel(tags$h2(tags$b("Games Sales 2000 - 2016"))),
tabsetPanel(
tabPanel(tags$h4("Overview of the market"),
tags$br(),
fluidRow(
column(2,
checkboxGroupInput("year",
label = tags$h4("Year"),
choices = unique(games_sales_added_company$year_of_release),
selected = c(2000:2016),inline = TRUE),
tags$br(),
actionButton("year_button", label = "Display Plots")
),
column(10,
plotOutput("plot_1")
)
),
tags$br(),
fluidRow(
column(6,
plotOutput("plot_3")
),
column(6,
plotOutput("plot_2")
)
),
tags$br(),
),
tabPanel(tags$h4("Most profitable games"),
tags$br(),
fluidRow(
column(2,
checkboxGroupInput("year_tab_2",
label = tags$h4("Year"),
choices = unique(games_sales_added_company$year_of_release),
selected = c(2000:2016),inline = TRUE
),
tags$br(),
checkboxGroupInput("company",
label = tags$h4("Company box"),
choices = unique(games_sales_added_company$company),
selected = unique(games_sales_added_company$company)),
actionButton("tab_2_button", label = "Display Plots")
),
column(10,
plotOutput("plot_1_tab_2")
)
),
),
tabPanel(tags$h4("Ratings critics vs users"),
tags$br(),
fluidRow(
column(2,
sliderInput("slider_ratings",
label = tags$h4("Slider Range"),
min = 2000,
max = 2016,
value = c(2010, 2016)),
tags$br(),
checkboxGroupInput("company_ratings",
label = tags$h4("Company
"),
choices = unique(games_sales_added_company$company),
selected = unique(games_sales_added_company$company)),
actionButton("ratings_button",
label = "Display Plots")
),
column(10,
plotOutput("plot_1_ratings")
)
)
),
tabPanel(tags$h4("Used dataset"),
tags$br(),
fluidPage(
DT::dataTableOutput("data")
)
)
)
)
|
#' @title DetectionRate
#' @description Probability of detection by identity class
#' Stolen from Seurat 2.3.4 (and rewritten), since it was removed from Seurat 3
#'
#' For each gene, calculates the probability of detection for each identity
#' class.
#'
#' @param object Seurat object
#' @param thresh.min Minimum threshold to define 'detected' (log-scale)
#' @param features Which features to calculate detection rate for. Default: NULL (= all)
#' @param slot_use Slot to pull data from. Default: "data"
#' @param ... ignored
#'
#' @importFrom stringr str_remove
#' @importFrom purrr map_dfr map
#' @importFrom glue glue
#'
#' @return Returns a matrix with genes as rows, identity classes as columns.
#'
#' @export
#'
#' @examples
DetectionRate <- function(object, ...){
  # S3 generic; dispatch goes to DetectionRate.<class> (see the Seurat method).
  UseMethod("DetectionRate")
}
#' @rdname DetectionRate
#' @method DetectionRate Seurat
#' @importFrom Seurat FetchData WhichCells Idents
#' @export
#' @return A matrix of detection rates with features as rows and identity classes as columns.
DetectionRate.Seurat <- function(object,
                                 features = NULL,
                                 slot_use = "data",
                                 thresh.min = 0,
                                 ...) {
  assay <- DefaultAssay(object)
  ident_use <- Idents(object)
  # One column per identity class: for each class, take its cells and compute
  # the fraction of cells in which each feature exceeds thresh.min.
  data_all <- map_dfr(sort(x = unique(x = ident_use)),
                      function(i) {
                        temp_cells <- WhichCells(object = object,
                                                 ident = i)
                        # FetchData expects assay-prefixed variable names,
                        # e.g. "rna_GENE" for the default assay "RNA".
                        vars_use <- glue("{tolower(assay)}_{features}") %>%
                          as.character()
                        data.temp <- map(FetchData(object,
                                                   vars = vars_use,
                                                   cells = temp_cells,
                                                   slot = slot_use),
                                         function(x){
                                           sum(x > thresh.min)/length(x = x)
                                         })
                      }) %>%
    t()
  # Label columns by identity class and strip the assay prefix from rownames
  # so the result is indexed by plain feature names.
  colnames(x = data_all) <- sort(x = unique(x = ident_use))
  rownames(x = data_all) %<>% str_remove(glue("{tolower(assay)}_"))
  return(data_all)
} | /R/detectionrate.R | permissive | KevinTThomas/SeuratBubblePlot | R | false | false | 2,203 | r | #' @title DetectionRate
#' @Description Probability of detection by identity class
#' Stolen from Seurat 2.3.4 (and rewritten), since it was removed from Seurat 3
#'
#' For each gene, calculates the probability of detection for each identity
#' class.
#'
#' @param object Seurat object
#' @param thresh.min Minimum threshold to define 'detected' (log-scale)
#' @param features Which features to calculate detection rate for. Default: NULL (= all)
#' @param slot_use Slot to pull data from. Default: "data"
#' @param ... ignored
#'
#' @importFrom stringr str_remove
#' @importFrom purrr map_dfr map
#' @importFrom glue glue
#'
#' @return Returns a matrix with genes as rows, identity classes as columns.
#'
#' @export
#'
#' @examples
DetectionRate <- function(object, ...){
UseMethod("DetectionRate")
}
#' @rdname DetectionRate
#' @method DetectionRate Seurat
#' @importFrom Seurat FetchData WhichCells Idents
#' @export
#' @return
DetectionRate.Seurat <- function(object,
features = NULL,
slot_use = "data",
thresh.min = 0,
...) {
assay <- DefaultAssay(object)
ident_use <- Idents(object)
data_all <- map_dfr(sort(x = unique(x = ident_use)),
function(i) {
temp_cells <- WhichCells(object = object,
ident = i)
vars_use <- glue("{tolower(assay)}_{features}") %>%
as.character()
data.temp <- map(FetchData(object,
vars = vars_use,
cells = temp_cells,
slot = slot_use),
function(x){
sum(x > thresh.min)/length(x = x)
})
}) %>%
t()
colnames(x = data_all) <- sort(x = unique(x = ident_use))
rownames(x = data_all) %<>% str_remove(glue("{tolower(assay)}_"))
return(data_all)
} |
#' Constructor function for Milne pitch-class spectrum
#'
#' Low-level constructor that wraps a numeric vector of pitch-class weights
#' as a "milne_pc_spectrum" object.
#' @param x A numeric vector of pitch-class weights,
#' typically (but not necessarily) of length 1200.
#' @return An object of class "milne_pc_spectrum".
#' @seealso \code{\link{milne_pc_spectrum}}.
#' @export
.milne_pc_spectrum <- function(x) {
  checkmate::qassert(x, "N")
  # Drop any incoming class, then build the underlying smooth spectrum over
  # the pitch-class circle [0, 12).
  spectrum <- smooth_spectrum(
    x = unclass(x),
    x_unit = "pc",
    y_unit = "weight",
    lower = 0,
    upper = 12,
    low_eq = TRUE,
    high_eq = FALSE,
    label = "pitch-class spectrum",
    x_lab = "Pitch class",
    y_lab = "Weight"
  )
  class(spectrum) <- c("milne_pc_spectrum", "chord", class(spectrum))
  spectrum
}
#' Milne pitch-class spectrum
#'
#' This function represents an input object as a
#' 'Milne pitch-class spectrum'.
#' A Milne pitch-class spectrum defines 'perceptual weight'
#' as a continuous function of 'pitch class'.
#'
#' @details
#' This spectrum is typically constructed from musical chords
#' by expanding each note into its implied harmonics
#' and applying a Gaussian smoothing to account for perceptual uncertainties.
#' See \insertCite{Milne2016;textual}{hrep} for details.
#'
#' @param x Input sonority.
#'
#' @param ...
#' Provided for S3 method consistency.
#'
#' @return An object of class \code{milne_pc_spectrum}.
#'
#' @rdname milne_pc_spectrum
#'
#' @seealso \code{\link{.milne_pc_spectrum}}.
#'
#' @references
#' \insertAllCited{}
#'
#' @export
milne_pc_spectrum <- function(x, ...) {
  # S3 generic; dispatches to milne_pc_spectrum.<class> (pc_set, default, ...).
  UseMethod("milne_pc_spectrum")
}
#' @param weights (Numeric vector)
#' Vector of weights to assign to each pitch class.
#' If a scalar value is provided, this value is assigned to all pitch classes.
#'
#' @param num_harmonics (Integerish scalar)
#' Number of harmonics to use when expanding tones into their implied harmonics,
#' and when defining the harmonic template
#' (including the fundamental frequency).
#' Defaults to 12, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @param rho (Numeric scalar)
#' Roll-off parameter for harmonic expansion.
#' Defaults to 0.75, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @param sigma (Numeric scalar)
#' Standard deviation of the Gaussian smoothing distribution (cents).
#' Defaults to 6.83, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @param array_dim (Integerish scalar)
#' Dimensionality of the pitch-class spectrum array.
#' Defaults to 1200, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @rdname milne_pc_spectrum
#'
#' @references
#' \insertAllCited{}
#'
#' @export
milne_pc_spectrum.pc_set <- function(x,
                                     weights = 1,
                                     num_harmonics = 12,
                                     rho = 0.75,
                                     sigma = 6.83,
                                     array_dim = 1200,
                                     ...) {
  # Recycle a scalar weight across all pitch classes of the set.
  if (length(weights) == 1L) weights <- rep(weights, times = length(x))
  # One spectrum per chord tone: expand each pitch class into its harmonics
  # and smooth. mapply simplifies the results into an array_dim x n matrix.
  pc_spectra <- mapply(
    function(pc, weight) {
      get_complex_tone(fundamental_pc = pc,
                       weight = weight,
                       array_dim = array_dim,
                       num_harmonics = num_harmonics,
                       rho = rho,
                       sigma = sigma)
    }, x, weights)
  # Superpose the per-tone spectra into a single pitch-class spectrum.
  .milne_pc_spectrum(rowSums(pc_spectra))
}
#' @rdname milne_pc_spectrum
#' @export
milne_pc_spectrum.default <- function(x, ...) {
  # Fallback: coerce the input to a pc_set, then dispatch to the pc_set method.
  milne_pc_spectrum(pc_set(x), ...)
}
#' @rdname milne_pc_spectrum
#' @export
milne_pc_spectrum.milne_pc_spectrum <- function(x, ...) {
  # Already a Milne pitch-class spectrum: return unchanged (idempotent).
  x
}
#' Check for class "milne_pc_spectrum"
#'
#' Checks whether an object is of class "milne_pc_spectrum".
#' @param x Object to check.
#' @return Logical scalar.
#' @export
# inherits() is the idiomatic S3 class test; unlike methods::is() it does not
# depend on the methods package being attached, and behaves identically for
# the plain S3 class used here.
is.milne_pc_spectrum <- function(x) inherits(x, "milne_pc_spectrum")
# Pitch-class spectrum, template 1
# Makes a Gaussian pitch-class spectral template centred on 0,
# with standard deviation <sigma> (in array bins).
# The template is truncated to zero for points <truncation_point>
# standard deviations or further away from the mean,
# after \insertCite{Milne2016;textual}{hrep}.
# Returns a numeric vector of length <array_dim>; mass to the "left" of the
# mean wraps around to the end of the vector (circular pitch-class topology).
pc_spectrum_template_1 <- function(array_dim, sigma, truncation_point) {
  checkmate::qassert(array_dim, "X1[3,)")
  checkmate::qassert(sigma, "N1")
  checkmate::qassert(truncation_point, "N1(0,]")
  # Truncation limit in bins. Previously this hard-coded 12 standard
  # deviations (floor(sigma * 12)), silently ignoring the truncation_point
  # argument; existing callers all pass truncation_point = 12, so behaviour
  # is unchanged for them.
  limit <- floor(sigma * truncation_point)
  if (limit + 1 > array_dim)
    stop("array_dim is too small to create this spectrum")
  # NOTE(review): assumes limit >= 1 (i.e. sigma * truncation_point >= 1);
  # smaller values would make seq() count downwards and mis-index -- confirm
  # upstream parameter ranges.
  template <- numeric(array_dim)
  template[1] <- dnorm(0, mean = 0, sd = sigma)
  offsets <- seq(from = 1, to = limit)
  weight <- dnorm(offsets, mean = 0, sd = sigma)
  # Fill symmetrically on either side of bin 1, wrapping around the circle.
  template[2:(limit + 1)] <- weight
  template[array_dim:(array_dim - limit + 1)] <- weight
  template
}
# Pitch-class spectrum, template 2
# Shifts the zero-centred Gaussian template from pc_spectrum_template_1 so
# that its peak sits at <mean> (in bins, rounded to the nearest bin) and
# scales it to carry total weight <mass>. Wrap-around on the pitch-class
# circle is handled with modular indexing.
pc_spectrum_template_2 <- function(array_dim, mean, mass, sigma, truncation_point = 12) {
  stopifnot(mean >= 0, mean <= array_dim)
  origin <- round(mean)
  template <- pc_spectrum_template_1(
    array_dim, sigma, truncation_point = truncation_point
  )
  scaled <- template * mass
  # Rotate the template so its peak lands at <origin>. (A dead pre-allocation
  # of the output vector was removed; the indexing below builds it directly.)
  bins <- seq(from = 0, to = array_dim - 1)
  scaled[((bins - origin) %% array_dim) + 1]
}
#' Make complex tone
#'
#' Returns an array describing the pitch-class spectrum for a given complex tone.
#' @param fundamental_pc (Numeric scalar) Fundamental pitch class, in [0, 12).
#' @param weight (Numeric scalar) Non-negative weight for the tone.
#' @param array_dim (Integerish scalar) Dimensionality of the spectrum array.
#' @param num_harmonics Number of harmonics, including the fundamental
#' @param rho (Numeric scalar) Harmonic roll-off parameter.
#' @param sigma (Numeric scalar) Gaussian smoothing standard deviation.
#' @return A numeric vector of length \code{array_dim}.
#' @keywords internal
new_complex_tone <- function(
  fundamental_pc,
  weight,
  array_dim,
  num_harmonics,
  rho,
  sigma
) {
  checkmate::qassert(fundamental_pc, "N1[0,12)")
  checkmate::qassert(weight, "N1[0,)")
  checkmate::qassert(num_harmonics, "X1[0,)")
  checkmate::qassert(rho, "N1")
  checkmate::qassert(sigma, "N1[0,)")
  # Robustness fix: num_harmonics = 0 is permitted by the assertion
  # above, but rowSums() on an empty result would fail; return silence.
  if (num_harmonics == 0L) return(numeric(array_dim))
  # Pitch class (in array bins) of each harmonic, wrapped to the octave:
  # harmonic i lies log2(i) octaves above the fundamental.
  pcs <- vapply(seq_len(num_harmonics),
                function(i) {
                  (
                    (fundamental_pc * array_dim / 12) +
                      (array_dim * log(i, 2))
                  ) %% array_dim
                }, numeric(1))
  # Harmonic amplitudes decay as 1 / i ^ rho.
  weights <- vapply(seq_len(num_harmonics),
                    function(i) {
                      weight / (i ^ rho)
                    }, numeric(1))
  # One smoothed Gaussian template per harmonic (columns), summed by row.
  spectra <- mapply(
    function(pc, weight) {
      pc_spectrum_template_2(array_dim, pc, weight, sigma)
    }, pcs, weights, SIMPLIFY = TRUE
  )
  rowSums(spectra)
}
# Get complex tone
#
# Wrapper for \code{new_complex_tone} that implements caching.
# memoise() keys results on the argument values, so repeated chords
# re-use previously computed tone spectra instead of recomputing them.
get_complex_tone <- memoise::memoise(new_complex_tone)
| /R/milne-pc-spectrum.R | permissive | pmcharrison/hrep | R | false | false | 6,565 | r | #' Constructor function for Milne pitch-class spectrum
#'
#' This function constructs a "milne_pc_spectrum" object.
#' @param x A numeric vector of pitch-class weights,
#' typically (but not necessarily) of length 1200.
#' @return An object of class "milne_pc_spectrum".
#' @seealso \code{\link{milne_pc_spectrum}}.
#' @export
.milne_pc_spectrum <- function(x) {
  # The input must be a bare numeric vector of pitch-class weights.
  checkmate::qassert(x, "N")
  # Build the underlying smooth-spectrum representation: weights over a
  # circular pitch-class axis spanning [0, 12).
  spectrum <- smooth_spectrum(
    x = unclass(x),
    x_unit = "pc",
    y_unit = "weight",
    lower = 0,
    upper = 12,
    low_eq = TRUE,
    high_eq = FALSE,
    label = "pitch-class spectrum",
    x_lab = "Pitch class",
    y_lab = "Weight"
  )
  # Prepend the milne_pc_spectrum / chord classes for S3 dispatch.
  class(spectrum) <- c("milne_pc_spectrum", "chord", class(spectrum))
  spectrum
}
#' Milne pitch-class spectrum
#'
#' This function represents an input object as a
#' 'Milne pitch-class spectrum'.
#' A Milne pitch-class spectrum defines 'perceptual weight'
#' as a continuous function of 'pitch class'.
#'
#' @details
#' This spectrum is typically constructed from musical chords
#' by expanding each note into its implied harmonics
#' and applying a Gaussian smoothing to account for perceptual uncertainties.
#' See \insertCite{Milne2016;textual}{hrep} for details.
#'
#' @param x Input sonority.
#'
#' @param ...
#' Provided for S3 method consistency.
#'
#' @return An object of class \code{milne_pc_spectrum}.
#'
#' @rdname milne_pc_spectrum
#'
#' @seealso \code{\link{.milne_pc_spectrum}}.
#'
#' @references
#' \insertAllCited{}
#'
#' @export
milne_pc_spectrum <- function(x, ...) {
  # S3 generic: dispatches on the class of x (pc_set, milne_pc_spectrum,
  # or anything coercible to a pc_set via the default method).
  UseMethod("milne_pc_spectrum")
}
#' @param weights (Numeric vector)
#' Vector of weights to assign to each pitch class.
#' If a scalar value is provided, this value is assigned to all pitch classes.
#'
#' @param num_harmonics (Integerish scalar)
#' Number of harmonics to use when expanding tones into their implied harmonics,
#' and when defining the harmonic template
#' (including the fundamental frequency).
#' Defaults to 12, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @param rho (Numeric scalar)
#' Roll-off parameter for harmonic expansion.
#' Defaults to 0.75, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @param sigma (Numeric scalar)
#' Standard deviation of the Gaussian smoothing distribution (cents).
#' Defaults to 6.83, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @param array_dim (Integerish scalar)
#' Dimensionality of the pitch-class spectrum array.
#' Defaults to 1200, after
#' \insertCite{Milne2016;textual}{hrep}.
#'
#' @rdname milne_pc_spectrum
#'
#' @references
#' \insertAllCited{}
#'
#' @export
milne_pc_spectrum.pc_set <- function(x,
                                     weights = 1,
                                     num_harmonics = 12,
                                     rho = 0.75,
                                     sigma = 6.83,
                                     array_dim = 1200,
                                     ...) {
  # Recycle a scalar weight over every pitch class in the set.
  if (length(weights) == 1L) weights <- rep(weights, times = length(x))
  # Robustness fix: for an empty pitch-class set, mapply() returns an
  # empty list and rowSums() would fail; return an all-zero spectrum.
  if (length(x) == 0L) return(.milne_pc_spectrum(numeric(array_dim)))
  # One column per pitch class: each tone expanded into its harmonics
  # and smoothed (see get_complex_tone / new_complex_tone).
  pc_spectra <- mapply(
    function(pc, weight) {
      get_complex_tone(fundamental_pc = pc,
                       weight = weight,
                       array_dim = array_dim,
                       num_harmonics = num_harmonics,
                       rho = rho,
                       sigma = sigma)
    }, x, weights)
  # Sum the per-tone spectra into a single pitch-class spectrum.
  .milne_pc_spectrum(rowSums(pc_spectra))
}
#' @rdname milne_pc_spectrum
#' @export
# Fallback method: coerce the input to a pc_set, then dispatch again.
milne_pc_spectrum.default <- function(x, ...) {
  milne_pc_spectrum(pc_set(x), ...)
}
#' @rdname milne_pc_spectrum
#' @export
# Identity method: coercing an existing spectrum is a no-op (idempotent).
milne_pc_spectrum.milne_pc_spectrum <- function(x, ...) {
  x
}
#' Check for class "milne_pc_spectrum"
#'
#' Checks whether an object is of class "milne_pc_spectrum".
#' @param x Object to check.
#' @return Logical scalar.
#' @export
# inherits() is the idiomatic base-R S3 class test and, unlike
# methods::is(), does not depend on the methods package being attached.
is.milne_pc_spectrum <- function(x) inherits(x, "milne_pc_spectrum")
# Pitch-class spectrum, template 1
# Makes a Gaussian pitch-class spectral template with unit mass, centred on 0,
# with standard deviation <sigma> (measured in array bins).
# The template will be truncated to zero for points <truncation_point>
# standard deviations or further away from the mean,
# after \insertCite{Milne2016;textual}{hrep}.
pc_spectrum_template_1 <- function(array_dim, sigma, truncation_point) {
  checkmate::qassert(array_dim, "X1[3,)")
  checkmate::qassert(sigma, "N1")
  checkmate::qassert(truncation_point, "N1(0,]")
  # Bug fix: the limit previously hard-coded 12 standard deviations,
  # silently ignoring <truncation_point>. Behaviour is unchanged for the
  # default truncation_point = 12 used by pc_spectrum_template_2.
  limit <- floor(sigma * truncation_point)
  template <- numeric(array_dim)
  template[1] <- dnorm(0, mean = 0, sd = sigma)
  seq <- seq(from = 1, to = limit)
  weight <- dnorm(seq, mean = 0, sd = sigma)
  if (limit + 1 > array_dim)
    stop("array_dim is too small to create this spectrum")
  # The pitch-class axis is circular: indices 2..(limit + 1) carry the
  # positive offsets, and the same weights are mirrored backwards from
  # the end of the array for the negative offsets.
  template[2:(limit + 1)] <- weight
  template[array_dim:(array_dim - limit + 1)] <- weight
  template
}
# Pitch-class spectrum, template 2.
# Rotates template 1 so that it is centred on <mean> (in array bins)
# and scales it by <mass>.
pc_spectrum_template_2 <- function(array_dim, mean, mass, sigma, truncation_point = 12) {
  stopifnot(mean >= 0, mean <= array_dim)
  origin <- round(mean)
  template <- pc_spectrum_template_1(
    array_dim, sigma, truncation_point = truncation_point
  )
  scaled <- template * mass
  # Circularly shift the scaled template so its peak lands at <origin>.
  # (The former `output <- numeric(array_dim)` was a dead store: the
  # following line rebound `output` wholesale, so it is removed.)
  seq <- seq(from = 0, to = array_dim - 1)
  scaled[((seq - origin) %% array_dim) + 1]
}
#' Make complex tone
#'
#' Returns an array describing the pitch-class spectrum for a given complex tone.
#' @param fundamental_pc (Numeric scalar) Fundamental pitch class, in [0, 12).
#' @param weight (Numeric scalar) Non-negative weight for the tone.
#' @param array_dim (Integerish scalar) Dimensionality of the spectrum array.
#' @param num_harmonics Number of harmonics, including the fundamental
#' @param rho (Numeric scalar) Harmonic roll-off parameter.
#' @param sigma (Numeric scalar) Gaussian smoothing standard deviation.
#' @return A numeric vector of length \code{array_dim}.
#' @keywords internal
new_complex_tone <- function(
  fundamental_pc,
  weight,
  array_dim,
  num_harmonics,
  rho,
  sigma
) {
  checkmate::qassert(fundamental_pc, "N1[0,12)")
  checkmate::qassert(weight, "N1[0,)")
  checkmate::qassert(num_harmonics, "X1[0,)")
  checkmate::qassert(rho, "N1")
  checkmate::qassert(sigma, "N1[0,)")
  # Robustness fix: num_harmonics = 0 is permitted by the assertion
  # above, but rowSums() on an empty result would fail; return silence.
  if (num_harmonics == 0L) return(numeric(array_dim))
  # Pitch class (in array bins) of each harmonic, wrapped to the octave:
  # harmonic i lies log2(i) octaves above the fundamental.
  pcs <- vapply(seq_len(num_harmonics),
                function(i) {
                  (
                    (fundamental_pc * array_dim / 12) +
                      (array_dim * log(i, 2))
                  ) %% array_dim
                }, numeric(1))
  # Harmonic amplitudes decay as 1 / i ^ rho.
  weights <- vapply(seq_len(num_harmonics),
                    function(i) {
                      weight / (i ^ rho)
                    }, numeric(1))
  # One smoothed Gaussian template per harmonic (columns), summed by row.
  spectra <- mapply(
    function(pc, weight) {
      pc_spectrum_template_2(array_dim, pc, weight, sigma)
    }, pcs, weights, SIMPLIFY = TRUE
  )
  rowSums(spectra)
}
# Get complex tone
#
# Wrapper for \code{new_complex_tone} that implements caching.
# memoise() keys results on the argument values, so repeated chords
# re-use previously computed tone spectra instead of recomputing them.
get_complex_tone <- memoise::memoise(new_complex_tone)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.survFitTKTD.R
\name{plot.survFitTKTD}
\alias{plot.survFitTKTD}
\title{Plotting method for \code{survFitTKTD} objects}
\usage{
\method{plot}{survFitTKTD}(
x,
xlab = "Time",
ylab = "Survival probablity",
main = NULL,
concentration = NULL,
spaghetti = FALSE,
one.plot = FALSE,
adddata = FALSE,
addlegend = FALSE,
style = "ggplot",
...
)
}
\arguments{
\item{x}{An object of class \code{survFitTKTD}.}
\item{xlab}{A label for the \eqn{X}-axis, by default \code{Time}.}
\item{ylab}{A label for the \eqn{Y}-axis, by default \code{Survival probablity}.}
\item{main}{A main title for the plot.}
\item{concentration}{A numeric value corresponding to some specific concentration in
\code{data}. If \code{concentration = NULL}, draws a plot for each concentration.}
\item{spaghetti}{if \code{TRUE}, draws a set of survival curves using
parameters drawn from the posterior distribution}
\item{one.plot}{if \code{TRUE}, draws all the estimated curves in
one plot instead of one plot per concentration.}
\item{adddata}{if \code{TRUE}, adds the observed data to the plot
with (frequentist binomial) confidence intervals}
\item{addlegend}{if \code{TRUE}, adds a default legend to the plot.}
\item{style}{graphical backend, can be \code{'generic'} or \code{'ggplot'}}
\item{\dots}{Further arguments to be passed to generic methods.}
}
\value{
a plot of class \code{ggplot}
}
\description{
This is the generic \code{plot} S3 method for the
\code{survFitTKTD}. It plots the fit obtained for each
concentration of chemical compound in the original dataset.
}
\details{
The fitted curves represent the \strong{estimated survival probability} as a function
of time for each concentration.
When \code{adddata = TRUE} the black dots depict the \strong{observed survival
probability} at each time point. Note that since our model does not take
inter-replicate variability into consideration, replicates are systematically
pooled in this plot.
The function plots both 95\% credible intervals for the estimated survival
probability (by default the grey area around the fitted curve) and 95\% binomial confidence
intervals for the observed survival probability (as black error bars if
\code{adddata = TRUE}).
Both types of intervals are taken at the same level. Typically
a good fit is expected to display a large overlap between the two types of intervals.
If \code{spaghetti = TRUE}, the credible intervals are represented by two
dotted lines limiting the credible band, and a spaghetti plot is added to this band.
This spaghetti plot consists of the representation of simulated curves using parameter values
sampled in the posterior distribution (2\% of the MCMC chains are randomly
taken for this sample).
}
\examples{
# (1) Load the survival data
data(propiconazole)
# (2) Create an object of class "survData"
dataset <- survData(propiconazole)
\donttest{
# (3) Run the survFitTKTD function ('SD' model only)
out <- survFitTKTD(dataset)
# (4) Plot the fitted curves in one plot
plot(out)
# (5) Plot one fitted curve per concentration with credible limits as
# spaghetti, data and confidence intervals
# and with a ggplot style
plot(out, spaghetti = TRUE , adddata = TRUE, one.plot = FALSE,
style = "ggplot")
# (6) Plot fitted curve for one specific concentration
plot(out, concentration = 36, style = "ggplot")
}
}
\keyword{plot}
| /man/plot.survFitTKTD.Rd | no_license | cran/morse | R | false | true | 3,430 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.survFitTKTD.R
\name{plot.survFitTKTD}
\alias{plot.survFitTKTD}
\title{Plotting method for \code{survFitTKTD} objects}
\usage{
\method{plot}{survFitTKTD}(
x,
xlab = "Time",
ylab = "Survival probablity",
main = NULL,
concentration = NULL,
spaghetti = FALSE,
one.plot = FALSE,
adddata = FALSE,
addlegend = FALSE,
style = "ggplot",
...
)
}
\arguments{
\item{x}{An object of class \code{survFitTKTD}.}
\item{xlab}{A label for the \eqn{X}-axis, by default \code{Time}.}
\item{ylab}{A label for the \eqn{Y}-axis, by default \code{Survival probablity}.}
\item{main}{A main title for the plot.}
\item{concentration}{A numeric value corresponding to some specific concentration in
\code{data}. If \code{concentration = NULL}, draws a plot for each concentration.}
\item{spaghetti}{if \code{TRUE}, draws a set of survival curves using
parameters drawn from the posterior distribution}
\item{one.plot}{if \code{TRUE}, draws all the estimated curves in
one plot instead of one plot per concentration.}
\item{adddata}{if \code{TRUE}, adds the observed data to the plot
with (frequentist binomial) confidence intervals}
\item{addlegend}{if \code{TRUE}, adds a default legend to the plot.}
\item{style}{graphical backend, can be \code{'generic'} or \code{'ggplot'}}
\item{\dots}{Further arguments to be passed to generic methods.}
}
\value{
a plot of class \code{ggplot}
}
\description{
This is the generic \code{plot} S3 method for the
\code{survFitTKTD}. It plots the fit obtained for each
concentration of chemical compound in the original dataset.
}
\details{
The fitted curves represent the \strong{estimated survival probability} as a function
of time for each concentration.
When \code{adddata = TRUE} the black dots depict the \strong{observed survival
probability} at each time point. Note that since our model does not take
inter-replicate variability into consideration, replicates are systematically
pooled in this plot.
The function plots both 95\% credible intervals for the estimated survival
probability (by default the grey area around the fitted curve) and 95\% binomial confidence
intervals for the observed survival probability (as black error bars if
\code{adddata = TRUE}).
Both types of intervals are taken at the same level. Typically
a good fit is expected to display a large overlap between the two types of intervals.
If \code{spaghetti = TRUE}, the credible intervals are represented by two
dotted lines limiting the credible band, and a spaghetti plot is added to this band.
This spaghetti plot consists of the representation of simulated curves using parameter values
sampled in the posterior distribution (2\% of the MCMC chains are randomly
taken for this sample).
}
\examples{
# (1) Load the survival data
data(propiconazole)
# (2) Create an object of class "survData"
dataset <- survData(propiconazole)
\donttest{
# (3) Run the survFitTKTD function ('SD' model only)
out <- survFitTKTD(dataset)
# (4) Plot the fitted curves in one plot
plot(out)
# (5) Plot one fitted curve per concentration with credible limits as
# spaghetti, data and confidence intervals
# and with a ggplot style
plot(out, spaghetti = TRUE , adddata = TRUE, one.plot = FALSE,
style = "ggplot")
# (6) Plot fitted curve for one specific concentration
plot(out, concentration = 36, style = "ggplot")
}
}
\keyword{plot}
|
# Export a subset of the resilience-indicator fields to CSV,
# adding a stable row id.
library(tidyverse)
load('data/resilience_indicators_data.RData')
# NOTE(review): sourcing helpers from a personal Downloads folder is not
# reproducible on other machines -- consider vendoring the needed
# functions into the project (TODO confirm which are actually used).
source('~/Downloads/all_functions.R')
resilience_indicators_data %>%
  mutate(id = row_number()) %>%  # sequential row identifier
  select(id,
         name,
         feature_easting,
         feature_northing,
         administrative_boundary,
         description) %>%
  write_csv('data/outputs/resilience_indicators.csv')
| /scripts/Hot_spots_analysis/resilience_indicators_final.R | no_license | ElliotMeador84/Scotland_resilience_analysis | R | false | false | 368 | r | library(tidyverse)
# Export a subset of the resilience-indicator fields to CSV,
# adding a stable row id.
load('data/resilience_indicators_data.RData')
# NOTE(review): sourcing helpers from a personal Downloads folder is not
# reproducible on other machines -- consider vendoring the needed
# functions into the project (TODO confirm which are actually used).
source('~/Downloads/all_functions.R')
resilience_indicators_data %>%
  mutate(id = row_number()) %>%  # sequential row identifier
  select(id,
         name,
         feature_easting,
         feature_northing,
         administrative_boundary,
         description) %>%
  write_csv('data/outputs/resilience_indicators.csv')
|
# Remove everything except the objects carried over from the previous
# processing step.
# NOTE(review): ls()-based clean-ups tie the script to an interactive
# workspace; a fresh R session per script is more reproducible.
#remove all files expect...
rm(list = ls()[!ls() %in% c("strips",
                            "paddock_ID_1",
                            "paddock_ID_2",
                            "paddock_ID_3",
                            #"paddock_ID_4",
                            "Zone_labels",
                            "input_file",
                            "assigned_names2",
                            "all_results_1"
                            #"function_grand_mean_std_error"
)])
# Paddock database holding the agronomist-recommended P and N rates.
recom_rateDB <- read_excel( "W:/value_soil_testing_prj/Yield_data/2020/processing/GRDC 2020 Paddock Database_SA_VIC_May25 2021.xlsx")
##########################################################################################################################################
### Extra analysis for ricks tables GSP vs low high comparision
# Keep only the zone ID, the recommended P rate, and the three
# yield-scenario N recommendations.
recom_rateDB <- recom_rateDB %>%
  dplyr::select(Zone_ID = `Paddock code` ,
                p_rec = `P rec`,
                n_rec_yld_low = `N Rec (< 3 t/ha)` ,
                n_rec_yld_med = `N Rec (3-5 t/ha)` ,
                n_rec_yld_high = `N Rec (> 5 t/ha)`
  )
# Highest N recommendation across the three yield scenarios.
# Rows where all three are NA give -Inf here (handled just below).
recom_rateDB <- dplyr::mutate(recom_rateDB, maxN = apply(recom_rateDB[3:5], 1, max, na.rm = TRUE))
str(recom_rateDB)
# remove redunant clm and replace inf
# -Inf fails the `maxN >= 0` test, so all-NA rows fall through to NA_real_.
recom_rateDB <- recom_rateDB %>%
  mutate(
    maxN = case_when(
      maxN >= 0 ~ maxN,
      TRUE ~ NA_real_
    )
  )
recom_rateDB <- recom_rateDB %>%
  dplyr::select(Zone_ID ,
                p_rec,
                N_rec = maxN
  )
str(strips)
# Segment-level yield data for zoned strips only.
rec_rates <- strips %>%
  filter(!is.na(zone_name)) %>%
  dplyr::select(Zone_ID, SegmentID, YldMassDry, Rate, rate_name_order, rate_name, zone_name, Zone, Strip_Type)
rec_rates
#put the tow files togther
str(rec_rates)
str(recom_rateDB)
# Align key types before joining (Zone_ID is numeric in rec_rates).
recom_rateDB$Zone_ID <- as.double(recom_rateDB$Zone_ID)
recom_rate1 <- left_join( rec_rates, recom_rateDB)
recom_rate1 <- data.frame(recom_rate1)
str(recom_rate1)
## bring in the fert rates applied cal
fert_app_all_steps <- read.csv("W:/value_soil_testing_prj/Yield_data/2020/processing/processing_files/step2_fert_app_all_steps.csv")
# Keep only the paddocks being analysed in this run
# (first 5 characters of the paddock IDs).
fert_app_all_steps <- fert_app_all_steps %>%
  dplyr::filter(Paddock_ID == substr(paddock_ID_1, start = 1, stop = 5)|
                  Paddock_ID == substr(paddock_ID_2, start = 1, stop = 5)
                #Paddock_ID == substr(paddock_ID_3, start = 1, stop = 5)
                #Paddock_ID == substr(paddock_ID_4, start = 1, stop = 5)
  ) %>%
  dplyr::select( Paddock_ID, Rate, Strip_Rate, Total_sum_P_content, Total_sum_N_content)
str(fert_app_all_steps)
str(recom_rate1)
##################################################################################################################
#
# Attach the applied P/N content per rate to the yield data.
recom_rate1 <- left_join(recom_rate1, fert_app_all_steps)
str(recom_rate1)
#View(recom_rate1)
# recom_rate1 %>% group_by( Rate, Zone_ID, zone_name) %>%
# summarise(count= n()) %>%
# arrange(Zone_ID, Rate )
###############################################################################################################
## what are the comparision I want to make
# names(recom_rate1)
# View(recom_rate1)
# One row per zone x rate: recommended vs applied P and N.
recom_rate1_summary <- recom_rate1 %>% group_by(Zone_ID,
                                                Rate, zone_name) %>%
  summarise(
    p_rec = max(p_rec, na.rm = TRUE),
    P_content = max(Total_sum_P_content, na.rm = TRUE),
    n_rec = max(N_rec, na.rm = TRUE),
    N_content = max(Total_sum_N_content, na.rm = TRUE)
  )
recom_rate1_summary <- ungroup(recom_rate1_summary)
# max(..., na.rm = TRUE) over all-NA groups yields -Inf; convert to NA.
recom_rate1_summary[] <- Map(function(x) replace(x, is.infinite(x), NA), recom_rate1_summary)
recom_rate1_summary <- data.frame(recom_rate1_summary)
str(recom_rate1_summary)
recom_rate1_summary
## do the difference for P
# Absolute gap between recommended and applied P, sorted smallest first.
recom_rate1_summary <- recom_rate1_summary %>%
  dplyr::mutate(difference_p = abs(p_rec - P_content)) %>%
  arrange(difference_p)
str(recom_rate1_summary)
recom_rate1_summary <- ungroup(recom_rate1_summary)
str(recom_rate1_summary)
recom_rate1_summary$Rate <- as.double(recom_rate1_summary$Rate)
unique(recom_rate1_summary$Rate)
## Two steps need to filter data first (zone 1 and zone 2 need to have this as clm in
# Split the summary by management zone; ranks are assigned within zone.
recom_rate1_summary_zone1 <- recom_rate1_summary %>%
  filter(zone_name == "zone1")
recom_rate1_summary_zone2 <- recom_rate1_summary %>%
  filter(zone_name == "zone2")
# recom_rate1_summary_zone3 <- recom_rate1_summary %>%
#   filter(zone_name == "zone3")
# recom_rate1_summary_zone4 <- recom_rate1_summary %>%
#   filter(zone_name == "zone4")
recom_rate1_summary_zone1
# Label each rate in the zone by how close its applied P is to the
# recommendation: "best_match" is the closest, then rate1..rate3 in
# increasing distance.
recom_rate1_summary_zone1 <-recom_rate1_summary_zone1 %>%
  dplyr::mutate(
    approx_p_rec =
      dplyr::case_when(
        difference_p == min(recom_rate1_summary_zone1$difference_p) ~ "best_match",
        difference_p == min(recom_rate1_summary_zone1$difference_p[recom_rate1_summary_zone1$difference_p !=
                                                                     min(recom_rate1_summary_zone1$difference_p)]) ~ "rate1",
        difference_p == max(recom_rate1_summary_zone1$difference_p[recom_rate1_summary_zone1$difference_p !=
                                                                     max(recom_rate1_summary_zone1$difference_p)]) ~ "rate2",
        #use this if you have 4 rates best match rate 1 -3
        difference_p == max(recom_rate1_summary_zone1$difference_p) ~ "rate3",
        #difference_p == max(recom_rate1_summary_zone1$difference_p) ~ "rate2",
        TRUE ~ as.character(Rate)
      )
  )
# Same labelling for zone 2.
recom_rate1_summary_zone2 <-recom_rate1_summary_zone2 %>%
  dplyr::mutate(
    approx_p_rec =
      dplyr::case_when(
        difference_p == min(recom_rate1_summary_zone2$difference_p) ~ "best_match",
        difference_p == min(recom_rate1_summary_zone2$difference_p[recom_rate1_summary_zone2$difference_p !=
                                                                     min(recom_rate1_summary_zone2$difference_p)]) ~ "rate1",
        difference_p == max(recom_rate1_summary_zone2$difference_p[recom_rate1_summary_zone2$difference_p !=
                                                                     max(recom_rate1_summary_zone2$difference_p)]) ~ "rate2",
        #use this if you have 4 rates best match rate 1 -3
        #difference_p == max(recom_rate1_summary_zone2$difference_p) ~ "rate2",
        difference_p == max(recom_rate1_summary_zone2$difference_p) ~ "rate3",
        TRUE ~ as.character(Rate)
      )
  )
# recom_rate1_summary_zone3 <-recom_rate1_summary_zone3 %>%
# dplyr::mutate(
# approx_p_rec =
# dplyr::case_when(
# difference_p == min(recom_rate1_summary_zone3$difference_p) ~ "best_match",
# difference_p == min(recom_rate1_summary_zone3$difference_p[recom_rate1_summary_zone3$difference_p !=
# min(recom_rate1_summary_zone3$difference_p)]) ~ "rate1",
# difference_p == max(recom_rate1_summary_zone3$difference_p[recom_rate1_summary_zone3$difference_p !=
# max(recom_rate1_summary_zone3$difference_p)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# #difference_p == max(recom_rate1_summary_zone3$difference_p) ~ "rate2",
# difference_p == max(recom_rate1_summary_zone3$difference_p) ~ "rate3",
# TRUE ~ as.character(Rate)
# )
# )
# recom_rate1_summary_zone4 <-recom_rate1_summary_zone4 %>%
# dplyr::mutate(
# approx_p_rec =
# dplyr::case_when(
# difference_p == min(recom_rate1_summary_zone4$difference_p) ~ "best_match",
# difference_p == min(recom_rate1_summary_zone4$difference_p[recom_rate1_summary_zone4$difference_p !=
# min(recom_rate1_summary_zone4$difference_p)]) ~ "rate1",
# difference_p == max(recom_rate1_summary_zone4$difference_p[recom_rate1_summary_zone4$difference_p !=
# max(recom_rate1_summary_zone4$difference_p)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# #difference_p == max(recom_rate1_summary_zone4$difference_p) ~ "rate2",
# difference_p == max(recom_rate1_summary_zone4$difference_p) ~ "rate3",
# TRUE ~ as.character(Rate)
# )
# )
recom_rate1_summary_zone1
recom_rate1_summary_zone2
# NOTE(review): only zone1 and zone2 were created above -- printing
# zone3/zone4 will raise "object not found" when this script is sourced;
# the rm() below will also warn for the same reason. Verify intent.
recom_rate1_summary_zone3
recom_rate1_summary_zone4
# put them back togther
recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
#                              recom_rate1_summary_zone3)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
#                              recom_rate1_summary_zone3, recom_rate1_summary_zone4)
rm(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
   recom_rate1_summary_zone3, recom_rate1_summary_zone4)
str(recom_rate1_summary)
##########################################################################################################
## do the difference for n This needs more work
# Absolute gap between recommended and applied N, sorted smallest first.
recom_rate1_summary <- recom_rate1_summary %>%
  dplyr::mutate(difference_n = abs(n_rec - N_content)) %>%
  arrange(difference_n)
recom_rate1_summary <- ungroup(recom_rate1_summary)
str(recom_rate1_summary)
# Re-split by zone for the N ranking.
recom_rate1_summary_zone1 <- recom_rate1_summary %>%
  filter(zone_name == "zone1")
recom_rate1_summary_zone2 <- recom_rate1_summary %>%
  filter(zone_name == "zone2")
# recom_rate1_summary_zone3 <- recom_rate1_summary %>%
#   filter(zone_name == "zone3")
# recom_rate1_summary_zone4 <- recom_rate1_summary %>%
#   filter(zone_name == "zone4")
recom_rate1_summary_zone1
recom_rate1_summary_zone2
recom_rate1_summary_zone3
recom_rate1_summary_zone4
# Label each rate in the zone by how close its applied N is to the
# recommendation ("best_match" = closest, then rate1/rate2).
recom_rate1_summary_zone1 <-recom_rate1_summary_zone1 %>%
  dplyr::mutate(
    approx_n_rec =
      dplyr::case_when(
        difference_n == min(recom_rate1_summary_zone1$difference_n) ~ "best_match",
        difference_n == min(recom_rate1_summary_zone1$difference_n[recom_rate1_summary_zone1$difference_n !=
                                                                     min(recom_rate1_summary_zone1$difference_n)]) ~ "rate1",
        #difference_n == max(recom_rate1_summary_zone1$difference_n[recom_rate1_summary_zone1$difference_n !=
        #                                                            max(recom_rate1_summary_zone1$difference_n)]) ~ "rate2",
        #use this if you have 4 rates best match rate 1 -3
        #difference_n == max(recom_rate1_summary_zone1$difference_n) ~ "rate3",
        difference_n == max(recom_rate1_summary_zone1$difference_n) ~ "rate2",
        TRUE ~ as.character(Rate)
      )
  )
# Same labelling for zone 2.
recom_rate1_summary_zone2 <-recom_rate1_summary_zone2 %>%
  dplyr::mutate(
    approx_n_rec =
      dplyr::case_when(
        difference_n == min(recom_rate1_summary_zone2$difference_n) ~ "best_match",
        difference_n == min(recom_rate1_summary_zone2$difference_n[recom_rate1_summary_zone2$difference_n !=
                                                                     min(recom_rate1_summary_zone2$difference_n)]) ~ "rate1",
        #difference_n == max(recom_rate1_summary_zone2$difference_n[recom_rate1_summary_zone2$difference_n !=
        #                                                            max(recom_rate1_summary_zone2$difference_n)]) ~ "rate2",
        #use this if you have 4 rates best match rate 1 -3
        #difference_n == max(recom_rate1_summary_zone2$difference_n) ~ "rate3",
        difference_n == max(recom_rate1_summary_zone2$difference_n) ~ "rate2",
        TRUE ~ as.character(Rate)
      )
  )
# recom_rate1_summary_zone3 <-recom_rate1_summary_zone3 %>%
# dplyr::mutate(
# approx_n_rec =
# dplyr::case_when(
# difference_n == min(recom_rate1_summary_zone3$difference_n) ~ "best_match",
# difference_n == min(recom_rate1_summary_zone3$difference_n[recom_rate1_summary_zone3$difference_n !=
# min(recom_rate1_summary_zone3$difference_n)]) ~ "rate1",
# difference_n == max(recom_rate1_summary_zone3$difference_n[recom_rate1_summary_zone3$difference_n !=
# max(recom_rate1_summary_zone3$difference_n)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# #difference_n == max(recom_rate1_summary_zone3$difference_n) ~ "rate3",
# #difference_n == max(recom_rate1_summary_zone3$difference_n) ~ "rate2",
# TRUE ~ as.character(Rate)
# )
# )
# recom_rate1_summary_zone4 <-recom_rate1_summary_zone4 %>%
# dplyr::mutate(
# approx_n_rec =
# dplyr::case_when(
# difference_n == min(recom_rate1_summary_zone4$difference_n) ~ "best_match",
# difference_n == min(recom_rate1_summary_zone4$difference_n[recom_rate1_summary_zone4$difference_n !=
# min(recom_rate1_summary_zone4$difference_n)]) ~ "rate1",
# difference_n == max(recom_rate1_summary_zone4$difference_n[recom_rate1_summary_zone4$difference_n !=
# max(recom_rate1_summary_zone4$difference_n)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# difference_n == max(recom_rate1_summary_zone4$difference_n) ~ "rate3",
# #difference_n == max(recom_rate1_summary_zone4$difference_n) ~ "rate2",
# TRUE ~ as.character(Rate)
# )
# )
recom_rate1_summary_zone1
recom_rate1_summary_zone2
#recom_rate1_summary_zone3
#recom_rate1_summary_zone4
# put them back togther
recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
#                              recom_rate1_summary_zone3)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
#                              recom_rate1_summary_zone3, recom_rate1_summary_zone4)
# Drop the per-zone working copies now that they are recombined.
rm(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
recom_rate1_summary
# recom_rate1_summary %>% group_by( Rate, Zone_ID, zone_name) %>%
#   summarise(count= n()) %>%
#   arrange(Zone_ID, Rate )
###################################################################
str(recom_rate1_summary)
#what is the recommed rate for p?
# Per-zone applied P content of the rate closest to the recommendation.
Rec_rate_p <- recom_rate1_summary %>% filter(approx_p_rec == "best_match") %>%
  dplyr::select( Zone_ID, P_content) %>%
  rename( rec_rate_p = P_content)
Rec_rate_p
## add this to df with all the yield data
names(recom_rate1)
names(Rec_rate_p)
# NOTE(review): the P join below is commented out, so rec_rate_p never
# reaches recom_rate1 -- only the N comparison is active; confirm.
# recom_rate1 <- left_join(recom_rate1, Rec_rate_p, by ="Zone_ID")
# head(recom_rate1)
## is the rec higher or lower than the rec
# recom_rate1 <- recom_rate1 %>%
# mutate(
# rec_rate_high_low_p = case_when(
# rec_rate_p - Total_sum_P_content > 0 ~ "lower_than_rec_rate_p",
# rec_rate_p - Total_sum_P_content < 0 ~ "higher_than_rec_rate_p",
# rec_rate_p - Total_sum_P_content == 0 ~ "rec_rate_p",
# TRUE ~ "other"))
#check this is correct
# test <- recom_rate1 %>%
# mutate(
# rec_rate_high_low_p = rec_rate_p - Total_sum_P_content)
#what is the recommed rate for n?
# Per-zone applied N content of the rate closest to the recommendation.
Rec_rate_n <- recom_rate1_summary %>% filter(approx_n_rec == "best_match") %>%
  dplyr::select( Zone_ID, N_content) %>%
  rename( rec_rate_n = N_content)
Rec_rate_n
## add this to df with all the yield data
names(recom_rate1)
names(Rec_rate_n)
recom_rate1 %>% group_by( Rate, Zone_ID, zone_name) %>%
  summarise(count= n()) %>%
  arrange(Zone_ID, Rate )
dim(recom_rate1)
Rec_rate_n
# Attach the per-zone "closest to recommendation" N content.
recom_rate1 <- left_join(recom_rate1, Rec_rate_n, by ="Zone_ID")
# #what is the recommed rate for n?
# Rec_rate_n <- dplyr::distinct(recom_rate1_summary,Rate, .keep_all = TRUE) %>%
# filter(approx_n_rec == "best_match") %>%
# dplyr::select(Rate)
#
#
# recom_rate1
# ## add this to df
# recom_rate1 <- recom_rate1 %>%
# mutate(rec_rate_n = as.double(Rec_rate_n_value))
# #mutate(rec_rate_n = Rec_rate_n[1])
# names(recom_rate1)
# str(recom_rate1)
## is the GSP higher or lower than the rec rate
# Classify each applied N rate relative to the zone's recommended rate.
# NOTE(review): the `== 0` branch compares doubles exactly; if the
# contents are ever non-integer, consider dplyr::near() -- verify.
recom_rate1 <- recom_rate1 %>%
  mutate(
    rec_rate_high_low_n = case_when(
      rec_rate_n - Total_sum_N_content > 0 ~ "lower_than_rec_rate_n",
      rec_rate_n - Total_sum_N_content < 0 ~ "higher_than_rec_rate_n",
      rec_rate_n - Total_sum_N_content == 0 ~ "rec_rate_n",
      TRUE ~ "other"))
str(recom_rate1$rec_rate_high_low_n)
# NOTE(review): rec_rate_high_low_p is never created (its mutate is
# commented out above), so this str() prints NULL.
str(recom_rate1$rec_rate_high_low_p)
str(recom_rate1)
#################################################################################################
######### !!!!! User input needed here !!!!#####################################################
#################################################################################################
#how many rates are lower_than_rec rate - this is checking how may are lower and how many higher
# what trial is it first?
unique(recom_rate1$Strip_Type)
# for P
# NOTE(review): rec_rate_high_low_p does not exist in recom_rate1 (its
# creation is commented out), so this group_by() errors when sourced;
# only run it for trials where the P column is created.
recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
  summarise(count= n()) %>%
  arrange(Zone_ID, Rate )
str(recom_rate1)
#for n
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
  summarise(count= n()) %>%
  arrange(Zone_ID, Rate )
str(recom_rate1)
## all good - if it wasnt I would need to adjust something??
#################################################################################
### list what rates per zone I want to keep
## filter out one rate so we only have rec rate, lower than and higher than
# try and aim for sensible rates not zero if it can be avoided
# 1/2 the rec rate and *2 rec rate
# Candidate half / double rates around the recommendation (see the P
# NOTE above: this P check fails unless rec_rate_high_low_p exists).
recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
  summarise(count= n()) %>%
  filter(rec_rate_high_low_p == "rec_rate_p" ) %>%
  mutate(double_rec_rate = Rate*2,
         half_rec_rate = Rate*.5)
## For N
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
  summarise(count= n()) %>%
  filter(rec_rate_high_low_n == "rec_rate_n" ) %>%
  mutate(double_rec_rate = Rate*2,
         half_rec_rate = Rate*.5)
# Keep only the hand-picked rates per zone for the comparison.
zone_1_filter <- recom_rate1 %>%
  filter(Rate %in% c(120,240) & zone_name == "zone1") #what is in the bracket we will keep
zone_2_filter <- recom_rate1 %>%
  filter(Rate %in% c(120,240) & zone_name == "zone2")
# zone_3_filter <- recom_rate1 %>%
#   filter(Rate %in% c(20,40) & zone_name == "zone3")
#zone_4_filter <- recom_rate1 %>%
#  filter(Rate %in% c(88.2, 42.2,19.2) & zone_name == "zone4")
recom_rate1 <- rbind(zone_1_filter, zone_2_filter)
# recom_rate1 <- rbind(zone_1_filter, zone_2_filter,
#                      zone_3_filter, zone_4_filter)
# recom_rate1 <- rbind(zone_1_filter, zone_2_filter,
#                      zone_3_filter)
#rm(zone_1_filter, zone_2_filter)
unique(recom_rate1$Rate)
# this is a check
# recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
# summarise(count= n()) %>%
# arrange(Zone_ID, Rate )
#for n
# Sanity check: counts per N classification, rate, and zone.
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
  summarise(count= n()) %>%
  arrange(Zone_ID, Rate )
# first I need to make a new clm for the comparsions
# Flag membership of the two comparison groups: rows at-or-above the
# recommended N rate, and rows at-or-below it.
recom_rate1 <- recom_rate1 %>%
  mutate(
    # comparison_rec_rate_high_p = case_when(
    #   rec_rate_high_low_p == "rec_rate_p" ~ "rec_rate_high_p",
    #   rec_rate_high_low_p == "higher_than_rec_rate_p" ~ "rec_rate_high_p",
    #   TRUE ~ "other"
    # ),
    # comparison_rec_rate_low_p = case_when(
    #   rec_rate_high_low_p == "rec_rate_p" ~ "rec_rate_low_p",
    #   rec_rate_high_low_p == "lower_than_rec_rate_p" ~ "rec_rate_low_p",
    #   TRUE ~ "other"
    # ),
    comparison_rec_rate_high_n = case_when(
      rec_rate_high_low_n == "rec_rate_n" ~ "rec_rate_high_n",
      rec_rate_high_low_n == "higher_than_rec_rate_n" ~ "rec_rate_high_n",
      TRUE ~ "other"
    ),
    comparison_rec_rate_low_n = case_when(
      # Bug fix: this previously tested for "rec_rate_p", a value the
      # N classification never takes (copy-paste from the P version), so
      # rows exactly at the recommended N rate were silently dropped
      # from the "low" comparison. "rec_rate_n" mirrors the high branch.
      rec_rate_high_low_n == "rec_rate_n" ~ "rec_rate_low_n",
      rec_rate_high_low_n == "lower_than_rec_rate_n" ~ "rec_rate_low_n",
      TRUE ~ "other"
    )
  )
########################################################################################################################################
### for each zone and comparsion what is the mean and st error
## Grand mean and standard error of dry yield for one zone and one
## comparison group.
##
## df         - data frame with zone_name, Zone_ID, YldMassDry and a
##              comparison_<comparison> flag column (built above).
## comparison - comparison label, e.g. "rec_rate_high_n"; also the value the
##              flag column must equal for a row to be included.
## zone       - zone number (1, 2, ...), matched as paste0("zone", zone).
##
## Returns a one-row-per-Zone_ID tibble with columns
## grand_mean_<comparison> and se_comp_<comparison>.
function_grand_mean_std_error_zone <- function(df, comparison, zone){
# Names of the flag column to filter on and of the two output columns.
clm <- paste0("comparison_", comparison)
comparison_grand_mean <- paste0("grand_mean_", comparison)
comparison_se <- paste0("se_comp_", comparison)
grand_mean_std_error <- df %>%
dplyr::filter(zone_name == paste0("zone",zone )) %>%
dplyr::filter(.data[[clm]] == comparison) %>%
group_by(Zone_ID ) %>%
# FIX: sd() previously ran without na.rm = TRUE while mean() used it, so a
# single NA yield made the SE NA; n() also counted NA rows into the SE
# denominator. Both now use the non-missing observations consistently.
summarise(!!comparison_grand_mean := mean(YldMassDry,na.rm = TRUE ),
sd = sd(YldMassDry, na.rm = TRUE),
n = sum(!is.na(YldMassDry)),
!!comparison_se := sd / sqrt(n)) %>%
dplyr::select(-sd, -n)
grand_mean_std_error
}
# #higher than rec rate comaprison
# assign(paste0("grand_mean_std_error_zone1_", "rec_rate_high"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_p",1))
# assign(paste0("grand_mean_std_error_zone2_", "rec_rate_high"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_p",2))
#
# #low than rec rate comaprison
# assign(paste0("grand_mean_std_error_zone1_", "rec_rate_low"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_p",1))
# assign(paste0("grand_mean_std_error_zone2_", "rec_rate_low"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_p",2))
#for n
#higher than rec rate comparison
# NOTE: the target names are constant strings, so plain assignment replaces
# the original assign(paste0(...)) calls - same bindings, clearer code.
grand_mean_std_error_zone1_rec_rate_high <-
function_grand_mean_std_error_zone(recom_rate1, "rec_rate_high_n", 1)
grand_mean_std_error_zone2_rec_rate_high <-
function_grand_mean_std_error_zone(recom_rate1, "rec_rate_high_n", 2)
#grand_mean_std_error_zone3_rec_rate_high <-
# function_grand_mean_std_error_zone(recom_rate1, "rec_rate_high_n", 3)
#grand_mean_std_error_zone4_rec_rate_high <-
# function_grand_mean_std_error_zone(recom_rate1, "rec_rate_high_n", 4)
#lower than rec rate comparison
grand_mean_std_error_zone1_rec_rate_low <-
function_grand_mean_std_error_zone(recom_rate1, "rec_rate_low_n", 1)
grand_mean_std_error_zone2_rec_rate_low <-
function_grand_mean_std_error_zone(recom_rate1, "rec_rate_low_n", 2)
#grand_mean_std_error_zone3_rec_rate_low <-
# function_grand_mean_std_error_zone(recom_rate1, "rec_rate_low_n", 3)
#grand_mean_std_error_zone4_rec_rate_low <-
# function_grand_mean_std_error_zone(recom_rate1, "rec_rate_low_n", 4)
#if I have both high and low then I can join them together
#But first check what I want to add
# this is a check of what comparisons exist (P)
recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate) %>%
group_by(zone_name) %>%
distinct(rec_rate_high_low_p)%>%
filter(rec_rate_high_low_p != "rec_rate_p") %>%
arrange(rec_rate_high_low_p)
# for n: which non-rec-rate comparison levels exist per zone
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate) %>%
group_by(zone_name) %>%
distinct(rec_rate_high_low_n)%>%
filter(rec_rate_high_low_n != "rec_rate_n") %>%
arrange(rec_rate_high_low_n)
# ## higher than rec rate comparison: stack the per-zone grand means
grand_mean_recom_rate_H_se <- rbind(
grand_mean_std_error_zone1_rec_rate_high,
grand_mean_std_error_zone2_rec_rate_high)
# grand_mean_std_error_zone3_rec_rate_high,
# grand_mean_std_error_zone4_rec_rate_high
grand_mean_recom_rate_H_se
#grand_mean_recom_rate_H_se <- grand_mean_std_error_zone2_rec_rate_high
## lower than rec rate comparison
grand_mean_recom_rate_L_se <- rbind(
grand_mean_std_error_zone1_rec_rate_low,
grand_mean_std_error_zone2_rec_rate_low)#
#grand_mean_std_error_zone3_rec_rate_low)#,
#grand_mean_std_error_zone4_rec_rate_low)
grand_mean_recom_rate_H_se #
grand_mean_recom_rate_L_se #
# Natural join: the only shared column is Zone_ID, so this joins by Zone_ID
# (dplyr prints a message confirming the inferred key).
grand_mean_recom_rate_H_L_se <- full_join(grand_mean_recom_rate_H_se,
grand_mean_recom_rate_L_se
)
#grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_H_se
#grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_L_se
grand_mean_recom_rate_H_L_se
#### !!! select what comaprision are needed
## we have no lower comparision so I need to empty these clm
# this occurs beacsue we always have the rec rate flagged as a yield value
# grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_H_L_se %>%
# mutate(
# #higher than recom rate comp
# grand_mean_rec_rate_high_p = grand_mean_rec_rate_high_p,
# se_comp_rec_rate_high_p = se_comp_rec_rate_high_p,
# #grand_mean_rec_rate_high_p = NA,
# #se_comp_rec_rate_high_p = NA,
#
# #lower than recom rate comp
# #grand_mean_rec_rate_low_p = NA,
# #se_comp_rec_rate_low_p = NA)
# grand_mean_rec_rate_low_p = grand_mean_rec_rate_low_p,
# se_comp_rec_rate_low_p = se_comp_rec_rate_low_p)
## for N
## NOTE(review): as written this mutate is a no-op (each column is reassigned
## to itself). It is a template: for comparisons that do not exist in this
## trial, uncomment the "= NA" lines to blank the corresponding columns.
grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_H_L_se %>%
mutate(
#higher than recom rate comp
grand_mean_rec_rate_high_n = grand_mean_rec_rate_high_n,
se_comp_rec_rate_high_n = se_comp_rec_rate_high_n,
# grand_mean_rec_rate_high_n = NA,
# se_comp_rec_rate_high_n = NA,
#lower than recom rate comp
#grand_mean_rec_rate_low_n = NA,
#se_comp_rec_rate_low_n = NA)
grand_mean_rec_rate_low_n = grand_mean_rec_rate_low_n,
se_comp_rec_rate_low_n = se_comp_rec_rate_low_n)
grand_mean_recom_rate_H_L_se
#View(grand_mean_recom_rate_H_L_se)
## Mean yield per zone for each rate class, then widen so each class is a
## column (used below to difference rec rate vs lower/higher).
## for P - NOTE(review): this N trial may not carry a rec_rate_high_low_p
## column (its creation is commented out above); if so this P section errors
## when sourced - confirm whether it should be commented out for N trials.
rec_rate_p_vs_low_High <- recom_rate1 %>%
group_by( Zone_ID, zone_name, rec_rate_high_low_p) %>%
summarise(zone_yld = mean(YldMassDry, na.rm = TRUE))
rec_rate_p_vs_low_High <- ungroup(rec_rate_p_vs_low_High)
rec_rate_p_vs_low_High
rec_rate_p_vs_low_High_wide <- tidyr::pivot_wider(rec_rate_p_vs_low_High,
id_cols = c( Zone_ID),
names_from =rec_rate_high_low_p,
values_from = zone_yld
)
## for N: one row per Zone_ID, columns rec_rate_n / lower_than_rec_rate_n /
## higher_than_rec_rate_n (whichever classes exist in the data)
rec_rate_n_vs_low_High <- recom_rate1 %>%
group_by( Zone_ID, zone_name, rec_rate_high_low_n) %>%
summarise(zone_yld = mean(YldMassDry, na.rm = TRUE))
rec_rate_n_vs_low_High
rec_rate_n_vs_low_High <- ungroup(rec_rate_n_vs_low_High)
rec_rate_n_vs_low_High
rec_rate_n_vs_low_High_wide <- tidyr::pivot_wider(rec_rate_n_vs_low_High,
id_cols = c( Zone_ID),
names_from =rec_rate_high_low_n,
values_from = zone_yld
)
rec_rate_n_vs_low_High_wide
names(rec_rate_n_vs_low_High_wide)
#### !!! select what comaprision are needed
## differences in yld clms
# rec_rate_p_vs_low_High_wide <- rec_rate_p_vs_low_High_wide %>%
# mutate(
# rec_rate_p_vs_lower = rec_rate_p - lower_than_rec_rate_p,
# #rec_rate_p_vs_lower = NA,
# rec_rate_p_vs_higher = rec_rate_p - higher_than_rec_rate_p
# #rec_rate_p_vs_higher = NA
# )
# rec_rate_p_vs_low_High_wide
#For N: yield difference of rec rate vs the lower rate. This trial has no
#"higher" rate kept, so rec_rate_n_vs_higher is deliberately set to NA
#(the real computation is the commented line above it).
rec_rate_n_vs_low_High_wide <- rec_rate_n_vs_low_High_wide %>%
mutate(
rec_rate_n_vs_lower = rec_rate_n - lower_than_rec_rate_n,
#rec_rate_n_vs_lower = NA,
#rec_rate_n_vs_higher = rec_rate_n - higher_than_rec_rate_n
rec_rate_n_vs_higher = NA
)
rec_rate_n_vs_low_High_wide
rec_rate_n_vs_low_High_wide
grand_mean_recom_rate_H_L_se
# rec_rate_p_vs_low_High_wide <- left_join(rec_rate_p_vs_low_High_wide,
# grand_mean_recom_rate_H_L_se)
#for N: attach the grand means / SEs (natural join on Zone_ID)
rec_rate_n_vs_low_High_wide <- left_join(rec_rate_n_vs_low_High_wide,
grand_mean_recom_rate_H_L_se)
str(rec_rate_n_vs_low_High_wide)
#View(rec_rate_p_vs_low_High_wide)
#####
# rec_rate_p_vs_low_High_summary <- rec_rate_p_vs_low_High_wide %>%
# mutate(
# yld_resposne_rec_v_low = case_when(
# rec_rate_p_vs_lower > 0 + se_comp_rec_rate_low_p ~ "positive",
# rec_rate_p_vs_lower < 0 - se_comp_rec_rate_low_p ~ "negative",
# TRUE ~ "no_response"
# ),
# yld_resposne_rec_v_high = case_when(
# rec_rate_p_vs_higher > 0 + se_comp_rec_rate_high_p ~ "negative",
# rec_rate_p_vs_higher < 0 - se_comp_rec_rate_high_p ~ "positive",
# TRUE ~ "no_response"
# )
# )
## for N: classify the yield response. A response is only called when the
## difference exceeds +/- 1 SE of the relevant comparison group; note the
## sign flips for the "high" comparison (rec minus higher: a negative
## difference means the higher rate yielded more, i.e. a positive response).
## (Column names keep the historical "resposne" spelling - referenced later.)
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_wide %>%
mutate(
yld_resposne_rec_v_low = case_when(
rec_rate_n_vs_lower > 0 + se_comp_rec_rate_low_n ~ "positive",
rec_rate_n_vs_lower < 0 - se_comp_rec_rate_low_n ~ "negative",
TRUE ~ "no_response"
),
yld_resposne_rec_v_high = case_when(
rec_rate_n_vs_higher > 0 + se_comp_rec_rate_high_n ~ "negative",
rec_rate_n_vs_higher < 0 - se_comp_rec_rate_high_n ~ "positive",
TRUE ~ "no_response"
)
)
str(rec_rate_n_vs_low_High_summary)
names(rec_rate_n_vs_low_High_summary)
View(rec_rate_n_vs_low_High_summary)
#### !!! select which comparisons are needed; if a column is missing it is
#### added (as NA) at the end of this section
### Reshape: one row per Zone_ID x comparison, with the response as a value.
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
tidyr::pivot_longer(
cols = c("yld_resposne_rec_v_high",
"yld_resposne_rec_v_low"),
names_to = "comparison",
values_to = "yld_response"
) %>%
dplyr::select(
Zone_ID,
comparison,
yld_response,
#higher_than_rec_rate_n ,
lower_than_rec_rate_n,
rec_rate_n,
rec_rate_n_vs_lower,
#rec_rate_n_vs_higher,
se_comp_rec_rate_low_n ,
se_comp_rec_rate_high_n
) %>%
mutate(
comparison = case_when(
comparison == "yld_resposne_rec_v_low" ~ "rec_n_v_lower",
comparison == "yld_resposne_rec_v_high" ~ "rec_n_v_higher"
))
# The "higher" comparison was not run in this trial, so add its columns as NA
# to keep the output schema consistent across trials.
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
mutate(higher_than_rec_rate_n = NA,
rec_rate_n_vs_higher = NA)
rec_rate_n_vs_low_High_summary
View(rec_rate_n_vs_low_High_summary)
### Extra t test #######################################################################################################################
#Prep the data making a sub selection of df for each zone and run the paired t test
#####################################################################################
## Paired t-test of dry yield: recommended N rate vs one alternative rate
## ("lower" or "higher") within one zone.
##
## recom_rate1 - segment-level data with zone_name, rec_rate_high_low_n,
##               SegmentID, YldMassDry (and the grouping columns below).
## zone_x      - zone number; matched as paste0("zone", zone_x).
## comp        - "lower" or "higher"; selects <comp>_than_rec_rate_n rows.
##
## Returns a one-row data.frame: P_value, Mean_diff, comparison, zone,
## rounded (|mean diff| to 2 dp) and Significant (p < 0.05).
function_paired_ttest_rec_rate_low_high <- function(recom_rate1, zone_x, comp){
#select the zone data and the rec rate vs the requested alternative rate
zone_x_rec_r_n_vs_x <- recom_rate1 %>%
filter(zone_name == paste0("zone", zone_x)) %>%
filter(rec_rate_high_low_n == "rec_rate_n" | rec_rate_high_low_n == paste0(comp,"_than_rec_rate_n"))
#average the yld per segment and rate. NOTE(review): summarise_all() averages
#every remaining column; non-numeric columns become NA with warnings -
#presumably only YldMassDry is used downstream, confirm.
zone_x_rec_r_n_vs_x_av <- group_by(zone_x_rec_r_n_vs_x,
SegmentID, Rate, Zone,Zone_ID,
rate_name, zone_name , rec_rate_high_low_n) %>%
summarise_all(mean, na.rm= TRUE)
str(zone_x_rec_r_n_vs_x_av)
#keep only SegmentIDs that appear more than once, i.e. segments observed
#under both rate classes, so the t-test pairs are complete.
list_SegmentID_values_rec_rate_l <- zone_x_rec_r_n_vs_x_av$SegmentID[duplicated(zone_x_rec_r_n_vs_x_av$SegmentID)] #this returns a list of values I want to keep
list_SegmentID_values_rec_rate_l
zone_x_rec_r_n_vs_x_av <- zone_x_rec_r_n_vs_x_av %>% filter(SegmentID %in% list_SegmentID_values_rec_rate_l)
str(zone_x_rec_r_n_vs_x_av)
#paired t-test. NOTE(review): pairing relies on exactly two rows per kept
#SegmentID and consistent row ordering; the formula+paired interface is
#deprecated in recent R (>= 4.4) - confirm before upgrading R.
zone_x_rec_rate_n_vs_x_res <- t.test(YldMassDry ~ rec_rate_high_low_n,
data = zone_x_rec_r_n_vs_x_av, paired = TRUE)
#####test results
# Report values from the t.test
zone_x_rec_rate_n_vs_x_res_sig <-
data.frame(P_value = as.double(zone_x_rec_rate_n_vs_x_res$p.value),
Mean_diff = (zone_x_rec_rate_n_vs_x_res$estimate)) %>%
mutate(
comparison = paste0("rec_n_v_", comp),
zone = paste0("zone", zone_x),
rounded = abs(round(Mean_diff, 2)),
Significant = case_when(P_value < 0.05 ~ "significant",
TRUE ~ "not significant"))
zone_x_rec_rate_n_vs_x_res_sig
return(data.frame(zone_x_rec_rate_n_vs_x_res_sig))
}
#function(recom_rate1, zone_x, comp)
# Run the paired t-tests per zone. The target names are constant strings, so
# plain assignment replaces the original assign(paste0(...)) calls.
rec_rate_n_vs_lower_zone_1 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 1, "lower")
rec_rate_n_vs_lower_zone_2 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 2, "lower")
#rec_rate_n_vs_lower_zone_3 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 3, "lower")
#rec_rate_n_vs_lower_zone_4 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 4, "lower")
rec_rate_n_vs_higher_zone_1 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 1, "higher")
rec_rate_n_vs_higher_zone_2 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 2, "higher")
#rec_rate_n_vs_higher_zone_3 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 3, "higher")
#rec_rate_n_vs_higher_zone_4 <- function_paired_ttest_rec_rate_low_high(recom_rate1, 4, "higher")
#what ran?
rec_rate_n_vs_lower_zone_1 # y
rec_rate_n_vs_lower_zone_2 # y
# FIX: the zone-3 t-tests are never assigned (their calls are commented out
# above), so printing them stopped a sourced run with "object not found" -
# keep them commented out. NOTE(review): if the "higher" t-tests error for
# this trial (no higher-rate rows), comment those prints out too.
#rec_rate_n_vs_lower_zone_3 #nope
#rec_rate_n_vs_lower_zone_4 # nope
rec_rate_n_vs_higher_zone_1 # n
rec_rate_n_vs_higher_zone_2 # ynes
#rec_rate_n_vs_higher_zone_3 # nope
#rec_rate_n_vs_higher_zone_4
# this is a check of what comparisons exist - i.e. what was I expecting to run?
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate) %>%
group_by(zone_name) %>%
distinct(rec_rate_high_low_n)%>%
filter(rec_rate_high_low_n != "rec_rate_n") %>%
arrange(rec_rate_high_low_n)
### !!! user input required: stack only the t-test results that actually ran
rec_rate_n_low_vs_high_all <- rbind(rec_rate_n_vs_lower_zone_1,
rec_rate_n_vs_lower_zone_2)#,
#rec_rate_n_vs_lower_zone_3)
#rec_rate_n_vs_lower_zone_4,
#rec_rate_n_vs_higher_zone_1,
#rec_rate_n_vs_higher_zone_2)
#rec_rate_n_vs_higher_zone_3,
#rec_rate_n_vs_higher_zone_4)
#rec_rate_p_low_vs_high_all <- rec_rate_p_vs_lower_zone_1
rec_rate_n_low_vs_high_all
## attach the t-test results to the zone summary
## first add Zone_ID to the t-test table via the zone name lookup
zoneID_zone_names <- recom_rate1 %>% distinct(Zone_ID, .keep_all = TRUE) %>%
dplyr::select(Zone_ID,zone_name )
zoneID_zone_names
rec_rate_n_low_vs_high_all <- left_join(rec_rate_n_low_vs_high_all,zoneID_zone_names,
by = c("zone" = "zone_name"))
rec_rate_n_vs_low_High_summary <- full_join(rec_rate_n_vs_low_High_summary,
rec_rate_n_low_vs_high_all, by = c("Zone_ID", "comparison"))
names(rec_rate_n_vs_low_High_summary)
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
dplyr::select(
Zone_ID,
zone,
comparison,
yld_response,
higher_than_rec_rate_n,
lower_than_rec_rate_n,
rec_rate_n,
rec_rate_n_vs_higher,
rec_rate_n_vs_lower ,
se_comp_rec_rate_high_n,
se_comp_rec_rate_low_n,
Significant,
P_value,
rounded
)
## add in a few columns that help later.
## NOTE(review): mutate with unique(strips$Paddock_ID) assumes strips holds
## exactly one paddock / strip type; multiple values would error or recycle -
## confirm for multi-paddock runs.
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
mutate(paddock_ID = unique(strips$Paddock_ID),
Strip_Type = unique(strips$Strip_Type),
input_file = input_file)
rec_rate_n_vs_low_High_summary
assigned_names2
rec_rate_n_vs_low_High_summary <- cbind(rec_rate_n_vs_low_High_summary,assigned_names2)
rec_rate_n_vs_low_High_summary
str(rec_rate_n_vs_low_High_summary)
str(rec_rate_n_vs_low_High_summary)
## not all comparison are valid - I need to drop some
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
filter(!is.na(zone))
#what is the recommed rate?
names(recom_rate1)
label_rec_rates <- recom_rate1 %>% group_by(rec_rate_high_low_n,
Rate, Strip_Rate, Zone_ID) %>%
summarise(count= n())
label_rec_rates
label_rec_rates <- ungroup(label_rec_rates) %>%
dplyr::select( rec_rate_high_low_n, Strip_Rate, Zone_ID)
label_rec_rates
label_rec_rates <- tidyr::pivot_wider(
label_rec_rates,
names_from = rec_rate_high_low_n,
values_from = Strip_Rate
)
label_rec_rates <- data.frame(label_rec_rates)
names(label_rec_rates)
## !! make sure this runs
label_rec_rates <-label_rec_rates %>% rename(
#higher_than_rec_rate_n_label = higher_than_rec_rate_n,
lower_than_rec_rate_n_label = lower_than_rec_rate_n,
rec_rate_n_label = rec_rate_n)
str(label_rec_rates)
str(rec_rate_n_vs_low_High_summary)
rec_rate_n_vs_low_High_summary <- full_join(rec_rate_n_vs_low_High_summary, label_rec_rates, by = "Zone_ID")
names(rec_rate_n_vs_low_High_summary)
#remove duplication
rec_rate_n_vs_low_High_summary <- dplyr::distinct(rec_rate_n_vs_low_High_summary,
Zone_ID, comparison, .keep_all = TRUE)
### Sanity check: a yld_response is only kept when the matching comparison
### column is populated; otherwise it is blanked to the literal string "NA"
### (kept as a string for consistency with the character yld_response column).
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
mutate(
yld_response = case_when(
# FIX: was `... != "NA"`, comparing against the *string* "NA"; real
# missing values only fell through by accident of NA propagation.
# !is.na() states the intent directly and handles NA robustly.
comparison == "rec_n_v_higher" &
!is.na(higher_than_rec_rate_n) ~ yld_response,
comparison == "rec_n_v_lower" &
!is.na(lower_than_rec_rate_n) ~ yld_response,
TRUE ~ "NA"
))
View(rec_rate_n_vs_low_High_summary)
#save the output.
#NOTE(review): paste0() is given a one-column data frame from distinct();
#this only yields a single usable path if distinct() returns exactly one row -
#confirm all_results_1 holds one paddock_ID_Type here.
name_rec_rate_low_high <- paste0("W:/value_soil_testing_prj/Yield_data/2020/processing/r_outputs/rec_rate_comparision_N/rec_rate_comp_",
dplyr::distinct(all_results_1,paddock_ID_Type), ".csv")
name_rec_rate_low_high
write.csv(rec_rate_n_vs_low_High_summary, name_rec_rate_low_high)
| /site_t_test/SFS/t-test comparision recom rates _add_on_Keating_Middle_Fays_3N_2Z.R | no_license | JackieOuzman/analysis_strip_trials | R | false | false | 39,039 | r |
#remove all objects except the keep-list below (shared inputs for the next
#section of the script); commented entries are intentionally NOT kept
rm(list = ls()[!ls() %in% c("strips",
"paddock_ID_1",
"paddock_ID_2",
"paddock_ID_3",
#"paddock_ID_4",
"Zone_labels",
"input_file",
"assigned_names2",
"all_results_1"
#"function_grand_mean_std_error"
)])
# Load the paddock recommendation database (P rec and yield-banded N recs).
recom_rateDB <- read_excel( "W:/value_soil_testing_prj/Yield_data/2020/processing/GRDC 2020 Paddock Database_SA_VIC_May25 2021.xlsx")
##########################################################################################################################################
### Extra analysis for ricks tables GSP vs low high comparision
recom_rateDB <- recom_rateDB %>%
dplyr::select(Zone_ID = `Paddock code` ,
p_rec = `P rec`,
n_rec_yld_low = `N Rec (< 3 t/ha)` ,
n_rec_yld_med = `N Rec (3-5 t/ha)` ,
n_rec_yld_high = `N Rec (> 5 t/ha)`
)
# Row-wise max of the three N recs; rows that are all-NA give -Inf (with a
# warning), which the case_when below converts to NA.
recom_rateDB <- dplyr::mutate(recom_rateDB, maxN = apply(recom_rateDB[3:5], 1, max, na.rm = TRUE))
str(recom_rateDB)
# remove redundant columns and replace -Inf with NA (maxN >= 0 is FALSE/NA
# for -Inf and missing values, so those fall through to NA_real_)
recom_rateDB <- recom_rateDB %>%
mutate(
maxN = case_when(
maxN >= 0 ~ maxN,
TRUE ~ NA_real_
)
)
recom_rateDB <- recom_rateDB %>%
dplyr::select(Zone_ID ,
p_rec,
N_rec = maxN
)
str(strips)
# Segment-level yield data, restricted to segments that fall inside a zone.
rec_rates <- strips %>%
filter(!is.na(zone_name)) %>%
dplyr::select(Zone_ID, SegmentID, YldMassDry, Rate, rate_name_order, rate_name, zone_name, Zone, Strip_Type)
rec_rates
#put the two files together (natural join on Zone_ID)
str(rec_rates)
str(recom_rateDB)
recom_rateDB$Zone_ID <- as.double(recom_rateDB$Zone_ID)
recom_rate1 <- left_join( rec_rates, recom_rateDB)
recom_rate1 <- data.frame(recom_rate1)
str(recom_rate1)
## bring in the calculated fertiliser nutrient contents per applied rate
fert_app_all_steps <- read.csv("W:/value_soil_testing_prj/Yield_data/2020/processing/processing_files/step2_fert_app_all_steps.csv")
# keep only the paddocks in this run (first 5 chars of each paddock ID)
fert_app_all_steps <- fert_app_all_steps %>%
dplyr::filter(Paddock_ID == substr(paddock_ID_1, start = 1, stop = 5)|
Paddock_ID == substr(paddock_ID_2, start = 1, stop = 5)
#Paddock_ID == substr(paddock_ID_3, start = 1, stop = 5)
#Paddock_ID == substr(paddock_ID_4, start = 1, stop = 5)
) %>%
dplyr::select( Paddock_ID, Rate, Strip_Rate, Total_sum_P_content, Total_sum_N_content)
str(fert_app_all_steps)
str(recom_rate1)
##################################################################################################################
# natural join: shared columns are Rate (and Paddock_ID if present)
recom_rate1 <- left_join(recom_rate1, fert_app_all_steps)
str(recom_rate1)
#View(recom_rate1)
# recom_rate1 %>% group_by( Rate, Zone_ID, zone_name) %>%
# summarise(count= n()) %>%
# arrange(Zone_ID, Rate )
###############################################################################################################
## what are the comparisons I want to make: one row per zone x rate with the
## recommended and applied nutrient contents
# names(recom_rate1)
# View(recom_rate1)
recom_rate1_summary <- recom_rate1 %>% group_by(Zone_ID,
Rate, zone_name) %>%
summarise(
p_rec = max(p_rec, na.rm = TRUE),
P_content = max(Total_sum_P_content, na.rm = TRUE),
n_rec = max(N_rec, na.rm = TRUE),
N_content = max(Total_sum_N_content, na.rm = TRUE)
)
recom_rate1_summary <- ungroup(recom_rate1_summary)
# max(..., na.rm = TRUE) over all-NA groups gives -Inf; convert to NA here
recom_rate1_summary[] <- Map(function(x) replace(x, is.infinite(x), NA), recom_rate1_summary)
recom_rate1_summary <- data.frame(recom_rate1_summary)
str(recom_rate1_summary)
recom_rate1_summary
## absolute gap between recommended P and the P actually applied per rate
recom_rate1_summary <- recom_rate1_summary %>%
dplyr::mutate(difference_p = abs(p_rec - P_content)) %>%
arrange(difference_p)
str(recom_rate1_summary)
recom_rate1_summary <- ungroup(recom_rate1_summary)
str(recom_rate1_summary)
recom_rate1_summary$Rate <- as.double(recom_rate1_summary$Rate)
unique(recom_rate1_summary$Rate)
## Two steps: first split by zone, then rank each zone's rates by how close
## their applied P is to the recommendation ("best_match", then "rate1" next
## closest, ..., "rate3" furthest). NOTE(review): the nested min/max logic
## assumes the difference_p values within a zone are distinct - ties would
## label multiple rows the same; confirm per trial.
recom_rate1_summary_zone1 <- recom_rate1_summary %>%
filter(zone_name == "zone1")
recom_rate1_summary_zone2 <- recom_rate1_summary %>%
filter(zone_name == "zone2")
# recom_rate1_summary_zone3 <- recom_rate1_summary %>%
# filter(zone_name == "zone3")
# recom_rate1_summary_zone4 <- recom_rate1_summary %>%
# filter(zone_name == "zone4")
recom_rate1_summary_zone1
recom_rate1_summary_zone1 <-recom_rate1_summary_zone1 %>%
dplyr::mutate(
approx_p_rec =
dplyr::case_when(
difference_p == min(recom_rate1_summary_zone1$difference_p) ~ "best_match",
difference_p == min(recom_rate1_summary_zone1$difference_p[recom_rate1_summary_zone1$difference_p !=
min(recom_rate1_summary_zone1$difference_p)]) ~ "rate1",
difference_p == max(recom_rate1_summary_zone1$difference_p[recom_rate1_summary_zone1$difference_p !=
max(recom_rate1_summary_zone1$difference_p)]) ~ "rate2",
#use this if you have 4 rates best match rate 1 -3
difference_p == max(recom_rate1_summary_zone1$difference_p) ~ "rate3",
#difference_p == max(recom_rate1_summary_zone1$difference_p) ~ "rate2",
TRUE ~ as.character(Rate)
)
)
recom_rate1_summary_zone2 <-recom_rate1_summary_zone2 %>%
dplyr::mutate(
approx_p_rec =
dplyr::case_when(
difference_p == min(recom_rate1_summary_zone2$difference_p) ~ "best_match",
difference_p == min(recom_rate1_summary_zone2$difference_p[recom_rate1_summary_zone2$difference_p !=
min(recom_rate1_summary_zone2$difference_p)]) ~ "rate1",
difference_p == max(recom_rate1_summary_zone2$difference_p[recom_rate1_summary_zone2$difference_p !=
max(recom_rate1_summary_zone2$difference_p)]) ~ "rate2",
#use this if you have 4 rates best match rate 1 -3
#difference_p == max(recom_rate1_summary_zone2$difference_p) ~ "rate2",
difference_p == max(recom_rate1_summary_zone2$difference_p) ~ "rate3",
TRUE ~ as.character(Rate)
)
)
# recom_rate1_summary_zone3 <-recom_rate1_summary_zone3 %>%
# dplyr::mutate(
# approx_p_rec =
# dplyr::case_when(
# difference_p == min(recom_rate1_summary_zone3$difference_p) ~ "best_match",
# difference_p == min(recom_rate1_summary_zone3$difference_p[recom_rate1_summary_zone3$difference_p !=
# min(recom_rate1_summary_zone3$difference_p)]) ~ "rate1",
# difference_p == max(recom_rate1_summary_zone3$difference_p[recom_rate1_summary_zone3$difference_p !=
# max(recom_rate1_summary_zone3$difference_p)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# #difference_p == max(recom_rate1_summary_zone3$difference_p) ~ "rate2",
# difference_p == max(recom_rate1_summary_zone3$difference_p) ~ "rate3",
# TRUE ~ as.character(Rate)
# )
# )
# recom_rate1_summary_zone4 <-recom_rate1_summary_zone4 %>%
# dplyr::mutate(
# approx_p_rec =
# dplyr::case_when(
# difference_p == min(recom_rate1_summary_zone4$difference_p) ~ "best_match",
# difference_p == min(recom_rate1_summary_zone4$difference_p[recom_rate1_summary_zone4$difference_p !=
# min(recom_rate1_summary_zone4$difference_p)]) ~ "rate1",
# difference_p == max(recom_rate1_summary_zone4$difference_p[recom_rate1_summary_zone4$difference_p !=
# max(recom_rate1_summary_zone4$difference_p)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# #difference_p == max(recom_rate1_summary_zone4$difference_p) ~ "rate2",
# difference_p == max(recom_rate1_summary_zone4$difference_p) ~ "rate3",
# TRUE ~ as.character(Rate)
# )
# )
# Quick visual check of the per-zone P-rate rankings.
recom_rate1_summary_zone1
recom_rate1_summary_zone2
# FIX: zone 3 / zone 4 were never created for this paddock (their filters are
# commented out above), so printing them stopped a sourced run with
# "object not found" - keep them commented out.
#recom_rate1_summary_zone3
#recom_rate1_summary_zone4
# put them back together
recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
# zone_3_filter, zone_4_filter for 3/4-zone paddocks:
# recom_rate1_summary_zone3)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
# recom_rate1_summary_zone3, recom_rate1_summary_zone4)
# FIX: rm() only names the objects that exist here (zone3/zone4 do not, and
# naming them produced warnings).
rm(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
str(recom_rate1_summary)
##########################################################################################################
## do the difference for N (same closeness ranking as P; "needs more work")
recom_rate1_summary <- recom_rate1_summary %>%
dplyr::mutate(difference_n = abs(n_rec - N_content)) %>%
arrange(difference_n)
recom_rate1_summary <- ungroup(recom_rate1_summary)
str(recom_rate1_summary)
recom_rate1_summary_zone1 <- recom_rate1_summary %>%
filter(zone_name == "zone1")
recom_rate1_summary_zone2 <- recom_rate1_summary %>%
filter(zone_name == "zone2")
# recom_rate1_summary_zone3 <- recom_rate1_summary %>%
# filter(zone_name == "zone3")
# recom_rate1_summary_zone4 <- recom_rate1_summary %>%
# filter(zone_name == "zone4")
recom_rate1_summary_zone1
recom_rate1_summary_zone2
# FIX: zone 3 / zone 4 are never created for this paddock, so printing them
# stopped a sourced run with "object not found" - keep commented out.
#recom_rate1_summary_zone3
#recom_rate1_summary_zone4
# Rank each zone's rates by closeness of applied N to the recommendation
# ("best_match", "rate1" next closest, "rate2" furthest - this trial has 3
# rates, so the 4-rate "rate3" branches are commented out).
recom_rate1_summary_zone1 <-recom_rate1_summary_zone1 %>%
dplyr::mutate(
approx_n_rec =
dplyr::case_when(
difference_n == min(recom_rate1_summary_zone1$difference_n) ~ "best_match",
difference_n == min(recom_rate1_summary_zone1$difference_n[recom_rate1_summary_zone1$difference_n !=
min(recom_rate1_summary_zone1$difference_n)]) ~ "rate1",
#difference_n == max(recom_rate1_summary_zone1$difference_n[recom_rate1_summary_zone1$difference_n !=
# max(recom_rate1_summary_zone1$difference_n)]) ~ "rate2",
#use this if you have 4 rates best match rate 1 -3
#difference_n == max(recom_rate1_summary_zone1$difference_n) ~ "rate3",
difference_n == max(recom_rate1_summary_zone1$difference_n) ~ "rate2",
TRUE ~ as.character(Rate)
)
)
recom_rate1_summary_zone2 <-recom_rate1_summary_zone2 %>%
dplyr::mutate(
approx_n_rec =
dplyr::case_when(
difference_n == min(recom_rate1_summary_zone2$difference_n) ~ "best_match",
difference_n == min(recom_rate1_summary_zone2$difference_n[recom_rate1_summary_zone2$difference_n !=
min(recom_rate1_summary_zone2$difference_n)]) ~ "rate1",
#difference_n == max(recom_rate1_summary_zone2$difference_n[recom_rate1_summary_zone2$difference_n !=
# max(recom_rate1_summary_zone2$difference_n)]) ~ "rate2",
#use this if you have 4 rates best match rate 1 -3
#difference_n == max(recom_rate1_summary_zone2$difference_n) ~ "rate3",
difference_n == max(recom_rate1_summary_zone2$difference_n) ~ "rate2",
TRUE ~ as.character(Rate)
)
)
# recom_rate1_summary_zone3 <-recom_rate1_summary_zone3 %>%
# dplyr::mutate(
# approx_n_rec =
# dplyr::case_when(
# difference_n == min(recom_rate1_summary_zone3$difference_n) ~ "best_match",
# difference_n == min(recom_rate1_summary_zone3$difference_n[recom_rate1_summary_zone3$difference_n !=
# min(recom_rate1_summary_zone3$difference_n)]) ~ "rate1",
# difference_n == max(recom_rate1_summary_zone3$difference_n[recom_rate1_summary_zone3$difference_n !=
# max(recom_rate1_summary_zone3$difference_n)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# #difference_n == max(recom_rate1_summary_zone3$difference_n) ~ "rate3",
# #difference_n == max(recom_rate1_summary_zone3$difference_n) ~ "rate2",
# TRUE ~ as.character(Rate)
# )
# )
# recom_rate1_summary_zone4 <-recom_rate1_summary_zone4 %>%
# dplyr::mutate(
# approx_n_rec =
# dplyr::case_when(
# difference_n == min(recom_rate1_summary_zone4$difference_n) ~ "best_match",
# difference_n == min(recom_rate1_summary_zone4$difference_n[recom_rate1_summary_zone4$difference_n !=
# min(recom_rate1_summary_zone4$difference_n)]) ~ "rate1",
# difference_n == max(recom_rate1_summary_zone4$difference_n[recom_rate1_summary_zone4$difference_n !=
# max(recom_rate1_summary_zone4$difference_n)]) ~ "rate2",
# #use this if you have 4 rates best match rate 1 -3
# difference_n == max(recom_rate1_summary_zone4$difference_n) ~ "rate3",
# #difference_n == max(recom_rate1_summary_zone4$difference_n) ~ "rate2",
# TRUE ~ as.character(Rate)
# )
# )
# Quick visual check of the per-zone N-rate rankings.
recom_rate1_summary_zone1
recom_rate1_summary_zone2
#recom_rate1_summary_zone3
#recom_rate1_summary_zone4
# put them back together
recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
# recom_rate1_summary_zone3)
# recom_rate1_summary <- rbind(recom_rate1_summary_zone1, recom_rate1_summary_zone2,
# recom_rate1_summary_zone3, recom_rate1_summary_zone4)
rm(recom_rate1_summary_zone1, recom_rate1_summary_zone2)
recom_rate1_summary
# recom_rate1_summary %>% group_by( Rate, Zone_ID, zone_name) %>%
# summarise(count= n()) %>%
# arrange(Zone_ID, Rate )
str(recom_rate1_summary)
#what is the recommed rate for p?
Rec_rate_p <- recom_rate1_summary %>% filter(approx_p_rec == "best_match") %>%
dplyr::select( Zone_ID, P_content) %>%
rename( rec_rate_p = P_content)
Rec_rate_p
## add this to df with all the yield data
names(recom_rate1)
names(Rec_rate_p)
# recom_rate1 <- left_join(recom_rate1, Rec_rate_p, by ="Zone_ID")
# head(recom_rate1)
## is the rec higher or lower than the rec
# recom_rate1 <- recom_rate1 %>%
# mutate(
# rec_rate_high_low_p = case_when(
# rec_rate_p - Total_sum_P_content > 0 ~ "lower_than_rec_rate_p",
# rec_rate_p - Total_sum_P_content < 0 ~ "higher_than_rec_rate_p",
# rec_rate_p - Total_sum_P_content == 0 ~ "rec_rate_p",
# TRUE ~ "other"))
#check this is correct
# test <- recom_rate1 %>%
# mutate(
# rec_rate_high_low_p = rec_rate_p - Total_sum_P_content)
#what is the recommed rate for n?
Rec_rate_n <- recom_rate1_summary %>% filter(approx_n_rec == "best_match") %>%
dplyr::select( Zone_ID, N_content) %>%
rename( rec_rate_n = N_content)
Rec_rate_n
## add this to df with all the yield data
names(recom_rate1)
names(Rec_rate_n)
recom_rate1 %>% group_by( Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate )
dim(recom_rate1)
Rec_rate_n
recom_rate1 <- left_join(recom_rate1, Rec_rate_n, by ="Zone_ID")
# #what is the recommed rate for n?
# Rec_rate_n <- dplyr::distinct(recom_rate1_summary,Rate, .keep_all = TRUE) %>%
# filter(approx_n_rec == "best_match") %>%
# dplyr::select(Rate)
#
#
# recom_rate1
# ## add this to df
# recom_rate1 <- recom_rate1 %>%
# mutate(rec_rate_n = as.double(Rec_rate_n_value))
# #mutate(rec_rate_n = Rec_rate_n[1])
# names(recom_rate1)
# str(recom_rate1)
## classify each row: is the applied N higher, lower, or equal to the zone's
## recommended rate. NOTE(review): the `== 0` branch relies on exact float
## equality; it holds here because rec_rate_n is taken from the same
## Total_sum_N_content values - confirm if either source changes.
recom_rate1 <- recom_rate1 %>%
mutate(
rec_rate_high_low_n = case_when(
rec_rate_n - Total_sum_N_content > 0 ~ "lower_than_rec_rate_n",
rec_rate_n - Total_sum_N_content < 0 ~ "higher_than_rec_rate_n",
rec_rate_n - Total_sum_N_content == 0 ~ "rec_rate_n",
TRUE ~ "other"))
str(recom_rate1$rec_rate_high_low_n)
str(recom_rate1$rec_rate_high_low_p)
str(recom_rate1)
#################################################################################################
######### !!!!! User input needed here !!!!#####################################################
#################################################################################################
#how many rates are lower_than_rec rate - this checks how many are lower and how many higher
# what trial is it first?
unique(recom_rate1$Strip_Type)
# for P. NOTE(review): rec_rate_high_low_p is only built in P trials (its
# creation is commented out above); in an N-only trial this check errors.
recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate )
str(recom_rate1)
#for n
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate )
str(recom_rate1)
## all good - if it wasn't I would need to adjust something??
#################################################################################
### list what rates per zone I want to keep
## filter out one rate so we only have rec rate, lower than and higher than
# try and aim for sensible rates not zero if it can be avoided
# 1/2 the rec rate and *2 rec rate
recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
filter(rec_rate_high_low_p == "rec_rate_p" ) %>%
mutate(double_rec_rate = Rate*2,
half_rec_rate = Rate*.5)
## For N: list each zone's recommended N rate alongside double / half of it,
## to help choose which applied rates to keep for the comparisons below.
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
filter(rec_rate_high_low_n == "rec_rate_n" ) %>%
mutate(double_rec_rate = Rate*2,
half_rec_rate = Rate*.5)
## Keep only the rates used in the comparison, per zone.
## NOTE(review): the kept rates (120, 240) are paddock-specific user input -
## confirm against the check printed above before re-running on a new trial.
zone_1_filter <- recom_rate1 %>%
filter(Rate %in% c(120,240) & zone_name == "zone1") #what is in the bracket we will keep
zone_2_filter <- recom_rate1 %>%
filter(Rate %in% c(120,240) & zone_name == "zone2")
# zone_3_filter <- recom_rate1 %>%
# filter(Rate %in% c(20,40) & zone_name == "zone3")
#zone_4_filter <- recom_rate1 %>%
# filter(Rate %in% c(88.2, 42.2,19.2) & zone_name == "zone4")
## Rebuild recom_rate1 from the kept zones only (this paddock has 2 zones).
recom_rate1 <- rbind(zone_1_filter, zone_2_filter)
# recom_rate1 <- rbind(zone_1_filter, zone_2_filter,
# zone_3_filter, zone_4_filter)
# recom_rate1 <- rbind(zone_1_filter, zone_2_filter,
# zone_3_filter)
#rm(zone_1_filter, zone_2_filter)
unique(recom_rate1$Rate)
# this is a check
# recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
# summarise(count= n()) %>%
# arrange(Zone_ID, Rate )
#for n: count rows per rate class / rate / zone after the filtering above
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate )
# Flag each row for the two paired comparisons used downstream:
#  - comparison_rec_rate_high_n: recommended rate vs higher-than-recommended
#  - comparison_rec_rate_low_n:  recommended rate vs lower-than-recommended
# Rows belonging to neither comparison are labelled "other" and are filtered
# out later by function_grand_mean_std_error_zone().
recom_rate1 <- recom_rate1 %>%
  mutate(
    # (P-trial versions of these flags omitted here -- this run is an N trial.
    #  Re-add comparison_rec_rate_high_p / comparison_rec_rate_low_p for a
    #  P trial.)
    comparison_rec_rate_high_n = case_when(
      rec_rate_high_low_n %in% c("rec_rate_n", "higher_than_rec_rate_n") ~ "rec_rate_high_n",
      TRUE ~ "other"
    ),
    comparison_rec_rate_low_n = case_when(
      # BUG FIX: this previously tested for "rec_rate_p" (copy-paste from the
      # P-trial code), so recommended-rate rows were labelled "other" and were
      # silently dropped from the rec-vs-lower grand mean / SE calculation.
      rec_rate_high_low_n %in% c("rec_rate_n", "lower_than_rec_rate_n") ~ "rec_rate_low_n",
      TRUE ~ "other"
    )
  )
#View(recom_rate1)
########################################################################################################################################
### for each zone and comparison, what is the mean and std error
# For one zone and one comparison flag, compute the grand mean dry yield and
# its standard error across all rows belonging to that comparison.
#
# df         data frame with zone_name, Zone_ID, YldMassDry and a
#            comparison_<comparison> flag column (built above).
# comparison e.g. "rec_rate_high_n" / "rec_rate_low_n" -- both the value held
#            by the flag column and the suffix used to name output columns.
# zone       zone number (1, 2, ...) matched against zone_name == "zone<zone>".
#
# Returns a tibble with one row per Zone_ID and the columns
# grand_mean_<comparison> and se_comp_<comparison>.
function_grand_mean_std_error_zone <- function(df, comparison, zone){
  clm <- paste0("comparison_", comparison)
  comparison_grand_mean <- paste0("grand_mean_", comparison)
  comparison_se <- paste0("se_comp_", comparison)
  df %>%
    dplyr::filter(zone_name == paste0("zone", zone)) %>%
    dplyr::filter(.data[[clm]] == comparison) %>%
    group_by(Zone_ID) %>%
    summarise(!!comparison_grand_mean := mean(YldMassDry, na.rm = TRUE),
              # BUG FIX: sd() previously ran without na.rm and n() counted NA
              # rows, so the SE was NA (or biased) whenever YldMassDry had
              # missing values, while the mean already used na.rm = TRUE.
              sd = sd(YldMassDry, na.rm = TRUE),
              n = sum(!is.na(YldMassDry)),
              !!comparison_se := sd / sqrt(n)) %>%
    dplyr::select(-sd, -n)
}
# #higher than rec rate comaprison
# assign(paste0("grand_mean_std_error_zone1_", "rec_rate_high"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_p",1))
# assign(paste0("grand_mean_std_error_zone2_", "rec_rate_high"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_p",2))
#
# #low than rec rate comaprison
# assign(paste0("grand_mean_std_error_zone1_", "rec_rate_low"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_p",1))
# assign(paste0("grand_mean_std_error_zone2_", "rec_rate_low"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_p",2))
#for n
#higher than rec rate comaprison
assign(paste0("grand_mean_std_error_zone1_", "rec_rate_high"),
function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_n",1))
assign(paste0("grand_mean_std_error_zone2_", "rec_rate_high"),
function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_n",2))
#assign(paste0("grand_mean_std_error_zone3_", "rec_rate_high"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_n",3))
# assign(paste0("grand_mean_std_error_zone4_", "rec_rate_high"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_high_n",4))
#low than rec rate comaprison
assign(paste0("grand_mean_std_error_zone1_", "rec_rate_low"),
function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_n",1))
assign(paste0("grand_mean_std_error_zone2_", "rec_rate_low"),
function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_n",2))
# assign(paste0("grand_mean_std_error_zone3_", "rec_rate_low"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_n",3))
# assign(paste0("grand_mean_std_error_zone4_", "rec_rate_low"),
# function_grand_mean_std_error_zone(recom_rate1,"rec_rate_low_n",4))
# if I have both high and low then I can join them together
# But first check what I want to add
# this is a check of which comparisons I have
recom_rate1 %>% group_by(rec_rate_high_low_p, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate) %>%
group_by(zone_name) %>%
distinct(rec_rate_high_low_p)%>%
filter(rec_rate_high_low_p != "rec_rate_p") %>%
arrange(rec_rate_high_low_p)
# for n
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate) %>%
group_by(zone_name) %>%
distinct(rec_rate_high_low_n)%>%
filter(rec_rate_high_low_n != "rec_rate_n") %>%
arrange(rec_rate_high_low_n)
# ## higher than P rate comparision
grand_mean_recom_rate_H_se <- rbind(
grand_mean_std_error_zone1_rec_rate_high,
grand_mean_std_error_zone2_rec_rate_high)
# grand_mean_std_error_zone3_rec_rate_high,
# grand_mean_std_error_zone4_rec_rate_high
grand_mean_recom_rate_H_se
#grand_mean_recom_rate_H_se <- grand_mean_std_error_zone2_rec_rate_high
## lower than P rate comparision
grand_mean_recom_rate_L_se <- rbind(
grand_mean_std_error_zone1_rec_rate_low,
grand_mean_std_error_zone2_rec_rate_low)#
#grand_mean_std_error_zone3_rec_rate_low)#,
#grand_mean_std_error_zone4_rec_rate_low)
grand_mean_recom_rate_H_se #
grand_mean_recom_rate_L_se #
grand_mean_recom_rate_H_L_se <- full_join(grand_mean_recom_rate_H_se,
grand_mean_recom_rate_L_se
)
#grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_H_se
#grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_L_se
grand_mean_recom_rate_H_L_se
#### !!! select which comparisons are needed
## if we have no lower comparison then I need to empty these columns
# this occurs because we always have the rec rate flagged as a yield value
# grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_H_L_se %>%
# mutate(
# #higher than recom rate comp
# grand_mean_rec_rate_high_p = grand_mean_rec_rate_high_p,
# se_comp_rec_rate_high_p = se_comp_rec_rate_high_p,
# #grand_mean_rec_rate_high_p = NA,
# #se_comp_rec_rate_high_p = NA,
#
# #lower than recom rate comp
# #grand_mean_rec_rate_low_p = NA,
# #se_comp_rec_rate_low_p = NA)
# grand_mean_rec_rate_low_p = grand_mean_rec_rate_low_p,
# se_comp_rec_rate_low_p = se_comp_rec_rate_low_p)
## for N
grand_mean_recom_rate_H_L_se <- grand_mean_recom_rate_H_L_se %>%
mutate(
#higher than recom rate comp
grand_mean_rec_rate_high_n = grand_mean_rec_rate_high_n,
se_comp_rec_rate_high_n = se_comp_rec_rate_high_n,
# grand_mean_rec_rate_high_n = NA,
# se_comp_rec_rate_high_n = NA,
#lower than recom rate comp
#grand_mean_rec_rate_low_n = NA,
#se_comp_rec_rate_low_n = NA)
grand_mean_rec_rate_low_n = grand_mean_rec_rate_low_n,
se_comp_rec_rate_low_n = se_comp_rec_rate_low_n)
grand_mean_recom_rate_H_L_se
#View(grand_mean_recom_rate_H_L_se)
## I need to generate mean yield value for the zone and Rate
## for P If the trial is N this needs to be changed
rec_rate_p_vs_low_High <- recom_rate1 %>%
group_by( Zone_ID, zone_name, rec_rate_high_low_p) %>%
summarise(zone_yld = mean(YldMassDry, na.rm = TRUE))
rec_rate_p_vs_low_High <- ungroup(rec_rate_p_vs_low_High)
rec_rate_p_vs_low_High
rec_rate_p_vs_low_High_wide <- tidyr::pivot_wider(rec_rate_p_vs_low_High,
id_cols = c( Zone_ID),
names_from =rec_rate_high_low_p,
values_from = zone_yld
)
## for N
rec_rate_n_vs_low_High <- recom_rate1 %>%
group_by( Zone_ID, zone_name, rec_rate_high_low_n) %>%
summarise(zone_yld = mean(YldMassDry, na.rm = TRUE))
rec_rate_n_vs_low_High
rec_rate_n_vs_low_High <- ungroup(rec_rate_n_vs_low_High)
rec_rate_n_vs_low_High
rec_rate_n_vs_low_High_wide <- tidyr::pivot_wider(rec_rate_n_vs_low_High,
id_cols = c( Zone_ID),
names_from =rec_rate_high_low_n,
values_from = zone_yld
)
rec_rate_n_vs_low_High_wide
names(rec_rate_n_vs_low_High_wide)
#### !!! select what comaprision are needed
## differences in yld clms
# rec_rate_p_vs_low_High_wide <- rec_rate_p_vs_low_High_wide %>%
# mutate(
# rec_rate_p_vs_lower = rec_rate_p - lower_than_rec_rate_p,
# #rec_rate_p_vs_lower = NA,
# rec_rate_p_vs_higher = rec_rate_p - higher_than_rec_rate_p
# #rec_rate_p_vs_higher = NA
# )
# rec_rate_p_vs_low_High_wide
#For N
rec_rate_n_vs_low_High_wide <- rec_rate_n_vs_low_High_wide %>%
mutate(
rec_rate_n_vs_lower = rec_rate_n - lower_than_rec_rate_n,
#rec_rate_n_vs_lower = NA,
#rec_rate_n_vs_higher = rec_rate_n - higher_than_rec_rate_n
rec_rate_n_vs_higher = NA
)
rec_rate_n_vs_low_High_wide
rec_rate_n_vs_low_High_wide
grand_mean_recom_rate_H_L_se
# rec_rate_p_vs_low_High_wide <- left_join(rec_rate_p_vs_low_High_wide,
# grand_mean_recom_rate_H_L_se)
#for N
rec_rate_n_vs_low_High_wide <- left_join(rec_rate_n_vs_low_High_wide,
grand_mean_recom_rate_H_L_se)
str(rec_rate_n_vs_low_High_wide)
#View(rec_rate_p_vs_low_High_wide)
#####
# rec_rate_p_vs_low_High_summary <- rec_rate_p_vs_low_High_wide %>%
# mutate(
# yld_resposne_rec_v_low = case_when(
# rec_rate_p_vs_lower > 0 + se_comp_rec_rate_low_p ~ "positive",
# rec_rate_p_vs_lower < 0 - se_comp_rec_rate_low_p ~ "negative",
# TRUE ~ "no_response"
# ),
# yld_resposne_rec_v_high = case_when(
# rec_rate_p_vs_higher > 0 + se_comp_rec_rate_high_p ~ "negative",
# rec_rate_p_vs_higher < 0 - se_comp_rec_rate_high_p ~ "positive",
# TRUE ~ "no_response"
# )
# )
## for N
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_wide %>%
mutate(
yld_resposne_rec_v_low = case_when(
rec_rate_n_vs_lower > 0 + se_comp_rec_rate_low_n ~ "positive",
rec_rate_n_vs_lower < 0 - se_comp_rec_rate_low_n ~ "negative",
TRUE ~ "no_response"
),
yld_resposne_rec_v_high = case_when(
rec_rate_n_vs_higher > 0 + se_comp_rec_rate_high_n ~ "negative",
rec_rate_n_vs_higher < 0 - se_comp_rec_rate_high_n ~ "positive",
TRUE ~ "no_response"
)
)
str(rec_rate_n_vs_low_High_summary)
names(rec_rate_n_vs_low_High_summary)
View(rec_rate_n_vs_low_High_summary)
#### !!! select what comaprision are needed if we are missing clm add at the end
###
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
tidyr::pivot_longer(
cols = c("yld_resposne_rec_v_high",
"yld_resposne_rec_v_low"),
names_to = "comparison",
values_to = "yld_response"
) %>%
dplyr::select(
Zone_ID,
comparison,
yld_response,
#higher_than_rec_rate_n ,
lower_than_rec_rate_n,
rec_rate_n,
rec_rate_n_vs_lower,
#rec_rate_n_vs_higher,
se_comp_rec_rate_low_n ,
se_comp_rec_rate_high_n
) %>%
mutate(
comparison = case_when(
comparison == "yld_resposne_rec_v_low" ~ "rec_n_v_lower",
comparison == "yld_resposne_rec_v_high" ~ "rec_n_v_higher"
))
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
mutate(higher_than_rec_rate_n = NA,
rec_rate_n_vs_higher = NA)
rec_rate_n_vs_low_High_summary
View(rec_rate_n_vs_low_High_summary)
### Extra t test #######################################################################################################################
#Prep the data making a sub selection of df for each zone and run the paired t test
#####################################################################################
# Paired t-test of dry yield at the recommended N rate vs the "lower"/"higher"
# than recommended rate, within one zone.  Segments are the pairing unit:
# yield is averaged per segment and treatment, segments missing either
# treatment are dropped, and the two treatment vectors are aligned by
# SegmentID before testing.
#
# recom_rate1  trial data with zone_name, rec_rate_high_low_n, SegmentID
#              and YldMassDry columns.
# zone_x       zone number (matched against zone_name == "zone<zone_x>").
# comp         "lower" or "higher" -- which alternative rate to test against
#              the recommended rate.
#
# Returns a one-row data frame with the p-value, mean difference
# (<comp> rate minus recommended rate, matching the sign of the old formula
# interface), comparison/zone labels and a significance flag.
function_paired_ttest_rec_rate_low_high <- function(recom_rate1, zone_x, comp){
  comp_level <- paste0(comp, "_than_rec_rate_n")
  # keep only this zone and the two treatments being compared
  zone_dat <- recom_rate1 %>%
    filter(zone_name == paste0("zone", zone_x)) %>%
    filter(rec_rate_high_low_n %in% c("rec_rate_n", comp_level))
  # average yield per segment and treatment
  seg_means <- zone_dat %>%
    group_by(SegmentID, rec_rate_high_low_n) %>%
    summarise(YldMassDry = mean(YldMassDry, na.rm = TRUE), .groups = "drop")
  # keep only segments observed under BOTH treatments so the test is paired
  both_ids <- intersect(
    seg_means$SegmentID[seg_means$rec_rate_high_low_n == comp_level],
    seg_means$SegmentID[seg_means$rec_rate_high_low_n == "rec_rate_n"])
  seg_means <- seg_means %>%
    filter(SegmentID %in% both_ids) %>%
    arrange(SegmentID)
  # Align the two treatment vectors by SegmentID and test.  NOTE:
  # t.test(y ~ g, paired = TRUE) is defunct from R 4.4.0, and the old code
  # also relied on implicit row order for pairing; pairing explicitly by
  # SegmentID is both supported and safe.
  comp_yld <- seg_means$YldMassDry[seg_means$rec_rate_high_low_n == comp_level]
  rec_yld  <- seg_means$YldMassDry[seg_means$rec_rate_high_low_n == "rec_rate_n"]
  res <- t.test(comp_yld, rec_yld, paired = TRUE)
  # report the test result in the summary-table format used downstream
  data.frame(P_value = as.double(res$p.value),
             Mean_diff = unname(res$estimate)) %>%
    mutate(
      comparison = paste0("rec_n_v_", comp),
      zone = paste0("zone", zone_x),
      rounded = abs(round(Mean_diff, 2)),
      Significant = case_when(P_value < 0.05 ~ "significant",
                              TRUE ~ "not significant"))
}
#function(recom_rate1, zone_x, comp)
assign(paste0("rec_rate_n_vs_lower_", "zone_", "1"),function_paired_ttest_rec_rate_low_high(recom_rate1, 1, "lower"))
assign(paste0("rec_rate_n_vs_lower_","zone_", "2"),function_paired_ttest_rec_rate_low_high(recom_rate1, 2, "lower"))
#assign(paste0("rec_rate_n_vs_lower_","zone_", "3"),function_paired_ttest_rec_rate_low_high(recom_rate1, 3, "lower"))
#assign(paste0("rec_rate_n_vs_lower_","zone_", "4"),function_paired_ttest_rec_rate_low_high(recom_rate1, 4, "lower"))
assign(paste0("rec_rate_n_vs_higher_", "zone_", "1"),function_paired_ttest_rec_rate_low_high(recom_rate1, 1, "higher"))
assign(paste0("rec_rate_n_vs_higher_","zone_", "2"),function_paired_ttest_rec_rate_low_high(recom_rate1, 2, "higher"))
# assign(paste0("rec_rate_n_vs_higher_","zone_", "3"),function_paired_ttest_rec_rate_low_high(recom_rate1, 3, "higher"))
#assign(paste0("rec_rate_n_vs_higher_","zone_", "4"),function_paired_ttest_rec_rate_low_high(recom_rate1, 4, "higher"))
# What ran?  Only print results whose assign() above was actually executed --
# referencing a never-created object aborts a sourced script with
# "object not found".  The zone 3 / zone 4 lines are commented out to match
# the commented-out assign() calls above; uncomment them in step.
rec_rate_n_vs_lower_zone_1   # ran
rec_rate_n_vs_lower_zone_2   # ran
#rec_rate_n_vs_lower_zone_3  # assign() above is commented out
#rec_rate_n_vs_lower_zone_4  # assign() above is commented out
rec_rate_n_vs_higher_zone_1  # ran
rec_rate_n_vs_higher_zone_2  # ran
#rec_rate_n_vs_higher_zone_3 # assign() above is commented out
#rec_rate_n_vs_higher_zone_4 # assign() above is commented out
# this is a check what comaprison I have what was I expecting to run?
recom_rate1 %>% group_by(rec_rate_high_low_n, Rate, Zone_ID, zone_name) %>%
summarise(count= n()) %>%
arrange(Zone_ID, Rate) %>%
group_by(zone_name) %>%
distinct(rec_rate_high_low_n)%>%
filter(rec_rate_high_low_n != "rec_rate_n") %>%
arrange(rec_rate_high_low_n)
### !!! user input required
rec_rate_n_low_vs_high_all <- rbind(rec_rate_n_vs_lower_zone_1,
rec_rate_n_vs_lower_zone_2)#,
#rec_rate_n_vs_lower_zone_3)
#rec_rate_n_vs_lower_zone_4,
#rec_rate_n_vs_higher_zone_1,
#rec_rate_n_vs_higher_zone_2)
#rec_rate_n_vs_higher_zone_3,
#rec_rate_n_vs_higher_zone_4)
#rec_rate_p_low_vs_high_all <- rec_rate_p_vs_lower_zone_1
rec_rate_n_low_vs_high_all
## turn rec rate_vs_low_High_rate_summary to narrow format
## need t0 add in the zone name
zoneID_zone_names <- recom_rate1 %>% distinct(Zone_ID, .keep_all = TRUE) %>%
dplyr::select(Zone_ID,zone_name )
zoneID_zone_names
rec_rate_n_low_vs_high_all <- left_join(rec_rate_n_low_vs_high_all,zoneID_zone_names,
by = c("zone" = "zone_name"))
rec_rate_n_vs_low_High_summary <- full_join(rec_rate_n_vs_low_High_summary,
rec_rate_n_low_vs_high_all, by = c("Zone_ID", "comparison"))
names(rec_rate_n_vs_low_High_summary)
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
dplyr::select(
Zone_ID,
zone,
comparison,
yld_response,
higher_than_rec_rate_n,
lower_than_rec_rate_n,
rec_rate_n,
rec_rate_n_vs_higher,
rec_rate_n_vs_lower ,
se_comp_rec_rate_high_n,
se_comp_rec_rate_low_n,
Significant,
P_value,
rounded
)
## add in a few clms that help later
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
mutate(paddock_ID = unique(strips$Paddock_ID),
Strip_Type = unique(strips$Strip_Type),
input_file = input_file)
rec_rate_n_vs_low_High_summary
assigned_names2
rec_rate_n_vs_low_High_summary <- cbind(rec_rate_n_vs_low_High_summary,assigned_names2)
rec_rate_n_vs_low_High_summary
str(rec_rate_n_vs_low_High_summary)
## not all comparison are valid - I need to drop some
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
filter(!is.na(zone))
#what is the recommed rate?
names(recom_rate1)
label_rec_rates <- recom_rate1 %>% group_by(rec_rate_high_low_n,
Rate, Strip_Rate, Zone_ID) %>%
summarise(count= n())
label_rec_rates
label_rec_rates <- ungroup(label_rec_rates) %>%
dplyr::select( rec_rate_high_low_n, Strip_Rate, Zone_ID)
label_rec_rates
label_rec_rates <- tidyr::pivot_wider(
label_rec_rates,
names_from = rec_rate_high_low_n,
values_from = Strip_Rate
)
label_rec_rates <- data.frame(label_rec_rates)
names(label_rec_rates)
## !! make sure this runs
label_rec_rates <-label_rec_rates %>% rename(
#higher_than_rec_rate_n_label = higher_than_rec_rate_n,
lower_than_rec_rate_n_label = lower_than_rec_rate_n,
rec_rate_n_label = rec_rate_n)
str(label_rec_rates)
str(rec_rate_n_vs_low_High_summary)
rec_rate_n_vs_low_High_summary <- full_join(rec_rate_n_vs_low_High_summary, label_rec_rates, by = "Zone_ID")
names(rec_rate_n_vs_low_High_summary)
#remove duplication
rec_rate_n_vs_low_High_summary <- dplyr::distinct(rec_rate_n_vs_low_High_summary,
Zone_ID, comparison, .keep_all = TRUE)
### Sanity check: a yield response is only valid when the matching comparison
### rate actually has data; otherwise blank it out.
# NOTE: higher_than_rec_rate_n / lower_than_rec_rate_n hold numeric yields or
# NA -- never the string "NA".  The old test `x != "NA"` evaluated to NA for
# missing values (which case_when treats as no-match) and only worked by
# accident; is.na() states the intent directly with the same result.
rec_rate_n_vs_low_High_summary <- rec_rate_n_vs_low_High_summary %>%
  mutate(
    yld_response = case_when(
      comparison == "rec_n_v_higher" &
        !is.na(higher_than_rec_rate_n) ~ yld_response,
      comparison == "rec_n_v_lower" &
        !is.na(lower_than_rec_rate_n) ~ yld_response,
      TRUE ~ "NA"
    ))
View(rec_rate_n_vs_low_High_summary)
#save the output
name_rec_rate_low_high <- paste0("W:/value_soil_testing_prj/Yield_data/2020/processing/r_outputs/rec_rate_comparision_N/rec_rate_comp_",
dplyr::distinct(all_results_1,paddock_ID_Type), ".csv")
name_rec_rate_low_high
write.csv(rec_rate_n_vs_low_High_summary, name_rec_rate_low_high)
|
# corr: for every monitor CSV in `directory` with more than `threshold`
# complete observations, return the correlation between its sulfate and
# nitrate measurements.
#
# directory  path containing the monitor files ("001.csv", "002.csv", ...).
# threshold  minimum number of complete cases a monitor needs to be included.
#
# Returns a numeric vector of correlations (length 0 when no monitor
# qualifies).  Relies on complete() from the companion assignment file.
corr <- function(directory, threshold = 0)
{
  # seq_along() is safe when the directory is empty, unlike the old
  # 1:length(list.files(directory)), which expanded to c(1, 0).
  monitor_ids <- seq_along(list.files(directory))
  valid_counts <- complete(directory, monitor_ids)
  valid_counts <- valid_counts[valid_counts$nobs > threshold, "id"]
  res <- numeric(length(valid_counts))
  for (i in seq_along(valid_counts))
  {
    fi <- sprintf("%03d.csv", valid_counts[i])
    # na.omit drops incomplete rows so cor() sees paired observations only
    data <- na.omit(read.csv(file.path(directory, fi)))
    res[i] <- cor(data[, "sulfate"], data[, "nitrate"])
  }
  res
} | /r_programming/wk2/assignment1/corr.R | no_license | ddexter/datasciencecoursera | R | false | false | 431 | r | corr <- function(directory, threshold = 0)
{
  # Body of corr(directory, threshold): correlate sulfate vs nitrate for
  # every monitor file with more than `threshold` complete observations.
  # seq_along() handles an empty directory safely (the old
  # 1:length(list.files(directory)) expanded to c(1, 0)).
  ids <- seq_along(list.files(directory))
  counts <- complete(directory, ids)
  keep <- counts[counts$nobs > threshold, "id"]
  out <- numeric(length(keep))
  for (i in seq_along(keep))
  {
    f <- sprintf("%03d.csv", keep[i])
    # drop incomplete rows so cor() only sees paired sulfate/nitrate values
    obs <- na.omit(read.csv(file.path(directory, f)))
    out[i] <- cor(obs[, "sulfate"], obs[, "nitrate"])
  }
  out
}
# server.R
# Global data-preparation section for the COVID-19 Shiny dashboard: loads the
# international case/death time series plus a separate UK series, harmonises
# column and country names, and merges them into one long `data` frame.
library(shiny)
library(tidyverse)
library(ggplot2)
# read in global data
# NOTE(review): this raw.githubusercontent URL carries a personal access
# ?token=... query string, which GitHub expires -- confirm the link still
# resolves before deploying.
data <- read_csv("https://raw.githubusercontent.com/andrewlilley/tool_COVID-19/master/output_data/country_level.csv?token=ANJMHSDWF7URVBXFWA5I47K6SAWOG")
# drop the unnamed row-number column ("X1") if the CSV was written with one
if(any(colnames(data)=="X1")){
data <- data %>%
select(-X1)
}
data <- data %>%
rename(country = Region, total_cases = Transmissions, total_deaths = Deaths)
# drop the UK rows here; they are replaced below by the dedicated UK series
data <- data[data$country!="UK",]
data$date = as.Date(data$date, "%Y-%m-%d")
# read in UK data (note: day/month/year format, unlike the global file)
UK.data <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_total.csv")
UK.data$date = as.Date(UK.data$date, "%d/%m/%Y")
UK.data$country <- "United Kingdom"
data <- rbind(data, UK.data)
# expand abbreviated country names so they match the population lookup table
data$country[data$country=="US"]<-"United States"
data$country[data$country=="UAE"]<-"United Arab Emirates"
data <- data[order(data$country),]
# Calculate new daily cases and deaths from the cumulative totals, per
# country.  ave() applies the diff within each country while preserving row
# order, so this matches the old per-country loop (the first day of each
# country has no prior day and is set to 0).  The old loop also grew
# out.cases/out.deaths with c(), which is O(n^2) in the number of rows.
daily_from_total <- function(v) c(0, diff(v))
data$new_cases  <- ave(data$total_cases,  data$country, FUN = daily_from_total)
data$new_deaths <- ave(data$total_deaths, data$country, FUN = daily_from_total)
data <- gather(data, key="type", value="number",3:ncol(data))
UK.data <- data[data$country=="United Kingdom",]
# UK breakdown data
UK_by_country_cases <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_countries_cases.csv")
UK_by_country_pop <- data.frame(country=c("England","Scotland","Wales","Northern Ireland"), pop=c(55980000,5438000,3139000,1882000))
UK_by_country_cases$pop <- UK_by_country_pop$pop
UK_by_country_cases <- gather(UK_by_country_cases, key="date", value="total_cases",2:(ncol(UK_by_country_cases)-1))
UK_by_country_deaths <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_countries_deaths.csv")
UK_by_country_deaths <- UK_by_country_deaths[,c(1,3:6)]
UK_by_country_deaths <- gather(UK_by_country_deaths, key="country", value="total_deaths",2:(ncol(UK_by_country_deaths)))
UK_by_country_deaths <- UK_by_country_deaths %>%
rename(date = Date)
UK_by_country <- left_join(UK_by_country_cases,UK_by_country_deaths,by=c("date","country"))
UK_by_country <- UK_by_country[order(UK_by_country$country),]
out.cases <- c()
out.deaths <- c()
for (i in 1:nrow(UK_by_country_pop)){
x <- UK_by_country[UK_by_country$country==UK_by_country_pop$country[order(UK_by_country_pop$country)][i],]
out.cases <- c(out.cases,0,diff(x$total_cases))
out.deaths <- c(out.deaths,NA,diff(x$total_deaths))
}
UK_by_country$new_cases <- out.cases
UK_by_country$new_deaths <- out.deaths
UK_by_country <- gather(UK_by_country, key="type", value="number", 4:7)
UK_by_country$date <- as.Date(UK_by_country$date, "%d/%m/%Y")
# read in country population data
country.pop.data <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/Other/country_pop.csv")
data <- left_join(data,country.pop.data, by="country")
data$number_pop <- 100000*data$number/data$pop
# list of countries with >=100 cases
data.100 <- data[data$type=="total_cases",]
data.100 <- data.100[data.100$number>=100,]
uni.country.100 <- c(unique(data.100$country))
data.100.out <- NULL
date.100 <- c(as.Date("2020-01-01","%Y-%m-%d"))
for (i in 1:length(uni.country.100)){
x <- data.100[data.100$country==uni.country.100[i],]
out <- as.Date(x$date[which(x$number>=100)],"%Y-%m-%d")
out_pop <- as.Date(x$date[which(x$number_pop>=1)],"%Y-%m-%d")
out <- min(out)
x$date_rel <- x$date - out
if(length(out_pop)==0){
x$date_rel_pop <- c(rep(NA, length(x$date)))
} else{
out_pop <- min(out_pop)
x$date_rel_pop <- x$date - out_pop
}
x <- x[x$date_rel>=0,]
data.100.out <- rbind(data.100.out, x)
}
data.100 <- data.100.out
# relative dates - deaths
data.deaths10 <- data[data$type=="total_deaths",]
data.deaths10 <- data.deaths10[data.deaths10$number>=5,]
uni.country.deaths10 <- c(unique(data.deaths10$country))
data.deaths10.out <- NULL
date.deaths10 <- c(as.Date("2020-01-01","%Y-%m-%d"))
for (i in 1:length(uni.country.deaths10)){
x <- data.deaths10[data.deaths10$country==uni.country.deaths10[i],]
out <- as.Date(x$date[which(x$number>=10)],"%Y-%m-%d")
out_pop <- as.Date(x$date[which(x$number_pop>=0.5)],"%Y-%m-%d")
out <- min(out)
x$date_rel <- x$date - out
if(length(out_pop)==0){
x$date_rel_pop <- c(rep(NA, length(x$date)))
} else{
out_pop <- min(out_pop)
x$date_rel_pop <- x$date - out_pop
}
x <- x[x$date_rel>=0,]
data.deaths10.out <- rbind(data.deaths10.out, x)
}
data.deaths10 <- data.deaths10.out
data.deaths10$date_rel <- as.numeric(data.deaths10$date_rel)
data.deaths10$date_rel_pop <- as.numeric(data.deaths10$date_rel_pop)
# UK county data
# read in UK county data
data.county <- "https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/england_countyUA.csv"
data.county <- read_csv(data.county)
data.county <- gather(data.county, key="date", value="total_cases",3:ncol(data.county))
data.county$date = as.Date(data.county$date, "%d/%m/%Y")
data.county <- data.county[order(data.county$county_UA),]
# calculate new daily cases
data.county$new_case <- c()
uni.county <- unique(data.county$county_UA)
out <- c()
for (i in 1:length(uni.county)){
x <- data.county[data.county$county_UA==uni.county[i],]
out <- c(out,0,diff(x$total_cases))
}
data.county$new_cases <- out
#data.county$new_cases[data.county$new_cases<0]<- 0
data.county <- gather(data.county,key="type",value="number",4:ncol(data.county))
# get list of counties
data.county$county_UA <- as.character(data.county$county_UA)
county_LA.list <- c(unique(data.county$county_UA))
list.county <- list()
for (i in 1:length(county_LA.list)){
list.county[i] <- county_LA.list[i]
}
names(list.county) <- county_LA.list
# read in England region data
data.region <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/england_region.csv")
data.region <- gather(data.region, key="date", value="cases",3:ncol(data.region))
data.region$date = as.Date(data.region$date, "%d/%m/%Y")
data.region <- data.region %>%
rename(region = NHSRNm, total_cases =cases)
data.region <- data.region[order(data.region$region),]
# get list of regions
data.region$region <- as.character(data.region$region)
region.list <- c(unique(data.region$region))
list.region <- list()
for (i in 1:length(region.list)){
list.region[i] <- region.list[i]
}
names(list.region) <- region.list
# calculate new daily cases
data.region$new_case <- c()
uni.region <- unique(data.region$region)
out <- c()
for (i in 1:length(uni.region)){
x <- data.region[data.region$region==uni.region[i],]
out <- c(out,0,diff(x$total_cases))
}
data.region$new_cases <- out
# get per 100,000 population results
region.pop.data <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/NHS_england_regions_pop.csv")
data.region.pop <- left_join(data.region,region.pop.data, by="region")
data.region.pop <- data.region.pop %>%
mutate(total_cases = 100000*total_cases/pop, new_cases=100000*new_cases/pop)
data.region <- gather(data.region,key="type",value="number",4:ncol(data.region))
data.region.pop <- gather(data.region.pop,key="type",value="number",4:(ncol(data.region.pop)-1))
# Testing data
data.test <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_testing.csv")
data.test <- data.test[,1:4]
data.test <- data.test %>%
select(date, total_tested = tested, total_cases=cases, new_cases)
data.test$date = as.Date(data.test$date, "%d/%m/%Y")
data.test$new_tested <- c(NA,diff(data.test$total_tested))
data.test$total_prop_pos <- 100*data.test$total_cases/data.test$total_tested
data.test$new_prop_pos <- 100*data.test$new_cases/data.test$new_tested
data.test <- gather(data.test, key="type", value="number",2:ncol(data.test))
# Define server logic required to plot various variables against mpg
shinyServer(function(input, output, session) {
# Change date range for by country UK graphs
observe({
val <- input$checkGroup_UK
if(length(val)<3 & input$tabs_UK==2){
x <- sum("new_deaths" %in% val, "total_deaths" %in% val)
if(x==length(val)) {
updateDateRangeInput(session, "dateRange_UK",
start = as.Date("27/03/2020", "%d/%m/%Y"),
end = max(UK.data$date),
min = as.Date("27/03/2020", "%d/%m/%Y"),
max = max(UK.data$date))
} else{
updateDateRangeInput(session, "dateRange_UK",
start = as.Date("09/03/2020", "%d/%m/%Y"),
end = max(UK.data$date),
min = as.Date("09/03/2020", "%d/%m/%Y"),
max = max(UK.data$date))
}
}
})
# Compute the forumla text in a reactive expression since it is
# shared by the output$caption and output$mpgPlot expressions
formulaText <- reactive({
paste(input$country)
})
formulaText_county <- reactive({
paste(input$county)
})
output$startdate <- renderText({
paste("Date range: ",as.character(input$dateRange[1])," to ",as.character(input$dateRange[2]),sep="")
})
# Return the formula text for printing as a caption
output$caption <- renderText({
formulaText()
})
output$caption_county <- renderText({
formulaText_county()
})
red <- data.county[data.county$date == max(data.county$date) & data.county$type == "new_cases",]
red <- red[order(red$number,decreasing=TRUE),]
red2 <- data.county[data.county$date == max(data.county$date) & data.county$type == "total_cases",]
red2 <- red2[order(red2$number,decreasing=TRUE),]
output$county_newcase_update <- renderText({
paste("Top 5 highest new daily cases: ", as.character(red$county_UA[1])," (", red$number[1],"), ",
as.character(red$county_UA[2])," (", red$number[2],"), ",
as.character(red$county_UA[3])," (", red$number[3],"), ",
as.character(red$county_UA[4])," (", red$number[4],"), ",
as.character(red$county_UA[5])," (", red$number[5],"), ", sep="")
})
output$county_totalcase_update <- renderText({
paste("Top 5 highest total cases: ", as.character(red2$county_UA[1])," (", red2$number[1],"), ",
as.character(red2$county_UA[2])," (", red2$number[2],"), ",
as.character(red2$county_UA[3])," (", red2$number[3],"), ",
as.character(red2$county_UA[4])," (", red2$number[4],"), ",
as.character(red2$county_UA[5])," (", red2$number[5],"), ", sep="")
})
url <- a("Twitter", href="https://twitter.com/maxeyre3")
output$twitter <- renderUI({
tagList(url)
})
output$twitter2 <- renderUI({
tagList(url)
})
output$twitter_comp <- renderUI({
tagList(url)
})
output$twitter3 <- renderUI({
tagList(url)
})
output$twitter4 <- renderUI({
tagList(url)
})
output$twitter_UK <- renderUI({
tagList(url)
})
url_data <- a("JHU CSSE Data sources", href="https://github.com/CSSEGISandData/COVID-19")
url_data_andrew <- a("Thanks to Andrew Lilley for scraping international data", href="https://twitter.com/alil9145")
url_data2 <- a("Data source", href="https://www.gov.uk/guidance/coronavirus-covid-19-information-for-the-public")
output$data_source <- renderUI({
tagList(url_data)
})
output$data_source_comp <- renderUI({
tagList(url_data)
})
output$data_source_andrew <- renderUI({
tagList(url_data_andrew)
})
output$data_source_andrew_comp <- renderUI({
tagList(url_data_andrew)
})
output$data_source2 <- renderUI({
tagList(url_data2)
})
output$data_source_UK <- renderUI({
tagList(url_data2)
})
output$data_source3 <- renderUI({
tagList(url_data2)
})
output$data_source4 <- renderUI({
tagList(url_data2)
})
output$checkGroup <- renderText({
paste(as.character(length(input$checkGroup)))
})
output$checkGroup_county <- renderText({
paste(as.character(c(input$checkGroup_county)))
})
output$checkGroup_region <- renderText({
paste(as.character(c(input$checkGroup_region)))
})
output$dateRange.100 <- renderPrint({ input$dateRange.100 })
output$counter <- renderText({
library(rdrop2)
token <- readRDS("token.rds")
counter <- drop_read_csv("counter.csv",dtoken = token)
counter$count <- counter$count + 1
counter <- counter%>%
select(count)
write.csv(counter, file = "counter.csv")
drop_upload("counter.csv",dtoken = token)
paste0(counter$count," site visits", sep="")
})
# Single country plots
# Time series of the ticked case/death series for the selected country.
# The two original branches duplicated the whole plot and differed only in
# the y column (absolute vs per-100,000) and axis label, so choose those
# once and build a single plot.
output$countryPlot <- renderPlot({
  # Keep only the series (daily/total cases/deaths) ticked by the user.
  lines <- as.character(input$checkGroup)
  data <- data[data$type %in% lines, ]
  if (input$pop_country == "pop_yes") {
    yvar <- "number_pop"
    ylab_txt <- "Number (per 100,000)"
  } else {
    yvar <- "number"
    ylab_txt <- "Number"
  }
  p <- ggplot(data[data$country == formulaText(), ]) +
    geom_point(aes(x = date, y = .data[[yvar]], col = type), size = 1.5) +
    geom_line(aes(x = date, y = .data[[yvar]], col = type), size = 1) +
    scale_x_date(limits = c(input$dateRange[1], input$dateRange[2])) +
    xlab(label = "") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_cases" = "#000000", "new_cases" = "#e41a1c",
                                   "total_deaths" = "#ff7f00", "new_deaths" = "#a65628"),
                        breaks = c("new_cases", "total_cases", "new_deaths", "total_deaths"),
                        labels = c("Cases (daily)", "Cases (total)", "Deaths (daily)", "Deaths (total)")) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
  # Log-axis toggle applies identically to both y choices.
  if (input$log == 'log_yes') {
    p <- p + scale_y_log10(labels = scales::comma)
  }
  p
})
# country comparisons
# Cross-country comparison on a relative time axis (days since the 100th
# case / since the deaths threshold). The two original branches duplicated
# the whole plot; only the x/y columns, the label and the linear-axis comma
# formatting differed, so those are selected up front.
output$countryPlot_compare <- renderPlot({
  lines2 <- as.character(input$checkGroup_countryCompare)
  # Choose the source frame and the base axis label.
  if (input$compare_by == "cases") {
    data.100 <- data.100[data.100$country %in% lines2, ]
    lab_y <- "Cases"
  } else {
    data.100 <- data.deaths10[data.deaths10$country %in% lines2, ]
    lab_y <- "Deaths"
  }
  if (input$compare_pop == "pop_no") {
    xvar <- "date_rel"
    yvar <- "number"
    ylab_txt <- lab_y
    comma_axis <- TRUE  # original only comma-formats the absolute-count axis
  } else {
    xvar <- "date_rel_pop"
    yvar <- "number_pop"
    ylab_txt <- paste0(lab_y, " (per 100,000)")
    comma_axis <- FALSE
  }
  # Anchor the y-range at the value on day 0 so all curves share a baseline.
  y_min <- min(data.100[[yvar]][data.100[[xvar]] == 0], na.rm = TRUE)
  y_max <- max(data.100[[yvar]], na.rm = TRUE)
  y_scale <- if (comma_axis) {
    scale_y_continuous(limits = c(y_min, y_max), labels = scales::comma)
  } else {
    scale_y_continuous(limits = c(y_min, y_max))
  }
  p2 <- ggplot(data.100) +
    geom_point(aes(x = .data[[xvar]], y = .data[[yvar]], col = country), size = 1.5) +
    geom_line(aes(x = .data[[xvar]], y = .data[[yvar]], col = country), size = 1) +
    scale_x_continuous(limits = c(input$dateRange.100[1], input$dateRange.100[2])) +
    y_scale +
    xlab(label = "Days") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -0.5),
          axis.title.y = element_text(vjust = 2),
          legend.title = element_blank(),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
  if (input$log_compare == 'log_yes') {
    p2 <- p2 + scale_y_log10(limits = c(y_min, y_max), labels = scales::comma)
  }
  p2
})
# UK plot
# Whole-UK time series. The per-100,000 toggle was the only difference
# between the two original branches, so the scaled column is computed once.
output$UKPlot <- renderPlot({
  lines <- as.character(input$checkGroup_UK)
  UK.data <- UK.data[UK.data$type %in% lines, ]
  # 66,440,000 is the hard-coded UK population used by the original for the
  # per-100,000 conversion.
  if (input$pop_UK == "pop_yes") {
    UK.data$yval <- 100000 * UK.data$number / 66440000
    ylab_txt <- "Number (per 100,000)"
  } else {
    UK.data$yval <- UK.data$number
    ylab_txt <- "Number"
  }
  p <- ggplot(UK.data) +
    geom_point(aes(x = date, y = yval, col = type), size = 1.5) +
    geom_line(aes(x = date, y = yval, col = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_UK[1], input$dateRange_UK[2])) +
    xlab(label = "") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_cases" = "#000000", "new_cases" = "#e41a1c",
                                   "total_deaths" = "#ff7f00", "new_deaths" = "#a65628"),
                        breaks = c("new_cases", "total_cases", "new_deaths", "total_deaths"),
                        labels = c("Cases (daily)", "Cases (total)", "Deaths (daily)", "Deaths (total)")) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
  if (input$log_UK == 'log_yes') {
    p <- p + scale_y_log10(labels = scales::comma)
  }
  p
})
# UK plot
# Per-nation (England/Scotland/Wales/NI) series, colour = nation,
# linetype = series. Deduplicated: the original branches differed only in
# the y scaling and label.
output$UKPlot_by_country <- renderPlot({
  lines <- as.character(input$checkGroup_UK)
  UK_by_country <- UK_by_country[UK_by_country$type %in% lines, ]
  val <- input$checkGroup_UK
  # Death series only start on 27 Mar 2020; clamp the x-axis start when the
  # user has ticked nothing but death series on the by-country tab
  # (`&&` replaces the original scalar use of `&`).
  if (length(val) < 3 && input$tabs_UK == 2 &&
      sum("new_deaths" %in% val, "total_deaths" %in% val) == length(val)) {
    date.min <- as.Date("27/03/2020", "%d/%m/%Y")
  } else {
    date.min <- as.Date("09/03/2020", "%d/%m/%Y")
  }
  if (input$pop_UK == "pop_yes") {
    UK_by_country$yval <- 100000 * UK_by_country$number / UK_by_country$pop
    ylab_txt <- "Number (per 100,000)"
  } else {
    UK_by_country$yval <- UK_by_country$number
    ylab_txt <- "Number"
  }
  p <- ggplot(UK_by_country) +
    geom_point(aes(x = date, y = yval, col = country), size = 1.5) +
    geom_line(aes(x = date, y = yval, col = country, linetype = type), size = 1) +
    scale_x_date(limits = c(date.min, input$dateRange_UK[2])) +
    xlab(label = "") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.title = element_blank(),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_linetype_manual(name = "",
                          values = c("total_cases" = 1, "new_cases" = 2,
                                     "total_deaths" = 3, "new_deaths" = 4),
                          breaks = c("total_cases", "new_cases", "total_deaths", "new_deaths"),
                          labels = c("Cases (total)", "Cases (daily)", "Deaths (total)", "Deaths (daily)")) +
    guides(linetype = guide_legend(label.position = "top", keywidth = 2)) +
    theme(legend.direction = "horizontal", legend.box = "vertical")
  if (input$log_UK == 'log_yes') {
    p <- p + scale_y_log10(labels = scales::comma)
  }
  p
})
# England NHS regions plots
# One line per NHS region; linetype distinguishes total vs daily cases.
# Order matters here: the per-100,000 frame is swapped in BEFORE filtering,
# and the y-axis label is overridden AFTER the plot is built.
output$EnglandRegionPlot <- renderPlot({
lines <- c(as.character(input$checkGroup_region))
# Use the population-normalised frame when the per-100,000 toggle is on.
if (input$pop=="pop_yes"){
data.region <- data.region.pop
}
# Keep only the ticked series (total_cases / new_cases).
data.region<- data.region[data.region$type %in% lines, ]
p.pop <- ggplot(data.region) + geom_point(aes(x=date, y=number, col=region),size=1.5) +
geom_line(aes(x=date, y=number, col=region, linetype=type),size=1) +
scale_x_date(limits=c(input$dateRange_region[1],input$dateRange_region[2])) + xlab(label = "") +ylab(label="Number") +
theme_classic()+
theme(axis.text=element_text(size=13),
axis.title=element_text(size=16),
axis.title.x = element_text(vjust=-1.5),
axis.title.y = element_text(vjust=2),
legend.title = element_blank(),
legend.text = element_text(size=13),
legend.position = 'top',
legend.spacing.x = unit(0.4, 'cm'),
panel.grid.major.y=element_line(size=0.05)) + scale_linetype_manual(name="", values=c("total_cases"=1, "new_cases" = 2),
breaks=c("total_cases","new_cases"),
labels=c("Cases (total)","Cases (daily)")) +
guides(linetype = guide_legend(label.position = "top", keywidth = 2)) +
theme(legend.direction = "horizontal",legend.box = "vertical")
# Re-label the y-axis when plotting rates rather than counts.
if (input$pop=="pop_yes"){
p.pop <- p.pop + ylab(label="Number (per 100,000)")
}
if(input$log_region=='log_yes'){
p.pop <- p.pop + scale_y_log10(labels = scales::comma)
}
p.pop
})
# England county plots
# Cases over time for the selected county/unitary authority, restricted to
# the ticked series.
output$englandcountyPlot <- renderPlot({
  chosen_types <- as.character(input$checkGroup_county)
  county_data <- data.county[data.county$type %in% chosen_types, ]
  county_data <- county_data[county_data$county_UA == formulaText_county(), ]
  ggplot(county_data) +
    geom_point(aes(x = date, y = number, col = type), size = 1.5) +
    geom_line(aes(x = date, y = number, col = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_county[1], input$dateRange_county[2])) +
    xlab(label = "") + ylab(label = "Number") +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_cases" = "#000000", "new_cases" = "#e41a1c"),
                        breaks = c("new_cases", "total_cases"),
                        labels = c("Cases (daily)", "Cases (total)")) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
})
# UK testing plot
# Daily vs cumulative test counts over the selected date window; the series
# shown are chosen via input$checkGroup_test.
output$UKtestingPlot <- renderPlot({
lines <- c(as.character(input$checkGroup_test))
# Keep only the ticked series (new_tested / total_tested).
data.test <- data.test[data.test$type %in% lines, ]
p.test <- ggplot(data.test) + geom_point(aes(x=date, y=number, col=type),size=1.5)+
geom_line(aes(x=date, y=number, col=type, group=type),size=1) +
scale_x_date(limits=c(input$dateRange_test[1],input$dateRange_test[2])) + xlab(label = "") +ylab(label="Number tested") +
theme_classic()+
theme(axis.text=element_text(size=13),
axis.title=element_text(size=16),
axis.title.x = element_text(vjust=-1.5),
axis.title.y = element_text(vjust=2),
legend.text = element_text(size=13),
legend.position = 'top',
legend.spacing.x = unit(0.4, 'cm'),
panel.grid.major.y=element_line(size=0.05)) +
scale_colour_manual(name="",values = c("total_tested" = "#000000", "new_tested" = "#e41a1c"),
breaks=c("new_tested","total_tested"),
labels=c("Daily", "Total"))
# Optional log10 y-axis with comma-formatted labels.
if(input$log_test=='log_yes'){
p.test <- p.test + scale_y_log10(labels = scales::comma)
}
p.test
})
# Proportion of tests that were positive (daily and cumulative).
# NOTE(review): unlike UKtestingPlot above there is no log-scale branch here;
# confirm that omission is intentional.
output$UKtestingPlot2 <- renderPlot({
lines <- c(as.character(input$checkGroup_test2))
# Keep only the ticked series (new_prop_pos / total_prop_pos).
data.test <- data.test[data.test$type %in% lines, ]
p.test <- ggplot(data.test) + geom_point(aes(x=date, y=number, col=type),size=1.5)+
geom_line(aes(x=date, y=number, col=type, group=type),size=1) +
scale_x_date(limits=c(input$dateRange_test2[1],input$dateRange_test2[2])) + xlab(label = "") +ylab(label="Prop. positive (%)") +
theme_classic()+
theme(axis.text=element_text(size=13),
axis.title=element_text(size=16),
axis.title.x = element_text(vjust=-1.5),
axis.title.y = element_text(vjust=2),
legend.text = element_text(size=13),
legend.position = 'top',
legend.spacing.x = unit(0.4, 'cm'),
panel.grid.major.y=element_line(size=0.05)) +
scale_colour_manual(name="",values = c("total_prop_pos" = "#000000", "new_prop_pos" = "#e41a1c"),
breaks=c("new_prop_pos","total_prop_pos"),
labels=c("Daily", "Total"))
p.test
})
})
| /Data visualisation/shinyapp_archive/shinyapp_9apr/server.R | no_license | maxeyre/COVID-19 | R | false | false | 30,297 | r | # server.R
library(shiny)
library(tidyverse)
library(ggplot2)
# read in global data
# Cumulative country-level cases/deaths scraped daily; fetched from GitHub
# at app start-up.
# NOTE(review): the ?token=... query string embeds a GitHub access token in
# source control — it should be rotated and removed from the code.
data <- read_csv("https://raw.githubusercontent.com/andrewlilley/tool_COVID-19/master/output_data/country_level.csv?token=ANJMHSDWF7URVBXFWA5I47K6SAWOG")
# Drop the row-index column that write.csv-style exports sometimes add.
if(any(colnames(data)=="X1")){
data <- data %>%
select(-X1)
}
data <- data %>%
rename(country = Region, total_cases = Transmissions, total_deaths = Deaths)
# Drop the scraped UK rows; they are replaced by the curated UK_total.csv.
data <- data[data$country!="UK",]
data$date = as.Date(data$date, "%Y-%m-%d")
UK.data <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_total.csv")
UK.data$date = as.Date(UK.data$date, "%d/%m/%Y")
UK.data$country <- "United Kingdom"
data <- rbind(data, UK.data)
# Normalise names so they match the country population lookup used later.
data$country[data$country=="US"]<-"United States"
data$country[data$country=="UAE"]<-"United Arab Emirates"
data <- data[order(data$country),]
# calculate new daily cases, deaths, recoveries
# First-difference the cumulative series within each country; the first day
# of every country gets 0. ave() keeps row positions and groups by country,
# so it no longer depends on the frame being pre-sorted, and it replaces the
# original O(n^2) grow-a-vector loop (the `data$new_cases <- c()` lines were
# no-ops that only "removed" columns that did not exist yet).
daily_increments <- function(total) c(0, diff(total))
data$new_cases <- ave(data$total_cases, data$country, FUN = daily_increments)
data$new_deaths <- ave(data$total_deaths, data$country, FUN = daily_increments)
# Long format: one row per (country, date, series type) with `number` value.
data <- gather(data, key="type", value="number",3:ncol(data))
UK.data <- data[data$country=="United Kingdom",]
# UK breakdown data
# Per-nation (England/Scotland/Wales/NI) cases and deaths, reshaped long and
# joined into one frame.
UK_by_country_cases <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_countries_cases.csv")
UK_by_country_pop <- data.frame(country=c("England","Scotland","Wales","Northern Ireland"), pop=c(55980000,5438000,3139000,1882000))
# NOTE(review): pop is assigned positionally and recycled over rows — this
# assumes the cases csv rows cycle England, Scotland, Wales, NI in that
# order; confirm against the csv.
UK_by_country_cases$pop <- UK_by_country_pop$pop
UK_by_country_cases <- gather(UK_by_country_cases, key="date", value="total_cases",2:(ncol(UK_by_country_cases)-1))
UK_by_country_deaths <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_countries_deaths.csv")
# Column 2 is dropped here; presumably a UK-total column — verify.
UK_by_country_deaths <- UK_by_country_deaths[,c(1,3:6)]
UK_by_country_deaths <- gather(UK_by_country_deaths, key="country", value="total_deaths",2:(ncol(UK_by_country_deaths)))
UK_by_country_deaths <- UK_by_country_deaths %>%
rename(date = Date)
UK_by_country <- left_join(UK_by_country_cases,UK_by_country_deaths,by=c("date","country"))
UK_by_country <- UK_by_country[order(UK_by_country$country),]
# Per-nation daily increments. Note the asymmetry: first case increment is
# seeded with 0 but the first death increment with NA.
out.cases <- c()
out.deaths <- c()
for (i in 1:nrow(UK_by_country_pop)){
x <- UK_by_country[UK_by_country$country==UK_by_country_pop$country[order(UK_by_country_pop$country)][i],]
out.cases <- c(out.cases,0,diff(x$total_cases))
out.deaths <- c(out.deaths,NA,diff(x$total_deaths))
}
UK_by_country$new_cases <- out.cases
UK_by_country$new_deaths <- out.deaths
# Columns 4:7 are total_cases, total_deaths, new_cases, new_deaths.
UK_by_country <- gather(UK_by_country, key="type", value="number", 4:7)
UK_by_country$date <- as.Date(UK_by_country$date, "%d/%m/%Y")
# read in country population data
country.pop.data <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/Other/country_pop.csv")
data <- left_join(data,country.pop.data, by="country")
# Rate per 100,000 population (NA where the country has no pop match).
data$number_pop <- 100000*data$number/data$pop
# list of countries with >=100 cases
# For each such country, compute days since the 100th case (date_rel) and
# days since 1 case per 100,000 (date_rel_pop, NA if never reached).
data.100 <- data[data$type=="total_cases",]
data.100 <- data.100[data.100$number>=100,]
uni.country.100 <- c(unique(data.100$country))
data.100.out <- NULL
# NOTE(review): date.100 appears to be unused after this point.
date.100 <- c(as.Date("2020-01-01","%Y-%m-%d"))
for (i in 1:length(uni.country.100)){
x <- data.100[data.100$country==uni.country.100[i],]
out <- as.Date(x$date[which(x$number>=100)],"%Y-%m-%d")
out_pop <- as.Date(x$date[which(x$number_pop>=1)],"%Y-%m-%d")
# Day 0 = first date the threshold was reached.
out <- min(out)
x$date_rel <- x$date - out
if(length(out_pop)==0){
x$date_rel_pop <- c(rep(NA, length(x$date)))
} else{
out_pop <- min(out_pop)
x$date_rel_pop <- x$date - out_pop
}
# Keep only days on/after the absolute-count threshold.
x <- x[x$date_rel>=0,]
data.100.out <- rbind(data.100.out, x)
}
data.100 <- data.100.out
# relative dates - deaths
# Same construction as data.100 but for deaths: day 0 = 10th death (or 0.5
# deaths per 100,000 for the per-population axis).
# NOTE(review): rows are pre-filtered at >=5 deaths yet day 0 is anchored at
# >=10 — confirm the 5 here is intentional (the later >=0 filter discards
# the 5-9 rows anyway).
data.deaths10 <- data[data$type=="total_deaths",]
data.deaths10 <- data.deaths10[data.deaths10$number>=5,]
uni.country.deaths10 <- c(unique(data.deaths10$country))
data.deaths10.out <- NULL
# NOTE(review): date.deaths10 appears to be unused after this point.
date.deaths10 <- c(as.Date("2020-01-01","%Y-%m-%d"))
for (i in 1:length(uni.country.deaths10)){
x <- data.deaths10[data.deaths10$country==uni.country.deaths10[i],]
out <- as.Date(x$date[which(x$number>=10)],"%Y-%m-%d")
out_pop <- as.Date(x$date[which(x$number_pop>=0.5)],"%Y-%m-%d")
out <- min(out)
x$date_rel <- x$date - out
if(length(out_pop)==0){
x$date_rel_pop <- c(rep(NA, length(x$date)))
} else{
out_pop <- min(out_pop)
x$date_rel_pop <- x$date - out_pop
}
x <- x[x$date_rel>=0,]
data.deaths10.out <- rbind(data.deaths10.out, x)
}
data.deaths10 <- data.deaths10.out
# Convert difftime to plain numerics for scale_x_continuous.
data.deaths10$date_rel <- as.numeric(data.deaths10$date_rel)
data.deaths10$date_rel_pop <- as.numeric(data.deaths10$date_rel_pop)
# UK county data
# read in UK county data
# English county/unitary-authority cumulative cases, reshaped long, with
# per-county daily increments and a named list for the county selector.
data.county <- "https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/england_countyUA.csv"
data.county <- read_csv(data.county)
data.county <- gather(data.county, key="date", value="total_cases",3:ncol(data.county))
data.county$date <- as.Date(data.county$date, "%d/%m/%Y")
data.county <- data.county[order(data.county$county_UA),]
# Daily increments within each county; first day gets 0. ave() keeps row
# positions and replaces the original grow-a-vector loop (whose
# `data.county$new_case <- c()` line only deleted a non-existent column).
data.county$new_cases <- ave(data.county$total_cases, data.county$county_UA,
                             FUN = function(total) c(0, diff(total)))
# Negative increments (data corrections) are deliberately left in place:
#data.county$new_cases[data.county$new_cases<0]<- 0
data.county <- gather(data.county,key="type",value="number",4:ncol(data.county))
# get list of counties: named list (names == values) for selectInput, built
# directly instead of growing a list element-by-element.
data.county$county_UA <- as.character(data.county$county_UA)
county_LA.list <- unique(data.county$county_UA)
list.county <- as.list(county_LA.list)
names(list.county) <- county_LA.list
# read in England region data
# NHS-region cumulative cases, reshaped long, plus a per-100,000 variant.
data.region <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/england_region.csv")
data.region <- gather(data.region, key="date", value="cases",3:ncol(data.region))
data.region$date = as.Date(data.region$date, "%d/%m/%Y")
data.region <- data.region %>%
rename(region = NHSRNm, total_cases =cases)
data.region <- data.region[order(data.region$region),]
# get list of regions
# Named list (names == values) for the region selector UI.
data.region$region <- as.character(data.region$region)
region.list <- c(unique(data.region$region))
list.region <- list()
for (i in 1:length(region.list)){
list.region[i] <- region.list[i]
}
names(list.region) <- region.list
# calculate new daily cases
# Relies on the frame being sorted by region above; first day per region = 0.
# (`data.region$new_case <- c()` is a no-op: that column never existed.)
data.region$new_case <- c()
uni.region <- unique(data.region$region)
out <- c()
for (i in 1:length(uni.region)){
x <- data.region[data.region$region==uni.region[i],]
out <- c(out,0,diff(x$total_cases))
}
data.region$new_cases <- out
# get per 100,000 population results
region.pop.data <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/NHS_england_regions_pop.csv")
data.region.pop <- left_join(data.region,region.pop.data, by="region")
data.region.pop <- data.region.pop %>%
mutate(total_cases = 100000*total_cases/pop, new_cases=100000*new_cases/pop)
# Long format; the pop frame excludes its trailing `pop` column from gather.
data.region <- gather(data.region,key="type",value="number",4:ncol(data.region))
data.region.pop <- gather(data.region.pop,key="type",value="number",4:(ncol(data.region.pop)-1))
# Testing data
# UK testing series: derive daily tests and positive-test proportions, then
# reshape to long format for plotting.
data.test <- read_csv("https://raw.githubusercontent.com/maxeyre/COVID-19/master/Data%20visualisation/UK%20data/UK_testing.csv")
data.test <- data.test[, 1:4]
data.test <- data.test %>%
  select(date, total_tested = tested, total_cases = cases, new_cases)
data.test$date <- as.Date(data.test$date, "%d/%m/%Y")
data.test <- data.test %>%
  mutate(
    new_tested = c(NA, diff(total_tested)),
    total_prop_pos = 100 * total_cases / total_tested,
    new_prop_pos = 100 * new_cases / new_tested
  )
data.test <- gather(data.test, key = "type", value = "number", 2:ncol(data.test))
# Define server logic required to plot various variables against mpg
shinyServer(function(input, output, session) {
# Change date range for by country UK graphs
# Shrink the selectable UK date range when only death series are ticked on
# the by-country tab: UK death data starts 27 Mar 2020, case data 9 Mar 2020.
# Deduplicated: the two updateDateRangeInput() calls differed only in the
# start/min date. `&&` replaces the original scalar use of `&`.
observe({
  val <- input$checkGroup_UK
  if (length(val) < 3 && input$tabs_UK == 2) {
    deaths_only <- sum("new_deaths" %in% val, "total_deaths" %in% val) == length(val)
    start_date <- if (deaths_only) {
      as.Date("27/03/2020", "%d/%m/%Y")
    } else {
      as.Date("09/03/2020", "%d/%m/%Y")
    }
    updateDateRangeInput(session, "dateRange_UK",
                         start = start_date,
                         end = max(UK.data$date),
                         min = start_date,
                         max = max(UK.data$date))
  }
})
# Reactive accessors for the currently selected country / county names,
# shared by the captions and the plot expressions.
formulaText <- reactive({
  paste(input$country)
})
formulaText_county <- reactive({
  paste(input$county)
})
# Human-readable summary of the selected date window.
output$startdate <- renderText({
  paste0("Date range: ", as.character(input$dateRange[1]),
         " to ", as.character(input$dateRange[2]))
})
# Plot captions mirror the current selections.
output$caption <- renderText({
  formulaText()
})
output$caption_county <- renderText({
  formulaText_county()
})
# Latest-day county league tables, computed once at session start (the
# underlying frame is static for the life of the session).
latest <- data.county[data.county$date == max(data.county$date), ]
red <- latest[latest$type == "new_cases", ]
red <- red[order(red$number, decreasing = TRUE), ]
red2 <- latest[latest$type == "total_cases", ]
red2 <- red2[order(red2$number, decreasing = TRUE), ]
# Format "Name (n), Name (n), ..." for the top 5 rows of a ranked frame.
# The original paste() used the default sep = " " (doubling every space) and
# left a trailing ", " after the fifth entry; collapse fixes both.
top5_text <- function(df, heading) {
  top <- df[1:5, ]
  paste0(heading,
         paste0(as.character(top$county_UA), " (", top$number, ")",
                collapse = ", "))
}
output$county_newcase_update <- renderText({
  top5_text(red, "Top 5 highest new daily cases: ")
})
output$county_totalcase_update <- renderText({
  top5_text(red2, "Top 5 highest total cases: ")
})
url <- a("Twitter", href="https://twitter.com/maxeyre3")
output$twitter <- renderUI({
tagList(url)
})
output$twitter2 <- renderUI({
tagList(url)
})
output$twitter_comp <- renderUI({
tagList(url)
})
output$twitter3 <- renderUI({
tagList(url)
})
output$twitter4 <- renderUI({
tagList(url)
})
output$twitter_UK <- renderUI({
tagList(url)
})
url_data <- a("JHU CSSE Data sources", href="https://github.com/CSSEGISandData/COVID-19")
url_data_andrew <- a("Thanks to Andrew Lilley for scraping international data", href="https://twitter.com/alil9145")
url_data2 <- a("Data source", href="https://www.gov.uk/guidance/coronavirus-covid-19-information-for-the-public")
output$data_source <- renderUI({
tagList(url_data)
})
output$data_source_comp <- renderUI({
tagList(url_data)
})
output$data_source_andrew <- renderUI({
tagList(url_data_andrew)
})
output$data_source_andrew_comp <- renderUI({
tagList(url_data_andrew)
})
output$data_source2 <- renderUI({
tagList(url_data2)
})
output$data_source_UK <- renderUI({
tagList(url_data2)
})
output$data_source3 <- renderUI({
tagList(url_data2)
})
output$data_source4 <- renderUI({
tagList(url_data2)
})
output$checkGroup <- renderText({
paste(as.character(length(input$checkGroup)))
})
# Echo the raw checkbox selections (used for conditional UI / debugging).
output$checkGroup_county <- renderText({
  paste(as.character(input$checkGroup_county))
})
output$checkGroup_region <- renderText({
  paste(as.character(input$checkGroup_region))
})
# Echo the comparison-tab relative-day slider range.
output$dateRange.100 <- renderPrint({
  input$dateRange.100
})
# Visit counter persisted in Dropbox via rdrop2.
# NOTE(review): this read-increment-write cycle is not atomic; concurrent
# sessions can lose counts. It also runs network I/O inside a render.
output$counter <- renderText({
  library(rdrop2)
  token <- readRDS("token.rds")
  counter <- drop_read_csv("counter.csv", dtoken = token)
  counter$count <- counter$count + 1
  counter <- counter %>%
    select(count)
  # row.names = FALSE keeps the csv schema stable across round-trips; the
  # default added an extra row-name column on every visit.
  write.csv(counter, file = "counter.csv", row.names = FALSE)
  drop_upload("counter.csv", dtoken = token)
  # paste0() has no `sep` argument; the original `sep=""` was silently
  # concatenated into `...` as an extra empty string.
  paste0(counter$count, " site visits")
})
# Single country plots
# Time series of the ticked case/death series for the selected country.
# The two original branches duplicated the whole plot and differed only in
# the y column (absolute vs per-100,000) and axis label, so choose those
# once and build a single plot.
output$countryPlot <- renderPlot({
  # Keep only the series (daily/total cases/deaths) ticked by the user.
  lines <- as.character(input$checkGroup)
  data <- data[data$type %in% lines, ]
  if (input$pop_country == "pop_yes") {
    yvar <- "number_pop"
    ylab_txt <- "Number (per 100,000)"
  } else {
    yvar <- "number"
    ylab_txt <- "Number"
  }
  p <- ggplot(data[data$country == formulaText(), ]) +
    geom_point(aes(x = date, y = .data[[yvar]], col = type), size = 1.5) +
    geom_line(aes(x = date, y = .data[[yvar]], col = type), size = 1) +
    scale_x_date(limits = c(input$dateRange[1], input$dateRange[2])) +
    xlab(label = "") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_cases" = "#000000", "new_cases" = "#e41a1c",
                                   "total_deaths" = "#ff7f00", "new_deaths" = "#a65628"),
                        breaks = c("new_cases", "total_cases", "new_deaths", "total_deaths"),
                        labels = c("Cases (daily)", "Cases (total)", "Deaths (daily)", "Deaths (total)")) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
  # Log-axis toggle applies identically to both y choices.
  if (input$log == 'log_yes') {
    p <- p + scale_y_log10(labels = scales::comma)
  }
  p
})
# country comparisons
# Cross-country comparison on a relative time axis (days since the 100th
# case / since the deaths threshold). The two original branches duplicated
# the whole plot; only the x/y columns, the label and the linear-axis comma
# formatting differed, so those are selected up front.
output$countryPlot_compare <- renderPlot({
  lines2 <- as.character(input$checkGroup_countryCompare)
  # Choose the source frame and the base axis label.
  if (input$compare_by == "cases") {
    data.100 <- data.100[data.100$country %in% lines2, ]
    lab_y <- "Cases"
  } else {
    data.100 <- data.deaths10[data.deaths10$country %in% lines2, ]
    lab_y <- "Deaths"
  }
  if (input$compare_pop == "pop_no") {
    xvar <- "date_rel"
    yvar <- "number"
    ylab_txt <- lab_y
    comma_axis <- TRUE  # original only comma-formats the absolute-count axis
  } else {
    xvar <- "date_rel_pop"
    yvar <- "number_pop"
    ylab_txt <- paste0(lab_y, " (per 100,000)")
    comma_axis <- FALSE
  }
  # Anchor the y-range at the value on day 0 so all curves share a baseline.
  y_min <- min(data.100[[yvar]][data.100[[xvar]] == 0], na.rm = TRUE)
  y_max <- max(data.100[[yvar]], na.rm = TRUE)
  y_scale <- if (comma_axis) {
    scale_y_continuous(limits = c(y_min, y_max), labels = scales::comma)
  } else {
    scale_y_continuous(limits = c(y_min, y_max))
  }
  p2 <- ggplot(data.100) +
    geom_point(aes(x = .data[[xvar]], y = .data[[yvar]], col = country), size = 1.5) +
    geom_line(aes(x = .data[[xvar]], y = .data[[yvar]], col = country), size = 1) +
    scale_x_continuous(limits = c(input$dateRange.100[1], input$dateRange.100[2])) +
    y_scale +
    xlab(label = "Days") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -0.5),
          axis.title.y = element_text(vjust = 2),
          legend.title = element_blank(),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
  if (input$log_compare == 'log_yes') {
    p2 <- p2 + scale_y_log10(limits = c(y_min, y_max), labels = scales::comma)
  }
  p2
})
# UK plot
# Whole-UK time series. The per-100,000 toggle was the only difference
# between the two original branches, so the scaled column is computed once.
output$UKPlot <- renderPlot({
  lines <- as.character(input$checkGroup_UK)
  UK.data <- UK.data[UK.data$type %in% lines, ]
  # 66,440,000 is the hard-coded UK population used by the original for the
  # per-100,000 conversion.
  if (input$pop_UK == "pop_yes") {
    UK.data$yval <- 100000 * UK.data$number / 66440000
    ylab_txt <- "Number (per 100,000)"
  } else {
    UK.data$yval <- UK.data$number
    ylab_txt <- "Number"
  }
  p <- ggplot(UK.data) +
    geom_point(aes(x = date, y = yval, col = type), size = 1.5) +
    geom_line(aes(x = date, y = yval, col = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_UK[1], input$dateRange_UK[2])) +
    xlab(label = "") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_cases" = "#000000", "new_cases" = "#e41a1c",
                                   "total_deaths" = "#ff7f00", "new_deaths" = "#a65628"),
                        breaks = c("new_cases", "total_cases", "new_deaths", "total_deaths"),
                        labels = c("Cases (daily)", "Cases (total)", "Deaths (daily)", "Deaths (total)")) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
  if (input$log_UK == 'log_yes') {
    p <- p + scale_y_log10(labels = scales::comma)
  }
  p
})
# UK plot
# Per-nation (England/Scotland/Wales/NI) series, colour = nation,
# linetype = series. Deduplicated: the original branches differed only in
# the y scaling and label.
output$UKPlot_by_country <- renderPlot({
  lines <- as.character(input$checkGroup_UK)
  UK_by_country <- UK_by_country[UK_by_country$type %in% lines, ]
  val <- input$checkGroup_UK
  # Death series only start on 27 Mar 2020; clamp the x-axis start when the
  # user has ticked nothing but death series on the by-country tab
  # (`&&` replaces the original scalar use of `&`).
  if (length(val) < 3 && input$tabs_UK == 2 &&
      sum("new_deaths" %in% val, "total_deaths" %in% val) == length(val)) {
    date.min <- as.Date("27/03/2020", "%d/%m/%Y")
  } else {
    date.min <- as.Date("09/03/2020", "%d/%m/%Y")
  }
  if (input$pop_UK == "pop_yes") {
    UK_by_country$yval <- 100000 * UK_by_country$number / UK_by_country$pop
    ylab_txt <- "Number (per 100,000)"
  } else {
    UK_by_country$yval <- UK_by_country$number
    ylab_txt <- "Number"
  }
  p <- ggplot(UK_by_country) +
    geom_point(aes(x = date, y = yval, col = country), size = 1.5) +
    geom_line(aes(x = date, y = yval, col = country, linetype = type), size = 1) +
    scale_x_date(limits = c(date.min, input$dateRange_UK[2])) +
    xlab(label = "") + ylab(label = ylab_txt) +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.title = element_blank(),
          legend.text = element_text(size = 13),
          legend.position = 'top',
          legend.spacing.x = unit(0.4, 'cm'),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_linetype_manual(name = "",
                          values = c("total_cases" = 1, "new_cases" = 2,
                                     "total_deaths" = 3, "new_deaths" = 4),
                          breaks = c("total_cases", "new_cases", "total_deaths", "new_deaths"),
                          labels = c("Cases (total)", "Cases (daily)", "Deaths (total)", "Deaths (daily)")) +
    guides(linetype = guide_legend(label.position = "top", keywidth = 2)) +
    theme(legend.direction = "horizontal", legend.box = "vertical")
  if (input$log_UK == 'log_yes') {
    p <- p + scale_y_log10(labels = scales::comma)
  }
  p
})
# England NHS regions plots.
output$EnglandRegionPlot <- renderPlot({
  selectedTypes <- as.character(input$checkGroup_region)
  ## Use the per-100,000 data set when population scaling is requested,
  ## otherwise the raw counts from the global data.region.
  plotData <- if (input$pop == "pop_yes") data.region.pop else data.region
  plotData <- plotData[plotData$type %in% selectedTypes, ]
  regionPlot <- ggplot(plotData) +
    geom_point(aes(x = date, y = number, col = region), size = 1.5) +
    geom_line(aes(x = date, y = number, col = region, linetype = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_region[1], input$dateRange_region[2])) +
    xlab(label = "") +
    ylab(label = if (input$pop == "pop_yes") "Number (per 100,000)" else "Number") +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.title = element_blank(),
          legend.text = element_text(size = 13),
          legend.position = "top",
          legend.spacing.x = unit(0.4, "cm"),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_linetype_manual(name = "",
                          values = c("total_cases" = 1, "new_cases" = 2),
                          breaks = c("total_cases", "new_cases"),
                          labels = c("Cases (total)", "Cases (daily)")) +
    guides(linetype = guide_legend(label.position = "top", keywidth = 2)) +
    theme(legend.direction = "horizontal", legend.box = "vertical")
  ## Switch to a log10 y axis on request.
  if (input$log_region == "log_yes") {
    regionPlot <- regionPlot + scale_y_log10(labels = scales::comma)
  }
  regionPlot
})
# England county plots.
output$englandcountyPlot <- renderPlot({
  selectedTypes <- as.character(input$checkGroup_county)
  countyData <- data.county[data.county$type %in% selectedTypes, ]
  ## Restrict to the county / unitary authority currently selected.
  countyData <- countyData[countyData$county_UA == paste(formulaText_county(), sep = ""), ]
  ggplot(countyData) +
    geom_point(aes(x = date, y = number, col = type), size = 1.5) +
    geom_line(aes(x = date, y = number, col = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_county[1], input$dateRange_county[2])) +
    xlab(label = "") +
    ylab(label = "Number") +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = "top",
          legend.spacing.x = unit(0.4, "cm"),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_cases" = "#000000", "new_cases" = "#e41a1c"),
                        breaks = c("new_cases", "total_cases"),
                        labels = c("Cases (daily)", "Cases (total)")) +
    guides(linetype = guide_legend(override.aes = list(size = 20)))
})
# UK testing plot.
output$UKtestingPlot <- renderPlot({
  ## Restrict to the series selected in the checkbox group.
  selectedTypes <- as.character(input$checkGroup_test)
  testData <- data.test[data.test$type %in% selectedTypes, ]
  basePlot <- ggplot(testData) +
    geom_point(aes(x = date, y = number, col = type), size = 1.5) +
    geom_line(aes(x = date, y = number, col = type, group = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_test[1], input$dateRange_test[2])) +
    xlab(label = "") +
    ylab(label = "Number tested") +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = "top",
          legend.spacing.x = unit(0.4, "cm"),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_tested" = "#000000",
                                   "new_tested" = "#e41a1c"),
                        breaks = c("new_tested", "total_tested"),
                        labels = c("Daily", "Total"))
  ## Switch to a log10 y axis on request.
  if (input$log_test == "log_yes") {
    basePlot <- basePlot + scale_y_log10(labels = scales::comma)
  }
  basePlot
})
# UK testing plot: proportion of tests positive.
output$UKtestingPlot2 <- renderPlot({
  selectedTypes <- as.character(input$checkGroup_test2)
  propData <- data.test[data.test$type %in% selectedTypes, ]
  ## Proportion of tests that came back positive, daily and cumulative.
  ggplot(propData) +
    geom_point(aes(x = date, y = number, col = type), size = 1.5) +
    geom_line(aes(x = date, y = number, col = type, group = type), size = 1) +
    scale_x_date(limits = c(input$dateRange_test2[1], input$dateRange_test2[2])) +
    xlab(label = "") +
    ylab(label = "Prop. positive (%)") +
    theme_classic() +
    theme(axis.text = element_text(size = 13),
          axis.title = element_text(size = 16),
          axis.title.x = element_text(vjust = -1.5),
          axis.title.y = element_text(vjust = 2),
          legend.text = element_text(size = 13),
          legend.position = "top",
          legend.spacing.x = unit(0.4, "cm"),
          panel.grid.major.y = element_line(size = 0.05)) +
    scale_colour_manual(name = "",
                        values = c("total_prop_pos" = "#000000",
                                   "new_prop_pos" = "#e41a1c"),
                        breaks = c("new_prop_pos", "total_prop_pos"),
                        labels = c("Daily", "Total"))
})
})
|
#' Estimate variance components in single trait GWAS
#'
#' Helper function for estimating variance components in single trait GWAS.
#'
#' @noRd
#' @keywords internal
estVarComp <- function(GLSMethod,
                       remlAlgo,
                       trait,
                       pheno,
                       covar,
                       K,
                       chrs,
                       nonMiss,
                       nonMissRepId) {
  ## Estimate variance components.
  ## Dispatch on GLSMethod: "single" uses one kinship matrix K, "multi" uses
  ## one chromosome-specific kinship per entry of chrs (K is then a list
  ## indexed in the same order as chrs).
  if (GLSMethod == "single") {
    if (isTRUE(all.equal(K, diag(nrow = nrow(K)), check.names = FALSE))) {
      ## Kinship matrix is computationally identical to identity matrix.
      ## NOTE(review): this shortcut is overwritten below, since both the
      ## EMMA and NR branches recompute vcovMatrix unconditionally --
      ## confirm whether an early return was intended here.
      vcovMatrix <- diag(nrow = nrow(pheno))
    }
  } else if (GLSMethod == "multi") {
    ## Pre-allocate one list slot per chromosome for both outputs.
    varComp <- vcovMatrix <-
      setNames(vector(mode = "list", length = length(chrs)), paste("chr", chrs))
  }
  if (remlAlgo == "EMMA") {
    ## EMMA needs the phenotype restricted to genotype + trait, and the
    ## covariates as a data.frame keyed by genotype.
    EMMADat <- pheno[, c("genotype", trait)]
    EMMACovar <- as.data.frame(pheno[covar], row.names = pheno[["genotype"]])
    if (GLSMethod == "single") {
      remlObj <- EMMA(dat = EMMADat, trait = trait, covar = EMMACovar, K = K)
      ## Extract varComp and vcovMatrix
      varComp <- remlObj$varComp
      vcovMatrix <- remlObj$vcovMatrix
    } else if (GLSMethod == "multi") {
      for (chr in chrs) {
        ## Compute variance components using chromosome specific kinship.
        remlObj <- EMMA(dat = EMMADat, trait = trait, covar = EMMACovar,
                        K = K[[which(chrs == chr)]])
        ## Compute varcov matrix using var components.
        varComp[[which(chrs == chr)]] <- remlObj$varComp
        vcovMatrix[[which(chrs == chr)]] <- remlObj$vcovMatrix
      }
    }
  } else if (remlAlgo == "NR") {
    ## Newton-Raphson via sommer (see estVarCompNR below).
    if (!is.null(covar)) {
      ## Construct the formula for the fixed part of the model.
      ## Define formula for fixed part. ` needed to accommodate -
      ## in variable names.
      fixed <- as.formula(paste0(trait," ~ `",
                                 paste0(covar, collapse = "` + `"), "`"))
    } else {
      fixed <- as.formula(paste(trait, " ~ 1"))
    }
    if (GLSMethod == "single") {
      vcNR <- estVarCompNR(dat = pheno, fixed = fixed, K = K, nonMiss = nonMiss,
                           nonMissRepId = nonMissRepId)
      varComp <- vcNR$varComp
      vcovMatrix <- vcNR$vcovMatrix
    } else if (GLSMethod == "multi") {
      for (chr in chrs) {
        vcNR <- estVarCompNR(dat = pheno, fixed = fixed,
                             K = K[[which(chrs == chr)]],
                             nonMiss = nonMiss, nonMissRepId = nonMissRepId)
        varComp[[which(chrs == chr)]] <- vcNR$varComp
        vcovMatrix[[which(chrs == chr)]] <- vcNR$vcovMatrix
      } # End loop over chromosomes.
    } # End GLSMethod multi.
  } # End remlAlgo NR.
  ## NOTE(review): if remlAlgo is neither "EMMA" nor "NR" and GLSMethod is
  ## "single", varComp is never assigned -- presumably callers validate
  ## remlAlgo beforehand; confirm.
  return(list(varComp = varComp, vcovMatrix = vcovMatrix))
}
#' Helper function for estimating variance components using NR method.
#'
#' @noRd
#' @keywords internal
estVarCompNR <- function(dat,
                         fixed,
                         K,
                         nonMiss,
                         nonMissRepId) {
  ## Restrict the kinship matrix to genotypes with non-missing observations.
  K <- K[nonMiss, nonMiss]
  ## Fit the mixed model with genotype as random effect (Newton-Raphson
  ## algorithm in sommer).
  modFit <- sommer::mmer(fixed = fixed, data = dat,
                         random = ~ sommer::vs(genotype, Gu = K),
                         verbose = FALSE, date.warning = FALSE)
  ## Extract variance components: first entry = genetic (Vg),
  ## last entry = residual (Ve). Hoisted out of the repeated
  ## unlist(vcMod) calls of the original code.
  vcMod <- unlist(modFit$sigma)
  vg <- vcMod[[1]]
  ve <- vcMod[[length(vcMod)]]
  varComp <- setNames(c(vg, ve), c("Vg", "Ve"))
  ## Variance-covariance matrix Vg * K + Ve * I on the (possibly replicated)
  ## set of genotypes used in the analysis.
  modK <- K[nonMissRepId, nonMissRepId]
  vcovMatrix <- vg * modK + diag(x = ve, nrow = nrow(modK))
  ## Assure that vcovMatrix is positive definite. The original code computed
  ## nearestPD() but discarded its result, so the correction was never
  ## applied; assign it back.
  if (any(eigen(vcovMatrix, symmetric = TRUE,
                only.values = TRUE)$values <= 1e-8)) {
    vcovMatrix <- nearestPD(vcovMatrix)
  }
  return(list(varComp = varComp, vcovMatrix = vcovMatrix))
}
#' Select markers to be excluded from GWAS scan.
#'
#' Helper function for selecting markers to be excluded from GWAS scan.
#' Markers are excluded if they are identical to any of the snpCovariates
#' (including the snpCovariates themselves).
#'
#' @param snpCov A character vector of snpCovariates.
#' @param markers A matrix with marker information.
#' @param allFreq A numerical vector of allele frequencies of the markers in
#' \code{markers}. This could be computed from markers as well but it is
#' needed in the general algorithm so to not redo things unnecessarily it is
#' not redone here.
#'
#' @return A numerical vector of markers to be exluded from the GWAS scan.
#'
#' @noRd
#' @keywords internal
exclMarkers <- function(snpCov,
                        markers,
                        allFreq,
                        ref = NULL) {
  ## Column indices of markers that should be excluded from the scan:
  ## the snp covariates themselves plus any column identical to one of them.
  exclude <- integer()
  if (any(snpCov %in% colnames(markers))) {
    for (snpNr in which(colnames(markers) %in% snpCov)) {
      ## Pre-filter on allele frequency: identical columns must have equal
      ## allele frequencies, so this cheap check prunes most columns.
      candidates <- which(allFreq == allFreq[snpNr])
      refScores <- as.numeric(markers[, snpNr])
      ## Keep only candidates whose scores match the covariate column
      ## exactly (this includes the covariate column itself).
      isCopy <- vapply(candidates, function(colNr) {
        identical(as.numeric(markers[, colNr]), refScores)
      }, FUN.VALUE = logical(1))
      exclude <- union(exclude, candidates[isCopy])
    }
  }
  return(exclude)
}
#' Correction of p-values based on genomic inflation
#'
#' Correction of p-values based on the genomic inflation factor, as in Devlin
#' and Roeder (1999). It is assumed that the p-values come from an F-test with
#' df1 = 1 and df2 = nObs - nCov - 2.
#'
#' @param pVals A numeric vector of p-values between 0 and 1; may contain NA's.
#' @param nObs An integer > 0 indicating the number of individuals.
#' @param nCov An integer > 0 indicating the number of covariables.
#'
#' @return A list with two components:
#' \itemize{
#' \item{\code{pValues} a vector of p-values corrected by the genomic inflation
#' factor, with the same NA's as the input}.
#' \item{\code{inflation} the inflation factor}.
#' }
#'
#' @references Devlin, B. and Roeder K. (1999) Genomic control for association
#' studies. Biometrics, December 1999, Vol. 55(4), p. 997-1004.
#'
#' @noRd
#' @keywords internal
genCtrlPVals <- function(pVals,
                         nObs,
                         nCov = 0) {
  ## Degrees of freedom of the denominator of the F-test.
  df2 <- nObs - nCov - 2
  ## Back-transform the non-missing p-values to F-statistics.
  obsF <- qf(p = na.omit(pVals), df1 = 1, df2 = df2, lower.tail = FALSE)
  ## Genomic inflation factor (Devlin and Roeder): ratio of the observed
  ## median F-statistic to its theoretical median under the null.
  inflation <- median(obsF, na.rm = TRUE) /
    qf(p = 0.5, df1 = 1, df2 = df2, lower.tail = FALSE)
  ## Deflate the F-statistics and convert back to p-values, keeping the
  ## original NA pattern intact.
  pValsNew <- pVals
  pValsNew[!is.na(pVals)] <- pf(q = obsF / inflation, df1 = 1, df2 = df2,
                                lower.tail = FALSE)
  return(list(pValues = pValsNew, inflation = inflation))
}
#' @noRd
#' @keywords internal
extrSignSnps <- function(GWAResult,
                         LODThr,
                         sizeInclRegion,
                         minR2,
                         map,
                         markers,
                         maxScore,
                         pheno,
                         trait) {
  ## Extract SNPs whose LOD score passes the fixed threshold LODThr,
  ## optionally extended with SNPs that lie within sizeInclRegion of a
  ## significant SNP and are in sufficient LD (R2 > minR2) with it.
  signSnpNr <- which(!is.na(GWAResult[["LOD"]]) & GWAResult[["LOD"]] >= LODThr)
  if (length(signSnpNr) > 0) {
    if (sizeInclRegion > 0) {
      ## Add neighbouring SNPs in sufficient LD for every significant SNP.
      snpSelection <- unlist(sapply(X = signSnpNr, FUN = getSNPsInRegionSufLD,
                                    map = map, markers = markers,
                                    sizeInclRegion = sizeInclRegion,
                                    minR2 = minR2))
      snpSelection <- sort(union(snpSelection, signSnpNr))
      snpStatus <- rep(paste("within", sizeInclRegion, "of a significant SNP"),
                       length(snpSelection))
      snpStatus[snpSelection %in% signSnpNr] <- "significant SNP"
    } else {
      snpSelection <- signSnpNr
      snpStatus <- rep("significant SNP", length(signSnpNr))
    }
    ## Compute variance of marker scores, based on genotypes for which
    ## phenotypic data is available. For inbreeders, this depends on
    ## maxScore. It is therefore scaled to marker scores 0, 1 (or 0, 0.5,
    ## 1 if there are heterozygotes).
    ## snpSelection holds integer row numbers, so GWAResult is indexed by
    ## position here (GWAResult is assumed to be a data.table -- TODO
    ## confirm against callers).
    snpVar <- 4 * GWAResult[snpSelection, "effect"] ^ 2 / maxScore ^ 2 *
      apply(X = markers[, GWAResult[snpSelection][["snp"]], drop = FALSE],
            MARGIN = 2, FUN = var)
    propSnpVar <- snpVar[["effect"]] / as.numeric(var(pheno[trait]))
    ## Create data.table with significant snps.
    signSnp <- data.table::data.table(GWAResult[snpSelection, ],
                                      snpStatus = as.factor(snpStatus),
                                      propSnpVar = propSnpVar)
    ## Sort rows by trait, chromosome and position.
    data.table::setkeyv(x = signSnp, cols = c("trait", "chr", "pos"))
  } else {
    ## No significant SNPs. Return empty data.table.
    signSnp <- data.table::data.table()
  }
  return(signSnp)
}
#' @noRd
#' @keywords internal
extrSignSnpsFDR <- function(GWAResult,
                            markers,
                            maxScore,
                            pheno,
                            trait,
                            rho = 0.3,
                            pThr = 0.05,
                            alpha = 0.05) {
  ## Named vector of p-values; names are SNP ids.
  pVals <- setNames(GWAResult$pValue, GWAResult$snp)
  ## Candidate set B: SNPs passing the pre-selection threshold.
  B <- pVals[pVals < pThr]
  ## Marker scores for the candidate SNPs.
  BMarkers <- markers[, colnames(markers) %in% names(B), drop = FALSE]
  ## Per-cluster selection threshold.
  selThr <- alpha / length(pVals)
  ## Initialize values.
  BpVals <- numeric()
  snpSelection <- character()
  continue <- TRUE
  nClust <- 0
  while (length(B) > 0 && continue) {
    ## Next cluster is represented by the remaining SNP with lowest p-value.
    clusterRep <- which.min(B)
    ## Only continue if the next p-value satisfies the criterion.
    ## After the first failure all following clusters are irrelevant.
    if (B[clusterRep] < (nClust + 1) * selThr) {
      ## Add p-value for the representing SNP to the output.
      BpVals <- c(BpVals, B[clusterRep])
      ## Find all remaining SNPs with LD of at least rho with the
      ## representing SNP (the representing SNP itself is included since
      ## its self-correlation is 1).
      LD <- cor(BMarkers[, names(clusterRep)], BMarkers)
      LDSet <- names(LD[, LD > rho])
      ## Remove selected SNPs from B and from the candidate markers.
      B <- B[!names(B) %in% LDSet]
      BMarkers <- BMarkers[, !colnames(BMarkers) %in% LDSet, drop = FALSE]
      ## Add the LD set to the selected SNPs, with the representing SNP
      ## first in its cluster. Bug fix: the original used
      ## union(names(snpSelection), LDSet); names() of an unnamed character
      ## vector is always NULL, so the representing SNP was not guaranteed
      ## to lead its cluster as the comment claimed. Use the name of the
      ## representing SNP instead.
      snpSelection <- c(snpSelection, union(names(clusterRep), LDSet))
      nClust <- nClust + 1
    } else {
      continue <- FALSE
    }
  }
  if (nClust > 0) {
    ## Representing SNPs are "significant"; the rest of each cluster is
    ## only in LD with a significant SNP.
    snpStatus <- ifelse(snpSelection %in% names(BpVals), "significant SNP",
                        "within LD of significant SNP")
    ## Compute variance of marker scores, based on genotypes for which
    ## phenotypic data is available. For inbreeders, this depends on
    ## maxScore. It is therefore scaled to marker scores 0, 1 (or 0, 0.5,
    ## 1 if there are heterozygotes).
    snpVar <- 4 * GWAResult[snpSelection, "effect"] ^ 2 / maxScore ^ 2 *
      apply(X = markers[, snpSelection, drop = FALSE], MARGIN = 2, FUN = var)
    propSnpVar <- snpVar[["effect"]] / as.numeric(var(pheno[trait]))
    ## Create data.table with significant snps.
    signSnp <- data.table::data.table(GWAResult[snpSelection, ],
                                      snpStatus = as.factor(snpStatus),
                                      propSnpVar = propSnpVar)
    ## Sort rows by trait, chromosome and position.
    data.table::setkeyv(x = signSnp, cols = c("trait", "chr", "pos"))
  } else {
    ## No significant SNPs. Return empty data.table.
    signSnp <- data.table::data.table()
  }
  return(signSnp)
}
#' get the SNPs close to a given SNP with sufficient LD
#'
#' \code{getSNPsInRegionSufLD} extracts the SNPs from a map file that are
#' within a given distance of a reference SNP (on either side). Only those SNPs
#' that are in sufficient linkage disequilibrium (LD) with the reference SNP
#' are returned.
#'
#' @param gData An object of class gData with at least the map and markers
#' included.
#' @param snp An integer indicating the index of the reference SNP within
#' the map.
#' @param sizeInclRegion A numerical value indicating the size of the region on
#' the chromosome in which to look for SNPs.
#' @param minR2 A numerical value between 0 and 1 indicating the minimum
#' LD (in terms of R^2) that the SNPs should have with the reference SNP.
#'
#' @return An integer vector with indices of the SNPs that are within the
#' given \code{sizeInclRegion} and have a minimum LD with the reference SNP.
#'
#' @noRd
#' @keywords internal
getSNPsInRegionSufLD <- function(snp,
                                 map,
                                 markers,
                                 sizeInclRegion = 5000,
                                 minR2 = 0.5) {
  ## Candidate SNPs: on the same chromosome and within sizeInclRegion of
  ## the reference SNP, excluding the reference SNP itself.
  onChr <- map[["chr"]] == map[snp, "chr"]
  inRegion <- abs(map[["pos"]] - map[snp, "pos"]) <= sizeInclRegion
  candidateSnps <- setdiff(which(inRegion & onChr), snp)
  if (length(candidateSnps) == 0) {
    return(integer())
  }
  ## Squared correlation of the reference SNP with each candidate.
  R2 <- suppressWarnings(cor(markers[, snp, drop = FALSE],
                             markers[, candidateSnps, drop = FALSE]) ^ 2)
  ## Keep candidates whose R2 with the reference SNP exceeds minR2 and map
  ## the surviving names back to row indices of the map.
  sufLDNames <- colnames(R2[, R2 > minR2, drop = FALSE])
  which(rownames(map) %in% sufLDNames)
}
| /R/GWASHelp.R | no_license | ntduc11/statgenGWAS | R | false | false | 14,027 | r | #' Estimate variance components in single trait GWAS
#'
#' Helper function for estimating variance components in single trait GWAS.
#'
#' @noRd
#' @keywords internal
estVarComp <- function(GLSMethod,
                       remlAlgo,
                       trait,
                       pheno,
                       covar,
                       K,
                       chrs,
                       nonMiss,
                       nonMissRepId) {
  ## Estimate variance components.
  ## Dispatch on GLSMethod: "single" uses one kinship matrix K, "multi" uses
  ## one chromosome-specific kinship per entry of chrs (K is then a list
  ## indexed in the same order as chrs).
  if (GLSMethod == "single") {
    if (isTRUE(all.equal(K, diag(nrow = nrow(K)), check.names = FALSE))) {
      ## Kinship matrix is computationally identical to identity matrix.
      ## NOTE(review): this shortcut is overwritten below, since both the
      ## EMMA and NR branches recompute vcovMatrix unconditionally --
      ## confirm whether an early return was intended here.
      vcovMatrix <- diag(nrow = nrow(pheno))
    }
  } else if (GLSMethod == "multi") {
    ## Pre-allocate one list slot per chromosome for both outputs.
    varComp <- vcovMatrix <-
      setNames(vector(mode = "list", length = length(chrs)), paste("chr", chrs))
  }
  if (remlAlgo == "EMMA") {
    ## EMMA needs the phenotype restricted to genotype + trait, and the
    ## covariates as a data.frame keyed by genotype.
    EMMADat <- pheno[, c("genotype", trait)]
    EMMACovar <- as.data.frame(pheno[covar], row.names = pheno[["genotype"]])
    if (GLSMethod == "single") {
      remlObj <- EMMA(dat = EMMADat, trait = trait, covar = EMMACovar, K = K)
      ## Extract varComp and vcovMatrix
      varComp <- remlObj$varComp
      vcovMatrix <- remlObj$vcovMatrix
    } else if (GLSMethod == "multi") {
      for (chr in chrs) {
        ## Compute variance components using chromosome specific kinship.
        remlObj <- EMMA(dat = EMMADat, trait = trait, covar = EMMACovar,
                        K = K[[which(chrs == chr)]])
        ## Compute varcov matrix using var components.
        varComp[[which(chrs == chr)]] <- remlObj$varComp
        vcovMatrix[[which(chrs == chr)]] <- remlObj$vcovMatrix
      }
    }
  } else if (remlAlgo == "NR") {
    ## Newton-Raphson via sommer (see estVarCompNR below).
    if (!is.null(covar)) {
      ## Construct the formula for the fixed part of the model.
      ## Define formula for fixed part. ` needed to accommodate -
      ## in variable names.
      fixed <- as.formula(paste0(trait," ~ `",
                                 paste0(covar, collapse = "` + `"), "`"))
    } else {
      fixed <- as.formula(paste(trait, " ~ 1"))
    }
    if (GLSMethod == "single") {
      vcNR <- estVarCompNR(dat = pheno, fixed = fixed, K = K, nonMiss = nonMiss,
                           nonMissRepId = nonMissRepId)
      varComp <- vcNR$varComp
      vcovMatrix <- vcNR$vcovMatrix
    } else if (GLSMethod == "multi") {
      for (chr in chrs) {
        vcNR <- estVarCompNR(dat = pheno, fixed = fixed,
                             K = K[[which(chrs == chr)]],
                             nonMiss = nonMiss, nonMissRepId = nonMissRepId)
        varComp[[which(chrs == chr)]] <- vcNR$varComp
        vcovMatrix[[which(chrs == chr)]] <- vcNR$vcovMatrix
      } # End loop over chromosomes.
    } # End GLSMethod multi.
  } # End remlAlgo NR.
  ## NOTE(review): if remlAlgo is neither "EMMA" nor "NR" and GLSMethod is
  ## "single", varComp is never assigned -- presumably callers validate
  ## remlAlgo beforehand; confirm.
  return(list(varComp = varComp, vcovMatrix = vcovMatrix))
}
#' Helper function for estimating variance components using NR method.
#'
#' @noRd
#' @keywords internal
estVarCompNR <- function(dat,
                         fixed,
                         K,
                         nonMiss,
                         nonMissRepId) {
  ## Restrict the kinship matrix to genotypes with non-missing observations.
  K <- K[nonMiss, nonMiss]
  ## Fit the mixed model with genotype as random effect (Newton-Raphson
  ## algorithm in sommer).
  modFit <- sommer::mmer(fixed = fixed, data = dat,
                         random = ~ sommer::vs(genotype, Gu = K),
                         verbose = FALSE, date.warning = FALSE)
  ## Extract variance components: first entry = genetic (Vg),
  ## last entry = residual (Ve). Hoisted out of the repeated
  ## unlist(vcMod) calls of the original code.
  vcMod <- unlist(modFit$sigma)
  vg <- vcMod[[1]]
  ve <- vcMod[[length(vcMod)]]
  varComp <- setNames(c(vg, ve), c("Vg", "Ve"))
  ## Variance-covariance matrix Vg * K + Ve * I on the (possibly replicated)
  ## set of genotypes used in the analysis.
  modK <- K[nonMissRepId, nonMissRepId]
  vcovMatrix <- vg * modK + diag(x = ve, nrow = nrow(modK))
  ## Assure that vcovMatrix is positive definite. The original code computed
  ## nearestPD() but discarded its result, so the correction was never
  ## applied; assign it back.
  if (any(eigen(vcovMatrix, symmetric = TRUE,
                only.values = TRUE)$values <= 1e-8)) {
    vcovMatrix <- nearestPD(vcovMatrix)
  }
  return(list(varComp = varComp, vcovMatrix = vcovMatrix))
}
#' Select markers to be excluded from GWAS scan.
#'
#' Helper function for selecting markers to be excluded from GWAS scan.
#' Markers are excluded if they are identical to any of the snpCovariates
#' (including the snpCovariates themselves).
#'
#' @param snpCov A character vector of snpCovariates.
#' @param markers A matrix with marker information.
#' @param allFreq A numerical vector of allele frequencies of the markers in
#' \code{markers}. This could be computed from markers as well but it is
#' needed in the general algorithm so to not redo things unnecessarily it is
#' not redone here.
#'
#' @return A numerical vector of markers to be exluded from the GWAS scan.
#'
#' @noRd
#' @keywords internal
exclMarkers <- function(snpCov,
                        markers,
                        allFreq,
                        ref = NULL) {
  ## Column indices of markers that should be excluded from the scan:
  ## the snp covariates themselves plus any column identical to one of them.
  exclude <- integer()
  if (any(snpCov %in% colnames(markers))) {
    for (snpNr in which(colnames(markers) %in% snpCov)) {
      ## Pre-filter on allele frequency: identical columns must have equal
      ## allele frequencies, so this cheap check prunes most columns.
      candidates <- which(allFreq == allFreq[snpNr])
      refScores <- as.numeric(markers[, snpNr])
      ## Keep only candidates whose scores match the covariate column
      ## exactly (this includes the covariate column itself).
      isCopy <- vapply(candidates, function(colNr) {
        identical(as.numeric(markers[, colNr]), refScores)
      }, FUN.VALUE = logical(1))
      exclude <- union(exclude, candidates[isCopy])
    }
  }
  return(exclude)
}
#' Correction of p-values based on genomic inflation
#'
#' Correction of p-values based on the genomic inflation factor, as in Devlin
#' and Roeder (1999). It is assumed that the p-values come from an F-test with
#' df1 = 1 and df2 = nObs - nCov - 2.
#'
#' @param pVals A numeric vector of p-values between 0 and 1; may contain NA's.
#' @param nObs An integer > 0 indicating the number of individuals.
#' @param nCov An integer > 0 indicating the number of covariables.
#'
#' @return A list with two components:
#' \itemize{
#' \item{\code{pValues} a vector of p-values corrected by the genomic inflation
#' factor, with the same NA's as the input}.
#' \item{\code{inflation} the inflation factor}.
#' }
#'
#' @references Devlin, B. and Roeder K. (1999) Genomic control for association
#' studies. Biometrics, December 1999, Vol. 55(4), p. 997-1004.
#'
#' @noRd
#' @keywords internal
genCtrlPVals <- function(pVals,
                         nObs,
                         nCov = 0) {
  ## Degrees of freedom of the denominator of the F-test.
  df2 <- nObs - nCov - 2
  ## Back-transform the non-missing p-values to F-statistics.
  obsF <- qf(p = na.omit(pVals), df1 = 1, df2 = df2, lower.tail = FALSE)
  ## Genomic inflation factor (Devlin and Roeder): ratio of the observed
  ## median F-statistic to its theoretical median under the null.
  inflation <- median(obsF, na.rm = TRUE) /
    qf(p = 0.5, df1 = 1, df2 = df2, lower.tail = FALSE)
  ## Deflate the F-statistics and convert back to p-values, keeping the
  ## original NA pattern intact.
  pValsNew <- pVals
  pValsNew[!is.na(pVals)] <- pf(q = obsF / inflation, df1 = 1, df2 = df2,
                                lower.tail = FALSE)
  return(list(pValues = pValsNew, inflation = inflation))
}
#' @noRd
#' @keywords internal
extrSignSnps <- function(GWAResult,
                         LODThr,
                         sizeInclRegion,
                         minR2,
                         map,
                         markers,
                         maxScore,
                         pheno,
                         trait) {
  ## Extract SNPs whose LOD score passes the fixed threshold LODThr,
  ## optionally extended with SNPs that lie within sizeInclRegion of a
  ## significant SNP and are in sufficient LD (R2 > minR2) with it.
  signSnpNr <- which(!is.na(GWAResult[["LOD"]]) & GWAResult[["LOD"]] >= LODThr)
  if (length(signSnpNr) > 0) {
    if (sizeInclRegion > 0) {
      ## Add neighbouring SNPs in sufficient LD for every significant SNP.
      snpSelection <- unlist(sapply(X = signSnpNr, FUN = getSNPsInRegionSufLD,
                                    map = map, markers = markers,
                                    sizeInclRegion = sizeInclRegion,
                                    minR2 = minR2))
      snpSelection <- sort(union(snpSelection, signSnpNr))
      snpStatus <- rep(paste("within", sizeInclRegion, "of a significant SNP"),
                       length(snpSelection))
      snpStatus[snpSelection %in% signSnpNr] <- "significant SNP"
    } else {
      snpSelection <- signSnpNr
      snpStatus <- rep("significant SNP", length(signSnpNr))
    }
    ## Compute variance of marker scores, based on genotypes for which
    ## phenotypic data is available. For inbreeders, this depends on
    ## maxScore. It is therefore scaled to marker scores 0, 1 (or 0, 0.5,
    ## 1 if there are heterozygotes).
    ## snpSelection holds integer row numbers, so GWAResult is indexed by
    ## position here (GWAResult is assumed to be a data.table -- TODO
    ## confirm against callers).
    snpVar <- 4 * GWAResult[snpSelection, "effect"] ^ 2 / maxScore ^ 2 *
      apply(X = markers[, GWAResult[snpSelection][["snp"]], drop = FALSE],
            MARGIN = 2, FUN = var)
    propSnpVar <- snpVar[["effect"]] / as.numeric(var(pheno[trait]))
    ## Create data.table with significant snps.
    signSnp <- data.table::data.table(GWAResult[snpSelection, ],
                                      snpStatus = as.factor(snpStatus),
                                      propSnpVar = propSnpVar)
    ## Sort rows by trait, chromosome and position.
    data.table::setkeyv(x = signSnp, cols = c("trait", "chr", "pos"))
  } else {
    ## No significant SNPs. Return empty data.table.
    signSnp <- data.table::data.table()
  }
  return(signSnp)
}
#' @noRd
#' @keywords internal
extrSignSnpsFDR <- function(GWAResult,
                            markers,
                            maxScore,
                            pheno,
                            trait,
                            rho = 0.3,
                            pThr = 0.05,
                            alpha = 0.05) {
  ## Named vector of p-values; names are SNP ids.
  pVals <- setNames(GWAResult$pValue, GWAResult$snp)
  ## Candidate set B: SNPs passing the pre-selection threshold.
  B <- pVals[pVals < pThr]
  ## Marker scores for the candidate SNPs.
  BMarkers <- markers[, colnames(markers) %in% names(B), drop = FALSE]
  ## Per-cluster selection threshold.
  selThr <- alpha / length(pVals)
  ## Initialize values.
  BpVals <- numeric()
  snpSelection <- character()
  continue <- TRUE
  nClust <- 0
  while (length(B) > 0 && continue) {
    ## Next cluster is represented by the remaining SNP with lowest p-value.
    clusterRep <- which.min(B)
    ## Only continue if the next p-value satisfies the criterion.
    ## After the first failure all following clusters are irrelevant.
    if (B[clusterRep] < (nClust + 1) * selThr) {
      ## Add p-value for the representing SNP to the output.
      BpVals <- c(BpVals, B[clusterRep])
      ## Find all remaining SNPs with LD of at least rho with the
      ## representing SNP (the representing SNP itself is included since
      ## its self-correlation is 1).
      LD <- cor(BMarkers[, names(clusterRep)], BMarkers)
      LDSet <- names(LD[, LD > rho])
      ## Remove selected SNPs from B and from the candidate markers.
      B <- B[!names(B) %in% LDSet]
      BMarkers <- BMarkers[, !colnames(BMarkers) %in% LDSet, drop = FALSE]
      ## Add the LD set to the selected SNPs, with the representing SNP
      ## first in its cluster. Bug fix: the original used
      ## union(names(snpSelection), LDSet); names() of an unnamed character
      ## vector is always NULL, so the representing SNP was not guaranteed
      ## to lead its cluster as the comment claimed. Use the name of the
      ## representing SNP instead.
      snpSelection <- c(snpSelection, union(names(clusterRep), LDSet))
      nClust <- nClust + 1
    } else {
      continue <- FALSE
    }
  }
  if (nClust > 0) {
    ## Representing SNPs are "significant"; the rest of each cluster is
    ## only in LD with a significant SNP.
    snpStatus <- ifelse(snpSelection %in% names(BpVals), "significant SNP",
                        "within LD of significant SNP")
    ## Compute variance of marker scores, based on genotypes for which
    ## phenotypic data is available. For inbreeders, this depends on
    ## maxScore. It is therefore scaled to marker scores 0, 1 (or 0, 0.5,
    ## 1 if there are heterozygotes).
    snpVar <- 4 * GWAResult[snpSelection, "effect"] ^ 2 / maxScore ^ 2 *
      apply(X = markers[, snpSelection, drop = FALSE], MARGIN = 2, FUN = var)
    propSnpVar <- snpVar[["effect"]] / as.numeric(var(pheno[trait]))
    ## Create data.table with significant snps.
    signSnp <- data.table::data.table(GWAResult[snpSelection, ],
                                      snpStatus = as.factor(snpStatus),
                                      propSnpVar = propSnpVar)
    ## Sort rows by trait, chromosome and position.
    data.table::setkeyv(x = signSnp, cols = c("trait", "chr", "pos"))
  } else {
    ## No significant SNPs. Return empty data.table.
    signSnp <- data.table::data.table()
  }
  return(signSnp)
}
#' get the SNPs close to a given SNP with sufficient LD
#'
#' \code{getSNPsInRegionSufLD} extracts the SNPs from a map file that are
#' within a given distance of a reference SNP (on either side). Only those SNPs
#' that are in sufficient linkage disequilibrium (LD) with the reference SNP
#' are returned.
#'
#' @param gData An object of class gData with at least the map and markers
#' included.
#' @param snp An integer indicating the index of the reference SNP within
#' the map.
#' @param sizeInclRegion A numerical value indicating the size of the region on
#' the chromosome in which to look for SNPs.
#' @param minR2 A numerical value between 0 and 1 indicating the minimum
#' LD (in terms of R^2) that the SNPs should have with the reference SNP.
#'
#' @return An integer vector with indices of the SNPs that are within the
#' given \code{sizeInclRegion} and have a minimum LD with the reference SNP.
#'
#' @noRd
#' @keywords internal
getSNPsInRegionSufLD <- function(snp,
                                 map,
                                 markers,
                                 sizeInclRegion = 5000,
                                 minR2 = 0.5) {
  ## Candidate SNPs: on the same chromosome and within sizeInclRegion of
  ## the reference SNP, excluding the reference SNP itself.
  onChr <- map[["chr"]] == map[snp, "chr"]
  inRegion <- abs(map[["pos"]] - map[snp, "pos"]) <= sizeInclRegion
  candidateSnps <- setdiff(which(inRegion & onChr), snp)
  if (length(candidateSnps) == 0) {
    return(integer())
  }
  ## Squared correlation of the reference SNP with each candidate.
  R2 <- suppressWarnings(cor(markers[, snp, drop = FALSE],
                             markers[, candidateSnps, drop = FALSE]) ^ 2)
  ## Keep candidates whose R2 with the reference SNP exceeds minR2 and map
  ## the surviving names back to row indices of the map.
  sufLDNames <- colnames(R2[, R2 > minR2, drop = FALSE])
  which(rownames(map) %in% sufLDNames)
}
|
# Load the Wisconsin Diagnostic Breast Cancer data (the file has no header row).
data <- read.csv("wdbc.data", header = F)
#Column names from wdbc.names
features <- c("radius", "texture", "perimeter", "area", "smoothness", "compactness", "concavity", "concave_points", "symmetry", "fractal_dimension")
# Each feature appears three times: mean, standard error and "worst" value,
# giving 2 + 3 * 10 = 32 columns in total.
names(data) <- c("id", "diagnosis", paste0(features, "_mean"), paste0(features, "_std_err"), paste0(features, "_worst"))
#Separate ID from variables, convert diagnosis to a numeric. (M=1, B=0)
wdbc.data <- data[,c(3:32)]
row.names(wdbc.data) <- data$id
# factor() levels sort alphabetically (B, M), so as.numeric gives B=1, M=2;
# subtracting 1 maps B=0 (benign), M=1 (malignant).
ready_data <- cbind(wdbc.data, as.numeric(factor(data$diagnosis))-1)
colnames(ready_data)[31] <- "diagnosis"
# Scale a numeric vector to Euclidean (L2) unit length.
unit_length <- function(x) {
  x / sqrt(sum(x * x))
}
unit_length_data <- as.data.frame(lapply(ready_data, unit_length)) | /Normalisation/Unit Length Normalisation.R | no_license | CBProgramming/machine-learning-breast-cancer | R | false | false | 705 | r | data <- read.csv("wdbc.data", header = F)
#Column names from wdbc.names
features <- c("radius", "texture", "perimeter", "area", "smoothness", "compactness", "concavity", "concave_points", "symmetry", "fractal_dimension")
names(data) <- c("id", "diagnosis", paste0(features, "_mean"), paste0(features, "_std_err"), paste0(features, "_worst"))
#Separate ID from variables, convert diagnosis to a numeric. (M=1, B=0)
wdbc.data <- data[,c(3:32)]
row.names(wdbc.data) <- data$id
ready_data <- cbind(wdbc.data, as.numeric(factor(data$diagnosis))-1)
colnames(ready_data)[31] <- "diagnosis"
# Rescale a numeric vector so that its Euclidean (L2) norm equals 1.
unit_length <- function(x) {
  l2_norm <- sqrt(sum(x^2))
  x / l2_norm
}
unit_length_data <- as.data.frame(lapply(ready_data, unit_length)) |
# plot1.R
# Thu Nov 6 18:21:25 EST 2014
# D. Von Pless
#
# Read in two days' data from household power consumption file (Feb 1 and 2, 2007). Plot a
# histogram of the Global Active Power variable (in kW) in red color to file 'plot1.png'
# It is assumed you have the requisite data file in your working directory. See README.md for
# link to this large text file.
# Required libraries:
library(sqldf) # To read in only the two days' data required
library(lubridate) # Easier to work with dates
# NOTE(review): lubridate is loaded but not used in this script -- confirm before removing.
# Read in data
# sqldf references the connection 'f' by name inside the SQL string, so only
# the two matching days are materialized rather than the whole large file.
f<-file("household_power_consumption.txt")
df<-sqldf("select * from f where Date='2/2/2007' OR Date='1/2/2007'", file.format=list(sep=";", header=TRUE))
# Generate plot 1 as a .PNG file type.
png(filename="plot1.png", width=480, height=480)
with(df,hist(Global_active_power,col="red",xlab="Global Active Power (kilowatts)", main="Global Active Power"))
dev.off()
| /plot1.R | no_license | dvonpless/ExData_Plotting1 | R | false | false | 889 | r | # plot1.R
# plot1.R -- histogram of Global Active Power (kW) for 2007-02-01/2007-02-02,
# written to plot1.png. The file household_power_consumption.txt must be in
# the working directory (see README.md for the download link).
# D. Von Pless, Thu Nov 6 18:21:25 EST 2014
library(sqldf)     # read only the two required days from the large file
library(lubridate) # date helpers
# Load just the rows for the two target dates; sqldf queries the connection
# 'f' by name, so the full file is never read into memory.
f <- file("household_power_consumption.txt")
power_df <- sqldf("select * from f where Date='2/2/2007' OR Date='1/2/2007'",
                  file.format = list(sep = ";", header = TRUE))
# Render the histogram straight to a 480 x 480 PNG device.
png(filename = "plot1.png", width = 480, height = 480)
hist(power_df$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
# *****************************************************************************
# R Script for preparing systematic random samples for subsequent analysis
# related to the manuscript "Dealing with clustered samples for assessing map
# accuracy by cross-validation".
# Contact: Sytze de Bruin, Wageningen University, Laboratory of Geo-information
# Science and Remote Sensing, email: sytze.debruin@wur.nl
# May 3, 2022
# *****************************************************************************
# ****** load required library *******
library(terra)
# ************ GLOBALS ***************
# Paths are relative to this script's directory.
infolder <- "../data"
outfolder <- "../samples"
n_samp <- 100 # number of sample replicates
# ********* load input data **********
# download data from https://doi.org/10.5281/zenodo.6513429
# msk: raster mask of the study area (cells with value 1 are inside).
msk <- rast(file.path(infolder, "TOTmask.tif"))
AGBstack <- rast(file.path(infolder, "AGBstack.tif"))
OCSstack <- rast(file.path(infolder, "OCSstack.tif"))
# create outfolders if they don't exist
if(!dir.exists(outfolder))
dir.create(outfolder)
if(!dir.exists(paste0(outfolder, "/regular")))
dir.create(paste0(outfolder, "/regular"))
# ******* create the samples ********
# sample size over entire extent, aiming for 5000 points within the study area
# found by earlier iterations
esamp <- 12400
# Fixed seed so the n_samp random grid shifts are reproducible.
set.seed(1234567)
for (i in 1:n_samp){
# Systematic (regular) point grid over the full raster extent.
extSmp <- spatSample(msk, esamp, method="regular", as.points=T)
# Shift the whole grid by a random offset in x and y so that each replicate
# is a differently placed systematic sample.
maxShft <- 11500 # min(distance(extSmp)) * 0.5
shftSmp <- shift(extSmp, runif(1, -maxShft, maxShft),
runif(1, -maxShft, maxShft))
# Keep only shifted points that still fall inside the study-area mask.
tst <- extract(msk, shftSmp)
idx <- which(tst[,2] == 1)
shftSmp <- shftSmp[idx,]
# Sample the response rasters at the retained points; drop the point ID.
AGBdata <- extract(AGBstack, shftSmp)
OCSdata <- extract(OCSstack, shftSmp)
AGBdata$ID <- NULL
OCSdata$ID <- NULL
# Land-cover class as a factor with all 8 levels, even if some are absent.
AGBdata$glc2017 <- factor(AGBdata$glc2017, levels=1:8)
OCSdata$glc2017 <- factor(OCSdata$glc2017, levels=1:8)
# One .Rdata file per replicate, numbered 001..100.
fname <- paste0("AGBdata", sprintf("%03d", i), ".Rdata")
save(AGBdata, file=file.path(outfolder, "regular", fname))
fname <- paste0("OCSdata", sprintf("%03d", i), ".Rdata")
save(OCSdata, file=file.path(outfolder, "regular", fname))
}
| /R/sample_regular.R | permissive | sytbru/CV-clustered-data | R | false | false | 2,137 | r | # *****************************************************************************
# R Script for preparing systematic random samples for subsequent analysis
# related to the manuscript "Dealing with clustered samples for assessing map
# accuracy by cross-validation".
# Contact: Sytze de Bruin, Wageningen University, Laboratory of Geo-information
# Science and Remote Sensing, email: sytze.debruin@wur.nl
# May 3, 2022
# *****************************************************************************
# ****** load required library *******
library(terra)
# ************ GLOBALS ***************
# Paths are relative to this script's directory.
infolder <- "../data"
outfolder <- "../samples"
n_samp <- 100 # number of sample replicates
# ********* load input data **********
# download data from https://doi.org/10.5281/zenodo.6513429
# msk: raster mask of the study area (cells with value 1 are inside).
msk <- rast(file.path(infolder, "TOTmask.tif"))
AGBstack <- rast(file.path(infolder, "AGBstack.tif"))
OCSstack <- rast(file.path(infolder, "OCSstack.tif"))
# create outfolders if they don't exist
if(!dir.exists(outfolder))
dir.create(outfolder)
if(!dir.exists(paste0(outfolder, "/regular")))
dir.create(paste0(outfolder, "/regular"))
# ******* create the samples ********
# sample size over entire extent, aiming for 5000 points within the study area
# found by earlier iterations
esamp <- 12400
# Fixed seed: the sequence of runif() grid shifts is reproducible.
set.seed(1234567)
for (i in 1:n_samp){
# Systematic point grid over the full extent, randomly shifted below.
extSmp <- spatSample(msk, esamp, method="regular", as.points=T)
maxShft <- 11500 # min(distance(extSmp)) * 0.5
shftSmp <- shift(extSmp, runif(1, -maxShft, maxShft),
runif(1, -maxShft, maxShft))
# Discard shifted points that fall outside the study-area mask.
tst <- extract(msk, shftSmp)
idx <- which(tst[,2] == 1)
shftSmp <- shftSmp[idx,]
# Extract predictor/response values at the retained points; drop point IDs.
AGBdata <- extract(AGBstack, shftSmp)
OCSdata <- extract(OCSstack, shftSmp)
AGBdata$ID <- NULL
OCSdata$ID <- NULL
# Land-cover class coded as a factor with the full 1:8 level set.
AGBdata$glc2017 <- factor(AGBdata$glc2017, levels=1:8)
OCSdata$glc2017 <- factor(OCSdata$glc2017, levels=1:8)
# Save one replicate per file, numbered 001..100.
fname <- paste0("AGBdata", sprintf("%03d", i), ".Rdata")
save(AGBdata, file=file.path(outfolder, "regular", fname))
fname <- paste0("OCSdata", sprintf("%03d", i), ".Rdata")
save(OCSdata, file=file.path(outfolder, "regular", fname))
}
|
### Z1: 1 for DFMO and 0 for Placebo
### Z2: 1 for base>4 and 0 for base<=4
#baseline data
basedata <- read.csv("kbase.csv",header=TRUE)
#cancer data
cancdata <- read.csv("kcanc.csv",header=TRUE)
attach(basedata)
attach(cancdata)
str(basedata)
dim(basedata)
str(cancdata)
dim(cancdata)
base <- basedata$scbase
trtgrp <- basedata$ trtgrp
age<- basedata$age
sex<- basedata$sex
table(trtgrp)
### m[i]: total number of obs times for experimental subject i
id <- cancdata$randno
length(id)
n <- nrow(basedata) # n different experimental subject
n
m <- numeric(n)
for (i in 1:n)
m[i] <- sum(id==i) ## count the number of id==i, that is to count how many time ith subject is observed
max_observation_time = max(m)
max_observation_time
### Z: here is defined by categories(treatment or placebo)
# Z is defined as 2 rows 291 columns with elements 0
Z <- matrix(0, 2, n)
#Z[1,] <- as.numeric(trtgrp=="DFMO");Z[2,] <- base (trtgrp=="PLACEBO)
#Z[,trtgrp=="DFMO" & base<=4] denote for subject have treatment
#and Total Skin Cancers Reported up to Randomization(scbase) is less or equal to 4
#trtgrp2 <- ifelse((trtgrp == "PLACEBO"),1,0)
#trtgrp2
Z[,trtgrp=="DFMO" & base<2] <- c(1, 0)
Z[,trtgrp=="DFMO" & base>=2] <- c(1, 1)
Z[,trtgrp=="PLACEBO" & base<2] <- c(0, 0)
Z[,trtgrp=="PLACEBO" & base>=2] <- c(0, 1)
Z[1,]
Z[2,]
dim(Z)
#library(DescTools)
#Desc(Z[1,])
#table(Z[1,],Z[2,])
#Desc(table(Z[1,],Z[2,]))
sex <- ifelse(basedata$sex=="Male",1,0)
length(sex)
### T=T2 for obs times on 2-dim count data.
### Note that subject 267 has no count data, therefore removed
### T is a n by nt matrix;
### T[i,j] gives on the jth oberservation time, how many days from randomization does ith subject has
### note that here the length of study is set to 1
T <- matrix(0, n, max(m))
T
for (i in 1:n) {
if (i!=267)
T[i, 1:m[i]] <- sort(dysfrnd[id==i])
}
dim(T)
T # T[i,j] mean the days from randamization for jth observation time of ith subject
cdata <- cancdata[-267,]
n <- n-1
T2 <- T <- T[-267,]; m <- m[-267]; Z <- Z[,-267];base<- base[-267]; trtgrp <- trtgrp[-267];
sex<-sex[-267]; age<-age[-267]
id <- id[id!=267]
table(id)
treatment = Z[1,]
base2 = Z[2,] # base2 = 1 stands for base>=4
table(treatment,base)
table(treatment,base2)
table(treatment)
dim(T2)
max(T2[,])/365
max(dysfrnd)
length(dysfrnd)
max(T2)
#The longest day from randamization is 1879, the trial has already taken more than 5 years on
T <- T/max(T)
#Treat the entire timeline as 1
# let the longest days from randamization =1, then to see how long each obearvation takes comparing to 1
T
dim(T)
min(T) #means for the jth obearvation of ith subject, the days of randomizaiton is 0;
# larger T[i,j] means longer the trial takes
#--------------------------------------------------------------------------------------------
##### S: collection of all obs times(different level of days from randomization)
### Exclude Randomization days that is 0
S1 = sort(unique(T2[T2!=0]))
length(S1)
table(S1)
obsTime = S1
summary(obsTime)
S=sort(unique(T[T!=0]));
length(S)
min(S)
max(S)
test = sort(unique(cancdata$dysfrnd))
length(test)
### nt,
# How many unique days from randamization at observation time sorting from trial with shorter days from randomization
#to relatively longer trials
#nt is the number of DISTINCT observation times (days from randomization)
#pooled over all subjects: follow-up runs from 11 up to 1879 days, with 1159
#unique time points. Several subjects share the same observation day (e.g.
#multiple subjects were seen on day 1483 or day 350), so nt counts unique
#time levels rather than subject-visits.
nt = length(S);
nt
# nt =1159 means that there are 1159 differnt levels(!!!) of days (not days itself) from randamization
#nt measures differnt observation time, rather than the days from randamization
n
# n=290, nt=1159 that is 290 subject were observed 1159 times, among which the longest days is 1879 days??shortest is 11 days
#----------------------------------------------------------------------------------------------------------------------------
### TT
#Extract the longest trial days for each subject
TT <- apply(T, 1, max) # take the max for each row in matrix T
length(TT)
### dN[i,j]: whether subject i is observed on time j (j=1,...nt)
#Follow-up spans up to 1879 days (minimum 11) with nt = 1159 distinct
#observation times; any single subject has at most max(m) = 17 visits,
#counted from randomization.
dN <- matrix(0, n, nt);
for (j in 1:nt) {
for (i in 1:n) {
dN[i, j] = (sum(T[i,]== S[j])!=0); # mention that S is just created from T
}
}
class(dN)
dNtd <- dN
dim(dNtd)
table(dNtd)
#For dNtd[i,j]=1 means ith subject is observed in jth observation time (the jth observation time is meansured by days from randamization)
# dNtd[i,j]=0 means the ith subject was not observed in the j time (a specific days from randamization)
days = sort(unique(cdata$dysfrnd)) # differnt level of days from randamization
length(days)
max(days)
Days <-matrix(0,n,nt)
for (i in 1:n){
Days[i,] <- days
}
dim(Days)
table(Days)
Year = Days/365
table(Year)
dim(Year)
max(Year)
diffYear <- matrix(0, n, nt);
for (j in 2:nt) {
for (i in 1:n) {
diffYear[i,j] = Year[i,j]-Year[i,j-1]
}
}
class(diffYear)
table(diffYear)
dim(diffYear)
days1 = sort(unique(S))
length(days1)
Days1 <-matrix(0,n,nt)
for (i in 1:n){
Days1[i,] <- days1
}
dim(Days1)
table(Days1)
### inn[i,j]: the additional # of basal cell carcenoma since last observed on time j (j=1,...,nt)
### inn2[i,j]: the additional # of squamous cell carcenoma since last observed on time j (j=1,...,nt)
n
nt
inn2 <- inn <- matrix(0, n, nt)
idnm <- unique(id)
for (i in 1:n) {
for (j in 1:nt) {
if (sum(randno==idnm[i] & dysfrnd==(S[j]*max(dysfrnd)))>0) {
temp <- which(randno==idnm[i] & dysfrnd==(S[j]*max(dysfrnd)))
inn[i, j] <- countba[temp]
inn2[i, j] <- countsq[temp]
}
}
}
dim(inn)
dim(inn2)
inn[which(! inn == 0)]
### N[i,j]: the cumulative # of basal cell carcenoma on time j (j=1,...,nt)
### N2[i,j]: the cumulative # of squamous cell carcenoma on time j (j=1,...,nt)
N2 <- N <- matrix(0, n,nt);
for (j in 1:nt) {
for (i in 1:n) {
N[i,j]=sum(inn[i,1:j]);
N2[i,j]=sum(inn2[i,1:j]);
}
}
dim(N)
dim(N2)
BCC=N
SCC=N2
table(BCC)
table(SCC)
### Y[i,j]: the cumulative # of both tumors on time j (j=1,...,nt)
Ytd <- Y <- N+N2
dim(Y)
#S=sort(unique(T[T!=0]));
#nt = length(S);
#write.csv(as.data.frame(inn),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreased.csv",row.names = F)
#write.csv(as.data.frame(inn2),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scIncreased.csv",row.names = F)
#write.csv(as.data.frame(N),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCumulative.csv",row.names = F)
#write.csv(as.data.frame(N2),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCumulative.csv",row.names = F)
bcIncreased =read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreased.csv",header = TRUE)
dim(bcIncreased)
bcIncreased1 = cbind.data.frame(treatment,bcIncreased)
dim(bcIncreased1)
dim(bcIncreased1[treatment==1,])
dim(bcIncreased1[treatment==0,])
bcaddedTreatment = bcIncreased1[treatment == 1,2:1160]
dim(bcaddedTreatment)
bcaddedPlacebo = bcIncreased1[treatment == 0,2:1160]
dim(bcaddedPlacebo)
#write.csv(bcaddedTreatment,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasedwithTreatment.csv",row.names = F)
#write.csv(bcaddedPlacebo,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasdPlacebo.csv",row.names = F)
bcIncrdTreatment = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasedwithTreatment.csv",header = TRUE)
bcIncrdPlacebo = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasdPlacebo.csv",header = TRUE)
bcCumulative = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCumulative.csv",header = TRUE)
dim(bcCumulative)
bcCumulative1 = cbind.data.frame(treatment,bcCumulative)
dim(bcCumulative1)
#write.csv(bcCumulative1,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCulmulativewithTreatment.csv",row.names = F)
bcCmltTrtgrp = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCulmulativewithTreatment.csv",header = TRUE)
dim(bcCmltTrtgrp)
bcCmltTreatment = bcCmltTrtgrp[treatment == 1,2:1160]
dim(bcCmltTreatment)
bcCmltPlacebo = bcCmltTrtgrp[treatment == 0,2:1160]
dim(bcCmltPlacebo)
scCumulative = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCumulative.csv",header = TRUE)
dim(scCumulative)
scCumulative1 = cbind.data.frame(treatment,scCumulative)
#write.csv(scCumulative1,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCulmulativewithTreatment.csv",row.names = F)
scCmltTrtgrp = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCulmulativewithTreatment.csv",header = TRUE)
scCmltTreatment = scCmltTrtgrp[treatment == 1,2:1160]
dim(scCmltTreatment)
scCmltPlacebo = scCmltTrtgrp[treatment == 0,2:1160]
dim(scCmltPlacebo)
####################################
# Plot trajectories of bcc
par(mfrow = c(1,2))
plot(1,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(bcCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Individual Trajectories_Treatment(n=143)")
for( i in 1:143){
lines(obsTime, bcCmltTreatment[i,],type = "l",col=4)
}
plot(0,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(bcCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Placebo Group (n=147)")
for( i in 1:147){
lines(obsTime, bcCmltPlacebo[i,],col=2)
}
# Plot trajectories of scc
par(mfrow = c(1,2))
plot(1,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(scCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Individual Trajectories_Treatment(n=143)")
for( i in 1:143){
lines(obsTime, scCmltTreatment[i,],type = "l",col=4)
}
plot(0,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(scCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Placebo Group (n=147)")
for( i in 1:147){
lines(obsTime, scCmltPlacebo[i,],col=2)
}
dev.off()
###################################################################
# Manova for Cumulative number
dim(bcCumulative)
BCC1 = bcCumulative[,1:1159]
length(treatment)
dim(BCC1)
str(BCC1)
## Collapse each subject's cumulative BCC counts (BCC1: 290 x 1159) into five
## follow-up-period totals, then run a one-way MANOVA on treatment.
## NOTE(fix): the original inner loops assigned bcc_k[i] = sum(BCC1[i, j]) on
## every j, so each subject kept only the value at the final j; in addition,
## ranges written as 233:233+232 parse as (233:233)+232 == 465 because ':'
## binds tighter than '+'. rowSums() over non-overlapping column windows
## (using the author's start columns 1, 233, 456, 690, 923) gives the
## intended per-period totals.
bcc1 <- rowSums(BCC1[, 1:232])
bcc2 <- rowSums(BCC1[, 233:455])
bcc3 <- rowSums(BCC1[, 456:689])
bcc4 <- rowSums(BCC1[, 690:922])
bcc5 <- rowSums(BCC1[, 923:1159])
## Five period totals per subject; treatment is the single factor.
BCC_Manova = as.matrix(cbind(bcc1, bcc2, bcc3, bcc4, bcc5))
summary(manova(BCC_Manova ~ treatment))
################################################################################
#Q-Q Plot
dim(BCC_Manova)
BCC_Manova
n = nrow(BCC_Manova)
p = ncol(BCC_Manova)
BCC_Manovabar = colMeans(BCC_Manova)
BCC_Manovabar
S = cov(BCC_Manova)
S
########################################
# Test marginal normality.
par(mfrow = c(2,2)) # A 2 by 2 panel of plots
for(i in 1:3) {
y = BCC_Manova[,i]
v=qqnorm(y, ylab = colnames(BCC_Manova)[i])
text(0, .9*max(v$y), paste("p = ", round(shapiro.test(y)[[2]],3)))
qqline(y)
}
# Trivariate normality:
dsqd = vector(length = n)
qsqd = vector(length = n)
for(i in 1:n) {
dsqd[i] = t(BCC_Manova[i,] - BCC_Manovabar) %*% solve(S) %*% (BCC_Manova[i,] - BCC_Manovabar)
qsqd[i] = qchisq((i-.5)/n, p, lower.tail = TRUE)
}
dsqd = sort(dsqd)
qqplot(qsqd, dsqd, main = "Chisquare Q-Q Plot", xlab = "Chisquare quantiles", ylab = "sample quantiles")
abline(0,1)
text(6, max(qsqd-2), paste("corr = ", round(cor(qsqd,dsqd),3)))
#############3
#SCC Manova
dim(scCumulative)
SCC1 = scCumulative[,1:1159]
dim(SCC1)
## Per-period totals of cumulative SCC counts (SCC1: 290 x 1159) and the
## corresponding MANOVAs (SCC alone, then BCC and SCC jointly).
## NOTE(fix): same defects as the BCC block -- the original inner loops kept
## only the last column of each window, and 233:233+232 parses as
## (233:233)+232 == 465. rowSums over contiguous windows replaces both.
scc1 <- rowSums(SCC1[, 1:232])
scc2 <- rowSums(SCC1[, 233:455])
scc3 <- rowSums(SCC1[, 456:689])
scc4 <- rowSums(SCC1[, 690:922])
scc5 <- rowSums(SCC1[, 923:1159])
SCC_Manova = as.matrix(cbind(scc1, scc2, scc3, scc4, scc5))
summary(manova(SCC_Manova ~ treatment))
## Joint analysis of both lesion types.
skinManova = as.matrix(cbind.data.frame(BCC_Manova, SCC_Manova))
summary(manova(skinManova ~ treatment))
skinManova = as.matrix(cbind.data.frame(BCC_Manova,SCC_Manova))
summary(manova(skinManova~ treatment))
#Q-Q Plot for SCC
dim(SCC_Manova)
SCC_Manova
n = nrow(SCC_Manova)
p = ncol(SCC_Manova)
SCC_Manovabar = colMeans(SCC_Manova)
SCC_Manovabar
S = cov(SCC_Manova)
S
########################################
# Test marginal normality.
par(mfrow = c(2,2)) # A 2 by 2 panel of plots
for(i in 1:3) {
y = SCC_Manova[,i]
v=qqnorm(y, ylab = colnames(SCC_Manova)[i])
text(0, .9*max(v$y), paste("p = ", round(shapiro.test(y)[[2]],3)))
qqline(y)
}
# Trivariate normality:
dsqd = vector(length = n)
qsqd = vector(length = n)
for(i in 1:n) {
dsqd[i] = t(SCC_Manova[i,] - SCC_Manovabar) %*% solve(S) %*% (SCC_Manova[i,] - SCC_Manovabar)
qsqd[i] = qchisq((i-.5)/n, p, lower.tail = TRUE)
}
dsqd = sort(dsqd)
qqplot(qsqd, dsqd, main = "Chisquare Q-Q Plot", xlab = "Chisquare quantiles", ylab = "sample quantiles")
abline(0,1)
text(6, max(qsqd-2), paste("corr = ", round(cor(qsqd,dsqd),3)))
###################################################################
# Manova for increased number
dim()
bcIncrdTreatment = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasedwithTreatment.csv",header = TRUE)
bcIncrdPlacebo = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasdPlacebo.csv",header = TRUE)
bccI = bcIncreased1[,2:1160]
str(bcIncreased1)
dim(bcIncreased1)
## Per-period totals of the INCREASED (per-visit incremental) BCC counts
## (bccI: 290 x 1159), plus the MANOVA on treatment.
## NOTE(fix): original loops suffered the same two bugs as the cumulative
## blocks -- single-element "sums" overwritten on every j, and 233:233+232
## parsing as (233:233)+232 == 465.
bccI1 <- rowSums(bccI[, 1:232])
bccI2 <- rowSums(bccI[, 233:455])
bccI3 <- rowSums(bccI[, 456:689])
bccI4 <- rowSums(bccI[, 690:922])
bccI5 <- rowSums(bccI[, 923:1159])
bccI_Manova = as.matrix(cbind(bccI1, bccI2, bccI3, bccI4, bccI5))
summary(manova(bccI_Manova ~ treatment))
bccI_bar = colMeans(bccI_Manova)
bccI_bar
S_bccI = cov(bccI_Manova)
S_bccI
# Test Marginal Normality
par(mfrow = c(2,2))
for(i in 1:5) {
y = bccI_Manova[,i]
v=qqnorm(y, ylab = colnames(bccI_Manova)[i])
text(0, .9*max(v$y), paste("p = ", round(shapiro.test(y)[[2]],3)))
qqline(y)
}
bartlett.test(bccI1~treatment)
SCC_Manova = as.matrix(cbind(scc1,scc2,scc3,scc4,scc5))
summary(manova(SCC_Manova~ treatment))
skinManova = as.matrix(cbind.data.frame(bccI_Manova,SCC_Manova))
summary(manova(skinManova~ treatment))
#SCC
dim(scCmltTrtgrp)
SCC1 = scCmltTrtgrp[,2:1160]
dim(SCC1)
## Per-period totals of cumulative SCC counts, second computation (here SCC1
## is scCmltTrtgrp[, 2:1160], i.e. the 1159 observation-time columns).
## NOTE(fix): same window bugs as above -- rowSums over contiguous windows
## replaces the broken nested loops.
scc1 <- rowSums(SCC1[, 1:232])
scc2 <- rowSums(SCC1[, 233:455])
scc3 <- rowSums(SCC1[, 456:689])
scc4 <- rowSums(SCC1[, 690:922])
scc5 <- rowSums(SCC1[, 923:1159])
scc5
######################################################################
dim(bcCmltTreatment)
dim(bcCmltPlacebo)
1159/5
length(treatment[treatment==1])
for (i in 1:232)
{
bccTreatment_sum1[i] = sum(bcCmltTreatment[,i])
}
bccTreatment_sum1
for (i in 233:233+232)
{
bccTreatment_sum2 = sum(bcCmltTreatment[,i])
}
bccTreatment_sum2
for(i in 456:456+232)
{
bccTreatment_sum3 = sum(bcCmltTreatment[,i])
}
bccTreatment_sum3
for(i in 779:779+232)
{
bccTreatment_sum4 = sum(bcCmltTreatment[,i])
}
bccTreatment_sum4
for(i in 1002:1159)
{
bccTreatment_sum5 = sum(bcCmltTreatment[,i])
}
bccTreatment_sum5
bccTreatment_sum = c(bccTreatment_sum1,bccTreatment_sum2,bccTreatment_sum3,bccTreatment_sum4,bccTreatment_sum5)
# For Placebo Group
for (i in 1:232)
{
bccPlacebo_sum1 = sum(bcCmltPlacebo[,i])
}
bccPlacebo_sum1
for (i in 233:233+232)
{
bccPlacebo_sum2 = sum(bcCmltPlacebo[,i])
}
bccPlacebo_sum2
for(i in 456:456+232)
{
bccPlacebo_sum3 = sum(bcCmltPlacebo[,i])
}
bccPlacebo_sum3
for(i in 779:779+232)
{
bccPlacebo_sum4 = sum(bcCmltPlacebo[,i])
}
bccPlacebo_sum4
for(i in 1002:1159)
{
bccPlacebo_sum5 = sum(bcCmltPlacebo[,i])
}
bccPlacebo_sum5
bccPlacebo_sum = c(bccPlacebo_sum1,bccPlacebo_sum2,bccPlacebo_sum3,bccPlacebo_sum4,bccPlacebo_sum5)
#############################################################################################################
############################################################################################################3
#--------------------------------------------------------------------------------------------------------------------
# Stack outcome variable in one column by 1159 observation time, for each time, Age,sex,treatment,base keep same with ID
length(idnm)
length(id)
length(age)
length(sex)
length(treatment)
length(base)
length(base2)
dim(dNtd)
#install.packages("reshape")
library(reshape)
df_dNtd = cbind.data.frame(id,age,sex,treatment,base,dNtd)
melt_dNtd = melt(df_dNtd,id=(c("idnm","age","sex","treatment","base")))
head(melt_dNtd)
dim(melt_dNtd)
observed = melt_dNtd$value
melt_dNtd$variable
obsdays_level = melt_dNtd$variable
length(obsdays_level)
table(obsdays_level)
df_Days = cbind.data.frame(idnm,age,sex,treatment,base,Days)
melt_Days = melt(df_Days,id=(c("idnm","age","sex","treatment","base")))
head(melt_Days)
dim(melt_Days)
obsDays = melt_Days$value
length(obsDays)
df_Days1 = cbind.data.frame(idnm,age,sex,treatment,base,Days1)
melt_Days1 = melt(df_Days1,id=(c("idnm","age","sex","treatment","base")))
head(melt_Days1)
dim(melt_Days1)
obsDays1 = melt_Days1$value
length(obsDays1)
df_Year = cbind.data.frame(idnm,age,sex,treatment,base,Year)
melt_Days1 = melt(df_Days1,id=(c("idnm","age","sex","treatment","base")))
head(melt_Year)
dim(melt_Year)
obsYear = melt_Year$value
length(obsDays1)
df_Ytd = cbind.data.frame(idnm,age,sex,treatment,base,Ytd)
melt_Ytd <- melt(df_Ytd,id=(c("idnm","age","sex","treatment","base")))
head(melt_Ytd)
dim(melt_Ytd)
melt_Ytd$value
bothcc = melt_Ytd$value
df_N = cbind.data.frame(idnm,age,sex,treatment,base,N)
melt_N <- melt(df_N,id=(c("idnm","age","sex","treatment","base")))
head(melt_N)
dim(melt_N) #1159*290=336,110
bcc = melt_N$value
df_N2 = cbind.data.frame(idnm,age,sex,treatment,base,N2)
melt_N2 <- melt(df_N2,id=(c("idnm","age","sex","treatment","base")))
head(melt_N2)
dim(melt_N2)
scc = melt_N2$value
df_inn = cbind.data.frame(idnm,age,sex,treatment,base,inn)
melt_inn <- melt(df_inn,id=(c("idnm","age","sex","treatment","base")))
head(melt_inn)
dim(melt_inn)
bcadded = melt_inn$value
df_inn2 = cbind.data.frame(idnm,age,sex,treatment,base,inn2)
melt_inn2 <- melt(df_inn2,id=(c("idnm","age","sex","treatment","base")))
head(melt_inn2)
dim(melt_inn2)
scadded = melt_inn2$value
data = cbind.data.frame(obsdays_level,obsDays,obsDays1,melt_Days,observed,bcc,scc, bcadded,scadded,bothcc)
head(data)
data = subset(data,select = -c(variable,value))
head(data)
dim(data)
str(data)
# only keep the datapoint when observed
data1 = data[observed==1,]
dim(data1)
write.csv(data,
file="D:/001UNC Charlotte/2017Fall/Research Seminar/skinCanc.csv",row.names = F) # with obervatoin that not observed
write.csv(data1,
file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/skinCanc_observed.csv",row.names = F) # only keep observed values
#######################################################################################################
#######################################################################################################
#----------------------------------------------------------------------------------------
skincanc <- read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/skinCanc_observed.csv",header=TRUE)
dim(skincanc)
head(skincanc)
attach(skincanc)
str(skincanc)
library(lattice)
x= sort(unique(obsDays))
xyplot(bcc~x,data = skincanc)
xyplot(bcc~x,groups = idnm,data=skincanc)
xyplot(bcc~x,groups = sex,data=skincanc)
library(ggplot2)
#ggplot(skincanc,aes(x =obsDays))+geom_line(aes(y=bcc),colour="blue")+
# geom_line(aes(y=scc),colour="red")
ggplot(skincanc,aes(x = obsDays,y = bcc,colour=sex))+geom_line()+ylab(label="Cumulative Basal Lesions")+xlab(label="Observation Days")
ggplot(skincanc,aes(x = obsDays,y = scc,colour=treatment))+geom_line()+ylab(label="Cumulative Squamous Lesions")+xlab(label="Observation Days")
dev.off()
#-----------------------------------------------------------------------------------------
# calculate average by gender
totalObsTime = length(unique(obsDays))
totalObsTime
# Mean cumulative basal-cell count at each observation time: overall, then by
# sex.
# NOTE(fix): the original male/female loops computed
#   mean(bcc[obsdays_level == i] & bcc[sex == 1])
# i.e. the mean of a (recycled) logical AND of two DIFFERENT subsets of bcc,
# not a conditional mean. Both conditions must be combined inside a single
# logical index of the same length as bcc.
bcc_mean <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean[i] <- mean(bcc[obsdays_level == i])
}
bcc_mean
length(bcc_mean)
plot(sort(unique(obsDays)), bcc_mean, xlab = "Observation Days", ylab = "bcc_mean",
     main = "Mean of Basal Lesions", col = 4, type = "l")
# Males only.
bcc_mean_male <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_male[i] <- mean(bcc[obsdays_level == i & sex == 1])
}
table(bcc_mean_male)
length(bcc_mean_male)
plot(sort(unique(obsDays)), bcc_mean_male, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Male",
     main = "Mean Basal Lesions of Male", col = 4, type = "l")
# Females only.
bcc_mean_female <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_female[i] <- mean(bcc[obsdays_level == i & sex == 0])
}
table(bcc_mean_female)
length(bcc_mean_female)
plot(sort(unique(obsDays)), bcc_mean_female, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Female",
     main = "Mean Basal Lesions of Female", col = "red", type = "l")
# Overlay the group curves on one set of axes.
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(bcc_mean_male, bcc_mean_female), pch = 20,
        xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions at different observation time")
matplot(x, cbind(bcc_mean, bcc_mean_male, bcc_mean_female), pch = 20,
        xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions at different observation time")
# SCC between gender
# Mean cumulative squamous-cell count at each observation time, by sex.
# NOTE(fix): same subsetting bug as the basal-cell block -- the original took
# mean(scc[obsdays_level == i] & scc[sex == 1]); both conditions belong in
# one logical index. The matplot ylab also wrongly said "Basal" and is
# corrected.
scc_mean_male <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_male[i] <- mean(scc[obsdays_level == i & sex == 1])
}
table(scc_mean_male)
length(scc_mean_male)
plot(sort(unique(obsDays)), scc_mean_male, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Male",
     main = "Mean Squamous Lesions of Male", col = 4, type = "l")
scc_mean_female <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_female[i] <- mean(scc[obsdays_level == i & sex == 0])
}
table(scc_mean_female)
length(scc_mean_female)
plot(sort(unique(obsDays)), scc_mean_female, xlab = "Observation Days",
     ylab = "Mean of Squmous Lesions for Female",
     main = "Mean Squamouse Lesions of Female", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(scc_mean_male, scc_mean_female), pch = ".",
        xlab = "Observation time", ylab = "Mean of Squamous Lesions",
        main = "Mean value of Squamous Lesions at different observation time")
dev.off()
#_________________________________________________________________________________________________________________
#-----------------------------------------------------------------------------------------
#Calculate average by treatment group
# Mean cumulative lesion counts at each observation time, by treatment arm
# (BCC first, then SCC).
# NOTE(fix): the original computed e.g.
#   mean(bcc[obsdays_level == i] & bcc[treatment == 1])
# -- the mean of a recycled logical AND of two different subsets. The two
# conditions must be combined inside a single logical index.
totalObsTime <- length(unique(obsDays))
totalObsTime
bcc_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_treatment[i] <- mean(bcc[obsdays_level == i & treatment == 1])
}
table(bcc_mean_treatment)
length(bcc_mean_treatment)
plot(sort(unique(obsDays)), bcc_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Treatment Group",
     main = "Mean Basal Lesions of treatment", col = 4, type = "l")
bcc_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_placebo[i] <- mean(bcc[obsdays_level == i & treatment == 0])
}
table(bcc_mean_placebo)
length(bcc_mean_placebo)
plot(sort(unique(obsDays)), bcc_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Placebo Group ",
     main = "Mean Basal Lesions of Placebo", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(bcc_mean_treatment, bcc_mean_placebo), pch = c("+", "-"),
        cex = c(0.5, 1), col = c(4, 2), xlab = "Observation time",
        ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions between Treatment at different observation time")
# Squamous-cell lesions, same comparison (same fix applied).
scc_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_treatment[i] <- mean(scc[obsdays_level == i & treatment == 1])
}
table(scc_mean_treatment)
length(scc_mean_treatment)
plot(sort(unique(obsDays)), scc_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Treatment Group",
     main = "Mean Squamouse Lesions of treat", col = 4, type = "l")
scc_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_placebo[i] <- mean(scc[obsdays_level == i & treatment == 0])
}
table(scc_mean_placebo)
length(scc_mean_placebo)
plot(sort(unique(obsDays)), scc_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Placebo Group ",
     main = "Mean Squamouse Lesions of Placebo", col = "red", type = "l")
matplot(x, cbind(scc_mean_treatment, scc_mean_placebo), pch = c("+", "-"),
        cex = c(0.5, 1), col = c(4, 2), xlab = "Observation time",
        ylab = "Mean of Squamouse Lesions",
        main = "Mean value of Squamouse Lesions between Treatment at different observation time")
#######################################################
#___ Increased Number --------
#Calculate average by treatment group for increased number
str(skincanc)
totalObsTime = length(unique(obsDays))
totalObsTime
# Mean NEWLY ADDED basal lesions per observation-time level, by arm.
# BUG FIX (all four loops below): original computed mean(v[m1] & v[m2]) --
# the mean of a recycled logical AND -- rather than subsetting once with a
# combined mask.  (Assumes `treatment` resolves to the skincanc column of the
# same length as bcadded/scadded -- verify against the attach() search path.)
bcadded_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcadded_mean_treatment[i] <- mean(bcadded[obsdays_level == i & treatment == 1])
}
table(bcadded_mean_treatment)
length(bcadded_mean_treatment)
plot(sort(unique(obsDays)), bcadded_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Treatment Group",
     main = "Mean Basal Lesions of treatment", col = 4, type = "l")
bcadded_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcadded_mean_placebo[i] <- mean(bcadded[obsdays_level == i & treatment == 0])
}
table(bcadded_mean_placebo)
length(bcadded_mean_placebo)
plot(sort(unique(obsDays)), bcadded_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Placebo Group ",
     main = "Mean Basal Lesions of Placebo", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(bcadded_mean_treatment, bcadded_mean_placebo), pch = c("+", "-"),
        col = c(4, 2), xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions between Treatment at different observation time")
# scadded by treatment group
totalObsTime = length(unique(obsDays))
totalObsTime
scadded_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scadded_mean_treatment[i] <- mean(scadded[obsdays_level == i & treatment == 1])
}
table(scadded_mean_treatment)
length(scadded_mean_treatment)
plot(sort(unique(obsDays)), scadded_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Treatment Group",
     main = "Mean Squamouse Lesions of treat", col = 4, type = "l")
scadded_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scadded_mean_placebo[i] <- mean(scadded[obsdays_level == i & treatment == 0])
}
table(scadded_mean_placebo)
length(scadded_mean_placebo)
plot(sort(unique(obsDays)), scadded_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Placebo Group ",
     main = "Mean Squamouse Lesions of Placebo", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(scadded_mean_treatment, scadded_mean_placebo), pch = c("+", "-"),
        cex = c(0.5, 1), col = c(4, 2), xlab = "Observation time",
        ylab = "Mean of Squamouse Lesions",
        main = "Mean value of Squamouse Lesions between Treatment at different observation time")
############################################################################################################
| /GEE.R | no_license | Snowball119/Generalized_Estimating_Equations_in_Longitudinal_Data_Analysis | R | false | false | 29,375 | r | ### Z1: 1 for DFMO and 0 for Placebo
### Z2: 1 for base>4 and 0 for base<=4
## NOTE(review): the code below actually dichotomizes at base >= 2, not 4 -- confirm the intended cutoff.
#baseline data
basedata <- read.csv("kbase.csv",header=TRUE)
#cancer data
cancdata <- read.csv("kcanc.csv",header=TRUE)
# attach() puts both data frames on the search path; later global assignments
# (e.g. `treatment`, `sex`, `T`) can mask the attached columns -- a known footgun here.
attach(basedata)
attach(cancdata)
str(basedata)
dim(basedata)
str(cancdata)
dim(cancdata)
base <- basedata$scbase
trtgrp <- basedata$ trtgrp
age<- basedata$age
sex<- basedata$sex
table(trtgrp)
### m[i]: total number of obs times for experimental subject i
id <- cancdata$randno
length(id)
n <- nrow(basedata) # n different experimental subject
n
m <- numeric(n)
for (i in 1:n)
m[i] <- sum(id==i) ## count the number of id==i, that is to count how many time ith subject is observed
max_observation_time = max(m)
max_observation_time
### Z: here is defined by categories(treatment or placebo)
# Z is defined as 2 rows 291 columns with elements 0
Z <- matrix(0, 2, n)
#Z[1,] <- as.numeric(trtgrp=="DFMO");Z[2,] <- base (trtgrp=="PLACEBO)
#Z[,trtgrp=="DFMO" & base<=4] denote for subject have treatment
#and Total Skin Cancers Reported up to Randomization(scbase) is less or equal to 4
#trtgrp2 <- ifelse((trtgrp == "PLACEBO"),1,0)
#trtgrp2
# Each selected 2 x k submatrix is filled column-wise, so c(a, b) recycles to
# give row 1 = a (treatment indicator) and row 2 = b (baseline >= 2 indicator).
Z[,trtgrp=="DFMO" & base<2] <- c(1, 0)
Z[,trtgrp=="DFMO" & base>=2] <- c(1, 1)
Z[,trtgrp=="PLACEBO" & base<2] <- c(0, 0)
Z[,trtgrp=="PLACEBO" & base>=2] <- c(0, 1)
Z[1,]
Z[2,]
dim(Z)
#library(DescTools)
#Desc(Z[1,])
#table(Z[1,],Z[2,])
#Desc(table(Z[1,],Z[2,]))
# Recode sex to 1 = Male, 0 = Female; this masks the attached basedata$sex.
sex <- ifelse(basedata$sex=="Male",1,0)
length(sex)
### T=T2 for obs times on 2-dim count data.
### Note that subject 267 has no count data, therefore removed
### T is a n by nt matrix;
### T[i,j] gives on the jth oberservation time, how many days from randomization does ith subject has
### note that here the length of study is set to 1
# NOTE(review): `T` shadows the TRUE alias; any later bare `T` in this script
# means this matrix, not TRUE.
T <- matrix(0, n, max(m))
T
for (i in 1:n) {
if (i!=267)
T[i, 1:m[i]] <- sort(dysfrnd[id==i])
}
dim(T)
T # T[i,j] mean the days from randamization for jth observation time of ith subject
# NOTE(review): this drops ROW 267 of cancdata, not the records with
# randno == 267; cancdata[cancdata$randno != 267, ] is probably what was
# intended -- verify before relying on `cdata` downstream.
cdata <- cancdata[-267,]
n <- n-1
T2 <- T <- T[-267,]; m <- m[-267]; Z <- Z[,-267];base<- base[-267]; trtgrp <- trtgrp[-267];
sex<-sex[-267]; age<-age[-267]
id <- id[id!=267]
table(id)
treatment = Z[1,]
base2 = Z[2,] # base2 = 1 stands for base>=4
table(treatment,base)
table(treatment,base2)
table(treatment)
dim(T2)
max(T2[,])/365
max(dysfrnd)
length(dysfrnd)
max(T2)
#The longest day from randamization is 1879, the trial has already taken more than 5 years on
# Rescale observation days to [0, 1]: the whole study length is 1 time unit.
T <- T/max(T)
#Treat the entire timeline as 1
# let the longest days from randamization =1, then to see how long each obearvation takes comparing to 1
T
dim(T)
min(T) #means for the jth obearvation of ith subject, the days of randomizaiton is 0;
# larger T[i,j] means longer the trial takes
#--------------------------------------------------------------------------------------------
##### S: collection of all obs times(different level of days from randomization)
### Exclude Randomization days that is 0
S1 = sort(unique(T2[T2!=0]))
length(S1)
table(S1)
obsTime = S1
summary(obsTime)
S=sort(unique(T[T!=0]));
length(S)
min(S)
max(S)
test = sort(unique(cancdata$dysfrnd))
length(test)
### nt,
# How many unique days from randamization at observation time sorting from trial with shorter days from randomization
#to relatively longer trials
# (The original comments here were mojibake; translated/paraphrased:)
# nt counts the DISTINCT observation days across the whole study (max 1879).
# Several subjects can share the same observation day (e.g. subjects 22, 80,
# 96, 133, 148, 178 all observed on day 1483; subjects 1, 14, 91, 131, 192,
# 229, 269, 274 on day 350), so nt indexes distinct days, not visits.
nt = length(S);
nt
# nt =1159 means that there are 1159 differnt levels(!!!) of days (not days itself) from randamization
#nt measures differnt observation time, rather than the days from randamization
n
# n=290, nt=1159 that is 290 subject were observed 1159 times, among which the longest days is 1879 days??shortest is 11 days
#----------------------------------------------------------------------------------------------------------------------------
### TT
#Extract the longest trial days for each subject
TT <- apply(T, 1, max) # take the max for each row in matrix T
length(TT)
### dN[i,j]: whether subject i is observed on time j (j=1,...nt)
# (Mojibake comment replaced:) each subject has at most max(m) = 17 visits,
# spread over the nt = 1159 distinct study-wide observation days.
dN <- matrix(0, n, nt);
for (j in 1:nt) {
for (i in 1:n) {
# Exact == on doubles is acceptable only because S was built from the very
# same stored values in T, with no intervening arithmetic.
dN[i, j] = (sum(T[i,]== S[j])!=0); # mention that S is just created from T
}
}
class(dN)
dNtd <- dN
dim(dNtd)
table(dNtd)
#For dNtd[i,j]=1 means ith subject is observed in jth observation time (the jth observation time is meansured by days from randamization)
# dNtd[i,j]=0 means the ith subject was not observed in the j time (a specific days from randamization)
days = sort(unique(cdata$dysfrnd)) # differnt level of days from randamization
length(days)
max(days)
# Every row of Days is the same vector of distinct observation days.
# NOTE(review): `days` comes from cdata (one ROW dropped) while nt comes from
# T; if length(days) != nt this assignment recycles with a warning -- verify.
Days <-matrix(0,n,nt)
for (i in 1:n){
Days[i,] <- days
}
dim(Days)
table(Days)
Year = Days/365
table(Year)
dim(Year)
max(Year)
# diffYear[i, j]: gap in years between consecutive distinct observation days.
diffYear <- matrix(0, n, nt);
for (j in 2:nt) {
for (i in 1:n) {
diffYear[i,j] = Year[i,j]-Year[i,j-1]
}
}
class(diffYear)
table(diffYear)
dim(diffYear)
# Days1 repeats the RESCALED observation times (S, on [0,1]) in every row.
days1 = sort(unique(S))
length(days1)
Days1 <-matrix(0,n,nt)
for (i in 1:n){
Days1[i,] <- days1
}
dim(Days1)
table(Days1)
### inn[i,j]: the additional # of basal cell carcenoma since last observed on time j (j=1,...,nt)
### inn2[i,j]: the additional # of squamous cell carcenoma since last observed on time j (j=1,...,nt)
n
nt
inn2 <- inn <- matrix(0, n, nt)
idnm <- unique(id)
for (i in 1:n) {
for (j in 1:nt) {
# S is on [0,1]; multiplying back by max(dysfrnd) recovers the raw day.
# NOTE(review): this relies on exact floating-point round-trip of
# day/max * max == day; a tolerance-based match would be safer.
if (sum(randno==idnm[i] & dysfrnd==(S[j]*max(dysfrnd)))>0) {
temp <- which(randno==idnm[i] & dysfrnd==(S[j]*max(dysfrnd)))
inn[i, j] <- countba[temp]
inn2[i, j] <- countsq[temp]
}
}
}
dim(inn)
dim(inn2)
inn[which(! inn == 0)]
### N[i,j]: the cumulative # of basal cell carcenoma on time j (j=1,...,nt)
### N2[i,j]: the cumulative # of squamous cell carcenoma on time j (j=1,...,nt)
# Running totals across observation times.  (This is O(n*nt^2);
# t(apply(inn, 1, cumsum)) would be equivalent and far faster.)
N2 <- N <- matrix(0, n,nt);
for (j in 1:nt) {
for (i in 1:n) {
N[i,j]=sum(inn[i,1:j]);
N2[i,j]=sum(inn2[i,1:j]);
}
}
dim(N)
dim(N2)
BCC=N
SCC=N2
table(BCC)
table(SCC)
### Y[i,j]: the cumulative # of both tumors on time j (j=1,...,nt)
Ytd <- Y <- N+N2
dim(Y)
#S=sort(unique(T[T!=0]));
#nt = length(S);
#write.csv(as.data.frame(inn),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreased.csv",row.names = F)
#write.csv(as.data.frame(inn2),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scIncreased.csv",row.names = F)
#write.csv(as.data.frame(N),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCumulative.csv",row.names = F)
#write.csv(as.data.frame(N2),file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCumulative.csv",row.names = F)
# Round-trip the count matrices through CSV files and split them by arm.
# Columns 2:1160 select the 1159 data columns after the prepended `treatment`.
bcIncreased =read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreased.csv",header = TRUE)
dim(bcIncreased)
bcIncreased1 = cbind.data.frame(treatment,bcIncreased)
dim(bcIncreased1)
dim(bcIncreased1[treatment==1,])
dim(bcIncreased1[treatment==0,])
bcaddedTreatment = bcIncreased1[treatment == 1,2:1160]
dim(bcaddedTreatment)
bcaddedPlacebo = bcIncreased1[treatment == 0,2:1160]
dim(bcaddedPlacebo)
#write.csv(bcaddedTreatment,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasedwithTreatment.csv",row.names = F)
#write.csv(bcaddedPlacebo,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasdPlacebo.csv",row.names = F)
bcIncrdTreatment = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasedwithTreatment.csv",header = TRUE)
bcIncrdPlacebo = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasdPlacebo.csv",header = TRUE)
bcCumulative = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCumulative.csv",header = TRUE)
dim(bcCumulative)
bcCumulative1 = cbind.data.frame(treatment,bcCumulative)
dim(bcCumulative1)
#write.csv(bcCumulative1,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCulmulativewithTreatment.csv",row.names = F)
bcCmltTrtgrp = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcCulmulativewithTreatment.csv",header = TRUE)
dim(bcCmltTrtgrp)
bcCmltTreatment = bcCmltTrtgrp[treatment == 1,2:1160]
dim(bcCmltTreatment)
bcCmltPlacebo = bcCmltTrtgrp[treatment == 0,2:1160]
dim(bcCmltPlacebo)
scCumulative = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCumulative.csv",header = TRUE)
dim(scCumulative)
scCumulative1 = cbind.data.frame(treatment,scCumulative)
#write.csv(scCumulative1,file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCulmulativewithTreatment.csv",row.names = F)
scCmltTrtgrp = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/scCulmulativewithTreatment.csv",header = TRUE)
scCmltTreatment = scCmltTrtgrp[treatment == 1,2:1160]
dim(scCmltTreatment)
scCmltPlacebo = scCmltTrtgrp[treatment == 0,2:1160]
dim(scCmltPlacebo)
####################################
# Plot trajectories of bcc
# NOTE(review): arm sizes 143/147 are hard-coded -- keep in sync with table(treatment).
par(mfrow = c(1,2))
plot(1,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(bcCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Individual Trajectories_Treatment(n=143)")
for( i in 1:143){
lines(obsTime, bcCmltTreatment[i,],type = "l",col=4)
}
plot(0,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(bcCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Placebo Group (n=147)")
for( i in 1:147){
lines(obsTime, bcCmltPlacebo[i,],col=2)
}
# Plot trajectories of scc
par(mfrow = c(1,2))
plot(1,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(scCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Individual Trajectories_Treatment(n=143)")
for( i in 1:143){
lines(obsTime, scCmltTreatment[i,],type = "l",col=4)
}
plot(0,type="l",xlim = c(0,max(obsTime)),ylim = c(0,max(scCmltTrtgrp[,2:1160])),xlab="Observation Days",main="Placebo Group (n=147)")
for( i in 1:147){
lines(obsTime, scCmltPlacebo[i,],col=2)
}
dev.off()
###################################################################
# Manova for Cumulative number
# Per-subject totals of cumulative BCC within five consecutive windows of
# observation times, followed by a one-way MANOVA on treatment arm.
dim(bcCumulative)
BCC1 = bcCumulative[,1:1159]
length(treatment)
dim(BCC1)
str(BCC1)
# BUG FIX: the original inner loops (`for (j in 1:232) bcc1[i] = sum(BCC1[i,j])`)
# overwrote bcc1[i] at every j, keeping only the LAST column, and ranges such
# as `233:233+232` evaluate to the single index 465 because `:` binds tighter
# than `+`.  rowSums over each window gives the intended per-subject totals.
# NOTE(review): windows keep the original bounds with only the precedence
# fixed; they overlap/gap slightly (465 vs 456, 688 vs 690) -- confirm the
# intended partition of the 1159 observation times.
bcc1 <- rowSums(BCC1[, 1:232])
bcc1
bcc2 <- rowSums(BCC1[, 233:(233 + 232)])
bcc2
bcc3 <- rowSums(BCC1[, 456:(456 + 232)])
bcc3
bcc4 <- rowSums(BCC1[, 690:(690 + 232)])
bcc4
bcc5 <- rowSums(BCC1[, 923:1159])
bcc5
#BCC_Manova = cbind(treatment,bcc1,bcc2,bcc3,bcc4,bcc5)
BCC_Manova = as.matrix(cbind(bcc1,bcc2,bcc3,bcc4,bcc5))
summary(manova(BCC_Manova ~ treatment))
################################################################################
#Q-Q Plot
# Multivariate-normality diagnostics for the five BCC window totals.
dim(BCC_Manova)
BCC_Manova
# NOTE(review): these assignments clobber earlier globals -- `n` was the
# subject count and `S` the vector of rescaled observation times; any code
# below that still expects the old meanings will silently use these instead.
n = nrow(BCC_Manova)
p = ncol(BCC_Manova)
BCC_Manovabar = colMeans(BCC_Manova)
BCC_Manovabar
S = cov(BCC_Manova)
S
########################################
# Test marginal normality.
par(mfrow = c(2,2)) # A 2 by 2 panel of plots
# NOTE(review): only the first 3 of the 5 columns are tested -- intentional?
for(i in 1:3) {
y = BCC_Manova[,i]
v=qqnorm(y, ylab = colnames(BCC_Manova)[i])
# Annotate each panel with the Shapiro-Wilk p-value for that margin.
text(0, .9*max(v$y), paste("p = ", round(shapiro.test(y)[[2]],3)))
qqline(y)
}
# Trivariate normality:
# (Actually p-variate: squared Mahalanobis distances vs chi-square(p) quantiles.)
dsqd = vector(length = n)
qsqd = vector(length = n)
for(i in 1:n) {
dsqd[i] = t(BCC_Manova[i,] - BCC_Manovabar) %*% solve(S) %*% (BCC_Manova[i,] - BCC_Manovabar)
qsqd[i] = qchisq((i-.5)/n, p, lower.tail = TRUE)
}
dsqd = sort(dsqd)
qqplot(qsqd, dsqd, main = "Chisquare Q-Q Plot", xlab = "Chisquare quantiles", ylab = "sample quantiles")
abline(0,1)
text(6, max(qsqd-2), paste("corr = ", round(cor(qsqd,dsqd),3)))
#############3
#SCC Manova
# Same per-subject window totals as the BCC analysis, for squamous counts,
# then MANOVA on treatment and a joint MANOVA on BCC + SCC totals.
dim(scCumulative)
SCC1 = scCumulative[,1:1159]
dim(SCC1)
# BUG FIX: as in the BCC block, the original loops kept only the last column
# of each window and `a:a+b` ranges collapsed to one column (`:` binds tighter
# than `+`); rowSums over each window computes the intended totals.
scc1 <- rowSums(SCC1[, 1:232])
scc1
scc2 <- rowSums(SCC1[, 233:(233 + 232)])
scc2
scc3 <- rowSums(SCC1[, 456:(456 + 232)])
scc3
scc4 <- rowSums(SCC1[, 690:(690 + 232)])
scc4
scc5 <- rowSums(SCC1[, 923:1159])
scc5
SCC_Manova = as.matrix(cbind(scc1,scc2,scc3,scc4,scc5))
summary(manova(SCC_Manova~ treatment))
# Joint MANOVA on the BCC and SCC window totals together.
skinManova = as.matrix(cbind.data.frame(BCC_Manova,SCC_Manova))
summary(manova(skinManova~ treatment))
#Q-Q Plot for SCC
# Multivariate-normality diagnostics for the five SCC window totals.
dim(SCC_Manova)
SCC_Manova
# NOTE(review): `n` and `S` are clobbered again here (see the BCC Q-Q block).
n = nrow(SCC_Manova)
p = ncol(SCC_Manova)
SCC_Manovabar = colMeans(SCC_Manova)
SCC_Manovabar
S = cov(SCC_Manova)
S
########################################
# Test marginal normality.
par(mfrow = c(2,2)) # A 2 by 2 panel of plots
# NOTE(review): only the first 3 of the 5 columns are tested -- intentional?
for(i in 1:3) {
y = SCC_Manova[,i]
v=qqnorm(y, ylab = colnames(SCC_Manova)[i])
text(0, .9*max(v$y), paste("p = ", round(shapiro.test(y)[[2]],3)))
qqline(y)
}
# Trivariate normality:
# (Actually p-variate: squared Mahalanobis distances vs chi-square(p) quantiles.)
dsqd = vector(length = n)
qsqd = vector(length = n)
for(i in 1:n) {
dsqd[i] = t(SCC_Manova[i,] - SCC_Manovabar) %*% solve(S) %*% (SCC_Manova[i,] - SCC_Manovabar)
qsqd[i] = qchisq((i-.5)/n, p, lower.tail = TRUE)
}
dsqd = sort(dsqd)
qqplot(qsqd, dsqd, main = "Chisquare Q-Q Plot", xlab = "Chisquare quantiles", ylab = "sample quantiles")
abline(0,1)
text(6, max(qsqd-2), paste("corr = ", round(cor(qsqd,dsqd),3)))
###################################################################
# Manova for increased number
# MANOVA on per-subject window totals of NEWLY ADDED basal lesions.
# BUG FIX: the original started with a zero-argument `dim()`, which errors;
# replaced with the presumably intended dimension check.
dim(bcIncreased1)
bcIncrdTreatment = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasedwithTreatment.csv",header = TRUE)
bcIncrdPlacebo = read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/bcIncreasdPlacebo.csv",header = TRUE)
bccI = bcIncreased1[,2:1160]
str(bcIncreased1)
dim(bcIncreased1)
# BUG FIX: original loops kept only the last column of each window, and ranges
# like `233:233+232` collapse to a single index (`:` binds tighter than `+`).
# NOTE(review): windows keep the original (precedence-fixed) bounds, which
# overlap/gap slightly -- confirm the intended partition.
bccI1 <- rowSums(bccI[, 1:232])
bccI1
bccI2 <- rowSums(bccI[, 233:(233 + 232)])
bccI2
bccI3 <- rowSums(bccI[, 456:(456 + 232)])
bccI3
bccI4 <- rowSums(bccI[, 690:(690 + 232)])
bccI4
bccI5 <- rowSums(bccI[, 923:1159])
bccI5
#bccI_Manova = cbind(treatment,bccI1,bccI2,bccI3,bccI4,bccI5)
bccI_Manova = as.matrix(cbind(bccI1,bccI2,bccI3,bccI4,bccI5))
summary(manova(bccI_Manova ~ treatment))
bccI_bar = colMeans(bccI_Manova)
bccI_bar
S_bccI = cov(bccI_Manova)
S_bccI
# Test Marginal Normality
par(mfrow = c(2,2))
for(i in 1:5) {
y = bccI_Manova[,i]
v=qqnorm(y, ylab = colnames(bccI_Manova)[i])
text(0, .9*max(v$y), paste("p = ", round(shapiro.test(y)[[2]],3)))
qqline(y)
}
# Homogeneity of variance across arms for the first window total.
bartlett.test(bccI1~treatment)
SCC_Manova = as.matrix(cbind(scc1,scc2,scc3,scc4,scc5))
summary(manova(SCC_Manova~ treatment))
# Joint MANOVA on added-BCC and SCC window totals.
skinManova = as.matrix(cbind.data.frame(bccI_Manova,SCC_Manova))
summary(manova(skinManova~ treatment))
#SCC
# Per-subject window totals of cumulative SCC, recomputed from the
# cumulative-with-treatment file.  NOTE: this REDEFINES scc1..scc5.
dim(scCmltTrtgrp)
SCC1 = scCmltTrtgrp[,2:1160]
dim(SCC1)
# BUG FIX: same pattern as the blocks above -- overwriting inner loops and
# `a:a+b` precedence collapsing each window to a single column; use rowSums.
scc1 <- rowSums(SCC1[, 1:232])
scc1
scc2 <- rowSums(SCC1[, 233:(233 + 232)])
scc2
scc3 <- rowSums(SCC1[, 456:(456 + 232)])
scc3
scc4 <- rowSums(SCC1[, 690:(690 + 232)])
scc4
scc5 <- rowSums(SCC1[, 923:1159])
scc5
######################################################################
# Arm-level totals of cumulative BCC within five windows of observation times.
dim(bcCmltTreatment)
dim(bcCmltPlacebo)
1159/5
length(treatment[treatment==1])
# BUG FIX: the original loops assigned a scalar `sum(bcCmltTreatment[,i])` on
# every iteration (so only the last column survived), indexed an uninitialised
# vector (`bccTreatment_sum1[i]`), and used ranges like `233:233+232` which
# collapse to one index because `:` binds tighter than `+`.  Each window total
# is simply the sum over its block of columns.
# NOTE(review): windows keep the original (precedence-fixed) bounds, which
# overlap/gap slightly -- confirm the intended partition.
bccTreatment_sum1 <- sum(bcCmltTreatment[, 1:232])
bccTreatment_sum1
bccTreatment_sum2 <- sum(bcCmltTreatment[, 233:(233 + 232)])
bccTreatment_sum2
bccTreatment_sum3 <- sum(bcCmltTreatment[, 456:(456 + 232)])
bccTreatment_sum3
bccTreatment_sum4 <- sum(bcCmltTreatment[, 779:(779 + 232)])
bccTreatment_sum4
bccTreatment_sum5 <- sum(bcCmltTreatment[, 1002:1159])
bccTreatment_sum5
bccTreatment_sum = c(bccTreatment_sum1,bccTreatment_sum2,bccTreatment_sum3,bccTreatment_sum4,bccTreatment_sum5)
# For Placebo Group
bccPlacebo_sum1 <- sum(bcCmltPlacebo[, 1:232])
bccPlacebo_sum1
bccPlacebo_sum2 <- sum(bcCmltPlacebo[, 233:(233 + 232)])
bccPlacebo_sum2
bccPlacebo_sum3 <- sum(bcCmltPlacebo[, 456:(456 + 232)])
bccPlacebo_sum3
bccPlacebo_sum4 <- sum(bcCmltPlacebo[, 779:(779 + 232)])
bccPlacebo_sum4
bccPlacebo_sum5 <- sum(bcCmltPlacebo[, 1002:1159])
bccPlacebo_sum5
bccPlacebo_sum = c(bccPlacebo_sum1,bccPlacebo_sum2,bccPlacebo_sum3,bccPlacebo_sum4,bccPlacebo_sum5)
#############################################################################################################
############################################################################################################3
#--------------------------------------------------------------------------------------------------------------------
# Stack outcome variable in one column by 1159 observation time, for each time, Age,sex,treatment,base keep same with ID
length(idnm)
length(id)
length(age)
length(sex)
length(treatment)
length(base)
length(base2)
dim(dNtd)
#install.packages("reshape")
library(reshape)
# BUG FIX: the original bound `id` (one entry per visit RECORD) into a data
# frame of n-row matrices and then told melt() to use "idnm" as an id
# variable; use the per-subject `idnm` vector, as every later block does.
df_dNtd = cbind.data.frame(idnm,age,sex,treatment,base,dNtd)
melt_dNtd = melt(df_dNtd,id=(c("idnm","age","sex","treatment","base")))
head(melt_dNtd)
dim(melt_dNtd)
observed = melt_dNtd$value
melt_dNtd$variable
obsdays_level = melt_dNtd$variable
length(obsdays_level)
table(obsdays_level)
df_Days = cbind.data.frame(idnm,age,sex,treatment,base,Days)
melt_Days = melt(df_Days,id=(c("idnm","age","sex","treatment","base")))
head(melt_Days)
dim(melt_Days)
obsDays = melt_Days$value
length(obsDays)
df_Days1 = cbind.data.frame(idnm,age,sex,treatment,base,Days1)
melt_Days1 = melt(df_Days1,id=(c("idnm","age","sex","treatment","base")))
head(melt_Days1)
dim(melt_Days1)
obsDays1 = melt_Days1$value
length(obsDays1)
df_Year = cbind.data.frame(idnm,age,sex,treatment,base,Year)
# BUG FIX: the original re-melted df_Days1 here, so melt_Year never existed
# and head(melt_Year)/obsYear below failed; melt df_Year instead.
melt_Year = melt(df_Year,id=(c("idnm","age","sex","treatment","base")))
head(melt_Year)
dim(melt_Year)
obsYear = melt_Year$value
length(obsYear)  # BUG FIX: was length(obsDays1), a copy-paste slip
df_Ytd = cbind.data.frame(idnm,age,sex,treatment,base,Ytd)
melt_Ytd <- melt(df_Ytd,id=(c("idnm","age","sex","treatment","base")))
head(melt_Ytd)
dim(melt_Ytd)
melt_Ytd$value
bothcc = melt_Ytd$value
df_N = cbind.data.frame(idnm,age,sex,treatment,base,N)
melt_N <- melt(df_N,id=(c("idnm","age","sex","treatment","base")))
head(melt_N)
dim(melt_N) #1159*290=336,110
bcc = melt_N$value
df_N2 = cbind.data.frame(idnm,age,sex,treatment,base,N2)
melt_N2 <- melt(df_N2,id=(c("idnm","age","sex","treatment","base")))
head(melt_N2)
dim(melt_N2)
scc = melt_N2$value
df_inn = cbind.data.frame(idnm,age,sex,treatment,base,inn)
melt_inn <- melt(df_inn,id=(c("idnm","age","sex","treatment","base")))
head(melt_inn)
dim(melt_inn)
bcadded = melt_inn$value
df_inn2 = cbind.data.frame(idnm,age,sex,treatment,base,inn2)
melt_inn2 <- melt(df_inn2,id=(c("idnm","age","sex","treatment","base")))
head(melt_inn2)
dim(melt_inn2)
scadded = melt_inn2$value
# Assemble the long-format analysis data set; drop melt's generic columns.
data = cbind.data.frame(obsdays_level,obsDays,obsDays1,melt_Days,observed,bcc,scc, bcadded,scadded,bothcc)
head(data)
data = subset(data,select = -c(variable,value))
head(data)
dim(data)
str(data)
# only keep the datapoint when observed
data1 = data[observed==1,]
dim(data1)
write.csv(data,
file="D:/001UNC Charlotte/2017Fall/Research Seminar/skinCanc.csv",row.names = F) # with obervatoin that not observed
write.csv(data1,
file="D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/skinCanc_observed.csv",row.names = F) # only keep observed values
#######################################################################################################
#######################################################################################################
#----------------------------------------------------------------------------------------
# Reload the long-format, observed-only data set built above and explore it.
skincanc <- read.csv("D:/001UNC Charlotte/2017Fall/Research Seminar/R code and dataset/skinCanc_observed.csv",header=TRUE)
dim(skincanc)
head(skincanc)
attach(skincanc)
str(skincanc)
library(lattice)
x= sort(unique(obsDays))
# NOTE(review): xyplot(bcc ~ x) pairs vectors of different lengths (bcc has one
# entry per observed record, x one per distinct day) -- likely errors; verify.
xyplot(bcc~x,data = skincanc)
xyplot(bcc~x,groups = idnm,data=skincanc)
xyplot(bcc~x,groups = sex,data=skincanc)
library(ggplot2)
#ggplot(skincanc,aes(x =obsDays))+geom_line(aes(y=bcc),colour="blue")+
# geom_line(aes(y=scc),colour="red")
ggplot(skincanc,aes(x = obsDays,y = bcc,colour=sex))+geom_line()+ylab(label="Cumulative Basal Lesions")+xlab(label="Observation Days")
ggplot(skincanc,aes(x = obsDays,y = scc,colour=treatment))+geom_line()+ylab(label="Cumulative Squamous Lesions")+xlab(label="Observation Days")
dev.off()
#-----------------------------------------------------------------------------------------
# calculate average by gender
totalObsTime = length(unique(obsDays))
totalObsTime
# Overall mean cumulative BCC per observation-time level.
bcc_mean <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean[i] <- mean(bcc[obsdays_level == i])
}
bcc_mean
length(bcc_mean)
plot(sort(unique(obsDays)), bcc_mean, xlab = "Observation Days", ylab = "bcc_mean",
     main = "Mean of Basal Lesions", col = 4, type = "l")
# BUG FIX (this loop and the three like it below): the original computed
# mean(v[m1] & v[m2]) -- the mean of a recycled logical AND of two differently
# subset vectors -- instead of subsetting once with a combined mask.
# (Assumes `sex` resolves to the skincanc column of the same length as bcc;
# an earlier global `sex` of length 290 may mask it -- verify.)
bcc_mean_male <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_male[i] <- mean(bcc[obsdays_level == i & sex == 1])
}
table(bcc_mean_male)
length(bcc_mean_male)
plot(sort(unique(obsDays)), bcc_mean_male, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Male",
     main = "Mean Basal Lesions of Male", col = 4, type = "l")
bcc_mean_female <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_female[i] <- mean(bcc[obsdays_level == i & sex == 0])
}
table(bcc_mean_female)
length(bcc_mean_female)
plot(sort(unique(obsDays)), bcc_mean_female, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Female",
     main = "Mean Basal Lesions of Female", col = "red", type = "l")
#ggplot(aes(x = sort(unique(obsDays)))) + geom_line(aes(y = bcc_mean_male),
# colour="blue")+
# geom_line(aes(y = bcc_mean_female,colour="red"))
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(bcc_mean_male, bcc_mean_female), pch = 20,
        xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions at different observation time")
matplot(x, cbind(bcc_mean, bcc_mean_male, bcc_mean_female), pch = 20,
        xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions at different observation time")
# SCC between gender
scc_mean_male <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_male[i] <- mean(scc[obsdays_level == i & sex == 1])
}
table(scc_mean_male)
length(scc_mean_male)
plot(sort(unique(obsDays)), scc_mean_male, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Male",
     main = "Mean Squamous Lesions of Male", col = 4, type = "l")
scc_mean_female <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_female[i] <- mean(scc[obsdays_level == i & sex == 0])
}
table(scc_mean_female)
length(scc_mean_female)
plot(sort(unique(obsDays)), scc_mean_female, xlab = "Observation Days",
     ylab = "Mean of Squmous Lesions for Female",
     main = "Mean Squamouse Lesions of Female", col = "red", type = "l")
#ggplot(aes(x = sort(unique(obsDays)))) + geom_line(aes(y = bcc_mean_male),
# colour="blue")+
# geom_line(aes(y = bcc_mean_female,colour="red"))
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(scc_mean_male, scc_mean_female), pch = ".",
        xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Squamous Lesions at different observation time")
dev.off()
#_________________________________________________________________________________________________________________
#-----------------------------------------------------------------------------------------
#Calculate average by treatment group
totalObsTime = length(unique(obsDays))
totalObsTime
# Mean cumulative BCC per observation-time level, by arm.
# BUG FIX (all four loops below): original computed mean(v[m1] & v[m2]) --
# the mean of a recycled logical AND of two differently subset vectors --
# rather than subsetting once with a combined mask.  (Assumes `treatment`
# resolves to the skincanc column of the same length as bcc/scc -- an earlier
# global `treatment` of length 290 may mask it; verify.)
bcc_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_treatment[i] <- mean(bcc[obsdays_level == i & treatment == 1])
}
table(bcc_mean_treatment)
length(bcc_mean_treatment)
plot(sort(unique(obsDays)), bcc_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Treatment Group",
     main = "Mean Basal Lesions of treatment", col = 4, type = "l")
bcc_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcc_mean_placebo[i] <- mean(bcc[obsdays_level == i & treatment == 0])
}
table(bcc_mean_placebo)
length(bcc_mean_placebo)
plot(sort(unique(obsDays)), bcc_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Placebo Group ",
     main = "Mean Basal Lesions of Placebo", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(bcc_mean_treatment, bcc_mean_placebo), pch = c("+", "-"),
        cex = c(0.5, 1), col = c(4, 2), xlab = "Observation time",
        ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions between Treatment at different observation time")
# scc by treatment group
totalObsTime = length(unique(obsDays))
totalObsTime
scc_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_treatment[i] <- mean(scc[obsdays_level == i & treatment == 1])
}
table(scc_mean_treatment)
length(scc_mean_treatment)
plot(sort(unique(obsDays)), scc_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Treatment Group",
     main = "Mean Squamouse Lesions of treat", col = 4, type = "l")
scc_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scc_mean_placebo[i] <- mean(scc[obsdays_level == i & treatment == 0])
}
table(scc_mean_placebo)
length(scc_mean_placebo)
plot(sort(unique(obsDays)), scc_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Placebo Group ",
     main = "Mean Squamouse Lesions of Placebo", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(scc_mean_treatment, scc_mean_placebo), pch = c("+", "-"),
        cex = c(0.5, 1), col = c(4, 2), xlab = "Observation time",
        ylab = "Mean of Squamouse Lesions",
        main = "Mean value of Squamouse Lesions between Treatment at different observation time")
#######################################################
#___ Increased Number --------
#Calculate average by treatment group for increased number
str(skincanc)
totalObsTime = length(unique(obsDays))
totalObsTime
# Mean NEWLY ADDED lesions per observation-time level, by arm.
# BUG FIX (all four loops below): original computed mean(v[m1] & v[m2]) --
# the mean of a recycled logical AND -- instead of subsetting once with a
# combined mask.  (Assumes `treatment` resolves to the skincanc column of the
# same length as bcadded/scadded -- verify against the attach() search path.)
bcadded_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcadded_mean_treatment[i] <- mean(bcadded[obsdays_level == i & treatment == 1])
}
table(bcadded_mean_treatment)
length(bcadded_mean_treatment)
plot(sort(unique(obsDays)), bcadded_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Treatment Group",
     main = "Mean Basal Lesions of treatment", col = 4, type = "l")
bcadded_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  bcadded_mean_placebo[i] <- mean(bcadded[obsdays_level == i & treatment == 0])
}
table(bcadded_mean_placebo)
length(bcadded_mean_placebo)
plot(sort(unique(obsDays)), bcadded_mean_placebo, xlab = "Observation Days",
     ylab = "Mean of Basal Lesions for Placebo Group ",
     main = "Mean Basal Lesions of Placebo", col = "red", type = "l")
x <- sort(unique(obsDays))
length(x)
matplot(x, cbind(bcadded_mean_treatment, bcadded_mean_placebo), pch = c("+", "-"),
        col = c(4, 2), xlab = "Observation time", ylab = "Mean of Basal Lesions",
        main = "Mean value of Basal Lesions between Treatment at different observation time")
# scadded by treatment group
totalObsTime = length(unique(obsDays))
totalObsTime
scadded_mean_treatment <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scadded_mean_treatment[i] <- mean(scadded[obsdays_level == i & treatment == 1])
}
table(scadded_mean_treatment)
length(scadded_mean_treatment)
plot(sort(unique(obsDays)), scadded_mean_treatment, xlab = "Observation Days",
     ylab = "Mean of Squamouse Lesions for Treatment Group",
     main = "Mean Squamouse Lesions of treat", col = 4, type = "l")
scadded_mean_placebo <- vector(mode = "numeric")
for (i in 1:totalObsTime) {
  scadded_mean_placebo[i] <- mean(scadded[obsdays_level == i & treatment == 0])
}
table(scadded_mean_placebo)
length(scadded_mean_placebo)
plot(sort(unique(obsDays)),scadded_mean_placebo, xlab = "Observation Days", ylab = "Mean of Squamouse Lesions for Placebo Group ",
main = "Mean Squamouse Lesions of Placebo",col ="red",type ="l")
x = sort((unique(obsDays)))
length(x)
matplot(x,cbind(scadded_mean_treatment,scadded_mean_placebo),pch =c("+","-"),cex = c(0.5,1),col=c(4,2),xlab="Observation time",ylab="Mean of Squamouse Lesions",
main="Mean value of Squamouse Lesions between Treatment at different observation time")
############################################################################################################
|
library("testthat")
library("recommenderlab")

set.seed(1234)

## Build a 10 x 10 user/item rating matrix with roughly 70% missing entries
## and ratings 0..5 spread uniformly over the remaining mass.
ratings <- matrix(
  as.numeric(sample(c(NA, 0:5), 100, replace = TRUE,
                    prob = c(.7, rep(.3/6, 6)))),
  nrow = 10, ncol = 10,
  dimnames = list(users = paste0("u", 1:10),
                  items = paste0("i", 1:10))
)

## The round trip through the sparse dropNA representation must be lossless,
## and the recovered NA mask must match the original one.
sparse <- dropNA(ratings)
expect_identical(ratings, dropNA2matrix(sparse))
expect_identical(as(dropNAis.na(sparse), "matrix"), is.na(ratings))
| /tests/testthat/test-dropNA.R | no_license | metaganal/recommenderlab | R | false | false | 421 | r | library("testthat")
library("recommenderlab")
set.seed(1234)
## create a matrix with ratings
db <- matrix(as.numeric(sample(c(NA,0:5),100, replace=TRUE,
prob=c(.7,rep(.3/6,6)))),
nrow=10, ncol=10, dimnames = list(
users=paste('u', 1:10, sep=''),
items=paste('i', 1:10, sep='')
))
sparse <- dropNA(db)
expect_identical(db, dropNA2matrix(sparse))
expect_identical(as(dropNAis.na(sparse), "matrix"), is.na(db))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{pct}
\alias{pct}
\title{Helper for providing a numeric value as percentage}
\usage{
pct(x)
}
\arguments{
\item{x}{the numeric value to format as a string percentage for some
\code{\link[=tab_options]{tab_options()}} arguments that can take percentage values
(e.g., \code{table.width}).}
}
\description{
Helper for providing a numeric value as percentage
}
\seealso{
Other helper functions:
\code{\link{cell_borders}()},
\code{\link{cell_fill}()},
\code{\link{cell_text}()},
\code{\link{cells_styles}()},
\code{\link{escape_latex}()},
\code{\link{gt_latex_dependencies}()},
\code{\link{html}()},
\code{\link{md}()},
\code{\link{px}()},
\code{\link{random_id}()}
}
\concept{helper functions}
| /man/pct.Rd | permissive | tomryder9/gt | R | false | true | 785 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{pct}
\alias{pct}
\title{Helper for providing a numeric value as percentage}
\usage{
pct(x)
}
\arguments{
\item{x}{the numeric value to format as a string percentage for some
\code{\link[=tab_options]{tab_options()}} arguments that can take percentage values
(e.g., \code{table.width}).}
}
\description{
Helper for providing a numeric value as percentage
}
\seealso{
Other helper functions:
\code{\link{cell_borders}()},
\code{\link{cell_fill}()},
\code{\link{cell_text}()},
\code{\link{cells_styles}()},
\code{\link{escape_latex}()},
\code{\link{gt_latex_dependencies}()},
\code{\link{html}()},
\code{\link{md}()},
\code{\link{px}()},
\code{\link{random_id}()}
}
\concept{helper functions}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
## Multiple-choice vectors ARE OUTSIDE FLUIDPAGE
animals <- c("dog", "cat", "porpoise")
state_name <- c("AL", "AK", "MA", "RI", "Non sequitur")
## User Interface
#----------------
user_interface <- fluidPage(
  titlePanel("INPUTS"),
  ## Free Text
  textInput(inputId = "name",
            label = "What's your name?",
            placeholder = "First Name"),
  passwordInput("password", "What's your password?",
                placeholder = "Tell me a secret"),
  textAreaInput("story",
                "Tell me about yourself",
                rows = 3,
                placeholder = "Expand here."),
  ## Numeric Values
  numericInput("num1", "X 1", value = 0, min = 0, max = 100),
  sliderInput("num2", "X 2", value = 50, min = 0, max = 100),
  sliderInput("rang", "Range", value = c(20, 80), min = 0, max = 100),
  ## Dates
  dateInput("dob", "Date of Birth?"),
  dateRangeInput("delivery_time", "Delivery Time Range?"),
  ## Multiple Choice
  selectInput("state", "What's your favourite state?", state_name),
  radioButtons("swimmer", "Who swims with you?", animals),
  ## Select More Than 1 Choice
  selectInput("states", "Choice a state & non sequitur",
              state_name,
              multiple = TRUE),
  # For a single checkbox for a single yes/no question
  checkboxInput("calc_value", "Calculate Equation?", value = FALSE),
  # Action buttons
  actionButton("click", "Click me!"),
  actionButton("drink", "Drink me!", icon = icon("cocktail")),
  # OUTPUT Text
  # Husker Du?
  # **renderText()** <-> **textOutput()**
  # **renderPrint()** <-> **verbatimTextOutput()**
  # BUG FIX: textOutput() takes an outputId -- it renders server output and is
  # not an input widget.  The original textOutput(inputId = ..., label = ...)
  # call failed with an "unused argument" error.
  textOutput("text"),
  verbatimTextOutput("print"),
  # Output Tables
  tableOutput("static"),
  dataTableOutput("dynamic"),
  plotOutput("negative_slope_plot")
)
## Define server functions
#-------------------------
server <- function(input, output) {
  # Output Text: echo the "name" text input into the "text" output slot.
  # BUG FIX: was renderText(input$text), but no input with id "text" exists;
  # the matching widget is textInput(inputId = "name", ...).
  output$text <- renderText(input$name)
  output$print <- renderPrint(3.1415926*3.1415926)
  # Output Table
  output$static <- renderTable(head(iris, n = 2))
  output$dynamic <- renderDataTable(iris, options = list(pageLength = 2))
  # Download button
  # requires new techniques in the server function,
  # so we’ll come back to that in Chapter 9.
  # Plot
  # BUG FIX: `res = 96` was a dead local assignment inside the plot expression;
  # it only has an effect as renderPlot()'s `res` argument (plot resolution).
  output$negative_slope_plot <- renderPlot({
    plot(1:5, 5:1,
         main = "Dependent vs Independent Variables",
         xlab = "X Variable",
         ylab = "Y Variable")
  }, res = 96)
} # To server function
# Run the application
shinyApp(ui = user_interface, server = server)
| /examples/02-basic_ui/app.R | no_license | shmuhammadd/Introduction_AdvR_bookclub | R | false | false | 2,937 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
## Multiple Choice ARE OUTSIDE FLUIDPAGE
animals <- c("dog", "cat", "porpoise")
state_name <- c("AL", "AK", "MA", "RI", "Non sequitur")
## User Interface
#----------------
user_interface <- fluidPage(
titlePanel("INPUTS"),
## Free Text
textInput(inputId = "name",
label = "What's your name?",
placeholder = "First Name"),
passwordInput("password", "What's your password?",
placeholder = "Tell me a secret"),
textAreaInput("story",
"Tell me about yourself",
rows = 3,
placeholder = "Expand here."),
## Numeric Values
numericInput("num1", "X 1", value = 0, min = 0, max = 100),
sliderInput("num2", "X 2", value = 50, min = 0, max = 100),
sliderInput("rang", "Range", value = c(20, 80), min = 0, max = 100),
## Dates
dateInput("dob", "Date of Birth?"),
dateRangeInput("delivery_time", "Delivery Time Range?"),
## Multiple Choice
selectInput("state", "What's your favourite state?", state_name),
radioButtons("swimmer", "Who swims with you?", animals),
## Select More Than 1 Choice
selectInput("states", "Choice a state & non sequitur",
state_name,
multiple = TRUE),
# For a single checkbox for a single yes/no question
checkboxInput("calc_value", "Calculate Equation?", value = FALSE),
# Action buttons
actionButton("click", "Click me!"),
actionButton("drink", "Drink me!", icon = icon("cocktail")),
# OUTPUT Text
# Husker Du?
# **renderText()** <-> **textOutput()**
# **renderPrint()** <-> **verbatimTextOutput()**
textOutput(inputId = "text", label = "tell me your name."),
verbatimTextOutput("print"),
# Output Tables
tableOutput("static"),
dataTableOutput("dynamic"),
#textOutput(name)
plotOutput("negative_slope_plot")
)
## Define server functions
#-------------------------
server <- function(input, output) {
# Output Text
output$text <- renderText(input$text)
output$print <- renderPrint(3.1415926*3.1415926)
# Output Table
output$static <- renderTable(head(iris, n = 2))
output$dynamic <- renderDataTable(iris, options = list(pageLength = 2))
# Download button
# requires new techniques in the server function,
# so we’ll come back to that in Chapter 9.
# Plot
output$negative_slope_plot <- renderPlot({
res = 96
plot(1:5, 5:1,
main = "Dependent vs Independent Variables",
xlab = "X Variable",
ylab = "Y Variable")
})
} # To server function
# Run the application
shinyApp(ui = user_interface, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert2igraph.R
\name{convert2igraph}
\alias{convert2igraph}
\title{Convert networks to \code{\link{igraph}}}
\usage{
convert2igraph(A, diagonal = 0)
}
\arguments{
\item{A}{Matrix or data frame.
\emph{N} x \emph{N} matrix where \emph{N} is the number of nodes}
\item{diagonal}{Numeric.
Value to be placed on the diagonal of \code{A}.
Defaults to \code{0}}
}
\value{
Returns a network in the \code{\link{igraph}} format
}
\description{
Converts networks to \code{\link{igraph}} format
}
\examples{
convert2igraph(ega.wmt$network)
}
\author{
Hudson Golino <hfg9s at virginia.edu> & Alexander P. Christensen <alexander.christensen at Vanderbilt.Edu>
}
| /man/convert2igraph.Rd | no_license | cran/EGAnet | R | false | true | 730 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert2igraph.R
\name{convert2igraph}
\alias{convert2igraph}
\title{Convert networks to \code{\link{igraph}}}
\usage{
convert2igraph(A, diagonal = 0)
}
\arguments{
\item{A}{Matrix or data frame.
\emph{N} x \emph{N} matrix where \emph{N} is the number of nodes}
\item{diagonal}{Numeric.
Value to be placed on the diagonal of \code{A}.
Defaults to \code{0}}
}
\value{
Returns a network in the \code{\link{igraph}} format
}
\description{
Converts networks to \code{\link{igraph}} format
}
\examples{
convert2igraph(ega.wmt$network)
}
\author{
Hudson Golino <hfg9s at virginia.edu> & Alexander P. Christensen <alexander.christensen at Vanderbilt.Edu>
}
|
library(visreg)
library(multcomp)
library(smatr)
library(emmeans)
library(car)
library(lme4)
library(MuMIn)
library(lmerTest)
library(LMERConvenienceFunctions)

alldata <- read.csv("calculated_data/ferns_traits_complete.csv")

# Reorder the habitat niche factor from ground to canopy.
alldata$niche2 <- factor(alldata$niche2,
                         levels = c("terrestrial", "hemi-epiphyte", "epiphyte"))

# Drop species outlier for stomatal density.
stomata_noout <- droplevels(alldata[!alldata$genusspecies == "oleart", ])

## Separate habitat dataframes for all traits -----
terr <- stomata_noout[stomata_noout$niche2 == "terrestrial", ]
hemi <- stomata_noout[stomata_noout$niche2 == "hemi-epiphyte", ]
epi <- stomata_noout[stomata_noout$niche2 == "epiphyte", ]

plot(stomatal_size ~ sd_mm2, data = stomata_noout)

# Bivariate mixed models: stomatal size vs density across niches, with a
# species random intercept.  Several transformations/parameterizations compared.
sizedens <- lmer(sqrt(stomatal_size) ~ sqrt(sd_mm2) * niche2
                 + (1|species), data = stomata_noout)
sizedens2 <- lmer(log10(sd_mm2) ~ log10(stomatal_size) * niche2
                  + (1|species), data = stomata_noout)
sizedens3 <- lmer(sqrt(sd_mm2) ~ sqrt(stomatal_size) + niche2
                  + (1|species), data = stomata_noout)
sizedens4 <- lmer(stomatal_size ~ sd_mm2 * niche2
                  + (1|species), data = stomata_noout)

# Model diagnostics (sqrt transformation works best).
qqPlot(residuals(sizedens))
plot(sizedens)
Anova(sizedens, type = 3)
anova(sizedens, sizedens3)
AIC(sizedens, sizedens3)    # no interaction, choose model without
Anova(sizedens3, type = 3)  # p = .09
summary(sizedens3)
r.squaredGLMM(sizedens3)
# R2m       R2c
# 0.2027541 0.8960833

anova(lm(sd_mm2 ~ stomatal_size, data = terr))
anova(lm(sd_mm2 ~ stomatal_size, data = hemi))
anova(lm(sd_mm2 ~ stomatal_size, data = epi))

# Test slopes and elevations with standardized major axis (SMA) regression.
# FIX: these fits previously reused the names sizedens3/sizedens4, silently
# clobbering the lmer models above; renamed for clarity.
# NOTE(review): the SMA fits use `alldata` (outlier included) while the mixed
# models use `stomata_noout` -- confirm this is intentional.
sizedens_sma_slope <- sma(sd_mm2 ~ stomatal_size * niche2,
                          data = alldata, multcomp = TRUE,
                          multcompmethod = 'adjusted')
summary(sizedens_sma_slope)  # slopes not equal, all different, all relationships sig
sizedens_sma_elev <- sma(sd_mm2 ~ stomatal_size + niche2,
                         data = alldata, multcomp = TRUE,
                         multcompmethod = 'adjusted')
summary(sizedens_sma_elev)   # elevations not equal
# elevations different for hemi-epi, but similar for terrstrial epi
# ------------------------------------------------------------
# Results of comparing lines among groups.
#
# H0 : slopes are equal.
#  Likelihood ratio statistic : 65.94 with 2 degrees of freedom
#  P-value : 4.774e-15
# ------------------------------------------------------------
#
# H0 : no difference in elevation.
#  Wald statistic: 9.036 with 2 degrees of freedom
#  P-value : 0.010912
# ------------------------------------------------------------

## For reviewer 1: does the niche effect on stomatal density hold within
## each site analysed separately?
selva <- stomata_noout[stomata_noout$site == "la_selva", ]
cruces <- stomata_noout[stomata_noout$site == "las_cruces", ]

selva_mod <- lmer(sqrt(sd_mm2) ~ niche2 + (1|species), data = selva)
cruces_mod <- lmer(sqrt(sd_mm2) ~ niche2 + (1|species), data = cruces)

# La Selva
plot(selva_mod)
qqPlot(residuals(selva_mod))
summary(selva_mod)
Anova(selva_mod, type = "3")  # only niche effect
# BUG FIX: was r.squaredGLM(), which does not exist in MuMIn; the function is
# r.squaredGLMM() (already used correctly earlier in this script).
r.squaredGLMM(selva_mod)
tukey_selva <- glht(selva_mod, linfct = mcp(niche2 = "Tukey"))
cld(tukey_selva)  # same

# Las Cruces
plot(cruces_mod)
qqPlot(residuals(cruces_mod))
summary(cruces_mod)
Anova(cruces_mod, type = "3")  # only niche effect
r.squaredGLMM(cruces_mod)  # BUG FIX: was r.squaredGLM()
tukey_cruces <- glht(cruces_mod, linfct = mcp(niche2 = "Tukey"))
cld(tukey_cruces)  # not same
| /stats/stomata_regression.R | no_license | CourtneyCampany/sporgasm | R | false | false | 3,482 | r | library(visreg)
library(multcomp)
library(smatr)
library(emmeans)
library(car)
library(lme4)
library(MuMIn)
library(lmerTest)
library(LMERConvenienceFunctions)
alldata <- read.csv("calculated_data/ferns_traits_complete.csv")
#reorder from ground to canopy
alldata$niche2<-factor(alldata$niche2,
levels=c("terrestrial", "hemi-epiphyte", "epiphyte"))
#drop species outlier for stom density
stomata_noout <- droplevels(alldata[!alldata$genusspecies == "oleart",])
##separate habitat dataframes for all traits -----
terr <- stomata_noout[stomata_noout$niche2 == "terrestrial",]
hemi <- stomata_noout[stomata_noout$niche2 == "hemi-epiphyte" ,]
epi <- stomata_noout[stomata_noout$niche2 == "epiphyte",]
plot(stomatal_size ~ sd_mm2, data=stomata_noout)
#bivariate mixed model
sizedens <- lmer(sqrt(stomatal_size) ~ sqrt(sd_mm2) * niche2
+ (1|species), data=stomata_noout)
sizedens2 <- lmer(log10(sd_mm2) ~ log10(stomatal_size) * niche2
+ (1|species), data=stomata_noout)
sizedens3 <- lmer(sqrt(sd_mm2) ~ sqrt(stomatal_size) + niche2
+ (1|species), data=stomata_noout)
sizedens4 <- lmer(stomatal_size ~ sd_mm2 * niche2
+ (1|species), data=stomata_noout)
#model diagnostics (sqrt trans works best)
qqPlot(residuals(sizedens))
plot(sizedens)
Anova(sizedens, type=3)
anova(sizedens, sizedens3)
AIC(sizedens, sizedens3) #no interaction, choose model without
Anova(sizedens3, type=3) #p = .09
summary(sizedens3)
r.squaredGLMM(sizedens3)
#R2m R2c
#0.2027541 0.8960833
anova(lm(sd_mm2 ~ stomatal_size , data=terr))
anova(lm(sd_mm2 ~ stomatal_size , data=hemi))
anova(lm(sd_mm2 ~ stomatal_size , data=epi))
#test slopes and elevations
sizedens3 <- sma(sd_mm2 ~ stomatal_size * niche2,
data=alldata, multcomp = TRUE,
multcompmethod='adjusted') #slopes not equal
summary(sizedens3) #slopes not equal, all different, all relationships sig
sizedens4 <- sma(sd_mm2 ~ stomatal_size + niche2,
data=alldata, multcomp = TRUE,
multcompmethod='adjusted') #slopes not equal
summary(sizedens4) #elevations not equal
#elevations different for hemi-epi, but similar for terrstrial epi
# ------------------------------------------------------------
# Results of comparing lines among groups.
#
# H0 : slopes are equal.
# Likelihood ratio statistic : 65.94 with 2 degrees of freedom
# P-value : 4.774e-15
# ------------------------------------------------------------
#
# H0 : no difference in elevation.
# Wald statistic: 9.036 with 2 degrees of freedom
# P-value : 0.010912
# ------------------------------------------------------------
##For reviewer1
selva <- stomata_noout[stomata_noout$site == "la_selva",]
cruces <- stomata_noout[stomata_noout$site == "las_cruces" ,]
selva_mod <- lmer(sqrt(sd_mm2) ~ niche2 + (1|species), data=selva)
cruces_mod <- lmer(sqrt(sd_mm2) ~ niche2 + (1|species), data=cruces)
#laselva
plot(selva_mod)
qqPlot(residuals(selva_mod))
summary(selva_mod)
Anova(selva_mod, type="3") #only niche effect
r.squaredGLM(selva_mod)
tukey_selva <- glht(selva_mod, linfct = mcp(niche2 = "Tukey"))
cld(tukey_selva) #same
#lascruces
plot(cruces_mod)
qqPlot(residuals(cruces_mod))
summary(cruces_mod)
Anova(cruces_mod, type="3") #only niche effect
r.squaredGLM(cruces_mod)
tukey_cruces <- glht(cruces_mod, linfct = mcp(niche2 = "Tukey"))
cld(tukey_cruces) #not same
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HLP_geneRanges.R
\name{subsetByOverlaps.keepAllMeta}
\alias{subsetByOverlaps.keepAllMeta}
\alias{granges2df}
\title{Help functions for handling gene ranges}
\usage{
subsetByOverlaps.keepAllMeta(
gr1,
gr2,
write.ranges.tofile = NULL,
addStart = 0
)
granges2df(gr1, addStart = 0)
}
\arguments{
\item{gr1, gr2}{GRanges object}
\item{write.ranges.tofile}{character with file path. If given, a data.frame is generated
from the returned GRanges object and written to this destination. Omitted if NULL.}
\item{addStart}{numeric with number of bases to be added to the start coordinate
when converting a GRanges object to a dataframe. E.g. necessary for switching between
0-based and 1-based genome coordinate systems.}
}
\value{
\code{subsetByOverlaps.keepAllMeta} returns a GRanges object containing
overlap and meta data from input ranges.
\code{granges2df} returns a data.frame with genomic coordinates and meta data
from input object.
}
\description{
Keep all meta data when overlapping gene ranges or converting to dataframe.
}
\details{
By default, meta data is lost when GRanges are merged or overlapped.
\code{subsetByOverlaps.keepAllMeta} returns overlap of two GRanges objects with
merged meta data from both. Meta data stored in \code{CompressedCharacterList}
is collapsed to a single column.
\code{granges2df} generates a data.frame from an GRanges containing the meta data.
Meta data stored in \code{CompressedCharacterList} is collapsed to a single column.
}
\section{Functions}{
\itemize{
\item \code{subsetByOverlaps.keepAllMeta}: subsetByOverlaps which keeps meta data from both objects
\item \code{granges2df}: Convert GRanges object to dataframe
}}
\author{
Frank Ruehle
}
| /man/subsetByOverlaps.keepAllMeta.Rd | no_license | frankRuehle/systemsbio | R | false | true | 1,781 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HLP_geneRanges.R
\name{subsetByOverlaps.keepAllMeta}
\alias{subsetByOverlaps.keepAllMeta}
\alias{granges2df}
\title{Help functions for handling gene ranges}
\usage{
subsetByOverlaps.keepAllMeta(
gr1,
gr2,
write.ranges.tofile = NULL,
addStart = 0
)
granges2df(gr1, addStart = 0)
}
\arguments{
\item{gr1, gr2}{GRanges object}
\item{write.ranges.tofile}{character with file path. If given, a data.frame is generated
from the returned GRanges object and written to this destination. Omitted if NULL.}
\item{addStart}{numeric with mumber of bases to be added to the start coordinate
when converting a GRanges object to a dataframe. E.g. necessary for switching between
0-based and 1-based genome coordinate systems.}
}
\value{
\code{subsetByOverlaps.keepAllMeta} returnes GRanges object containing
overlap and meta data from input ranges.
\code{granges2df} returns a data.frame with genomic coordinates and meta data
from input object.
}
\description{
Keep all meta data when overlapping gene ranges or converting to dataframe.
}
\details{
By default, meta data is lost when GRanges are merged or overlapped.
\code{subsetByOverlaps.keepAllMeta} returns overlap of two GRanges objects with
merged meta data from both. Meta data stored in \code{CompressedCharacterList}
is collapsed to a single column.
\code{granges2df} generates a data.frame from an GRanges containing the meta data.
Meta data stored in \code{CompressedCharacterList} is collapsed to a single column.
}
\section{Functions}{
\itemize{
\item \code{subsetByOverlaps.keepAllMeta}: subsetByOverlaps which keeps meta data from both objects
\item \code{granges2df}: Convert GRanges object to dataframe
}}
\author{
Frank Ruehle
}
|
#Author: Thomas Hollis
#Subject: Bachelor Thesis
#0.1 Required Packages
library(lmtest)
library(vars)
#0.2 Normalize function
# Min-max scale `x` to [0, 1], then shift so the result is centred on zero,
# i.e. every element lands in [-0.5, 0.5].
normalize <- function(x)
{
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1]) - 0.5
}
#0.3 Data processing (custom data webscraped off CoinMarketCap)
# Load daily close prices for seven cryptocurrencies (one CSV per coin,
# 365 days each).
GrangerData1 <- read.csv("R/DB_CMC_BTC_365.csv")
GrangerData2 <- read.csv("R/DB_CMC_LTC_365.csv")
GrangerData3 <- read.csv("R/DB_CMC_XRP_365.csv")
GrangerData4 <- read.csv("R/DB_CMC_ETH_365.csv")
GrangerData5 <- read.csv("R/DB_CMC_DASH_365.csv")
GrangerData6 <- read.csv("R/DB_CMC_BCH_365.csv")
GrangerData7 <- read.csv("R/DB_CMC_IOTA_365.csv")
# Min-max normalize each close-price series to [-0.5, 0.5] so coins with very
# different price levels can be overplotted on a common scale.
GrangerData1Price_N <- normalize(GrangerData1$Close)
GrangerData2Price_N <- normalize(GrangerData2$Close)
GrangerData3Price_N <- normalize(GrangerData3$Close)
GrangerData4Price_N <- normalize(GrangerData4$Close)
GrangerData5Price_N <- normalize(GrangerData5$Close)
GrangerData6Price_N <- normalize(GrangerData6$Close)
GrangerData7Price_N <- normalize(GrangerData7$Close)
# Reverse each series so time runs left to right in the plots.
# NOTE(review): this assumes the CSVs are ordered newest-first -- confirm.
GrangerData1Price_NR <- GrangerData1Price_N[length(GrangerData1Price_N):1]
GrangerData2Price_NR <- GrangerData2Price_N[length(GrangerData2Price_N):1]
GrangerData3Price_NR <- GrangerData3Price_N[length(GrangerData3Price_N):1]
GrangerData4Price_NR <- GrangerData4Price_N[length(GrangerData4Price_N):1]
GrangerData5Price_NR <- GrangerData5Price_N[length(GrangerData5Price_N):1]
GrangerData6Price_NR <- GrangerData6Price_N[length(GrangerData6Price_N):1]
GrangerData7Price_NR <- GrangerData7Price_N[length(GrangerData7Price_N):1]
# Overlay the five series on one device using par(new = TRUE); all series
# share the same normalized range, so the implicit y-axes coincide.
plot(GrangerData1Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "black")
par(new = TRUE)
plot(GrangerData2Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "blue")
par(new = TRUE)
plot(GrangerData3Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "green")
par(new = TRUE)
plot(GrangerData4Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "red")
par(new = TRUE)
plot(GrangerData5Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "purple")
par(new = TRUE)
# BCH and IOTA are loaded above but excluded from the overlay plot.
#plot(GrangerData6Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "yellow")
#par(new = TRUE)
#plot(GrangerData7Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "orange")
legend("topleft", legend=c("BTC", "LTC", "XRP", "ETH", "DASH"), col=c("black", "blue", "green", "red", "purple"), bty = "n", cex = 0.9, pch = 16, text.col = "black")
#Test 1: Granger with custom lag order
# lmtest::grangertest(y ~ x, order = k): does x Granger-cause y at lag k?
grangertest(GrangerData1Price_NR~GrangerData2Price_NR, order=100) #granger test with lag of 100, normalised data
#Test 2: Granger with optimal lag order via AIC
# First-difference the normalized price series (differencing for stationarity
# before fitting the VAR).
GrangerData1Price_NRD <- diff(GrangerData1Price_NR)
GrangerData2Price_NRD <- diff(GrangerData2Price_NR)
GrangerData3Price_NRD <- diff(GrangerData3Price_NR)
GrangerData4Price_NRD <- diff(GrangerData4Price_NR)
GrangerData5Price_NRD <- diff(GrangerData5Price_NR)
#GrangerData6Price_NRD <- diff(GrangerData6Price_NR)
#GrangerData7Price_NRD <- diff(GrangerData7Price_NR)
# Fit a bivariate VAR (BTC, LTC) with lag order selected by AIC (up to 10
# lags), then run the Granger causality test on the fitted system.
GrangerData1Price_NRD_pair1 <- cbind(GrangerData1Price_NRD, GrangerData2Price_NRD)
GrangerData_VAR=VAR(GrangerData1Price_NRD_pair1, type="const", lag.max=10, ic="AIC")
causality(GrangerData_VAR, cause = "GrangerData1Price_NRD")$Granger
#Test 3: Granger with binary data?
GrangerData1Price_NRD <- diff(GrangerData1Price_NR)
GrangerData2Price_NRD <- diff(GrangerData2Price_NR)
GrangerData3Price_NRD <- diff(GrangerData3Price_NR)
GrangerData4Price_NRD <- diff(GrangerData4Price_NR)
GrangerData5Price_NRD <- diff(GrangerData5Price_NR)
#GrangerData6Price_NRD <- diff(GrangerData6Price_NR)
#GrangerData7Price_NRD <- diff(GrangerData7Price_NR)

# BUG FIX: the original for-loops ran i over 310:340 (31 values) but wrote to
# index i - 311, so i = 310 produced index -1 (negative subassignment DROPS
# elements) and i = 311 produced index 0 (a silent no-op); only 29 of the 30
# preallocated slots were ever filled as intended.  Each binary up/down
# indicator (1 = price rose that day, 0 = fell or unchanged) is now built
# directly from the last 30 price differences.
# NOTE(review): the intended window was ambiguous in the original; days
# 311:340 of the differenced series are assumed here -- confirm.
updown_last30 <- function(d) as.integer(d[311:340] > 0)

GrangerData1Price_NRDbin <- updown_last30(GrangerData1Price_NRD)
GrangerData2Price_NRDbin <- updown_last30(GrangerData2Price_NRD)
GrangerData3Price_NRDbin <- updown_last30(GrangerData3Price_NRD)
GrangerData4Price_NRDbin <- updown_last30(GrangerData4Price_NRD)
GrangerData5Price_NRDbin <- updown_last30(GrangerData5Price_NRD)

GrangerData1Price_NRDbin
GrangerData2Price_NRDbin
GrangerData3Price_NRDbin
GrangerData4Price_NRDbin
GrangerData5Price_NRDbin

plot(GrangerData1Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "black")
plot(GrangerData2Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "blue")
plot(GrangerData3Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "green")
plot(GrangerData4Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "red")
plot(GrangerData5Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "purple")
| /src/Ex11_GrangerCausalityTests.R | permissive | ipesTTH/Bachelor-Thesis | R | false | false | 5,580 | r | #Author: Thomas Hollis
#Subject: Bachelor Thesis
#0.1 Required Packages
library(lmtest)
library(vars)
#0.2 Normalize function
normalize <- function(x)
{
return ((x - min(x)) / (max(x) - min(x))-0.5)
}
#0.3 Data processing (custom data webscraped off CoinMarketCap)
GrangerData1 <- read.csv("R/DB_CMC_BTC_365.csv")
GrangerData2 <- read.csv("R/DB_CMC_LTC_365.csv")
GrangerData3 <- read.csv("R/DB_CMC_XRP_365.csv")
GrangerData4 <- read.csv("R/DB_CMC_ETH_365.csv")
GrangerData5 <- read.csv("R/DB_CMC_DASH_365.csv")
GrangerData6 <- read.csv("R/DB_CMC_BCH_365.csv")
GrangerData7 <- read.csv("R/DB_CMC_IOTA_365.csv")
GrangerData1Price_N <- normalize(GrangerData1$Close)
GrangerData2Price_N <- normalize(GrangerData2$Close)
GrangerData3Price_N <- normalize(GrangerData3$Close)
GrangerData4Price_N <- normalize(GrangerData4$Close)
GrangerData5Price_N <- normalize(GrangerData5$Close)
GrangerData6Price_N <- normalize(GrangerData6$Close)
GrangerData7Price_N <- normalize(GrangerData7$Close)
GrangerData1Price_NR <- GrangerData1Price_N[length(GrangerData1Price_N):1]
GrangerData2Price_NR <- GrangerData2Price_N[length(GrangerData2Price_N):1]
GrangerData3Price_NR <- GrangerData3Price_N[length(GrangerData3Price_N):1]
GrangerData4Price_NR <- GrangerData4Price_N[length(GrangerData4Price_N):1]
GrangerData5Price_NR <- GrangerData5Price_N[length(GrangerData5Price_N):1]
GrangerData6Price_NR <- GrangerData6Price_N[length(GrangerData6Price_N):1]
GrangerData7Price_NR <- GrangerData7Price_N[length(GrangerData7Price_N):1]
plot(GrangerData1Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "black")
par(new = TRUE)
plot(GrangerData2Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "blue")
par(new = TRUE)
plot(GrangerData3Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "green")
par(new = TRUE)
plot(GrangerData4Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "red")
par(new = TRUE)
plot(GrangerData5Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "purple")
par(new = TRUE)
#plot(GrangerData6Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "yellow")
#par(new = TRUE)
#plot(GrangerData7Price_NR, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "orange")
legend("topleft", legend=c("BTC", "LTC", "XRP", "ETH", "DASH"), col=c("black", "blue", "green", "red", "purple"), bty = "n", cex = 0.9, pch = 16, text.col = "black")
#Test 1: Granger with custom lag order
grangertest(GrangerData1Price_NR~GrangerData2Price_NR, order=100) #granger test with lag of 2, normalised data
#Test 2: Granger with optimal lag order via AIC
GrangerData1Price_NRD <- diff(GrangerData1Price_NR)
GrangerData2Price_NRD <- diff(GrangerData2Price_NR)
GrangerData3Price_NRD <- diff(GrangerData3Price_NR)
GrangerData4Price_NRD <- diff(GrangerData4Price_NR)
GrangerData5Price_NRD <- diff(GrangerData5Price_NR)
#GrangerData6Price_NRD <- diff(GrangerData6Price_NR)
#GrangerData7Price_NRD <- diff(GrangerData7Price_NR)
GrangerData1Price_NRD_pair1 <- cbind(GrangerData1Price_NRD, GrangerData2Price_NRD)
GrangerData_VAR=VAR(GrangerData1Price_NRD_pair1, type="const", lag.max=10, ic="AIC")
causality(GrangerData_VAR, cause = "GrangerData1Price_NRD")$Granger
#Test 3: Granger with binary data?
GrangerData1Price_NRD <- diff(GrangerData1Price_NR)
GrangerData2Price_NRD <- diff(GrangerData2Price_NR)
GrangerData3Price_NRD <- diff(GrangerData3Price_NR)
GrangerData4Price_NRD <- diff(GrangerData4Price_NR)
GrangerData5Price_NRD <- diff(GrangerData5Price_NR)
#GrangerData6Price_NRD <- diff(GrangerData6Price_NR)
#GrangerData7Price_NRD <- diff(GrangerData7Price_NR)
GrangerData1Price_NRDbin = integer(30)
GrangerData2Price_NRDbin = integer(30)
GrangerData3Price_NRDbin = integer(30)
GrangerData4Price_NRDbin = integer(30)
GrangerData5Price_NRDbin = integer(30)
#GrangerData6Price_NRDbin = integer(30)
#GrangerData7Price_NRDbin = integer(30)
for (i in 310:340)
{
if (GrangerData1Price_NRD[i] > 0)
{
GrangerData1Price_NRDbin[i-311] <- 1
}
else
{
GrangerData1Price_NRDbin[i-311] <- 0
}
}
for (i in 310:340)
{
if (GrangerData2Price_NRD[i] > 0)
{
GrangerData2Price_NRDbin[i-311] <- 1
}
else
{
GrangerData2Price_NRDbin[i-311] <- 0
}
}
for (i in 310:340)
{
if (GrangerData3Price_NRD[i] > 0)
{
GrangerData3Price_NRDbin[i-311] <- 1
}
else
{
GrangerData3Price_NRDbin[i-311] <- 0
}
}
for (i in 310:340)
{
if (GrangerData4Price_NRD[i] > 0)
{
GrangerData4Price_NRDbin[i-311] <- 1
}
else
{
GrangerData4Price_NRDbin[i-311] <- 0
}
}
for (i in 310:340)
{
if (GrangerData5Price_NRD[i] > 0)
{
GrangerData5Price_NRDbin[i-311] <- 1
}
else
{
GrangerData5Price_NRDbin[i-311] <- 0
}
}
GrangerData1Price_NRDbin
GrangerData2Price_NRDbin
GrangerData3Price_NRDbin
GrangerData4Price_NRDbin
GrangerData5Price_NRDbin
plot(GrangerData1Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "black")
plot(GrangerData2Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "blue")
plot(GrangerData3Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "green")
plot(GrangerData4Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "red")
plot(GrangerData5Price_NRDbin, xlab = "Days elapsed", ylab = "Price (USD)", type = "l", col = "purple")
|
library(ape)
testtree <- read.tree("5371_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5371_0_unrooted.txt") | /codeml_files/newick_trees_processed/5371_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("5371_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5371_0_unrooted.txt") |
# Script Description --------------------
# This script creates a stacked area graph of multi-temporal land cover data, particularly
# the land cover maps produced using combined Landsat+SAR data (optical Landsat 5 TM/8 OLI;
# L-band SAR JERS-1/ALOS-PALSAR-1/ALOS-PALSAR-2) at three time-points: 1996,2007,2016 for
# Myanmar. The land cover maps consist of 10 categories including: bare ground, built-up,
# forest, ice/snow, mangrove, oil palm, rice paddy, rubber, shrubland, water.
#
# Script By: Jose Don T De Alban
# Date Created: 20 Nov 2017
# Last Modified: 09 May 2019
# Set Working Directory -----------------
setwd("/Users/dondealban/Dropbox/Research/myanmar/3 mmr land cover transitions/stacked area/15_yangon")
# Load Libraries and Data ---------------
library(reshape2)
library(tidyverse)
# Read csv files in the directory and store as a list
filenames <- list.files()
# Function to read data
readdata <- function(filename) {
df <- read.csv(filename, sep="\t")
vec <- df[, 3] # Read column with percentage values
names(vec) <- df[, 1] # Read column with class codes
return(vec)
}
# Combine as class codes and percentage values in a matrix
temp <- do.call(rbind, lapply(filenames, readdata))
colnames(temp) <- c("1","2","4","5","6","7","8","9","10")
# Add years as another column
row.names(temp) <- c("1996","2007","2016")
# Convert wide format data frame into long format data frame
data <- melt(temp, id.vars="years", variable.name="class", value.name="percentage")
colnames(data) <- c("Years","Class","Percentage")
# Create Stacked Area Graphs ------------
plot <- ggplot() + geom_area(aes(x=Years, y=Percentage, fill=factor(Class,
labels=c("Built-up",
"Forest",
"Mangrove",
"Oil Palm Mature",
"Rice Paddy",
"Rubber Mature",
"Shrubland",
"Water",
"Bare Ground"))),
data=data)
plot <- plot + labs(title="Net Land Cover Transitions: Yangon Region", x="Year", y="Percentage of Landscape", fill="Land Cover Category")
plot <- plot + guides(fill=guide_legend(ncol=1))
plot <- plot + theme_bw()
plot <- plot + scale_fill_manual(values=c("#ff0000","#246a24","#6666ff","#ff8000",
"#a65400","#ff00ff","#ccff66","#66ccff","#ffff66"))
plot <- plot + scale_x_continuous(breaks=c(1996,2007,2016))
plot <- plot + theme(legend.position="none")
ggsave(plot, file="StackedArea-15-Yangon.pdf", width=16, height=15, units="cm", dpi=300)
| /3 mmr land cover transitions/archives/scripts/R/MMR.StackedArea.LandsatSAR.15.Yangon.R | no_license | dondealban/myanmar | R | false | false | 2,676 | r | # Script Description --------------------
# This script creates a stacked area graph of multi-temporal land cover data, particularly
# the land cover maps produced using combined Landsat+SAR data (optical Landsat 5 TM/8 OLI;
# L-band SAR JERS-1/ALOS-PALSAR-1/ALOS-PALSAR-2) at three time-points: 1996,2007,2016 for
# Myanmar. The land cover maps consist of 10 categories including: bare ground, built-up,
# forest, ice/snow, mangrove, oil palm, rice paddy, rubber, shrubland, water.
#
# Script By: Jose Don T De Alban
# Date Created: 20 Nov 2017
# Last Modified: 09 May 2019
# Set Working Directory -----------------
setwd("/Users/dondealban/Dropbox/Research/myanmar/3 mmr land cover transitions/stacked area/15_yangon")
# Load Libraries and Data ---------------
library(reshape2)
library(tidyverse)
# Read csv files in the directory and store as a list
filenames <- list.files()
# Function to read data
readdata <- function(filename) {
df <- read.csv(filename, sep="\t")
vec <- df[, 3] # Read column with percentage values
names(vec) <- df[, 1] # Read column with class codes
return(vec)
}
# Combine as class codes and percentage values in a matrix
temp <- do.call(rbind, lapply(filenames, readdata))
colnames(temp) <- c("1","2","4","5","6","7","8","9","10")
# Add years as another column
row.names(temp) <- c("1996","2007","2016")
# Convert wide format data frame into long format data frame
data <- melt(temp, id.vars="years", variable.name="class", value.name="percentage")
colnames(data) <- c("Years","Class","Percentage")
# Create Stacked Area Graphs ------------
plot <- ggplot() + geom_area(aes(x=Years, y=Percentage, fill=factor(Class,
labels=c("Built-up",
"Forest",
"Mangrove",
"Oil Palm Mature",
"Rice Paddy",
"Rubber Mature",
"Shrubland",
"Water",
"Bare Ground"))),
data=data)
plot <- plot + labs(title="Net Land Cover Transitions: Yangon Region", x="Year", y="Percentage of Landscape", fill="Land Cover Category")
plot <- plot + guides(fill=guide_legend(ncol=1))
plot <- plot + theme_bw()
plot <- plot + scale_fill_manual(values=c("#ff0000","#246a24","#6666ff","#ff8000",
"#a65400","#ff00ff","#ccff66","#66ccff","#ffff66"))
plot <- plot + scale_x_continuous(breaks=c(1996,2007,2016))
plot <- plot + theme(legend.position="none")
ggsave(plot, file="StackedArea-15-Yangon.pdf", width=16, height=15, units="cm", dpi=300)
|
# Load library
library(data.table)
# Get data
wine.data <- fread('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data')
# Show top 10 lines of dataset
head(wine.data)
# Show distribution of cultivars (col V1)
table(wine.data$V1)
# Extract col 1 (just the cultivars) to wine.type
wine.type <- wine.data[,1]
# Extract all other columns (the wine properties) to wine.features
wine.features <- wine.data[,-1]
# Scale the feature data
wine.features.scaled <- data.frame(scale(wine.features))
# Convert dataframe to a matrix
wine.mat <- data.matrix(wine.features.scaled)
# Add row names (seq numbers)
rownames(wine.mat) <- seq(1:dim(wine.features.scaled)[1])
# Show first two rows
wine.mat[1:2,]
# Find the Pearson Coefficient between the rows.
# Transpose matrix
wine.mat <- t(wine.mat)
# Find the similarity matrix
cor.matrix <- cor(wine.mat, use = "pairwise.complete.obs", method = "pearson")
dim(cor.matrix)
cor.matrix[1:5,1:5]
# -1 = perfect negative correlation, +1 = perfect positive correlation
# Create recommendation for User looking at Wine 3
user.view <- wine.features.scaled[3,]
user.view
sim.items <- cor.matrix[3,]
sim.items
sim.items.sorted <- sort(sim.items, decreasing = TRUE)
sim.items.sorted[1:5]
# Confirm top 5
rbind(wine.data[3,]
,wine.data[52,]
,wine.data[51,]
,wine.data[85,]
,wine.data[15,]
)
| /R-Rec-Content-Filter-Fuzzy-Wine.R | permissive | GavinBWebster/r-rec-content-filter-fuzzy-wine | R | false | false | 1,377 | r | # Load library
library(data.table)
# Get data
wine.data <- fread('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data')
# Show top 10 lines of dataset
head(wine.data)
# Show distribution of cultivars (col V1)
table(wine.data$V1)
# Extract col 1 (just the cultivars) to wine.type
wine.type <- wine.data[,1]
# Extract all other columns (the wine properties) to wine.features
wine.features <- wine.data[,-1]
# Scale the feature data
wine.features.scaled <- data.frame(scale(wine.features))
# Convert dataframe to a matrix
wine.mat <- data.matrix(wine.features.scaled)
# Add row names (seq numbers)
rownames(wine.mat) <- seq(1:dim(wine.features.scaled)[1])
# Show first two rows
wine.mat[1:2,]
# Find the Pearson Coefficient between the rows.
# Transpose matrix
wine.mat <- t(wine.mat)
# Find the similarity matrix
cor.matrix <- cor(wine.mat, use = "pairwise.complete.obs", method = "pearson")
dim(cor.matrix)
cor.matrix[1:5,1:5]
# -1 = perfect negative correlation, +1 = perfect positive correlation
# Create recommendation for User looking at Wine 3
user.view <- wine.features.scaled[3,]
user.view
sim.items <- cor.matrix[3,]
sim.items
sim.items.sorted <- sort(sim.items, decreasing = TRUE)
sim.items.sorted[1:5]
# Confirm top 5
rbind(wine.data[3,]
,wine.data[52,]
,wine.data[51,]
,wine.data[85,]
,wine.data[15,]
)
|
####################
# #
# Copiar todo esto #
# #
####################
# Hecho con gusto por Rafa @GonzalezGouveia
# video 06 – matrices
# Objetivo: estudiar qué es una matriz en R.
# ——————————————–
# En este ejercicio vamos a:
# 1. Crear matrices en un script
# 2. Realizar opraciones aritméticas con matrices
# 3. Seleccionar elementos en una matriz
#####################################
# práctica 1: creando matrices en R #
#####################################
# crear vectores para las columnas de la matriz
warner <- c(20,20,16,17,17,22,17,18,19)
disney <- c(11,13,11,8,12,11,12,8,10)
fox<-c(18,15,15,15,16,17,15,13,11)
# fuente https://www.the-numbers.com/market/
# creando matriz a partir de vectores
peliculas <- matrix(c(warner,disney,fox),nrow = 9, ncol = 3)
# imprimir matriz en consola
peliculas
# agregar nombres de columnas
colnames(peliculas) <- c("warner", "disney", "fox")
# agregar nombres de filas/renglones
rownames(peliculas) <- c('2010','2011','2012','2013','2014','2015','2016','2017', '2018')
# imprimir matriz por segunda vez
peliculas
####################################################
# práctica 2: operaciones aritméticas con matrices #
####################################################
# resta 5 a la matriz
peliculas -5
# sumar matriz consigo misma
peliculas + peliculas
# multiplicar la matriz consigo mismo
peliculas * peliculas
###################################################
# práctica 3: selección de elementos de un matriz #
###################################################
# seleccionar un elemento de la matriz
peliculas[3,2]
peliculas['2012', 'disney']
# seleccionar más de un elemento de la matriz
peliculas[c(3,4), c(2,3)]
peliculas[c(3,4),c("disney","fox")]
# seleccionar una fila o renglón
peliculas[3,]
peliculas['2012',]
# seleccionar una columna
peliculas[,2]
# Hecho con gusto por Rafa @GonzalezGouveia
# Suscribete para más código en R https://bit.ly/2WNDhNR | /Matrices.R | no_license | NicolasGomez97/R-Programming | R | false | false | 2,016 | r | ####################
# #
# Copiar todo esto #
# #
####################
# Hecho con gusto por Rafa @GonzalezGouveia
# video 06 – matrices
# Objetivo: estudiar qué es una matriz en R.
# ——————————————–
# En este ejercicio vamos a:
# 1. Crear matrices en un script
# 2. Realizar opraciones aritméticas con matrices
# 3. Seleccionar elementos en una matriz
#####################################
# práctica 1: creando matrices en R #
#####################################
# crear vectores para las columnas de la matriz
warner <- c(20,20,16,17,17,22,17,18,19)
disney <- c(11,13,11,8,12,11,12,8,10)
fox<-c(18,15,15,15,16,17,15,13,11)
# fuente https://www.the-numbers.com/market/
# creando matriz a partir de vectores
peliculas <- matrix(c(warner,disney,fox),nrow = 9, ncol = 3)
# imprimir matriz en consola
peliculas
# agregar nombres de columnas
colnames(peliculas) <- c("warner", "disney", "fox")
# agregar nombres de filas/renglones
rownames(peliculas) <- c('2010','2011','2012','2013','2014','2015','2016','2017', '2018')
# imprimir matriz por segunda vez
peliculas
####################################################
# práctica 2: operaciones aritméticas con matrices #
####################################################
# resta 5 a la matriz
peliculas -5
# sumar matriz consigo misma
peliculas + peliculas
# multiplicar la matriz consigo mismo
peliculas * peliculas
###################################################
# práctica 3: selección de elementos de un matriz #
###################################################
# seleccionar un elemento de la matriz
peliculas[3,2]
peliculas['2012', 'disney']
# seleccionar más de un elemento de la matriz
peliculas[c(3,4), c(2,3)]
peliculas[c(3,4),c("disney","fox")]
# seleccionar una fila o renglón
peliculas[3,]
peliculas['2012',]
# seleccionar una columna
peliculas[,2]
# Hecho con gusto por Rafa @GonzalezGouveia
# Suscribete para más código en R https://bit.ly/2WNDhNR |
#' MLS league results 1996-2016
#'
#' All results for MLS games
#' from 1996 season to 2016 season.
#' Includes playoff games.
#'
#' @format A data frame with 4995 rows and 16 variables:
#' \describe{
#' \item{Date}{Date of match}
#' \item{Season}{Season of match - refers to starting year}
#' \item{home}{Home team}
#' \item{visitor}{Visiting team}
#' \item{FT}{Full-time result}
#' \item{hgoal}{Goals scored by home team}
#' \item{vgoal}{Goals scored by visiting team}
#' \item{hconf}{Conference of home team}
#' \item{vconf}{Conference of visiting team}
#' \item{totgoal}{Total goals in game}
#' \item{round}{Regular Season or Playoff round}
#' \item{leg}{leg of Playoff game}
#' \item{hgoalaet}{Goals scored by home team after extra time}
#' \item{vgoalaet}{Goals scored by visiting team after extra time}
#' \item{hpen}{Penalties scored by home team in shootout}
#' \item{vpen}{Penalties scored by visiting team in shootout}
#' }
"mls"
| /R/mls.R | no_license | jalapic/engsoccerdata | R | false | false | 973 | r | #' MLS league results 1996-2016
#'
#' All results for MLS games
#' from 1996 season to 2016 season.
#' Includes playoff games.
#'
#' @format A data frame with 4995 rows and 16 variables:
#' \describe{
#' \item{Date}{Date of match}
#' \item{Season}{Season of match - refers to starting year}
#' \item{home}{Home team}
#' \item{visitor}{Visiting team}
#' \item{FT}{Full-time result}
#' \item{hgoal}{Goals scored by home team}
#' \item{vgoal}{Goals scored by visiting team}
#' \item{hconf}{Conference of home team}
#' \item{vconf}{Conference of visiting team}
#' \item{totgoal}{Total goals in game}
#' \item{round}{Regular Season or Playoff round}
#' \item{leg}{leg of Playoff game}
#' \item{hgoalaet}{Goals scored by home team after extra time}
#' \item{vgoalaet}{Goals scored by visiting team after extra time}
#' \item{hpen}{Penalties scored by home team in shootout}
#' \item{vpen}{Penalties scored by visiting team in shootout}
#' }
"mls"
|
#通过yahoo更新
rm(list=ls(all=T))
source('D:/Rcode/code/RSTOCK_TRAIL/collectdata/include.R',encoding='utf8')
update_data_from_yahoo = function(symbol,tablename)
{
data = readFromDataBase(tablename)
lastdate = max(as.Date(data$date))
begindate = lastdate + 1
data_yahoo= data.frame()
try((data_yahoo =adjustOHLC(getSymbols(symbol,auto.assign=F,from=begindate),use.Adjusted = T)),silent=T)
if(nrow(data_yahoo) > 0 )
{
colnames(data_yahoo) = c('Open','High','Low','Close','Volume','adjust')
if( data_yahoo$Volume > 0)
{
data_yahoo$Close = data_yahoo$adjust
data_yahoo = data_yahoo[,1:5]
dates = as.character(index(data_yahoo))
data_yahoo = as.data.frame(data_yahoo)
data_yahoo$date = dates
insertToDataBase(tablename,data_yahoo)
}
else
{
print(paste(symbol,lastdate))
}
}
else
{
print(paste(symbol,lastdate))
}
}
config = list(c(symbol='000001.SS',tablename='shindex',exp='上证')
# ,c(symbol='399001.SZ',file='399001.txt',exp='深圳')
,c(symbol='^IXIC',tablename='ixic',exp='纳斯达克')
,c(symbol='^DJI',tablename='dji',exp='道琼斯')
,c(symbol='^GSPC',tablename='gspc',exp = '标准普尔')
,c(symbol='^N225',tablename='n225',exp = '日经225')
,c(symbol='^TWII',tablename='TWII',exp = '台湾台北加权指数')
,c(symbol='^HSI',tablename='HSI',exp = '恒生指数')
,c(symbol='^FCHI',tablename='FCHI',exp = '法国CAC40指数')
,c(symbol='^GDAXI',tablename='DAX',exp = '德国法兰克福DAX指数')
)
for(i in 1:length(config))
{
l = config[[i]]
symbol = l['symbol']
tablename = l['tablename']
update_data_from_yahoo(symbol,tablename)
} | /collectdata/index/incrementdata.R | no_license | zhurui1351/RSTOCK_TRAIL | R | false | false | 1,789 | r | #通过yahoo更新
rm(list=ls(all=T))
source('D:/Rcode/code/RSTOCK_TRAIL/collectdata/include.R',encoding='utf8')
update_data_from_yahoo = function(symbol,tablename)
{
data = readFromDataBase(tablename)
lastdate = max(as.Date(data$date))
begindate = lastdate + 1
data_yahoo= data.frame()
try((data_yahoo =adjustOHLC(getSymbols(symbol,auto.assign=F,from=begindate),use.Adjusted = T)),silent=T)
if(nrow(data_yahoo) > 0 )
{
colnames(data_yahoo) = c('Open','High','Low','Close','Volume','adjust')
if( data_yahoo$Volume > 0)
{
data_yahoo$Close = data_yahoo$adjust
data_yahoo = data_yahoo[,1:5]
dates = as.character(index(data_yahoo))
data_yahoo = as.data.frame(data_yahoo)
data_yahoo$date = dates
insertToDataBase(tablename,data_yahoo)
}
else
{
print(paste(symbol,lastdate))
}
}
else
{
print(paste(symbol,lastdate))
}
}
config = list(c(symbol='000001.SS',tablename='shindex',exp='上证')
# ,c(symbol='399001.SZ',file='399001.txt',exp='深圳')
,c(symbol='^IXIC',tablename='ixic',exp='纳斯达克')
,c(symbol='^DJI',tablename='dji',exp='道琼斯')
,c(symbol='^GSPC',tablename='gspc',exp = '标准普尔')
,c(symbol='^N225',tablename='n225',exp = '日经225')
,c(symbol='^TWII',tablename='TWII',exp = '台湾台北加权指数')
,c(symbol='^HSI',tablename='HSI',exp = '恒生指数')
,c(symbol='^FCHI',tablename='FCHI',exp = '法国CAC40指数')
,c(symbol='^GDAXI',tablename='DAX',exp = '德国法兰克福DAX指数')
)
for(i in 1:length(config))
{
l = config[[i]]
symbol = l['symbol']
tablename = l['tablename']
update_data_from_yahoo(symbol,tablename)
} |
## Download packages data.table and reshape
library(data.table)
library(reshape2)
## Get directory; make sure it's the directory you want
getwd()
## Download zipfile
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./dataset.zip", method = "curl")
UCIdata <- unzip("./dataset.zip")
## check file names for upload
UCIdata
## Open files to check similarities; read UCIdata's readme and other files for descriptions
## Objective 1: Merges the training and the test sets to create one data set
### Within the process of merging, Objectives 2-4 will be done;
## (after training and test sets are merged, the right measurements will be extracted and will be labeled with variable names)
##y_train and y_test will need activity_labels.txt to assign descriptive activity labels.
# Objective 1: Merges the training and the test sets to create one data set
## Upload test data and train data
subjecttest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subjecttrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
xtrain <- read.table( "./UCI HAR Dataset/train/X_train.txt")
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
########################
## First, merge the measured data
measureddata <- rbind(xtrain,xtest)
### Then objective 2 can be done
# Objective 2: Extracts only the measurements on the mean and standard deviation for each measurement
## Download features data; this has measurement labels
measurements <- read.table( "./UCI HAR Dataset/features.txt")
## Change column names so its easier to merge later
setnames(measurements, "V1", "measurement")
setnames(measurements, "V2", "measurementlabel")
## Find the measurement labels with standard deviation and mean
findmeasurement<- grepl("-[Mm]ean\\(\\)|-[Ss]td\\(\\)", measurements$measurementlabel)
## Extract the columns in measured data that match the position in findmeasurement,
## because this is where measurementlabel is mean or standard devation
measureddata <- measureddata[,findmeasurement]
## Now to finish the extracing we need to get the actual measurement labels
## extracted from the measurements data that have mean or standard deviation calculations
submeasurements <- measurements$measurementlabel[findmeasurement]
as.character(submeasurements)
## Further cleaning of data....
## Need to remove non-letter characters in the measurements
submeasurements <- gsub("\\(|\\)", replacement = "", submeasurements)
submeasurements <- gsub("-", replacement = "", submeasurements)
## Now Objective 4 can be done
## Objective 4: Appropriately labels the data set with descriptive variable names.
## The names of columns in measureddata needs to be the measurement labels (variable labels)
## that are either mean or standard deviation
## Assign variable names to measureddata
names(measureddata) <- submeasurements
########################
## Next step is to use descriptive names to name the activites in the data set
## It is known that ytrain and ytest are the test labels, which correspond
## to the activity_tabels.txt, so they must be merged first before objective 3 can be done
#
## First, merge the test and train labels
datalabels <- rbind(ytrain, ytest)
## The column names of datalabels is still V1 and needs to change to "actvity"
## for easier merging with the activity labels
setnames(datalabels, "V1", "activitynumber")
## Then Objective 3 can be done.
## Objective 3: Uses descriptive activity names to name the activities in the data set.
## download activity labels data
activities <- read.table( "./UCI HAR Dataset/activity_labels.txt")
## Change column names so that it can be mered with datalabels
setnames(activities, "V1", "activitynumber")
setnames(activities, "V2", "activitylabels")
## Remove underscore in activity label data
activities$activitylabels <- sub("_", "", activities$activitylabels)
## Merge datalabels and activity so that desriptive labels can be shown
activity <- merge(datalabels, activities, by = "activitynumber")
## Remove activity number and only show activitylabels
activity <- activity$activitylabels
########################
## Subject numbers from train and test need to merge as well
subjects <- rbind(subjecttrain, subjecttest)
## rename subject column name
setnames(subjects, "V1", "subject")
##No further cleaning needs to be done.
########################
## Now merge the measured data, with the activity, and the subjects
tidydata <- cbind(subjects, activity, measureddata)
## tidydata needs to be saved in local directory
write.table(tidydata, "tidydata.txt")
########################
## Objective 5: From the data set in step 4, creates a second,
## independent tidy data set with the average of each variable
## for each activity and each subject.
## Make sure reshape package is loaded
## Melt tidydata, and use subject and activity as the id variables
## because they are the fixed column for the new data
tidymelt <- melt(tidydata, id.vars = c("subject", "activity"))
## Cast to add average of variables in new data
averagetidydata <- dcast(tidymelt, subject + activity + variable ~., mean)
## Change column name of "." to average
setnames(averagetidydata, ".", "average")
## Save averagetidydata in local directory
write.table(averagetidydata, "averagetidydata.txt")
| /run_analysis.R | no_license | kbelita/Getting-and-Cleaning-Data-Assmt | R | false | false | 5,505 | r | ## Download packages data.table and reshape
library(data.table)
library(reshape2)
## Get directory; make sure it's the directory you want
getwd()
## Download zipfile
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./dataset.zip", method = "curl")
UCIdata <- unzip("./dataset.zip")
## check file names for upload
UCIdata
## Open files to check similarities; read UCIdata's readme and other files for descriptions
## Objective 1: Merges the training and the test sets to create one data set
### Within the process of merging, Objectives 2-4 will be done;
## (after training and test sets are merged, the right measurements will be extracted and will be labeled with variable names)
##y_train and y_test will need activity_labels.txt to assign descriptive activity labels.
# Objective 1: Merges the training and the test sets to create one data set
## Upload test data and train data
subjecttest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subjecttrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
xtrain <- read.table( "./UCI HAR Dataset/train/X_train.txt")
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
########################
## First, merge the measured data
measureddata <- rbind(xtrain,xtest)
### Then objective 2 can be done
# Objective 2: Extracts only the measurements on the mean and standard deviation for each measurement
## Download features data; this has measurement labels
measurements <- read.table( "./UCI HAR Dataset/features.txt")
## Change column names so its easier to merge later
setnames(measurements, "V1", "measurement")
setnames(measurements, "V2", "measurementlabel")
## Find the measurement labels with standard deviation and mean
findmeasurement<- grepl("-[Mm]ean\\(\\)|-[Ss]td\\(\\)", measurements$measurementlabel)
## Extract the columns in measured data that match the position in findmeasurement,
## because this is where measurementlabel is mean or standard devation
measureddata <- measureddata[,findmeasurement]
## Now to finish the extracing we need to get the actual measurement labels
## extracted from the measurements data that have mean or standard deviation calculations
submeasurements <- measurements$measurementlabel[findmeasurement]
as.character(submeasurements)
## Further cleaning of data....
## Need to remove non-letter characters in the measurements
submeasurements <- gsub("\\(|\\)", replacement = "", submeasurements)
submeasurements <- gsub("-", replacement = "", submeasurements)
## Now Objective 4 can be done
## Objective 4: Appropriately labels the data set with descriptive variable names.
## The names of columns in measureddata needs to be the measurement labels (variable labels)
## that are either mean or standard deviation
## Assign variable names to measureddata
names(measureddata) <- submeasurements
########################
## Next step is to use descriptive names to name the activites in the data set
## It is known that ytrain and ytest are the test labels, which correspond
## to the activity_tabels.txt, so they must be merged first before objective 3 can be done
#
## First, merge the test and train labels
datalabels <- rbind(ytrain, ytest)
## The column names of datalabels is still V1 and needs to change to "actvity"
## for easier merging with the activity labels
setnames(datalabels, "V1", "activitynumber")
## Then Objective 3 can be done.
## Objective 3: Uses descriptive activity names to name the activities in the data set.
## download activity labels data
activities <- read.table( "./UCI HAR Dataset/activity_labels.txt")
## Change column names so that it can be mered with datalabels
setnames(activities, "V1", "activitynumber")
setnames(activities, "V2", "activitylabels")
## Remove underscore in activity label data
activities$activitylabels <- sub("_", "", activities$activitylabels)
## Merge datalabels and activity so that desriptive labels can be shown
activity <- merge(datalabels, activities, by = "activitynumber")
## Remove activity number and only show activitylabels
activity <- activity$activitylabels
########################
## Subject numbers from train and test need to merge as well
subjects <- rbind(subjecttrain, subjecttest)
## rename subject column name
setnames(subjects, "V1", "subject")
##No further cleaning needs to be done.
########################
## Now merge the measured data, with the activity, and the subjects
tidydata <- cbind(subjects, activity, measureddata)
## tidydata needs to be saved in local directory
write.table(tidydata, "tidydata.txt")
########################
## Objective 5: From the data set in step 4, creates a second,
## independent tidy data set with the average of each variable
## for each activity and each subject.
## Make sure reshape package is loaded
## Melt tidydata, and use subject and activity as the id variables
## because they are the fixed column for the new data
tidymelt <- melt(tidydata, id.vars = c("subject", "activity"))
## Cast to add average of variables in new data
averagetidydata <- dcast(tidymelt, subject + activity + variable ~., mean)
## Change column name of "." to average
setnames(averagetidydata, ".", "average")
## Save averagetidydata in local directory
write.table(averagetidydata, "averagetidydata.txt")
|
R commands and output:
## Sonoluminescense Light Intensity Case Study.
## Read and sort data.
fname = "inn.dat"
mo = matrix(scan(fname,skip=25),ncol=8,byrow=T)
m = mo[order(mo[,1]),]
y = m[,1]
x1 = m[,2]
x2 = m[,3]
x3 = m[,4]
x4 = m[,5]
x5 = m[,6]
x6 = m[,7]
x7 = m[,8]
## Attach memisc library for the recode function.
library(memisc)
## Generate re-coded factor variables for plotting.
r0 = "12345678"
r1 = recode(x1,"+" <- c(1),"-" <- c(-1))
r2 = recode(x2,"+" <- c(1),"-" <- c(-1))
r3 = recode(x3,"+" <- c(1),"-" <- c(-1))
r4 = recode(x4,"+" <- c(1),"-" <- c(-1))
r5 = recode(x5,"+" <- c(1),"-" <- c(-1))
r6 = recode(x6,"+" <- c(1),"-" <- c(-1))
r7 = recode(x7,"+" <- c(1),"-" <- c(-1))
id = paste(r1,r2,r3,r4,r5,r6,r7,sep="")
id = c(r0,id)
id12 = paste(r1,r2,sep="")
id13 = paste(r1,r3,sep="")
id14 = paste(r1,r4,sep="")
id15 = paste(r1,r5,sep="")
id16 = paste(r1,r6,sep="")
id17 = paste(r1,r7,sep="")
id23 = paste(r2,r3,sep="")
id24 = paste(r2,r4,sep="")
id25 = paste(r2,r5,sep="")
id26 = paste(r2,r6,sep="")
id27 = paste(r2,r7,sep="")
id34 = paste(r3,r4,sep="")
id35 = paste(r3,r5,sep="")
id36 = paste(r3,r6,sep="")
id37 = paste(r3,r7,sep="")
id45 = paste(r4,r5,sep="")
id46 = paste(r4,r6,sep="")
id47 = paste(r4,r7,sep="")
id56 = paste(r5,r6,sep="")
id57 = paste(r5,r7,sep="")
id67 = paste(r6,r7,sep="")
## Plot points in increasing order with labels indicating
## factor levels.
par(cex=1.25,las=3)
case = c(1:length(id))
plot(c(NA,m[,1]), xaxt = "n", col="blue", pch=19,
main="Ordered Sonoluminescence Light Intensity Data",
ylab="Light Intensity", xlab="")
axis(1, at=case, labels=id)
## Restructure data so that x1, x2, ... x7 are in a single column.
## Also, save re-coded version of the factor levels for the mean plot.
tempx = x1
tempxc = x1 + 1
dm1 = cbind(y,tempx,tempxc)
tempx = x2
tempxc = x2 + 4
dm2 = cbind(y,tempx,tempxc)
tempx = x3
tempxc = x3 + 7
dm3 = cbind(y,tempx,tempxc)
tempx = x4
tempxc = x4 + 10
dm4 = cbind(y,tempx,tempxc)
tempx = x5
tempxc = x5 + 13
dm5 = cbind(y,tempx,tempxc)
tempx = x6
tempxc = x6 + 16
dm6 = cbind(y,tempx,tempxc)
tempx = x7
tempxc = x7 + 19
dm7 = cbind(y,tempx,tempxc)
dm8 = rbind(dm1,dm2,dm3,dm4,dm5,dm6,dm7)
## Generate factor ID variable.
n = length(y)
varind = c(rep("Molarity",n),
rep("Solute Type",n),
rep("pH",n),
rep("Gas Type",n),
rep("Water Depth",n),
rep("Horn Depth",n),
rep("Flask Clamping",n))
varind = as.factor(varind)
## Comute grand mean.
ybar = mean(y)
## Create a dataframe with "stacked" factors and data.
df = data.frame(dm8,varind)
## Attach lattice library and generate the DEX scatter plot.
library(lattice)
xyplot(y~tempx|varind,data=df,layout=c(4,2),xlim=c(-2,2),
ylab="Light Intensity",xlab="Factor Levels",
main="Scatter Plot for Sonoluminescense Light Intensity",
panel=function(x,y, ...){
panel.xyplot(x,y, ...)
panel.abline(h=ybar) }
)
## Generate mean plot.
par(cex=1,las=3)
interaction.plot(df$tempxc,df$varind,df$y,fun=mean,
ylab="Average Light Intensity",xlab="",
main="DEX Mean Plot for Sonoluminescense Light Intensity",
trace.label="Factor",type="b",pch=19,
legend=FALSE,xaxt="n")
xpos = c(1.5,3.5,5.5,7.5,9.5,11.5,13.5)
xlabel = c("Molarity","Solute","pH","Gas Type","Water",
"Horn","Flask")
axis(side=1,at=xpos,labels=xlabel)
abline(h=ybar)
## Create dataframe with interaction factors.
x12 = x1*x2
x13 = x1*x3
x14 = x1*x4
x15 = x1*x5
x16 = x1*x6
x17 = x1*x7
x23 = x2*x3
x24 = x2*x4
x25 = x2*x5
x26 = x2*x6
x27 = x2*x7
x34 = x3*x4
x35 = x3*x5
x36 = x3*x6
x37 = x3*x7
x45 = x4*x5
x46 = x4*x6
x47 = x4*x7
x56 = x5*x6
x57 = x5*x7
x67 = x6*x7
x124 = x1*x2*x4
fx1 = factor(x1)
fx2 = factor(x2)
fx3 = factor(x3)
fx4 = factor(x4)
fx5 = factor(x5)
fx6 = factor(x6)
fx7 = factor(x7)
fx12 = factor(x12)
fx13 = factor(x13)
fx14 = factor(x14)
fx15 = factor(x15)
fx16 = factor(x16)
fx17 = factor(x17)
fx23 = factor(x23)
fx24 = factor(x24)
fx25 = factor(x25)
fx26 = factor(x26)
fx27 = factor(x27)
fx34 = factor(x34)
fx35 = factor(x35)
fx36 = factor(x36)
fx37 = factor(x37)
fx45 = factor(x45)
fx46 = factor(x46)
fx47 = factor(x47)
fx56 = factor(x56)
fx57 = factor(x57)
fx67 = factor(x67)
fx124 = factor(x124)
dfip = data.frame(y,fx1,fx2,fx3,fx4,fx5,fx6,fx7,
fx12,fx13,fx14,fx15,fx16,fx17,
fx23,fx24,fx25,fx26,fx27,
fx34,fx35,fx36,fx37,
fx45,fx46,fx47,
fx56,fx57,
fx67,fx124)
## Compute effect estimates and factor means.
## Compute the level means and effect estimate for a two-level factor.
##   x:   numeric response vector
##   fac: factor splitting x into two groups; aggregate() orders the
##        groups by level, so row 1 is the low level and row 2 the high.
## Returns c(low-level mean, high-level mean, effect), where the
## effect is the high mean minus the low mean.
fmeans = function(x,fac){
q = aggregate(x=x,by=list(fac),FUN="mean")
lo = q[1,2]
hi = q[2,2]
## Return the result visibly (the original ended in an assignment,
## which returns invisibly and left an unused variable behind).
c(lo, hi, hi - lo)
}
e1 = fmeans(dfip$y,dfip$fx1)
e2 = fmeans(dfip$y,dfip$fx2)
e3 = fmeans(dfip$y,dfip$fx3)
e4 = fmeans(dfip$y,dfip$fx4)
e5 = fmeans(dfip$y,dfip$fx5)
e6 = fmeans(dfip$y,dfip$fx6)
e7 = fmeans(dfip$y,dfip$fx7)
e12 = fmeans(dfip$y,dfip$fx12)
e13 = fmeans(dfip$y,dfip$fx13)
e14 = fmeans(dfip$y,dfip$fx14)
e15 = fmeans(dfip$y,dfip$fx15)
e16 = fmeans(dfip$y,dfip$fx16)
e17 = fmeans(dfip$y,dfip$fx17)
e23 = fmeans(dfip$y,dfip$fx23)
e24 = fmeans(dfip$y,dfip$fx24)
e25 = fmeans(dfip$y,dfip$fx25)
e26 = fmeans(dfip$y,dfip$fx26)
e27 = fmeans(dfip$y,dfip$fx27)
e34 = fmeans(dfip$y,dfip$fx34)
e35 = fmeans(dfip$y,dfip$fx35)
e36 = fmeans(dfip$y,dfip$fx36)
e37 = fmeans(dfip$y,dfip$fx37)
e45 = fmeans(dfip$y,dfip$fx45)
e46 = fmeans(dfip$y,dfip$fx46)
e47 = fmeans(dfip$y,dfip$fx47)
e56 = fmeans(dfip$y,dfip$fx56)
e57 = fmeans(dfip$y,dfip$fx57)
e67 = fmeans(dfip$y,dfip$fx67)
# Create factor labels from effect values.
e = round(rbind(e7,e6,e67,e5,e56,e57,e4,e45,e46,e47,
e3,e34,e35,e36,e37,e2,e23,e24,e25,e26,e27,
e1,e12,e13,e14,e15,e16,e17),1)
textlabs = c("X7 =",
"X6 =", "X67 =",
"X5 =", "X56 =", "X57 =",
"X4 =", "X45 =", "X46 =", "X47 =",
"X3 =", "X34 =", "X35 =", "X36 =", "X37 =",
"X2 =", "X23 =", "X24 =", "X25 =", "X26 =", "X27 =",
"X1 =", "X12 =", "X13 =", "X14 =", "X15 =", "X16 =", "X17 =")
labs = paste(textlabs,e[,3])
group = factor(c(1:28),labels=labs)
# Create data frame with factor level means.
x = e[,1]
xlev = rep(-1,28)
xlo = cbind(x,xlev,group)
x = e[,2]
xlev = rep(1,28)
xhi = cbind(x,xlev,group)
mm = rbind(xlo,xhi)
mm = as.data.frame(mm)
# Customize Lattice plot layout and color.
sp = c(T,T,T,T,T,T,F,
T,T,T,T,T,F,F,
T,T,T,T,F,F,F,
T,T,T,F,F,F,F,
T,T,F,F,F,F,F,
T,F,F,F,F,F,F,
F,F,F,F,F,F,F)
strip.bg_custom = trellis.par.get("strip.background")
strip.bg_custom$col =c("#cce6ff","#ffe5cc","#ccffcc","#ccffff","#ffccff",
"#ffcccc","#ffffcc")
strip.sh_custom = strip.bg_custom
trellis.par.set("strip.background", strip.bg_custom)
trellis.par.set("strip.shingle", strip.sh_custom)
trellis.par.set(list(fontsize=list(text=10)))
# Generate plot.
xyplot(x~xlev | group, data=mm, type="b", xlim=c(-2,2),
layout=c(7,7), skip=sp, col=c(4),
strip = function(..., style,factor.levels,strip.levels,strip.names)
strip.default(..., style = 1,factor.levels=labs,
strip.levels=c(F,T),strip.names=c(T,F)),
xlab="Factor Level", ylab="Light Intensity",
main="DEX Mean Plot for Sonoluminescense Light Intensity",
panel = function(x, y, ...){
panel.xyplot(x, y, ...)
panel.abline(h = ybar, lty = 2, col = 2)
}
)
## Create dataframe with factors.
fid1 = factor(id23)
fid2 = factor(id13)
fid3 = factor(id12)
df2 = data.frame(y,fx1,fx2,fx3,fx4,fx5,fx6,fx7,
fid1,fid2,fid3)
## Generate seven plots on one page.
par(mfrow=c(3,3),las=0)
## Generate level means.
ag = aggregate(x=df2$y,by=list(df2$fx1,df2$fx2,df2$fx3),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.1,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag3 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
ag13 = paste(ag1,ag3,sep="")
ag23 = paste(ag2,ag3,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag$Group.2,ag$Group.3,ag12,ag13,ag23)
## Generate the block plot for factor 1.
boxplot(dfag$ag.x ~ dfag$ag23, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X2,X3",
main="Primary Factor X1", cex.main=1)
## Add points for the effects.
points(dfag$ag23[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag23[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(3.25,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X1 Level", cex=.7, horiz=TRUE)
## Generate the block plot for factor 2.
boxplot(dfag$ag.x ~ dfag$ag13, medlty="blank", boxwex=.5,
ylab="Sensitivity",xlab="Factor Levels of X1 X3",
main="Primary Factor X2", cex.main=1)
## Add points for the effect means.
points(dfag$ag13[dfag$ag.Group.2==1],dfag$ag.x[dfag$ag.Group.2==1],
pch=19,col="blue")
points(dfag$ag13[dfag$ag.Group.2==-1],dfag$ag.x[dfag$ag.Group.2==-1],
col="blue")
## Add legend.
legend(.5,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X2 Level", cex=.7, horiz=TRUE)
## Generate the block plot for factor 3.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Sensitivity",xlab="Factor Levels of X1 X2",
main="Primary Factor X3", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.3==1],dfag$ag.x[dfag$ag.Group.3==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.3==-1],dfag$ag.x[dfag$ag.Group.3==-1],
col="blue")
## Add legend.
legend(0.5,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X3 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 4.
ag = aggregate(x=df2$y,by=list(df2$fx4,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 4.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X4", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,220,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X4 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 5.
ag = aggregate(x=df2$y,by=list(df2$fx5,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 5.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X5", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,225,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X5 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 6.
ag = aggregate(x=df2$y,by=list(df2$fx6,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 6.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X6", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,225,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X6 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 7.
ag = aggregate(x=df2$y,by=list(df2$fx7,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 7.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X7", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X7 Level", cex=.7, horiz=TRUE)
par(mfrow=c(1,1))
## Generate Youden plot.
## Generate averages for each factor and level.
q1 = aggregate(x=dfip$y,by=list(dfip$fx1),FUN="mean")
qt1 = t(q1$x)
q2 = aggregate(x=dfip$y,by=list(dfip$fx2),FUN="mean")
qt2 = t(q2$x)
q3 = aggregate(x=dfip$y,by=list(dfip$fx3),FUN="mean")
qt3 = t(q3$x)
q4 = aggregate(x=dfip$y,by=list(dfip$fx4),FUN="mean")
qt4 = t(q4$x)
q5 = aggregate(x=dfip$y,by=list(dfip$fx5),FUN="mean")
qt5 = t(q5$x)
q6 = aggregate(x=dfip$y,by=list(dfip$fx6),FUN="mean")
qt6 = t(q6$x)
q7 = aggregate(x=dfip$y,by=list(dfip$fx7),FUN="mean")
qt7 = t(q7$x)
q12 = aggregate(x=dfip$y,by=list(dfip$fx12),FUN="mean")
qt12 = t(q12$x)
q13 = aggregate(x=dfip$y,by=list(dfip$fx13),FUN="mean")
qt13 = t(q13$x)
q14 = aggregate(x=dfip$y,by=list(dfip$fx14),FUN="mean")
qt14 = t(q14$x)
q15 = aggregate(x=dfip$y,by=list(dfip$fx15),FUN="mean")
qt15 = t(q15$x)
q16 = aggregate(x=dfip$y,by=list(dfip$fx16),FUN="mean")
qt16 = t(q16$x)
q17 = aggregate(x=dfip$y,by=list(dfip$fx17),FUN="mean")
qt17 = t(q17$x)
q24 = aggregate(x=dfip$y,by=list(dfip$fx24),FUN="mean")
qt24 = t(q24$x)
q124 = aggregate(x=dfip$y,by=list(dfip$fx124),FUN="mean")
qt124 = t(q124$x)
yp = rbind(qt1,qt2,qt3,qt4,qt5,qt6,qt7,
qt12,qt13,qt14,qt15,qt16,qt17,qt24,qt124)
## Generate names for effect estimates.
z = lm(y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 +
x12 + x13 + x14 + x15 + x16 + x17 + x24 + x124)
zz = summary(z)
effects = coef(z)[-1]*2
## Generate Youden plot.
plot(yp[,1],yp[,2], xlim=c(70,155), ylim=c(70,155),
xlab="Average Response for -1 Settings",
ylab="Average Response for +1 Settings",
main="Youden Plot for Sonoluminescense Data")
text(yp[,1],yp[,2],labels=names(effects),pos=4,cex=.75)
abline(h=ybar)
abline(v=ybar)
## Save effects in decreasing order.
torder = zz$coefficients[order(abs(zz$coefficients[,1]),decreasing=TRUE),]
torder[,1]
> (Intercept) x2 x7 x13 x1 x3
> 110.60625 -39.30625 -39.05625 35.00625 33.10625 31.90625
> x17 x12 x16 x14 x6 x5
> -31.73125 -29.78125 -8.16875 -5.24375 -4.51875 3.74375
> x124 x4 x24 x15
> 2.91875 1.85625 0.84375 -0.28125
yvar = torder[-1,1]*2
lvar16 = rownames(torder)
lvar = lvar16[-1]
xvar = c(1:length(lvar))
## Plot absolute values of effects in decreasing order.
plot(xvar,abs(yvar), xlim=c(1,16),
main = "Sonoluminescent Light Intensity",
ylab="|Effect|", xlab="", xaxt="n")
text(xvar,abs(yvar), labels=lvar, pos=4, cex=.8)
## Generate half-normal probability plot of effect estimates.
library(faraway)
halfnorm(effects,nlab=length(effects), cex=.8,
labs=names(effects),
ylab="Ordered |Effects|",
main="Half-Normal Probability Plot of Sonoluminescent Data")
## Compute the residual standard deviation for cumulative
## models (mean plus cumulative terms).
z = lm(y ~ 1)
ese = summary(z)$sigma
z = update(z, . ~ . + x2)
se1 = summary(z)$sigma
z = update(z, . ~ . + x7)
se2 = summary(z)$sigma
z = update(z, . ~ . + x13)
se3 = summary(z)$sigma
z = update(z, . ~ . + x1)
se4 = summary(z)$sigma
z = update(z, . ~ . + x3)
se5 = summary(z)$sigma
z = update(z, . ~ . + x17)
se6 = summary(z)$sigma
z = update(z, . ~ . + x12)
se7 = summary(z)$sigma
z = update(z, . ~ . + x16)
se8 = summary(z)$sigma
z = update(z, . ~ . + x14)
se9 = summary(z)$sigma
z = update(z, . ~ . + x6)
se10 = summary(z)$sigma
z = update(z, . ~ . + x5)
se11 = summary(z)$sigma
z = update(z, . ~ . + x124)
se12 = summary(z)$sigma
z = update(z, . ~ . + x4)
se13 = summary(z)$sigma
z = update(z, . ~ . + x24)
se14 = summary(z)$sigma
z = update(z, . ~ . + x15)
se15 = summary(z)$sigma
Eff.SE = rbind(ese,se1,se2,se3,se4,se5,se6,se7,se8,se9,
se10,se11,se12,se13,se14,se15)
## Plot residual standard deviation for cumulative models.
plot(Eff.SE, main = "Sonoluminescent Light Intensity",
ylab="Cummulative Residual Standard Deviation", xlab="Additional Term",
xaxt="n")
text(c(1:length(Eff.SE)) ,Eff.SE, labels=lvar16, pos=4, cex=.8)
## Generate level means for plotting.
q = aggregate(x=dfip$y,by=list(dfip$fx2,dfip$fx7),FUN="mean")
qv1 = as.vector(q$Group.1,mode="numeric")-1
qv2 = as.vector(q$Group.2,mode="numeric")-1
qv1[qv1==0] = -1
qv2[qv2==0] = -1
## Contour plot y(x7),x(x2)
## Generate x and y data for plotting.
xord = seq(-2,2,by=.1)
yord = seq(-2,2,by=.1)
## Fit model with two factors, x2 and x7, and their interaction
## for predicting the surface.
z = lm(y ~ 1 + x2 + x7 + x27)
## Generate predicted response surface and generate matrix of surface.
## Evaluate the fitted response surface at factor settings (a, b),
## using the coefficients of the two-factor-plus-interaction model
## stored in z (intercept, a, b, a:b — in that order).
model = function (a, b){
cf = z$coefficients
cf[1] + cf[2]*a + cf[3]*b + cf[4]*a*b
}
pmatu = outer(xord,yord,model)
## Generate contour plot, add design points and labels.
contour(xord, yord, pmatu, nlevels=15, main="Contour Plot",
xlab="x2", ylab="x7", col="blue")
points(qv1,qv2,pch=19)
text(c(qv1[1],qv1[3]),c(qv2[1],qv2[3]),labels=c(q$x[1],q$x[3]),pos=2)
text(c(qv1[2],qv1[4]),c(qv2[2],qv2[4]),labels=c(q$x[2],q$x[4]),pos=4)
lines(c(-1,1,1,-1,-1),c(-1,-1,1,1,-1))
| /Dataplot/doc/handbook/Rcode/R_Codes_and_Data/pri621.r | no_license | BRICOMATA/Bricomata_ | R | false | false | 19,086 | r | R commands and output:
## Sonoluminescense Light Intensity Case Study.
## Read and sort data.
fname = "inn.dat"
mo = matrix(scan(fname,skip=25),ncol=8,byrow=T)
m = mo[order(mo[,1]),]
y = m[,1]
x1 = m[,2]
x2 = m[,3]
x3 = m[,4]
x4 = m[,5]
x5 = m[,6]
x6 = m[,7]
x7 = m[,8]
## Attach memisc library for the recode function.
library(memisc)
## Generate re-coded factor variables for plotting.
r0 = "12345678"
r1 = recode(x1,"+" <- c(1),"-" <- c(-1))
r2 = recode(x2,"+" <- c(1),"-" <- c(-1))
r3 = recode(x3,"+" <- c(1),"-" <- c(-1))
r4 = recode(x4,"+" <- c(1),"-" <- c(-1))
r5 = recode(x5,"+" <- c(1),"-" <- c(-1))
r6 = recode(x6,"+" <- c(1),"-" <- c(-1))
r7 = recode(x7,"+" <- c(1),"-" <- c(-1))
id = paste(r1,r2,r3,r4,r5,r6,r7,sep="")
id = c(r0,id)
id12 = paste(r1,r2,sep="")
id13 = paste(r1,r3,sep="")
id14 = paste(r1,r4,sep="")
id15 = paste(r1,r5,sep="")
id16 = paste(r1,r6,sep="")
id17 = paste(r1,r7,sep="")
id23 = paste(r2,r3,sep="")
id24 = paste(r2,r4,sep="")
id25 = paste(r2,r5,sep="")
id26 = paste(r2,r6,sep="")
id27 = paste(r2,r7,sep="")
id34 = paste(r3,r4,sep="")
id35 = paste(r3,r5,sep="")
id36 = paste(r3,r6,sep="")
id37 = paste(r3,r7,sep="")
id45 = paste(r4,r5,sep="")
id46 = paste(r4,r6,sep="")
id47 = paste(r4,r7,sep="")
id56 = paste(r5,r6,sep="")
id57 = paste(r5,r7,sep="")
id67 = paste(r6,r7,sep="")
## Plot points in increasing order with labels indicating
## factor levels.
par(cex=1.25,las=3)
case = c(1:length(id))
plot(c(NA,m[,1]), xaxt = "n", col="blue", pch=19,
main="Ordered Sonoluminescence Light Intensity Data",
ylab="Light Intensity", xlab="")
axis(1, at=case, labels=id)
## Restructure data so that x1, x2, ... x7 are in a single column.
## Also, save re-coded version of the factor levels for the mean plot.
tempx = x1
tempxc = x1 + 1
dm1 = cbind(y,tempx,tempxc)
tempx = x2
tempxc = x2 + 4
dm2 = cbind(y,tempx,tempxc)
tempx = x3
tempxc = x3 + 7
dm3 = cbind(y,tempx,tempxc)
tempx = x4
tempxc = x4 + 10
dm4 = cbind(y,tempx,tempxc)
tempx = x5
tempxc = x5 + 13
dm5 = cbind(y,tempx,tempxc)
tempx = x6
tempxc = x6 + 16
dm6 = cbind(y,tempx,tempxc)
tempx = x7
tempxc = x7 + 19
dm7 = cbind(y,tempx,tempxc)
dm8 = rbind(dm1,dm2,dm3,dm4,dm5,dm6,dm7)
## Generate factor ID variable.
n = length(y)
varind = c(rep("Molarity",n),
rep("Solute Type",n),
rep("pH",n),
rep("Gas Type",n),
rep("Water Depth",n),
rep("Horn Depth",n),
rep("Flask Clamping",n))
varind = as.factor(varind)
## Compute the grand mean.
ybar = mean(y)
## Create a dataframe with "stacked" factors and data.
df = data.frame(dm8,varind)
## Attach lattice library and generate the DEX scatter plot.
library(lattice)
xyplot(y~tempx|varind,data=df,layout=c(4,2),xlim=c(-2,2),
ylab="Light Intensity",xlab="Factor Levels",
main="Scatter Plot for Sonoluminescense Light Intensity",
panel=function(x,y, ...){
panel.xyplot(x,y, ...)
panel.abline(h=ybar) }
)
## Generate mean plot.
par(cex=1,las=3)
interaction.plot(df$tempxc,df$varind,df$y,fun=mean,
ylab="Average Light Intensity",xlab="",
main="DEX Mean Plot for Sonoluminescense Light Intensity",
trace.label="Factor",type="b",pch=19,
legend=FALSE,xaxt="n")
xpos = c(1.5,3.5,5.5,7.5,9.5,11.5,13.5)
xlabel = c("Molarity","Solute","pH","Gas Type","Water",
"Horn","Flask")
axis(side=1,at=xpos,labels=xlabel)
abline(h=ybar)
## Create dataframe with interaction factors.
x12 = x1*x2
x13 = x1*x3
x14 = x1*x4
x15 = x1*x5
x16 = x1*x6
x17 = x1*x7
x23 = x2*x3
x24 = x2*x4
x25 = x2*x5
x26 = x2*x6
x27 = x2*x7
x34 = x3*x4
x35 = x3*x5
x36 = x3*x6
x37 = x3*x7
x45 = x4*x5
x46 = x4*x6
x47 = x4*x7
x56 = x5*x6
x57 = x5*x7
x67 = x6*x7
x124 = x1*x2*x4
fx1 = factor(x1)
fx2 = factor(x2)
fx3 = factor(x3)
fx4 = factor(x4)
fx5 = factor(x5)
fx6 = factor(x6)
fx7 = factor(x7)
fx12 = factor(x12)
fx13 = factor(x13)
fx14 = factor(x14)
fx15 = factor(x15)
fx16 = factor(x16)
fx17 = factor(x17)
fx23 = factor(x23)
fx24 = factor(x24)
fx25 = factor(x25)
fx26 = factor(x26)
fx27 = factor(x27)
fx34 = factor(x34)
fx35 = factor(x35)
fx36 = factor(x36)
fx37 = factor(x37)
fx45 = factor(x45)
fx46 = factor(x46)
fx47 = factor(x47)
fx56 = factor(x56)
fx57 = factor(x57)
fx67 = factor(x67)
fx124 = factor(x124)
dfip = data.frame(y,fx1,fx2,fx3,fx4,fx5,fx6,fx7,
fx12,fx13,fx14,fx15,fx16,fx17,
fx23,fx24,fx25,fx26,fx27,
fx34,fx35,fx36,fx37,
fx45,fx46,fx47,
fx56,fx57,
fx67,fx124)
## Compute effect estimates and factor means.
## Compute the level means and effect estimate for a two-level factor.
##   x:   numeric response vector
##   fac: factor splitting x into two groups; aggregate() orders the
##        groups by level, so row 1 is the low level and row 2 the high.
## Returns c(low-level mean, high-level mean, effect), where the
## effect is the high mean minus the low mean.
fmeans = function(x,fac){
q = aggregate(x=x,by=list(fac),FUN="mean")
lo = q[1,2]
hi = q[2,2]
## Return the result visibly (the original ended in an assignment,
## which returns invisibly and left an unused variable behind).
c(lo, hi, hi - lo)
}
e1 = fmeans(dfip$y,dfip$fx1)
e2 = fmeans(dfip$y,dfip$fx2)
e3 = fmeans(dfip$y,dfip$fx3)
e4 = fmeans(dfip$y,dfip$fx4)
e5 = fmeans(dfip$y,dfip$fx5)
e6 = fmeans(dfip$y,dfip$fx6)
e7 = fmeans(dfip$y,dfip$fx7)
e12 = fmeans(dfip$y,dfip$fx12)
e13 = fmeans(dfip$y,dfip$fx13)
e14 = fmeans(dfip$y,dfip$fx14)
e15 = fmeans(dfip$y,dfip$fx15)
e16 = fmeans(dfip$y,dfip$fx16)
e17 = fmeans(dfip$y,dfip$fx17)
e23 = fmeans(dfip$y,dfip$fx23)
e24 = fmeans(dfip$y,dfip$fx24)
e25 = fmeans(dfip$y,dfip$fx25)
e26 = fmeans(dfip$y,dfip$fx26)
e27 = fmeans(dfip$y,dfip$fx27)
e34 = fmeans(dfip$y,dfip$fx34)
e35 = fmeans(dfip$y,dfip$fx35)
e36 = fmeans(dfip$y,dfip$fx36)
e37 = fmeans(dfip$y,dfip$fx37)
e45 = fmeans(dfip$y,dfip$fx45)
e46 = fmeans(dfip$y,dfip$fx46)
e47 = fmeans(dfip$y,dfip$fx47)
e56 = fmeans(dfip$y,dfip$fx56)
e57 = fmeans(dfip$y,dfip$fx57)
e67 = fmeans(dfip$y,dfip$fx67)
# Create factor labels from effect values.
e = round(rbind(e7,e6,e67,e5,e56,e57,e4,e45,e46,e47,
e3,e34,e35,e36,e37,e2,e23,e24,e25,e26,e27,
e1,e12,e13,e14,e15,e16,e17),1)
textlabs = c("X7 =",
"X6 =", "X67 =",
"X5 =", "X56 =", "X57 =",
"X4 =", "X45 =", "X46 =", "X47 =",
"X3 =", "X34 =", "X35 =", "X36 =", "X37 =",
"X2 =", "X23 =", "X24 =", "X25 =", "X26 =", "X27 =",
"X1 =", "X12 =", "X13 =", "X14 =", "X15 =", "X16 =", "X17 =")
labs = paste(textlabs,e[,3])
group = factor(c(1:28),labels=labs)
# Create data frame with factor level means.
x = e[,1]
xlev = rep(-1,28)
xlo = cbind(x,xlev,group)
x = e[,2]
xlev = rep(1,28)
xhi = cbind(x,xlev,group)
mm = rbind(xlo,xhi)
mm = as.data.frame(mm)
# Customize Lattice plot layout and color.
sp = c(T,T,T,T,T,T,F,
T,T,T,T,T,F,F,
T,T,T,T,F,F,F,
T,T,T,F,F,F,F,
T,T,F,F,F,F,F,
T,F,F,F,F,F,F,
F,F,F,F,F,F,F)
strip.bg_custom = trellis.par.get("strip.background")
strip.bg_custom$col =c("#cce6ff","#ffe5cc","#ccffcc","#ccffff","#ffccff",
"#ffcccc","#ffffcc")
strip.sh_custom = strip.bg_custom
trellis.par.set("strip.background", strip.bg_custom)
trellis.par.set("strip.shingle", strip.sh_custom)
trellis.par.set(list(fontsize=list(text=10)))
# Generate plot.
xyplot(x~xlev | group, data=mm, type="b", xlim=c(-2,2),
layout=c(7,7), skip=sp, col=c(4),
strip = function(..., style,factor.levels,strip.levels,strip.names)
strip.default(..., style = 1,factor.levels=labs,
strip.levels=c(F,T),strip.names=c(T,F)),
xlab="Factor Level", ylab="Light Intensity",
main="DEX Mean Plot for Sonoluminescense Light Intensity",
panel = function(x, y, ...){
panel.xyplot(x, y, ...)
panel.abline(h = ybar, lty = 2, col = 2)
}
)
## Create dataframe with factors.
fid1 = factor(id23)
fid2 = factor(id13)
fid3 = factor(id12)
df2 = data.frame(y,fx1,fx2,fx3,fx4,fx5,fx6,fx7,
fid1,fid2,fid3)
## Generate seven plots on one page.
par(mfrow=c(3,3),las=0)
## Generate level means.
ag = aggregate(x=df2$y,by=list(df2$fx1,df2$fx2,df2$fx3),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.1,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag3 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
ag13 = paste(ag1,ag3,sep="")
ag23 = paste(ag2,ag3,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag$Group.2,ag$Group.3,ag12,ag13,ag23)
## Generate the block plot for factor 1.
boxplot(dfag$ag.x ~ dfag$ag23, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X2,X3",
main="Primary Factor X1", cex.main=1)
## Add points for the effects.
points(dfag$ag23[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag23[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(3.25,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X1 Level", cex=.7, horiz=TRUE)
## Generate the block plot for factor 2.
boxplot(dfag$ag.x ~ dfag$ag13, medlty="blank", boxwex=.5,
ylab="Sensitivity",xlab="Factor Levels of X1 X3",
main="Primary Factor X2", cex.main=1)
## Add points for the effect means.
points(dfag$ag13[dfag$ag.Group.2==1],dfag$ag.x[dfag$ag.Group.2==1],
pch=19,col="blue")
points(dfag$ag13[dfag$ag.Group.2==-1],dfag$ag.x[dfag$ag.Group.2==-1],
col="blue")
## Add legend.
legend(.5,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X2 Level", cex=.7, horiz=TRUE)
## Generate the block plot for factor 3.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Sensitivity",xlab="Factor Levels of X1 X2",
main="Primary Factor X3", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.3==1],dfag$ag.x[dfag$ag.Group.3==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.3==-1],dfag$ag.x[dfag$ag.Group.3==-1],
col="blue")
## Add legend.
legend(0.5,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X3 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 4.
ag = aggregate(x=df2$y,by=list(df2$fx4,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 4.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X4", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,220,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X4 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 5.
ag = aggregate(x=df2$y,by=list(df2$fx5,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 5.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X5", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,225,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X5 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 6.
ag = aggregate(x=df2$y,by=list(df2$fx6,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 6.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X6", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,225,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X6 Level", cex=.7, horiz=TRUE)
## Generate level means for factor 7.
ag = aggregate(x=df2$y,by=list(df2$fx7,df2$fx1,df2$fx2),FUN="mean")
## Recode variables for plotting.
ag1 = recode(ag$Group.2,"+" <- c(1),"-" <- c(-1))
ag2 = recode(ag$Group.3,"+" <- c(1),"-" <- c(-1))
ag12 = paste(ag1,ag2,sep="")
dfag = data.frame(ag$x,ag$Group.1,ag12)
## Generate the block plot for factor 7.
boxplot(dfag$ag.x ~ dfag$ag12, medlty="blank", boxwex=.5,
ylab="Light Intensity",xlab="Factor Levels of X1,X2",
main="Primary Factor X7", cex.main=1)
## Add points for the effects.
points(dfag$ag12[dfag$ag.Group.1==1],dfag$ag.x[dfag$ag.Group.1==1],
pch=19,col="blue")
points(dfag$ag12[dfag$ag.Group.1==-1],dfag$ag.x[dfag$ag.Group.1==-1],
col="blue")
## Add legend.
legend(.5,350,c("+","-"),pch=c(19,1), col="blue", bty="o",
x.intersp=.75, title="X7 Level", cex=.7, horiz=TRUE)
par(mfrow=c(1,1))
## Generate Youden plot.
## Generate averages for each factor and level.
q1 = aggregate(x=dfip$y,by=list(dfip$fx1),FUN="mean")
qt1 = t(q1$x)
q2 = aggregate(x=dfip$y,by=list(dfip$fx2),FUN="mean")
qt2 = t(q2$x)
q3 = aggregate(x=dfip$y,by=list(dfip$fx3),FUN="mean")
qt3 = t(q3$x)
q4 = aggregate(x=dfip$y,by=list(dfip$fx4),FUN="mean")
qt4 = t(q4$x)
q5 = aggregate(x=dfip$y,by=list(dfip$fx5),FUN="mean")
qt5 = t(q5$x)
q6 = aggregate(x=dfip$y,by=list(dfip$fx6),FUN="mean")
qt6 = t(q6$x)
q7 = aggregate(x=dfip$y,by=list(dfip$fx7),FUN="mean")
qt7 = t(q7$x)
q12 = aggregate(x=dfip$y,by=list(dfip$fx12),FUN="mean")
qt12 = t(q12$x)
q13 = aggregate(x=dfip$y,by=list(dfip$fx13),FUN="mean")
qt13 = t(q13$x)
q14 = aggregate(x=dfip$y,by=list(dfip$fx14),FUN="mean")
qt14 = t(q14$x)
q15 = aggregate(x=dfip$y,by=list(dfip$fx15),FUN="mean")
qt15 = t(q15$x)
q16 = aggregate(x=dfip$y,by=list(dfip$fx16),FUN="mean")
qt16 = t(q16$x)
q17 = aggregate(x=dfip$y,by=list(dfip$fx17),FUN="mean")
qt17 = t(q17$x)
q24 = aggregate(x=dfip$y,by=list(dfip$fx24),FUN="mean")
qt24 = t(q24$x)
q124 = aggregate(x=dfip$y,by=list(dfip$fx124),FUN="mean")
qt124 = t(q124$x)
yp = rbind(qt1,qt2,qt3,qt4,qt5,qt6,qt7,
qt12,qt13,qt14,qt15,qt16,qt17,qt24,qt124)
## Generate names for effect estimates.
z = lm(y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 +
x12 + x13 + x14 + x15 + x16 + x17 + x24 + x124)
zz = summary(z)
effects = coef(z)[-1]*2
## Generate Youden plot.
plot(yp[,1],yp[,2], xlim=c(70,155), ylim=c(70,155),
xlab="Average Response for -1 Settings",
ylab="Average Response for +1 Settings",
main="Youden Plot for Sonoluminescense Data")
text(yp[,1],yp[,2],labels=names(effects),pos=4,cex=.75)
abline(h=ybar)
abline(v=ybar)
## Save effects in decreasing order.
torder = zz$coefficients[order(abs(zz$coefficients[,1]),decreasing=TRUE),]
torder[,1]
> (Intercept) x2 x7 x13 x1 x3
> 110.60625 -39.30625 -39.05625 35.00625 33.10625 31.90625
> x17 x12 x16 x14 x6 x5
> -31.73125 -29.78125 -8.16875 -5.24375 -4.51875 3.74375
> x124 x4 x24 x15
> 2.91875 1.85625 0.84375 -0.28125
yvar = torder[-1,1]*2
lvar16 = rownames(torder)
lvar = lvar16[-1]
xvar = c(1:length(lvar))
## Plot absolute values of effects in decreasing order.
plot(xvar,abs(yvar), xlim=c(1,16),
main = "Sonoluminescent Light Intensity",
ylab="|Effect|", xlab="", xaxt="n")
text(xvar,abs(yvar), labels=lvar, pos=4, cex=.8)
## Generate half-normal probability plot of effect estimates.
library(faraway)
halfnorm(effects,nlab=length(effects), cex=.8,
labs=names(effects),
ylab="Ordered |Effects|",
main="Half-Normal Probability Plot of Sonoluminescent Data")
## Compute the residual standard deviation for cumulative
## models (mean plus cumulative terms).
z = lm(y ~ 1)
ese = summary(z)$sigma
z = update(z, . ~ . + x2)
se1 = summary(z)$sigma
z = update(z, . ~ . + x7)
se2 = summary(z)$sigma
z = update(z, . ~ . + x13)
se3 = summary(z)$sigma
z = update(z, . ~ . + x1)
se4 = summary(z)$sigma
z = update(z, . ~ . + x3)
se5 = summary(z)$sigma
z = update(z, . ~ . + x17)
se6 = summary(z)$sigma
z = update(z, . ~ . + x12)
se7 = summary(z)$sigma
z = update(z, . ~ . + x16)
se8 = summary(z)$sigma
z = update(z, . ~ . + x14)
se9 = summary(z)$sigma
z = update(z, . ~ . + x6)
se10 = summary(z)$sigma
z = update(z, . ~ . + x5)
se11 = summary(z)$sigma
z = update(z, . ~ . + x124)
se12 = summary(z)$sigma
z = update(z, . ~ . + x4)
se13 = summary(z)$sigma
z = update(z, . ~ . + x24)
se14 = summary(z)$sigma
z = update(z, . ~ . + x15)
se15 = summary(z)$sigma
Eff.SE = rbind(ese,se1,se2,se3,se4,se5,se6,se7,se8,se9,
se10,se11,se12,se13,se14,se15)
## Plot residual standard deviation for cumulative models.
plot(Eff.SE, main = "Sonoluminescent Light Intensity",
ylab="Cummulative Residual Standard Deviation", xlab="Additional Term",
xaxt="n")
text(c(1:length(Eff.SE)) ,Eff.SE, labels=lvar16, pos=4, cex=.8)
## Generate level means for plotting.
q = aggregate(x=dfip$y,by=list(dfip$fx2,dfip$fx7),FUN="mean")
qv1 = as.vector(q$Group.1,mode="numeric")-1
qv2 = as.vector(q$Group.2,mode="numeric")-1
qv1[qv1==0] = -1
qv2[qv2==0] = -1
## Contour plot y(x7),x(x2)
## Generate x and y data for plotting.
xord = seq(-2,2,by=.1)
yord = seq(-2,2,by=.1)
## Fit model with two factors, x2 and x7, and their interaction
## for predicting the surface.
z = lm(y ~ 1 + x2 + x7 + x27)
## Generate predicted response surface and generate matrix of surface.
## Evaluate the fitted response surface at factor settings (a, b),
## using the coefficients of the two-factor-plus-interaction model
## stored in z (intercept, a, b, a:b — in that order).
model = function (a, b){
cf = z$coefficients
cf[1] + cf[2]*a + cf[3]*b + cf[4]*a*b
}
pmatu = outer(xord,yord,model)
## Generate contour plot, add design points and labels.
contour(xord, yord, pmatu, nlevels=15, main="Contour Plot",
xlab="x2", ylab="x7", col="blue")
points(qv1,qv2,pch=19)
text(c(qv1[1],qv1[3]),c(qv2[1],qv2[3]),labels=c(q$x[1],q$x[3]),pos=2)
text(c(qv1[2],qv1[4]),c(qv2[2],qv2[4]),labels=c(q$x[2],q$x[4]),pos=4)
lines(c(-1,1,1,-1,-1),c(-1,-1,1,1,-1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pp_instant.R
\name{pp_instant}
\alias{pp_instant}
\title{Create a pp module which evaluates expression specified in its argument.}
\usage{
pp_instant(expr, sub_object = NULL, desc = NULL, data = NULL,
standby = TRUE)
}
\arguments{
\item{expr}{Expression.}
\item{sub_object}{Any object necessary to evaluate `expr`.}
\item{desc}{(optional) Description of the module.}
}
\description{
Create a pp module which evaluates expression specified in its argument.
}
| /man/pp_instant.Rd | no_license | jeongnna/dps | R | false | true | 540 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pp_instant.R
\name{pp_instant}
\alias{pp_instant}
\title{Create a pp module which evaluates expression specified in its argument.}
\usage{
pp_instant(expr, sub_object = NULL, desc = NULL, data = NULL,
standby = TRUE)
}
\arguments{
\item{expr}{Expression.}
\item{sub_object}{Any object necessary to evaluate `expr`.}
\item{desc}{(optional) Description of the module.}
}
\description{
Create a pp module which evaluates expression specified in its argument.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var_creation.r
\name{cv_bake}
\alias{cv_bake}
\title{Generate a composite variable}
\usage{
cv_bake(data, new, ingred.list)
}
\arguments{
\item{data}{data frame}
\item{new}{name of new variable to create}
\item{ingred.list}{quosure of variables to average into composite}
}
\value{
a new variable composite of individual items
}
\description{
This function creates a composite variable of a group of individual items in your data frame.
}
| /man/cv_bake.Rd | no_license | crbwin/clnR | R | false | true | 519 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var_creation.r
\name{cv_bake}
\alias{cv_bake}
\title{Generate a composite variable}
\usage{
cv_bake(data, new, ingred.list)
}
\arguments{
\item{data}{data frame}
\item{new}{name of new variable to create}
\item{ingred.list}{quosure of variables to average into composite}
}
\value{
a new variable composite of individual items
}
\description{
This function creates a composite variable of a group of individual items in your data frame.
}
|
# 1. Crear un nuevo proyecto denominado practica 4.
# 2. Mediante la libreria readr, o mediante los menus de RStudio, leer los datasets sleep.csv y activities.csv
# ambos archivos deben estar previamente en la carpeta del proyecto creado
library(readr)
library(tidyverse)
library(dplyr)
activities <- read_csv("C:/Users/jbart/OneDrive/Escritorio/CUNEF/Programación en R/PROYECTO 4/POYECTO 4/activities.csv")
sleep <- read_csv("C:/Users/jbart/OneDrive/Escritorio/CUNEF/Programación en R/PROYECTO 4/POYECTO 4/sleep.csv")
# 3.Comprobar el contenido con View y contar cuantos NAs hay en la columna GPS del dataset activities
View(sleep)
View(activities)
sum(is.na(activities$GPS))
# 4. Crear un objeto R denominado act_new que contenga solo las variables
# siguientes: 1,2,5,6
act_new<- select(activities,1,2,5,6)
view(act_new)
# 5. Renombrar la variable 'Activity type' con el nombre 'tipo' y la variable 'Time zone' como 'ciudad'
activities<- rename(activities,'tipo'= 'Activity type', 'ciudad'= 'Timezone') #Renombramos en ambos data
act_new<- rename(act_new,'tipo'= 'Activity type', 'ciudad'= 'Timezone')
# 6. Realizar un recuento de tipo de actividad con summary. Para ello
# debes transformar previamente la variable tipo a factor con as.factor.
# Crea un grafico de barras con dicha variable para visualizar las frecuencias.
# Haz lo mismo para la variable ciudad
activities$tipo<- as.factor(activities$tipo)
summary(activities$tipo)
plot(x=activities$tipo)
activities$ciudad<-as.factor(activities$ciudad)
summary(activities$ciudad)
plot(x=activities$ciudad)
#7. Filtrar los registros de act_new que correspondan con ciudad Amsterdam en otro objeto
# y lo mismo con Madrid. Con esos nuevos objetos determina los deportes que
# no se practican en Amsterdam y si en Madrid y viceversa. Genera graficos para visualizar los resultados
#OBEJTOS AMSTERDAM Y MADRID SOLO
amsterdam<-filter(act_new, ciudad=="Europe/Amsterdam")
madrid<-filter(act_new, ciudad=="Europe/Madrid")
setdiff(amsterdam$tipo, madrid$tipo)
setdiff(madrid$tipo, amsterdam$tipo)
#8. Encontrar las fechas en las que se ha practicado bicicleta o pilates en Amsterdam en el a?o 2019
#FILTER CON COMPOSICION DE COSAS (SI NO SALE, QUITAMOS A?O 2019, XQ HAY QUE TRANSFORMAR A 2019)
fechas_bici_pilates<-filter(amsterdam, tipo=="Cycling" | tipo=="Pilates")
fechas_bici_pilates
#9. Crear una nueva variable dif con los minutos de realizaci?n de cada actividad en Amsterdam
# y realizar una representaci?n gr?fica de los resultados con plot y determinar que deporte o deportes
# se han practicado durante dos horas o mas
#MUTATE!!!!
dif<-mutate(amsterdam, tiempo=a-de)
dif%>%
group_by(tipo) %>%
summarize(sum(tiempo))
#10. Guardar el nuevo dataset en un archivo llamado "act_new.csv"
#WRITE.CSV EN UN NUEVO ARCHIVO EN NUESTRO PROYECTO
#-------------------------------
#-----SEGUNDA PARTE-------------
# 11. Cargar el dataset sleep en un objeto llamado sleep
sleep <- read_csv("C:/Users/jbart/OneDrive/Escritorio/CUNEF/Programación en R/PROYECTO 4/POYECTO 4/sleep.csv")
#12. crear un nuevo data set llamado sleep_new que contenga solo las variables
#que contengan informaci?n, que no sean todo cero.
summary(sleep)
sleep_new <- data.frame(select(sleep, -"Rem (seg)", -"Snoring episodes", -"Average heart rate", -"Heart rate (min)", -"Heart rate (max)"))
#13. Renombrar las variables de sleep_new a nombres cortos:
sleep_new<-rename(sleep_new,"ligero"="ligero..s.",'profundo'='profundo..s.','despierto'='despierto..s.', 'tiempo_de_dormir'='Duration.to.sleep..s.', 'tiempo_de_despertar'='Duration.to.wake.up..s.', 'roncar'='Snoring..s.' )
#14. Eliminar todas las filas que contengan alg?n NA
sleep_new <- na.omit(sleep_new)
# 15. Calcular cuanto tiempo en total se ha dormido cada noche: ligero+profundo
#SUMAR EN SEGUNDOS Y PARA REPRESENTACION PASARLO A HORAS
ligero_y_profundo <- mutate (sleep_new, total=ligero + profundo)
ligero_y_profundo
# 16. Visualizacion de la relacion ligero-profundo-total
#GGPLOT2, 3 GRAFICOS, CON LIGEROTOTAL, LIGEROPROFUNDO, PROFUNDO TOTAL
#PRIMERA FORMA
plot(sleep_new$ligero, sleep_new$profundo)
plot(sleep_new$ligero, sleep_new$ligero_y_profundo)
plot(sleep_new$profundo, sleep_new$ligero_y_profundo)
#SEGUNDA FORMA
ggplot(data = sleep_new) +
geom_point(mapping = aes(x=sleep_new$ligero, y=sleep_new$profundo))
ggplot(data = sleep_new) +
geom_point(mapping = aes(x=sleep_new$ligero_y_profundo, y=sleep_new$ligero))
ggplot(data = sleep_new) +
geom_point(mapping = aes(x=sleep_new$profundo, y=sleep_new$ligero_y_profundo))
# A la vista de los resultados, que tipo de sue?o es mas relevante?
#Respuesta: el plot mas relevante es el primero, el que corresponde a LIGEROPROFUNDO
# 17. Realizar un analisis de diferencias entre los dos tipos de sue?o e interpretar los resultados
# usar la funci?n ICalpha o el 'One sample t-test' de TeachingDemos: t.test()
t.test(x=((sleep_new$profundo)-(sleep_new$ligero)), mu=0)
#18. Crear una nueva variable 'ciudad' en sleep_new con la informacion de act_new.
#CREAMOS LA VARIABLE CIUDAD (RELLENARLA CON UN FOR)
sleep_new$ciudad<-NA #creamos la variable ciudad vacia
activities$a<- as.Date(activities$a) #quitamos horas y minutos
sleep_new$a <- as.Date(sleep_new$a)
for(i in 1:length(sleep_new$a)){ #realizamos el bucle
for (j in 1:length(activities$a)) {
if (sleep_new$a[i] == activities$a [j]){
sleep_new$ciudad[i]<-activities$Timezone [j]
}
}
}
#19. Representar la relaci?n totalsleep y profundo usando como facetas el factor ciudad
ggplot(data=sleep_new) +
geom_point(mapping = aes(x= total, y = profundo, color = ciudad))
#20. Guardar el dataset sleep_new en un archivo "sleep_new.csv"
write.csv(sleep_new, file="sleep_new.csv")
#21. Guardar el proyecto completo. Subir la carpeta del proyecto al campus.
#GUARDADO Y COMPLETADO
| /Programacion_en_R/proyecto 4.R | no_license | jbartolo97/MasterDataScienceCunef2019 | R | false | false | 5,831 | r | # 1. Crear un nuevo proyecto denominado practica 4.
# 2. Mediante la libreria readr, o mediante los menus de RStudio, leer los datasets sleep.csv y activities.csv
# ambos archivos deben estar previamente en la carpeta del proyecto creado
library(readr)
library(tidyverse)
library(dplyr)
activities <- read_csv("C:/Users/jbart/OneDrive/Escritorio/CUNEF/Programación en R/PROYECTO 4/POYECTO 4/activities.csv")
sleep <- read_csv("C:/Users/jbart/OneDrive/Escritorio/CUNEF/Programación en R/PROYECTO 4/POYECTO 4/sleep.csv")
# 3.Comprobar el contenido con View y contar cuantos NAs hay en la columna GPS del dataset activities
View(sleep)
View(activities)
sum(is.na(activities$GPS))
# 4. Crear un objeto R denominado act_new que contenga solo las variables
# siguientes: 1,2,5,6
act_new<- select(activities,1,2,5,6)
view(act_new)
# 5. Renombrar la variable 'Activity type' con el nombre 'tipo' y la variable 'Time zone' como 'ciudad'
activities<- rename(activities,'tipo'= 'Activity type', 'ciudad'= 'Timezone') #Renombramos en ambos data
act_new<- rename(act_new,'tipo'= 'Activity type', 'ciudad'= 'Timezone')
# 6. Realizar un recuento de tipo de actividad con summary. Para ello
# debes transformar previamente la variable tipo a factor con as.factor.
# Crea un grafico de barras con dicha variable para visualizar las frecuencias.
# Haz lo mismo para la variable ciudad
activities$tipo<- as.factor(activities$tipo)
summary(activities$tipo)
plot(x=activities$tipo)
activities$ciudad<-as.factor(activities$ciudad)
summary(activities$ciudad)
plot(x=activities$ciudad)
#7. Filtrar los registros de act_new que correspondan con ciudad Amsterdam en otro objeto
# y lo mismo con Madrid. Con esos nuevos objetos determina los deportes que
# no se practican en Amsterdam y si en Madrid y viceversa. Genera graficos para visualizar los resultados
#OBEJTOS AMSTERDAM Y MADRID SOLO
amsterdam<-filter(act_new, ciudad=="Europe/Amsterdam")
madrid<-filter(act_new, ciudad=="Europe/Madrid")
setdiff(amsterdam$tipo, madrid$tipo)
setdiff(madrid$tipo, amsterdam$tipo)
#8. Encontrar las fechas en las que se ha practicado bicicleta o pilates en Amsterdam en el a?o 2019
#FILTER CON COMPOSICION DE COSAS (SI NO SALE, QUITAMOS A?O 2019, XQ HAY QUE TRANSFORMAR A 2019)
fechas_bici_pilates<-filter(amsterdam, tipo=="Cycling" | tipo=="Pilates")
fechas_bici_pilates
#9. Crear una nueva variable dif con los minutos de realizaci?n de cada actividad en Amsterdam
# y realizar una representaci?n gr?fica de los resultados con plot y determinar que deporte o deportes
# se han practicado durante dos horas o mas
#MUTATE!!!!
dif<-mutate(amsterdam, tiempo=a-de)
dif%>%
group_by(tipo) %>%
summarize(sum(tiempo))
#10. Guardar el nuevo dataset en un archivo llamado "act_new.csv"
#WRITE.CSV EN UN NUEVO ARCHIVO EN NUESTRO PROYECTO
#-------------------------------
#-----SEGUNDA PARTE-------------
# 11. Cargar el dataset sleep en un objeto llamado sleep
sleep <- read_csv("C:/Users/jbart/OneDrive/Escritorio/CUNEF/Programación en R/PROYECTO 4/POYECTO 4/sleep.csv")
#12. crear un nuevo data set llamado sleep_new que contenga solo las variables
#que contengan informaci?n, que no sean todo cero.
summary(sleep)
sleep_new <- data.frame(select(sleep, -"Rem (seg)", -"Snoring episodes", -"Average heart rate", -"Heart rate (min)", -"Heart rate (max)"))
#13. Renombrar las variables de sleep_new a nombres cortos:
sleep_new<-rename(sleep_new,"ligero"="ligero..s.",'profundo'='profundo..s.','despierto'='despierto..s.', 'tiempo_de_dormir'='Duration.to.sleep..s.', 'tiempo_de_despertar'='Duration.to.wake.up..s.', 'roncar'='Snoring..s.' )
#14. Eliminar todas las filas que contengan alg?n NA
sleep_new <- na.omit(sleep_new)
# 15. Calcular cuanto tiempo en total se ha dormido cada noche: ligero+profundo
#SUMAR EN SEGUNDOS Y PARA REPRESENTACION PASARLO A HORAS
ligero_y_profundo <- mutate (sleep_new, total=ligero + profundo)
ligero_y_profundo
# 16. Visualizacion de la relacion ligero-profundo-total
#GGPLOT2, 3 GRAFICOS, CON LIGEROTOTAL, LIGEROPROFUNDO, PROFUNDO TOTAL
#PRIMERA FORMA
plot(sleep_new$ligero, sleep_new$profundo)
plot(sleep_new$ligero, sleep_new$ligero_y_profundo)
plot(sleep_new$profundo, sleep_new$ligero_y_profundo)
#SEGUNDA FORMA
ggplot(data = sleep_new) +
geom_point(mapping = aes(x=sleep_new$ligero, y=sleep_new$profundo))
ggplot(data = sleep_new) +
geom_point(mapping = aes(x=sleep_new$ligero_y_profundo, y=sleep_new$ligero))
ggplot(data = sleep_new) +
geom_point(mapping = aes(x=sleep_new$profundo, y=sleep_new$ligero_y_profundo))
# A la vista de los resultados, que tipo de sue?o es mas relevante?
#Respuesta: el plot mas relevante es el primero, el que corresponde a LIGEROPROFUNDO
# 17. Realizar un analisis de diferencias entre los dos tipos de sue?o e interpretar los resultados
# usar la funci?n ICalpha o el 'One sample t-test' de TeachingDemos: t.test()
t.test(x=((sleep_new$profundo)-(sleep_new$ligero)), mu=0)
#18. Crear una nueva variable 'ciudad' en sleep_new con la informacion de act_new.
#CREAMOS LA VARIABLE CIUDAD (RELLENARLA CON UN FOR)
sleep_new$ciudad<-NA #creamos la variable ciudad vacia
activities$a<- as.Date(activities$a) #quitamos horas y minutos
sleep_new$a <- as.Date(sleep_new$a)
for(i in 1:length(sleep_new$a)){ #realizamos el bucle
for (j in 1:length(activities$a)) {
if (sleep_new$a[i] == activities$a [j]){
sleep_new$ciudad[i]<-activities$Timezone [j]
}
}
}
#19. Representar la relaci?n totalsleep y profundo usando como facetas el factor ciudad
ggplot(data=sleep_new) +
geom_point(mapping = aes(x= total, y = profundo, color = ciudad))
#20. Guardar el dataset sleep_new en un archivo "sleep_new.csv"
write.csv(sleep_new, file="sleep_new.csv")
#21. Guardar el proyecto completo. Subir la carpeta del proyecto al campus.
#GUARDADO Y COMPLETADO
|
#Juho Ivaska, 8.11.2018. This is the code for second excercise set of the course.
learning2014 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", header = TRUE, sep = "\t")
dim(learning2014)
str(learning2014)
#Looks like results of some kind of questionary.
#I didn't manage to create the analysis dataset myself so I took it from MOOC
analysis_dataset <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/learning2014.txt", header = TRUE, sep = ",")
write.table(analysis_dataset, file = "learning2014.txt")
read.table("C:/Users/juhoi/OneDrive/Tiedostot/Coodaushit/IODS-project/data/learning2014.txt")
str(learning2014)
head(learning2014)
#endofstory | /data/create_learning2014.R | no_license | Juhizzz/IODS-project | R | false | false | 719 | r | #Juho Ivaska, 8.11.2018. This is the code for second excercise set of the course.
learning2014 <- read.table("http://www.helsinki.fi/~kvehkala/JYTmooc/JYTOPKYS3-data.txt", header = TRUE, sep = "\t")
dim(learning2014)
str(learning2014)
#Looks like results of some kind of questionary.
#I didn't manage to create the analysis dataset myself so I took it from MOOC
analysis_dataset <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/learning2014.txt", header = TRUE, sep = ",")
write.table(analysis_dataset, file = "learning2014.txt")
read.table("C:/Users/juhoi/OneDrive/Tiedostot/Coodaushit/IODS-project/data/learning2014.txt")
str(learning2014)
head(learning2014)
#endofstory |
library(sbtools)
### Name: query_sb_text
### Title: Query SB for items containing specific text
### Aliases: query_sb_text
### ** Examples
#query for the package maintainer's name
query_sb_text('Luke Winslow')
#query for one of the old river gaging stations
query_sb_text('Lees Ferry')
| /data/genthat_extracted_code/sbtools/examples/query_sb_text.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 296 | r | library(sbtools)
### Name: query_sb_text
### Title: Query SB for items containing specific text
### Aliases: query_sb_text
### ** Examples
#query for the package maintainer's name
query_sb_text('Luke Winslow')
#query for one of the old river gaging stations
query_sb_text('Lees Ferry')
|
library(dplyr)
#Carga de archivos
archivesName <- list(
"SP1-2015.csv",
"SP1-2016.csv",
"SP1-2017.csv",
"SP1-2018.csv",
"SP1-2019.csv"
)
dataSoccer <- list()
for (i in 1:length(archivesName)) {
dataSoccer[[i]] <- read.csv(file = archivesName[[i]])
}
#dataSoccer <- list(read.csv(file = "SP1-2017.csv"), read.csv(file = "SP1-2018.csv"), read.csv(file = "SP1-2019.csv"))
#Obten una mejor idea de las caracteristicas de los data frames al usar las funciones: str, head, View y summary
lapply(dataSoccer, str)
lapply(dataSoccer, head)
lapply(dataSoccer, View)
lapply(dataSoccer, summary)
#Con la funcion select del paquete dplyr selecciona unicamente las columnas Date, HomeTeam, AwayTeam, FTHG, FTAG y FTR; esto para cada uno de los data frames
for (i in 1:length(dataSoccer)) {
dataSoccer[[i]] <- select(dataSoccer[[i]], Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
}
#Asegurate de que los elementos de las columnas correspondientes de los nuevos data frames sean del mismo tipo (Hint 1: usa as.Date y mutate para arreglar las fechas)
for (i in 1:3) {
dataSoccer[[i]] <- mutate(dataSoccer[[i]], Date = as.Date(dataSoccer[[i]]$Date, format = "%d/%m/%y"))
}
for (i in 4:5) {
dataSoccer[[i]] <- mutate(dataSoccer[[i]], Date = as.Date(dataSoccer[[i]]$Date, format = "%d/%m/%Y"))
}
#Con ayuda de la funcion rbind forma un unico data frame que contenga las seis columnas mencionadas en el punto 3
dataFrameSoccer <- do.call("rbind", dataSoccer)
write.csv(dataFrameSoccer, file = "dataSoccer.csv")
| /sesion_2/postwork.R | no_license | YaelRmz/bedu_fase_2_modulo_1 | R | false | false | 1,506 | r | library(dplyr)
#Carga de archivos
archivesName <- list(
"SP1-2015.csv",
"SP1-2016.csv",
"SP1-2017.csv",
"SP1-2018.csv",
"SP1-2019.csv"
)
dataSoccer <- list()
for (i in 1:length(archivesName)) {
dataSoccer[[i]] <- read.csv(file = archivesName[[i]])
}
#dataSoccer <- list(read.csv(file = "SP1-2017.csv"), read.csv(file = "SP1-2018.csv"), read.csv(file = "SP1-2019.csv"))
#Obten una mejor idea de las caracteristicas de los data frames al usar las funciones: str, head, View y summary
lapply(dataSoccer, str)
lapply(dataSoccer, head)
lapply(dataSoccer, View)
lapply(dataSoccer, summary)
#Con la funcion select del paquete dplyr selecciona unicamente las columnas Date, HomeTeam, AwayTeam, FTHG, FTAG y FTR; esto para cada uno de los data frames
for (i in 1:length(dataSoccer)) {
dataSoccer[[i]] <- select(dataSoccer[[i]], Date, HomeTeam, AwayTeam, FTHG, FTAG, FTR)
}
#Asegurate de que los elementos de las columnas correspondientes de los nuevos data frames sean del mismo tipo (Hint 1: usa as.Date y mutate para arreglar las fechas)
for (i in 1:3) {
dataSoccer[[i]] <- mutate(dataSoccer[[i]], Date = as.Date(dataSoccer[[i]]$Date, format = "%d/%m/%y"))
}
for (i in 4:5) {
dataSoccer[[i]] <- mutate(dataSoccer[[i]], Date = as.Date(dataSoccer[[i]]$Date, format = "%d/%m/%Y"))
}
#Con ayuda de la funcion rbind forma un unico data frame que contenga las seis columnas mencionadas en el punto 3
dataFrameSoccer <- do.call("rbind", dataSoccer)
write.csv(dataFrameSoccer, file = "dataSoccer.csv")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.