content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Use the PMIDs of articles to obtain the PMIDs of the references of those articles
#'
#' Search the references of a list of articles (as PMIDs) and return them as
#' PMIDs too. Uses the NCBI E-utilities `elink` endpoint
#' (`dbfrom=pubmed&linkname=pubmed_pubmed_refs`); requests with more than 300
#' IDs are sent via POST to stay within URL length limits, as NCBI recommends.
#'
#' @param pmid character vector with the PMIDs of articles
#'
#' @return A character vector with the PMIDs of the references of the articles
#' used as inputs in the function
#' @export
#'
#' @examples
#' \dontrun{
#' pmid_to_refs(c("31452104", "31437182"))
#' }
pmid_to_refs <- function(pmid) {
  base <- "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
  # Repeat "&id=" for every PMID (instead of a comma-separated list): that way
  # elink returns the references of each article independently, otherwise the
  # repeated articles would collapse into a single LinkSet.
  url_id <- str_c("&id=", str_c(pmid, collapse = "&id="))
  if (length(pmid) <= 300) {
    # Small queries fit comfortably in a GET URL.
    url <- str_c(
      base,
      "elink.fcgi?dbfrom=pubmed&linkname=pubmed_pubmed_refs",
      url_id
    )
    output <- GET(url)
  } else {
    # Large ID lists go in the request body to avoid URL length limits.
    output <- POST(
      url = str_c(base, "elink.fcgi?"),
      body = str_c("dbfrom=pubmed&linkname=pubmed_pubmed_refs", url_id)
    )
  }
  # Project helper: pulls the text of every matching XML node out of the
  # httr response.
  xml_extract_text(output, "//LinkSet/LinkSetDb/Link")
}
| /www/pmid_to_refs.R | no_license | danimedi/find_main_references | R | false | false | 1,097 | r | #' Use the PMIDs of articles to obtain the PMIDs of the references of that articles
#'
#' Search the references of a list of articles (as PMIDs) and return them as PMIDs too.
#'
#' @param pmid character vector with the PMIDs of articles
#'
#' @return A character vector with the PMIDs of the references of the articles used as
#' inputs in the function
#' @export
#'
#' @examples
#'
pmid_to_refs <- function(pmid) {
base <- "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
# here we're using `"&id="` instead of commas, this is because that way you obtain
# the references from artiles independently, otherwise the repeated articles would
# collapse
url_id <- str_c("&id=", str_c(pmid, collapse = "&id="))
if (length(pmid) <= 300) {
url <- str_c(
base,
"elink.fcgi?dbfrom=pubmed&linkname=pubmed_pubmed_refs",
url_id
)
output <- GET(url)
} else {
output <- POST(
url = str_c(base, "elink.fcgi?"),
body = str_c("dbfrom=pubmed&linkname=pubmed_pubmed_refs", url_id)
)
}
xml_extract_text(output, "//LinkSet/LinkSetDb/Link")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rxdbHandling.R
\name{rxdbQuery_v1}
\alias{rxdbQuery_v1}
\title{very simple query formulation, build queries using endpoints of bhklab PharmacoDB API}
\usage{
rxdbQuery_v1(..., url = "https://api.pharmacodb.com/v1/",
decoder = basicDecoder)
}
\arguments{
\item{\dots}{typically a string representing an API endpoint, will be processed by unlist() and then to paste0 preceded by \code{url}}
\item{url}{of a PharmacoDB server API target}
\item{decoder}{a function of one argument that will be applied to API response (typically JSON)}
}
\description{
very simple query formulation, build queries using endpoints of bhklab PharmacoDB API
}
\examples{
qout = rxdbQuery_v1("cell_lines") # yields 30; append "?all=true" to retrieve all
sapply(qout, function(x) x[[2]])
}
| /man/rxdbQuery_v1.Rd | no_license | shwetagopaul92/RxGeno | R | false | true | 846 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rxdbHandling.R
\name{rxdbQuery_v1}
\alias{rxdbQuery_v1}
\title{very simple query formulation, build queries using endpoints of bhklab PharmacoDB API}
\usage{
rxdbQuery_v1(..., url = "https://api.pharmacodb.com/v1/",
decoder = basicDecoder)
}
\arguments{
\item{\dots}{typically a string representing an API endpoint, will be processed by unlist() and then to paste0 preceded by \code{url}}
\item{url}{of a PharmacoDB server API target}
\item{decoder}{a function of one argument that will be applied to API response (typically JSON)}
}
\description{
very simple query formulation, build queries using endpoints of bhklab PharmacoDB API
}
\examples{
qout = rxdbQuery_v1("cell_lines") # yields 30; append "?all=true" to retrieve all
sapply(qout, function(x) x[[2]])
}
|
#!usr/bin/env R
# NOTE(review): the shebang above is malformed — it should read
# "#!/usr/bin/env Rscript" to be executable; it is harmless when the file is
# source()'d from an R session.
#################################################
# Title: 3(2) Peak_Fecundity_Rate and Fecundity Loss Rate in R
# MSc CMEE
# July 2020
# Author: YUAN ZHANG
# refer to: TPC - Fecundity.ipynb
#################################################
# Clear the workspace and close any open graphics devices so the script starts
# from a clean interactive state.
rm(list = ls())
graphics.off()
#--------- Load some packages --------#
library("dplyr")
library("ggplot2")
library("gridExtra")
library("ggforce")
library("pdftools")   # provides pdf_combine(), used in the output section
library("ggpubr")
#--------- Load dataset and check levels --------#
# Trait data, one row per observation; keep only the fecundity records.
data <- read.csv("../data/simple.csv")
b <- subset(data, data$Variable == "Fecundity")
b <- gdata::drop.levels(b)  # drop unused factor levels (gdata called via ::, not attached)
unique((b$species))  # inspect which species have fecundity data
#-----------------------------------------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------------------------------------#
# "Tetraneura nigri abdominalis"
# "Aedes camptorhynchus"
# "Culex annulirostris"
# "Aedes aegypti"
# "Aphis gossypii"
# "Corythucha ciliata"
# "Aedes albopictus"
## "Coleoptera" ##
# 1. only one species : "Anthonomus grandis"
b1 <- subset(b, b$species == "Anthonomus grandis")
b1 <- gdata::drop.levels(b1)
b1$stdvalue <- b1$traitvalue
b1$logvalue <- log(b1$stdvalue)
# check units
unique(b1$unit) # "Eggs/female/day"
# plot b ~ t
bpk1 <- c()
k1 <- c()
temp1 <- unique(b1$temp) # get temperature ranges: 15 20 25 30 35
for (i in 1: length(temp1)) {
df <- b1[which(b1$temp == temp1[i]),]
bpk1[i] = max(df$traitvalue)
df$stdvalue <- df$traitvalue
pbt <- ggplot(df, aes(x = time, y = stdvalue))+ geom_point() + geom_smooth()+
labs(title = paste("Anthonomus grandis",temp1[i],"(\u00B0C)",sep = "" ),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")+
theme(plot.title = element_text(hjust = 0.5,size = 10, face = "bold.italic"),
axis.text=element_text(size=8,face = "bold"),
axis.title.x=element_text(size= 10),
axis.title.y=element_text(size= 10))
ggsave(paste("../Results/bt_1.Anthonomus grandis:", temp1[i], ":.png",sep = ""), device = png())
lm <- lm(logvalue ~ time, data = df)
k1[1] <- coef(lm)[2]
}
bpk1 <- data.frame(bpk1, temp1, k1)
pbpk1< ggplot(bpk1, aes(x = temp1, y = bpk1))+ geom_point() + geom_smooth()+
labs(title = paste("Anthonomus grandis",te,"(\u00B0C)",sep = "" ),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")+
theme(plot.title = element_text(hjust = 0.5,size = 10, face = "bold.italic"),
axis.text=element_text(size=8,face = "bold"),
axis.title.x=element_text(size= 10),
axis.title.y=element_text(size= 10))
ggsave(paste("../Results/bt_1.Anthonomus grandis:", temp1[i], ":.png",sep = ""), device = png())
# A15: Anthonomus grandis at 15 °C — standardise columns, plot fecundity ~
# time, and fit the fecundity loss rate k as the slope of log(b) ~ time.
# NOTE(review): `Ant` is never defined in this script (the Coleoptera subset
# built above is `b1`, with different column names) — this section appears to
# come from an earlier version of the workflow; confirm which object and
# schema (`ambienttemp`/`timestart`/`originaltraitvalue`) it should use.
A15 <- subset(Ant, Ant$ambienttemp == "15")
A15 <- gdata::drop.levels(A15)
A15$time <-A15$timestart
A15$stdunit <- "Days"
A15$stdvalue <- A15$originaltraitvalue
A15$stdname <- paste(unique(A15$ambienttemp),"celcius")
A15$logvalue <- log(A15$stdvalue)
p15 <- ggplot(A15, aes(x = time, y = stdvalue))
A1 <- p15 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 15 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
# Loss rate from the log-scale regression.
A15.lm <- lm(log(stdvalue) ~ time, data = A15)
# Diagnostic plot of the log-transformed values used for the fit.
test <- ggplot(A15, aes(x = time, y = logvalue))
test <- test + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 15 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Log of Fecundity (Eggs/female/day)")
A15_k <- coef(A15.lm)[2]
# A20 — same pipeline at 20 °C.
# NOTE(review): from here on the slope is fitted on the raw values
# (stdvalue ~ time) whereas A15 used log(stdvalue) ~ time — confirm which
# definition of the fecundity loss rate is intended.
A20 <- subset(Ant, Ant$ambienttemp == "20")
A20 <- gdata::drop.levels(A20)
A20$time <-A20$timestart
A20$stdunit <- "Days"
A20$stdvalue <- A20$originaltraitvalue
A20$stdname <- paste(unique(A20$ambienttemp),"celcius")
p20 <- ggplot(A20, aes(x = time, y = stdvalue))
A2 <- p20 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 20 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A20.lm <- lm(stdvalue ~ time, data = A20)
A20_k <- coef(A20.lm)[2]
# A25 — same pipeline at 25 °C.
A25 <- subset(Ant, Ant$ambienttemp == "25")
A25 <- gdata::drop.levels(A25)
A25$time <-A25$timestart
A25$stdunit <- "Days"
A25$stdvalue <- A25$originaltraitvalue
A25$stdname <- paste(unique(A25$ambienttemp),"celcius")
p25 <- ggplot(A25, aes(x = time, y = stdvalue))
A3 <- p25 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 25 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A25.lm <- lm(stdvalue ~ time, data = A25)
A25_k <- coef(A25.lm)[2]
# A30 — same pipeline at 30 °C.
A30 <- subset(Ant, Ant$ambienttemp == "30")
A30 <- gdata::drop.levels(A30)
A30$time <-A30$timestart
A30$stdunit <- "Days"
A30$stdvalue <- A30$originaltraitvalue
A30$stdname <- paste(unique(A30$ambienttemp),"celcius")
p30 <- ggplot(A30, aes(x = time, y = stdvalue))
A4 <- p30 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 30 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A30.lm <- lm(stdvalue ~ time, data = A30)
A30_k <- coef(A30.lm)[2]
# A35 — same pipeline at 35 °C.
A35 <- subset(Ant, Ant$ambienttemp == "35")
A35 <- gdata::drop.levels(A35)
A35$time <-A35$timestart
A35$stdunit <- "Days"
A35$stdvalue <- A35$originaltraitvalue
A35$stdname <- paste(unique(A35$ambienttemp),"celcius")
p35 <- ggplot(A35, aes(x = time, y = stdvalue))
A5 <- p35 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 35 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A35.lm <- lm(stdvalue ~ time, data = A35)
A35_k <- coef(A35.lm)[2]
# combine the five temperatures into one faceted fecundity ~ time figure
time_b_t <- rbind(A15, A20, A25, A30, A35)
b_t <- ggplot(time_b_t, aes(x=time, y=stdvalue)) + geom_point() + theme_bw() +
labs(title=expression(paste("Anthonomus grandis_","Fecundity", (b), "~Time")),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
plot1 <- b_t + facet_wrap(~ stdname, ncol = 2, scales = "free")+ geom_smooth()
# plot bpk and k ~ temperature
Abpk <- c(max(A15$stdvalue), max(A20$stdvalue), max(A25$stdvalue), max(A30$stdvalue), max(A35$stdvalue))
Ak <- c(A15_k, A20_k, A25_k, A30_k, A35_k)
At <- c(15,20,25,30,35)
data_b_A <- data.frame(Abpk,Ak, At)
plot_bpk1 <- ggplot(data_b_A, aes(x = At, y = Abpk)) + geom_point()+
labs(title="Peak Fecundity Rate (bpk)_Anthonomus grandis",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Peak Fecundity Rate (Eggs/female/day)")
plot_k1 <- ggplot(data_b_A, aes(x = At, y = Ak)) + geom_point()+
labs(title="Fecundity Loss Rate (k)_Anthonomus grandis",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Fecundity Loss Rate (k)")
# save
# NOTE(review): these paths use lowercase "../results" while the loop earlier
# in the file wrote to "../Results" — on a case-sensitive filesystem those are
# different directories; confirm which one exists.
ggsave("../results/Anthonomus_grandis_b_time15.pdf", plot=A1)
ggsave("../results/Anthonomus_grandis_b_time20.pdf", plot=A2)
ggsave("../results/Anthonomus_grandis_b_time25.pdf", plot=A3)
ggsave("../results/Anthonomus_grandis_b_time30.pdf", plot=A4)
ggsave("../results/Anthonomus_grandis_b_time35.pdf", plot=A5)
ggsave("../results/b_time1.pdf", plot=plot1)
ggsave("../results/bpk_tem1.pdf", plot=plot_bpk1)
ggsave("../results/k_tem1.pdf", plot=plot_k1)
## 2. "Diptera"
Dip <- subset(fecund, fecund$interactor1order =="Diptera")
Dip <- gdata::drop.levels(Dip)
levels(Dip$interactor1)
# 1) "Aedes aegypti"
# get subset of species
aegypti <- subset(Dip, Dip$interactor1 =="Aedes aegypti")
aegypti <- gdata::drop.levels(aegypti)
# check units
levels(aegypti$originaltraitunit) # unit: per day per female
# (?) no time information, so no peak value and k
# 2) "Aedes albopictus"
# get subset of species
albopictus <- subset(Dip, Dip$interactor1 =="Aedes albopictus")
albopictus <- gdata::drop.levels(albopictus)
# check units
levels(albopictus$originaltraitunit) # unit:"eggs per female per cycle"
# (?) no time information, so no peak value and k
# 3) "Aedes camptorhynchus"
# get subset of species
camptorhynchus <- subset(Dip, Dip$interactor1 =="Aedes camptorhynchus")
camptorhynchus <- gdata::drop.levels(camptorhynchus)
# check units
levels(camptorhynchus$originaltraitunit) # unit: eggs
# (?) no time information, so no peak value and k
# 4) "Culex annulirostris"
# get subset of species
annulirostris <- subset(Dip, Dip$interactor1 =="Culex annulirostris")
annulirostris <- gdata::drop.levels(annulirostris)
# check units
levels(Cole$originaltraitunit)
# (?) no time information, so no peak value and k
## 3. "Hemiptera"
Hemi <- subset(fecund, fecund$interactor1order =="Hemiptera")
Hemi <- gdata::drop.levels(Hemi)
levels(Hemi$interactor1)
# 1) "Aphis gossypii"
# get subset of species
gossypii <- subset(Hemi, Hemi$interactor1 =="Aphis gossypii")
gossypii <- gdata::drop.levels(gossypii)
# check units
levels(gossypii$originaltraitunit) # unit: "nymphs/female/day"
# check the temperature range
unique(gossypii$ambienttemp)
# 15.0
Go15.0 <- subset(gossypii, gossypii$ambienttemp == 15.0)
Go15.0 <- gdata::drop.levels(Go15.0)
Go15.0$time <- Go15.0$timestart
Go15.0$stdunit <- "Days"
Go15.0$stdvalue <- Go15.0$originaltraitvalue
Go15.0$stdname <- paste(unique(Go15.0$ambienttemp),"celcius")
Go_p15.0 <- ggplot(Go15.0, aes(x = time, y = stdvalue))
Go_p15.0 <- Go_p15.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 15.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go15.0 <- max(Go15$stdvalue)
Go15.0sub <- subset(Go15.0, Go15.0$stdvalue == max(Go15.0$stdvalue))
Go15.0sub$timestart
Go15.0sub2 <- subset(Go15.0, Go15.0$timestart >= 21)
Go15.0.lm <- lm(stdvalue ~ time, data = Go15.0sub2)
k_Go15.0 <- coef(Go15.0.lm)[2]
# 17.5 — same per-temperature pipeline for each remaining temperature below:
# standardise columns, plot fecundity ~ time, take the peak rate (bpk) and fit
# the loss rate k as the slope of stdvalue ~ time on the post-peak rows.
Go17.5 <- subset(gossypii, gossypii$ambienttemp == 17.5)
Go17.5 <- gdata::drop.levels(Go17.5)
Go17.5$time <- Go17.5$timestart
Go17.5$stdunit <- "Days"
Go17.5$stdvalue <- Go17.5$originaltraitvalue
Go17.5$stdname <- paste(unique(Go17.5$ambienttemp),"celcius")
Go_p17.5 <- ggplot(Go17.5, aes(x = time, y = stdvalue))
Go_p17.5 <- Go_p17.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 17.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go17.5 <- max(Go17.5$stdvalue)
Go17.5sub <- subset(Go17.5, Go17.5$stdvalue == max(Go17.5$stdvalue))
Go17.5sub$timestart
Go17.5sub2 <- subset(Go17.5, Go17.5$timestart >= Go17.5sub$timestart)
Go17.5.lm <- lm(stdvalue ~ time, data = Go17.5sub2)
k_Go17.5 <- coef(Go17.5.lm)[2]
# 20.0
Go20.0 <- subset(gossypii, gossypii$ambienttemp == 20.0)
Go20.0 <- gdata::drop.levels(Go20.0)
Go20.0$time <- Go20.0$timestart
Go20.0$stdunit <- "Days"
Go20.0$stdvalue <- Go20.0$originaltraitvalue
Go20.0$stdname <- paste(unique(Go20.0$ambienttemp),"celcius")
Go_p20.0 <- ggplot(Go20.0, aes(x = time, y = stdvalue))
Go_p20.0 <- Go_p20.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 20.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go20.0 <- max(Go20.0$stdvalue)
Go20.0sub <- subset(Go20.0, Go20.0$stdvalue == max(Go20.0$stdvalue))
Go20.0sub$timestart
Go20.0sub2 <- subset(Go20.0, Go20.0$timestart >= Go20.0sub$timestart)
Go20.0.lm <- lm(stdvalue ~ time, data = Go20.0sub2)
k_Go20.0 <- coef(Go20.0.lm)[2]
# 22.5
Go22.5 <- subset(gossypii, gossypii$ambienttemp == 22.5)
Go22.5 <- gdata::drop.levels(Go22.5)
Go22.5$time <- Go22.5$timestart
Go22.5$stdunit <- "Days"
Go22.5$stdvalue <- Go22.5$originaltraitvalue
Go22.5$stdname <- paste(unique(Go22.5$ambienttemp),"celcius")
Go_p22.5 <- ggplot(Go22.5, aes(x = time, y = stdvalue))
Go_p22.5 <- Go_p22.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 22.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go22.5<- max(Go22.5$stdvalue)
Go22.5sub <- subset(Go22.5, Go22.5$stdvalue == max(Go22.5$stdvalue))
Go22.5sub$timestart
Go22.5sub2 <- subset(Go22.5, Go22.5$timestart >= Go22.5sub$timestart)
Go22.5.lm <- lm(stdvalue ~ time, data = Go22.5sub2)
k_Go22.5 <- coef(Go22.5.lm)[2]
# 25.0
Go25.0 <- subset(gossypii, gossypii$ambienttemp == 25.0)
Go25.0 <- gdata::drop.levels(Go25.0)
Go25.0$time <- Go25.0$timestart
Go25.0$stdunit <- "Days"
Go25.0$stdvalue <- Go25.0$originaltraitvalue
Go25.0$stdname <- paste(unique(Go25.0$ambienttemp),"celcius")
Go_p25.0 <- ggplot(Go25.0, aes(x = time, y = stdvalue))
Go_p25.0 <- Go_p25.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 25.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go25.0 <- max(Go25.0$stdvalue)
Go25.0sub <- subset(Go25.0, Go25.0$stdvalue == max(Go25.0$stdvalue))
Go25.0sub$timestart
Go25.0sub2 <- subset(Go25.0, Go25.0$timestart >= Go25.0sub$timestart)
Go25.0.lm <- lm(stdvalue ~ time, data = Go25.0sub2)
k_Go25.0 <- coef(Go25.0.lm)[2]
# 27.5
Go27.5 <- subset(gossypii, gossypii$ambienttemp == 27.5)
Go27.5 <- gdata::drop.levels(Go27.5)
Go27.5$time <- Go27.5$timestart
Go27.5$stdunit <- "Days"
Go27.5$stdvalue <- Go27.5$originaltraitvalue
Go27.5$stdname <- paste(unique(Go27.5$ambienttemp),"celcius")
Go_p27.5 <- ggplot(Go27.5, aes(x = time, y = stdvalue))
Go_p27.5 <- Go_p27.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 27.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go27.5<- max(Go27.5$stdvalue)
Go27.5sub <- subset(Go27.5, Go27.5$stdvalue == max(Go27.5$stdvalue))
Go27.5sub$timestart
Go27.5sub2 <- subset(Go27.5, Go27.5$timestart >= Go27.5sub$timestart)
Go27.5.lm <- lm(stdvalue ~ time, data = Go27.5sub2)
k_Go27.5 <- coef(Go27.5.lm)[2]
# bare expression: prints k for interactive inspection only
k_Go27.5
# 30.0
Go30.0 <- subset(gossypii, gossypii$ambienttemp == 30.0)
Go30.0 <- gdata::drop.levels(Go30.0)
Go30.0$time <- Go30.0$timestart
Go30.0$stdunit <- "Days"
Go30.0$stdvalue <- Go30.0$originaltraitvalue
Go30.0$stdname <- paste(unique(Go30.0$ambienttemp),"celcius")
Go_p30.0 <- ggplot(Go30.0, aes(x = time, y = stdvalue))
Go_p30.0 <- Go_p30.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 30.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go30.0 <- max(Go30.0$stdvalue)
Go30.0sub <- subset(Go30.0, Go30.0$stdvalue == max(Go30.0$stdvalue))
Go30.0sub$timestart
Go30.0sub2 <- subset(Go30.0, Go30.0$timestart >= Go30.0sub$timestart)
Go30.0.lm <- lm(stdvalue ~ time, data = Go30.0sub2)
k_Go30.0 <- coef(Go30.0.lm)[2]
# bare expression: prints k for interactive inspection only
k_Go30.0
# 32.5
Go32.5 <- subset(gossypii, gossypii$ambienttemp == 32.5)
Go32.5 <- gdata::drop.levels(Go32.5)
Go32.5$time <- Go32.5$timestart
Go32.5$stdunit <- "Days"
Go32.5$stdvalue <- Go32.5$originaltraitvalue
Go32.5$stdname <- paste(unique(Go32.5$ambienttemp),"celcius")
Go_p32.5 <- ggplot(Go32.5, aes(x = time, y = stdvalue))
Go_p32.5 <- Go_p32.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 32.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go32.5 <- max(Go32.5$stdvalue)
Go32.5sub <- subset(Go32.5, Go32.5$stdvalue == max(Go32.5$stdvalue))
Go32.5sub$timestart
Go32.5sub2 <- subset(Go32.5, Go32.5$timestart >= Go32.5sub$timestart)
Go32.5.lm <- lm(stdvalue ~ time, data = Go32.5sub2)
k_Go32.5 <- coef(Go32.5.lm)[2]
# bare expression: prints k for interactive inspection only
k_Go32.5
# combine and plot: collect the per-temperature peaks and loss rates for
# Aphis gossypii, plot bpk ~ T and k ~ T, and a faceted b ~ time panel.
bpk_Go <- c(bpk_Go15.0, bpk_Go17.5, bpk_Go20.0, bpk_Go22.5, bpk_Go25.0, bpk_Go27.5, bpk_Go30.0, bpk_Go32.5)
k_Go <- c(k_Go15.0, k_Go17.5, k_Go20.0, k_Go22.5, k_Go25.0, k_Go27.5, k_Go30.0, k_Go32.5)
Temp_Go <- c(15.0, 17.5, 20.0, 22.5, 25.0, 27.5, 30.0, 32.5)
data_b_Go <- data.frame(bpk_Go, k_Go, Temp_Go)
plot_bpk2 <- ggplot(data_b_Go, aes(x = Temp_Go, y = bpk_Go)) + geom_point()+
labs(title="Peak Fecundity Rate (bpk)_Aphis gossypii",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Peak Fecundity Rate (nymphs/female/day)")
plot_k2 <- ggplot(data_b_Go, aes(x = Temp_Go, y = k_Go)) + geom_point()+
labs(title="Fecundity Loss Rate (k)_Aphis gossypii",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Fecundity Loss Rate (k)")
time_b_t2 <- rbind(Go15.0, Go17.5, Go20.0, Go22.5, Go25.0, Go27.5, Go30.0, Go32.5)
b_t2 <- ggplot(time_b_t2, aes(x=time, y=stdvalue)) + geom_point() + theme_bw() +
labs(title=expression(paste("Aphis gossypii_","Fecundity", (b), "~Time")),
x = "Time (days)",
y = "Fecundity")
plot2 <- b_t2 + facet_wrap(~ stdname, ncol = 2, scales = "free")+ geom_smooth()
# bare expression: displays the faceted plot interactively
plot2
# save
# NOTE(review): file names contain spaces and lowercase "../results" — see the
# path-case note in the Anthonomus section; confirm the target directory.
ggsave("../results/Aphis gossypii_b_time15.0.pdf", plot=Go_p15.0)
ggsave("../results/Aphis gossypii_b_time17.5.pdf", plot=Go_p17.5)
ggsave("../results/Aphis gossypii_b_time20.0.pdf", plot=Go_p20.0)
ggsave("../results/Aphis gossypii_b_time22.5.pdf", plot=Go_p22.5)
ggsave("../results/Aphis gossypii_b_time25.0.pdf", plot=Go_p25.0)
ggsave("../results/Aphis gossypii_b_time27.5.pdf", plot=Go_p27.5)
ggsave("../results/Aphis gossypii_b_time30.0.pdf", plot=Go_p30.0)
ggsave("../results/Aphis gossypii_b_time32.5.pdf", plot=Go_p32.5)
ggsave("../results/b_time2.pdf", plot=plot2)
ggsave("../results/bpk_tem2.pdf", plot=plot_bpk2)
ggsave("../results/k_tem2.pdf", plot=plot_k2)
# 2) "Corythucha ciliata"
# get subset of species
ciliata <- subset(Hemi, Hemi$interactor1 =="Corythucha ciliata")
ciliata <- gdata::drop.levels(ciliata)
# check units
levels(ciliata$originaltraitunit) # unit:eggs per female
unique(ciliata$ambienttemp)
# check time
# (?) no time information, so no peak value and k
# 3) "Tetraneura nigri abdominalis"
# get subset of species
nig <- subset(Hemi, Hemi$interactor1 =="Tetraneura nigri abdominalis")
nig <- gdata::drop.levels(nig)
# check units
levels(nig$originaltraitunit)
unique(nig$ambienttemp)
# check time
# (?) no time information, so no peak value and k
########### output ############
# 3.2.1. b ~ time: merge the per-species fecundity-vs-time figures
pdf_combine(c("../results/b_time1.pdf","../results/b_time2.pdf" ), output = "../results/3.2.1.Fecundity_time.pdf")
# 3.2.2. bpk ~ temperature
pdf_combine(c("../results/bpk_tem1.pdf","../results/bpk_tem2.pdf" ), output = "../results/3.2.2.Peak_Fecundity_Rate_temperature.pdf")
# 3.2.3. k ~ temperature
pdf_combine(c("../results/k_tem1.pdf","../results/k_tem2.pdf" ), output = "../results/3.2.3.Fecundity_Loss_Rate_temperature.pdf")
| /Mainproject/code/3.2.TPC_Fecundity.R | no_license | YuanZhang1203/CMEECourseWork | R | false | false | 18,575 | r | #!usr/bin/env R
#################################################
# Title: 3(2) Peak_Fecundity_Rate and Fecundity Loss Rate in R
# MSc CMEE
# July 2020
# Author: YUAN ZHANG
# refer to: TPC - Fecundity.ipynb
#################################################
rm(list = ls())
graphics.off()
#--------- Load some packages --------#
library("dplyr")
library("ggplot2")
library("gridExtra")
library("ggforce")
library("pdftools")
library("ggpubr")
#--------- Load dataset and check levels --------#
data <- read.csv("../data/simple.csv")
b <- subset(data, data$Variable == "Fecundity")
b <- gdata::drop.levels(b)
unique((b$species))
#-----------------------------------------------------------------------------------------------------------------#
#-----------------------------------------------------------------------------------------------------------------#
# "Tetraneura nigri abdominalis"
# "Aedes camptorhynchus"
# "Culex annulirostris"
# "Aedes aegypti"
# "Aphis gossypii"
# "Corythucha ciliata"
# "Aedes albopictus"
## "Coleoptera" ##
# 1. only one species : "Anthonomus grandis"
b1 <- subset(b, b$species == "Anthonomus grandis")
b1 <- gdata::drop.levels(b1)
b1$stdvalue <- b1$traitvalue
b1$logvalue <- log(b1$stdvalue)
# check units
unique(b1$unit) # "Eggs/female/day"
# plot b ~ t
bpk1 <- c()
k1 <- c()
temp1 <- unique(b1$temp) # get temperature ranges: 15 20 25 30 35
for (i in 1: length(temp1)) {
df <- b1[which(b1$temp == temp1[i]),]
bpk1[i] = max(df$traitvalue)
df$stdvalue <- df$traitvalue
pbt <- ggplot(df, aes(x = time, y = stdvalue))+ geom_point() + geom_smooth()+
labs(title = paste("Anthonomus grandis",temp1[i],"(\u00B0C)",sep = "" ),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")+
theme(plot.title = element_text(hjust = 0.5,size = 10, face = "bold.italic"),
axis.text=element_text(size=8,face = "bold"),
axis.title.x=element_text(size= 10),
axis.title.y=element_text(size= 10))
ggsave(paste("../Results/bt_1.Anthonomus grandis:", temp1[i], ":.png",sep = ""), device = png())
lm <- lm(logvalue ~ time, data = df)
k1[1] <- coef(lm)[2]
}
bpk1 <- data.frame(bpk1, temp1, k1)
pbpk1< ggplot(bpk1, aes(x = temp1, y = bpk1))+ geom_point() + geom_smooth()+
labs(title = paste("Anthonomus grandis",te,"(\u00B0C)",sep = "" ),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")+
theme(plot.title = element_text(hjust = 0.5,size = 10, face = "bold.italic"),
axis.text=element_text(size=8,face = "bold"),
axis.title.x=element_text(size= 10),
axis.title.y=element_text(size= 10))
ggsave(paste("../Results/bt_1.Anthonomus grandis:", temp1[i], ":.png",sep = ""), device = png())
# A15
A15 <- subset(Ant, Ant$ambienttemp == "15")
A15 <- gdata::drop.levels(A15)
A15$time <-A15$timestart
A15$stdunit <- "Days"
A15$stdvalue <- A15$originaltraitvalue
A15$stdname <- paste(unique(A15$ambienttemp),"celcius")
A15$logvalue <- log(A15$stdvalue)
p15 <- ggplot(A15, aes(x = time, y = stdvalue))
A1 <- p15 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 15 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A15.lm <- lm(log(stdvalue) ~ time, data = A15)
test <- ggplot(A15, aes(x = time, y = logvalue))
test <- test + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 15 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Log of Fecundity (Eggs/female/day)")
A15_k <- coef(A15.lm)[2]
# A20
A20 <- subset(Ant, Ant$ambienttemp == "20")
A20 <- gdata::drop.levels(A20)
A20$time <-A20$timestart
A20$stdunit <- "Days"
A20$stdvalue <- A20$originaltraitvalue
A20$stdname <- paste(unique(A20$ambienttemp),"celcius")
p20 <- ggplot(A20, aes(x = time, y = stdvalue))
A2 <- p20 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 20 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A20.lm <- lm(stdvalue ~ time, data = A20)
A20_k <- coef(A20.lm)[2]
# A25
A25 <- subset(Ant, Ant$ambienttemp == "25")
A25 <- gdata::drop.levels(A25)
A25$time <-A25$timestart
A25$stdunit <- "Days"
A25$stdvalue <- A25$originaltraitvalue
A25$stdname <- paste(unique(A25$ambienttemp),"celcius")
p25 <- ggplot(A25, aes(x = time, y = stdvalue))
A3 <- p25 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 25 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A25.lm <- lm(stdvalue ~ time, data = A25)
A25_k <- coef(A25.lm)[2]
# A30
A30 <- subset(Ant, Ant$ambienttemp == "30")
A30 <- gdata::drop.levels(A30)
A30$time <-A30$timestart
A30$stdunit <- "Days"
A30$stdvalue <- A30$originaltraitvalue
A30$stdname <- paste(unique(A30$ambienttemp),"celcius")
p30 <- ggplot(A30, aes(x = time, y = stdvalue))
A4 <- p30 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 30 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A30.lm <- lm(stdvalue ~ time, data = A30)
A30_k <- coef(A30.lm)[2]
# A35
A35 <- subset(Ant, Ant$ambienttemp == "35")
A35 <- gdata::drop.levels(A35)
A35$time <-A35$timestart
A35$stdunit <- "Days"
A35$stdvalue <- A35$originaltraitvalue
A35$stdname <- paste(unique(A35$ambienttemp),"celcius")
p35 <- ggplot(A35, aes(x = time, y = stdvalue))
A5 <- p35 + geom_point() + geom_smooth()+
labs(title=expression(paste('Anthonomus grandis 35 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
A35.lm <- lm(stdvalue ~ time, data = A35)
A35_k <- coef(A35.lm)[2]
# combine
time_b_t <- rbind(A15, A20, A25, A30, A35)
b_t <- ggplot(time_b_t, aes(x=time, y=stdvalue)) + geom_point() + theme_bw() +
labs(title=expression(paste("Anthonomus grandis_","Fecundity", (b), "~Time")),
x = "Time (days)",
y = "Fecundity (Eggs/female/day)")
plot1 <- b_t + facet_wrap(~ stdname, ncol = 2, scales = "free")+ geom_smooth()
# plot bpk and k ~ temperature
Abpk <- c(max(A15$stdvalue), max(A20$stdvalue), max(A25$stdvalue), max(A30$stdvalue), max(A35$stdvalue))
Ak <- c(A15_k, A20_k, A25_k, A30_k, A35_k)
At <- c(15,20,25,30,35)
data_b_A <- data.frame(Abpk,Ak, At)
plot_bpk1 <- ggplot(data_b_A, aes(x = At, y = Abpk)) + geom_point()+
labs(title="Peak Fecundity Rate (bpk)_Anthonomus grandis",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Peak Fecundity Rate (Eggs/female/day)")
plot_k1 <- ggplot(data_b_A, aes(x = At, y = Ak)) + geom_point()+
labs(title="Fecundity Loss Rate (k)_Anthonomus grandis",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Fecundity Loss Rate (k)")
# save
ggsave("../results/Anthonomus_grandis_b_time15.pdf", plot=A1)
ggsave("../results/Anthonomus_grandis_b_time20.pdf", plot=A2)
ggsave("../results/Anthonomus_grandis_b_time25.pdf", plot=A3)
ggsave("../results/Anthonomus_grandis_b_time30.pdf", plot=A4)
ggsave("../results/Anthonomus_grandis_b_time35.pdf", plot=A5)
ggsave("../results/b_time1.pdf", plot=plot1)
ggsave("../results/bpk_tem1.pdf", plot=plot_bpk1)
ggsave("../results/k_tem1.pdf", plot=plot_k1)
## 2. "Diptera"
Dip <- subset(fecund, fecund$interactor1order =="Diptera")
Dip <- gdata::drop.levels(Dip)
levels(Dip$interactor1)
# 1) "Aedes aegypti"
# get subset of species
aegypti <- subset(Dip, Dip$interactor1 =="Aedes aegypti")
aegypti <- gdata::drop.levels(aegypti)
# check units
levels(aegypti$originaltraitunit) # unit: per day per female
# (?) no time information, so no peak value and k
# 2) "Aedes albopictus"
# get subset of species
albopictus <- subset(Dip, Dip$interactor1 =="Aedes albopictus")
albopictus <- gdata::drop.levels(albopictus)
# check units
levels(albopictus$originaltraitunit) # unit:"eggs per female per cycle"
# (?) no time information, so no peak value and k
# 3) "Aedes camptorhynchus"
# get subset of species
camptorhynchus <- subset(Dip, Dip$interactor1 =="Aedes camptorhynchus")
camptorhynchus <- gdata::drop.levels(camptorhynchus)
# check units
levels(camptorhynchus$originaltraitunit) # unit: eggs
# (?) no time information, so no peak value and k
# 4) "Culex annulirostris"
# get subset of species
annulirostris <- subset(Dip, Dip$interactor1 =="Culex annulirostris")
annulirostris <- gdata::drop.levels(annulirostris)
# check units
levels(Cole$originaltraitunit)
# (?) no time information, so no peak value and k
## 3. "Hemiptera"
Hemi <- subset(fecund, fecund$interactor1order =="Hemiptera")
Hemi <- gdata::drop.levels(Hemi)
levels(Hemi$interactor1)
# 1) "Aphis gossypii"
# get subset of species
gossypii <- subset(Hemi, Hemi$interactor1 =="Aphis gossypii")
gossypii <- gdata::drop.levels(gossypii)
# check units
levels(gossypii$originaltraitunit) # unit: "nymphs/female/day"
# check the temperature range
unique(gossypii$ambienttemp)
# 15.0
Go15.0 <- subset(gossypii, gossypii$ambienttemp == 15.0)
Go15.0 <- gdata::drop.levels(Go15.0)
Go15.0$time <- Go15.0$timestart
Go15.0$stdunit <- "Days"
Go15.0$stdvalue <- Go15.0$originaltraitvalue
Go15.0$stdname <- paste(unique(Go15.0$ambienttemp),"celcius")
Go_p15.0 <- ggplot(Go15.0, aes(x = time, y = stdvalue))
Go_p15.0 <- Go_p15.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 15.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
bpk_Go15.0 <- max(Go15$stdvalue)
Go15.0sub <- subset(Go15.0, Go15.0$stdvalue == max(Go15.0$stdvalue))
Go15.0sub$timestart
Go15.0sub2 <- subset(Go15.0, Go15.0$timestart >= 21)
Go15.0.lm <- lm(stdvalue ~ time, data = Go15.0sub2)
k_Go15.0 <- coef(Go15.0.lm)[2]
# 17.5 C cohort: same per-temperature recipe as the other sections —
# subset the records for this temperature, standardise the columns used
# by the pooled plots later (time in days, stdvalue = trait value,
# stdname = facet label), plot fecundity over time, then extract
# bpk (peak value) and k (slope of a linear fit on post-peak records).
# 17.5
Go17.5 <- subset(gossypii, gossypii$ambienttemp == 17.5)
Go17.5 <- gdata::drop.levels(Go17.5)
Go17.5$time <- Go17.5$timestart
Go17.5$stdunit <- "Days"
Go17.5$stdvalue <- Go17.5$originaltraitvalue
Go17.5$stdname <- paste(unique(Go17.5$ambienttemp),"celcius")
# Scatter + loess smooth of fecundity against time for this temperature.
Go_p17.5 <- ggplot(Go17.5, aes(x = time, y = stdvalue))
Go_p17.5 <- Go_p17.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 17.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate (bpk) = maximum standardised value.
bpk_Go17.5 <- max(Go17.5$stdvalue)
# Rows at the peak; their start time marks the onset of the decline.
Go17.5sub <- subset(Go17.5, Go17.5$stdvalue == max(Go17.5$stdvalue))
Go17.5sub$timestart
# Fecundity loss rate (k) = slope of a linear fit on post-peak records.
Go17.5sub2 <- subset(Go17.5, Go17.5$timestart >= Go17.5sub$timestart)
Go17.5.lm <- lm(stdvalue ~ time, data = Go17.5sub2)
k_Go17.5 <- coef(Go17.5.lm)[2]
# 20.0 C cohort: same recipe — subset, standardise, plot, then extract
# bpk (peak value) and k (post-peak decline slope).
# 20.0
Go20.0 <- subset(gossypii, gossypii$ambienttemp == 20.0)
Go20.0 <- gdata::drop.levels(Go20.0)
Go20.0$time <- Go20.0$timestart
Go20.0$stdunit <- "Days"
Go20.0$stdvalue <- Go20.0$originaltraitvalue
Go20.0$stdname <- paste(unique(Go20.0$ambienttemp),"celcius")
Go_p20.0 <- ggplot(Go20.0, aes(x = time, y = stdvalue))
Go_p20.0 <- Go_p20.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 20.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate, then fit the decline after the peak's start time.
bpk_Go20.0 <- max(Go20.0$stdvalue)
Go20.0sub <- subset(Go20.0, Go20.0$stdvalue == max(Go20.0$stdvalue))
Go20.0sub$timestart
Go20.0sub2 <- subset(Go20.0, Go20.0$timestart >= Go20.0sub$timestart)
Go20.0.lm <- lm(stdvalue ~ time, data = Go20.0sub2)
k_Go20.0 <- coef(Go20.0.lm)[2]
# 22.5 C cohort: same recipe — subset, standardise, plot, then extract
# bpk (peak value) and k (post-peak decline slope).
# 22.5
Go22.5 <- subset(gossypii, gossypii$ambienttemp == 22.5)
Go22.5 <- gdata::drop.levels(Go22.5)
Go22.5$time <- Go22.5$timestart
Go22.5$stdunit <- "Days"
Go22.5$stdvalue <- Go22.5$originaltraitvalue
Go22.5$stdname <- paste(unique(Go22.5$ambienttemp),"celcius")
Go_p22.5 <- ggplot(Go22.5, aes(x = time, y = stdvalue))
Go_p22.5 <- Go_p22.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 22.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate, then fit the decline after the peak's start time.
bpk_Go22.5<- max(Go22.5$stdvalue)
Go22.5sub <- subset(Go22.5, Go22.5$stdvalue == max(Go22.5$stdvalue))
Go22.5sub$timestart
Go22.5sub2 <- subset(Go22.5, Go22.5$timestart >= Go22.5sub$timestart)
Go22.5.lm <- lm(stdvalue ~ time, data = Go22.5sub2)
k_Go22.5 <- coef(Go22.5.lm)[2]
# 25.0 C cohort: same recipe — subset, standardise, plot, then extract
# bpk (peak value) and k (post-peak decline slope).
# 25.0
Go25.0 <- subset(gossypii, gossypii$ambienttemp == 25.0)
Go25.0 <- gdata::drop.levels(Go25.0)
Go25.0$time <- Go25.0$timestart
Go25.0$stdunit <- "Days"
Go25.0$stdvalue <- Go25.0$originaltraitvalue
Go25.0$stdname <- paste(unique(Go25.0$ambienttemp),"celcius")
Go_p25.0 <- ggplot(Go25.0, aes(x = time, y = stdvalue))
Go_p25.0 <- Go_p25.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 25.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate, then fit the decline after the peak's start time.
bpk_Go25.0 <- max(Go25.0$stdvalue)
Go25.0sub <- subset(Go25.0, Go25.0$stdvalue == max(Go25.0$stdvalue))
Go25.0sub$timestart
Go25.0sub2 <- subset(Go25.0, Go25.0$timestart >= Go25.0sub$timestart)
Go25.0.lm <- lm(stdvalue ~ time, data = Go25.0sub2)
k_Go25.0 <- coef(Go25.0.lm)[2]
# 27.5 C cohort: same recipe — subset, standardise, plot, then extract
# bpk (peak value) and k (post-peak decline slope).
# 27.5
Go27.5 <- subset(gossypii, gossypii$ambienttemp == 27.5)
Go27.5 <- gdata::drop.levels(Go27.5)
Go27.5$time <- Go27.5$timestart
Go27.5$stdunit <- "Days"
Go27.5$stdvalue <- Go27.5$originaltraitvalue
Go27.5$stdname <- paste(unique(Go27.5$ambienttemp),"celcius")
Go_p27.5 <- ggplot(Go27.5, aes(x = time, y = stdvalue))
Go_p27.5 <- Go_p27.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 27.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate, then fit the decline after the peak's start time.
bpk_Go27.5<- max(Go27.5$stdvalue)
Go27.5sub <- subset(Go27.5, Go27.5$stdvalue == max(Go27.5$stdvalue))
Go27.5sub$timestart
Go27.5sub2 <- subset(Go27.5, Go27.5$timestart >= Go27.5sub$timestart)
Go27.5.lm <- lm(stdvalue ~ time, data = Go27.5sub2)
k_Go27.5 <- coef(Go27.5.lm)[2]
# Print the slope for interactive inspection.
k_Go27.5
# 30.0 C cohort: same recipe — subset, standardise, plot, then extract
# bpk (peak value) and k (post-peak decline slope).
# 30.0
Go30.0 <- subset(gossypii, gossypii$ambienttemp == 30.0)
Go30.0 <- gdata::drop.levels(Go30.0)
Go30.0$time <- Go30.0$timestart
Go30.0$stdunit <- "Days"
Go30.0$stdvalue <- Go30.0$originaltraitvalue
Go30.0$stdname <- paste(unique(Go30.0$ambienttemp),"celcius")
Go_p30.0 <- ggplot(Go30.0, aes(x = time, y = stdvalue))
Go_p30.0 <- Go_p30.0 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 30.0 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate, then fit the decline after the peak's start time.
bpk_Go30.0 <- max(Go30.0$stdvalue)
Go30.0sub <- subset(Go30.0, Go30.0$stdvalue == max(Go30.0$stdvalue))
Go30.0sub$timestart
Go30.0sub2 <- subset(Go30.0, Go30.0$timestart >= Go30.0sub$timestart)
Go30.0.lm <- lm(stdvalue ~ time, data = Go30.0sub2)
k_Go30.0 <- coef(Go30.0.lm)[2]
# Print the slope for interactive inspection.
k_Go30.0
# 32.5 C cohort: same recipe — subset, standardise, plot, then extract
# bpk (peak value) and k (post-peak decline slope).
# 32.5
Go32.5 <- subset(gossypii, gossypii$ambienttemp == 32.5)
Go32.5 <- gdata::drop.levels(Go32.5)
Go32.5$time <- Go32.5$timestart
Go32.5$stdunit <- "Days"
Go32.5$stdvalue <- Go32.5$originaltraitvalue
Go32.5$stdname <- paste(unique(Go32.5$ambienttemp),"celcius")
Go_p32.5 <- ggplot(Go32.5, aes(x = time, y = stdvalue))
Go_p32.5 <- Go_p32.5 + geom_point() + geom_smooth()+
labs(title=expression(paste('Aphis gossypii 32.5 (',~degree,'C)',sep='')),
x = "Time (days)",
y = "Fecundity (nymphs/female/day)")
# Peak fecundity rate, then fit the decline after the peak's start time.
bpk_Go32.5 <- max(Go32.5$stdvalue)
Go32.5sub <- subset(Go32.5, Go32.5$stdvalue == max(Go32.5$stdvalue))
Go32.5sub$timestart
Go32.5sub2 <- subset(Go32.5, Go32.5$timestart >= Go32.5sub$timestart)
Go32.5.lm <- lm(stdvalue ~ time, data = Go32.5sub2)
k_Go32.5 <- coef(Go32.5.lm)[2]
# Print the slope for interactive inspection.
k_Go32.5
# combine and plot
# Collect the per-temperature peak rates (bpk) and decline slopes (k)
# into one data frame indexed by temperature.
bpk_Go <- c(bpk_Go15.0, bpk_Go17.5, bpk_Go20.0, bpk_Go22.5, bpk_Go25.0, bpk_Go27.5, bpk_Go30.0, bpk_Go32.5)
k_Go <- c(k_Go15.0, k_Go17.5, k_Go20.0, k_Go22.5, k_Go25.0, k_Go27.5, k_Go30.0, k_Go32.5)
Temp_Go <- c(15.0, 17.5, 20.0, 22.5, 25.0, 27.5, 30.0, 32.5)
data_b_Go <- data.frame(bpk_Go, k_Go, Temp_Go)
# Peak fecundity rate as a function of temperature.
plot_bpk2 <- ggplot(data_b_Go, aes(x = Temp_Go, y = bpk_Go)) + geom_point()+
labs(title="Peak Fecundity Rate (bpk)_Aphis gossypii",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Peak Fecundity Rate (nymphs/female/day)")
# Fecundity loss rate as a function of temperature.
plot_k2 <- ggplot(data_b_Go, aes(x = Temp_Go, y = k_Go)) + geom_point()+
labs(title="Fecundity Loss Rate (k)_Aphis gossypii",
x = expression(paste('Temperature (',~degree,'C)',sep='')),
y = "Fecundity Loss Rate (k)")
# Stack all temperature cohorts and facet fecundity-vs-time by the
# stdname label assigned in each per-temperature section.
time_b_t2 <- rbind(Go15.0, Go17.5, Go20.0, Go22.5, Go25.0, Go27.5, Go30.0, Go32.5)
b_t2 <- ggplot(time_b_t2, aes(x=time, y=stdvalue)) + geom_point() + theme_bw() +
labs(title=expression(paste("Aphis gossypii_","Fecundity", (b), "~Time")),
x = "Time (days)",
y = "Fecundity")
plot2 <- b_t2 + facet_wrap(~ stdname, ncol = 2, scales = "free")+ geom_smooth()
plot2
# save
# Write one PDF per temperature plus the pooled/facetted figures.
# Paths are relative to the script's working directory.
ggsave("../results/Aphis gossypii_b_time15.0.pdf", plot=Go_p15.0)
ggsave("../results/Aphis gossypii_b_time17.5.pdf", plot=Go_p17.5)
ggsave("../results/Aphis gossypii_b_time20.0.pdf", plot=Go_p20.0)
ggsave("../results/Aphis gossypii_b_time22.5.pdf", plot=Go_p22.5)
ggsave("../results/Aphis gossypii_b_time25.0.pdf", plot=Go_p25.0)
ggsave("../results/Aphis gossypii_b_time27.5.pdf", plot=Go_p27.5)
ggsave("../results/Aphis gossypii_b_time30.0.pdf", plot=Go_p30.0)
ggsave("../results/Aphis gossypii_b_time32.5.pdf", plot=Go_p32.5)
ggsave("../results/b_time2.pdf", plot=plot2)
ggsave("../results/bpk_tem2.pdf", plot=plot_bpk2)
ggsave("../results/k_tem2.pdf", plot=plot_k2)
# Remaining Hemiptera species: these records lack time-resolved data,
# so only units and temperature coverage are inspected — no bpk or k.
# 2) "Corythucha ciliata"
# get subset of species
ciliata <- subset(Hemi, Hemi$interactor1 =="Corythucha ciliata")
ciliata <- gdata::drop.levels(ciliata)
# check units
levels(ciliata$originaltraitunit) # unit:eggs per female
unique(ciliata$ambienttemp)
# check time
# (?) no time information, so no peak value and k
# 3) "Tetraneura nigri abdominalis"
# get subset of species
nig <- subset(Hemi, Hemi$interactor1 =="Tetraneura nigri abdominalis")
nig <- gdata::drop.levels(nig)
# check units
levels(nig$originaltraitunit)
unique(nig$ambienttemp)
# check time
# (?) no time information, so no peak value and k
########### output ############
# Merge the figures produced above with their counterparts (the *1.pdf
# files come from an earlier section of this script) into the combined
# reports. pdf_combine() comes from the qpdf package — presumably loaded
# earlier in this script; confirm.
# 3.2.1. b ~ time
pdf_combine(c("../results/b_time1.pdf","../results/b_time2.pdf" ), output = "../results/3.2.1.Fecundity_time.pdf")
# 3.2.2. bpk ~ temperature
pdf_combine(c("../results/bpk_tem1.pdf","../results/bpk_tem2.pdf" ), output = "../results/3.2.2.Peak_Fecundity_Rate_temperature.pdf")
# 3.2.3. k ~ temperature
pdf_combine(c("../results/k_tem1.pdf","../results/k_tem2.pdf" ), output = "../results/3.2.3.Fecundity_Loss_Rate_temperature.pdf")
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509746e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615765039-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509746e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
# Auto-extracted usage example for RChronoModel::ImportCSV, which reads
# the CSV output of an MCMC run back into R.
library(RChronoModel)
### Name: ImportCSV
### Title: Importing a CSV file containing the output of the MCMC algorithm
### Aliases: ImportCSV
### Keywords: CSV file
### ** Examples
# Write the bundled Events dataset out as a CSV fixture (note: this
# creates data.csv in the current working directory), then read it back
# with default settings and again with explicit parsing options.
data(Events)
write.csv(Events, "data.csv", row.names=FALSE)
ImportCSV("data.csv")
ImportCSV("data.csv", dec = '.', sep=',', comment.char='#', header = TRUE)
| /data/genthat_extracted_code/RChronoModel/examples/ImportCSV.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 352 | r | library(RChronoModel)
### Name: ImportCSV
### Title: Importing a CSV file containing the output of the MCMC algorithm
### Aliases: ImportCSV
### Keywords: CSV file
### ** Examples
data(Events)
write.csv(Events, "data.csv", row.names=FALSE)
ImportCSV("data.csv")
ImportCSV("data.csv", dec = '.', sep=',', comment.char='#', header = TRUE)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{ncdc_datatypes}
\alias{ncdc_datatypes}
\title{Get possible data types for a particular dataset}
\usage{
ncdc_datatypes(datasetid = NULL, datatypeid = NULL, datacategoryid = NULL,
stationid = NULL, locationid = NULL, startdate = NULL, enddate = NULL,
sortfield = NULL, sortorder = NULL, limit = 25, offset = NULL,
callopts = list(), token = NULL, dataset = NULL, page = NULL,
filter = NULL)
}
\arguments{
\item{datatypeid}{Accepts a valid data type id or a chain of data type ids in a
comma-separated vector. Data returned will contain all of the data type(s) specified
(optional)}
\item{locationid}{Accepts a valid location id or a chain of location ids in a
comma-separated vector. Data returned will contain data for the location(s) specified (optional)}
\item{stationid}{Accepts a valid station id or a chain of of station ids in a
comma-separated vector. Data returned will contain data for the station(s) specified (optional)}
\item{sortfield}{The field to sort results by. Supports id, name, mindate, maxdate, and
datacoverage fields (optional)}
\item{sortorder}{Which order to sort by, asc or desc. Defaults to asc (optional)}
\item{limit}{Defaults to 25, limits the number of results in the response. Maximum is
1000 (optional)}
\item{offset}{Defaults to 0, used to offset the result list (optional)}
\item{token}{This must be a valid token supplied to you by NCDC's Climate
Data Online access token generator. (required) Get an API key (=token) at
\url{http://www.ncdc.noaa.gov/cdo-web/token}. You can pass your token in as
an argument or store it in your .Rprofile file with an entry like
\itemize{
\item options("noaakey" = "your-noaa-token")
}}
\item{callopts}{Further arguments passed on to the API GET call. (optional)}
\item{datasetid}{(optional) Accepts a single valid dataset id. Data returned will be from the
dataset specified, see datasets()}
\item{startdate}{(optional) Accepts valid ISO formatted date (yyyy-mm-dd) or date time
(YYYY-MM-DDThh:mm:ss). Data returned will have data after the specified date. The
date range must be less than 1 year.}
\item{enddate}{(optional) Accepts valid ISO formatted date (yyyy-mm-dd) or date time
(YYYY-MM-DDThh:mm:ss). Data returned will have data before the specified date. The
date range must be less than 1 year.}
\item{dataset}{THIS IS A DEPRECATED ARGUMENT. See datasetid.}
\item{page}{THIS IS A DEPRECATED ARGUMENT. There is no equivalent argument in v2
of the NOAA API.}
\item{filter}{THIS IS A DEPRECATED ARGUMENT. There is no equivalent argument in v2
of the NOAA API.}
\item{datacategoryid}{Optional. Accepts a valid data category id or a chain of data
category ids separated by ampersands (although it is rare to have a data type
with more than one data category). Data types returned will be associated with
the data category(ies) specified}
}
\value{
A \code{data.frame} for all datasets, or a list of length two, each with
a data.frame.
}
\description{
From the NOAA API docs: Describes the type of data, acts as a label. If it's 64
degrees out right now, then the data type is Air Temperature and the data is 64.
}
\examples{
\dontrun{
# Fetch available data types
ncdc_datatypes()
# Fetch more information about the ACMH data type id
ncdc_datatypes(datatypeid="ACMH")
# Fetch data types with the air temperature data category
ncdc_datatypes(datacategoryid="TEMP", limit=56)
# Fetch data types that support a given set of stations
ncdc_datatypes(stationid=c('COOP:310090','COOP:310184','COOP:310212'))
}
}
| /man/ncdc_datatypes.Rd | permissive | khemanta/rnoaa | R | false | false | 3,577 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{ncdc_datatypes}
\alias{ncdc_datatypes}
\title{Get possible data types for a particular dataset}
\usage{
ncdc_datatypes(datasetid = NULL, datatypeid = NULL, datacategoryid = NULL,
stationid = NULL, locationid = NULL, startdate = NULL, enddate = NULL,
sortfield = NULL, sortorder = NULL, limit = 25, offset = NULL,
callopts = list(), token = NULL, dataset = NULL, page = NULL,
filter = NULL)
}
\arguments{
\item{datatypeid}{Accepts a valid data type id or a chain of data type ids in a
comma-separated vector. Data returned will contain all of the data type(s) specified
(optional)}
\item{locationid}{Accepts a valid location id or a chain of location ids in a
comma-separated vector. Data returned will contain data for the location(s) specified (optional)}
\item{stationid}{Accepts a valid station id or a chain of of station ids in a
comma-separated vector. Data returned will contain data for the station(s) specified (optional)}
\item{sortfield}{The field to sort results by. Supports id, name, mindate, maxdate, and
datacoverage fields (optional)}
\item{sortorder}{Which order to sort by, asc or desc. Defaults to asc (optional)}
\item{limit}{Defaults to 25, limits the number of results in the response. Maximum is
1000 (optional)}
\item{offset}{Defaults to 0, used to offset the resultlist (optional)}
\item{token}{This must be a valid token token supplied to you by NCDC's Climate
Data Online access token generator. (required) Get an API key (=token) at
\url{http://www.ncdc.noaa.gov/cdo-web/token}. You can pass your token in as
an argument or store it in your .Rprofile file with an entry like
\itemize{
\item options("noaakey" = "your-noaa-token")
}}
\item{callopts}{Further arguments passed on to the API GET call. (optional)}
\item{datasetid}{(optional) Accepts a single valid dataset id. Data returned will be from the
dataset specified, see datasets()}
\item{startdate}{(optional) Accepts valid ISO formated date (yyyy-mm-dd) or date time
(YYYY-MM-DDThh:mm:ss). Data returned will have data after the specified date. The
date range must be less than 1 year.}
\item{enddate}{(optional) Accepts valid ISO formated date (yyyy-mm-dd) or date time
(YYYY-MM-DDThh:mm:ss). Data returned will have data before the specified date. The
date range must be less than 1 year.}
\item{dataset}{THIS IS A DEPRECATED ARGUMENT. See datasetid.}
\item{page}{THIS IS A DEPRECATED ARGUMENT. There is no equivalent argument in v2
of the NOAA API.}
\item{filter}{THIS IS A DEPRECATED ARGUMENT. There is no equivalent argument in v2
of the NOAA API.}
\item{datacategoryid}{Optional. Accepts a valid data category id or a chain of data
category ids seperated by ampersands (although it is rare to have a data type
with more than one data category). Data types returned will be associated with
the data category(ies) specified}
}
\value{
A \code{data.frame} for all datasets, or a list of length two, each with
a data.frame.
}
\description{
From the NOAA API docs: Describes the type of data, acts as a label. If it's 64
degrees out right now, then the data type is Air Temperature and the data is 64.
}
\examples{
\dontrun{
# Fetch available data types
ncdc_datatypes()
# Fetch more information about the ACMH data type id
ncdc_datatypes(datatypeid="ACMH")
# Fetch data types with the air temperature data category
ncdc_datatypes(datacategoryid="TEMP", limit=56)
# Fetch data types that support a given set of stations
ncdc_datatypes(stationid=c('COOP:310090','COOP:310184','COOP:310212'))
}
}
|
context("test-tnorm")

# Random draws from the truncated normal must respect the truncation
# bounds, and an invalid (negative) sd must raise an error.
test_that("rtnorm",
{
  n <- 100000
  x <- rtnorm(n, 0, 1, -0.5, 0.5)
  expect_true(all(x >= -0.5))
  expect_true(all(x <= 0.5))
  # FIX: dropped `silent = TRUE` — that is an argument of base::try(),
  # not of testthat::expect_error(), where it has no effect.
  expect_error(rtnorm(n, 0, -1))
})

# Density: zero outside [lower, upper], renormalised dnorm inside, and
# equal to dnorm when no truncation bounds are given.
test_that("dtnorm",
{
  x <- seq(0, 3, by = 0.5)
  y <- dtnorm(x, 1, 2, 0.5, 2)
  expect_equal(y[c(1,6,7)], rep(0,3))
  expect_equal(y[2:5], dnorm(x[2:5], 1, 2) /
                 (pnorm(2,1,2) - pnorm(0.5,1,2)))
  # FIX: `silent = TRUE` removed (not an expect_error() argument).
  expect_error(dtnorm(x, 1, -1))
  expect_equal(dtnorm(7), dnorm(7))
})

# CDF: 0 at the lower bound, 1 at/above the upper bound, values in
# [0, 1], inverse of qtnorm, and equal to pnorm when untruncated.
test_that("ptnorm",
{
  x <- seq(0, 3, by = 0.5)
  y <- ptnorm(x, 1, 2, 0.5, 2)
  expect_equal(y[1], 0)
  expect_equal(y[6:7], rep(1,2))
  z <- ptnorm(rtnorm(10, 1, 2, 0.5, 2), 1, 2, 0.5, 2)
  expect_true(all( z >= 0 & z <= 1))
  p <- seq(0,1,by = 0.1)
  expect_equal(ptnorm(qtnorm(p, 1, 2, 0.5, 2), 1, 2, 0.5, 2), p)
  # FIX: `silent = TRUE` removed (not an expect_error() argument).
  expect_error(ptnorm(x, 1, -1))
  expect_equal(ptnorm(7), pnorm(7))
})

# Quantile function: maps 0/1 to the truncation bounds, inverts ptnorm,
# and equals qnorm when untruncated.
test_that("qtnorm",
{
  p <- seq(0, 1, by = 0.1)
  x <- qtnorm(p, 1, 2, 0.5, 2)
  expect_equal(x[c(1,11)], c(0.5,2))
  x <- seq(0.5,2.0, by = 0.1)
  expect_equal(qtnorm(ptnorm(x, 1, 2, 0.5, 2), 1, 2, 0.5, 2), x)
  # FIX: `silent = TRUE` removed (not an expect_error() argument).
  expect_error(qtnorm(x, 1, -1))
  expect_equal(qtnorm(0.5), qnorm(0.5))
})
| /tests/testthat/test-tnorm.r | no_license | bertcarnell/truncateddist | R | false | false | 1,224 | r | context("test-tnorm")
test_that("rtnorm",
{
n <- 100000
x <- rtnorm(n, 0, 1, -0.5, 0.5)
expect_true(all(x >= -0.5))
expect_true(all(x <= 0.5))
expect_error(rtnorm(n, 0, -1), silent = TRUE)
})
test_that("dtnorm",
{
x <- seq(0, 3, by = 0.5)
y <- dtnorm(x, 1, 2, 0.5, 2)
expect_equal(y[c(1,6,7)], rep(0,3))
expect_equal(y[2:5], dnorm(x[2:5], 1, 2) /
(pnorm(2,1,2) - pnorm(0.5,1,2)))
expect_error(dtnorm(x, 1, -1), silent = TRUE)
expect_equal(dtnorm(7), dnorm(7))
})
test_that("ptnorm",
{
x <- seq(0, 3, by = 0.5)
y <- ptnorm(x, 1, 2, 0.5, 2)
expect_equal(y[1], 0)
expect_equal(y[6:7], rep(1,2))
z <- ptnorm(rtnorm(10, 1, 2, 0.5, 2), 1, 2, 0.5, 2)
expect_true(all( z >= 0 & z <= 1))
p <- seq(0,1,by = 0.1)
expect_equal(ptnorm(qtnorm(p, 1, 2, 0.5, 2), 1, 2, 0.5, 2), p)
expect_error(ptnorm(x, 1, -1), silent = TRUE)
expect_equal(ptnorm(7), pnorm(7))
})
test_that("qtnorm",
{
p <- seq(0, 1, by = 0.1)
x <- qtnorm(p, 1, 2, 0.5, 2)
expect_equal(x[c(1,11)], c(0.5,2))
x <- seq(0.5,2.0, by = 0.1)
expect_equal(qtnorm(ptnorm(x, 1, 2, 0.5, 2), 1, 2, 0.5, 2), x)
expect_error(qtnorm(x, 1, -1), silent = TRUE)
expect_equal(qtnorm(0.5), qnorm(0.5))
})
|
# Run CoGAPS matrix factorisation with 8 patterns on the normalised
# gene-level expression data and save the full workspace.
load('NormGeneData.Rda')
library('CoGAPS')
# Impute missing uncertainty estimates as 10% of the corresponding
# expression value ...
normLumiDat.Gene.SD[is.na(normLumiDat.Gene.SD)] <-
0.1*normLumiDat.Gene[is.na(normLumiDat.Gene.SD)]
# ... and floor every uncertainty at 10% of expression so no measurement
# is treated as (near-)noise-free.
normLumiDat.Gene.SD <- pmax(normLumiDat.Gene.SD,
0.1*normLumiDat.Gene)
# NOTE(review): nFactor/nEquil/nSample are passed as strings; gapsRun
# presumably coerces them, but numeric values would be safer — confirm.
nP8 <- gapsRun(D=normLumiDat.Gene,S = normLumiDat.Gene.SD,
nFactor="8", nEquil="50000", nSample="50000")
# Persist the entire workspace (inputs + result) for downstream analysis.
save(list=ls(), file='CoGAPS.nP8')
| /FertigAnalysis/CoGAPS_8.R | no_license | FertigLab/CetuxADCC | R | false | false | 397 | r | load('NormGeneData.Rda')
library('CoGAPS')
normLumiDat.Gene.SD[is.na(normLumiDat.Gene.SD)] <-
0.1*normLumiDat.Gene[is.na(normLumiDat.Gene.SD)]
normLumiDat.Gene.SD <- pmax(normLumiDat.Gene.SD,
0.1*normLumiDat.Gene)
nP8 <- gapsRun(D=normLumiDat.Gene,S = normLumiDat.Gene.SD,
nFactor="8", nEquil="50000", nSample="50000")
save(list=ls(), file='CoGAPS.nP8')
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{mggplot}
\alias{mggplot}
\alias{mggplot}
\title{mggplot}
\usage{
mggplot(
x,
columnas = names(x),
tipo = c("panel", "multiple"),
leyenda = c("bottom", "right")
)
}
\arguments{
\item{x}{Lista de índices tal como los proporciona \code{ConsInd.} Puede ser una dataframe (caso especial de lista).}
\item{columnas}{Nombres de los índices a representar; por defecto, los nombres de la lista \code{x} (o columnas de la dataframe \code{x}.)}
\item{tipo}{Tipo de plot que se desea: "panel" si se desea cada serie en un panel propio, o "multiple", si se desean todas las series en un único panel.}
\item{leyenda}{Lugar donde situar la leyenda; por defecto, "bottom", pero puede especificarse "right".}
}
\value{
Llamada por su efecto secundario, consistente en generar los gráficos.
}
\description{
Representar múltiples índices (típicamente devueltos por la función \code{IndZonas}) en sendos paneles o superpuestos en un único panel.
}
\examples{
}
| /man/mggplot.Rd | no_license | FernandoTusell/ipv | R | false | true | 1,038 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ipv.R
\name{mggplot}
\alias{mggplot}
\title{mggplot}
\usage{
mggplot(
x,
columnas = names(x),
tipo = c("panel", "multiple"),
leyenda = c("bottom", "right")
)
}
\arguments{
\item{x}{Lista de índices tal como los proporciona \code{ConsInd.} Puede ser una dataframe (caso especial de lista).}
\item{columnas}{Nombres de los índices a representar; por defecto, los nombres de la lista \code{x} (o columnas de la dataframe \code{x}.)}
\item{tipo}{Tipo de plot que se desea: "panel" si se desea cada serie en un panel propio, o "multiple", si se desean todas las series en un único panel.}
\item{leyenda}{Lugar donde situar la leyenda; por defecto, "bottom", pero puede especificarfse "right".}
}
\value{
Llamada por su efecto secundario, consistente en generar los gráficos.
}
\description{
Representar múltiples índices (típicamente devueltos por la función \code{IndZonas}) en sendos paneles o superpuestos en un único panel.
}
\examples{
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stab.fs.ranking.R
\name{stab.fs.ranking}
\alias{stab.fs.ranking}
\title{Function to quantify stability of feature ranking}
\usage{
stab.fs.ranking(fsets, sizes, N, method = c("kuncheva", "davis"), ...)
}
\arguments{
\item{fsets}{list or matrix of sets of selected features (in rows),
each ranking must have the same size.}
\item{sizes}{Number of top-ranked features for which the stability
index must be computed.}
\item{N}{total number of features on which feature selection is performed}
\item{method}{stability index (see details section).}
\item{...}{additional parameters passed to stability index (penalty
that is a numeric for Davis' stability index, see details section).}
}
\value{
A vector of numeric that are stability indices for each size of the sets
of selected features given the rankings.
}
\description{
This function computes several indexes to quantify feature ranking
stability for several number of selected features. This is usually
estimated through perturbation of the original dataset by generating
multiple sets of selected features.
}
\details{
Stability indices may use different parameters. In this version only the
Davis index requires an additional parameter that is penalty, a numeric
value used as penalty term.
Kuncheva index (kuncheva) lies in [-1, 1]. An index of -1 means no
intersection between sets of selected features, +1 means that all the
same features are always selected and 0 is the expected stability of a
random feature selection.
Davis index (davis) lies in [0, 1]. With a penalty term equal to 0, an index
of 0 means no intersection between sets of selected features and +1 means
that all the same features are always selected. A penalty of 1 is usually
used so that a feature selection performed with no or all features has a
Davis stability index equals to 0. None estimate of the expected Davis
stability index of a random feature selection was published.
}
\examples{
# 100 random selection of 50 features from a set of 10,000 features
fsets <- lapply(as.list(1:100), function(x, size=50, N=10000) {
return(sample(1:N, size, replace=FALSE))} )
names(fsets) <- paste("fsel", 1:length(fsets), sep=".")
# Kuncheva index
stab.fs.ranking(fsets=fsets, sizes=c(1, 10, 20, 30, 40, 50),
N=10000, method="kuncheva")
# close to 0 as expected for a random feature selection
# Davis index
stab.fs.ranking(fsets=fsets, sizes=c(1, 10, 20, 30, 40, 50),
N=10000, method="davis", penalty=1)
}
\references{
Davis CA, Gerick F, Hintermair V, Friedel CC, Fundel K, Kuffner R,
Zimmer R (2006) "Reliable gene signatures for microarray classification:
assessment of stability and performance", Bioinformatics, 22(19):2356-2363.
Kuncheva LI (2007) "A stability index for feature selection", AIAP'07:
Proceedings of the 25th conference on Proceedings of the 25th IASTED
International Multi-Conference, pages 390-395.
}
\seealso{
\link{stab.fs}
}
| /man/stab.fs.ranking.Rd | no_license | bhklab/genefu | R | false | true | 2,963 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stab.fs.ranking.R
\name{stab.fs.ranking}
\alias{stab.fs.ranking}
\title{Function to quantify stability of feature ranking}
\usage{
stab.fs.ranking(fsets, sizes, N, method = c("kuncheva", "davis"), ...)
}
\arguments{
\item{fsets}{list or matrix of sets of selected features (in rows),
each ranking must have the same size.}
\item{sizes}{Number of top-ranked features for which the stability
index must be computed.}
\item{N}{total number of features on which feature selection is performed}
\item{method}{stability index (see details section).}
\item{...}{additional parameters passed to stability index (penalty
that is a numeric for Davis' stability index, see details section).}
}
\value{
A vector of numeric that are stability indices for each size of the sets
of selected features given the rankings.
}
\description{
This function computes several indexes to quantify feature ranking
stability for several number of selected features. This is usually
estimated through perturbation of the original dataset by generating
multiple sets of selected features.
}
\details{
Stability indices may use different parameters. In this version only the
Davis index requires an additional parameter that is penalty, a numeric
value used as penalty term.
Kuncheva index (kuncheva) lays in [-1, 1], An index of -1 means no
intersection between sets of selected features, +1 means that all the
same features are always selected and 0 is the expected stability of a
random feature selection.
Davis index (davis) lays in [0,1], With a penalty term equal to 0, an index
of 0 means no intersection between sets of selected features and +1 means
that all the same features are always selected. A penalty of 1 is usually
used so that a feature selection performed with no or all features has a
Davis stability index equals to 0. None estimate of the expected Davis
stability index of a random feature selection was published.
}
\examples{
# 100 random selection of 50 features from a set of 10,000 features
fsets <- lapply(as.list(1:100), function(x, size=50, N=10000) {
return(sample(1:N, size, replace=FALSE))} )
names(fsets) <- paste("fsel", 1:length(fsets), sep=".")
# Kuncheva index
stab.fs.ranking(fsets=fsets, sizes=c(1, 10, 20, 30, 40, 50),
N=10000, method="kuncheva")
# close to 0 as expected for a random feature selection
# Davis index
stab.fs.ranking(fsets=fsets, sizes=c(1, 10, 20, 30, 40, 50),
N=10000, method="davis", penalty=1)
}
\references{
Davis CA, Gerick F, Hintermair V, Friedel CC, Fundel K, Kuffner R,
Zimmer R (2006) "Reliable gene signatures for microarray classification:
assessment of stability and performance", Bioinformatics, 22(19):356-2363.
Kuncheva LI (2007) "A stability index for feature selection", AIAP'07:
Proceedings of the 25th conference on Proceedings of the 25th IASTED
International Multi-Conference, pages 390-395.
}
\seealso{
\link{stab.fs}
}
|
## Functions that can be used to calculate the inverse of a matrix.
## Function "makeCacheMatrix" creates an object that stores a matrix together with its inverse.
## Function "cacheSolve" checks whether the inverse already exists in the cache; if not, the inverse matrix is created and stored in the cache for future use.
## Creates object which contains matrix and their inversion (if is calculated).
## Function works like a cache.
## Create a special "matrix" object that can cache its inverse.
## x: the matrix to wrap (defaults to an empty 1x1 matrix).
## Returns a list of accessor functions: set/get for the matrix and
## setInverse/getInverse for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse; NULL until cacheSolve() computes and stores it.
    i <- NULL
    ## Replace the stored matrix and invalidate the cached inverse.
    set <- function( matrix ) {
        # BUG FIX: the original did `m <<- matrix`, superassigning to a
        # free variable `m` (created in the global environment), while
        # get() read that same global. The object therefore never
        # encapsulated its own matrix. Store the matrix in the closure's
        # `x` instead.
        x <<- matrix
        i <<- NULL
    }
    ## Return the stored matrix.
    get <- function() {
        x
    }
    ## Cache the inverse of the matrix.
    setInverse <- function(inverse) {
        i <<- inverse
    }
    ## Return the cached inverse (NULL if not yet computed).
    getInverse <- function() {
        i
    }
    ## Expose the accessor functions.
    list(set = set, get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## Function checks if object "x" contains inversion of the matrix,
## If inversion of matrix exists in cache, cached object is returned.
## If inversion of materix doesn't exists, inversion of matrix is calculated and stored in cache
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing it on first use and serving the cached copy afterwards.
## x:   object returned by makeCacheMatrix() (matrix assumed invertible).
## ...: further arguments forwarded to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    i <- x$getInverse()
    ## Checks if inverse exists, if yes returns inverse from cache
    if( !is.null(i) ) {
        message("getting cached data")
        return(i)
    }
    ## Gets the matrix from the object
    data <- x$get()
    # BUG FIX: the original computed `solve(data) %*% data`, which is the
    # identity matrix, not the inverse. solve(data) alone is the inverse;
    # also forward ... so callers can pass solve() options.
    i <- solve(data, ...)
    ## Sets the inverse on the object (cache)
    x$setInverse(i)
    ## Returns the inverse matrix
    i
}
| /cachematrix.R | no_license | vkorecky/ProgrammingAssignment2 | R | false | false | 1,733 | r | ## Functions that can be used to calculate of matrix invwersion.
## "makeCacheMatrix" creates an object that stores a matrix and its inverse.
## "cacheSolve" checks whether the inverse already exists in the cache; if not,
## the inverse matrix is computed and stored in the cache for future use.
## Creates an object which contains a matrix and its inverse (once calculated).
## The object works like a cache.
## Create a cache object wrapping matrix `x` together with a slot for its
## (lazily computed) inverse. Returns a list of accessor functions:
##   set(matrix)    -- replace the stored matrix, invalidating the cache
##   get()          -- return the stored matrix
##   setInverse(i)  -- store a computed inverse
##   getInverse()   -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse; NULL until setInverse() is called.
  i <- NULL
  ## Replace the stored matrix.
  ## BUG FIX: the original assigned `m <<- matrix` and get() returned `m`,
  ## but no `m` exists in this closure -- set() leaked into the global
  ## environment and get() never returned the matrix given to the
  ## constructor. Store into `x`, the constructor's own argument, instead.
  set <- function(matrix) {
    x <<- matrix
    i <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  ## Return the stored matrix.
  get <- function() {
    x
  }
  ## Store the inverse computed elsewhere (see cacheSolve()).
  setInverse <- function(inverse) {
    i <<- inverse
  }
  ## Return the cached inverse (NULL when nothing has been cached).
  getInverse <- function() {
    i
  }
  ## Expose the accessor methods.
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Checks whether object "x" already contains the inverse of its matrix.
## If the inverse exists in the cache, the cached value is returned.
## If the inverse doesn't exist yet, it is computed and stored in the cache.
## Return the inverse of the matrix held in the cache object `x` (as built by
## makeCacheMatrix). The inverse is computed at most once; later calls return
## the cached value. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getInverse()
  ## Cache hit: reuse the previously computed inverse.
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  ## Cache miss: fetch the matrix and invert it.
  data <- x$get()
  ## BUG FIX: the original computed `solve(data) %*% data`, which is the
  ## identity matrix, not the inverse. solve() with a single argument
  ## already returns the inverse.
  m <- solve(data, ...)
  ## Store the result so subsequent calls hit the cache.
  x$setInverse(m)
  ## Return the inverse.
  m
}
|
## Worked example: 95% two-sided confidence intervals for a difference of two
## proportions (treatment - control) by three methods: Wald, Farrington-Manning
## (FM, using restricted MLEs under margin M2), and Newcombe-Wilson.
## The bare top-level expressions print the lower/upper bound of each interval.
library(dplyr)
library(tidyr)
## NOTE(review): dplyr/tidyr are loaded but never used below.
M2 <- 0.066
## Sample sizes and event counts: T = treatment arm, C = control arm.
T_n <- 200
C_n <- 150
T_event <- 74
C_event <- 62
## Observed proportions and their difference.
phat.T <- T_event/T_n
phat.C <- C_event/C_n
phat.d <- phat.T - phat.C
## One-sided tail area; qnorm(1 - alpha) ~ 1.96 gives a two-sided 95% CI.
alpha <- 0.025
# 95% Wald CI
phat.d - qnorm(1-alpha)*sqrt(phat.T*(1-phat.T)/T_n + phat.C*(1-phat.C)/C_n)
phat.d + qnorm(1-alpha)*sqrt(phat.T*(1-phat.T)/T_n + phat.C*(1-phat.C)/C_n)
# 95% FM CI
## Farrington-Manning: the variance is evaluated at restricted MLEs obtained
## as a closed-form (trigonometric) root of the cubic a*p^3 + b*p^2 + c*p + d.
theta <- C_n/T_n
a <- 1 + theta
b <- -1*(1 + theta + phat.T + theta*phat.C + M2*(theta + 2))
c <- M2^2 + M2*(2*phat.T + theta + 1) + phat.T + theta*phat.C
## NOTE(review): Farrington & Manning (1990) define this coefficient with the
## treatment-arm proportion; phat.C is used here -- confirm the convention.
d <- -phat.C*M2*(1 + M2)
v <- b^3/(27*a^3) - b*c/(6*a^2) + d/(2*a)
u <- sign(v)*(b^2/(9*a^2) - c/(3*a))^0.5
w <- 1/3*(pi + acos(v/u^3))
phat.T.rmle <- 2*u*cos(w) - b/(3*a)
## NOTE(review): suspicious -- `d` is the cubic coefficient defined above, not
## the margin; the FM restriction fixes the *difference* of the restricted
## MLEs at the margin, so this line likely should use M2 rather than d. Verify.
phat.C.rmle <- phat.T.rmle + d
phat.d - qnorm(1-alpha)*sqrt(phat.T.rmle*(1-phat.T.rmle)/T_n + phat.C.rmle*(1-phat.C.rmle)/C_n)
phat.d + qnorm(1-alpha)*sqrt(phat.T.rmle*(1-phat.T.rmle)/T_n + phat.C.rmle*(1-phat.C.rmle)/C_n)
# 95% Newcombe-Wilson CI
## Wilson score limits are computed per arm, then combined square-and-add.
z <-qnorm(1-alpha)
l.C <- (phat.C + z^2/(2*C_n) -
          z*sqrt((phat.C*(1 - phat.C) + z^2/(4*C_n))/C_n))/(1 + z^2/C_n)
u.C <- (phat.C + z^2/(2*C_n) +
          z*sqrt((phat.C*(1 - phat.C) + z^2/(4*C_n))/C_n))/(1 + z^2/C_n)
l.T <- (phat.T + z^2/(2*T_n) -
          z*sqrt((phat.T*(1 - phat.T) + z^2/(4*T_n))/T_n))/(1 + z^2/T_n)
u.T <- (phat.T + z^2/(2*T_n) +
          z*sqrt((phat.T*(1 - phat.T) + z^2/(4*T_n))/T_n))/(1 + z^2/T_n)
phat.d - sqrt((phat.T - l.T)^2 + (u.C - phat.C)^2)
phat.d + sqrt((u.T - phat.T)^2 + (phat.C - l.C)^2)
| /95CI_example.R | no_license | yuliasidi/Review | R | false | false | 1,480 | r | library(dplyr)
## Worked example: 95% two-sided confidence intervals for a difference of two
## proportions (treatment - control) by three methods: Wald, Farrington-Manning
## (FM, using restricted MLEs under margin M2), and Newcombe-Wilson.
## The bare top-level expressions print the bounds of each interval.
library(tidyr)
## NOTE(review): tidyr (and dplyr, loaded above) are never used below.
M2 <- 0.066
## Sample sizes and event counts: T = treatment arm, C = control arm.
T_n <- 200
C_n <- 150
T_event <- 74
C_event <- 62
## Observed proportions and their difference.
phat.T <- T_event/T_n
phat.C <- C_event/C_n
phat.d <- phat.T - phat.C
## One-sided tail area; qnorm(1 - alpha) ~ 1.96 gives a two-sided 95% CI.
alpha <- 0.025
# 95% Wald CI
phat.d - qnorm(1-alpha)*sqrt(phat.T*(1-phat.T)/T_n + phat.C*(1-phat.C)/C_n)
phat.d + qnorm(1-alpha)*sqrt(phat.T*(1-phat.T)/T_n + phat.C*(1-phat.C)/C_n)
# 95% FM CI
## Farrington-Manning: the variance is evaluated at restricted MLEs obtained
## as a closed-form (trigonometric) root of the cubic a*p^3 + b*p^2 + c*p + d.
theta <- C_n/T_n
a <- 1 + theta
b <- -1*(1 + theta + phat.T + theta*phat.C + M2*(theta + 2))
c <- M2^2 + M2*(2*phat.T + theta + 1) + phat.T + theta*phat.C
## NOTE(review): Farrington & Manning (1990) define this coefficient with the
## treatment-arm proportion; phat.C is used here -- confirm the convention.
d <- -phat.C*M2*(1 + M2)
v <- b^3/(27*a^3) - b*c/(6*a^2) + d/(2*a)
u <- sign(v)*(b^2/(9*a^2) - c/(3*a))^0.5
w <- 1/3*(pi + acos(v/u^3))
phat.T.rmle <- 2*u*cos(w) - b/(3*a)
## NOTE(review): suspicious -- `d` is the cubic coefficient defined above, not
## the margin; the FM restriction fixes the *difference* of the restricted
## MLEs at the margin, so this line likely should use M2 rather than d. Verify.
phat.C.rmle <- phat.T.rmle + d
phat.d - qnorm(1-alpha)*sqrt(phat.T.rmle*(1-phat.T.rmle)/T_n + phat.C.rmle*(1-phat.C.rmle)/C_n)
phat.d + qnorm(1-alpha)*sqrt(phat.T.rmle*(1-phat.T.rmle)/T_n + phat.C.rmle*(1-phat.C.rmle)/C_n)
# 95% Newcombe-Wilson CI
## Wilson score limits are computed per arm, then combined square-and-add.
z <-qnorm(1-alpha)
l.C <- (phat.C + z^2/(2*C_n) -
          z*sqrt((phat.C*(1 - phat.C) + z^2/(4*C_n))/C_n))/(1 + z^2/C_n)
u.C <- (phat.C + z^2/(2*C_n) +
          z*sqrt((phat.C*(1 - phat.C) + z^2/(4*C_n))/C_n))/(1 + z^2/C_n)
l.T <- (phat.T + z^2/(2*T_n) -
          z*sqrt((phat.T*(1 - phat.T) + z^2/(4*T_n))/T_n))/(1 + z^2/T_n)
u.T <- (phat.T + z^2/(2*T_n) +
          z*sqrt((phat.T*(1 - phat.T) + z^2/(4*T_n))/T_n))/(1 + z^2/T_n)
phat.d - sqrt((phat.T - l.T)^2 + (u.C - phat.C)^2)
|
## Demo of five htmlwidgets-based visualization packages: leaflet (maps),
## dygraphs (time series), networkD3 (network/tree diagrams), DT (interactive
## tables), and threejs (3D graphs). Each section installs, loads, and
## renders one widget.
install.packages("magrittr")
library(magrittr)

## --- Leaflet: interactive map marking Gannon University ---------------------
install.packages("leaflet")
library(leaflet)
map <- leaflet() %>%
  addTiles() %>%
  addMarkers(lat = 42.128, lng = -80.087,
             popup = "The location of Gannon University")
## Print (render) the map widget
map

## --- Dygraphs: interactive time-series chart --------------------------------
install.packages("dygraphs")
library(dygraphs)
dygraph(AirPassengers, main = "Monthly Airline Passenger Numbers 1949-1960") %>%
  dyAxis("x", label = "Year", pixelsPerLabel = 40) %>%
  dyAxis("y", label = "# of Passengers") %>%
  dySeries("V1", label = "Passengers", color = "red") %>%
  dyOptions(drawPoints = TRUE, includeZero = TRUE)

## --- networkD3: radial dendrogram of clustering on built-in USArrests -------
install.packages("networkD3")
library(networkD3)
National <- hclust(dist(USArrests), "ave")
diagonalNetwork(as.radialNetwork(National), nodeColour = "#393",
                height = 700, width = 800, linkColour = "2B5DE5")

## --- DataTables: interactive table of the built-in Orange dataset -----------
install.packages("DT")
library(DT)
datatable(Orange, rownames = FALSE, options = list(pageLength = 4))

## --- threejs: 3D graph of the built-in LeMis network ------------------------
install.packages("threejs")
library(threejs)
graphjs(LeMis, vertex.size = .5, vertex.shape = "sphere", bg = "black")
| /RWidgetCode.R | no_license | zacherl008/R_Scripts | R | false | false | 1,402 | r | ##Testing data visualization using five R packages: Leaflet, Dygraphs, networkD3, DataTables, and threejs.
## Demo of five htmlwidgets-based visualization packages: leaflet (maps),
## dygraphs (time series), networkD3 (network/tree diagrams), DT (interactive
## tables), and threejs (3D graphs). Each section installs, loads, and
## renders one widget.
install.packages("magrittr")
library(magrittr)

## --- Leaflet: interactive map marking Gannon University ---------------------
install.packages("leaflet")
library(leaflet)
map <- leaflet() %>%
  addTiles() %>%
  addMarkers(lat = 42.128, lng = -80.087,
             popup = "The location of Gannon University")
## Print (render) the map widget
map

## --- Dygraphs: interactive time-series chart --------------------------------
install.packages("dygraphs")
library(dygraphs)
dygraph(AirPassengers, main = "Monthly Airline Passenger Numbers 1949-1960") %>%
  dyAxis("x", label = "Year", pixelsPerLabel = 40) %>%
  dyAxis("y", label = "# of Passengers") %>%
  dySeries("V1", label = "Passengers", color = "red") %>%
  dyOptions(drawPoints = TRUE, includeZero = TRUE)

## --- networkD3: radial dendrogram of clustering on built-in USArrests -------
install.packages("networkD3")
library(networkD3)
National <- hclust(dist(USArrests), "ave")
diagonalNetwork(as.radialNetwork(National), nodeColour = "#393",
                height = 700, width = 800, linkColour = "2B5DE5")

## --- DataTables: interactive table of the built-in Orange dataset -----------
install.packages("DT")
library(DT)
datatable(Orange, rownames = FALSE, options = list(pageLength = 4))

## --- threejs: 3D graph of the built-in LeMis network ------------------------
install.packages("threejs")
library(threejs)
graphjs(LeMis, vertex.size = .5, vertex.shape = "sphere", bg = "black")
|
## Extracted example for miscTools::vecli, which collects the linearly
## independent elements of a symmetric matrix into a vector.
library(miscTools)
### Name: vecli
### Title: Vector of linear independent values
### Aliases: vecli
### Keywords: array
### ** Examples
# a symmetric 3 x 3 matrix, spelled out column by column
m <- matrix(c(11, 12, 13,
              12, 22, 23,
              13, 23, 33), nrow = 3)
vecli(m) # returns: 11 12 13 22 23 33
| /data/genthat_extracted_code/miscTools/examples/vecli.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 267 | r | library(miscTools)
### Name: vecli
### Title: Vector of linear independent values
### Aliases: vecli
### Keywords: array
### ** Examples
## Example for miscTools::vecli: collect the linearly independent elements
## of a symmetric 3 x 3 matrix (spelled out column by column).
m <- matrix(c(11, 12, 13,
              12, 22, 23,
              13, 23, 33), nrow = 3)
vecli(m) # returns: 11 12 13 22 23 33
|
#' Generate pkgdown data structure
#'
#' You will generally not need to use this unless you need a custom site
#' design and you're writing your own equivalent of [build_site()].
#'
#' @param path Path to package
#' @export
# Build the "pkgdown" data structure for the package at `path`: paths,
# DESCRIPTION, site metadata, topics, vignettes, and link indexes.
as_pkgdown <- function(path = ".") {
  # Idempotent: an already-built pkgdown object is returned unchanged.
  if (is_pkgdown(path)) {
    return(path)
  }
  if (!dir_exists(path)) {
    stop("`path` is not an existing directory", call. = FALSE)
  }
  # Package name comes from DESCRIPTION; topics are parsed from man/*.Rd.
  desc <- read_desc(path)
  package <- desc$get("Package")[[1]]
  topics <- package_topics(path, package)
  meta <- read_meta(path)
  structure(
    list(
      package = package,
      src_path = path_abs(path),
      # Output dir defaults to <pkg>/docs unless `destination` is set
      # in _pkgdown.yml.
      dst_path = path_abs(meta$destination %||% path(path, "docs")),
      desc = desc,
      meta = meta,
      topics = topics,
      vignettes = package_vignettes(path),
      topic_index = topic_index_local(package, path),
      article_index = article_index_local(package, path)
    ),
    class = "pkgdown"
  )
}
is_pkgdown <- function(x) inherits(x, "pkgdown")
# Render a person entry as an HTML string: "Given Family", wrapped in a
# mailto: link when an email is present, and suffixed with the person's
# roles in square brackets when roles are present.
str_person <- function(pers) {
  label <- paste(c(pers$given, pers$family), collapse = " ")
  if (length(pers$email) > 0) {
    label <- paste0("<a href='mailto:", pers$email, "'>", label, "</a>")
  }
  if (length(pers$role) > 0) {
    label <- paste0(label, " [", paste(pers$role, collapse = ", "), "]")
  }
  label
}
# Read the package DESCRIPTION under `path` into a desc::description object,
# failing with a clear error when the file is missing.
read_desc <- function(path = ".") {
  desc_path <- path(path, "DESCRIPTION")
  if (!file_exists(desc_path)) {
    stop("Can't find DESCRIPTION", call. = FALSE)
  }
  desc::description$new(desc_path)
}
# Metadata ----------------------------------------------------------------
# Locate and parse the pkgdown configuration file for the package at `path`.
# Returns an empty list when no _pkgdown.yml (or variant) exists.
read_meta <- function(path) {
  config <- find_first_existing(
    path,
    c("_pkgdown.yml", "pkgdown/_pkgdown.yml", "_pkgdown.yaml")
  )
  if (is.null(config)) {
    return(list())
  }
  yaml::yaml.load_file(config)
}
# Topics ------------------------------------------------------------------
# Build the topic table for a package: one row per .Rd file under man/, with
# its aliases, title, parsed usage, concepts, and whether it is marked as
# \keyword{internal}.
package_topics <- function(path = ".", package = "") {
  rd <- package_rd(path)
  # In case there are links in titles
  # (the scoped_* calls set session-level package/file context used when
  # titles are rendered, so they must run before the extract_* calls below)
  scoped_package_context(package)
  scoped_file_context()
  aliases <- purrr::map(rd, extract_tag, "tag_alias")
  names <- purrr::map_chr(rd, extract_tag, "tag_name")
  titles <- purrr::map_chr(rd, extract_title)
  concepts <- purrr::map(rd, extract_tag, "tag_concept")
  internal <- purrr::map_lgl(rd, is_internal)
  file_in <- names(rd)
  # Each foo.Rd renders to foo.html.
  file_out <- gsub("\\.Rd$", ".html", file_in)
  usage <- purrr::map(rd, topic_usage)
  funs <- purrr::map(usage, usage_funs)
  tibble::tibble(
    name = names,
    file_in = file_in,
    file_out = file_out,
    alias = aliases,
    usage = usage,
    funs = funs,
    title = titles,
    rd = rd,
    concepts = concepts,
    internal = internal
  )
}
# Parse every .Rd file under man/ into a named list of rd objects (names are
# the file names). Returns an empty named list when man/ does not exist.
package_rd <- function(path = ".") {
  man_path <- path(path, "man")
  if (!dir_exists(man_path)) {
    return(set_names(list(), character()))
  }
  rd_paths <- dir_ls(man_path, pattern = "\\.Rd$", type = "file")
  names(rd_paths) <- path_file(rd_paths)
  lapply(rd_paths, rd_file, pkg_path = path)
}
# Pull the text of every top-level element of Rd class `tag` (e.g.
# "tag_alias") out of a parsed Rd object, as a character vector.
extract_tag <- function(x, tag) {
  matching <- purrr::keep(x, inherits, tag)
  purrr::map_chr(matching, c(1, 1))
}
# Extract the \title of a parsed Rd object as a plain, whitespace-trimmed
# string (titles are rendered without auto-linking).
extract_title <- function(x) {
  title_tag <- purrr::detect(x, inherits, "tag_title")
  trimws(flatten_text(title_tag, auto_link = FALSE))
}
# TRUE when the topic carries \keyword{internal}.
is_internal <- function(x) {
  "internal" %in% extract_tag(x, "tag_keyword")
}
# Vignettes ---------------------------------------------------------------
# Find vignettes (vignettes/**/*.Rmd, excluding files whose base name starts
# with "_") and return a tibble of input/output paths, names, directory
# depth, and titles.
package_vignettes <- function(path = ".") {
  vig_path <- dir(
    path(path, "vignettes"),
    pattern = "\\.[rR]md$",
    recursive = TRUE
  )
  # Files starting with "_" are treated as partials/children, not vignettes.
  vig_path <- vig_path[!grepl("^_", basename(vig_path))]
  # Title comes from each vignette's YAML front matter.
  # NOTE(review): `.null` is the legacy purrr spelling (newer purrr uses
  # `.default`) -- confirm which purrr version this code targets.
  title <- path(path, "vignettes", vig_path) %>%
    purrr::map(rmarkdown::yaml_front_matter) %>%
    purrr::map_chr("title", .null = "UNKNOWN TITLE")
  tibble::tibble(
    file_in = vig_path,
    file_out = gsub("\\.[rR]md$", "\\.html", vig_path),
    name = tools::file_path_sans_ext(basename(vig_path)),
    path = dirname(vig_path),
    vig_depth = dir_depth(vig_path),
    title = title
  )
}
# Number of directory components above each file, i.e. the count of "/"
# separators in each relative path: "a/b/c.Rmd" -> 2L, "c.Rmd" -> 0L.
#
# The original split every path into single characters and summed the
# matches; counting via gsub() is vectorised, avoids the per-character
# allocations (and the purrr dependency), and returns the same integers.
dir_depth <- function(x) {
  nchar(x) - nchar(gsub("/", "", x, fixed = TRUE))
}
| /R/package.r | permissive | DataStrategist/pkgdown | R | false | false | 4,086 | r | #' Generate pkgdown data structure
#'
#' You will generally not need to use this unless you need a custom site
#' design and you're writing your own equivalent of [build_site()].
#'
#' @param path Path to package
#' @export
# Build the "pkgdown" data structure for the package at `path`: paths,
# DESCRIPTION, site metadata, topics, vignettes, and link indexes.
as_pkgdown <- function(path = ".") {
  # Idempotent: an already-built pkgdown object is returned unchanged.
  if (is_pkgdown(path)) {
    return(path)
  }
  if (!dir_exists(path)) {
    stop("`path` is not an existing directory", call. = FALSE)
  }
  # Package name comes from DESCRIPTION; topics are parsed from man/*.Rd.
  desc <- read_desc(path)
  package <- desc$get("Package")[[1]]
  topics <- package_topics(path, package)
  meta <- read_meta(path)
  structure(
    list(
      package = package,
      src_path = path_abs(path),
      # Output dir defaults to <pkg>/docs unless `destination` is set
      # in _pkgdown.yml.
      dst_path = path_abs(meta$destination %||% path(path, "docs")),
      desc = desc,
      meta = meta,
      topics = topics,
      vignettes = package_vignettes(path),
      topic_index = topic_index_local(package, path),
      article_index = article_index_local(package, path)
    ),
    class = "pkgdown"
  )
}
is_pkgdown <- function(x) inherits(x, "pkgdown")
# Render a person entry as an HTML string: "Given Family", wrapped in a
# mailto: link when an email is present, and suffixed with the person's
# roles in square brackets when roles are present.
str_person <- function(pers) {
  label <- paste(c(pers$given, pers$family), collapse = " ")
  if (length(pers$email) > 0) {
    label <- paste0("<a href='mailto:", pers$email, "'>", label, "</a>")
  }
  if (length(pers$role) > 0) {
    label <- paste0(label, " [", paste(pers$role, collapse = ", "), "]")
  }
  label
}
# Read the package DESCRIPTION under `path` into a desc::description object,
# failing with a clear error when the file is missing.
read_desc <- function(path = ".") {
  desc_path <- path(path, "DESCRIPTION")
  if (!file_exists(desc_path)) {
    stop("Can't find DESCRIPTION", call. = FALSE)
  }
  desc::description$new(desc_path)
}
# Metadata ----------------------------------------------------------------
# Locate and parse the pkgdown configuration file for the package at `path`.
# Returns an empty list when no _pkgdown.yml (or variant) exists.
read_meta <- function(path) {
  config <- find_first_existing(
    path,
    c("_pkgdown.yml", "pkgdown/_pkgdown.yml", "_pkgdown.yaml")
  )
  if (is.null(config)) {
    return(list())
  }
  yaml::yaml.load_file(config)
}
# Topics ------------------------------------------------------------------
# Build the topic table for a package: one row per .Rd file under man/, with
# its aliases, title, parsed usage, concepts, and whether it is marked as
# \keyword{internal}.
package_topics <- function(path = ".", package = "") {
  rd <- package_rd(path)
  # In case there are links in titles
  # (the scoped_* calls set session-level package/file context used when
  # titles are rendered, so they must run before the extract_* calls below)
  scoped_package_context(package)
  scoped_file_context()
  aliases <- purrr::map(rd, extract_tag, "tag_alias")
  names <- purrr::map_chr(rd, extract_tag, "tag_name")
  titles <- purrr::map_chr(rd, extract_title)
  concepts <- purrr::map(rd, extract_tag, "tag_concept")
  internal <- purrr::map_lgl(rd, is_internal)
  file_in <- names(rd)
  # Each foo.Rd renders to foo.html.
  file_out <- gsub("\\.Rd$", ".html", file_in)
  usage <- purrr::map(rd, topic_usage)
  funs <- purrr::map(usage, usage_funs)
  tibble::tibble(
    name = names,
    file_in = file_in,
    file_out = file_out,
    alias = aliases,
    usage = usage,
    funs = funs,
    title = titles,
    rd = rd,
    concepts = concepts,
    internal = internal
  )
}
# Parse every .Rd file under man/ into a named list of rd objects (names are
# the file names). Returns an empty named list when man/ does not exist.
package_rd <- function(path = ".") {
  man_path <- path(path, "man")
  if (!dir_exists(man_path)) {
    return(set_names(list(), character()))
  }
  rd_paths <- dir_ls(man_path, pattern = "\\.Rd$", type = "file")
  names(rd_paths) <- path_file(rd_paths)
  lapply(rd_paths, rd_file, pkg_path = path)
}
# Pull the text of every top-level element of Rd class `tag` (e.g.
# "tag_alias") out of a parsed Rd object, as a character vector.
extract_tag <- function(x, tag) {
  matching <- purrr::keep(x, inherits, tag)
  purrr::map_chr(matching, c(1, 1))
}
# Extract the \title of a parsed Rd object as a plain, whitespace-trimmed
# string (titles are rendered without auto-linking).
extract_title <- function(x) {
  title_tag <- purrr::detect(x, inherits, "tag_title")
  trimws(flatten_text(title_tag, auto_link = FALSE))
}
# TRUE when the topic carries \keyword{internal}.
is_internal <- function(x) {
  "internal" %in% extract_tag(x, "tag_keyword")
}
# Vignettes ---------------------------------------------------------------
# Find vignettes (vignettes/**/*.Rmd, excluding files whose base name starts
# with "_") and return a tibble of input/output paths, names, directory
# depth, and titles.
package_vignettes <- function(path = ".") {
  vig_path <- dir(
    path(path, "vignettes"),
    pattern = "\\.[rR]md$",
    recursive = TRUE
  )
  # Files starting with "_" are treated as partials/children, not vignettes.
  vig_path <- vig_path[!grepl("^_", basename(vig_path))]
  # Title comes from each vignette's YAML front matter.
  # NOTE(review): `.null` is the legacy purrr spelling (newer purrr uses
  # `.default`) -- confirm which purrr version this code targets.
  title <- path(path, "vignettes", vig_path) %>%
    purrr::map(rmarkdown::yaml_front_matter) %>%
    purrr::map_chr("title", .null = "UNKNOWN TITLE")
  tibble::tibble(
    file_in = vig_path,
    file_out = gsub("\\.[rR]md$", "\\.html", vig_path),
    name = tools::file_path_sans_ext(basename(vig_path)),
    path = dirname(vig_path),
    vig_depth = dir_depth(vig_path),
    title = title
  )
}
# Number of directory components above each file, i.e. the count of "/"
# separators in each relative path: "a/b/c.Rmd" -> 2L, "c.Rmd" -> 0L.
#
# The original split every path into single characters and summed the
# matches; counting via gsub() is vectorised, avoids the per-character
# allocations (and the purrr dependency), and returns the same integers.
dir_depth <- function(x) {
  nchar(x) - nchar(gsub("/", "", x, fixed = TRUE))
}
|
## Course project plot 2: line chart of Global Active Power over the two
## analysis days (1-2 Feb 2007), written to plot2.png (dev.off() follows).
library("data.table")
## English locale so that any date labels would print in English.
Sys.setlocale(category = "LC_ALL", locale = "en_US.UTF-8")
## The two target days; Date values are day/month/year strings in the file.
legalDate <- c('1/2/2007', '2/2/2007')
## "?" encodes missing measurements in the source file.
dat <- fread("household_power_consumption.txt", na.strings="?")
dat <- dat[dat$Date %in% legalDate, ]
dat$Global_active_power <- as.numeric(dat$Global_active_power)
## Combine date and time into POSIXct timestamps for the x axis.
xval <- as.POSIXct(paste(dat$Date, dat$Time), format="%d/%m/%Y %T")
#r <- round(range(xval), "days")
png("plot2.png")
plot(xval, dat$Global_active_power, type="l", xlab="", ylab="Global Active Power(kilowatts)")
#axis.POSIXct(1, at = seq(r[1], r[2], by="day"), format="%a")
dev.off() | /courses/Exploratory_Data_Analysis/week1/ploting1/plot2.R | no_license | code6/playground | R | false | false | 570 | r | library("data.table")
## Course project plot 2: line chart of Global Active Power over the two
## analysis days (1-2 Feb 2007), written to plot2.png (dev.off() follows).
## English locale so that any date labels would print in English.
Sys.setlocale(category = "LC_ALL", locale = "en_US.UTF-8")
## The two target days; Date values are day/month/year strings in the file.
legalDate <- c('1/2/2007', '2/2/2007')
## "?" encodes missing measurements in the source file.
dat <- fread("household_power_consumption.txt", na.strings="?")
dat <- dat[dat$Date %in% legalDate, ]
dat$Global_active_power <- as.numeric(dat$Global_active_power)
## Combine date and time into POSIXct timestamps for the x axis.
xval <- as.POSIXct(paste(dat$Date, dat$Time), format="%d/%m/%Y %T")
#r <- round(range(xval), "days")
png("plot2.png")
plot(xval, dat$Global_active_power, type="l", xlab="", ylab="Global Active Power(kilowatts)")
#axis.POSIXct(1, at = seq(r[1], r[2], by="day"), format="%a")
dev.off() |
# Rscript cmp_obs_random_leDily_quantif.R
# Compare observed leDily-style quantification statistics against the same
# statistics computed on randomized domains: for each randomization scheme
# and each statistic column, draw an observed-vs-random scatter plot (one
# SVG per statistic) colored by comparison type.
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
source("../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
# NOTE(review): `addCorr`, `all_cols` and `all_cmps` are presumably defined
# by the two sourced files above -- confirm.
plotCex <- 1.2
outFolder <- file.path("CMP_OBS_RANDOM_LEDILYQUANTIF")
dir.create(outFolder, recursive = TRUE)
# Observed statistics: one row per dataset.
obs_dt <- get(load(file.path("LEDILY_QUANTIF/all_stats_dt.Rdata")))
# Randomization schemes to compare against.
rd_types <- c("RANDOMMIDPOS", "RANDOMMIDPOSDISC", "RANDOMMIDPOSSTRICT")
rd=rd_types[1]
for(rd in rd_types) {
  rd_dt <- get(load(file.path("LEDILY_QUANTIF_RANDOM", rd, "all_stats_dt.Rdata")))
  # Normalize dataset names so they match the observed table
  # (strip the randomization suffix from the folder name).
  rd_dt$dataset <- gsub(paste0(rd, "_40kb"), "40kb", rd_dt$dataset)
  stopifnot(setequal(rd_dt$dataset, obs_dt$dataset))
  all_dt <- merge(obs_dt, rd_dt, by="dataset", suffixes=c("_obs", "_rd"),all=TRUE)
  # stopifnot(!is.na(all_dt))s
  stopifnot(setequal(colnames(obs_dt), colnames(rd_dt)))
  # All statistic columns (everything except the dataset key).
  all_vars <- colnames(obs_dt)
  all_vars <- all_vars[all_vars!="dataset"]
  # Color each point by comparison type via the subtype_cols.R lookup tables.
  all_dt$cmpTypeCol <- all_cols[all_cmps[basename(all_dt$dataset)]]
  stopifnot(!is.na(all_dt$cmpTypeCol))
  head(all_dt)
  plot_var = all_vars[1]
  for(plot_var in all_vars) {
    cat(plot_var, "\n")
    my_x <- all_dt[,paste0(plot_var, "_obs")]
    my_y <- all_dt[,paste0(plot_var, "_rd")]
    outFile <- file.path(outFolder, paste0(plot_var, "_obs_vs_", rd, ".svg"))
    cat(paste0(outFile), "\n")
    svg(outFile, height=6, width=6)
    # Observed (x) vs randomized (y) values, one point per dataset.
    plot(
      x = my_x,
      y = my_y,
      main=paste0(plot_var),
      pch=16,
      cex=0.7,
      col=all_dt$cmpTypeCol,
      xlab="observed",
      ylab=rd,
      cex.axis=plotCex,
      cex.lab=plotCex,
      cex.main=plotCex
    )
    # y = x reference line.
    curve(1*x, col="darkgrey", add=TRUE)
    # Annotate with the correlation (helper sourced from utils_fct.R).
    addCorr(
      x = my_x,
      y = my_y,
      bty="n", legPos = "topleft"
    )
    legend(
      "bottomright",
      legend=names(all_cols),
      pch=16,
      cex=0.7,
      col=all_cols,
      bty="n"
    )
    foo <- dev.off()
    cat(paste0("... written: ", outFile, "\n"))
  }
}
#
#
# all_rd_dt <- get(load(file.path("ALL_PURITYFLAGGED_FINAL_RANDOMMIDPOS//aran/CPE/log10/all_dt.Rdata")))
# all_rd_dt$rd_type <- gsub(".+_(.+)_40kb", "\\1", dirname(all_rd_dt$dataset))
# all_rd_dt$dataset_init <- all_rd_dt$dataset
# all_rd_dt$dataset <- gsub("_RANDOM.+_40kb", "_40kb", all_rd_dt$dataset_init)
# stopifnot(setequal(all_rd_dt$dataset, all_obs_dt$dataset))
# all_rd_dt$dataset_init <- NULL
# nall_dt <- merge(all_obs_dt, all_rd_dt, by=c("dataset"), suffixes=c("_obs", "_rd"))
#
# signif_obs_dt <- get(load(file.path("SIGNIF_PURITYFLAGGED_FINAL//aran/CPE/log10/all_dt.Rdata")))
# signif_rd_dt <- get(load(file.path("SIGNIF_PURITYFLAGGED_FINAL_RANDOMMIDPOS//aran/CPE/log10/all_dt.Rdata")))
# signif_rd_dt$rd_type <- gsub(".+_(.+)_40kb", "\\1", dirname(signif_rd_dt$dataset))
# signif_rd_dt$dataset_init <- signif_rd_dt$dataset
# signif_rd_dt$dataset <- gsub("_RANDOM.+_40kb", "_40kb", signif_rd_dt$dataset_init)
# stopifnot(setequal(signif_rd_dt$dataset, signif_rd_dt$dataset))
# signif_rd_dt$dataset_init <- NULL
# signif_dt <- merge(signif_obs_dt, signif_rd_dt, by=c("dataset"), suffixes=c("_obs", "_rd"))
#
# all_dt <- nall_dt # because 2nd get(load overwrite the all_dt
#
#
# rd_types <- c("RANDOMMIDPOS", "RANDOMMIDPOSDISC", "RANDOMMIDPOSSTRICT")
# rd=rd_types[1]
# for(rd in rd_types) {
#
# curr_dt <- all_dt[all_dt$rd_type == rd,]
# stopifnot(nrow(curr_dt) > 0)
# stopifnot(!duplicated(curr_dt$dataset))
#
# outFile <- file.path(outFolder, paste0("ratioFlagged_", rd, ".svg"))
# cat(paste0(outFile), "\n")
# svg(outFile, height=6, width=6)
# plot(
# x = curr_dt$ratioFlagged_obs,
# y = curr_dt$ratioFlagged_rd,
# main=paste0("ratioFlagged"),
# pch=16,
# cex=0.7,
# col=curr_dt$cmpTypeCol,
# xlab="observed",
# ylab=rd,
# cex.axis=plotCex,
# cex.lab=plotCex,
# cex.main=plotCex
# )
# curve(1*x, col="darkgrey", add=TRUE)
# addCorr(
# x = curr_dt$ratioFlagged_obs,
# y = curr_dt$ratioFlagged_rd,
# bty="n", legPos = "topleft"
# )
# legend(
# "bottomright",
# legend=names(all_cols),
# pch=16,
# cex=0.7,
# col=all_cols,
# bty="n"
# )
# foo <- dev.off()
# cat(paste0("... written: ", outFile, "\n"))
#
#
# curr_dt <- signif_dt[signif_dt$rd_type == rd,]
# stopifnot(nrow(curr_dt) > 0)
# stopifnot(!duplicated(curr_dt$dataset))
#
# outFile <- file.path(outFolder, paste0("ratioSignifFlagged_", rd, ".svg"))
# svg(outFile, height=6, width=6)
# plot(
# x = curr_dt$ratioSignifFlagged_obs,
# y = curr_dt$ratioSignifFlagged_rd,
# main=paste0("ratioSignifFlagged"),
# pch=16,
# cex=0.7,
# col=curr_dt$cmpTypeCol,
# xlab="observed",
# ylab=rd,
# cex.axis=plotCex,
# cex.lab=plotCex,
# cex.main=plotCex
# )
# curve(1*x, col="darkgrey", add=TRUE)
# addCorr(
# x = curr_dt$ratioSignifFlagged_obs,
# y = curr_dt$ratioSignifFlagged_rd,
# bty="n", legPos = "topleft"
# )
# legend(
# "bottomright",
# legend=names(all_cols),
# pch=16,
# cex=0.7,
# col=all_cols,
# bty="n"
# )
# foo <- dev.off()
# cat(paste0("... written: ", outFile, "\n"))
#
#
#
# }
#
#
#
| /cmp_obs_random_leDily_quantif.R | no_license | marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA | R | false | false | 5,182 | r | # Rscript cmp_obs_random_leDily_quantif.R
# Compare observed leDily-style quantification statistics against the same
# statistics computed on randomized domains: for each randomization scheme
# and each statistic column, draw an observed-vs-random scatter plot (one
# SVG per statistic) colored by comparison type.
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
source("../Yuanlong_Cancer_HiC_data_TAD_DA/subtype_cols.R")
# NOTE(review): `addCorr`, `all_cols` and `all_cmps` are presumably defined
# by the two sourced files above -- confirm.
plotCex <- 1.2
outFolder <- file.path("CMP_OBS_RANDOM_LEDILYQUANTIF")
dir.create(outFolder, recursive = TRUE)
# Observed statistics: one row per dataset.
obs_dt <- get(load(file.path("LEDILY_QUANTIF/all_stats_dt.Rdata")))
# Randomization schemes to compare against.
rd_types <- c("RANDOMMIDPOS", "RANDOMMIDPOSDISC", "RANDOMMIDPOSSTRICT")
rd=rd_types[1]
for(rd in rd_types) {
  rd_dt <- get(load(file.path("LEDILY_QUANTIF_RANDOM", rd, "all_stats_dt.Rdata")))
  # Normalize dataset names so they match the observed table
  # (strip the randomization suffix from the folder name).
  rd_dt$dataset <- gsub(paste0(rd, "_40kb"), "40kb", rd_dt$dataset)
  stopifnot(setequal(rd_dt$dataset, obs_dt$dataset))
  all_dt <- merge(obs_dt, rd_dt, by="dataset", suffixes=c("_obs", "_rd"),all=TRUE)
  # stopifnot(!is.na(all_dt))s
  stopifnot(setequal(colnames(obs_dt), colnames(rd_dt)))
  # All statistic columns (everything except the dataset key).
  all_vars <- colnames(obs_dt)
  all_vars <- all_vars[all_vars!="dataset"]
  # Color each point by comparison type via the subtype_cols.R lookup tables.
  all_dt$cmpTypeCol <- all_cols[all_cmps[basename(all_dt$dataset)]]
  stopifnot(!is.na(all_dt$cmpTypeCol))
  head(all_dt)
  plot_var = all_vars[1]
  for(plot_var in all_vars) {
    cat(plot_var, "\n")
    my_x <- all_dt[,paste0(plot_var, "_obs")]
    my_y <- all_dt[,paste0(plot_var, "_rd")]
    outFile <- file.path(outFolder, paste0(plot_var, "_obs_vs_", rd, ".svg"))
    cat(paste0(outFile), "\n")
    svg(outFile, height=6, width=6)
    # Observed (x) vs randomized (y) values, one point per dataset.
    plot(
      x = my_x,
      y = my_y,
      main=paste0(plot_var),
      pch=16,
      cex=0.7,
      col=all_dt$cmpTypeCol,
      xlab="observed",
      ylab=rd,
      cex.axis=plotCex,
      cex.lab=plotCex,
      cex.main=plotCex
    )
    # y = x reference line.
    curve(1*x, col="darkgrey", add=TRUE)
    # Annotate with the correlation (helper sourced from utils_fct.R).
    addCorr(
      x = my_x,
      y = my_y,
      bty="n", legPos = "topleft"
    )
    legend(
      "bottomright",
      legend=names(all_cols),
      pch=16,
      cex=0.7,
      col=all_cols,
      bty="n"
    )
    foo <- dev.off()
    cat(paste0("... written: ", outFile, "\n"))
  }
}
#
#
# all_rd_dt <- get(load(file.path("ALL_PURITYFLAGGED_FINAL_RANDOMMIDPOS//aran/CPE/log10/all_dt.Rdata")))
# all_rd_dt$rd_type <- gsub(".+_(.+)_40kb", "\\1", dirname(all_rd_dt$dataset))
# all_rd_dt$dataset_init <- all_rd_dt$dataset
# all_rd_dt$dataset <- gsub("_RANDOM.+_40kb", "_40kb", all_rd_dt$dataset_init)
# stopifnot(setequal(all_rd_dt$dataset, all_obs_dt$dataset))
# all_rd_dt$dataset_init <- NULL
# nall_dt <- merge(all_obs_dt, all_rd_dt, by=c("dataset"), suffixes=c("_obs", "_rd"))
#
# signif_obs_dt <- get(load(file.path("SIGNIF_PURITYFLAGGED_FINAL//aran/CPE/log10/all_dt.Rdata")))
# signif_rd_dt <- get(load(file.path("SIGNIF_PURITYFLAGGED_FINAL_RANDOMMIDPOS//aran/CPE/log10/all_dt.Rdata")))
# signif_rd_dt$rd_type <- gsub(".+_(.+)_40kb", "\\1", dirname(signif_rd_dt$dataset))
# signif_rd_dt$dataset_init <- signif_rd_dt$dataset
# signif_rd_dt$dataset <- gsub("_RANDOM.+_40kb", "_40kb", signif_rd_dt$dataset_init)
# stopifnot(setequal(signif_rd_dt$dataset, signif_rd_dt$dataset))
# signif_rd_dt$dataset_init <- NULL
# signif_dt <- merge(signif_obs_dt, signif_rd_dt, by=c("dataset"), suffixes=c("_obs", "_rd"))
#
# all_dt <- nall_dt # because 2nd get(load overwrite the all_dt
#
#
# rd_types <- c("RANDOMMIDPOS", "RANDOMMIDPOSDISC", "RANDOMMIDPOSSTRICT")
# rd=rd_types[1]
# for(rd in rd_types) {
#
# curr_dt <- all_dt[all_dt$rd_type == rd,]
# stopifnot(nrow(curr_dt) > 0)
# stopifnot(!duplicated(curr_dt$dataset))
#
# outFile <- file.path(outFolder, paste0("ratioFlagged_", rd, ".svg"))
# cat(paste0(outFile), "\n")
# svg(outFile, height=6, width=6)
# plot(
# x = curr_dt$ratioFlagged_obs,
# y = curr_dt$ratioFlagged_rd,
# main=paste0("ratioFlagged"),
# pch=16,
# cex=0.7,
# col=curr_dt$cmpTypeCol,
# xlab="observed",
# ylab=rd,
# cex.axis=plotCex,
# cex.lab=plotCex,
# cex.main=plotCex
# )
# curve(1*x, col="darkgrey", add=TRUE)
# addCorr(
# x = curr_dt$ratioFlagged_obs,
# y = curr_dt$ratioFlagged_rd,
# bty="n", legPos = "topleft"
# )
# legend(
# "bottomright",
# legend=names(all_cols),
# pch=16,
# cex=0.7,
# col=all_cols,
# bty="n"
# )
# foo <- dev.off()
# cat(paste0("... written: ", outFile, "\n"))
#
#
# curr_dt <- signif_dt[signif_dt$rd_type == rd,]
# stopifnot(nrow(curr_dt) > 0)
# stopifnot(!duplicated(curr_dt$dataset))
#
# outFile <- file.path(outFolder, paste0("ratioSignifFlagged_", rd, ".svg"))
# svg(outFile, height=6, width=6)
# plot(
# x = curr_dt$ratioSignifFlagged_obs,
# y = curr_dt$ratioSignifFlagged_rd,
# main=paste0("ratioSignifFlagged"),
# pch=16,
# cex=0.7,
# col=curr_dt$cmpTypeCol,
# xlab="observed",
# ylab=rd,
# cex.axis=plotCex,
# cex.lab=plotCex,
# cex.main=plotCex
# )
# curve(1*x, col="darkgrey", add=TRUE)
# addCorr(
# x = curr_dt$ratioSignifFlagged_obs,
# y = curr_dt$ratioSignifFlagged_rd,
# bty="n", legPos = "topleft"
# )
# legend(
# "bottomright",
# legend=names(all_cols),
# pch=16,
# cex=0.7,
# col=all_cols,
# bty="n"
# )
# foo <- dev.off()
# cat(paste0("... written: ", outFile, "\n"))
#
#
#
# }
#
#
#
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/api_search_by_updated_at.R
\name{api_search_by_updated_at}
\alias{api_search_by_updated_at}
\title{Search MS Bioscreen by an updated at timestamp}
\usage{
api_search_by_updated_at(updated_at_date = "2015-09-24",
updated_at_time = "00:00:00", endpoint = "subjects",
base_url = "https://msbioscreen-uat.herokuapp.com/api/v1",
token = get_token(), verbose_b = TRUE)
}
\arguments{
\item{updated_at_date}{date stamp in format "yyyy-mm-dd".}
\item{updated_at_time}{time stamp in format "hh:mm:ss".}
\item{endpoint}{the data endpoint of interest. Possible values are "subjects", "attacks",
"treatments", and "visits".}
\item{base_url}{the API base URL.}
\item{token}{HTTP authorization token. Default is to get environment variable 'MSBWAITER_TOKEN'.}
\item{verbose_b}{print progress messages as function runs?}
}
\description{
\code{api_search_by_updated_at} returns the bioscreen entries updated on or after the specified date and time.
}
\seealso{
\code{\link{api_do_action}}, \code{\link{api_create}}, \code{\link{api_update}},
\code{\link{api_delete}}, \code{\link{api_get}}, \code{\link{api_search_by_epicid}}
}
| /man/api_search_by_updated_at.Rd | no_license | UCSF-MSLAB/msbwaiter | R | false | false | 1,208 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/api_search_by_updated_at.R
\name{api_search_by_updated_at}
\alias{api_search_by_updated_at}
\title{Search MS Bioscreen by an updated at timestamp}
\usage{
api_search_by_updated_at(updated_at_date = "2015-09-24",
updated_at_time = "00:00:00", endpoint = "subjects",
base_url = "https://msbioscreen-uat.herokuapp.com/api/v1",
token = get_token(), verbose_b = TRUE)
}
\arguments{
\item{updated_at_date}{date stamp in format "yyyy-mm-dd".}
\item{updated_at_time}{time stamp in format "hh:mm:ss".}
\item{endpoint}{the data endpoint of interest. Possible values are "subjects", "attacks",
"treatments", and "visits".}
\item{base_url}{the API base URL.}
\item{token}{HTTP authorization token. Default is to get environment variable 'MSBWAITER_TOKEN'.}
\item{verbose_b}{print progress messages as function runs?}
}
\description{
\code{api_search_by_updated_at} returns the bioscreen entries updated on or after the specified date and time.
}
\seealso{
\code{\link{api_do_action}}, \code{\link{api_create}}, \code{\link{api_update}},
\code{\link{api_delete}}, \code{\link{api_get}}, \code{\link{api_search_by_epicid}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/newEngine.R
\name{engineAEFA}
\alias{engineAEFA}
\title{estimate full-information item factor analysis models with combining random effects}
\usage{
engineAEFA(data, model = 1, GenRandomPars = T, NCYCLES = 4000,
BURNIN = 1500, SEMCYCLES = 1000, covdata = NULL, fixed = c(~1,
~0, ~-1), random = list(~1 | items), key = NULL,
accelerate = "squarem", symmetric = F, resampling = T,
samples = 5000, printDebugMsg = F, fitEMatUIRT = F,
ranefautocomb = T, tryLCA = T, forcingMixedModelOnly = F,
forcingQMC = F, turnOffMixedEst = F, anchor = NULL,
skipggumInternal = F, powertest = F, idling = 60, leniency = F)
}
\arguments{
\item{data}{insert \code{data.frame} object.}
\item{model}{specify the mirt model if want to calibrate. accepting \code{mirt::mirt.model} object.}
\item{GenRandomPars}{Try to generate Random Parameters? Default is TRUE}
\item{NCYCLES}{N Cycles of Robbin Monroe stage (stage 3). Default is 4000.}
\item{BURNIN}{N Cycles of Metro-hastings burnin stage (stage 1). Default is 1500.}
\item{SEMCYCLES}{N Cycles of Metro-hastings burnin stage (stage 2). Default is 1000.}
\item{covdata}{insert covariate data frame where use to fixed and random effect term. if not inserted, ignoring fixed and random effect estimation.}
\item{fixed}{a right sided R formula for specifying the fixed effect (aka 'explanatory') predictors from covdata and itemdesign.}
\item{random}{a right sided formula or list of formulas containing crossed random effects of the form \code{v1 + ... v_n | G}, where \code{G} is the grouping variable and \code{v_n} are random numeric predictors within each group. G may contain interaction terms, such as group:items to include cross or person-level interactions effects.}
\item{key}{item key vector of multiple choices test.}
\item{accelerate}{a character vector indicating the type of acceleration to use. Default is 'squarem' for the SQUAREM procedure (specifically, the gSqS3 approach)}
\item{symmetric}{force S-EM/Oakes information matrix to be symmetric? Default is FALSE to detect solutions that have not reached the ML estimate.}
\item{resampling}{Do you want to do resampling with replace? default is TRUE and activate nrow is over samples argument.}
\item{samples}{specify the number samples with resampling. default is 5000.}
\item{printDebugMsg}{Do you want to see the debugging messeages? default is FALSE}
\item{fitEMatUIRT}{Do you want to fit the model with EM at UIRT? default is FALSE}
\item{ranefautocomb}{Do you want to find global-optimal random effect combination? default is TRUE}
\item{tryLCA}{Do you want to try to calibrate the LCA model if available? default is TRUE}
\item{forcingMixedModelOnly}{Do you want to forcing the Mixed model calibration? default is FALSE}
\item{forcingQMC}{Do you want to forcing the use QMC estimation instead MHRM? default is FALSE}
\item{turnOffMixedEst}{Do you want to turn off mixed effect (multilevel) estimation? default is FALSE}
\item{anchor}{Set the anchor item names If you want to consider DIF detection. default is NULL.}
\item{skipggumInternal}{Set the skipping ggum fitting procedure to speed up. default is FALSE.}
\item{powertest}{Set power test mode. default is FALSE.}
\item{idling}{Set seconds to idle. default is 60.}
\item{leniency}{skip second order test. default is FALSE}
}
\value{
possible optimal combinations of models in list
}
\description{
estimate full-information item factor analysis models with combinating random effects
}
\examples{
\dontrun{
testMod1 <- engineAEFA(mirt::Science, model = 1)
}
}
| /man/engineAEFA.Rd | no_license | seonghobae/kaefa | R | false | true | 3,631 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/newEngine.R
\name{engineAEFA}
\alias{engineAEFA}
\title{estimate full-information item factor analysis models with combinating random effects}
\usage{
engineAEFA(data, model = 1, GenRandomPars = T, NCYCLES = 4000,
BURNIN = 1500, SEMCYCLES = 1000, covdata = NULL, fixed = c(~1,
~0, ~-1), random = list(~1 | items), key = NULL,
accelerate = "squarem", symmetric = F, resampling = T,
samples = 5000, printDebugMsg = F, fitEMatUIRT = F,
ranefautocomb = T, tryLCA = T, forcingMixedModelOnly = F,
forcingQMC = F, turnOffMixedEst = F, anchor = NULL,
skipggumInternal = F, powertest = F, idling = 60, leniency = F)
}
\arguments{
\item{data}{insert \code{data.frame} object.}
\item{model}{specify the mirt model if want to calibrate. accepting \code{mirt::mirt.model} object.}
\item{GenRandomPars}{Try to generate Random Parameters? Default is TRUE}
\item{NCYCLES}{N Cycles of Robbin Monroe stage (stage 3). Default is 4000.}
\item{BURNIN}{N Cycles of Metropolis-Hastings burn-in stage (stage 1). Default is 1500.}
\item{SEMCYCLES}{N Cycles of Metropolis-Hastings burn-in stage (stage 2). Default is 1000.}
\item{covdata}{insert covariate data frame where use to fixed and random effect term. if not inserted, ignoring fixed and random effect estimation.}
\item{fixed}{a right sided R formula for specifying the fixed effect (aka 'explanatory') predictors from covdata and itemdesign.}
\item{random}{a right sided formula or list of formulas containing crossed random effects of the form \code{v1 + ... v_n | G}, where \code{G} is the grouping variable and \code{v_n} are random numeric predictors within each group. G may contain interaction terms, such as group:items to include cross or person-level interactions effects.}
\item{key}{item key vector of multiple choices test.}
\item{accelerate}{a character vector indicating the type of acceleration to use. Default is 'squarem' for the SQUAREM procedure (specifically, the gSqS3 approach)}
\item{symmetric}{force S-EM/Oakes information matrix to be symmetric? Default is FALSE to detect solutions that have not reached the ML estimate.}
\item{resampling}{Do you want to do resampling with replace? default is TRUE and activate nrow is over samples argument.}
\item{samples}{specify the number samples with resampling. default is 5000.}
\item{printDebugMsg}{Do you want to see the debugging messages? default is FALSE}
\item{fitEMatUIRT}{Do you want to fit the model with EM at UIRT? default is FALSE}
\item{ranefautocomb}{Do you want to find global-optimal random effect combination? default is TRUE}
\item{tryLCA}{Do you want to try to calibrate the LCA model if available? default is TRUE}
\item{forcingMixedModelOnly}{Do you want to forcing the Mixed model calibration? default is FALSE}
\item{forcingQMC}{Do you want to forcing the use QMC estimation instead MHRM? default is FALSE}
\item{turnOffMixedEst}{Do you want to turn off mixed effect (multilevel) estimation? default is FALSE}
\item{anchor}{Set the anchor item names If you want to consider DIF detection. default is NULL.}
\item{skipggumInternal}{Set the skipping ggum fitting procedure to speed up. default is FALSE.}
\item{powertest}{Set power test mode. default is FALSE.}
\item{idling}{Set seconds to idle. default is 60.}
\item{leniency}{skip second order test. default is FALSE}
}
\value{
possible optimal combinations of models in list
}
\description{
estimate full-information item factor analysis models with combinating random effects
}
\examples{
\dontrun{
testMod1 <- engineAEFA(mirt::Science, model = 1)
}
}
|
# Draw a two-panel visualisation of a binary (Pass/Fail) confusion matrix with
# base graphics: the 2x2 count matrix on top, summary statistics below.
#
# Args:
#   cm: a confusion-matrix object exposing $table (2x2 counts) and named
#       statistic vectors $byClass and $overall -- the fields accessed here
#       match the object returned by caret::confusionMatrix (assumption based
#       on the accessed fields; confirm against the caller).
#
# Side effects: draws on the active graphics device and modifies the device
# layout() and par(mar) settings without restoring them.
draw_confusion_matrix <- function(cm) {
# panel 1 (the matrix) takes the top two layout rows, panel 2 (details) the third
layout(matrix(c(1,1,2)))
par(mar=c(2,2,2,2))
# blank canvas in arbitrary user coordinates; axes and labels suppressed
plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
title('CONFUSION MATRIX', cex.main=2.5)
# create the matrix: four coloured cells plus axis captions
rect(150, 430, 240, 370, col='#00A65A')
text(195, 435, 'Pass', cex=1.5)
rect(250, 430, 340, 370, col='#F39C12')
text(295, 435, 'Fail', cex=1.5)
text(125, 370, 'Predicted', cex=1.6, srt=90, font=2)
text(245, 450, 'Actual', cex=1.6, font=2)
rect(150, 305, 240, 365, col='#F39C12')
rect(250, 305, 340, 365, col='#00A65A')
text(140, 400, 'Pass', cex=1.5, srt=90)
text(140, 335, 'Fail', cex=1.5, srt=90)
# add in the cm results; as.numeric() flattens the 2x2 table column-wise,
# so res[1..4] are the four cells in column order
res <- as.numeric(cm$table)
text(195, 400, res[1], cex=2, font=2, col='white')
text(195, 335, res[2], cex=2, font=2, col='white')
text(295, 400, res[3], cex=2, font=2, col='white')
text(295, 335, res[4], cex=2, font=2, col='white')
# add in the specifics (second layout panel): statistics taken from
# cm$byClass by position (for caret these are Sensitivity, Specificity,
# Precision, Recall, F1 -- confirm with the caret version in use)
plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
text(10, 85, names(cm$byClass[1]), cex=1.6, font=2)
text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.6)
text(30, 85, names(cm$byClass[2]), cex=1.6, font=2)
text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.6)
text(50, 85, names(cm$byClass[5]), cex=1.6, font=2)
text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.6)
text(70, 85, names(cm$byClass[6]), cex=1.6, font=2)
text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.6)
text(90, 85, names(cm$byClass[7]), cex=1.6, font=2)
text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.6)
# add in the accuracy information (first two entries of cm$overall)
text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.8)
text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.8)
} | /3-deploy-your-model/draw_confusion_matrix.R | no_license | andrefsferreira/mwd-2020 | R | false | false | 1,908 | r | draw_confusion_matrix <- function(cm) {
layout(matrix(c(1,1,2)))
par(mar=c(2,2,2,2))
plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
title('CONFUSION MATRIX', cex.main=2.5)
# create the matrix
rect(150, 430, 240, 370, col='#00A65A')
text(195, 435, 'Pass', cex=1.5)
rect(250, 430, 340, 370, col='#F39C12')
text(295, 435, 'Fail', cex=1.5)
text(125, 370, 'Predicted', cex=1.6, srt=90, font=2)
text(245, 450, 'Actual', cex=1.6, font=2)
rect(150, 305, 240, 365, col='#F39C12')
rect(250, 305, 340, 365, col='#00A65A')
text(140, 400, 'Pass', cex=1.5, srt=90)
text(140, 335, 'Fail', cex=1.5, srt=90)
# add in the cm results
res <- as.numeric(cm$table)
text(195, 400, res[1], cex=2, font=2, col='white')
text(195, 335, res[2], cex=2, font=2, col='white')
text(295, 400, res[3], cex=2, font=2, col='white')
text(295, 335, res[4], cex=2, font=2, col='white')
# add in the specifics
plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
text(10, 85, names(cm$byClass[1]), cex=1.6, font=2)
text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.6)
text(30, 85, names(cm$byClass[2]), cex=1.6, font=2)
text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.6)
text(50, 85, names(cm$byClass[5]), cex=1.6, font=2)
text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.6)
text(70, 85, names(cm$byClass[6]), cex=1.6, font=2)
text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.6)
text(90, 85, names(cm$byClass[7]), cex=1.6, font=2)
text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.6)
# add in the accuracy information
text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.8)
text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.8)
} |
# Regression-test script exercising QCA3's handling of don't-care (-9) cases.
library(QCA3)
data(CarenPanofsky)
# crisp-set truth table: outcome 'recognition', first five columns as conditions
tqca.tt <- cs_truthTable(CarenPanofsky,'recognition',names(CarenPanofsky)[1:5])
tqca.ans <- reduce(tqca.tt)
QCA3:::prettyPI(tqca.ans)
data(McCammonVanDyke)
workdat <- McCammonVanDyke
# treatment 1: recode the -9 don't-care markers to 0 before minimisation
workdat[workdat==-9] <- 0
fig13.2 <- reduce(workdat,"coalition",c("ideology","threats","opportunity","ties","resources"))
QCA3:::prettyPI(fig13.2)
## result in figure 13.2
# treatment 2: drop rows containing -9, minimise the remainder, then re-admit
# the dropped configurations via constrReduce()
workdat <- McCammonVanDyke
idx <- apply(workdat, 1, function(x) any(x==-9))
ans <- reduce(workdat[!idx,],"coalition",c("ideology","threats","opportunity","ties","resources"))
fig13.3 <- constrReduce(ans,include=workdat[idx,1:5])
QCA3:::prettyPI(fig13.3)
| /tests/dontcare.R | no_license | cran/QCA3 | R | false | false | 651 | r | library(QCA3)
data(CarenPanofsky)
tqca.tt <- cs_truthTable(CarenPanofsky,'recognition',names(CarenPanofsky)[1:5])
tqca.ans <- reduce(tqca.tt)
QCA3:::prettyPI(tqca.ans)
data(McCammonVanDyke)
workdat <- McCammonVanDyke
workdat[workdat==-9] <- 0
fig13.2 <- reduce(workdat,"coalition",c("ideology","threats","opportunity","ties","resources"))
QCA3:::prettyPI(fig13.2)
## result in figure 13.2
workdat <- McCammonVanDyke
idx <- apply(workdat, 1, function(x) any(x==-9))
ans <- reduce(workdat[!idx,],"coalition",c("ideology","threats","opportunity","ties","resources"))
fig13.3 <- constrReduce(ans,include=workdat[idx,1:5])
QCA3:::prettyPI(fig13.3)
|
# Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# Get the swirl state
# Fetch swirl's internal state.
#
# While swirl is running, its callback function sits in frame 1 of the call
# stack; the state object, named `e`, lives in that callback's environment.
getState <- function() {
  callback <- sys.function(1)
  environment(callback)$e
}
# Retrieve the log from swirl's state
# Convenience accessor: pull the interaction log out of swirl's state.
getLog <- function() {
  state <- getState()
  state$log
}
# If the user opted in, serialise the swirl interaction log to CSV, base64-encode
# it, and open a pre-filled Google Form in the default browser for submission.
# Side effects: writes a temporary CSV file and launches the browser.
submit_log <- function(){
if(getState()$val == "Yes (will take you to the Google Form)"){
pre_fill_link <- "https://docs.google.com/forms/d/e/1FAIpQLSflIJeTb7yQ0eZXrvM7ZRTTiMmzR-cnQy8t07EnyYeuXGBVRQ/viewform?entry.754375154"
# ensure the pre-fill link ends with "=" so the payload can be appended directly
if(!grepl("=$", pre_fill_link)){
pre_fill_link <- paste0(pre_fill_link, "=")
}
# p(): right-pad vector x to length p with fill value f
p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
temp <- tempfile()
log_ <- getLog()
# pad every log field to the length of the longest so they fit one data frame
nrow_ <- max(unlist(lapply(log_, length)))
log_tbl <- data.frame(user = rep(log_$user, nrow_),
course_name = rep(log_$course_name, nrow_),
lesson_name = rep(log_$lesson_name, nrow_),
question_number = p(log_$question_number, nrow_, NA),
correct = p(log_$correct, nrow_, NA),
attempt = p(log_$attempt, nrow_, NA),
skipped = p(log_$skipped, nrow_, NA),
datetime = p(log_$datetime, nrow_, NA),
stringsAsFactors = FALSE)
write.csv(log_tbl, file = temp, row.names = FALSE)
# NOTE(review): base64encode() is not defined in this file; presumably provided
# by a package attached by swirl (e.g. base64enc) -- confirm before reuse.
encoded_log <- base64encode(temp)
browseURL(paste0(pre_fill_link, encoded_log))
}
}
| /Regex_in_base_R/customTests.R | permissive | erhard1/Regular_Expressions | R | false | false | 2,125 | r | # Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# Get the swirl state
getState <- function(){
# Whenever swirl is running, its callback is at the top of its call stack.
# Swirl's state, named e, is stored in the environment of the callback.
environment(sys.function(1))$e
}
# Retrieve the log from swirl's state
getLog <- function(){
getState()$log
}
submit_log <- function(){
if(getState()$val == "Yes (will take you to the Google Form)"){
pre_fill_link <- "https://docs.google.com/forms/d/e/1FAIpQLSflIJeTb7yQ0eZXrvM7ZRTTiMmzR-cnQy8t07EnyYeuXGBVRQ/viewform?entry.754375154"
if(!grepl("=$", pre_fill_link)){
pre_fill_link <- paste0(pre_fill_link, "=")
}
p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
temp <- tempfile()
log_ <- getLog()
nrow_ <- max(unlist(lapply(log_, length)))
log_tbl <- data.frame(user = rep(log_$user, nrow_),
course_name = rep(log_$course_name, nrow_),
lesson_name = rep(log_$lesson_name, nrow_),
question_number = p(log_$question_number, nrow_, NA),
correct = p(log_$correct, nrow_, NA),
attempt = p(log_$attempt, nrow_, NA),
skipped = p(log_$skipped, nrow_, NA),
datetime = p(log_$datetime, nrow_, NA),
stringsAsFactors = FALSE)
write.csv(log_tbl, file = temp, row.names = FALSE)
encoded_log <- base64encode(temp)
browseURL(paste0(pre_fill_link, encoded_log))
}
}
|
\name{pcpt.prior<-}
\alias{pcpt.prior<-}
\title{
Generic Function - pcpt.prior<-
}
\description{
Generic function
}
\usage{
pcpt.prior(object)<-value
}
\arguments{
\item{object}{
The method used (if one exists) depends on the class of \code{object}.
}
\item{value}{
Replacement value
}
}
\details{
Generic Function
}
\value{
Depends on the class of \code{object}, see individual methods
}
\author{
Simon Taylor
Rebecca Killick
}
\seealso{
\code{\link{pcpt.prior<--methods}}
}
\examples{
x=new("pcpt") # new pcpt object
pcpt.prior(x) = list(Mprior = "pois", Mhyp = 1, spread = 1)
# replaces the existing pcpt.prior slot in x
}
\keyword{methods}
\keyword{pcpt}
\keyword{internal}
| /man/pcpt.prior-.Rd | no_license | taylors2/PeriodCPT | R | false | false | 708 | rd | \name{pcpt.prior<-}
\alias{pcpt.prior<-}
\title{
Generic Function - pcpt.prior<-
}
\description{
Generic function
}
\usage{
pcpt.prior(object)<-value
}
\arguments{
\item{object}{
Depending on the class of \code{object} depends on the method used (and if one exists)
}
\item{value}{
Replacement value
}
}
\details{
Generic Function
}
\value{
Depends on the class of \code{object}, see individual methods
}
\author{
Simon Taylor
Rebecca Killick
}
\seealso{
\code{\link{pcpt.prior<--methods}}
}
\examples{
x=new("pcpt") # new pcpt object
pcpt.prior(x) = list(Mprior = "pois", Mhyp = 1, spread = 1)
# replaces the existing pcpt.prior slot in x
}
\keyword{methods}
\keyword{pcpt}
\keyword{internal}
|
# Auto-generated (AFL fuzzing) regression input for
# CNull:::communities_individual_based_sampling_alpha; exercises the m = NULL,
# repetitions = 0 edge case with an 8x3 matrix of extreme doubles.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307019658e+77, 4.99473791974483e-196, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615784756-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307019658e+77, 4.99473791974483e-196, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#!/bin/Rscript
# Calculates the confidence interval of qscores for each city point for each of the groups received
library(ggplot2)
library(rmarkdown)
library(knitr)
#Calculates the variance that should be added or removed from the mean
# Half-width of the 95% normal-approximation confidence interval for the mean
# of `x`: z_{0.975} times the standard error sd(x)/sqrt(n). (Despite the old
# header comment, this is a CI half-width, not a variance.)
ic <- function(x) {
  z <- qnorm(1 - 0.05 / 2)
  std_err <- sd(x) / sqrt(length(x))
  z * std_err
}
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange several ggplot objects on a single page using grid viewports
# (the classic Cookbook-for-R helper).
#
# Args:
#   ...: ggplot objects; alternatively pass them as a list via plotlist.
#   plotlist: optional list of ggplot objects, appended to those in `...`.
#   file: NOTE(review) -- accepted but never used anywhere in the body.
#   cols: number of columns in the page layout (ignored when `layout` is given).
#   layout: optional index matrix; e.g. matrix(c(1,2,3,3), nrow=2, byrow=TRUE)
#           puts plots 1 and 2 on top and plot 3 across the whole bottom row.
#
# Side effects: prints the plots to the active graphics device.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
# single plot: no grid machinery needed
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
#Selecting only qscores
# Compute 95% confidence intervals of qscores for each photo in two groups,
# label each row with its group and neighbourhood, and persist/expose results.
#
# Args:
#   file1, file2: paths to whitespace-separated tables read with read.table():
#                 V1 = question, V2 = photo path ("<neighbourhood>/<file>"),
#                 V3 = mean qscore, V4..V103 = 100 sampled qscores.
#   type1, type2: group labels attached to the rows from file1 / file2.
#   shouldPlot:   retained for interface compatibility; the plotting code this
#                 flag used to guard is disabled, so it currently has no effect.
#
# Side effects: writes "teste1.txt" and "teste2.txt" to the working directory
# and assigns the per-neighbourhood subsets (centro, liberdade, catole) in the
# enclosing environment via <<-, matching the original script's behaviour.
analyseIC <- function(file1, file2, type1, type2, shouldPlot=FALSE){
data1 <- read.table(file1)
data2 <- read.table(file2)
# columns 4:103 hold the 100 sampled qscores; ic() gives the per-row CI half-width
newdata1 <- data1[c(4:103)]
newdata2 <- data2[c(4:103)]
icData1 <- apply(newdata1, 1, ic)
icData2 <- apply(newdata2, 1, ic)
# neighbourhood = first path component of the photo identifier
temp1 <- lapply(as.character(data1$V2), function(x) strsplit(x, split="/", fixed=TRUE)[[1]][1])
temp2 <- lapply(as.character(data2$V2), function(x) strsplit(x, split="/", fixed=TRUE)[[1]][1])
neigs1 <- unlist(lapply(temp1, '[[', 1))
neigs2 <- unlist(lapply(temp2, '[[', 1))
# assemble per-photo interval tables (mean +/- half-width) and save them
newframe1 <- data.frame(ques=data1$V1, photo=data1$V2, mean=data1$V3, dist=icData1,
                        inf=data1$V3-icData1, sup=data1$V3+icData1, neig=neigs1)
newframe1 <- newframe1[with(newframe1, order(photo)), ]
newframe1$type <- type1
write.table(newframe1, "teste1.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
# BUG FIX: the original computed inf/sup for group 2 from icData1 (group 1's
# half-widths) even though dist was icData2; use icData2 for both bounds.
newframe2 <- data.frame(ques=data2$V1, photo=data2$V2, mean=data2$V3, dist=icData2,
                        inf=data2$V3-icData2, sup=data2$V3+icData2, neig=neigs2)
newframe2 <- newframe2[with(newframe2, order(photo)), ]
newframe2$type <- type2
write.table(newframe2, "teste2.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
# stack both groups and renumber photos 1..n/2 so paired rows share an x position
total <- rbind(newframe1, newframe2)
size <- nrow(total)
total$photo <- 1:size
total$photo[c((size/2+1):size)] <- (1:(size/2))
# expose neighbourhood subsets in the enclosing environment (legacy behaviour)
centro <<- total[total$neig == "centro",]
liberdade <<- total[total$neig == "liberdade",]
catole <<- total[total$neig == "catole",]
}
#multiplot(g1, g2, g3, cols=1)
# solt$type <- "solteiro"
# casa$type <- "casado"
# novo <- rbind(solt, casa)
# novo$photo <- 1:156 + novo$photo [c(79:156) ] <- 1:78
# Command-line entry point: Rscript <this file> file1 file2 type1 type2
args <- commandArgs(trailingOnly = TRUE)
# NOTE(review): four arguments are consumed below, but only length > 1 is
# checked; running with 2-3 arguments produces NA labels -- confirm intent.
if (length(args) > 1){
file1 <- args[1]
file2 <- args[2]
type1 <- args[3]
type2 <- args[4]
analyseIC(file1, file2, type1, type2, TRUE)
}
| /scripts/analise/dadosNov15/usuarios/samples10000SemAleat/analisaICPorFoto.R | no_license | davidcmm/campinaPulse | R | false | false | 4,908 | r | #!/bin/Rscript
# Calculates the confidence interval of qscores for each city point for each of the groups received
library(ggplot2)
library(rmarkdown)
library(knitr)
#Calculates the variance that should be added or removed from the mean
ic <- function(x) {
#return (sd(x)/sqrt(length(x))*qt(.95,999))#95% confidence interval for a sample of 1000 items
#print (100 * qnorm(1-(0.05/2)) * sd(x) / (5 * mean(x)))^2
return (sd(x)/sqrt(length(x))*qnorm(1-(0.05/2)))#95% confidence interval, significance level of 0.05 (alpha) - sample 100
}
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
#Selecting only qscores
# Compute 95% confidence intervals of qscores for each photo in two groups,
# label each row with its group and neighbourhood, and persist/expose results.
#
# Args:
#   file1, file2: paths to whitespace-separated tables read with read.table():
#                 V1 = question, V2 = photo path ("<neighbourhood>/<file>"),
#                 V3 = mean qscore, V4..V103 = 100 sampled qscores.
#   type1, type2: group labels attached to the rows from file1 / file2.
#   shouldPlot:   retained for interface compatibility; the plotting code this
#                 flag used to guard is disabled, so it currently has no effect.
#
# Side effects: writes "teste1.txt" and "teste2.txt" to the working directory
# and assigns the per-neighbourhood subsets (centro, liberdade, catole) in the
# enclosing environment via <<-, matching the original script's behaviour.
analyseIC <- function(file1, file2, type1, type2, shouldPlot=FALSE){
data1 <- read.table(file1)
data2 <- read.table(file2)
# columns 4:103 hold the 100 sampled qscores; ic() gives the per-row CI half-width
newdata1 <- data1[c(4:103)]
newdata2 <- data2[c(4:103)]
icData1 <- apply(newdata1, 1, ic)
icData2 <- apply(newdata2, 1, ic)
# neighbourhood = first path component of the photo identifier
temp1 <- lapply(as.character(data1$V2), function(x) strsplit(x, split="/", fixed=TRUE)[[1]][1])
temp2 <- lapply(as.character(data2$V2), function(x) strsplit(x, split="/", fixed=TRUE)[[1]][1])
neigs1 <- unlist(lapply(temp1, '[[', 1))
neigs2 <- unlist(lapply(temp2, '[[', 1))
# assemble per-photo interval tables (mean +/- half-width) and save them
newframe1 <- data.frame(ques=data1$V1, photo=data1$V2, mean=data1$V3, dist=icData1,
                        inf=data1$V3-icData1, sup=data1$V3+icData1, neig=neigs1)
newframe1 <- newframe1[with(newframe1, order(photo)), ]
newframe1$type <- type1
write.table(newframe1, "teste1.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
# BUG FIX: the original computed inf/sup for group 2 from icData1 (group 1's
# half-widths) even though dist was icData2; use icData2 for both bounds.
newframe2 <- data.frame(ques=data2$V1, photo=data2$V2, mean=data2$V3, dist=icData2,
                        inf=data2$V3-icData2, sup=data2$V3+icData2, neig=neigs2)
newframe2 <- newframe2[with(newframe2, order(photo)), ]
newframe2$type <- type2
write.table(newframe2, "teste2.txt", sep="\t", row.names=FALSE, col.names=TRUE, quote=FALSE)
# stack both groups and renumber photos 1..n/2 so paired rows share an x position
total <- rbind(newframe1, newframe2)
size <- nrow(total)
total$photo <- 1:size
total$photo[c((size/2+1):size)] <- (1:(size/2))
# expose neighbourhood subsets in the enclosing environment (legacy behaviour)
centro <<- total[total$neig == "centro",]
liberdade <<- total[total$neig == "liberdade",]
catole <<- total[total$neig == "catole",]
}
#multiplot(g1, g2, g3, cols=1)
# solt$type <- "solteiro"
# casa$type <- "casado"
# novo <- rbind(solt, casa)
# novo$photo <- 1:156 + novo$photo [c(79:156) ] <- 1:78
# Command-line entry point: Rscript <this file> file1 file2 type1 type2
args <- commandArgs(trailingOnly = TRUE)
# NOTE(review): four arguments are consumed below, but only length > 1 is
# checked; running with 2-3 arguments produces NA labels -- confirm intent.
if (length(args) > 1){
file1 <- args[1]
file2 <- args[2]
type1 <- args[3]
type2 <- args[4]
analyseIC(file1, file2, type1, type2, TRUE)
}
|
library(plyr)
library(DataCombine)
library(xts)
library(taRifx)
source("EMD_Prophet_v4_fn_utility.R")
##########################
# Performance Indicators #
##########################
perf_indicators <- function(oos_df, oos_xts, oos_idx)
{
oos_df <- plyr::rename(oos_df, c("xts" = "SPX"))
p_res <- DropNA(oos_df[c("SPX", "pos_res")], Var = "pos_res", message = FALSE)
d_res <- as.Date(rownames(p_res))
p_res_s <- shift.data.frame(p_res, n=-1,wrap=FALSE,pad=TRUE)
pnl_res <- p_res_s["pos_res"]*(p_res["SPX"]-p_res_s["SPX"])
pnl_res[is.na(pnl_res)] <- 0.0
AUM_res <- sum(abs(p_res["pos_res"])*p_res["SPX"])/length(p_res[,"SPX"])
xts_res <- xts(x = pnl_res, order.by=d_res, tzone="America/New York") #indices 0 and 1
temp_res <- merge(oos_xts, xts_res, join='left', fill=0.0)
oos_df[ ,"pnl_res"] <- temp_res[ , "pos_res"]
oos_df[ ,"ret"] <- oos_df[ ,"pnl_res"]/AUM_res
cumperf <- cumprod(1+oos_df[ ,"ret"])
deltat <- as.numeric(oos_idx[length(oos_idx)]-oos_idx[1], units="days")/365.25
freq <- length(oos_df[ ,"ret"]) / deltat
aSR <- mean(oos_df[ ,"ret"]) / sd(oos_df[ ,"ret"]) * sqrt(freq)
aRoR <- as.numeric((cumperf[length(cumperf)] - 1.0) / deltat * 100.0)
freq_days <- deltat * 365.25 / length(d_res)
retspx <- oos_df[ ,"SPX"]/lag(oos_df[ ,"SPX"], 1)-1
retspx[is.na(retspx)] <- 0.0
cumspx <- cumprod(1+retspx)
tCost <- NA
tRate <- NA
perf_ind <- list("aSR"=aSR, "aRoR"=aRoR, "freq_days"=freq_days, "tCost"=tCost, "tRate"=tRate,
"cumperf"=cumperf, "cumspx"=cumspx)
return(perf_ind)
} | /Performance_Indicators.R | no_license | rstreppa/strategies-PerfIndicators | R | false | false | 1,652 | r | library(plyr)
library(DataCombine)
library(xts)
library(taRifx)
source("EMD_Prophet_v4_fn_utility.R")
##########################
# Performance Indicators #
##########################
perf_indicators <- function(oos_df, oos_xts, oos_idx)
{
oos_df <- plyr::rename(oos_df, c("xts" = "SPX"))
p_res <- DropNA(oos_df[c("SPX", "pos_res")], Var = "pos_res", message = FALSE)
d_res <- as.Date(rownames(p_res))
p_res_s <- shift.data.frame(p_res, n=-1,wrap=FALSE,pad=TRUE)
pnl_res <- p_res_s["pos_res"]*(p_res["SPX"]-p_res_s["SPX"])
pnl_res[is.na(pnl_res)] <- 0.0
AUM_res <- sum(abs(p_res["pos_res"])*p_res["SPX"])/length(p_res[,"SPX"])
xts_res <- xts(x = pnl_res, order.by=d_res, tzone="America/New York") #indices 0 and 1
temp_res <- merge(oos_xts, xts_res, join='left', fill=0.0)
oos_df[ ,"pnl_res"] <- temp_res[ , "pos_res"]
oos_df[ ,"ret"] <- oos_df[ ,"pnl_res"]/AUM_res
cumperf <- cumprod(1+oos_df[ ,"ret"])
deltat <- as.numeric(oos_idx[length(oos_idx)]-oos_idx[1], units="days")/365.25
freq <- length(oos_df[ ,"ret"]) / deltat
aSR <- mean(oos_df[ ,"ret"]) / sd(oos_df[ ,"ret"]) * sqrt(freq)
aRoR <- as.numeric((cumperf[length(cumperf)] - 1.0) / deltat * 100.0)
freq_days <- deltat * 365.25 / length(d_res)
retspx <- oos_df[ ,"SPX"]/lag(oos_df[ ,"SPX"], 1)-1
retspx[is.na(retspx)] <- 0.0
cumspx <- cumprod(1+retspx)
tCost <- NA
tRate <- NA
perf_ind <- list("aSR"=aSR, "aRoR"=aRoR, "freq_days"=freq_days, "tCost"=tCost, "tRate"=tRate,
"cumperf"=cumperf, "cumspx"=cumspx)
return(perf_ind)
} |
#########################################
## The Perks of Being a Lawmaker ##
## Kevin Fahey - Dissertation May 2017 ##
## Prepared 2018-04-07 ##
#########################################
######################
## Clear Everything ##
######################
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace when this
# script is sourced into an interactive session; acceptable for a standalone
# replication script, but avoid relying on it from other code.
rm(list=ls())
###########################
## Set Working Directory ##
###########################
# Example: setwd("/File/Subfile")
######################
## Load in Packages ##
######################
library(foreign)
library(sandwich)
library(xtable)
library(lme4)
library(effects)
library(Matching)
library(rgenoud)
library(car)
library(cem)
library(arm)
library(lattice)
library(plm)
library(stargazer)
library(aod)
library(ggplot2)
library(compactr)
library(MASS)
library(stats)
library(dplyr)
library(ecm)
options(scipen=7)
options(digits=3)
#########################
## ClusterMod function ##
#########################
# Cluster-robust inference for a fitted regression model.
#
# Args:
#   model:   a fitted model (e.g. from lm()) exposing df.residual.
#   cluster: the clustering variable, one value per observation.
#
# Returns: a two-element list -- (1) the coeftest() table computed under the
# clustered variance-covariance matrix, and (2) a matrix (rounded to 4 dp) of
# estimates, clustered SEs and 95% confidence bounds.
clusterMod <- function(model, cluster) {
  require(multiwayvcov)
  require(lmtest)
  vcov_cl <- cluster.vcov(model, cluster)
  ct <- coeftest(model, vcov_cl)
  # 95% critical value from the t distribution on the residual df
  crit <- qt(.975, model$df.residual)
  cse <- sqrt(diag(vcov_cl))
  est <- cbind(ct[, 1], cse, ct[, 1] - crit * ct[, 2], ct[, 1] + crit * ct[, 2])
  colnames(est) <- c("Estimate", "Clustered SE", "LowerCI", "UpperCI")
  list(ct, round(est, 4))
}
#####################
## Read in Dataset ##
#####################
# Legislator-year panel; foreign::read.dta() reads Stata (<= v12) files.
# NOTE(review): hard-coded home-directory path -- adjust per machine.
dat <- read.dta("~/2017-1-22 perks of being a lawmaker.dta")
################
## OLS Models ##
################
# DV is loginc (logged legislator income, 2001 $USD). For every model two
# sets of SEs are computed for the stargazer table: clustered by year
# (clusterMod) and heteroskedasticity-robust (vcovHC).
## Main Model, no fixed effects ##
ols.main<-lm(loginc ~ votemarg + leadership +
               majpty + chair + rules + fintax +
               approp + agriculture + education +
               health + judiciary + highpost + lowpost +
               age + tenure + postgrad + legal + bizman +
               female + white + gdp01, data = dat)
summary(ols.main)
se.cluster.main<-as.matrix(clusterMod(ols.main, dat$year)[[1]])[,2]
se.robust.main<-sqrt(diag(vcovHC(ols.main)))
## Main model, fixed effects ##
# Year and member fixed effects absorb the member-level demographic controls.
ols.fe.main<-lm(loginc ~ votemarg + leadership +
                  majpty + chair + rules + fintax +
                  approp + agriculture + education +
                  health + judiciary +
                  as.factor(year) +
                  as.factor(memberid), data=dat)
summary(ols.fe.main)
se.cluster.fe.main<-as.matrix(clusterMod(ols.fe.main, dat$year)[[1]])[,2]
se.robust.fe.main<-sqrt(diag(vcovHC(ols.fe.main)))
## Electoral Safety Only model, no fixed effects ##
# BUG FIX: the original formula listed `bizman` twice; lm() silently drops
# the aliased duplicate, so removing it leaves the fit unchanged.
ols.ev<-lm(loginc ~ votemarg + agriculture + education +
             health + judiciary + highpost + lowpost +
             age + tenure + postgrad + legal +
             bizman + female + white +
             gdp01, data = dat)
# Clustered (by year) and heteroskedasticity-robust SEs for the table below.
se.cluster.ev<-as.matrix(clusterMod(ols.ev, dat$year)[[1]])[,2]
se.robust.ev<-sqrt(diag(vcovHC(ols.ev)))
## Electoral Safety Only model, fixed effects ##
ols.ev.fe<-lm(loginc ~ votemarg + agriculture + education +
                health + judiciary + as.factor(year) +
                as.factor(memberid), data = dat)
se.cluster.ev.fe<-as.matrix(clusterMod(ols.ev.fe, dat$year)[[1]])[,2]
se.robust.ev.fe<-sqrt(diag(vcovHC(ols.ev.fe)))
##Access hypothesis only model, no fixed effects ##
# BUG FIX: the original formula listed `bizman` twice; lm() silently drops
# the aliased duplicate, so removing it leaves the fit unchanged.
ols.ac<-lm(loginc ~ leadership + majpty + chair +
             rules + fintax + approp + agriculture +
             education + health + judiciary +
             highpost + lowpost +
             age + tenure + postgrad + legal +
             bizman + female + white +
             gdp01, data = dat)
# Clustered (by year) and heteroskedasticity-robust SEs for the table below.
se.cluster.ac<-as.matrix(clusterMod(ols.ac, dat$year)[[1]])[,2]
se.robust.ac<-sqrt(diag(vcovHC(ols.ac)))
##Access hypothesis only model, fixed effects ##
ols.ac.fe<-lm(loginc ~ leadership + majpty + chair +
                rules + fintax + approp + agriculture +
                education + health+ judiciary +
                as.factor(year) + as.factor(memberid),
              data = dat)
se.cluster.ac.fe<-as.matrix(clusterMod(ols.ac.fe, dat$year)[[1]])[,2]
se.robust.ac.fe<-sqrt(diag(vcovHC(ols.ac.fe)))
## Combine all six OLS models in stargazer, with year-clustered standard errors ##
# (the se = list(...) argument substitutes the clusterMod SEs computed above
# for stargazer's default model-based SEs; FE dummies are suppressed via omit)
stargazer(ols.main, ols.fe.main, ols.ev,
          ols.ev.fe, ols.ac, ols.ac.fe,
          se = list(se.cluster.main,
                    se.cluster.fe.main,
                    se.cluster.ev,
                    se.cluster.ev.fe,
                    se.cluster.ac,
                    se.cluster.ac.fe),
          no.space=T,
          keep.stat = c("n", "adj.rsq", "f"),
          omit = c("memberid", "year"),
          dep.var.labels = "Income (2001 $USD)",
          covariate.labels = c("Vote Share",
                               "Party Leaders",
                               "Majority Party",
                               "Committee Chairs",
                               "Rules Committee",
                               "Finance & Tax Committee",
                               "Appropriations Committee",
                               "Agriculture Committee",
                               "Education Committee",
                               "Health Committee",
                               "Judiciary Committee",
                               "Ran For Higher Office",
                               "Ran For Lower Office",
                               "Age",
                               "Tenure",
                               "Post-Graduate Degree",
                               "Legal Career",
                               "Business Career",
                               "Female",
                               "White",
                               "GDP (2001 $USD)",
                               "Intercept"))
#################################
## Error Correction Model ##
## See Stata Do File ##
#################################
###########################################################
## Simple propensity score match for Election Increases ##
###########################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Outcome Y: change in income; treatment Tr: any (>= 0) increase in vote margin.
Y = dat$difinc
Tr = ifelse(dat$electdif>=0, 1, 0)
X = cbind(dat[,c("ruleslag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year")])
# NOTE(review): `t` shadows base::t(); it works here but is fragile naming.
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ ruleslag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
# Z: covariates used for bias adjustment inside Match().
Z = cbind(t[,c("agelag", "year")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
# Sweep calipers 0.1, 0.2, ..., 2.0 and record the ATT at each length.
# BUG FIX: matrix("NA", ...) created a *character* matrix, silently coercing
# every estimate/SE/z-score stored below to text; use a numeric NA matrix.
match.ev<-matrix(NA_real_, nrow = 20, ncol = 5)
for(i in 1:20){
  match1<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=i/10)
  match.ev[i,1]<-match1$est[1] # estimate for each caliper #
  match.ev[i,2]<-match1$se # standard error for each caliper
  match.ev[i,3]<-match1$est[1]/match1$se # z-score for each caliper #
  match.ev[i,4]<-match1$caliper[1] # caliper length #
  match.ev[i,5]<-match1$wnobs # observations at each caliper #
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
match1<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper= 0.5)
summary(match1)
# Stack matched treated and control rows to test covariate balance.
bal.vote<-as.data.frame(rbind(t[match1$index.treated,], t[match1$index.control,]))
mvote <- glm(Tr ~ ruleslag + approplag + fintaxlag +
               agrilag + judiclag + edulag + healthlag +
               leaderlag + agelag + tenurelag + majpty +
               postgrad + female + white + black + hispanic +
               year, data=bal.vote, family=binomial(link="logit"))
voter<-summary(mvote)
se.voter<-sqrt(diag(vcovHC(mvote)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.vote<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.vote[i,3]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$statistic
  t.test.vote[i,1]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[1]
  t.test.vote[i,2]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.vote[,3], pch=16, main="Normal Q-Q Plot, Vote Margin Difference (Any Increase)", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
## iterate through each covariate to test pre- and post- balance ##
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match1, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
## print balance statistics ##
xtable(mb1, digits = 3)
####################################################################################
## Simple propensity score match for Election Increases, 5% increase versus less ###
####################################################################################
# Same design, but treatment is a vote-margin increase of at least 5 points.
Y = dat$difinc
Tr = ifelse(dat$electdif >= 0.05, 1, 0)
X = cbind(dat[,c("ruleslag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year")])
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ ruleslag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("judiclag", "agelag", "tenurelag", "black", "hispanic")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
# BUG FIX: matrix("NA", ...) created a *character* matrix, silently coercing
# every estimate/SE/z-score stored below to text; use a numeric NA matrix.
match.ev<-matrix(NA_real_, nrow = 20, ncol = 5)
for(i in 1:20){
  match2<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=i/10)
  match.ev[i,1]<-match2$est[1] # estimate for each caliper #
  match.ev[i,2]<-match2$se # standard error for each caliper
  match.ev[i,3]<-match2$est[1]/match2$se # z-score for each caliper #
  match.ev[i,4]<-match2$caliper[1] # caliper length #
  match.ev[i,5]<-match2$wnobs # treated observations at each caliper #
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
match2<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match2)
## check for balance ##
# Stack matched treated and control rows to test covariate balance.
bal.vote<-as.data.frame(rbind(t[match2$index.treated,], t[match2$index.control,]))
mvote<-glm(Tr ~ ruleslag + approplag + fintaxlag +
             agrilag + judiclag + edulag + healthlag +
             leaderlag + agelag + tenurelag + majpty +
             postgrad + female + white + black + hispanic +
             year, data=bal.vote, family=binomial(link="logit"))
voter<-summary(mvote)
se.voter<-sqrt(diag(vcovHC(mvote)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.vote<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.vote[i,3]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$statistic
  t.test.vote[i,1]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[1]
  t.test.vote[i,2]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.vote[,3], pch=16, main="Normal Q-Q Plot, Vote Margin Difference (5%)", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
## iterate through each covariate to test pre- and post- balance ##
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match2, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
#####################################################################################
## Simple propensity score match for Election Increases, 10% increase versus less ###
#####################################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Same design, but treatment is a vote-margin increase of at least 10 points.
Y = dat$difinc
Tr = ifelse(dat$electdif>=.10, 1, 0)
X = cbind(dat[,c("ruleslag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year")])
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ ruleslag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("majpty")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
# BUG FIX: matrix("NA", ...) created a *character* matrix, silently coercing
# every estimate/SE/z-score stored below to text; use a numeric NA matrix.
match.ev.10<-matrix(NA_real_, nrow = 20, ncol = 5)
for(i in 1:20){
  match3<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=i/10)
  match.ev.10[i,1]<-match3$est[1] # estimate for each caliper #
  match.ev.10[i,2]<-match3$se # standard error for each caliper
  match.ev.10[i,3]<-match3$est[1]/match3$se # z-score for each caliper #
  match.ev.10[i,4]<-match3$caliper[1] # caliper length #
  match.ev.10[i,5]<-match3$wnobs # observations at each caliper #
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
match3<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match3)
## check for balance ##
# Stack matched treated and control rows to test covariate balance.
bal.vote<-as.data.frame(rbind(t[match3$index.treated,], t[match3$index.control,]))
mvote<-glm(Tr ~ ruleslag + approplag + fintaxlag +
             agrilag + judiclag + edulag + healthlag +
             leaderlag + agelag + tenurelag + majpty +
             postgrad + female + white + black + hispanic +
             year, data=bal.vote, family=binomial(link="logit"))
voter<-summary(mvote)
se.voter<-sqrt(diag(vcovHC(mvote)))
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.vote<-matrix(NA, nrow=17, ncol=3)
## t-tests ##
for(i in 1:17){
  t.test.vote[i,3]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$statistic
  t.test.vote[i,1]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[1]
  t.test.vote[i,2]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[2]
}
## qq-plots ##
# BUG FIX: this is the 10% treatment section, but the original plot title
# read "(5%)" (copy-paste from the previous section).
qqnorm(t.test.vote[,3], pch=16, main="Normal Q-Q Plot, Vote Margin Difference (10%)", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
## iterate through each covariate to test pre- and post- balance ##
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match3, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
#######################################################
## Simple propensity score match for Rules committee ##
#######################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Treatment: change in Rules Committee membership (rulesdif); rows where the
# member left the committee (rulesdif < 0) are excluded from the analysis.
Y = dat$difinc[dat$rulesdif>=0]
Tr = dat$rulesdif[dat$rulesdif>=0]
X = cbind(dat[,c("votelag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "rulesdif")])
X<-X[X$rulesdif>=0,]
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("votelag", "agelag")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
# BUG FIX: matrix("NA", ...) created a *character* matrix, silently coercing
# every estimate/SE/z-score stored below to text; use a numeric NA matrix.
match.rules<-matrix(NA_real_, nrow = 20, ncol = 5)
for(i in 1:20){
  match4<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=i/10)
  match.rules[i,1]<-match4$est[1] # estimate for each caliper #
  match.rules[i,2]<-match4$se # standard error for each caliper
  match.rules[i,3]<-match4$est[1]/match4$se # z-score for each caliper #
  match.rules[i,4]<-match4$caliper[1] # caliper length #
  match.rules[i,5]<-match4$wnobs # observations at each caliper #
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
match4<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match4)
# Stack matched treated and control rows to test covariate balance.
bal.rules<-as.data.frame(rbind(t[match4$index.treated,], t[match4$index.control,]))
mrules<-glm(Tr ~ votelag + approplag + fintaxlag +
              agrilag + judiclag + edulag + healthlag +
              leaderlag + agelag + tenurelag + majpty +
              postgrad + female + white + black + hispanic +
              year, data=bal.rules, family=binomial(link="logit"))
rulesc<-summary(mrules)
se.rules<-sqrt(diag(vcovHC(mrules)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.rules<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.rules[i,3]<-t.test(bal.rules[i+2][bal.rules$Tr==1,], bal.rules[i+2][bal.rules$Tr==0,])$statistic
  t.test.rules[i,1]<-t.test(bal.rules[i+2][bal.rules$Tr==1,], bal.rules[i+2][bal.rules$Tr==0,])$estimate[1]
  t.test.rules[i,2]<-t.test(bal.rules[i+2][bal.rules$Tr==1,], bal.rules[i+2][bal.rules$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.rules[,3], pch=16, main="Normal Q-Q Plot, Rules Committee", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match4, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Appropriations committee ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Treatment: change in Appropriations membership (apropdif >= 0 kept).
Y = dat$difinc[dat$apropdif>=0]
Tr = dat$apropdif[dat$apropdif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "apropdif")])
X<-X[X$apropdif>=0,]
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + ruleslag +
                   fintaxlag + agrilag + judiclag +
                   edulag + healthlag + leaderlag +
                   agelag + tenurelag + majpty +
                   postgrad + female + white +
                   black + hispanic + year,
                 family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("fintaxlag")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
# BUG FIX: matrix("NA", ...) created a *character* matrix, silently coercing
# every estimate/SE/z-score stored below to text; use a numeric NA matrix.
match.approp<-matrix(NA_real_, nrow = 20, ncol = 5)
for(i in 1:20){
  match5<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=i/10)
  match.approp[i,1]<-match5$est[1] # estimate for each caliper #
  match.approp[i,2]<-match5$se # standard error for each caliper
  match.approp[i,3]<-match5$est[1]/match5$se # z-score for each caliper #
  match.approp[i,4]<-match5$caliper[1] # caliper length #
  match.approp[i,5]<-match5$wnobs # observations at each caliper #
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
match5<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match5)
# Stack matched treated and control rows to test covariate balance.
bal.approp<-as.data.frame(rbind(t[match5$index.treated,], t[match5$index.control,]))
mapprop<-glm(Tr ~ votelag + ruleslag + fintaxlag +
               agrilag + judiclag + edulag + healthlag +
               leaderlag + agelag + tenurelag + majpty +
               postgrad + female + white + black + hispanic +
               year, data=bal.approp, family=binomial(link="logit"))
appropc<-summary(mapprop)
se.approp<-sqrt(diag(vcovHC(mapprop)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.approp<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.approp[i,3]<-t.test(bal.approp[i+2][bal.approp$Tr==1,],
                             bal.approp[i+2][bal.approp$Tr==0,])$statistic
  t.test.approp[i,1]<-t.test(bal.approp[i+2][bal.approp$Tr==1,],
                             bal.approp[i+2][bal.approp$Tr==0,])$estimate[1]
  t.test.approp[i,2]<-t.test(bal.approp[i+2][bal.approp$Tr==1,],
                             bal.approp[i+2][bal.approp$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.approp[,3], pch=16, main="Normal Q-Q Plot, Appropriations Committee",
       xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match5, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Finance & Tax committee ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Treatment: change in Finance & Tax membership (fintaxdif >= 0 kept).
Y = dat$difinc[dat$fintaxdif>=0]
Tr = dat$fintaxdif[dat$fintaxdif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "approplag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "fintaxdif")])
X<-X[X$fintaxdif>=0,]
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + ruleslag +
                   approplag + agrilag + judiclag +
                   edulag + healthlag + leaderlag +
                   agelag + tenurelag + majpty +
                   postgrad + female + white +
                   black + hispanic + year,
                 family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("tenurelag")]) # don't really need to do but can always optimize balance #
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
# BUG FIX: matrix("NA", ...) created a *character* matrix, silently coercing
# every estimate/SE/z-score stored below to text; use a numeric NA matrix.
match.fintax<-matrix(NA_real_, nrow = 20, ncol = 5)
for(i in 1:20){
  match6<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=i/10)
  match.fintax[i,1]<-match6$est[1] # estimate for each caliper #
  match.fintax[i,2]<-match6$se # standard error for each caliper
  match.fintax[i,3]<-match6$est[1]/match6$se # z-score for each caliper #
  match.fintax[i,4]<-match6$caliper[1] # caliper length #
  match.fintax[i,5]<-match6$wnobs # observations at each caliper #
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
match6<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match6)
# Stack matched treated and control rows to test covariate balance.
bal.fintax<-as.data.frame(rbind(t[match6$index.treated,], t[match6$index.control,]))
mfintax<-glm(Tr ~ votelag + ruleslag + approplag +
               agrilag + judiclag + edulag + healthlag +
               leaderlag + agelag + tenurelag + majpty +
               postgrad + female + white + black + hispanic +
               year, data=bal.fintax, family=binomial(link="logit"))
fintaxc <- summary(mfintax)
se.fintax<-sqrt(diag(vcovHC(mfintax)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.fintax<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.fintax[i,3]<-t.test(bal.fintax[i+2][bal.fintax$Tr==1,],
                             bal.fintax[i+2][bal.fintax$Tr==0,])$statistic
  t.test.fintax[i,1]<-t.test(bal.fintax[i+2][bal.fintax$Tr==1,],
                             bal.fintax[i+2][bal.fintax$Tr==0,])$estimate[1]
  t.test.fintax[i,2]<-t.test(bal.fintax[i+2][bal.fintax$Tr==1,],
                             bal.fintax[i+2][bal.fintax$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.fintax[,3], pch=16, main="Normal Q-Q Plot, Finance and Tax Committee",
       xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match6, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Leadership Position ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Treatment: change in leadership status (leaddif >= 0 kept).
Y = dat$difinc[dat$leaddif>=0]
Tr = dat$leaddif[dat$leaddif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "approplag", "agrilag", "judiclag", "edulag", "healthlag", "fintaxlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "leaddif")])
X<-X[X$leaddif>=0,]
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + ruleslag +
                   fintaxlag + agrilag + judiclag +
                   edulag + healthlag + approplag +
                   agelag + tenurelag + majpty +
                   postgrad + female + white +
                   black + hispanic + year,
                 family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("ruleslag")])
#####################################################
## For Leaders, to 2 caliper and check for balance ##
#####################################################
# Leaders are few, so a wide (2 SD) caliper is used instead of a sweep.
match7<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=2)
summary(match7)
# Stack matched treated and control rows to test covariate balance.
bal.lead<-as.data.frame(rbind(t[match7$index.treated,], t[match7$index.control,]))
mlead <- glm(Tr ~ votelag + ruleslag + fintaxlag +
               agrilag + judiclag + edulag + healthlag +
               approplag + agelag + tenurelag + majpty +
               postgrad + female + white + black + hispanic +
               year, data=bal.lead, family=binomial(link="logit"))
leadc<-summary(mlead)
se.lead<-sqrt(diag(vcovHC(mlead)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.lead<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.lead[i,3]<-t.test(bal.lead[i+2][bal.lead$Tr==1,],
                           bal.lead[i+2][bal.lead$Tr==0,])$statistic
  t.test.lead[i,1]<-t.test(bal.lead[i+2][bal.lead$Tr==1,],
                           bal.lead[i+2][bal.lead$Tr==0,])$estimate[1]
  t.test.lead[i,2]<-t.test(bal.lead[i+2][bal.lead$Tr==1,],
                           bal.lead[i+2][bal.lead$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.lead[,3], pch=16, main="Normal Q-Q Plot, Leadership Position",
       xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
# mb1 columns 1-5 hold pre-matching statistics, 6-10 post-matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match7, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Committee Chair ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
# Treatment: change in committee-chair status (chairdif >= 0 kept).
Y = dat$difinc[dat$chairdif>=0]
Tr = dat$chairdif[dat$chairdif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "approplag", "agrilag", "judiclag", "edulag", "healthlag", "fintaxlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "chairdif")])
X<-X[X$chairdif>=0,]
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + ruleslag +
                   fintaxlag + agrilag + judiclag +
                   edulag + healthlag + approplag +
                   agelag + tenurelag + majpty +
                   postgrad + female + white +
                   black + hispanic + year,
                 family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("votelag", "ruleslag", "agelag", "hispanic")])
######################################################
## For Cmte Chairs, 2 caliper and check for balance ##
######################################################
# Chairs are few, so a wide (2 SD) caliper is used instead of a sweep.
match8<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=2)
summary(match8)
# Stack matched treated and control rows to test covariate balance.
bal.chair<-as.data.frame(rbind(t[match8$index.treated,], t[match8$index.control,]))
mchair <- glm(Tr ~ votelag + ruleslag + fintaxlag +
                agrilag + judiclag + edulag + healthlag +
                approplag + agelag + tenurelag + majpty +
                postgrad + female + white + black + hispanic +
                year, data=bal.chair, family=binomial(link="logit"))
chairc<-summary(mchair)
se.chair<-sqrt(diag(vcovHC(mchair)))
## t-tests ##
# Per covariate: col 1 = treated mean, col 2 = control mean, col 3 = t-stat.
t.test.chair<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.chair[i,3]<-t.test(bal.chair[i+2][bal.chair$Tr==1,],
                            bal.chair[i+2][bal.chair$Tr==0,])$statistic
  t.test.chair[i,1]<-t.test(bal.chair[i+2][bal.chair$Tr==1,],
                            bal.chair[i+2][bal.chair$Tr==0,])$estimate[1]
  t.test.chair[i,2]<-t.test(bal.chair[i+2][bal.chair$Tr==1,],
                            bal.chair[i+2][bal.chair$Tr==0,])$estimate[2]
}
## qq-plots ##
## BUG FIX: original plotted t.test.lead[,3] — the *leadership* balance
## t-statistics left over from the previous section — instead of the
## committee-chair t-statistics (t.test.chair) computed immediately above.
qqnorm(t.test.chair[,3], pch=16, main="Normal Q-Q Plot, Committee Chairs",
       xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1) # reference line for visual comparison
## report pre- and post- balance means and variances ##
## mb1 cols 1-5: pre-match Tr/Co means, variances, KS/t p-value;
## cols 6-10: the same statistics after matching on match8.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match8, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3) # LaTeX balance table for the chair match
##############################################
## Difference-in-Differences Matching Table ##
##############################################
## Collect the ATT estimate, standard error, and matched sample size from
## each Match() object into one summary table. Row order mirrors the labels
## below: three vote-share treatments, then leadership, committee chair,
## and the three prestige committees.
match.list <- list(match1, match2, match3,
                   match7, match8,
                   match4, match6, match5)
est <- unlist(lapply(match.list, function(m) m$est))
se  <- unlist(lapply(match.list, function(m) m$se))
n   <- unlist(lapply(match.list,
                     function(m) length(m$index.treated) + length(m$index.control)))
table.match <- cbind(est, se, n)
rownames(table.match) <- c("Vote Share", "Vote Share 0.05",
                           "Vote Share 0.10",
                           "Leadership", "Cmte Chair", "Rules Cmte",
                           "Finance & Tax Cmte", "Appropriations Cmte")
#########################
## Rules Committee t+1 ##
#########################
## Robustness check: restrict the sample to member-years where the member
## sat on Rules in the prior period (ruleslag >= 1) and re-fit the OLS models.
rules.dat <- dat[dat$ruleslag>= 1,]
##################################
## Re-Do OLS Main Model, And FE ##
##################################
## Main Model, no fixed effects ##
ols.main.rules <- lm(loginc ~ votemarg + leadership +
                       majpty + chair + rules + fintax +
                       approp + agriculture + education +
                       health + judiciary + highpost + lowpost +
                       age + tenure + postgrad + legal + bizman +
                       female + white + gdp01, data = rules.dat)
summary(ols.main.rules)
## year-clustered and heteroskedasticity-robust SEs
se.cluster.main.rules <- as.matrix(clusterMod(ols.main.rules, rules.dat$year)[[1]])[,2]
se.robust.main.rules <- sqrt(diag(vcovHC(ols.main.rules)))
## Main model, fixed effects ##
## Member and year fixed effects absorb the time-invariant controls.
ols.fe.main.rules <- lm(loginc ~ votemarg + leadership +
                          majpty + chair + rules + fintax +
                          approp + agriculture + education +
                          health + judiciary +
                          as.factor(year) +
                          as.factor(memberid), data=rules.dat)
summary(ols.fe.main.rules)
se.cluster.fe.main.rules <- as.matrix(clusterMod(ols.fe.main.rules, rules.dat$year)[[1]])[,2]
se.robust.fe.main.rules <- sqrt(diag(vcovHC(ols.fe.main.rules)))
################################
## ECM Model in Stata do-file ##
## Stargaze OLS Models here ##
################################
## LaTeX regression table; FE dummies are suppressed via omit=.
stargazer(ols.main.rules, ols.fe.main.rules,
          se = list(se.cluster.main.rules,
                    se.cluster.fe.main.rules),
          no.space=T,
          keep.stat = c("n", "adj.rsq", "f"),
          omit = c("memberid", "year"),
          dep.var.labels = "Income (2001 $USD)",
          covariate.labels = c("Vote Share",
                               "Party Leaders",
                               "Majority Party",
                               "Committee Chairs",
                               "Rules Committee",
                               "Finance & Tax Committee",
                               "Appropriations Committee",
                               "Agriculture Committee",
                               "Education Committee",
                               "Health Committee",
                               "Judiciary Committee",
                               "Ran For Higher Office",
                               "Ran For Lower Office",
                               "Age",
                               "Tenure",
                               "Post-Graduate Degree",
                               "Legal Career",
                               "Business Career",
                               "Female",
                               "White",
                               "GDP (2001 $USD)",
                               "Intercept"))
| /Perks of Being a Lawmaker R Script Master.R | no_license | KevinFahey65/Perks-of-Being-a-Lawmaker | R | false | false | 42,419 | r | #########################################
## The Perks of Being a Lawmaker ##
## Kevin Fahey - Dissertation May 2017 ##
## Prepared 2018-04-07 ##
#########################################
######################
## Clear Everything ##
######################
rm(list=ls())
###########################
## Set Working Directory ##
###########################
# Example: setwd("/File/Subfile")
######################
## Load in Packages ##
######################
library(foreign)
library(sandwich)
library(xtable)
library(lme4)
library(effects)
library(Matching)
library(rgenoud)
library(car)
library(cem)
library(arm)
library(lattice)
library(plm)
library(stargazer)
library(aod)
library(ggplot2)
library(compactr)
library(MASS)
library(stats)
library(dplyr)
library(ecm)
options(scipen=7)
options(digits=3)
#########################
## ClusterMod function ##
#########################
## Compute cluster-robust standard errors and confidence intervals for a
## fitted lm/glm, clustering on `cluster`.
##
## Args:
##   model   - a fitted lm/glm object
##   cluster - vector identifying the cluster of each observation
##   level   - confidence level for the interval (default 0.95 reproduces
##             the original hard-coded 95% interval exactly)
## Returns a list:
##   [[1]] coeftest table using the clustered vcov
##   [[2]] matrix of estimates, clustered SEs, and CI bounds (rounded to 4 dp)
clusterMod<-function(model, cluster, level = 0.95)
{
  require(multiwayvcov)  # NOTE(review): require() tolerated inside a function,
  require(lmtest)        # but requireNamespace() + :: would be more robust
  vcovCL<-cluster.vcov(model, cluster)
  coef<-coeftest(model, vcovCL)
  #w<-waldtest(model, vcov = vcovCL, test = "F")
  get_confint<-function(model, vcovCL){
    # two-sided critical value at the requested confidence level
    t<-qt(1 - (1 - level) / 2, model$df.residual)
    ct<-coeftest(model, vcovCL)
    cse<-sqrt(diag(vcovCL))
    est<-cbind(ct[,1], cse,ct[,1]-t*ct[,2], ct[,1]+t*ct[,2])
    colnames(est)<-c("Estimate", "Clustered SE","LowerCI","UpperCI")
    return(est)
  }
  ci<-round(get_confint(model, vcovCL),4)
  return(list(coef, ci))
}
#####################
## Read in Dataset ##
#####################
## Stata file read via foreign::read.dta; path assumes the file lives in
## the user's home directory — TODO confirm before re-running.
dat <- read.dta("~/2017-1-22 perks of being a lawmaker.dta")
################
## OLS Models ##
################
## Main Model, no fixed effects ##
## DV: log income; regressors combine electoral-safety and access measures.
ols.main<-lm(loginc ~ votemarg + leadership +
               majpty + chair + rules + fintax +
               approp + agriculture + education +
               health + judiciary + highpost + lowpost +
               age + tenure + postgrad + legal + bizman +
               female + white + gdp01, data = dat)
summary(ols.main)
## year-clustered and heteroskedasticity-robust SEs (column 2 = clustered SE)
se.cluster.main<-as.matrix(clusterMod(ols.main, dat$year)[[1]])[,2]
se.robust.main<-sqrt(diag(vcovHC(ols.main)))
## Main model, fixed effects ##
## Member and year dummies absorb time-invariant member characteristics.
ols.fe.main<-lm(loginc ~ votemarg + leadership +
                  majpty + chair + rules + fintax +
                  approp + agriculture + education +
                  health + judiciary +
                  as.factor(year) +
                  as.factor(memberid), data=dat)
summary(ols.fe.main)
se.cluster.fe.main<-as.matrix(clusterMod(ols.fe.main, dat$year)[[1]])[,2]
se.robust.fe.main<-sqrt(diag(vcovHC(ols.fe.main)))
## Electoral Safety Only model, no fixed effects ##
## Tests the electoral-safety hypothesis alone (access variables excluded).
## FIX: the original formula listed `bizman` twice (typo); lm() silently
## drops duplicated terms, so removing it leaves the fitted model unchanged.
ols.ev <- lm(loginc ~ votemarg + agriculture + education +
               health + judiciary + highpost + lowpost +
               age + tenure + postgrad + legal +
               bizman + female + white +
               gdp01, data = dat)
## year-clustered and heteroskedasticity-robust SEs
se.cluster.ev<-as.matrix(clusterMod(ols.ev, dat$year)[[1]])[,2]
se.robust.ev<-sqrt(diag(vcovHC(ols.ev)))
## Electoral Safety Only model, fixed effects ##
## Time-invariant demographics drop out with the member fixed effects.
ols.ev.fe<-lm(loginc ~ votemarg + agriculture + education +
                health + judiciary + as.factor(year) +
                as.factor(memberid), data = dat)
se.cluster.ev.fe<-as.matrix(clusterMod(ols.ev.fe, dat$year)[[1]])[,2]
se.robust.ev.fe<-sqrt(diag(vcovHC(ols.ev.fe)))
##Access hypothesis only model, no fixed effects ##
## Tests the institutional-access hypothesis alone (vote share excluded).
## FIX: the original formula listed `bizman` twice (typo); lm() silently
## drops duplicated terms, so removing it leaves the fitted model unchanged.
ols.ac <- lm(loginc ~ leadership + majpty + chair +
               rules + fintax + approp + agriculture +
               education + health + judiciary +
               highpost + lowpost +
               age + tenure + postgrad + legal +
               bizman + female + white +
               gdp01, data = dat)
## year-clustered and heteroskedasticity-robust SEs
se.cluster.ac<-as.matrix(clusterMod(ols.ac, dat$year)[[1]])[,2]
se.robust.ac<-sqrt(diag(vcovHC(ols.ac)))
##Access hypothesis only model, fixed effects ##
ols.ac.fe<-lm(loginc ~ leadership + majpty + chair +
                rules + fintax + approp + agriculture +
                education + health+ judiciary +
                as.factor(year) + as.factor(memberid),
              data = dat)
se.cluster.ac.fe<-as.matrix(clusterMod(ols.ac.fe, dat$year)[[1]])[,2]
se.robust.ac.fe<-sqrt(diag(vcovHC(ols.ac.fe)))
## Combine in stargazer, regular standard errors ##
## Six-column LaTeX table: main, main-FE, safety-only, safety-FE,
## access-only, access-FE, all with year-clustered SEs.
stargazer(ols.main, ols.fe.main, ols.ev,
          ols.ev.fe, ols.ac, ols.ac.fe,
          se = list(se.cluster.main,
                    se.cluster.fe.main,
                    se.cluster.ev,
                    se.cluster.ev.fe,
                    se.cluster.ac,
                    se.cluster.ac.fe),
          no.space=T,
          keep.stat = c("n", "adj.rsq", "f"),
          omit = c("memberid", "year"),
          dep.var.labels = "Income (2001 $USD)",
          covariate.labels = c("Vote Share",
                               "Party Leaders",
                               "Majority Party",
                               "Committee Chairs",
                               "Rules Committee",
                               "Finance & Tax Committee",
                               "Appropriations Committee",
                               "Agriculture Committee",
                               "Education Committee",
                               "Health Committee",
                               "Judiciary Committee",
                               "Ran For Higher Office",
                               "Ran For Lower Office",
                               "Age",
                               "Tenure",
                               "Post-Graduate Degree",
                               "Legal Career",
                               "Business Career",
                               "Female",
                               "White",
                               "GDP (2001 $USD)",
                               "Intercept"))
#################################
## Error Correction Model ##
## See Stata Do File ##
#################################
###########################################################
## Simple propensity score match for Election Increases ##
###########################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment: any non-negative change in vote margin (electdif >= 0).
## NOTE(review): Y/Tr/X/t are overwritten again here; `t` masks base::t().
Y = dat$difinc
Tr = ifelse(dat$electdif>=0, 1, 0)
X = cbind(dat[,c("ruleslag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year")])
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ ruleslag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
## Z: imbalanced covariates passed to Match() for bias adjustment
Z = cbind(t[,c("agelag", "year")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
## Sweep calipers 0.1-2.0 and record ATT diagnostics at each width.
## BUG FIX: matrix("NA", ...) built a *character* matrix, so every numeric
## value stored below was silently coerced to a string; NA_real_ keeps the
## diagnostics numeric. (T -> TRUE: T is a reassignable alias.)
match.ev <- matrix(NA_real_, nrow = 20, ncol = 5)
for (i in 1:20) {
  match1 <- Match(Y = t$Y, Tr = t$Tr, X = t[, c(3:17)], Z = Z, BiasAdjust = TRUE,
                  estimand = "ATT", M = 1, replace = TRUE, caliper = i / 10)
  match.ev[i, 1] <- match1$est[1]             # ATT estimate at this caliper
  match.ev[i, 2] <- match1$se                 # standard error
  match.ev[i, 3] <- match1$est[1] / match1$se # z-score
  match.ev[i, 4] <- match1$caliper[1]         # caliper width
  match.ev[i, 5] <- match1$wnobs              # matched observations
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
## Final specification for the "any increase" treatment; match1 is reused
## later in the difference-in-differences summary table.
match1<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper= 0.5)
summary(match1)
bal.vote<-as.data.frame(rbind(t[match1$index.treated,], t[match1$index.control,]))
mvote <- glm(Tr ~ ruleslag + approplag + fintaxlag +
               agrilag + judiclag + edulag + healthlag +
               leaderlag + agelag + tenurelag + majpty +
               postgrad + female + white + black + hispanic +
               year, data=bal.vote, family=binomial(link="logit"))
voter<-summary(mvote)
se.voter<-sqrt(diag(vcovHC(mvote)))
## t-tests ##
## cols: 1 = treated mean, 2 = control mean, 3 = t-statistic, per covariate
t.test.vote<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.vote[i,3]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$statistic
  t.test.vote[i,1]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[1]
  t.test.vote[i,2]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.vote[,3], pch=16, main="Normal Q-Q Plot, Vote Margin Difference (Any Increase)", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
## cols 1-5: pre-match stats; cols 6-10: post-match stats (see chair section)
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
## iterate through each covariate to test pre- and post- balance ##
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match1, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
## print balance statistics ##
xtable(mb1, digits = 3)
####################################################################################
## Simple propensity score match for Election Increases, 5% increase versus less ###
####################################################################################
## Treatment: vote-margin gain of at least 5 percentage points.
Y = dat$difinc
Tr = ifelse(dat$electdif >= 0.05, 1, 0)
X = cbind(dat[,c("ruleslag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year")])
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ ruleslag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("judiclag", "agelag", "tenurelag", "black", "hispanic")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
## Sweep calipers 0.1-2.0 for the 5% treatment.
## BUG FIX: matrix("NA", ...) built a *character* matrix, coercing all the
## numeric diagnostics below to strings; NA_real_ keeps them numeric.
match.ev <- matrix(NA_real_, nrow = 20, ncol = 5)
for (i in 1:20) {
  match2 <- Match(Y = t$Y, Tr = t$Tr, X = t[, c(3:17)], Z = Z, BiasAdjust = TRUE,
                  estimand = "ATT", M = 1, replace = TRUE, caliper = i / 10)
  match.ev[i, 1] <- match2$est[1]             # ATT estimate at this caliper
  match.ev[i, 2] <- match2$se                 # standard error
  match.ev[i, 3] <- match2$est[1] / match2$se # z-score
  match.ev[i, 4] <- match2$caliper[1]         # caliper width
  match.ev[i, 5] <- match2$wnobs              # treated observations matched
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
## Final specification for the 5% treatment (match2).
match2<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match2)
## check for balance ##
bal.vote<-as.data.frame(rbind(t[match2$index.treated,], t[match2$index.control,]))
mvote<-glm(Tr ~ ruleslag + approplag + fintaxlag +
             agrilag + judiclag + edulag + healthlag +
             leaderlag + agelag + tenurelag + majpty +
             postgrad + female + white + black + hispanic +
             year, data=bal.vote, family=binomial(link="logit"))
voter<-summary(mvote)
se.voter<-sqrt(diag(vcovHC(mvote)))
## t-tests ##
## cols: 1 = treated mean, 2 = control mean, 3 = t-statistic, per covariate
t.test.vote<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.vote[i,3]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$statistic
  t.test.vote[i,1]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[1]
  t.test.vote[i,2]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.vote[,3], pch=16, main="Normal Q-Q Plot, Vote Margin Difference (5%)", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
## iterate through each covariate to test pre- and post- balance ##
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match2, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
#####################################################################################
## Simple propensity score match for Election Increases, 10% increase versus less ###
#####################################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment: vote-margin gain of at least 10 percentage points.
Y = dat$difinc
Tr = ifelse(dat$electdif>=.10, 1, 0)
X = cbind(dat[,c("ruleslag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year")])
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ ruleslag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("majpty")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
## Sweep calipers 0.1-2.0 for the 10% treatment.
## BUG FIX: matrix("NA", ...) built a *character* matrix, coercing all the
## numeric diagnostics below to strings; NA_real_ keeps them numeric.
match.ev.10 <- matrix(NA_real_, nrow = 20, ncol = 5)
for (i in 1:20) {
  match3 <- Match(Y = t$Y, Tr = t$Tr, X = t[, c(3:17)], Z = Z, BiasAdjust = TRUE,
                  estimand = "ATT", M = 1, replace = TRUE, caliper = i / 10)
  match.ev.10[i, 1] <- match3$est[1]             # ATT estimate at this caliper
  match.ev.10[i, 2] <- match3$se                 # standard error
  match.ev.10[i, 3] <- match3$est[1] / match3$se # z-score
  match.ev.10[i, 4] <- match3$caliper[1]         # caliper width
  match.ev.10[i, 5] <- match3$wnobs              # matched observations
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
## Final specification for the 10% treatment (match3).
match3<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match3)
## check for balance ##
bal.vote<-as.data.frame(rbind(t[match3$index.treated,], t[match3$index.control,]))
mvote<-glm(Tr ~ ruleslag + approplag + fintaxlag +
             agrilag + judiclag + edulag + healthlag +
             leaderlag + agelag + tenurelag + majpty +
             postgrad + female + white + black + hispanic +
             year, data=bal.vote, family=binomial(link="logit"))
voter<-summary(mvote)
se.voter<-sqrt(diag(vcovHC(mvote)))
## cols: 1 = treated mean, 2 = control mean, 3 = t-statistic, per covariate
t.test.vote<-matrix(NA, nrow=17, ncol=3)
## t-tests ##
for(i in 1:17){
  t.test.vote[i,3]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$statistic
  t.test.vote[i,1]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[1]
  t.test.vote[i,2]<-t.test(bal.vote[i+2][bal.vote$Tr==1,], bal.vote[i+2][bal.vote$Tr==0,])$estimate[2]
}
## qq-plots ##
## BUG FIX: the plot title said "(5%)" (copy-paste from the previous
## section) but this section's treatment is electdif >= 0.10.
qqnorm(t.test.vote[,3], pch=16, main="Normal Q-Q Plot, Vote Margin Difference (10%)", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1) # reference line for visual comparison
## report pre- and post- balance means and variances ##
## cols 1-5: pre-match stats; cols 6-10: post-match stats on match3
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
## iterate through each covariate to test pre- and post- balance ##
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match3, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
#######################################################
## Simple propensity score match for Rules committee ##
#######################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment: gaining a Rules seat; losses (rulesdif < 0) are excluded.
Y = dat$difinc[dat$rulesdif>=0]
Tr = dat$rulesdif[dat$rulesdif>=0]
X = cbind(dat[,c("votelag", "approplag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "rulesdif")])
X<-X[X$rulesdif>=0,]
X<-X[,c(1:17)] # drop the rulesdif column used only for subsetting
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + approplag + fintaxlag + agrilag + judiclag + edulag + healthlag + leaderlag + agelag + tenurelag + majpty + postgrad + female + white + black + hispanic + year, family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("votelag", "agelag")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
## Sweep calipers 0.1-2.0 for the Rules-committee treatment.
## BUG FIX: matrix("NA", ...) built a *character* matrix, coercing all the
## numeric diagnostics below to strings; NA_real_ keeps them numeric.
match.rules <- matrix(NA_real_, nrow = 20, ncol = 5)
for (i in 1:20) {
  match4 <- Match(Y = t$Y, Tr = t$Tr, X = t[, c(3:17)], Z = Z, BiasAdjust = TRUE,
                  estimand = "ATT", M = 1, replace = TRUE, caliper = i / 10)
  match.rules[i, 1] <- match4$est[1]             # ATT estimate at this caliper
  match.rules[i, 2] <- match4$se                 # standard error
  match.rules[i, 3] <- match4$est[1] / match4$se # z-score
  match.rules[i, 4] <- match4$caliper[1]         # caliper width
  match.rules[i, 5] <- match4$wnobs              # matched observations
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
## Final specification for the Rules treatment (match4).
match4<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match4)
bal.rules<-as.data.frame(rbind(t[match4$index.treated,], t[match4$index.control,]))
mrules<-glm(Tr ~ votelag + approplag + fintaxlag +
              agrilag + judiclag + edulag + healthlag +
              leaderlag + agelag + tenurelag + majpty +
              postgrad + female + white + black + hispanic +
              year, data=bal.rules, family=binomial(link="logit"))
rulesc<-summary(mrules)
se.rules<-sqrt(diag(vcovHC(mrules)))
## t-tests ##
## cols: 1 = treated mean, 2 = control mean, 3 = t-statistic, per covariate
t.test.rules<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.rules[i,3]<-t.test(bal.rules[i+2][bal.rules$Tr==1,], bal.rules[i+2][bal.rules$Tr==0,])$statistic
  t.test.rules[i,1]<-t.test(bal.rules[i+2][bal.rules$Tr==1,], bal.rules[i+2][bal.rules$Tr==0,])$estimate[1]
  t.test.rules[i,2]<-t.test(bal.rules[i+2][bal.rules$Tr==1,], bal.rules[i+2][bal.rules$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.rules[,3], pch=16, main="Normal Q-Q Plot, Rules Committee", xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match4, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Appropriations committee ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment: gaining an Appropriations seat; losses are excluded.
Y = dat$difinc[dat$apropdif>=0]
Tr = dat$apropdif[dat$apropdif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "fintaxlag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "apropdif")])
X<-X[X$apropdif>=0,]
X<-X[,c(1:17)] # drop the apropdif column used only for subsetting
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + ruleslag +
                   fintaxlag + agrilag + judiclag +
                   edulag + healthlag + leaderlag +
                   agelag + tenurelag + majpty +
                   postgrad + female + white +
                   black + hispanic + year,
                 family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("fintaxlag")])
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
## Sweep calipers 0.1-2.0 for the Appropriations treatment.
## BUG FIX: matrix("NA", ...) built a *character* matrix, coercing all the
## numeric diagnostics below to strings; NA_real_ keeps them numeric.
match.approp <- matrix(NA_real_, nrow = 20, ncol = 5)
for (i in 1:20) {
  match5 <- Match(Y = t$Y, Tr = t$Tr, X = t[, c(3:17)], Z = Z, BiasAdjust = TRUE,
                  estimand = "ATT", M = 1, replace = TRUE, caliper = i / 10)
  match.approp[i, 1] <- match5$est[1]             # ATT estimate at this caliper
  match.approp[i, 2] <- match5$se                 # standard error
  match.approp[i, 3] <- match5$est[1] / match5$se # z-score
  match.approp[i, 4] <- match5$caliper[1]         # caliper width
  match.approp[i, 5] <- match5$wnobs              # matched observations
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
## Final specification for the Appropriations treatment (match5).
match5<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match5)
bal.approp<-as.data.frame(rbind(t[match5$index.treated,], t[match5$index.control,]))
mapprop<-glm(Tr ~ votelag + ruleslag + fintaxlag +
               agrilag + judiclag + edulag + healthlag +
               leaderlag + agelag + tenurelag + majpty +
               postgrad + female + white + black + hispanic +
               year, data=bal.approp, family=binomial(link="logit"))
appropc<-summary(mapprop)
se.approp<-sqrt(diag(vcovHC(mapprop)))
## t-tests ##
## cols: 1 = treated mean, 2 = control mean, 3 = t-statistic, per covariate
t.test.approp<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
  t.test.approp[i,3]<-t.test(bal.approp[i+2][bal.approp$Tr==1,],
                             bal.approp[i+2][bal.approp$Tr==0,])$statistic
  t.test.approp[i,1]<-t.test(bal.approp[i+2][bal.approp$Tr==1,],
                             bal.approp[i+2][bal.approp$Tr==0,])$estimate[1]
  t.test.approp[i,2]<-t.test(bal.approp[i+2][bal.approp$Tr==1,],
                             bal.approp[i+2][bal.approp$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.approp[,3], pch=16, main="Normal Q-Q Plot, Appropriations Committee",
       xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
  mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match5, ks = T, nboots = 1000)
  # mb1[i,1] <- colnames(t[i+2])
  mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
  mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
  mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
  mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
  mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
  mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
  mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
  mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
  mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
  mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Finance & Tax committee ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment: gaining a Finance & Tax seat; losses are excluded.
Y = dat$difinc[dat$fintaxdif>=0]
Tr = dat$fintaxdif[dat$fintaxdif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "approplag", "agrilag", "judiclag", "edulag", "healthlag", "leaderlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "fintaxdif")])
X<-X[X$fintaxdif>=0,]
X<-X[,c(1:17)] # drop the fintaxdif column used only for subsetting
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
prop.vote <- glm(Tr ~ votelag + ruleslag +
                   approplag + agrilag + judiclag +
                   edulag + healthlag + leaderlag +
                   agelag + tenurelag + majpty +
                   postgrad + female + white +
                   black + hispanic + year,
                 family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("tenurelag")]) # don't really need to do but can always optimize balance #
#####################################################
## Create a loop to go through all caliper lengths ##
#####################################################
## Sweep calipers 0.1-2.0 for the Finance & Tax treatment.
## BUG FIX: matrix("NA", ...) built a *character* matrix, coercing all the
## numeric diagnostics below to strings; NA_real_ keeps them numeric.
match.fintax <- matrix(NA_real_, nrow = 20, ncol = 5)
for (i in 1:20) {
  match6 <- Match(Y = t$Y, Tr = t$Tr, X = t[, c(3:17)], Z = Z, BiasAdjust = TRUE,
                  estimand = "ATT", M = 1, replace = TRUE, caliper = i / 10)
  match.fintax[i, 1] <- match6$est[1]             # ATT estimate at this caliper
  match.fintax[i, 2] <- match6$se                 # standard error
  match.fintax[i, 3] <- match6$est[1] / match6$se # z-score
  match.fintax[i, 4] <- match6$caliper[1]         # caliper width
  match.fintax[i, 5] <- match6$wnobs              # matched observations
}
#################################################
## return to 0.5 caliper and check for balance ##
#################################################
## Preferred specification: ATT match at caliper 0.5 (columns 3:17 of `t` are
## the covariates, Z = tenurelag gets bias adjustment).
match6<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=0.5)
summary(match6)
## Pool matched treated and control rows to re-assess balance on the matched
## sample.
bal.fintax<-as.data.frame(rbind(t[match6$index.treated,], t[match6$index.control,]))
## Post-matching propensity model: coefficients near zero indicate balance.
mfintax<-glm(Tr ~ votelag + ruleslag + approplag +
agrilag + judiclag + edulag + healthlag +
leaderlag + agelag + tenurelag + majpty +
postgrad + female + white + black + hispanic +
year, data=bal.fintax, family=binomial(link="logit"))
fintaxc <- summary(mfintax)
se.fintax<-sqrt(diag(vcovHC(mfintax)))
## t-tests ##
## Per-covariate t-tests (treated vs control) on the matched sample:
## col 1 = treated mean, col 2 = control mean, col 3 = t statistic.
t.test.fintax<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
t.test.fintax[i,3]<-t.test(bal.fintax[i+2][bal.fintax$Tr==1,],
bal.fintax[i+2][bal.fintax$Tr==0,])$statistic
t.test.fintax[i,1]<-t.test(bal.fintax[i+2][bal.fintax$Tr==1,],
bal.fintax[i+2][bal.fintax$Tr==0,])$estimate[1]
t.test.fintax[i,2]<-t.test(bal.fintax[i+2][bal.fintax$Tr==1,],
bal.fintax[i+2][bal.fintax$Tr==0,])$estimate[2]
}
## qq-plots ##
## Visual check that the 17 balance t statistics cluster near zero.
qqnorm(t.test.fintax[,3], pch=16, main="Normal Q-Q Plot, Finance and Tax Committee",
xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
## Columns 1-5 = before matching, columns 6-10 = after matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match6, ks = T, nboots = 1000)
# mb1[i,1] <- colnames(t[i+2])
mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Leadership Position ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment is the change in leadership status (leaddif); rows with negative
## change are excluded. Outcome is the change in income (difinc).
Y = dat$difinc[dat$leaddif>=0]
Tr = dat$leaddif[dat$leaddif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "approplag", "agrilag", "judiclag", "edulag", "healthlag", "fintaxlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "leaddif")])
X<-X[X$leaddif>=0,]
## Drop the leaddif column (18th) now that rows are filtered.
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
## Pre-matching propensity model for the leadership treatment.
prop.vote <- glm(Tr ~ votelag + ruleslag +
fintaxlag + agrilag + judiclag +
edulag + healthlag + approplag +
agelag + tenurelag + majpty +
postgrad + female + white +
black + hispanic + year,
family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("ruleslag")])
#####################################################
## For Leaders, to 2 caliper and check for balance ##
#####################################################
match7<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=2)
summary(match7)
## Matched-sample balance diagnostics (same pattern as the fintax section).
bal.lead<-as.data.frame(rbind(t[match7$index.treated,], t[match7$index.control,]))
mlead <- glm(Tr ~ votelag + ruleslag + fintaxlag +
agrilag + judiclag + edulag + healthlag +
leaderlag + agelag + tenurelag + majpty +
postgrad + female + white + black + hispanic +
year, data=bal.lead, family=binomial(link="logit"))
leadc<-summary(mlead)
se.lead<-sqrt(diag(vcovHC(mlead)))
## t-tests ##
## col 1 = treated mean, col 2 = control mean, col 3 = t statistic.
t.test.lead<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
t.test.lead[i,3]<-t.test(bal.lead[i+2][bal.lead$Tr==1,],
bal.lead[i+2][bal.lead$Tr==0,])$statistic
t.test.lead[i,1]<-t.test(bal.lead[i+2][bal.lead$Tr==1,],
bal.lead[i+2][bal.lead$Tr==0,])$estimate[1]
t.test.lead[i,2]<-t.test(bal.lead[i+2][bal.lead$Tr==1,],
bal.lead[i+2][bal.lead$Tr==0,])$estimate[2]
}
## qq-plots ##
qqnorm(t.test.lead[,3], pch=16, main="Normal Q-Q Plot, Leadership Position",
xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
## Columns 1-5 = before matching, columns 6-10 = after matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match7, ks = T, nboots = 1000)
# mb1[i,1] <- colnames(t[i+2])
mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
################################################################
## Simple propensity score match for Committee Chair ##
################################################################
###############################
## Create Treatment Variable ##
## And Matching Dataset ##
###############################
## Treatment is the change in committee-chair status (chairdif); rows with a
## negative change are excluded. Outcome is the change in income (difinc).
Y = dat$difinc[dat$chairdif>=0]
Tr = dat$chairdif[dat$chairdif>=0]
X = cbind(dat[,c("votelag", "ruleslag", "approplag", "agrilag", "judiclag", "edulag", "healthlag", "fintaxlag", "agelag", "tenurelag", "majpty", "postgrad", "female", "white", "black", "hispanic", "year", "chairdif")])
X<-X[X$chairdif>=0,]
## Drop the chairdif column (18th) now that rows are filtered.
X<-X[,c(1:17)]
t<-cbind(Y, Tr, X)
t<-na.omit(t)
############################################
## Estimate Balance on Treatment Variable ##
## With Other Covariates as Predictors ##
############################################
## Pre-matching propensity model for the chair treatment.
prop.vote <- glm(Tr ~ votelag + ruleslag +
fintaxlag + agrilag + judiclag +
edulag + healthlag + approplag +
agelag + tenurelag + majpty +
postgrad + female + white +
black + hispanic + year,
family = binomial(link = "logit"), data = t)
summary(prop.vote)
########################
## Matching Algorithm ##
########################
Z = cbind(t[,c("votelag", "ruleslag", "agelag", "hispanic")])
######################################################
## For Cmte Chairs, 2 caliper and check for balance ##
######################################################
match8<-Match(Y=t$Y, Tr=t$Tr, X=t[,c(3:17)], Z=Z, BiasAdjust=T, estimand="ATT", M=1, replace=T, caliper=2)
summary(match8)
## Pool matched treated and control rows to re-assess covariate balance on the
## matched sample (columns 3:17 of `t` are the covariates).
bal.chair<-as.data.frame(rbind(t[match8$index.treated,], t[match8$index.control,]))
## Post-matching propensity model: coefficients near zero indicate balance.
mchair <- glm(Tr ~ votelag + ruleslag + fintaxlag +
agrilag + judiclag + edulag + healthlag +
approplag + agelag + tenurelag + majpty +
postgrad + female + white + black + hispanic +
year, data=bal.chair, family=binomial(link="logit"))
chairc<-summary(mchair)
se.chair<-sqrt(diag(vcovHC(mchair)))
## t-tests ##
## col 1 = treated mean, col 2 = control mean, col 3 = t statistic.
t.test.chair<-matrix(NA, nrow=17, ncol=3)
for(i in 1:17){
t.test.chair[i,3]<-t.test(bal.chair[i+2][bal.chair$Tr==1,],
bal.chair[i+2][bal.chair$Tr==0,])$statistic
t.test.chair[i,1]<-t.test(bal.chair[i+2][bal.chair$Tr==1,],
bal.chair[i+2][bal.chair$Tr==0,])$estimate[1]
t.test.chair[i,2]<-t.test(bal.chair[i+2][bal.chair$Tr==1,],
bal.chair[i+2][bal.chair$Tr==0,])$estimate[2]
}
## qq-plots ##
## FIX: plot the Committee Chair t statistics. The original plotted
## t.test.lead (the Leadership results) under the "Committee Chairs" title.
qqnorm(t.test.chair[,3], pch=16, main="Normal Q-Q Plot, Committee Chairs",
xlim=c(-3,3), ylim=c(-1,1))
lines(x=c(-3,3), y=c(-1,1), lwd=1)
## report pre- and post- balance means and variances ##
## Columns 1-5 = before matching, columns 6-10 = after matching.
mb1<-matrix(NA, ncol = 10, nrow = 17)
colnames(mb1) <- c("Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-Value", "Mean, Tr", "Mean, Co", "Var, Tr", "Var, Co", "P-value")
for(i in 1:17){
mb.ediff<-MatchBalance(Tr~t[,i+2], data=t, match.out = match8, ks = T, nboots = 1000)
# mb1[i,1] <- colnames(t[i+2])
mb1[i,1] <- mb.ediff$BeforeMatching[[1]]$mean.Tr
mb1[i,2] <- mb.ediff$BeforeMatching[[1]]$mean.Co
mb1[i,3] <- mb.ediff$BeforeMatching[[1]]$var.Tr
mb1[i,4] <- mb.ediff$BeforeMatching[[1]]$var.Co
mb1[i,5] <- mb.ediff$BeforeMatching[[1]]$p.value
mb1[i,6] <- mb.ediff$AfterMatching[[1]]$mean.Tr
mb1[i,7] <- mb.ediff$AfterMatching[[1]]$mean.Co
mb1[i,8] <- mb.ediff$AfterMatching[[1]]$var.Tr
mb1[i,9] <- mb.ediff$AfterMatching[[1]]$var.Co
mb1[i,10]<- mb.ediff$AfterMatching[[1]]$p.value
}
xtable(mb1, digits = 3)
##############################################
## Difference-in-Differences Matching Table ##
##############################################
## Collect estimates, SEs and matched sample sizes from all match objects.
## NOTE(review): match1-match5 are created earlier in the file (outside this
## chunk); match6-match8 come from the sections above.
est <- c(match1$est, match2$est, match3$est,
match7$est, match8$est,
match4$est, match6$est, match5$est)
se <- c(match1$se, match2$se, match3$se,
match7$se, match8$se,
match4$se, match6$se, match5$se)
## Matched N = treated indices + control indices (with replacement, so
## controls can repeat).
n <- c(sum(length(match1$index.treated), length(match1$index.control)),
sum(length(match2$index.treated), length(match2$index.control)),
sum(length(match3$index.treated), length(match3$index.control)),
sum(length(match7$index.treated), length(match7$index.control)),
sum(length(match8$index.treated), length(match8$index.control)),
sum(length(match4$index.treated), length(match4$index.control)),
sum(length(match6$index.treated), length(match6$index.control)),
sum(length(match5$index.treated), length(match5$index.control))
)
table.match <- cbind(est, se, n)
## Row order must mirror the concatenation order used for est/se/n above.
rownames(table.match) <- c("Vote Share", "Vote Share 0.05",
"Vote Share 0.10",
"Leadership", "Cmte Chair", "Rules Cmte",
"Finance & Tax Cmte", "Appropriations Cmte")
#########################
## Rules Committee t+1 ##
#########################
## Restrict to member-years that sat on the Rules Committee in the prior term.
rules.dat <- dat[dat$ruleslag>= 1,]
##################################
## Re-Do OLS Main Model, And FE ##
##################################
## Main Model, no fixed effects ##
ols.main.rules <- lm(loginc ~ votemarg + leadership +
majpty + chair + rules + fintax +
approp + agriculture + education +
health + judiciary + highpost + lowpost +
age + tenure + postgrad + legal + bizman +
female + white + gdp01, data = rules.dat)
summary(ols.main.rules)
## Year-clustered SEs via the project-level clusterMod() helper (defined
## elsewhere in the project), plus heteroskedasticity-robust SEs.
se.cluster.main.rules <- as.matrix(clusterMod(ols.main.rules, rules.dat$year)[[1]])[,2]
se.robust.main.rules <- sqrt(diag(vcovHC(ols.main.rules)))
## Main model, fixed effects ##
## Member and year fixed effects absorb the time-invariant member controls.
ols.fe.main.rules <- lm(loginc ~ votemarg + leadership +
majpty + chair + rules + fintax +
approp + agriculture + education +
health + judiciary +
as.factor(year) +
as.factor(memberid), data=rules.dat)
summary(ols.fe.main.rules)
se.cluster.fe.main.rules <- as.matrix(clusterMod(ols.fe.main.rules, rules.dat$year)[[1]])[,2]
se.robust.fe.main.rules <- sqrt(diag(vcovHC(ols.fe.main.rules)))
################################
## ECM Model in Stata do-file ##
## Stargaze OLS Models here ##
################################
## Regression table with clustered SEs; FE dummies are omitted from display.
stargazer(ols.main.rules, ols.fe.main.rules,
se = list(se.cluster.main.rules,
se.cluster.fe.main.rules),
no.space=T,
keep.stat = c("n", "adj.rsq", "f"),
omit = c("memberid", "year"),
dep.var.labels = "Income (2001 $USD)",
covariate.labels = c("Vote Share",
"Party Leaders",
"Majority Party",
"Committee Chairs",
"Rules Committee",
"Finance & Tax Committee",
"Appropriations Committee",
"Agriculture Committee",
"Education Committee",
"Health Committee",
"Judiciary Committee",
"Ran For Higher Office",
"Ran For Lower Office",
"Age",
"Tenure",
"Post-Graduate Degree",
"Legal Career",
"Business Career",
"Female",
"White",
"GDP (2001 $USD)",
"Intercept"))
|
#' Stacked Bar Chart
#'
#' Plots a series of bar charts stacked according to specified data_groups
#' @param dataset List of numeric vectors specifying the datasets to be plotted.
#' @param data_groups List of character vectors defining different groupings for the chart.
#' @param x_categories Character vector of names for the x category (Show ticks as categorized by each data.)
#' @param colors Named list with colors for the data series in the chart.
#' NULL results in an random automatically generated colors.
#' @param axis_labels Named list of characters defining the preferred chart axis labels
#' @param labels_pos Named list of characters defining the preferred position of the axis labels
#' e.g for x-axis ( inner-center, inner-left, outer-right, outer-center, outer-left, inner-right [default] )
#' and y-axis ( inner-middle, inner-bottom, outer-top, outer-middle, outer-bottom, inner-top [default] )
#' @param axis_rotate Boolean value to determine axis rotation. Default is set
#' to False.
#' @param subchart Boolean option to show a sub chart for zoom and selection
#'                  range. Default set to False.
#' @param zoom Boolean option to Zoom by mouse wheel event and
#' slide by drag. Default set to True
#' @param width,height Must be a valid CSS unit (like '100%', '400px', 'auto')
#' or a number, which will be coerced to a string and have
#' 'px' appended.The default is NULL, which results in
#' intelligent automatic sizing based on the chart’s
#' container.
#' @param elementId Use an explicit element ID for the widget Useful if you
#' have other JavaScript that needs to explicitly discover
#' and interact with a specific widget instance. in any
#' other case leave as NULL which results in an
#' automatically generated one.
#' @return Stacked Bar Chart with the specified parameters.
#' @examples
#' dataset <- list(
#' data1=c(30, 20, 50, 40, 60, 50),
#' data2=c(200, 130, 90, 240, 130, 220),
#' data3=c(300, 200, 160, 400, 250, 250),
#' data4=c(200, 130, 90, 240, 130, 220))
#'
#' data_groups <- list(grp1=c('data1','data3'),grp2=c('data2','data4'))
#' x_categories <- c('one','two','three','four','five','six')
#' colors <- list(data1="orange",data2="green",data3="red")
#' axis_labels <- list(x_axis="species",y_axis="frequency")
#' labels_pos <- list(xpos="outer-center",ypos="outer-middle")
#'
#' p3_stacked_bar_chart(dataset,data_groups,x_categories,colors,labels_pos=labels_pos)
#' p3_stacked_bar_chart (dataset,data_groups,x_categories,colors,axis_labels,labels_pos,subchart=TRUE)
#'
#' @import htmlwidgets
#'
#' @export
p3_stacked_bar_chart <- function(dataset, data_groups, x_categories = NULL, colors = NULL,
                                 axis_labels = NULL, labels_pos = NULL, axis_rotate = FALSE,
                                 subchart = FALSE, zoom = TRUE,
                                 width = NULL, height = NULL, elementId = NULL) {
  # Fall back to generic axis titles when the caller supplies none.
  if (is.null(axis_labels)) {
    axis_labels <- list(x_axis = "x", y_axis = "y")
  }
  # Default label positions. NOTE(review): these keys are `xs`/`ys` while the
  # roxygen example passes `xpos`/`ypos` -- confirm which names the JS binding
  # actually reads.
  if (is.null(labels_pos)) {
    labels_pos <- list(xs = "outer-right", ys = "outer-bottom")
  }
  # Bundle every option into the payload forwarded to the JavaScript side.
  widget_options <- list(
    dataset = dataset,
    data_groups = data_groups,
    x_categories = x_categories,
    colors = colors,
    axis_labels = axis_labels,
    labels_pos = labels_pos,
    axis_rotate = axis_rotate,
    subchart = subchart,
    zoom = zoom
  )
  # Hand off to htmlwidgets, which instantiates the chart in the browser.
  htmlwidgets::createWidget(
    name = 'p3_stacked_bar_chart',
    widget_options,
    width = width,
    height = height,
    package = 'PantheraWidgets',
    elementId = elementId
  )
}
#' Shiny bindings for p3_stacked_bar_chart
#'
#' Output and render functions for using p3_stacked_bar_chart within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a p3_stacked_bar_chart
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name p3_stacked_bar_chart-shiny
#'
#' @export
p3_stacked_bar_chartOutput <- function(outputId, width = '100%', height = '400px'){
  # Thin wrapper registering the widget's output container with Shiny.
  htmlwidgets::shinyWidgetOutput(
    outputId, 'p3_stacked_bar_chart', width, height,
    package = 'PantheraWidgets'
  )
}
#' @rdname p3_stacked_bar_chart-shiny
#' @export
renderp3_stacked_bar_chart <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already quoted it.
  if (!quoted) {
    expr <- substitute(expr) # force quoted
  }
  htmlwidgets::shinyRenderWidget(expr, p3_stacked_bar_chartOutput, env, quoted = TRUE)
}
| /R/p3_stacked_bar_chart.R | no_license | pantheracorp/PantheraWidgets | R | false | false | 5,040 | r | #' Stacked Bar Chart
#'
#' Plots a series of bar charts stacked according to specified data_groups
#' @param dataset List of numeric vectors specifying the datasets to be plotted.
#' @param data_groups List of character vectors defining different groupings for the chart.
#' @param x_categories Character vector of names for the x category (Show ticks as categorized by each data.)
#' @param colors Named list with colors for the data series in the chart.
#' NULL results in an random automatically generated colors.
#' @param axis_labels Named list of characters defining the prefered chart axis labels
#' @param labels_pos Named list of characters defining the prefered position of the axis labels
#' e.g for x-axis ( inner-center, inner-left, outer-right, outer-center, outer-left, inner-right [default] )
#' and y-axis ( inner-middle, inner-bottom, outer-top, outer-middle, outer-bottom, inner-top [default] )
#' @param axis_rotate Boolean value to determine axis rotation. Default is set
#' to False.
#' @param subchart Boolean option to show sub chart for zoom and selection
#' range.Default set to False.
#' @param zoom Boolean option to Zoom by mouse wheel event and
#' slide by drag. Default set to True
#' @param width,height Must be a valid CSS unit (like '100%', '400px', 'auto')
#' or a number, which will be coerced to a string and have
#' 'px' appended.The default is NULL, which results in
#' intelligent automatic sizing based on the chart’s
#' container.
#' @param elementId Use an explicit element ID for the widget Useful if you
#' have other JavaScript that needs to explicitly discover
#' and interact with a specific widget instance. in any
#' other case leave as NULL which results in an
#' automatically generated one.
#' @return Stacked Bar Chart with specified parms.
#' @examples
#' dataset <- list(
#' data1=c(30, 20, 50, 40, 60, 50),
#' data2=c(200, 130, 90, 240, 130, 220),
#' data3=c(300, 200, 160, 400, 250, 250),
#' data4=c(200, 130, 90, 240, 130, 220))
#'
#' data_groups <- list(grp1=c('data1','data3'),grp2=c('data2','data4'))
#' x_categories <- c('one','two','three','four','five','six')
#' colors <- list(data1="orange",data2="green",data3="red")
#' axis_labels <- list(x_axis="species",y_axis="frequency")
#' labels_pos <- list(xpos="outer-center",ypos="outer-middle")
#'
#' p3_stacked_bar_chart(dataset,data_groups,x_categories,colors,labels_pos=labels_pos)
#' p3_stacked_bar_chart (dataset,data_groups,x_categories,colors,axis_labels,labels_pos,subchart=TRUE)
#'
#' @import htmlwidgets
#'
#' @export
p3_stacked_bar_chart <- function(dataset,data_groups,x_categories=NULL,colors=NULL,axis_labels=NULL,
labels_pos=NULL,axis_rotate=FALSE,subchart=FALSE,zoom=TRUE,
width=NULL, height=NULL, elementId=NULL) {
if(is.null(axis_labels))
{
axis_labels <- list(x_axis="x",y_axis="y")
}
if(is.null(labels_pos))
{
labels_pos <- list(xs="outer-right",ys="outer-bottom")
}
# forward options using x
x = list(
dataset = dataset,
data_groups = data_groups,
x_categories = x_categories,
colors = colors,
axis_labels = axis_labels,
labels_pos = labels_pos,
axis_rotate = axis_rotate,
subchart = subchart,
zoom = zoom
)
# create widget
htmlwidgets::createWidget(
name = 'p3_stacked_bar_chart',
x,
width = width,
height = height,
package = 'PantheraWidgets',
elementId = elementId
)
}
#' Shiny bindings for p3_stacked_bar_chart
#'
#' Output and render functions for using p3_stacked_bar_chart within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a p3_stacked_bar_chart
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @name p3_stacked_bar_chart-shiny
#'
#' @export
p3_stacked_bar_chartOutput <- function(outputId, width = '100%', height = '400px'){
htmlwidgets::shinyWidgetOutput(outputId, 'p3_stacked_bar_chart', width, height, package = 'PantheraWidgets')
}
#' @rdname p3_stacked_bar_chart-shiny
#' @export
renderp3_stacked_bar_chart <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
htmlwidgets::shinyRenderWidget(expr, p3_stacked_bar_chartOutput, env, quoted = TRUE)
}
|
# dtt_complete() on Date vectors: fills in the missing units between min and
# max; `sort`, `unique` and `units` control ordering, de-duplication and the
# completion granularity.
test_that("complete.Date", {
  # Empty and all-NA inputs are rejected with a chk error.
  expect_error(dtt_complete(NA_Date_[-1]), class = "chk_error")
  expect_error(dtt_complete(NA_Date_), class = "chk_error")
  # A single date is returned unchanged.
  expect_identical(
    dtt_complete(as.Date("2001-01-02")),
    as.Date("2001-01-02")
  )
  expect_identical(
    dtt_complete(as.Date(c("2001-01-02", "2001-01-01"))),
    as.Date(c("2001-01-01", "2001-01-02"))
  )
  # sort = FALSE preserves the caller's ordering.
  expect_identical(
    dtt_complete(as.Date(c("2001-01-02", "2001-01-01")), sort = FALSE),
    as.Date(c("2001-01-02", "2001-01-01"))
  )
  expect_identical(
    dtt_complete(as.Date(c("2001-01-03", "2001-01-01"))),
    as.Date(c("2001-01-01", "2001-01-02", "2001-01-03"))
  )
  # With sort = FALSE the filled-in values are appended after the originals.
  expect_identical(
    dtt_complete(as.Date(c("2001-01-03", "2001-01-01")), sort = FALSE),
    as.Date(c("2001-01-03", "2001-01-01", "2001-01-02"))
  )
  expect_identical(
    dtt_complete(
      as.Date(c("2001-01-03", "2001-01-01", "2001-01-03")),
      sort = FALSE
    ),
    as.Date(c("2001-01-03", "2001-01-01", "2001-01-02"))
  )
  # unique = FALSE keeps duplicated input values.
  expect_identical(
    dtt_complete(
      as.Date(c("2001-01-03", "2001-01-01", "2001-01-03")),
      sort = FALSE,
      unique = FALSE
    ),
    as.Date(c("2001-01-03", "2001-01-01", "2001-01-03", "2001-01-02"))
  )
  expect_identical(
    dtt_complete(
      as.Date(c("2001-01-03", "2001-01-01", "2001-01-03")),
      unique = FALSE
    ),
    as.Date(c("2001-01-01", "2001-01-02", "2001-01-03", "2001-01-03"))
  )
  # Coarser units floor the values; both dates fall in the same month.
  expect_identical(
    dtt_complete(as.Date(c("2001-01-03", "2001-01-01")), units = "months"),
    as.Date("2001-01-01")
  )
})
# dtt_complete() on POSIXct: the default unit is seconds, so day-spanning
# inputs are completed second-by-second unless units = "days" (etc.) is given.
test_that("complete.POSIXct", {
  # Empty and all-NA inputs are rejected with a chk error.
  expect_error(dtt_complete(NA_POSIXct_[-1]), class = "chk_error")
  expect_error(dtt_complete(NA_POSIXct_), class = "chk_error")
  expect_identical(
    dtt_complete(as.POSIXct("2001-01-02", tz = "Etc/GMT+7")),
    as.POSIXct("2001-01-02", tz = "Etc/GMT+7")
  )
  # Default second-level completion of one full day: 86400 seconds + 1 endpoint.
  expect_identical(
    length(
      dtt_complete(as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7"))
    ),
    86401L
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7"),
      units = "days"
    ),
    as.POSIXct(c("2001-01-01", "2001-01-02"), tz = "Etc/GMT+7")
  )
  # sort = FALSE preserves the caller's ordering.
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7"),
      units = "days",
      sort = FALSE
    ),
    as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7")
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-03", "2001-01-01"), tz = "Etc/GMT+7"),
      units = "days"
    ),
    as.POSIXct(c("2001-01-01", "2001-01-02", "2001-01-03"), tz = "Etc/GMT+7")
  )
  # With sort = FALSE the filled-in values are appended after the originals.
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-03", "2001-01-01"), tz = "Etc/GMT+7"),
      units = "days",
      sort = FALSE
    ),
    as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-02"), tz = "Etc/GMT+7")
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-03"), tz = "Etc/GMT+7"),
      sort = FALSE,
      units = "days"
    ),
    as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-02"), tz = "Etc/GMT+7")
  )
  # unique = FALSE keeps duplicated input values.
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-03"), tz = "Etc/GMT+7"),
      units = "days",
      sort = FALSE,
      unique = FALSE
    ),
    as.POSIXct(
      c("2001-01-03", "2001-01-01", "2001-01-03", "2001-01-02"),
      tz = "Etc/GMT+7"
    )
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-03"), tz = "Etc/GMT+7"),
      unique = FALSE,
      units = "days"
    ),
    as.POSIXct(
      c("2001-01-01", "2001-01-02", "2001-01-03", "2001-01-03"),
      tz = "Etc/GMT+7"
    )
  )
  # Coarser units floor the values; both datetimes fall in the same month.
  expect_identical(
    dtt_complete(
      as.POSIXct(c("2001-01-03", "2001-01-01"), tz = "Etc/GMT+7"),
      units = "months"
    ),
    as.POSIXct("2001-01-01", tz = "Etc/GMT+7")
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(
        c("2001-01-01 00:00:00", "2001-01-01 00:00:00"),
        tz = "Etc/GMT+7"
      )
    ),
    as.POSIXct("2001-01-01", tz = "Etc/GMT+7")
  )
  # Second-level completion within a single day.
  expect_identical(
    dtt_complete(
      as.POSIXct(
        c("2001-01-01 00:00:00", "2001-01-01 00:00:02"),
        tz = "Etc/GMT+7"
      )
    ),
    as.POSIXct(
      c("2001-01-01 00:00:00", "2001-01-01 00:00:01", "2001-01-01 00:00:02"),
      tz = "Etc/GMT+7"
    )
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(
        c("2001-01-01 00:00:04", "2001-01-01 00:00:02"),
        tz = "Etc/GMT+7"
      ),
      sort = FALSE
    ),
    as.POSIXct(
      c("2001-01-01 00:00:04", "2001-01-01 00:00:02", "2001-01-01 00:00:03"),
      tz = "Etc/GMT+7"
    )
  )
  expect_identical(
    dtt_complete(
      as.POSIXct(
        c("2001-01-01 00:00:04", "2001-01-01 00:00:02"),
        tz = "Etc/GMT+7"
      )
    ),
    as.POSIXct(
      c("2001-01-01 00:00:02", "2001-01-01 00:00:03", "2001-01-01 00:00:04"),
      tz = "Etc/GMT+7"
    )
  )
})
# dtt_complete() on hms (time-of-day) vectors: completes second-by-second by
# default; `units` coarsens the grid.
test_that("complete.hms", {
  # Empty and all-NA inputs are rejected with a chk error.
  expect_error(dtt_complete(NA_hms_[-1]), class = "chk_error")
  expect_error(dtt_complete(NA_hms_), class = "chk_error")
  # Duplicated identical times collapse to a single value.
  expect_identical(
    dtt_complete(hms::as_hms(c("00:00:00", "00:00:00"))),
    hms::as_hms("00:00:00")
  )
  expect_identical(
    dtt_complete(hms::as_hms(c("00:00:00", "00:00:02"))),
    hms::as_hms(c("00:00:00", "00:00:01", "00:00:02"))
  )
  # With sort = FALSE the filled-in values are appended after the originals.
  expect_identical(
    dtt_complete(hms::as_hms(c("00:00:04", "00:00:02")), sort = FALSE),
    hms::as_hms(c("00:00:04", "00:00:02", "00:00:03"))
  )
  expect_identical(
    dtt_complete(hms::as_hms(c("00:00:04", "00:00:02"))),
    hms::as_hms(c("00:00:02", "00:00:03", "00:00:04"))
  )
  # Nearly a full day of seconds between 00:00:01 and 23:59:59.
  expect_identical(
    length(dtt_complete(hms::as_hms(c("23:59:59", "00:00:01")))),
    86399L
  )
  # Hour-level completion covers 24 hourly values over the same span.
  expect_identical(
    length(
      dtt_complete(hms::as_hms(c("23:59:59", "00:00:01")), units = "hours")
    ),
    24L
  )
})
| /tests/testthat/test-complete.R | permissive | poissonconsulting/dttr2 | R | false | false | 5,852 | r | test_that("complete.Date", {
expect_error(dtt_complete(NA_Date_[-1]), class = "chk_error")
expect_error(dtt_complete(NA_Date_), class = "chk_error")
expect_identical(
dtt_complete(as.Date("2001-01-02")),
as.Date("2001-01-02")
)
expect_identical(
dtt_complete(as.Date(c("2001-01-02", "2001-01-01"))),
as.Date(c("2001-01-01", "2001-01-02"))
)
expect_identical(
dtt_complete(as.Date(c("2001-01-02", "2001-01-01")), sort = FALSE),
as.Date(c("2001-01-02", "2001-01-01"))
)
expect_identical(
dtt_complete(as.Date(c("2001-01-03", "2001-01-01"))),
as.Date(c("2001-01-01", "2001-01-02", "2001-01-03"))
)
expect_identical(
dtt_complete(as.Date(c("2001-01-03", "2001-01-01")), sort = FALSE),
as.Date(c("2001-01-03", "2001-01-01", "2001-01-02"))
)
expect_identical(
dtt_complete(
as.Date(c("2001-01-03", "2001-01-01", "2001-01-03")),
sort = FALSE
),
as.Date(c("2001-01-03", "2001-01-01", "2001-01-02"))
)
expect_identical(
dtt_complete(
as.Date(c("2001-01-03", "2001-01-01", "2001-01-03")),
sort = FALSE,
unique = FALSE
),
as.Date(c("2001-01-03", "2001-01-01", "2001-01-03", "2001-01-02"))
)
expect_identical(
dtt_complete(
as.Date(c("2001-01-03", "2001-01-01", "2001-01-03")),
unique = FALSE
),
as.Date(c("2001-01-01", "2001-01-02", "2001-01-03", "2001-01-03"))
)
expect_identical(
dtt_complete(as.Date(c("2001-01-03", "2001-01-01")), units = "months"),
as.Date("2001-01-01")
)
})
test_that("complete.POSIXct", {
expect_error(dtt_complete(NA_POSIXct_[-1]), class = "chk_error")
expect_error(dtt_complete(NA_POSIXct_), class = "chk_error")
expect_identical(
dtt_complete(as.POSIXct("2001-01-02", tz = "Etc/GMT+7")),
as.POSIXct("2001-01-02", tz = "Etc/GMT+7")
)
expect_identical(
length(
dtt_complete(as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7"))
),
86401L
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7"),
units = "days"
),
as.POSIXct(c("2001-01-01", "2001-01-02"), tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7"),
units = "days",
sort = FALSE
),
as.POSIXct(c("2001-01-02", "2001-01-01"), tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-03", "2001-01-01"), tz = "Etc/GMT+7"),
units = "days"
),
as.POSIXct(c("2001-01-01", "2001-01-02", "2001-01-03"), tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-03", "2001-01-01"), tz = "Etc/GMT+7"),
units = "days",
sort = FALSE
),
as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-02"), tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-03"), tz = "Etc/GMT+7"),
sort = FALSE,
units = "days"
),
as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-02"), tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-03"), tz = "Etc/GMT+7"),
units = "days",
sort = FALSE,
unique = FALSE
),
as.POSIXct(
c("2001-01-03", "2001-01-01", "2001-01-03", "2001-01-02"),
tz = "Etc/GMT+7"
)
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-03", "2001-01-01", "2001-01-03"), tz = "Etc/GMT+7"),
unique = FALSE,
units = "days"
),
as.POSIXct(
c("2001-01-01", "2001-01-02", "2001-01-03", "2001-01-03"),
tz = "Etc/GMT+7"
)
)
expect_identical(
dtt_complete(
as.POSIXct(c("2001-01-03", "2001-01-01"), tz = "Etc/GMT+7"),
units = "months"
),
as.POSIXct("2001-01-01", tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(
c("2001-01-01 00:00:00", "2001-01-01 00:00:00"),
tz = "Etc/GMT+7"
)
),
as.POSIXct("2001-01-01", tz = "Etc/GMT+7")
)
expect_identical(
dtt_complete(
as.POSIXct(
c("2001-01-01 00:00:00", "2001-01-01 00:00:02"),
tz = "Etc/GMT+7"
)
),
as.POSIXct(
c("2001-01-01 00:00:00", "2001-01-01 00:00:01", "2001-01-01 00:00:02"),
tz = "Etc/GMT+7"
)
)
expect_identical(
dtt_complete(
as.POSIXct(
c("2001-01-01 00:00:04", "2001-01-01 00:00:02"),
tz = "Etc/GMT+7"
),
sort = FALSE
),
as.POSIXct(
c("2001-01-01 00:00:04", "2001-01-01 00:00:02", "2001-01-01 00:00:03"),
tz = "Etc/GMT+7"
)
)
expect_identical(
dtt_complete(
as.POSIXct(
c("2001-01-01 00:00:04", "2001-01-01 00:00:02"),
tz = "Etc/GMT+7"
)
),
as.POSIXct(
c("2001-01-01 00:00:02", "2001-01-01 00:00:03", "2001-01-01 00:00:04"),
tz = "Etc/GMT+7"
)
)
})
test_that("complete.hms", {
expect_error(dtt_complete(NA_hms_[-1]), class = "chk_error")
expect_error(dtt_complete(NA_hms_), class = "chk_error")
expect_identical(
dtt_complete(hms::as_hms(c("00:00:00", "00:00:00"))),
hms::as_hms("00:00:00")
)
expect_identical(
dtt_complete(hms::as_hms(c("00:00:00", "00:00:02"))),
hms::as_hms(c("00:00:00", "00:00:01", "00:00:02"))
)
expect_identical(
dtt_complete(hms::as_hms(c("00:00:04", "00:00:02")), sort = FALSE),
hms::as_hms(c("00:00:04", "00:00:02", "00:00:03"))
)
expect_identical(
dtt_complete(hms::as_hms(c("00:00:04", "00:00:02"))),
hms::as_hms(c("00:00:02", "00:00:03", "00:00:04"))
)
expect_identical(
length(dtt_complete(hms::as_hms(c("23:59:59", "00:00:01")))),
86399L
)
expect_identical(
length(
dtt_complete(hms::as_hms(c("23:59:59", "00:00:01")), units = "hours")
),
24L
)
})
|
#' Add attribute
#'
#' Attach `attribute` to `var` under the name `name` and return the annotated
#' object (the caller's copy of `var` is untouched).
#'
#' @param var Any R object to annotate.
#' @param attribute The value to store in the attribute.
#' @param name A character string giving the attribute's name.
#' @return `var` with the extra attribute attached.
#' @export
add_attr = function(var, attribute, name){
  # Call the replacement function directly instead of assign-then-return.
  `attr<-`(var, name, attribute)
}
#' This is a generalisation of ifelse that accepts an object and returns an object
#'
#' Applies `.f1` to `.x` when `.p` is TRUE; otherwise applies `.f2` when one
#' was supplied, or returns `.x` unchanged.
#'
#' @import dplyr
#' @importFrom purrr as_mapper
#'
#' @param .x A tibble (or any object flowing through a pipe)
#' @param .p A boolean (scalar condition)
#' @param .f1 A function or purrr-style lambda applied when `.p` is TRUE
#' @param .f2 Optional function or purrr-style lambda applied when `.p` is FALSE
#'
#' @return A tibble (the result of the selected function, or `.x` itself)
ifelse_pipe = function(.x, .p, .f1, .f2 = NULL) {
  # Plain if/else replaces the original switch(sum(!(.p), 1), ...) arithmetic,
  # which encoded the same two-way branch in an obfuscated way.
  if (.p) {
    as_mapper(.f1)(.x)
  } else if (!is.null(.f2)) {
    as_mapper(.f2)(.x)
  } else {
    .x
  }
}
#' format_for_MPI
#'
#' @description Format reference data frame for MPI
#'
#' Assigns each gene (column `G`) round-robin to one of `shards` MPI workers,
#' then computes, within each shard, the start/end row ranges of every gene
#' plus per-shard row indexes for symbols and read counts.
#'
#' @param df A tibble
#' @param shards A integer
#' @param .sample A symbol
#'
#' @return `df` with idx_MPI, n, start, end, `symbol MPI row` and
#'   `read count MPI row` columns added.
format_for_MPI = function(df, shards, .sample) {
  .sample = enquo(.sample)
  df %>%
    # Round-robin gene -> shard assignment: repeat 1:shards enough times to
    # cover every distinct G, truncated to the exact gene count.
    left_join((.) %>%
                distinct(G) %>%
                arrange(G) %>%
                mutate(idx_MPI = head(
                  rep(1:shards, (.) %>% nrow %>% `/` (shards) %>% ceiling), n = (.) %>% nrow
                )),
              by = "G") %>%
    arrange(idx_MPI, G) %>%
    # Decide start - end location
    group_by(idx_MPI) %>%
    do(
      (.) %>%
        left_join(
          # Per-gene row counts within the shard give cumulative end indexes;
          # each start is the previous gene's end + 1 (first gene starts at 1).
          (.) %>%
            distinct(!!.sample, G) %>%
            arrange(G) %>%
            count(G) %>%
            mutate(end = cumsum(n)) %>%
            mutate(start = c(
              1, .$end %>% rev() %>% `[` (-1) %>% rev %>% `+` (1)
            )),
          by = "G"
        )
    ) %>%
    ungroup() %>%
    # Add symbol MPI rows indexes - otherwise spread below gives error
    left_join(
      (.) %>%
        group_by(idx_MPI) %>%
        distinct(G) %>%
        arrange(G) %>%
        mutate(`symbol MPI row` = 1:n()) %>%
        ungroup,
      by = c("G", "idx_MPI")
    ) %>%
    # Add counts MPI rows indexes
    group_by(idx_MPI) %>%
    arrange(G) %>%
    mutate(`read count MPI row` = 1:n()) %>%
    ungroup
}
#' add_partition
#'
#' @description Add partition column to data frame
#'
#' @param df.input A tibble
#' @param partition_by A symbol. Column we want to partition by
#' @param n_partitions An integer number of partition
add_partition = function(df.input, partition_by, n_partitions) {
  df.input %>%
    # left_join() is called without an explicit `by`, so it joins on all
    # shared columns (and prints a join message).
    left_join(
      (.) %>%
        select(!!partition_by) %>%
        distinct %>%
        mutate(
          # NOTE(review): length((.)) is the number of COLUMNS of the piped
          # one-column data frame (i.e. 1), so the division is a no-op;
          # nrow(.) was probably intended to scale row indexes into (0, 1]
          # before multiplying by n_partitions -- confirm before relying on
          # the partition sizes.
          partition = 1:n() %>%
            divide_by(length((.))) %>%
            # multiply_by(min(n_partitions, df.input %>% distinct(symbol) %>% nrow)) %>%
            multiply_by(n_partitions) %>%
            ceiling
        )
    )
}
#' Formula parser
#'
#' Extract the covariate names from a one-sided formula such as `~ a + b`.
#'
#' @param fm A formula with no response (left-hand side) term.
#'
#' @return A character vector of covariate names.
#'
#'
parse_formula <- function(fm) {
  fm_terms <- terms(fm)
  # A left-hand side is not allowed: this helper only handles "~ covariates".
  if (attr(fm_terms, "response") == 1) {
    stop("The formula must be of the kind \"~ covariates\" ")
  }
  # attr(..., "variables") is a call of the form list(a, b); deparsing it
  # yields c("list", "a", "b"), so drop the leading "list" token.
  vars <- as.character(attr(fm_terms, "variables"))
  vars[-1]
}
#' Get matrix from tibble
#'
#' Convert a tibble to a matrix, optionally using one column as row names.
#' Warns (without aborting) when non-numeric columns would force the matrix
#' to a character type.
#'
#' @import dplyr
#' @importFrom tidyr gather
#' @importFrom magrittr set_rownames
#'
#' @param tbl A tibble
#' @param rownames A character string of the rownames
#'
#' @return A matrix
as_matrix <- function(tbl, rownames = NULL) {
  tbl %>%
    ifelse_pipe(
      # Condition: ignoring the rownames column, are any remaining column
      # classes outside numeric/integer?
      # NOTE(review): contains(rownames) is a substring match, so a column
      # merely containing the rownames string would also be dropped here.
      tbl %>%
        ifelse_pipe(!is.null(rownames), ~ .x %>% dplyr::select(-contains(rownames))) %>%
        summarise_all(class) %>%
        gather(variable, class) %>%
        pull(class) %>%
        unique() %>%
        `%in%`(c("numeric", "integer")) %>% `!`() %>% any(),
      ~ {
        warning("to_matrix says: there are NON-numerical columns, the matrix will NOT be numerical")
        .x
      }
    ) %>%
    as.data.frame() %>%
    # Deal with rownames column if present
    ifelse_pipe(!is.null(rownames),
                ~ .x %>%
                  set_rownames(tbl %>% pull(!!rownames)) %>%
                  select(-!!rownames)) %>%
    # Convert to matrix
    as.matrix()
}
#' vb_iterative
#'
#' @description Runs variational Bayes repeatedly until it succeeds, giving up
#'   after five failed attempts (rstan::vb can fail stochastically).
#'
#' @importFrom rstan vb
#'
#' @param model A Stan model
#' @param output_samples An integer of how many samples from posteriors
#' @param iter An integer of how many max iterations
#' @param tol_rel_obj A real
#' @param additional_parameters_to_save A character vector
#' @param ... List of paramaters for vb function of Stan
#'
#' @return A Stan fit object, or NULL if every attempt failed
#'
vb_iterative = function(model,
                        output_samples,
                        iter,
                        tol_rel_obj,
                        additional_parameters_to_save,
                        ...) {
  res <- NULL
  attempt <- 0
  # FIX: the original condition `while (res %>% is.null | i > 5)` could never
  # stop retrying on persistent failure (and `i > 5` only ever ADDED
  # iterations). Retry while there is no result and fewer than 5 attempts.
  while (is.null(res) && attempt < 5) {
    res <- tryCatch(
      vb(
        model,
        output_samples = output_samples,
        iter = iter,
        tol_rel_obj = tol_rel_obj,
        # seed intentionally left unset so retries draw new initialisations
        pars = c("counts_rng", "exposure_rate", additional_parameters_to_save),
        ...
      ),
      error = function(e) {
        # Log the failure and fall through to another attempt.
        writeLines(sprintf("Further attempt with Variational Bayes: %s", e))
        NULL
      }
    )
    attempt <- attempt + 1
  }
  res
}
#' Choose the number of chains based on how many draws we need from the
#' posterior distribution. Because there is a fixed cost (warmup) to starting
#' a new chain, we use the minimum amount that we can parallelise.
#'
#' @param how_many_posterior_draws A real number of posterior draws needed
#' @param max_number_to_check A sane upper plateau
#'
#' @return The chain count(s) minimising total cost (usually length 1)
find_optimal_number_of_chains = function(how_many_posterior_draws,
                                         max_number_to_check = 100) {
  candidate_chains = 2:max_number_to_check
  # Total cost per candidate: the draws are spread across chains, but each
  # chain pays 150 warmup iterations up front.
  total_cost = how_many_posterior_draws / candidate_chains + 150 * candidate_chains
  candidate_chains[total_cost == min(total_cost)]
}
#' Build the matrix of outlier data points to exclude, one row per shard
#'
#' (The previous title here was copied from another function by mistake.)
#' For every MPI shard, collect the `read count MPI row` indices of the
#' outlier (sample, gene) pairs, pad with zeros, and prepend a count row so
#' the Stan MPI code knows how many entries of each shard are valid.
#'
#' @importFrom tibble rowid_to_column
#' @importFrom purrr map
#'
#' @param counts_MPI A matrix of read count information
#' @param to_exclude A vector of oulier data points to exclude
#' @param shards An integer
#'
#' @return A matrix
get_outlier_data_to_exlude = function(counts_MPI, to_exclude, shards) {
  # If there are genes to exclude (switch index 1); otherwise a zero matrix
  # with one entry per shard (switch index 2)
  switch(
    to_exclude %>% nrow %>% `>` (0) %>% `!` %>% sum(1),
    foreach(s = 1:shards, .combine = full_join) %do% {
      counts_MPI %>%
        inner_join(to_exclude, by = c("S", "G")) %>%
        filter(idx_MPI == s) %>%
        distinct(idx_MPI, `read count MPI row`) %>%
        rowid_to_column %>%
        spread(idx_MPI, `read count MPI row`) %>%
        # If a shard is empty create a dummy data set to avoid error
        ifelse_pipe((.) %>% nrow == 0, ~ tibble(rowid = 1,!!as.symbol(s) := NA))
    } %>%
      # Anonymous function - Add length array to the first row for indexing in MPI
      # (the first row holds, per shard, the number of non-NA entries below)
      # Input: tibble
      # Output: tibble
      {
        bind_rows((.) %>% map(function(x)
          x %>% is.na %>% `!` %>% as.numeric %>% sum) %>% unlist,
          (.))
      } %>%
      select(-rowid) %>%
      replace(is.na(.), 0 %>% as.integer) %>%
      as_matrix() %>% t,
    # Otherwise
    matrix(rep(0, shards))
  )
}
#' function to pass initialisation values
#'
#' Builds a named list of initialisation vectors, one entry per Stan
#' parameter (excluding counts_rng).
#'
#' NOTE(review): this function reads `res_discovery` from its enclosing /
#' calling environment rather than taking it as an argument — confirm it is
#' always defined where inits_fx is invoked.
#'
#' @return A list
inits_fx =
  function () {
    pars =
      res_discovery %>%
      filter(`.variable` != "counts_rng") %>%
      distinct(`.variable`) %>%
      pull(1)
    foreach(
      par = pars,
      .final = function(x)
        setNames(x, pars)
    ) %do% {
      res_discovery %>%
        filter(`.variable` == par) %>%
        # The rnorm draw below is immediately overwritten by 0, so all
        # initialisations are zero; presumably the random line is kept for
        # easy switching — TODO confirm which behaviour is intended.
        mutate(init = rnorm(n(), mean, sd)) %>%
        mutate(init = 0) %>%
        select(`.variable`, S, G, init) %>%
        pull(init)
    }
  }
#' Produce generated quantities plots with marked outliers
#'
#' Plots, for one transcript, the observed abundance per sample with two
#' sets of credible-interval error bars (discovery and test runs) and
#' highlights deleterious outliers in red.
#'
#' @importFrom purrr pmap
#' @importFrom purrr map_int
#' @import ggplot2
#'
#' @param .x A tibble of sample-wise data for one transcript
#' @param symbol A symbol object (the transcript name, used as plot title)
#' @param .abundance A character string: name of the abundance column
#' @param .sample A character string: name of the sample column
#' @param covariate A character string, or NULL for no fill aesthetic
#'
#' @return A ggplot
produce_plots = function(.x,
                         symbol,
                         .abundance,
                         .sample,
                         covariate) {
  # Set plot theme
  my_theme =
    theme_bw() +
    theme(
      panel.border = element_blank(),
      axis.line = element_line(),
      panel.grid.major = element_line(size = 0.2),
      panel.grid.minor = element_line(size = 0.1),
      text = element_text(size = 12),
      aspect.ratio = 1,
      axis.text.x = element_text(
        angle = 90,
        hjust = 1,
        vjust = 0.5
      ),
      strip.background = element_blank(),
      axis.title.x = element_text(margin = margin(
        t = 10,
        r = 10,
        b = 10,
        l = 10
      )),
      axis.title.y = element_text(margin = margin(
        t = 10,
        r = 10,
        b = 10,
        l = 10
      ))
    )
  # Base plot: dashed grey bars for the first (.lower/.upper) interval,
  # solid bars for the second (_2) interval coloured by outlier status.
  {
    ggplot(data = .x, aes(
      y = !!as.symbol(.abundance),
      x = !!as.symbol(.sample)
    )) +
      geom_errorbar(
        aes(ymin = `.lower`,
            ymax = `.upper`),
        width = 0,
        linetype = "dashed",
        color = "#D3D3D3"
      ) +
      geom_errorbar(aes(
        ymin = `.lower_2`,
        ymax = `.upper_2`,
        color = `deleterious outliers`
      ),
      width = 0) +
      scale_colour_manual(values = c("TRUE" = "red", "FALSE" = "black")) +
      my_theme
  } %>%
    # Point layer: fill by the covariate when one is supplied, plain black
    # otherwise; point size encodes the exposure rate in both cases.
    ifelse_pipe(
      covariate %>% is.null %>% `!`,
      ~ .x + geom_point(aes(
        size = `exposure rate`, fill = !!as.symbol(covariate)
      ), shape = 21),
      ~ .x + geom_point(
        aes(size = `exposure rate`),
        shape = 21,
        fill = "black"
      )
    ) +
    ggtitle(symbol)
}
# Add annotation if sample belongs to high or low group.
# When the design matrix X has a covariate column (ncol > 1), each sample is
# labelled as above/below the covariate mean, and an outlier is flagged as
# "deleterious" when it fails the posterior predictive check in the same
# direction as its group. With an intercept-only X the data is returned
# unchanged (ifelse_pipe with no second branch).
add_deleterious_if_covariate_exists = function(.data, X){
  .data %>%
    ifelse_pipe(
      X %>% ncol %>% `>` (1),
      ~ .x %>%
        left_join(
          # Second column of X is assumed to be the covariate of interest;
          # row order of X is assumed to match the sample index S.
          X %>%
            as_tibble %>%
            select(2) %>%
            setNames("factor or interest") %>%
            mutate(S = 1:n()) %>%
            mutate(`is group high` = `factor or interest` > mean(`factor or interest`)),
          by = "S"
        ) %>%
        # Check if outlier might be deleterious for the statistics
        mutate(`deleterious outliers` = (!ppc) &
                 (`is higher than mean` == `is group high`))
    )
}
#' merge_results
#'
#' Combines the discovery and test runs into one transcript-wise tibble:
#' joins the two sets of credible intervals by (S, G), nests the sample-wise
#' data per transcript, builds one diagnostic plot per transcript and adds
#' summary outlier counts.
#'
#' @importFrom tidyr nest
#'
#' @param res_discovery A tibble from the discovery (first) run
#' @param res_test A tibble from the test (second) run
#' @param formula A formula
#' @param .sample A column name (quosure)
#' @param .transcript A column name (quosure)
#' @param .abundance A column name (quosure)
#' @param do_check_only_on_detrimental A boolean
#'
#' @export
merge_results = function(res_discovery, res_test, formula, .transcript, .abundance, .sample, do_check_only_on_detrimental){
  res_discovery %>%
    filter(`.variable` == "counts_rng") %>%
    select(
      S,
      G,
      !!.transcript,
      !!.abundance,
      !!.sample,
      mean,
      `.lower`,
      `.upper`,
      `exposure rate`,
      one_of(parse_formula(formula))
    ) %>%
    # Attach results of tests; the test-run columns are suffixed with _2
    left_join(
      res_test %>% filter(`.variable` == "counts_rng") %>%
        select(
          S,
          G,
          mean,
          `.lower`,
          `.upper`,
          ppc,
          one_of(c("generated quantities", "deleterious outliers"))
        ) %>%
        rename(mean_2 = mean, `.lower_2` = `.lower`, `.upper_2` = `.upper`),
      by = c("S", "G")
    ) %>%
    # Check if new package is installed with different syntax (tidyr >= 1.0
    # changed the nest() interface)
    ifelse_pipe(
      packageVersion("tidyr") == "0.8.3.9000",
      ~ .x %>% nest(`sample wise data` = c(-!!.transcript)),
      ~ .x %>%
        group_by(!!.transcript) %>%
        nest(-!!.transcript, .key = `sample wise data`)
    ) %>%
    # Create plots for every tested transcript
    mutate(plot =
             pmap(
               list(
                 `sample wise data`,
                 !!.transcript,
                 # nested data for plot
                 quo_name(.abundance),
                 # name of value column
                 quo_name(.sample),
                 # name of sample column
                 parse_formula(formula)[1] # main covariate
               ),
               ~ produce_plots(..1, ..2, ..3, ..4, ..5)
             )) %>%
    # Add summary statistics
    mutate(`ppc samples failed` = map_int(`sample wise data`, ~ .x %>% pull(ppc) %>% `!` %>% sum)) %>%
    # If deleterious detection add summary as well
    ifelse_pipe(
      do_check_only_on_detrimental,
      ~ .x %>%
        mutate(
          `tot deleterious outliers` =
            map_int(`sample wise data`, ~ .x %>% pull(`deleterious outliers`) %>% sum)
        )
    )
}
#' Select only significant genes plus background for efficient normalisation
#'
#' Keeps (1) every transcript flagged by `.do_check` and (2) a set of
#' negative-control transcripts: among the non-flagged ones, the
#' `how_many_negative_controls` with the largest `.significance` values
#' (i.e. the least significant / least changing, after ascending sort +
#' tail).
#'
#' @importFrom rstan sampling
#' @importFrom rstan vb
#'
#' @param .data A tibble
#' @param .do_check A quosure for the boolean selection column
#' @param .significance A quosure for the significance column
#' @param .transcript A quosure for the transcript-name column
#' @param how_many_negative_controls An integer
#'
select_to_check_and_house_keeping = function(.data, .do_check, .significance, .transcript, how_many_negative_controls = 500){
  .data %>%
    {
      bind_rows(
        # Genes to check
        (.) %>%
          filter((!!.do_check)),
        # Least changing genes, negative controls
        (.) %>%
          filter((!!.do_check) %>% `!`) %>%
          inner_join(
            (.) %>%
              arrange(!!.significance) %>%
              select(!!.transcript) %>%
              distinct() %>%
              tail(how_many_negative_controls),
            by = quo_name(.transcript)
          )
      )
    }
}
#' add_exposure_rate
#'
#' Joins the posterior mean of each sample's exposure_rate (from the Stan
#' fit) onto `.data` by the sample index S.
#'
#' @importFrom tidyr separate
#'
#' @param .data A data frame with an integer sample-index column S
#' @param fit A Stan fit object containing the "exposure_rate" parameter
#'
add_exposure_rate = function(.data, fit){
  writeLines(sprintf("executing %s", "add_exposure_rate"))
  .data %>%
    left_join(
      fit %>%
        summary("exposure_rate") %$%
        summary %>%
        as_tibble(rownames = ".variable") %>%
        # Split "exposure_rate[s]" into the parameter name and the sample
        # index s
        separate(
          .variable,
          c(".variable", "S"),
          sep = "[\\[,\\]]",
          extra = "drop"
        ) %>%
        mutate(S = S %>% as.integer) %>%
        rename(`exposure rate` = mean) %>%
        select(S, `exposure rate`),
      by = "S"
    )
}
# Posterior predictive check: for each checked (sample, gene) data point,
# flag whether the observed abundance falls inside the posterior credible
# interval (.lower, .upper), and for failures record the direction (above
# the posterior mean or not).
check_if_within_posterior = function(.data, my_df, .do_check, .abundance){
  writeLines(sprintf("executing %s", "check_if_within_posterior"))
  .data %>%
    left_join(my_df, by = c("S", "G")) %>%
    filter((!!.do_check)) %>% # Filter only DE genes
    rowwise() %>%
    mutate(`ppc` = !!.abundance %>% between(`.lower`, `.upper`)) %>%
    mutate(`is higher than mean` = (!`ppc`) &
             (!!.abundance > mean)) %>%
    ungroup
}
#' fit_to_counts_rng
#'
#' Summarises the posterior predictive distribution "counts_rng" of a Stan
#' fit into a tibble with one row per (sample S, gene G) and credible-
#' interval bounds at the requested tail probability.
#'
#' @importFrom tidyr separate
#' @importFrom tidyr nest
#' @importFrom rstan summary
#'
#' @param fit A fit object
#' @param adj_prob_theshold A real: lower-tail probability for the interval
#'
fit_to_counts_rng = function(fit, adj_prob_theshold){
  writeLines(sprintf("executing %s", "fit_to_counts_rng"))
  fit %>%
    rstan::summary("counts_rng",
                   prob = c(adj_prob_theshold, 1 - adj_prob_theshold)) %$%
    summary %>%
    as_tibble(rownames = ".variable") %>%
    # Split "counts_rng[s,g]" into parameter name, sample and gene indices
    separate(.variable,
             c(".variable", "S", "G"),
             sep = "[\\[,\\]]",
             extra = "drop") %>%
    mutate(S = S %>% as.integer, G = G %>% as.integer) %>%
    select(-one_of(c("n_eff", "Rhat", "khat"))) %>%
    # The last two remaining columns are the requested quantiles
    rename(`.lower` = (.) %>% ncol - 1,
           `.upper` = (.) %>% ncol)
}
#' fit_to_counts_rng_approximated
#'
#' Approximates the posterior predictive interval of counts without storing
#' counts_rng draws: it resamples the posterior draws of mu, sigma and the
#' sample-wise exposure rate, generates negative-binomial draws from them,
#' and summarises those per (S, G).
#'
#' @importFrom tidyr separate
#' @importFrom tidyr nest
#' @importFrom tidyr unnest
#' @importFrom rstan summary
#' @importFrom furrr future_map
#' @importFrom future plan
#' @importFrom future multiprocess
#'
#' @param fit A fit object
#' @param adj_prob_theshold A real: lower-tail probability for the interval
#' @param how_many_posterior_draws An integer
#' @param truncation_compensation A real scaling of the NB size parameter
#' @param do_correct_approx A boolean
#' @param cores An integer
#'
#' @export
fit_to_counts_rng_approximated = function(fit, adj_prob_theshold, how_many_posterior_draws, truncation_compensation, cores){
  writeLines(sprintf("executing %s", "fit_to_counts_rng_approximated"))
  # Long-format posterior draws of the log-mean, one row per (.draw, S, G)
  draws_mu =
    fit %>% extract("lambda_log_param") %>% `[[` (1) %>% as.data.frame() %>% setNames(sprintf("mu.%s", colnames(.))) %>%
    as_tibble() %>% mutate(.draw = 1:n()) %>% gather(par, mu, -.draw) %>% separate(par, c("par", "S", "G"), sep="\\.") %>% select(-par)
  # Gene-wise overdispersion draws, one row per (.draw, G)
  draws_sigma =
    fit %>% extract("sigma_raw") %>% `[[` (1) %>% as.data.frame() %>% setNames(sprintf("sigma.%s", colnames(.) %>% gsub("V", "", .))) %>%
    as_tibble() %>% mutate(.draw = 1:n()) %>% gather(par, sigma, -.draw) %>% separate(par, c("par", "G"), sep="\\.") %>% select(-par)
  # Sample-wise exposure-rate draws, one row per (.draw, S)
  draws_exposure =
    fit %>% extract("exposure_rate") %>% `[[` (1) %>% as.data.frame() %>% setNames(sprintf("exposure.%s", colnames(.) %>% gsub("V", "", .))) %>%
    as_tibble() %>% mutate(.draw = 1:n()) %>% gather(par, exposure, -.draw) %>% separate(par, c("par", "S"), sep="\\.") %>% select(-par)
  draws_mu %>%
    left_join(draws_sigma) %>%
    left_join(draws_exposure) %>%
    nest(data = -c(S, G)) %>%
    mutate(
      CI = map(
        data,
        ~ {
          # Oversample the joint posterior draws, then draw NB counts from
          # each (mu, sigma, exposure) triplet
          .x_supersampled = .x %>% sample_n(how_many_posterior_draws, replace = T)
          draws = rnbinom(n =how_many_posterior_draws, mu = exp(.x_supersampled$mu + .x_supersampled$exposure), size = 1/exp(.x_supersampled$sigma) * truncation_compensation )
          draws %>%
            # Process quantile
            quantile(c(adj_prob_theshold, 1 - adj_prob_theshold)) %>%
            tibble::as_tibble(rownames="prop") %>%
            tidyr::spread(prop, value) %>%
            setNames(c(".lower", ".upper")) %>%
            # Add mean and sd
            dplyr::mutate(mean = mean(draws), sd = sd(draws))
        }
      )
    ) %>%
    select(-data) %>%
    unnest(CI) %>%
    # Adapt to old dataset
    mutate(.variable = "counts_rng") %>%
    mutate(S = as.integer(S), G = as.integer(G))
}
# Optionally attach the full posterior draws of counts_rng to the data,
# nested in a `generated quantities` list-column. When
# save_generated_quantities is FALSE the data is returned unchanged.
save_generated_quantities_in_case = function(.data, fit, save_generated_quantities){
  writeLines(sprintf("executing %s", "save_generated_quantities_in_case"))
  .data %>%
    ifelse_pipe(
      save_generated_quantities,
      ~ .x %>%
        # Add generated quantities
        left_join(fit %>% tidybayes::gather_draws(counts_rng[S, G])) %>%
        # Nest them in the data frame
        nest(`generated quantities` = c(.chain, .iteration, .draw, .value ))
    )
}
# Assert that all the user-supplied column names exist in the input tibble;
# stops with the list of missing columns otherwise.
check_columns_exist = function(.data, .sample, .transcript, .abundance, .significance, .do_check){
  # Capture the unevaluated column names as strings
  required_columns = c(
    quo_name(enquo(.sample)),
    quo_name(enquo(.transcript)),
    quo_name(enquo(.abundance)),
    quo_name(enquo(.significance)),
    quo_name(enquo(.do_check))
  )
  missing_columns = required_columns[!required_columns %in% colnames(.data)]
  if (length(missing_columns) > 0)
    stop(
      sprintf(
        "The columns %s are not present in your tibble",
        paste(missing_columns, collapse=" ")
      )
    )
}
#' Check if NA
#'
#' Stops with an informative error when any of the key columns or formula
#' covariates contain NA values.
#'
#' @importFrom tidyr drop_na
#' @importFrom dplyr enquo
#' @importFrom dplyr all_of
#'
#' @param .data A tibble including a gene name column | sample name column | read counts column | covariates column
#' @param .sample A column name
#' @param .transcript A column name
#' @param .abundance A column name
#' @param .significance A column name
#' @param .do_check A column name
#' @param formula_columns A character vector of covariate column names
#'
check_if_any_NA = function(.data, .sample, .transcript, .abundance, .significance, .do_check, formula_columns){
  # Prepare column same enquo
  .sample = enquo(.sample)
  .transcript = enquo(.transcript)
  .abundance = enquo(.abundance)
  .significance = enquo(.significance)
  .do_check = enquo(.do_check)
  columns = c(quo_name(.sample), quo_name(.transcript), quo_name(.abundance), quo_name(.significance), quo_name(.do_check), formula_columns)
  # Wrap the external character vector in all_of(): bare vectors in
  # tidyselect selections are deprecated and could silently match a column
  # literally named "columns" instead of the intended names.
  complete_rows = .data %>% drop_na(all_of(columns)) %>% nrow
  if (complete_rows < nrow(.data))
    stop(sprintf("There are NA values in your tibble for at least one of the columns %s", paste(columns, collapse=", ")))
}
# Number of worker cores to use: all available cores minus one (kept free
# for the main process), never fewer than 1.
# Uses parallel::detectCores(), which is portable across unix/windows/mac —
# unlike shelling out to `nproc`, which is absent e.g. on macOS.
detect_cores = function(){
  cores = parallel::detectCores()
  if (is.na(cores)) stop("Your platform type is not recognised")
  max(cores - 1L, 1L)
}
#' Create the design matrix
#'
#' Builds the model matrix for `formula` from the distinct, sample-ordered
#' covariate values in `.data`.
#'
#' @param .data A tibble
#' @param formula A formula
#' @param .sample A symbol
#' @export
create_design_matrix = function(.data, formula, .sample){
  .sample = enquo(.sample)
  # One row of covariates per sample, ordered by sample, so that design
  # matrix rows line up with the sample indices used elsewhere.
  covariate_data =
    .data %>%
    select(!!.sample, one_of(parse_formula(formula))) %>%
    distinct %>%
    arrange(!!.sample)
  model.matrix(object = formula, data = covariate_data)
}
#' Format the input
#'
#' Subsets the input to the transcripts of interest plus negative controls,
#' keeps only the needed columns, and adds integer indices: G per transcript
#' and S per sample (both used as Stan array indices).
#'
#' @param .data A tibble including a gene name column | sample name column | read counts column | covariates column
#' @param formula A formula
#' @param .sample A column name
#' @param .transcript A column name
#' @param .abundance A column name
#' @param .do_check A symbol
#' @param .significance A column name
#' @param how_many_negative_controls An integer
#'
#' @export
format_input = function(.data, formula, .sample, .transcript, .abundance, .do_check, .significance, how_many_negative_controls = 500){
  # Prepare column same enquo
  .sample = enquo(.sample)
  .transcript = enquo(.transcript)
  .abundance = enquo(.abundance)
  .do_check = enquo(.do_check)
  .significance = enquo(.significance)
  .data %>%
    # Select only significant genes plus background for efficient normalisation
    select_to_check_and_house_keeping(.do_check, .significance, .transcript, how_many_negative_controls) %>%
    # Prepare the data frame
    select(
      !!.transcript,
      !!.sample,
      !!.abundance,
      one_of(parse_formula(formula)),
      !!.do_check
    ) %>%
    distinct() %>%
    # Add symbol idx (G = 1..n over distinct transcripts)
    left_join((.) %>%
                distinct(!!.transcript) %>%
                mutate(G = 1:n()),
              by = quo_name(.transcript)) %>%
    # Add sample indeces (S = 1..n in order of first appearance)
    mutate(S = factor(
      !!.sample,
      levels = (.) %>% pull(!!.sample) %>% unique
    ) %>% as.integer)
}
# Dispatch the inference backend: variational Bayes (with retries) when
# approximate_posterior_inference is TRUE (switch index 1), full MCMC
# sampling otherwise (switch index 2).
# NOTE(review): `pars` here is forwarded to vb_iterative through `...`,
# while vb_iterative also sets `pars` internally — verify this does not
# produce a duplicated-argument error in rstan::vb.
run_model = function(model, approximate_posterior_inference, chains, how_many_posterior_draws, inits_fx, tol_rel_obj, additional_parameters_to_save, seed){
  writeLines(sprintf("executing %s", "run_model"))
  switch(
    # TRUE -> 1 (VB), FALSE -> 2 (MCMC)
    approximate_posterior_inference %>% `!` %>% as.integer %>% sum(1),
    # VB Repeat strategy for failures of vb
    vb_iterative(
      model,
      #pcc_seq_model, #
      output_samples = how_many_posterior_draws,
      iter = 50000,
      tol_rel_obj = tol_rel_obj,
      pars = c(
        "counts_rng",
        "exposure_rate",
        additional_parameters_to_save
      )
      #,
      #sample_file = "temp_stan_sampling.txt"
    ),
    # MCMC: draws are split across chains, plus 150 warmup iterations each
    sampling(
      model,
      #pcc_seq_model, #
      chains = chains,
      cores = chains,
      iter = (how_many_posterior_draws / chains) %>% ceiling %>% sum(150),
      warmup = 150,
      save_warmup = FALSE,
      seed = seed,
      init = inits_fx,
      pars = c(
        "counts_rng",
        "exposure_rate",
        additional_parameters_to_save
      )
    )
  )
}
| /R/utilities.R | no_license | shulp2211/ppcseq | R | false | false | 21,734 | r | #' Add attribute
#'
#' @param var The object to annotate
#' @param attribute The value to store in the attribute
#' @param name A character: the attribute's name
#' @export
add_attr = function(var, attribute, name) {
  # Set attribute `name` on `var` via the replacement function and return it.
  `attr<-`(var, name, attribute)
}
#' This is a generalisation of ifelse that accepts an object and returns an object
#'
#' @import dplyr
#' @importFrom purrr as_mapper
#'
#' @param .x An object (typically a tibble) to transform
#' @param .p A boolean predicate deciding which branch is applied
#' @param .f1 A function or purrr-style lambda applied when .p is TRUE
#' @param .f2 An optional function applied when .p is FALSE
#'
#' @return The (possibly transformed) object
ifelse_pipe = function(.x, .p, .f1, .f2 = NULL) {
  # Branch index: 1 when the predicate holds, 2 otherwise. The sum-based
  # arithmetic mirrors the original dispatch, so non-scalar predicates
  # collapse the same way they always did.
  branch = sum(!.p, 1)
  switch(
    branch,
    as_mapper(.f1)(.x),
    if (!is.null(.f2)) as_mapper(.f2)(.x) else .x
  )
}
#' format_for_MPI
#'
#' @description Format reference data frame for MPI: assigns each gene to a
#'   shard (idx_MPI), computes per-gene start/end row positions within its
#'   shard, and adds per-shard row indices for symbols and read counts.
#'
#' @param df A tibble with a gene index column G
#' @param shards A integer: number of MPI shards
#' @param .sample A symbol: the sample column
#'
format_for_MPI = function(df, shards, .sample) {
  .sample = enquo(.sample)
  df %>%
    # Assign genes to shards round-robin over the sorted gene index
    left_join((.) %>%
                distinct(G) %>%
                arrange(G) %>%
                mutate(idx_MPI = head(
                  rep(1:shards, (.) %>% nrow %>% `/` (shards) %>% ceiling), n = (.) %>% nrow
                )),
              by = "G") %>%
    arrange(idx_MPI, G) %>%
    # Decide start - end location of each gene's rows inside its shard
    group_by(idx_MPI) %>%
    do(
      (.) %>%
        left_join(
          (.) %>%
            distinct(!!.sample, G) %>%
            arrange(G) %>%
            count(G) %>%
            mutate(end = cumsum(n)) %>%
            mutate(start = c(
              1, .$end %>% rev() %>% `[` (-1) %>% rev %>% `+` (1)
            )),
          by = "G"
        )
    ) %>%
    ungroup() %>%
    # Add symbol MPI rows indexes - otherwise spread below gives error
    left_join(
      (.) %>%
        group_by(idx_MPI) %>%
        distinct(G) %>%
        arrange(G) %>%
        mutate(`symbol MPI row` = 1:n()) %>%
        ungroup,
      by = c("G", "idx_MPI")
    ) %>%
    # Add counts MPI rows indexes
    group_by(idx_MPI) %>%
    arrange(G) %>%
    mutate(`read count MPI row` = 1:n()) %>%
    ungroup
}
#' add_partition
#'
#' @description Add a partition column to a data frame, splitting the
#'   distinct values of `partition_by` into `n_partitions` roughly equal
#'   contiguous groups.
#'
#' @param df.input A tibble
#' @param partition_by A symbol. Column we want to partition by
#' @param n_partitions An integer number of partition
add_partition = function(df.input, partition_by, n_partitions) {
  df.input %>%
    left_join(
      (.) %>%
        select(!!partition_by) %>%
        distinct %>%
        mutate(
          # In the nested pipe below, `.` binds to 1:n(), so length((.)) is
          # the number of distinct values — presumably mapping rank/n into
          # 1..n_partitions via ceiling; TODO confirm against callers.
          partition = 1:n() %>%
            divide_by(length((.))) %>%
            # multiply_by(min(n_partitions, df.input %>% distinct(symbol) %>% nrow)) %>%
            multiply_by(n_partitions) %>%
            ceiling
        )
    )
}
#' Extract covariate names from a one-sided formula
#'
#' @param fm A formula of the form `~ covariates` (no response term).
#'
#' @return A character vector of covariate names.
#'
#'
parse_formula <- function(fm) {
  fm_terms <- terms(fm)
  # A response term (e.g. "y ~ x") is not allowed: only covariates may appear.
  if (attr(fm_terms, "response") == 1)
    stop("The formula must be of the kind \"~ covariates\" ")
  else
    as.character(attr(fm_terms, "variables"))[-1]
}
#' Get matrix from tibble
#'
#' @import dplyr
#' @importFrom tidyr gather
#' @importFrom magrittr set_rownames
#'
#' @param tbl A tibble
#' @param rownames A character string of the rownames
#'
#' @return A matrix
as_matrix <- function(tbl, rownames = NULL) {
tbl %>%
ifelse_pipe(
tbl %>%
ifelse_pipe(!is.null(rownames), ~ .x %>% dplyr::select(-contains(rownames))) %>%
summarise_all(class) %>%
gather(variable, class) %>%
pull(class) %>%
unique() %>%
`%in%`(c("numeric", "integer")) %>% `!`() %>% any(),
~ {
warning("to_matrix says: there are NON-numerical columns, the matrix will NOT be numerical")
.x
}
) %>%
as.data.frame() %>%
# Deal with rownames column if present
ifelse_pipe(!is.null(rownames),
~ .x %>%
set_rownames(tbl %>% pull(!!rownames)) %>%
select(-!!rownames)) %>%
# Convert to matrix
as.matrix()
}
#' vb_iterative
#'
#' @description Runs variational Bayes repeatedly until it succeeds, up to a
#'   fixed maximum number of attempts (vb can fail stochastically, e.g. on an
#'   unlucky initialisation).
#'
#' @importFrom rstan vb
#'
#' @param model A Stan model
#' @param output_samples An integer of how many samples from posteriors
#' @param iter An integer of how many max iterations
#' @param tol_rel_obj A real
#' @param additional_parameters_to_save A character vector
#' @param ... Further parameters for the vb function of Stan; a `pars` entry
#'   supplied here overrides the default parameter selection
#'
#' @return A Stan fit object, or NULL if all attempts failed
#'
vb_iterative = function(model,
                        output_samples,
                        iter,
                        tol_rel_obj,
                        additional_parameters_to_save,
                        ...) {
  # Fixes two defects of the previous implementation:
  # 1. the retry counter was incremented inside the tryCatch error handler,
  #    where the assignment was local to the handler function, so on
  #    persistent failure the while loop never terminated (and `| i > 5`
  #    pointed the wrong way anyway);
  # 2. `pars` was hard-coded while also being forwardable through `...`,
  #    which made vb() fail with a duplicated-argument error when callers
  #    supplied it.
  dots = list(...)
  if (is.null(dots$pars))
    dots$pars = c("counts_rng", "exposure_rate", additional_parameters_to_save)
  res = NULL
  attempts = 0
  max_attempts = 5
  while (is.null(res) && attempts < max_attempts) {
    attempts = attempts + 1
    res = tryCatch(
      do.call(vb, c(
        list(
          model,
          output_samples = output_samples,
          iter = iter,
          tol_rel_obj = tol_rel_obj
        ),
        dots
      )),
      error = function(e) {
        writeLines(sprintf("Further attempt with Variational Bayes: %s", e))
        NULL
      }
    )
  }
  res
}
#' Choose the number of chains based on how many draws we need from the
#' posterior distribution. Because there is a fixed cost (warmup) to starting
#' a new chain, we use the minimum amount that we can parallelise.
#'
#' @param how_many_posterior_draws A real number of posterior draws needed
#' @param max_number_to_check A sane upper plateau
#'
#' @return The chain count(s) minimising total cost (usually length 1)
find_optimal_number_of_chains = function(how_many_posterior_draws,
                                         max_number_to_check = 100) {
  candidate_chains = 2:max_number_to_check
  # Total cost per candidate: the draws are spread across chains, but each
  # chain pays 150 warmup iterations up front.
  total_cost = how_many_posterior_draws / candidate_chains + 150 * candidate_chains
  candidate_chains[total_cost == min(total_cost)]
}
#' Identify the optimal number of chain
#' based on how many draws we need from the posterior
#'
#' @importFrom tibble rowid_to_column
#' @importFrom purrr map
#'
#' @param counts_MPI A matrix of read count information
#' @param to_exclude A vector of oulier data points to exclude
#' @param shards An integer
#'
#' @return A matrix
get_outlier_data_to_exlude = function(counts_MPI, to_exclude, shards) {
# If there are genes to exclude
switch(
to_exclude %>% nrow %>% `>` (0) %>% `!` %>% sum(1),
foreach(s = 1:shards, .combine = full_join) %do% {
counts_MPI %>%
inner_join(to_exclude, by = c("S", "G")) %>%
filter(idx_MPI == s) %>%
distinct(idx_MPI, `read count MPI row`) %>%
rowid_to_column %>%
spread(idx_MPI, `read count MPI row`) %>%
# If a shard is empty create a dummy data set to avoid error
ifelse_pipe((.) %>% nrow == 0, ~ tibble(rowid = 1,!!as.symbol(s) := NA))
} %>%
# Anonymous function - Add length array to the first row for indexing in MPI
# Input: tibble
# Output: tibble
{
bind_rows((.) %>% map(function(x)
x %>% is.na %>% `!` %>% as.numeric %>% sum) %>% unlist,
(.))
} %>%
select(-rowid) %>%
replace(is.na(.), 0 %>% as.integer) %>%
as_matrix() %>% t,
# Otherwise
matrix(rep(0, shards))
)
}
#' function to pass initialisation values
#'
#' @return A list
inits_fx =
function () {
pars =
res_discovery %>%
filter(`.variable` != "counts_rng") %>%
distinct(`.variable`) %>%
pull(1)
foreach(
par = pars,
.final = function(x)
setNames(x, pars)
) %do% {
res_discovery %>%
filter(`.variable` == par) %>%
mutate(init = rnorm(n(), mean, sd)) %>%
mutate(init = 0) %>%
select(`.variable`, S, G, init) %>%
pull(init)
}
}
#' Produce generated quantities plots with marked uotliers
#'
#' @importFrom purrr pmap
#' @importFrom purrr map_int
#' @import ggplot2
#'
#' @param .x A tibble
#' @param symbol A symbol object
#' @param .abundance A symbol object
#' @param .sample A symbol object
#' @param covariate A character string
#'
#' @return A ggplot
produce_plots = function(.x,
symbol,
.abundance,
.sample,
covariate) {
# Set plot theme
my_theme =
theme_bw() +
theme(
panel.border = element_blank(),
axis.line = element_line(),
panel.grid.major = element_line(size = 0.2),
panel.grid.minor = element_line(size = 0.1),
text = element_text(size = 12),
aspect.ratio = 1,
axis.text.x = element_text(
angle = 90,
hjust = 1,
vjust = 0.5
),
strip.background = element_blank(),
axis.title.x = element_text(margin = margin(
t = 10,
r = 10,
b = 10,
l = 10
)),
axis.title.y = element_text(margin = margin(
t = 10,
r = 10,
b = 10,
l = 10
))
)
{
ggplot(data = .x, aes(
y = !!as.symbol(.abundance),
x = !!as.symbol(.sample)
)) +
geom_errorbar(
aes(ymin = `.lower`,
ymax = `.upper`),
width = 0,
linetype = "dashed",
color = "#D3D3D3"
) +
geom_errorbar(aes(
ymin = `.lower_2`,
ymax = `.upper_2`,
color = `deleterious outliers`
),
width = 0) +
scale_colour_manual(values = c("TRUE" = "red", "FALSE" = "black")) +
my_theme
} %>%
ifelse_pipe(
covariate %>% is.null %>% `!`,
~ .x + geom_point(aes(
size = `exposure rate`, fill = !!as.symbol(covariate)
), shape = 21),
~ .x + geom_point(
aes(size = `exposure rate`),
shape = 21,
fill = "black"
)
) +
ggtitle(symbol)
}
# Add annotation if sample belongs to high or low group
add_deleterious_if_covariate_exists = function(.data, X){
.data %>%
ifelse_pipe(
X %>% ncol %>% `>` (1),
~ .x %>%
left_join(
X %>%
as_tibble %>%
select(2) %>%
setNames("factor or interest") %>%
mutate(S = 1:n()) %>%
mutate(`is group high` = `factor or interest` > mean(`factor or interest`)),
by = "S"
) %>%
# Check if outlier might be deleterious for the statistics
mutate(`deleterious outliers` = (!ppc) &
(`is higher than mean` == `is group high`))
)
}
#' merge_results
#'
#' @importFrom tidyr nest
#'
#' @param res_discovery A tibble
#' @param res_test A tibble
#' @param formula A formula
#' @param .sample A column name
#' @param .transcript A column name
#' @param .abundance A column name
#' @param do_check_only_on_detrimental A boolean
#'
#' @export
merge_results = function(res_discovery, res_test, formula, .transcript, .abundance, .sample, do_check_only_on_detrimental){
res_discovery %>%
filter(`.variable` == "counts_rng") %>%
select(
S,
G,
!!.transcript,
!!.abundance,
!!.sample,
mean,
`.lower`,
`.upper`,
`exposure rate`,
one_of(parse_formula(formula))
) %>%
# Attach results of tests
left_join(
res_test %>% filter(`.variable` == "counts_rng") %>%
select(
S,
G,
mean,
`.lower`,
`.upper`,
ppc,
one_of(c("generated quantities", "deleterious outliers"))
) %>%
rename(mean_2 = mean, `.lower_2` = `.lower`, `.upper_2` = `.upper`),
by = c("S", "G")
) %>%
# Check if new package is installed with different sintax
ifelse_pipe(
packageVersion("tidyr") == "0.8.3.9000",
~ .x %>% nest(`sample wise data` = c(-!!.transcript)),
~ .x %>%
group_by(!!.transcript) %>%
nest(-!!.transcript, .key = `sample wise data`)
) %>%
# Create plots for every tested transcript
mutate(plot =
pmap(
list(
`sample wise data`,
!!.transcript,
# nested data for plot
quo_name(.abundance),
# name of value column
quo_name(.sample),
# name of sample column
parse_formula(formula)[1] # main covariate
),
~ produce_plots(..1, ..2, ..3, ..4, ..5)
)) %>%
# Add summary statistics
mutate(`ppc samples failed` = map_int(`sample wise data`, ~ .x %>% pull(ppc) %>% `!` %>% sum)) %>%
# If deleterious detection add summary as well
ifelse_pipe(
do_check_only_on_detrimental,
~ .x %>%
mutate(
`tot deleterious outliers` =
map_int(`sample wise data`, ~ .x %>% pull(`deleterious outliers`) %>% sum)
)
)
}
#' Select only significant genes plus background for efficient normalisation
#'
#' @importFrom rstan sampling
#' @importFrom rstan vb
#'
#' @param .data A tibble
#' @param .do_check A boolean
#' @param .significance A symbol
#' @param .transcript A column name
#' @param how_many_negative_controls An integer
#'
select_to_check_and_house_keeping = function(.data, .do_check, .significance, .transcript, how_many_negative_controls = 500){
.data %>%
{
bind_rows(
# Genes to check
(.) %>%
filter((!!.do_check)),
# Least changing genes, negative controls
(.) %>%
filter((!!.do_check) %>% `!`) %>%
inner_join(
(.) %>%
arrange(!!.significance) %>%
select(!!.transcript) %>%
distinct() %>%
tail(how_many_negative_controls),
by = quo_name(.transcript)
)
)
}
}
#' add_exposure_rate
#'
#' @importFrom tidyr separate
#'
#' @param .data A data frame
#' @param fit A fit object
#'
add_exposure_rate = function(.data, fit){
writeLines(sprintf("executing %s", "add_exposure_rate"))
.data %>%
left_join(
fit %>%
summary("exposure_rate") %$%
summary %>%
as_tibble(rownames = ".variable") %>%
separate(
.variable,
c(".variable", "S"),
sep = "[\\[,\\]]",
extra = "drop"
) %>%
mutate(S = S %>% as.integer) %>%
rename(`exposure rate` = mean) %>%
select(S, `exposure rate`),
by = "S"
)
}
check_if_within_posterior = function(.data, my_df, .do_check, .abundance){
writeLines(sprintf("executing %s", "check_if_within_posterior"))
.data %>%
left_join(my_df, by = c("S", "G")) %>%
filter((!!.do_check)) %>% # Filter only DE genes
rowwise() %>%
mutate(`ppc` = !!.abundance %>% between(`.lower`, `.upper`)) %>%
mutate(`is higher than mean` = (!`ppc`) &
(!!.abundance > mean)) %>%
ungroup
}
#' fit_to_counts_rng
#'
#' @importFrom tidyr separate
#' @importFrom tidyr nest
#' @importFrom rstan summary
#'
#' @param fit A fit object
#' @param adj_prob_theshold fit real
#'
fit_to_counts_rng = function(fit, adj_prob_theshold){
writeLines(sprintf("executing %s", "fit_to_counts_rng"))
fit %>%
rstan::summary("counts_rng",
prob = c(adj_prob_theshold, 1 - adj_prob_theshold)) %$%
summary %>%
as_tibble(rownames = ".variable") %>%
separate(.variable,
c(".variable", "S", "G"),
sep = "[\\[,\\]]",
extra = "drop") %>%
mutate(S = S %>% as.integer, G = G %>% as.integer) %>%
select(-one_of(c("n_eff", "Rhat", "khat"))) %>%
rename(`.lower` = (.) %>% ncol - 1,
`.upper` = (.) %>% ncol)
}
#' fit_to_counts_rng_approximated
#'
#' @importFrom tidyr separate
#' @importFrom tidyr nest
#' @importFrom tidyr unnest
#' @importFrom rstan summary
#' @importFrom furrr future_map
#' @importFrom future plan
#' @importFrom future multiprocess
#'
#' @param fit A fit object
#' @param adj_prob_theshold fit real
#' @param how_many_posterior_draws An integer
#' @param truncation_compensation A real
#' @param do_correct_approx A boolean
#' @param cores An integer
#'
#' @export
fit_to_counts_rng_approximated = function(fit, adj_prob_theshold, how_many_posterior_draws, truncation_compensation, cores){
writeLines(sprintf("executing %s", "fit_to_counts_rng_approximated"))
draws_mu =
fit %>% extract("lambda_log_param") %>% `[[` (1) %>% as.data.frame() %>% setNames(sprintf("mu.%s", colnames(.))) %>%
as_tibble() %>% mutate(.draw = 1:n()) %>% gather(par, mu, -.draw) %>% separate(par, c("par", "S", "G"), sep="\\.") %>% select(-par)
draws_sigma =
fit %>% extract("sigma_raw") %>% `[[` (1) %>% as.data.frame() %>% setNames(sprintf("sigma.%s", colnames(.) %>% gsub("V", "", .))) %>%
as_tibble() %>% mutate(.draw = 1:n()) %>% gather(par, sigma, -.draw) %>% separate(par, c("par", "G"), sep="\\.") %>% select(-par)
draws_exposure =
fit %>% extract("exposure_rate") %>% `[[` (1) %>% as.data.frame() %>% setNames(sprintf("exposure.%s", colnames(.) %>% gsub("V", "", .))) %>%
as_tibble() %>% mutate(.draw = 1:n()) %>% gather(par, exposure, -.draw) %>% separate(par, c("par", "S"), sep="\\.") %>% select(-par)
draws_mu %>%
left_join(draws_sigma) %>%
left_join(draws_exposure) %>%
nest(data = -c(S, G)) %>%
mutate(
CI = map(
data,
~ {
.x_supersampled = .x %>% sample_n(how_many_posterior_draws, replace = T)
draws = rnbinom(n =how_many_posterior_draws, mu = exp(.x_supersampled$mu + .x_supersampled$exposure), size = 1/exp(.x_supersampled$sigma) * truncation_compensation )
draws %>%
# Process quantile
quantile(c(adj_prob_theshold, 1 - adj_prob_theshold)) %>%
tibble::as_tibble(rownames="prop") %>%
tidyr::spread(prop, value) %>%
setNames(c(".lower", ".upper")) %>%
# Add mean and sd
dplyr::mutate(mean = mean(draws), sd = sd(draws))
}
)
) %>%
select(-data) %>%
unnest(CI) %>%
# Adapt to old dataset
mutate(.variable = "counts_rng") %>%
mutate(S = as.integer(S), G = as.integer(G))
}
save_generated_quantities_in_case = function(.data, fit, save_generated_quantities){
writeLines(sprintf("executing %s", "save_generated_quantities_in_case"))
.data %>%
ifelse_pipe(
save_generated_quantities,
~ .x %>%
# Add generated quantities
left_join(fit %>% tidybayes::gather_draws(counts_rng[S, G])) %>%
# Nest them in the data frame
nest(`generated quantities` = c(.chain, .iteration, .draw, .value ))
)
}
# Assert that all the user-supplied column names exist in the input tibble;
# stops with the list of missing columns otherwise.
check_columns_exist = function(.data, .sample, .transcript, .abundance, .significance, .do_check){
  # Capture the unevaluated column names as strings
  required_columns = c(
    quo_name(enquo(.sample)),
    quo_name(enquo(.transcript)),
    quo_name(enquo(.abundance)),
    quo_name(enquo(.significance)),
    quo_name(enquo(.do_check))
  )
  missing_columns = required_columns[!required_columns %in% colnames(.data)]
  if (length(missing_columns) > 0)
    stop(
      sprintf(
        "The columns %s are not present in your tibble",
        paste(missing_columns, collapse=" ")
      )
    )
}
#' Check if NA
#'
#' Stops with an informative error when any of the key columns or formula
#' covariates contain NA values.
#'
#' @importFrom tidyr drop_na
#' @importFrom dplyr enquo
#' @importFrom dplyr all_of
#'
#' @param .data A tibble including a gene name column | sample name column | read counts column | covariates column
#' @param .sample A column name
#' @param .transcript A column name
#' @param .abundance A column name
#' @param .significance A column name
#' @param .do_check A column name
#' @param formula_columns A character vector of covariate column names
#'
check_if_any_NA = function(.data, .sample, .transcript, .abundance, .significance, .do_check, formula_columns){
  # Prepare column same enquo
  .sample = enquo(.sample)
  .transcript = enquo(.transcript)
  .abundance = enquo(.abundance)
  .significance = enquo(.significance)
  .do_check = enquo(.do_check)
  columns = c(quo_name(.sample), quo_name(.transcript), quo_name(.abundance), quo_name(.significance), quo_name(.do_check), formula_columns)
  # Wrap the external character vector in all_of(): bare vectors in
  # tidyselect selections are deprecated and could silently match a column
  # literally named "columns" instead of the intended names.
  complete_rows = .data %>% drop_na(all_of(columns)) %>% nrow
  if (complete_rows < nrow(.data))
    stop(sprintf("There are NA values in your tibble for at least one of the columns %s", paste(columns, collapse=", ")))
}
# Number of worker cores to use: all available cores minus one (kept free
# for the main process), never fewer than 1.
# Uses parallel::detectCores(), which is portable across unix/windows/mac —
# unlike shelling out to `nproc`, which is absent e.g. on macOS.
detect_cores = function(){
  cores = parallel::detectCores()
  if (is.na(cores)) stop("Your platform type is not recognised")
  max(cores - 1L, 1L)
}
#' Create the design matrix
#'
#' Builds the model matrix for `formula` from the distinct, sample-ordered
#' covariate values in `.data`.
#'
#' @param .data A tibble
#' @param formula A formula
#' @param .sample A symbol
#' @export
create_design_matrix = function(.data, formula, .sample){
  .sample = enquo(.sample)
  # One row of covariates per sample, ordered by sample, so that design
  # matrix rows line up with the sample indices used elsewhere.
  covariate_data =
    .data %>%
    select(!!.sample, one_of(parse_formula(formula))) %>%
    distinct %>%
    arrange(!!.sample)
  model.matrix(object = formula, data = covariate_data)
}
#' Format the input
#'
#' @param .data A tibble including a gene name column | sample name column | read counts column | covariates column
#' @param formula A formula
#' @param .sample A column name
#' @param .transcript A column name
#' @param .abundance A column name
#' @param .do_check A symbol
#' @param .significance A column name
#' @param how_many_negative_controls An integer
#'
#' @export
format_input = function(.data, formula, .sample, .transcript, .abundance, .do_check, .significance, how_many_negative_controls = 500){
# Prepare column same enquo
.sample = enquo(.sample)
.transcript = enquo(.transcript)
.abundance = enquo(.abundance)
.do_check = enquo(.do_check)
.significance = enquo(.significance)
.data %>%
# Select only significant genes plus background for efficient normalisation
select_to_check_and_house_keeping(.do_check, .significance, .transcript, how_many_negative_controls) %>%
# Prepare the data frame
select(
!!.transcript,
!!.sample,
!!.abundance,
one_of(parse_formula(formula)),
!!.do_check
) %>%
distinct() %>%
# Add symbol idx
left_join((.) %>%
distinct(!!.transcript) %>%
mutate(G = 1:n()),
by = quo_name(.transcript)) %>%
# Add sample indeces
mutate(S = factor(
!!.sample,
levels = (.) %>% pull(!!.sample) %>% unique
) %>% as.integer)
}
#' Fit the Stan model
#'
#' Dispatches between approximate variational inference (with retries on
#' failure) and full MCMC sampling.
#'
#' @param model A compiled Stan model
#' @param approximate_posterior_inference Logical; TRUE uses variational
#'   Bayes via `vb_iterative()`, FALSE uses MCMC via `sampling()`
#' @param chains Number of MCMC chains (MCMC branch only)
#' @param how_many_posterior_draws Number of posterior draws to keep
#' @param inits_fx Initialisation function passed to `sampling()` (MCMC only)
#' @param tol_rel_obj Convergence tolerance for variational inference
#' @param additional_parameters_to_save Character vector of extra parameter
#'   names to keep in the fit object
#' @param seed Integer random seed (MCMC only)
#'
#' @return The fitted Stan model object returned by the chosen backend
run_model = function(model, approximate_posterior_inference, chains, how_many_posterior_draws, inits_fx, tol_rel_obj, additional_parameters_to_save, seed){

	writeLines(sprintf("executing %s", "run_model"))

	# Replaces the original obscure dispatch
	# `switch(x %>% `!` %>% as.integer %>% sum(1), ...)` with an explicit
	# branch: TRUE -> variational Bayes, FALSE -> MCMC. Behaviour unchanged.
	if (approximate_posterior_inference) {

		# VB - repeat strategy for failures of vb
		vb_iterative(
			model,
			output_samples = how_many_posterior_draws,
			iter = 50000,
			tol_rel_obj = tol_rel_obj,
			pars = c(
				"counts_rng",
				"exposure_rate",
				additional_parameters_to_save
			)
		)

	} else {

		# MCMC: keep `how_many_posterior_draws` draws overall, adding 150
		# warmup iterations per chain on top
		sampling(
			model,
			chains = chains,
			cores = chains,
			iter = (how_many_posterior_draws / chains) %>% ceiling %>% sum(150),
			warmup = 150,
			save_warmup = FALSE,
			seed = seed,
			init = inits_fx,
			pars = c(
				"counts_rng",
				"exposure_rate",
				additional_parameters_to_save
			)
		)
	}
}
|
/Ex 10.R | no_license | migueljnatal/Statistical-Modeling-PPGEst-UFRGS | R | false | false | 1,763 | r | ||
# Libraries used for connecting R and Twitter to scrape the data
library("RCurl")
library("httr")
library("openssl")
library("httpuv")
library("twitteR")
# Library used for text mining in R
library("tm")
# Library used for processing strings in R
library("stringr")
# Library used for working with data frames
library("dplyr")
# Libraries used for creating the word cloud
library("wordcloud")
library("syuzhet")
# Library for visualisation
library("plotly")
# Twitter API and access credentials go here.
# The keys below are placeholders, not real credentials.
api_key <- "abcd123"
api_secret <- "abcd123"
access_token <- "abcd123"
access_secret <- "abcd123"
# Authenticate the session with Twitter
setup_twitter_oauth(api_key,api_secret,access_token,access_secret)
# Extract tweets for a given hashtag; `n` caps the number of tweets
gbm_tweet <- searchTwitter("#youhashtaghere", n=100)
# Convert the tweet list into a data frame so it can be processed easily
gbm_tweet.df<-twListToDF(gbm_tweet)
head(gbm_tweet.df)
# Clean the tweet text: strip HTML entities, retweet markers, mentions,
# punctuation, digits, URLs, repeated whitespace and non-ASCII characters
gbm_tweet.df$text=gsub("&", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("&", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("@\\w+", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("[[:punct:]]", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("[[:digit:]]", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("http\\w+", "", gbm_tweet.df$text)
# NOTE(review): this deletes runs of whitespace instead of collapsing them
# to a single space, which can glue adjacent words together -- confirm.
gbm_tweet.df$text = gsub("[ \t]{2,}", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("^\\s+|\\s+$", "", gbm_tweet.df$text)
gbm_tweet.df$text <- iconv(gbm_tweet.df$text, "UTF-8", "ASCII", sub="")
# Score each tweet against the NRC emotion lexicon and total the counts,
# ordering the emotions from most to least frequent
emotions <- get_nrc_sentiment(gbm_tweet.df$text)
emo_bar = colSums(emotions)
emo_sum = data.frame(count=emo_bar, emotion=names(emo_bar))
emo_sum$emotion = factor(emo_sum$emotion, levels=emo_sum$emotion[order(emo_sum$count, decreasing = TRUE)])
# Bar chart of emotion counts
p <- plot_ly(emo_sum, x=~emotion, y=~count, type="bar", color=~emotion) %>%
  layout(xaxis=list(title=""), showlegend=FALSE,
         title="Emotion Type for hashtag: #yourhashtag")
# Pool the tweet text by emotion (one long string per emotion)
wordcloud_tweet = c(
  paste(gbm_tweet.df$text[emotions$anger > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$anticipation > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$disgust > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$fear > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$joy > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$sadness > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$surprise > 0], collapse=" "),
  paste(gbm_tweet.df$text[emotions$trust > 0], collapse=" ")
)
# Build a corpus with one document per emotion and normalise the text.
# NOTE(review): tolower is applied after stemDocument here -- lowercasing
# usually precedes stemming; confirm this ordering is intended.
corpus = Corpus(VectorSource(wordcloud_tweet))
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, c(stopwords("english")))
corpus = tm_map(corpus, stemDocument)
corpus = tm_map(corpus, tolower)
# Term-document matrix for the comparison cloud; drop very long terms
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
tdmnew <- tdm[nchar(rownames(tdm)) < 11,]
# Label each document with its emotion and plot the comparison word cloud
colnames(tdm) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust')
colnames(tdmnew) <- colnames(tdm)
comparison.cloud(tdmnew, random.order=FALSE,
                 colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown"),
                 title.size=1, max.words=250,scale=c(2, 0.5),rot.per=0.2)
| /tweets_wordcloud.R | no_license | gokulranjiths/Twitter-data-scapping-and-word-cloud- | R | false | false | 3,709 | r | #liraries used for connecting R and Twitter to scrap the data
library("RCurl")
library("httr")
library("openssl")
library("httpuv")
library("twitteR")
#library used for text mining in R
library("tm")
#library used for processing strings in R
library("stringr")
#library used for working with dataframes
library("dplyr")
#library for creating wordcloud
library("wordcloud")
library("syuzhet")
#library for visualisation
library("plotly")
#give you api keys and acces keys here
#the keys given are duplicate not real
api_key <- "abcd123"
api_secret <- "abcd123"
access_token <- "abcd123"
access_secret <- "abcd123"
#seting connection with twitter
setup_twitter_oauth(api_key,api_secret,access_token,access_secret)
#extractig the tweets based on hashtags and number of tweets can also be mentioned
gbm_tweet <- searchTwitter("#youhashtaghere", n=100)
#converting them into dataframe so that they can be processed easily
gbm_tweet.df<-twListToDF(gbm_tweet)
head(gbm_tweet.df)
#cleaning the tweets such as removing the spaces and special characters to make them ready for aanalysis
gbm_tweet.df$text=gsub("&", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("&", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("@\\w+", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("[[:punct:]]", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("[[:digit:]]", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("http\\w+", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("[ \t]{2,}", "", gbm_tweet.df$text)
gbm_tweet.df$text = gsub("^\\s+|\\s+$", "", gbm_tweet.df$text)
gbm_tweet.df$text <- iconv(gbm_tweet.df$text, "UTF-8", "ASCII", sub="")
#getting the emotions in the tweets
emotions <- get_nrc_sentiment(gbm_tweet.df$text)
emo_bar = colSums(emotions)
emo_sum = data.frame(count=emo_bar, emotion=names(emo_bar))
emo_sum$emotion = factor(emo_sum$emotion, levels=emo_sum$emotion[order(emo_sum$count, decreasing = TRUE)])
#plot that shows a barchar for emotions
p <- plot_ly(emo_sum, x=~emotion, y=~count, type="bar", color=~emotion) %>%
layout(xaxis=list(title=""), showlegend=FALSE,
title="Emotion Type for hashtag: #yourhashtag")
#to seperate the tweets based on emotions
wordcloud_tweet = c(
paste(gbm_tweet.df$text[emotions$anger > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$anticipation > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$disgust > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$fear > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$joy > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$sadness > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$surprise > 0], collapse=" "),
paste(gbm_tweet.df$text[emotions$trust > 0], collapse=" ")
)
#store the seperate emotion tweets in seperate documents
corpus = Corpus(VectorSource(wordcloud_tweet))
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, c(stopwords("english")))
corpus = tm_map(corpus, stemDocument)
corpus = tm_map(corpus, tolower)
#creating the text document matrix for wordcloud
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
tdmnew <- tdm[nchar(rownames(tdm)) < 11,]
#plotting the wordcloud
colnames(tdm) = c('anger', 'anticipation', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'trust')
colnames(tdmnew) <- colnames(tdm)
comparison.cloud(tdmnew, random.order=FALSE,
colors = c("#00B2FF", "red", "#FF0099", "#6600CC", "green", "orange", "blue", "brown"),
title.size=1, max.words=250,scale=c(2, 0.5),rot.per=0.2)
|
#' Geocode using census geocoder
#'
#' @description
#' Use the census geocoder to lookup lat/long and census geographies.
#'
#' @param data Data set to be geocoded. Expected to follow the Census
#'   batch-upload format (id, street, city, state, zip), without headers.
#' @param benchmark Benchmark name, e.g. "Current".
#' @param vintage Vintage name, e.g. "Current".
#'
#' @return A tibble with one row per input address containing the match
#'   status, matched address, coordinates and census geography codes.
#' @import dplyr
#' @export
geocode <- function(data, benchmark = "Current", vintage = "Current"){
  # Write the addresses to a temporary CSV in the batch-upload format and
  # make sure the file is removed when the function exits.
  File <- tempfile(fileext = ".csv")
  on.exit(unlink(File), add = TRUE)
  readr::write_csv(data, File, col_names = FALSE)
  # `httr::upload_file` is fully qualified (was bare `upload_file`, which
  # fails unless httr happens to be attached).
  Resp <- httr::POST("https://geocoding.geo.census.gov/geocoder/geographies/addressbatch",
                     body=list(benchmark = paste("Public_AR", benchmark, sep = "_"),
                               vintage = paste(vintage, benchmark, sep = "_"),
                               addressFile=httr::upload_file(File)),
                     encode="multipart") %>%
    httr::content("text", encoding = "UTF-8")
  readr::read_csv(Resp, col_names = c("id", "address_input", "match", "match_type",
                                      "address_output", "lat_long", "tiger_id",
                                      "side", "statefp", "countyfp", "tractce",
                                      "blockce"))
}
| /R/geocode.R | no_license | mwillumz/geocoder | R | false | false | 1,070 | r | #' Geocode using census geocoder
#'
#' @description
#' Use the census geocoder to lookup lat/long and census geographies.
#'
#' @param data Data set to be geocoded. Expected to follow the Census
#'   batch-upload format (id, street, city, state, zip), without headers.
#' @param benchmark Benchmark name, e.g. "Current".
#' @param vintage Vintage name, e.g. "Current".
#'
#' @return A tibble with one row per input address containing the match
#'   status, matched address, coordinates and census geography codes.
#' @import dplyr
#' @export
geocode <- function(data, benchmark = "Current", vintage = "Current"){
  # Write the addresses to a temporary CSV in the batch-upload format and
  # make sure the file is removed when the function exits.
  File <- tempfile(fileext = ".csv")
  on.exit(unlink(File), add = TRUE)
  readr::write_csv(data, File, col_names = FALSE)
  # `httr::upload_file` is fully qualified (was bare `upload_file`, which
  # fails unless httr happens to be attached).
  Resp <- httr::POST("https://geocoding.geo.census.gov/geocoder/geographies/addressbatch",
                     body=list(benchmark = paste("Public_AR", benchmark, sep = "_"),
                               vintage = paste(vintage, benchmark, sep = "_"),
                               addressFile=httr::upload_file(File)),
                     encode="multipart") %>%
    httr::content("text", encoding = "UTF-8")
  readr::read_csv(Resp, col_names = c("id", "address_input", "match", "match_type",
                                      "address_output", "lat_long", "tiger_id",
                                      "side", "statefp", "countyfp", "tractce",
                                      "blockce"))
}
|
# Validity method for the 'memuse' S4 class.
#
# Checks that `size` is non-negative and that the unit slots hold
# recognised values (checked case-insensitively against the
# package-internal `.units` lookup table). Per the S4 contract, returns
# TRUE when the object is valid, otherwise a character string describing
# the first problem found.
valid.mu <- function(object)
{
  if (!inherits(object, "memuse"))
    return( "Not a class 'memuse' object" )
  if (object@size < 0)
    return("invalid slot 'size'; must be >= 0")
  # The assignments below modify a local copy only; they exist purely to
  # make the membership checks case-insensitive.
  object@unit.names <- tolower(object@unit.names)
  if ( !(object@unit.names %in% c("short", "long")) )
    return( "invalid slot 'unit.names'. See ?memuse" )
  object@unit.prefix <- toupper(object@unit.prefix)
  if ( !(object@unit.prefix %in% c("IEC", "SI")) )
    return( "invalid slot 'unit.prefix'. See ?memuse" )
  unit <- tolower(object@unit)
  if ( !(unit %in% .units[["short"]][["IEC"]][["check"]]) &&
       !(unit %in% .units[["short"]][["SI"]][["check"]]) &&
       !(unit %in% .units[["long"]][["IEC"]][["check"]]) &&
       !(unit %in% .units[["long"]][["SI"]][["check"]]) )
    return( "invalid slot 'unit'. See ?memuse" )
  # Explicitly signal validity, as required of S4 validity methods.
  TRUE
}
#' Class memuse
#'
#' Memory usage class object.
#'
#' @slot size
#' The actual size in some memuse units.
#' @slot unit
#' The mem unit (e.g., byte, kilobyte, etc.)
#' @slot unit.prefix
#' IEC or SI units
#' @slot unit.names
#' short (e.g., kb) or long (e.g., kilobyte)
#'
#' @seealso \code{ \link{Control} \link{Constructor} }
#' @keywords Classes
#' @name memuse-class
#' @docType class
setClass("memuse", 
  representation(
    size="numeric",
    unit="character",
    unit.prefix="character",
    unit.names="character"
  ),
  # Defaults: zero bytes, IEC prefixes, short unit names
  prototype(
    size=0,
    unit="B",
    unit.prefix="IEC",
    unit.names="short"
  ),
  # Checked at construction time and by validObject(); see valid.mu()
  validity=valid.mu
)

# Empty S4 class declaration for "object_size" values,
# kept to prevent R whining during package installation
setClass("object_size")
| /R/00-classes.r | permissive | shinra-dev/memuse | R | false | false | 1,562 | r | valid.mu <- function(object)
{
  # Validity method for the 'memuse' S4 class: returns a character string
  # describing the first problem found; a fall-through means "valid".
  if (!inherits(object, "memuse"))
    return( "Not a class 'memuse' object" )
  if (object@size < 0)
    return("invalid slot 'size'; must be >= 0")
  # Local normalisation only -- used to make the checks case-insensitive;
  # the object itself is not modified by a validity method.
  object@unit.names <- tolower(object@unit.names)
  if ( !(object@unit.names %in% c("short", "long")) )
    return( "invalid slot 'unit.names'. See ?memuse" )
  object@unit.prefix <- toupper(object@unit.prefix)
  if ( !(object@unit.prefix %in% c("IEC", "SI")) )
    return( "invalid slot 'unit.prefix'. See ?memuse" )
  # `.units` is a dotted (presumably package-internal) lookup table of
  # recognised unit strings.
  unit <- tolower(object@unit)
  if ( !(unit %in% .units[["short"]][["IEC"]][["check"]]) &&
       !(unit %in% .units[["short"]][["SI"]][["check"]]) &&
       !(unit %in% .units[["long"]][["IEC"]][["check"]]) &&
       !(unit %in% .units[["long"]][["SI"]][["check"]]) )
    return( "invalid slot 'unit'. See ?memuse" )
}
#'
#' Memory usage class object.
#'
#' @slot size
#' The actual size in some memuse units.
#' @slot unit
#' The mem unit (e.g., byte, kilobyte, etc.)
#' @slot unit.prefix
#' IEC or SI units
#' @slot unit.names
#' short (e.g., kb) or long (e.g., kilobyte)
#'
#' @seealso \code{ \link{Control} \link{Constructor} }
#' @keywords Classes
#' @name memuse-class
#' @docType class
setClass("memuse",
representation(
size="numeric",
unit="character",
unit.prefix="character",
unit.names="character"
),
prototype(
size=0,
unit="B",
unit.prefix="IEC",
unit.names="short"
),
validity=valid.mu
)
# to prevent R whining during package installation
setClass("object_size")
|
# Range of DART build versions supported by this package
# (minimum inclusive, no upper limit).
# NOTE(review): semantics inferred from the element names -- confirm.
.__daRtVersion <- c(minBuildVersion = 1091, maxBuildVersion = Inf)
| /R/opts.R | no_license | willmorrison1/daRt | R | false | false | 67 | r | .__daRtVersion <- c(minBuildVersion = 1091, maxBuildVersion = Inf)
|
### Quick Description ###
# After compute_credible has preprocessed the tables, this script explores
# the distribution of the predicted per-variant differences.
library(dplyr)
library(fitdistrplus)
library(logspline)
library(LambertW)
# NOTE(review): melt() (reshape2) and ggplot() (ggplot2) are used below but
# not loaded here -- presumably attached in the interactive session.
setwd("~/Oxford/RealScripts/credible_sets/data")
cred_set_results <- read.table("credible_set_sep.txt")
name_var_seq <- read.table("unique_all_name_seq.csv", header = TRUE, sep = ",")
name_to_loc <- read.table("HRC_credset.snp_ann.txt")
# Calculate the differences for each locus and each stage:
# rows alternate, so subtract each even row from the odd row above it.
diff <- cred_set_results[seq(1,nrow(cred_set_results),2), ] - cred_set_results[seq(2,nrow(cred_set_results),2), ]
# Plot a histogram of the differences and overlay a fitted normal curve
m<-mean(diff$V1)
std<-sqrt(var(diff$V1))
h <- hist(diff$V1, density=50, breaks=1000, freq=TRUE,
          xlab="Predicted difference in chromatin openness ", ylim = NULL,
          main="Normal Curve over Histogram")
xfit <- seq(min(diff$V1), max(diff$V1), length = 40)
yfit <- dnorm(xfit, mean = mean(diff$V1), sd = sd(diff$V1))
# `diff` (the data frame) shadows base::diff here, but the call still
# resolves to the base function because the symbol is in call position.
yfit <- yfit * diff(h$mids[1:2]) * length(diff$V1)
lines(xfit, yfit, col = "black", lwd = 2)
# QQ-plot to be sure, and indeed... not normally distributed.
# NOTE(review): this plots odd rows of cred_set_results, not `diff` --
# confirm that is intended.
qqnorm(cred_set_results[seq(1,nrow(cred_set_results),2), ]$V1); qqline(cred_set_results[seq(1,nrow(cred_set_results),2), ]$V1)
# Use descdist to find out which distribution family fits the data
descdist(diff$V1, discrete = FALSE)
# Example of z-scores using scale (result displayed, not stored):
scale(diff$V1)
# SD is [1] 0.03102844
# Use LambertW for diagnostic plots:
test_norm(diff$V1)
# Kurtosis = 20. Too high!
# Estimate Lambert W x Gaussian parameters to gaussianise the data:
mod.Lh <- MLE_LambertW(diff$V1, distname = "normal", type = "h")
summary(mod.Lh)
# See how the back-transformed input changed:
xx <- get_input(mod.Lh)
test_norm(xx)
# Cluster rows for the heatmap ordering
## still have to do this: add locus label ###
# NOTE(review): `fdrselect_diffname` is not created in this script; it must
# already exist in the session before the lines below are run.
ord <- hclust( dist(fdrselect_diffname, method = "euclidean"), method = "ward.D" )$order
### OPTIONAL ###
# Prepare rownames, convert rsXXX names to known names
#clear_names <- merge(fdrselect_diffname, name_to_loc, by.x = "Variant", by.y = "V1")
### \OPTIONAL ###
# Make heatmap with stage on x and variants on y
fdrselect_diffname$Variant <- (rownames(fdrselect_diffname))
fdrselect_diffname.m <- melt(fdrselect_diffname)
fdrselect_diffname.m$Variant <- factor( fdrselect_diffname.m$Variant, levels = rownames(fdrselect_diffname)[ord])
fdrselect_diffname.m$variable <- factor( fdrselect_diffname.m$variable, levels = colnames(fdrselect_diffname)[1:8] )
ggplot( fdrselect_diffname.m, aes(variable, Variant) ) +
  geom_tile(aes(fill = value)) +
  scale_fill_gradient2(low = ("blue"), high = ("red"))
########################################################################
###                                                                  ###
### Save all tables in special directory and go on with second merge:###
###                              PPa                                 ###
###                                                                  ###
########################################################################
### Some graphs ###
# Hand-picked examples, processed manually.
# NOTE(review): `mono_genes_select` is also expected to pre-exist in the
# session.
# HNF1A_rs56348580_Known_1, q-value 0.0959408, PPa: 0.11259
colnames(mono_genes_select[, 5:12])
mono_genes_select[mono_genes_select$name == "rs11065397", 5:12]
df_rs11065397 <- data.frame(Stage = c("iPSC", "DE", "PGT", "PFG", "PE", "EP", "EN", "BLC" ),
                            Activity = c(-0.05624551, -0.1234163, -0.0188171, 0.09596038, 0.1119376, 0.01520568, 0.02171335, 0.0122101))
# Fix the developmental-stage ordering for plotting
df_rs11065397$Stage <- factor(df_rs11065397$Stage, levels = df_rs11065397[["Stage"]])
ggplot(data=df_rs11065397, aes(x=Stage, y=Activity, group = 1)) +
  geom_line()+
  geom_point() +
  labs(title="Predicted difference in chromatin openness at HNF1A rs56348580") +
  theme_minimal()
# PPARG_rs17819328_Known_2 rs4684854, q-value PGT: 0.004437023, PPa: 0.25062
mono_genes_select[mono_genes_select$name == "rs4684854", 5:12]
df_rs4684854 <- data.frame(Stage = c("iPSC", "DE", "PGT", "PFG", "PE", "EP", "EN", "BLC" ),
                           Activity = c(-0.04945779, 0.1059107, -0.1125046, -0.07721826, -0.04273896, -0.006742465, -0.009917624, -0.003167973))
df_rs4684854$Stage <- factor(df_rs4684854$Stage, levels = df_rs4684854[["Stage"]])
ggplot(data=df_rs4684854, aes(x=Stage, y=Activity, group = 1)) +
  geom_line()+
  geom_point() +
  labs(title="Predicted difference in chromatin openness at PPARG rs17819328") +
  theme_minimal()
| /5.credset_predictions/Legacy/random_legacy.R | no_license | agawes/CNN-iPSC | R | false | false | 4,532 | r | ### Quick Description ###
#After compute_credible to preprocess the tables, this script can be used to
#Determine the distribution of results
library(dplyr)
library(fitdistrplus)
library(logspline)
library(LambertW)
setwd("~/Oxford/RealScripts/credible_sets/data")
cred_set_results <- read.table("credible_set_sep.txt")
name_var_seq <- read.table("unique_all_name_seq.csv", header = TRUE, sep = ",")
name_to_loc <- read.table("HRC_credset.snp_ann.txt")
#Calculate the differences for each locus and each stage
#You take alternating rows and subtract the second from the first.
#The first row is the
diff <- cred_set_results[seq(1,nrow(cred_set_results),2), ] - cred_set_results[seq(2,nrow(cred_set_results),2), ]
#Plot a histogram of the differences and fit normal distribution
m<-mean(diff$V1)
std<-sqrt(var(diff$V1))
h <- hist(diff$V1, density=50, breaks=1000, freq=TRUE,
xlab="Predicted difference in chromatin openness ", ylim = NULL,
main="Normal Curve over Histogram")
xfit <- seq(min(diff$V1), max(diff$V1), length = 40)
yfit <- dnorm(xfit, mean = mean(diff$V1), sd = sd(diff$V1))
yfit <- yfit * diff(h$mids[1:2]) * length(diff$V1)
lines(xfit, yfit, col = "black", lwd = 2)
#QQplot to be sure, and indeed... Not normally distributed.
qqnorm(cred_set_results[seq(1,nrow(cred_set_results),2), ]$V1); qqline(cred_set_results[seq(1,nrow(cred_set_results),2), ]$V1)
#Use descdist to find out which dist you are dealing with
descdist(diff$V1, discrete = FALSE)
#Example of z-scores using scale:
scale(diff$V1)
#SD is [1] 0.03102844
#Use LambertW for cool graphs:
test_norm(diff$V1)
#Kurtosis = 20. Too high!
#Estimate parameters to make normal:
mod.Lh <- MLE_LambertW(diff$V1, distname = "normal", type = "h")
summary(mod.Lh)
#See how it changed:
xx <- get_input(mod.Lh)
test_norm(xx)
#Rename rows for heatmap
## still have to do this: add locus label ###
ord <- hclust( dist(fdrselect_diffname, method = "euclidean"), method = "ward.D" )$order
### OPTIONAL ###
#Prepare rownames, convert rsXXX names to known names
#clear_names <- merge(fdrselect_diffname, name_to_loc, by.x = "Variant", by.y = "V1")
### \OPTIONAL ###
#Make heatmap w. stage on x and genes on y
fdrselect_diffname$Variant <- (rownames(fdrselect_diffname))
fdrselect_diffname.m <- melt(fdrselect_diffname)
fdrselect_diffname.m$Variant <- factor( fdrselect_diffname.m$Variant, levels = rownames(fdrselect_diffname)[ord])
fdrselect_diffname.m$variable <- factor( fdrselect_diffname.m$variable, levels = colnames(fdrselect_diffname)[1:8] )
ggplot( fdrselect_diffname.m, aes(variable, Variant) ) +
geom_tile(aes(fill = value)) +
scale_fill_gradient2(low = ("blue"), high = ("red"))
########################################################################
### ###
### Save all tables in special directory and go on with second merge:###
### PPa ###
### ###
########################################################################
###Some graphs ###
#Some first picks, do manually
#HNF1A_rs56348580_Known_1, q-value 0.0959408, PPa: 0.11259
colnames(mono_genes_select[, 5:12])
mono_genes_select[mono_genes_select$name == "rs11065397", 5:12]
df_rs11065397 <- data.frame(Stage = c("iPSC", "DE", "PGT", "PFG", "PE", "EP", "EN", "BLC" ),
Activity = c(-0.05624551, -0.1234163, -0.0188171, 0.09596038, 0.1119376, 0.01520568, 0.02171335, 0.0122101))
df_rs11065397$Stage <- factor(df_rs11065397$Stage, levels = df_rs11065397[["Stage"]])
ggplot(data=df_rs11065397, aes(x=Stage, y=Activity, group = 1)) +
geom_line()+
geom_point() +
labs(title="Predicted difference in chromatin openness at HNF1A rs56348580") +
theme_minimal()
#PPARG_rs17819328_Known_2 rs4684854, q-value PGT: 0.004437023, PPa: 0.25062
mono_genes_select[mono_genes_select$name == "rs4684854", 5:12]
df_rs4684854 <- data.frame(Stage = c("iPSC", "DE", "PGT", "PFG", "PE", "EP", "EN", "BLC" ),
Activity = c(-0.04945779, 0.1059107, -0.1125046, -0.07721826, -0.04273896, -0.006742465, -0.009917624, -0.003167973))
df_rs4684854$Stage <- factor(df_rs4684854$Stage, levels = df_rs4684854[["Stage"]])
ggplot(data=df_rs4684854, aes(x=Stage, y=Activity, group = 1)) +
geom_line()+
geom_point() +
labs(title="Predicted difference in chromatin openness at PPARG rs17819328") +
theme_minimal()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seg_criteria.R
\name{counts.per.seg}
\alias{counts.per.seg}
\title{The average counts per segment for each replica}
\usage{
counts.per.seg(list.tau, list.data)
}
\arguments{
\item{list.tau}{list of changepoints for each replica, for a given penalty value.}
\item{list.data}{list of dataset for each replica.}
}
\value{
a matrix containing the average count for each segment (column) and each replica (row).
}
\description{
The average counts per segment for each replica
}
\examples{
l.d1 <- log.transform(dataset1)
seg_rob1 <- Rob_seg.std(x = l.d1, loss = "Outlier", lambda = 25*log(length(l.d1)), lthreshold=3)
tau1 <- seg_rob1$t.est
l.d2 <- log.transform(dataset2)
seg_rob2 <- Rob_seg.std(x = l.d2, loss = "Outlier", lambda = 25*log(length(l.d1)), lthreshold=3)
tau2 <- seg_rob2$t.est
l.d3 <- log.transform(dataset3)
seg_rob3 <- Rob_seg.std(x = l.d3, loss = "Outlier", lambda = 25*log(length(l.d1)), lthreshold=3)
tau3 <- seg_rob3$t.est
l.data <- list(dataset1,dataset2,dataset3)
l.tau <- list(tau1,tau2,tau3)
cps <- counts.per.seg(list.tau=l.tau,list.data=l.data)
}
| /segRNAcountings/man/counts.per.seg.Rd | no_license | danilodurs/newsegcrit | R | false | true | 1,205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seg_criteria.R
\name{counts.per.seg}
\alias{counts.per.seg}
\title{The average countings per segments for each replica}
\usage{
counts.per.seg(list.tau, list.data)
}
\arguments{
\item{list.tau}{list of changepoints for each replica, for a given penalty value.}
\item{list.data}{list of dataset for each replica.}
}
\value{
a matrix containing the average count for each segment (column), for each replica(row).
}
\description{
The average countings per segments for each replica
}
\examples{
l.d1 <- log.transform(dataset1)
seg_rob1 <- Rob_seg.std(x = l.d1, loss = "Outlier", lambda = 25*log(length(l.d1)), lthreshold=3)
tau1 <- seg_rob1$t.est
l.d2 <- log.transform(dataset2)
seg_rob2 <- Rob_seg.std(x = l.d2, loss = "Outlier", lambda = 25*log(length(l.d1)), lthreshold=3)
tau2 <- seg_rob2$t.est
l.d3 <- log.transform(dataset3)
seg_rob3 <- Rob_seg.std(x = l.d3, loss = "Outlier", lambda = 25*log(length(l.d1)), lthreshold=3)
tau3 <- seg_rob3$t.est
l.data <- list(dataset1,dataset2,dataset3)
l.tau <- list(tau1,tau2,tau3)
cps <- counts.per.seg(list.tau=l.tau,list.data=l.data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/police-speeding.R
\docType{data}
\name{police_speeding}
\alias{police_speeding}
\title{Speeding police vehicles in New Zealand}
\format{A data frame with 5595 rows and 5 variables:
\describe{
\item{value}{Number of speeding vehicle detections, number of waivers, or
value of fines in New Zealand dollars}
\item{series}{The thing being counted by \code{value}}
\item{district}{Police district}
\item{area}{Police area (subdivision of district)}
\item{month}{Month of the event}
}}
\source{
\url{http://www.police.govt.nz/about-us/publication/road-policing-driver-offence-data-january-2009-march-2018}
}
\usage{
police_speeding
}
\description{
A dataset containing speeding vehicle detections (by cameras) of vehicles
registered to Police, as well as waivers and fines, in New Zealand between
2009 and 2017.
}
\details{
General notes and caveats by the New Zealand Police:
This data contains provisional data which is drawn from a dynamic operational
database. This is subject to change as new information is recorded or
recoded.
The data does not include cancelled infringements and proceedings, but does
include minor infringements cleared as Written Traffic Warnings (WTWs) since
the adoption of Police's Written Traffic Warning Policy in 2014. As WTWs do
not have a fee these are not included in monetary value tables.
Police speeding data includes only speed camera detections of vehicles
registered to Police that were exceeding the speed limit. The data does not
include driver occupation or whether the vehicle was being driven while on
Police duty; however it could be reasonably presumed that staff were on duty
in the vast majority of cases. Only in special circumstances are contract
vehicles and patrol vehicles permitted to be driven whilst off duty. Police
does not maintain a database of officers issued with speeding fines while
driving Police vehicles. The table containing waived Police speed offences is
a subset of the numbers shown in the top table and include all waived
reasons. Specific reasons cannot be determined without review of individual
files. However, a notice is generally only waived when a Police officer is
undertaking urgent duty driving in response to an incident. Police employees
who travel in excess of the speed limit are treated no differently to members
of the public, and depending on the circumstances may be subject to further
disciplinary action. All drivers of police vehicles detected travelling in
excess of the speed limit are liable for the relevant penalties unless a
legal defence applies. The Land Transport (Road User) Rule 2004 lists the
legal defences Police have when undertaking urgent duty driving, thereby
outlining the criteria for waiving a notice.
Please note that Police vehicle speeding data from 2014 onward cannot be
compared to previous years due to a change in the way the infringements are
recorded. A change to the recording process means that there has been an
increase in the number of infringements recorded for 2014 when compared to
previous years. This is due to a change of process for speed camera photos of
police vehicles with red and blue flashing lights visible in the photographs.
Notices are now issued for many of these photos, pending an explanation from
the driver, rather than the previous process of presuming an urgent duty
driving defence and not issuing a notice.
Most speed cameras employ radar technology to detect speeding vehicles. The
process of issuing a speed camera notice involves verification of the
resulting vehicle photo to validate the detection. When counting all vehicles
passing speed cameras (i.e., all moving vehicles complying with the speed
limit and otherwise), a small number of detections may involve other causes.
These cannot be reliably excluded from the total number of detected vehicles
as Police record speed camera notice details separately from raw vehicle
counts. The total number of vehicles detected by speed cameras on deployment
may therefore include a small number of false radar detections. Note also
that this data starts from August 2009 as there were some technical issues
affecting the rollout of digital mobile cameras primarily between January and
July 2009.
}
\seealso{
\code{\link{driving_offences}}, \code{\link{excess}},
\code{\link{fleeing_area}}, \code{\link{fleeing_district}},
\code{\link{police_speeding_band}}, \code{\link{static_camera}}
}
\keyword{datasets}
| /man/police_speeding.Rd | no_license | nacnudus/nzpullover | R | false | true | 4,505 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/police-speeding.R
\docType{data}
\name{police_speeding}
\alias{police_speeding}
\title{Speeding police vehicles in New Zealand}
\format{A data frame with 5595 rows and 5 variables:
\describe{
\item{value}{Number of speeding vehicle detections, number of waivers, or
value of fines in New Zealand dollars}
\item{series}{The thing being counted by \code{value}}
\item{district}{Police district}
\item{area}{Police area (subdivision of district)}
\item{month}{Month of the event}
}}
\source{
\url{http://www.police.govt.nz/about-us/publication/road-policing-driver-offence-data-january-2009-march-2018}
}
\usage{
police_speeding
}
\description{
A dataset containing speeding vehicle detections (by cameras) of vehicles
registered to Police, as well as waivers and fines, in New Zealand between
2009 and 2017.
}
\details{
General notes and caveats by the New Zealand Police:
This data contains provisional data which is drawn from a dynamic operational
database. This is subject to change as new information is recorded or
recoded.
The data does not include cancelled infringements and proceedings, but does
include minor infringements cleared as Written Traffic Warnings (WTWs) since
the adoption of Police's Written Traffic Warning Policy in 2014. As WTWs do
not have a fee these are not included in monetary value tables.
Police speeding data includes only speed camera detections of vehicles
registered to Police that were exceeding the speed limit. The data does not
include driver occupation or whether the vehicle was being driven while on
Police duty; however it could be reasonably presumed that staff were on duty
in the vast majority of cases. Only in special circumstances are contract
vehicles and patrol vehicles permitted to be driven whilst off duty. Police
does not maintain a database of officers issued with speeding fines while
driving Police vehicles. The table containing waived Police speed offences is
a subset of the numbers shown in the top table and include all waived
reasons. Specific reasons cannot be determined without review of individual
files. However, a notice is generally only waived when a Police officer is
undertaking urgent duty driving in response to an incident. Police employees
who travel in excess of the speed limit are treated no differently to members
of the public, and depending on the circumstances may be subject to further
disciplinary action. All drivers of police vehicles detected travelling in
excess of the speed limit are liable for the relevant penalties unless a
legal defence applies. The Land Transport (Road User) Rule 2004 lists the
legal defences Police have when undertaking urgent duty driving, thereby
outlining the criteria for waiving a notice.
Please note that Police vehicle speeding data from 2014 onward cannot be
compared to previous years due to a change in the way the infringements are
recorded. A change to the recording process means that there has been an
increase in the number of infringements recorded for 2014 when compared to
previous years. This is due to a change of process for speed camera photos of
police vehicles with red and blue flashing lights visible in the photographs.
Notices are now issued for many of these photos, pending an explanation from
the driver rather the previous process of presuming an urgent duty driving
defence and not issuing a notice. "
Most speed cameras employ radar technology to detect speeding vehicles. The
process of issuing a speed camera notice involves verification of the
resulting vehicle photo to validate the detection. When counting all vehicles
passing speed cameras (i.e., all moving vehicles complying with the speed
limit and otherwise), a small number of detections may involve other causes.
These cannot be reliably excluded from the total number of detected vehicles
as Police record speed camera notice details separately from raw vehicle
counts. The total number of vehicles detected by speed cameras on deployment
may therefore include a small number of false radar detections. Note also
that this data starts from August 2009 as there were some technical issues
affecting the rollout of digital mobile cameras primarily between January and
July 2009.
}
\seealso{
\code{\link{driving_offences}}, \code{\link{excess}},
\code{\link{fleeing_area}}, \code{\link{fleeing_district}},
\code{\link{police_speeding_band}}, \code{\link{static_camera}}
}
\keyword{datasets}
|
# Return the 'gene.anot' component of `data` coerced to a data frame,
# or NULL when no gene annotation is attached.
"gene.anot.data.frame" <-
function(data)
{
  annot <- data$gene.anot
  res <- if (is.null(annot)) NULL else as.data.frame(annot)
  res
}
| /R/gene.anot.data.frame.R | no_license | cran/varmixt | R | false | false | 189 | r | "gene.anot.data.frame" <-
function(data)
{
# Return the 'gene.anot' component of `data` coerced to a data frame,
# or NULL when no gene annotation is attached.
if(!is.null(data$gene.anot))
{
res<-as.data.frame(data$gene.anot)
}
else
{
res<-NULL
}
res
}
|
# Scrape the links to Mad Money past-recap pages and parse the first one.
library(XML)

# Landing page listing all past recaps
url <- 'http://www.madmoneyrecap.com/madmoney_pastrecaps.htm'
doc <- htmlParse(url)

# All anchors whose href points at a past-recap page
nodes <- getNodeSet(doc,
   "//a[starts-with(@href, 'madmoney_pastrecaps_')]")

# Collect the href of each matching anchor.
# seq_along() (instead of 1:length(nodes)) is safe when no nodes match.
# NOTE(review): xmlAttrs() returns all attributes of the node; consider
# xmlGetAttr(nodes[[i]], "href") if anchors carry extra attributes.
a <- rep(NA, length(nodes))
for (i in seq_along(nodes)) {
  a[i] <- as.character(xmlAttrs( nodes[[i]], "@href"))
}

# Turn relative hrefs into absolute URLs and parse the first recap page
a <- paste('http://www.madmoneyrecap.com/', a, sep="")
doc <- htmlParse(a[1])
| /STAT625/MadMoney/Tues21.9.R | no_license | kaneplusplus/Teaching | R | false | false | 376 | r | library(XML)
# Landing page listing all Mad Money past recaps
url <- 'http://www.madmoneyrecap.com/madmoney_pastrecaps.htm'
doc <- htmlParse(url)

# All anchors whose href points at a past-recap page
nodes <- getNodeSet(doc,
   "//a[starts-with(@href, 'madmoney_pastrecaps_')]")

# Collect the href of each matching anchor.
# seq_along() (instead of 1:length(nodes)) is safe when no nodes match.
# NOTE(review): xmlAttrs() returns all attributes of the node; consider
# xmlGetAttr(nodes[[i]], "href") if anchors carry extra attributes.
a <- rep(NA, length(nodes))
for (i in seq_along(nodes)) {
  a[i] <- as.character(xmlAttrs( nodes[[i]], "@href"))
}

# Turn relative hrefs into absolute URLs and parse the first recap page
a <- paste('http://www.madmoneyrecap.com/', a, sep="")
doc <- htmlParse(a[1])
|
rm(list=ls())
library(caret)
library(RWeka)
set.seed(1234)
# separate data into test and train sets, 70/30 split in this case
splitIndex <- createDataPartition(iris$Species, p = 0.7, list = FALSE)
train <- iris[splitIndex, ]
test <- iris[-splitIndex, ]
testInd <- test[ ,!colnames(test) %in% "Species"]
testDep <- as.factor(test[, names(test) == "Species"])
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
#First Model
# JRip (RIPPER rule learner from RWeka) fit through caret with default tuning
jripFit1 <- train(TrainData, TrainClasses,method = "JRip")
jripFit1
plot(jripFit1)
#Second Model
# Same learner with centred/scaled predictors, a 10-point tuning grid,
# and cross-validated resampling instead of the bootstrap default
jripFit2 <- train(TrainData, TrainClasses,method = "JRip",preProcess = c("center", "scale"),tuneLength = 10,trControl = trainControl(method = "cv"))
jripFit2
plot(jripFit2)
# k-nearest neighbours via caret::knn3
# (note: this section fits KNN, not k-means, despite the old "# K means" heading)
neighborCount <- 2
modelKNN <- knn3(Species ~ ., data = train, k = neighborCount, prob = TRUE)
# Class-membership probabilities for each test observation
predKNN <- predict(modelKNN, testInd, type = "prob")
# confusionMatrix() tabulates factor predictions against the reference classes;
# the original passed the probability matrix, which it cannot tabulate.
predKNNClass <- predict(modelKNN, testInd, type = "class")
confKNN <- confusionMatrix(testDep, predKNNClass)
#Another Round
# k-means with k = 3 on the four numeric iris measurements
km <- kmeans(iris[,1:4], 3)
# Colour points by assigned cluster; overlay the cluster centres
plot(iris[,1], iris[,2], col=km$cluster)
points(km$centers[,c(1,2)], col=1:3, pch=19, cex=2)
# Cross-tabulate cluster assignments against the true species
table(km$cluster, iris$Species)
#Another Way
# Second run: kmeans uses random starts, so this can differ from `km`
km2 <- kmeans(iris[,1:4], 3)
plot(iris[,1], iris[,2], col=km2$cluster)
points(km2$centers[,c(1,2)], col=1:3, pch=19, cex=2)
table(km2$cluster, iris$Species)
#heir -- hierarchical clustering demo on a toy matrix and an iris sample
m <- matrix(1:15,5,3)
dist(m) # computes the distance between rows of m (since there are 3 columns, it is the euclidian distance between tri-dimensional points)
dist(m,method="manhattan") # using the manhattan metric
sampleiris <- iris[sample(1:150, 40),] # get samples from iris dataset
# each observation has 4 variables, ie, they are interpreted as 4-D points
distance <- dist(sampleiris[,-5], method="euclidean")
# Average-linkage agglomerative clustering
cluster <- hclust(distance, method="average")
plot(cluster, hang=-1, label=sampleiris$Species)
plot(as.dendrogram(cluster), edgePar=list(col="darkgreen", lwd=2), horiz=T)
str(as.dendrogram(cluster)) # Prints dendrogram structure as text.
cluster$labels[cluster$order] # Prints the row labels in the order they appear in the tree.
#Prune by cluster
par(mfrow=c(1,2))
group.3 <- cutree(cluster, k = 3) # prune the tree by 3 clusters
table(group.3, sampleiris$Species) # compare with known classes
plot(sampleiris[,c(1,2)], col=group.3, pch=19, cex=2.5, main="3 clusters")
points(sampleiris[,c(1,2)], col=sampleiris$Species, pch=19, cex=1)
group.6 <- cutree(cluster, k = 6) # we can prune by more clusters
table(group.6, sampleiris$Species)
plot(sampleiris[,c(1,2)], col=group.6, pch=19, cex=2.5, main="6 clusters")
points(sampleiris[,c(1,2)], col=sampleiris$Species, pch=19, cex=1) # the little points are the true classes
par(mfrow=c(1,1))
plot(cluster, hang=-1, label=sampleiris$Species)
# Alternative: cut the tree at a fixed height (0.9) instead of a cluster count
abline(h=0.9,lty=3,col="red")
height.0.9 <- cutree(cluster, h = 0.9)
table(height.0.9, sampleiris$Species) # compare with known classes
plot(sampleiris[,c(1,2)], col=height.0.9, pch=19, cex=2.5, main="3 clusters")
points(sampleiris[,c(1,2)], col=sampleiris$Species, pch=19, cex=1)
# Calculate the dissimilarity between observations using the Euclidean distance.
# Use only the four numeric measurement columns: passing the whole iris frame
# would coerce the factor column Species to NA (with a coercion warning) and
# corrupt the distances. This matches dist(sampleiris[,-5]) used above.
dist.iris <- dist(iris[, 1:4], method="euclidean")
# Compute a hierarchical cluster analysis on the distance matrix using the complete linkage method
h.iris <- hclust(dist.iris, method="complete")
h.iris
head(h.iris$merge, n=10) # first 10 merge steps of the agglomeration
plot(h.iris)
h.iris.heights <- h.iris$height # height values
h.iris.heights[1:10]
# Gap between consecutive merge heights; the largest jump suggests a cut point
subs <- round(h.iris.heights - c(0,h.iris.heights[-length(h.iris.heights)]), 3) # subtract next height
which.max(subs)
# Cuts dendrogram at specified level and draws rectangles around the resulting clusters
plot(cluster); rect.hclust(cluster, k=6, border="red")
| /GenericClustering.R | no_license | Prashant0701/PracticeAnalytics | R | false | false | 3,591 | r | rm(list=ls())
library(caret)
library(RWeka)
set.seed(1234)
# separate data into test and train sets, 70/30 split in this case
splitIndex <- createDataPartition(iris$Species, p = 0.7, list = FALSE)
train <- iris[splitIndex, ]
test <- iris[-splitIndex, ]
testInd <- test[ ,!colnames(test) %in% "Species"]
testDep <- as.factor(test[, names(test) == "Species"])
TrainData <- iris[,1:4]
TrainClasses <- iris[,5]
#First Model
jripFit1 <- train(TrainData, TrainClasses,method = "JRip")
jripFit1
plot(jripFit1)
#Second Model
jripFit2 <- train(TrainData, TrainClasses,method = "JRip",preProcess = c("center", "scale"),tuneLength = 10,trControl = trainControl(method = "cv"))
jripFit2
plot(jripFit2)
# K means
neighborCount=2
modelKNN <- knn3(Species ~ ., data = train, k = neighborCount, prob = TRUE)
predKNN <- predict(modelKNN, testInd, type = "prob")
confKNN <- confusionMatrix(testDep, predKNN)
#Another Round
km <- kmeans(iris[,1:4], 3)
plot(iris[,1], iris[,2], col=km$cluster)
points(km$centers[,c(1,2)], col=1:3, pch=19, cex=2)
table(km$cluster, iris$Species)
#Another Way
km2 <- kmeans(iris[,1:4], 3)
plot(iris[,1], iris[,2], col=km2$cluster)
points(km2$centers[,c(1,2)], col=1:3, pch=19, cex=2)
table(km2$cluster, iris$Species)
#heir
m <- matrix(1:15,5,3)
dist(m) # computes the distance between rows of m (since there are 3 columns, it is the euclidian distance between tri-dimensional points)
dist(m,method="manhattan") # using the manhattan metric
sampleiris <- iris[sample(1:150, 40),] # get samples from iris dataset
# each observation has 4 variables, ie, they are interpreted as 4-D points
distance <- dist(sampleiris[,-5], method="euclidean")
cluster <- hclust(distance, method="average")
plot(cluster, hang=-1, label=sampleiris$Species)
plot(as.dendrogram(cluster), edgePar=list(col="darkgreen", lwd=2), horiz=T)
str(as.dendrogram(cluster)) # Prints dendrogram structure as text.
cluster$labels[cluster$order] # Prints the row labels in the order they appear in the tree.
#Prune by cluster
par(mfrow=c(1,2))
group.3 <- cutree(cluster, k = 3) # prune the tree by 3 clusters
table(group.3, sampleiris$Species) # compare with known classes
plot(sampleiris[,c(1,2)], col=group.3, pch=19, cex=2.5, main="3 clusters")
points(sampleiris[,c(1,2)], col=sampleiris$Species, pch=19, cex=1)
group.6 <- cutree(cluster, k = 6) # we can prune by more clusters
table(group.6, sampleiris$Species)
plot(sampleiris[,c(1,2)], col=group.6, pch=19, cex=2.5, main="6 clusters")
points(sampleiris[,c(1,2)], col=sampleiris$Species, pch=19, cex=1) # the little points are the true classes
par(mfrow=c(1,1))
plot(cluster, hang=-1, label=sampleiris$Species)
abline(h=0.9,lty=3,col="red")
height.0.9 <- cutree(cluster, h = 0.9)
table(height.0.9, sampleiris$Species) # compare with known classes
plot(sampleiris[,c(1,2)], col=height.0.9, pch=19, cex=2.5, main="3 clusters")
points(sampleiris[,c(1,2)], col=sampleiris$Species, pch=19, cex=1)
# Calculate the dissimilarity between observations using the Euclidean distance
dist.iris <- dist(iris, method="euclidean")
# Compute a hierarchical cluster analysis on the distance matrix using the complete linkage method
h.iris <- hclust(dist.iris, method="complete")
h.iris
head(h.iris$merge, n=10)
plot(h.iris)
h.iris.heights <- h.iris$height # height values
h.iris.heights[1:10]
subs <- round(h.iris.heights - c(0,h.iris.heights[-length(h.iris.heights)]), 3) # subtract next height
which.max(subs)
# Cuts dendrogram at specified level and draws rectangles around the resulting clusters
plot(cluster); rect.hclust(cluster, k=6, border="red")
|
#' write_eml
#'
#' write_eml
#' @param eml an eml class object
#' @param file file name to write XML.
#' @param namespaces named character vector of additional XML namespaces to use.
#' @param ns root namespace abbreviation
#' @param ... additional arguments to \code{\link{write_xml}}
#' @return If file is not specified, the result is a character string containing
#' the resulting XML content. Otherwise return silently.
#' @export
#' @import methods
#' @importFrom xml2 write_xml xml_set_namespace xml_name xml_ns
#' xml_find_all xml_remove xml_root
#' @importFrom uuid UUIDgenerate
#' @examples
#' f <- system.file("examples", "example-eml-valid.xml", package = "EML")
#' eml <- read_eml(f)
#' write_eml(eml, "test.xml")
#' unlink("test.xml") # clean up
# Serialize an `eml` S4 object to XML and write it to `file`.
# Fills in schema-required attributes (schemaLocation, system, packageId)
# with defaults before converting, prunes empty elements, and prefixes the
# root element name with the `ns` namespace abbreviation.
write_eml <- function(eml,
file,
namespaces = NULL,
ns = "eml",
...) {
## Make sure `eml` node has a schemaLocation
if(is(eml, "eml") && is_blank(eml@schemaLocation))
eml@schemaLocation@.Data <- "eml://ecoinformatics.org/eml-2.1.1 eml.xsd"
## By default, we use UUID system to generate packageId
# NOTE(review): the literal "uuid" recorded here is the *name* of the ID
# system in the `system` attribute, not a generated identifier -- confirm.
if("system" %in% slotNames(eml) && is_blank(eml@system))
slot(eml,"system") <- as("uuid", "xml_attribute")
## By default, a packageId will be generated if one is not available (required by schema)
if(is(eml, "eml") && is_blank(eml@packageId))
slot(eml,"packageId") <- as(uuid::UUIDgenerate(), "xml_attribute")
# id <- basename(tempfile("eml"))
## use default namespaces if not provided
if(is.null(namespaces))
namespaces <- eml_namespaces
## Convert to xml
node <- s4_to_xml(eml, ns = namespaces)
root <- xml2::xml_root(node)
# Strips elements with no attributes, children or text (mutates `root` in place)
prune_empty(root)
## setting root element ns doesn't appear to do anything:
#xml2::xml_set_namespace(tmp, ns,
#paste0(ns, "://ecoinformatics.org/", ns, "-2.1.1"))
## so we set it manually by renaming the node:
if(!is_blank(ns)){
root_name <- xml2::xml_name(root)
xml2::xml_name(root) <- paste(ns, root_name, sep=":")
}
## Now we write out to file
xml2::write_xml(root, file, ...)
}
# Repeatedly strip elements that carry no attributes, no children and no
# text, until the document stops shrinking; returns the document.
# The root element is never removed (removing the document root segfaults).
prune_empty <- function(xml){
empty_xpath <- "//*[not(@*)][not(*)][not(normalize-space())]"
repeat {
n_before <- length(xml2::xml_find_all(xml, "//*"))
# Only prune while more than the root element remains
if (n_before > 1) {
xml2::xml_remove(xml2::xml_find_all(xml, empty_xpath))
}
n_after <- length(xml2::xml_find_all(xml, "//*"))
if (n_after >= n_before) break
}
xml
}
# TRUE when `x` is zero-length (e.g. character(0), NULL) or the empty string ""
is_blank <- function(x) {
length(x) == 0 || x == ""
}
## Default XML namespaces
# Harvested from the known-valid example document shipped with the EML
# package; write_eml() falls back to this set when the caller supplies
# no `namespaces` argument.
eml_namespaces <- xml2::xml_ns(
  xml2::read_xml(
    system.file("examples",
                "example-eml-valid.xml",
                package = "EML")))
| /R/write_eml.R | no_license | fengfengyuyu/EML | R | false | false | 2,865 | r | #' write_eml
#'
#' write_eml
#' @param eml an eml class object
#' @param file file name to write XML.
#' @param namespaces named character vector of additional XML namespaces to use.
#' @param ns root namespace abbreviation
#' @param ... additional arguments to \code{\link{write_xml}}
#' @return If file is not specified, the result is a character string containing
#' the resulting XML content. Otherwise return silently.
#' @export
#' @import methods
#' @importFrom xml2 write_xml xml_set_namespace xml_name xml_ns
#' xml_find_all xml_remove xml_root
#' @importFrom uuid UUIDgenerate
#' @examples
#' f <- system.file("examples", "example-eml-valid.xml", package = "EML")
#' eml <- read_eml(f)
#' write_eml(eml, "test.xml")
#' unlink("test.xml") # clean up
write_eml <- function(eml,
file,
namespaces = NULL,
ns = "eml",
...) {
## Make sure `eml` node has a schemaLocation
if(is(eml, "eml") && is_blank(eml@schemaLocation))
eml@schemaLocation@.Data <- "eml://ecoinformatics.org/eml-2.1.1 eml.xsd"
## By default, we use UUID system to generate packageId
if("system" %in% slotNames(eml) && is_blank(eml@system))
slot(eml,"system") <- as("uuid", "xml_attribute")
## By default, a packageId will be generated if one is not available (required by schema)
if(is(eml, "eml") && is_blank(eml@packageId))
slot(eml,"packageId") <- as(uuid::UUIDgenerate(), "xml_attribute")
# id <- basename(tempfile("eml"))
## use default namespaces if not provided
if(is.null(namespaces))
namespaces <- eml_namespaces
## Convert to xml
node <- s4_to_xml(eml, ns = namespaces)
root <- xml2::xml_root(node)
prune_empty(root)
## setting root element ns doesn't appear to do anything:
#xml2::xml_set_namespace(tmp, ns,
#paste0(ns, "://ecoinformatics.org/", ns, "-2.1.1"))
## so we set it manually by renaming the node:
if(!is_blank(ns)){
root_name <- xml2::xml_name(root)
xml2::xml_name(root) <- paste(ns, root_name, sep=":")
}
## Now we write out to file
xml2::write_xml(root, file, ...)
}
prune_empty <- function(xml){
before <- 1
after <- 0
empty <- "//*[not(@*)][not(*)][not(normalize-space())]" ##
while(after < before){
before <- length(xml2::xml_name(xml2::xml_find_all(xml, "//*") ))
## Avoid removing document root, which results in a segfault
total <- length(xml_find_all(xml, "//*"))
if(total > 1){
xml2::xml_remove(xml_find_all(xml, empty))
}
after <- length(xml2::xml_name(xml_find_all(xml, "//*") ))
}
xml
}
# character(0) or "" data
is_blank <- function(x) length(x) < 1 || x == ""
## Default XML namespaces
eml_namespaces <- xml2::xml_ns(
xml2::read_xml(
system.file("examples",
"example-eml-valid.xml",
package = "EML")))
|
# Setup the R environment:
source("setup.R")
# Number of repetitions
n.rep <- 10
# The leukemia data is available in the 'varbvs' package
library(varbvs)
data(leukemia)
A <- leukemia$x
# Design matrix dimensions: n observations, r candidate predictors
n <- nrow(A)
r <- ncol(A)
# psi: variance of the nonzero coefficients (used as sd = sqrt(psi) below)
psi <- 1
# sigma.sq: observation noise variance
sigma.sq <- .5
p <- 40 # `p` is the number of nonzeros in `theta`
# Fraction of nonzero coefficients, passed to the PIP routines as lambda
lambda <- p/r
# IRGA split sizes compared against the Gibbs reference
split.size <- c(1, 2, 4, 8, 16)
n.split.size <- length(split.size)
# Posterior inclusion probabilities: split size x repetition x predictor
sequential_PIP <- array(NA_real_, dim = c(n.split.size, n.rep, r))
# Gibbs reference PIPs: repetition x predictor
Gibbs_PIP <- array(NA_real_, dim = c(n.rep, r))
# Set seed for reproducibility
set.seed(1)
# Simulate n.rep datasets; record PIPs from the long Gibbs run and from
# IRGA at each split size. PIP_Gibbs / PIP_IRGA presumably come from
# setup.R (sourced above) -- not visible in this script.
for (j in 1:n.rep) {
cat("j:", j, "\n")
# Sparse truth: p nonzero coefficients with variance psi
theta <- rep(0, r)
theta[sample.int(n = r, size = p)] <- rnorm(n = p, sd = sqrt(psi))
# Gaussian response around A %*% theta
y <- rnorm(n = n, mean = A%*%theta, sd = sqrt(sigma.sq))
Gibbs_PIP[j, ] <- PIP_Gibbs(y, X = A, lambda, psi, sigma.sq, beta.0 = theta, iter = 1e5)
for (k in 1:n.split.size) sequential_PIP[k, j, ] <- PIP_IRGA(y, A, lambda, psi, sigma.sq, p = split.size[k])
}
# The approximate PIP can be exactly 1, which would make the raw log odds
# infinite, so the result is capped at 50. Always returns a plain vector.
log_odds <- function(PIP) {
raw <- log(PIP) - log(1 - PIP)
pmin(as.vector(raw), 50)
}
# Log odds from the long Gibbs run serve as the reference values below
Gibbs_log_odds <- log_odds(Gibbs_PIP)
for (k in 1:n.split.size) {
print(split.size[k])
print(summary(as.vector(abs(Gibbs_log_odds - log_odds(sequential_PIP[k, , ])))))
} | /Simulations/leukemia_split_size.R | no_license | anonymnous2023-lab/IRGA | R | false | false | 1,254 | r | # Setup the R environment:
source("setup.R")
# Number of repetitions
n.rep <- 10
# The leukemia data is available in the 'varbvs' package
library(varbvs)
data(leukemia)
A <- leukemia$x
n <- nrow(A)
r <- ncol(A)
psi <- 1
sigma.sq <- .5
p <- 40 # `p` is the number of nonzeros in `theta`
lambda <- p/r
split.size <- c(1, 2, 4, 8, 16)
n.split.size <- length(split.size)
sequential_PIP <- array(NA_real_, dim = c(n.split.size, n.rep, r))
Gibbs_PIP <- array(NA_real_, dim = c(n.rep, r))
# Set seed for reproducibility
set.seed(1)
for (j in 1:n.rep) {
cat("j:", j, "\n")
theta <- rep(0, r)
theta[sample.int(n = r, size = p)] <- rnorm(n = p, sd = sqrt(psi))
y <- rnorm(n = n, mean = A%*%theta, sd = sqrt(sigma.sq))
Gibbs_PIP[j, ] <- PIP_Gibbs(y, X = A, lambda, psi, sigma.sq, beta.0 = theta, iter = 1e5)
for (k in 1:n.split.size) sequential_PIP[k, j, ] <- PIP_IRGA(y, A, lambda, psi, sigma.sq, p = split.size[k])
}
# We cap the log odds since the approximate PIP can equal 1.
log_odds <- function(PIP) pmin(as.vector(log(PIP) - log(1-PIP)), 50)
Gibbs_log_odds <- log_odds(Gibbs_PIP)
for (k in 1:n.split.size) {
print(split.size[k])
print(summary(as.vector(abs(Gibbs_log_odds - log_odds(sequential_PIP[k, , ])))))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.Xy_sim.R
\name{varimp}
\alias{varimp}
\title{Variable Importance}
\usage{
varimp(object, use.noise = FALSE, plot = TRUE)
}
\arguments{
\item{object}{an object of class \code{Xy_sim}}
\item{use.noise}{a boolean indicating whether the noise of the process should
be added to the variable importance}
\item{plot}{a boolean specifying whether to plot the variable importance}
}
\description{
Variable Importance
}
\examples{
# Visualize Feature Importance of a Simulation
my_simulation <- Xy()
varimp(my_simulation)
}
| /man/varimp.Rd | permissive | stjordanis/Xy | R | false | true | 603 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.Xy_sim.R
\name{varimp}
\alias{varimp}
\title{Variable Importance}
\usage{
varimp(object, use.noise = FALSE, plot = TRUE)
}
\arguments{
\item{object}{an object of class \code{Xy_sim}}
\item{use.noise}{a boolean indicating whether the noise of the process should
be added to the variable importance}
\item{plot}{a boolean specifying whether to print the variable importance}
}
\description{
Variable Importance
}
\examples{
# Visualize Feature Importance of a Simulation
my_simulation <- Xy()
varimp(my_simulation)
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
# Download the oil_spill dataset from OpenML (network access required)
dataset <- getOMLDataSet(data.name = "oil_spill")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data is used as-is
head(dataset$data)
#:# model
# Binary classification task; Laplace-smoothed naive Bayes returning probabilities
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.naiveBayes", par.vals = list(laplace = 0.5), predict.type = "prob")
#:# hash
#:# 981659b633747ee234a2fbd11eba689d
# Fingerprint of the task/learner pair; the line above records the expected value
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with several classification measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
# NOTE(review): r$aggr holds ALL aggregated measures, not just accuracy,
# despite the name ACC.
ACC <- r$aggr
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_oil_spill/classification_class/981659b633747ee234a2fbd11eba689d/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 700 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "oil_spill")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.naiveBayes", par.vals = list(laplace = 0.5), predict.type = "prob")
#:# hash
#:# 981659b633747ee234a2fbd11eba689d
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
library(SWMPrExtension)
### Name: threshold_criteria_plot
### Title: Water Quality Threshold Plot For Parameters With Criteria
### Aliases: threshold_criteria_plot threshold_criteria_plot.swmpr
### ** Examples
# Bundled Apalachicola water-quality dataset from SWMPrExtension
data(apacpwq)
dat_wq <- apacpwq
# Keep only observations whose QA/QC flags are 0, 3 or 5
dat_wq <- qaqc(dat_wq, qaqc_keep = c(0, 3, 5))
## Due to the volume of instantaneous data, these plots are a bit slow
# Dissolved oxygen for 2012 with Poor/Fair/Good bands at 2 and 5 mg/L
x <-
  threshold_criteria_plot(dat_wq, param = 'do_mgl'
                          , rng = 2012
                          , thresholds = c(2, 5)
                          , threshold_labs = c('Poor', 'Fair', 'Good')
                          , monthly_smooth = TRUE
                          , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9'))
## Not run:
##D y <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Poor', 'Fair', 'Good')
##D , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9'))
##D
##D z <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Poor', 'Fair', 'Good')
##D , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9')
##D , monthly_smooth = TRUE)
##D
##D ## A few examples with only two thresholds
##D xx <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(2, 2)
##D
##D # A dummy blank ('') value must be added as a threshold label
##D , threshold_labs = c('Poor', '', 'Good')
##D , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9')
##D , monthly_smooth = TRUE)
##D
##D xy <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(5, 5)
##D
##D # A dummy blank ('') value must be added as a threshold label
##D , threshold_labs = c('Poor', '', 'Good')
##D , threshold_cols = c('#FEC596', '#FEC596', '#ABD9E9')
##D , monthly_smooth = TRUE)
##D
##D xz <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Poor', 'Good', 'Poor')
##D , threshold_cols = c('#FEC596', '#ABD9E9', '#FEC596')
##D , monthly_smooth = TRUE)
##D
##D
##D data(apacpnut)
##D dat_nut <- apacpnut
##D
##D dat_nut <- qaqc(dat_nut, qaqc_keep = c(0, 3, 5))
##D dat_nut <- rem_reps(dat_nut)
##D
##D x <-
##D threshold_criteria_plot(dat_nut, param = 'chla_n'
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Good', 'Fair', 'Poor'))
##D
##D
##D y <-
##D threshold_criteria_plot(dat_nut, param = 'chla_n'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Good', 'Fair', 'Poor'))
##D
##D ## Nutrient plots are not capable of accidentally displaying any kind of smooth
##D z <-
##D threshold_criteria_plot(dat_nut, param = 'chla_n'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Good', 'Fair', 'Poor')
##D , monthly_smooth = TRUE)
## End(Not run)
| /data/genthat_extracted_code/SWMPrExtension/examples/threshold_criteria_plot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 3,394 | r | library(SWMPrExtension)
### Name: threshold_criteria_plot
### Title: Water Quality Threshold Plot For Parameters With Criteria
### Aliases: threshold_criteria_plot threshold_criteria_plot.swmpr
### ** Examples
data(apacpwq)
dat_wq <- apacpwq
dat_wq <- qaqc(dat_wq, qaqc_keep = c(0, 3, 5))
## Due to the volume of instantaneous data, these plots are a bit slow
x <-
threshold_criteria_plot(dat_wq, param = 'do_mgl'
, rng = 2012
, thresholds = c(2, 5)
, threshold_labs = c('Poor', 'Fair', 'Good')
, monthly_smooth = TRUE
, threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9'))
## Not run:
##D y <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Poor', 'Fair', 'Good')
##D , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9'))
##D
##D z <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Poor', 'Fair', 'Good')
##D , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9')
##D , monthly_smooth = TRUE)
##D
##D ## A few examples with only two thresholds
##D xx <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(2, 2)
##D
##D # A dummy blank ('') value must be added as a threshold label
##D , threshold_labs = c('Poor', '', 'Good')
##D , threshold_cols = c('#FEC596', '#FFFFCC', '#ABD9E9')
##D , monthly_smooth = TRUE)
##D
##D xy <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(5, 5)
##D
##D # A dummy blank ('') value must be added as a threshold label
##D , threshold_labs = c('Poor', '', 'Good')
##D , threshold_cols = c('#FEC596', '#FEC596', '#ABD9E9')
##D , monthly_smooth = TRUE)
##D
##D xz <-
##D threshold_criteria_plot(dat_wq, param = 'do_mgl'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Poor', 'Good', 'Poor')
##D , threshold_cols = c('#FEC596', '#ABD9E9', '#FEC596')
##D , monthly_smooth = TRUE)
##D
##D
##D data(apacpnut)
##D dat_nut <- apacpnut
##D
##D dat_nut <- qaqc(dat_nut, qaqc_keep = c(0, 3, 5))
##D dat_nut <- rem_reps(dat_nut)
##D
##D x <-
##D threshold_criteria_plot(dat_nut, param = 'chla_n'
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Good', 'Fair', 'Poor'))
##D
##D
##D y <-
##D threshold_criteria_plot(dat_nut, param = 'chla_n'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Good', 'Fair', 'Poor'))
##D
##D ## Nutrient plots are not capable of accidentally displaying any kind of smooth
##D z <-
##D threshold_criteria_plot(dat_nut, param = 'chla_n'
##D , rng = 2012
##D , thresholds = c(2, 5)
##D , threshold_labs = c('Good', 'Fair', 'Poor')
##D , monthly_smooth = TRUE)
## End(Not run)
|
## Read dates 01-02-2007 through 01-02-2007 data from original dataset
data <- read.table(
"C:/Users/Mike/Desktop/Coursera/data/household_power_consumption.txt",sep = ";",
colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),
skip = 66637, nrow = 2880,
col.names = colnames(read.table("C:/Users/Mike/Desktop/Coursera/data/household_power_consumption.txt",
nrow = 1, sep = ";", header = TRUE)))
## Convert Date col to date format
data$Date <- as.Date(data$Date, format = "%d/%m/%Y");View(data)#worked
## Combine Date and Time columns to make unique identifier column "datetime"
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format="%Y-%m-%d %H:%M:%S");View(data)#worked add datetime col
## Plot 1 and add annotations
hist(data$Global_active_power, col="red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
## Copy plot to png file
dev.copy(png, file = "plot1.png", width = 480, height = 480)
## Close device
dev.off()
| /plot1.R | no_license | datamick/ExData_Plotting1 | R | false | false | 1,020 | r |
## Read dates 01-02-2007 through 01-02-2007 data from original dataset
data <- read.table(
"C:/Users/Mike/Desktop/Coursera/data/household_power_consumption.txt",sep = ";",
colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),
skip = 66637, nrow = 2880,
col.names = colnames(read.table("C:/Users/Mike/Desktop/Coursera/data/household_power_consumption.txt",
nrow = 1, sep = ";", header = TRUE)))
## Convert Date col to date format
data$Date <- as.Date(data$Date, format = "%d/%m/%Y");View(data)#worked
## Combine Date and Time columns to make unique identifier column "datetime"
data$datetime <- as.POSIXct(paste(data$Date, data$Time), format="%Y-%m-%d %H:%M:%S");View(data)#worked add datetime col
## Plot 1 and add annotations
hist(data$Global_active_power, col="red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
## Copy plot to png file
dev.copy(png, file = "plot1.png", width = 480, height = 480)
## Close device
dev.off()
|
#Question 1###########################
# NOTE(review): rm(list = ls()) wipes the caller's workspace, and
# install.packages() should not run on every execution of an analysis
# script -- consider removing both and documenting dependencies instead.
rm(list=ls());
install.packages("ElemStatLearn");
install.packages("class");
install.packages("glmnet");
install.packages("pls");
install.packages("leaps");
install.packages("randomForest")
install.packages("Metrics");
library(ElemStatLearn);
library(class);
library(glmnet);
library(pls);
library(leaps);
library(randomForest);
library(Metrics)
# `spam` from ElemStatLearn: 57 predictor columns A.1..A.57 plus class `spam`
data("spam");
head(spam);
##As the data are not spread correctly we will shuffle the set completely
spam<-spam[sample(nrow(spam)),];
##Now dividing the dataset into Training and test Data:
set.seed(1);
vars=c("A.1","A.2","A.3","A.4","A.5","A.6","A.7","A.8","A.9","A.10","A.11","A.12","A.13","A.14","A.15","A.16","A.17","A.18","A.19","A.20","A.21","A.22","A.23","A.24","A.25","A.26","A.27","A.28","A.29","A.30","A.31","A.32","A.33","A.34","A.35","A.36","A.37","A.38","A.39","A.40","A.41","A.42","A.43","A.44","A.45","A.46","A.47","A.48","A.49","A.50","A.51","A.52","A.53","A.54","A.55","A.56","A.57","spam");
# 55/45 train/test split
idx=sample(x=nrow(spam), size=0.55*nrow(spam))
train=spam[idx,vars]
test=spam[-idx,vars]
#Random Forest
# Baseline fit with mtry = 57 (all predictors at every split), i.e. bagging.
# BUG FIX: the formula was `train$spam ~ .` with data = train; because the
# left-hand side was not the bare column name, `.` expanded to ALL columns
# of `train` *including* spam, leaking the response into the predictors.
spam.random = randomForest(spam ~ ., data = train, mtry = 57, importance = TRUE)
spam.random
predicSpam = predict(spam.random, newdata = test)
summary(predicSpam)
summary(test$spam)
# Sweep mtry from 1 (random forest) up to all 57 predictors (bagging)
mValues <- 1:57
OOBErr <- numeric(length = length(mValues))
testErr <- numeric(length = length(mValues))
#Now applying the Bagging concept for application of all the predictors at each split.
#Bagging
for (i in seq_along(mValues)) {
  spam_predvals_random = randomForest(spam ~ ., data = train, mtry = i, importance = TRUE)
  spam_predvals_random
  # (renamed from predict.randomForest, which shadowed the S3 method name)
  pred_test = predict(spam_predvals_random, newdata = test)
  # NOTE(review): err.rate has an OOB column plus one column per class;
  # mean() averages across all of them -- confirm this is the intended summary.
  OOBErr[i] <- mean(spam_predvals_random$err.rate)
  # NOTE(review): rmse() of summary() class counts compares label frequencies,
  # not per-observation errors -- confirm this metric is intended.
  testErr[i] <- rmse(summary(test$spam), summary(pred_test))
  print(i)
}
## Plot OOB error and test error against the number of variables tried (m).
# BUG FIX: `ann = FALSE` suppresses ALL annotation, so the xlab/ylab titles
# supplied in the same calls were never drawn; it has been removed.
par(mfrow = c(1, 2))
plot(mValues, OOBErr, type = "l", ylab = "OOBErr", xlab = "mValues")
plot(mValues, testErr, type = "l", ylab = "testErr", xlab = "mValues")
| /StatisticalDataMining R codes/Assignment5/Question1.R | no_license | freyagenesis/Fall2018-codes | R | false | false | 2,054 | r | #Question 1###########################
rm(list=ls());
install.packages("ElemStatLearn");
install.packages("class");
install.packages("glmnet");
install.packages("pls");
install.packages("leaps");
install.packages("randomForest")
install.packages("Metrics");
library(ElemStatLearn);
library(class);
library(glmnet);
library(pls);
library(leaps);
library(randomForest);
library(Metrics)
data("spam");
head(spam);
##As the data are not spread correctly we will shuffle the set completely
spam<-spam[sample(nrow(spam)),];
##Now dividing the dataset into Training and test Data:
set.seed(1);
vars=c("A.1","A.2","A.3","A.4","A.5","A.6","A.7","A.8","A.9","A.10","A.11","A.12","A.13","A.14","A.15","A.16","A.17","A.18","A.19","A.20","A.21","A.22","A.23","A.24","A.25","A.26","A.27","A.28","A.29","A.30","A.31","A.32","A.33","A.34","A.35","A.36","A.37","A.38","A.39","A.40","A.41","A.42","A.43","A.44","A.45","A.46","A.47","A.48","A.49","A.50","A.51","A.52","A.53","A.54","A.55","A.56","A.57","spam");
idx=sample(x=nrow(spam), size=0.55*nrow(spam))
train=spam[idx,vars]
test=spam[-idx,vars]
#Random Forest
spam.random=randomForest(train$spam~.,data=train,mtry=57,importance=TRUE)
spam.random
predicSpam = predict(spam.random,newdata=test)
summary(predicSpam)
summary(test$spam)
mValues<-1:57
OOBErr<-numeric(length = length(mValues))
testErr<-numeric(length = length(mValues))
#Now applying the Bagging concept for application of all the predictors at each split.
#Bagging
for (i in seq_along(mValues)) {
spam_predvals_random=randomForest(train$spam~.,data=train,mtry=i,importance=TRUE)
spam_predvals_random
predict.randomForest = predict(spam_predvals_random,newdata=test)
OOBErr[i]<-mean(spam_predvals_random$err.rate);
testErr[i]<-rmse(summary(test$spam),summary(predict.randomForest));
print(i)
}
##plot between the OOBErr,TestErr and the MValues
par(mfrow=c(1,2))
plot(mValues, OOBErr, ann = FALSE, type = "l", ylab= "OOBErr", xlab= "mValues")
plot(mValues, testErr, ann = FALSE, type = "l", ylab= "testErr", xlab = "mValues")
|
library(Seurat)
library(Matrix)
library(ggplot2)
library(gplots)
library('fgsea')
# Capitalise the first character of each string and lower-case the rest
proper <- function(x) {
  first <- toupper(substr(x, 1, 1))
  rest <- tolower(substring(x, 2))
  paste0(first, rest)
}
# Figure 2 -- Single-cell mass/MAR plot -----------------------------------
fl5.growth1 = read.table("coefs_fl5_serial.txt", sep = "\t",header = FALSE)
# Quality filter. Per the axis labels below, column 2 is mass (pg) and
# column 3 is MAR (pg/h); the meaning of columns 5 and 8 is not recoverable
# from this script (presumably fit-quality metrics) -- TODO confirm.
plot.idx1 = which(fl5.growth1[,5]<0.75 & fl5.growth1[,8]>10 & fl5.growth1[,2]<85)
plot(fl5.growth1[plot.idx1,2],fl5.growth1[plot.idx1,3], ylim = c(0,8), xlim = c(30,85), pch =21, bg = rgb(0,0,1,0.65), xlab = "Mass (pg)", ylab = "MAR (pg/h)", cex = 1.5,cex.axis=2.2,cex.lab=2.2)
# Setup Seurat object -----------------------------------------------------
## READ IN RAW DATA AND META DATA
raw.data = read.table("fl5_serial_rsem3.txt",sep = "\t", header = TRUE, row.names=1)
# Natural-log transform of expression values (+1 pseudocount)
raw.data = log(raw.data+1)
meta = read.table("qc_fl5_serial3.txt",sep = "\t", header = TRUE, row.names = 1)
## SET UP SEURAT OBJECT
# do.scale = FALSE because the data were already log-transformed above;
# min.cells/min.genes filter sparsely detected genes and low-complexity cells
fl5s = CreateSeuratObject(raw.data=raw.data,project = "fl5s",min.cells = 13,names.field=1,min.genes=4000,is.expr=0,meta.data=meta, do.scale=FALSE)
#fl5s = NormalizeData(object= fl5s, normalization.method="LogNormalize")
fl5s = FindVariableGenes(object = fl5s,mean.function = ExpMean,dispersion.function=LogVMR, x.low.cutoff=0.5,y.cutoff=0.5,do.plot=FALSE)
fl5s = ScaleData(object=fl5s)
# PARE DOWN TO CELLS WITH HIGH QUALITY GROWTH MEASUREMENTS
# Day-1 cells with mass in (-100, 80); units come from the QC metadata file
# and are not verifiable from this script
fl5s.phys1 = which(fl5s@meta.data$mass>-100 & fl5s@meta.data$mass<80 & fl5s@meta.data$day ==1)
# Update cell identities
fl5s@ident <- factor(levels = c(levels(fl5s@ident), 'FL51'))
fl5s@ident[which(fl5s@ident =='FL51a')] <- 'FL51'
fl5s@ident[which(fl5s@ident =='FL51b')] <- 'FL51'
# Supplementary tables 1 & 2 ----------------------------------------------
go.set = gmtPathways('c5.all.v6.1.symbols.gmt')
# Mass -- Spearman
mass.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$mass[fl5s.phys1],method='spearman'))
mass.corr.spear[is.na(mass.corr.spear)]=0
mass.corr.spear.rank = sort(mass.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
mass.ranked.list = mass.corr.spear[mass.corr.spear.rank]
names(mass.ranked.list) = toupper(names(mass.corr.spear)[mass.corr.spear.rank])
mass.ranked.enrichment = fgsea(go.set,mass.ranked.list,nperm=15000,maxSize = 500)
mass.ranked.enrichment$NES[is.na(mass.ranked.enrichment$NES)]=0
mass.ranked.enrichment <- mass.ranked.enrichment[which(mass.ranked.enrichment$padj<0.1)]
# Normalized growth rate
fl5s@meta.data$norm = fl5s@meta.data$mar/fl5s@meta.data$mass
# norm -- Spearman
norm.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$norm[fl5s.phys1],method='spearman'))
norm.corr.spear[is.na(norm.corr.spear)]=0
norm.corr.spear.rank = sort(norm.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
norm.ranked.list = norm.corr.spear[norm.corr.spear.rank]
names(norm.ranked.list) = toupper(names(norm.corr.spear)[norm.corr.spear.rank])
norm.ranked.enrichment = fgsea(go.set,norm.ranked.list,nperm=15000,maxSize = 500)
norm.ranked.enrichment$NES[is.na(norm.ranked.enrichment$NES)]=0
norm.ranked.enrichment <- norm.ranked.enrichment[norm.ranked.enrichment$padj<0.1]
# Supplementary figure 4 & 5 ----------------------------------------------
# Null distributions: correlate each gene with a *permuted* mass/norm vector
# (10 permutations), and use 2x the mean null SD as a significance threshold.
# Null Distribution for mass
fl5.mass.null = sapply(1:10, function(x) sample(fl5s.phys1,length(fl5s.phys1),replace=FALSE))
fl5.mass.corr.null = sapply(1:10,function(x) (apply(fl5s@data[,fl5s.phys1],1,function(y) cor(y,fl5s@meta.data$mass[fl5.mass.null[,x]],method='spearman'))))
fl5.mass.corr.null[is.na(fl5.mass.corr.null)]=0
fl5.mass.null.mean = mean(colMeans(fl5.mass.corr.null))
fl5.mass.null.sd = mean(apply(fl5.mass.corr.null,2,function(x) sd(x)))
# Red = genes whose |correlation| exceeds the 2-SD null threshold.
cols<- c("blue","red")[(abs(mass.corr.spear[mass.corr.spear.rank])>2*fl5.mass.null.sd) + 1]
barplot(mass.corr.spear[mass.corr.spear.rank], col=cols,border=NA,ylim = c(-0.75,.75),xaxt='n', main = c('fl5 mass',as.character(length(which(mass.corr.spear>2*fl5.mass.null.sd))),as.character(length(which(mass.corr.spear< -2*fl5.mass.null.sd)))))
box()
abline(h = 2*fl5.mass.null.sd,lwd=2,col=1,lty=2)
abline(h = -2*fl5.mass.null.sd,lwd=2,col=1,lty=2)
abline(h=0,lwd=1)
# Plot Spearman v. Pearson for fl5 mass correlations
mass.corr.pears = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$mass[fl5s.phys1]))
# FIX: axis label typo 'Coefficeint' -> 'Coefficient'.
plot(mass.corr.spear, mass.corr.pears, pch = 21, bg = rgb(0,0,1,0.3), xlab = 'Spearman Coefficient', ylab = 'Pearson Coefficient', xlim = c(-0.8,0.8), ylim = c(-0.8,.8), cex.lab =1.2, cex.axis = 1.2,cex=0.75,col=rgb(0,0,0,0))
title(c('fl5 Mass', as.character(cor(mass.corr.spear,mass.corr.pears))))
# Null Distribution for norm (same construction as for mass)
fl5.norm.null = sapply(1:10, function(x) sample(fl5s.phys1,length(fl5s.phys1),replace=FALSE))
fl5.norm.corr.null = sapply(1:10,function(x) (apply(fl5s@data[,fl5s.phys1],1,function(y) cor(y,fl5s@meta.data$norm[fl5.norm.null[,x]],method='spearman'))))
fl5.norm.corr.null[is.na(fl5.norm.corr.null)]=0
fl5.norm.null.mean = mean(colMeans(fl5.norm.corr.null))
fl5.norm.null.sd = mean(apply(fl5.norm.corr.null,2,function(x) sd(x)))
cols<- c("blue","red")[(abs(norm.corr.spear[norm.corr.spear.rank])>2*fl5.norm.null.sd) + 1]
barplot(norm.corr.spear[norm.corr.spear.rank], col=cols,border=NA,ylim = c(-0.75,.75),xaxt='n', main = c('fl5 norm',as.character(length(which(norm.corr.spear>2*fl5.norm.null.sd))),as.character(length(which(norm.corr.spear< -2*fl5.norm.null.sd)))))
box()
abline(h = 2*fl5.norm.null.sd,lwd=2,col=1,lty=2)
abline(h = -2*fl5.norm.null.sd,lwd=2,col=1,lty=2)
abline(h=0,lwd=1)
# Plot Spearman v. Pearson for fl5 norm correlations
norm.corr.pears = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$norm[fl5s.phys1]))
# FIX: axis label typo 'Coefficeint' -> 'Coefficient'.
plot(norm.corr.spear, norm.corr.pears, pch = 21, bg = rgb(0,0,1,0.3), xlab = 'Spearman Coefficient', ylab = 'Pearson Coefficient', xlim = c(-0.8,0.8), ylim = c(-0.8,.8), cex.lab =1.2, cex.axis = 1.2,cex=0.75,col=rgb(0,0,0,0))
title(c('fl5 norm', as.character(cor(norm.corr.spear,norm.corr.pears))))
# Figure 2b -- Cell cycle heat map ----------------------------------------
# Mass ranked heat map
# Recompute the mass correlations/ranking (same as above) so this section
# can run independently of the supplementary-table code.
mass.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$mass[fl5s.phys1],method='spearman'))
mass.corr.spear[is.na(mass.corr.spear)]=0
mass.corr.spear.rank = sort(mass.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
mass.ranked.list = mass.corr.spear[mass.corr.spear.rank]
# Find genes in chromosome segregation and DNA replication GO terms with significant correlation with mass
chr.list <- as.matrix(read.table('chromosome_segregation.txt', header = TRUE, sep ='\t'))
# Find those with significant positive correlation with gene expression based on null SD
# NOTE(review): 0.1178759 is a hard-coded threshold, presumably a previously
# computed fl5.mass.null.sd; it will not match a freshly drawn null. Verify.
chr.list.keep <- which(mass.ranked.list[(chr.list)]>2*0.1178759)
chr.list <- proper(chr.list[chr.list.keep])
# Now the negatively correlated
#dna.list <- mass.ranked.enrichment[mass.ranked.enrichment$pathway == "GO_DNA_REPLICATION"]$leadingEdge[[1]]
dna.list <- as.matrix(read.table('dna_replication.txt', header = TRUE, sep ='\t'))
dna.keep <- which(mass.ranked.list[(dna.list)]< -2*0.1178759)
dna.list <- proper(dna.list[dna.keep])
all.genes <- c(chr.list, dna.list)
# Order cells by mass
mass.sort = sort(fl5s@meta.data$mass[fl5s.phys1],decreasing=FALSE,index.return=TRUE)$ix
c1 <- colnames(fl5s@data)[fl5s.phys1][mass.sort]
DoHeatmap(fl5s, genes.use = all.genes, cells.use = c1, disp.min = -1.5, disp.max = 1.5, cex.col = 0)
# Bar plot of masses for heat map
barplot(fl5s@meta.data$mass[fl5s.phys1][mass.sort],space = 1,col=1, ylim = c(20,90))
# Supp. Fig. 7
# G1S scoring
# Recompute norm correlations/ranking (as above) for this section.
norm.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$norm[fl5s.phys1],method='spearman'))
norm.corr.spear[is.na(norm.corr.spear)]=0
norm.corr.spear.rank = sort(norm.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
norm.ranked.list = norm.corr.spear[norm.corr.spear.rank]
# Re-run null distribution
fl5.norm.null = sapply(1:10, function(x) sample(fl5s.phys1,length(fl5s.phys1),replace=FALSE))
fl5.norm.corr.null = sapply(1:10,function(x) (apply(fl5s@data[,fl5s.phys1],1,function(y) cor(y,fl5s@meta.data$norm[fl5.norm.null[,x]],method='spearman'))))
fl5.norm.corr.null[is.na(fl5.norm.corr.null)]=0
fl5.norm.null.mean = mean(colMeans(fl5.norm.corr.null))
fl5.norm.null.sd = mean(apply(fl5.norm.corr.null,2,function(x) sd(x)))
# G1/S score = mean scaled expression of the significant G1/S genes per cell,
# clipped to [-1, 1] for coloring.
g1s.list <- as.matrix(read.table('g1_s_transition.txt', header = TRUE, sep = '\t'))
g1s.keep <- which(norm.ranked.list[(g1s.list)] > 2*fl5.norm.null.sd)
g1s.list <- g1s.list[g1s.keep]
g1s.score <- colMeans(as.matrix(fl5s@scale.data)[g1s.list, fl5s.phys1])
g1s.score <- MinMax(g1s.score, -1.0,1.0)
colfunc = colorRampPalette(c("blue","white","red"))
col.score = colfunc(length(g1s.score))[as.numeric(cut((g1s.score),breaks=length(g1s.score)))]
# NOTE(review): the y variable here is norm (MAR/mass) but the label reads
# "MAR (pg/h)" -- confirm whether the label should be normalized MAR.
plot(fl5s@meta.data$mass[fl5s.phys1],fl5s@meta.data$norm[fl5s.phys1],pch=21,cex=1.5, xlab = "Mass (pg)", ylab = "MAR (pg/h)", bg=col.score, xlim = c(30,80), ylim = c(0,.12))
| /fl5_analysis.R | no_license | rjkimmer/linkedMeasurementAnalysis | R | false | false | 8,998 | r | library(Seurat)
library(Matrix)
library(ggplot2)
library(gplots)
library('fgsea')
proper<-function(x) paste0(toupper(substr(x, 1, 1)), tolower(substring(x, 2)))
# Figure 2 -- Single-cell mass/MAR plot -----------------------------------
fl5.growth1 = read.table("coefs_fl5_serial.txt", sep = "\t",header = FALSE)
plot.idx1 = which(fl5.growth1[,5]<0.75 & fl5.growth1[,8]>10 & fl5.growth1[,2]<85)
plot(fl5.growth1[plot.idx1,2],fl5.growth1[plot.idx1,3], ylim = c(0,8), xlim = c(30,85), pch =21, bg = rgb(0,0,1,0.65), xlab = "Mass (pg)", ylab = "MAR (pg/h)", cex = 1.5,cex.axis=2.2,cex.lab=2.2)
# Setup Seurat object -----------------------------------------------------
## READ IN RAW DATA AND META DATA
raw.data = read.table("fl5_serial_rsem3.txt",sep = "\t", header = TRUE, row.names=1)
raw.data = log(raw.data+1)
meta = read.table("qc_fl5_serial3.txt",sep = "\t", header = TRUE, row.names = 1)
## SET UP SEURAT OBJECT
fl5s = CreateSeuratObject(raw.data=raw.data,project = "fl5s",min.cells = 13,names.field=1,min.genes=4000,is.expr=0,meta.data=meta, do.scale=FALSE)
#fl5s = NormalizeData(object= fl5s, normalization.method="LogNormalize")
fl5s = FindVariableGenes(object = fl5s,mean.function = ExpMean,dispersion.function=LogVMR, x.low.cutoff=0.5,y.cutoff=0.5,do.plot=FALSE)
fl5s = ScaleData(object=fl5s)
# PARE DOWN TO CELLS WITH HIGH QUALITY GROWTH MEASUREMENTS
fl5s.phys1 = which(fl5s@meta.data$mass>-100 & fl5s@meta.data$mass<80 & fl5s@meta.data$day ==1)
# Update cell identities
fl5s@ident <- factor(levels = c(levels(fl5s@ident), 'FL51'))
fl5s@ident[which(fl5s@ident =='FL51a')] <- 'FL51'
fl5s@ident[which(fl5s@ident =='FL51b')] <- 'FL51'
# Supplementary tables 1 & 2 ----------------------------------------------
go.set = gmtPathways('c5.all.v6.1.symbols.gmt')
# Mass -- Spearman
mass.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$mass[fl5s.phys1],method='spearman'))
mass.corr.spear[is.na(mass.corr.spear)]=0
mass.corr.spear.rank = sort(mass.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
mass.ranked.list = mass.corr.spear[mass.corr.spear.rank]
names(mass.ranked.list) = toupper(names(mass.corr.spear)[mass.corr.spear.rank])
mass.ranked.enrichment = fgsea(go.set,mass.ranked.list,nperm=15000,maxSize = 500)
mass.ranked.enrichment$NES[is.na(mass.ranked.enrichment$NES)]=0
mass.ranked.enrichment <- mass.ranked.enrichment[which(mass.ranked.enrichment$padj<0.1)]
# Normalized growth rate
fl5s@meta.data$norm = fl5s@meta.data$mar/fl5s@meta.data$mass
# norm -- Spearman
norm.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$norm[fl5s.phys1],method='spearman'))
norm.corr.spear[is.na(norm.corr.spear)]=0
norm.corr.spear.rank = sort(norm.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
norm.ranked.list = norm.corr.spear[norm.corr.spear.rank]
names(norm.ranked.list) = toupper(names(norm.corr.spear)[norm.corr.spear.rank])
norm.ranked.enrichment = fgsea(go.set,norm.ranked.list,nperm=15000,maxSize = 500)
norm.ranked.enrichment$NES[is.na(norm.ranked.enrichment$NES)]=0
norm.ranked.enrichment <- norm.ranked.enrichment[norm.ranked.enrichment$padj<0.1]
# Supplementary figure 4 & 5 ----------------------------------------------
# Null Distribution for mass
fl5.mass.null = sapply(1:10, function(x) sample(fl5s.phys1,length(fl5s.phys1),replace=FALSE))
fl5.mass.corr.null = sapply(1:10,function(x) (apply(fl5s@data[,fl5s.phys1],1,function(y) cor(y,fl5s@meta.data$mass[fl5.mass.null[,x]],method='spearman'))))
fl5.mass.corr.null[is.na(fl5.mass.corr.null)]=0
fl5.mass.null.mean = mean(colMeans(fl5.mass.corr.null))
fl5.mass.null.sd = mean(apply(fl5.mass.corr.null,2,function(x) sd(x)))
cols<- c("blue","red")[(abs(mass.corr.spear[mass.corr.spear.rank])>2*fl5.mass.null.sd) + 1]
barplot(mass.corr.spear[mass.corr.spear.rank], col=cols,border=NA,ylim = c(-0.75,.75),xaxt='n', main = c('fl5 mass',as.character(length(which(mass.corr.spear>2*fl5.mass.null.sd))),as.character(length(which(mass.corr.spear< -2*fl5.mass.null.sd)))))
box()
abline(h = 2*fl5.mass.null.sd,lwd=2,col=1,lty=2)
abline(h = -2*fl5.mass.null.sd,lwd=2,col=1,lty=2)
abline(h=0,lwd=1)
# Plot Spearman v. Pearson for fl5 mass correlations
mass.corr.pears = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$mass[fl5s.phys1]))
plot(mass.corr.spear, mass.corr.pears, pch = 21, bg = rgb(0,0,1,0.3), xlab = 'Spearman Coefficient', ylab = 'Pearson Coefficeint', xlim = c(-0.8,0.8), ylim = c(-0.8,.8), cex.lab =1.2, cex.axis = 1.2,cex=0.75,col=rgb(0,0,0,0))
title(c('fl5 Mass', as.character(cor(mass.corr.spear,mass.corr.pears))))
# Null Distribution for norm
fl5.norm.null = sapply(1:10, function(x) sample(fl5s.phys1,length(fl5s.phys1),replace=FALSE))
fl5.norm.corr.null = sapply(1:10,function(x) (apply(fl5s@data[,fl5s.phys1],1,function(y) cor(y,fl5s@meta.data$norm[fl5.norm.null[,x]],method='spearman'))))
fl5.norm.corr.null[is.na(fl5.norm.corr.null)]=0
fl5.norm.null.mean = mean(colMeans(fl5.norm.corr.null))
fl5.norm.null.sd = mean(apply(fl5.norm.corr.null,2,function(x) sd(x)))
cols<- c("blue","red")[(abs(norm.corr.spear[norm.corr.spear.rank])>2*fl5.norm.null.sd) + 1]
barplot(norm.corr.spear[norm.corr.spear.rank], col=cols,border=NA,ylim = c(-0.75,.75),xaxt='n', main = c('fl5 norm',as.character(length(which(norm.corr.spear>2*fl5.norm.null.sd))),as.character(length(which(norm.corr.spear< -2*fl5.norm.null.sd)))))
box()
abline(h = 2*fl5.norm.null.sd,lwd=2,col=1,lty=2)
abline(h = -2*fl5.norm.null.sd,lwd=2,col=1,lty=2)
abline(h=0,lwd=1)
# Plot Spearman v. Pearson for fl5 norm correlations
norm.corr.pears = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$norm[fl5s.phys1]))
plot(norm.corr.spear, norm.corr.pears, pch = 21, bg = rgb(0,0,1,0.3), xlab = 'Spearman Coefficient', ylab = 'Pearson Coefficeint', xlim = c(-0.8,0.8), ylim = c(-0.8,.8), cex.lab =1.2, cex.axis = 1.2,cex=0.75,col=rgb(0,0,0,0))
title(c('fl5 norm', as.character(cor(norm.corr.spear,norm.corr.pears))))
# Figure 2b -- Cell cycle heat map ----------------------------------------
# Mass ranked heat map
mass.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$mass[fl5s.phys1],method='spearman'))
mass.corr.spear[is.na(mass.corr.spear)]=0
mass.corr.spear.rank = sort(mass.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
mass.ranked.list = mass.corr.spear[mass.corr.spear.rank]
# Find genes in chromosome segregation and DNA replication GO terms with significant correlation with mass
chr.list <- as.matrix(read.table('chromosome_segregation.txt', header = TRUE, sep ='\t'))
# Find those with significant positive correlation with gene expression based on null SD
chr.list.keep <- which(mass.ranked.list[(chr.list)]>2*0.1178759)
chr.list <- proper(chr.list[chr.list.keep])
# Now the negatively correlated
#dna.list <- mass.ranked.enrichment[mass.ranked.enrichment$pathway == "GO_DNA_REPLICATION"]$leadingEdge[[1]]
dna.list <- as.matrix(read.table('dna_replication.txt', header = TRUE, sep ='\t'))
dna.keep <- which(mass.ranked.list[(dna.list)]< -2*0.1178759)
dna.list <- proper(dna.list[dna.keep])
all.genes <- c(chr.list, dna.list)
# Order cells by mass
mass.sort = sort(fl5s@meta.data$mass[fl5s.phys1],decreasing=FALSE,index.return=TRUE)$ix
c1 <- colnames(fl5s@data)[fl5s.phys1][mass.sort]
DoHeatmap(fl5s, genes.use = all.genes, cells.use = c1, disp.min = -1.5, disp.max = 1.5, cex.col = 0)
# Bar plot of masses for heat map
barplot(fl5s@meta.data$mass[fl5s.phys1][mass.sort],space = 1,col=1, ylim = c(20,90))
# Supp. Fig. 7
# G1S scoring
norm.corr.spear = apply(t(as.matrix(fl5s@data[,fl5s.phys1])),2,function(x) cor(x,fl5s@meta.data$norm[fl5s.phys1],method='spearman'))
norm.corr.spear[is.na(norm.corr.spear)]=0
norm.corr.spear.rank = sort(norm.corr.spear, decreasing=TRUE,index.return=TRUE)$ix
norm.ranked.list = norm.corr.spear[norm.corr.spear.rank]
# Re-run null distribution
fl5.norm.null = sapply(1:10, function(x) sample(fl5s.phys1,length(fl5s.phys1),replace=FALSE))
fl5.norm.corr.null = sapply(1:10,function(x) (apply(fl5s@data[,fl5s.phys1],1,function(y) cor(y,fl5s@meta.data$norm[fl5.norm.null[,x]],method='spearman'))))
fl5.norm.corr.null[is.na(fl5.norm.corr.null)]=0
fl5.norm.null.mean = mean(colMeans(fl5.norm.corr.null))
fl5.norm.null.sd = mean(apply(fl5.norm.corr.null,2,function(x) sd(x)))
g1s.list <- as.matrix(read.table('g1_s_transition.txt', header = TRUE, sep = '\t'))
g1s.keep <- which(norm.ranked.list[(g1s.list)] > 2*fl5.norm.null.sd)
g1s.list <- g1s.list[g1s.keep]
g1s.score <- colMeans(as.matrix(fl5s@scale.data)[g1s.list, fl5s.phys1])
g1s.score <- MinMax(g1s.score, -1.0,1.0)
colfunc = colorRampPalette(c("blue","white","red"))
col.score = colfunc(length(g1s.score))[as.numeric(cut((g1s.score),breaks=length(g1s.score)))]
plot(fl5s@meta.data$mass[fl5s.phys1],fl5s@meta.data$norm[fl5s.phys1],pch=21,cex=1.5, xlab = "Mass (pg)", ylab = "MAR (pg/h)", bg=col.score, xlim = c(30,80), ylim = c(0,.12))
|
searchResultsSummaryTable <-
function(aSearchResults) {
  # Summarise a list of signature-search results as one data frame with one
  # row per seed gene and columns: signature length, tValue, log10(pValue),
  # tValue improvement (%) over the starting tValue, and the signature itself.
  #
  # Args:
  #   aSearchResults: a list; each element is a list with components
  #     signatureName, signature, tValue, pValue and startingTValue.
  # Returns:
  #   A data frame whose cells are character strings (values are formatted
  #   into a character matrix before conversion, as in the original).
  K <- length(aSearchResults)
  # vapply replaces the old `for(k in 2:K)` loop, which broke for K == 1
  # because 2:1 counts down and indexed a non-existent element.
  seedGenes <- vapply(aSearchResults, function(res) res$signatureName, character(1))
  tableOfSignatures <- matrix("", ncol = 5, nrow = K)
  rownames(tableOfSignatures) <- seedGenes
  colnames(tableOfSignatures) <- c("length", "tValue", "log(pValue)", "tValue improvement", "signature")
  for (k in seq_len(K))
    tableOfSignatures[k, ] <- c(length(aSearchResults[[k]]$signature),
                                round(aSearchResults[[k]]$tValue, 3),
                                # log10() replaces log(x)/log(10); identical after rounding.
                                round(log10(aSearchResults[[k]]$pValue), 3),
                                round((1 - aSearchResults[[k]]$startingTValue/aSearchResults[[k]]$tValue) * 100, 2),
                                paste(aSearchResults[[k]]$signature, collapse = ", "))
  tableOfSignatures <- as.data.frame(tableOfSignatures)
  return(tableOfSignatures)
}
| /geneSignatureFinder/R/searchResultsSummaryTable.R | no_license | ingted/R-Examples | R | false | false | 954 | r | searchResultsSummaryTable <-
function(aSearchResults) {
K <- length(aSearchResults)
seedGenes <- aSearchResults[[1]]$signatureName
for(k in 2:K)
seedGenes <- c(seedGenes, aSearchResults[[k]]$signatureName)
tableOfSignatures <- matrix("", ncol = 5, nrow = K)
rownames(tableOfSignatures) <- seedGenes
colnames(tableOfSignatures) <- c("length", "tValue", "log(pValue)", "tValue improvement", "signature")
for (k in 1:K)
tableOfSignatures[k, ] <- c(length(aSearchResults[[k]]$signature),
round(aSearchResults[[k]]$tValue, 3),
round(log(aSearchResults[[k]]$pValue)/log(10), 3),
round((1 - aSearchResults[[k]]$startingTValue/aSearchResults[[k]]$tValue) * 100, 2),
paste(aSearchResults[[k]]$signature, collapse = ", "))
tableOfSignatures <- as.data.frame(tableOfSignatures)
return(tableOfSignatures)
}
|
# Wine clustering: PCA -> hierarchical clustering (HCPC) -> cluster profiling.
# NOTE(review): rm(list=ls()) and an absolute, machine-specific data path make
# this script non-portable; kept as-is to preserve standalone behavior.
rm(list=ls())
library(FactoMineR)
#Read the data
AllWineData <- read.table("/media/walnut/41B4CE9B32C4BFA1/UNI/MVA/Project/MVA_Practical_Work/AllWineDataPreProcessed.csv", header=TRUE, sep=";")
indnames <- rownames(AllWineData)
varnames <- colnames(AllWineData)
#PCA
# FIX: `scale = T` relied on partial argument matching (PCA's argument is
# `scale.unit`) and the reassignable shorthand `T`; spelled out explicitly.
# Column 13 (wine type) is a supplementary qualitative variable and column 12
# (quality) a supplementary quantitative variable.
pca <-PCA(AllWineData, quali.sup = 13, quanti.sup = 12, scale.unit = TRUE) #Shouldn't type and quality be supplementary?
#HCPC clustering
# nb.clust = -1 lets HCPC pick the number of clusters; consol runs k-means
# consolidation of the tree cut.
AllWineData.hcpc <- HCPC(pca, nb.clust=-1, consol=TRUE)
#paragons
AllWineData.hcpc$desc.ind$para #Useless
#Profiling
# NOTE(review): `cut` shadows base::cut for the rest of the session.
cut <- AllWineData.hcpc$data.clust$clust
# Describe each cluster by the variables that characterise it (p < 0.0005).
catdes <- catdes(cbind(as.factor(cut),AllWineData),1, proba = 0.0005)
#
catdes$category #red or white
catdes$quanti #rest of influenciable variables | /Clustering.R | no_license | Alaakc95/MVA_Practical_Work | R | false | false | 707 | r | rm(list=ls())
library(FactoMineR)
#Read the data
AllWineData <- read.table("/media/walnut/41B4CE9B32C4BFA1/UNI/MVA/Project/MVA_Practical_Work/AllWineDataPreProcessed.csv", header=TRUE, sep=";")
indnames <- rownames(AllWineData)
varnames <- colnames(AllWineData)
#PCA
pca <-PCA(AllWineData, quali.sup = 13, quanti.sup = 12, scale = T) #Shouldn't type and quality be supplementary?
#HCPC clustering
AllWineData.hcpc <- HCPC(pca, nb.clust=-1, consol=T)
#paragons
AllWineData.hcpc$desc.ind$para #Useless
#Profiling
cut <- AllWineData.hcpc$data.clust$clust
catdes <- catdes(cbind(as.factor(cut),AllWineData),1, proba = 0.0005)
#
catdes$category #red or white
catdes$quanti #rest of influenciable variables |
#install.packages("rstan", repo="http://cran.uni-muenster.de/")
library(tidyverse)
library(rstan)
# Stan program: a K-component negative-binomial (NB2) mixture regression.
# Each component k has its own coefficient vector betas[k] (log link on the
# mean, mu = exp(X * betas[k])) and its own dispersion sigma[k]. The mixing
# proportions theta come from a symmetric Dirichlet(alpha0) prior,
# implemented via independent gamma draws on a positive_ordered vector
# (which also breaks label-switching symmetry). The likelihood marginalises
# over the component assignment with log_sum_exp.
model_string <-"
data {
int<lower=1> K; // number of mixture components
int<lower=1> N; // number of data points
int<lower=1> M; //the number of columns in the model matrix
matrix[N,M] X; //the model matrix
int y[N]; // observations
real<lower=0> alpha0 ; // dirichlet prior
}
transformed data {
vector<lower=0>[K] alpha0_vec = rep_vector(alpha0, K); //symmetric dirichlet prior
}
parameters {
positive_ordered[K] gamma; // primitives of mixing proportions
vector[M] betas[K];
real<lower=0> sigma[K]; // scales of mixture components
}
transformed parameters {
vector[K] theta = gamma / sum(gamma);
}
model {
gamma ~ gamma(alpha0_vec, 1); // implies: theta ~ dirichlet(alpha_vec)
sigma ~ lognormal(0, 2);
for (k in 1:K)
betas[k,] ~ normal(0,10);
for (n in 1:N) {
real lps[K];
for (k in 1:K)
lps[k] = log(theta[k]) + neg_binomial_2_lpmf(y[n] | exp(X[n] * betas[k]),
sigma[k]);
target += log_sum_exp(lps);
}
}"
set.seed(41)
# Simulate three 5-component NB mixture scenarios differing in component
# overlap. Each component has regression coefficients (intercept, b1, b2) on
# the log-mean scale and an NB size (dispersion) parameter; wider intercept
# gaps and larger sizes give better-separated components.
# heavy overlap
betas.hvy <- list(c(0, .5, -.5),
c(1, 0, 0),
c(1.7, -.5, .5),
c(2, 0, 0),
c(2.3, .5, -.5))
sizes.hvy <- seq(1,50,l=5)
# medium overlap
betas.med <- list(c(0, .5, -.5),
c(1.8, 0, 0),
c(2.4, -.5, .5),
c(2.7, 0, 0),
c(3, .5, -.5))
sizes.med <- seq(1,100,l=5)
# low overlap
betas.low <- list(c(0, .5, -.5),
c(3.2, 0, 0),
c(3.9, -.5, .5),
c(4.3, 0, 0),
c(4.6, .5, -.5))
sizes.low <- seq(1,200,l=5)
# Unequal component sizes: 3000, 5000, 3000, 4000, 2000 observations.
N <- c(3,5,3,4,2)*1000
# One design matrix per component: intercept column V plus two N(0, 0.5)
# covariates V1, V2 (shared across the three scenarios).
X <- list(data.frame(V=rep(1,N[1]),
V1=rnorm(N[1], 0, .5),
V2=rnorm(N[1], 0, .5)),
data.frame(V=rep(1,N[2]),
V1=rnorm(N[2], 0, .5),
V2=rnorm(N[2], 0, .5)),
data.frame(V=rep(1,N[3]),
V1=rnorm(N[3], 0, .5),
V2=rnorm(N[3], 0, .5)),
data.frame(V=rep(1,N[4]),
V1=rnorm(N[4], 0, .5),
V2=rnorm(N[4], 0, .5)),
data.frame(V=rep(1,N[5]),
V1=rnorm(N[5], 0, .5),
V2=rnorm(N[5], 0, .5)))
# Per-component NB means via the log link: mu = exp(X %*% beta).
mus.hvy <- mapply(function(x,y) exp(x%*% y),
x=lapply(X,as.matrix),y=betas.hvy)
mus.med <- mapply(function(x,y) exp(x%*% y),
x=lapply(X,as.matrix),y=betas.med)
mus.low <- mapply(function(x,y) exp(x%*% y),
x=lapply(X,as.matrix),y=betas.low)
# Draw NB counts component-by-component for each scenario.
y.hvy <- mapply(rnbinom,n=N,size=sizes.hvy,mu=mus.hvy)
y.med <- mapply(rnbinom,n=N,size=sizes.med,mu=mus.med)
y.low <- mapply(rnbinom,n=N,size=sizes.low,mu=mus.low)
# Stack each scenario into one data frame: response y, true component label
# `comp` (as a factor), and the covariates.
df.hvy <- tbl_df(data.frame(y = unlist(y.hvy),
comp = rep(1:5,N),
do.call("rbind",X))) %>%
mutate(comp=factor(comp))
df.med <- tbl_df(data.frame(y = unlist(y.med),
comp = rep(1:5,N),
do.call("rbind",X))) %>%
mutate(comp=factor(comp))
df.low <- tbl_df(data.frame(y = unlist(y.low),
comp = rep(1:5,N),
do.call("rbind",X))) %>%
mutate(comp=factor(comp))
# 3 comp med
# Fit the K = 6 mixture to the medium-overlap data restricted to components
# 1, 2 and 5 (so the true number of components, 3, is smaller than K).
X <- df.med %>% filter(comp%in%c(1,2,5)) %>% select(V1,V2) %>% cbind(rep(1,
nrow(filter(df.med,comp%in%c(1,2,5)))), .)
y <- df.med %>% filter(comp%in%c(1,2,5)) %>% select(y)
# BUG FIX: `iter = 2000` and `warmup = 1000` were previously passed *inside*
# the `data` list, where rstan silently ignores them (the Stan model declares
# no such data variables), so the sampler ran with its defaults. They are
# arguments of stan() itself and have been moved there.
m3m <- stan(model_code = model_string,
            data = list(X = as.matrix(X), M = ncol(X), K = 6,
                        y = pull(y), N = nrow(X), alpha0 = 0.1),
            iter = 2000, warmup = 1000, chains = 3,
            cores = 3,
            control = list(adapt_delta = 0.9))
m3m
save.image(file="/home/igm/christoph.kurz/R/dpmix/simstudyc3m.Rdata")
| /simstudyc3m.R | no_license | krz/dp-nb | R | false | false | 3,839 | r | #install.packages("rstan", repo="http://cran.uni-muenster.de/")
library(tidyverse)
library(rstan)
model_string <-"
data {
int<lower=1> K; // number of mixture components
int<lower=1> N; // number of data points
int<lower=1> M; //the number of columns in the model matrix
matrix[N,M] X; //the model matrix
int y[N]; // observations
real<lower=0> alpha0 ; // dirichlet prior
}
transformed data {
vector<lower=0>[K] alpha0_vec = rep_vector(alpha0, K); //symmetric dirichlet prior
}
parameters {
positive_ordered[K] gamma; // primitives of mixing proportions
vector[M] betas[K];
real<lower=0> sigma[K]; // scales of mixture components
}
transformed parameters {
vector[K] theta = gamma / sum(gamma);
}
model {
gamma ~ gamma(alpha0_vec, 1); // implies: theta ~ dirichlet(alpha_vec)
sigma ~ lognormal(0, 2);
for (k in 1:K)
betas[k,] ~ normal(0,10);
for (n in 1:N) {
real lps[K];
for (k in 1:K)
lps[k] = log(theta[k]) + neg_binomial_2_lpmf(y[n] | exp(X[n] * betas[k]),
sigma[k]);
target += log_sum_exp(lps);
}
}"
set.seed(41)
# heavy overlap
betas.hvy <- list(c(0, .5, -.5),
c(1, 0, 0),
c(1.7, -.5, .5),
c(2, 0, 0),
c(2.3, .5, -.5))
sizes.hvy <- seq(1,50,l=5)
# medium overlap
betas.med <- list(c(0, .5, -.5),
c(1.8, 0, 0),
c(2.4, -.5, .5),
c(2.7, 0, 0),
c(3, .5, -.5))
sizes.med <- seq(1,100,l=5)
# low overlap
betas.low <- list(c(0, .5, -.5),
c(3.2, 0, 0),
c(3.9, -.5, .5),
c(4.3, 0, 0),
c(4.6, .5, -.5))
sizes.low <- seq(1,200,l=5)
N <- c(3,5,3,4,2)*1000
X <- list(data.frame(V=rep(1,N[1]),
V1=rnorm(N[1], 0, .5),
V2=rnorm(N[1], 0, .5)),
data.frame(V=rep(1,N[2]),
V1=rnorm(N[2], 0, .5),
V2=rnorm(N[2], 0, .5)),
data.frame(V=rep(1,N[3]),
V1=rnorm(N[3], 0, .5),
V2=rnorm(N[3], 0, .5)),
data.frame(V=rep(1,N[4]),
V1=rnorm(N[4], 0, .5),
V2=rnorm(N[4], 0, .5)),
data.frame(V=rep(1,N[5]),
V1=rnorm(N[5], 0, .5),
V2=rnorm(N[5], 0, .5)))
mus.hvy <- mapply(function(x,y) exp(x%*% y),
x=lapply(X,as.matrix),y=betas.hvy)
mus.med <- mapply(function(x,y) exp(x%*% y),
x=lapply(X,as.matrix),y=betas.med)
mus.low <- mapply(function(x,y) exp(x%*% y),
x=lapply(X,as.matrix),y=betas.low)
y.hvy <- mapply(rnbinom,n=N,size=sizes.hvy,mu=mus.hvy)
y.med <- mapply(rnbinom,n=N,size=sizes.med,mu=mus.med)
y.low <- mapply(rnbinom,n=N,size=sizes.low,mu=mus.low)
df.hvy <- tbl_df(data.frame(y = unlist(y.hvy),
comp = rep(1:5,N),
do.call("rbind",X))) %>%
mutate(comp=factor(comp))
df.med <- tbl_df(data.frame(y = unlist(y.med),
comp = rep(1:5,N),
do.call("rbind",X))) %>%
mutate(comp=factor(comp))
df.low <- tbl_df(data.frame(y = unlist(y.low),
comp = rep(1:5,N),
do.call("rbind",X))) %>%
mutate(comp=factor(comp))
# 3 comp med
X <- df.med %>% filter(comp%in%c(1,2,5)) %>% select(V1,V2) %>% cbind(rep(1,
nrow(filter(df.med,comp%in%c(1,2,5)))), .)
y <- df.med %>% filter(comp%in%c(1,2,5)) %>% select(y)
m3m <- stan(model_code = model_string, data = list(X=as.matrix(X), M=ncol(X),
K=6, y = pull(y), N = nrow(X), iter=2000, warmup=1000,
alpha0=0.1), chains=3,
cores=3,
control=list(adapt_delta=0.9))
m3m
save.image(file="/home/igm/christoph.kurz/R/dpmix/simstudyc3m.Rdata")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_plsr.r
\name{plot_pls_args}
\alias{plot_pls_args}
\title{Plot PLSR - Arguments}
\arguments{
\item{do.pls}{Logical. If used in a plotting function, if analysis from PLSR
models should be plotted.}
\item{pls.colorBy}{NULL or character. What class-variable should be
used for coloring in the RMSEC and RMSECV plots. Set to NULL for no coloring,
or provide a character length one with a single column name of a class
variable that should be used for coloring.}
\item{pls.what}{What types of plsr analysis to plot. Possible values are
'both', 'errors', 'regression'.}
\item{pls.rdp}{Logical (TRUE or FALSE). If errors in the error plots should be
given in RDP or not.}
\item{pg.where}{Character length one. If left at the default 'def', the value
from the settings.r file is read in (parameter \code{gen_plot_pgWhereDefault}).
For plotting to PDFs provide "pdf", for plotting to graphics device provide
anything but "pdf".}
\item{pg.main}{Character length one. The additional text on the title of each
single plot.}
\item{pg.sub}{Character length one. The additional text on the subtitle of
each single plot.}
\item{pg.fns}{Character length one. The additional text in the filename of
the pdf.}
}
\description{
The following parameters can be used in the \code{...} argument
e.g. in function \code{\link{plot}} and \code{\link{plot_pls}} to override
the values in the analysis procedure file and so to modify the graphics -
see examples.
\describe{
\item{\code{plot(cube, ...)}}{ }
\item{ \code{plot_pls(cube, ...)}}{ }
}
}
\details{
For a list of all parameters that can be used in the \code{...}
argument in \code{\link{getap}} and in the \code{\link{plot}} functions
please see \code{\link{anproc_file}}.
}
\examples{
\dontrun{
dataset <- gfd()
cube <- gdmm(dataset)
plot(cube, do.pls=FALSE) # to plot everything available except the plsr-models
plot(cube, pls.colorBy="C_Temp")
plot_pls(cube, pls.colorBy="C_Temp")
}
}
\seealso{
\code{\link{plot_pls}}
Other Plot arguments: \code{\link{plot,aquap_data,missing-method}},
\code{\link{plot_NNET_args}},
\code{\link{plot_SVM_args}}, \code{\link{plot_aqg_args}},
\code{\link{plot_discrimAnalysis_args}},
\code{\link{plot_pca_args}}, \code{\link{plot_pg_args}},
\code{\link{plot_randomForest_args}},
\code{\link{plot_sim_args}}
Other PLSR documentation: \code{\link{calc_pls_args}},
\code{\link{plot_pls,aquap_cube-method}},
\code{\link{plot_pls_indepPred}}
}
\concept{PLSR documentation}
\concept{Plot arguments}
| /man/plot_pls_args.Rd | no_license | joescharf/aquap2 | R | false | true | 2,582 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_plsr.r
\name{plot_pls_args}
\alias{plot_pls_args}
\title{Plot PLSR - Arguments}
\arguments{
\item{do.pls}{Logical. If used in a plotting function, if analysis from PLSR
models should be plotted.}
\item{pls.colorBy}{NULL or character. What class-variable should be
used for coloring in the RMSEC and RMSECV plots. Set to NULL for no coloring,
or provide a character length one with a single column name of a class
variable that should be used for coloring.}
\item{pls.what}{What types of plsr analysis to plot. Possible values are
'both', 'errors', 'regression'.}
\item{pls.rdp}{Logical (TRUE or FALSE). If errors in the error plots should be
given in RDP or not.}
\item{pg.where}{Character length one. If left at the default 'def', the value
from the settings.r file is read in (parameter \code{gen_plot_pgWhereDefault}).
For plotting to PDFs provide "pdf", for plotting to graphics device provide
anything but "pdf".}
\item{pg.main}{Character length one. The additional text on the title of each
single plot.}
\item{pg.sub}{Character length one. The additional text on the subtitle of
each single plot.}
\item{pg.fns}{Character length one. The additional text in the filename of
the pdf.}
}
\description{
The following parameters can be used in the \code{...} argument
e.g. in function \code{\link{plot}} and \code{\link{plot_pls}} to override
the values in the analysis procedure file and so to modify the graphics -
see examples.
\describe{
\item{\code{plot(cube, ...)}}{ }
\item{ \code{plot_pls(cube, ...)}}{ }
}
}
\details{
For a list of all parameters that can be used in the \code{...}
argument in \code{\link{getap}} and in the \code{\link{plot}} functions
please see \code{\link{anproc_file}}.
}
\examples{
\dontrun{
dataset <- gfd()
cube <- gdmm(dataset)
plot(cube, do.pls=FALSE) # to plot everything availalbe except the plsr-models
plot(cube, pls.colorBy="C_Temp")
plot_pls(cube, pls.colorBy="C_Temp")
}
}
\seealso{
\code{\link{plot_pls}}
Other Plot arguments: \code{\link{plot,aquap_data,missing-method}},
\code{\link{plot_NNET_args}},
\code{\link{plot_SVM_args}}, \code{\link{plot_aqg_args}},
\code{\link{plot_discrimAnalysis_args}},
\code{\link{plot_pca_args}}, \code{\link{plot_pg_args}},
\code{\link{plot_randomForest_args}},
\code{\link{plot_sim_args}}
Other PLSR documentation: \code{\link{calc_pls_args}},
\code{\link{plot_pls,aquap_cube-method}},
\code{\link{plot_pls_indepPred}}
}
\concept{PLSR documentation}
\concept{Plot arguments}
|
library(dplyr);library(survival);library(dglm);
setwd("/Users/michaelcopeland/Stapleton/Copeland/stapleton_lab/qPCR2vQTL/DGLMsimulation")
bred = read.csv("../fullvqtldata#3.csv")
bred = bred[-c(1,2),]
maincode =read.csv("../dglmmain.csv")
maincode =maincode[,-c(1,2)]
maincode = cbind.data.frame(bred[,c(1,2)],maincode)
meanpval1 = read.csv('meanpval.csv')
#total.mean = mean(maincode$stress)
# Markers significant for the MEAN model in the combined scan; the first
# two entries select the phenotype and breed-type columns of maincode.
name.sig.mean = c('stress','BreedType','bnl4.36', 'umc1979', 'AW244963', 'umc1677', 'bnlg1953', 'php06005', 'gta106b', 'ufg24', 'csu332', 'umc2042', 'sdg119',
'bnlg1879', 'gpm820d', 'gpm788b', 'gpm427', 'gpm413a', 'chr117d', 'umc2147')
name.sig.mean.nostress = c('bnl4.36', 'umc1979', 'AW244963', 'umc1677', 'bnlg1953', 'php06005', 'gta106b', 'ufg24', 'csu332', 'umc2042', 'sdg119',
'bnlg1879', 'gpm820d', 'gpm788b', 'gpm427', 'gpm413a', 'chr117d', 'umc2147')
maincode.sig.mean = maincode[,name.sig.mean]
meanpval1.sig.mean= meanpval1[,name.sig.mean.nostress]
# Accumulators: one row per marker
# (gene, mean B, mean A, B-A, p-value, B-avg, A-avg).
inbred.mean.df = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.mean.df = data.frame(matrix(ncol = 7, nrow = 0))
# Baseline mean stress within each breed type.
avg.inbred = mean(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = mean(maincode$stress[maincode$BreedType == "Hybrid"])
# Columns 3:20 of maincode.sig.mean are the 18 significant markers.
for (i in c(3:20)){
# Stress of inbreds carrying allele B (1) vs allele A (0) at marker i.
temp.B.inbred = maincode.sig.mean$stress[maincode.sig.mean[,i]==1 & maincode.sig.mean$BreedType == "Inbred"]
temp.A.inbred = maincode.sig.mean$stress[maincode.sig.mean[,i]==0 & maincode.sig.mean$BreedType == "Inbred"]
temp.B.sum.inbred = mean(temp.B.inbred)
temp.A.sum.inbred = mean(temp.A.inbred)
# i-2 realigns marker column i with the p-value table, which lacks the
# stress/BreedType columns.
temp.bind.inbred = c(name.sig.mean[i],temp.B.sum.inbred,temp.A.sum.inbred,temp.B.sum.inbred-temp.A.sum.inbred,
meanpval1.sig.mean[1,i-2], temp.B.sum.inbred-avg.inbred,temp.A.sum.inbred-avg.inbred)
inbred.mean.df = rbind.data.frame(inbred.mean.df,temp.bind.inbred)
# Same comparison restricted to hybrids.
temp.B.hybrid = maincode.sig.mean$stress[maincode.sig.mean[,i]==1 & maincode.sig.mean$BreedType == "Hybrid"]
temp.A.hybrid = maincode.sig.mean$stress[maincode.sig.mean[,i]==0 & maincode.sig.mean$BreedType == "Hybrid"]
temp.B.sum.hybrid = mean(temp.B.hybrid)
temp.A.sum.hybrid = mean(temp.A.hybrid)
temp.bind.hybrid = c(name.sig.mean[i],temp.B.sum.hybrid,temp.A.sum.hybrid,temp.B.sum.hybrid-temp.A.sum.hybrid,
meanpval1.sig.mean[1,i-2], temp.B.sum.hybrid-avg.hybrid,temp.A.sum.hybrid-avg.hybrid)
hybrid.mean.df = rbind.data.frame(hybrid.mean.df,temp.bind.hybrid)
}
colnames(inbred.mean.df) = c('Gene','Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg','A-avg')
colnames(hybrid.mean.df) = c('Gene','Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg','A-avg')
# rbind of character vectors coerced every column to character; restore
# numeric p-values for downstream use.
inbred.mean.df$pvalue = as.numeric(inbred.mean.df$pvalue)
hybrid.mean.df$pvalue = as.numeric(hybrid.mean.df$pvalue)
# bnl4.36 -831.107307
# umc1979 -590.004884
# AW244963 -537.167724
# umc1677 -323.616296
# bnlg1953 -302.272604
# php06005 -282.648984
# gta106b -183.667146
# ufg24 -165.248458
# csu332 -144.304439
# X 1.000000
# umc2042 1.772549
# sdg119 165.720886
# bnlg1879 187.560272
# gpm820d 234.978914
# gpm788b 276.980917
# gpm427 329.913885
# gpm413a 549.351422
# chr117d 700.259355
# umc2147 840.670772
###############################################################################################
############################ VARIANCE #########################################################
###############################################################################################
###############################################################################################
# DGLM dispersion-model (variance) p-values for the combined scan.
varpval1 = read.csv('varpval.csv')
# Markers significant for the VARIANCE model in the combined scan.
name.sig.var = c('stress','BreedType','umc110a', 'jpsb365a', 'gpm531b', 'IDP1681', 'AY103770', 'IDP481', 'AY109968', 'AW244963', 'umc1191', 'gpm876', 'umc1677',
'IDP3838', 'bnlg619', 'AY107200', 'umc2350', 'csu1171', 'npi608', 'umc2047', 'IDP2483')
name.sig.var.nostress = c('umc110a', 'jpsb365a', 'gpm531b', 'IDP1681', 'AY103770', 'IDP481', 'AY109968', 'AW244963', 'umc1191', 'gpm876', 'umc1677',
'IDP3838', 'bnlg619', 'AY107200', 'umc2350', 'csu1171', 'npi608', 'umc2047', 'IDP2483')
maincode.sig.var = maincode[,name.sig.var]
varpval1.sig.var= varpval1[,name.sig.var.nostress]
inbred.var.df = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.var.df = data.frame(matrix(ncol = 7, nrow = 0))
# NOTE(review): avg.inbred/avg.hybrid are REUSED from the mean analysis
# above but silently redefined here as variances -- any code run out of
# order will pick up the wrong baseline.
avg.inbred = var(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = var(maincode$stress[maincode$BreedType == "Hybrid"])
# Columns 3:21 of maincode.sig.var are the 19 significant markers.
for (i in c(3:21)){
temp.B.inbred = maincode.sig.var$stress[maincode.sig.var[,i]==1 & maincode.sig.var$BreedType == "Inbred"]
temp.A.inbred = maincode.sig.var$stress[maincode.sig.var[,i]==0 & maincode.sig.var$BreedType == "Inbred"]
# Per-allele stress variance (var rather than mean distinguishes this
# section from the mean analysis above).
temp.B.sum.inbred = var(temp.B.inbred)
temp.A.sum.inbred = var(temp.A.inbred)
temp.bind.inbred = c(name.sig.var[i],temp.B.sum.inbred,temp.A.sum.inbred,temp.B.sum.inbred-temp.A.sum.inbred,
varpval1.sig.var[1,i-2], temp.B.sum.inbred-avg.inbred,temp.A.sum.inbred-avg.inbred)
inbred.var.df = rbind.data.frame(inbred.var.df,temp.bind.inbred)
temp.B.hybrid = maincode.sig.var$stress[maincode.sig.var[,i]==1 & maincode.sig.var$BreedType == "Hybrid"]
temp.A.hybrid = maincode.sig.var$stress[maincode.sig.var[,i]==0 & maincode.sig.var$BreedType == "Hybrid"]
temp.B.sum.hybrid = var(temp.B.hybrid)
temp.A.sum.hybrid = var(temp.A.hybrid)
temp.bind.hybrid = c(name.sig.var[i],temp.B.sum.hybrid,temp.A.sum.hybrid,temp.B.sum.hybrid-temp.A.sum.hybrid,
varpval1.sig.var[1,i-2], temp.B.sum.hybrid-avg.hybrid,temp.A.sum.hybrid-avg.hybrid)
hybrid.var.df = rbind.data.frame(hybrid.var.df,temp.bind.hybrid)
}
colnames(inbred.var.df) = c('Gene','var B', 'var A', 'B-A', 'pvalue', 'B-avg','A-avg')
colnames(hybrid.var.df) = c('Gene','var B', 'var A', 'B-A', 'pvalue', 'B-avg','A-avg')
# Restore numeric p-values after character coercion by rbind.
inbred.var.df$pvalue = as.numeric(inbred.var.df$pvalue)
hybrid.var.df$pvalue = as.numeric(hybrid.var.df$pvalue)
# umc110a -1.5519553
# jpsb365a -1.3980381
# gpm531b -1.2602921
# IDP1681 -0.9924411
# AY103770 -0.9365738
# IDP481 -0.8735649
# AY109968 -0.5442640
# AW244963 -0.4942477
# umc1191 0.1461766
# gpm876 0.5829842
# umc1677 0.5885424
# IDP3838 0.7015249
# bnlg619 0.7606361
# AY107200 0.8501561
# X 1.0000000
# umc2350 1.1356912
# csu1171 1.1885830
# npi608 1.2264972
# umc2047 1.2801697
# IDP2483 1.3488144
######################################################################################################################
###################################### individual hybrid and inbred analysis #########################################
######################################################################################################################
######################################################################################################################
# Breed-specific DGLM results (separate inbred-only and hybrid-only scans).
# NOTE(review): meaneff1.* and vareff1.* are loaded but never used in the
# code below -- confirm whether they are needed.
meaneff1.inbred = read.csv('meaneff_inbred.csv')
meanpval1.inbred = read.csv('meanpval_inbred.csv')
vareff1.inbred = read.csv('vareff_inbred.csv')
varpval1.inbred = read.csv('varpval_inbred.csv')
meaneff1.hybrid = read.csv('meaneff_hybrid.csv')
meanpval1.hybrid = read.csv('meanpval_hybrid.csv')
vareff1.hybrid = read.csv('vareff_hybrid.csv')
varpval1.hybrid = read.csv('varpval_hybrid.csv')
#total.mean = mean(maincode$stress)
# Significant mean-model markers from the inbred-only scan (18 markers).
name.sig.mean.inbred = c('stress','BreedType','AY105205', 'IDP439', 'npi380', 'gpm267', 'IDP2401', 'IDP856', 'gpm922f', 'dmt102b', 'bnlg1131', 'umc2092', 'IDP342',
'IDP1986', 'IDP1468', 'npi352', 'ufg27', 'chr117d', 'gpm492', 'IDP41')
name.sig.mean.nostress.inbred = c('AY105205', 'IDP439', 'npi380', 'gpm267', 'IDP2401', 'IDP856', 'gpm922f', 'dmt102b', 'bnlg1131', 'umc2092', 'IDP342',
'IDP1986', 'IDP1468', 'npi352', 'ufg27', 'chr117d', 'gpm492', 'IDP41')
# Significant mean-model markers from the hybrid-only scan (7 markers).
name.sig.mean.hybrid = c('stress','BreedType','jpsb527a', 'gpm493c', 'bnlg1811', 'gpm590', 'mmp144a', 'umc1920', 'umc1773')
name.sig.mean.nostress.hybrid = c('jpsb527a', 'gpm493c', 'bnlg1811', 'gpm590', 'mmp144a', 'umc1920', 'umc1773')
maincode.sig.mean.inbred = maincode[,name.sig.mean.inbred]
meanpval1.sig.mean.inbred = meanpval1.inbred[,name.sig.mean.nostress.inbred]
maincode.sig.mean.hybrid = maincode[,name.sig.mean.hybrid]
meanpval1.sig.mean.hybrid = meanpval1.hybrid[,name.sig.mean.nostress.hybrid]
inbred.mean.df2 = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.mean.df2 = data.frame(matrix(ncol = 7, nrow = 0))
# Baseline mean stress per breed type (redefines avg.* from the variance
# section above back to means).
avg.inbred = mean(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = mean(maincode$stress[maincode$BreedType == "Hybrid"])
# Per-marker mean comparison for the inbred-only scan
# (columns 3:20 of maincode.sig.mean.inbred = the 18 significant markers).
# BUG FIX: gene labels must come from name.sig.mean.inbred, the marker list
# used to build maincode.sig.mean.inbred -- the original indexed
# name.sig.mean (the combined-scan list), mislabelling every row. The
# sorted results commented below (AY105205 ... IDP41) confirm the
# inbred-specific names are intended.
for (i in c(3:20)) {
  # Stress of inbreds carrying allele B (1) vs allele A (0) at marker i.
  temp.B.inbred <- maincode.sig.mean.inbred$stress[maincode.sig.mean.inbred[, i] == 1 & maincode.sig.mean.inbred$BreedType == "Inbred"]
  temp.A.inbred <- maincode.sig.mean.inbred$stress[maincode.sig.mean.inbred[, i] == 0 & maincode.sig.mean.inbred$BreedType == "Inbred"]
  temp.B.sum.inbred <- mean(temp.B.inbred)
  temp.A.sum.inbred <- mean(temp.A.inbred)
  # Row: gene, mean B, mean A, B-A, p-value (i-2 skips the stress/BreedType
  # columns), and each allele mean relative to the overall inbred mean.
  temp.bind.inbred <- c(name.sig.mean.inbred[i], temp.B.sum.inbred, temp.A.sum.inbred, temp.B.sum.inbred - temp.A.sum.inbred,
                        meanpval1.sig.mean.inbred[1, i - 2], temp.B.sum.inbred - avg.inbred, temp.A.sum.inbred - avg.inbred)
  inbred.mean.df2 <- rbind.data.frame(inbred.mean.df2, temp.bind.inbred)
}
# Per-marker mean comparison for the hybrid-only scan (columns 3:9 = the
# 7 significant markers).
# BUG FIX: labels now come from name.sig.mean.hybrid instead of
# name.sig.mean (the combined-scan list); the commented results below
# (jpsb527a ... umc1773) confirm the hybrid-specific names are intended.
for (i in c(3:9)) {
  temp.B.hybrid <- maincode.sig.mean.hybrid$stress[maincode.sig.mean.hybrid[, i] == 1 & maincode.sig.mean.hybrid$BreedType == "Hybrid"]
  temp.A.hybrid <- maincode.sig.mean.hybrid$stress[maincode.sig.mean.hybrid[, i] == 0 & maincode.sig.mean.hybrid$BreedType == "Hybrid"]
  temp.B.sum.hybrid <- mean(temp.B.hybrid)
  temp.A.sum.hybrid <- mean(temp.A.hybrid)
  temp.bind.hybrid <- c(name.sig.mean.hybrid[i], temp.B.sum.hybrid, temp.A.sum.hybrid, temp.B.sum.hybrid - temp.A.sum.hybrid,
                        meanpval1.sig.mean.hybrid[1, i - 2], temp.B.sum.hybrid - avg.hybrid, temp.A.sum.hybrid - avg.hybrid)
  hybrid.mean.df2 <- rbind.data.frame(hybrid.mean.df2, temp.bind.hybrid)
}
colnames(inbred.mean.df2) <- c('Gene', 'Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
colnames(hybrid.mean.df2) <- c('Gene', 'Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
# rbind of character vectors coerced all columns to character; restore
# numeric p-values.
inbred.mean.df2$pvalue <- as.numeric(inbred.mean.df2$pvalue)
hybrid.mean.df2$pvalue <- as.numeric(hybrid.mean.df2$pvalue)
#INBRED
# AY105205 -1119.20258
# IDP439 -618.89584
# npi380 -524.52571
# gpm267 -511.65447
# IDP2401 -500.08301
# IDP856 -372.55134
# gpm922f -354.71843
# dmt102b -319.01115
# bnlg1131 -269.43307
# umc2092 -149.57773
# IDP342 -139.05553
# X 1.00000
# IDP1986 50.70157
# IDP1468 129.45225
# npi352 159.22654
# ufg27 213.68620
# gpm492 220.93535
# chr117d 740.63412
# IDP41 1066.43940
#HYBRID
# jpsb527a -10771.1193
# X 1.0000
# gpm493c 324.2485
# bnlg1811 336.8626
# gpm590 1013.7649
# mmp144a 1082.9003
# umc1920 4602.6150
# umc1773 6908.1372
###############################################################################################
############################# VARIANCE #######################################################
###############################################################################################
###############################################################################################
# Significant variance-model markers from the inbred-only scan (19 markers).
name.sig.var.inbred = c('stress','BreedType','nfd101b', 'ufg71', 'umc37a', 'IDP1980', 'gpm663b', 'ufg26', 'gpm258', 'bnlg1816', 'gpm219', 'IDP624', 'AY110389',
'gpm800b', 'gpm1', 'umc2061', 'isu041b', 'umc1073', 'IDP1949', 'haf101','gpm409a')
name.sig.var.nostress.inbred = c('nfd101b', 'ufg71', 'umc37a', 'IDP1980', 'gpm663b', 'ufg26', 'gpm258', 'bnlg1816', 'gpm219', 'IDP624', 'AY110389',
'gpm800b', 'gpm1', 'umc2061', 'isu041b', 'umc1073', 'IDP1949', 'haf101','gpm409a')
# Significant variance-model markers from the hybrid-only scan (10 markers).
name.sig.var.hybrid = c('stress','BreedType','mmp47', 'mmp24', 'psr754a', 'gpm219', 'gpm359b', 'nnr2', 'mmp97','gpm588a','umc1155','umc1822')
name.sig.var.nostress.hybrid = c('mmp47', 'mmp24', 'psr754a', 'gpm219', 'gpm359b', 'nnr2', 'mmp97','gpm588a','umc1155','umc1822')
maincode.sig.var.inbred = maincode[,name.sig.var.inbred]
varpval1.sig.var.inbred = varpval1.inbred[,name.sig.var.nostress.inbred]
maincode.sig.var.hybrid = maincode[,name.sig.var.hybrid]
varpval1.sig.var.hybrid = varpval1.hybrid[,name.sig.var.nostress.hybrid]
inbred.var.df2 = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.var.df2 = data.frame(matrix(ncol = 7, nrow = 0))
# Baseline stress variance per breed type (avg.* redefined again from the
# mean-analysis values above).
avg.inbred = var(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = var(maincode$stress[maincode$BreedType == "Hybrid"])
# Per-marker variance comparison for the inbred-only scan (columns 3:21 =
# the 19 significant markers).
# BUG FIX: gene labels now come from name.sig.var.inbred -- the original
# indexed name.sig.var (the combined-scan list), mislabelling every row.
# The commented results below (nfd101b ... gpm409a) confirm the
# inbred-specific names are intended.
for (i in c(3:21)) {
  temp.B.inbred <- maincode.sig.var.inbred$stress[maincode.sig.var.inbred[, i] == 1 & maincode.sig.var.inbred$BreedType == "Inbred"]
  temp.A.inbred <- maincode.sig.var.inbred$stress[maincode.sig.var.inbred[, i] == 0 & maincode.sig.var.inbred$BreedType == "Inbred"]
  # Per-allele stress variance for allele B (1) and allele A (0).
  temp.B.sum.inbred <- var(temp.B.inbred)
  temp.A.sum.inbred <- var(temp.A.inbred)
  temp.bind.inbred <- c(name.sig.var.inbred[i], temp.B.sum.inbred, temp.A.sum.inbred, temp.B.sum.inbred - temp.A.sum.inbred,
                        varpval1.sig.var.inbred[1, i - 2], temp.B.sum.inbred - avg.inbred, temp.A.sum.inbred - avg.inbred)
  inbred.var.df2 <- rbind.data.frame(inbred.var.df2, temp.bind.inbred)
}
# Per-marker variance comparison for the hybrid-only scan (columns 3:12 =
# the 10 significant markers).
# BUG FIX: labels now come from name.sig.var.hybrid instead of
# name.sig.var; the commented results below (mmp47 ... umc1822) confirm
# the hybrid-specific names are intended.
for (i in c(3:12)) {
  temp.B.hybrid <- maincode.sig.var.hybrid$stress[maincode.sig.var.hybrid[, i] == 1 & maincode.sig.var.hybrid$BreedType == "Hybrid"]
  temp.A.hybrid <- maincode.sig.var.hybrid$stress[maincode.sig.var.hybrid[, i] == 0 & maincode.sig.var.hybrid$BreedType == "Hybrid"]
  temp.B.sum.hybrid <- var(temp.B.hybrid)
  temp.A.sum.hybrid <- var(temp.A.hybrid)
  temp.bind.hybrid <- c(name.sig.var.hybrid[i], temp.B.sum.hybrid, temp.A.sum.hybrid, temp.B.sum.hybrid - temp.A.sum.hybrid,
                        varpval1.sig.var.hybrid[1, i - 2], temp.B.sum.hybrid - avg.hybrid, temp.A.sum.hybrid - avg.hybrid)
  hybrid.var.df2 <- rbind.data.frame(hybrid.var.df2, temp.bind.hybrid)
}
colnames(inbred.var.df2) <- c('Gene', 'var B', 'var A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
colnames(hybrid.var.df2) <- c('Gene', 'var B', 'var A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
# Restore numeric p-values after character coercion by rbind.
inbred.var.df2$pvalue <- as.numeric(inbred.var.df2$pvalue)
hybrid.var.df2$pvalue <- as.numeric(hybrid.var.df2$pvalue)
#INBRED
# nfd101b -3.6087189
# ufg71 -2.9185692
# umc37a -1.4183746
# IDP1980 -1.1295770
# gpm663b -1.0215143
# ufg26 -0.9796921
# gpm258 -0.9063691
# bnlg1816 -0.7948612
# gpm219 -0.5478515
# IDP624 -0.2216349
# AY110389 -0.2038563
# gpm800b 0.4433892
# gpm1 0.5156570
# umc2061 0.6927400
# isu041b 0.9745269
# X 1.0000000
# umc1073 1.1048187
# IDP1949 1.6310837
# haf101 1.8469959
# gpm409a 2.4090322
#HYBRID
# mmp47 -6.4240913
# mmp24 -4.7921365
# psr754a -4.3045082
# gpm219 -1.6748903
# gpm359b -0.8621597
# nnr2 -0.5032000
# X 1.0000000
# mmp97 1.1726435
# gpm588a 3.0308809
# umc1155 3.1360018
# umc1822 3.3245501
# NOTE(review): qt() expects a probability in [0, 1] as its first argument;
# qt(-2.51, 6) is out of range and returns NaN with a warning. Possibly
# pt(-2.51, 6) (tail probability of a t statistic) was intended -- confirm.
qt(-2.51, 6)
# Leftover interactive scratch lines -- no effect on the analysis.
5
5+5
| /qPCR2vQTL/DGLMsimulation/DGLManalysis.R | no_license | MRCopeland74/stapleton_lab | R | false | false | 14,654 | r | library(dplyr);library(survival);library(dglm);
setwd("/Users/michaelcopeland/Stapleton/Copeland/stapleton_lab/qPCR2vQTL/DGLMsimulation")
bred = read.csv("../fullvqtldata#3.csv")
bred = bred[-c(1,2),]
maincode =read.csv("../dglmmain.csv")
maincode =maincode[,-c(1,2)]
maincode = cbind.data.frame(bred[,c(1,2)],maincode)
meanpval1 = read.csv('meanpval.csv')
#total.mean = mean(maincode$stress)
name.sig.mean = c('stress','BreedType','bnl4.36', 'umc1979', 'AW244963', 'umc1677', 'bnlg1953', 'php06005', 'gta106b', 'ufg24', 'csu332', 'umc2042', 'sdg119',
'bnlg1879', 'gpm820d', 'gpm788b', 'gpm427', 'gpm413a', 'chr117d', 'umc2147')
name.sig.mean.nostress = c('bnl4.36', 'umc1979', 'AW244963', 'umc1677', 'bnlg1953', 'php06005', 'gta106b', 'ufg24', 'csu332', 'umc2042', 'sdg119',
'bnlg1879', 'gpm820d', 'gpm788b', 'gpm427', 'gpm413a', 'chr117d', 'umc2147')
maincode.sig.mean = maincode[,name.sig.mean]
meanpval1.sig.mean= meanpval1[,name.sig.mean.nostress]
inbred.mean.df = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.mean.df = data.frame(matrix(ncol = 7, nrow = 0))
avg.inbred = mean(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = mean(maincode$stress[maincode$BreedType == "Hybrid"])
for (i in c(3:20)){
temp.B.inbred = maincode.sig.mean$stress[maincode.sig.mean[,i]==1 & maincode.sig.mean$BreedType == "Inbred"]
temp.A.inbred = maincode.sig.mean$stress[maincode.sig.mean[,i]==0 & maincode.sig.mean$BreedType == "Inbred"]
temp.B.sum.inbred = mean(temp.B.inbred)
temp.A.sum.inbred = mean(temp.A.inbred)
temp.bind.inbred = c(name.sig.mean[i],temp.B.sum.inbred,temp.A.sum.inbred,temp.B.sum.inbred-temp.A.sum.inbred,
meanpval1.sig.mean[1,i-2], temp.B.sum.inbred-avg.inbred,temp.A.sum.inbred-avg.inbred)
inbred.mean.df = rbind.data.frame(inbred.mean.df,temp.bind.inbred)
temp.B.hybrid = maincode.sig.mean$stress[maincode.sig.mean[,i]==1 & maincode.sig.mean$BreedType == "Hybrid"]
temp.A.hybrid = maincode.sig.mean$stress[maincode.sig.mean[,i]==0 & maincode.sig.mean$BreedType == "Hybrid"]
temp.B.sum.hybrid = mean(temp.B.hybrid)
temp.A.sum.hybrid = mean(temp.A.hybrid)
temp.bind.hybrid = c(name.sig.mean[i],temp.B.sum.hybrid,temp.A.sum.hybrid,temp.B.sum.hybrid-temp.A.sum.hybrid,
meanpval1.sig.mean[1,i-2], temp.B.sum.hybrid-avg.hybrid,temp.A.sum.hybrid-avg.hybrid)
hybrid.mean.df = rbind.data.frame(hybrid.mean.df,temp.bind.hybrid)
}
colnames(inbred.mean.df) = c('Gene','Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg','A-avg')
colnames(hybrid.mean.df) = c('Gene','Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg','A-avg')
inbred.mean.df$pvalue = as.numeric(inbred.mean.df$pvalue)
hybrid.mean.df$pvalue = as.numeric(hybrid.mean.df$pvalue)
# bnl4.36 -831.107307
# umc1979 -590.004884
# AW244963 -537.167724
# umc1677 -323.616296
# bnlg1953 -302.272604
# php06005 -282.648984
# gta106b -183.667146
# ufg24 -165.248458
# csu332 -144.304439
# X 1.000000
# umc2042 1.772549
# sdg119 165.720886
# bnlg1879 187.560272
# gpm820d 234.978914
# gpm788b 276.980917
# gpm427 329.913885
# gpm413a 549.351422
# chr117d 700.259355
# umc2147 840.670772
###############################################################################################
############################ VARIANCE #########################################################
###############################################################################################
###############################################################################################
varpval1 = read.csv('varpval.csv')
name.sig.var = c('stress','BreedType','umc110a', 'jpsb365a', 'gpm531b', 'IDP1681', 'AY103770', 'IDP481', 'AY109968', 'AW244963', 'umc1191', 'gpm876', 'umc1677',
'IDP3838', 'bnlg619', 'AY107200', 'umc2350', 'csu1171', 'npi608', 'umc2047', 'IDP2483')
name.sig.var.nostress = c('umc110a', 'jpsb365a', 'gpm531b', 'IDP1681', 'AY103770', 'IDP481', 'AY109968', 'AW244963', 'umc1191', 'gpm876', 'umc1677',
'IDP3838', 'bnlg619', 'AY107200', 'umc2350', 'csu1171', 'npi608', 'umc2047', 'IDP2483')
maincode.sig.var = maincode[,name.sig.var]
varpval1.sig.var= varpval1[,name.sig.var.nostress]
inbred.var.df = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.var.df = data.frame(matrix(ncol = 7, nrow = 0))
avg.inbred = var(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = var(maincode$stress[maincode$BreedType == "Hybrid"])
for (i in c(3:21)){
temp.B.inbred = maincode.sig.var$stress[maincode.sig.var[,i]==1 & maincode.sig.var$BreedType == "Inbred"]
temp.A.inbred = maincode.sig.var$stress[maincode.sig.var[,i]==0 & maincode.sig.var$BreedType == "Inbred"]
temp.B.sum.inbred = var(temp.B.inbred)
temp.A.sum.inbred = var(temp.A.inbred)
temp.bind.inbred = c(name.sig.var[i],temp.B.sum.inbred,temp.A.sum.inbred,temp.B.sum.inbred-temp.A.sum.inbred,
varpval1.sig.var[1,i-2], temp.B.sum.inbred-avg.inbred,temp.A.sum.inbred-avg.inbred)
inbred.var.df = rbind.data.frame(inbred.var.df,temp.bind.inbred)
temp.B.hybrid = maincode.sig.var$stress[maincode.sig.var[,i]==1 & maincode.sig.var$BreedType == "Hybrid"]
temp.A.hybrid = maincode.sig.var$stress[maincode.sig.var[,i]==0 & maincode.sig.var$BreedType == "Hybrid"]
temp.B.sum.hybrid = var(temp.B.hybrid)
temp.A.sum.hybrid = var(temp.A.hybrid)
temp.bind.hybrid = c(name.sig.var[i],temp.B.sum.hybrid,temp.A.sum.hybrid,temp.B.sum.hybrid-temp.A.sum.hybrid,
varpval1.sig.var[1,i-2], temp.B.sum.hybrid-avg.hybrid,temp.A.sum.hybrid-avg.hybrid)
hybrid.var.df = rbind.data.frame(hybrid.var.df,temp.bind.hybrid)
}
colnames(inbred.var.df) = c('Gene','var B', 'var A', 'B-A', 'pvalue', 'B-avg','A-avg')
colnames(hybrid.var.df) = c('Gene','var B', 'var A', 'B-A', 'pvalue', 'B-avg','A-avg')
inbred.var.df$pvalue = as.numeric(inbred.var.df$pvalue)
hybrid.var.df$pvalue = as.numeric(hybrid.var.df$pvalue)
# umc110a -1.5519553
# jpsb365a -1.3980381
# gpm531b -1.2602921
# IDP1681 -0.9924411
# AY103770 -0.9365738
# IDP481 -0.8735649
# AY109968 -0.5442640
# AW244963 -0.4942477
# umc1191 0.1461766
# gpm876 0.5829842
# umc1677 0.5885424
# IDP3838 0.7015249
# bnlg619 0.7606361
# AY107200 0.8501561
# X 1.0000000
# umc2350 1.1356912
# csu1171 1.1885830
# npi608 1.2264972
# umc2047 1.2801697
# IDP2483 1.3488144
######################################################################################################################
###################################### individual hybrid and inbred analysis #########################################
######################################################################################################################
######################################################################################################################
meaneff1.inbred = read.csv('meaneff_inbred.csv')
meanpval1.inbred = read.csv('meanpval_inbred.csv')
vareff1.inbred = read.csv('vareff_inbred.csv')
varpval1.inbred = read.csv('varpval_inbred.csv')
meaneff1.hybrid = read.csv('meaneff_hybrid.csv')
meanpval1.hybrid = read.csv('meanpval_hybrid.csv')
vareff1.hybrid = read.csv('vareff_hybrid.csv')
varpval1.hybrid = read.csv('varpval_hybrid.csv')
#total.mean = mean(maincode$stress)
name.sig.mean.inbred = c('stress','BreedType','AY105205', 'IDP439', 'npi380', 'gpm267', 'IDP2401', 'IDP856', 'gpm922f', 'dmt102b', 'bnlg1131', 'umc2092', 'IDP342',
'IDP1986', 'IDP1468', 'npi352', 'ufg27', 'chr117d', 'gpm492', 'IDP41')
name.sig.mean.nostress.inbred = c('AY105205', 'IDP439', 'npi380', 'gpm267', 'IDP2401', 'IDP856', 'gpm922f', 'dmt102b', 'bnlg1131', 'umc2092', 'IDP342',
'IDP1986', 'IDP1468', 'npi352', 'ufg27', 'chr117d', 'gpm492', 'IDP41')
name.sig.mean.hybrid = c('stress','BreedType','jpsb527a', 'gpm493c', 'bnlg1811', 'gpm590', 'mmp144a', 'umc1920', 'umc1773')
name.sig.mean.nostress.hybrid = c('jpsb527a', 'gpm493c', 'bnlg1811', 'gpm590', 'mmp144a', 'umc1920', 'umc1773')
maincode.sig.mean.inbred = maincode[,name.sig.mean.inbred]
meanpval1.sig.mean.inbred = meanpval1.inbred[,name.sig.mean.nostress.inbred]
maincode.sig.mean.hybrid = maincode[,name.sig.mean.hybrid]
meanpval1.sig.mean.hybrid = meanpval1.hybrid[,name.sig.mean.nostress.hybrid]
inbred.mean.df2 = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.mean.df2 = data.frame(matrix(ncol = 7, nrow = 0))
avg.inbred = mean(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = mean(maincode$stress[maincode$BreedType == "Hybrid"])
# Per-marker mean comparison for the inbred-only scan
# (columns 3:20 of maincode.sig.mean.inbred = the 18 significant markers).
# BUG FIX: gene labels must come from name.sig.mean.inbred, the marker list
# used to build maincode.sig.mean.inbred -- the original indexed
# name.sig.mean (the combined-scan list), mislabelling every row. The
# sorted results commented below (AY105205 ... IDP41) confirm the
# inbred-specific names are intended.
for (i in c(3:20)) {
  # Stress of inbreds carrying allele B (1) vs allele A (0) at marker i.
  temp.B.inbred <- maincode.sig.mean.inbred$stress[maincode.sig.mean.inbred[, i] == 1 & maincode.sig.mean.inbred$BreedType == "Inbred"]
  temp.A.inbred <- maincode.sig.mean.inbred$stress[maincode.sig.mean.inbred[, i] == 0 & maincode.sig.mean.inbred$BreedType == "Inbred"]
  temp.B.sum.inbred <- mean(temp.B.inbred)
  temp.A.sum.inbred <- mean(temp.A.inbred)
  # Row: gene, mean B, mean A, B-A, p-value (i-2 skips the stress/BreedType
  # columns), and each allele mean relative to the overall inbred mean.
  temp.bind.inbred <- c(name.sig.mean.inbred[i], temp.B.sum.inbred, temp.A.sum.inbred, temp.B.sum.inbred - temp.A.sum.inbred,
                        meanpval1.sig.mean.inbred[1, i - 2], temp.B.sum.inbred - avg.inbred, temp.A.sum.inbred - avg.inbred)
  inbred.mean.df2 <- rbind.data.frame(inbred.mean.df2, temp.bind.inbred)
}
# Per-marker mean comparison for the hybrid-only scan (columns 3:9 = the
# 7 significant markers).
# BUG FIX: labels now come from name.sig.mean.hybrid instead of
# name.sig.mean (the combined-scan list); the commented results below
# (jpsb527a ... umc1773) confirm the hybrid-specific names are intended.
for (i in c(3:9)) {
  temp.B.hybrid <- maincode.sig.mean.hybrid$stress[maincode.sig.mean.hybrid[, i] == 1 & maincode.sig.mean.hybrid$BreedType == "Hybrid"]
  temp.A.hybrid <- maincode.sig.mean.hybrid$stress[maincode.sig.mean.hybrid[, i] == 0 & maincode.sig.mean.hybrid$BreedType == "Hybrid"]
  temp.B.sum.hybrid <- mean(temp.B.hybrid)
  temp.A.sum.hybrid <- mean(temp.A.hybrid)
  temp.bind.hybrid <- c(name.sig.mean.hybrid[i], temp.B.sum.hybrid, temp.A.sum.hybrid, temp.B.sum.hybrid - temp.A.sum.hybrid,
                        meanpval1.sig.mean.hybrid[1, i - 2], temp.B.sum.hybrid - avg.hybrid, temp.A.sum.hybrid - avg.hybrid)
  hybrid.mean.df2 <- rbind.data.frame(hybrid.mean.df2, temp.bind.hybrid)
}
colnames(inbred.mean.df2) <- c('Gene', 'Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
colnames(hybrid.mean.df2) <- c('Gene', 'Mean B', 'Mean A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
# rbind of character vectors coerced all columns to character; restore
# numeric p-values.
inbred.mean.df2$pvalue <- as.numeric(inbred.mean.df2$pvalue)
hybrid.mean.df2$pvalue <- as.numeric(hybrid.mean.df2$pvalue)
#INBRED
# AY105205 -1119.20258
# IDP439 -618.89584
# npi380 -524.52571
# gpm267 -511.65447
# IDP2401 -500.08301
# IDP856 -372.55134
# gpm922f -354.71843
# dmt102b -319.01115
# bnlg1131 -269.43307
# umc2092 -149.57773
# IDP342 -139.05553
# X 1.00000
# IDP1986 50.70157
# IDP1468 129.45225
# npi352 159.22654
# ufg27 213.68620
# gpm492 220.93535
# chr117d 740.63412
# IDP41 1066.43940
#HYBRID
# jpsb527a -10771.1193
# X 1.0000
# gpm493c 324.2485
# bnlg1811 336.8626
# gpm590 1013.7649
# mmp144a 1082.9003
# umc1920 4602.6150
# umc1773 6908.1372
###############################################################################################
############################# VARIANCE #######################################################
###############################################################################################
###############################################################################################
name.sig.var.inbred = c('stress','BreedType','nfd101b', 'ufg71', 'umc37a', 'IDP1980', 'gpm663b', 'ufg26', 'gpm258', 'bnlg1816', 'gpm219', 'IDP624', 'AY110389',
'gpm800b', 'gpm1', 'umc2061', 'isu041b', 'umc1073', 'IDP1949', 'haf101','gpm409a')
name.sig.var.nostress.inbred = c('nfd101b', 'ufg71', 'umc37a', 'IDP1980', 'gpm663b', 'ufg26', 'gpm258', 'bnlg1816', 'gpm219', 'IDP624', 'AY110389',
'gpm800b', 'gpm1', 'umc2061', 'isu041b', 'umc1073', 'IDP1949', 'haf101','gpm409a')
name.sig.var.hybrid = c('stress','BreedType','mmp47', 'mmp24', 'psr754a', 'gpm219', 'gpm359b', 'nnr2', 'mmp97','gpm588a','umc1155','umc1822')
name.sig.var.nostress.hybrid = c('mmp47', 'mmp24', 'psr754a', 'gpm219', 'gpm359b', 'nnr2', 'mmp97','gpm588a','umc1155','umc1822')
maincode.sig.var.inbred = maincode[,name.sig.var.inbred]
varpval1.sig.var.inbred = varpval1.inbred[,name.sig.var.nostress.inbred]
maincode.sig.var.hybrid = maincode[,name.sig.var.hybrid]
varpval1.sig.var.hybrid = varpval1.hybrid[,name.sig.var.nostress.hybrid]
inbred.var.df2 = data.frame(matrix(ncol = 7, nrow = 0))
hybrid.var.df2 = data.frame(matrix(ncol = 7, nrow = 0))
avg.inbred = var(maincode$stress[maincode$BreedType == "Inbred"])
avg.hybrid = var(maincode$stress[maincode$BreedType == "Hybrid"])
# Per-marker variance comparison for the inbred-only scan (columns 3:21 =
# the 19 significant markers).
# BUG FIX: gene labels now come from name.sig.var.inbred -- the original
# indexed name.sig.var (the combined-scan list), mislabelling every row.
# The commented results below (nfd101b ... gpm409a) confirm the
# inbred-specific names are intended.
for (i in c(3:21)) {
  temp.B.inbred <- maincode.sig.var.inbred$stress[maincode.sig.var.inbred[, i] == 1 & maincode.sig.var.inbred$BreedType == "Inbred"]
  temp.A.inbred <- maincode.sig.var.inbred$stress[maincode.sig.var.inbred[, i] == 0 & maincode.sig.var.inbred$BreedType == "Inbred"]
  # Per-allele stress variance for allele B (1) and allele A (0).
  temp.B.sum.inbred <- var(temp.B.inbred)
  temp.A.sum.inbred <- var(temp.A.inbred)
  temp.bind.inbred <- c(name.sig.var.inbred[i], temp.B.sum.inbred, temp.A.sum.inbred, temp.B.sum.inbred - temp.A.sum.inbred,
                        varpval1.sig.var.inbred[1, i - 2], temp.B.sum.inbred - avg.inbred, temp.A.sum.inbred - avg.inbred)
  inbred.var.df2 <- rbind.data.frame(inbred.var.df2, temp.bind.inbred)
}
# Per-marker variance comparison for the hybrid-only scan (columns 3:12 =
# the 10 significant markers).
# BUG FIX: labels now come from name.sig.var.hybrid instead of
# name.sig.var; the commented results below (mmp47 ... umc1822) confirm
# the hybrid-specific names are intended.
for (i in c(3:12)) {
  temp.B.hybrid <- maincode.sig.var.hybrid$stress[maincode.sig.var.hybrid[, i] == 1 & maincode.sig.var.hybrid$BreedType == "Hybrid"]
  temp.A.hybrid <- maincode.sig.var.hybrid$stress[maincode.sig.var.hybrid[, i] == 0 & maincode.sig.var.hybrid$BreedType == "Hybrid"]
  temp.B.sum.hybrid <- var(temp.B.hybrid)
  temp.A.sum.hybrid <- var(temp.A.hybrid)
  temp.bind.hybrid <- c(name.sig.var.hybrid[i], temp.B.sum.hybrid, temp.A.sum.hybrid, temp.B.sum.hybrid - temp.A.sum.hybrid,
                        varpval1.sig.var.hybrid[1, i - 2], temp.B.sum.hybrid - avg.hybrid, temp.A.sum.hybrid - avg.hybrid)
  hybrid.var.df2 <- rbind.data.frame(hybrid.var.df2, temp.bind.hybrid)
}
colnames(inbred.var.df2) <- c('Gene', 'var B', 'var A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
colnames(hybrid.var.df2) <- c('Gene', 'var B', 'var A', 'B-A', 'pvalue', 'B-avg', 'A-avg')
# Restore numeric p-values after character coercion by rbind.
inbred.var.df2$pvalue <- as.numeric(inbred.var.df2$pvalue)
hybrid.var.df2$pvalue <- as.numeric(hybrid.var.df2$pvalue)
#INBRED
# nfd101b -3.6087189
# ufg71 -2.9185692
# umc37a -1.4183746
# IDP1980 -1.1295770
# gpm663b -1.0215143
# ufg26 -0.9796921
# gpm258 -0.9063691
# bnlg1816 -0.7948612
# gpm219 -0.5478515
# IDP624 -0.2216349
# AY110389 -0.2038563
# gpm800b 0.4433892
# gpm1 0.5156570
# umc2061 0.6927400
# isu041b 0.9745269
# X 1.0000000
# umc1073 1.1048187
# IDP1949 1.6310837
# haf101 1.8469959
# gpm409a 2.4090322
#HYBRID
# mmp47 -6.4240913
# mmp24 -4.7921365
# psr754a -4.3045082
# gpm219 -1.6748903
# gpm359b -0.8621597
# nnr2 -0.5032000
# X 1.0000000
# mmp97 1.1726435
# gpm588a 3.0308809
# umc1155 3.1360018
# umc1822 3.3245501
# NOTE(review): qt() expects a probability in [0, 1] as its first argument;
# qt(-2.51, 6) is out of range and returns NaN with a warning. Possibly
# pt(-2.51, 6) (tail probability of a t statistic) was intended -- confirm.
qt(-2.51, 6)
# Leftover interactive scratch lines -- no effect on the analysis.
5
5+5
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HmcDual.R, R/NutsDual.R
\name{FindReasonableEpsilon}
\alias{FindReasonableEpsilon}
\title{FindReasonableEpsilon}
\usage{
FindReasonableEpsilon(theta0, log.start, L)
}
\arguments{
\item{theta0}{initial state}

\item{log.start}{the log posterior value at initial state}

\item{grad.start}{the gradient value at initial state}

\item{L}{callable function needed in Leapfrog}
}
\value{
initial epsilon
}
\description{
Heuristic for choosing an initial value of epsilon
}
| /man/FindReasonableEpsilon.Rd | no_license | JingyueLu/NUTS | R | false | true | 881 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HmcDual.R, R/NutsDual.R
\name{FindReasonableEpsilon}
\alias{FindReasonableEpsilon}
\alias{FindReasonableEpsilon}
\title{FindReasonableEpsilon}
\usage{
FindReasonableEpsilon(theta0, log.start, L)
FindReasonableEpsilon(theta0, log.start, L)
}
\arguments{
\item{log.start}{the log posterior value at initial state}
\item{L}{callable function needed in Leapfrog}
\item{theta}{initial state}
\item{grad.start}{the gradient value at initial state}
\item{theta}{initial state}
\item{log.start}{the log posterior value at initial state}
\item{grad.start}{the gradient value at initial state}
\item{L}{callable function needed in Leapfrog}
}
\value{
initial epsilon
initial epsilon
}
\description{
Heuristic for choosing an initial value of epsilon
Heuristic for choosing an initial value of epsilon
}
|
#' @title Expression distance matrix generated from a \code{taxaExp} object
#'
#' @name expdist
#' @description Generate an expression distance matrix from an object of \code{taxaExp} class
#' using a specified distance method
#'
#' @param objects a vector of objects of class \code{taxonExp} or an object of class \code{taxaExp}
#' @param taxa one single character or a vector of characters specifying main taxa selected for
#' calculating expression distance.
#' If one single character "all" is given,
#' all the taxa included in the \code{taxaExp} will be matched and selected ("all" by default).
#' @param subtaxa one single character or a vector of characters specifying sub taxa selected for
#' calculating expression distance.
#' If one single character "all" is given,
#' all the subtaxa included in the \code{taxaExp} will be matched and selected ("all" by default).
#' @param rowindex a vector of numbers corresponded to indices of selecting rows
#' @param method specifying which distance method to be used
#' to estimate expression phylogeny in bootstrapping.
#' @param logrithm a logical specifying whether to apply expression value log2 tranformation (TRUE by default).
#'
#' @return returns an expression distance matrix
#'
#' @examples
#' data(tetraExp)
#' library('ape')
#' dismat <- expdist(tetraExp, taxa = "all",
#' subtaxa = "Brain",
#' method = "pea")
#' tr <- root(NJ(dismat), "Chicken_Brain")
#' plot(tr)
#'
#' @export
expdist = function (objects = NULL, taxa = "all", subtaxa = "all", rowindex = NULL,
method = c( "sou", "sou_v","pea", "spe","euc", "cos", "jsd",
"tani", "jac"), logrithm = TRUE)
{
if (is.null(objects) || !is(objects) == "taxaExp") {
stop(paste0(date(), ": no valid taxaExp objects input!"))
}
flag1 <- TRUE
flag2 <- TRUE
if (any(grepl("all",taxa, ignore.case = TRUE))) {flag1 = FALSE}
else { taxa <- gsub("\\s+","",taxa)}
if (any(grepl("all",subtaxa, ignore.case = TRUE))) {flag2 = FALSE}
else { subtaxa <- gsub("\\s+","",subtaxa)}
objects_n <- length(objects)
objects_new_n <- 0
if ( flag1 || flag2)
{
#browser()
for (i in 1:objects_n)
{
if (flag1 && flag2) {
if (any(grepl(objects[[i]]$taxon_name,taxa, ignore.case=TRUE))
&& any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{objects_new_n <- objects_new_n + 1}
} else {
if (any(grepl(objects[[i]]$taxon_name,taxa,ignore.case=TRUE))
|| any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{objects_new_n <- objects_new_n + 1}
}
}
objects_new <- vector("list",length = objects_new_n)
counter <- 1
for (i in 1:objects_n)
{
if (flag1 && flag2) {
if (any(grepl(objects[[i]]$taxon_name,taxa,ignore.case=TRUE))
&& any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{
objects_new[[counter]] <- objects[[i]]
counter <- counter + 1
}
} else {
if (any(grepl(objects[[i]]$taxon_name,taxa,ignore.case=TRUE))
|| any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{
objects_new[[counter]] <- objects[[i]]
counter <- counter + 1
}
}
}
class(objects_new) <- "taxaExp"
objects <- objects_new
} else {
objects_new <- vector("list", length = objects_new_n)
counter <- 1
for (i in 1:objects_n) {
objects_new[[counter]] <- objects[[i]]
counter <- counter + 1
}
}
if (length(objects_new) == 0) {
stop(paste0(date(),": taxa and subtaxa name not found."))
}
#browser()
method<-match.arg(method)
message(paste0(date(), ": using ", method, " to calculate pair-wise distance"))
object_n <- length(objects)
gene_n <- objects[[1]]$gene_num
message(paste0(date(),": input ",object_n, " taxa"))
message(paste0(date(),": total ", gene_n, " genes"))
#initialization
expVal <- matrix(0, nrow = gene_n, ncol = object_n)
taxon_names <- vector("character", length = object_n)
for (i in 1:object_n) {
taxon_names[i] = paste0(objects[[i]]$taxon_name, "_", objects[[i]]$subTaxon_name)
expVal[,i] = apply(objects[[i]]$exp_value,1,median)
}
if (!is.null(rowindex)) {
expVal <- expVal[rowindex,]
}
if (logrithm) {
expVal <- apply(expVal, c(1,2), function (x) log2(x+1))
}
#browser()
colnames(expVal) <- taxon_names
dis.mat <- switch (method,
sou = {dist.sou(expVal)},
pea = {dist.pea(expVal)},
spe = {dist.spe(expVal)},
euc = {dist.euc(expVal)},
cos = {dist.cos(expVal)},
jsd = {dist.jsd(expVal)},
tani = {dist.tani(expVal)},
jac = {dist.jac(expVal)},
ced = {dist.ced(expVal)},
sou_v = {dist.sou_v(expVal)}
)
dis.mat
#as.dist(dis.mat)
}
| /R/expdist.R | no_license | jingwyang/TreeExp | R | false | false | 4,921 | r | #' @title Expression distance matrix generated from a \code{taxaExp} object
#'
#' @name expdist
#' @description Generate an expression distance matrix from an object of \code{taxaExp} class
#' using a specified distance method
#'
#' @param objects a vector of objects of class \code{taxonExp} or an object of class \code{taxaExp}
#' @param taxa one single character or a vector of characters specifying main taxa selected for
#' calculating expression distance.
#' If one single character "all" is given,
#' all the taxa included in the \code{taxaExp} will be matched and selected ("all" by default).
#' @param subtaxa one single character or a vector of characters specifying sub taxa selected for
#' calculating expression distance.
#' If one single character "all" is given,
#' all the subtaxa included in the \code{taxaExp} will be matched and selected ("all" by default).
#' @param rowindex a vector of numbers corresponding to indices of selected rows (genes)
#' @param method specifying which distance method to be used
#' to estimate expression phylogeny in bootstrapping.
#' @param logrithm a logical specifying whether to apply expression value log2 transformation (TRUE by default).
#'
#' @return returns an expression distance matrix
#'
#' @examples
#' data(tetraExp)
#' library('ape')
#' dismat <- expdist(tetraExp, taxa = "all",
#'                   subtaxa = "Brain",
#'                   method = "pea")
#' tr <- root(NJ(dismat), "Chicken_Brain")
#' plot(tr)
#'
#' @export
expdist = function (objects = NULL, taxa = "all", subtaxa = "all", rowindex = NULL,
                    method = c( "sou", "sou_v","pea", "spe","euc", "cos", "jsd",
                                "tani", "jac"), logrithm = TRUE)
{
# NOTE(review): `!` binds more loosely than `==`, so this reads
# !(is(objects) == "taxaExp") and relies on "taxaExp" being the first
# class returned by is(); inherits(objects, "taxaExp") would be more robust.
if (is.null(objects) || !is(objects) == "taxaExp") {
stop(paste0(date(), ": no valid taxaExp objects input!"))
}
# flag1/flag2 become FALSE when taxa/subtaxa is "all" (no filtering);
# otherwise whitespace is stripped from the explicit selection
flag1 <- TRUE
flag2 <- TRUE
if (any(grepl("all",taxa, ignore.case = TRUE))) {flag1 = FALSE}
else { taxa <- gsub("\\s+","",taxa)}
if (any(grepl("all",subtaxa, ignore.case = TRUE))) {flag2 = FALSE}
else { subtaxa <- gsub("\\s+","",subtaxa)}
objects_n <- length(objects)
objects_new_n <- 0
# two-pass selection: first count matches, then fill the pre-sized list;
# both filters active -> both must match, otherwise either may match
if ( flag1 || flag2)
{
#browser()
for (i in 1:objects_n)
{
if (flag1 && flag2) {
if (any(grepl(objects[[i]]$taxon_name,taxa, ignore.case=TRUE))
    && any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{objects_new_n <- objects_new_n + 1}
} else {
if (any(grepl(objects[[i]]$taxon_name,taxa,ignore.case=TRUE))
    || any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{objects_new_n <- objects_new_n + 1}
}
}
objects_new <- vector("list",length = objects_new_n)
counter <- 1
for (i in 1:objects_n)
{
if (flag1 && flag2) {
if (any(grepl(objects[[i]]$taxon_name,taxa,ignore.case=TRUE))
    && any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{
objects_new[[counter]] <- objects[[i]]
counter <- counter + 1
}
} else {
if (any(grepl(objects[[i]]$taxon_name,taxa,ignore.case=TRUE))
    || any(grepl(objects[[i]]$subTaxon_name, subtaxa, ignore.case=TRUE)))
{
objects_new[[counter]] <- objects[[i]]
counter <- counter + 1
}
}
}
class(objects_new) <- "taxaExp"
objects <- objects_new
} else {
# no filtering: identity copy of all objects into objects_new
# NOTE(review): the `1:objects_n` loop misbehaves when objects_n == 0
# (1:0 yields c(1, 0)); seq_len() would be safer.
objects_new <- vector("list", length = objects_new_n)
counter <- 1
for (i in 1:objects_n) {
objects_new[[counter]] <- objects[[i]]
counter <- counter + 1
}
}
if (length(objects_new) == 0) {
stop(paste0(date(),": taxa and subtaxa name not found."))
}
#browser()
method<-match.arg(method)
message(paste0(date(), ": using ", method, " to calculate pair-wise distance"))
object_n <- length(objects)
gene_n <- objects[[1]]$gene_num
message(paste0(date(),": input ",object_n, " taxa"))
message(paste0(date(),": total ", gene_n, " genes"))
#initialization
# one column per taxon/subtaxon: per-gene median expression across replicates
expVal <- matrix(0, nrow = gene_n, ncol = object_n)
taxon_names <- vector("character", length = object_n)
for (i in 1:object_n) {
taxon_names[i] = paste0(objects[[i]]$taxon_name, "_", objects[[i]]$subTaxon_name)
expVal[,i] = apply(objects[[i]]$exp_value,1,median)
}
if (!is.null(rowindex)) {
# NOTE(review): a length-1 rowindex drops the matrix to a vector
# (no drop = FALSE), which would break colnames() below.
expVal <- expVal[rowindex,]
}
if (logrithm) {
# elementwise log2(x + 1) transformation
expVal <- apply(expVal, c(1,2), function (x) log2(x+1))
}
#browser()
colnames(expVal) <- taxon_names
# NOTE(review): "ced" is not among the method choices above, so
# match.arg() can never select it; that branch is dead code.
dis.mat <- switch (method,
                   sou = {dist.sou(expVal)},
                   pea = {dist.pea(expVal)},
                   spe = {dist.spe(expVal)},
                   euc = {dist.euc(expVal)},
                   cos = {dist.cos(expVal)},
                   jsd = {dist.jsd(expVal)},
                   tani = {dist.tani(expVal)},
                   jac = {dist.jac(expVal)},
                   ced = {dist.ced(expVal)},
                   sou_v = {dist.sou_v(expVal)}
)
dis.mat
#as.dist(dis.mat)
}
|
\name{methylation}
\alias{methylation}
\docType{data}
\title{
Example Methylation Data
}
\description{
This data frame contains 300 samples and 200 CpG sites.
}
\usage{data(methylation)}
\format{
A data frame with subjects in rows and per-CpG-site methylation values in columns.
}
\keyword{datasets}
| /man/methylation.txt.Rd | no_license | cran/wtest | R | false | false | 282 | rd | \name{methylation}
\alias{methylation}
\docType{data}
\title{
Example Methylation Data
}
\description{
This data frame contains 300 samples and 200 CpG sites.
}
\usage{data(methylation)}
\format{
A data frame with subjects in rows and per-CpG-site methylation values in columns.
}
\keyword{datasets}
|
# MCMC update, path A ('complete, classic' sampler): hostID keeps its infector
# and all transmission links; only its infection time is moved to
# pbe1$tinf.prop, and the within-host minitree (coalescent times + topology)
# is re-sampled accordingly. Works entirely by side effect on the shared
# state object `pbe1` (mutated in place -- presumably a package-level
# environment; TODO confirm).
rewire_pathA_complete_classic <- function() {
### First, dismantle minitree
# edges entering hostID, with endtimes
edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# coalescent nodes before and within hostID
coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
# dismantle topology, move transmission node
pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
### Second, change transmission tree
pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
### Third, rebuild minitree
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$tinf.prop, pbe1$p)
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes))
edgeend <- c(coalnodes, edgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes)))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
}
# MCMC update, path B ('complete, classic' sampler): the current index case
# hostID is given the proposed infector (pbe1$infector.proposed.ID) and
# infection time (pbe1$tinf.prop); the host with the second-earliest
# infection time becomes the new index case. The minitrees of hostID, the
# new index, and the proposed infector are rebuilt. Works entirely by side
# effect on the shared state object `pbe1`.
rewire_pathB_complete_classic <- function() {
  ### Identify new index
  newindex <- which(pbe1$v$inftimes == sort(pbe1$v$inftimes)[2])
  ### First, dismantle minitree
  # transmission node of new index
  transnode_ni <- 2*pbe1$d$nsamples - 1 + newindex
  # edges entering hostID, with endtimes (excluding t from new index)
  edgesin <- setdiff(which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c"),
                     transnode_ni)
  edgeintimes <- pbe1$v$nodetimes[edgesin]
  # transmission node of hostID
  transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
  # coalescent nodes in hostID
  coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
  # dismantle topology, move transmission and bottleneck nodes
  pbe1$v$nodehosts[coalnodes] <- -1L
  pbe1$v$nodehosts[transnode_ni] <- 0L
  pbe1$v$nodehosts[transnode] <- pbe1$infector.proposed.ID
  # -1L (not -1): assigning a double into an integer nodeparents vector
  # would silently coerce it; all sibling rewire_path* functions use -1L
  pbe1$v$nodeparents[c(edgesin, coalnodes, transnode, transnode_ni)] <- -1L
  pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
  ### Second, change transmission tree
  pbe1$v$infectors[pbe1$hostID] <- pbe1$infector.proposed.ID
  pbe1$v$infectors[newindex] <- 0L
  pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
  ### Third, rebuild minitree in hostID
  # times of coalescent events in hostID given the proposed infection time;
  # one coalescent node is set aside (tail(, -1)), presumably because hostID
  # lost the incoming edge from the new index -- TODO confirm
  newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$tinf.prop, pbe1$p)
  coalnodes <- tail(coalnodes, -1)
  # order all edges (transmission, sample, coalescent) by endtime in hostID
  nodeorder <- order(c(newcoaltimes, edgeintimes))
  edgeend <- c(coalnodes, edgesin)[nodeorder]
  edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
  # sample topology of minitree within hostID
  edgestart <- sample_topology(edgeend,
                               edgeendtimes,
                               c(rep("c", length(newcoaltimes)),
                                 rep("x", length(edgeintimes)))[nodeorder],
                               transnode)
  # change minitree in hostID
  pbe1$v$nodehosts[edgeend] <- pbe1$hostID
  pbe1$v$nodeparents[edgeend] <- edgestart
  pbe1$v$nodetimes[edgeend] <- edgeendtimes
  ### Fourth, add edges in infector and new index
  rewire_pullnodes_complete(0)
  rewire_pullnodes_complete(pbe1$infector.proposed.ID)
}
# MCMC update, path C/F variant 1 ('complete, classic' sampler): swap hostID
# with its earliest infectee (the 'new infector'). Afterwards the former
# infectee infects hostID, inheriting hostID's old infector; their infection
# times and transmission nodes are exchanged, and both within-host minitrees
# are re-sampled. Works entirely by side effect on the shared state object
# `pbe1` (mutated in place -- presumably a package-level environment).
rewire_pathCF1_complete_classic <- function() {
### Identify new infector and old infector
# earliest infectee of hostID becomes the new infector
newinfector <- which(pbe1$v$infectors == pbe1$hostID)
newinfector <- newinfector[which(pbe1$v$inftimes[newinfector] == min(pbe1$v$inftimes[newinfector]))]
oldinfector <- pbe1$v$infectors[pbe1$hostID]
### First, dismantle minitree
# edges entering new infector, with endtimes
edgesin_ni <- which(pbe1$v$nodehosts == newinfector & pbe1$v$nodetypes != "c")
edgeintimes_ni <- pbe1$v$nodetimes[edgesin_ni]
# transmission node of new infector
transnode_ni <- 2*pbe1$d$nsamples - 1 + newinfector
# edges entering hostID, with endtimes, excluding t from new infector
edgesin <- setdiff(which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c"),
                   transnode_ni)
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# all coalescent nodes in new infector and hostID
coalnodes <- which((pbe1$v$nodehosts == newinfector | pbe1$v$nodehosts == pbe1$hostID) & pbe1$v$nodetypes == "c")
# more coalescent nodes:
# only when hostID is not the index case (oldinfector > 0)
if(oldinfector > 0) {
# parent of transmission edge leaving hostID
coalnodes <- c(take_cnode(transnode), coalnodes)
}
# dismantle topology, move transmission nodes
pbe1$v$nodehosts[coalnodes] <- -1L
pbe1$v$nodehosts[transnode_ni] <- oldinfector
pbe1$v$nodehosts[transnode] <- newinfector
pbe1$v$nodeparents[c(edgesin, edgesin_ni, coalnodes, transnode, transnode_ni)] <- -1L
pbe1$v$nodetimes[c(transnode, transnode_ni)] <- pbe1$v$nodetimes[c(transnode_ni, transnode)]
### Second, change transmission tree
pbe1$v$infectors[newinfector] <- oldinfector
pbe1$v$infectors[pbe1$hostID] <- newinfector
# swap the two infection times
pbe1$v$inftimes[c(newinfector, pbe1$hostID)] <- pbe1$v$inftimes[c(pbe1$hostID, newinfector)]
### Third, rebuild minitree in hostID
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$v$inftimes[pbe1$hostID], pbe1$p)
# split the pooled coalescent nodes: leftovers go to the new infector below
coalnodes_ni <- head(coalnodes, length(coalnodes) - length(newcoaltimes))
coalnodes <- tail(coalnodes, length(newcoaltimes))
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes))
edgeend <- c(coalnodes, edgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes)))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodehosts[edgeend] <- pbe1$hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### Fourth, rebuild minitree in new infector
# add edges from hostID to incoming edges
edgesin_ni <- c(edgesin_ni, transnode)
edgeintimes_ni <- pbe1$v$nodetimes[edgesin_ni]
# times of coalescent events in new infector and bottleneck size, and distribute coalescent nodes over new infector and pre-newinfector
newcoaltimes <- sample_coaltimes(edgeintimes_ni, pbe1$v$inftimes[newinfector], pbe1$p)
coalnodes_ni <- tail(coalnodes_ni, length(newcoaltimes))
# order all edges (transmission, sample, coalescent) by endtime in new infector
nodeorder <- order(c(newcoaltimes, edgeintimes_ni))
edgeend <- c(coalnodes_ni, edgesin_ni)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes_ni)[nodeorder]
# sample topology of minitree within new infector
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes_ni)))[nodeorder],
                             transnode_ni)
# change minitree in new infector
pbe1$v$nodehosts[edgeend] <- newinfector
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### Fifth, add edges before new infector
rewire_pullnodes_complete(oldinfector)
}
# MCMC update, path D ('complete, classic' sampler): make hostID the new
# index case at proposed time pbe1$tinf.prop. hostID is detached from its
# infector; all hosts previously infected by the index (infectors == 0)
# become infectees of hostID, and hostID's minitree is rebuilt to absorb the
# edges that used to enter before the old index. Works entirely by side
# effect on the shared state object `pbe1`.
rewire_pathD_complete_classic <- function() {
### First, dismantle minitree
# edges entering hostID, with endtimes
edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# coalescent nodes of hostID
coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
# more coalescent nodes:
# parent of transmission edge leaving hostID
coalnodes <- c(take_cnode(transnode), coalnodes)
# new edges entering hostID, from old index
newedgesin <- which(pbe1$v$nodehosts == 0)
newedgeintimes <- pbe1$v$nodetimes[newedgesin]
# dismantle topology, move transmission and bottleneck nodes
pbe1$v$nodehosts[coalnodes] <- -1L
pbe1$v$nodehosts[newedgesin] <- pbe1$hostID
pbe1$v$nodehosts[transnode] <- 0L
pbe1$v$nodeparents[c(edgesin, newedgesin, coalnodes, transnode)] <- -1L
pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
### Second, change transmission tree
pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
# former infectees of the old index now descend from hostID;
# order matters: reassign before making hostID the index (infector 0)
pbe1$v$infectors[pbe1$v$infectors == 0] <- pbe1$hostID
pbe1$v$infectors[pbe1$hostID] <- 0L
### Third, rebuild minitree in hostID
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(c(edgeintimes, newedgeintimes), pbe1$tinf.prop, pbe1$p)
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes, newedgeintimes))
edgeend <- c(coalnodes, edgesin, newedgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes, newedgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(c(edgeintimes, newedgeintimes))))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodehosts[edgeend] <- pbe1$hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### phylotree before index case
rewire_pullnodes_complete(0)
}
# MCMC update, path E ('complete, classic' sampler): move hostID to the
# proposed infector (pbe1$infector.proposed.ID) at the proposed infection
# time (pbe1$tinf.prop), keeping its own infectees. hostID's minitree is
# re-sampled and the proposed infector's minitree is completed afterwards.
# Works entirely by side effect on the shared state object `pbe1`.
rewire_pathE_complete_classic <- function() {
### First, dismantle minitree
# edges entering hostID, with endtimes
edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# coalescent nodes of hostID
coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
# more coalescent nodes:
# parent of transmission edge leaving hostID
coalnodes <- c(take_cnode(transnode), coalnodes)
# dismantle topology, move transmission node
pbe1$v$nodehosts[coalnodes] <- -1L
pbe1$v$nodehosts[transnode] <- pbe1$infector.proposed.ID
pbe1$v$nodeparents[c(edgesin, coalnodes, transnode)] <- -1L
pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
### Second, change transmission tree
pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
pbe1$v$infectors[pbe1$hostID] <- pbe1$infector.proposed.ID
### Third, rebuild minitree in hostID
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(c(edgeintimes), pbe1$tinf.prop, pbe1$p)
# drop the coalescent node taken via take_cnode() above; it is re-attached
# later by rewire_pullnodes_complete()
coalnodes <- tail(coalnodes, -1)
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes))
edgeend <- c(coalnodes, edgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes)))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodehosts[edgeend] <- pbe1$hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### phylotree in infector
rewire_pullnodes_complete(pbe1$v$infectors[pbe1$hostID])
}
# MCMC update, path C/F variant 2 ('complete, classic' sampler): swap hostID
# with its earliest infectee while keeping both within-host phylogenies
# intact. Sampling tips are detached, the remaining minitree nodes
# (transmission + coalescent) are exchanged between the two hosts, infectee
# sets and infection times are swapped, and the sampling tips are then
# re-attached via rewire_pullnodes_complete(). Works entirely by side effect
# on the shared state object `pbe1`.
rewire_pathCF2_complete_classic <- function() {
### Identify new infector and old infector
# earliest infectee of hostID becomes the new infector
newinfector <- which(pbe1$v$infectors == pbe1$hostID)
newinfector <- newinfector[which(pbe1$v$inftimes[newinfector] == min(pbe1$v$inftimes[newinfector]))]
oldinfector <- pbe1$v$infectors[pbe1$hostID]
### First, remove sampling nodes and collect coalescent nodes
# remove sample edges from new infector
coalnodes <- c()
sampleedges_nix <- which(pbe1$v$nodehosts == newinfector & pbe1$v$nodetypes == "x")
for(sampleedge in sampleedges_nix) {
coalnodes <- c(take_cnode(sampleedge), coalnodes)
}
coalnodes <- c(take_cnode(newinfector), coalnodes)
# remove sample edges from hostID
sampleedges_x <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "x")
for(sampleedge in sampleedges_x) {
coalnodes <- c(take_cnode(sampleedge), coalnodes)
}
coalnodes <- c(take_cnode(pbe1$hostID), coalnodes)
### Second, switch minitrees between hostID and new infector
# transmission nodes of hostID and new infector
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
transnode_ni <- 2*pbe1$d$nsamples - 1 + newinfector
# switch remaining nodes in hostID and new infector
restnodes <- which(pbe1$v$nodehosts == pbe1$hostID & (pbe1$v$nodetypes == "t" | pbe1$v$nodetypes == "c"))
restnodes_ni <- which(pbe1$v$nodehosts == newinfector & (pbe1$v$nodetypes == "t" | pbe1$v$nodetypes == "c"))
pbe1$v$nodehosts[restnodes] <- newinfector
pbe1$v$nodehosts[restnodes_ni] <- pbe1$hostID
# switch transmission nodes
childnode_ni <- which(pbe1$v$nodeparents == transnode_ni)
childnode <- which(pbe1$v$nodeparents == transnode)
parentnode_ni <- pbe1$v$nodeparents[transnode_ni]
parentnode <- pbe1$v$nodeparents[transnode]
pbe1$v$nodeparents[childnode_ni] <- transnode
pbe1$v$nodeparents[transnode_ni] <- parentnode
# special case: the new infector's transmission edge started at hostID's
# transmission node, so the two nodes are directly chained after the swap
if(parentnode_ni == transnode) {
pbe1$v$nodeparents[transnode] <- transnode_ni
} else {
pbe1$v$nodeparents[childnode] <- transnode_ni
pbe1$v$nodeparents[transnode] <- parentnode_ni
}
pbe1$v$nodehosts[c(transnode, transnode_ni)] <- pbe1$v$nodehosts[c(transnode_ni, transnode)]
pbe1$v$nodetimes[c(transnode, transnode_ni)] <- pbe1$v$nodetimes[c(transnode_ni, transnode)]
# place back sampling nodes
pbe1$v$nodehosts[c(newinfector, sampleedges_nix)] <- newinfector
pbe1$v$nodehosts[c(pbe1$hostID, sampleedges_x)] <- pbe1$hostID
# Third, change transmission tree
# swap infectee sets and infection times of the two hosts
infectees_ni <- which(pbe1$v$infectors == newinfector)
infectees <- which(pbe1$v$infectors == pbe1$hostID)
pbe1$v$inftimes[c(pbe1$hostID, newinfector)] <- pbe1$v$inftimes[c(newinfector, pbe1$hostID)]
pbe1$v$infectors[infectees] <- newinfector
pbe1$v$infectors[infectees_ni] <- pbe1$hostID
pbe1$v$infectors[c(pbe1$hostID, newinfector)] <- c(newinfector, oldinfector)
# Fourth, add sample edges in hostID and new infector
rewire_pullnodes_complete(pbe1$hostID)
rewire_pullnodes_complete(newinfector)
rewire_pullnodes_complete(oldinfector)
}
# MCMC update, path K ('complete, classic' sampler): re-sample only the
# within-host minitree of hostID -- coalescent times and topology are redrawn
# at the current (unchanged) infection time; neither the infector nor the
# infection time changes. Works entirely by side effect on the shared state
# object `pbe1`.
rewire_pathK_complete_classic <- function() {
  ### First, dismantle minitree
  # edges entering hostID, with endtimes
  edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
  edgeintimes <- pbe1$v$nodetimes[edgesin]
  # transmission node of hostID
  transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
  # coalescent nodes in hostID
  coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
  # dismantle topology; -1L (not -1) so integer vectors are not silently
  # coerced to double, consistent with the other rewire_path* functions
  pbe1$v$nodehosts[coalnodes] <- -1L
  pbe1$v$nodeparents[c(edgesin, coalnodes)] <- -1L
  ### Second, rebuild minitree
  # times of coalescent events in hostID given its current infection time
  newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$v$inftimes[pbe1$hostID], pbe1$p)
  # order all edges (transmission, sample, coalescent) by endtime in hostID
  nodeorder <- order(c(newcoaltimes, edgeintimes))
  edgeend <- c(coalnodes, edgesin)[nodeorder]
  edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
  # sample topology of minitree within hostID
  edgestart <- sample_topology(edgeend,
                               edgeendtimes,
                               c(rep("c", length(newcoaltimes)),
                                 rep("x", length(edgeintimes)))[nodeorder],
                               transnode)
  # change minitree in hostID
  pbe1$v$nodehosts[edgeend] <- pbe1$hostID
  pbe1$v$nodeparents[edgeend] <- edgestart
  pbe1$v$nodetimes[edgeend] <- edgeendtimes
}
| /R/mcmc-rewire_paths_complete_classic.R | no_license | donkeyshot/phybreak | R | false | false | 16,934 | r | rewire_pathA_complete_classic <- function() {
### First, dismantle minitree
# edges entering hostID, with endtimes
edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# coalescent nodes before and within hostID
coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
# dismantle topology, move transmission node
pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
### Second, change transmission tree
pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
### Third, rebuild minitree
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$tinf.prop, pbe1$p)
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes))
edgeend <- c(coalnodes, edgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
edgeendtimes,
c(rep("c", length(newcoaltimes)),
rep("x", length(edgeintimes)))[nodeorder],
transnode)
# change minitree in hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
}
# MCMC update, path B ('complete, classic' sampler): the current index case
# hostID is given the proposed infector and infection time; the host with
# the second-earliest infection time becomes the new index. Side-effects on
# the shared state object `pbe1`.
rewire_pathB_complete_classic <- function() {
### Identify new index
newindex <- which(pbe1$v$inftimes == sort(pbe1$v$inftimes)[2])
### First, dismantle minitree
# transmission node of new index
transnode_ni <- 2*pbe1$d$nsamples - 1 + newindex
# edges entering hostID, with endtimes (excluding t from new index)
edgesin <- setdiff(which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c"),
                   transnode_ni)
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# coalescent nodes in hostID
coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
# dismantle topology, move transmission and bottleneck nodes
pbe1$v$nodehosts[coalnodes] <- -1L
pbe1$v$nodehosts[transnode_ni] <- 0L
pbe1$v$nodehosts[transnode] <- pbe1$infector.proposed.ID
# NOTE(review): -1 here (vs -1L elsewhere) coerces nodeparents to double
# if it is stored as an integer vector -- harmless numerically, but
# inconsistent with the sibling functions.
pbe1$v$nodeparents[c(edgesin, coalnodes, transnode, transnode_ni)] <- -1
pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
### Second, change transmission tree
pbe1$v$infectors[pbe1$hostID] <- pbe1$infector.proposed.ID
pbe1$v$infectors[newindex] <- 0L
pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
### Third, rebuild minitree in hostID
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$tinf.prop, pbe1$p)
coalnodes <- tail(coalnodes, -1)
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes))
edgeend <- c(coalnodes, edgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes)))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodehosts[edgeend] <- pbe1$hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### Fourth, add edges in infector and new index
rewire_pullnodes_complete(0)
rewire_pullnodes_complete(pbe1$infector.proposed.ID)
}
# MCMC update, path C/F variant 1 ('complete, classic' sampler): swap hostID
# with its earliest infectee ('new infector'); infection times and
# transmission nodes are exchanged, and both within-host minitrees are
# re-sampled. Side-effects on the shared state object `pbe1`.
rewire_pathCF1_complete_classic <- function() {
### Identify new infector and old infector
newinfector <- which(pbe1$v$infectors == pbe1$hostID)
newinfector <- newinfector[which(pbe1$v$inftimes[newinfector] == min(pbe1$v$inftimes[newinfector]))]
oldinfector <- pbe1$v$infectors[pbe1$hostID]
### First, dismantle minitree
# edges entering new infector, with endtimes
edgesin_ni <- which(pbe1$v$nodehosts == newinfector & pbe1$v$nodetypes != "c")
edgeintimes_ni <- pbe1$v$nodetimes[edgesin_ni]
# transmission node of new infector
transnode_ni <- 2*pbe1$d$nsamples - 1 + newinfector
# edges entering hostID, with endtimes, excluding t from new infector
edgesin <- setdiff(which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c"),
                   transnode_ni)
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# all coalescent nodes in new infector and hostID
coalnodes <- which((pbe1$v$nodehosts == newinfector | pbe1$v$nodehosts == pbe1$hostID) & pbe1$v$nodetypes == "c")
# more coalescent nodes:
# only when hostID is not the index case (oldinfector > 0)
if(oldinfector > 0) {
# parent of transmission edge leaving hostID
coalnodes <- c(take_cnode(transnode), coalnodes)
}
# dismantle topology, move transmission nodes
pbe1$v$nodehosts[coalnodes] <- -1L
pbe1$v$nodehosts[transnode_ni] <- oldinfector
pbe1$v$nodehosts[transnode] <- newinfector
pbe1$v$nodeparents[c(edgesin, edgesin_ni, coalnodes, transnode, transnode_ni)] <- -1L
pbe1$v$nodetimes[c(transnode, transnode_ni)] <- pbe1$v$nodetimes[c(transnode_ni, transnode)]
### Second, change transmission tree
pbe1$v$infectors[newinfector] <- oldinfector
pbe1$v$infectors[pbe1$hostID] <- newinfector
# swap the two infection times
pbe1$v$inftimes[c(newinfector, pbe1$hostID)] <- pbe1$v$inftimes[c(pbe1$hostID, newinfector)]
### Third, rebuild minitree in hostID
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$v$inftimes[pbe1$hostID], pbe1$p)
# split the pooled coalescent nodes: leftovers go to the new infector below
coalnodes_ni <- head(coalnodes, length(coalnodes) - length(newcoaltimes))
coalnodes <- tail(coalnodes, length(newcoaltimes))
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes))
edgeend <- c(coalnodes, edgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes)))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodehosts[edgeend] <- pbe1$hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### Fourth, rebuild minitree in new infector
# add edges from hostID to incoming edges
edgesin_ni <- c(edgesin_ni, transnode)
edgeintimes_ni <- pbe1$v$nodetimes[edgesin_ni]
# times of coalescent events in new infector and bottleneck size, and distribute coalescent nodes over new infector and pre-newinfector
newcoaltimes <- sample_coaltimes(edgeintimes_ni, pbe1$v$inftimes[newinfector], pbe1$p)
coalnodes_ni <- tail(coalnodes_ni, length(newcoaltimes))
# order all edges (transmission, sample, coalescent) by endtime in new infector
nodeorder <- order(c(newcoaltimes, edgeintimes_ni))
edgeend <- c(coalnodes_ni, edgesin_ni)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes_ni)[nodeorder]
# sample topology of minitree within new infector
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(edgeintimes_ni)))[nodeorder],
                             transnode_ni)
# change minitree in new infector
pbe1$v$nodehosts[edgeend] <- newinfector
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### Fifth, add edges before new infector
rewire_pullnodes_complete(oldinfector)
}
# MCMC update, path D ('complete, classic' sampler): make hostID the new
# index case at proposed time pbe1$tinf.prop; former infectees of the old
# index (infectors == 0) become infectees of hostID. Side-effects on the
# shared state object `pbe1`.
rewire_pathD_complete_classic <- function() {
### First, dismantle minitree
# edges entering hostID, with endtimes
edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
edgeintimes <- pbe1$v$nodetimes[edgesin]
# transmission node of hostID
transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
# coalescent nodes of hostID
coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
# more coalescent nodes:
# parent of transmission edge leaving hostID
coalnodes <- c(take_cnode(transnode), coalnodes)
# new edges entering hostID, from old index
newedgesin <- which(pbe1$v$nodehosts == 0)
newedgeintimes <- pbe1$v$nodetimes[newedgesin]
# dismantle topology, move transmission and bottleneck nodes
pbe1$v$nodehosts[coalnodes] <- -1L
pbe1$v$nodehosts[newedgesin] <- pbe1$hostID
pbe1$v$nodehosts[transnode] <- 0L
pbe1$v$nodeparents[c(edgesin, newedgesin, coalnodes, transnode)] <- -1L
pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
### Second, change transmission tree
pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
# order matters: reassign the old index's infectees before making hostID
# the index (infector 0)
pbe1$v$infectors[pbe1$v$infectors == 0] <- pbe1$hostID
pbe1$v$infectors[pbe1$hostID] <- 0L
### Third, rebuild minitree in hostID
# times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
newcoaltimes <- sample_coaltimes(c(edgeintimes, newedgeintimes), pbe1$tinf.prop, pbe1$p)
# order all edges (transmission, sample, coalescent) by endtime in hostID
nodeorder <- order(c(newcoaltimes, edgeintimes, newedgeintimes))
edgeend <- c(coalnodes, edgesin, newedgesin)[nodeorder]
edgeendtimes <- c(newcoaltimes, edgeintimes, newedgeintimes)[nodeorder]
# sample topology of minitree within hostID
edgestart <- sample_topology(edgeend,
                             edgeendtimes,
                             c(rep("c", length(newcoaltimes)),
                             rep("x", length(c(edgeintimes, newedgeintimes))))[nodeorder],
                             transnode)
# change minitree in hostID
pbe1$v$nodehosts[edgeend] <- pbe1$hostID
pbe1$v$nodeparents[edgeend] <- edgestart
pbe1$v$nodetimes[edgeend] <- edgeendtimes
### phylotree before index case
rewire_pullnodes_complete(0)
}
# Proposal move (path E) for the 'classic' model: give hostID a new infector
# (pbe1$infector.proposed.ID) and a new infection time (pbe1$tinf.prop),
# then resample hostID's internal minitree.
# Called for its side effects on `pbe1`, the shared proposal store (assumes
# `pbe1` is an environment so `$<-` mutates it in place -- TODO confirm).
# Reads/writes: pbe1$v (nodehosts, nodetimes, nodetypes, nodeparents,
# inftimes, infectors), pbe1$d$nsamples, pbe1$hostID, pbe1$tinf.prop,
# pbe1$infector.proposed.ID, pbe1$p.
rewire_pathE_complete_classic <- function() {
  ### First, dismantle minitree
  # edges entering hostID, with endtimes
  edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
  edgeintimes <- pbe1$v$nodetimes[edgesin]
  # transmission node of hostID
  # (transmission nodes are indexed after the first 2*nsamples - 1 nodes)
  transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
  # coalescent nodes of hostID
  coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
  # more coalescent nodes:
  # parent of transmission edge leaving hostID
  # (take_cnode presumably detaches that coalescent node and returns its
  # index -- confirm against its definition)
  coalnodes <- c(take_cnode(transnode), coalnodes)
  # dismantle topology, move transmission node into the proposed infector
  pbe1$v$nodehosts[coalnodes] <- -1L
  pbe1$v$nodehosts[transnode] <- pbe1$infector.proposed.ID
  pbe1$v$nodeparents[c(edgesin, coalnodes, transnode)] <- -1L
  pbe1$v$nodetimes[transnode] <- pbe1$tinf.prop
  ### Second, change transmission tree
  pbe1$v$inftimes[pbe1$hostID] <- pbe1$tinf.prop
  pbe1$v$infectors[pbe1$hostID] <- pbe1$infector.proposed.ID
  ### Third, rebuild minitree in hostID
  # times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
  newcoaltimes <- sample_coaltimes(c(edgeintimes), pbe1$tinf.prop, pbe1$p)
  # drop the first collected coalescent node (the one detached from the
  # transmission edge); it is presumably re-attached later by
  # rewire_pullnodes_complete in the infector -- confirm
  coalnodes <- tail(coalnodes, -1)
  # order all edges (transmission, sample, coalescent) by endtime in hostID
  nodeorder <- order(c(newcoaltimes, edgeintimes))
  edgeend <- c(coalnodes, edgesin)[nodeorder]
  edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
  # sample topology of minitree within hostID
  edgestart <- sample_topology(edgeend,
                               edgeendtimes,
                               c(rep("c", length(newcoaltimes)),
                                 rep("x", length(edgeintimes)))[nodeorder],
                               transnode)
  # change minitree in hostID
  pbe1$v$nodehosts[edgeend] <- pbe1$hostID
  pbe1$v$nodeparents[edgeend] <- edgestart
  pbe1$v$nodetimes[edgeend] <- edgeendtimes
  ### phylotree in infector
  rewire_pullnodes_complete(pbe1$v$infectors[pbe1$hostID])
}
# Proposal move (path CF2) for the 'classic' model: swap hostID with its
# earliest-infected infectee ("new infector"), exchanging their positions in
# both the transmission tree and the phylogenetic minitrees.
# Called for its side effects on `pbe1`, the shared proposal store (assumes
# `pbe1` is an environment so `$<-` mutates it in place -- TODO confirm).
# Reads/writes: pbe1$v (nodehosts, nodetimes, nodetypes, nodeparents,
# inftimes, infectors), pbe1$d$nsamples, pbe1$hostID.
rewire_pathCF2_complete_classic <- function() {
  ### Identify new infector and old infector
  # the infectee of hostID with the earliest infection time becomes the new
  # infector (ties would keep several indices; infection times are
  # presumably unique -- confirm)
  newinfector <- which(pbe1$v$infectors == pbe1$hostID)
  newinfector <- newinfector[which(pbe1$v$inftimes[newinfector] == min(pbe1$v$inftimes[newinfector]))]
  oldinfector <- pbe1$v$infectors[pbe1$hostID]
  ### First, remove sampling nodes and collect coalescent nodes
  # remove sample edges from new infector
  # (take_cnode presumably detaches the parent coalescent node of each edge
  # and returns its index -- confirm against its definition)
  coalnodes <- c()
  sampleedges_nix <- which(pbe1$v$nodehosts == newinfector & pbe1$v$nodetypes == "x")
  for(sampleedge in sampleedges_nix) {
    coalnodes <- c(take_cnode(sampleedge), coalnodes)
  }
  coalnodes <- c(take_cnode(newinfector), coalnodes)
  # remove sample edges from hostID
  sampleedges_x <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "x")
  for(sampleedge in sampleedges_x) {
    coalnodes <- c(take_cnode(sampleedge), coalnodes)
  }
  coalnodes <- c(take_cnode(pbe1$hostID), coalnodes)
  ### Second, switch minitrees between hostID and new infector
  # transmission nodes of hostID and new infector
  # (transmission nodes are indexed after the first 2*nsamples - 1 nodes)
  transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
  transnode_ni <- 2*pbe1$d$nsamples - 1 + newinfector
  # switch remaining nodes in hostID and new infector
  restnodes <- which(pbe1$v$nodehosts == pbe1$hostID & (pbe1$v$nodetypes == "t" | pbe1$v$nodetypes == "c"))
  restnodes_ni <- which(pbe1$v$nodehosts == newinfector & (pbe1$v$nodetypes == "t" | pbe1$v$nodetypes == "c"))
  pbe1$v$nodehosts[restnodes] <- newinfector
  pbe1$v$nodehosts[restnodes_ni] <- pbe1$hostID
  # switch transmission nodes: splice each transmission node into the
  # other's position in the parent chain
  childnode_ni <- which(pbe1$v$nodeparents == transnode_ni)
  childnode <- which(pbe1$v$nodeparents == transnode)
  parentnode_ni <- pbe1$v$nodeparents[transnode_ni]
  parentnode <- pbe1$v$nodeparents[transnode]
  pbe1$v$nodeparents[childnode_ni] <- transnode
  pbe1$v$nodeparents[transnode_ni] <- parentnode
  if(parentnode_ni == transnode) {
    # special case: the two transmission nodes are directly linked, so the
    # swap just reverses that parent-child link
    pbe1$v$nodeparents[transnode] <- transnode_ni
  } else {
    pbe1$v$nodeparents[childnode] <- transnode_ni
    pbe1$v$nodeparents[transnode] <- parentnode_ni
  }
  pbe1$v$nodehosts[c(transnode, transnode_ni)] <- pbe1$v$nodehosts[c(transnode_ni, transnode)]
  pbe1$v$nodetimes[c(transnode, transnode_ni)] <- pbe1$v$nodetimes[c(transnode_ni, transnode)]
  # place back sampling nodes
  pbe1$v$nodehosts[c(newinfector, sampleedges_nix)] <- newinfector
  pbe1$v$nodehosts[c(pbe1$hostID, sampleedges_x)] <- pbe1$hostID
  # Third, change transmission tree: swap infection times, re-point each
  # host's infectees at the other host, then swap the two hosts themselves
  infectees_ni <- which(pbe1$v$infectors == newinfector)
  infectees <- which(pbe1$v$infectors == pbe1$hostID)
  pbe1$v$inftimes[c(pbe1$hostID, newinfector)] <- pbe1$v$inftimes[c(newinfector, pbe1$hostID)]
  pbe1$v$infectors[infectees] <- newinfector
  pbe1$v$infectors[infectees_ni] <- pbe1$hostID
  pbe1$v$infectors[c(pbe1$hostID, newinfector)] <- c(newinfector, oldinfector)
  # Fourth, add sample edges in hostID and new infector
  rewire_pullnodes_complete(pbe1$hostID)
  rewire_pullnodes_complete(newinfector)
  rewire_pullnodes_complete(oldinfector)
}
# Proposal move (path K) for the 'classic' model: resample the coalescent
# minitree within hostID while leaving the transmission tree unchanged.
# Called for its side effects on `pbe1`, the shared proposal store (assumes
# `pbe1` is an environment so `$<-` mutates it in place -- the sibling
# rewire_* functions rely on the same mechanism).
# Reads/writes: pbe1$v (nodehosts, nodetimes, nodetypes, nodeparents,
# inftimes), pbe1$d$nsamples, pbe1$hostID, pbe1$p.
rewire_pathK_complete_classic <- function() {
  ### First, dismantle minitree
  # edges entering hostID, with endtimes
  edgesin <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes != "c")
  edgeintimes <- pbe1$v$nodetimes[edgesin]
  # transmission node of hostID
  transnode <- 2*pbe1$d$nsamples - 1 + pbe1$hostID
  # all coalescent nodes in new infector and hostID
  coalnodes <- which(pbe1$v$nodehosts == pbe1$hostID & pbe1$v$nodetypes == "c")
  # dismantle topology, move transmission node
  # FIX: use -1L (not bare -1, a double) so the integer vectors
  # nodehosts/nodeparents are not silently coerced to double; this matches
  # the sibling rewire_path*_complete_classic functions, which assign
  # -1L/0L.
  pbe1$v$nodehosts[coalnodes] <- -1L
  pbe1$v$nodeparents[c(edgesin, coalnodes)] <- -1L
  ### Second, rebuild minitree
  # times of coalescent events in hostID and bottleneck size, and distribute coalescent nodes over hostID and pre-hostID
  newcoaltimes <- sample_coaltimes(edgeintimes, pbe1$v$inftimes[pbe1$hostID], pbe1$p)
  # order all edges (transmission, sample, coalescent) by endtime in hostID
  nodeorder <- order(c(newcoaltimes, edgeintimes))
  edgeend <- c(coalnodes, edgesin)[nodeorder]
  edgeendtimes <- c(newcoaltimes, edgeintimes)[nodeorder]
  # sample topology of minitree within hostID
  edgestart <- sample_topology(edgeend,
                               edgeendtimes,
                               c(rep("c", length(newcoaltimes)),
                                 rep("x", length(edgeintimes)))[nodeorder],
                               transnode)
  # change minitree in hostID
  pbe1$v$nodehosts[edgeend] <- pbe1$hostID
  pbe1$v$nodeparents[edgeend] <- edgestart
  pbe1$v$nodetimes[edgeend] <- edgeendtimes
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_trace}
\alias{torch_trace}
\title{Trace}
\usage{
torch_trace(self)
}
\arguments{
\item{self}{the input tensor}
}
\description{
Trace
}
\section{trace(input) -> Tensor }{
Returns the sum of the elements of the diagonal of the input 2-D matrix.
}
\examples{
if (torch_is_installed()) {
x <- torch_arange(1, 9)$view(c(3, 3))
x
torch_trace(x)
}
}
| /man/torch_trace.Rd | permissive | mlverse/torch | R | false | true | 509 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_trace}
\alias{torch_trace}
\title{Trace}
\usage{
torch_trace(self)
}
\arguments{
\item{self}{the input tensor}
}
\description{
Trace
}
\section{trace(input) -> Tensor }{
Returns the sum of the elements of the diagonal of the input 2-D matrix.
}
\examples{
if (torch_is_installed()) {
x <- torch_arange(1, 9)$view(c(3, 3))
x
torch_trace(x)
}
}
|
# Loads the supervised regression data sets into the workspace:
#   data.csv             -- training data (first column = Y)
#   data_prediction1.csv -- test data that includes Y
#   data_prediction2.csv -- test data without Y
# Objects created here:
#   X, y                         : training predictors / response
#   X_prediction1, y_prediction1 : test predictors / response (with Y)
#   data_prediction2             : raw test data without Y
data <- read.csv("data.csv", row.names = 1)
y <- as.matrix(data[1])
X <- as.matrix(data[2:ncol(data)])
data_prediction1 <- read.csv("data_prediction1.csv", row.names = 1)
y_prediction1 <- as.matrix(data_prediction1[1])
X_prediction1 <- as.matrix(data_prediction1[2:ncol(data_prediction1)])
data_prediction2 <- read.csv("data_prediction2.csv", row.names = 1)
X_prediction2 <- as.matrix(data_prediction2[c(1:ncol(data_prediction2))]) | /load_supervised_data_regression.R | no_license | R-Techy-work/GMM | R | false | false | 778 | r | # data.csv : csv file including training data
# data_prediction1.csv : csv file including test data with Y
# data_prediction2.csv : csv file including test data without Y
# X: X of training data
# y: Y of training data
# X_prediction1: X of test data with Y
# y_prediction1: Y of test data with Y
# X_prediction2: X of test data without Y
data <- read.csv("data.csv", row.names = 1)
y <- as.matrix(data[1])
X <- as.matrix(data[c(2:ncol(data))])
data_prediction1 <- read.csv("data_prediction1.csv", row.names = 1)
y_prediction1 <- as.matrix(data_prediction1[1])
X_prediction1 <- as.matrix(data_prediction1[c(2:ncol(data_prediction1))])
data_prediction2 <- read.csv("data_prediction2.csv", row.names = 1)
X_prediction2 <- as.matrix(data_prediction2[c(1:ncol(data_prediction2))]) |
#' Create a new branch.
#'
#' Branches are used to describe the data hierarchy of ggvis. As well as
#' using this function to create them, you can also use the many specialised
#' \code{branch_} functions that combine marks and transforms to create
#' useful visualisations.
#'
#' @section Hierarchy:
#'
#' A ggvis plot has a hierarchical structure, where each branch inherits
#' data and properties from its parent. This is somewhat similar to ggplot2,
#' but ggplot2 plots only had a single layer of hierarchy - with ggvis, you
#' can have multiple levels, making it easier to avoid redundancy, both in
#' your specification and in computation.
#'
#' For example, take a linear model. You often want to display both the
#' predictions and the standard error from a linear model. In ggplot2, you
#' had to use \code{geom_smooth()}, which was a special geom that combined a
#' line and a ribbon. With ggvis, you can do it yourself by using two marks
#' nested inside a branch: (and in fact, this is exactly how
#' \code{\link{branch_smooth}}) works.
#'
#' \code{
#' ggvis(mtcars, props(x = ~disp, y = ~mpg),
#' branch(transform_smooth(),
#' mark_area(props(y = ~y_min, y2 = ~y_max, fill := "#eee")),
#' mark_line()
#' ),
#' mark_symbol()
#' )
#' }
#' @param ... components: data, \code{\link{props}}, \code{branch}es,
#' or \code{\link{marks}}
#' @param drop_named if \code{FALSE}, the default, will throw an error if
#' any of the arguments in \code{...} are named. If \code{TRUE} it will
#' silently drop them - this is primarily useful for \code{branch_} functions
#' which send named arguments to the transform, and unnamed arguments to the
#' branch.
#' @export
branch <- function(..., drop_named = FALSE) {
  # Refuse calls with missing/empty arguments, then bundle everything that
  # was passed in into a classified component list.
  check_empty_args()
  parsed <- parse_components(..., drop_named = drop_named)
  # A branch may only hold data, props and child branches.
  check_branch_components(parsed)
  structure(parsed, class = "branch")
}
#' @rdname branch
#' @export
#' @param x object to test for "branch"-ness
is.branch <- function(x) {
  # TRUE when `x` carries the S3 class "branch"
  inherits(x, "branch")
}
# Validate the component list of a branch: only child branches ("children"),
# data and props are allowed. Errors (without a call) on anything else,
# e.g. scales, legends or axes; returns NULL invisibly when valid.
check_branch_components <- function(x) {
  allowed <- c("children", "data", "props")
  unexpected <- setdiff(names(x), allowed)
  if (length(unexpected) > 0) {
    stop("Branch may only contain other branches, not scales, legends or axes",
      call. = FALSE)
  }
}
# Split the ... arguments of ggvis()/branch() into a named list of component
# groups keyed by component_type(): "children", "data", "props", "scales",
# "legends", "axes", "opts", "handlers".
# @param drop_named if TRUE, silently drop named arguments in ...; if FALSE,
#   raise an error when any argument is named.
parse_components <- function(..., drop_named = FALSE) {
  args <- list(...)
  # `named` is a project helper; presumably returns a logical vector marking
  # which elements of args carry names -- confirm against its definition
  named <- named(args)
  if (any(named)) {
    if (drop_named) {
      args <- args[!named]
    } else {
      stop("Inputs to ggvis/branch should not be named", call. = FALSE)
    }
  }
  names(args) <- NULL
  # classify every argument by its S3 class (see the component_type methods)
  types <- vapply(args, component_type, character(1))
  components <- split(args, types)
  # collapse multiple props() arguments into one merged props object
  components$props <- Reduce(merge_props, components$props)
  if (length(components$data) > 0) {
    # Capture names from ...
    names <- dot_names(...)[types == "data"]
    # Convert each component to a pipeline, preserving original names
    pls <- Map(as.pipeline, components$data, name = names)
    # Collapse into single pipeline
    pl <- structure(unlist(pls, recursive = FALSE), class = "pipeline")
    # Trim any redundant sources
    components$data <- trim_to_source(pl)
  }
  components$scales <- scales(.scales = components$scales)
  components
}
# S3 generic used by parse_components() to decide which slot of the
# component list an argument belongs to ("children", "scales", "legends",
# "axes", "props", "opts", "handlers"); any object without a more specific
# method is treated as "data".
component_type <- function(x) UseMethod("component_type")
#' @export
component_type.branch <- function(x) "children"
#' @export
component_type.scale <- function(x) "scales"
#' @export
component_type.vega_legend <- function(x) "legends"
#' @export
component_type.vega_axis <- function(x) "axes"
#' @export
component_type.ggvis_props <- function(x) "props"
#' @export
component_type.ggvis_opts <- function(x) "opts"
#' @export
component_type.default <- function(x) "data"
#' @export
component_type.handler <- function(x) "handlers"
| /R/branch.R | no_license | wch/ggvis | R | false | false | 3,737 | r | #' Create a new branch.
#'
#' Branches are used to describe the data hierarchy of ggvis. As well as
#' using this function to create them, you can also use the many specialised
#' \code{branch_} functions that combine marks and transforms to create
#' useful visualisations.
#'
#' @section Hierarchy:
#'
#' A ggvis plot has a hierarchical structure, where each branch inherits
#' data and properties from its parent. This is somewhat similar to ggplot2,
#' but ggplot2 plots only had a single layer of hierarchy - with ggvis, you
#' can have multiple levels, making it easier to avoid redundancy, both in
#' your specification and in computation.
#'
#' For example, take a linear model. You often want to display both the
#' predictions and the standard error from a linear model. In ggplot2, you
#' had to use \code{geom_smooth()}, which was a special geom that combined a
#' line and a ribbon. With ggvis, you can do it yourself by using two marks
#' nested inside a branch: (and in fact, this is exactly how
#' \code{\link{branch_smooth}}) works.
#'
#' \code{
#' ggvis(mtcars, props(x = ~disp, y = ~mpg),
#' branch(transform_smooth(),
#' mark_area(props(y = ~y_min, y2 = ~y_max, fill := "#eee")),
#' mark_line()
#' ),
#' mark_symbol()
#' )
#' }
#' @param ... components: data, \code{\link{props}}, \code{branch}es,
#' or \code{\link{marks}}
#' @param drop_named if \code{FALSE}, the default, will throw an error if
#' any of the arguments in \code{...} are named. If \code{TRUE} it will
#' silently drop them - this is primarily useful for \code{branch_} functions
#' which send named arguments to the transform, and unnamed arguments to the
#' branch.
#' @export
branch <- function(..., drop_named = FALSE) {
check_empty_args()
comp <- parse_components(..., drop_named = drop_named)
check_branch_components(comp)
class(comp) <- "branch"
comp
}
#' @rdname branch
#' @export
#' @param x object to test for "branch"-ness
is.branch <- function(x) inherits(x, "branch")
check_branch_components <- function(x) {
incorrect <- setdiff(names(x), c("children", "data", "props"))
if (length(incorrect) > 0) {
stop("Branch may only contain other branches, not scales, legends or axes",
call. = FALSE)
}
}
parse_components <- function(..., drop_named = FALSE) {
args <- list(...)
named <- named(args)
if (any(named)) {
if (drop_named) {
args <- args[!named]
} else {
stop("Inputs to ggvis/branch should not be named", call. = FALSE)
}
}
names(args) <- NULL
types <- vapply(args, component_type, character(1))
components <- split(args, types)
components$props <- Reduce(merge_props, components$props)
if (length(components$data) > 0) {
# Capture names from ...
names <- dot_names(...)[types == "data"]
# Convert each component to a pipeline, preserving original names
pls <- Map(as.pipeline, components$data, name = names)
# Collapse into single pipeline
pl <- structure(unlist(pls, recursive = FALSE), class = "pipeline")
# Trim any redundant sources
components$data <- trim_to_source(pl)
}
components$scales <- scales(.scales = components$scales)
components
}
component_type <- function(x) UseMethod("component_type")
#' @export
component_type.branch <- function(x) "children"
#' @export
component_type.scale <- function(x) "scales"
#' @export
component_type.vega_legend <- function(x) "legends"
#' @export
component_type.vega_axis <- function(x) "axes"
#' @export
component_type.ggvis_props <- function(x) "props"
#' @export
component_type.ggvis_opts <- function(x) "opts"
#' @export
component_type.default <- function(x) "data"
#' @export
component_type.handler <- function(x) "handlers"
|
## Objects in R (teaching demo: basic object types)
# Vector: 1 dimension, a single data type
vetor1 <- 1:10  # integer sequence; wrapping a range in c() is redundant
vetor1
length(vetor1)  # like Python's len() =)
mode(vetor1)
class(vetor1)
# Matrix: 2 dimensions, a single data type
matriz1 <- matrix(1:10, nrow = 2)
matriz1
length(matriz1)
mode(matriz1)
class(matriz1)
# Array: 2 or more dimensions, a single data type
array1 <- array(1:5, dim = c(3, 3, 3))  # 1:5 is recycled to fill 27 cells
array1
length(array1)
mode(array1)
# Data frame: a matrix-like table whose columns may have different types
View(iris)  # opens the data viewer (interactive sessions only)
length(iris)
mode(iris)
class(iris)
typeof(iris)
# List: a collection of objects of different types
lista1 <- list(a = matriz1, b = vetor1)
lista1
# Functions are objects in R too
func1 <- function(x) {
  # square the input and return it
  var1 <- x * x
  return(var1)
}
func1(5)
class(func1)
# Removing objects
# (bug fix: the list created above is named 'lista1', not 'lista'; the old
# rm(array1, lista) only warned and left the list behind)
objects()
rm(array1, lista1)
objects()
| /Parte1/05 - Objetos.R | no_license | giusepper11/RFundamentos | R | false | false | 806 | r | ## Objetos em R
# Vetor: possui 1 dimensao e 1 tipo de dado
vetor1 <- c(1:10) # range de elementos
vetor1
length(vetor1) # len do python =)
mode(vetor1)
class(vetor1)
#Matriz possui 2 dimensoes e 1 tipo de dado
matriz1 <- matrix(1:10, nrow = 2)
matriz1
length(matriz1)
mode(matriz1)
class(matriz1)
#Array: possui 2 ou mais dimensoes e 1 tipo de dados
array1 <- array(1:5, dim=c(3,3,3))
array1
length(array1)
mode(array1)
#Data Frame matriz com diferentes tipos de dados
View(iris)
length(iris)
mode(iris)
class(iris)
typeof(iris)
# Lista : colecoes de diferentes objetos
lista1 = list(a=matriz1,b=vetor1)
lista1
# Funções também são vistas como objetos em R
func1 <- function(x){
var1 = x * x
return(var1)
}
func1(5)
class(func1)
# Removendo objetos
objects()
rm(array1,lista)
objects()
|
# Load the pre-partitioned data set (Data.Partitioned, created elsewhere)
# and attach its test partition so its columns can be referenced by name.
load("Data.Partitioned.RData")
# NOTE(review): attach() modifies the search path and is easy to leave
# dangling; prefer with(Data.Partitioned$test, ...) in new code.
attach(Data.Partitioned$test)
| /capstones/reinsurance/Reinsurance_Capstone_A_Solution/LoadTestData.R | no_license | chiefmurph/CASRBootcamp17 | R | false | false | 63 | r | load("Data.Partitioned.RData")
attach(Data.Partitioned$test)
|
# Demo: fit a simple linear model to data with heteroscedastic noise and
# inspect the fitted object.
# NOTE(review): rnorm() is unseeded, so results differ between runs;
# add set.seed() for reproducibility.
x <- 1:20
x
# noise scale grows with x
w <- 1+sqrt(x)/2
w
# y = x plus noise whose standard deviation is proportional to w
df <- data.frame(x=x,y=x+rnorm(x)*w)
df
str(df)
# ordinary least squares fit of y on x
fm<-lm(y~x, data=df)
summary(fm)
fm
str(fm)
class(fm)   # "lm"
mode(fm)    # an lm object is a list underneath
plot(fm) | /Clase17Abr2021/ej7.R | no_license | angpa/RugCGFIUBA | R | false | false | 150 | r | x <- 1:20
x
w <- 1+sqrt(x)/2
w
df <- data.frame(x=x,y=x+rnorm(x)*w)
df
str(df)
fm<-lm(y~x, data=df)
summary(fm)
fm
str(fm)
class(fm)
mode(fm)
plot(fm) |
# Build the mtcars scatterplot used to demonstrate ggplot2 themes:
# convert the numerically coded columns to labelled factors so legends and
# facets read naturally.
mtcars2 <- within(mtcars, {
  vs <- factor(vs, labels = c("V-shaped", "Straight"))
  am <- factor(am, labels = c("Automatic", "Manual"))
  cyl <- factor(cyl)
  gear <- factor(gear)
})
# weight vs fuel economy, coloured by gear count, with full labelling
p1 <- ggplot(mtcars2) +
  geom_point(aes(x = wt, y = mpg, colour = gear)) +
  labs(
    title = "Fuel economy declines as weight increases",
    subtitle = "(1973-74)",
    caption = "Data from the 1974 Motor Trend US magazine.",
    tag = "Figure 1",
    x = "Weight (1000 lbs)",
    y = "Fuel economy (mpg)",
    colour = "Gears"
  )
# the variant under test: all theme elements stripped with theme_void()
p <- p1 + theme_void()
| /ggplot2/Themes/theme_grey/example8.R | no_license | plotly/ssim_baselines | R | false | false | 546 | r | mtcars2 <- within(mtcars, {
vs <- factor(vs, labels = c("V-shaped", "Straight"))
am <- factor(am, labels = c("Automatic", "Manual"))
cyl <- factor(cyl)
gear <- factor(gear)
})
p1 <- ggplot(mtcars2) +
geom_point(aes(x = wt, y = mpg, colour = gear)) +
labs(
title = "Fuel economy declines as weight increases",
subtitle = "(1973-74)",
caption = "Data from the 1974 Motor Trend US magazine.",
tag = "Figure 1",
x = "Weight (1000 lbs)",
y = "Fuel economy (mpg)",
colour = "Gears"
)
p <- p1 + theme_void()
|
# Parallel analysis (psych::fa.parallel) on the LMS major data, to choose
# the number of factors/components to retain.
# FIX: install psych only when it is missing, instead of reinstalling
# (and hitting the network) on every run of the script.
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych")
}
library("readr")
library("dplyr")
library("foreign")
library("haven")
library(reshape2)
library(NbClust)
library(cluster)
library(fpc)
library(factoextra)
library(psych)
# Stata file read via haven::read_dta (absolute local path)
data <- read_dta("C:/Users/Vu/Google Drive/Ph.D/LMS/factorK42-24major.dta")
# keep only regular majors: drop special and high-quality programmes
data1 <- data[data$specialmajor == 0 & data$highqualtymajor == 0, ]
# columns 4:12 hold the items entering the analysis -- TODO confirm schema
d <- data1[, 4:12]
# drop rows with missing values before the parallel analysis
dat <- na.omit(d)
fa.parallel(dat)
| /principle analysis.R | no_license | haianhvu/LMS | R | false | false | 391 | r | install.packages("psych")
library("readr")
library("dplyr")
library("foreign")
library("haven")
library(reshape2)
library(NbClust)
library(cluster)
library(fpc)
library(factoextra)
library(psych)
data <- read_dta("C:/Users/Vu/Google Drive/Ph.D/LMS/factorK42-24major.dta")
data1 <- data[data$specialmajor==0 & data$highqualtymajor==0,]
d <- data1[,4:12]
dat <- na.omit(d)
fa.parallel(dat)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello48-package.R
\name{hello48}
\alias{hello48}
\title{It does some stuff}
\description{
It does some stuff
}
\section{Support}{
The development repository for the **hello48** package is found at
\url{https://github.com/noamross/hello48}. Please file
bug reports or other feedback at \url{https://github.com/noamross/hello48/issues}
}
\author{
Noam Ross \email{noam.ross@gmail.com}
}
\keyword{package}
| /man/hello48.Rd | permissive | noamross/hello48 | R | false | true | 485 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello48-package.R
\name{hello48}
\alias{hello48}
\title{It does some stuff}
\description{
It does some stuff
}
\section{Support}{
The development repository for the **hello48** package is found at
\url{https://github.com/noamross/hello48}. Please file
bug reports or other feedback at \url{https://github.com/noamross/hello48/issues}
}
\author{
Noam Ross \email{noam.ross@gmail.com}
}
\keyword{package}
|
#############Answer 1 (b)(v)
########MAP - Under Conjugate Prior - Normal Inverse Gamma Distribution
# MAP estimation for two competing models under a conjugate
# Normal-Inverse-Gamma prior on the variance v (shape alpha, rate beta).
# Each model*.conjugate function returns the NEGATIVE log posterior, which
# stats4::mle() minimises.
# NOTE(review): this script relies on objects defined earlier in the
# session: y1, y2 (observed series), n (series length), w (frequency for
# model 2) and, in the sketch section, a fixed v -- confirm they exist.
library(stats4)
alpha <- 11   # inverse-gamma shape of the prior on v
beta <- 0.7   # inverse-gamma rate (masks base::beta(); name kept as-is)
#Model 1 - AR(2), using the conditional likelihood (conditions on y1[1:2])
model1.conjugate <- function(phi1, phi2, v)
{
  (beta/v) + ((alpha+1)*log(v)) + ((((n-2)/2)+1)*log(v)) +
    sum((y1[3:n] - phi1*y1[2:(n-1)] - phi2*y1[1:(n-2)])^2/(2*v))
}
map.conjugate.model1 <- mle(model1.conjugate, start = list(phi1 = 0, phi2 = 0, v = 0.5))
#Model 2 - harmonic regression at frequency w
t <- 1:200   # time index (masks base::t(); name kept because the model functions use it)
model2.conjugate <- function(a, b, v)
{
  (beta/v) + ((alpha+1)*log(v)) + (((n/2)+1)*log(v)) +
    sum((y2 - a*cos(2*pi*w*t) - b*sin(2*pi*w*t))^2/(2*v))
}
map.conjugate.model2 <- mle(model2.conjugate, start = list(a = 0, b = 0, v = 0.5))
#############Answer 1 (b)(iii)
########Sketch - profile log-posterior curves, one parameter at a time,
# with the remaining parameters held at fixed values (0.5, 0.2, fixed v).
#Model 1 - Using Conditional Likelihood
model1.conjugate.v <- function(v)
{
  -(beta/v) - ((alpha+1)*log(v)) - ((n-2)*log(v)/2) -
    sum((y1[3:n] - 0.5*y1[2:(n-1)] - 0.2*y1[1:(n-2)])^2)/(2*v)
}
# BUG FIX: curve()'s xname argument must be a character string naming the
# x variable; the original unquoted `v` evaluated an object instead.
curve(model1.conjugate.v, xname = "v", xlim = c(0, 5))
phi1 <- seq(from = -1, to = 1, by = 0.05)
model1.conjugate.phi1 <- seq(from = -1, to = 1, by = 0.05)  # preallocated output
model1.conjugate.formula.phi1 <- function(phi1)
{
  -(beta/v) - ((alpha+1)*log(v)) - ((n-2)*log(v)/2) -
    sum((y1[3:n] - phi1*y1[2:(n-1)] - 0.2*y1[1:(n-2)])^2)/(2*v)
}
for (i in seq_along(phi1)) model1.conjugate.phi1[i] <- model1.conjugate.formula.phi1(phi1[i])
plot(phi1, model1.conjugate.phi1, type = "l", lwd = 2)
#par(new=TRUE)
phi2 <- seq(from = -1, to = 1, by = 0.05)
model1.conjugate.phi2 <- seq(from = -1, to = 1, by = 0.05)
model1.conjugate.formula.phi2 <- function(phi2)
{
  -(beta/v) - ((alpha+1)*log(v)) - ((n-2)*log(v)/2) -
    sum((y1[3:n] - 0.5*y1[2:(n-1)] - phi2*y1[1:(n-2)])^2)/(2*v)
}
for (i in seq_along(phi2)) model1.conjugate.phi2[i] <- model1.conjugate.formula.phi2(phi2[i])
plot(phi2, model1.conjugate.phi2, type = "l", lwd = 2)
#Model 2
model2.conjugate.v <- function(v)
{
  -(beta/v) - ((alpha+1)*log(v)) - (n*log(v)/2) -
    sum((y2 - 0.5*cos(2*pi*w*t) - 0.2*sin(2*pi*w*t))^2)/(2*v)
}
# BUG FIX: same xname quoting fix as above.
curve(model2.conjugate.v, xname = "v", xlim = c(0, 5))
a <- seq(from = -1, to = 1, by = 0.05)
model2.conjugate.a <- seq(from = -1, to = 1, by = 0.05)
model2.conjugate.formula.a <- function(a)
{
  -(beta/v) - ((alpha+1)*log(v)) - (n*log(v)/2) -
    sum((y2 - a*cos(2*pi*w*t) - 0.2*sin(2*pi*w*t))^2/(2*v))
}
for (i in seq_along(a)) model2.conjugate.a[i] <- model2.conjugate.formula.a(a[i])
plot(a, model2.conjugate.a, type = "l", lwd = 2)
#par(new=TRUE)
b <- seq(from = -1, to = 1, by = 0.05)
model2.conjugate.b <- seq(from = -1, to = 1, by = 0.05)
model2.conjugate.formula.b <- function(b)
{
  -(beta/v) - ((alpha+1)*log(v)) - (n*log(v)/2) -
    sum((y2 - 0.5*cos(2*pi*w*t) - b*sin(2*pi*w*t))^2/(2*v))
}
for (i in seq_along(b)) model2.conjugate.b[i] <- model2.conjugate.formula.b(b[i])
plot(b, model2.conjugate.b, type = "l", lwd = 2)
#############Answer 1 (b)(iii)
########Sketch
| /Time_Series/Bayesian_Gibbs/Conjugate_prior.R | no_license | shwetank3/niyati | R | false | false | 2,679 | r | #############Answer 1 (b)(v)
########MAP - Under Conjugate Prior - Normal Inverse Gamma Distribution
library(stats4)
alpha = 11
beta = 0.7
#Model 1 - Using Conditional Likelihood
model1.conjugate <- function(phi1,phi2,v)
{
(beta/v)+((alpha+1)*log(v))+((((n-2)/2)+1)*log(v))+sum((y1[3:n] - phi1*y1[2:(n-1)]- phi2*y1[1:(n-2)])^2/(2*v))
}
map.conjugate.model1 <- mle(model1.conjugate,start=list(phi1=0,phi2=0,v=0.5))
#Model 2
t<-(1:200)
model2.conjugate <- function(a,b,v)
{
(beta/v)+((alpha+1)*log(v))+(((n/2)+1)*log(v)) + sum((y2-a*cos(2*pi*w*t)-b*sin(2*pi*w*t))^2/(2*v))
}
map.conjugate.model2 <- mle(model2.conjugate,start=list(a=0,b=0,v=0.5))
#############Answer 1 (b)(iii)
########Sketch
#Model 1 - Using Conditional Likelihood
model1.conjugate.v <- function(v)
{
-(beta/v)-((alpha+1)*log(v))-((n-2)*log(v)/2)-sum((y1[3:n] - 0.5*y1[2:(n-1)]- 0.2*y1[1:(n-2)])^2)/(2*v)
}
curve(model1.conjugate.v,xname=v,xlim = c(0,5))
phi1 <- seq(from=-1,to=1,by=0.05)
model1.conjugate.phi1 <- seq(from=-1,to=1,by=0.05)
model1.conjugate.formula.phi1 <- function(phi1)
{
-(beta/v)-((alpha+1)*log(v))-((n-2)*log(v)/2)-sum((y1[3:n] - phi1*y1[2:(n-1)]- 0.2*y1[1:(n-2)])^2)/(2*v)
}
for (i in 1:length(phi1)) model1.conjugate.phi1[i] <- model1.conjugate.formula.phi1(phi1[i])
plot(phi1,model1.conjugate.phi1,type="l",lwd=2)
#par(new=TRUE)
phi2 <- seq(from=-1,to=1,by=0.05)
model1.conjugate.phi2 <- seq(from=-1,to=1,by=0.05)
model1.conjugate.formula.phi2 <- function(phi2)
{
-(beta/v)-((alpha+1)*log(v))-((n-2)*log(v)/2)-sum((y1[3:n] - 0.5*y1[2:(n-1)]- phi2*y1[1:(n-2)])^2)/(2*v)
}
for (i in 1:length(phi2)) model1.conjugate.phi2[i] <- model1.conjugate.formula.phi2(phi2[i])
plot(phi2,model1.conjugate.phi2,type="l",lwd=2)
#Model 2
model2.conjugate.v <- function(v)
{
-(beta/v)-((alpha+1)*log(v))-(n*log(v)/2) - sum((y2-0.5*cos(2*pi*w*t)-0.2*sin(2*pi*w*t))^2)/(2*v)
}
curve(model2.conjugate.v,xname=v,xlim = c(0,5))
a <- seq(from=-1,to=1,by=0.05)
model2.conjugate.a <- seq(from=-1,to=1,by=0.05)
model2.conjugate.formula.a <- function(a)
{
-(beta/v)-((alpha+1)*log(v))-(n*log(v)/2) - sum((y2-a*cos(2*pi*w*t)-0.2*sin(2*pi*w*t))^2/(2*v))
}
for (i in 1:length(a)) model2.conjugate.a[i] <- model2.conjugate.formula.a(a[i])
plot(a,model2.conjugate.a,type="l",lwd=2)
#par(new=TRUE)
b <- seq(from=-1,to=1,by=0.05)
model2.conjugate.b <- seq(from=-1,to=1,by=0.05)
model2.conjugate.formula.b <- function(b)
{
-(beta/v)-((alpha+1)*log(v))-(n*log(v)/2) - sum((y2-0.5*cos(2*pi*w*t)-b*sin(2*pi*w*t))^2/(2*v))
}
for (i in 1:length(b)) model2.conjugate.b[i] <- model2.conjugate.formula.b(b[i])
plot(b,model2.conjugate.b,type="l",lwd=2)
#############Answer 1 (b)(iii)
########Sketch
|
#=============================================================================#
# ArrayAnalysis - affyAnalysisQC #
# a tool for quality control and pre-processing of Affymetrix array data #
# #
# Copyright 2010-2011 BiGCaT Bioinformatics #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#=============================================================================#
#for compatibility with R local script, set every variable to a boolean depending on whether it exists
# only to be run when the code is called from the webportal or GenePattern
#
# Two kinds of parameters are registered:
#  * option parameters : kept when supplied by the caller, otherwise
#    defaulted ("" for strings; 41 for maxArray)
#  * flag parameters   : always (re)set to TRUE/FALSE according to whether
#    the caller defined a variable of that name
# The repeated exists()/if(!exists()) lines are factored into two loops;
# assign() writes into the environment this script is sourced in, exactly
# like the original top-level `<-` assignments.
string_params <- c("rawdataZip", "refName", "arrayGroup", "MAOption1",
                   "clusterOption1", "clusterOption2", "normMeth",
                   "normOption1", "CDFtype", "species")
for (param in string_params) {
  if (!exists(param)) assign(param, "")
}
if (!exists("maxArray")) maxArray <- 41
flag_params <- c("reOrder", "layoutPlot", "controlPlot", "samplePrep",
                 "ratio", "degPlot", "hybrid", "percPres", "posnegDistrib",
                 "bgPlot", "scaleFact", "boxplotRaw", "boxplotNorm",
                 "densityRaw", "densityNorm", "MARaw", "MANorm",
                 "spatialImage", "PLMimage", "posnegCOI", "Nuse", "Rle",
                 "correlRaw", "correlNorm", "clusterRaw", "clusterNorm",
                 "PCARaw", "PCANorm", "PMAcalls", "customCDF")
for (param in flag_params) {
  assign(param, exists(param))
}
# remove the loop helpers so the resulting workspace matches the original
rm(param, string_params, flag_params)
print ("Parameters have been registered")
| /setParametersQC_web.R | no_license | inambioinfo/affyQC_Module | R | false | false | 3,151 | r | #=============================================================================#
# ArrayAnalysis - affyAnalysisQC #
# a tool for quality control and pre-processing of Affymetrix array data #
# #
# Copyright 2010-2011 BiGCaT Bioinformatics #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#=============================================================================#
# Register run parameters when the script is invoked from the web portal or
# GenePattern instead of the local R wrapper.
#
# Two kinds of parameters are normalised here:
#   * option values: left untouched when the caller supplied them, otherwise
#     initialised to a default ("" everywhere except maxArray = 41);
#   * plot/analysis switches: coerced to a logical that is TRUE exactly when
#     the caller defined a variable of that name in this session.

# Option values and their defaults.
param.defaults <- list(
  rawdataZip = "", refName = "", arrayGroup = "", maxArray = 41,
  MAOption1 = "", clusterOption1 = "", clusterOption2 = "",
  normMeth = "", normOption1 = "", CDFtype = "", species = ""
)
for (param in names(param.defaults)) {
  if (!exists(param)) assign(param, param.defaults[[param]])
}

# Boolean switches: TRUE when the portal passed the variable, FALSE otherwise.
flag.names <- c(
  "reOrder", "layoutPlot", "controlPlot", "samplePrep", "ratio", "degPlot",
  "hybrid", "percPres", "posnegDistrib", "bgPlot", "scaleFact", "boxplotRaw",
  "boxplotNorm", "densityRaw", "densityNorm", "MARaw", "MANorm",
  "spatialImage", "PLMimage", "posnegCOI", "Nuse", "Rle", "correlRaw",
  "correlNorm", "clusterRaw", "clusterNorm", "PCARaw", "PCANorm", "PMAcalls",
  "customCDF"
)
for (flag in flag.names) {
  assign(flag, exists(flag))
}

print("Parameters have been registered")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/widely_svd.R
\name{widely_svd}
\alias{widely_svd}
\alias{widely_svd_}
\title{Turn into a wide matrix, perform SVD, return to tidy form}
\usage{
widely_svd(tbl, item, feature, value, nv = NULL, weight_d = FALSE, ...)
widely_svd_(tbl, item, feature, value, nv = NULL, weight_d = FALSE, ...)
}
\arguments{
\item{tbl}{Table}
\item{item}{Item to perform dimensionality reduction on; will end up in \code{item} column}
\item{feature}{Column describing the feature that links one item to others.}
\item{value}{Value}
\item{nv}{Optional; the number of principal components to estimate. Recommended for matrices
with many features.}
\item{weight_d}{Whether to multiply each value by the \code{d} principal component.}
\item{...}{Extra arguments passed to \code{svd} (if \code{nv} is \code{NULL})
or \code{irlba} (if \code{nv} is given)}
}
\value{
A tbl_df with three columns. The first is retained from the \code{item} input,
then \code{dimension} and \code{value}. Each row represents one principal component
value.
}
\description{
This is useful for dimensionality reduction of items, especially when setting a
lower nv.
}
\examples{
library(dplyr)
library(gapminder)
# principal components driving change
gapminder_svd <- gapminder \%>\%
widely_svd(country, year, lifeExp)
gapminder_svd
# compare SVDs, join with other data
library(ggplot2)
library(tidyr)
gapminder_svd \%>\%
spread(dimension, value) \%>\%
inner_join(distinct(gapminder, country, continent), by = "country") \%>\%
ggplot(aes(`1`, `2`, label = country)) +
geom_point(aes(color = continent)) +
geom_text(vjust = 1, hjust = 1)
}
| /man/widely_svd.Rd | permissive | Kudusch/widyr | R | false | true | 1,690 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/widely_svd.R
\name{widely_svd}
\alias{widely_svd}
\alias{widely_svd_}
\title{Turn into a wide matrix, perform SVD, return to tidy form}
\usage{
widely_svd(tbl, item, feature, value, nv = NULL, weight_d = FALSE, ...)
widely_svd_(tbl, item, feature, value, nv = NULL, weight_d = FALSE, ...)
}
\arguments{
\item{tbl}{Table}
\item{item}{Item to perform dimensionality reduction on; will end up in \code{item} column}
\item{feature}{Column describing the feature that links one item to others.}
\item{value}{Value}
\item{nv}{Optional; the number of principal components to estimate. Recommended for matrices
with many features.}
\item{weight_d}{Whether to multiply each value by the \code{d} principal component.}
\item{...}{Extra arguments passed to \code{svd} (if \code{nv} is \code{NULL})
or \code{irlba} (if \code{nv} is given)}
}
\value{
A tbl_df with three columns. The first is retained from the \code{item} input,
then \code{dimension} and \code{value}. Each row represents one principal component
value.
}
\description{
This is useful for dimensionality reduction of items, especially when setting a
lower nv.
}
\examples{
library(dplyr)
library(gapminder)
# principal components driving change
gapminder_svd <- gapminder \%>\%
widely_svd(country, year, lifeExp)
gapminder_svd
# compare SVDs, join with other data
library(ggplot2)
library(tidyr)
gapminder_svd \%>\%
spread(dimension, value) \%>\%
inner_join(distinct(gapminder, country, continent), by = "country") \%>\%
ggplot(aes(`1`, `2`, label = country)) +
geom_point(aes(color = continent)) +
geom_text(vjust = 1, hjust = 1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/argo-vars.R
\name{argo_read_vars}
\alias{argo_read_vars}
\title{Read NetCDF variable metadata}
\usage{
argo_read_vars(file, vars = NULL, quiet = FALSE)
}
\arguments{
\item{file}{A previously downloaded Argo NetCDF file
(e.g., using \code{\link[=argo_download]{argo_download()}}).}
\item{vars}{A vector of variable names to include. Explicitly specifying
\code{vars} can lead to much faster read times when reading many files.}
\item{quiet}{Use \code{FALSE} to stop for malformed files, \code{NA} to
silently warn for malformed files, or \code{TRUE} to silently ignore
read errors when possible.}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with one row per variable and columns \code{name},
\code{size}, \code{dim}, and \verb{att_*} for variable attributes.
}
\description{
Use \code{argo_read_vars()} to extract variable information from an Argo NetCDF file
in the form of one row per variable.
}
\examples{
prof_file <- system.file(
"cache-test/dac/csio/2900313/profiles/D2900313_000.nc",
package = "argodata"
)
argo_read_vars(prof_file)
}
| /man/argo_read_vars.Rd | permissive | KimBaldry/argodata | R | false | true | 1,138 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/argo-vars.R
\name{argo_read_vars}
\alias{argo_read_vars}
\title{Read NetCDF variable metadata}
\usage{
argo_read_vars(file, vars = NULL, quiet = FALSE)
}
\arguments{
\item{file}{A previously downloaded Argo NetCDF file
(e.g., using \code{\link[=argo_download]{argo_download()}}).}
\item{vars}{A vector of variable names to include. Explicitly specifying
\code{vars} can lead to much faster read times when reading many files.}
\item{quiet}{Use \code{FALSE} to stop for malformed files, \code{NA} to
silently warn for malformed files, or \code{TRUE} to silently ignore
read errors when possible.}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with one row per variable and columns \code{name},
\code{size}, \code{dim}, and \verb{att_*} for variable attributes.
}
\description{
Use \code{argo_read_vars()} to extract variable information from an Argo NetCDF file
in the form of one row per variable.
}
\examples{
prof_file <- system.file(
"cache-test/dac/csio/2900313/profiles/D2900313_000.nc",
package = "argodata"
)
argo_read_vars(prof_file)
}
|
# Fuzz-harness replay fixture: calls the internal eive:::cga_generate_chromosome()
# with a generated argument list (NULL vec; probability vector containing a huge
# negative value and NaN) to probe crash/robustness behaviour.
testlist <- list(vec = NULL, prob_vec = c(-4.38889631485881e+305, NaN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(eive:::cga_generate_chromosome,testlist)
str(result) | /eive/inst/testfiles/cga_generate_chromosome/AFL_cga_generate_chromosome/cga_generate_chromosome_valgrind_files/1609871081-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 210 | r | testlist <- list(vec = NULL, prob_vec = c(-4.38889631485881e+305, NaN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(eive:::cga_generate_chromosome,testlist)
str(result) |
library(raster)
library(rgdal)
library(lubridate)
library(classInt)
library(randtoolbox)
library(ggplot2)
library(ggpubr)
library(nortest)
library(resample)
library(MASS)
library(fitdistrplus)
######################################################################################
setwd("/home/alf/Scrivania/lav_montecarlo") # hard-coded author path; adjust before reuse
source("aux_seas_mcarlo.R") # presumably defines prob_Rain(), gen_WT_month(), dates_clim() used below
######################################################################################
# setup parameters
windows=15 # NOTE(review): not used in the visible part of this script -- confirm
dataset_prec=readRDS("dataset_prec.rds") # daily precipitation table, one column per station
rains_tosc=readRDS("dataset_prec.rds") # NOTE(review): loads the same file as dataset_prec -- verify intended
pct9_daily=readRDS("dates_pct9_daily.rds") # daily PCT9 circulation-type classification
iniclim="1981-01-01" # climatology start date
endclim="2010-12-31" # climatology end date
city="Firenze" # station column to simulate
WC=c(1:9) # the nine circulation/weather types
sim_months=10000 # number of Monte Carlo monthly realisations
######################################################################################
# Restrict precipitation and circulation-type records to the climatology window.
mat_clim=dataset_prec[which( (dataset_prec$dates>as.Date(iniclim) & (dataset_prec$dates<as.Date(endclim)))==T),]
# NOTE(review): the row filter below is built from dataset_prec$dates, not
# pct9_daily$dates -- correct only if both tables share row order/length.
pct9_daily_clim=pct9_daily[which( (dataset_prec$dates>as.Date(iniclim) & (dataset_prec$dates<as.Date(endclim)))==T),]
mat_clim=merge(mat_clim,pct9_daily_clim)
###################################################################################################################################
# ECMWF ensemble PCT9 weights for March: columns 4:12 hold per-type counts that
# are divided by 51 (presumably the ensemble size) to get probabilities.
f_PCT_ecm_marzo=read.csv("Pesi-PCT-ecm_Giorno-marzo.csv",header=T)
frequenze_pct9=read.csv("frequenze_pct9.csv",header=F,sep=" ")
dates_forecast=as.Date(ISOdate(f_PCT_ecm_marzo$year,f_PCT_ecm_marzo$month,f_PCT_ecm_marzo$day))
# Mean monthly weights per type, and the per-day weight matrix for March.
month_pesi_PCT_ecm_marzo=apply(f_PCT_ecm_marzo[which(f_PCT_ecm_marzo$month==3),4:12]/51,2,mean)
day_pesi_PCT_ecm_marzo=data.frame(dates_forecast,f_PCT_ecm_marzo[which(f_PCT_ecm_marzo$month==3),4:12]/51)
########################################################################################################################
# Build, for every forecast day, one rain sampler per weather type from the
# climatological days matching that calendar date (dates_clim() comes from the
# sourced helper file).
res_obj=list()
for ( jj in 1:length(dates_forecast)) {
mat_day=mat_clim[which(grepl(paste(dates_clim(dates_forecast[jj],daygrep=F),collapse = "|"), mat_clim$dates)==T),]
mat_day_temp=mat_day[,c("dates",city,"ct")]
res=list()
for ( i in WC){
# Precipitation values observed under circulation type i on these dates.
temp=subset(mat_day_temp,ct==i)[,c(city)]
# Default: an all-dry sampler when the type never occurred on these dates.
res[[i]]=prob_Rain(rep(0,30))
if(length(temp)>0) {res[[i]]=prob_Rain(temp)}
}
res_obj[[jj]]=res
}
saveRDS(res_obj,"firenze_marzo_sampler.rds")
########################################################################################################################
# Generate the per-day weather-type (WT) draw for each forecast day from the
# daily ECMWF PCT9 weight matrix (columns 2:10 hold the 9 WT probabilities).
wt_months_days <- apply(day_pesi_PCT_ecm_marzo[, 2:10], 1, function(x) gen_WT_month(x, 1))

# Monte Carlo simulation 1: for every forecast day (column) and simulated month
# (row), draw rain occurrence against the WT-conditional rain probability and,
# when wet, sample an amount from the WT-conditional empirical CDF.
res_final <- data.frame(matrix(NA, nrow = sim_months, ncol = length(dates_forecast)))
for (j in seq_along(dates_forecast)) {
  sampler <- res_obj[[j]][[wt_months_days[j]]]  # WT fixed within this loop
  for (i in seq_len(nrow(res_final))) {
    res_final[i, j] <- ifelse(runif(1) > sampler$rain,
                              as.numeric(quantile(sampler$rain_ecf, runif(1))),
                              0)
  }
}

# Monte Carlo simulation 2: same sampling scheme, driven by a pre-drawn uniform
# matrix, with the WT draws refreshed every 1000 simulated months.
# BUG FIX: this used to read `random_matrix = res_final = data.frame(...)`,
# which re-bound res_final to an all-NA frame and silently discarded the
# simulation above before it was saved below.
random_matrix <- data.frame(matrix(NA, nrow = sim_months, ncol = length(dates_forecast)))
random_matrix <- apply(random_matrix, c(1, 2), function(x) runif(1))
for (j in seq_along(dates_forecast)) {
  for (i in seq_len(nrow(random_matrix))) {
    random_matrix[i, j] <- ifelse(random_matrix[i, j] > res_obj[[j]][[wt_months_days[j]]]$rain,
                                  as.numeric(quantile(res_obj[[j]][[wt_months_days[j]]]$rain_ecf, runif(1))),
                                  0)
    # Resample the weather types every 1000 rows to propagate WT uncertainty.
    if (i %% 1000 == 0) {
      wt_months_days <- apply(day_pesi_PCT_ecm_marzo[, 2:10], 1, function(x) gen_WT_month(x, 1))
    }
  }
}
saveRDS(res_final, "res_final.rds")
########################################################################################################################
# Monthly precipitation totals per simulated realisation.
res_month=apply(res_final,1,sum)
res_month2=apply(random_matrix,1,sum)
# NOTE(review): the ecdf object below is computed but neither assigned nor
# plotted -- presumably meant to be stored or printed; confirm intent.
ecdf(res_month2)
# Histogram of simulated March totals (first simulation scheme).
qplot(res_month,
      geom="histogram",
      binwidth = 5,
      main = "Firenze Distribuzione pioggie Marzo 2020",
      xlab = "mm",
      ylab = "Simulazioni (N=10000) ",
      fill=I("blue"),
      col=I("red"),
      alpha=I(.2),
      xlim=c(0,200))
# Histogram of simulated totals driven by the ECMWF PCT9 forecast; kept in q1
# for the combined figure at the end of the script.
q1=qplot(res_month2,
         geom="histogram",
         binwidth = 5,
         main = "Firenze Distribuzione pioggie Marzo 2020 ECM PCT9 CT forecast",
         xlab = "mm",
         ylab = "Simulazioni (N=10000) ",
         fill=I("blue"),
         col=I("red"),
         alpha=I(.2),
         xlim=c(0,200))
############################################################################
# clim and bootstrap
# Observed March climatology for Firenze: yearly March totals, a nonparametric
# bootstrap of the mean, and a fitted gamma distribution for comparison.
dataset_prec$mese=month(dataset_prec$dates) # month() from lubridate
dataset_prec_marzo=subset(dataset_prec,mese==3)
# Yearly March totals; assumes dataset_prec carries an `anno` (year) column -- confirm.
fir_marzo=tapply(dataset_prec_marzo$Firenze,dataset_prec_marzo$anno,sum)
N=length(fir_marzo)
nboots=10000
boot.result=numeric(nboots) # preallocated bootstrap means
for(i in 1:nboots){
boot.samp=sample(fir_marzo,N,replace=TRUE)
boot.result[i]=mean(boot.samp)
}
# Parametric alternative: gamma fit (MASS::fitdistr) and random draws from it.
g=fitdistr(fir_marzo,"gamma")
boot_gamma=rgamma(10000,g$estimate[1],g$estimate[2])
# Cross-check of the fit with fitdistrplus::fitdist (MLE).
fitgmme <- fitdist(as.numeric(fir_marzo), "gamma", method="mle")
summary(fitgmme)
# NOTE(review): boot_gamma2 re-draws from the fitdistr estimates (g), not from
# fitgmme, so it duplicates boot_gamma -- verify which fit was intended.
boot_gamma2=rgamma(10000,g$estimate[1],g$estimate[2])
############################################################################
# Histogram of the gamma-based climatological totals, for comparison with the
# forecast-driven simulation in q1.
q2=qplot(boot_gamma,
         binwidth = 5,
         main = "Firenze Distribuzione pioggie Marzo Clim Peretola ",
         xlab = "mm",
         ylab = "Simulazioni (N=10000) ",
         fill=I("blue"),
         col=I("red"),
         alpha=I(.2),
         xlim=c(0,200))
########################################################################################################################
# Final figure: forecast-driven (q1) vs climatological (q2) distributions.
ggarrange(q1,q2,nrow=2)
| /benedetti_sampling.R | no_license | alfcrisci/lav_montecarlo | R | false | false | 5,684 | r | library(raster)
library(rgdal)
library(lubridate)
library(classInt)
library(randtoolbox)
library(ggplot2)
library(ggpubr)
library(nortest)
library(resample)
library(MASS)
library(fitdistrplus)
######################################################################################
setwd("/home/alf/Scrivania/lav_montecarlo")
source("aux_seas_mcarlo.R")
######################################################################################
# setup parameters
windows=15
dataset_prec=readRDS("dataset_prec.rds")
rains_tosc=readRDS("dataset_prec.rds")
pct9_daily=readRDS("dates_pct9_daily.rds")
iniclim="1981-01-01"
endclim="2010-12-31"
city="Firenze"
WC=c(1:9)
sim_months=10000
######################################################################################
mat_clim=dataset_prec[which( (dataset_prec$dates>as.Date(iniclim) & (dataset_prec$dates<as.Date(endclim)))==T),]
pct9_daily_clim=pct9_daily[which( (dataset_prec$dates>as.Date(iniclim) & (dataset_prec$dates<as.Date(endclim)))==T),]
mat_clim=merge(mat_clim,pct9_daily_clim)
###################################################################################################################################
f_PCT_ecm_marzo=read.csv("Pesi-PCT-ecm_Giorno-marzo.csv",header=T)
frequenze_pct9=read.csv("frequenze_pct9.csv",header=F,sep=" ")
dates_forecast=as.Date(ISOdate(f_PCT_ecm_marzo$year,f_PCT_ecm_marzo$month,f_PCT_ecm_marzo$day))
month_pesi_PCT_ecm_marzo=apply(f_PCT_ecm_marzo[which(f_PCT_ecm_marzo$month==3),4:12]/51,2,mean)
day_pesi_PCT_ecm_marzo=data.frame(dates_forecast,f_PCT_ecm_marzo[which(f_PCT_ecm_marzo$month==3),4:12]/51)
########################################################################################################################
res_obj=list()
for ( jj in 1:length(dates_forecast)) {
mat_day=mat_clim[which(grepl(paste(dates_clim(dates_forecast[jj],daygrep=F),collapse = "|"), mat_clim$dates)==T),]
mat_day_temp=mat_day[,c("dates",city,"ct")]
res=list()
for ( i in WC){
temp=subset(mat_day_temp,ct==i)[,c(city)]
res[[i]]=prob_Rain(rep(0,30))
if(length(temp)>0) {res[[i]]=prob_Rain(temp)}
}
res_obj[[jj]]=res
}
saveRDS(res_obj,"firenze_marzo_sampler.rds")
########################################################################################################################
# generate wt forecast days with daily forecast matrix
wt_months_days=apply(day_pesi_PCT_ecm_marzo[,2:10],1,function(x) gen_WT_month(x,1))
res_final=data.frame(matrix(NA, nrow=sim_months, ncol=length(dates_forecast)))
for ( j in 1:length(dates_forecast)) {
for ( i in 1:nrow(res_final)) {res_final[i,j]=ifelse(runif(1)>res_obj[[j]][[wt_months_days[j]]]$rain,
as.numeric(quantile(res_obj[[j]][[wt_months_days[j]]]$rain_ecf,runif(1))),
0);
}
}
random_matrix=res_final=data.frame(matrix(NA, nrow=sim_months, ncol=length(dates_forecast)))
random_matrix=apply(random_matrix,c(1,2),function(x) runif(1))
for ( j in 1:length(dates_forecast)) {
for ( i in 1:nrow(random_matrix)) {random_matrix[i,j]=ifelse(random_matrix[i,j]>res_obj[[j]][[wt_months_days[j]]]$rain,
as.numeric(quantile(res_obj[[j]][[wt_months_days[j]]]$rain_ecf,runif(1))),
0);
if ( i %% 1000 ==0) {wt_months_days=apply(day_pesi_PCT_ecm_marzo[,2:10],1,function(x) gen_WT_month(x,1))}
}
}
saveRDS(res_final,"res_final.rds")
########################################################################################################################
res_month=apply(res_final,1,sum)
res_month2=apply(random_matrix,1,sum)
ecdf(res_month2)
qplot(res_month,
geom="histogram",
binwidth = 5,
main = "Firenze Distribuzione pioggie Marzo 2020",
xlab = "mm",
ylab = "Simulazioni (N=10000) ",
fill=I("blue"),
col=I("red"),
alpha=I(.2),
xlim=c(0,200))
q1=qplot(res_month2,
geom="histogram",
binwidth = 5,
main = "Firenze Distribuzione pioggie Marzo 2020 ECM PCT9 CT forecast",
xlab = "mm",
ylab = "Simulazioni (N=10000) ",
fill=I("blue"),
col=I("red"),
alpha=I(.2),
xlim=c(0,200))
############################################################################
# clim and bootstrap
dataset_prec$mese=month(dataset_prec$dates)
dataset_prec_marzo=subset(dataset_prec,mese==3)
fir_marzo=tapply(dataset_prec_marzo$Firenze,dataset_prec_marzo$anno,sum)
N=length(fir_marzo)
nboots=10000
boot.result=numeric(nboots)
for(i in 1:nboots){
boot.samp=sample(fir_marzo,N,replace=TRUE)
boot.result[i]=mean(boot.samp)
}
g=fitdistr(fir_marzo,"gamma")
boot_gamma=rgamma(10000,g$estimate[1],g$estimate[2])
fitgmme <- fitdist(as.numeric(fir_marzo), "gamma", method="mle")
summary(fitgmme)
boot_gamma2=rgamma(10000,g$estimate[1],g$estimate[2])
############################################################################
q2=qplot(boot_gamma,
binwidth = 5,
main = "Firenze Distribuzione pioggie Marzo Clim Peretola ",
xlab = "mm",
ylab = "Simulazioni (N=10000) ",
fill=I("blue"),
col=I("red"),
alpha=I(.2),
xlim=c(0,200))
########################################################################################################################
ggarrange(q1,q2,nrow=2)
|
#' Health Inequities
#'
#' This package contains data on US counties
#' pertaining to life expectancy.
#' @docType package
#' @name HealthIneq
#' @aliases Health HealthIneq-package
NULL
#' "Health Inequities"
#'
#' A data set containing information on
#' health inequities across the United
#' States by county.
#'
#' @source \url{https://healthinequality.org/data/}
#' @format Codebook Descriptions:
#' \describe{
#' \item{cz}{Commuting Zone ID}
#' \item{czname}{Commuting Zone Name}
#' \item{pop2000}{Commuting Zone Population in 2000}
#' \item{fips}{State FIPS}
#' \item{statename}{State Name}
#' \item{stateabbrv}{State Abbreviation}
#' \item{cursmokeq1}{BRFSS: Fraction Current Smokers in Q1}
#' \item{cursmokeq2}{BRFSS: Fraction Current Smokers in Q2}
#' \item{cursmokeq3}{BRFSS: Fraction Current Smokers in Q3}
#' \item{cursmokeq4}{BRFSS: Fraction Current Smokers in Q4}
#' \item{bmiobeseq1}{BRFSS: Fraction Obese in Q1}
#' \item{bmiobeseq2}{BRFSS: Fraction Obese in Q2}
#' \item{bmiobeseq3}{BRFSS: Fraction Obese in Q3}
#' \item{bmiobeseq4}{BRFSS: Fraction Obese in Q4}
#' \item{exerciseanyq1}{BRFSS: Fraction Exercised in Past 30 Days in Q1}
#' \item{exerciseanyq2}{BRFSS: Fraction Exercised in Past 30 Days in Q2}
#' \item{exerciseanyq3}{BRFSS: Fraction Exercised in Past 30 Days in Q3}
#' \item{exerciseanyq4}{BRFSS: Fraction Exercised in Past 30 Days in Q4}
#' \item{puninsured2010}{Percent Uninsured}
#' \item{reimbpenrolladj10}{Medicare $ Per Enrollee}
#' \item{mort30dayhospz}{30-day Hospital Mortality Rate Index}
#' \item{adjmortmeasamiall30day}{30-day Mortality for Heart Attacks}
#' \item{adjmortmeaschfall30day}{30-day Mortality for Heart Failure}
#' \item{adjmortmeaspnall30day}{30-day Mortality for Pneumonia}
#' \item{medprevqualz}{Mean of Z-Scores for Dartmouth Atlas Ambulatory Care Measures}
#' \item{primcarevis10}{Percent of Medicare Enrollees with at Least One Primary Care Visit}
#' \item{diabhemotest10}{Percent Diabetic with Annual Hemoglobin Test}
#' \item{diabeyeexam10}{Percent Diabetic with Annual Eye Test}
#' \item{diablipids10}{Percent Diabetic with Annual Lipids Test}
#' \item{mammogram10}{Percent Female Aged 67-69 with Mammogram}
#' \item{ambdischper100010}{Discharges for Ambulatory Care Sensitive Conditions Among Medicare Enrollees}
#' \item{cs00seginc}{Income Segregation}
#' \item{cs00segincpov25}{Segregation of Poverty (< p25)}
#' \item{cs00segincaff75}{Segregation of Affluence (>p75)}
#' \item{csracetheil2000}{Racial Segregation}
#' \item{gini99}{Gini Index Within Bottom 99%}
#' \item{poorshare}{Poverty Rate}
#' \item{incshare1perc}{Top 1% Income Share}
#' \item{fracmiddleclass}{Fraction Middle Class (p25-p75)}
#' \item{scapski90pcm}{Social Capital Index}
#' \item{reltot}{Percent Religious}
#' \item{csfracblack}{Percent Black}
#' \item{csfrachisp}{Percent Hispanic}
#' \item{unemprate}{Unemployment Rate in 2000}
#' \item{popd20001980}{Percent Change in Population 1980-2000}
#' \item{lfd20001980}{Percent Change in Labor Force 1980-2000}
#' \item{cslabforce}{Labor Force Participation}
#' \item{cselfindman}{Share Working in Manufacturing}
#' \item{csbornforeign}{Percent Foreign Born}
#' \item{miginflow}{Migration Inflow Rate}
#' \item{migoutflow}{Migration Outflow Rate}
#' \item{popdensity}{Population Density}
#' \item{fractraveltimelt15}{Fraction with Commute < 15 Min}
#' \item{hhinc00}{Mean Household Income}
#' \item{medianhousevalue}{Median House Value}
#' \item{ccdexptot}{School Expenditure per Student}
#' \item{ccdpuptchratio}{Student-Teacher Ratio}
#' \item{scorer}{Test Score Percentile (Income Adjusted)}
#' \item{dropoutr}{High School Dropout Rate (Income Adjusted)}
#' \item{cseducba}{Percent College Grads}
#' \item{tuition}{College Tuition}
#' \item{gradrater}{College Graduation Rate (Income Adjusted)}
#' \item{erankb}{Absolute Mobility (Expected Rank at p25)}
#' \item{csfamwkidsinglemom}{Fraction of Children with Single Mother}
#' \item{crimetotal}{Total Crime Rate}
#' \item{subctyexppc}{Local Government Expenditures}
#' \item{taxrate}{Local Tax Rate}
#' \item{taxstdifftop20}{Tax Progressivity}
#' \item{avglifeQ1}{Life Expectancy Income Q1}
#' \item{avglifeQ2}{Life Expectancy Income Q2}
#' \item{avglifeQ3}{Life Expectancy Income Q3}
#' \item{avglifeQ4}{Life Expectancy Income Q4}
#' \item{avglifeM}{Life Expectancy Males}
#' \item{avglifeF}{Life Expectancy Females}
#' \item{avglifeall}{Life Expectancy Overall}
#' }
"HealthIneq"
| /R/HealthIneq.R | permissive | elachtara/HealthIneq | R | false | false | 4,554 | r | #' Health Inequities
#'
#' This package contains data on US counties
#' pertaining to life expectancy.
#' @docType package
#' @name HealthIneq
#' @aliases Health HealthIneq-package
NULL
#' "Health Inequities"
#'
#' A data set containing information on
#' health inequities across the United
#' States by county.
#'
#' @source \url{https://healthinequality.org/data/}
#' @format Codebook Descriptions:
#' \describe{
#' \item{cz}{Commuting Zone ID}
#' \item{czname}{Commuting Zone Name}
#' \item{pop2000}{Commuting Zone Population in 2000}
#' \item{fips}{State FIPS}
#' \item{statename}{State Name}
#' \item{stateabbrv}{State Abbreviation}
#' \item{cursmokeq1}{BRFSS: Fraction Current Smokers in Q1}
#' \item{cursmokeq2}{BRFSS: Fraction Current Smokers in Q2}
#' \item{cursmokeq3}{BRFSS: Fraction Current Smokers in Q3}
#' \item{cursmokeq4}{BRFSS: Fraction Current Smokers in Q4}
#' \item{bmiobeseq1}{BRFSS: Fraction Obese in Q1}
#' \item{bmiobeseq2}{BRFSS: Fraction Obese in Q2}
#' \item{bmiobeseq3}{BRFSS: Fraction Obese in Q3}
#' \item{bmiobeseq4}{BRFSS: Fraction Obese in Q4}
#' \item{exerciseanyq1}{BRFSS: Fraction Exercised in Past 30 Days in Q1}
#' \item{exerciseanyq2}{BRFSS: Fraction Exercised in Past 30 Days in Q2}
#' \item{exerciseanyq3}{BRFSS: Fraction Exercised in Past 30 Days in Q3}
#' \item{exerciseanyq4}{BRFSS: Fraction Exercised in Past 30 Days in Q4}
#' \item{puninsured2010}{Percent Uninsured}
#' \item{reimbpenrolladj10}{Medicare $ Per Enrollee}
#' \item{mort30dayhospz}{30-day Hospital Mortality Rate Index}
#' \item{adjmortmeasamiall30day}{30-day Mortality for Heart Attacks}
#' \item{adjmortmeaschfall30day}{30-day Mortality for Heart Failure}
#' \item{adjmortmeaspnall30day}{30-day Mortality for Pneumonia}
#' \item{medprevqualz}{Mean of Z-Scores for Dartmouth Atlas Ambulatory Care Measures}
#' \item{primcarevis10}{Percent of Medicare Enrollees with at Least One Primary Care Visit}
#' \item{diabhemotest10}{Percent Diabetic with Annual Hemoglobin Test}
#' \item{diabeyeexam10}{Percent Diabetic with Annual Eye Test}
#' \item{diablipids10}{Percent Diabetic with Annual Lipids Test}
#' \item{mammogram10}{Percent Female Aged 67-69 with Mammogram}
#' \item{ambdischper100010}{Discharges for Ambulatory Care Sensitive Conditions Among Medicare Enrollees}
#' \item{cs00seginc}{Income Segregation}
#' \item{cs00segincpov25}{Segregation of Poverty (< p25)}
#' \item{cs00segincaff75}{Segregation of Affluence (>p75)}
#' \item{csracetheil2000}{Racial Segregation}
#' \item{gini99}{Gini Index Within Bottom 99%}
#' \item{poorshare}{Poverty Rate}
#' \item{incshare1perc}{Top 1% Income Share}
#' \item{fracmiddleclass}{Fraction Middle Class (p25-p75)}
#' \item{scapski90pcm}{Social Capital Index}
#' \item{reltot}{Percent Religious}
#' \item{csfracblack}{Percent Black}
#' \item{csfrachisp}{Percent Hispanic}
#' \item{unemprate}{Unemployment Rate in 2000}
#' \item{popd20001980}{Percent Change in Population 1980-2000}
#' \item{lfd20001980}{Percent Change in Labor Force 1980-2000}
#' \item{cslabforce}{Labor Force Participation}
#' \item{cselfindman}{Share Working in Manufacturing}
#' \item{csbornforeign}{Percent Foreign Born}
#' \item{miginflow}{Migration Inflow Rate}
#' \item{migoutflow}{Migration Outflow Rate}
#' \item{popdensity}{Population Density}
#' \item{fractraveltimelt15}{Fraction with Commute < 15 Min}
#' \item{hhinc00}{Mean Household Income}
#' \item{medianhousevalue}{Median House Value}
#' \item{ccdexptot}{School Expenditure per Student}
#' \item{ccdpuptchratio}{Student-Teacher Ratio}
#' \item{scorer}{Test Score Percentile (Income Adjusted)}
#' \item{dropoutr}{High School Dropout Rate (Income Adjusted)}
#' \item{cseducba}{Percent College Grads}
#' \item{tuition}{College Tuition}
#' \item{gradrater}{College Graduation Rate (Income Adjusted)}
#' \item{erankb}{Absolute Mobility (Expected Rank at p25)}
#' \item{csfamwkidsinglemom}{Fraction of Children with Single Mother}
#' \item{crimetotal}{Total Crime Rate}
#' \item{subctyexppc}{Local Government Expenditures}
#' \item{taxrate}{Local Tax Rate}
#' \item{taxstdifftop20}{Tax Progressivity}
#' \item{avglifeQ1}{Life Expectancy Income Q1}
#' \item{avglifeQ2}{Life Expectancy Income Q2}
#' \item{avglifeQ3}{Life Expectancy Income Q3}
#' \item{avglifeQ4}{Life Expectancy Income Q4}
#' \item{avglifeM}{Life Expectancy Males}
#' \item{avglifeF}{Life Expectancy Females}
#' \item{avglifeall}{Life Expectancy Overall}
#' }
"HealthIneq"
|
# Fuzz-harness replay fixture: calls grattan::IncomeTax() with degenerate
# inputs -- empty rate/threshold schedules and an income vector of
# denormalised/zero values -- to probe robustness; str() prints the result.
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(3.0138004396316e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
#Input files ####
nucmisstab <- "NucMissingExcludeExon25.tsv" #missingness using nucleotide sequence
aamisstab <- "AAMissingExcludeExon25.tsv" #missingness using amino acid sequence
specieslist <- "SpeciesToInclude58Eutherian.txt" #list of species to include
stoptable <- 'LatestStopsExcludeExon25.tsv' #called stop codons using same exclusion criteria
shifttable <- 'LatestFrameshiftExcludeExon25.tsv' #called frameshifts using same exclusion criteria
#Output files ####
maxmisstab <- "MaxMissingExcludeExon25.tsv" #maximum missingness across nuc and aa for relevant species
excludedduplist <- "DuplicateGenesWithMultipleIsoformsSameMissingData.txt" #duplicate isoforms to exclude
allduptable <- "DuplicateGeneStatus.tsv" #table of duplicate genes, ucids, and inclusion/exclusion
maxmisstabnodups <- "MaxMissingExcludeExon25_nodups.tsv" #missingness table after dropping duplicate-gene isoforms
lesiontabnodups <- "LesionStatusExcludeExon25_nodups.tsv" #stop/frameshift lesion table after dropping duplicates
#Import subfunctions ####
source("subfunctions_missing_pseudo.R") #presumably provides check_data_frames() and calcerrorrates() used below
#Get maximum proportion of missing data from nuc and aa data ####
# Read the nucleotide- and amino-acid-based missingness tables; columns 1-2
# are identifiers (ucid, gene) and the remaining columns are one per species.
pmissnuc <- read.table(nucmisstab,header=T,as.is=T)
pmissaa <- read.table(aamisstab,header=T,as.is=T)
# Keep only the id columns plus the species named in the inclusion list.
inclsp <- scan(specieslist,what="character")
colstoincl <- c(1,2,which(names(pmissnuc) %in% inclsp))
pmissnuc <- pmissnuc[,colstoincl]
pmissaa <- pmissaa[,colstoincl]
# check_data_frames() (from the sourced subfunctions) is presumed to verify
# that both tables align on "ucid"; abort when they do not.
checknucaa <- check_data_frames(pmissnuc,pmissaa,"ucid")
stopifnot(checknucaa)
# Cell-wise maximum of the two missingness estimates, one species column at a time.
pmissmax <- pmissnuc
for (sp in c(3:ncol(pmissmax))) {
  pmissmax[,sp] <- pmax(pmissaa[,sp],pmissnuc[,sp]) #get maximum for each cell
}
write.table(pmissmax,file=maxmisstab,row.names=FALSE,quote=FALSE)
#Determine which orthologs to filter out for duplicate genes ####
# For every gene represented by more than one isoform (ucid), keep only the
# isoform with the least total missing data; if several isoforms tie for the
# minimum, the gene is flagged as non-differentiable and ALL of its isoforms
# are excluded.
dupgenes <- unique(pmissmax$gene[duplicated(pmissmax$gene)])
duptab <- pmissmax[pmissmax$gene %in% dupgenes,]
duptabtowrite <- duptab[,1:2]
duptabtowrite$include <- rep(TRUE,nrow(duptabtowrite))
duptab$totmiss <- rowSums(duptab[,3:ncol(duptab)])
# Preallocate one slot per duplicated gene (the previous `list(length(dupgenes))`
# built a 1-element list holding a count, not an empty container).
rowstorem <- vector("list",length(dupgenes))
nondiffgenes <- vector("list",length(dupgenes))
# seq_along() is safe when there are no duplicated genes (1:length() would
# iterate over c(1, 0) and fail).
for (w in seq_along(dupgenes)) {
  mindup <- duptab[which(duptab$gene==dupgenes[w]),]
  #isoforms with more than the minimum amount of missing data are candidates for removal
  ucidrem <- mindup$ucid[which(mindup$totmiss!=min(mindup$totmiss))]
  if (length(ucidrem)==(nrow(mindup)-1)) {
    #exactly one isoform attains the minimum: remove all the others
    rowstorem[[w]] <- which(pmissmax$ucid %in% ucidrem)
    nondiffgenes[[w]] <- NA
  } else {
    #ties for the minimum: the gene cannot be resolved, exclude every isoform
    print(paste("Multiple isoforms for",dupgenes[w],"have minimum missingness (",min(mindup$totmiss),")"))
    ucidrem <- as.vector(mindup$ucid)
    rowstorem[[w]] <- which(pmissmax$ucid %in% ucidrem)
    nondiffgenes[[w]] <- dupgenes[w]
  }
  duptabtowrite$include[which(duptabtowrite$ucid %in% ucidrem)] <- FALSE
}
#Record the non-differentiable genes and the per-isoform inclusion table
nondiffgenes <- unlist(nondiffgenes)
nondiffgenes <- nondiffgenes[!is.na(nondiffgenes)]
write(nondiffgenes,file=excludedduplist,ncolumns=1)
write.table(duptabtowrite,file=allduptable,row.names=FALSE,quote=FALSE)
duprowstorem <- unlist(rowstorem)
duprowstorem <- duprowstorem[!is.na(duprowstorem)] #save these values and eliminate from all working tables
#Read in and combine stops and frameshifts ####
# pboth[i, sp] is TRUE when species sp carries at least one lesion (premature
# stop codon and/or frameshift) for gene i.
pstop <- read.table(stoptable,header=T,as.is=TRUE)
pshift <- read.table(shifttable,header=T,as.is=TRUE)
pstop <- pstop[,colstoincl]
pshift <- pshift[,colstoincl]
checkstopshift <- check_data_frames(pstop,pshift,"ucid")
stopifnot(checkstopshift)
pboth <- pstop
pboth[,3:ncol(pboth)] <- pstop[,3:ncol(pstop)]+pshift[,3:ncol(pshift)]
pboth[,3:ncol(pboth)] <- pboth[,3:ncol(pboth)] > 0
#Remove duplicate genes from all tables and write to files ####
# Guard against an empty removal index: df[-integer(0), ] selects ZERO rows,
# which would silently wipe both tables when there are no duplicates to drop.
if (length(duprowstorem) > 0) {
  pmissmax <- pmissmax[-duprowstorem,]
  pboth <- pboth[-duprowstorem,]
}
write.table(pmissmax,file=maxmisstabnodups,row.names=FALSE,quote=FALSE)
write.table(pboth,file=lesiontabnodups,row.names=FALSE,quote=FALSE)
#Determine error rates for a given missingness threshold ####
checkmisslesion <- check_data_frames(pmissmax,pboth,"ucid")
stopifnot(checkmisslesion)
#Find the threshold where there will be no more than 1 error (in any species) for every 10 genes
# i.e. at most one functional gene mis-called per 10 genes, summed over the
# (ncol - 2) species columns.
accepterrorrate <- 1/(10*(ncol(pboth)-2))
# Candidate missingness cutoffs: 0.05, 0.10, ..., 1.00.
props <- c(1:20)/20
err <- list(length(props))
bestval <- NA
# Walk the cutoffs in increasing order and keep the last one whose functional
# false-positive rate (err[[p]][1], as returned by calcerrorrates() from the
# sourced subfunctions file) stays below the acceptable rate. The early break
# assumes the error rate grows monotonically with the cutoff.
# NOTE(review): if even props[1] fails, bestval stays NA and props[bestval]
# below would error -- confirm this cannot happen with real data.
for (p in 1:c(length(props))) {
  err[[p]] <- calcerrorrates(props[p],pmissmax[,3:ncol(pmissmax)],pboth[,3:ncol(pboth)])
  if (err[[p]][1] < accepterrorrate) {
    bestval <- p
  } else {
    break
  }
}
print(paste("The largest missingness cutoff that will result in no more that one",
            "mis-called functional gene across all species for every 10 genes in",
            "the dataset (functional false positive rate =",accepterrorrate,") is",
            props[bestval],"."))
print("The error rates associated with this missingness threshold are:")
print(err[[bestval]])
# Refine the threshold in 0.01 steps just above the best coarse cutoff, using
# the same monotonicity assumption.
if (bestval < length(props)) {
  newprops <- props[bestval]+.01*c(1:4)
  newerr <- list(length(newprops))
  newbestval <- NA
  for (p in 1:c(length(newprops))) {
    newerr[[p]] <- calcerrorrates(newprops[p],pmissmax[,3:ncol(pmissmax)],pboth[,3:ncol(pboth)])
    if (newerr[[p]][1] < accepterrorrate) {
      newbestval <- p
    } else {
      break
    }
  }
  print(paste("The largest missingness cutoff that will result in no more that one",
              "mis-called functional gene across all species for every 10 genes in",
              "the dataset (functional false positive rate =",accepterrorrate,") is",
              newprops[newbestval],"."))
  print("The error rates associated with this missingness threshold are:")
  print(newerr[[newbestval]])
}
#Plot some estimates of the distribution of missing data among pseudogenes and non-pseudogenes ####
# Flatten only the per-species data columns (3:ncol). The original unlisted
# the entire data frames, which swept the character ucid/gene ID columns into
# the "non-pseudogene" pool and produced NA-coercion warnings in as.numeric().
tlpmiss <- unlist(pmissmax[,3:ncol(pmissmax)])
tlpboth <- unlist(pboth[,3:ncol(pboth)])
pmisspseudo <- tlpmiss[which(tlpboth==TRUE)]
pmissnonpseudo <- tlpmiss[which(tlpboth==FALSE)]
#Plot histogram of missing data
hist(as.numeric(pmisspseudo),breaks=100,xlab="Proportion missing data",
     main="Histogram of proportion missing for callable pseudogenes\n(stops and frameshifts)")
# Mark the 90th/95th/99th percentiles of missingness among called pseudogenes
qcuts <- quantile(as.numeric(pmisspseudo),c(.9,.95,.99))
abline(v=qcuts,col=c("navy","blue","lightblue"))
legend(0.8,50000,legend=c(paste("q90:",qcuts[1]),paste("q95",qcuts[2]),paste("q99",qcuts[3]))
       ,col=c("navy","blue","light blue"),lty=1,cex=0.7,bty="n")
#Viewed as cdf
epmiss <- ecdf(as.numeric(pmisspseudo))
enpmiss <- ecdf(as.numeric(pmissnonpseudo))
# Same CDFs excluding the degenerate all-present (0) / all-missing (1) values
epmissnoext <- ecdf(as.numeric(pmisspseudo[pmisspseudo %in% c("0","1")==F]))
enpmissnoext <- ecdf(as.numeric(pmissnonpseudo[pmissnonpseudo %in% c("0","1")==F]))
plot(epmiss,xlab="Proportion missing",ylab="Cumulative fraction",main="CDF of proportion missing")
plot(enpmiss,add=T,col="blue")
plot(epmissnoext,add=T,col="gray")
plot(enpmissnoext,add=T,col="light blue")
legend("right",legend=c("Called pseudogenes\n(stops & frameshifts)","All other",
                        "Called pseudogenes, no 0","All other, no 0 or 1"),lty=1,
       col=c("black","blue","gray","light blue"))
| /PseudogeneIdentification/AnalyzeMissingDataForPseudogenes.R | no_license | bioCKO/MarineFxLoss | R | false | false | 6,733 | r | #Input files ####
nucmisstab <- "NucMissingExcludeExon25.tsv" #missingness using nucleotide sequence
aamisstab <- "AAMissingExcludeExon25.tsv" #missingness using amino acid sequence
specieslist <- "SpeciesToInclude58Eutherian.txt" #list of species to include
stoptable <- 'LatestStopsExcludeExon25.tsv' #called stop codons using same exclusion criteria
shifttable <- 'LatestFrameshiftExcludeExon25.tsv' #called frameshifts using same exclusion criteria
#Output files ####
maxmisstab <- "MaxMissingExcludeExon25.tsv" #maximum missingness across nuc and aa for relevant species
excludedduplist <- "DuplicateGenesWithMultipleIsoformsSameMissingData.txt" #duplicate isoforms to exclude
allduptable <- "DuplicateGeneStatus.tsv" #table of duplicate genes, ucids, and inclusion/exclusion
maxmisstabnodups <- "MaxMissingExcludeExon25_nodups.tsv"
lesiontabnodups <- "LesionStatusExcludeExon25_nodups.tsv"
#Import subfunctions ####
source("subfunctions_missing_pseudo.R")
#Get maximum proportion of missing data from nuc and aa data ####
pmissnuc <- read.table(nucmisstab,header=T,as.is=T)
pmissaa <- read.table(aamisstab,header=T,as.is=T)
inclsp <- scan(specieslist,what="character")
colstoincl <- c(1,2,which(names(pmissnuc) %in% inclsp))
pmissnuc <- pmissnuc[,colstoincl]
pmissaa <- pmissaa[,colstoincl]
checknucaa <- check_data_frames(pmissnuc,pmissaa,"ucid")
stopifnot(checknucaa)
pmissmax <- pmissnuc
for (sp in c(3:ncol(pmissmax))) {
pmissmax[,sp] <- pmax(pmissaa[,sp],pmissnuc[,sp]) #get maximum for each cell
}
write.table(pmissmax,file=maxmisstab,row.names=FALSE,quote=FALSE)
#Determine which orthologs to filter out for duplicate genes ####
dupgenes <- unique(pmissmax$gene[duplicated(pmissmax$gene)])
duptab <- pmissmax[pmissmax$gene %in% dupgenes,]
duptabtowrite <- duptab[,1:2]
duptabtowrite$include <- c(rep(TRUE,nrow(duptabtowrite)))
duptab$totmiss <- rowSums(duptab[,3:ncol(duptab)])
rowstorem <- list(length(dupgenes))
nondiffgenes <- list(length(dupgenes))
for (w in 1:length(dupgenes)) {
mindup <- duptab[which(duptab$gene==dupgenes[w]),]
ucidrem <- mindup$ucid[which(mindup$totmiss!=min(mindup$totmiss))]
if (length(ucidrem)==(nrow(mindup)-1)) {
rowstorem[[w]] <- which(pmissmax$ucid %in% ucidrem)
nondiffgenes[[w]] <- NA
} else {
print(paste("Multiple isoforms for",dupgenes[w],"have minimum missingness (",min(mindup$totmiss),")"))
ucidrem <- as.vector(mindup$ucid)
rowstorem[[w]] <- which(pmissmax$ucid %in% ucidrem)
nondiffgenes[[w]] <- dupgenes[w]
}
duptabtowrite$include[which(duptabtowrite$ucid %in% ucidrem)] <- FALSE
}
nondiffgenes <- unlist(nondiffgenes)
nondiffgenes <- nondiffgenes[is.na(nondiffgenes)==F]
write(nondiffgenes,file=excludedduplist,ncolumns=1)
write.table(duptabtowrite,file=allduptable,row.names=FALSE,quote=FALSE)
duprowstorem <- unlist(rowstorem)
duprowstorem <- duprowstorem[is.na(duprowstorem)==F] #save these values and eliminate from all working tables
#Read in and combine stops and frameshifts ####
pstop <- read.table(stoptable,header=T,as.is=TRUE)
pshift <- read.table(shifttable,header=T,as.is=TRUE)
pstop <- pstop[,colstoincl]
pshift <- pshift[,colstoincl]
checkstopshift <- check_data_frames(pstop,pshift,"ucid")
stopifnot(checkstopshift)
pboth <- pstop
pboth[,3:ncol(pboth)] <- pstop[,3:ncol(pstop)]+pshift[,3:ncol(pshift)]
pboth[,3:ncol(pboth)] <- pboth[,3:ncol(pboth)] > 0
#Remove duplicate genes from all tables and write to files ####
pmissmax <- pmissmax[-duprowstorem,]
pboth <- pboth[-duprowstorem,]
write.table(pmissmax,file=maxmisstabnodups,row.names=FALSE,quote=FALSE)
write.table(pboth,file=lesiontabnodups,row.names=FALSE,quote=FALSE)
#Determine error rates for a given missingness threshold ####
checkmisslesion <- check_data_frames(pmissmax,pboth,"ucid")
stopifnot(checkmisslesion)
#Find the threshold where there will be no more than 1 error (in any species) for every 10 genes
accepterrorrate <- 1/(10*(ncol(pboth)-2))
props <- c(1:20)/20
err <- list(length(props))
bestval <- NA
for (p in 1:c(length(props))) {
err[[p]] <- calcerrorrates(props[p],pmissmax[,3:ncol(pmissmax)],pboth[,3:ncol(pboth)])
if (err[[p]][1] < accepterrorrate) {
bestval <- p
} else {
break
}
}
print(paste("The largest missingness cutoff that will result in no more that one",
"mis-called functional gene across all species for every 10 genes in",
"the dataset (functional false positive rate =",accepterrorrate,") is",
props[bestval],"."))
print("The error rates associated with this missingness threshold are:")
print(err[[bestval]])
if (bestval < length(props)) {
newprops <- props[bestval]+.01*c(1:4)
newerr <- list(length(newprops))
newbestval <- NA
for (p in 1:c(length(newprops))) {
newerr[[p]] <- calcerrorrates(newprops[p],pmissmax[,3:ncol(pmissmax)],pboth[,3:ncol(pboth)])
if (newerr[[p]][1] < accepterrorrate) {
newbestval <- p
} else {
break
}
}
print(paste("The largest missingness cutoff that will result in no more that one",
"mis-called functional gene across all species for every 10 genes in",
"the dataset (functional false positive rate =",accepterrorrate,") is",
newprops[newbestval],"."))
print("The error rates associated with this missingness threshold are:")
print(newerr[[newbestval]])
}
#Plot some estimates of the distribution of missing data among pseudogenes and non-pseudogenes ####
tlpmiss <- unlist(pmissmax)
tlpboth <- unlist(pboth)
pmisspseudo <- tlpmiss[which(tlpboth==TRUE)]
pmissnonpseudo <- tlpmiss[which(tlpboth==FALSE)]
#Plot histogram of missing data
hist(as.numeric(pmisspseudo),breaks=100,xlab="Proportion missing data",
main="Histogram of proportion missing for callable pseudogenes\n(stops and frameshifts)")
qcuts <- quantile(as.numeric(pmisspseudo),c(.9,.95,.99))
abline(v=qcuts,col=c("navy","blue","lightblue"))
legend(0.8,50000,legend=c(paste("q90:",qcuts[1]),paste("q95",qcuts[2]),paste("q99",qcuts[3]))
,col=c("navy","blue","light blue"),lty=1,cex=0.7,bty="n")
#Viewed as cdf
epmiss <- ecdf(as.numeric(pmisspseudo))
enpmiss <- ecdf(as.numeric(pmissnonpseudo))
epmissnoext <- ecdf(as.numeric(pmisspseudo[pmisspseudo %in% c("0","1")==F]))
enpmissnoext <- ecdf(as.numeric(pmissnonpseudo[pmissnonpseudo %in% c("0","1")==F]))
plot(epmiss,xlab="Proportion missing",ylab="Cumulative fraction",main="CDF of proportion missing")
plot(enpmiss,add=T,col="blue")
plot(epmissnoext,add=T,col="gray")
plot(enpmissnoext,add=T,col="light blue")
legend("right",legend=c("Called pseudogenes\n(stops & frameshifts)","All other",
"Called pseudogenes, no 0","All other, no 0 or 1"),lty=1,
col=c("black","blue","gray","light blue"))
|
# Research-methodology coursework script: basic R data structures, then a
# series of simple linear regressions on CEO salary data (ceosal1.xls).
# --- Part 1: vectors, factors, lists, and data frames -------------------
subject_name <- c("John Doe", "Jane Doe", "Steve Graves")
temperature <- c(98.1, 98.6, 101.4)
flu_status <- c(FALSE, FALSE, TRUE)
gender <- factor(c("MALE", "FEMALE", "MALE"))
# Explicit level order so all four blood types are represented as levels
blood <- factor(c("O", "AB", "A"),levels=c("A", "B", "AB", "O"))
# One patient's record as a named list
subject1<- list(fullname=subject_name[1], temperature=temperature[1], flu_status=flu_status[1], gender=gender[1], blood=blood[1])
subject1
pt_data <- data.frame(subject_name, temperature, flu_status,gender,blood, stringsAsFactors = FALSE)
pt_data
#############################
# NOTE(review): methodology1 is read but never used again -- confirm needed.
methodology1 <- read.csv("ceosal1.csv", stringsAsFactors = FALSE, header = FALSE)
#####################################
# --- Part 2: CEO salary data, one simple regression per section ---------
# NOTE(review): install.packages() inside a script reinstalls on every run;
# install once interactively instead.
install.packages("readxl")
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
# salary ~ sales, with fitted regression line overlaid on the scatterplot
plot(salary~sales, data=Assignment)
fit<- lm(salary~sales, data=Assignment)
abline(fit, col="blue")
summary(fit)
##########################################
library(readxl)
research<- read_excel("a1.xlsx")
research
plot(salary~sales, data=research)
fit<- lm(salary~sales, data=research)
abline(fit, col='blue')
summary(fit)
# NOTE(review): influencePlot() comes from the `car` package, which is never
# loaded in this script -- add library(car) or this call fails.
influencePlot(fit, id.method ="identify")
################################################
mean(Assignment$sales)
Assignment$sales
plot(salary~roe, data=Assignment)
fit2<- lm(salary~roe, data=Assignment)
abline(fit2, col='blue')
summary(fit2)
fit2$residuals
############# Outlier observations ##############
influencePlot(fit2, id.method ="identify")
############################################
# --- Remaining pairwise regressions among salary, sales, and roe --------
install.packages("readxl")
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(salary~roe, data=Assignment)
fit<- lm(salary~roe, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(sales~salary, data=Assignment)
fit<- lm(sales~salary, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(sales~roe, data=Assignment)
fit<- lm(sales~roe, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(roe~salary, data=Assignment)
fit<- lm(roe~salary, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(roe~sales, data=Assignment)
fit<- lm(roe~sales, data=Assignment)
abline(fit, col="blue")
summary(fit)
summary(Assignment$roe)
summary(Assignment$sales)
########################################
# Same salary ~ sales regression on the a1.xlsx dataset
library(readxl)
Assignment <- read_excel("a1.xlsx")
Assignment
plot(salary~sales, data=Assignment)
fit<- lm(salary~sales, data=Assignment)
abline(fit, col="blue")
summary(fit)
summary(Assignment$roe)
summary(Assignment$sales)
| /연구조사방법론 과제.R | no_license | apentk78/RMarkdown | R | false | false | 2,872 | r | subject_name <- c("John Doe", "Jane Doe", "Steve Graves")
temperature <- c(98.1, 98.6, 101.4)
flu_status <- c(FALSE, FALSE, TRUE)
gender <- factor(c("MALE", "FEMALE", "MALE"))
blood <- factor(c("O", "AB", "A"),levels=c("A", "B", "AB", "O"))
subject1<- list(fullname=subject_name[1], temperature=temperature[1], flu_status=flu_status[1], gender=gender[1], blood=blood[1])
subject1
pt_data <- data.frame(subject_name, temperature, flu_status,gender,blood, stringsAsFactors = FALSE)
pt_data
#############################
methodology1 <- read.csv("ceosal1.csv", stringsAsFactors = FALSE, header = FALSE)
#####################################
install.packages("readxl")
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(salary~sales, data=Assignment)
fit<- lm(salary~sales, data=Assignment)
abline(fit, col="blue")
summary(fit)
##########################################
library(readxl)
research<- read_excel("a1.xlsx")
research
plot(salary~sales, data=research)
fit<- lm(salary~sales, data=research)
abline(fit, col='blue')
summary(fit)
influencePlot(fit, id.method ="identify")
################################################
mean(Assignment$sales)
Assignment$sales
plot(salary~roe, data=Assignment)
fit2<- lm(salary~roe, data=Assignment)
abline(fit2, col='blue')
summary(fit2)
fit2$residuals
#############이상관측치##############
influencePlot(fit2, id.method ="identify")
############################################
install.packages("readxl")
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(salary~roe, data=Assignment)
fit<- lm(salary~roe, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(sales~salary, data=Assignment)
fit<- lm(sales~salary, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(sales~roe, data=Assignment)
fit<- lm(sales~roe, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(roe~salary, data=Assignment)
fit<- lm(roe~salary, data=Assignment)
abline(fit, col="blue")
summary(fit)
######################################
library(readxl)
Assignment <- read_excel("ceosal1.xls")
Assignment
plot(roe~sales, data=Assignment)
fit<- lm(roe~sales, data=Assignment)
abline(fit, col="blue")
summary(fit)
summary(Assignment$roe)
summary(Assignment$sales)
########################################
library(readxl)
Assignment <- read_excel("a1.xlsx")
Assignment
plot(salary~sales, data=Assignment)
fit<- lm(salary~sales, data=Assignment)
abline(fit, col="blue")
summary(fit)
summary(Assignment$roe)
summary(Assignment$sales)
|
#Cleaning data for World Bank(Inflation)
# Clean a raw World Bank inflation CSV down to four countries.
#
# Reads `rawdirc/x` (skipping the four metadata lines World Bank exports
# start with), keeps the 2006-2013 columns for Canada, New Zealand,
# South Africa and the United States, transposes to a years-by-country
# matrix, and writes it to `directory/clean_<x>`.
#
# Args:
#   rawdirc:   directory containing the raw CSV
#   x:         file name of the raw CSV
#   directory: output directory for the cleaned CSV (must already exist)
clean_wrldinf <- function(rawdirc, x, directory = "./data/") {
  raw <- read.csv(file.path(rawdirc, x), skip = 4, stringsAsFactors = FALSE)
  keep <- grepl("Canada|New Zealand|South Africa|United States", raw$Country.Name)
  newdf <- raw[keep, c("Country.Name", paste0("X", 2006:2013))]
  # Transpose to years-by-country. Label the columns with the country names
  # actually found in the file: the original hard-coded the column order,
  # which silently mislabels countries if the raw file's row order differs.
  mat <- t(newdf[, -1])
  dimnames(mat) <- list(2006:2013, newdf$Country.Name)
  write.csv(mat, file.path(directory, paste0("clean_", x)))
}
#Cleaning
# Driver: clean the World Bank inflation export into ./data/...
# NOTE(review): write.csv does not create directories -- inf_dir must exist.
rawdirc <- "./rawdata/Worldbank/Inflation"
inf_dir <- "./data/Worldbank/Inflation"
clean_wrldinf(rawdirc, x = "world_inflation.csv", directory = inf_dir)
| /code/clean/WorldBank_Inflation.R | no_license | audrey-webb/inflation-unemployment-analysis | R | false | false | 752 | r | #Cleaning data for World Bank(Inflation)
clean_wrldinf <- function(rawdirc, x, directory = "./data/") {
raw <- read.csv(paste0(rawdirc, "/", x), skip = 4, stringsAsFactors = FALSE)
newdf <- raw[which(grepl("Canada|New Zealand|South Africa|United States",
raw$Country.Name) == TRUE), c("Country.Name", paste0("X", 2006:2013))]
names(newdf) <- c("Country", 2006:2013)
rownames(newdf) <- NULL
newdf <- t(newdf)[-1, ]
colnames(newdf) <- c("Canada", "New Zealand", "United States", "South Africa")
write.csv(newdf, paste0(directory, "/clean_", x))
}
#Cleaning
rawdirc <- "./rawdata/Worldbank/Inflation"
inf_dir <- "./data/Worldbank/Inflation"
clean_wrldinf(rawdirc, x = "world_inflation.csv", directory = inf_dir)
|
#
# results <- fitzRoy:::results
# fixture <- fitzRoy:::fixture
# stats <- fitzRoy:::stats
# stats_gf <- fitzRoy:::stats_gf
# ladder <- fitzRoy:::ladder
# ladder_round <- fitzRoy:::ladder_round
# sources <- fitzRoy:::sources
# tips <- fitzRoy:::tips
# tips_round <- fitzRoy:::tips_round
#
#
# library(dplyr)
# library(elo)
# library(lubridate)
# library(fitzRoy)
#
#
# results <- get_match_results()
# stats <- get_afltables_stats(start_date = "2018-01-01", end_date = "2018-06-01")
#
# tail(stats)
# ```
#
#
# ### Fixture
# You can access the fixture using `get_fixture` function. This will download the fixture for the current calendar year by default.
#
# ```{r fixture, eval=FALSE}
# fixture <- get_fixture()
# ```
# ```{r fixture2, eval=eval_param}
# head(fixture)
# ```
# ### Footywire Advanced Player Stats
# Footywire data is available in the form of advanced player match statistics from 2010 games onwards. This is when advanced statistics became available.
#
# *Note - as of v0.2.0, all internal data has been removed from the package. Please use the relevant functions instead.*
#
# The following code no longer works.
# ```{r footywire, eval=FALSE, include=TRUE}
# ## Show the top of player_stats
# head(fitzRoy::player_stats)
# ```
#
# We can also use the `update_footywire_stats` function to get the most up to date data. This will merge data from 2010-current with any new data points.
#
# ```{r update_footywire, eval=FALSE, include=TRUE}
# ## Update footywire data
# dat <- update_footywire_stats()
# ```
#
# Alternatively, we can just return one game if we know it's ID. This can be found by looking at the URL of the match you want. For example, the ID of the 2019 AFL Grand Final is 9927.
#
# https://www.footywire.com/afl/footy/ft_match_statistics?mid=9927
#
# ```{r get_footywire_gf, eval=FALSE, include=TRUE}
# ## Update footywire data
# stats_gf <- get_footywire_stats(ids = 9927)
# ```
#
# ```{r get_footywire_gf2, eval=eval_param, include=TRUE}
# head(stats_gf)
# ```
#
# ### Weather
# Note - as of v0.2.0 this has been removed
#
#
# ### Squiggle Data
# You can access data from the [Squiggle API](https://api.squiggle.com.au) where the tips of well known AFL tipping models are collected. See full instructions on the above link.
#
# ```{r squiggle_sources1, message=FALSE, warning=FALSE, eval=FALSE}
# # You can get the sources
# sources <- get_squiggle_data("sources")
# ```
# ```{r squiggle_sources2, message=FALSE, warning=FALSE, eval=eval_param}
# head(sources)
# ```
#
# ```{r squiggle_tips1, message=FALSE, warning=FALSE, eval=FALSE}
# # Get all tips
# tips <- get_squiggle_data("tips")
# ```
# ```{r squiggle_tips2, message=FALSE, warning=FALSE, eval=eval_param}
# head(tips)
# ```
#
# ```{r squiggle_round1, message=FALSE, warning=FALSE, eval=FALSE}
# # Get just tips from round 1, 2018
# tips_round <- get_squiggle_data("tips", round = 1, year = 2018)
# ```
# ```{r squiggle_round2, message=FALSE, warning=FALSE, eval=eval_param}
# head(tips_round)
# ```
#
#
# ### Create Ladder
#
# You can recreate the ladder for every round of the home and away season since 1897. You can either pass in a dataframe extracted using `get_match_results` (ideal as `get_match_results` doesn't need to be executed every time `return_ladder` is called):
#
# ```{r ladder1, message=FALSE, warning=FALSE, eval=FALSE}
# ladder <- return_ladder(match_results_df = results)
# ```
# ```{r ladder2, message=FALSE, warning=FALSE, eval=eval_param}
# head(ladder)
# ```
#
# Or leave the `match_results_df` argument blank (which will execute the `get_match_results()` function internally):
#
# ```{r ladder3, message=FALSE, warning=FALSE, eval=FALSE}
# ladder <- return_ladder()
# ```
#
# Alternatively, we can also return the ladder for any round, or any season, or a combination of both round and season:
#
# ```{r ladder4, message=FALSE, warning=FALSE, eval=FALSE}
# ladder_round <- return_ladder(match_results_df = results, season_round = 15, season = 2018)
# ```
# ```{r ladder5, message=FALSE, warning=FALSE, eval=eval_param}
# head(ladder_round)
# ```
# ---
# ```{r reset-options, message=FALSE, warning=FALSE, include=FALSE}
# options(original_options)
# ```
| /data-raw/vignette-data/mens-stats/mens-stats-data.R | no_license | jimmyday12/fitzRoy | R | false | false | 4,186 | r | #
# results <- fitzRoy:::results
# fixture <- fitzRoy:::fixture
# stats <- fitzRoy:::stats
# stats_gf <- fitzRoy:::stats_gf
# ladder <- fitzRoy:::ladder
# ladder_round <- fitzRoy:::ladder_round
# sources <- fitzRoy:::sources
# tips <- fitzRoy:::tips
# tips_round <- fitzRoy:::tips_round
#
#
# library(dplyr)
# library(elo)
# library(lubridate)
# library(fitzRoy)
#
#
# results <- get_match_results()
# stats <- get_afltables_stats(start_date = "2018-01-01", end_date = "2018-06-01")
#
# tail(stats)
# ```
#
#
# ### Fixture
# You can access the fixture using `get_fixture` function. This will download the fixture for the current calendar year by default.
#
# ```{r fixture, eval=FALSE}
# fixture <- get_fixture()
# ```
# ```{r fixture2, eval=eval_param}
# head(fixture)
# ```
# ### Footywire Advanced Player Stats
# Footywire data is available in the form of advanced player match statistics from 2010 games onwards. This is when advanced statistics became available.
#
# *Note - as of v0.2.0, all internal data has been removed from the package. Please use the relevant functions instead.*
#
# The following code no longer works.
# ```{r footywire, eval=FALSE, include=TRUE}
# ## Show the top of player_stats
# head(fitzRoy::player_stats)
# ```
#
# We can also use the `update_footywire_stats` function to get the most up to date data. This will merge data from 2010-current with any new data points.
#
# ```{r update_footywire, eval=FALSE, include=TRUE}
# ## Update footywire data
# dat <- update_footywire_stats()
# ```
#
# Alternatively, we can just return one game if we know it's ID. This can be found by looking at the URL of the match you want. For example, the ID of the 2019 AFL Grand Final is 9927.
#
# https://www.footywire.com/afl/footy/ft_match_statistics?mid=9927
#
# ```{r get_footywire_gf, eval=FALSE, include=TRUE}
# ## Update footywire data
# stats_gf <- get_footywire_stats(ids = 9927)
# ```
#
# ```{r get_footywire_gf2, eval=eval_param, include=TRUE}
# head(stats_gf)
# ```
#
# ### Weather
# Note - as of v0.2.0 this has been removed
#
#
# ### Squiggle Data
# You can access data from the [Squiggle API](https://api.squiggle.com.au) where the tips of well known AFL tipping models are collected. See full instructions on the above link.
#
# ```{r squiggle_sources1, message=FALSE, warning=FALSE, eval=FALSE}
# # You can get the sources
# sources <- get_squiggle_data("sources")
# ```
# ```{r squiggle_sources2, message=FALSE, warning=FALSE, eval=eval_param}
# head(sources)
# ```
#
# ```{r squiggle_tips1, message=FALSE, warning=FALSE, eval=FALSE}
# # Get all tips
# tips <- get_squiggle_data("tips")
# ```
# ```{r squiggle_tips2, message=FALSE, warning=FALSE, eval=eval_param}
# head(tips)
# ```
#
# ```{r squiggle_round1, message=FALSE, warning=FALSE, eval=FALSE}
# # Get` just tips from round 1, 2018
# tips_round <- get_squiggle_data("tips", round = 1, year = 2018)
# ```
# ```{r squiggle_round2, message=FALSE, warning=FALSE, eval=eval_param}
# head(tips_round)
# ```
#
#
# ### Create Ladder
#
# You can recreate the ladder for every round of the home and away season since 1897. You can either pass in a dataframe extracted using `get_match_results` (ideal as `get_match_results` doesn't need to be executed every time `return_ladder` is called):
#
# ```{r ladder1, message=FALSE, warning=FALSE, eval=FALSE}
# ladder <- return_ladder(match_results_df = results)
# ```
# ```{r ladder2, message=FALSE, warning=FALSE, eval=eval_param}
# head(ladder)
# ```
#
# Or leave the `match_results_df` argument blank (which will execute the `get_match_results()` function internally):
#
# ```{r ladder3, message=FALSE, warning=FALSE, eval=FALSE}
# ladder <- return_ladder()
# ```
#
# Alternatively, we can also return the ladder for any round, or any season, or a combination of both round and season:
#
# ```{r ladder4, message=FALSE, warning=FALSE, eval=FALSE}
# ladder_round <- return_ladder(match_results_df = results, season_round = 15, season = 2018)
# ```
# ```{r ladder5, message=FALSE, warning=FALSE, eval=eval_param}
# head(ladder_round)
# ```
# ---
# ```{r reset-options, message=FALSE, warning=FALSE, include=FALSE}
# options(original_options)
# ```
|
# To run this script, ensure "household_power_consumption.txt"
# is in your working directory
# Load data from the working directory; "?" marks missing readings
power.all <- read.table("household_power_consumption.txt",
                        header=TRUE,sep=";",na.strings="?")
# Convert Date column to Date class
power.all$Date<-as.Date(power.all$Date,format="%d/%m/%Y")
# Subset from dates 2007-02-01 to 2007-02-02
power<-subset(power.all,subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(power.all)
# Combine date and time into a POSIXct timestamp for the x axis
# (the original leaked a global Date_time via a nested assignment and
# then removed it with rm; a plain assignment avoids that dance)
power$Date_time <- as.POSIXct(with(power, paste(Date, Time)))
# Open the PNG device BEFORE drawing. The original used dev.copy(png, ...)
# here, which errors in a fresh session because there is no screen device
# to copy from yet.
png(filename="plot3.png", width=480, height=480)
# Draw line plot of the three sub-metering series
with(power, {
  plot(Date_time, Sub_metering_1, type="l",
       ylab="Energy sub metering", xlab="")
  lines(Date_time,Sub_metering_2,col='Red')
  lines(Date_time,Sub_metering_3,col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=1, cex=.7,
         legend=c("Sub metering 1", "Sub metering 2", "Sub metering 3"))
})
# Close graphics device (dev.off() follows on the next line)
dev.off() | /Project 1/plot3.R | no_license | phidesigner/Exploratory-Data | R | false | false | 1,123 | r | # To run this script, ensure "household_power_consumption.txt"
# is in your working directory
# load data from Working directory
power.all <- read.table("household_power_consumption.txt",
header=TRUE,sep=";",na.strings="?")
# Convert Data column to Data class
power.all$Date<-as.Date(power.all$Date,format="%d/%m/%Y")
# Subset from dates 2007-02-01 to 2007-02-02
power<-subset(power.all,subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(power.all)
# New column linking Dates with times for linear plots' required higher detail
power$Date_time<-as.POSIXct(Date_time<-with(power,paste(Date,Time)))
rm(Date_time)
#Save to PNG
dev.copy(png,file="plot3.png",480,480)
# Draw linear Plot
with(power, {
plot(Date_time, Sub_metering_1, type="l",
ylab="Energy sub metering", xlab="")
lines(Date_time,Sub_metering_2,col='Red')
lines(Date_time,Sub_metering_3,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=1, cex=.7,
legend=c("Sub metering 1", "Sub metering 2", "Sub metering 3"))
})
# Close graphic devise
dev.off() |
# Read the raw power data; "?" placeholders stay as text and become NA below
power <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
# Keep only the two days of interest
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Coerce the measurement to numeric; "?" entries turn into NA
power$Global_active_power <- as.numeric(as.character(power$Global_active_power))
# Full timestamp for the x axis
power$dt <- as.POSIXct(paste(power$Date, power$Time))
# Render the line chart straight to a PNG file
png('plot2.png')
plot(power$dt, power$Global_active_power, type="l",
     ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
| /plot2.R | no_license | jstrzelec/ExData_Plotting1 | R | false | false | 413 | r |
d <- read.table("household_power_consumption.txt",sep=";",header=TRUE)
d$Date <- as.Date(d$Date,"%d/%m/%Y")
d <- subset( d,Date>=as.Date("2007-02-01") & Date<=as.Date("2007-02-02"))
d$Global_active_power <- as.numeric(as.character(d$Global_active_power))
d$dt <- as.POSIXct(paste(d$Date,d$Time))
png('plot2.png')
plot(d$dt,d$Global_active_power,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off()
|
# HMDA home-loan denial regressions (data.table syntax).
# NOTE(review): `homeLoans` must already exist in the workspace; this file
# looks like a fragment of a larger pipeline.
# `:=` modifies homeLoans by reference, so `loans` is the SAME object;
# rm(homeLoans) below only removes the extra name, not the data.
loans <- homeLoans[derived_race == "Race Not Available", derived_race := NA]
rm(homeLoans)
# loan_to_value_ratio arrives as character; non-numeric entries become NA.
loans$loan_to_value_ratio <- as.integer(loans$loan_to_value_ratio)
#simple regression
# NOTE(review): mixing `loans$loan_to_value_ratio` with bare column names in
# a formula that already has data=loans is fragile; use the bare name.
simpleLinear <- lm(denied ~ income + derived_race + loans$loan_to_value_ratio, data = loans)
summary(simpleLinear)
# Exclude applications denied for credit history (denial_reason-1 == 3).
# NOTE(review): rows with NA in `denial_reason-1` are silently dropped too
# by this filter -- confirm that is intended.
notCred <- loans[loans$`denial_reason-1` != 3]
#Leaving out credit rejections regression
# NOTE(review): the name `subset` shadows base::subset from here on.
subset <- notCred[,.(denied, income, derived_race, loan_amount, loan_to_value_ratio, interest_rate, debt_to_income_ratio, derived_sex, tract_minority_population_percent, tract_to_msa_income_percentage, ffiec_msa_md_median_family_income)]
subset <- subset[, msa_income_percentage := as.integer(subset$tract_to_msa_income_percentage)]
subset <- subset[, minority_population_percent := as.integer(subset$tract_minority_population_percent)]
linear <- lm(subset$denied ~ income+ debt_to_income_ratio + subset$derived_race + loan_amount + derived_sex + minority_population_percent + msa_income_percentage, data = subset)
summary(linear)
loans <- homeLoans[derived_race == "Race Not Available", derived_race := NA]
rm(homeLoans)
loans$loan_to_value_ratio <- as.integer(loans$loan_to_value_ratio)
#simple regression
simpleLinear <- lm(denied ~ income + derived_race + loans$loan_to_value_ratio, data = loans)
summary(simpleLinear)
notCred <- loans[loans$`denial_reason-1` != 3]
#Leaving out credit rejections regression
subset <- notCred[,.(denied, income, derived_race, loan_amount, loan_to_value_ratio, interest_rate, debt_to_income_ratio, derived_sex, tract_minority_population_percent, tract_to_msa_income_percentage, ffiec_msa_md_median_family_income)]
subset <- subset[, msa_income_percentage := as.integer(subset$tract_to_msa_income_percentage)]
subset <- subset[, minority_population_percent := as.integer(subset$tract_minority_population_percent)]
linear <- lm(subset$denied ~ income+ debt_to_income_ratio + subset$derived_race + loan_amount + derived_sex + minority_population_percent + msa_income_percentage, data = subset)
summary(linear) |
## R file for the analysis of references of Venice in the medium of poetry.
## This file was created as part of the SHS project "Venice in Poetry"
## of the course Digital Humanities taught by Prof. Frédéric Kaplan
## at EPFL in 2015-2016.
##
## Copyright (C) 2016 Marvin
##
## This program is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## Author: Dario Marvin
## Created: 2016-04-28
## ----------------- Load useful packages ------------------
#install.packages("stringr") # if needed
library(stringr)
## ---------- Erase everything stored in memory ------------
# NOTE(review): rm(list=ls()), cat("\014") and options(warn=-1) wipe the
# user's workspace, clear the RStudio console and silence all warnings.
# Fine for a standalone interactive analysis script, but never source()
# this file from other code.
rm(list=ls())
cat("\014")
options(warn=-1)
## ------------------- Useful functions --------------------
readinteger <- function() {
  # Prompt until the user enters a valid menu choice (1 or 2) and
  # return it as an integer; re-prompts recursively on bad input.
  #
  # The raw readline() result is a character string, so we must convert
  # before range-checking: comparing the string directly (the old
  # `n > 2`) coerces 2 to "2" and compares lexicographically, wrongly
  # accepting out-of-range input such as "10" or "0".
  n <- readline(prompt = "Enter your choice: ")
  if (!grepl("^[0-9]+$", n) || !(as.integer(n) %in% c(1L, 2L))) {
    return(readinteger())
  }
  as.integer(n)
}
read_y_n <- function() {
  # Ask a yes/no question; returns TRUE for an affirmative answer.
  # Input is trimmed and lower-cased so "y", "Y", " yes " and "1" are
  # all accepted (a backward-compatible superset of the original "y"/1);
  # anything else, including an empty line, counts as "no".
  n <- tolower(trimws(readline(prompt = "Enter your choice: ")))
  n %in% c("y", "yes", "1")
}
## ---------------------- Text choice ----------------------
cat ("Please choose a poem:","\n","1: Commedia, by Dante Alighieri","\n",
"2: Orlando Furioso, by Ludovico Ariosto","\n","\n");
choice <- readinteger()
# Download the chosen poem's plain text from Project Gutenberg,
# one line of verse per vector element.
if (choice == 1) {
text = scan("http://www.gutenberg.org/files/1012/1012-0.txt", what="character", sep="\n", encoding="UTF-8")
} else if (choice == 2) {
text = scan("http://www.gutenberg.org/files/3747/3747-0.txt", what="character", sep="\n", encoding="UTF-8")
} else
print("Error! Choose a possible value")
## --------------------- Text analysis ---------------------
# Strip the Gutenberg header/footer; the line ranges are hard-coded for
# these two specific downloads.
if (choice == 1) {
poem.lines = text[19:14356] # clear metadata out of the text
} else if (choice == 2) {
poem.lines = text[19:43645]
}
poem.lines <- tolower(poem.lines) # change all letters to lowercase
poem.lines <- str_trim(poem.lines) # eliminate initial spaces in each line
poem.length <- length(poem.lines)
poem.words <- strsplit(poem.lines, "\\W") # list all words in the poem
poem.words <- unlist(poem.words)
not.blanks <- which(poem.words != "") # clear of all blank spaces
poem.words <- poem.words[not.blanks]
# Word-frequency table; the top 30 is printed as a sanity check of the
# tokenization.
words.freqs <- table(poem.words)
sorted.words.freqs <- sort(words.freqs , decreasing=TRUE)
sorted.words.freqs[1:30]
# Venice-related keywords to search for. For each keyword we report the
# number of matching lines and, on request, print every hit together
# with its location (canto / stanza / line) and 9 lines of context.
keywords = c("venezia","venetiae","veneziani","viniziani","rïalto","rialto","san marco","vinegia","doge","mestre","venetia")
#keywords = c("ugolino");
for (i in 1:length(keywords)) {
# Indices of the poem lines containing the current keyword.
pos <- str_extract(poem.lines,keywords[i])
pos <- which(!is.na(pos))
count <- length(pos)
if (count == 0) {
cat("Found 0 references of the word",keywords[i],"\n")
} else {
if (count == 1) {
cat("Found 1 reference of the word",keywords[i],"\n","Do you want to visualize it? [y/n]")
} else {
cat("Found ",count," references of the word",keywords[i],"\n","Do you want to visualize them? [y/n]")
}
bool <- read_y_n()
cat("\n")
if (bool == TRUE) {
for (j in 1:length(pos)) {
if (choice == 1) {
# Commedia: look up to 200 lines back for the nearest header line
# that contains both "canto" and a cantica name
# (inferno / purgatorio / paradiso).
tmp = max(5,pos[j]-200)
test1 <- grepl("canto",poem.lines[tmp:pos[j]])
test2 <- (grepl("inferno",poem.lines[tmp:pos[j]])
| grepl("purgatorio",poem.lines[tmp:pos[j]])
| grepl("paradiso",poem.lines[tmp:pos[j]]))
test <- test1 & test2
if (is.element(TRUE,test)) {
ref <- max(which(test == TRUE))
} else {
ref <- 4
}
# NOTE(review): the magic numbers 4/5/200/201 encode offsets in the
# Gutenberg layout of this edition -- confirm against the download.
diff <- ref
canto <- max(4,tmp - 1 + ref)
if (ref == 4) {
# No header found within the window: fall back to a raw line count.
line <- pos[j]-4
cat(poem.lines[ref],", ",line,":","\n\n",sep="");
} else {
line <- 201 - diff
cat(poem.lines[canto],", ",line,":","\n\n",sep="");
}
} else if (choice == 2) {
# Orlando Furioso: stanzas (ottave) are 8 verse lines plus a stanza
# number line every 9 lines. Find the numeric line near the match,
# derive the stanza number, then map the offset `loc` of that number
# within the 9-line window to a line index inside the stanza.
num <- as.numeric(poem.lines[(pos[j]-4):(pos[j]+4)])
loc <- which(!is.na(num))
num <- num[!is.na(num)]
if (loc > 5){
num <- num - 1
}
# Walk back ~9*num lines to locate stanza 1 and hence the canto header.
one <- as.numeric(poem.lines[(max(5,pos[j]-num*9-6)):(max(pos[j]-num*9+8,14))])
loc2 <- which(one == 1)
canto <- max(4,pos[j]-num*9+loc2-8)
ottava <- num
if (loc == 1) {
line <- 4
} else if (loc == 2) {
line <- 3
} else if (loc == 3) {
line <- 2
} else if (loc == 4) {
line <- 1
}else if (loc == 6) {
line <- 8
}else if (loc == 7) {
line <- 7
}else if (loc == 8) {
line <- 6
}else if (loc == 9) {
line <- 5
}
cat(poem.lines[canto],", ",ottava,", ",line,":","\n\n",sep="");
}
# Print the matched line with 4 lines of context on each side.
cat(poem.lines[(pos[j]-4):(pos[j]+4)], sep="\n")
cat("\n","--------------------------------------------","\n\n")
}
}
}
}
| /ViP.R | no_license | dario-marvin/VeniceInPoetry | R | false | false | 5,543 | r | ## R file for the analysis of references of Venice in the medium of poetry.
## This file was created as part of the SHS project "Venice in Poetry"
## of the course Digital Humanities taught by Prof. Frédéric Kaplan
## at EPFL in 2015-2016.
##
## Copyright (C) 2016 Marvin
##
## This program is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## Author: Dario Marvin
## Created: 2016-04-28
## ----------------- Load useful packages ------------------
#install.packages("stringr") # if needed
library(stringr)
## ---------- Erase everything stored in memory ------------
rm(list=ls())
cat("\014")
options(warn=-1)
## ------------------- Useful functions --------------------
readinteger <- function() {
n <- readline(prompt = "Enter your choice: ")
if(!grepl("^[0-9]+$",n) || n>2) {
return(readinteger())
}
return(as.integer(n))
}
read_y_n <- function() {
n <- readline(prompt = "Enter your choice: ")
if(n == "y" || n == 1) {
return(TRUE)
} else {
return(FALSE)
}
}
## ---------------------- Text choice ----------------------
cat ("Please choose a poem:","\n","1: Commedia, by Dante Alighieri","\n",
"2: Orlando Furioso, by Ludovico Ariosto","\n","\n");
choice <- readinteger()
if (choice == 1) {
text = scan("http://www.gutenberg.org/files/1012/1012-0.txt", what="character", sep="\n", encoding="UTF-8")
} else if (choice == 2) {
text = scan("http://www.gutenberg.org/files/3747/3747-0.txt", what="character", sep="\n", encoding="UTF-8")
} else
print("Error! Choose a possible value")
## --------------------- Text analisis ---------------------
if (choice == 1) {
poem.lines = text[19:14356] # clear metadata out of the text
} else if (choice == 2) {
poem.lines = text[19:43645]
}
poem.lines <- tolower(poem.lines) # change all letters to lowercase
poem.lines <- str_trim(poem.lines) # eliminate initial spaces in each line
poem.length <- length(poem.lines)
poem.words <- strsplit(poem.lines, "\\W") # list all words in the poem
poem.words <- unlist(poem.words)
not.blanks <- which(poem.words != "") # clear of all blank spaces
poem.words <- poem.words[not.blanks]
words.freqs <- table(poem.words)
sorted.words.freqs <- sort(words.freqs , decreasing=TRUE)
sorted.words.freqs[1:30]
keywords = c("venezia","venetiae","veneziani","viniziani","rïalto","rialto","san marco","vinegia","doge","mestre","venetia")
#keywords = c("ugolino");
for (i in 1:length(keywords)) {
pos <- str_extract(poem.lines,keywords[i])
pos <- which(!is.na(pos))
count <- length(pos)
if (count == 0) {
cat("Found 0 references of the word",keywords[i],"\n")
} else {
if (count == 1) {
cat("Found 1 reference of the word",keywords[i],"\n","Do you want to visualize it? [y/n]")
} else {
cat("Found ",count," references of the word",keywords[i],"\n","Do you want to visualize them? [y/n]")
}
bool <- read_y_n()
cat("\n")
if (bool == TRUE) {
for (j in 1:length(pos)) {
if (choice == 1) {
tmp = max(5,pos[j]-200)
test1 <- grepl("canto",poem.lines[tmp:pos[j]])
test2 <- (grepl("inferno",poem.lines[tmp:pos[j]])
| grepl("purgatorio",poem.lines[tmp:pos[j]])
| grepl("paradiso",poem.lines[tmp:pos[j]]))
test <- test1 & test2
if (is.element(TRUE,test)) {
ref <- max(which(test == TRUE))
} else {
ref <- 4
}
diff <- ref
canto <- max(4,tmp - 1 + ref)
if (ref == 4) {
line <- pos[j]-4
cat(poem.lines[ref],", ",line,":","\n\n",sep="");
} else {
line <- 201 - diff
cat(poem.lines[canto],", ",line,":","\n\n",sep="");
}
} else if (choice == 2) {
num <- as.numeric(poem.lines[(pos[j]-4):(pos[j]+4)])
loc <- which(!is.na(num))
num <- num[!is.na(num)]
if (loc > 5){
num <- num - 1
}
one <- as.numeric(poem.lines[(max(5,pos[j]-num*9-6)):(max(pos[j]-num*9+8,14))])
loc2 <- which(one == 1)
canto <- max(4,pos[j]-num*9+loc2-8)
ottava <- num
if (loc == 1) {
line <- 4
} else if (loc == 2) {
line <- 3
} else if (loc == 3) {
line <- 2
} else if (loc == 4) {
line <- 1
}else if (loc == 6) {
line <- 8
}else if (loc == 7) {
line <- 7
}else if (loc == 8) {
line <- 6
}else if (loc == 9) {
line <- 5
}
cat(poem.lines[canto],", ",ottava,", ",line,":","\n\n",sep="");
}
cat(poem.lines[(pos[j]-4):(pos[j]+4)], sep="\n")
cat("\n","--------------------------------------------","\n\n")
}
}
}
}
|
# Auto-generated fuzz-harness reproduction case (AFL/valgrind): replays a
# recorded argument list against CNull's internal C++ entry point and
# prints the structure of whatever comes back. Not meant to be edited.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252179819e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
library(dplyr)
library(ggplot2)
library(gganimate)
# Toy "race" data: positions of two runners at three moments
# (saat = time index, detik = Indonesian stage labels).
data = data.frame(saat = c(0,1,2),
detik = c("Saat awal","Pertengahan","Garis finish"),
Messi = c(0,0,12),
Ronaldo = c(8,9,10))
# Long format: one row per (time, runner) pair.
data = data %>% reshape2::melt(id.vars = c('saat','detik'))
# Order the factor levels so the animation states play chronologically
# rather than alphabetically.
data$detik = factor(data$detik,levels = c("Saat awal","Pertengahan","Garis finish"))
chart =
data %>%
ggplot(aes(x = variable,
y = value)) +
geom_point(aes(shape = variable,color = variable),size = 10) +
theme_minimal() +
# "{closest_state}" is gganimate glue syntax: the title shows the
# current animation state (the detik label).
labs(title = "{closest_state}") +
theme(axis.title = element_blank(),
legend.position = "none",
axis.text.x = element_text(face = "bold",size = 20)) +
transition_states(detik, transition_length = 1, state_length = 1)
# Render a 10-second animation and write it out as a GIF.
xx = animate(chart,duration = 10)
anim_save("animated.gif", xx)
library(ggplot2)
library(gganimate)
data = data.frame(saat = c(0,1,2),
detik = c("Saat awal","Pertengahan","Garis finish"),
Messi = c(0,0,12),
Ronaldo = c(8,9,10))
data = data %>% reshape2::melt(id.vars = c('saat','detik'))
data$detik = factor(data$detik,levels = c("Saat awal","Pertengahan","Garis finish"))
chart =
data %>%
ggplot(aes(x = variable,
y = value)) +
geom_point(aes(shape = variable,color = variable),size = 10) +
theme_minimal() +
labs(title = "{closest_state}") +
theme(axis.title = element_blank(),
legend.position = "none",
axis.text.x = element_text(face = "bold",size = 20)) +
transition_states(detik, transition_length = 1, state_length = 1)
xx = animate(chart,duration = 10)
anim_save("animated.gif", xx) |
#' Source a javascript file
#'
#' Reads a javascript file from disk and evaluates its contents in the
#' embedded javascript context. Tilde paths (\code{"~/..."}) are expanded.
#'
#' @param x path to a javascript file
#' @return Invisibly returns \code{NULL}; called for its side effect.
#' @export
source_js <- function(x) {
  x <- path.expand(x)
  if (!file.exists(x)) {
    # Include the offending path so the caller can see what was tried.
    stop("File not found: ", x, call. = FALSE)
  }
  # paste(), not paste0(..., sep = ""): paste0() has no `sep` argument,
  # so `sep = ""` was silently being pasted in as an extra vector.
  rcpp_ctx_exec(paste(readLines(x), collapse = "\n"))
  invisible()
}
#' Evaluate javascript code
#'
#' @param x character vector of javascript code; multiple elements are
#'   joined with newlines before evaluation.
#' @return Whatever the underlying context execution returns.
#' @export
eval_js <- function(x) {
  # paste(), not paste0(..., sep = ""): paste0() has no `sep` argument,
  # so `sep = ""` was silently being treated as an extra vector to paste.
  rcpp_ctx_exec(paste(x, collapse = "\n"))
}
#' Retrieve the value of a javascript variable
#'
#' @param x name of the javascript variable to retrieve
#' @export
js_get <- function(x) {
  # Pull the variable out of the JS context as a JSON string,
  # then decode it into the corresponding R object.
  raw_json <- rcpp_get(x)
  jsonlite::fromJSON(raw_json, flatten = TRUE)
}
| /R/tinyjs.r | no_license | hrbrmstr/tinyjs | R | false | false | 625 | r | #' Source a javascript file
#'
#' @param x path to a javascript file
#' @export
source_js <- function(x) {
x <- path.expand(x)
if (file.exists(x)) {
rcpp_ctx_exec(paste0(readLines(x), sep="", collapse="\n"))
} else {
stop("File not found.", call.=FALSE)
}
invisible()
}
#' Evaluate javascript code
#'
#' @param x character vector of javascript code
#' @export
eval_js <- function(x) {
rcpp_ctx_exec(paste0(x, sep="", collapse="\n"))
}
#' Retrive the value of a variable
#'
#' @param x javascript variable to retrieve
#' @export
js_get <- function(x) {
jsonlite::fromJSON(rcpp_get(x), flatten=TRUE)
}
|
context("May parse tags correctly")
# Shared assertions for a parsed `@col` tag describing `matthews_col`.
# Checks the structured fields (name, direction, aliases) and that the
# markdown in the tag's description was rendered to both Rd and HTML:
# **Beautiful** must become \strong{} / <strong>, and the
# [mandrake::extract_column_names()] link must resolve to \link[...] in
# Rd and to an href into mandrake's help pages in HTML.
help_test_matthews_col <- function(my_tag) {
expect_equal(my_tag$tag, "col")
expect_equal(my_tag$val$name, "matthews_col")
expect_equal(my_tag$val$direction, list("in"))
expect_identical(my_tag$val$aliases, list("matts_col"))
expect_match(my_tag$val$rd, ".*\\\\strong\\{Beautiful\\}.*")
expect_match(my_tag$val$rd, ".*\\\\link\\[mandrake:extract_column_names\\].*")
expect_match(
my_tag$val$html[[1]],
"<strong>Beautiful</strong>",
all = F)
expect_match(
my_tag$val$html[[1]],
"<a href='.*/extract_column_names.html'>mandrake::extract_column_names.*</a>",
all = F)
}
test_that("parsing our tag works", {
input <- "
#' @col [in] matthews_col [matts_col]
#' I love matt's col. It is **Beautiful**.
#' Check out [mandrake::extract_column_names()]
#' @md
matthews_function <- function(df) df
"
block <- roxygen2::parse_text(input)
my_tag <- block[[1]]$tags[[1]]
help_test_matthews_col(my_tag)
})
test_that("parsing a block with 2 deffs of our tag works", {
input <- "
#' @col [in] matthews_col [matts_col]
#' I love matt's col. It is **Beautiful**.
#' Check out [mandrake::extract_column_names()]
#' @col [out] matthews_new_col [newcol, newest_col]
#' @md
matthews_function <- function(df) df
"
block <- roxygen2::parse_text(input)
tags <- block[[1]]$tags
help_test_matthews_col(tags[[1]])
next_tag <- tags[[2]]
expect_equal(next_tag$val$direction, list("out"))
expect_equal(next_tag$tag, "col")
expect_equal(next_tag$val$name, "matthews_new_col")
expect_equal(!!next_tag$val$aliases, list(c("newcol", "newest_col")))
})
test_that("May parse valid R columns, such as those with full-stops", {
fail("Test not implemented")
})
# Roxygen fixture: a block that both defines its own @col tag and
# inherits two columns (mpg, cyl) from the mandrake package via
# @inheritCol. Returned as a single string for roxygen2::parse_text().
setup_input_inherit <- function() {
"
#' Hello
#' @col [in] matthews_col [matts_col]
#' I love matt's col. It is **Beautiful**.
#' Check out [mandrake::extract_column_names()] is_in <- any(x$val$direction %in% dirs_in)
#'
#' @inheritCol mandrake [mpg, cyl]
#' @md
matthews_function_inherit <- function(df) df
"
}
inherit_parsed_expect <- function() {
  # Expected parse result for the @inheritCol tag in the fixture above:
  # one row naming the source package and the inherited column names.
  expected_cols <- list(c("mpg", "cyl"))
  tibble::tibble(src = "mandrake", columns = expected_cols)
}
# The @inheritCol tag should parse into a one-row tibble of
# (source package, inherited column names) -- see inherit_parsed_expect().
test_that("May parse inheritCol correctly", {
input <- setup_input_inherit()
block <- roxygen2::parse_text(input)
block %<>% .[[1]]
tags <- roxygen2::block_get_tags(block, "inheritCol")
val <- tags[[1]]$val
expect_equal(val, inherit_parsed_expect())
})
| /tests/testthat/test_parsing.R | no_license | strazto/mandrake | R | false | false | 2,458 | r | context("May parse tags correctly")
help_test_matthews_col <- function(my_tag) {
expect_equal(my_tag$tag, "col")
expect_equal(my_tag$val$name, "matthews_col")
expect_equal(my_tag$val$direction, list("in"))
expect_identical(my_tag$val$aliases, list("matts_col"))
expect_match(my_tag$val$rd, ".*\\\\strong\\{Beautiful\\}.*")
expect_match(my_tag$val$rd, ".*\\\\link\\[mandrake:extract_column_names\\].*")
expect_match(
my_tag$val$html[[1]],
"<strong>Beautiful</strong>",
all = F)
expect_match(
my_tag$val$html[[1]],
"<a href='.*/extract_column_names.html'>mandrake::extract_column_names.*</a>",
all = F)
}
test_that("parsing our tag works", {
input <- "
#' @col [in] matthews_col [matts_col]
#' I love matt's col. It is **Beautiful**.
#' Check out [mandrake::extract_column_names()]
#' @md
matthews_function <- function(df) df
"
block <- roxygen2::parse_text(input)
my_tag <- block[[1]]$tags[[1]]
help_test_matthews_col(my_tag)
})
test_that("parsing a block with 2 deffs of our tag works", {
input <- "
#' @col [in] matthews_col [matts_col]
#' I love matt's col. It is **Beautiful**.
#' Check out [mandrake::extract_column_names()]
#' @col [out] matthews_new_col [newcol, newest_col]
#' @md
matthews_function <- function(df) df
"
block <- roxygen2::parse_text(input)
tags <- block[[1]]$tags
help_test_matthews_col(tags[[1]])
next_tag <- tags[[2]]
expect_equal(next_tag$val$direction, list("out"))
expect_equal(next_tag$tag, "col")
expect_equal(next_tag$val$name, "matthews_new_col")
expect_equal(!!next_tag$val$aliases, list(c("newcol", "newest_col")))
})
test_that("May parse valid R columns, such as those with full-stops", {
fail("Test not implemented")
})
setup_input_inherit <- function() {
"
#' Hello
#' @col [in] matthews_col [matts_col]
#' I love matt's col. It is **Beautiful**.
#' Check out [mandrake::extract_column_names()] is_in <- any(x$val$direction %in% dirs_in)
#'
#' @inheritCol mandrake [mpg, cyl]
#' @md
matthews_function_inherit <- function(df) df
"
}
inherit_parsed_expect <- function() {
tibble::tibble(src = "mandrake", columns = list(c("mpg", "cyl")))
}
test_that("May parse inheritCol correctly", {
input <- setup_input_inherit()
block <- roxygen2::parse_text(input)
block %<>% .[[1]]
tags <- roxygen2::block_get_tags(block, "inheritCol")
val <- tags[[1]]$val
expect_equal(val, inherit_parsed_expect())
})
|
# Weighted degree of every vertex in graph `g`.
# `g` is assumed to be a list with $V (vertex data frame, column V),
# $E (edge data frame, columns V1, V2, W) and logical $directed --
# TODO confirm against the graph constructor elsewhere in the package.
degree <- function(g, type="inbound"){
# `s` maps a direction to an edge table keyed by the relevant endpoint;
# "both" recurses and stacks the inbound and outbound views.
# NOTE(review): "inbound" keys on V1 -- confirm the edge orientation
# convention (V1 -> V2 vs V2 -> V1).
s = function(type) switch(type,
inbound = g$E %>% select(V=V1, W=W),
outbound = g$E %>% select(V=V2, W=W),
both = rbind(s("inbound"), s("outbound")))
# Undirected graphs ignore the requested direction.
type = if(!g$directed) "both" else type
# Zero-weight rows are appended for every vertex so isolated vertices
# still appear in the result with degree 0.
deg = type %>%
s %>%
rbind(data.frame(V=g$V$V, W=0)) %>%
group_by(V) %>%
summarize(degree=sum(W))
deg
}
# Iterative PageRank-style scoring on graph `g`, recursing until the
# largest relative per-vertex change drops below `threshold`.
page.rank <- function(g, threshold=1e-6){
# TRUE once every vertex's relative change |old - new| / old is below
# the threshold.
is.converged <- function(x){
diff = x %>%
mutate(diff=abs(PR.old-PR)/PR.old) %>%
group_by %>%
summarize(max(diff))
diff < threshold
}
# Rescale scores so they sum to 1 after each sweep.
normalize <- function(z){
n = (z %>% group_by %>% summarize(sum(PR)))[1,1]
z %>% mutate(PR=PR/n)
}
# One sweep: join scores onto edges, sum weighted contributions per
# vertex, normalize, and recurse until converged.
# NOTE(review): no damping factor and no iteration cap -- this may
# recurse indefinitely (and overflow the call stack) if scores never
# converge.
# NOTE(review): the join is by V (the edge target), so sum(W*PR) uses
# the target vertex's OWN score rather than the source's -- verify this
# is the intended update rule.
calculate.pr <- function(x){
edges = g$E %>% select(V1, V=V2, W)
pr = x %>% select(V, PR)
pr = left_join(pr, edges) %>%
group_by(V, PR) %>%
summarize(PR.new=sum(W*PR, na.rm=T)) %>%
select(V=V, PR.old=PR, PR=PR.new) %>%
group_by %>%
normalize
if(is.converged(pr)) pr else pr %>% calculate.pr
}
# Start every vertex at PR = 1 and iterate.
g$V %>% mutate(PR=1) %>% calculate.pr
}
s = function(type) switch(type,
inbound = g$E %>% select(V=V1, W=W),
outbound = g$E %>% select(V=V2, W=W),
both = rbind(s("inbound"), s("outbound")))
type = if(!g$directed) "both" else type
deg = type %>%
s %>%
rbind(data.frame(V=g$V$V, W=0)) %>%
group_by(V) %>%
summarize(degree=sum(W))
deg
}
page.rank <- function(g, threshold=1e-6){
is.converged <- function(x){
diff = x %>%
mutate(diff=abs(PR.old-PR)/PR.old) %>%
group_by %>%
summarize(max(diff))
diff < threshold
}
normalize <- function(z){
n = (z %>% group_by %>% summarize(sum(PR)))[1,1]
z %>% mutate(PR=PR/n)
}
calculate.pr <- function(x){
edges = g$E %>% select(V1, V=V2, W)
pr = x %>% select(V, PR)
pr = left_join(pr, edges) %>%
group_by(V, PR) %>%
summarize(PR.new=sum(W*PR, na.rm=T)) %>%
select(V=V, PR.old=PR, PR=PR.new) %>%
group_by %>%
normalize
if(is.converged(pr)) pr else pr %>% calculate.pr
}
g$V %>% mutate(PR=1) %>% calculate.pr
} |
# Low-level constructor for a "stem" target (a regular target that other
# targets may dynamically branch over). All arguments are captured into
# the function's own environment, which is then class-tagged and
# returned as the target object itself.
stem_new <- function(
command = NULL,
settings = NULL,
cue = NULL,
value = NULL,
metrics = NULL,
store = NULL,
subpipeline = NULL,
junction = NULL
) {
# force() each argument so the environment holds evaluated values
# rather than lazy promises bound to the caller's frame.
force(command)
force(settings)
force(cue)
force(value)
force(metrics)
force(store)
force(subpipeline)
force(junction)
enclass(environment(), c("tar_stem", "tar_builder", "tar_target"))
}
#' @export
target_get_children.tar_stem <- function(target) {
if_any(
is.null(target$junction),
character(0),
target$junction$splits
)
}
#' @export
target_get_type.tar_stem <- function(target) {
"stem"
}
#' @export
target_get_type_cli.tar_stem <- function(target) {
"target"
}
#' @export
target_produce_junction.tar_stem <- function(target, pipeline) {
target_ensure_value(target, pipeline)
stem_assert_nonempty(target)
hashes <- value_hash_slices(target$value)
names <- paste0(target_get_parent(target), "_", hashes)
junction_init(target_get_parent(target), names)
}
#' @export
target_produce_record.tar_stem <- function(target, pipeline, meta) {
file <- target$store$file
record_init(
name = target_get_name(target),
type = "stem",
command = target$command$hash,
seed = target$command$seed,
depend = meta$get_depend(target_get_name(target)),
path = file$path,
data = file$hash,
time = file$time,
size = file$size,
bytes = file$bytes,
format = target$settings$format,
iteration = target$settings$iteration,
children = target_get_children(target),
seconds = target$metrics$seconds,
warnings = target$metrics$warnings,
error = target$metrics$error
)
}
#' @export
target_skip.tar_stem <- function(target, pipeline, scheduler, meta) {
NextMethod()
stem_restore_buds(target, pipeline, scheduler, meta)
}
#' @export
target_ensure_buds.tar_stem <- function(target, pipeline, scheduler) {
stem_ensure_buds(target, pipeline, scheduler)
}
#' @export
target_restore_buds.tar_stem <- function(target, pipeline, scheduler, meta) {
stem_restore_buds(target, pipeline, scheduler, meta)
}
#' @export
target_is_branchable.tar_stem <- function(target) {
  # Dynamic branching is supported for every storage format except "file".
  fmt <- target$settings$format
  !identical(fmt, "file")
}
#' @export
target_validate.tar_stem <- function(target) {
assert_correct_fields(target, stem_new)
NextMethod()
if (!is.null(target$junction)) {
junction_validate(target$junction)
}
}
stem_assert_nonempty <- function(target) {
  # Branching over a stem requires at least one slice in its value;
  # raise a runtime error naming the offending target otherwise.
  n_slices <- value_count_slices(target$value)
  if (n_slices >= 1L) {
    return(invisible(NULL))
  }
  throw_run(
    "cannot branch over empty target (",
    target_get_name(target),
    ")"
  )
}
stem_produce_buds <- function(target) {
settings <- target$settings
names <- target_get_children(target)
map(seq_along(names), ~bud_init(settings, names[.x], .x))
}
stem_insert_buds <- function(target, pipeline, scheduler) {
map(stem_produce_buds(target), pipeline_set_target, pipeline = pipeline)
}
stem_ensure_buds <- function(target, pipeline, scheduler) {
if (length(target_downstream_branching(target, pipeline, scheduler))) {
stem_ensure_junction(target, pipeline)
stem_insert_buds(target, pipeline, scheduler)
}
}
stem_restore_buds <- function(target, pipeline, scheduler, meta) {
if (length(target_downstream_branching(target, pipeline, scheduler))) {
stem_restore_junction(target, pipeline, meta)
stem_insert_buds(target, pipeline, scheduler)
}
}
stem_update_junction <- function(target, pipeline) {
target$junction <- target_produce_junction(target, pipeline)
}
stem_ensure_junction <- function(target, pipeline) {
if (is.null(target$junction)) {
stem_update_junction(target, pipeline)
}
}
# Rebuild the stem's junction (its mapping onto child buds) from the
# metadata recorded on a previous run, falling back to recomputing it
# from the current value when the recorded children are unusable.
stem_restore_junction <- function(target, pipeline, meta) {
name <- target_get_name(target)
# No metadata record means there is nothing to restore.
if (!meta$exists_record(name)) {
return()
}
children <- meta$get_record(name)$children
# NA children presumably indicate a record without usable branching
# info -- TODO confirm; in that case recompute rather than trusting it.
junction <- if_any(
anyNA(children),
target_produce_junction(target, pipeline),
junction_init(nexus = name, splits = children)
)
target$junction <- junction
}
#' @export
print.tar_stem <- function(x, ...) {
cat(
"<stem target>",
"\n name:", target_get_name(x),
"\n command:\n ",
produce_lines(string_sub_expression(x$command$string)),
"\n format:", x$settings$format,
"\n iteration method:", x$settings$iteration,
"\n error mode:", x$settings$error,
"\n memory mode:", x$settings$memory,
"\n storage mode:", x$settings$storage,
"\n retrieval mode:", x$settings$retrieval,
"\n deploy to:", x$settings$deployment,
"\n resources:\n ",
produce_lines(paste_list(x$settings$resources)),
"\n cue:\n ",
produce_lines(paste_list(as.list(x$cue))),
"\n packages:\n ", produce_lines(x$command$packages),
"\n library:\n ", produce_lines(x$command$library)
)
}
| /R/class_stem.R | permissive | krlmlr/targets | R | false | false | 4,772 | r | stem_new <- function(
command = NULL,
settings = NULL,
cue = NULL,
value = NULL,
metrics = NULL,
store = NULL,
subpipeline = NULL,
junction = NULL
) {
force(command)
force(settings)
force(cue)
force(value)
force(metrics)
force(store)
force(subpipeline)
force(junction)
enclass(environment(), c("tar_stem", "tar_builder", "tar_target"))
}
#' @export
target_get_children.tar_stem <- function(target) {
if_any(
is.null(target$junction),
character(0),
target$junction$splits
)
}
#' @export
target_get_type.tar_stem <- function(target) {
"stem"
}
#' @export
target_get_type_cli.tar_stem <- function(target) {
"target"
}
#' @export
target_produce_junction.tar_stem <- function(target, pipeline) {
target_ensure_value(target, pipeline)
stem_assert_nonempty(target)
hashes <- value_hash_slices(target$value)
names <- paste0(target_get_parent(target), "_", hashes)
junction_init(target_get_parent(target), names)
}
#' @export
target_produce_record.tar_stem <- function(target, pipeline, meta) {
file <- target$store$file
record_init(
name = target_get_name(target),
type = "stem",
command = target$command$hash,
seed = target$command$seed,
depend = meta$get_depend(target_get_name(target)),
path = file$path,
data = file$hash,
time = file$time,
size = file$size,
bytes = file$bytes,
format = target$settings$format,
iteration = target$settings$iteration,
children = target_get_children(target),
seconds = target$metrics$seconds,
warnings = target$metrics$warnings,
error = target$metrics$error
)
}
#' @export
target_skip.tar_stem <- function(target, pipeline, scheduler, meta) {
NextMethod()
stem_restore_buds(target, pipeline, scheduler, meta)
}
#' @export
target_ensure_buds.tar_stem <- function(target, pipeline, scheduler) {
stem_ensure_buds(target, pipeline, scheduler)
}
#' @export
target_restore_buds.tar_stem <- function(target, pipeline, scheduler, meta) {
stem_restore_buds(target, pipeline, scheduler, meta)
}
#' @export
target_is_branchable.tar_stem <- function(target) {
!identical(target$settings$format, "file")
}
#' @export
target_validate.tar_stem <- function(target) {
assert_correct_fields(target, stem_new)
NextMethod()
if (!is.null(target$junction)) {
junction_validate(target$junction)
}
}
stem_assert_nonempty <- function(target) {
if (value_count_slices(target$value) < 1L) {
throw_run(
"cannot branch over empty target (",
target_get_name(target),
")"
)
}
}
stem_produce_buds <- function(target) {
settings <- target$settings
names <- target_get_children(target)
map(seq_along(names), ~bud_init(settings, names[.x], .x))
}
stem_insert_buds <- function(target, pipeline, scheduler) {
map(stem_produce_buds(target), pipeline_set_target, pipeline = pipeline)
}
stem_ensure_buds <- function(target, pipeline, scheduler) {
if (length(target_downstream_branching(target, pipeline, scheduler))) {
stem_ensure_junction(target, pipeline)
stem_insert_buds(target, pipeline, scheduler)
}
}
stem_restore_buds <- function(target, pipeline, scheduler, meta) {
if (length(target_downstream_branching(target, pipeline, scheduler))) {
stem_restore_junction(target, pipeline, meta)
stem_insert_buds(target, pipeline, scheduler)
}
}
stem_update_junction <- function(target, pipeline) {
target$junction <- target_produce_junction(target, pipeline)
}
stem_ensure_junction <- function(target, pipeline) {
if (is.null(target$junction)) {
stem_update_junction(target, pipeline)
}
}
stem_restore_junction <- function(target, pipeline, meta) {
name <- target_get_name(target)
if (!meta$exists_record(name)) {
return()
}
children <- meta$get_record(name)$children
junction <- if_any(
anyNA(children),
target_produce_junction(target, pipeline),
junction_init(nexus = name, splits = children)
)
target$junction <- junction
}
#' @export
print.tar_stem <- function(x, ...) {
cat(
"<stem target>",
"\n name:", target_get_name(x),
"\n command:\n ",
produce_lines(string_sub_expression(x$command$string)),
"\n format:", x$settings$format,
"\n iteration method:", x$settings$iteration,
"\n error mode:", x$settings$error,
"\n memory mode:", x$settings$memory,
"\n storage mode:", x$settings$storage,
"\n retrieval mode:", x$settings$retrieval,
"\n deploy to:", x$settings$deployment,
"\n resources:\n ",
produce_lines(paste_list(x$settings$resources)),
"\n cue:\n ",
produce_lines(paste_list(as.list(x$cue))),
"\n packages:\n ", produce_lines(x$command$packages),
"\n library:\n ", produce_lines(x$command$library)
)
}
|
# Auto-generated libFuzzer/valgrind reproduction case: replays recorded
# arguments against esreg's G1 specification function and prints the
# structure of the result. Not meant to be edited.
testlist <- list(type = 0L, z = 1.33990603152146e-320)
result <- do.call(esreg::G1_fun,testlist)
str(result)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
library(DHARMa)
### Name: testTemporalAutocorrelation
### Title: Test for temporal autocorrelation
### Aliases: testTemporalAutocorrelation
### ** Examples
testData = createData(sampleSize = 40, family = gaussian())
fittedModel <- lm(observedResponse ~ Environment1, data = testData)
res = simulateResiduals(fittedModel)
# Standard use
testTemporalAutocorrelation(res, time = testData$time)
# If no time is provided, random values will be created
testTemporalAutocorrelation(res)
| /data/genthat_extracted_code/DHARMa/examples/testTemporalAutocorrelation.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 491 | r | library(DHARMa)
### Name: testTemporalAutocorrelation
### Title: Test for temporal autocorrelation
### Aliases: testTemporalAutocorrelation
### ** Examples
testData = createData(sampleSize = 40, family = gaussian())
fittedModel <- lm(observedResponse ~ Environment1, data = testData)
res = simulateResiduals(fittedModel)
# Standard use
testTemporalAutocorrelation(res, time = testData$time)
# If no time is provided, random values will be created
testTemporalAutocorrelation(res)
|
################################################################################
## ChinaTraits.R: Gathering data from the China Plant 2 Trait Database for all CoRRE species.
##
## Authors: Meghan Avolio, Kimberly Komatsu
################################################################################
library(tidyverse)
# NOTE(review): both working-directory lines are active, so the second
# one silently wins; comment out whichever user's path does not apply.
setwd('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\CoRRE_database\\Data') #Kim
setwd('E:\\Dropbox\\CoRRE_database\\Data') #Meghan
# Import CoRRE species names
correSpecies <- read.csv("CompiledData\\Species_lists\\FullList_Nov2021.csv") %>% #species names are standardized
left_join(read.csv("CompiledData\\Species_lists\\species_families_trees_2021.csv")) %>%
filter(tree.non.tree != "tree") %>% #Remove trees
separate(species_matched, into=c('genus', 'species', 'subspp'), sep=' ') %>%
filter(species!='sp.') %>%
unite(col='species_matched', genus:species, sep=' ', remove=T) %>%
select(family, species_matched) %>%
unique()
# Import GEx species names
GExSpecies <- read.csv('OriginalData\\Traits\\GEx_species_tree_complete.csv') %>%
filter(tree.non.tree=='non-tree') %>%
select(family, species_matched) %>%
unique()
allSpecies <- rbind(correSpecies, GExSpecies) %>%
unique()
#species from China Plant Trait Database 2 that are in CoRRE database
spList <- read.csv('OriginalData\\Traits\\ChinaPlant2\\Species translations.csv') %>%
unite(col='species_matched', ACCEPTED.GENUS:ACCEPTED.SPECIES, sep=' ') %>%
select(species_matched, Site.ID, SAMPLE.ID) %>%
left_join(allSpecies)
#trait data
chem <- read.csv("OriginalData\\Traits\\ChinaPlant2\\Chemical traits.csv") %>%
filter(flagged=="") %>%
mutate(leaf_area=Average.LA*1000000) %>% #unit conversion to TRY standards: m2 to mm2
mutate(LDMC=LDMC/1000) %>% #unit conversion to TRY standards: mg/g to g/g
select(-LMA,-Narea, -Parea, -Karea, -d13C.12C, -d15N.14N, -flagged, -Average.LA) %>%
rename(leaf_C=Cmass,
leaf_N=Nmass,
leaf_P=Pmass,
leaf_K=Kmass) %>%
pivot_longer(SLA:leaf_area, names_to="CleanTraitName", values_to="StdValue") %>%
right_join(spList) %>%
na.omit()
photo <- read.csv("OriginalData\\Traits\\ChinaPlant2\\Photosynthetic traits.csv") %>%
filter(flagged=="") %>%
select(SAMPLE.ID, Vcmax, Jmax) %>%
rename(Vc_max=Vcmax,
J_max=Jmax) %>%
pivot_longer(Vc_max:J_max, names_to='CleanTraitName', values_to='StdValue') %>%
right_join(spList) %>%
na.omit()
#bind together
traits <- rbind(chem, photo) %>%
mutate(DatabaseID='CPTD2') %>%
rename(DatasetID=Site.ID,
ObservationID=SAMPLE.ID) %>%
filter(CleanTraitName %in% c('LDMC', 'leaf_area', 'leaf_N', 'SLA'))
# write.csv(traits, 'OriginalData\\Traits\\ChinaPlant2\\CPTD2_June2023.csv', row.names=F) | /Traits/06_ChinaTraits.R | no_license | klapierre/CoRRE | R | false | false | 2,803 | r | ################################################################################
## ChinaTraits.R: Gathering data from the China Plant 2 Trait Database for all CoRRE species.
##
## Authors: Meghan Avolio, Kimberly Komatsu
################################################################################
library(tidyverse)
setwd('C:\\Users\\kjkomatsu\\Dropbox (Smithsonian)\\working groups\\CoRRE\\CoRRE_database\\Data') #Kim
setwd('E:\\Dropbox\\CoRRE_database\\Data') #Meghan
# Import CoRRE species names
correSpecies <- read.csv("CompiledData\\Species_lists\\FullList_Nov2021.csv") %>% #species names are standardized
left_join(read.csv("CompiledData\\Species_lists\\species_families_trees_2021.csv")) %>%
filter(tree.non.tree != "tree") %>% #Remove trees
separate(species_matched, into=c('genus', 'species', 'subspp'), sep=' ') %>%
filter(species!='sp.') %>%
unite(col='species_matched', genus:species, sep=' ', remove=T) %>%
select(family, species_matched) %>%
unique()
# Import GEx species names
GExSpecies <- read.csv('OriginalData\\Traits\\GEx_species_tree_complete.csv') %>%
filter(tree.non.tree=='non-tree') %>%
select(family, species_matched) %>%
unique()
allSpecies <- rbind(correSpecies, GExSpecies) %>%
unique()
#species from China Plant Trait Database 2 that are in CoRRE database
spList <- read.csv('OriginalData\\Traits\\ChinaPlant2\\Species translations.csv') %>%
unite(col='species_matched', ACCEPTED.GENUS:ACCEPTED.SPECIES, sep=' ') %>%
select(species_matched, Site.ID, SAMPLE.ID) %>%
left_join(allSpecies)
#trait data
chem <- read.csv("OriginalData\\Traits\\ChinaPlant2\\Chemical traits.csv") %>%
filter(flagged=="") %>%
mutate(leaf_area=Average.LA*1000000) %>% #unit conversion to TRY standards: m2 to mm2
mutate(LDMC=LDMC/1000) %>% #unit conversion to TRY standards: mg/g to g/g
select(-LMA,-Narea, -Parea, -Karea, -d13C.12C, -d15N.14N, -flagged, -Average.LA) %>%
rename(leaf_C=Cmass,
leaf_N=Nmass,
leaf_P=Pmass,
leaf_K=Kmass) %>%
pivot_longer(SLA:leaf_area, names_to="CleanTraitName", values_to="StdValue") %>%
right_join(spList) %>%
na.omit()
photo <- read.csv("OriginalData\\Traits\\ChinaPlant2\\Photosynthetic traits.csv") %>%
filter(flagged=="") %>%
select(SAMPLE.ID, Vcmax, Jmax) %>%
rename(Vc_max=Vcmax,
J_max=Jmax) %>%
pivot_longer(Vc_max:J_max, names_to='CleanTraitName', values_to='StdValue') %>%
right_join(spList) %>%
na.omit()
# Bind chemical and photosynthetic trait tables together, tag the source
# database, rename identifier columns to the compiled-data schema, and keep
# only the four traits used in downstream analyses.
traits <- rbind(chem, photo) %>%
  mutate(DatabaseID='CPTD2') %>% # CPTD2 = China Plant Trait Database 2
  rename(DatasetID=Site.ID,
         ObservationID=SAMPLE.ID) %>%
  filter(CleanTraitName %in% c('LDMC', 'leaf_area', 'leaf_N', 'SLA'))
# Uncomment to export the compiled trait table:
# write.csv(traits, 'OriginalData\\Traits\\ChinaPlant2\\CPTD2_June2023.csv', row.names=F)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{renameData}
\alias{renameData}
\title{Rename items in the data slot of an oce object}
\usage{
renameData(x, old = NULL, new = NULL)
}
\arguments{
\item{x}{An \code{oce} object, i.e. one inheriting from
\code{\link{oce-class}}.}
\item{old}{Vector of strings, containing old names.}
\item{new}{Vector of strings, containing new names.}
}
\description{
This function may be used to rename elements within the
\code{data} slot of \code{oce} objects. It also updates
the processing log of the returned object, indicating
the changes.
}
\examples{
data(ctd)
new <- renameData(ctd, "temperature", "temperature68")
new <- oceSetData(new, name="temperature",
value=T90fromT68(new[["temperature68"]]),
unit=list(unit=expression(degree*C),scale="ITS=90"))
}
| /man/renameData.Rd | no_license | pablovaldes/oce | R | false | true | 878 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{renameData}
\alias{renameData}
\title{Rename items in the data slot of an oce object}
\usage{
renameData(x, old = NULL, new = NULL)
}
\arguments{
\item{x}{An \code{oce} object, i.e. one inheriting from
\code{\link{oce-class}}.}
\item{old}{Vector of strings, containing old names.}
\item{new}{Vector of strings, containing new names.}
}
\description{
This function may be used to rename elements within the
\code{data} slot of \code{oce} objects. It also updates
the processing log of the returned object, indicating
the changes.
}
\examples{
data(ctd)
new <- renameData(ctd, "temperature", "temperature68")
new <- oceSetData(new, name="temperature",
value=T90fromT68(new[["temperature68"]]),
unit=list(unit=expression(degree*C),scale="ITS=90"))
}
|
library(shiny)
shinyUI(fluidPage(
titlePanel("Flow Rate Calibration with Manometers and a Volumeter"),
sidebarLayout(
fluid=FALSE,
sidebarPanel(
position="left",
helpText("Enter the measurement conditions here:"),
numericInput("num_manom_press",
label = h6("Enter the manometer pressure reading (in mm Hg)"),
value = 760),
numericInput("num_manom_temp",
label = h6("Enter the manometer temperature (in °C)"),
value = 23),
numericInput("num_latitude",
label = h6("Enter your current latitude (in °)"),
value = 43),
helpText("Enter the flow meter values here:"),
numericInput("num_backpressure",
label = h6("Enter the back pressure (in mm H2O)"),
value = 45),
numericInput("num_volume",
label = h6("Enter the volume of gas collected (in mL)"),
value = 200),
numericInput("num_seconds",
label = h6("Enter time required to collect gas (in seconds)"),
value = 23),
actionButton("action_Calc", label = "Refresh & Calculate")
),
mainPanel(
tabsetPanel(
tabPanel("Output",
p(h5("Your entered values:")),
textOutput("text_manom_press"),
textOutput("text_manom_temp"),
textOutput("text_latitude"),
textOutput("text_backpressure"),
textOutput("text_volume"),
textOutput("text_seconds"),
br(),
p(h5("Calculated values:")),
textOutput("text_K"),
textOutput("text_bp_standard"),
textOutput("text_flowrate_standard")
),
tabPanel("Documentation",
p(h5("Pressure and Flow Conversions:")),
helpText("This application calculates local station pressure
corrected to 0°C and latitude for the purposes of
calibrating a gas flow meter. By measuring the time
required for a known, calibrated volume to be collected,
the flow rate, corrected to 0°C can be determined"),
HTML("<u><b>Equations for calculation: </b></u>
<br> <br>
<b> M<sub>tc</sub> = [1 + L*(T-T<sub>s</sub>)] /
[1 + M * (T-T<sub>m</sub>)] </b>
<br>
where: <br>
<b>M<sub>tc</sub></b> is the temperature-correction multiplier for
Barometric Pressure <br>
<b>L</b> = coeff of linear thermal expansion of brass
= 0.0000184 m/m°C <br>
<b>M</b> = coeff of volume thermal expansion of mercury
= 0.0001818 m<sup>3</sup>/m<sup>3</sup>°C<br>
<b>T<sub>m</sub></b> = standard temperature for mercury density
(i.e., 0°C) <br>
<b>T<sub>s</sub></b> = standard temperature for scale (i.e., 0°C) <br>
<b>T</b> = barometer temperature (°C) <br>
<br>
<b> M<sub>gc</sub> = 980.616/980.665*[1 - 2.6363e-3*cos(2*theta) +
5.9e-6*(cos<sup>2</sup>(2*theta)] </b> <br>
where: <br>
<b>M<sub>gc</sub></b> is the gravity-correction multiplier for
Barometric Pressure <br>
<b>theta</b> = latitude expressed as radians (latitude*pi/180)
<br>
<br>
<b>H<sub>b</sub> = BP * M<sub>tc</sub>*M<sub>gc</sub></b> <br>
where: <br>
<b>H<sub>b</sub></b> = Barometric pressure (mm Hg) corrected to 0°C and
for gravity<br>
<b>BP</b> = Barometric pressure reading (mm Hg) from mercurial
barometer <br>
<br>
<b>K = [(T<sub>s</sub> + 273.15) / (T<sub>g</sub> + 273.15)]
* [H<sub>b</sub> + H<sub>v</sub> / 13.600] / P<sub>s</sub></b>
<br>
where: <br>
<b>K</b> = volumeter correction factor <br>
<b>T<sub>s</sub></b> = Standard Temperature (i.e. 0°C) <br>
<b>T<sub>g</sub></b> = Gas Temperature (i.e. same as barometer
temperature, °C) <br>
<b>H<sub>b</sub></b> = Barometric pressure corrected to 0°C and
for gravity<br>
<b>H<sub>v</sub></b> = Back pressure from volumeter exerted on gas flow
(mm H2O) <br>
<b>P<sub>s</sub></b> = Standard Pressure (i.e. 760 mm Hg) <br>
<br>
<b>F = 60 * V / t </b> <br>
where: <br>
<b>F</b> = Flow rate of gas, corrected to STP (0°C, 760 mm Hg) <br>
<b>V</b> = Volume of gas collected (mL, cubic centimeters) by
volumter during calibration <br>
<b>t</b> = Time (seconds) required to collect gas volume <br>
<br>
<br>
<br>
")
),
tabPanel("Set-up",
p(h5("Volumeter Set-up:")),
img(src="VolumeterSetup.jpg", height = 400),
helpText("The volumeter calibration system consists of:"),
HTML("
<b>Volumeter</b> (calibrated glass cylinder for collecting gas) <br>
<b>Mercurial Barometer</b> (for measuring barometric pressure) <br>
<b>Water Manometer</b> (for measuring the backpressure on the gas) <br>
<b>Stopwatch</b> (for measuring the time
required for gas to traverse volume <br>
")
)
)
)
)
))
| /ui.R | no_license | gtatters/FlowRateCalibration | R | false | false | 6,827 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Flow Rate Calibration with Manometers and a Volumeter"),
sidebarLayout(
fluid=FALSE,
sidebarPanel(
position="left",
helpText("Enter the measurement conditions here:"),
numericInput("num_manom_press",
label = h6("Enter the manometer pressure reading (in mm Hg)"),
value = 760),
numericInput("num_manom_temp",
label = h6("Enter the manometer temperature (in °C)"),
value = 23),
numericInput("num_latitude",
label = h6("Enter your current latitude (in °)"),
value = 43),
helpText("Enter the flow meter values here:"),
numericInput("num_backpressure",
label = h6("Enter the back pressure (in mm H2O)"),
value = 45),
numericInput("num_volume",
label = h6("Enter the volume of gas collected (in mL)"),
value = 200),
numericInput("num_seconds",
label = h6("Enter time required to collect gas (in seconds)"),
value = 23),
actionButton("action_Calc", label = "Refresh & Calculate")
),
mainPanel(
tabsetPanel(
tabPanel("Output",
p(h5("Your entered values:")),
textOutput("text_manom_press"),
textOutput("text_manom_temp"),
textOutput("text_latitude"),
textOutput("text_backpressure"),
textOutput("text_volume"),
textOutput("text_seconds"),
br(),
p(h5("Calculated values:")),
textOutput("text_K"),
textOutput("text_bp_standard"),
textOutput("text_flowrate_standard")
),
tabPanel("Documentation",
p(h5("Pressure and Flow Conversions:")),
helpText("This application calculates local station pressure
corrected to 0°C and latitude for the purposes of
calibrating a gas flow meter. By measuring the time
required for a known, calibrated volume to be collected,
the flow rate, corrected to 0°C can be determined"),
HTML("<u><b>Equations for calculation: </b></u>
<br> <br>
<b> M<sub>tc</sub> = [1 + L*(T-T<sub>s</sub>)] /
[1 + M * (T-T<sub>m</sub>)] </b>
<br>
where: <br>
<b>M<sub>tc</sub></b> is the temperature-correction multiplier for
Barometric Pressure <br>
<b>L</b> = coeff of linear thermal expansion of brass
= 0.0000184 m/m°C <br>
<b>M</b> = coeff of volume thermal expansion of mercury
= 0.0001818 m<sup>3</sup>/m<sup>3</sup>°C<br>
<b>T<sub>m</sub></b> = standard temperature for mercury density
(i.e., 0°C) <br>
<b>T<sub>s</sub></b> = standard temperature for scale (i.e., 0°C) <br>
<b>T</b> = barometer temperature (°C) <br>
<br>
<b> M<sub>gc</sub> = 980.616/980.665*[1 - 2.6363e-3*cos(2*theta) +
5.9e-6*(cos<sup>2</sup>(2*theta)] </b> <br>
where: <br>
<b>M<sub>gc</sub></b> is the gravity-correction multiplier for
Barometric Pressure <br>
<b>theta</b> = latitude expressed as radians (latitude*pi/180)
<br>
<br>
<b>H<sub>b</sub> = BP * M<sub>tc</sub>*M<sub>gc</sub></b> <br>
where: <br>
<b>H<sub>b</sub></b> = Barometric pressure (mm Hg) corrected to 0°C and
for gravity<br>
<b>BP</b> = Barometric pressure reading (mm Hg) from mercurial
barometer <br>
<br>
<b>K = [(T<sub>s</sub> + 273.15) / (T<sub>g</sub> + 273.15)]
* [H<sub>b</sub> + H<sub>v</sub> / 13.600] / P<sub>s</sub></b>
<br>
where: <br>
<b>K</b> = volumeter correction factor <br>
<b>T<sub>s</sub></b> = Standard Temperature (i.e. 0°C) <br>
<b>T<sub>g</sub></b> = Gas Temperature (i.e. same as barometer
temperature, °C) <br>
<b>H<sub>b</sub></b> = Barometric pressure corrected to 0°C and
for gravity<br>
<b>H<sub>v</sub></b> = Back pressure from volumeter exerted on gas flow
(mm H2O) <br>
<b>P<sub>s</sub></b> = Standard Pressure (i.e. 760 mm Hg) <br>
<br>
<b>F = 60 * V / t </b> <br>
where: <br>
<b>F</b> = Flow rate of gas, corrected to STP (0°C, 760 mm Hg) <br>
<b>V</b> = Volume of gas collected (mL, cubic centimeters) by
volumter during calibration <br>
<b>t</b> = Time (seconds) required to collect gas volume <br>
<br>
<br>
<br>
")
),
tabPanel("Set-up",
p(h5("Volumeter Set-up:")),
img(src="VolumeterSetup.jpg", height = 400),
helpText("The volumeter calibration system consists of:"),
HTML("
<b>Volumeter</b> (calibrated glass cylinder for collecting gas) <br>
<b>Mercurial Barometer</b> (for measuring barometric pressure) <br>
<b>Water Manometer</b> (for measuring the backpressure on the gas) <br>
<b>Stopwatch</b> (for measuring the time
required for gas to traverse volume <br>
")
)
)
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svrpath.R
\name{svrpath}
\alias{svrpath}
\title{Fit the entire regularization path for Support Vector Regression}
\usage{
svrpath(x, y, svr.eps = 1, kernel.function = radial.kernel,
param.kernel = 1, ridge = 1e-08, eps = 1e-08, lambda.min = 1e-08, ...)
}
\arguments{
\item{x}{The data matrix (n x p) with n rows (observations) on p variables (columns)}
\item{y}{The real number valued response variable}
\item{svr.eps}{An epsilon in epsilon-insensitive loss function}
\item{kernel.function}{This is a user-defined function. Provided are \code{poly.kernel} (the default, with parameter set to default to a linear kernel) and \code{radial.kernel}}
\item{param.kernel}{The parameter(s) for the kernel. For this radial kernel, the parameter is known in the fields as "gamma". For the polynomial kernel, it is the "degree"}
\item{ridge}{Sometimes the algorithm encounters singularities; in this case a small value of ridge can help, default is \code{ridge = 1e-8}}
\item{eps}{A small machine number which is used to identify minimal step sizes}
\item{lambda.min}{The smallest value of lambda for termination of the algorithm. Default is \code{lambda = 1e-8}}
\item{...}{Generic compatibility}
}
\value{
A 'svrpath' object is returned, for which there are \code{lambda} values and corresponding values of \code{theta} for each data point.
}
\description{
This algorithm computes the entire regularization path for support vector regression at a relatively low computational cost compared to solving the corresponding quadratic programming problem at each value of the regularization parameter.
}
\examples{
set.seed(1)
n <- 30
p <- 50
x <- matrix(rnorm(n*p), n, p)
e <- rnorm(n, 0, 1)
beta <- c(1, 1, rep(0, p-2))
y <- x \%*\% beta + e
svr.eps <- 1
obj <- svrpath(x, y, svr.eps = svr.eps)
}
\seealso{
\code{\link{predict.svrpath}}, \code{\link{plot.svrpath}}, \code{\link{epspath}}
}
\author{
Do Hyun Kim, Seung Jun Shin
}
| /man/svrpath.Rd | no_license | cran/svrpath | R | false | true | 1,928 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svrpath.R
\name{svrpath}
\alias{svrpath}
\title{Fit the entire regularization path for Support Vector Regression}
\usage{
svrpath(x, y, svr.eps = 1, kernel.function = radial.kernel,
param.kernel = 1, ridge = 1e-08, eps = 1e-08, lambda.min = 1e-08, ...)
}
\arguments{
\item{x}{The data matrix (n x p) with n rows (observations) on p variables (columns)}
\item{y}{The real number valued response variable}
\item{svr.eps}{An epsilon in epsilon-insensitive loss function}
\item{kernel.function}{This is a user-defined function. Provided are \code{poly.kernel} (the default, with parameter set to default to a linear kernel) and \code{radial.kernel}}
\item{param.kernel}{The parameter(s) for the kernel. For this radial kernel, the parameter is known in the fields as "gamma". For the polynomial kernel, it is the "degree"}
\item{ridge}{Sometimes the algorithm encounters singularities; in this case a small value of ridge can help, default is \code{ridge = 1e-8}}
\item{eps}{A small machine number which is used to identify minimal step sizes}
\item{lambda.min}{The smallest value of lambda for termination of the algorithm. Default is \code{lambda = 1e-8}}
\item{...}{Generic compatibility}
}
\value{
A 'svrpath' object is returned, for which there are \code{lambda} values and corresponding values of \code{theta} for each data point.
}
\description{
This algorithm computes the entire regularization path for support vector regression at a relatively low computational cost compared to solving the corresponding quadratic programming problem at each value of the regularization parameter.
}
\examples{
set.seed(1)
n <- 30
p <- 50
x <- matrix(rnorm(n*p), n, p)
e <- rnorm(n, 0, 1)
beta <- c(1, 1, rep(0, p-2))
y <- x \%*\% beta + e
svr.eps <- 1
obj <- svrpath(x, y, svr.eps = svr.eps)
}
\seealso{
\code{\link{predict.svrpath}}, \code{\link{plot.svrpath}}, \code{\link{epspath}}
}
\author{
Do Hyun Kim, Seung Jun Shin
}
|
# LOAD LIBRARIES AND SOURCES --->
library(rjson)
library(deSolve)
source('SystemEquation.R')
# <--- LOAD LIBRARIES AND SOURCES
# SOLVE IVP USING deSolve --->
# Right-hand side of the ODE system, in the signature deSolve::ode() expects:
# the function must return a list whose first element is the vector of state
# derivatives evaluated at time t.
IVP.Dynamics <- function(t, state, parameters) {
  with(as.list(c(state, parameters)), {
    derivatives <- system.Dynamics(
      x = state,
      t = t,
      params = parameters,
      delta.t = time.delta  # NOTE(review): time.delta is resolved from the calling environment
    )
    list(derivatives)
  })
}
IVP.initialstate <- c( "x1" = system.parameters[["x1.0"]],
"x2" = system.parameters[["x2.0"]],
"x3" = system.parameters[["x3.0"]],
"x4" = system.parameters[["x4.0"]],
"x5" = system.parameters[["x5.0"]],
"x6" = system.parameters[["x6.0"]])
IVP.solution <- ode( y = IVP.initialstate,
times = time.sequence,
func = IVP.Dynamics,
parms = system.parameters)
IVP.solution <- data.frame("time" = IVP.solution[,1],
"x1" = IVP.solution[,2],
"x2" = IVP.solution[,3],
"x3" = IVP.solution[,4],
"x4" = IVP.solution[,5],
"x5" = IVP.solution[,6],
"x6" = IVP.solution[,7])
IVP.observation <- data.frame("time" = IVP.solution$time)
IVP.observation["y1"] <- NA
IVP.observation["y2"] <- NA
IVP.observation["y3"] <- NA
IVP.observation.unperturbed <- data.frame("time" = IVP.solution$time)
IVP.observation.unperturbed["y1"] <- NA
IVP.observation.unperturbed["y2"] <- NA
IVP.observation.unperturbed["y3"] <- NA
for (i in 1:length(time.sequence)) {
IVP.observation[i,-1] <- system.PerturbedObservation(
x = IVP.solution[i,-1],
t = IVP.observation$time[i],
params = system.parameters)
}
for (i in 1:length(time.sequence)) {
IVP.observation.unperturbed[i,-1] <- system.ExpectedObservation(
x = IVP.solution[i,-1],
t = IVP.observation$time[i])
}
# <--- SOLVE IVP USING deSolve
# WRITE SAMPLED DATA INTO FILE --->
write( toJSON(IVP.observation), file = "C:/Users/kahl/Documents/Data/review_observation_json.txt")
write( toJSON(IVP.solution) , file = "C:/Users/kahl/Documents/Data/review_truestate_json.txt")
write.table(IVP.solution, 'state.data.txt', sep = "\t")
write.table(IVP.observation,'observation.data.txt', sep = "\t")
write.table(IVP.observation.unperturbed, 'unperturbedobservation.data.txt', sep = "\t")
# <--- WRITE SAMPLED DATA INTO FILE | /KalmanFilter/JAK-STAT/SampleData.R | no_license | Dominik12345/R | R | false | false | 2,553 | r | # LOAD LIBRARIES AND SOURCES --->
library(rjson)
library(deSolve)
source('SystemEquation.R')
# <--- LOAD LIBRARIES AND SOURCES
# SOLVE IVP USING deSolve --->
# Right-hand side of the ODE system, in the signature deSolve::ode() expects:
# the function must return a list whose first element is the vector of state
# derivatives evaluated at time t.
IVP.Dynamics <- function(t, state, parameters) {
  with(as.list(c(state, parameters)), {
    derivatives <- system.Dynamics(
      x = state,
      t = t,
      params = parameters,
      delta.t = time.delta  # NOTE(review): time.delta is resolved from the calling environment
    )
    list(derivatives)
  })
}
IVP.initialstate <- c( "x1" = system.parameters[["x1.0"]],
"x2" = system.parameters[["x2.0"]],
"x3" = system.parameters[["x3.0"]],
"x4" = system.parameters[["x4.0"]],
"x5" = system.parameters[["x5.0"]],
"x6" = system.parameters[["x6.0"]])
IVP.solution <- ode( y = IVP.initialstate,
times = time.sequence,
func = IVP.Dynamics,
parms = system.parameters)
IVP.solution <- data.frame("time" = IVP.solution[,1],
"x1" = IVP.solution[,2],
"x2" = IVP.solution[,3],
"x3" = IVP.solution[,4],
"x4" = IVP.solution[,5],
"x5" = IVP.solution[,6],
"x6" = IVP.solution[,7])
IVP.observation <- data.frame("time" = IVP.solution$time)
IVP.observation["y1"] <- NA
IVP.observation["y2"] <- NA
IVP.observation["y3"] <- NA
IVP.observation.unperturbed <- data.frame("time" = IVP.solution$time)
IVP.observation.unperturbed["y1"] <- NA
IVP.observation.unperturbed["y2"] <- NA
IVP.observation.unperturbed["y3"] <- NA
for (i in 1:length(time.sequence)) {
IVP.observation[i,-1] <- system.PerturbedObservation(
x = IVP.solution[i,-1],
t = IVP.observation$time[i],
params = system.parameters)
}
for (i in 1:length(time.sequence)) {
IVP.observation.unperturbed[i,-1] <- system.ExpectedObservation(
x = IVP.solution[i,-1],
t = IVP.observation$time[i])
}
# <--- SOLVE IVP USING deSolve
# WRITE SAMPLED DATA INTO FILE --->
write( toJSON(IVP.observation), file = "C:/Users/kahl/Documents/Data/review_observation_json.txt")
write( toJSON(IVP.solution) , file = "C:/Users/kahl/Documents/Data/review_truestate_json.txt")
write.table(IVP.solution, 'state.data.txt', sep = "\t")
write.table(IVP.observation,'observation.data.txt', sep = "\t")
write.table(IVP.observation.unperturbed, 'unperturbedobservation.data.txt', sep = "\t")
# <--- WRITE SAMPLED DATA INTO FILE |
# AFL-fuzzer-generated regression input for the internal myTAI:::cpp_TAI routine:
# a 5 x 7 expression matrix (mostly zeros plus extreme doubles) and an empty
# phylostratum vector, to exercise edge-case handling in the C++ code.
testlist <- list(ExpressionSet = structure(c(3.10503529562433e+231, 1.23181983389617e+58, 9.52988885923946e+243, 6.36967296041789e+178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), Phylostratum = numeric(0))
# Run the routine on the fuzzed input and print the structure of the result
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result)
result <- do.call(myTAI:::cpp_TAI,testlist)
str(result) |
# Plot murder rates by state abbreviation as a sorted horizontal bar chart.
library(tidyverse)
# Load the pre-wrangled `murders` data frame (produced by the wrangling script)
load("rda/murders.rda")

# Reorder state abbreviations by rate so bars appear sorted after coord_flip()
murders %>%
  mutate(abb = reorder(abb, rate)) %>%
  ggplot(aes(abb, rate)) +
  geom_bar(width = 0.5, stat = "identity", color = "black") +
  coord_flip()
# Save the most recently displayed plot to disk
ggsave("figs/barplot.png")
| /analysis.R | no_license | RickyRahardja/murders | R | false | false | 226 | r | library(tidyverse)
load("rda/murders.rda")
murders %>%
mutate(abb = reorder(abb, rate)) %>%
ggplot(aes(abb, rate)) +
geom_bar(width = 0.5, stat = "identity", color = "black") +
coord_flip()
ggsave("figs/barplot.png")
|
#' @include FLMatrix.R
NULL
#' Matrix Transpose.
#'
#' \code{t} returns the transpose of FLMatrix objects.
#'
#' @param object is of class FLMatrix
#' @param ... any additional arguments
#' @section Constraints:
#' Input can be a matrix of dimensions (m x n) where m > n, m < n or m = n.
#' @return \code{t} returns a FLMatrix object which is the transpose of input FLMatrix object
#' and replicates the equivalent R output.
#' @examples
#' flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL")
#' resultFLMatrix <- t(flmatrix)
#' @export
t <- function(object, ...) {
  # S3 generic masking base::t so that FLMatrix objects can dispatch to their
  # own transpose methods; all other classes fall through to t.default.
  UseMethod("t", object)
}
#' @export
t.FLMatrix <- function(object, ...) {
  # Lazy transpose: swap the row/column metadata in place instead of moving
  # any remote data.
  object@dimColumns[c(2, 3)] <- object@dimColumns[c(3, 2)]
  object@select@variables[c(2, 3)] <- object@select@variables[c(3, 2)]
  object@dims <- rev(object@dims)
  if (!is.null(object@Dimnames)) {
    object@Dimnames <- rev(object@Dimnames)
  }
  object
}
#' @export
t.FLMatrixBind<-function(object,...){
    # Materialize the bound matrix first via store(), then transpose the
    # stored result; a lazy transpose of a bind would require the deep
    # row/column index swap noted in the TODO below.
    ## gk: todo: design deep row/column index swap
    return(t(store(object)))
}
| /R/FLTranspose.R | no_license | mohakuma/AdapteR | R | false | false | 1,066 | r | #' @include FLMatrix.R
NULL
#' Matrix Transpose.
#'
#' \code{t} returns the transpose of FLMatrix objects.
#'
#' @param object is of class FLMatrix
#' @param ... any additional arguments
#' @section Constraints:
#' Input can be a matrix of dimensions (m x n) where m > n, m < n or m = n.
#' @return \code{t} returns a FLMatrix object which is the transpose of input FLMatrix object
#' and replicates the equivalent R output.
#' @examples
#' flmatrix <- FLMatrix("tblMatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL")
#' resultFLMatrix <- t(flmatrix)
#' @export
t<-function(object, ...){
UseMethod("t", object)
}
#' @export
t.FLMatrix <- function(object, ...) {
  # Lazy transpose: swap the row/column metadata in place instead of moving
  # any remote data.
  object@dimColumns[c(2, 3)] <- object@dimColumns[c(3, 2)]
  object@select@variables[c(2, 3)] <- object@select@variables[c(3, 2)]
  object@dims <- rev(object@dims)
  if (!is.null(object@Dimnames)) {
    object@Dimnames <- rev(object@Dimnames)
  }
  object
}
#' @export
t.FLMatrixBind<-function(object,...){
## gk: todo: design deep row/column index swap
return(t(store(object)))
}
|
#####################################################################
# ~~~~~~~~~~~~~~~~~~
# Tumor subtype and cell type independent DNA methylation alterations
# associated with stage progression in invasive breast carcinoma
# ~~~~~~~~~~~~~~~~~~
# Way, G., Johnson, K., Christensen, B. 2015
#
# Examine whether DNA methylation of selected CpGs is associated with
# gene expression
#####################################################################
################################
#Load Libraries and the Plot Function
################################
library(readr)
library(plyr)
source("IV.Genomic_analysis/Scripts/Functions/MethRNAseq_functions.R")
source("III.DMGR_analysis/Scripts/Functions/make_heatmaps.R") #Use the scripts in here to extract the common CpGs
################################
# Load and Subset Data
################################
# Stages and subtypes of interest
low <- c("Stage I", "Stage IA", "Stage IB", "Stage II", "Stage IIA", "Stage IIB")
high <- c("Stage III", "Stage IIIA", "Stage IIIB", "Stage IIIC", "Stage IV")
stages <- list(low = low, high = high)
subtypes <- c("Basal", "Her2", "LumA", "LumB", "Normal")
# Load Betas
Betas <- read_tsv("I.Data_Processing/Data/TCGA_BRCA_Betas.tsv")
rownames(Betas) <- Betas[[1]]
Betas[[1]] <- NULL
Betas <- as.data.frame(Betas)
# Load TCGA BRCA Normal RNAseq Data
NormalRNAseq <- read_tsv("IV.Genomic_analysis/Data/unc.edu_BRCA_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_normal")
rownames(NormalRNAseq) <- NormalRNAseq[[1]]
NormalRNAseq[[1]] <- NULL
NormalRNAseq <- as.data.frame(NormalRNAseq)
NormalRNAseq <- NormalRNAseq[-grep("[?]", laply(rownames(NormalRNAseq),
function (x) {unlist(strsplit(x, "[|]"))[1]})), ]
NormalRNAseq <- NormalRNAseq[-grep("SLC35E2", laply(rownames(NormalRNAseq),
function (x) {unlist(strsplit(x, "[|]"))[1]})), ]
colnames(NormalRNAseq) <- substr(colnames(NormalRNAseq), 1, 15)
colnames(NormalRNAseq) <- gsub("-", ".", colnames(NormalRNAseq))
rownames(NormalRNAseq) <- laply(rownames(NormalRNAseq), function (x) {unlist(strsplit(x, "[|]"))[1]})
# Load annotation file
annotation <- read_csv("I.Data_Processing/Files/HumanMethylation450K_Annotation_File.csv", skip = 7)
annotation <- as.data.frame(annotation)
# Load Covariates
covariates <- read.table("I.Data_Processing/Files/BRCAtarget_covariates.csv", row.names = 1,
header = T, sep = ",", stringsAsFactors = F)
# The colnames for the beta file have an "X" appended to the beginning of each basename, remove it
rownames(covariates) <- covariates$Basename
# Subset the covariate data to only the samples in the beta file and then to only the ones with PAM50 data
covariates <- covariates[intersect(rownames(covariates), colnames(Betas)), ]
covariates <- covariates[covariates$PAM50.RNAseq != "", ]
# Interested in "low" vs "high"
for (i in 1:length(stages)) {
subset <- stages[[i]]
for (j in 1:length(subset)) {
covariates$pathologic_stage[covariates$pathologic_stage == subset[j]] <- names(stages)[i]
}
}
# Make sure the "tumor adjacent" samples are marked in this column
covariates$pathologic_stage[covariates$sample.type == "Solid Tissue Normal"] <- "normal"
# Only accept samples that have high or low assignments
covariates <- covariates[covariates$pathologic_stage == "low" | covariates$pathologic_stage == "high" | covariates$pathologic_stage == "normal",]
# Subset Betas to those samples with PAM50 data and stage of interest
Betas <- Betas[ ,rownames(covariates)]
################################
# Run Function
################################
# Load Common Overlaps
CommonOverlaps <- read.csv("III.DMGR_analysis/Tables/commonLowStageOverlaps_FullAnnotation_extended.csv",
row.names = 1, header = T, stringsAsFactors = F)
# Get all the genes in common to low stage tumors
Genes <- laply(rownames(CommonOverlaps), function(x){unlist(strsplit(x, " "))[1]})
# What are the number of comparisons made here? Bonferroni adjusted p value required.
num_unique_cpgs <- 0
for (gene in 1:length(Genes)) {
CpGs <- unique(ExtractCommonCGs(Genes[gene], CommonOverlaps))
num_cpgs <- length(CpGs)
num_unique_cpgs <- num_unique_cpgs + num_cpgs
}
# 101 Unique CpGs, made for 6 comparisons (5 subtypes + all)
# Bonferroni adjustment should be made for 6 * 101 =
alpha <- 0.05 / (6 * num_unique_cpgs)
# Loop over all gene regions: plot methylation vs. expression for every
# gene-CpG pair, write the correlation table to disk, and collect rows with
# significant correlations into `significantCor`.
significantCor <- c()
for (gene in seq_along(Genes)) {
  # Extract the CpGs associated with this common DMGR
  CpGs <- unique(ExtractCommonCGs(rownames(CommonOverlaps)[gene], CommonOverlaps))
  for (i in seq_along(CpGs)) {
    # Create and save the plot for this combination of CpG and gene
    png(paste("IV.Genomic_analysis/Figures/GeneExprs/", Genes[gene], "_", CpGs[i], ".png", sep = ""),
        height = 400, width = 400)
    corTable <- methSeqPlot(gene = Genes[gene], betas = Betas, cg = CpGs[i], covariates = covariates,
                            method = 'spearman', stages = "low", subtypes = subtypes,
                            normalExprs = NormalRNAseq)
    dev.off()
    # Output the correlation analysis to file as well
    write.table(corTable, paste("IV.Genomic_analysis/Tables/GeneExprs/", Genes[gene], "_", CpGs[i], ".csv", sep = ""),
                row.names = T, col.names = NA, sep = ",")
    # Test if there are any significant findings.
    # BUGFIX: the original gate used `length(corTable[mask, ]) > 0`, but
    # subsetting a data frame by rows keeps all columns, so length() returns
    # the column count and the gate was always TRUE. Use any() over the
    # p-value columns (3 and 4) instead, treating NA p-values as not significant.
    if (any(corTable[, 3] <= alpha, na.rm = TRUE) | any(corTable[, 4] <= alpha, na.rm = TRUE)) {
      for (sig_row in seq_len(nrow(corTable))) {
        sigHit <- paste("IV.Genomic_analysis/Tables/GeneExprs/", Genes[gene], "_", CpGs[i], ".csv", sep = "")
        # NOTE(review): rows are collected at the nominal 0.05 level even though
        # the gate above uses the Bonferroni-adjusted alpha -- confirm intended.
        # isTRUE() guards the scalar if() against NA p-values.
        if (isTRUE(corTable[sig_row, 3] <= 0.05) | isTRUE(corTable[sig_row, 4] <= 0.05)) {
          hitinclude <- c(sigHit, corTable[sig_row, ], rownames(corTable)[sig_row])
          significantCor <- rbind(significantCor, hitinclude)
        }
      }
    }
  }
}
| /IV.Genomic_analysis/Scripts/A.MethRNAseq.R | permissive | Christensen-Lab-Dartmouth/brca_lowstage_DMGRs | R | false | false | 6,282 | r | #####################################################################
# ~~~~~~~~~~~~~~~~~~
# Tumor subtype and cell type independent DNA methylation alterations
# associated with stage progression in invasive breast carcinoma
# ~~~~~~~~~~~~~~~~~~
# Way, G., Johnson, K., Christensen, B. 2015
#
# Examine whether DNA methylation of selected CpGs is associated with
# gene expression
#####################################################################
################################
#Load Libraries and the Plot Function
################################
library(readr)
library(plyr)
source("IV.Genomic_analysis/Scripts/Functions/MethRNAseq_functions.R")
source("III.DMGR_analysis/Scripts/Functions/make_heatmaps.R") #Use the scripts in here to extract the common CpGs
################################
# Load and Subset Data
################################
# Stages and subtypes of interest: early (I/II) tumors are pooled as "low",
# late (III/IV) tumors as "high"; subtypes are the five PAM50 calls
low <- c("Stage I", "Stage IA", "Stage IB", "Stage II", "Stage IIA", "Stage IIB")
high <- c("Stage III", "Stage IIIA", "Stage IIIB", "Stage IIIC", "Stage IV")
stages <- list(low = low, high = high)
subtypes <- c("Basal", "Her2", "LumA", "LumB", "Normal")
# Load Betas (450K methylation beta values); the first column is promoted to
# rownames -- presumably CpG probe IDs, matched later against `CpGs` -- TODO confirm
Betas <- read_tsv("I.Data_Processing/Data/TCGA_BRCA_Betas.tsv")
rownames(Betas) <- Betas[[1]]
Betas[[1]] <- NULL
Betas <- as.data.frame(Betas)
# Load TCGA BRCA Normal RNAseq Data; rownames are "symbol|id" strings split below
NormalRNAseq <- read_tsv("IV.Genomic_analysis/Data/unc.edu_BRCA_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_normal")
rownames(NormalRNAseq) <- NormalRNAseq[[1]]
NormalRNAseq[[1]] <- NULL
NormalRNAseq <- as.data.frame(NormalRNAseq)
# Drop rows whose gene symbol (text before "|") is the unknown marker "?" ...
NormalRNAseq <- NormalRNAseq[-grep("[?]", laply(rownames(NormalRNAseq),
                                                function (x) {unlist(strsplit(x, "[|]"))[1]})), ]
# ... and drop SLC35E2 rows so symbols can serve as unique rownames below
NormalRNAseq <- NormalRNAseq[-grep("SLC35E2", laply(rownames(NormalRNAseq),
                                                    function (x) {unlist(strsplit(x, "[|]"))[1]})), ]
# Normalize sample IDs: truncate barcodes to 15 characters and swap "-" for "."
# so they match the naming used by the methylation and covariate tables
colnames(NormalRNAseq) <- substr(colnames(NormalRNAseq), 1, 15)
colnames(NormalRNAseq) <- gsub("-", ".", colnames(NormalRNAseq))
# Keep only the gene-symbol half of each "symbol|id" rowname
rownames(NormalRNAseq) <- laply(rownames(NormalRNAseq), function (x) {unlist(strsplit(x, "[|]"))[1]})
# Load annotation file (Illumina 450K manifest; skip its 7-line preamble)
annotation <- read_csv("I.Data_Processing/Files/HumanMethylation450K_Annotation_File.csv", skip = 7)
annotation <- as.data.frame(annotation)
# Load Covariates (per-sample clinical data, re-keyed by array basename below)
covariates <- read.table("I.Data_Processing/Files/BRCAtarget_covariates.csv", row.names = 1,
                         header = T, sep = ",", stringsAsFactors = F)
# The colnames for the beta file have an "X" appended to the beginning of each basename, remove it
rownames(covariates) <- covariates$Basename
# Subset the covariate data to only the samples in the beta file and then to only the ones with PAM50 data
covariates <- covariates[intersect(rownames(covariates), colnames(Betas)), ]
covariates <- covariates[covariates$PAM50.RNAseq != "", ]
# Interested in "low" vs "high": recode each specific pathologic stage label
# to its pooled group name ("low" or "high") from the `stages` list
for (i in 1:length(stages)) {
  subset <- stages[[i]]
  for (j in 1:length(subset)) {
    covariates$pathologic_stage[covariates$pathologic_stage == subset[j]] <- names(stages)[i]
  }
}
# Make sure the "tumor adjacent" samples are marked in this column
covariates$pathologic_stage[covariates$sample.type == "Solid Tissue Normal"] <- "normal"
# Only accept samples that have high or low assignments (tumor-adjacent normals kept too)
covariates <- covariates[covariates$pathologic_stage == "low" | covariates$pathologic_stage == "high" | covariates$pathologic_stage == "normal",]
# Subset Betas to those samples with PAM50 data and stage of interest
Betas <- Betas[ ,rownames(covariates)]
################################
# Run Function
################################
# Load the table of gene regions common to low-stage tumors; rownames are
# "GENE region" strings, from which the gene symbol is extracted below
CommonOverlaps <- read.csv("III.DMGR_analysis/Tables/commonLowStageOverlaps_FullAnnotation_extended.csv",
                           row.names = 1, header = TRUE, stringsAsFactors = FALSE)
# Gene symbol = first whitespace-delimited token of each rowname
Genes <- laply(rownames(CommonOverlaps), function(x){unlist(strsplit(x, " "))[1]})
# Count the CpGs tested so a Bonferroni-adjusted p value can be derived.
# NOTE(review): this sums per-gene unique CpG counts, so a CpG mapped to more
# than one gene is counted once per gene -- confirm this matches the intended
# correction. Original code accumulated in a loop; sum(vapply(...)) is the
# vectorized equivalent with the same result.
num_unique_cpgs <- sum(vapply(seq_along(Genes), function(gene) {
  length(unique(ExtractCommonCGs(Genes[gene], CommonOverlaps)))
}, numeric(1)))
# 101 unique CpGs, each tested in 6 comparisons (5 PAM50 subtypes + all samples
# combined); derive the 6 from the subtype vector instead of hard-coding it
num_comparisons <- length(subtypes) + 1
# Bonferroni adjustment over all comparisons * all CpGs
alpha <- 0.05 / (num_comparisons * num_unique_cpgs)
# Loop over all genes: for each gene/CpG pair, plot methylation vs. gene
# expression, write the full correlation table, and accumulate rows of
# nominally significant correlations into `significantCor`
significantCor <- c()
for (gene in seq_along(Genes)) {
  # Extract the CGs associated with this gene region.
  # NOTE(review): the full rowname ("GENE region") is queried here, while the
  # Bonferroni count above queried only the bare gene symbol -- confirm
  # ExtractCommonCGs returns the same CpG sets for both forms.
  CpGs <- unique(ExtractCommonCGs(rownames(CommonOverlaps)[gene], CommonOverlaps))
  for (i in seq_along(CpGs)) {
    # Create and save the methylation-vs-expression plot for this pair
    png(paste0("IV.Genomic_analysis/Figures/GeneExprs/", Genes[gene], "_", CpGs[i], ".png"),
        height = 400, width = 400)
    corTable <- methSeqPlot(gene = Genes[gene], betas = Betas, cg = CpGs[i], covariates = covariates,
                            method = 'spearman', stages = "low", subtypes = subtypes,
                            normalExprs = NormalRNAseq)
    dev.off()
    # Output the correlation analysis to file regardless of significance
    write.table(corTable, paste0("IV.Genomic_analysis/Tables/GeneExprs/", Genes[gene], "_", CpGs[i], ".csv"),
                row.names = T, col.names = NA, sep = ",")
    # Gate on at least one Bonferroni-significant p value (columns 3 and 4).
    # Bug fix: the previous `length(corTable[mask, ]) > 0` test is always TRUE
    # when corTable is a data.frame, because length() of a zero-row data.frame
    # subset still returns the column count; any() tests the p values directly
    # and na.rm = TRUE tolerates NA correlations.
    if (any(corTable[, 3] <= alpha, na.rm = TRUE) || any(corTable[, 4] <= alpha, na.rm = TRUE)) {
      for (sig_row in seq_len(nrow(corTable))) {
        sigHit <- paste0("IV.Genomic_analysis/Tables/GeneExprs/", Genes[gene], "_", CpGs[i], ".csv")
        # NOTE(review): rows are retained at the nominal 0.05 threshold even
        # though the file-level gate uses the Bonferroni alpha -- confirm the
        # mixed thresholds are intentional. isTRUE() guards against NA p values
        # crashing the scalar if().
        if (isTRUE(corTable[sig_row, 3] <= 0.05) || isTRUE(corTable[sig_row, 4] <= 0.05)) {
          hitinclude <- c(sigHit, corTable[sig_row, ], rownames(corTable)[sig_row])
          significantCor <- rbind(significantCor, hitinclude)
        }
      }
    }
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.