content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Name : Manan Bhatt
# CS 513 B - Knowledge Discovery And Data Mining
# Mid Term Examination
# CWID : 104530306

# NOTE(review): the original script began with rm(list=ls()), which silently
# wipes the user's entire workspace; removed, since the script only uses the
# objects it creates itself.

# Interactively pick the input CSV; '?' marks missing values in this dataset.
ChooseFile <- file.choose()
COVID19 <- read.csv(ChooseFile, header = TRUE, na.strings = "?")
View(COVID19)

## Question 1.1: Summary of the Data
summary(COVID19)

## Question 1.2: Identify Missing Values (row indices of the NAs per column)
missing_values_Age <- which(is.na(COVID19$Age))
missing_values_Age
missing_values_MonthAtHospital <- which(is.na(COVID19$MonthAtHospital))
missing_values_MonthAtHospital

## Question 1.3: Generate Frequency Table of Infected vs Marital Status
table(COVID19$Infected, COVID19$MaritalStatus)

## Question 1.4: Scatter plot of Age, Marital Status and MonthAtHospital
## (drops columns 1, 3, 5, 7 -- assumes the remaining columns are the three
## requested variables; TODO confirm against the actual CSV layout)
pairs(COVID19[, -c(1, 3, 5, 7)])

## Question 1.5: Box plot of Age, Marital Status and MonthAtHospital
boxplot(COVID19[, -c(1, 3, 5, 7)], col = c("Blue", "Red", "Green"))

## Question 1.6: Replace missing values of Cases with the mean of Cases.
## BUG FIX: the original did COVID19[is.na(COVID19)] = mean(COVID19$Cases, ...),
## which writes the Cases mean into every NA cell of EVERY column (including
## Age and MonthAtHospital). Only the Cases column should be imputed.
## (A Cases column is assumed present, as in the original -- TODO confirm.)
COVID19$Cases[is.na(COVID19$Cases)] <- mean(COVID19$Cases, na.rm = TRUE)
|
/Mid Term/HW_Midterm_Exam_stevens_Q2.R
|
no_license
|
Manan31/CS-513-Knowledge-Discovery-and-Data-Mining-Stevens
|
R
| false
| false
| 981
|
r
|
# Name : Manan Bhatt
# CS 513 B - Knowledge Discovery And Data Mining
# Mid Term Examination
# CWID : 104530306

# NOTE(review): the original script began with rm(list=ls()), which silently
# wipes the user's entire workspace; removed, since the script only uses the
# objects it creates itself.

# Interactively pick the input CSV; '?' marks missing values in this dataset.
ChooseFile <- file.choose()
COVID19 <- read.csv(ChooseFile, header = TRUE, na.strings = "?")
View(COVID19)

## Question 1.1: Summary of the Data
summary(COVID19)

## Question 1.2: Identify Missing Values (row indices of the NAs per column)
missing_values_Age <- which(is.na(COVID19$Age))
missing_values_Age
missing_values_MonthAtHospital <- which(is.na(COVID19$MonthAtHospital))
missing_values_MonthAtHospital

## Question 1.3: Generate Frequency Table of Infected vs Marital Status
table(COVID19$Infected, COVID19$MaritalStatus)

## Question 1.4: Scatter plot of Age, Marital Status and MonthAtHospital
## (drops columns 1, 3, 5, 7 -- assumes the remaining columns are the three
## requested variables; TODO confirm against the actual CSV layout)
pairs(COVID19[, -c(1, 3, 5, 7)])

## Question 1.5: Box plot of Age, Marital Status and MonthAtHospital
boxplot(COVID19[, -c(1, 3, 5, 7)], col = c("Blue", "Red", "Green"))

## Question 1.6: Replace missing values of Cases with the mean of Cases.
## BUG FIX: the original did COVID19[is.na(COVID19)] = mean(COVID19$Cases, ...),
## which writes the Cases mean into every NA cell of EVERY column (including
## Age and MonthAtHospital). Only the Cases column should be imputed.
## (A Cases column is assumed present, as in the original -- TODO confirm.)
COVID19$Cases[is.na(COVID19$Cases)] <- mean(COVID19$Cases, na.rm = TRUE)
|
# Preliminary TRY trait analysis
#
# Reads a cleaned TRY trait table and a dated vascular-plant phylogeny,
# keeps well-sampled traits, log10-transforms the numeric ones, and fits a
# multivariate Brownian-motion model (mvBM) to two selected traits.

# library() errors immediately if a package is missing, unlike require(),
# which only warns and returns FALSE.
library(parallel)
library(geiger)
library(mvMORPH)
library(ape)  # read.tree()/drop.tip(); previously only attached implicitly

# --- Trim TRY (will get more later once taxon merge is complete) --------------
data <- read.table("clean-ish-try.txt", header = TRUE)
data[data == -9999] <- NA  # -9999 is the file's missing-value sentinel
# Keep only traits with more than 1000 non-missing observations.
# (counter renamed from `t`, which shadowed base::t(); vapply pins the type)
n.obs <- vapply(data, function(x) sum(!is.na(x)), integer(1))
data <- data[, n.obs > 1000]
data$species <- as.character(data$species)

# --- Get numerical traits (crudely) and transform -----------------------------
n.data <- data
# Column 1 is species; coerce every other column to numeric, tolerating
# columns that cannot be coerced (try() swallows the error for those).
for (i in seq(2, ncol(n.data))) {
  try(n.data[, i] <- as.numeric(as.character(n.data[, i])))
}
n.data <- n.data[, vapply(n.data, function(x) sum(!is.na(x)), integer(1)) > 1000]
# Hard-coded indices assume a fixed column order after the filtering above --
# TODO confirm they still point at the intended traits if the input changes.
n.data[, c(2:3, 5:15, 18:21, 24, 26, 27)] <- log10(n.data[, c(2:3, 5:15, 18:21, 24, 26, 27)])
n.data <- n.data[, -2]

# --- Load phylogeny and subset to species present in the trait data -----------
tree <- read.tree("Vascular_Plants_rooted.dated.tre")
tree$tip.label <- tolower(gsub("_", " ", tree$tip.label))  # match TRY naming
tree$node.label <- NULL
tree <- drop.tip(tree, setdiff(tree$tip.label, n.data$species))
n.data <- n.data[n.data$species %in% tree$tip.label, ]

# --- Calculate phylogenetic signal! -------------------------------------------
# Fit a continuous-trait model for one trait column and cache the result to
# "<trait.index>_<model>.RDS". Reads `n.data` and `tree` from the global
# environment; drop.tip() inside only modifies a local copy of `tree`.
wrap.data <- function(trait.index, model) {
  trait <- setNames(n.data[, trait.index], n.data$species)
  trait <- trait[!is.na(trait)]
  tree <- drop.tip(tree, setdiff(tree$tip.label, names(trait)))
  saveRDS(fitContinuous(tree, trait, model = model),
          paste0(trait.index, "_", model, ".RDS"))
}

# --- Multivariate -------------------------------------------------------------
t.data <- data[, c(3, 54)] # Picked by coverage...
# NOTE(review): rownames<- will fail if `data$species` has duplicates -- the
# taxon merge mentioned above presumably guarantees uniqueness; verify.
rownames(t.data) <- data$species
for (i in seq_len(ncol(t.data))) {
  t.data[, i] <- as.numeric(as.character(t.data[, i]))
}
t.data <- na.omit(t.data)
t.tree <- drop.tip(tree, setdiff(tree$tip.label, rownames(t.data)))
model <- mvBM(t.tree, t.data)
save.image("multivariate.RData")
|
/multivariate.R
|
no_license
|
willpearse/sTEP_evol
|
R
| false
| false
| 1,616
|
r
|
# Preliminary TRY trait analysis
#
# Reads a cleaned TRY trait table and a dated vascular-plant phylogeny,
# keeps well-sampled traits, log10-transforms the numeric ones, and fits a
# multivariate Brownian-motion model (mvBM) to two selected traits.

# library() errors immediately if a package is missing, unlike require(),
# which only warns and returns FALSE.
library(parallel)
library(geiger)
library(mvMORPH)
library(ape)  # read.tree()/drop.tip(); previously only attached implicitly

# --- Trim TRY (will get more later once taxon merge is complete) --------------
data <- read.table("clean-ish-try.txt", header = TRUE)
data[data == -9999] <- NA  # -9999 is the file's missing-value sentinel
# Keep only traits with more than 1000 non-missing observations.
# (counter renamed from `t`, which shadowed base::t(); vapply pins the type)
n.obs <- vapply(data, function(x) sum(!is.na(x)), integer(1))
data <- data[, n.obs > 1000]
data$species <- as.character(data$species)

# --- Get numerical traits (crudely) and transform -----------------------------
n.data <- data
# Column 1 is species; coerce every other column to numeric, tolerating
# columns that cannot be coerced (try() swallows the error for those).
for (i in seq(2, ncol(n.data))) {
  try(n.data[, i] <- as.numeric(as.character(n.data[, i])))
}
n.data <- n.data[, vapply(n.data, function(x) sum(!is.na(x)), integer(1)) > 1000]
# Hard-coded indices assume a fixed column order after the filtering above --
# TODO confirm they still point at the intended traits if the input changes.
n.data[, c(2:3, 5:15, 18:21, 24, 26, 27)] <- log10(n.data[, c(2:3, 5:15, 18:21, 24, 26, 27)])
n.data <- n.data[, -2]

# --- Load phylogeny and subset to species present in the trait data -----------
tree <- read.tree("Vascular_Plants_rooted.dated.tre")
tree$tip.label <- tolower(gsub("_", " ", tree$tip.label))  # match TRY naming
tree$node.label <- NULL
tree <- drop.tip(tree, setdiff(tree$tip.label, n.data$species))
n.data <- n.data[n.data$species %in% tree$tip.label, ]

# --- Calculate phylogenetic signal! -------------------------------------------
# Fit a continuous-trait model for one trait column and cache the result to
# "<trait.index>_<model>.RDS". Reads `n.data` and `tree` from the global
# environment; drop.tip() inside only modifies a local copy of `tree`.
wrap.data <- function(trait.index, model) {
  trait <- setNames(n.data[, trait.index], n.data$species)
  trait <- trait[!is.na(trait)]
  tree <- drop.tip(tree, setdiff(tree$tip.label, names(trait)))
  saveRDS(fitContinuous(tree, trait, model = model),
          paste0(trait.index, "_", model, ".RDS"))
}

# --- Multivariate -------------------------------------------------------------
t.data <- data[, c(3, 54)] # Picked by coverage...
# NOTE(review): rownames<- will fail if `data$species` has duplicates -- the
# taxon merge mentioned above presumably guarantees uniqueness; verify.
rownames(t.data) <- data$species
for (i in seq_len(ncol(t.data))) {
  t.data[, i] <- as.numeric(as.character(t.data[, i]))
}
t.data <- na.omit(t.data)
t.tree <- drop.tip(tree, setdiff(tree$tip.label, rownames(t.data)))
model <- mvBM(t.tree, t.data)
save.image("multivariate.RData")
|
bc9f3288f3bf3e37cb38858bdc0523e9 gttt_1_1_00011011_4x4_torus_w.qdimacs 6308 14617
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Diptarama-Jordan-Shinohara/Generalized-Tic-Tac-Toe/gttt_1_1_00011011_4x4_torus_w/gttt_1_1_00011011_4x4_torus_w.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 81
|
r
|
bc9f3288f3bf3e37cb38858bdc0523e9 gttt_1_1_00011011_4x4_torus_w.qdimacs 6308 14617
|
################################################################################
## Source: Coursera
## Specialization: Data Science
## Course: 4 Exploratory Data Analysis
## Week: Week 4
## Project: Project 2
## File: plot3.R
## Date: 2016-02-06
################################################################################
################################################################################
## Question 3:
## Of the four types of sources indicated by the type (point, nonpoint, onroad,
## nonroad) variable, which of these four sources have seen decreases
## in emissions from 1999–2008 for Baltimore City? Which have seen increases
## in emissions from 1999–2008?
## Use the ggplot2 plotting system to make a plot answer this question.
################################################################################
################################################################################

# Read the PM2.5 emissions records and the source classification code table.
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")

library(dplyr)

# Restrict to Baltimore City, Maryland (fips code "24510").
baltimore_nei <- subset(NEI, fips=="24510")

# Total emissions per year within each source type; the bare name afterwards
# shows the summary when evaluated interactively.
emissions_by_year_type <- baltimore_nei %>%
  group_by(year, type) %>%
  summarise(total.emissions = sum(Emissions))
emissions_by_year_type

library(ggplot2)

# One facet per source type, each showing the emissions trend over the years.
ggplot(emissions_by_year_type, aes(x=year, y=total.emissions)) +
  geom_line() +
  facet_grid(. ~ type) +
  ggtitle("Total Emissions Per Source Type During 1999-2008") +
  labs(x="Year", y="Total Emissions (in tons)") +
  scale_x_continuous(breaks=seq(1999, 2008, by=3))

# Save the most recently created plot to the output directory.
ggsave(file="output/plot3.png", width=9.5, height=4.5)
################################################################################
|
/04-exploratory-data-analysis/projects/project-02/code/plot3.R
|
no_license
|
Pratiksapkota169/coursera-data-science-specialization
|
R
| false
| false
| 1,707
|
r
|
################################################################################
## Source: Coursera
## Specialization: Data Science
## Course: 4 Exploratory Data Analysis
## Week: Week 4
## Project: Project 2
## File: plot3.R
## Date: 2016-02-06
################################################################################
################################################################################
## Question 3:
## Of the four types of sources indicated by the type (point, nonpoint, onroad,
## nonroad) variable, which of these four sources have seen decreases
## in emissions from 1999–2008 for Baltimore City? Which have seen increases
## in emissions from 1999–2008?
## Use the ggplot2 plotting system to make a plot answer this question.
################################################################################
################################################################################
# Load the full PM2.5 emissions records and the source classification table.
# (SCC is read for completeness; this particular plot only uses NEI.)
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
library(dplyr)
# Keep only Baltimore City, Maryland records (fips code "24510").
baltimore_data <- subset(NEI, fips=="24510")
# Sum emissions within each (year, type) group; the bare name on the line
# after the pipeline shows the table when evaluated interactively.
total_emissions_by_year_and_type <- baltimore_data %>%
group_by(year, type) %>%
summarise(total.emissions = sum(Emissions))
total_emissions_by_year_and_type
library(ggplot2)
# One facet per source type; the line traces the emissions trend over years.
ggplot(total_emissions_by_year_and_type,
aes(x=year, y=total.emissions)) +
geom_line() +
facet_grid(. ~ type) +
ggtitle("Total Emissions Per Source Type During 1999-2008") +
labs(x="Year", y="Total Emissions (in tons)") +
scale_x_continuous(breaks=seq(1999, 2008, by=3))
# Saves the most recent plot; assumes an "output/" directory exists -- TODO confirm.
ggsave(file="output/plot3.png", width=9.5, height=4.5)
################################################################################
|
# ------------------------------------------------------------------------------
# Purled R code of the `iconr` package vignette (inst/doc/index.R).
# Each "## ----<name> ..." line below is a knitr chunk header emitted by
# knitr::purl(); the chunks load decoration graphs (nodes/edges from
# shapefiles or TSV in the package's extdata), plot them, and compare
# decorations by shared nodes/edges. NOTE(review): this file is generated --
# edits here are normally lost when the vignette is re-purled.
# ------------------------------------------------------------------------------
## ---- include = FALSE---------------------------------------------------------
library(knitr)
library(igraph)
library(dplyr)
library(kableExtra)
knitr::opts_chunk$set(
collapse = TRUE,
comment = ">",
fig.pos = 'H'
)
# (earlier, now-disused figure-path variants kept below for reference)
# ibahernando.path <- paste0(getwd(),"/img/ibahernando_256colours.png")
# brozas.path <- paste0(getwd(),"/img/brozas_256colours.png")
# solanas.path <- paste0(getwd(),"/img/solana_detail_256colours.png")
# solanas.vor.path <- paste0(getwd(),"/img/solana_voronoi_256colours.png")
# ibahernando.path <- "img/ibahernando_256colours.png"
# brozas.path <- "img/brozas_256colours.png"
# solanas.path <- "img/solana_detail_256colours.png"
# solanas.vor.path <- "img/solana_voronoi_256colours.png"
ibahernando.path <- "../man/figures/ibahernando_256colours.png"
brozas.path <- "../man/figures/brozas_256colours.png"
solanas.path <- "../man/figures/solana_detail_256colours.png"
solanas.vor.path <- "../man/figures/solana_voronoi_256colours.png"
## ----down,eval=FALSE, echo=TRUE-----------------------------------------------
# devtools::install_github("zoometh/iconr", build_vignettes=TRUE)
## ----load, echo=TRUE----------------------------------------------------------
library(iconr)
## ----ls_ext_data--------------------------------------------------------------
dataDir <- system.file("extdata", package = "iconr")
input.files <- list.files(dataDir)
cat(input.files, sep="\n")
## ----paths.imgs, echo=TRUE----------------------------------------------------
imgs_path <- paste0(dataDir, "/imgs.csv")
imgs <- read.table(imgs_path, sep=";", stringsAsFactors = FALSE)
## ----paths, echo=TRUE---------------------------------------------------------
# NOTE(review): rgdal was archived from CRAN in 2023 -- sf::st_read() is the
# usual replacement for reading these shapefiles; confirm before updating.
nodes_path <- paste0(dataDir, "/nodes.shp")
nodes.shp <- rgdal::readOGR(dsn = nodes_path, verbose = FALSE)
nodes <- as.data.frame(nodes.shp)
edges_path <- paste0(dataDir, "/edges.shp")
edges.shp <- rgdal::readOGR(dsn = edges_path, verbose = FALSE)
edges <- as.data.frame(edges.shp)
## ----paths.1, echo=TRUE-------------------------------------------------------
# Same node/edge tables, this time from the TSV copies (rebinds nodes/edges).
nodes_path <- paste0(dataDir, "/nodes.tsv")
nodes <- read.table(nodes_path, sep="\t", stringsAsFactors = FALSE)
edges_path <- paste0(dataDir, "/edges.tsv")
edges <- read.table(edges_path, sep="\t", stringsAsFactors = FALSE)
## ----graph.clss---------------------------------------------------------------
lgrph <- list_dec(imgs, nodes, edges)
g <- lgrph[[1]]
as.character(class(g))
## ----igraph.1, warning=FALSE, fig.align="center", fig.width=6.5, fig.asp=0.58----
# NOTE(review): on.exit() only has effect inside a function; at the top level
# of a sourced script it is a no-op, so par() is not actually restored here.
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mar=c(1, 0, 2, 0), mfrow=c(1, 2), cex.main = 0.9, font.main = 1)
coords <- layout.fruchterman.reingold(lgrph[[1]])
plot(g,
vertex.size = 15,
vertex.frame.color="white",
vertex.label.family = "sans",
vertex.label.cex = .8,
main = "Graph drawing based on x, y coordinates"
)
plot(g,
layout = layout.fruchterman.reingold(g),
vertex.size = 5 + degree(g)*10,
vertex.frame.color="white",
vertex.label.family = "sans",
vertex.label.cex = .8,
main = "Force-directed graph drawing,\nwith degree-dependent node size."
)
mtext(g$decor, cex = 1, side = 1, line = -1, outer = TRUE)
## ----imgs,fig.width=6, fig.height=6, fig.align="center",warning=FALSE, fig.cap="\\label{fig:figs}imgs.tsv"----
imgs_path <- paste0(dataDir, "/imgs.tsv")
imgs <- read.table(imgs_path, sep="\t", stringsAsFactors = FALSE)
knitr::kable(imgs, "html") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12)
## ----xy_coords,out.width="50%", fig.align="center",echo=FALSE,warning=FALSE----
# Comparison table of coordinate conventions across graphics backends.
df.equi <- data.frame(
"Device/Package" = c("*R graphics*", "*R raster*", "*R magick*", "***GIS interface***"),
"Unit of measure" = c("number of pixels", "number of pixels", "number of pixels", "**number of pixels**"),
"Origin" = c("bottom-left corner", "top-left corner", "top-left corner", "**top-left corner**"),
"x-axis orientation" = c("rightward", "downward", "rightward", "**rightward**"),
"y-axis orientation" = c("upward", "rightward", "downward", "**upward**"),
check.names = FALSE)
knitr::kable(df.equi) %>%
kable_styling(full_width = F)
## ----drawing, out.width="50%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, echo=TRUE, message=FALSE, fig.cap="\\label{fig:figs} `iconr` (GIS) coordinate convention: decoration `Cerro_Muriano.Cerro_Muriano_1.jpg` with the coordinates of its corners."----
library(magick)
library(graphics)
dataDir <- system.file("extdata", package = "iconr")
imgs_path <- paste0(dataDir, "/imgs.csv")
imgs <- read.table(imgs_path, sep=";", stringsAsFactors = FALSE)
cm1 <- image_read(paste0(dataDir, "/", imgs$img[1]))
W <- image_info(cm1)$width
H <- image_info(cm1)$height
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mar = c(0, 0, 0, 0))
plot(cm1)
box(lwd = 2)
# Annotate the four image corners with their (x, y) pixel coordinates.
text(0, H, paste0(0, ",", 0), cex = 2, adj = c(0, 1.1))
text(W, H, paste0(W, ",", 0), cex = 2, adj = c(1, 1.1))
text(0, 0, paste0(0, ",", -H), cex = 2, adj = c(0, -0.2))
text(W, 0, paste0(W, ",", -H), cex = 2, adj = c(1, -0.2))
## ----nodes.df, warning=FALSE,fig.align="center",warning=FALSE-----------------
nds.df <- read_nds(site = "Cerro Muriano", decor = "Cerro Muriano 1", dir = dataDir)
knitr::kable(nds.df, "html") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12)
## ----edges.df, warning=FALSE--------------------------------------------------
# Re-reads edges; `edges_path` still points at the edges.tsv set earlier.
edges <- read.table(edges_path, sep = "\t", stringsAsFactors = FALSE)
knitr::kable(head(edges), "html", align = "llccc") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center",
font_size=12) %>%
gsub("\\+", "$+$", .)
## ----edges.df.1, warning=FALSE------------------------------------------------
eds.df <- read_eds(site = "Cerro Muriano", decor = "Cerro Muriano 1", dir = dataDir)
knitr::kable(head(eds.df), "html", align = "llcccrrrr") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12) %>%
gsub("\\+", "$+$", .)
## ----count4, warning=FALSE----------------------------------------------------
named_elements(lgrph[[1]], focus = "edges", nd.var="type")[1]
## ----count1, warning=FALSE----------------------------------------------------
named_elements(lgrph[[4]], focus = "edges", nd.var="type")
## ----count.all, warning=FALSE-------------------------------------------------
# Pool the named edges of every decoration and rank them by frequency.
all.edges <- unlist(lapply(lgrph, named_elements,
focus = "edges", nd.var="type", disamb.marker=""))
edges.count <- as.data.frame(table(all.edges))
edges.order <- order(edges.count$Freq, decreasing = TRUE)
edges.count <- edges.count[edges.order, ]
knitr::kable(head(edges.count), row.names = FALSE) %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12)
## ----graph.attribute.plot.type, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Zarza De Montanchez stelae (decoration 4) showing *normal* and *attribute* edges"----
site <- "Zarza de Montanchez"
decor <- "Zarza De Montanchez"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
ed.lwd = 1, ed.color = c("darkorange"),
lbl.size = 0.7)
## ----count_att, warning=FALSE-------------------------------------------------
sort(named_elements(lgrph[[4]], focus = "edges", nd.var = "type"))
## ----graph.overlap.plot.type, out.width="100%", fig.width=12, fig.asp=0.55, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae (decoration 5) showing *diachronic* and *normal* edges"----
site <- "Ibahernando"
decor <- "Ibahernando"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow = c(1, 2))
# Same decoration twice: default node labels, then labelled by node type.
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
lbl.size = 0.7)
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = 'type',
lbl.size = 0.6)
## ----count3, warning=FALSE----------------------------------------------------
named_elements(lgrph[[5]], focus = "edges", nd.var = "type")
## ----ls_functions-------------------------------------------------------------
cat(ls("package:iconr"), sep="\n")
## ----img.graph.read, echo=TRUE------------------------------------------------
site <- "Cerro Muriano"
decor <- "Cerro Muriano 1"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
## ----img.graph.plot.type, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Cerro Muriano 1 stelae (decoration 1) with the type of each GU"----
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = 'type',
lbl.size = 0.55)
## ----img.graph.plot.id, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Cerro Muriano 1 stelae (decoration 1) with the maximum length (in cm) of each GU"----
# Attach an ad-hoc label column (lengths in cm for the 7 graphic units).
nds.df$long_cm <- paste0(c(47, 9, 47, 18, 7, 3, 13), "cm")
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = 'long_cm',
nd.color = "brown",
lbl.color = "brown", lbl.size = 0.7,
ed.color = "brown")
## ----compare.nodes, results='asis', warning=FALSE-----------------------------
# Reload everything from TSV, then count nodes shared between decorations.
imgs_path <- paste0(dataDir, "/imgs.tsv")
nodes_path <- paste0(dataDir, "/nodes.tsv")
edges_path <- paste0(dataDir, "/edges.tsv")
imgs <- read.table(imgs_path, sep="\t", stringsAsFactors = FALSE)
nodes <- read.table(nodes_path, sep="\t", stringsAsFactors = FALSE)
edges <- read.table(edges_path, sep="\t", stringsAsFactors = FALSE)
lgrph <- list_dec(imgs, nodes, edges)
df.same_nodes <- same_elements(lgrph,
focus = "nodes",
nd.var = "type")
diag(df.same_nodes) <- cell_spec(diag(df.same_nodes),
font_size = 9)
knitr::kable(df.same_nodes, row.names = TRUE, escape = FALSE, table.attr = "style='width:30%;'",
caption = "Count of common nodes between decorations") %>%
column_spec(1, bold=TRUE) %>%
kableExtra::kable_styling(position = "center", font_size = 12)
## ----compare.2.nodes, fig.show = TRUE, out.width="100%", fig.width=12, fig.asp=0.52, fig.align="center", warning=FALSE----
dec.to.compare <- c(2, 3, 4)
g.compar <- list_compar(lgrph, nd.var = "type")
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "nodes",
nd.size = c(0.5, 1.5),
dir = dataDir)
## ----compare.edges, warning=FALSE---------------------------------------------
df.same_edges <- same_elements(lgrph, nd.var = "type", focus = "edges")
diag(df.same_edges) <- cell_spec(diag(df.same_edges),
font_size = 9)
knitr::kable(df.same_edges, row.names = TRUE, escape = F, table.attr = "style='width:30%;'",
caption = "Count of common edges between decorations") %>%
column_spec(1, bold=TRUE) %>%
kableExtra::kable_styling(position = "center", font_size = 12)
## ----compare.2.edges, out.width="100%", fig.width=12, fig.asp=0.52, fig.align="center", warning=FALSE----
dec.to.compare <- c(2, 3, 4)
g.compar <- list_compar(lgrph, nd.var = "type")
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "edges",
nd.size = c(0.5, 1.7),
dir = dataDir)
## ----ibahernando, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae (decoration 5) with *diachronic* and *normal* edges, node 1 overlaps node 2 and node 3"----
site <- "Ibahernando"
decor <- "Ibahernando"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
lbl.size = 0.7)
## ----rm.writing, out.width="100%", fig.width=12, fig.asp=0.55, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae before and after the selection of node 4 (sword) graph component"----
# Chained assignment: both `site` and `decor` get "Ibahernando".
site <- decor <- "Ibahernando"
selected.nd <- 4
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
# Keep only the graph component contemporaneous with the selected node.
l_dec_df <- contemp_nds(nds.df, eds.df, selected.nd)
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1, 2))
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = "type",
lbl.color = "brown", lbl.size = 0.6)
plot_dec_grph(l_dec_df$nodes, l_dec_df$edges, imgs,
site, decor, dataDir,
nd.var = "type",
lbl.color = "brown", lbl.size = 0.6)
## ----ibahernando.lat, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae after the selection of node 1 (writing) graph component"----
selected.nd <- 1
nds.df <- read_nds(site, decor, dir = dataDir)
eds.df <- read_eds(site, decor, dir = dataDir)
l_dec_df <- contemp_nds(nds.df, eds.df, selected.nd)
plot_dec_grph(l_dec_df$nodes, l_dec_df$edges, imgs,
site, decor, dir = dataDir,
nd.var = "type",
lbl.size = 0.6, lbl.color = "brown")
## ----clust.comp, warning=FALSE, fig.align="center", fig.width=7, fig.height=5----
# Hierarchical clustering of decorations from the shared-element counts.
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1, 2))
df.same_edges <- same_elements(lgrph, "type", "edges")
df.same_nodes<- same_elements(lgrph, "type", "nodes")
dist.nodes <- dist(as.matrix(df.same_nodes), method = "euclidean")
dist.edges <- dist(as.matrix(df.same_edges), method = "euclidean")
hc.nds <- hclust(dist.nodes, method = "ward.D")
hc.eds <- hclust(dist.edges, method = "ward.D")
plot(hc.nds, main = "Common nodes", cex = .8)
plot(hc.eds, main = "Common edges", cex = .8)
## ----hclust.compar, warning=FALSE, fig.align="center", fig.width=7------------
# Tanglegram comparing the node-based and edge-based dendrograms.
suppressPackageStartupMessages(library(dendextend))
suppressPackageStartupMessages(library(dplyr))
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1, 2))
dend.nds <- as.dendrogram(hc.nds)
dend.eds <- as.dendrogram(hc.eds)
dendlist(dend.nds, dend.eds) %>%
untangle(method = "step1side") %>%
tanglegram(columns_width = c(6, 1, 6),
main_left = "Common nodes",
main_right = "Common edges",
lab.cex = 1.3,
cex_main = 1.5,
highlight_branches_lwd = F)
## ----compare.c.edges, out.width="100%", fig.width=12, fig.asp=0.52, fig.align="center", warning=FALSE----
dec.to.compare <- c(3, 5)
g.compar <- list_compar(lgrph, nd.var = "type")
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "nodes",
nd.size = c(0.5, 1.7),
dir = dataDir)
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "edges",
nd.size = c(0.5, 1.7),
dir = dataDir)
|
/inst/doc/index.R
|
no_license
|
cran/iconr
|
R
| false
| false
| 15,488
|
r
|
## ---- include = FALSE---------------------------------------------------------
library(knitr)
library(igraph)
library(dplyr)
library(kableExtra)
knitr::opts_chunk$set(
collapse = TRUE,
comment = ">",
fig.pos = 'H'
)
# ibahernando.path <- paste0(getwd(),"/img/ibahernando_256colours.png")
# brozas.path <- paste0(getwd(),"/img/brozas_256colours.png")
# solanas.path <- paste0(getwd(),"/img/solana_detail_256colours.png")
# solanas.vor.path <- paste0(getwd(),"/img/solana_voronoi_256colours.png")
# ibahernando.path <- "img/ibahernando_256colours.png"
# brozas.path <- "img/brozas_256colours.png"
# solanas.path <- "img/solana_detail_256colours.png"
# solanas.vor.path <- "img/solana_voronoi_256colours.png"
ibahernando.path <- "../man/figures/ibahernando_256colours.png"
brozas.path <- "../man/figures/brozas_256colours.png"
solanas.path <- "../man/figures/solana_detail_256colours.png"
solanas.vor.path <- "../man/figures/solana_voronoi_256colours.png"
## ----down,eval=FALSE, echo=TRUE-----------------------------------------------
# devtools::install_github("zoometh/iconr", build_vignettes=TRUE)
## ----load, echo=TRUE----------------------------------------------------------
library(iconr)
## ----ls_ext_data--------------------------------------------------------------
dataDir <- system.file("extdata", package = "iconr")
input.files <- list.files(dataDir)
cat(input.files, sep="\n")
## ----paths.imgs, echo=TRUE----------------------------------------------------
imgs_path <- paste0(dataDir, "/imgs.csv")
imgs <- read.table(imgs_path, sep=";", stringsAsFactors = FALSE)
## ----paths, echo=TRUE---------------------------------------------------------
nodes_path <- paste0(dataDir, "/nodes.shp")
nodes.shp <- rgdal::readOGR(dsn = nodes_path, verbose = FALSE)
nodes <- as.data.frame(nodes.shp)
edges_path <- paste0(dataDir, "/edges.shp")
edges.shp <- rgdal::readOGR(dsn = edges_path, verbose = FALSE)
edges <- as.data.frame(edges.shp)
## ----paths.1, echo=TRUE-------------------------------------------------------
nodes_path <- paste0(dataDir, "/nodes.tsv")
nodes <- read.table(nodes_path, sep="\t", stringsAsFactors = FALSE)
edges_path <- paste0(dataDir, "/edges.tsv")
edges <- read.table(edges_path, sep="\t", stringsAsFactors = FALSE)
## ----graph.clss---------------------------------------------------------------
lgrph <- list_dec(imgs, nodes, edges)
g <- lgrph[[1]]
as.character(class(g))
## ----igraph.1, warning=FALSE, fig.align="center", fig.width=6.5, fig.asp=0.58----
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mar=c(1, 0, 2, 0), mfrow=c(1, 2), cex.main = 0.9, font.main = 1)
coords <- layout.fruchterman.reingold(lgrph[[1]])
plot(g,
vertex.size = 15,
vertex.frame.color="white",
vertex.label.family = "sans",
vertex.label.cex = .8,
main = "Graph drawing based on x, y coordinates"
)
plot(g,
layout = layout.fruchterman.reingold(g),
vertex.size = 5 + degree(g)*10,
vertex.frame.color="white",
vertex.label.family = "sans",
vertex.label.cex = .8,
main = "Force-directed graph drawing,\nwith degree-dependent node size."
)
mtext(g$decor, cex = 1, side = 1, line = -1, outer = TRUE)
## ----imgs,fig.width=6, fig.height=6, fig.align="center",warning=FALSE, fig.cap="\\label{fig:figs}imgs.tsv"----
imgs_path <- paste0(dataDir, "/imgs.tsv")
imgs <- read.table(imgs_path, sep="\t", stringsAsFactors = FALSE)
knitr::kable(imgs, "html") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12)
## ----xy_coords,out.width="50%", fig.align="center",echo=FALSE,warning=FALSE----
df.equi <- data.frame(
"Device/Package" = c("*R graphics*", "*R raster*", "*R magick*", "***GIS interface***"),
"Unit of measure" = c("number of pixels", "number of pixels", "number of pixels", "**number of pixels**"),
"Origin" = c("bottom-left corner", "top-left corner", "top-left corner", "**top-left corner**"),
"x-axis orientation" = c("rightward", "downward", "rightward", "**rightward**"),
"y-axis orientation" = c("upward", "rightward", "downward", "**upward**"),
check.names = FALSE)
knitr::kable(df.equi) %>%
kable_styling(full_width = F)
## ----drawing, out.width="50%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, echo=TRUE, message=FALSE, fig.cap="\\label{fig:figs} `iconr` (GIS) coordinate convention: decoration `Cerro_Muriano.Cerro_Muriano_1.jpg` with the coordinates of its corners."----
library(magick)
library(graphics)
dataDir <- system.file("extdata", package = "iconr")
imgs_path <- paste0(dataDir, "/imgs.csv")
imgs <- read.table(imgs_path, sep=";", stringsAsFactors = FALSE)
cm1 <- image_read(paste0(dataDir, "/", imgs$img[1]))
W <- image_info(cm1)$width
H <- image_info(cm1)$height
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mar = c(0, 0, 0, 0))
plot(cm1)
box(lwd = 2)
text(0, H, paste0(0, ",", 0), cex = 2, adj = c(0, 1.1))
text(W, H, paste0(W, ",", 0), cex = 2, adj = c(1, 1.1))
text(0, 0, paste0(0, ",", -H), cex = 2, adj = c(0, -0.2))
text(W, 0, paste0(W, ",", -H), cex = 2, adj = c(1, -0.2))
## ----nodes.df, warning=FALSE,fig.align="center",warning=FALSE-----------------
nds.df <- read_nds(site = "Cerro Muriano", decor = "Cerro Muriano 1", dir = dataDir)
knitr::kable(nds.df, "html") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12)
## ----edges.df, warning=FALSE--------------------------------------------------
edges <- read.table(edges_path, sep = "\t", stringsAsFactors = FALSE)
knitr::kable(head(edges), "html", align = "llccc") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center",
font_size=12) %>%
gsub("\\+", "$+$", .)
## ----edges.df.1, warning=FALSE------------------------------------------------
eds.df <- read_eds(site = "Cerro Muriano", decor = "Cerro Muriano 1", dir = dataDir)
knitr::kable(head(eds.df), "html", align = "llcccrrrr") %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12) %>%
gsub("\\+", "$+$", .)
## ----count4, warning=FALSE----------------------------------------------------
named_elements(lgrph[[1]], focus = "edges", nd.var="type")[1]
## ----count1, warning=FALSE----------------------------------------------------
named_elements(lgrph[[4]], focus = "edges", nd.var="type")
## ----count.all, warning=FALSE-------------------------------------------------
all.edges <- unlist(lapply(lgrph, named_elements,
focus = "edges", nd.var="type", disamb.marker=""))
edges.count <- as.data.frame(table(all.edges))
edges.order <- order(edges.count$Freq, decreasing = TRUE)
edges.count <- edges.count[edges.order, ]
knitr::kable(head(edges.count), row.names = FALSE) %>%
kableExtra::kable_styling(full_width = FALSE, position = "center", font_size=12)
## ----graph.attribute.plot.type, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Zarza De Montanchez stelae (decoration 4) showing *normal* and *attribute* edges"----
site <- "Zarza de Montanchez"
decor <- "Zarza De Montanchez"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
ed.lwd = 1, ed.color = c("darkorange"),
lbl.size = 0.7)
## ----count_att, warning=FALSE-------------------------------------------------
sort(named_elements(lgrph[[4]], focus = "edges", nd.var = "type"))
## ----graph.overlap.plot.type, out.width="100%", fig.width=12, fig.asp=0.55, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae (decoration 5) showing *diachronic* and *normal* edges"----
site <- "Ibahernando"
decor <- "Ibahernando"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow = c(1, 2))
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
lbl.size = 0.7)
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = 'type',
lbl.size = 0.6)
## ----count3, warning=FALSE----------------------------------------------------
named_elements(lgrph[[5]], focus = "edges", nd.var = "type")
## ----ls_functions-------------------------------------------------------------
cat(ls("package:iconr"), sep="\n")
## ----img.graph.read, echo=TRUE------------------------------------------------
# Read the node and edge tables of the Cerro Muriano 1 decoration.
site <- "Cerro Muriano"
decor <- "Cerro Muriano 1"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
## ----img.graph.plot.type, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Cerro Muriano 1 stelae (decoration 1) with the type of each GU"----
# Plot the decoration graph labelled with each graphical unit's type.
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = 'type',
lbl.size = 0.55)
## ----img.graph.plot.id, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Cerro Muriano 1 stelae (decoration 1) with the maximum length (in cm) of each GU"----
# Add an ad-hoc node variable (maximum length in cm, one value per node) and
# plot the graph again using that variable as the node label, all in brown.
nds.df$long_cm <- paste0(c(47, 9, 47, 18, 7, 3, 13), "cm")
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = 'long_cm',
nd.color = "brown",
lbl.color = "brown", lbl.size = 0.7,
ed.color = "brown")
## ----compare.nodes, results='asis', warning=FALSE-----------------------------
# Reload the full data set (images, nodes, edges) from TSV files and rebuild
# the list of decoration graphs.
imgs_path <- paste0(dataDir, "/imgs.tsv")
nodes_path <- paste0(dataDir, "/nodes.tsv")
edges_path <- paste0(dataDir, "/edges.tsv")
imgs <- read.table(imgs_path, sep="\t", stringsAsFactors = FALSE)
nodes <- read.table(nodes_path, sep="\t", stringsAsFactors = FALSE)
edges <- read.table(edges_path, sep="\t", stringsAsFactors = FALSE)
lgrph <- list_dec(imgs, nodes, edges)
# Pairwise counts of nodes (matched on "type") shared between decorations;
# the diagonal (a decoration compared with itself) is shown in a smaller font.
df.same_nodes <- same_elements(lgrph,
focus = "nodes",
nd.var = "type")
diag(df.same_nodes) <- cell_spec(diag(df.same_nodes),
font_size = 9)
knitr::kable(df.same_nodes, row.names = TRUE, escape = FALSE, table.attr = "style='width:30%;'",
caption = "Count of common nodes between decorations") %>%
column_spec(1, bold=TRUE) %>%
kableExtra::kable_styling(position = "center", font_size = 12)
## ----compare.2.nodes, fig.show = TRUE, out.width="100%", fig.width=12, fig.asp=0.52, fig.align="center", warning=FALSE----
# Plot decorations 2, 3 and 4 side by side, highlighting their common nodes.
dec.to.compare <- c(2, 3, 4)
g.compar <- list_compar(lgrph, nd.var = "type")
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "nodes",
nd.size = c(0.5, 1.5),
dir = dataDir)
## ----compare.edges, warning=FALSE---------------------------------------------
# Pairwise counts of edges (matched on node "type") shared between decorations;
# the diagonal is shown in a smaller font.
df.same_edges <- same_elements(lgrph, nd.var = "type", focus = "edges")
diag(df.same_edges) <- cell_spec(diag(df.same_edges),
font_size = 9)
# FIX: use the constant FALSE rather than the reassignable shorthand F.
knitr::kable(df.same_edges, row.names = TRUE, escape = FALSE, table.attr = "style='width:30%;'",
caption = "Count of common edges between decorations") %>%
column_spec(1, bold=TRUE) %>%
kableExtra::kable_styling(position = "center", font_size = 12)
## ----compare.2.edges, out.width="100%", fig.width=12, fig.asp=0.52, fig.align="center", warning=FALSE----
# Plot decorations 2, 3 and 4 side by side, highlighting their common edges.
dec.to.compare <- c(2, 3, 4)
g.compar <- list_compar(lgrph, nd.var = "type")
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "edges",
nd.size = c(0.5, 1.7),
dir = dataDir)
## ----ibahernando, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae (decoration 5) with *diachronic* and *normal* edges, node 1 overlaps node 2 and node 3"----
# Plot the Ibahernando decoration with its default node labels.
site <- "Ibahernando"
decor <- "Ibahernando"
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
lbl.size = 0.7)
## ----rm.writing, out.width="100%", fig.width=12, fig.asp=0.55, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae before and after the selection of node 4 (sword) graph component"----
# Keep only the graph component contemporaneous with node 4 (the sword) and
# plot the decoration before and after that selection.
site <- decor <- "Ibahernando"
selected.nd <- 4
nds.df <- read_nds(site, decor, dataDir)
eds.df <- read_eds(site, decor, dataDir)
l_dec_df <- contemp_nds(nds.df, eds.df, selected.nd)
# save and restore graphical parameters changed by par(mfrow = ...)
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1, 2))
plot_dec_grph(nds.df, eds.df, imgs,
site, decor, dataDir,
nd.var = "type",
lbl.color = "brown", lbl.size = 0.6)
plot_dec_grph(l_dec_df$nodes, l_dec_df$edges, imgs,
site, decor, dataDir,
nd.var = "type",
lbl.color = "brown", lbl.size = 0.6)
## ----ibahernando.lat, out.width="60%", fig.width=6, fig.asp=750/666, fig.align="center", warning=FALSE, fig.cap="Ibahernando stelae after the selection of node 1 (writing) graph component"----
# Same selection, this time around node 1 (the writing).
selected.nd <- 1
nds.df <- read_nds(site, decor, dir = dataDir)
eds.df <- read_eds(site, decor, dir = dataDir)
l_dec_df <- contemp_nds(nds.df, eds.df, selected.nd)
plot_dec_grph(l_dec_df$nodes, l_dec_df$edges, imgs,
site, decor, dir = dataDir,
nd.var = "type",
lbl.size = 0.6, lbl.color = "brown")
## ----clust.comp, warning=FALSE, fig.align="center", fig.width=7, fig.height=5----
# Hierarchical clustering (Ward linkage on Euclidean distances) of the
# decorations, once on counts of common nodes and once on common edges.
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1, 2))
df.same_edges <- same_elements(lgrph, "type", "edges")
df.same_nodes<- same_elements(lgrph, "type", "nodes")
dist.nodes <- dist(as.matrix(df.same_nodes), method = "euclidean")
dist.edges <- dist(as.matrix(df.same_edges), method = "euclidean")
hc.nds <- hclust(dist.nodes, method = "ward.D")
hc.eds <- hclust(dist.edges, method = "ward.D")
plot(hc.nds, main = "Common nodes", cex = .8)
plot(hc.eds, main = "Common edges", cex = .8)
## ----hclust.compar, warning=FALSE, fig.align="center", fig.width=7------------
# Compare the two dendrograms (common nodes vs common edges) with a tanglegram.
suppressPackageStartupMessages(library(dendextend))
suppressPackageStartupMessages(library(dplyr))
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
par(mfrow=c(1, 2))
dend.nds <- as.dendrogram(hc.nds)
dend.eds <- as.dendrogram(hc.eds)
dendlist(dend.nds, dend.eds) %>%
untangle(method = "step1side") %>%
tanglegram(columns_width = c(6, 1, 6),
main_left = "Common nodes",
main_right = "Common edges",
lab.cex = 1.3,
cex_main = 1.5,
highlight_branches_lwd = FALSE) # FIX: FALSE instead of the shorthand F
## ----compare.c.edges, out.width="100%", fig.width=12, fig.asp=0.52, fig.align="center", warning=FALSE----
# Compare decorations 3 and 5 on both their common nodes and common edges.
dec.to.compare <- c(3, 5)
g.compar <- list_compar(lgrph, nd.var = "type")
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "nodes",
nd.size = c(0.5, 1.7),
dir = dataDir)
plot_compar(listg = g.compar,
dec2comp = dec.to.compare,
focus = "edges",
nd.size = c(0.5, 1.7),
dir = dataDir)
|
#Script to play with
#Mirae Guenther
#November 5, 2018
#mapping stream locations
#Packages: ggmap, ggplot2
#clear R's brain
rm(list=ls())
#where is R looking
getwd()
#tell R where to look
setwd("/Users/mguenther5/Desktop/R data")
#confirm it's looking there
getwd()
#New attempt from online resource
library(ggplot2)
library(ggmap)
#Polling locations
#Vote_1 -96.670658 40.795535
#Poll_2 -96.688154 40.811577
#Polling_3 -96.62225 40.773502
##Create data frame with lat and long from GPS points
# FIX: the latitude of Vote_1 was mistyped as 4.795535; corrected to 40.795535
# to match the coordinates listed above.
d<-data.frame(lat=c(40.795535, 40.811577, 40.773502),lon=c(-96.670658, -96.688154, -96.62225))
NEmap<-get_map("Lincoln,Nebraska,USA", zoom=7)
# FIX: the map object is named NEmap, but the original code passed the
# undefined object `Lincmap` to ggmap(), which errors; use NEmap instead.
p<-ggmap(NEmap)
p<-p+geom_point(data=d, aes(x=lon,y=lat))
p
ggplot_build(p)
|
/Meeting_script.R
|
no_license
|
rtesting4lab/Go_Vote
|
R
| false
| false
| 752
|
r
|
#Script to play with
#Mirae Guenther
#November 5, 2018
#mapping stream locations
#Packages: ggmap, ggplot2
#clear R's brain
rm(list=ls())
#where is R looking
getwd()
#tell R where to look
setwd("/Users/mguenther5/Desktop/R data")
#confirm it's looking there
getwd()
#New attempt from online resource
library(ggplot2)
library(ggmap)
#Polling locations
#Vote_1 -96.670658 40.795535
#Poll_2 -96.688154 40.811577
#Polling_3 -96.62225 40.773502
##Create data frame with lat and long from GPS points
# FIX: the latitude of Vote_1 was mistyped as 4.795535; corrected to 40.795535
# to match the coordinates listed above.
d<-data.frame(lat=c(40.795535, 40.811577, 40.773502),lon=c(-96.670658, -96.688154, -96.62225))
NEmap<-get_map("Lincoln,Nebraska,USA", zoom=7)
# FIX: the map object is named NEmap, but the original code passed the
# undefined object `Lincmap` to ggmap(), which errors; use NEmap instead.
p<-ggmap(NEmap)
p<-p+geom_point(data=d, aes(x=lon,y=lat))
p
ggplot_build(p)
|
# Render the race report and rename the intermediate markdown to README.md.
# FIX: use library() instead of require() for a hard dependency -- library()
# fails immediately with a clear error if rmarkdown is missing, whereas
# require() only returns FALSE and the script would fail later at render().
library(rmarkdown)
render(file.path(getwd(), "Volcanic50.Rmd"), output_format="html_document")
file.rename(file.path(getwd(), "Volcanic50.md"), file.path(getwd(), "README.md"))
|
/Volcanic50/make.r
|
no_license
|
benjamin-chan/RaceReports
|
R
| false
| false
| 177
|
r
|
# Render the race report and rename the intermediate markdown to README.md.
# FIX: use library() instead of require() for a hard dependency -- library()
# fails immediately with a clear error if rmarkdown is missing, whereas
# require() only returns FALSE and the script would fail later at render().
library(rmarkdown)
render(file.path(getwd(), "Volcanic50.Rmd"), output_format="html_document")
file.rename(file.path(getwd(), "Volcanic50.md"), file.path(getwd(), "README.md"))
|
\name{cgd}
\docType{data}
\alias{cgd}
\alias{cgd.raw}
\title{Chronic Granulotomous Disease data}
\description{Data are from a placebo controlled trial of gamma
interferon in chronic granulotomous disease (CGD).
Contains the data on time to serious infections observed through
end of study for each patient.
}
\usage{cgd}
\format{
\describe{
\item{id}{subject identification number}
\item{center}{enrolling center }
\item{random}{date of randomization }
\item{treatment}{placebo or gamma interferon }
\item{sex}{sex}
\item{age}{age in years, at study entry }
\item{height}{height in cm at study entry}
\item{weight}{weight in kg at study entry}
\item{inherit}{pattern of inheritance }
\item{steroids}{use of steroids at study entry,1=yes}
\item{propylac}{use of prophylactic antibiotics at study entry}
\item{hos.cat}{a categorization of the centers into 4 groups}
\item{tstart, tstop}{start and end of each time interval }
\item{status}{1=the interval ends with an infection }
\item{enum}{observation number within subject}
}
}
\details{
The \code{cgd0} data set is in the form found in the references,
with one line per patient and no recoding of the variables.
The \code{cgd} data set (this one) has been cast into (start, stop]
format with one line per event, and covariates
such as center recoded as factors
to include meaningful labels.
}
\source{
Fleming and Harrington, Counting Processes and Survival Analysis,
appendix D.2.
}
\seealso{\code{\link{cgd0}}}
\keyword{datasets}
\keyword{survival}
|
/survival/man/cgd.Rd
|
no_license
|
fuentesdt/viewsurvivalsource
|
R
| false
| false
| 1,584
|
rd
|
\name{cgd}
\docType{data}
\alias{cgd}
\alias{cgd.raw}
\title{Chronic Granulotomous Disease data}
\description{Data are from a placebo controlled trial of gamma
interferon in chronic granulotomous disease (CGD).
Contains the data on time to serious infections observed through
end of study for each patient.
}
\usage{cgd}
\format{
\describe{
\item{id}{subject identification number}
\item{center}{enrolling center }
\item{random}{date of randomization }
\item{treatment}{placebo or gamma interferon }
\item{sex}{sex}
\item{age}{age in years, at study entry }
\item{height}{height in cm at study entry}
\item{weight}{weight in kg at study entry}
\item{inherit}{pattern of inheritance }
\item{steroids}{use of steroids at study entry,1=yes}
\item{propylac}{use of prophylactic antibiotics at study entry}
\item{hos.cat}{a categorization of the centers into 4 groups}
\item{tstart, tstop}{start and end of each time interval }
\item{status}{1=the interval ends with an infection }
\item{enum}{observation number within subject}
}
}
\details{
The \code{cgd0} data set is in the form found in the references,
with one line per patient and no recoding of the variables.
The \code{cgd} data set (this one) has been cast into (start, stop]
format with one line per event, and covariates
such as center recoded as factors
to include meaningful labels.
}
\source{
Fleming and Harrington, Counting Processes and Survival Analysis,
appendix D.2.
}
\seealso{\code{\link{cgd0}}}
\keyword{datasets}
\keyword{survival}
|
library(bigmatch)
### Name: nfmatch
### Title: Minimum-distance near-fine matching.
### Aliases: nfmatch
### ** Examples
# To run this example, you must load the optmatch package.
# Caliper of .3 on the propensity score, near fine balance of
# education, a robust Mahalanobis distance for X.
data(nh0506)
attach(nh0506)
X<-cbind(female,age,black,hispanic,education,povertyr,bmi)
m<-nfmatch(z=z,p=propens,fine=education,X=X,caliper=.3,dat=nh0506,rank=FALSE)
matcheddata=m$data
table(matcheddata$z,matcheddata$education)
head(matcheddata)
detach(nh0506)
## No test:
#finds the optimal caliper for the propensity score while exact matching on female
#near fine balance for education and hispanic jointly.
data(nh0506)
attach(nh0506)
X<-cbind(female,age,black,hispanic,education,povertyr,bmi)
# FIX: use the constant FALSE throughout instead of the reassignable
# shorthand F (consistent with rank=FALSE in the first example above).
oc<-optcal(z,propens,exact=female,tol=0.1,rank=FALSE)
oc
oco<-optconstant(z,propens,oc$caliper,exact=female,rank=FALSE)
oco
m2<-nfmatch(z,propens,factor(hispanic):factor(education),X,nh0506,oc$caliper,oco$constant,
exact=female,rank=FALSE)
matcheddata2=m2$data
table(matcheddata2$z,matcheddata2$female)
table(matcheddata2$z,matcheddata2$education)
table(matcheddata2$z,matcheddata2$education,matcheddata2$hispanic)
#finds the optimal caliper for the propensity score while exact matching on female
#nearexact on quantiles of povertyr and bmi
#near fine balance for education and hispanic jointly.
pq=cut(povertyr,c(-0.1,1,2,3,4,5))
bq=cut(bmi,(0:7)*20)
#first assume povertyr and bmi are of the same importance
m3<-nfmatch(z,propens,factor(hispanic):factor(education),X,nh0506,oc$caliper,oco$constant,
exact=female,nearexact=cbind(pq,bq),rank=FALSE)
matcheddata3=m3$data
head(matcheddata3)
#then assume povertyr is more important than bmi
m4<-nfmatch(z,propens,factor(hispanic):factor(education),X,nh0506,oc$caliper,oco$constant,
exact=female,nearexact=cbind(pq,bq),nearexPenalty=c(100,50),rank=FALSE)
matcheddata4=m4$data
head(matcheddata4)
detach(nh0506)
## End(No test)
|
/data/genthat_extracted_code/bigmatch/examples/nfmatch.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,054
|
r
|
library(bigmatch)
### Name: nfmatch
### Title: Minimum-distance near-fine matching.
### Aliases: nfmatch
### ** Examples
# To run this example, you must load the optmatch package.
# Caliper of .3 on the propensity score, near fine balance of
# education, a robust Mahalanobis distance for X.
data(nh0506)
attach(nh0506)
X<-cbind(female,age,black,hispanic,education,povertyr,bmi)
m<-nfmatch(z=z,p=propens,fine=education,X=X,caliper=.3,dat=nh0506,rank=FALSE)
matcheddata=m$data
table(matcheddata$z,matcheddata$education)
head(matcheddata)
detach(nh0506)
## No test:
#finds the optimal caliper for the propensity score while exact matching on female
#near fine balance for education and hispanic jointly.
data(nh0506)
attach(nh0506)
X<-cbind(female,age,black,hispanic,education,povertyr,bmi)
# FIX: use the constant FALSE throughout instead of the reassignable
# shorthand F (consistent with rank=FALSE in the first example above).
oc<-optcal(z,propens,exact=female,tol=0.1,rank=FALSE)
oc
oco<-optconstant(z,propens,oc$caliper,exact=female,rank=FALSE)
oco
m2<-nfmatch(z,propens,factor(hispanic):factor(education),X,nh0506,oc$caliper,oco$constant,
exact=female,rank=FALSE)
matcheddata2=m2$data
table(matcheddata2$z,matcheddata2$female)
table(matcheddata2$z,matcheddata2$education)
table(matcheddata2$z,matcheddata2$education,matcheddata2$hispanic)
#finds the optimal caliper for the propensity score while exact matching on female
#nearexact on quantiles of povertyr and bmi
#near fine balance for education and hispanic jointly.
pq=cut(povertyr,c(-0.1,1,2,3,4,5))
bq=cut(bmi,(0:7)*20)
#first assume povertyr and bmi are of the same importance
m3<-nfmatch(z,propens,factor(hispanic):factor(education),X,nh0506,oc$caliper,oco$constant,
exact=female,nearexact=cbind(pq,bq),rank=FALSE)
matcheddata3=m3$data
head(matcheddata3)
#then assume povertyr is more important than bmi
m4<-nfmatch(z,propens,factor(hispanic):factor(education),X,nh0506,oc$caliper,oco$constant,
exact=female,nearexact=cbind(pq,bq),nearexPenalty=c(100,50),rank=FALSE)
matcheddata4=m4$data
head(matcheddata4)
detach(nh0506)
## End(No test)
|
###
### AllGenerics.R
###
# Define the package's S4 generics. Each definition is guarded with
# isGeneric() so that re-sourcing this file does not redefine an
# already-registered generic.
if (!isGeneric("dpimom")) {
setGeneric("dpimom",
function(x,
tau=1,
phi=1,
logscale=FALSE) standardGeneric("dpimom"))
}
if (!isGeneric("dpmom")) {
setGeneric("dpmom",
function(x,
tau,
a.tau,
b.tau,
phi=1,
r=1,
baseDensity='normal',
logscale=FALSE) standardGeneric("dpmom"))
}
if (!isGeneric("demom")) {
setGeneric("demom",
function(x,
tau,
a.tau,
b.tau,
phi=1,
logscale=FALSE) standardGeneric("demom"))
}
# FIX: the guard previously tested isGeneric("marginalIW") while defining the
# generic "marginalNIW"; the mismatched name meant the guard never detected an
# existing marginalNIW generic. Test the name that is actually defined.
if (!isGeneric("marginalNIW")) {
setGeneric("marginalNIW", function(x, xbar, samplecov, n, z, g, mu0=rep(0,ncol(x)), nu0=ncol(x)+4, S0,logscale=TRUE) standardGeneric("marginalNIW"))
}
if (!isGeneric("postProb")) {
setGeneric("postProb", function(object, nmax, method='norm') standardGeneric("postProb"))
}
if (!isGeneric("postSamples")) {
setGeneric("postSamples", function(object) standardGeneric("postSamples"))
}
if (!isGeneric("rnlp")) {
setGeneric("rnlp", function(y, x, m, V, msfit, priorCoef, priorVar, niter=10^3, burnin=round(niter/10), thinning=1, pp='norm') standardGeneric("rnlp"))
}
|
/R/AllGenerics.R
|
no_license
|
szcf-weiya/mombf
|
R
| false
| false
| 1,468
|
r
|
###
### AllGenerics.R
###
# Define the package's S4 generics. Each definition is guarded with
# isGeneric() so that re-sourcing this file does not redefine an
# already-registered generic.
if (!isGeneric("dpimom")) {
setGeneric("dpimom",
function(x,
tau=1,
phi=1,
logscale=FALSE) standardGeneric("dpimom"))
}
if (!isGeneric("dpmom")) {
setGeneric("dpmom",
function(x,
tau,
a.tau,
b.tau,
phi=1,
r=1,
baseDensity='normal',
logscale=FALSE) standardGeneric("dpmom"))
}
if (!isGeneric("demom")) {
setGeneric("demom",
function(x,
tau,
a.tau,
b.tau,
phi=1,
logscale=FALSE) standardGeneric("demom"))
}
# FIX: the guard previously tested isGeneric("marginalIW") while defining the
# generic "marginalNIW"; the mismatched name meant the guard never detected an
# existing marginalNIW generic. Test the name that is actually defined.
if (!isGeneric("marginalNIW")) {
setGeneric("marginalNIW", function(x, xbar, samplecov, n, z, g, mu0=rep(0,ncol(x)), nu0=ncol(x)+4, S0,logscale=TRUE) standardGeneric("marginalNIW"))
}
if (!isGeneric("postProb")) {
setGeneric("postProb", function(object, nmax, method='norm') standardGeneric("postProb"))
}
if (!isGeneric("postSamples")) {
setGeneric("postSamples", function(object) standardGeneric("postSamples"))
}
if (!isGeneric("rnlp")) {
setGeneric("rnlp", function(y, x, m, V, msfit, priorCoef, priorVar, niter=10^3, burnin=round(niter/10), thinning=1, pp='norm') standardGeneric("rnlp"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jaccard.R
\name{jaccard}
\alias{jaccard}
\title{Calculate the Jaccard index between all columns in a matrix}
\usage{
jaccard(mat)
}
\arguments{
\item{mat}{a matrix of data, with samples in rows and features in columns}
}
\value{
the Jaccard index between non-zero/missing values in each pair of
columns
}
\description{
Calculate the Jaccard index between all pairs of columns in a matrix.
The Jaccard index (also known as the Tanimoto coefficient) is defined as the
size of the intersection of two bitsets divided by the size of the union.
Here, to convert a list of continuous expression values into a set of bits,
measurements that are missing or equal to zero are considered as zeroes and
all other measurements are considered as ones.
}
\examples{
mat = matrix(c(1, rep(0, 9), rep(1, 4), rep(0, 6)), ncol = 2)
jaccard(mat)
mat = cbind(mat, c(0, rep(1, 5), rep(0, 4)))
}
|
/man/jaccard.Rd
|
permissive
|
jordansquair/dismay
|
R
| false
| true
| 958
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jaccard.R
\name{jaccard}
\alias{jaccard}
\title{Calculate the Jaccard index between all columns in a matrix}
\usage{
jaccard(mat)
}
\arguments{
\item{mat}{a matrix of data, with samples in rows and features in columns}
}
\value{
the Jaccard index between non-zero/missing values in each pair of
columns
}
\description{
Calculate the Jaccard index between all pairs of columns in a matrix.
The Jaccard index (also known as the Tanimoto coefficient) is defined as the
size of the intersection of two bitsets divided by the size of the union.
Here, to convert a list of continuous expression values into a set of bits,
measurements that are missing or equal to zero are considered as zeroes and
all other measurements are considered as ones.
}
\examples{
mat = matrix(c(1, rep(0, 9), rep(1, 4), rep(0, 6)), ncol = 2)
jaccard(mat)
mat = cbind(mat, c(0, rep(1, 5), rep(0, 4)))
}
|
# Plot Global Active Power (kilowatts) over 1-2 Feb 2007 from the UCI
# household power consumption data set and save the figure as plot2.png.
# Downloads and extracts the data set if it is not already present.
plot2 <- function(){
# hold originally directory location
old.dir <- getwd()
# FIX: restore the working directory via on.exit() so the caller's directory
# is restored even if any step below fails (the original only restored it at
# the end of a successful run).
on.exit(setwd(old.dir), add = TRUE)
# if the file does not exist then download it
if(!file.exists("./exdata-data-household_power_consumption.zip")){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="exdata-data-household_power_consumption.zip")
# print date downloaded
print(date())
}
# Verify that the file exists
if(!file.exists("./data/household_power_consumption.txt")){
# extract the file to the data folder
unzip("exdata-data-household_power_consumption.zip", exdir = "data", overwrite=TRUE)
}
## Move to the extracted directory
setwd("./data")
# FIX: load hard dependencies with library() (fails loudly) instead of
# require() (silently returns FALSE and the script fails later).
library(sqldf)
# Use SQL and string matching to get data by date
households<- read.csv.sql(
"household_power_consumption.txt",
sep=";",
sql = "select * from file where Date in ('1/2/2007','2/2/2007')"
)
# Close all connections
closeAllConnections()
library(lubridate)
plot(dmy_hms(paste(households$Date, households$Time)),
households$Global_active_power,
type="l",
ylab="Global Active Power (kilowatts)", xlab="")
# Save in original directory, defaults to 480x480 width height in pixels
dev.copy(png,file="../plot2.png")
dev.off()
}
|
/plot2.R
|
no_license
|
navarq/ExData_Plotting1
|
R
| false
| false
| 1,805
|
r
|
# Plot Global Active Power (kilowatts) over 1-2 Feb 2007 from the UCI
# household power consumption data set and save the figure as plot2.png.
# Downloads and extracts the data set if it is not already present.
plot2 <- function(){
# hold originally directory location
old.dir <- getwd()
# FIX: restore the working directory via on.exit() so the caller's directory
# is restored even if any step below fails (the original only restored it at
# the end of a successful run).
on.exit(setwd(old.dir), add = TRUE)
# if the file does not exist then download it
if(!file.exists("./exdata-data-household_power_consumption.zip")){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="exdata-data-household_power_consumption.zip")
# print date downloaded
print(date())
}
# Verify that the file exists
if(!file.exists("./data/household_power_consumption.txt")){
# extract the file to the data folder
unzip("exdata-data-household_power_consumption.zip", exdir = "data", overwrite=TRUE)
}
## Move to the extracted directory
setwd("./data")
# FIX: load hard dependencies with library() (fails loudly) instead of
# require() (silently returns FALSE and the script fails later).
library(sqldf)
# Use SQL and string matching to get data by date
households<- read.csv.sql(
"household_power_consumption.txt",
sep=";",
sql = "select * from file where Date in ('1/2/2007','2/2/2007')"
)
# Close all connections
closeAllConnections()
library(lubridate)
plot(dmy_hms(paste(households$Date, households$Time)),
households$Global_active_power,
type="l",
ylab="Global Active Power (kilowatts)", xlab="")
# Save in original directory, defaults to 480x480 width height in pixels
dev.copy(png,file="../plot2.png")
dev.off()
}
|
# Build Helmert contrasts for the main effects and all two-way interactions of
# the factor columns in `Factors`.
#   Factors      - data frame; only its factor columns are used
#   MaxOrderIter - requested maximum interaction order (capped at the number
#                  of factors)
# Returns a list with:
#   Groups    - interaction of all factors (one level per treatment group)
#   Contrasts - matrix of contrasts, one row per contrast
#   Effects   - factor labelling which effect each contrast row belongs to
# NOTE(review): MaxOrderIter is only capped; the function always builds
# exactly the main effects and two-way interactions -- confirm whether
# higher-order interactions were intended.
ConstructContrasts <- function(Factors, MaxOrderIter=2){
Classes=sapply(Factors,class)
# FIX: drop=FALSE keeps Factors a data frame even when a single factor column
# remains; without it the subset collapses to a vector and dim(Factors)[2]
# below would fail.
Factors=Factors[,Classes=="factor",drop=FALSE]
nf=dim(Factors)[2] # Number of factors
if (MaxOrderIter>nf) {
print(paste("The maximum order of the iterations has been set to", nf))
MaxOrderIter=nf}
nlev=rep(0, nf) #Number of levels per factor
for (i in 1:nf)
nlev[i]=length(levels(Factors[[i]]))
# (the unused treatment count nt=prod(nlev) was removed)
# The complete set of groups
Groups=interaction(Factors[[1]],Factors[[2]])
if (nf>2){
for (i in 3:nf)
Groups=interaction(Groups,Factors[[i]])
}
contr=list() # This is a list with all the contrasts for each effect
# Contrasts for main effects: Helmert contrasts for each factor, expanded
# with kronecker products so every row of the full treatment grid is covered.
k=1
a1=contr.helmert(nlev[nf])
contr[[nf]]=kronecker(a1, matrix(1, nrow=prod(nlev[1:(nf-1)]), ncol=1))
for (i in (nf-1):1){
k=k+1
if (i>1){
a2=contr.helmert(nlev[i])
c2=kronecker(a2, matrix(1, nrow=prod(nlev[1:(i-1)]), ncol=1))
contr[[nf-k+1]]=kronecker(matrix(1, nrow=prod(nlev[(i+1):nf]), ncol=1), c2)
}
else{
a2=contr.helmert(nlev[i])
contr[[nf-k+1]]=kronecker(matrix(1, nrow=prod(nlev[(i+1):nf]), ncol=1), a2)
}
}
names(contr)=colnames(Factors)
# Contrasts for two-way interactions: columnwise products of the main-effect
# contrasts of each pair of factors.
for (i in 1:(nf-1)){
for (j in (i+1):nf){
k=k+1
contr[[k]]=MultiplyColumns(contr[[i]],contr[[j]])
names(contr)[k]=paste(names(contr)[i], names(contr)[j], sep="*")
}
}
# Flatten the per-effect contrast matrices into one matrix and record which
# effect each column (contrast) came from.
eff=c()
for (l in 1:k){
eff=c(eff, rep(l, ncol(contr[[l]])))
if (l==1) Cont=contr[[l]]
else Cont=cbind(Cont, contr[[l]])
}
eff=factor(eff)
levels(eff)=names(contr)
return(list(Groups=Groups, Contrasts=t(Cont), Effects=eff))
}
|
/R/ConstructContrasts.R
|
no_license
|
laura20vg/permanova
|
R
| false
| false
| 1,735
|
r
|
# Build Helmert contrasts for the main effects and all two-way interactions of
# the factor columns in `Factors`.
#   Factors      - data frame; only its factor columns are used
#   MaxOrderIter - requested maximum interaction order (capped at the number
#                  of factors)
# Returns a list with:
#   Groups    - interaction of all factors (one level per treatment group)
#   Contrasts - matrix of contrasts, one row per contrast
#   Effects   - factor labelling which effect each contrast row belongs to
# NOTE(review): MaxOrderIter is only capped; the function always builds
# exactly the main effects and two-way interactions -- confirm whether
# higher-order interactions were intended.
ConstructContrasts <- function(Factors, MaxOrderIter=2){
Classes=sapply(Factors,class)
# FIX: drop=FALSE keeps Factors a data frame even when a single factor column
# remains; without it the subset collapses to a vector and dim(Factors)[2]
# below would fail.
Factors=Factors[,Classes=="factor",drop=FALSE]
nf=dim(Factors)[2] # Number of factors
if (MaxOrderIter>nf) {
print(paste("The maximum order of the iterations has been set to", nf))
MaxOrderIter=nf}
nlev=rep(0, nf) #Number of levels per factor
for (i in 1:nf)
nlev[i]=length(levels(Factors[[i]]))
# (the unused treatment count nt=prod(nlev) was removed)
# The complete set of groups
Groups=interaction(Factors[[1]],Factors[[2]])
if (nf>2){
for (i in 3:nf)
Groups=interaction(Groups,Factors[[i]])
}
contr=list() # This is a list with all the contrasts for each effect
# Contrasts for main effects: Helmert contrasts for each factor, expanded
# with kronecker products so every row of the full treatment grid is covered.
k=1
a1=contr.helmert(nlev[nf])
contr[[nf]]=kronecker(a1, matrix(1, nrow=prod(nlev[1:(nf-1)]), ncol=1))
for (i in (nf-1):1){
k=k+1
if (i>1){
a2=contr.helmert(nlev[i])
c2=kronecker(a2, matrix(1, nrow=prod(nlev[1:(i-1)]), ncol=1))
contr[[nf-k+1]]=kronecker(matrix(1, nrow=prod(nlev[(i+1):nf]), ncol=1), c2)
}
else{
a2=contr.helmert(nlev[i])
contr[[nf-k+1]]=kronecker(matrix(1, nrow=prod(nlev[(i+1):nf]), ncol=1), a2)
}
}
names(contr)=colnames(Factors)
# Contrasts for two-way interactions: columnwise products of the main-effect
# contrasts of each pair of factors.
for (i in 1:(nf-1)){
for (j in (i+1):nf){
k=k+1
contr[[k]]=MultiplyColumns(contr[[i]],contr[[j]])
names(contr)[k]=paste(names(contr)[i], names(contr)[j], sep="*")
}
}
# Flatten the per-effect contrast matrices into one matrix and record which
# effect each column (contrast) came from.
eff=c()
for (l in 1:k){
eff=c(eff, rep(l, ncol(contr[[l]])))
if (l==1) Cont=contr[[l]]
else Cont=cbind(Cont, contr[[l]])
}
eff=factor(eff)
levels(eff)=names(contr)
return(list(Groups=Groups, Contrasts=t(Cont), Effects=eff))
}
|
##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 1 Jul 2015
# Function: relativeBowlingER
# This function computes and plots the relative bowling Economy Rate of the bowlers
#
###########################################################################################
# Plot, on one chart, each bowler's mean economy rate as a function of the
# number of wickets taken.
#   frames - list of bowler data sets (one per bowler), passed to ER()
#   names  - character vector of bowler names for the legend
# NOTE(review): col1 provides 5 colours, so this assumes length(frames) <= 5;
# the x-axis is set from the first bowler's maximum wickets -- confirm all
# frames share a comparable range.
relativeBowlingER <- function(frames, names) {
col1 = c("red","blue","cyan","black","brown")
for(i in 1:length(frames))
{
# Clean the bowler data frame
bowler <- cleanBowlerData(frames[[i]])
# Get the max wickets taken by bowler
wkts <- c(0:max(bowler$Wkts))
#compute mean economy rate for the bowler
eRate <- ER(frames[[i]])
# Plot the Economy Rate vs Wickets
if(i == 1) {
plot(wkts,eRate,type="o",pch=13,col=col1[i],lwd=3,
xlab="wickets",ylab="Economy rate",main="Relative economy rate")
}
lines(wkts,eRate,col=col1[i],lwd=3.0)
}
#i <- 1
type = rep(1,length(frames))
width = rep(2.5,length(frames))
legend(x="topright",legend=names, lty=type,
lwd=width,col=col1,bty="n",cex=0.8)
mtext("Data source-Courtesy:ESPN Cricinfo", side=1, line=4, adj=1.0, cex=0.8, col="blue")
}
# Compute the mean economy rate for each wicket count 0..max(Wkts).
# Returns a numeric vector where element i+1 is the mean economy rate over
# all innings in which the bowler took exactly i wickets.
ER <- function(file){
# Clean the bowler data before processing
bowler <- cleanBowlerData(file)
econRate <- NULL
# Calculate mean economy rate versus number of wickets taken. Loop for 0 to max wickets
for (i in 0: max(as.numeric(as.character(bowler$Wkts)))) {
# Create a vector of Economy rate for number of wickets 'i'
a <- bowler[bowler$Wkts == i,]$Econ
b <- as.numeric(as.character(a))
# FIX: compute the mean directly; the original lapply(list(b), mean)
# wrapped a single vector in a one-element list, making econRate a list
# where a plain numeric vector is intended.
econRate[i+1] <- mean(b)
}
econRate
}
|
/SPL/cricketr-master/cricketr-master/R/relativeBowlingER.R
|
no_license
|
revanth465/CricketStats
|
R
| false
| false
| 1,987
|
r
|
##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 1 Jul 2015
# Function: relativeBowlingER
# This function computes and plots the relative bowling Economy Rate of the bowlers
#
###########################################################################################
# Plot, on one chart, each bowler's mean economy rate as a function of the
# number of wickets taken.
#   frames - list of bowler data sets (one per bowler), passed to ER()
#   names  - character vector of bowler names for the legend
# NOTE(review): col1 provides 5 colours, so this assumes length(frames) <= 5;
# the x-axis is set from the first bowler's maximum wickets -- confirm all
# frames share a comparable range.
relativeBowlingER <- function(frames, names) {
col1 = c("red","blue","cyan","black","brown")
for(i in 1:length(frames))
{
# Clean the bowler data frame
bowler <- cleanBowlerData(frames[[i]])
# Get the max wickets taken by bowler
wkts <- c(0:max(bowler$Wkts))
#compute mean economy rate for the bowler
eRate <- ER(frames[[i]])
# Plot the Economy Rate vs Wickets
if(i == 1) {
plot(wkts,eRate,type="o",pch=13,col=col1[i],lwd=3,
xlab="wickets",ylab="Economy rate",main="Relative economy rate")
}
lines(wkts,eRate,col=col1[i],lwd=3.0)
}
#i <- 1
type = rep(1,length(frames))
width = rep(2.5,length(frames))
legend(x="topright",legend=names, lty=type,
lwd=width,col=col1,bty="n",cex=0.8)
mtext("Data source-Courtesy:ESPN Cricinfo", side=1, line=4, adj=1.0, cex=0.8, col="blue")
}
ER <- function(file){
  # Compute the mean bowling economy rate for each wicket count.
  #
  # Args:
  #   file: bowler data accepted by cleanBowlerData()
  # Returns:
  #   a numeric vector whose (i+1)-th element is the mean economy rate over
  #   all innings in which the bowler took exactly i wickets (NaN when the
  #   bowler never took exactly i wickets).
  #
  # Fix: the original did `econRate[i+1] <- lapply(list(b), mean)`, which
  # silently coerced econRate into a *list* of one-element means; vapply
  # returns a plain numeric vector, which is what plot()/lines() expect.
  bowler <- cleanBowlerData(file)
  # Wkts may be a factor, so convert via character before taking the max
  maxWkts <- max(as.numeric(as.character(bowler$Wkts)))
  vapply(0:maxWkts, function(i) {
    # economy rates of every innings with exactly i wickets
    econ <- as.numeric(as.character(bowler[bowler$Wkts == i, ]$Econ))
    mean(econ)
  }, numeric(1))
}
|
# Write the MegaMUGA probe sequences out as a FASTA file.
library(qtl2)
mm <- read_csv("../../Sequences/mm_seq.csv", rownames_included=FALSE)
# one two-line record per marker: ">name" header, then its probe sequence;
# writeLines() appends a newline to every record, matching the original output
fasta_records <- paste0(">", mm$marker, "\n", mm$probe_seq)
writeLines(fasta_records, con="megamuga.fa")
|
/Blast/R/create_mm_fasta.R
|
permissive
|
kbroman/MUGAarrays
|
R
| false
| false
| 207
|
r
|
# create megamuga fasta file
# Reads the MegaMUGA probe-sequence table and writes each marker as a
# two-line FASTA record (">marker" header followed by its probe sequence).
library(qtl2)
# read_csv here comes from qtl2; rownames_included=FALSE means the first
# column is ordinary data, not row names -- presumably qtl2's CSV helper,
# TODO confirm against the qtl2 documentation
mm <- read_csv("../../Sequences/mm_seq.csv", rownames_included=FALSE)
# paste0 is vectorised over rows; collapse="" joins all records into one
# string so a single cat() call writes the whole file at once
cat(paste0(">", mm$marker, "\n", mm$probe_seq, "\n", collapse=""),
file="megamuga.fa")
|
# Build repeated train/test splits of the warfarin data and attach a
# decision-tree propensity score P(t|x) to every row, then save the splits.
#
# NOTE(review): the library() calls below are commented out, yet the script
# uses caret::train()/trainControl() and rpart (via caret) -- presumably the
# packages are attached by a wrapper script or session; confirm before
# running this file standalone.
# library(data.table)
# library(Publish)
# library(caret)
# library(sigmoid)
# library(rpart)
rm(list=ls())
graphics.off()
##########################################################################################################
# Parameters
##########################################################################################################
# Choose the seeds
# one RNG seed per split repetition (Run = 1..5)
seeds = c(123,156,67,1,43)
# outer loops: 5 data-generation seeds x 3 treatment-threshold variants
for(data_seed in c(1,2,3,4,5)){
for(r in c(1,2,3)){
##########################################################################################################
# read data
##########################################################################################################
# raw and one-hot-encoded versions of the warfarin data for each threshold
data_list = c("warfarin_0.33.csv","warfarin_r0.06.csv","warfarin_r0.11.csv")
data_enc_list = c("warfarin_enc_0.33.csv","warfarin_enc_r0.06.csv","warfarin_enc_r0.11.csv")
threshold_list = c("0.33","r0.06","r0.11")
# NOTE(review): absolute user-specific paths; `path` is built but never used
path = paste("/Users/nathanjo/Documents/Github/prescriptive-trees/data/processed/warfarin_intermediate/seed",toString(data_seed),"/",sep = "")
data_path = paste("/Users/nathanjo/Documents/Github/prescriptive-trees/data/processed/warfarin/seed",toString(data_seed),"/",data_list[r],sep = "")
data_enc_path = paste("/Users/nathanjo/Documents/Github/prescriptive-trees/data/processed/warfarin/seed",toString(data_seed),"/",data_enc_list[r],sep = "")
data <- read.csv(data_path, header = TRUE, sep = ",",na.strings = "",stringsAsFactors = TRUE)
data_enc <- read.csv(data_enc_path, header = TRUE, sep = ",",na.strings = "",stringsAsFactors = TRUE)
# the treatment indicator t is categorical
data$t <- as.factor(data$t)
data_enc$t <- as.factor(data_enc$t)
threshold = threshold_list[r]
for(Run in c(1,2,3,4,5)){
## set the seed to make your partition reproducible
set.seed(seeds[Run])
##########################################################################################################
# Splitting data into training and test
##########################################################################################################
smp_size = 3000
# For two specific seed/threshold combinations, strongly up-weight the rare
# cell (t == 2 & y == 0): those rows get sampling weight 1 while every other
# row gets 1/n, which effectively forces them into the training sample.
if((data_seed==1 | data_seed == 3) & r==3){
rare_index <- (data$t == 2 & data$y ==0)
prob = rep(1/nrow(data), nrow(data))
prob[rare_index]=1
train_ind <- sample(seq_len(nrow(data)), size = smp_size, prob = prob)
}else{
train_ind <- sample(seq_len(nrow(data)), size = smp_size)
}
# apply the same row split to the raw and the encoded data sets
data_train <- data[train_ind, ]
data_test <- data[-train_ind, ]
data_train_enc <- data_enc[train_ind, ]
data_test_enc <- data_enc[-train_ind, ]
##########################################################################################################
# Learning propensity score P(t|x) for each entry using decision tree
##########################################################################################################
# drop the outcome columns so the propensity model only sees covariates + t
t_train_data = data_train[,!(names(data_train) %in% c("y","y0","y1","y2"))]
t_test_data = data_test[,!(names(data_test) %in% c("y","y0","y1","y2"))]
# 10-fold CV repeated 3 times to tune the rpart complexity parameter
train_control<- trainControl(method="repeatedcv", number=10, repeats = 3)
model.cv <- train(t ~ .,
data = t_train_data,
method = "rpart",
trControl = train_control)
model <- model.cv$finalModel
data_train_enc$prob_t_pred_tree <- NA
data_test_enc$prob_t_pred_tree <- NA
# store P(t = observed treatment | x) for every row, in both data versions
for(t in levels(data$t)){
index <- data_train$t == t
data_train_enc$prob_t_pred_tree[index] <- predict(model, t_train_data, type = "prob")[index,t]
data_train$prob_t_pred_tree[index] <- predict(model, t_train_data, type = "prob")[index,t]
index <- data_test$t == t
data_test_enc$prob_t_pred_tree[index] <- predict(model, t_test_data, type = "prob")[index,t]
data_test$prob_t_pred_tree[index] <- predict(model, t_test_data, type = "prob")[index,t]
}
rm(t_train_data,t_test_data)
# par(xpd = TRUE)
# plot(model, compress = TRUE)
# text(model, use.n = TRUE)
rm(model,model.cv,train_control)
##########################################################################################################
# Save the files
##########################################################################################################
# Save files
# NOTE(review): output lands in the current working directory, not `path`
write.csv(data_train_enc,paste("data_train_enc_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
write.csv(data_test_enc,paste("data_test_enc_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
write.csv(data_train,paste("data_train_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
write.csv(data_test,paste("data_test_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
}
}
}
|
/data/datagen/warfarin/2-split_warfarin.R
|
no_license
|
nathanaj99/prescriptive-trees
|
R
| false
| false
| 4,945
|
r
|
# Build repeated train/test splits of the warfarin data and attach a
# decision-tree propensity score P(t|x) to every row, then save the splits.
#
# NOTE(review): the library() calls below are commented out, yet the script
# uses caret::train()/trainControl() and rpart (via caret) -- presumably the
# packages are attached by a wrapper script or session; confirm before
# running this file standalone.
# library(data.table)
# library(Publish)
# library(caret)
# library(sigmoid)
# library(rpart)
rm(list=ls())
graphics.off()
##########################################################################################################
# Parameters
##########################################################################################################
# Choose the seeds
# one RNG seed per split repetition (Run = 1..5)
seeds = c(123,156,67,1,43)
# outer loops: 5 data-generation seeds x 3 treatment-threshold variants
for(data_seed in c(1,2,3,4,5)){
for(r in c(1,2,3)){
##########################################################################################################
# read data
##########################################################################################################
# raw and one-hot-encoded versions of the warfarin data for each threshold
data_list = c("warfarin_0.33.csv","warfarin_r0.06.csv","warfarin_r0.11.csv")
data_enc_list = c("warfarin_enc_0.33.csv","warfarin_enc_r0.06.csv","warfarin_enc_r0.11.csv")
threshold_list = c("0.33","r0.06","r0.11")
# NOTE(review): absolute user-specific paths; `path` is built but never used
path = paste("/Users/nathanjo/Documents/Github/prescriptive-trees/data/processed/warfarin_intermediate/seed",toString(data_seed),"/",sep = "")
data_path = paste("/Users/nathanjo/Documents/Github/prescriptive-trees/data/processed/warfarin/seed",toString(data_seed),"/",data_list[r],sep = "")
data_enc_path = paste("/Users/nathanjo/Documents/Github/prescriptive-trees/data/processed/warfarin/seed",toString(data_seed),"/",data_enc_list[r],sep = "")
data <- read.csv(data_path, header = TRUE, sep = ",",na.strings = "",stringsAsFactors = TRUE)
data_enc <- read.csv(data_enc_path, header = TRUE, sep = ",",na.strings = "",stringsAsFactors = TRUE)
# the treatment indicator t is categorical
data$t <- as.factor(data$t)
data_enc$t <- as.factor(data_enc$t)
threshold = threshold_list[r]
for(Run in c(1,2,3,4,5)){
## set the seed to make your partition reproducible
set.seed(seeds[Run])
##########################################################################################################
# Splitting data into training and test
##########################################################################################################
smp_size = 3000
# For two specific seed/threshold combinations, strongly up-weight the rare
# cell (t == 2 & y == 0): those rows get sampling weight 1 while every other
# row gets 1/n, which effectively forces them into the training sample.
if((data_seed==1 | data_seed == 3) & r==3){
rare_index <- (data$t == 2 & data$y ==0)
prob = rep(1/nrow(data), nrow(data))
prob[rare_index]=1
train_ind <- sample(seq_len(nrow(data)), size = smp_size, prob = prob)
}else{
train_ind <- sample(seq_len(nrow(data)), size = smp_size)
}
# apply the same row split to the raw and the encoded data sets
data_train <- data[train_ind, ]
data_test <- data[-train_ind, ]
data_train_enc <- data_enc[train_ind, ]
data_test_enc <- data_enc[-train_ind, ]
##########################################################################################################
# Learning propensity score P(t|x) for each entry using decision tree
##########################################################################################################
# drop the outcome columns so the propensity model only sees covariates + t
t_train_data = data_train[,!(names(data_train) %in% c("y","y0","y1","y2"))]
t_test_data = data_test[,!(names(data_test) %in% c("y","y0","y1","y2"))]
# 10-fold CV repeated 3 times to tune the rpart complexity parameter
train_control<- trainControl(method="repeatedcv", number=10, repeats = 3)
model.cv <- train(t ~ .,
data = t_train_data,
method = "rpart",
trControl = train_control)
model <- model.cv$finalModel
data_train_enc$prob_t_pred_tree <- NA
data_test_enc$prob_t_pred_tree <- NA
# store P(t = observed treatment | x) for every row, in both data versions
for(t in levels(data$t)){
index <- data_train$t == t
data_train_enc$prob_t_pred_tree[index] <- predict(model, t_train_data, type = "prob")[index,t]
data_train$prob_t_pred_tree[index] <- predict(model, t_train_data, type = "prob")[index,t]
index <- data_test$t == t
data_test_enc$prob_t_pred_tree[index] <- predict(model, t_test_data, type = "prob")[index,t]
data_test$prob_t_pred_tree[index] <- predict(model, t_test_data, type = "prob")[index,t]
}
rm(t_train_data,t_test_data)
# par(xpd = TRUE)
# plot(model, compress = TRUE)
# text(model, use.n = TRUE)
rm(model,model.cv,train_control)
##########################################################################################################
# Save the files
##########################################################################################################
# Save files
# NOTE(review): output lands in the current working directory, not `path`
write.csv(data_train_enc,paste("data_train_enc_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
write.csv(data_test_enc,paste("data_test_enc_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
write.csv(data_train,paste("data_train_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
write.csv(data_test,paste("data_test_",toString(threshold),"_",toString(Run),".csv",sep=''),row.names = FALSE)
}
}
}
# Import the downloaded multidimensional table with our products, set the
# separator and decimal characters, and read it with Polish characters (UTF-8):
ceny <- read.table("CENY_2917_CTAB_20200123153137.csv", sep=";",dec=",",fileEncoding = "UTF-8")
# drop the unneeded first column
ceny <- ceny[,-1]
# TABLES WITH RICE PRICES IN THE INDIVIDUAL YEARS
# (V2 is the region column; the 12 monthly columns of a given year are
#  spaced 140 columns apart in the raw export)
ryz06 <- subset(ceny,select=c(V2,V3,V143,V283,V423,V563,V703,V843,V983,V1123,V1263,V1403,V1543))
ryz07 <- subset(ceny,select=c(V2,V4,V144,V284,V424,V564,V704,V844,V984,V1124,V1264,V1404,V1544))
ryz08 <- subset(ceny,select=c(V2,V5,V145,V285,V425,V565,V705,V845,V985,V1125,V1265,V1405,V1545))
ryz09 <- subset(ceny,select=c(V2,V6,V146,V286,V426,V566,V706,V846,V986,V1126,V1266,V1406,V1546))
ryz10 <- subset(ceny,select=c(V2,V7,V147,V287,V427,V567,V707,V847,V987,V1127,V1267,V1407,V1547))
ryz11 <- subset(ceny,select=c(V2,V8,V148,V288,V428,V568,V708,V848,V988,V1128,V1268,V1408,V1548))
ryz12 <- subset(ceny,select=c(V2,V9,V149,V289,V429,V569,V709,V849,V989,V1129,V1269,V1409,V1549))
ryz13 <- subset(ceny,select=c(V2,V10,V150,V290,V430,V570,V710,V850,V990,V1130,V1270,V1410,V1550))
ryz14 <- subset(ceny,select=c(V2,V11,V151,V291,V431,V571,V711,V851,V991,V1131,V1271,V1411,V1551))
ryz15 <- subset(ceny,select=c(V2,V12,V152,V292,V432,V572,V712,V852,V992,V1132,V1272,V1412,V1552))
ryz16 <- subset(ceny,select=c(V2,V13,V153,V293,V433,V573,V713,V853,V993,V1133,V1273,V1413,V1553))
ryz17 <- subset(ceny,select=c(V2,V14,V154,V294,V434,V574,V714,V854,V994,V1134,V1274,V1414,V1554))
ryz18 <- subset(ceny,select=c(V2,V15,V155,V295,V435,V575,V715,V855,V995,V1135,V1275,V1415,V1555))
ryz19 <- subset(ceny,select=c(V2,V16,V156,V296,V436,V576,V716,V856,V996,V1136,V1276,V1416,V1556))
# average rice price in Poland in 2006 (12 monthly values, hand-copied)
r6 <- mean(c(2.59,2.56,2.57,2.60,2.58,2.62,2.63,2.65,2.69,2.69,2.71,2.72))
# 2007
r7 <- mean(c(2.73, 2.78, 2.79, 2.83, 2.85, 2.86, 2.88, 2.89, 2.91, 2.97, 3.09, 3.15))
# and so on for each year...
r8 <- mean(c(3.26, 3.33, 3.38, 3.46, 3.69, 3.97, 4.21, 4.38, 4.48, 4.55, 4.58, 4.60))
r9 <- mean(c(4.58, 4.60, 4.62, 4.71, 4.74, 4.72, 4.72, 4.70, 4.66, 4.56, 4.48, 4.41))
r10 <- mean(c(4.41, 4.36, 4.35, 4.30, 4.27, 4.25, 4.15, 4.13, 4.07, 4.02, 3.95, 3.97))
r11 <- mean(c(4.01, 4.04, 4.10, 4.11, 4.09, 4.12 ,4.11, 4.09, 4.10, 4.12, 4.14, 4.18))
r12 <- mean(c(4.19, 4.26, 4.31, 4.30, 4.29, 4.35, 4.32, 4.27, 4.22, 4.23, 4.23, 4.24))
r13 <- mean(c(4.27, 4.27, 4.25, 4.25, 4.19, 4.16, 4.12, 4.08, 4.08, 4.07, 4.03, 4.01))
r14 <- mean(c(4.03, 4.03, 4.02, 4.00, 3.95, 3.91, 3.88, 3.86, 3.82, 3.85, 3.82, 3.81))
r15 <- mean(c(3.80, 3.80, 3.79, 3.77, 3.79, 3.80, 3.78, 3.80, 3.79, 3.78, 3.79, 3.76))
r16 <- mean(c(3.80, 3.81, 3.78, 3.74, 3.81, 3.79, 3.81, 3.78, 3.82, 3.80, 3.86, 3.80))
r17 <- mean(c(3.86, 3.88, 3.86, 3.84, 3.85, 3.84, 3.86, 3.83, 3.84, 3.82, 3.86, 3.83))
r18 <- mean(c(3.83, 3.81, 3.83, 3.83, 3.83, 3.81, 3.77, 3.77, 3.85, 3.81, 3.79, 3.80))
r19 <- mean(c(3.82, 3.85, 3.87, 3.92, 3.97, 3.99, 4.03, 4.01, 4.02, 4.03, 4.01, 3.99))
# average rice price over all 14 years
# (note: uses `=` assignment while the rest of the script uses `<-`)
sredniaryz=(r6+r7+r8+r9+r10+r11+r12+r13+r14+r15+r16+r17+r18+r19)/14
daneryz <- c(r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17,r18,r19)
# standard deviation
sd(daneryz)
# variance
var(daneryz)
# remaining summary statistics (min/quartiles/mean/max)
summary(daneryz)
|
/projektr.R
|
no_license
|
brzezinskamonika/R-projekt
|
R
| false
| false
| 3,225
|
r
|
# Import the downloaded multidimensional table with our products, set the
# separator and decimal characters, and read it with Polish characters (UTF-8):
ceny <- read.table("CENY_2917_CTAB_20200123153137.csv", sep=";",dec=",",fileEncoding = "UTF-8")
# drop the unneeded first column
ceny <- ceny[,-1]
# TABLES WITH RICE PRICES IN THE INDIVIDUAL YEARS
# (V2 is the region column; the 12 monthly columns of a given year are
#  spaced 140 columns apart in the raw export)
ryz06 <- subset(ceny,select=c(V2,V3,V143,V283,V423,V563,V703,V843,V983,V1123,V1263,V1403,V1543))
ryz07 <- subset(ceny,select=c(V2,V4,V144,V284,V424,V564,V704,V844,V984,V1124,V1264,V1404,V1544))
ryz08 <- subset(ceny,select=c(V2,V5,V145,V285,V425,V565,V705,V845,V985,V1125,V1265,V1405,V1545))
ryz09 <- subset(ceny,select=c(V2,V6,V146,V286,V426,V566,V706,V846,V986,V1126,V1266,V1406,V1546))
ryz10 <- subset(ceny,select=c(V2,V7,V147,V287,V427,V567,V707,V847,V987,V1127,V1267,V1407,V1547))
ryz11 <- subset(ceny,select=c(V2,V8,V148,V288,V428,V568,V708,V848,V988,V1128,V1268,V1408,V1548))
ryz12 <- subset(ceny,select=c(V2,V9,V149,V289,V429,V569,V709,V849,V989,V1129,V1269,V1409,V1549))
ryz13 <- subset(ceny,select=c(V2,V10,V150,V290,V430,V570,V710,V850,V990,V1130,V1270,V1410,V1550))
ryz14 <- subset(ceny,select=c(V2,V11,V151,V291,V431,V571,V711,V851,V991,V1131,V1271,V1411,V1551))
ryz15 <- subset(ceny,select=c(V2,V12,V152,V292,V432,V572,V712,V852,V992,V1132,V1272,V1412,V1552))
ryz16 <- subset(ceny,select=c(V2,V13,V153,V293,V433,V573,V713,V853,V993,V1133,V1273,V1413,V1553))
ryz17 <- subset(ceny,select=c(V2,V14,V154,V294,V434,V574,V714,V854,V994,V1134,V1274,V1414,V1554))
ryz18 <- subset(ceny,select=c(V2,V15,V155,V295,V435,V575,V715,V855,V995,V1135,V1275,V1415,V1555))
ryz19 <- subset(ceny,select=c(V2,V16,V156,V296,V436,V576,V716,V856,V996,V1136,V1276,V1416,V1556))
# average rice price in Poland in 2006 (12 monthly values, hand-copied)
r6 <- mean(c(2.59,2.56,2.57,2.60,2.58,2.62,2.63,2.65,2.69,2.69,2.71,2.72))
# 2007
r7 <- mean(c(2.73, 2.78, 2.79, 2.83, 2.85, 2.86, 2.88, 2.89, 2.91, 2.97, 3.09, 3.15))
# and so on for each year...
r8 <- mean(c(3.26, 3.33, 3.38, 3.46, 3.69, 3.97, 4.21, 4.38, 4.48, 4.55, 4.58, 4.60))
r9 <- mean(c(4.58, 4.60, 4.62, 4.71, 4.74, 4.72, 4.72, 4.70, 4.66, 4.56, 4.48, 4.41))
r10 <- mean(c(4.41, 4.36, 4.35, 4.30, 4.27, 4.25, 4.15, 4.13, 4.07, 4.02, 3.95, 3.97))
r11 <- mean(c(4.01, 4.04, 4.10, 4.11, 4.09, 4.12 ,4.11, 4.09, 4.10, 4.12, 4.14, 4.18))
r12 <- mean(c(4.19, 4.26, 4.31, 4.30, 4.29, 4.35, 4.32, 4.27, 4.22, 4.23, 4.23, 4.24))
r13 <- mean(c(4.27, 4.27, 4.25, 4.25, 4.19, 4.16, 4.12, 4.08, 4.08, 4.07, 4.03, 4.01))
r14 <- mean(c(4.03, 4.03, 4.02, 4.00, 3.95, 3.91, 3.88, 3.86, 3.82, 3.85, 3.82, 3.81))
r15 <- mean(c(3.80, 3.80, 3.79, 3.77, 3.79, 3.80, 3.78, 3.80, 3.79, 3.78, 3.79, 3.76))
r16 <- mean(c(3.80, 3.81, 3.78, 3.74, 3.81, 3.79, 3.81, 3.78, 3.82, 3.80, 3.86, 3.80))
r17 <- mean(c(3.86, 3.88, 3.86, 3.84, 3.85, 3.84, 3.86, 3.83, 3.84, 3.82, 3.86, 3.83))
r18 <- mean(c(3.83, 3.81, 3.83, 3.83, 3.83, 3.81, 3.77, 3.77, 3.85, 3.81, 3.79, 3.80))
r19 <- mean(c(3.82, 3.85, 3.87, 3.92, 3.97, 3.99, 4.03, 4.01, 4.02, 4.03, 4.01, 3.99))
# average rice price over all 14 years
# (note: uses `=` assignment while the rest of the script uses `<-`)
sredniaryz=(r6+r7+r8+r9+r10+r11+r12+r13+r14+r15+r16+r17+r18+r19)/14
daneryz <- c(r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17,r18,r19)
# standard deviation
sd(daneryz)
# variance
var(daneryz)
# remaining summary statistics (min/quartiles/mean/max)
summary(daneryz)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/represearch.R
\name{parse_currency}
\alias{parse_currency}
\title{parse_currency}
\usage{
parse_currency(amount, currency)
}
\arguments{
\item{amount}{amount}
\item{currency}{currency}
}
\description{
This function will parse a currency amount
}
\keyword{org-mode}
\keyword{table}
|
/man/parse_currency.Rd
|
no_license
|
philipphoman/represearch
|
R
| false
| true
| 361
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/represearch.R
\name{parse_currency}
\alias{parse_currency}
\title{parse_currency}
\usage{
parse_currency(amount, currency)
}
\arguments{
\item{amount}{amount}
\item{currency}{currency}
}
\description{
This function will parse a currency amount
}
\keyword{org-mode}
\keyword{table}
|
# Load the 2010 SIH (Brazilian hospital admissions) extract, quantify
# duplicated records, and inspect which yearly files contain 2010 admissions.
#
# NOTE(review): rm(list=ls()) wipes the workspace, yet SIH2011..SIH2020 used
# below are never read in this script -- presumably they are loaded by an
# earlier step run in the same session; confirm the intended run order.
# clean everything done before
rm(list=ls())
# use more memory
# (memory.limit() is Windows-only and defunct in R >= 4.2)
memory.limit(9999999999)
# load packages
library(data.table)
library(tidyverse)
library(bit64)
# read data
getwd()
wd1 <- "D:/SIH CSV"
setwd(wd1)
# read only the columns needed for the analysis
SIH2010 <- as.data.frame(fread("./SIH2010.csv",
select = c("CEP","MUNIC_RES","NASC","SEXO","RACA_COR",
"DT_INTER","DT_SAIDA","DIAG_PRINC","CGC_HOSP",
"MUNIC_MOV","CNES")))
# subset with distinct values
# (rows identical on all of these key fields count as duplicated records)
df <- SIH2010 %>%
distinct(CEP,MUNIC_RES,NASC,SEXO,RACA_COR,DT_INTER,DIAG_PRINC,CGC_HOSP,
MUNIC_MOV,CNES, .keep_all = TRUE)
# number of duplicated values = 316351
nrow(SIH2010) - nrow(df)
# proportion of duplicated values = 0.02698128
(nrow(SIH2010) - nrow(df)) / nrow(SIH2010)
# find if there are hospitalization in 2010
# (DT_INTER appears to start with the 4-digit year -- a "2010" prefix flags
#  a 2010 admission; TODO confirm the DT_INTER format)
table(startsWith(as.character(SIH2010$DT_INTER), "2010"))
table(startsWith(as.character(SIH2011$DT_INTER), "2010"))
table(startsWith(as.character(SIH2012$DT_INTER), "2010"))
table(startsWith(as.character(SIH2013$DT_INTER), "2010"))
table(startsWith(as.character(SIH2014$DT_INTER), "2010"))
table(startsWith(as.character(SIH2015$DT_INTER), "2010"))
table(startsWith(as.character(SIH2016$DT_INTER), "2010"))
table(startsWith(as.character(SIH2017$DT_INTER), "2010"))
table(startsWith(as.character(SIH2018$DT_INTER), "2010"))
table(startsWith(as.character(SIH2019$DT_INTER), "2010"))
table(startsWith(as.character(SIH2020$DT_INTER), "2010"))
# select just hospitalizations in 2010
# (each yearly billing file may hold admissions that occurred in 2010 --
#  presumably late-processed records; filter every file on the 2010 prefix)
SIH2010_2010 <- SIH2010 %>%
filter(startsWith(as.character(SIH2010$DT_INTER), "2010"))
SIH2011_2010 <- SIH2011 %>%
filter(startsWith(as.character(SIH2011$DT_INTER), "2010"))
SIH2012_2010 <- SIH2012 %>%
filter(startsWith(as.character(SIH2012$DT_INTER), "2010"))
SIH2013_2010 <- SIH2013 %>%
filter(startsWith(as.character(SIH2013$DT_INTER), "2010"))
SIH2014_2010 <- SIH2014 %>%
filter(startsWith(as.character(SIH2014$DT_INTER), "2010"))
SIH2015_2010 <- SIH2015 %>%
filter(startsWith(as.character(SIH2015$DT_INTER), "2010"))
SIH2016_2010 <- SIH2016 %>%
filter(startsWith(as.character(SIH2016$DT_INTER), "2010"))
SIH2017_2010 <- SIH2017 %>%
filter(startsWith(as.character(SIH2017$DT_INTER), "2010"))
SIH2018_2010 <- SIH2018 %>%
filter(startsWith(as.character(SIH2018$DT_INTER), "2010"))
SIH2019_2010 <- SIH2019 %>%
filter(startsWith(as.character(SIH2019$DT_INTER), "2010"))
SIH2020_2010 <- SIH2020 %>%
filter(startsWith(as.character(SIH2020$DT_INTER), "2010"))
# gather hospitalizations in 2010 in just one data frame
# (from here on SIH2010 means "all admissions that happened in 2010")
SIH2010 <- rbind(SIH2010_2010, SIH2011_2010, SIH2012_2010, SIH2013_2010,
SIH2014_2010, SIH2015_2010, SIH2016_2010, SIH2017_2010,
SIH2018_2010, SIH2019_2010, SIH2020_2010)
# find duplicated values
# NOTE(review): the name `duplicated` shadows base::duplicated in this session
duplicated <- SIH2010[duplicated(SIH2010[,c("SEXO","NASC","DT_INTER",
"MUNIC_MOV","DIAG_PRINC")]),]
# proportion of duplicated values
nrow(duplicated) / nrow(SIH2010)
# return number of characters from variable "DIAG_PRINC"
# (ICD-10 codes here are either 3 or 4 characters long)
table(nchar(SIH2010$DIAG_PRINC, keepNA = FALSE), exclude = NULL)
# create binomial variable "ICSAP": 1 when the primary diagnosis (DIAG_PRINC)
# is on the ambulatory-care-sensitive condition list, 0 otherwise.
# The original encoded this as 115 separate ifelse(grepl("^XXX", ...)) calls;
# a single vectorised prefix test is equivalent (grepl("^A18", x) is exactly
# a 3-character prefix match) and far easier to audit.
# ICSAP conditions identified by the first 3 characters of the ICD-10 code
ICSAP3c <- c("A18","A19","A33","A34","A35","A36","A37","A51","A52","A53",
             "A95","B05","B06","B16","B26","B50","B51","B52","B53","B54",
             "B77","I00","I01","I02","A00","A01","A02","A03","A04","A05",
             "A06","A07","A08","A09","E86","D50","E40","E41","E42","E43",
             "E44","E45","E46","E50","E51","E52","E53","E54","E55","E56",
             "E57","E58","E59","E60","E61","E62","E63","E64","H66","J00",
             "J01","J02","J03","J06","J31","J13","J14","J45","J46","J20",
             "J21","J40","J41","J42","J43","J44","J47","I10","I11","I20",
             "I50","J81","G45","G46","I63","I64","I65","I66","I67","I69",
             "G40","G41","N10","N11","N12","N30","N34","A46","L01","L02",
             "L03","L04","L08","N70","N71","N72","N73","N75","N76","K25",
             "K26","K27","K28","A50","O23")
# ICSAP conditions identified by the full 4-character ICD-10 code
ICSAP4c <- c("A150","A151","A152","A153","A154","A155","A156","A157","A158",
             "A159","A160","A161","A162","A163","A164","A165","A166","A167",
             "A168","A169","A170","A171","A172","A173","A174","A175","A176",
             "A177","A178","A179","G000","J153","J154","J158","J159","J181",
             "E100","E101","E102","E103","E104","E105","E106","E107","E108",
             "E109","E110","E111","E112","E113","E114","E115","E116","E117",
             "E118","E119","E120","E121","E122","E123","E124","E125","E126",
             "E127","E128","E129","E130","E131","E132","E133","E134","E135",
             "E136","E137","E138","E139","E140","E141","E142","E143","E144",
             "E145","E146","E147","E148","E149","N390","K920","K921","K922",
             "P350")
# flag a hospitalisation as ICSAP when its diagnosis matches either list
# (as.character guards against DIAG_PRINC arriving as a factor; %in% never
#  returns NA, so missing codes end up as 0, as in the original)
SIH2010$ICSAP <- ifelse(substr(as.character(SIH2010$DIAG_PRINC), 1, 3) %in% ICSAP3c |
                          SIH2010$DIAG_PRINC %in% ICSAP4c, 1, 0)
# check variable building
# (counts are identical to the original; labels are now numeric 0/1 instead
#  of the character "0"/"1" the original's "Preencher" placeholder produced)
table(SIH2010$ICSAP, exclude = NULL)
# proportion of ICSAPs
SIH2010$ICSAP <- as.numeric(SIH2010$ICSAP)
sum(SIH2010$ICSAP) / length(SIH2010$ICSAP)
# create "ICSAPGroup" - Group 1
# Flags group 1 of the ICSAP classification with the same vectorised prefix
# test used for the overall ICSAP flag (each grepl("^XXX", ...) in the
# original is exactly a 3-character prefix match).
# 3-character ICD-10 prefixes belonging to group 1
Group1_3c <- c("A18","A19","A33","A34","A35","A36","A37","A51","A52","A53",
               "A95","B05","B06","B16","B26","B50","B51","B52","B53","B54",
               "B77","I00","I01","I02")
# ICSAP Group 1 with 4 characters
ICSAP014 <- c("A150","A151","A152","A153","A154","A155","A156","A157","A158",
              "A159","A160","A161","A162","A163","A164","A165","A166","A167",
              "A168","A169","A170","A171","A172","A173","A174","A175","A176",
              "A177","A178","A179","G000")
# 1 when the diagnosis matches either list, 0 otherwise (same 0/1 numeric
# result as the original init-to-0-then-overwrite sequence)
SIH2010$ICSAPGroup <- ifelse(substr(as.character(SIH2010$DIAG_PRINC), 1, 3) %in% Group1_3c |
                               SIH2010$DIAG_PRINC %in% ICSAP014, 1, 0)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 1" among all ICSAP admissions
sum(SIH2010$ICSAPGroup == 1) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 2
# 3-character CID prefixes of Group 2 (A00-A09 plus E86), matched in one
# vectorised pass instead of eleven grepl("^...") calls
g02_prefix <- c(paste0("A0", 0:9), "E86")
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g02_prefix,
                             2, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 2" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 2) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 3 (single CID prefix: D50)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% "D50",
                             3, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 3" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 3) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 4
# 3-character CID prefixes of Group 4 (E40-E46 and E50-E64), generated as
# ranges instead of 22 repeated grepl("^...") calls
g04_prefix <- c(paste0("E4", 0:6), paste0("E", 50:64))
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g04_prefix,
                             4, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 4" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 4) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 5
# 3-character CID prefixes of Group 5, matched in one vectorised pass
g05_prefix <- c("H66", "J00", "J01", "J02", "J03", "J06", "J31")
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g05_prefix,
                             5, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 5" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 5) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 6
# 3-character CID prefixes of Group 6
g06_prefix <- c("J13", "J14")
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g06_prefix,
                             6, SIH2010$ICSAPGroup)
# ICSAP Group 6 with 4 characters
ICSAP064 <- c("J153", "J154", "J158", "J159", "J181")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP064, 6, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 6" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 6) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 7 (CID prefixes J45, J46)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% c("J45", "J46"),
                             7, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 7" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 7) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 8
# 3-character CID prefixes of Group 8 (J20, J21, J40-J44, J47)
g08_prefix <- c("J20", "J21", paste0("J4", 0:4), "J47")
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g08_prefix,
                             8, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 8" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 8) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 9 (CID prefixes I10, I11)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% c("I10", "I11"),
                             9, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 9" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 9) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 10 (single CID prefix: I20)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% "I20",
                             10, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 10" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 10) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 11 (CID prefixes I50, J81)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% c("I50", "J81"),
                             11, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 11" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 11) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 12
# 3-character CID prefixes of Group 12 (G45, G46, I63-I67, I69)
g12_prefix <- c("G45", "G46", paste0("I6", c(3:7, 9)))
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g12_prefix,
                             12, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 12" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 12) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 13
# ICSAP Group 13 with 4 characters: the full E100-E149 range, generated
# programmatically instead of a 50-element literal vector
ICSAP134 <- paste0("E", 100:149)
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP134, 13, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 13" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 13) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 14 (CID prefixes G40, G41)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% c("G40", "G41"),
                             14, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 14" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 14) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 15
# 3-character CID prefixes of Group 15
g15_prefix <- c("N10", "N11", "N12", "N30", "N34")
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g15_prefix,
                             15, SIH2010$ICSAPGroup)
# ICSAP Group 15 with 4 characters
ICSAP154 <- c("N390")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP154, 15, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 15" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 15) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 16
# 3-character CID prefixes of Group 16 (A46, L01-L04, L08)
g16_prefix <- c("A46", paste0("L0", c(1:4, 8)))
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g16_prefix,
                             16, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 16" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 16) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 17
# 3-character CID prefixes of Group 17 (N70-N73, N75, N76)
g17_prefix <- paste0("N7", c(0:3, 5, 6))
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g17_prefix,
                             17, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 17" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 17) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 18
# 3-character CID prefixes of Group 18 (K25-K28)
g18_prefix <- paste0("K2", 5:8)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g18_prefix,
                             18, SIH2010$ICSAPGroup)
# ICSAP Group 18 with 4 characters
ICSAP184 <- c("K920", "K921", "K922")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP184, 18, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 18" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 18) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 19
# 3-character CID prefixes of Group 19 (A50, O23)
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% c("A50", "O23"),
                             19, SIH2010$ICSAPGroup)
# ICSAP Group 19 with 4 characters
ICSAP194 <- c("P350")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP194, 19, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 19" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 19) / sum(SIH2010$ICSAP == 1)
# "ICSAPGroup" by "SEXO"
t1 <- table(SIH2010$ICSAPGroup, SIH2010$SEXO)
t1
library("writexl")
write_xlsx(as.data.frame(t1),"./t1.xlsx")
# "ICSAPGroup" by "RACA_COR"
t2 <- table(SIH2010$ICSAPGroup, SIH2010$RACA_COR)
t2
write_xlsx(as.data.frame(t2),"./t2.xlsx")
# "ICSAPGroup" by "INSTRU"
t3 <- table(SIH2010$ICSAPGroup, SIH2010$INSTRU)
t3
# "ICSAP" by "MUNIC_RES"
cityICSAP <- SIH2010 %>%
group_by(MUNIC_RES) %>%
summarize(ICSAPNum = sum(ICSAP),
ITNum = n(),
ICSAPRate = sum(ICSAP) / n())
# ICSAP - visual and numerical summary of distributions
windows()
hist(cityICSAP$ICSAPRate)
plot(density(cityICSAP$ICSAPRate))
summary(cityICSAP$ICSAPRate)
# download da tabela de codigos de municipios do IBGE
URL <- "ftp://geoftp.ibge.gov.br/organizacao_do_territorio/estrutura_territorial/divisao_territorial/2018/DTB_2018.zip"
# mode = "wb" is required: on Windows the default (text) transfer mode
# corrupts binary files such as zip archives
download.file(URL, destfile = "./tabelaIBGE.zip", mode = "wb")
# unzip only the municipality spreadsheet
unzip("./tabelaIBGE.zip", files = "RELATORIO_DTB_BRASIL_MUNICIPIO.xls")
# read file into R
library("readxl")
tabelaIBGE <- read_excel("./RELATORIO_DTB_BRASIL_MUNICIPIO.xls")
# merge the per-municipality ICSAP rates onto the census/city data;
# cityICSAP's first column (MUNIC_RES) is renamed to match city2010's key
names(cityICSAP)[1] <- "COD_MUNIC2"
df1 <- merge(city2010, cityICSAP, by = "COD_MUNIC2")
# Loess method: smoothed scatterplots of the ICSAP rate against candidates
library(ggplot2)
ggplot(df1, aes(x = log(POPULACAO), y = ICSAPRate)) +
  geom_point() +
  geom_smooth()
# BUG FIX: aes(x = 'MULHER%') mapped x to the literal string "MULHER%"
# (a single category), not to the column; backticks reference the
# non-syntactic column name correctly
ggplot(df1, aes(x = `MULHER%`, y = ICSAPRate)) +
  geom_point() +
  geom_smooth()
windows()
boxplot(df1$ICSAPRate ~ as.factor(df1$UF), pch = 20)
ggplot(df1, aes(x = log(POPULACAO), y = ICSAPRate)) +
  geom_point() +
  geom_smooth()
# base-graphics scatterplots / boxplots of candidate predictors vs ICSAPRate
plot(df1$`MULHER%`, df1$ICSAPRate, pch = 20)
plot(df1$`URBANA%`, df1$ICSAPRate, pch = 20)
boxplot(df1$ICSAPRate ~ as.factor(df1$COB.GROUP), pch = 20)
boxplot(df1$ICSAPRate ~ as.factor(df1$PORTE.POP), pch = 20)
plot(df1$IDHM.LONGEVIDADE, df1$ICSAPRate, pch = 20)
plot(df1$IDHM.SCHOOL, df1$ICSAPRate, pch = 20)
plot(df1$IDHM.RENDA, df1$ICSAPRate, pch = 20)
plot(df1$IDHM.TOTAL, df1$ICSAPRate, pch = 20)
plot(df1$`COB_MEDIAN%`, df1$ICSAPRate, pch = 20)
plot(df1$`COB_MEAN%`, df1$ICSAPRate, pch = 20)
# correlation matrix
# Pearson correlations (rounded to 2 decimals) between all columns except
# 1:5 and 79:82 (presumably identifier/non-numeric columns -- TODO confirm).
# NOTE(review): the result is assigned to `cor`, which shadows base::cor for
# the rest of the session; the inner cor() call itself still resolves fine.
cor <- round(cor(df1[,-c(1:5, 79:82)], method ="pearson"),2)
# function to compute p values matrix
# Pairwise cor.test() p-values for the columns of `mat`.
#
# mat : numeric matrix or data frame (coerced with as.matrix()).
# ... : passed straight to cor.test() (e.g. method = "spearman").
# Returns an n x n symmetric matrix of p-values with a zero diagonal and
# the column names of `mat` as row/column names.
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n)
  diag(p.mat) <- 0
  # seq_len() (not 1:(n - 1)) keeps the loop empty when n <= 1, so a
  # single-column input no longer fails with "subscript out of bounds"
  for (i in seq_len(n - 1)) {
    for (j in seq(i + 1, n)) {
      tmp <- cor.test(mat[, i], mat[, j], ...)
      p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  p.mat
}
# p values correlation matrix (same column subset as `cor` above)
p.mat <- cor.mtest(df1[, -c(1:5, 79:82)])
# correlogram
library(RColorBrewer)
# NOTE: `col` is assigned here but the corrplot() call below uses brewer.pal()
col <- colorRampPalette(c("red", "white", "blue"))(20)
library(corrplot)
corrplot(cor,
         method = "pie", type = "upper", order = "hclust",
         p.mat = p.mat, sig.level = 0.05, insig = "blank",
         tl.col = "black", tl.srt = 75,
         col = brewer.pal(n = 8, name = "RdYlBu"))
# NOTE(review): the bare tokens URBANA%, MULHER%, POPULACAO and ICSAPRate on
# the original lines were stray console leftovers; a lone "NAME%" is a parse
# error when this script is sourced, so they are preserved as comments only.
# URBANA%
df1[which.min(df1$'MULHER%'),]
# MULHER%
# POPULACAO
summary(df1$ICSAPRate)
colnames(df1)
# ICSAPRate
# explore data
str(SIH2010)
dim(SIH2010)
colnames(SIH2010)
# remove CID that starts with "O" (keeps rows with a missing diagnosis
# dropped, exactly as !str_detect() did)
library(tidyverse)
library(stringr)
SIH2010_sem_cid_o <- SIH2010 %>%
  filter(str_detect(DIAG_PRINC, "^O", negate = TRUE))
# ---------------------------------------------------------------------------
# Corpus-extraction metadata (not R code): path "/ICSAP 02.R", no license,
# repo "azous-nomar85/ICSAP", language R, 32,317 bytes.
# A second copy of the script begins below this marker.
# ---------------------------------------------------------------------------
# clean everything done before
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace when this
# script is sourced -- running it in a fresh R session is safer.
rm(list=ls())
# use more memory
# NOTE(review): memory.limit() is Windows-only and defunct since R 4.2.
memory.limit(9999999999)
# load packages
library(data.table)
library(tidyverse)
library(bit64)
# read data
getwd()
# NOTE(review): hard-coded absolute path makes the script non-portable.
wd1 <- "D:/SIH CSV"
setwd(wd1)
# fread() reads only the listed columns of the large SIH 2010 CSV export,
# then the result is converted from data.table to a plain data.frame
SIH2010 <- as.data.frame(fread("./SIH2010.csv",
               select = c("CEP","MUNIC_RES","NASC","SEXO","RACA_COR",
                          "DT_INTER","DT_SAIDA","DIAG_PRINC","CGC_HOSP",
                          "MUNIC_MOV","CNES")))
# subset with distinct values: drop rows that repeat every listed field
df <- distinct(SIH2010,
               CEP, MUNIC_RES, NASC, SEXO, RACA_COR, DT_INTER, DIAG_PRINC,
               CGC_HOSP, MUNIC_MOV, CNES,
               .keep_all = TRUE)
# number of duplicated values = 316351
nrow(SIH2010) - nrow(df)
# proportion of duplicated values = 0.02698128
(nrow(SIH2010) - nrow(df)) / nrow(SIH2010)
# find if there are hospitalization in 2010
# Each table() counts how many admission dates (DT_INTER, presumably a
# year-first yyyymmdd-style value -- TODO confirm) begin with "2010".
# NOTE(review): SIH2011..SIH2020 are assumed to be loaded already; this file
# only reads SIH2010 above -- confirm the other years are read elsewhere.
table(startsWith(as.character(SIH2010$DT_INTER), "2010"))
table(startsWith(as.character(SIH2011$DT_INTER), "2010"))
table(startsWith(as.character(SIH2012$DT_INTER), "2010"))
table(startsWith(as.character(SIH2013$DT_INTER), "2010"))
table(startsWith(as.character(SIH2014$DT_INTER), "2010"))
table(startsWith(as.character(SIH2015$DT_INTER), "2010"))
table(startsWith(as.character(SIH2016$DT_INTER), "2010"))
table(startsWith(as.character(SIH2017$DT_INTER), "2010"))
table(startsWith(as.character(SIH2018$DT_INTER), "2010"))
table(startsWith(as.character(SIH2019$DT_INTER), "2010"))
table(startsWith(as.character(SIH2020$DT_INTER), "2010"))
# select just hospitalizations in 2010 from each yearly file
# (filter() evaluates DT_INTER inside each data frame, same rows as before)
SIH2010_2010 <- SIH2010 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2011_2010 <- SIH2011 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2012_2010 <- SIH2012 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2013_2010 <- SIH2013 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2014_2010 <- SIH2014 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2015_2010 <- SIH2015 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2016_2010 <- SIH2016 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2017_2010 <- SIH2017 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2018_2010 <- SIH2018 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2019_2010 <- SIH2019 %>% filter(startsWith(as.character(DT_INTER), "2010"))
SIH2020_2010 <- SIH2020 %>% filter(startsWith(as.character(DT_INTER), "2010"))
# gather hospitalizations in 2010 in just one data frame
SIH2010 <- rbind(SIH2010_2010, SIH2011_2010, SIH2012_2010, SIH2013_2010,
                 SIH2014_2010, SIH2015_2010, SIH2016_2010, SIH2017_2010,
                 SIH2018_2010, SIH2019_2010, SIH2020_2010)
# find duplicated values: rows repeating the same patient/admission keys
dup_keys <- c("SEXO", "NASC", "DT_INTER", "MUNIC_MOV", "DIAG_PRINC")
duplicated <- SIH2010[duplicated(SIH2010[, dup_keys]), ]
# proportion of duplicated values
nrow(duplicated) / nrow(SIH2010)
# return number of characters from variable "DIAG_PRINC"
table(nchar(SIH2010$DIAG_PRINC, keepNA = FALSE), exclude = NULL)
# create binomial variable "ICSAP"
# ICSAP = 1 when the principal diagnosis matches one of the listed CID
# prefixes; everything starts as the placeholder "Preencher"
SIH2010$ICSAP <- "Preencher"
# 3-character CID prefixes defining an ICSAP. One vectorised substr() %in%
# pass replaces the original 115 grepl("^...") calls. substr(NA, 1, 3) is NA
# and NA %in% ... is FALSE, so missing diagnoses are handled exactly like
# grepl() handled them (no match).
icsap_prefix3 <- c(
  "A18", "A19", "A33", "A34", "A35", "A36", "A37", "A51", "A52", "A53",
  "A95", "B05", "B06", "B16", "B26", "B50", "B51", "B52", "B53", "B54",
  "B77", "I00", "I01", "I02",
  paste0("A0", 0:9), "E86",
  "D50",
  paste0("E4", 0:6), paste0("E", 50:64),
  "H66", "J00", "J01", "J02", "J03", "J06", "J31",
  "J13", "J14", "J45", "J46",
  "J20", "J21", paste0("J4", 0:4), "J47",
  "I10", "I11", "I20", "I50", "J81",
  "G45", "G46", paste0("I6", c(3:7, 9)),
  "G40", "G41",
  "N10", "N11", "N12", "N30", "N34",
  "A46", paste0("L0", c(1:4, 8)),
  paste0("N7", c(0:3, 5, 6)),
  paste0("K2", 5:8),
  "A50", "O23"
)
# where CIDs has 3 characters
SIH2010$ICSAP <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% icsap_prefix3,
                        1, SIH2010$ICSAP)
# ICSAP codes that must match the full 4-character CID:
# A150-A179 + G000, J153/J154/J158/J159/J181, E100-E149,
# N390, K920-K922, P350 (ranges generated instead of long literal vectors)
ICSAP4c <- c(paste0("A", 150:179), "G000",
             "J153", "J154", "J158", "J159", "J181",
             paste0("E", 100:149),
             "N390", "K920", "K921", "K922", "P350")
# where CIDs has 4 characters
SIH2010$ICSAP <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP4c, 1, SIH2010$ICSAP)
# change "Preencher" to 0 (rows that matched no ICSAP code)
SIH2010$ICSAP <- ifelse(SIH2010$ICSAP == "Preencher", 0, SIH2010$ICSAP)
# check variable building
table(SIH2010$ICSAP, exclude = NULL)
# proportion of ICSAPs among all hospitalizations
SIH2010$ICSAP <- as.numeric(SIH2010$ICSAP)
sum(SIH2010$ICSAP) / length(SIH2010$ICSAP)
# create "ICSAPGroup" - Group 1
SIH2010$ICSAPGroup <- 0
# 3-character CID prefixes of Group 1; one vectorised substr() %in% pass
# replaces the original chain of grepl("^...") calls (NA-safe, since
# NA %in% ... is FALSE, matching grepl()'s behaviour on missing diagnoses)
g01_prefix <- c("A18", "A19", "A33", "A34", "A35", "A36", "A37", "A51",
                "A52", "A53", "A95", "B05", "B06", "B16", "B26", "B50",
                "B51", "B52", "B53", "B54", "B77", "I00", "I01", "I02")
SIH2010$ICSAPGroup <- ifelse(substr(SIH2010$DIAG_PRINC, 1, 3) %in% g01_prefix,
                             1, SIH2010$ICSAPGroup)
# ICSAP Group 1 with 4 characters: A150-A179 plus G000
ICSAP014 <- c(paste0("A", 150:179), "G000")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP014, 1, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 1" among all ICSAP hospitalizations
sum(SIH2010$ICSAPGroup == 1) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 2
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^A00", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A01", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A02", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A03", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A04", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A05", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A06", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A07", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A08", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^A09", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E86", SIH2010$DIAG_PRINC), 2, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 2"
sum(SIH2010$ICSAPGroup == 2) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 3
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^D50", SIH2010$DIAG_PRINC), 3, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 3"
sum(SIH2010$ICSAPGroup == 3) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 4
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^E40", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E41", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E42", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E43", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E44", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E45", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E46", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E50", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E51", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E52", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E53", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E54", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E55", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E56", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E57", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E58", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E59", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E60", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E61", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E62", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E63", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^E64", SIH2010$DIAG_PRINC), 4, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 4"
sum(SIH2010$ICSAPGroup == 4) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 5
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^H66", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J00", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J01", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J02", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J03", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J06", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J31", SIH2010$DIAG_PRINC), 5, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 5"
sum(SIH2010$ICSAPGroup == 5) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 6
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^J13", SIH2010$DIAG_PRINC), 6, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J14", SIH2010$DIAG_PRINC), 6, SIH2010$ICSAPGroup)
# ICSAP Group 6 with 4 characters
ICSAP064 <- c("J153","J154","J158","J159","J181")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP064, 6, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 6"
sum(SIH2010$ICSAPGroup == 6) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 7
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^J45", SIH2010$DIAG_PRINC), 7, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J46", SIH2010$DIAG_PRINC), 7, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 7"
sum(SIH2010$ICSAPGroup == 7) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 8
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^J20", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J21", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J40", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J41", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J42", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J43", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J44", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J47", SIH2010$DIAG_PRINC), 8, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 8"
sum(SIH2010$ICSAPGroup == 8) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 9
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^I10", SIH2010$DIAG_PRINC), 9, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I11", SIH2010$DIAG_PRINC), 9, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 9"
sum(SIH2010$ICSAPGroup == 9) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 10
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^I20", SIH2010$DIAG_PRINC), 10, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 10"
sum(SIH2010$ICSAPGroup == 10) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 11
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^I50", SIH2010$DIAG_PRINC), 11, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^J81", SIH2010$DIAG_PRINC), 11, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 11"
sum(SIH2010$ICSAPGroup == 11) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 12
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^G45", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^G46", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I63", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I64", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I65", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I66", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I67", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^I69", SIH2010$DIAG_PRINC), 12, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 12"
sum(SIH2010$ICSAPGroup == 12) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 13
# ICSAP Group 13 with 4 characters
ICSAP134 <- c("E100","E101","E102","E103","E104","E105","E106","E107","E108",
"E109","E110","E111","E112","E113","E114","E115","E116","E117",
"E118","E119","E120","E121","E122","E123","E124","E125","E126",
"E127","E128","E129","E130","E131","E132","E133","E134","E135",
"E136","E137","E138","E139","E140","E141","E142","E143","E144",
"E145","E146","E147","E148","E149")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP134, 13, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 13"
sum(SIH2010$ICSAPGroup == 13) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 14
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^G40", SIH2010$DIAG_PRINC), 14, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^G41", SIH2010$DIAG_PRINC), 14, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 14"
sum(SIH2010$ICSAPGroup == 14) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 15
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^N10", SIH2010$DIAG_PRINC), 15, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N11", SIH2010$DIAG_PRINC), 15, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N12", SIH2010$DIAG_PRINC), 15, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N30", SIH2010$DIAG_PRINC), 15, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N34", SIH2010$DIAG_PRINC), 15, SIH2010$ICSAPGroup)
# ICSAP Group 15 with 4 characters
ICSAP154 <- c("N390")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP154, 15, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 15"
sum(SIH2010$ICSAPGroup == 15) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 16
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^A46", SIH2010$DIAG_PRINC), 16, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^L01", SIH2010$DIAG_PRINC), 16, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^L02", SIH2010$DIAG_PRINC), 16, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^L03", SIH2010$DIAG_PRINC), 16, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^L04", SIH2010$DIAG_PRINC), 16, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^L08", SIH2010$DIAG_PRINC), 16, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 16"
sum(SIH2010$ICSAPGroup == 16) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 17
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^N70", SIH2010$DIAG_PRINC), 17, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N71", SIH2010$DIAG_PRINC), 17, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N72", SIH2010$DIAG_PRINC), 17, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N73", SIH2010$DIAG_PRINC), 17, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N75", SIH2010$DIAG_PRINC), 17, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^N76", SIH2010$DIAG_PRINC), 17, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 17"
sum(SIH2010$ICSAPGroup == 17) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 18
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^K25", SIH2010$DIAG_PRINC), 18, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^K26", SIH2010$DIAG_PRINC), 18, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^K27", SIH2010$DIAG_PRINC), 18, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^K28", SIH2010$DIAG_PRINC), 18, SIH2010$ICSAPGroup)
# ICSAP Group 18 with 4 characters
ICSAP184 <- c("K920","K921","K922")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP184, 18, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 18"
sum(SIH2010$ICSAPGroup == 18) / sum(SIH2010$ICSAP == 1)
# create "ICSAPGroup" - Group 19
# where CIDs has 3 characters
SIH2010$ICSAPGroup <- ifelse(grepl("^A50", SIH2010$DIAG_PRINC), 19, SIH2010$ICSAPGroup)
SIH2010$ICSAPGroup <- ifelse(grepl("^O23", SIH2010$DIAG_PRINC), 19, SIH2010$ICSAPGroup)
# ICSAP Group 19 with 4 characters
ICSAP194 <- c("P350")
# where CIDs has 4 characters
SIH2010$ICSAPGroup <- ifelse(SIH2010$DIAG_PRINC %in% ICSAP194, 19, SIH2010$ICSAPGroup)
# check variable building
table(SIH2010$ICSAPGroup, exclude = NULL)
# proportion of "Group 19"
sum(SIH2010$ICSAPGroup == 19) / sum(SIH2010$ICSAP == 1)
# "ICSAPGroup" by "SEXO"
t1 <- table(SIH2010$ICSAPGroup, SIH2010$SEXO)
t1
# export the cross-tabulations to Excel files in the working directory
library("writexl")
write_xlsx(as.data.frame(t1),"./t1.xlsx")
# "ICSAPGroup" by "RACA_COR"
t2 <- table(SIH2010$ICSAPGroup, SIH2010$RACA_COR)
t2
write_xlsx(as.data.frame(t2),"./t2.xlsx")
# "ICSAPGroup" by "INSTRU"
t3 <- table(SIH2010$ICSAPGroup, SIH2010$INSTRU)
t3
# "ICSAP" by "MUNIC_RES"
# per-municipality summary: ICSAP admissions, total admissions and their ratio
cityICSAP <- SIH2010 %>%
  group_by(MUNIC_RES) %>%
  summarize(ICSAPNum = sum(ICSAP),
            ITNum = n(),
            ICSAPRate = sum(ICSAP) / n())
# ICSAP - visual and numerical summary of distributions
windows()   # NOTE(review): windows() only exists on MS Windows; use x11()/quartz() elsewhere
hist(cityICSAP$ICSAPRate)
plot(density(cityICSAP$ICSAPRate))
summary(cityICSAP$ICSAPRate)
# download the IBGE municipality-code table
URL <- "ftp://geoftp.ibge.gov.br/organizacao_do_territorio/estrutura_territorial/divisao_territorial/2018/DTB_2018.zip"
download.file(URL, destfile = "./tabelaIBGE.zip")
# unzip file
unzip("./tabelaIBGE.zip", files = "RELATORIO_DTB_BRASIL_MUNICIPIO.xls")
# read file into R
library("readxl")
tabelaIBGE <- read_excel("./RELATORIO_DTB_BRASIL_MUNICIPIO.xls")
# merge 2 data sets
# rename the key column so it matches city2010 before merging
names(cityICSAP)[1] <- "COD_MUNIC2"
df1 <- merge(city2010, cityICSAP, by = "COD_MUNIC2")
# Loess method
# scatterplots with the default loess smoother from geom_smooth()
library(ggplot2)
ggplot(df1, aes(x=log(POPULACAO), y=ICSAPRate)) +
  geom_point()+
  geom_smooth()
# NOTE(review): aes(x='MULHER%') maps the literal string, not the column;
# probably meant aes(x = .data[["MULHER%"]]) - confirm intent
ggplot(df1, aes(x='MULHER%', y=ICSAPRate)) +
  geom_point()+
  geom_smooth()
windows()
boxplot(df1$ICSAPRate ~ as.factor(df1$UF), pch = 20)
ggplot(df1, aes(x=log(POPULACAO), y=ICSAPRate)) +
  geom_point()+
  geom_smooth()
plot(df1$'MULHER%', df1$ICSAPRate, pch = 20)
plot(df1$'URBANA%', df1$ICSAPRate, pch = 20)
boxplot(df1$ICSAPRate ~ as.factor(df1$COB.GROUP), pch = 20)
boxplot(df1$ICSAPRate ~ as.factor(df1$PORTE.POP), pch = 20)
plot(df1$IDHM.LONGEVIDADE, df1$ICSAPRate, pch = 20)
plot(df1$IDHM.SCHOOL, df1$ICSAPRate, pch = 20)
plot(df1$IDHM.RENDA, df1$ICSAPRate, pch = 20)
plot(df1$IDHM.TOTAL, df1$ICSAPRate, pch = 20)
plot(df1$'COB_MEDIAN%', df1$ICSAPRate, pch = 20)
plot(df1$'COB_MEAN%', df1$ICSAPRate, pch = 20)
# correlation matrix
# NOTE(review): `cor` shadows stats::cor from here on; consider renaming
cor <- round(cor(df1[,-c(1:5, 79:82)], method ="pearson"),2)
# Compute pairwise correlation-test p-values for the columns of `mat`.
#
# mat : matrix or data frame of numeric columns.
# ... : forwarded to stats::cor.test() (e.g. method, alternative).
#
# Returns a symmetric n x n matrix of p-values with a zero diagonal and the
# same row/column names as `mat`.
#
# Fix: the original looped `for (i in 1:(n - 1))`, which for a single-column
# input evaluates to c(1, 0) and then indexes column 2 out of bounds;
# seq_len(n - 1) yields an empty loop instead.
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n,
                  dimnames = list(colnames(mat), colnames(mat)))
  diag(p.mat) <- 0
  for (i in seq_len(n - 1)) {
    for (j in (i + 1):n) {
      pv <- cor.test(mat[, i], mat[, j], ...)$p.value
      p.mat[i, j] <- pv
      p.mat[j, i] <- pv
    }
  }
  p.mat
}
# p values correlation matrix (same column subset used to build `cor` above)
p.mat <- cor.mtest(df1[,-c(1:5, 79:82)])
# correlogram: pie glyphs, upper triangle, cells with p >= 0.05 blanked
library(RColorBrewer)
col <- colorRampPalette(c("red", "white", "blue"))(20)
library(corrplot)
corrplot(cor, method="pie", type="upper", order="hclust", p.mat = p.mat,
         sig.level = 0.05, insig = "blank", tl.col="black", tl.srt=75,
         col=brewer.pal(n=8,name="RdYlBu"))
# FIX(review): the bare tokens "URBANA%", "MULHER%", "POPULACAO" and
# "ICSAPRate" below were leftover console fragments; "URBANA%"/"MULHER%"
# do not even parse (they open an unterminated %...% operator). They are
# commented out so the script can be sourced end to end.
# URBANA%
# row of the municipality with the lowest share of women
df1[which.min(df1$'MULHER%'),]
# MULHER%
# POPULACAO
summary(df1$ICSAPRate)
colnames(df1)
# ICSAPRate
# explore data
str(SIH2010)
dim(SIH2010)
colnames(SIH2010)
# remove CID that starts with "O" (ICD-10 chapter O: pregnancy/childbirth)
library(tidyverse)
library(stringr)
SIH2010_sem_cid_o <- SIH2010 %>%
  filter(!str_detect(DIAG_PRINC, "^O"))
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/progenyarray-methods.R
\docType{methods}
\name{progenyGenotypes,ProgenyArray-method}
\alias{progenyGenotypes,ProgenyArray-method}
\title{Accessor for progeny genotypes}
\usage{
\S4method{progenyGenotypes}{ProgenyArray}(x, seqname = NULL)
}
\arguments{
\item{x}{a ProgenyArray object}

\item{seqname}{an optional sequence name (default \code{NULL})}
}
\description{
Accessor for progeny genotypes
}
|
/man/progenyGenotypes-ProgenyArray-method.Rd
|
no_license
|
kate-crosby/ProgenyArray
|
R
| false
| false
| 420
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/progenyarray-methods.R
\docType{methods}
\name{progenyGenotypes,ProgenyArray-method}
\alias{progenyGenotypes,ProgenyArray-method}
\title{Accessor for progeny genotypes}
\usage{
\S4method{progenyGenotypes}{ProgenyArray}(x, seqname = NULL)
}
\arguments{
\item{x}{a ProgenyArray object}

\item{seqname}{an optional sequence name (default \code{NULL})}
}
\description{
Accessor for progeny genotypes
}
|
# Benchmark results: semicolon-separated CSV whose columns include time,
# type, size and cluster (used by the charts and models below).
data <- read.csv("result.csv", header = TRUE, sep = ";")
# Run a one-way ANOVA on a fitted linear model and write its diagnostics to
# disk. Every output file name is prefixed with `type` (e.g. "analyse/type").
#
# analysis : a fitted lm object (e.g. lm(time ~ type, data = ...))
# type     : path prefix for the generated report and plot files
#
# NOTE(review): the literal file names (including the "sumary"/"witch"
# misspellings) are kept byte-identical so existing consumers keep working.
anova_fun <- function(analysis, type){
  # H0: group means are equal; reject when p < 0.05
  fit <- aov(analysis)
  capture.output(summary(fit), file = paste(type, " anova sumary.txt"))

  # residuals-vs-fitted diagnostic plot
  png(filename = paste(type, " plot witch 1.png"))
  plot(analysis, which = 1)
  dev.off()

  # normal Q-Q plot (positive skew in the data shows up here)
  png(filename = paste(type, " plot witch 2.png"))
  plot(analysis, which = 2)
  dev.off()

  # histogram of standardized residuals
  png(filename = paste(type, " histogram.png"))
  hist(rstandard(analysis))
  dev.off()

  # pairwise comparisons; levels with p < 0.05 differ significantly
  capture.output(TukeyHSD(fit), file = paste(type, " TukeyHSD sumary.txt"))
}
# strip chart of time by implementation type, saved under analyse/
name <- "analyse/type stripchart.png"
png(filename=name)
#visualizing data
stripchart(time~type,
           data=data,
           main="Different strip chart for each type",
           xlab="Type",
           ylab="Time",
           col="brown3",
           vertical=TRUE,
           pch=19,
           method = "jitter", jitter = 0.004
)
dev.off()
# same chart, grouped by input size
# NOTE(review): main/xlab below still say "type" - copy-paste leftovers to confirm
name <- "analyse/size stripchart.png"
png(filename=name)
stripchart(time~as.character(size),
           data=data,
           main="Different strip chart for each type",
           xlab="Type",
           ylab="Time",
           col="brown3",
           vertical=TRUE,
           pch=19,
           method = "jitter", jitter = 0.004
)
dev.off()
# same chart, grouped by cluster count
name <- "analyse/cluster stripchart.png"
png(filename=name)
stripchart(time~as.character(cluster),
           data=data,
           main="Different strip chart for each type",
           xlab="Type",
           ylab="Time",
           col="brown3",
           vertical=TRUE,
           pch=19,
           method = "jitter", jitter = 0.004
)
dev.off()
#fitting data to anova
# one linear model per grouping variable; anova_fun writes the diagnostics
aov_type <- lm(time~type,data=data)
aov_size <- lm(time~as.character(size),data=data)
aov_cluster <- lm(time~as.character(cluster),data=data)
anova_fun(aov_type, "analyse/type")
anova_fun(aov_size, "analyse/size")
anova_fun(aov_cluster, "analyse/cluster")
|
/analyse.R
|
no_license
|
tatianass/Projeto-Metodologia-Cientifica
|
R
| false
| false
| 2,420
|
r
|
# Benchmark results: semicolon-separated CSV whose columns include time,
# type, size and cluster (used by the charts and models below).
data <- read.csv("result.csv", header = TRUE, sep = ";")
# Run a one-way ANOVA on a fitted linear model and write its diagnostics to
# disk. Every output file name is prefixed with `type` (e.g. "analyse/type").
#
# analysis : a fitted lm object (e.g. lm(time ~ type, data = ...))
# type     : path prefix for the generated report and plot files
#
# NOTE(review): the literal file names (including the "sumary"/"witch"
# misspellings) are kept byte-identical so existing consumers keep working.
anova_fun <- function(analysis, type){
  # H0: group means are equal; reject when p < 0.05
  fit <- aov(analysis)
  capture.output(summary(fit), file = paste(type, " anova sumary.txt"))

  # residuals-vs-fitted diagnostic plot
  png(filename = paste(type, " plot witch 1.png"))
  plot(analysis, which = 1)
  dev.off()

  # normal Q-Q plot (positive skew in the data shows up here)
  png(filename = paste(type, " plot witch 2.png"))
  plot(analysis, which = 2)
  dev.off()

  # histogram of standardized residuals
  png(filename = paste(type, " histogram.png"))
  hist(rstandard(analysis))
  dev.off()

  # pairwise comparisons; levels with p < 0.05 differ significantly
  capture.output(TukeyHSD(fit), file = paste(type, " TukeyHSD sumary.txt"))
}
# strip chart of time by implementation type, saved under analyse/
name <- "analyse/type stripchart.png"
png(filename=name)
#visualizing data
stripchart(time~type,
           data=data,
           main="Different strip chart for each type",
           xlab="Type",
           ylab="Time",
           col="brown3",
           vertical=TRUE,
           pch=19,
           method = "jitter", jitter = 0.004
)
dev.off()
# same chart, grouped by input size
# NOTE(review): main/xlab below still say "type" - copy-paste leftovers to confirm
name <- "analyse/size stripchart.png"
png(filename=name)
stripchart(time~as.character(size),
           data=data,
           main="Different strip chart for each type",
           xlab="Type",
           ylab="Time",
           col="brown3",
           vertical=TRUE,
           pch=19,
           method = "jitter", jitter = 0.004
)
dev.off()
# same chart, grouped by cluster count
name <- "analyse/cluster stripchart.png"
png(filename=name)
stripchart(time~as.character(cluster),
           data=data,
           main="Different strip chart for each type",
           xlab="Type",
           ylab="Time",
           col="brown3",
           vertical=TRUE,
           pch=19,
           method = "jitter", jitter = 0.004
)
dev.off()
#fitting data to anova
# one linear model per grouping variable; anova_fun writes the diagnostics
aov_type <- lm(time~type,data=data)
aov_size <- lm(time~as.character(size),data=data)
aov_cluster <- lm(time~as.character(cluster),data=data)
anova_fun(aov_type, "analyse/type")
anova_fun(aov_size, "analyse/size")
anova_fun(aov_cluster, "analyse/cluster")
|
# Stochastic simulation via adaptive tau-leaping.
# Thin wrapper: all arguments are forwarded unchanged to the compiled
# routine 'simAdaptiveTau' in the adaptivetau shared library.
#   init.values   - initial state vector
#   transitions   - state-change specification (see ssa.maketrans below)
#   rateFunc      - function computing transition rates
#   params        - passed through to rateFunc
#   tf            - final simulation time
#   jacobianFunc, maxTauFunc, deterministic, halting, tl.params
#                 - optional hooks/tuning arguments (NULL = not used)
#   relratechange - defaults to rep(1, length(init.values)); exact semantics
#                   defined by the C++ side - confirm in package docs
ssa.adaptivetau <-
function(init.values, transitions, rateFunc, params, tf,
         jacobianFunc = NULL, maxTauFunc = NULL,
         deterministic = NULL, halting = NULL,
         relratechange=rep(1, length(init.values)),
         tl.params = NULL) {
    return(.Call('simAdaptiveTau', PACKAGE='adaptivetau',
                 init.values, transitions,
                 rateFunc, jacobianFunc,
                 params, tf, deterministic, halting,
                 relratechange, tl.params, maxTauFunc))
}
# Exact stochastic simulation; forwards all arguments to the compiled
# routine 'simExact' in the same shared library.
ssa.exact <-
function(init.values, transitions, rateFunc, params, tf) {
    return(.Call('simExact', PACKAGE='adaptivetau',
                 init.values, transitions, rateFunc, params, tf))
}
# Build a list of sparse state-change ("transition") vectors for use with
# ssa.adaptivetau / ssa.exact.
#
# variables : either the number of state variables (length-1 numeric) or a
#             character vector of variable names.
# ...       : one or more transition specifications. Each is either NA (a
#             transition that changes no variable) or a matrix whose rows
#             alternate variable (index or name) and change magnitude; each
#             column describes one transition.
#
# Returns a list with one named integer vector per transition; names identify
# the affected variables and values give the state change.
ssa.maketrans <- function(variables, ...) {
  userTrans = list(...)
  if (length(userTrans) == 0) {
    stop("no transitions passed into ssa.maketrans!")
  }
  # deduce the number of state variables
  if (length(variables) == 1 && is.numeric(variables)) {
    numVariables = variables;
  } else if (is.character(variables)) {
    numVariables = length(variables)
  } else {
    stop("Cannot deduce number of variables -- ssa.maketrans requires ",
         "either a vector of variable names or the number of variables")
  }
  # each matrix contributes one transition per column; NA contributes one
  # (vapply replaces sapply for a guaranteed integer result)
  nPerSpec = vapply(userTrans,
                    function(x) if (is.matrix(x)) ncol(x) else 1L,
                    integer(1))
  allTrans = vector("list", length = sum(nPerSpec));
  trI = 0
  for (i in seq_along(userTrans)) {
    x = userTrans[[i]]
    if (length(x) == 1 && is.na(x)) {
      # "null" transition: changes no state variable
      trI = trI + 1
      allTrans[[trI]] = integer(0)
      next
    }
    idx = (1:(nrow(x) %/% 2))*2-1 #indices of variables
    mag = idx + 1 #indices of magnitudes
    for (j in seq_len(ncol(x))) {
      trI = trI + 1
      # validate the variable identifiers, then build the named change vector
      # (construction deduplicated out of the two branches)
      if (is.numeric(x)) {
        if (any(x[idx,j] < 1 | x[idx,j] > numVariables)) {
          stop("variable index outside valid range (1:numVariables)")
        }
      } else if (is.character(x)) {
        if (any(!(x[idx,j] %in% variables))) {
          stop("unknown variable(s): ",
               paste(x[idx,j][!(x[idx,j] %in% variables)], collapse=", "))
        }
      } else {
        stop("transitions passed to ssa.maketrans must be integer or character")
      }
      allTrans[[trI]] = structure(as.integer(x[mag,j]), names = x[idx,j])
    }
  }
  allTrans
}
|
/R/ssa.adaptivetau.R
|
no_license
|
cran/adaptivetau
|
R
| false
| false
| 2,325
|
r
|
# Stochastic simulation via adaptive tau-leaping.
# Thin wrapper: all arguments are forwarded unchanged to the compiled
# routine 'simAdaptiveTau' in the adaptivetau shared library.
#   init.values   - initial state vector
#   transitions   - state-change specification (see ssa.maketrans below)
#   rateFunc      - function computing transition rates
#   params        - passed through to rateFunc
#   tf            - final simulation time
#   jacobianFunc, maxTauFunc, deterministic, halting, tl.params
#                 - optional hooks/tuning arguments (NULL = not used)
#   relratechange - defaults to rep(1, length(init.values)); exact semantics
#                   defined by the C++ side - confirm in package docs
ssa.adaptivetau <-
function(init.values, transitions, rateFunc, params, tf,
         jacobianFunc = NULL, maxTauFunc = NULL,
         deterministic = NULL, halting = NULL,
         relratechange=rep(1, length(init.values)),
         tl.params = NULL) {
    return(.Call('simAdaptiveTau', PACKAGE='adaptivetau',
                 init.values, transitions,
                 rateFunc, jacobianFunc,
                 params, tf, deterministic, halting,
                 relratechange, tl.params, maxTauFunc))
}
# Exact stochastic simulation; forwards all arguments to the compiled
# routine 'simExact' in the same shared library.
ssa.exact <-
function(init.values, transitions, rateFunc, params, tf) {
    return(.Call('simExact', PACKAGE='adaptivetau',
                 init.values, transitions, rateFunc, params, tf))
}
# Build a list of sparse state-change ("transition") vectors for use with
# ssa.adaptivetau / ssa.exact.
#
# variables : either the number of state variables (length-1 numeric) or a
#             character vector of variable names.
# ...       : one or more transition specifications. Each is either NA (a
#             transition that changes no variable) or a matrix whose rows
#             alternate variable (index or name) and change magnitude; each
#             column describes one transition.
#
# Returns a list with one named integer vector per transition; names identify
# the affected variables and values give the state change.
ssa.maketrans <- function(variables, ...) {
  userTrans = list(...)
  if (length(userTrans) == 0) {
    stop("no transitions passed into ssa.maketrans!")
  }
  # deduce the number of state variables
  if (length(variables) == 1 && is.numeric(variables)) {
    numVariables = variables;
  } else if (is.character(variables)) {
    numVariables = length(variables)
  } else {
    stop("Cannot deduce number of variables -- ssa.maketrans requires ",
         "either a vector of variable names or the number of variables")
  }
  # each matrix contributes one transition per column; NA contributes one
  # (vapply replaces sapply for a guaranteed integer result)
  nPerSpec = vapply(userTrans,
                    function(x) if (is.matrix(x)) ncol(x) else 1L,
                    integer(1))
  allTrans = vector("list", length = sum(nPerSpec));
  trI = 0
  for (i in seq_along(userTrans)) {
    x = userTrans[[i]]
    if (length(x) == 1 && is.na(x)) {
      # "null" transition: changes no state variable
      trI = trI + 1
      allTrans[[trI]] = integer(0)
      next
    }
    idx = (1:(nrow(x) %/% 2))*2-1 #indices of variables
    mag = idx + 1 #indices of magnitudes
    for (j in seq_len(ncol(x))) {
      trI = trI + 1
      # validate the variable identifiers, then build the named change vector
      # (construction deduplicated out of the two branches)
      if (is.numeric(x)) {
        if (any(x[idx,j] < 1 | x[idx,j] > numVariables)) {
          stop("variable index outside valid range (1:numVariables)")
        }
      } else if (is.character(x)) {
        if (any(!(x[idx,j] %in% variables))) {
          stop("unknown variable(s): ",
               paste(x[idx,j][!(x[idx,j] %in% variables)], collapse=", "))
        }
      } else {
        stop("transitions passed to ssa.maketrans must be integer or character")
      }
      allTrans[[trI]] = structure(as.integer(x[mag,j]), names = x[idx,j])
    }
  }
  allTrans
}
|
#####################################################################################################################
########################################## CLASS DEFINITIONS AND VALIDITY CHECKS ####################################
#####################################################################################################################
# setClassUnion() creates virtual classes so that S4 slots declared with these
# union types may hold either a value of the stated type or NULL (optional slots).
#' Dummy class
#' @description Class unions for internal use only
#' @name numericOrNULL-class
#' @rdname numericOrNULL-class
#' @exportClass numericOrNULL
setClassUnion("numericOrNULL", c("numeric", "NULL"))
#' Dummy class
#' @description Class unions for internal use only
#' @name ANYOrNULL-class
#' @rdname ANYOrNULL-class
#' @exportClass ANYOrNULL
setClassUnion("ANYOrNULL", c("ANY", "NULL"))
#' Dummy class
#' @description Class unions for internal use only
#' @name functionOrNULL-class
#' @rdname functionOrNULL-class
#' @exportClass functionOrNULL
setClassUnion("functionOrNULL", c("function", "NULL"))
##################################
######### synlik: the base class
##################################
### Validity check
# Validator for "synlik" objects: returns TRUE when the object is valid,
# otherwise a character vector describing the violated slot constraints.
# A wrong class or a malformed simulator/summaries signature stops outright.
.check.synlik <- function(object)
{
  if (!is(object, "synlik")) {
    stop("object has to be of class \"synlik\" ")
  }

  errors <- character()

  # the parameter vector must be non-empty and fully named
  if (length(object@param) == 0) {
    errors <- c(errors, "length(param) == 0")
  }
  paramNames <- names(object@param)
  if (is.null(paramNames) || any("" %in% paramNames)) {
    errors <- c(errors, "param has to be a named vector")
  }

  # simulator must look like fun(param, nsim, extraArgs, ..., <last>)
  simulArgs <- names(as.list(args(object@simulator)))
  badSimul <- length(simulArgs) < 5 ||
    !identical(simulArgs[1:3], c("param", "nsim", "extraArgs")) ||
    simulArgs[length(simulArgs) - 1] != "..."
  if (badSimul) {
    stop("The first 3 arguments of the simulator should be \"param\", \"nsim\" and \"extraArgs\" (in that order) and the last should be \"...\"")
  }

  # optional summaries must look like fun(x, extraArgs, ..., <last>)
  if (!is.null(object@summaries)) {
    statsArgs <- names(as.list(args(object@summaries)))
    badStats <- length(statsArgs) < 4 ||
      statsArgs[1] != "x" ||
      statsArgs[length(statsArgs) - 1] != "..."
    if (badStats) {
      stop("The first 2 argument of the \"summaries\" function should be \"x\" and \"extraArgs\" (in that order) and the last should be \"...\"")
    }
  }

  if (length(errors) == 0) TRUE else errors
}
### Class Definition
#' \code{synlik-class}
#'
#' @description Basic class for simulation-based approximate inference using Synthetic Likelihood methods.
#'
#' @section Slots:
#' \describe{
#'   \item{param}{Named vector of parameters used by \code{object@@simulator} (\code{numeric}).}
#'   \item{simulator}{Function that simulates from the model (\code{function}). It has to have prototype \code{fun(param, nsim, extraArgs, ...)}.
#'                     If \code{summaries()} is not specified the \code{simulator()} has output a matrix with \code{nsim} rows, where
#'                     each row is a vector of simulated statistics. Otherwise it can output any kind of object, and this output will be
#'                     passed to \code{summaries()}.}
#'  \item{summaries}{Function that transforms simulated data into summary statistics (\code{function}).
#'                    It has to have prototype \code{fun(x, extraArgs, ...)} and it has to output a matrix with \code{nsim} rows, where
#'                    each row is a vector of simulated statistics. Parameter \code{x} contains the data.}
#'  \item{data}{Object containing the observed data or statistics (\code{ANY}).}
#'  \item{extraArgs}{List containing all the extra arguments to be passed to \code{object@@simulator} and \code{object@@summaries} (\code{list}).}
#'  \item{plotFun}{Function that will be used to plot \code{object@@data}. Prototype should be \code{fun(x, ...)} (\code{function}).}
#' }
#'
#' @name synlik-class
#' @rdname synlik-class
#' @references Simon N Wood. Statistical inference for noisy nonlinear ecological dynamic systems. Nature, 466(7310):1102--1104, 2010.
#' @author Matteo Fasiolo <matteo.fasiolo@@gmail.com>
#' @examples
#' #### Create Object
#' ricker_sl <- synlik(simulator = rickerSimul,
#'                     summaries = rickerStats,
#'                     param = c( logR = 3.8, logSigma = log(0.3), logPhi = log(10) ),
#'                     extraArgs = list("nObs" = 50, "nBurn" = 50),
#'                     plotFun = function(input, ...)
#'                       plot(drop(input), type = 'l', ylab = "Pop", xlab = "Time", ...)
#' )
#'
#' # Simulate from the object
#' ricker_sl@@data <- simulate(ricker_sl)
#' ricker_sl@@extraArgs$obsData <- ricker_sl@@data # Needed by WOOD2010 statistics
#'
#' plot(ricker_sl)
#' @exportClass synlik
#'
# Formal S4 definition: slots mirror the documented fields above, the
# prototype supplies benign defaults, and `validity` runs .check.synlik
# whenever an object is constructed or validObject() is called.
setClass( "synlik",
         representation( param = "numeric",
                         simulator = "function",
                         summaries = "functionOrNULL",
                         data = "ANY",
                         extraArgs = "list",
                         plotFun = "functionOrNULL"
                         ),
         prototype = prototype(
                               param = numeric(),
                               simulator = function() NULL,
                               summaries = NULL,
                               data = NULL,
                               extraArgs = list(),
                               plotFun = NULL
                               ),
         validity = .check.synlik
         )
#' @param ... See section "Slots".
#' @rdname synlik-class
synlik <- function(...)
{
  # Capture the caller's (unevaluated) arguments, prepend the class name,
  # and hand everything to methods::new() to construct the object.
  callArgs <- as.list(match.call(expand.dots = TRUE))[-1]
  do.call("new", c("synlik", callArgs))
}
##################################
######### smcmc: a synlik object after MCMC
##################################
### Validity check
# Validator for "smcmc" objects: returns TRUE when valid, otherwise a
# character vector describing each violated constraint; stops on wrong class.
.check.smcmc <- function(object)
{
  if (!is(object, "smcmc")) {
    stop("object has to be of class \"smcmc\" ")
  }
  errors <- character()
  # the chain needs a starting point
  if (length(object@initPar) == 0) {
    errors <- c(errors, "length(initPar) should be > 0")
  }
  if (length(errors) == 0) TRUE else errors
}
### Class Definition
#' \code{smcmc-class}
#'
#' @description Object representing the results of MCMC estimation on an object of class \code{synlik}, from which it inherits.
#'
#' @section Slots:
#' \describe{
#' \item{initPar}{Vector of initial parameters where the MCMC chain will start (\code{numeric}).}
#' \item{niter}{Number of MCMC iterations (\code{integer}).}
#' \item{nsim}{Number of simulations from the simulator at each step of the MCMC algorithm (\code{integer}).}
#' \item{burn}{Number of initial MCMC iterations that are discarded (\code{integer}).}
#' \item{priorFun}{Function that takes a vector of parameters as input and the log-density of the prior
#' as output. If the output is not finite the proposed point will be discarded. (\code{function}).
#' The function needs to have signature \code{fun(x, ...)}, where \code{x} represents the input parameters (\code{function}).}
#' \item{propCov}{Matrix representing the covariance matrix to be used to perturb the
#' parameters at each step of the MCMC chain (\code{matrix}).}
#' \item{targetRate}{Target rate for the adaptive MCMC sampler. Should be in (0, 1), default is NULL (no adaptation). The adaptation
#' uses the approach of Vihola (2011). (\code{numeric})}
#' \item{recompute}{If TRUE the synthetic likelihood will be evaluated at the current and proposed positions in the parameter
#' space (thus doubling the computational effort). If FALSE the likelihood of the current
#' position won't be re-estimated (\code{logical}).}
#' \item{multicore}{If TRUE the \code{object@@simulator} and \code{object@@summaries} functions will
#' be executed in parallel. That is the nsim simulations will be divided in multiple cores (\code{logical}).}
#' \item{ncores}{Number of cores to use if multicore == TRUE (\code{integer}).}
#' \item{accRate}{Acceptance rate of the MCMC chain, between 0 and 1 (\code{numeric}).}
#' \item{chains}{Matrix of size niter by length(initPar) where the i-th row contains the position of the MCMC algorithm
#'  in the parameter space at the i-th iteration (\code{matrix}).}
#'  \item{llkChain}{Vector of niter elements where the i-th element contains the estimate of the
#'                  synthetic likelihood at the i-th iteration (\code{numeric}).}
#' \item{control}{Control parameters used by the MCMC sampler: \itemize{
#' \item{\code{theta} = controls the speed of adaption. Should be between 0.5 and 1.
#' A lower gamma leads to faster adaption.}
#' \item{\code{adaptStart} = iteration where the adaption starts. Default 0.}
#' \item{\code{adaptStop} = iteration where the adaption stops. Default \code{burn + niter}}
#' \item{\code{saveFile} = path to the file where the intermediate results will be stored (ex: "~/Res.RData").}
#' \item{\code{saveFreq} = frequency with which the intermediate results will be saved on \code{saveFile}.
#' Default 100.}
#'  \item{\code{verbose}  = if \code{TRUE} intermediate posterior means will be printed.}
#'  \item{\code{verbFreq} = frequency with which the intermediate posterior means will be printed. Default 500.}
#' } }
#' }
#'
#' @name smcmc-class
#' @rdname smcmc-class
#' @exportClass smcmc
#' @references Vihola, M. (2011) Robust adaptive Metropolis algorithm with coerced acceptance rate.
#' Statistics and Computing.
#' @author Matteo Fasiolo <matteo.fasiolo@@gmail.com>
#' @examples
#' # Load "synlik" object
#' data(ricker_sl)
#'
#' plot(ricker_sl)
#'
#' # MCMC estimation
#' set.seed(4235)
#' ricker_sl <- smcmc(ricker_sl,
#' initPar = c(3.2, -1, 2.6),
#' niter = 50,
#' burn = 3,
#' priorFun = function(input, ...) 1,
#' propCov = diag( c(0.1, 0.1, 0.1) )^2,
#' nsim = 200,
#' multicore = FALSE)
#'
#' # Continue with additional 50 iterations
#' ricker_sl <- continue(ricker_sl, niter = 50)
#'
#' plot(ricker_sl)
#'
setClass("smcmc",
representation( initPar = "numeric",
niter = "integer",
nsim = "integer",
propCov = "matrix",
burn = "integer",
priorFun = "function",
targetRate = "numericOrNULL",
recompute = "logical",
multicore = "logical",
ncores = "integer",
control = "list",
accRate = "numeric",
chains = "matrix",
llkChain = "numeric"
),
prototype = prototype(initPar = numeric(),
niter = 0L,
nsim = 0L,
propCov = matrix( , 0, 0),
burn = 0L,
priorFun = function(param, ...) 0,
targetRate = NULL,
recompute = FALSE,
multicore = FALSE,
ncores = 1L,
control = list(),
accRate = numeric(),
chains = matrix( , 0, 0),
llkChain = numeric()),
contains = "synlik",
validity = .check.smcmc
)
|
/fuzzedpackages/synlik/R/Class_definitions.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 11,728
|
r
|
#####################################################################################################################
########################################## CLASS DEFINITIONS AND VALIDITY CHECKS ####################################
#####################################################################################################################
#' Dummy class
#' @description Class unions for internal use only
#' @name numericOrNULL-class
#' @rdname numericOrNULL-class
#' @exportClass numericOrNULL
setClassUnion("numericOrNULL", c("numeric", "NULL"))
#' Dummy class
#' @description Class unions for internal use only
#' @name ANYOrNULL-class
#' @rdname ANYOrNULL-class
#' @exportClass ANYOrNULL
setClassUnion("ANYOrNULL", c("ANY", "NULL"))
#' Dummy class
#' @description Class unions for internal use only
#' @name functionOrNULL-class
#' @rdname functionOrNULL-class
#' @exportClass functionOrNULL
setClassUnion("functionOrNULL", c("function", "NULL"))
##################################
######### synlik: the base class
##################################
### Validity check for "synlik" objects.
### Returns TRUE if the object is valid, otherwise a character vector of error
### messages (the convention expected by setClass(validity = ...)).  A
### malformed simulator/summaries signature aborts immediately via stop().
.check.synlik <- function(object)
{
  if(!is(object, "synlik")) stop("object has to be of class \"synlik\" ")
  errors <- character()
  # @param must be a non-empty, fully named numeric vector.
  if(length(object@param) == 0) errors <- c(errors, "length(param) == 0")
  if(is.null(names(object@param)) || any("" %in% names(object@param)) )
    errors <- c(errors, "param has to be a named vector")
  # as.list(args(f)) yields the formals plus a trailing NULL body element, so
  # length < 5 means the simulator has fewer than 4 formal arguments.
  simulArgs <- names(as.list(args(object@simulator)))
  if( length(simulArgs) < 5 || !identical(simulArgs[1:3], c("param", "nsim", "extraArgs")) || simulArgs[length(simulArgs) - 1] != "...")
    stop("The first 3 arguments of the simulator should be \"param\", \"nsim\" and \"extraArgs\" (in that order) and the last should be \"...\"")
  if( !is.null(object@summaries) )
  {
    statsArgs <- names(as.list(args(object@summaries)))
    # Only the first argument's name ("x"), the trailing "...", and a minimum
    # of 3 formals are enforced; the message below now matches these checks
    # (it previously claimed the second argument's name was also verified).
    if( length(statsArgs) < 4 || (statsArgs[1] != "x") || statsArgs[length(statsArgs) - 1] != "...")
      stop("The first argument of the \"summaries\" function should be \"x\", the last should be \"...\", and it should accept at least 3 arguments")
  }
  if(length(errors) == 0) TRUE else errors
}
### Class Definition
#' \code{synlik-class}
#'
#' @description Basic class for simulation-based approximate inference using Synthetic Likelihood methods.
#'
#' @section Slots:
#' \describe{
#' \item{param}{Named vector of parameters used by \code{object@@simulator} (\code{numeric}).}
#' \item{simulator}{Function that simulates from the model (\code{function}). It has to have prototype \code{fun(param, nsim, extraArgs, ...)}.
#' If \code{summaries()} is not specified the \code{simulator()} has output a matrix with \code{nsim} rows, where
#' each row is a vector of simulated statistics. Otherwise it can output any kind of object, and this output will be
#' passed to \code{summaries()}.}
#' \item{summaries}{Function that transforms simulated data into summary statistics (\code{function}).
#' It has to have prototype \code{fun(x, extraArgs, ...)} and it has to output a matrix with \code{nsim} rows, where
#' each row is a vector of simulated statistics. Parameter \code{x} contains the data.}
#' \item{data}{Object containing the observed data or statistics (\code{ANY}).}
#' \item{extraArgs}{List containing all the extra arguments to be passed to \code{object@@simulator} and \code{object@@summaries} (\code{list}).}
#' \item{plotFun}{Function that will be used to plot \code{object@@data}. Prototype should be \code{fun(x, ...)} (\code{function}).}
#' }
#'
#' @name synlik-class
#' @rdname synlik-class
#' @references Simon N Wood. Statistical inference for noisy nonlinear ecological dynamic systems. Nature, 466(7310):1102--1104, 2010.
#' @author Matteo Fasiolo <matteo.fasiolo@@gmail.com>
#' @examples
#' #### Create Object
#' ricker_sl <- synlik(simulator = rickerSimul,
#' summaries = rickerStats,
#' param = c( logR = 3.8, logSigma = log(0.3), logPhi = log(10) ),
#' extraArgs = list("nObs" = 50, "nBurn" = 50),
#' plotFun = function(input, ...)
#' plot(drop(input), type = 'l', ylab = "Pop", xlab = "Time", ...)
#' )
#'
#' # Simulate from the object
#' ricker_sl@@data <- simulate(ricker_sl)
#' ricker_sl@@extraArgs$obsData <- ricker_sl@@data # Needed by WOOD2010 statistics
#'
#' plot(ricker_sl)
#' @exportClass synlik
#'
setClass( "synlik",
representation( param = "numeric",
simulator = "function",
summaries = "functionOrNULL",
data = "ANY",
extraArgs = "list",
plotFun = "functionOrNULL"
),
prototype = prototype(
param = numeric(),
simulator = function() NULL,
summaries = NULL,
data = NULL,
extraArgs = list(),
plotFun = NULL
),
validity = .check.synlik
)
#' @param ... See section "Slots".
#' @rdname synlik-class
synlik <- function(...)
{
  # Expanding arguments and adding "synlik" class.
  # match.call() captures the caller's arguments unevaluated, so do.call("new", ...)
  # receives them exactly as supplied; [-1] drops the function name from the
  # call list before prepending the class name as new()'s first argument.
  arg <- c("synlik", as.list(match.call(expand.dots = TRUE))[-1])
  do.call("new", arg)
}
##################################
######### smcmc: a synlik object after MCMC
##################################
### Validity function for the "smcmc" class.
### Returns TRUE when the object passes every check, otherwise a character
### vector describing the failures (the convention setClass(validity=) expects).
.check.smcmc <- function(object)
{
  if (!is(object, "smcmc")) {
    stop("object has to be of class \"smcmc\" ")
  }
  problems <- character()
  if (length(object@initPar) == 0) {
    problems <- c(problems, "length(initPar) should be > 0")
  }
  if (length(problems) > 0) problems else TRUE
}
### Class Definition
#' \code{smcmc-class}
#'
#' @description Object representing the results of MCMC estimation on an object of class \code{synlik}, from which it inherits.
#'
#' @section Slots:
#' \describe{
#' \item{initPar}{Vector of initial parameters where the MCMC chain will start (\code{numeric}).}
#' \item{niter}{Number of MCMC iterations (\code{integer}).}
#' \item{nsim}{Number of simulations from the simulator at each step of the MCMC algorithm (\code{integer}).}
#' \item{burn}{Number of initial MCMC iterations that are discarded (\code{integer}).}
#' \item{priorFun}{Function that takes a vector of parameters as input and the log-density of the prior
#' as output. If the output is not finite the proposed point will be discarded. (\code{function}).
#' The function needs to have signature \code{fun(x, ...)}, where \code{x} represents the input parameters (\code{function}).}
#' \item{propCov}{Matrix representing the covariance matrix to be used to perturb the
#' parameters at each step of the MCMC chain (\code{matrix}).}
#' \item{targetRate}{Target rate for the adaptive MCMC sampler. Should be in (0, 1), default is NULL (no adaptation). The adaptation
#' uses the approach of Vihola (2011). (\code{numeric})}
#' \item{recompute}{If TRUE the synthetic likelihood will be evaluated at the current and proposed positions in the parameter
#' space (thus doubling the computational effort). If FALSE the likelihood of the current
#' position won't be re-estimated (\code{logical}).}
#' \item{multicore}{If TRUE the \code{object@@simulator} and \code{object@@summaries} functions will
#' be executed in parallel. That is the nsim simulations will be divided in multiple cores (\code{logical}).}
#' \item{ncores}{Number of cores to use if multicore == TRUE (\code{integer}).}
#' \item{accRate}{Acceptance rate of the MCMC chain, between 0 and 1 (\code{numeric}).}
#' \item{chains}{Matrix of size niter by length(initPar) where the i-th row contains the position of the MCMC algorithm
#'  in the parameter space at the i-th iteration (\code{matrix}).}
#'  \item{llkChain}{Vector of niter elements where the i-th element contains the estimate of the
#'                  synthetic likelihood at the i-th iteration (\code{numeric}).}
#' \item{control}{Control parameters used by the MCMC sampler: \itemize{
#' \item{\code{theta} = controls the speed of adaption. Should be between 0.5 and 1.
#' A lower gamma leads to faster adaption.}
#' \item{\code{adaptStart} = iteration where the adaption starts. Default 0.}
#' \item{\code{adaptStop} = iteration where the adaption stops. Default \code{burn + niter}}
#' \item{\code{saveFile} = path to the file where the intermediate results will be stored (ex: "~/Res.RData").}
#' \item{\code{saveFreq} = frequency with which the intermediate results will be saved on \code{saveFile}.
#' Default 100.}
#'  \item{\code{verbose}  = if \code{TRUE} intermediate posterior means will be printed.}
#'  \item{\code{verbFreq} = frequency with which the intermediate posterior means will be printed. Default 500.}
#' } }
#' }
#'
#' @name smcmc-class
#' @rdname smcmc-class
#' @exportClass smcmc
#' @references Vihola, M. (2011) Robust adaptive Metropolis algorithm with coerced acceptance rate.
#' Statistics and Computing.
#' @author Matteo Fasiolo <matteo.fasiolo@@gmail.com>
#' @examples
#' # Load "synlik" object
#' data(ricker_sl)
#'
#' plot(ricker_sl)
#'
#' # MCMC estimation
#' set.seed(4235)
#' ricker_sl <- smcmc(ricker_sl,
#' initPar = c(3.2, -1, 2.6),
#' niter = 50,
#' burn = 3,
#' priorFun = function(input, ...) 1,
#' propCov = diag( c(0.1, 0.1, 0.1) )^2,
#' nsim = 200,
#' multicore = FALSE)
#'
#' # Continue with additional 50 iterations
#' ricker_sl <- continue(ricker_sl, niter = 50)
#'
#' plot(ricker_sl)
#'
setClass("smcmc",
representation( initPar = "numeric",
niter = "integer",
nsim = "integer",
propCov = "matrix",
burn = "integer",
priorFun = "function",
targetRate = "numericOrNULL",
recompute = "logical",
multicore = "logical",
ncores = "integer",
control = "list",
accRate = "numeric",
chains = "matrix",
llkChain = "numeric"
),
prototype = prototype(initPar = numeric(),
niter = 0L,
nsim = 0L,
propCov = matrix( , 0, 0),
burn = 0L,
priorFun = function(param, ...) 0,
targetRate = NULL,
recompute = FALSE,
multicore = FALSE,
ncores = 1L,
control = list(),
accRate = numeric(),
chains = matrix( , 0, 0),
llkChain = numeric()),
contains = "synlik",
validity = .check.smcmc
)
|
## Capstone: Coursera Data Science
## Kristen Dardia
# SHINY SERVER
# Attach app dependencies: shiny (UI/server), stringr (string helpers), tm (text cleaning).
library(shiny)
library(stringr)
library(tm)
# Load the pre-computed n-gram frequency tables saved alongside the app.
bg <- readRDS("bigram.RData")    # bigrams
tg <- readRDS("trigram.RData")   # trigrams
qd <- readRDS("quadgram.RData")  # quadgrams
# Shorten the word columns ("word1" -> "w1", ...) in each table.
shorten_names <- function(df, mapping) {
  for (old in names(mapping)) {
    names(df)[names(df) == old] <- mapping[[old]]
  }
  df
}
bg <- shorten_names(bg, c(word1 = "w1", word2 = "w2"))
tg <- shorten_names(tg, c(word1 = "w1", word2 = "w2", word3 = "w3"))
qd <- shorten_names(qd, c(word1 = "w1", word2 = "w2", word3 = "w3", word4 = "w4"))
# Status message shown in the UI; updated via <<- by the prediction helpers.
message <- ""
## predictWord(): clean the raw input text and predict the next word.
## Cleaning (via tm helpers): lowercase, strip punctuation (keeping
## intra-word dashes), strip digits, collapse whitespace.  The cleaned string
## is split into words and the last 1/2/3 of them are dispatched to the
## bigram/trigram/quadgram lookup; those helpers back off to shorter n-grams
## when no match is found.
## NOTE(review): an empty or whitespace-only input leaves n == 0, so no
## branch fires and the function returns NULL -- confirm the UI handles that.
predictWord <- function(the_word) {
  word_add <- stripWhitespace(removeNumbers(removePunctuation(tolower(the_word),preserve_intra_word_dashes = TRUE)))
  # testing print("word_add")
  the_word <- strsplit(word_add, " ")[[1]]
  # testing print("the_word")
  n <- length(the_word)
  # testing print(n)
  ########### check Bigram
  if (n == 1) {the_word <- as.character(tail(the_word,1)); functionBigram(the_word)}
  ################ check trigram
  else if (n == 2) {the_word <- as.character(tail(the_word,2)); functionTrigram(the_word)}
  ############### check quadgram
  else if (n >= 3) {the_word <- as.character(tail(the_word,3)); functionQuadgram(the_word)}
}
########################################################################
# Bigram lookup: match the single word against the first column of the bigram
# table and return the most frequent continuation, or "N/A" when the word was
# never observed.  Side effect: updates the global status `message` via <<-
# so the UI can report which table (or fallback) produced the answer.
# Fix: the success message previously misspelled "Frequency" as "Freqeuncy".
functionBigram <- function(the_word) {
  prediction <- as.character(head(bg[bg$w1 == the_word[1], 2], 1))
  if (identical(character(0), prediction)) {
    message <<- "No result is found, the app will return N/A"
    "N/A"
  }
  else {
    message <<- "Predicting the Word using N-gram Frequency Matrix"
    prediction
  }
}
########################################################################
# Trigram lookup: match the two words against the first two columns of the
# trigram table and return the most frequent third word.  When no trigram
# matches, back off to the bigram model via predictWord() on the last word.
# Fix: the success message previously misspelled "Frequency" as "Fruequency"
# and carried a stray trailing space.
functionTrigram <- function(the_word) {
  prediction <- as.character(head(tg[tg$w1 == the_word[1]
                                     & tg$w2 == the_word[2], 3], 1))
  if (identical(character(0), prediction)) {
    # No trigram hit: back off to the bigram model.
    as.character(predictWord(the_word[2]))
  }
  else {
    message <<- "Predicting the Word using N-gram Frequency Matrix"
    prediction
  }
}
########################################################################
# Quadgram lookup: match the three words against the first three columns of
# the quadgram table and return the most frequent fourth word.  When no
# quadgram matches, back off to the trigram model via predictWord() on the
# last two words.  Side effect: updates the global status `message`.
functionQuadgram <- function(the_word) {
  prediction <- as.character(head(qd[qd$w1 == the_word[1]
                                     & qd$w2 == the_word[2]
                                     & qd$w3 == the_word[3], 4], 1))
  if (length(prediction) == 0) {
    # No quadgram hit: back off to the trigram model.
    as.character(predictWord(paste(the_word[2], the_word[3], sep = " ")))
  }
  else {
    message <<- "Trying to Predict the Word using Quadgram Frequency Matrix"
    prediction
  }
}
#################################################
## ShinyServer code that calls the prediction function predictWord()
# Shiny server: wires the text input to the prediction pipeline.
shinyServer(function(input, output) {
  output$prediction <- renderPrint({
    result <- predictWord(input$inputText)
    # NOTE(review): assigning output$sentence2 inside another render block is
    # unconventional Shiny; it works here because `message` is set as a side
    # effect of predictWord(), but a reactiveVal would be cleaner.
    output$sentence2 <- renderText({message})
    result
  });
  output$sentence1 <- renderText({
    # Echo the raw user input back to the UI.
    input$inputText});
}
)
|
/server.R
|
no_license
|
ChaChaWoo/Capstone
|
R
| false
| false
| 3,841
|
r
|
## Capstone: Coursera Data Science
## Kristen Dardia
# SHINY SERVER
# Attach app dependencies: shiny (UI/server), stringr (string helpers), tm (text cleaning).
library(shiny)
library(stringr)
library(tm)
# Load the pre-computed n-gram frequency tables saved alongside the app.
bg <- readRDS("bigram.RData")    # bigrams
tg <- readRDS("trigram.RData")   # trigrams
qd <- readRDS("quadgram.RData")  # quadgrams
# Shorten the word columns ("word1" -> "w1", ...) in each table.
shorten_names <- function(df, mapping) {
  for (old in names(mapping)) {
    names(df)[names(df) == old] <- mapping[[old]]
  }
  df
}
bg <- shorten_names(bg, c(word1 = "w1", word2 = "w2"))
tg <- shorten_names(tg, c(word1 = "w1", word2 = "w2", word3 = "w3"))
qd <- shorten_names(qd, c(word1 = "w1", word2 = "w2", word3 = "w3", word4 = "w4"))
# Status message shown in the UI; updated via <<- by the prediction helpers.
message <- ""
## predictWord(): clean the raw input text and predict the next word.
## Cleaning (via tm helpers): lowercase, strip punctuation (keeping
## intra-word dashes), strip digits, collapse whitespace.  The cleaned string
## is split into words and the last 1/2/3 of them are dispatched to the
## bigram/trigram/quadgram lookup; those helpers back off to shorter n-grams
## when no match is found.
## NOTE(review): an empty or whitespace-only input leaves n == 0, so no
## branch fires and the function returns NULL -- confirm the UI handles that.
predictWord <- function(the_word) {
  word_add <- stripWhitespace(removeNumbers(removePunctuation(tolower(the_word),preserve_intra_word_dashes = TRUE)))
  # testing print("word_add")
  the_word <- strsplit(word_add, " ")[[1]]
  # testing print("the_word")
  n <- length(the_word)
  # testing print(n)
  ########### check Bigram
  if (n == 1) {the_word <- as.character(tail(the_word,1)); functionBigram(the_word)}
  ################ check trigram
  else if (n == 2) {the_word <- as.character(tail(the_word,2)); functionTrigram(the_word)}
  ############### check quadgram
  else if (n >= 3) {the_word <- as.character(tail(the_word,3)); functionQuadgram(the_word)}
}
########################################################################
# Bigram lookup: match the single word against the first column of the bigram
# table and return the most frequent continuation, or "N/A" when the word was
# never observed.  Side effect: updates the global status `message` via <<-
# so the UI can report which table (or fallback) produced the answer.
# Fix: the success message previously misspelled "Frequency" as "Freqeuncy".
functionBigram <- function(the_word) {
  prediction <- as.character(head(bg[bg$w1 == the_word[1], 2], 1))
  if (identical(character(0), prediction)) {
    message <<- "No result is found, the app will return N/A"
    "N/A"
  }
  else {
    message <<- "Predicting the Word using N-gram Frequency Matrix"
    prediction
  }
}
########################################################################
# Trigram lookup: match the two words against the first two columns of the
# trigram table and return the most frequent third word.  When no trigram
# matches, back off to the bigram model via predictWord() on the last word.
# Fix: the success message previously misspelled "Frequency" as "Fruequency"
# and carried a stray trailing space.
functionTrigram <- function(the_word) {
  prediction <- as.character(head(tg[tg$w1 == the_word[1]
                                     & tg$w2 == the_word[2], 3], 1))
  if (identical(character(0), prediction)) {
    # No trigram hit: back off to the bigram model.
    as.character(predictWord(the_word[2]))
  }
  else {
    message <<- "Predicting the Word using N-gram Frequency Matrix"
    prediction
  }
}
########################################################################
# Quadgram lookup: match the three words against the first three columns of
# the quadgram table and return the most frequent fourth word.  When no
# quadgram matches, back off to the trigram model via predictWord() on the
# last two words.  Side effect: updates the global status `message`.
functionQuadgram <- function(the_word) {
  prediction <- as.character(head(qd[qd$w1 == the_word[1]
                                     & qd$w2 == the_word[2]
                                     & qd$w3 == the_word[3], 4], 1))
  if (length(prediction) == 0) {
    # No quadgram hit: back off to the trigram model.
    as.character(predictWord(paste(the_word[2], the_word[3], sep = " ")))
  }
  else {
    message <<- "Trying to Predict the Word using Quadgram Frequency Matrix"
    prediction
  }
}
#################################################
## ShinyServer code that calls the prediction function predictWord()
# Shiny server: wires the text input to the prediction pipeline.
shinyServer(function(input, output) {
  output$prediction <- renderPrint({
    result <- predictWord(input$inputText)
    # NOTE(review): assigning output$sentence2 inside another render block is
    # unconventional Shiny; it works here because `message` is set as a side
    # effect of predictWord(), but a reactiveVal would be cleaner.
    output$sentence2 <- renderText({message})
    result
  });
  output$sentence1 <- renderText({
    # Echo the raw user input back to the UI.
    input$inputText});
}
)
|
##' Setting scales for \code{sonify} objects
##'
##' \code{sonscaling()} is invoked in the call to \code{sonify} or
##' added onto it in order to specify precisely how the data
##' parameters are mapped onto sonic parameters. It is most easy to
##' use via the \code{scale_} convenience functions, such as
##' \code{\link{scale_time_continuous}}.
##'
##' Each argument of \code{sonscaling} is in the form of a list, where
##' the first element is the minimum value of the sonic parameter, the
##' second is the maximum, and the third is a function that maps the
##' data column onto the range of the sonic parameter. The only such
##' function included with \pkg{playitbyr} right now is
##' \code{\link{linear_scale}}.
##'
##'
##' @param \dots See Details section.
##' @return A \code{sonscaling} object
##' @seealso \code{\link{sonify}} where this is eventually used;
##'   \code{\link{sonaes}} for defining which data columns get
##' mapped onto these sonic parameters;
##' \code{\link{linear_scale}} for an example of a scaling function.
##' @keywords internal
##' @export
## Construct a "sonscaling" object from named scale specifications.
## Each argument must be named after a sound parameter (verified by
## .checkSoundParams()) and be a list(limits, soundlimits, scaling.function);
## each element is validated and canonically named by sonscale().
sonscaling <- function(...) {
  out <- list(...)
  out <- lapply(out, sonscale)
  # Every scale must be supplied as a named argument.  Note: the original
  # condition mixed the scalar && with the vectorised |, which parsed as
  # ((length > 0 && is.null(names)) | any(names == "")); the explicit grouping
  # below is the intended reading (extensionally equivalent here, since an
  # empty list has NULL names).
  if (length(out) > 0 &&
      (is.null(names(out)) || any(names(out) == "")))
    stop("All arguments must be named.")
  .checkSoundParams(names(out))
  class(out) <- "sonscaling"
  out
}
## Validate a single scale specification and attach canonical names.
## `x` must be a list of exactly three elements: data limits (numeric of
## length 2, or NULL for automatic limits), sound limits (numeric of
## length 2), and a scaling function mapping the data onto the sound range.
## Fixes: the length error message claimed "length 2 or 3" although only
## length 3 is accepted; scalar conditions now use ||/&& instead of the
## vectorised |; `call. = F` spelled out as FALSE.
sonscale <- function(x) {
  if (!is.list(x))
    stop("All arguments to sonscaling() must be a list")
  else if (length(x) == 3) {
    names(x) <- c("limits", "soundlimits", "scaling.function")
  } else stop("Each argument must be a list of length 3.")
  if ((!is.numeric(x$limits) || length(x$limits) != 2) && !is.null(x$limits))
    stop("All arguments' 'limits' slot must be numeric of length 2, or NULL")
  if (!is.numeric(x$soundlimits) || length(x$soundlimits) != 2)
    stop("All arguments' 'soundlimits' slot must be of length 2")
  if (!is.function(x$scaling.function))
    stop("sonscaling: The third element of each argument must be a function",
         " such as linear_scale()", call. = FALSE)
  return(x)
}
|
/R/sonscaling.R
|
no_license
|
cran/playitbyr
|
R
| false
| false
| 2,086
|
r
|
##' Setting scales for \code{sonify} objects
##'
##' \code{sonscaling()} is invoked in the call to \code{sonify} or
##' added onto it in order to specify precisely how the data
##' parameters are mapped onto sonic parameters. It is most easy to
##' use via the \code{scale_} convenience functions, such as
##' \code{\link{scale_time_continuous}}.
##'
##' Each argument of \code{sonscaling} is in the form of a list, where
##' the first element is the minimum value of the sonic parameter, the
##' second is the maximum, and the third is a function that maps the
##' data column onto the range of the sonic parameter. The only such
##' function included with \pkg{playitbyr} right now is
##' \code{\link{linear_scale}}.
##'
##'
##' @param \dots See Details section.
##' @return A \code{sonscaling} object
##' @seealso \code{\link{sonify}} where this is eventually used;
##'   \code{\link{sonaes}} for defining which data columns get
##' mapped onto these sonic parameters;
##' \code{\link{linear_scale}} for an example of a scaling function.
##' @keywords internal
##' @export
## Construct a "sonscaling" object from named scale specifications.
## Each argument must be named after a sound parameter (verified by
## .checkSoundParams()) and be a list(limits, soundlimits, scaling.function);
## each element is validated and canonically named by sonscale().
sonscaling <- function(...) {
  out <- list(...)
  out <- lapply(out, sonscale)
  # Every scale must be supplied as a named argument.  Note: the original
  # condition mixed the scalar && with the vectorised |, which parsed as
  # ((length > 0 && is.null(names)) | any(names == "")); the explicit grouping
  # below is the intended reading (extensionally equivalent here, since an
  # empty list has NULL names).
  if (length(out) > 0 &&
      (is.null(names(out)) || any(names(out) == "")))
    stop("All arguments must be named.")
  .checkSoundParams(names(out))
  class(out) <- "sonscaling"
  out
}
## Validate a single scale specification and attach canonical names.
## `x` must be a list of exactly three elements: data limits (numeric of
## length 2, or NULL for automatic limits), sound limits (numeric of
## length 2), and a scaling function mapping the data onto the sound range.
## Fixes: the length error message claimed "length 2 or 3" although only
## length 3 is accepted; scalar conditions now use ||/&& instead of the
## vectorised |; `call. = F` spelled out as FALSE.
sonscale <- function(x) {
  if (!is.list(x))
    stop("All arguments to sonscaling() must be a list")
  else if (length(x) == 3) {
    names(x) <- c("limits", "soundlimits", "scaling.function")
  } else stop("Each argument must be a list of length 3.")
  if ((!is.numeric(x$limits) || length(x$limits) != 2) && !is.null(x$limits))
    stop("All arguments' 'limits' slot must be numeric of length 2, or NULL")
  if (!is.numeric(x$soundlimits) || length(x$soundlimits) != 2)
    stop("All arguments' 'soundlimits' slot must be of length 2")
  if (!is.function(x$scaling.function))
    stop("sonscaling: The third element of each argument must be a function",
         " such as linear_scale()", call. = FALSE)
  return(x)
}
|
# Canopy imagery analysis in R
# Segments individual tree crowns from an aerial forest photograph using
# itcSegment::itcIMG and overlays the crown polygons on the image.
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged --
# they clobber the user's session and hard-code a machine-specific path.
rm(list=ls())
setwd("/Users/zach/Dropbox (ZachTeam)/Bioverse_Labs/Projects/Canopy_analysis")
# Load packages.  Fix: library(raster) was previously listed twice.
library(itcSegment)
library(raster)
library(sp)
library(rgdal)
library(tiff)
library(ggplot2)
library(gridGraphics)
library(magick)
library(gridExtra)
library(rgeos)
library(knitr)
library(imager)
library(spatialEco)
##################################################################################################
# Read the source photo twice: once with magick (for display) and once as a
# raster layer (for segmentation).
new.img<-image_read("/Users/zach/Dropbox (ZachTeam)/Projects/Spotted_Lanternfly/Analyses/Canopy_analysis/Data/raymond-gehman-an-aerial-view-of-a-road-passing-through-a-thick-forest_a-l-3996134-4990703.jpg")
plot(new.img)
img.ras<-raster("/Users/zach/Dropbox (ZachTeam)/Projects/Spotted_Lanternfly/Analyses/Canopy_analysis/Data/raymond-gehman-an-aerial-view-of-a-road-passing-through-a-thick-forest_a-l-3996134-4990703.jpg")
plot(img.ras)
# Mask out bright pixels (value > 140) before segmentation.
img.ras[img.ras > 140] <- NA
plot(img.ras)
# Delineate individual tree crowns; ischm = FALSE because the input is
# imagery, not a canopy height model.  Other parameters are tuned for this image.
tree_crowns<-itcIMG(img.ras, epsg=32632,searchWinSize = 33, TRESHSeed = 0.5,
                    TRESHCrown = 0.1, DIST = 70, th = 50, ischm = FALSE)
plot(new.img)
plot(tree_crowns, border="red", add=TRUE)
|
/Tree_of_Heaven_detection/tree_canopy_analysis.R
|
permissive
|
zachladin/Spotted_Lanternfly
|
R
| false
| false
| 1,179
|
r
|
# Canopy imagery analysis in R
# Segments individual tree crowns from an aerial forest photograph using
# itcSegment::itcIMG and overlays the crown polygons on the image.
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged --
# they clobber the user's session and hard-code a machine-specific path.
rm(list=ls())
setwd("/Users/zach/Dropbox (ZachTeam)/Bioverse_Labs/Projects/Canopy_analysis")
# Load packages.  Fix: library(raster) was previously listed twice.
library(itcSegment)
library(raster)
library(sp)
library(rgdal)
library(tiff)
library(ggplot2)
library(gridGraphics)
library(magick)
library(gridExtra)
library(rgeos)
library(knitr)
library(imager)
library(spatialEco)
##################################################################################################
# Read the source photo twice: once with magick (for display) and once as a
# raster layer (for segmentation).
new.img<-image_read("/Users/zach/Dropbox (ZachTeam)/Projects/Spotted_Lanternfly/Analyses/Canopy_analysis/Data/raymond-gehman-an-aerial-view-of-a-road-passing-through-a-thick-forest_a-l-3996134-4990703.jpg")
plot(new.img)
img.ras<-raster("/Users/zach/Dropbox (ZachTeam)/Projects/Spotted_Lanternfly/Analyses/Canopy_analysis/Data/raymond-gehman-an-aerial-view-of-a-road-passing-through-a-thick-forest_a-l-3996134-4990703.jpg")
plot(img.ras)
# Mask out bright pixels (value > 140) before segmentation.
img.ras[img.ras > 140] <- NA
plot(img.ras)
# Delineate individual tree crowns; ischm = FALSE because the input is
# imagery, not a canopy height model.  Other parameters are tuned for this image.
tree_crowns<-itcIMG(img.ras, epsg=32632,searchWinSize = 33, TRESHSeed = 0.5,
                    TRESHCrown = 0.1, DIST = 70, th = 50, ischm = FALSE)
plot(new.img)
plot(tree_crowns, border="red", add=TRUE)
|
# Model-interpretability walkthrough: fit three regression models (random
# forest, GBM, neural net) on the DALEX `apartments` data, then compare them
# with DALEX explainers, ceteris-paribus profiles, and modelDown/imlplots.
library(DALEX)
library(caret)
data("apartments")
data("apartmentsTest")
set.seed(123)
# Fit the three candidate models with caret::train().
regr_rf <- train(m2.price~., data = apartments, method = "rf", ntree = 100)
regr_gbm <- train(m2.price~., data = apartments, method = "gbm")
regr_nn <- train(m2.price~., data = apartments, method = "nnet",
                 linout = TRUE, preProcess = c("center", "scale"),
                 maxit = 500,
                 tuneGrid = expand.grid(size = 2, decay = 0),
                 trControl = trainControl(method = "none", seeds = 1))
# Wrap each model in a DALEX explainer evaluated on the hold-out set.
explainer_regr_rf <- explain(regr_rf, label = "rf",
                             data = apartmentsTest,
                             y = apartmentsTest$m2.price)
explainer_regr_gbm <- explain(regr_gbm, label = "gbm",
                              data = apartmentsTest,
                              y = apartmentsTest$m2.price)
explainer_regr_nn <- explain(regr_nn, label = "nn",
                             data = apartmentsTest,
                             y = apartmentsTest$m2.price)
# Model performance: compare residual distributions across the three models.
mp_regr_rf <- model_performance(explainer_regr_rf)
mp_regr_gbm <- model_performance(explainer_regr_gbm)
mp_regr_nn <- model_performance(explainer_regr_nn)
plot(mp_regr_rf, mp_regr_nn, mp_regr_gbm)
plot(mp_regr_rf, mp_regr_nn, mp_regr_gbm, geom = "boxplot")
# variable importance (drop-out loss measured in RMSE)
vi_regr_rf <- variable_importance(explainer_regr_rf, loss_function = loss_root_mean_square)
vi_regr_gbm <- variable_importance(explainer_regr_gbm, loss_function = loss_root_mean_square)
vi_regr_nn <- variable_importance(explainer_regr_nn, loss_function = loss_root_mean_square)
plot(vi_regr_rf, vi_regr_gbm, vi_regr_nn)
# Partial-dependence profiles for construction.year.
pdp_regr_rf <- variable_response(explainer_regr_rf, variable = "construction.year", type = "pdp")
pdp_regr_gbm <- variable_response(explainer_regr_gbm, variable = "construction.year", type = "pdp")
pdp_regr_nn <- variable_response(explainer_regr_nn, variable = "construction.year", type = "pdp")
plot(pdp_regr_rf, pdp_regr_gbm, pdp_regr_nn)
# Accumulated-local-effects profiles for the same variable.
ale_regr_rf <- variable_response(explainer_regr_rf, variable = "construction.year", type = "ale")
ale_regr_gbm <- variable_response(explainer_regr_gbm, variable = "construction.year", type = "ale")
ale_regr_nn <- variable_response(explainer_regr_nn, variable = "construction.year", type = "ale")
plot(ale_regr_rf, ale_regr_gbm, ale_regr_nn)
# Factor-type response for the categorical `district` variable.
mpp_regr_rf <- variable_response(explainer_regr_rf, variable = "district", type = "factor")
mpp_regr_gbm <- variable_response(explainer_regr_gbm, variable = "district", type = "factor")
mpp_regr_nn <- variable_response(explainer_regr_nn, variable = "district", type = "factor")
plot(mpp_regr_rf, mpp_regr_gbm, mpp_regr_nn)
# ceteris paribus plot
# ceteris paribus - with other conditions remaining the same.
# explore local behavior of a model
library("ceterisParibus")
explainer_rf <- explain(regr_rf,
                        data = apartmentsTest,
                        y = apartmentsTest$m2.price)
apartments_small_1 <- apartmentsTest[1,]                      # a single observation
apartments_small_2 <- select_sample(apartmentsTest, n = 20)   # random sample of 20 rows
cp_rf_y1 <- ceteris_paribus(explainer_rf, apartments_small_1,
                            y = apartments_small_1$m2.price)
cp_rf_y2 <- ceteris_paribus(explainer_rf, apartments_small_2,
                            y = apartments_small_2$m2.price)
plot(cp_rf_y1, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
     show_residuals = TRUE, alpha = 0.5, size_points = 3,
     alpha_points = 1, size_rugs = 0.5)
plot(cp_rf_y2, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
     alpha = 0.2, alpha_points = 1, size_rugs = 0.5)
#plot(cp_rf_y1, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
#     show_residuals = TRUE, alpha = 0.5,
#     color = "construction.year", size_points = 3)
#plot(cp_rf_y2, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
#     size = 0.5, alpha = 0.5, color = "surface")
# wangkardu plots
# explore local goodness of fit
cr_rf <- local_fit(explainer_rf, observation = apartments_small_1,
                   select_points = 0.002, selected_variable = "surface")
plot(cr_rf)
# breakdown plots - distribution changes
br_rf <- prediction_breakdown(explainer_rf, observation = apartments_small_1)
plot(br_rf)
#modelDown
#devtools::install_github("MI2DataLab/modelDown")
library(modelDown)
modelDown::modelDown(explainer_regr_rf, explainer_regr_gbm)
#devtools::install_github('compstat-lmu/imlplots')
library(imlplots)
# NOTE(review): library(imlplots) presumably attaches mlr, so train() below is
# mlr's train (masking caret::train) -- confirm the intended dispatch.
apartments_task <- makeRegrTask(data = apartments, target = "m2.price")
rf_model <- train("regr.randomForest", apartments_task)
glm_model <- train("regr.glm", apartments_task)
model_list <- list(rf_model, glm_model)
imlplots(data = apartments, task = apartments_task, models = model_list)
|
/Scripts/dalex_imlplots.R
|
no_license
|
rajkorde/RTestCode
|
R
| false
| false
| 4,770
|
r
|
# Model-interpretability walkthrough: fit three regression models (random
# forest, GBM, neural net) on the DALEX `apartments` data, then compare them
# with DALEX explainers, ceteris-paribus profiles, and modelDown/imlplots.
library(DALEX)
library(caret)
data("apartments")
data("apartmentsTest")
set.seed(123)
# Fit the three candidate models with caret::train().
regr_rf <- train(m2.price~., data = apartments, method = "rf", ntree = 100)
regr_gbm <- train(m2.price~., data = apartments, method = "gbm")
regr_nn <- train(m2.price~., data = apartments, method = "nnet",
                 linout = TRUE, preProcess = c("center", "scale"),
                 maxit = 500,
                 tuneGrid = expand.grid(size = 2, decay = 0),
                 trControl = trainControl(method = "none", seeds = 1))
# Wrap each model in a DALEX explainer evaluated on the hold-out set.
explainer_regr_rf <- explain(regr_rf, label = "rf",
                             data = apartmentsTest,
                             y = apartmentsTest$m2.price)
explainer_regr_gbm <- explain(regr_gbm, label = "gbm",
                              data = apartmentsTest,
                              y = apartmentsTest$m2.price)
explainer_regr_nn <- explain(regr_nn, label = "nn",
                             data = apartmentsTest,
                             y = apartmentsTest$m2.price)
# Model performance: compare residual distributions across the three models.
mp_regr_rf <- model_performance(explainer_regr_rf)
mp_regr_gbm <- model_performance(explainer_regr_gbm)
mp_regr_nn <- model_performance(explainer_regr_nn)
plot(mp_regr_rf, mp_regr_nn, mp_regr_gbm)
plot(mp_regr_rf, mp_regr_nn, mp_regr_gbm, geom = "boxplot")
# variable importance (drop-out loss measured in RMSE)
vi_regr_rf <- variable_importance(explainer_regr_rf, loss_function = loss_root_mean_square)
vi_regr_gbm <- variable_importance(explainer_regr_gbm, loss_function = loss_root_mean_square)
vi_regr_nn <- variable_importance(explainer_regr_nn, loss_function = loss_root_mean_square)
plot(vi_regr_rf, vi_regr_gbm, vi_regr_nn)
# Partial-dependence profiles for construction.year.
pdp_regr_rf <- variable_response(explainer_regr_rf, variable = "construction.year", type = "pdp")
pdp_regr_gbm <- variable_response(explainer_regr_gbm, variable = "construction.year", type = "pdp")
pdp_regr_nn <- variable_response(explainer_regr_nn, variable = "construction.year", type = "pdp")
plot(pdp_regr_rf, pdp_regr_gbm, pdp_regr_nn)
# Accumulated-local-effects profiles for the same variable.
ale_regr_rf <- variable_response(explainer_regr_rf, variable = "construction.year", type = "ale")
ale_regr_gbm <- variable_response(explainer_regr_gbm, variable = "construction.year", type = "ale")
ale_regr_nn <- variable_response(explainer_regr_nn, variable = "construction.year", type = "ale")
plot(ale_regr_rf, ale_regr_gbm, ale_regr_nn)
# Factor-type response for the categorical `district` variable.
mpp_regr_rf <- variable_response(explainer_regr_rf, variable = "district", type = "factor")
mpp_regr_gbm <- variable_response(explainer_regr_gbm, variable = "district", type = "factor")
mpp_regr_nn <- variable_response(explainer_regr_nn, variable = "district", type = "factor")
plot(mpp_regr_rf, mpp_regr_gbm, mpp_regr_nn)
# ceteris paribus plot
# ceteris paribus - with other conditions remaining the same.
# explore local behavior of a model
library("ceterisParibus")
explainer_rf <- explain(regr_rf,
                        data = apartmentsTest,
                        y = apartmentsTest$m2.price)
apartments_small_1 <- apartmentsTest[1,]                      # a single observation
apartments_small_2 <- select_sample(apartmentsTest, n = 20)   # random sample of 20 rows
cp_rf_y1 <- ceteris_paribus(explainer_rf, apartments_small_1,
                            y = apartments_small_1$m2.price)
cp_rf_y2 <- ceteris_paribus(explainer_rf, apartments_small_2,
                            y = apartments_small_2$m2.price)
plot(cp_rf_y1, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
     show_residuals = TRUE, alpha = 0.5, size_points = 3,
     alpha_points = 1, size_rugs = 0.5)
plot(cp_rf_y2, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
     alpha = 0.2, alpha_points = 1, size_rugs = 0.5)
#plot(cp_rf_y1, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
#     show_residuals = TRUE, alpha = 0.5,
#     color = "construction.year", size_points = 3)
#plot(cp_rf_y2, show_profiles = TRUE, show_observations = TRUE, show_rugs = TRUE,
#     size = 0.5, alpha = 0.5, color = "surface")
# wangkardu plots
# explore local goodness of fit
cr_rf <- local_fit(explainer_rf, observation = apartments_small_1,
                   select_points = 0.002, selected_variable = "surface")
plot(cr_rf)
# breakdown plots - distribution changes
br_rf <- prediction_breakdown(explainer_rf, observation = apartments_small_1)
plot(br_rf)
#modelDown
#devtools::install_github("MI2DataLab/modelDown")
library(modelDown)
modelDown::modelDown(explainer_regr_rf, explainer_regr_gbm)
#devtools::install_github('compstat-lmu/imlplots')
library(imlplots)
# NOTE(review): library(imlplots) presumably attaches mlr, so train() below is
# mlr's train (masking caret::train) -- confirm the intended dispatch.
apartments_task <- makeRegrTask(data = apartments, target = "m2.price")
rf_model <- train("regr.randomForest", apartments_task)
glm_model <- train("regr.glm", apartments_task)
model_list <- list(rf_model, glm_model)
imlplots(data = apartments, task = apartments_task, models = model_list)
|
library(shiny)
library(raincpc)
library(SDMTools)
library(raster)
library(ggplot2)
library(rnoaa)
library('plyr')

# Shiny app: boxplot of rainfall grouped by a user-selected date component.
#
# NOTE(review): `mpgData` (the data frame used inside renderPlot below) is
# never defined in this file -- it must be built in the pre-processing step
# before the app can render. TODO: confirm where mpgData is meant to come from.

# Data pre-processing ----
# (runs once at startup; does not depend on any user input)

# Define UI for the rainfall app ----
ui <- fluidPage(
  # App title ----
  titlePanel("Daily Global Rainfall"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Grouping variable for the boxplot formula `rain ~ <variable>` ----
      selectInput("variable", "Variable:",
                  c("Year" = "yyyy",
                    "Month" = "mm",
                    "Day" = "dd")),
      # Whether boxplot outliers are drawn ----
      checkboxInput("outliers", "Show outliers", TRUE)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Formatted text for caption ----
      h3(textOutput("caption")),
      # Output: rainfall boxplot; id must match `output$rain` in the server ----
      plotOutput("rain")
    )
  )
)

# Define server logic ----
server <- function(input, output) {
  # Reactive formula text, shared by the caption and the plot.
  formulaText <- reactive({
    paste("rain ~", input$variable)
  })
  # Return the formula text for printing as a caption ----
  output$caption <- renderText({
    formulaText()
  })
  # BUG FIX: this was assigned to output$mpgPlot, but the UI declares
  # plotOutput("rain"), so the plot never appeared. The ids now match.
  output$rain <- renderPlot({
    boxplot(as.formula(formulaText()),
            data = mpgData,
            outline = input$outliers,
            col = "#75AADB", pch = 19)
  })
}

# Create Shiny app ----
shinyApp(ui, server)
library(shiny)
# Canonical Shiny "reactivity" example: one reactive expression
# (datasetInput) feeds two outputs, so the dataset is loaded only once per
# change and shared by both renderers.
# Define UI for dataset viewer app ----
ui <- fluidPage(
  # App title ----
  titlePanel("Reactivity"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Text for providing a caption ----
      # Note: Changes made to the caption in the textInput control
      # are updated in the output area immediately as you type
      textInput(inputId = "caption",
                label = "Caption:",
                value = "Data Summary"),
      # Input: Selector for choosing dataset ----
      selectInput(inputId = "dataset",
                  label = "Choose a dataset:",
                  choices = c("rock", "pressure", "cars")),
      # Input: Numeric entry for number of obs to view ----
      numericInput(inputId = "obs",
                   label = "Number of observations to view:",
                   value = 10)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Formatted text for caption ----
      h3(textOutput("caption", container = span)),
      # Output: Verbatim text for data summary ----
      verbatimTextOutput("summary"),
      # Output: HTML table with requested number of observations ----
      tableOutput("view")
    )
  )
)
# Define server logic to summarize and view selected dataset ----
server <- function(input, output) {
  # Return the requested dataset ----
  # By declaring datasetInput as a reactive expression we ensure
  # that:
  #
  # 1. It is only called when the inputs it depends on changes
  # 2. The computation and result are shared by all the callers,
  #    i.e. it only executes a single time
  # (The three choices name built-in R datasets.)
  datasetInput <- reactive({
    switch(input$dataset,
           "rock" = rock,
           "pressure" = pressure,
           "cars" = cars)
  })
  # Create caption ----
  # The output$caption is computed based on a reactive expression
  # that returns input$caption. When the user changes the
  # "caption" field:
  #
  # 1. This function is automatically called to recompute the output
  # 2. New caption is pushed back to the browser for re-display
  #
  # Note that because the data-oriented reactive expressions
  # below don't depend on input$caption, those expressions are
  # NOT called when input$caption changes
  output$caption <- renderText({
    input$caption
  })
  # Generate a summary of the dataset ----
  # The output$summary depends on the datasetInput reactive
  # expression, so will be re-executed whenever datasetInput is
  # invalidated, i.e. whenever the input$dataset changes
  output$summary <- renderPrint({
    dataset <- datasetInput()
    summary(dataset)
  })
  # Show the first "n" observations ----
  # The output$view depends on both the databaseInput reactive
  # expression and input$obs, so it will be re-executed whenever
  # input$dataset or input$obs is changed
  output$view <- renderTable({
    head(datasetInput(), n = input$obs)
  })
}
# Create Shiny app ----
shinyApp(ui, server)
|
/R/R Basics for weather data/3 basic shiny.R
|
no_license
|
sandlim/WGeeks
|
R
| false
| false
| 5,050
|
r
|
library(shiny)
library(raincpc)
library(SDMTools)
library(raster)
library(ggplot2)
library(rnoaa)
library('plyr')

# Shiny app: boxplot of rainfall grouped by a user-selected date component.
#
# NOTE(review): `mpgData` (the data frame used inside renderPlot below) is
# never defined in this file -- it must be built in the pre-processing step
# before the app can render. TODO: confirm where mpgData is meant to come from.

# Data pre-processing ----
# (runs once at startup; does not depend on any user input)

# Define UI for the rainfall app ----
ui <- fluidPage(
  # App title ----
  titlePanel("Daily Global Rainfall"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Grouping variable for the boxplot formula `rain ~ <variable>` ----
      selectInput("variable", "Variable:",
                  c("Year" = "yyyy",
                    "Month" = "mm",
                    "Day" = "dd")),
      # Whether boxplot outliers are drawn ----
      checkboxInput("outliers", "Show outliers", TRUE)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: Formatted text for caption ----
      h3(textOutput("caption")),
      # Output: rainfall boxplot; id must match `output$rain` in the server ----
      plotOutput("rain")
    )
  )
)

# Define server logic ----
server <- function(input, output) {
  # Reactive formula text, shared by the caption and the plot.
  formulaText <- reactive({
    paste("rain ~", input$variable)
  })
  # Return the formula text for printing as a caption ----
  output$caption <- renderText({
    formulaText()
  })
  # BUG FIX: this was assigned to output$mpgPlot, but the UI declares
  # plotOutput("rain"), so the plot never appeared. The ids now match.
  output$rain <- renderPlot({
    boxplot(as.formula(formulaText()),
            data = mpgData,
            outline = input$outliers,
            col = "#75AADB", pch = 19)
  })
}

# Create Shiny app ----
shinyApp(ui, server)
library(shiny)
# Define UI for dataset viewer app ----
ui <- fluidPage(
# App title ----
titlePanel("Reactivity"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Text for providing a caption ----
# Note: Changes made to the caption in the textInput control
# are updated in the output area immediately as you type
textInput(inputId = "caption",
label = "Caption:",
value = "Data Summary"),
# Input: Selector for choosing dataset ----
selectInput(inputId = "dataset",
label = "Choose a dataset:",
choices = c("rock", "pressure", "cars")),
# Input: Numeric entry for number of obs to view ----
numericInput(inputId = "obs",
label = "Number of observations to view:",
value = 10)
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Formatted text for caption ----
h3(textOutput("caption", container = span)),
# Output: Verbatim text for data summary ----
verbatimTextOutput("summary"),
# Output: HTML table with requested number of observations ----
tableOutput("view")
)
)
)
# Define server logic to summarize and view selected dataset ----
server <- function(input, output) {
# Return the requested dataset ----
# By declaring datasetInput as a reactive expression we ensure
# that:
#
# 1. It is only called when the inputs it depends on changes
# 2. The computation and result are shared by all the callers,
# i.e. it only executes a single time
datasetInput <- reactive({
switch(input$dataset,
"rock" = rock,
"pressure" = pressure,
"cars" = cars)
})
# Create caption ----
# The output$caption is computed based on a reactive expression
# that returns input$caption. When the user changes the
# "caption" field:
#
# 1. This function is automatically called to recompute the output
# 2. New caption is pushed back to the browser for re-display
#
# Note that because the data-oriented reactive expressions
# below don't depend on input$caption, those expressions are
# NOT called when input$caption changes
output$caption <- renderText({
input$caption
})
# Generate a summary of the dataset ----
# The output$summary depends on the datasetInput reactive
# expression, so will be re-executed whenever datasetInput is
# invalidated, i.e. whenever the input$dataset changes
output$summary <- renderPrint({
dataset <- datasetInput()
summary(dataset)
})
# Show the first "n" observations ----
# The output$view depends on both the databaseInput reactive
# expression and input$obs, so it will be re-executed whenever
# input$dataset or input$obs is changed
output$view <- renderTable({
head(datasetInput(), n = input$obs)
})
}
# Create Shiny app ----
shinyApp(ui, server)
|
library(raster)
### Name: flip
### Title: Flip
### Aliases: flip flip,RasterLayer-method flip,RasterStackBrick-method
### Keywords: spatial
### ** Examples
# 18 x 36 raster filled with cell numbers 1..648.
r <- raster(nrow=18, ncol=36)
m <- matrix(1:ncell(r), nrow=18)
# t(m) vectorises column-major, i.e. fills the raster by rows of m.
r[] <- as.vector(t(m))
# Mirror horizontally (left-right, about the vertical axis).
rx <- flip(r, direction='x')
r[] <- as.vector(m)
# Mirror vertically (top-bottom, about the horizontal axis).
ry <- flip(r, direction='y')
|
/data/genthat_extracted_code/raster/examples/flip.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 327
|
r
|
library(raster)
### Name: flip
### Title: Flip
### Aliases: flip flip,RasterLayer-method flip,RasterStackBrick-method
### Keywords: spatial
### ** Examples
# 18 x 36 raster filled with cell numbers 1..648.
r <- raster(nrow=18, ncol=36)
m <- matrix(1:ncell(r), nrow=18)
# t(m) vectorises column-major, i.e. fills the raster by rows of m.
r[] <- as.vector(t(m))
# Mirror horizontally (left-right, about the vertical axis).
rx <- flip(r, direction='x')
r[] <- as.vector(m)
# Mirror vertically (top-bottom, about the horizontal axis).
ry <- flip(r, direction='y')
|
# Negated value matching: TRUE where `x` is absent from `table`.
# Equivalent to !(x %in% table); NA inputs never match, so they yield TRUE.
`%notin%` <- function(x, table) {
  !(x %in% table)
}
# Coerce `v` to double, silencing the "NAs introduced by coercion" warning
# that as.double() emits for unparseable values (which become NA).
force_double <- function(v) {
  coerced <- suppressWarnings(as.double(v))
  coerced
}
# Great-circle distance in kilometres between two points given in decimal
# degrees, via the haversine formula with a mean Earth radius of 6371 km.
# Reasonable approximation for a spherical Earth.
haversine_distance <- function(lat1, lon1, lat2, lon2) {
  deg2rad <- pi / 180
  phi1 <- lat1 * deg2rad
  phi2 <- lat2 * deg2rad
  lam1 <- lon1 * deg2rad
  lam2 <- lon2 * deg2rad
  # Haversine term: sin^2(dphi/2) + cos(phi1) cos(phi2) sin^2(dlam/2)
  a <- sin(abs(phi1 - phi2) / 2) ^ 2 +
    cos(phi1) * cos(phi2) * sin(abs(lam1 - lam2) / 2) ^ 2
  6371 * 2 * asin(sqrt(a))
}
#' @noRd
# Check if user enables caching. If so use cache directory, else use tempdir()
# `manage_cache` is a package-level cache manager object (defined elsewhere in
# the package); its directory is created on first use.
.set_cache <- function(cache) {
  if (isTRUE(cache)) {
    if (!dir.exists(manage_cache$cache_path_get())) {
      manage_cache$mkdir()
    }
    cache_dir <- manage_cache$cache_path_get()
  } else {
    # No caching requested: downloads live only for this session.
    cache_dir <- tempdir()
  }
  return(cache_dir)
}
#' @noRd
# Check states for précis and ag bulletin, use fuzzy matching
# Accepts a full state/territory name or an abbreviation and returns the
# matched value upper-cased. Input that is not an exact member of `states`
# is resolved with agrep() approximate matching.
.check_states <- function(state) {
  states <- c(
    "ACT",
    "NSW",
    "NT",
    "QLD",
    "SA",
    "TAS",
    "VIC",
    "WA",
    "Canberra",
    "New South Wales",
    "Northern Territory",
    "Queensland",
    "South Australia",
    "Tasmania",
    "Victoria",
    "Western Australia",
    "Australia",
    "AU",
    "AUS",
    "Oz"
  )
  if (state %in% states) {
    # Exact match: return it upper-cased.
    the_state <- toupper(state)
    return(the_state)
  } else {
    # Fuzzy-match the input against the known names/abbreviations.
    likely_states <- agrep(pattern = state,
                           x = states,
                           value = TRUE)
    if (length(likely_states) == 1) {
      the_state <- toupper(likely_states)
      message(
        paste0(
          "\nUsing state = ",
          likely_states,
          ".\n",
          "If this is not what you intended, please check your entry."
        )
      )
      return(the_state)
    } else if (length(likely_states) == 0) {
      stop(
        "\nA state or territory matching what you entered was not found.",
        "Please check and try again.\n"
      )
    }
  }
  # NOTE(review): reached only when fuzzy matching found several candidates;
  # the function then messages the alternatives and implicitly returns NULL
  # (the invisible result of message()) instead of erroring -- confirm that
  # callers handle a NULL return.
  if (length(likely_states) > 1) {
    message(
      "Multiple states match state.",
      "'\ndid you mean:\n\tstate = '",
      paste(likely_states[1],
            "or",
            likely_states[2],
            "or",
            likely_states[3]),
      "'?"
    )
  }
}
#' convert_state
#'
#' Convert state to standard abbreviation
#' @noRd
convert_state <- function(state) {
  # Normalise the input: drop spaces and punctuation, lower-case, and keep
  # only the first two characters as a lookup key.
  key <- gsub(" ", "", state)
  key <- substring(gsub("[[:punct:]]", "", tolower(key)), 1, 2)
  # Two-letter keys -> standard abbreviations. pmatch() also accepts a
  # unique one-letter prefix (e.g. "v" -> VIC, "t" -> TAS); key order is
  # significant for partial matching and must not be changed.
  lookup <- c(
    ne = "NSW", ns = "NSW",
    vi = "VIC", v  = "VIC",
    ql = "QLD", qe = "QLD", q = "QLD",
    wa = "WA",  we = "WA",  w = "WA",
    s  = "SA",  sa = "SA",  so = "SA",
    ta = "TAS", t  = "TAS",
    ac = "ACT",
    no = "NT",  nt = "NT"
  )
  out <- unname(lookup[pmatch(key, names(lookup))])
  if (any(is.na(out)))
    stop("Unable to determine state")
  return(out)
}
#' Parse areas for précis forecasts
#'
#' @param x a précis forecast object (an xml2 node for one forecast area)
#'
#' @return a data.frame of forecast areas and aac codes
#' @keywords internal
#' @author Adam H Sparks, \email{adamhsparks@@gmail.com}
#' @noRd
# get the data from areas --------------------------------------------------
.parse_areas <- function(x) {
  # Area identifier (AAC code), stored as an XML attribute on the node.
  aac <- as.character(xml2::xml_attr(x, "aac"))
  # get xml children for the forecast (there are seven of these for each area)
  forecast_periods <- xml2::xml_children(x)
  sub_out <-
    lapply(X = forecast_periods, FUN = .extract_values)
  # Stack the per-period matrices and prepend the area's AAC code.
  sub_out <- do.call(rbind, sub_out)
  sub_out <- cbind(aac, sub_out)
  return(sub_out)
}
#' extract the values of the forecast items
#'
#' @param y précis forecast values (an xml2 node for one forecast period)
#'
#' @return a data.frame of forecast values
#' @keywords internal
#' @author Adam H Sparks, \email{adamhsparks@@gmail.com}
#' @noRd
.extract_values <- function(y) {
  values <- xml2::xml_children(y)
  # Element attribute names and their text contents, as parallel vectors.
  attrs <- unlist(as.character(xml2::xml_attrs(values)))
  values <- unlist(as.character(xml2::xml_contents(values)))
  # Attributes of the period node itself (e.g. start/end times), repeated so
  # that every value row carries its forecast period.
  time_period <- unlist(t(as.data.frame(xml2::xml_attrs(y))))
  time_period <-
    time_period[rep(seq_len(nrow(time_period)), each = length(attrs)), ]
  sub_out <- cbind(time_period, attrs, values)
  row.names(sub_out) <- NULL
  return(sub_out)
}
#' Get latest historical station metadata
#'
#' Fetches BOM metadata for checking historical record availability. Also can be
#' used to return the metadata if user desires.
#'
#' @md
#'
#' @return A data frame of metadata for BOM historical records
#' @keywords internal
#' @author Adam H. Sparks, \email{adamhsparks@@gmail.com}
#' @noRd
.get_ncc <- function() {
  # CRAN NOTE avoidance
  site <- name <- lat <- lon <- start_month <- #nocov start
    start_year <- end_month <- end_year <- years <- percent <- AWS <-
    start <- end <- ncc_obs_code <- site <- NULL #nocov end
  # One fixed-width station list per element type. The three-digit NCC
  # observation code is embedded in each file name (136 = rain, 122 = tmax,
  # 123 = tmin, 193 = solar).
  base_url <- "http://www.bom.gov.au/climate/data/lists_by_element/"
  rain <- paste0(base_url, "alphaAUS_136.txt")
  tmax <- paste0(base_url, "alphaAUS_122.txt")
  tmin <- paste0(base_url, "alphaAUS_123.txt")
  solar <- paste0(base_url, "alphaAUS_193.txt")
  weather <- c(rain, tmax, tmin, solar)
  names(weather) <- c("rain", "tmax", "tmin", "solar")
  ncc_codes <- vector(mode = "list", length = length(weather))
  names(ncc_codes) <- names(weather)
  for (i in seq_along(weather)) {
    # Pull the observation code back out of the URL (chars before ".txt").
    ncc_obs_code <- substr(weather[i],
                           nchar(weather[i]) - 6,
                           nchar(weather[i]) - 4)
    ncc <-
      readr::read_table(
        weather[i],
        skip = 4,
        col_names = c(
          "site",
          "name",
          "lat",
          "lon",
          "start_month",
          "start_year",
          "end_month",
          "end_year",
          "years",
          "percent",
          "AWS"
        ),
        col_types = c(
          site = readr::col_integer(),
          name = readr::col_character(),
          lat = readr::col_double(),
          lon = readr::col_double(),
          start_month = readr::col_character(),
          start_year = readr::col_character(),
          end_month = readr::col_character(),
          end_year = readr::col_character(),
          years = readr::col_double(),
          percent = readr::col_integer(),
          AWS = readr::col_character()
        ),
        na = ""
      )
    # trim the end of the rows off that have extra info that's not in columns
    # NOTE(review): assumes the BOM file always ends with exactly 7 footer
    # rows -- confirm if the upstream format changes.
    nrows <- nrow(ncc) - 7
    ncc <- ncc[1:nrows, ]
    # unite month and year, convert to a date and add ncc_obs_code
    ncc <-
      ncc %>%
      tidyr::unite(start, start_month, start_year, sep = "-") %>%
      tidyr::unite(end, end_month, end_year, sep = "-") %>%
      dplyr::mutate(start = lubridate::dmy(paste0("01-", start))) %>%
      dplyr::mutate(end = lubridate::dmy(paste0("01-", end))) %>%
      dplyr::mutate(ncc_obs_code = ncc_obs_code)
    ncc_codes[[i]] <- ncc
  }
  # Single data frame across all four element types.
  dplyr::bind_rows(ncc_codes)
}
#' Identify URL of historical observations resources
#'
#' BOM data is available via URL endpoints but the arguments are not (well)
#' documented. This function first obtains an auxiliary data file for the given
#' station/measurement type which contains the remaining value `p_c`. It then
#' constructs the appropriate resource URL.
#'
#' @md
#' @param site site ID.
#' @param code measurement type. See internals of [get_historical].
#' @importFrom httr GET content
#'
#' @return URL of the historical observation resource
#' @keywords internal
#' @author Jonathan Carroll, \email{rpkg@@jcarroll.com.au}
#' @noRd
.get_zip_url <- function(site, code = 122) {
  # First request: availability page whose body ends with ":<p_c value>".
  url1 <-
    paste0(
      "http://www.bom.gov.au/jsp/ncc/cdio/weatherData/av?p_stn_num=",
      site,
      "&p_display_type=availableYears&p_nccObsCode=",
      code
    )
  raw <- httr::content(httr::GET(url1), "text")
  # A full BOM HTML page (rather than the tiny text payload) means the
  # station/code combination was rejected.
  if (grepl("BUREAU FOOTER", raw))
    stop("Error in retrieving resource identifiers.")
  # Everything after the last ':' is the p_c token.
  pc <- sub("^.*:", "", raw)
  url2 <-
    paste0(
      "http://www.bom.gov.au/jsp/ncc/cdio/weatherData/av?p_display_type=dailyZippedDataFile&p_stn_num=",
      site,
      "&p_c=",
      pc,
      "&p_nccObsCode=",
      code
    )
  url2
}
#' Download a BOM Data .zip File and Load into Session
#'
#' @param url URL of zip file to be downloaded/extracted/loaded.
#' @importFrom utils download.file unzip read.csv
#'
#' @return data loaded from the zip file (a data.frame)
#' @keywords internal
#' @author Jonathan Carroll, \email{rpkg@@jcarroll.com.au}
#' @noRd
.get_zip_and_load <- function(url) {
  tmp <- tempfile(fileext = ".zip")
  # Robustness fix: always remove the downloaded archive, even when unzip()
  # or read.csv() fails part-way (the original unlink() was skipped on error).
  on.exit(unlink(tmp), add = TRUE)
  utils::download.file(url, tmp, mode = "wb")
  zipped <- utils::unzip(tmp, exdir = dirname(tmp))
  datfile <- grep("Data.csv", zipped, value = TRUE)
  # Robustness fix: fail with a clear message if the archive does not contain
  # exactly one "*Data.csv" file, instead of passing a zero- or multi-length
  # vector to read.csv() and getting an opaque error.
  if (length(datfile) != 1) {
    stop("Expected exactly one 'Data.csv' file in the archive from ", url,
         " but found ", length(datfile), call. = FALSE)
  }
  message("Data saved as ", datfile)
  dat <- utils::read.csv(datfile, header = TRUE)
  dat
}
|
/R/internal_functions.R
|
no_license
|
rosseji/bomrang
|
R
| false
| false
| 8,973
|
r
|
# Negated value matching: TRUE where `x` is absent from `table`.
# Equivalent to !(x %in% table); NA inputs never match, so they yield TRUE.
`%notin%` <- function(x, table) {
  !(x %in% table)
}
# Coerce `v` to double, silencing the "NAs introduced by coercion" warning
# that as.double() emits for unparseable values (which become NA).
force_double <- function(v) {
  coerced <- suppressWarnings(as.double(v))
  coerced
}
# Great-circle distance in kilometres between two points given in decimal
# degrees, via the haversine formula with a mean Earth radius of 6371 km.
# Reasonable approximation for a spherical Earth.
haversine_distance <- function(lat1, lon1, lat2, lon2) {
  deg2rad <- pi / 180
  phi1 <- lat1 * deg2rad
  phi2 <- lat2 * deg2rad
  lam1 <- lon1 * deg2rad
  lam2 <- lon2 * deg2rad
  # Haversine term: sin^2(dphi/2) + cos(phi1) cos(phi2) sin^2(dlam/2)
  a <- sin(abs(phi1 - phi2) / 2) ^ 2 +
    cos(phi1) * cos(phi2) * sin(abs(lam1 - lam2) / 2) ^ 2
  6371 * 2 * asin(sqrt(a))
}
#' @noRd
# Check if user enables caching. If so use cache directory, else use tempdir()
.set_cache <- function(cache) {
if (isTRUE(cache)) {
if (!dir.exists(manage_cache$cache_path_get())) {
manage_cache$mkdir()
}
cache_dir <- manage_cache$cache_path_get()
} else {
cache_dir <- tempdir()
}
return(cache_dir)
}
#' @noRd
# Check states for précis and ag bulletin, use fuzzy matching
.check_states <- function(state) {
states <- c(
"ACT",
"NSW",
"NT",
"QLD",
"SA",
"TAS",
"VIC",
"WA",
"Canberra",
"New South Wales",
"Northern Territory",
"Queensland",
"South Australia",
"Tasmania",
"Victoria",
"Western Australia",
"Australia",
"AU",
"AUS",
"Oz"
)
if (state %in% states) {
the_state <- toupper(state)
return(the_state)
} else {
likely_states <- agrep(pattern = state,
x = states,
value = TRUE)
if (length(likely_states) == 1) {
the_state <- toupper(likely_states)
message(
paste0(
"\nUsing state = ",
likely_states,
".\n",
"If this is not what you intended, please check your entry."
)
)
return(the_state)
} else if (length(likely_states) == 0) {
stop(
"\nA state or territory matching what you entered was not found.",
"Please check and try again.\n"
)
}
}
if (length(likely_states) > 1) {
message(
"Multiple states match state.",
"'\ndid you mean:\n\tstate = '",
paste(likely_states[1],
"or",
likely_states[2],
"or",
likely_states[3]),
"'?"
)
}
}
#' convert_state
#'
#' Convert state to standard abbreviation
#' @noRd
convert_state <- function(state) {
  # Normalise the input: drop spaces and punctuation, lower-case, and keep
  # only the first two characters as a lookup key.
  key <- gsub(" ", "", state)
  key <- substring(gsub("[[:punct:]]", "", tolower(key)), 1, 2)
  # Two-letter keys -> standard abbreviations. pmatch() also accepts a
  # unique one-letter prefix (e.g. "v" -> VIC, "t" -> TAS); key order is
  # significant for partial matching and must not be changed.
  lookup <- c(
    ne = "NSW", ns = "NSW",
    vi = "VIC", v  = "VIC",
    ql = "QLD", qe = "QLD", q = "QLD",
    wa = "WA",  we = "WA",  w = "WA",
    s  = "SA",  sa = "SA",  so = "SA",
    ta = "TAS", t  = "TAS",
    ac = "ACT",
    no = "NT",  nt = "NT"
  )
  out <- unname(lookup[pmatch(key, names(lookup))])
  if (any(is.na(out)))
    stop("Unable to determine state")
  return(out)
}
#' Parse areas for précis forecasts
#'
#' @param x a précis forecast object
#'
#' @return a data.frame of forecast areas and aac codes
#' @keywords internal
#' @author Adam H Sparks, \email{adamhspark@@s@gmail.com}
#' @noRd
# get the data from areas --------------------------------------------------
.parse_areas <- function(x) {
aac <- as.character(xml2::xml_attr(x, "aac"))
# get xml children for the forecast (there are seven of these for each area)
forecast_periods <- xml2::xml_children(x)
sub_out <-
lapply(X = forecast_periods, FUN = .extract_values)
sub_out <- do.call(rbind, sub_out)
sub_out <- cbind(aac, sub_out)
return(sub_out)
}
#' extract the values of the forecast items
#'
#' @param y précis forecast values
#'
#' @return a data.frame of forecast values
#' @keywords internal
#' @author Adam H Sparks, \email{adamhsparks@gmail.com}
#' @noRd
.extract_values <- function(y) {
values <- xml2::xml_children(y)
attrs <- unlist(as.character(xml2::xml_attrs(values)))
values <- unlist(as.character(xml2::xml_contents(values)))
time_period <- unlist(t(as.data.frame(xml2::xml_attrs(y))))
time_period <-
time_period[rep(seq_len(nrow(time_period)), each = length(attrs)), ]
sub_out <- cbind(time_period, attrs, values)
row.names(sub_out) <- NULL
return(sub_out)
}
#' Get latest historical station metadata
#'
#' Fetches BOM metadata for checking historical record availability. Also can be
#' used to return the metadata if user desires.
#'
#' @md
#'
#' @return A data frame of metadata for BOM historical records
#' @keywords internal
#' @author Adam H. Sparks, \email{adamhsparks@@gmail.com}
#' @noRd
.get_ncc <- function() {
# CRAN NOTE avoidance
site <- name <- lat <- lon <- start_month <- #nocov start
start_year <- end_month <- end_year <- years <- percent <- AWS <-
start <- end <- ncc_obs_code <- site <- NULL #nocov end
base_url <- "http://www.bom.gov.au/climate/data/lists_by_element/"
rain <- paste0(base_url, "alphaAUS_136.txt")
tmax <- paste0(base_url, "alphaAUS_122.txt")
tmin <- paste0(base_url, "alphaAUS_123.txt")
solar <- paste0(base_url, "alphaAUS_193.txt")
weather <- c(rain, tmax, tmin, solar)
names(weather) <- c("rain", "tmax", "tmin", "solar")
ncc_codes <- vector(mode = "list", length = length(weather))
names(ncc_codes) <- names(weather)
for (i in seq_along(weather)) {
ncc_obs_code <- substr(weather[i],
nchar(weather[i]) - 6,
nchar(weather[i]) - 4)
ncc <-
readr::read_table(
weather[i],
skip = 4,
col_names = c(
"site",
"name",
"lat",
"lon",
"start_month",
"start_year",
"end_month",
"end_year",
"years",
"percent",
"AWS"
),
col_types = c(
site = readr::col_integer(),
name = readr::col_character(),
lat = readr::col_double(),
lon = readr::col_double(),
start_month = readr::col_character(),
start_year = readr::col_character(),
end_month = readr::col_character(),
end_year = readr::col_character(),
years = readr::col_double(),
percent = readr::col_integer(),
AWS = readr::col_character()
),
na = ""
)
# trim the end of the rows off that have extra info that's not in columns
nrows <- nrow(ncc) - 7
ncc <- ncc[1:nrows, ]
# unite month and year, convert to a date and add ncc_obs_code
ncc <-
ncc %>%
tidyr::unite(start, start_month, start_year, sep = "-") %>%
tidyr::unite(end, end_month, end_year, sep = "-") %>%
dplyr::mutate(start = lubridate::dmy(paste0("01-", start))) %>%
dplyr::mutate(end = lubridate::dmy(paste0("01-", end))) %>%
dplyr::mutate(ncc_obs_code = ncc_obs_code)
ncc_codes[[i]] <- ncc
}
dplyr::bind_rows(ncc_codes)
}
#' Identify URL of historical observations resources
#'
#' BOM data is available via URL endpoints but the arguments are not (well)
#' documented. This function first obtains an auxilliary data file for the given
#' station/measurement type which contains the remaining value `p_c`. It then
#' constructs the approriate resource URL.
#'
#' @md
#' @param site site ID.
#' @param code measurement type. See internals of [get_historical].
#' @importFrom httr GET content
#'
#' @return URL of the historical observation resource
#' @keywords internal
#' @author Jonathan Carroll, \email{rpkg@@jcarroll.com.au}
#' @noRd
.get_zip_url <- function(site, code = 122) {
url1 <-
paste0(
"http://www.bom.gov.au/jsp/ncc/cdio/weatherData/av?p_stn_num=",
site,
"&p_display_type=availableYears&p_nccObsCode=",
code
)
raw <- httr::content(httr::GET(url1), "text")
if (grepl("BUREAU FOOTER", raw))
stop("Error in retrieving resource identifiers.")
pc <- sub("^.*:", "", raw)
url2 <-
paste0(
"http://www.bom.gov.au/jsp/ncc/cdio/weatherData/av?p_display_type=dailyZippedDataFile&p_stn_num=",
site,
"&p_c=",
pc,
"&p_nccObsCode=",
code
)
url2
}
#' Download a BOM Data .zip File and Load into Session
#'
#' @param url URL of zip file to be downloaded/extracted/loaded.
#' @importFrom utils download.file unzip read.csv
#'
#' @return data loaded from the zip file (a data.frame)
#' @keywords internal
#' @author Jonathan Carroll, \email{rpkg@@jcarroll.com.au}
#' @noRd
.get_zip_and_load <- function(url) {
  tmp <- tempfile(fileext = ".zip")
  # Robustness fix: always remove the downloaded archive, even when unzip()
  # or read.csv() fails part-way (the original unlink() was skipped on error).
  on.exit(unlink(tmp), add = TRUE)
  utils::download.file(url, tmp, mode = "wb")
  zipped <- utils::unzip(tmp, exdir = dirname(tmp))
  datfile <- grep("Data.csv", zipped, value = TRUE)
  # Robustness fix: fail with a clear message if the archive does not contain
  # exactly one "*Data.csv" file, instead of passing a zero- or multi-length
  # vector to read.csv() and getting an opaque error.
  if (length(datfile) != 1) {
    stop("Expected exactly one 'Data.csv' file in the archive from ", url,
         " but found ", length(datfile), call. = FALSE)
  }
  message("Data saved as ", datfile)
  dat <- utils::read.csv(datfile, header = TRUE)
  dat
}
|
# S3 generic: convert an R value to a Sheets CellData payload. Methods exist
# below for logical, character, numeric, list, factor, Date and POSIXct;
# `.na` optionally overrides the payload used for NA cells.
as_CellData <- function(x, .na = NULL) {
  UseMethod("as_CellData")
}
#' @export
as_CellData.default <- function(x, .na = NULL) {
  # Fallback: no method registered for this class -- raise an informative
  # error via the package's glue-based stop helper.
  stop_glue(
    "Don't know how to make an instance of {bt('CellData')} from something of ",
    "class {class_collapse(x)}."
  )
}
# I want to centralize what value we send for NA, even though -- for now, at
# least -- I have not exposed this in user-facing functions. You could imagine
# generalizing to allow user to request we send #N/A instead of an empty cell.
# More about #N/A:
# https://support.google.com/docs/answer/3093359?hl=en
# Currently this is sort of possible:
# as_CellData(c(TRUE, FALSE, NA), .na = list(formulaValue = "=NA()"))
empty_cell <- function(..., .na = NULL) {
  # Default payload for an NA cell: an explicitly empty userEnteredValue.
  # A non-NULL `.na` overrides it wholesale.
  if (is.null(.na)) {
    list(userEnteredValue = NA)
  } else {
    .na
  }
}
# Wrap each element of `x` as a CellData payload under the key `val_type`
# (e.g. "boolValue", "stringValue", "numberValue"); NA elements become empty
# cells (or the `.na` override).
cell_data <- function(x, val_type, .na = NULL) {
  # Force evaluation so `val_type` is captured before lazy use inside `f`.
  force(val_type)
  f<- function(y, ...) {
    list(userEnteredValue = rlang::list2(!!val_type := y))
  }
  purrr::map_if(x, rlang::is_na, empty_cell, .na = .na, .else = f)
}
# Possibly premature worrying, but I'm not using new("CellData", ...) because
# storing the tidy schema as an attribute for each cell seems excessive.
# That would look something like this for logical:
# map(x, ~ new("CellData", userEnteredValue = list(boolValue = .x)))
#' @export
as_CellData.NULL <- function(x, .na = NULL) {
  # Bug fix: `.na` must be passed by name -- empty_cell()'s signature is
  # function(..., .na = NULL), so the positional call empty_cell(.na) fell
  # into `...` and the caller's `.na` override was silently ignored.
  empty_cell(.na = .na)
}
#' @export
as_CellData.logical <- function(x, .na = NULL) {
  # Logicals map onto the Sheets "boolValue" field.
  cell_data(x, val_type = "boolValue", .na = .na)
}
#' @export
as_CellData.character <- function(x, .na = NULL) {
  # Character vectors map onto "stringValue".
  cell_data(x, val_type = "stringValue", .na = .na)
}
#' @export
as_CellData.numeric <- function(x, .na = NULL) {
  # Integers and doubles both map onto "numberValue".
  cell_data(x, val_type = "numberValue", .na = .na)
}
#' @export
as_CellData.list <- function(x, .na = NULL) {
  # Recurse element-wise; each element is dispatched on its own class.
  map(x, as_CellData, .na = .na)
}
#' @export
as_CellData.factor <- function(x, .na = NULL) {
  # Factors are sent as their labels, not their integer codes.
  as_CellData(as.character(x), .na = .na)
}
# Attach a numberFormat (e.g. a date pattern) to an existing CellData payload.
# `fmt` is a named list; rlang::list2(!!!fmt) splices it into a fresh list.
add_format <- function(x, fmt) {
  x[["userEnteredFormat"]] <- list(numberFormat = rlang::list2(!!!fmt))
  x
}
#' @export
as_CellData.Date <- function(x, .na = NULL) {
  # Sheets serial dates count days from 1899-12-30; R Dates count days from
  # 1970-01-01, so shift by the offset between the two epochs.
  # 25569 = DATEVALUE("1970-01-01), i.e. Unix epoch as a serial date, when the
  # date origin is December 30th 1899
  x <- unclass(x) + 25569
  x <- cell_data(x, val_type = "numberValue", .na = .na)
  # Tag each cell so Sheets renders the number as an ISO date.
  map(x, add_format, fmt = list(type = "DATE", pattern = "yyyy-mm-dd"))
}
#' @export
as_CellData.POSIXct <- function(x, .na = NULL) {
  # POSIXct is seconds since the Unix epoch; convert to fractional serial
  # days and shift to the Sheets 1899-12-30 origin.
  # 86400 = 60 * 60 * 24 = number of seconds in a day
  x <- (unclass(x) / 86400) + 25569
  x <- cell_data(x, val_type = "numberValue", .na = .na)
  # Tag each cell so Sheets renders the number as a date-time.
  map(
    x,
    add_format,
    fmt = list(type = "DATE_TIME", pattern = "yyyy-mm-ddThh:mm")
  )
}
|
/R/schema_CellData.R
|
permissive
|
MarkEdmondson1234/googlesheets4
|
R
| false
| false
| 2,635
|
r
|
as_CellData <- function(x, .na = NULL) {
UseMethod("as_CellData")
}
#' @export
as_CellData.default <- function(x, .na = NULL) {
stop_glue(
"Don't know how to make an instance of {bt('CellData')} from something of ",
"class {class_collapse(x)}."
)
}
# I want to centralize what value we send for NA, even though -- for now, at
# least -- I have not exposed this in user-facing functions. You could imagine
# generalizing to allow user to request we send #N/A instead of an empty cell.
# More about #N/A:
# https://support.google.com/docs/answer/3093359?hl=en
# Currently this is sort of possible:
# as_CellData(c(TRUE, FALSE, NA), .na = list(formulaValue = "=NA()"))
empty_cell <- function(..., .na = NULL) {
.na %||% list(userEnteredValue = NA)
}
cell_data <- function(x, val_type, .na = NULL) {
force(val_type)
f<- function(y, ...) {
list(userEnteredValue = rlang::list2(!!val_type := y))
}
purrr::map_if(x, rlang::is_na, empty_cell, .na = .na, .else = f)
}
# Possibly premature worrying, but I'm not using new("CellData", ...) because
# storing the tidy schema as an attribute for each cell seems excessive.
# That would look something like this for logical:
# map(x, ~ new("CellData", userEnteredValue = list(boolValue = .x)))
#' @export
as_CellData.NULL <- function(x, .na = NULL) {
empty_cell(.na)
}
#' @export
as_CellData.logical <- function(x, .na = NULL) {
cell_data(x, val_type = "boolValue", .na = .na)
}
#' @export
as_CellData.character <- function(x, .na = NULL) {
cell_data(x, val_type = "stringValue", .na = .na)
}
#' @export
as_CellData.numeric <- function(x, .na = NULL) {
cell_data(x, val_type = "numberValue", .na = .na)
}
#' @export
as_CellData.list <- function(x, .na = NULL) {
map(x, as_CellData, .na = .na)
}
#' @export
as_CellData.factor <- function(x, .na = NULL) {
as_CellData(as.character(x), .na = .na)
}
add_format <- function(x, fmt) {
x[["userEnteredFormat"]] <- list(numberFormat = rlang::list2(!!!fmt))
x
}
#' @export
as_CellData.Date <- function(x, .na = NULL) {
# 25569 = DATEVALUE("1970-01-01), i.e. Unix epoch as a serial date, when the
# date origin is December 30th 1899
x <- unclass(x) + 25569
x <- cell_data(x, val_type = "numberValue", .na = .na)
map(x, add_format, fmt = list(type = "DATE", pattern = "yyyy-mm-dd"))
}
#' @export
as_CellData.POSIXct <- function(x, .na = NULL) {
# 86400 = 60 * 60 * 24 = number of seconds in a day
x <- (unclass(x) / 86400) + 25569
x <- cell_data(x, val_type = "numberValue", .na = .na)
map(
x,
add_format,
fmt = list(type = "DATE_TIME", pattern = "yyyy-mm-ddThh:mm")
)
}
|
## This line of code will install the library reshape2 if not
## installed on your computer:
if(!require(reshape2)){install.packages("reshape2")}
library(reshape2) ### calling the library reshape2
## set your working directory or the folder where you have the file:
## NOTE(review): machine-specific path -- change this to your own folder.
setwd("C:/Users/Christian/Dropbox/")
data<-read.csv("off site.csv") ## uploading the file
## melt function from the reshape2 library: reshapes wide -> long format,
## keeping the id.vars columns fixed and stacking the rest.
### melt (dataset, id.vars=c("here all the columns you want to keep"))
data2<-melt(data, id.vars=c("Block", "Trt", "Pen"))
## exports the reshaped data as a CSV file (openable in Excel):
write.csv(data2, file="off site long.csv")
|
/reshape.R
|
no_license
|
cdramirezcamba/Tableu-example
|
R
| false
| false
| 636
|
r
|
## This line of code will install the library reshape2 if not
## installed on your computer:
if(!require(reshape2)){install.packages("reshape2")}
library(reshape2) ### calling the library reshape2
## set your working directory or the folder where you have the file:
setwd("C:/Users/Christian/Dropbox/")
data<-read.csv("off site.csv") ## uploading the file
## melt function from the reshape2 library.
### melt (dataset, id.vars=c("here all the columns you want to keep"))
data2<-melt(data, id.vars=c("Block", "Trt", "Pen"))
## exports the data as an excel file:
write.csv(data2, file="off site long.csv")
|
#!/usr/bin/env Rscript --vanilla
# Command-line wrapper that regenerates a package's Rd documentation,
# preferring devtools::document() when devtools is installed and falling
# back to roxygen2::roxygenize() otherwise.
suppressPackageStartupMessages(library("roxygen2"))
suppressPackageStartupMessages(library("optparse"))
# specify our desired options in a list
# by default OptionParser will add an help option equivalent to
# make_option(c("-h", "--help"), action="store_true", default=FALSE,
# help="Show this help message and exit")
option_list <- list(
  make_option(c("-v", "--verbose"), action="store_true", default=TRUE,
              help="Print extra output [default]"),
  make_option(c("-q", "--quietly"), action="store_false",
              dest="verbose", help="Print little output"),
  make_option(c("-c", "--clean"), action="store_true", default=FALSE,
              dest="clean", help="Clean old Rd files first? (requires devtools)"),
  make_option(c("-p", "--package.dir"), type="character", default=".",
              dest='path',
              help="path to package [default %default]",
              metavar="path")
)
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
opt <- parse_args(OptionParser(option_list=option_list))
# print some progress messages to stderr if "quietly" wasn't requested
if ( opt$verbose ) {
  write(paste("Processing package", opt$path, "...\n"), stderr())
}
# require() both tests for devtools and attaches it, so the old redundant
# library("devtools") call is dropped; its startup messages are suppressed
# for consistency with the loads at the top of the script.
if (suppressPackageStartupMessages(require(devtools))) {
  document(opt$path, clean=opt$clean)
} else {
  roxygenize(opt$path, clean=opt$clean)
}
cat("\n")
|
/mosaicApps/mosaicManipOriginal/bin/roxy
|
no_license
|
dtkaplan/MOSAIC-Summer-2015
|
R
| false
| false
| 1,436
|
#!/usr/bin/env Rscript --vanilla
suppressPackageStartupMessages(library("roxygen2"))
suppressPackageStartupMessages(library("optparse"))
# specify our desired options in a list
# by default OptionParser will add an help option equivalent to
# make_option(c("-h", "--help"), action="store_true", default=FALSE,
# help="Show this help message and exit")
option_list <- list(
make_option(c("-v", "--verbose"), action="store_true", default=TRUE,
help="Print extra output [default]"),
make_option(c("-q", "--quietly"), action="store_false",
dest="verbose", help="Print little output"),
make_option(c("-c", "--clean"), action="store_true", default=FALSE,
dest="clean", help="Clean old Rd files first? (requires devtools)"),
make_option(c("-p", "--package.dir"), type="character", default=".",
dest='path',
help="path to package [default %default]",
metavar="path")
)
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
opt <- parse_args(OptionParser(option_list=option_list))
# print some progress messages to stderr if "quietly" wasn't requested
if ( opt$verbose ) {
write(paste("Processing package", opt$path, "...\n"), stderr())
}
if (require(devtools)) {
suppressPackageStartupMessages(library("devtools"))
document(opt$path, clean=opt$clean)
} else {
roxygenize(opt$path,clean=opt$clean)
}
cat("\n")
|
|
# Boxplot of Pokemon speed by generation, split by legendary status,
# written to output/output.pdf. Paths are relative to the project root.
library(ggplot2)
library(grid)
######## my path is relative to the project folder (hwq_chuw) ,and system is Mac OS #######
## Please open the project file ##
## read data ----
inputPath <- "data/pokemon.csv"
pData <- read.csv(inputPath)
## initiate output file
outputPath <- "output/output.pdf"
pdf(outputPath)
## colors ----
titleColor <- "black"
backgroundColor <- '#faeec8'
panelBackgroundColor <- '#fdd45f'
falseBoxColor <- '#FFCCCC'
trueBoxColor <- '#99CCFF'
## author info ----
# Author tag drawn as a custom annotation in the top-right plot corner.
author = grobTree(textGrob("@Chu Wu", x=0.86, y=0.96, hjust=0,
gp=gpar(col="black", fontsize=7, fontface="italic")))
## main ----
# Boxes are grouped by generation on the x axis and filled by `legendary`;
# outliers are drawn as small asterisks (shape 8).
base<-ggplot(data = pData, aes(x=factor(generation), y=speed)) +
geom_boxplot(aes(fill=legendary), outlier.shape=8, outlier.size=1) +
labs(x="Generation",y="Speed",title="Pokemon Speed In Different Generation")+
scale_x_discrete(label=function(x){return(paste("G",x,sep = ""))}) +
scale_fill_manual(values=c(falseBoxColor, trueBoxColor))+
# NOTE(review): no colour aesthetic is mapped, so this colour scale appears
# to have no effect — confirm whether it can be removed.
scale_color_manual(values=c(backgroundColor))+
theme(axis.title.x = element_text(colour = titleColor,size = 13,face = "bold",margin = margin(10,0,20,0)),
axis.title.y = element_text(colour = titleColor,size = 13,face = "bold",margin = margin(0,10,0,20)),
plot.title = element_text(colour = titleColor,hjust = 0.5,size=16, face="bold", margin = margin(20, 0, 15, 0)),
legend.title = element_text(colour = titleColor,size=11, face="bold",margin = margin(0,20,0,0)),
legend.background=element_rect(fill=backgroundColor),
legend.key = element_blank(),
panel.background = element_rect(fill=panelBackgroundColor),
panel.border = element_rect(fill = NA,color=titleColor,linetype=3),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = backgroundColor))
# Render the plot with the author annotation into the open pdf device.
base + annotation_custom(author)
## output ----
dev.off()
|
/hw1_chuw/R/boxplot.R
|
no_license
|
chuwu1995/R_analytic
|
R
| false
| false
| 1,947
|
r
|
library(ggplot2)
library(grid)
######## my path is relative to the project folder (hwq_chuw) ,and system is Mac OS #######
## Please open the project file ##
## read data ----
inputPath <- "data/pokemon.csv"
pData <- read.csv(inputPath)
## initiate output file
outputPath <- "output/output.pdf"
pdf(outputPath)
## colors ----
titleColor <- "black"
backgroundColor <- '#faeec8'
panelBackgroundColor <- '#fdd45f'
falseBoxColor <- '#FFCCCC'
trueBoxColor <- '#99CCFF'
## author info ----
author = grobTree(textGrob("@Chu Wu", x=0.86, y=0.96, hjust=0,
gp=gpar(col="black", fontsize=7, fontface="italic")))
## main ----
base<-ggplot(data = pData, aes(x=factor(generation), y=speed)) +
geom_boxplot(aes(fill=legendary), outlier.shape=8, outlier.size=1) +
labs(x="Generation",y="Speed",title="Pokemon Speed In Different Generation")+
scale_x_discrete(label=function(x){return(paste("G",x,sep = ""))}) +
scale_fill_manual(values=c(falseBoxColor, trueBoxColor))+
scale_color_manual(values=c(backgroundColor))+
theme(axis.title.x = element_text(colour = titleColor,size = 13,face = "bold",margin = margin(10,0,20,0)),
axis.title.y = element_text(colour = titleColor,size = 13,face = "bold",margin = margin(0,10,0,20)),
plot.title = element_text(colour = titleColor,hjust = 0.5,size=16, face="bold", margin = margin(20, 0, 15, 0)),
legend.title = element_text(colour = titleColor,size=11, face="bold",margin = margin(0,20,0,0)),
legend.background=element_rect(fill=backgroundColor),
legend.key = element_blank(),
panel.background = element_rect(fill=panelBackgroundColor),
panel.border = element_rect(fill = NA,color=titleColor,linetype=3),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = backgroundColor))
base + annotation_custom(author)
## output ----
dev.off()
|
#' Select top n rows (by value).
#'
#' This is a convenient wrapper that uses \code{\link{filter}} and
#' \code{\link{min_rank}} to select the top n entries in each group, ordered
#' by \code{wt}.
#'
#' @param x a \code{\link{tbl}} to filter
#' @param n number of rows to return. If \code{x} is grouped, this is
#'   the number of rows per group. May include more than \code{n} if there
#'   are ties.
#' @param wt (Optional). The variable to use for ordering. If not specified,
#'   defaults to the last variable in the tbl.
#' @export
#' @examples
#' # Find 10 players with most games
#' if (require("Lahman")) {
#' players <- group_by(tbl_df(Batting), playerID)
#' games <- tally(players, G)
#' top_n(games, 10, n)
#'
#' # A little nicer with %>%
#' tbl_df(Batting) %>%
#'   group_by(playerID) %>%
#'   tally(G) %>%
#'   top_n(10)
#'
#' # Find year with most games for each player
#' tbl_df(Batting) %>% group_by(playerID) %>% top_n(1, G)
#' }
top_n <- function(x, n, wt) {
if (missing(wt)) {
# No ordering column supplied: default to the last column of the tbl and
# tell the user which one was picked.
vars <- tbl_vars(x)
message("Selecting by ", vars[length(vars)])
wt <- as.name(vars[length(vars)])
}
# Build the unevaluated call filter(x, min_rank(desc(wt)) <= n), splicing in
# `n` and the (possibly bare-name) `wt`, then evaluate it. This NSE plumbing
# lets callers pass `wt` as an unquoted column name.
call <- substitute(filter(x, min_rank(desc(wt)) <= n),
list(n = n, wt = substitute(wt)))
eval(call)
}
|
/R/top-n.R
|
no_license
|
mattwg/dplyr
|
R
| false
| false
| 1,225
|
r
|
#' Select top n rows (by value).
#'
#' This is a convenient wrapper that uses \code{\link{filter}} and
#' \code{\link{min_rank}} to select the top n entries in each group, ordered
#' by \code{wt}.
#'
#' @param x a \code{\link{tbl}} to filter
#' @param n number of rows to return. If \code{x} is grouped, this is
#' the number of rows per group. May include more than \code{n} if there
#' are ties.
#' @param wt (Optional). The variable to use for ordering. If not specified,
#' defaults to the last variable in the tbl.
#' @export
#' @examples
#' # Find 10 players with most games
#' if (require("Lahman")) {
#' players <- group_by(tbl_df(Batting), playerID)
#' games <- tally(players, G)
#' top_n(games, 10, n)
#'
#' # A little nicer with %>%
#' tbl_df(Batting) %>%
#' group_by(playerID) %>%
#' tally(G) %>%
#' top_n(10)
#'
#' # Find year with most games for each player
#' tbl_df(Batting) %>% group_by(playerID) %>% top_n(1, G)
#' }
top_n <- function(x, n, wt) {
if (missing(wt)) {
vars <- tbl_vars(x)
message("Selecting by ", vars[length(vars)])
wt <- as.name(vars[length(vars)])
}
call <- substitute(filter(x, min_rank(desc(wt)) <= n),
list(n = n, wt = substitute(wt)))
eval(call)
}
|
# "`-''-/").___..--''"`-._
# (`6_ 6 ) `-. ( ).`-.__.`) WE ARE ...
# (_Y_.)' ._ ) `._ `. ``-..-' PENN STATE!
# _ ..`--'_..-_/ /--'_.' ,'
# (il),-'' (li),' ((!.-'
#
# Author: Benjamin Tate <bgt5073@psu.edu>
# Weiming Hu <weiming@psu.edu>
#
# Geoinformatics and Earth Observation Laboratory (http://geolab.psu.edu)
# Department of Geography and Institute for CyberScience
# The Pennsylvania State University
#
library(RColorBrewer)
library(leaflet)
library(raster)
library(shiny)
library(sp)
# Define the function to draw arrows
# Referenced from shape::Arrowhead
#
# Builds one arrowhead polygon per (x0, y0) point, rotated by `angle`
# (degrees) and scaled by arr.length/arr.width, and returns them all as a
# single SpatialPolygons object (sp::spPolygons). The template shape is
# chosen by arr.type: "curved", "triangle", "circle", or "ellipse".
# NOTE(review): coordinates are scaled via par("usr")/par("pin"), so a plot
# device must already be open when this is called.
# NOTE(review): lcol/lty/arr.col/arr.lwd are recycled below but never used in
# the returned polygons — they appear to be leftovers from shape::Arrowhead.
arrow.shapes <- function (
x0, y0, angle = 0, arr.length = 0.4, arr.width = arr.length/2,
arr.adj = 0.5, arr.type = "curved", lcol = "black", lty = 1,
arr.col = lcol, arr.lwd = 2, npoint = 5, ...) {
# NOTE(review): require() inside a function attaches packages as a side
# effect; these are already loaded at the top of the app.
require(sp)
require(raster)
# Build the unit-scale arrowhead template `rr` for the requested type.
if (arr.type == "curved") {
rad <- 0.7
len <- 0.25 * pi
mid <- c(0, rad)
x <- seq(1.5 * pi + len, 1.5 * pi, length.out = npoint)
rr <- cbind(mid[1] - rad * cos(x), mid[2] + rad * sin(x))
mid <- c(0, -rad)
x <- rev(x)
rr <- rbind(rr, cbind(mid[1] - rad * cos(x), mid[2] -
rad * sin(x)))
mid <- c(rr[nrow(rr), 1], 0)
rd <- rr[1, 2]
x <- seq(pi/2, 3 * pi/2, length.out = 3 * npoint)
rr <- rbind(rr, cbind(mid[1] - rd * 0.25 * cos(x), mid[2] -
rd * sin(x)))
rr[, 1] <- rr[, 1] * 2.6
rr[, 2] <- rr[, 2] * 3.45
}
else if (arr.type == "triangle") {
x <- c(-0.2, 0, -0.2)
y <- c(-0.1, 0, 0.1)
rr <- 6.22 * cbind(x, y)
}
else if (arr.type %in% c("circle", "ellipse")) {
if (arr.type == "circle")
arr.width = arr.length
rad <- 0.1
mid <- c(-rad, 0)
x <- seq(0, 2 * pi, length.out = 15 * npoint)
rr <- 6.22 * cbind(mid[1] + rad * sin(x), mid[2] + rad *
cos(x))
}
# Shift the template horizontally according to the anchor point
# (arr.adj = 0.5 centers it; 0 anchors it at its left edge).
if (arr.adj == 0.5)
rr[, 1] <- rr[, 1] - min(rr[, 1])/2
if (arr.adj == 0)
rr[, 1] <- rr[, 1] - min(rr[, 1])
# Per-cm scaling factors derived from the current device's user coordinate
# range (par("usr")) and plot size in cm (par("pin") * 2.54).
user <- par("usr")
pcm <- par("pin") * 2.54
sy <- (user[4] - user[3])/pcm[2]
sx <- (user[2] - user[1])/pcm[1]
# Recycle all per-arrow parameters to a common length.
nr <- max(length(x0), length(y0), length(angle), length(arr.length),
length(arr.width), length(lcol), length(lty), length(arr.col))
if (nr > 1) {
x0 <- rep(x0, length.out = nr)
y0 <- rep(y0, length.out = nr)
angle <- rep(angle, length.out = nr)
arr.length <- rep(arr.length, length.out = nr)
arr.width <- rep(arr.width, length.out = nr)
lcol <- rep(lcol, length.out = nr)
lty <- rep(lty, length.out = nr)
arr.col <- rep(arr.col, length.out = nr)
}
RR <- rr
mat.l <- list()
# For each arrow: scale the template, rotate by angle[i], convert to user
# coordinates, and translate to (x0[i], y0[i]).
for (i in 1:nr) {
dx <- rr[, 1] * arr.length[i]
dy <- rr[, 2] * arr.width[i]
angpi <- angle[i]/180 * pi
cosa <- cos(angpi)
sina <- sin(angpi)
RR[, 1] <- cosa * dx - sina * dy
RR[, 2] <- sina * dx + cosa * dy
RR[, 1] <- x0[i] + RR[, 1] * sx
RR[, 2] <- y0[i] + RR[, 2] * sy
mat.l <- c(mat.l, list(RR))
}
# Bundle all arrowhead rings into one SpatialPolygons object.
spplys <- spPolygons(mat.l)
return(spplys)
}
# Define basic visualization parameters
zoom <- 5
rast.alpha <- 0.7
center.x <- -77.84483
center.y <- 38.08232
# Get current date
current.date <- Sys.Date()
current.date.str <- format(current.date, format = '%Y%m%d')
# This regex will be used to match different components in a file name
regex <- '^(\\d+)-(.*?)-(\\d+)\\.tif$'
# Get all available tiff files in the current folder
all.files <- list.files(path = '.', pattern = 'tif')
# Extract the available variables for select boxes
select.dates <- unique(gsub(regex, '\\1', all.files))
select.variables <- unique(gsub(regex, '\\2', all.files))
select.flts <- sort(as.numeric(unique(gsub(
regex, '\\3', all.files))) / 3600)
# Whether to create an animation for the slider input
# animate <- F
animate <- animationOptions(interval = 1000, loop = T)
ui <- fixedPage(
titlePanel("Operational Analog Ensemble"),
sidebarLayout(
sidebarPanel(
selectInput(
"date", label = h3("Date"),
choices = select.dates, selected = 1),
selectInput(
"variable", label = h3("Weather variable"),
choices = select.variables, selected = 1),
sliderInput("flt", label = h3("Lead time (h)"),
min = min(select.flts),
max = max(select.flts),
value = min(select.flts),
animate = animate),
width = 3
),
mainPanel(
leafletOutput("weatherMap", height = 600))
)
)
# Server logic: rebuilds the GeoTIFF file name from the three inputs, warns
# when that combination does not exist on disk, renders the base leaflet map
# once, and reactively swaps in either a raster layer (scalar variables) or
# arrowhead polygons (wind-direction variables).
shinyserver <- function(input, output) {
# Define the image file path to read
# File naming convention: "<date>-<variable>-<lead time in seconds>.tif".
file.tif <- reactive({
paste0(paste(
input$date, input$variable,
as.numeric(input$flt) * 3600,
sep = '-'), '.tif')
})
# Notify the user when the selected date/variable/lead-time file is missing.
observe({
if (!file.exists(file.tif())) {
showNotification('This combination is not available!')
}
})
# Define the base map that won't change
output$weatherMap <- renderLeaflet({
leaflet() %>%
setView(
lng = center.x,
lat = center.y,
zoom = zoom) %>%
addTiles()
})
observe({
# Convert reactiveExpr to the actual object
file.tif <- file.tif()
if (file.exists(file.tif)) {
# Read the file as a raster
rast <- raster(file.tif)
# This is the color function
# NOTE(review): T/F literals below would be safer as TRUE/FALSE.
pal <- colorNumeric(
palette = 'RdYlBu',
domain = values(rast),
na.color = NA,
reverse = T)
if (grepl('Direction', file.tif)) {
# Direction variables: draw one rotated arrowhead per raster cell,
# using the cell value (degrees) as the rotation angle.
cell.centers <- coordinates(rast)
spplys <- arrow.shapes(
cell.centers[, 1], cell.centers[, 2],
values(rast), arr.length = 2)
leafletProxy('weatherMap') %>%
clearImages() %>%
clearShapes() %>%
removeControl(layerId = 'legend') %>%
addPolygons(data = spplys,
fillColor = 'transparent',
weight = 1, color = 'black')
} else {
# Scalar variables: overlay the raster image plus a colour legend
# titled with the variable name extracted from the file name.
leafletProxy('weatherMap') %>%
clearImages() %>%
clearShapes() %>%
removeControl(layerId = 'legend') %>%
addRasterImage(
rast, colors = pal,
opacity = rast.alpha,
project = F, layerId = 'raster') %>%
addLegend(
pal = pal, layerId = 'legend',
values = values(rast),
title = gsub(regex, '\\2', file.tif))
}
}
})
}
shinyApp(ui, shinyserver)
# Display a fixed legend.
# Wind
#
#
# Speed up the display.
# Smooth transition.
#
|
/data-process/app-data/app.R
|
permissive
|
Weiming-Hu/OperationalAnEn
|
R
| false
| false
| 6,078
|
r
|
# "`-''-/").___..--''"`-._
# (`6_ 6 ) `-. ( ).`-.__.`) WE ARE ...
# (_Y_.)' ._ ) `._ `. ``-..-' PENN STATE!
# _ ..`--'_..-_/ /--'_.' ,'
# (il),-'' (li),' ((!.-'
#
# Author: Benjamin Tate <bgt5073@psu.edu>
# Weiming Hu <weiming@psu.edu>
#
# Geoinformatics and Earth Observation Laboratory (http://geolab.psu.edu)
# Department of Geography and Institute for CyberScience
# The Pennsylvania State University
#
library(RColorBrewer)
library(leaflet)
library(raster)
library(shiny)
library(sp)
# Define the function to draw arrows
# Referenced from shape::Arrowhead
#
arrow.shapes <- function (
x0, y0, angle = 0, arr.length = 0.4, arr.width = arr.length/2,
arr.adj = 0.5, arr.type = "curved", lcol = "black", lty = 1,
arr.col = lcol, arr.lwd = 2, npoint = 5, ...) {
require(sp)
require(raster)
if (arr.type == "curved") {
rad <- 0.7
len <- 0.25 * pi
mid <- c(0, rad)
x <- seq(1.5 * pi + len, 1.5 * pi, length.out = npoint)
rr <- cbind(mid[1] - rad * cos(x), mid[2] + rad * sin(x))
mid <- c(0, -rad)
x <- rev(x)
rr <- rbind(rr, cbind(mid[1] - rad * cos(x), mid[2] -
rad * sin(x)))
mid <- c(rr[nrow(rr), 1], 0)
rd <- rr[1, 2]
x <- seq(pi/2, 3 * pi/2, length.out = 3 * npoint)
rr <- rbind(rr, cbind(mid[1] - rd * 0.25 * cos(x), mid[2] -
rd * sin(x)))
rr[, 1] <- rr[, 1] * 2.6
rr[, 2] <- rr[, 2] * 3.45
}
else if (arr.type == "triangle") {
x <- c(-0.2, 0, -0.2)
y <- c(-0.1, 0, 0.1)
rr <- 6.22 * cbind(x, y)
}
else if (arr.type %in% c("circle", "ellipse")) {
if (arr.type == "circle")
arr.width = arr.length
rad <- 0.1
mid <- c(-rad, 0)
x <- seq(0, 2 * pi, length.out = 15 * npoint)
rr <- 6.22 * cbind(mid[1] + rad * sin(x), mid[2] + rad *
cos(x))
}
if (arr.adj == 0.5)
rr[, 1] <- rr[, 1] - min(rr[, 1])/2
if (arr.adj == 0)
rr[, 1] <- rr[, 1] - min(rr[, 1])
user <- par("usr")
pcm <- par("pin") * 2.54
sy <- (user[4] - user[3])/pcm[2]
sx <- (user[2] - user[1])/pcm[1]
nr <- max(length(x0), length(y0), length(angle), length(arr.length),
length(arr.width), length(lcol), length(lty), length(arr.col))
if (nr > 1) {
x0 <- rep(x0, length.out = nr)
y0 <- rep(y0, length.out = nr)
angle <- rep(angle, length.out = nr)
arr.length <- rep(arr.length, length.out = nr)
arr.width <- rep(arr.width, length.out = nr)
lcol <- rep(lcol, length.out = nr)
lty <- rep(lty, length.out = nr)
arr.col <- rep(arr.col, length.out = nr)
}
RR <- rr
mat.l <- list()
for (i in 1:nr) {
dx <- rr[, 1] * arr.length[i]
dy <- rr[, 2] * arr.width[i]
angpi <- angle[i]/180 * pi
cosa <- cos(angpi)
sina <- sin(angpi)
RR[, 1] <- cosa * dx - sina * dy
RR[, 2] <- sina * dx + cosa * dy
RR[, 1] <- x0[i] + RR[, 1] * sx
RR[, 2] <- y0[i] + RR[, 2] * sy
mat.l <- c(mat.l, list(RR))
}
spplys <- spPolygons(mat.l)
return(spplys)
}
# Define basic visualization parameters
zoom <- 5
rast.alpha <- 0.7
center.x <- -77.84483
center.y <- 38.08232
# Get current date
current.date <- Sys.Date()
current.date.str <- format(current.date, format = '%Y%m%d')
# This regex will be used to match different components in a file name
regex <- '^(\\d+)-(.*?)-(\\d+)\\.tif$'
# Get all available tiff files in the current folder
all.files <- list.files(path = '.', pattern = 'tif')
# Extract the available variables for select boxes
select.dates <- unique(gsub(regex, '\\1', all.files))
select.variables <- unique(gsub(regex, '\\2', all.files))
select.flts <- sort(as.numeric(unique(gsub(
regex, '\\3', all.files))) / 3600)
# Whether to create an animation for the slider input
# animate <- F
animate <- animationOptions(interval = 1000, loop = T)
ui <- fixedPage(
titlePanel("Operational Analog Ensemble"),
sidebarLayout(
sidebarPanel(
selectInput(
"date", label = h3("Date"),
choices = select.dates, selected = 1),
selectInput(
"variable", label = h3("Weather variable"),
choices = select.variables, selected = 1),
sliderInput("flt", label = h3("Lead time (h)"),
min = min(select.flts),
max = max(select.flts),
value = min(select.flts),
animate = animate),
width = 3
),
mainPanel(
leafletOutput("weatherMap", height = 600))
)
)
shinyserver <- function(input, output) {
# Define the image file path to read
file.tif <- reactive({
paste0(paste(
input$date, input$variable,
as.numeric(input$flt) * 3600,
sep = '-'), '.tif')
})
observe({
if (!file.exists(file.tif())) {
showNotification('This combination is not available!')
}
})
# Define the base map that won't change
output$weatherMap <- renderLeaflet({
leaflet() %>%
setView(
lng = center.x,
lat = center.y,
zoom = zoom) %>%
addTiles()
})
observe({
# Convert reactiveExpr to the actual object
file.tif <- file.tif()
if (file.exists(file.tif)) {
# Read the file as a raster
rast <- raster(file.tif)
# This is the color function
pal <- colorNumeric(
palette = 'RdYlBu',
domain = values(rast),
na.color = NA,
reverse = T)
if (grepl('Direction', file.tif)) {
cell.centers <- coordinates(rast)
spplys <- arrow.shapes(
cell.centers[, 1], cell.centers[, 2],
values(rast), arr.length = 2)
leafletProxy('weatherMap') %>%
clearImages() %>%
clearShapes() %>%
removeControl(layerId = 'legend') %>%
addPolygons(data = spplys,
fillColor = 'transparent',
weight = 1, color = 'black')
} else {
leafletProxy('weatherMap') %>%
clearImages() %>%
clearShapes() %>%
removeControl(layerId = 'legend') %>%
addRasterImage(
rast, colors = pal,
opacity = rast.alpha,
project = F, layerId = 'raster') %>%
addLegend(
pal = pal, layerId = 'legend',
values = values(rast),
title = gsub(regex, '\\2', file.tif))
}
}
})
}
shinyApp(ui, shinyserver)
# Display a fixed legend.
# Wind
#
#
# Speed up the display.
# Smooth transition.
#
|
# Predict responses from a design matrix and a coefficient vector.
#
# @param Phi design matrix (n x p), including the intercept column.
# @param w   coefficient vector of length p.
# @return n x 1 matrix of fitted values Phi %*% w.
predict_func <- function(Phi, w) {
  Phi %*% w
}

# Mean squared error of the model's predictions against `label`.
error_func <- function(Phi, w, label) {
  mean((predict_func(Phi, w) - label)^2)
}

# Batch gradient descent for linear regression.
#
# @param train.data,test.data   predictor columns (no intercept column).
# @param train.label,test.label observed responses for each set.
# @param tau.max maximum number of iterations (rows in the weight history).
# @param eta     learning rate.
# @param epsilon training-error threshold that stops the descent early.
# @return the final estimated coefficient vector (intercept first).
bgd <- function(train.data, train.label, test.data, test.label, tau.max, eta, epsilon)
{
  # Per-iteration train/test error trace (kept for diagnostics).
  error <- data.frame('tau' = 1:tau.max)
  # Design matrix with an explicit intercept column X0.
  Phi <- as.matrix(cbind('X0' = 1, train.data))
  # Renamed from `T`, which shadowed the TRUE literal.
  targets <- train.label
  # One row of coefficients per iteration; row 1 is a random start.
  W <- matrix(, nrow = tau.max, ncol = ncol(Phi))
  W[1, ] <- runif(ncol(Phi))
  tau <- 1
  terminate <- FALSE
  while (!terminate) {
    # Termination: last storable iteration reached, or cost small enough.
    # (`||`, not `|`: this is a scalar condition, and `||` short-circuits.)
    terminate <- tau >= tau.max - 1 || error_func(Phi, W[tau, ], targets) <= epsilon
    # Current predictions for the batch gradient step.
    t_pred <- predict_func(Phi, W[tau, ])
    # Gradient-descent update on all weights at once.
    W[(tau + 1), ] <- W[tau, ] - (eta * (1 / nrow(Phi)) * (t(Phi) %*% (t_pred - targets)))
    # Record train and test error for this iteration.
    error[tau, 'train'] <- error_func(as.matrix(cbind(1, train.data)), W[tau, ], train.label)
    error[tau, 'test'] <- error_func(as.matrix(cbind(1, test.data)), W[tau, ], test.label)
    tau <- tau + 1
  }
  # Coefficients from the final iteration.
  W[tau, ]
}
## 80% of the sample size
smp_size <- floor(0.8 * nrow(mtcars))
# NOTE(review): no set.seed() before sample(), so the train/test split (and
# everything downstream) is not reproducible — consider seeding.
train_ind <- sample(seq_len(nrow(mtcars)), size = smp_size)
train <- mtcars[train_ind, ]
test <- mtcars[-train_ind, ]
#Splitting the columns into x and y for train and test
# Predictors are columns 2:4 (cyl, disp, hp); response is column 1 (mpg),
# kept as a one-column data frame via drop=FALSE.
train.x <- train[,c(2,3,4)]
train.y <- train[,1,drop=FALSE]
test.x <- test[,c(2,3,4)]
test.y <- test[,1,drop=FALSE]
tau.max <- 18 * nrow(train.x) # maximum number of iterations
eta <- 0.01 # learning rate
epsilon <- 0.1 # a threshold on the cost (to terminate the process)
#Calling the function
theta <- bgd(train.x,train.y,test.x,test.y,tau.max,eta,epsilon)
#Predicting
# NOTE(review): predictions are computed on the *training* predictors, not
# test.x — confirm this is intentional before evaluating the model.
pred_y <- as.matrix(cbind("X0"=1,train.x))%*%theta
|
/Linear Regression/Linear_Regression_Batch_Gradient_Descent.R
|
no_license
|
ksashok/Machine-Learning-Algorithms
|
R
| false
| false
| 2,151
|
r
|
# auxiliary function to predict based on the estimated coefficients
predict_func <- function(Phi, w){
return(Phi%*%w)
}
# auxiliary function to calculate a cost function
error_func <- function (Phi, w, label){
#Error Function. MSE
return(mean((predict_func(Phi, w) - label)^2))
}
bgd <- function(train.data,train.label,test.data,test.label,tau.max,eta,epsilon)
{
#Initializing the variables
train.len <- nrow(train.data)
error <- data.frame('tau'=1:tau.max) # to be used to trace the test and training errors in each iteration
Phi <- as.matrix(cbind('X0'=1, train.data))
T <- train.label
W <- matrix(,nrow=tau.max, ncol=ncol(Phi)) # be used to store the estimated oefficients
W[1,] <- runif(ncol(Phi)) # initial weight
tau <- 1 #Setting the counter to 1
terminate <- FALSE
while(!terminate){
# check termination criteria:
terminate <- tau >= tau.max-1 | error_func(Phi, W[tau,],T)<=epsilon
#Finding the predicted value
t_pred = predict_func(Phi, W[tau,])
#Updating the weights with the L2 regression
W[(tau+1),] <- W[tau,] - (eta * (1/nrow(Phi)) * (t(Phi)%*%(t_pred - T)))
#Finding the train and test error
error[tau, 'train'] <- error_func(as.matrix(cbind(1, train.data)), W[tau,],train.label)
error[tau, 'test'] <- error_func(as.matrix(cbind(1, test.data)), W[tau,],test.label)
#Incrementing the counter
tau <- tau + 1
}
#Incrementing the counter
return(W[tau,])
}
## 80% of the sample size
smp_size <- floor(0.8 * nrow(mtcars))
train_ind <- sample(seq_len(nrow(mtcars)), size = smp_size)
train <- mtcars[train_ind, ]
test <- mtcars[-train_ind, ]
#Splitting the columns into x and y for train and test
train.x <- train[,c(2,3,4)]
train.y <- train[,1,drop=FALSE]
test.x <- test[,c(2,3,4)]
test.y <- test[,1,drop=FALSE]
tau.max <- 18 * nrow(train.x) # maximum number of iterations
eta <- 0.01 # learning rate
epsilon <- 0.1 # a threshold on the cost (to terminate the process)
#Calling the function
theta <- bgd(train.x,train.y,test.x,test.y,tau.max,eta,epsilon)
#Predicting
pred_y <- as.matrix(cbind("X0"=1,train.x))%*%theta
|
# CHAPTER 5
# Intermediate Statistics and Probability
#############################
# Stock price distributions #
#############################
# Extract prices and compute statistics
prices <- SPY$SPY.Adjusted
mean_prices <- round(mean(prices), 2)
sd_prices <- round(sd(prices), 2)
# Plot the histogram along with a legend
hist(prices, breaks = 100, prob=T, cex.main = 0.9)
abline(v = mean_prices, lwd = 2)
legend("topright", cex = 0.8, border = NULL, bty = "n",
paste("mean=", mean_prices, "; sd=", sd_prices))
# Draw four histograms (one per date range) in a 2x2 grid, each annotated
# with the mean and sd of the series over that range.
# `data` is subsettable by "start::end" strings (xts-style);
# start_date/end_date are parallel vectors of length 4; `title` is the
# common prefix for each panel's title.
plot_4_ranges <- function(data, start_date, end_date, title) {
# Set the plot window to be 2 rows and 2 columns
par(mfrow = c(2, 2))
for(i in 1:4) {
# Create a string with the appropriate date range
range <- paste(start_date[i], "::", end_date[i], sep = "")
# Create the price vector and necessary statistics
time_series <- data[range]
mean_data <- round(mean(time_series, na.rm = TRUE), 3)
sd_data <- round(sd(time_series, na.rm = TRUE), 3)
# Plot the histogram along with a legend
hist_title <- paste(title, range)
hist(time_series, breaks = 100, prob=TRUE,
xlab = "", main = hist_title, cex.main = 0.8)
legend("topright", cex = 0.7, bty = 'n',
paste("mean=", mean_data, "; sd=", sd_data))
}
# Reset the plot window
par(mfrow = c(1, 1))
}
# Define start and end dates of interest
begin_dates <- c("2007-01-01", "2008-06-06",
"2009-10-10", "2011-03-03")
end_dates <- c("2008-06-05", "2009-09-09",
"2010-12-30", "2013-01-06")
# Create plots
plot_4_ranges(prices, begin_dates,
end_dates, "SPY prices for:")
################
# Stationarity #
################
# Compute log returns
returns <- diff(log(prices))
# Use the same function as before to plot returns rather than prices
plot_4_ranges(returns, begin_dates, end_dates, "SPY log prices for:")
######################################
# Determining stationarity with urca #
######################################
# Get SPY data and let's confirm that it is non-stationary
require(quantmod)
getSymbols("SPY")
spy <- SPY$SPY.Adjusted
# Use the default settings
require(urca)
test <- ur.kpss(as.numeric(spy))
# The output is an S4 object
class(test)
## [1] "ur.kpss"
## attr(,"package")
## [1] "urca"
# Extract the test statistic
test@teststat
## [1] 11.63543
# Look at the critical values
test@cval
## 10pct 5pct 2.5pct 1pct
## critical values 0.347 0.463 0.574 0.739
spy_returns <- diff(log(spy))
# Test on the returns
test_returns <- ur.kpss(as.numeric(spy_returns))
test_returns@teststat
## [1] 0.336143
test_returns@cval
## 10pct 5pct 2.5pct 1pct
## critical values 0.347 0.463 0.574 0.739
test_post_2013 <- ur.kpss(as.numeric(spy_returns['2013::']))
test_post_2013@teststat
## [1] 0.06936672
############################
# Assumptions of normality #
############################
# Plot histogram and density
mu <- mean(returns, na.rm = TRUE)
sigma <- sd(returns, na.rm = TRUE)
x <- seq(-5 * sigma, 5 * sigma, length = nrow(returns))
hist(returns, breaks = 100,
main = "Histogram of returns for SPY",
cex.main = 0.8, prob=TRUE)
lines(x, dnorm(x, mu, sigma), col = "red", lwd = 2)
# Set plotting window
par(mfrow = c(1, 2))
# SPY data
qqnorm(as.numeric(returns),
main = "SPY empirical returns qqplot()",
cex.main = 0.8)
qqline(as.numeric(returns), lwd = 2)
grid()
# Normal random data
normal_data <- rnorm(nrow(returns), mean = mu, sd = sigma)
qqnorm(normal_data, main = "Normal returns", cex.main = 0.8)
qqline(normal_data, lwd = 2)
grid()
answer <- shapiro.test(as.numeric(returns))
answer[[2]]
## [1] 5.118396e-34
set.seed(129)
normal_numbers <- rnorm(5000, 0, 1)
ans <- shapiro.test(normal_numbers)
ans[[2]]
## [1] 0.9963835
# Corrupt a single data point
normal_numbers[50] <- 1000
ans <- shapiro.test(normal_numbers)
ans[[2]]
## [1] 1.775666e-95
###############
# Correlation #
###############
sv <- as.xts(returns_matrix[, c(1, 6)])
head(sv)
## SPY.Close VXX.Close
## 2009-02-02 -0.003022794 -0.003160468
## 2009-02-03 0.013949240 -0.047941603
## 2009-02-04 -0.004908132 0.003716543
## 2009-02-05 0.014770965 -0.006134680
cor(sv)
## SPY.Close VXX.Close
## SPY.Close 1.0000000 -0.4603908
## VXX.Close -0.4603908 1.0000000
##################
# Filtering data #
##################
# Find the outliers
outliers <- which(sv[, 2] > 1.0)
# If any outliers exist, remove them
if(length(outliers) > 0) {
sv <- sv[-outliers, ]
}
cor(sv)
## SPY.Close VXX.Close
## SPY.Close 1.0000000 -0.8066466
## VXX.Close -0.8066466 1.0000000
##############
# R formulas #
##############
# Create a formula
my_formula <- as.formula("y ~ x")
# What is the output?
my_formula
## y ~ x
# What is the class of my_formula?
class(my_formula)
## [1] "formula"
# Create a linear regression object
reg <- lm(VXX.Close ~ SPY.Close, data = sv)
# Here is the output
summary(reg)
## Call:
## lm(formula = VXX.Close ~ SPY.Close, data = sv)
## Residuals:
## Min 1Q Median 3Q Max
## -0.085607 -0.012830 -0.000865 0.012188 0.116349
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.0024365 0.0006641 -3.669 0.000254 ***
## SPY.Close -2.5848492 0.0552193 -46.811 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1
## Residual standard error: 0.02287 on 1187 degrees of freedom
## Multiple R-squared: 0.6486,Adjusted R-squared: 0.6483
## F-statistic: 2191 on 1 and 1187 DF, p-value: < 2.2e-16
b <- reg$coefficients[1]
a <- reg$coefficients[2]
par(mfrow = c(2, 2))
plot(reg$residuals,
main = "Residuals through time",
xlab = "Days", ylab = "Residuals")
hist(reg$residuals, breaks = 100,
main = "Distribution of residuals",
xlab = "Residuals")
qqnorm(reg$residuals)
qqline(reg$residuals)
acf(reg$residuals, main = "Autocorrelation")
vxx_lag_1 <- lag(VXX$VXX.Close, k = 1)
head(vxx_lag_1)
## VXX.Close
## 2009-01-30 NA
## 2009-02-02 104.58
## 2009-02-03 104.25
## 2009-02-04 99.37
## 2009-02-05 99.74
## 2009-02-06 99.13
head(VXX$VXX.Close)
## VXX.Close
## 2009-01-30 104.58
## 2009-02-02 104.25
## 2009-02-03 99.37
## 2009-02-04 99.74
## 2009-02-05 99.13
## 2009-02-06 97.70
# Merge returns with lagged returns
sv <- merge(sv, lag(sv))
# Scatter plot of lagged SPY vs. VXX
# Scatter plot of lagged SPY returns against VXX returns.
plot(as.numeric(sv[, 3]), as.numeric(sv[, 2]),
     main = "Scatter plot SPY lagged vs. VXX.",
     xlab = "SPY lagged",
     ylab = "VXX",  # fix: missing comma here was a syntax error
     cex.main = 0.8,
     cex.axis = 0.8,
     cex.lab = 0.8)
grid()
reg2 <- lm(VXX.Close ~ SPY.Close.1, data = sv)
summary(reg2)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.004140 0.001121 -3.694 0.000231 ***
## SPY.Close.1 0.104119 0.093154 1.118 0.263918
## Residual standard error: 0.03857 on 1186 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.001052,Adjusted R-squared: 0.00021
## F-statistic: 1.249 on 1 and 1186 DF, p-value: 0.2639
ccf(as.numeric(sv[, 1]), as.numeric(sv[, 2]),
main = "Cross correlation between SPY and VXX",
ylab = "Cross correlation", xlab = "Lag", cex.main = 0.8,
cex.lab = 0.8, cex.axis = 0.8)
###################################
# The linear in linear regression #
###################################
x <- seq(1:100)
y <- x ^ 2
# Generate the plot
plot(x, y)
# Fit the regression
reg_parabola <- lm(y ~ x)
# Superimpose the best fit line on the plot
abline(reg_parabola, lwd = 2)
# Look at the results
summary(reg_parabola)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -1717.000 151.683 -11.32 <2e-16 ***
## x 101.000 2.608 38.73 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1
## Residual standard error: 752.7 on 98 degrees of freedom
## Multiple R-squared: 0.9387,Adjusted R-squared: 0.9381
## F-statistic: 1500 on 1 and 98 DF, p-value: < 2.2e-16
plot(x, sqrt(y))
reg_transformed <- lm(sqrt(y) ~ x)
abline(reg_transformed)
summary(reg_transformed)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.684e-14 5.598e-15 -1.015e+01 <2e-16 ***
## x 1.000e+00 9.624e-17 1.039e+16 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1
## Residual standard error: 2.778e-14 on 98 degrees of freedom
## Multiple R-squared: 1,Adjusted R-squared: 1
## F-statistic: 1.08e+32 on 1 and 98 DF, p-value: < 2.2e-16
##############
# Volatility #
##############
# Generate 1000 IID numbers from a normal distribution.
z <- rnorm(1000, 0, 1)
# Autocorrelation of returns and squared returns
par(mfrow = c(2, 1))
acf(z, main = "returns", cex.main = 0.8,
cex.lab = 0.8, cex.axis = 0.8)
grid()
acf(z ^ 2, main = "returns squared",
cex.lab = 0.8, cex.axis = 0.8)
grid()
par(mfrow = c(1, 1))
acf(sv[, 1] ^ 2, main = "Actual returns squared",
cex.main = 0.8, cex.lab = 0.8, cex.axis = 0.8)
grid()
par(mfrow = c(1, 2))
acf(sv[, 1]^3)
acf(abs(sv[, 1])
|
/Chapter_05/code.R
|
permissive
|
anselmoaraujo/rfortraders
|
R
| false
| false
| 9,125
|
r
|
# CHAPTER 5
# Intermediate Statistics and Probability
#############################
# Stock price distributions #
#############################
# Extract prices and compute statistics
prices <- SPY$SPY.Adjusted
mean_prices <- round(mean(prices), 2)
sd_prices <- round(sd(prices), 2)
# Plot the histogram along with a legend
hist(prices, breaks = 100, prob=T, cex.main = 0.9)
abline(v = mean_prices, lwd = 2)
legend("topright", cex = 0.8, border = NULL, bty = "n",
paste("mean=", mean_prices, "; sd=", sd_prices))
plot_4_ranges <- function(data, start_date, end_date, title) {
# Set the plot window to be 2 rows and 2 columns
par(mfrow = c(2, 2))
for(i in 1:4) {
# Create a string with the appropriate date range
range <- paste(start_date[i], "::", end_date[i], sep = "")
# Create the price vector and necessary statistics
time_series <- data[range]
mean_data <- round(mean(time_series, na.rm = TRUE), 3)
sd_data <- round(sd(time_series, na.rm = TRUE), 3)
# Plot the histogram along with a legend
hist_title <- paste(title, range)
hist(time_series, breaks = 100, prob=TRUE,
xlab = "", main = hist_title, cex.main = 0.8)
legend("topright", cex = 0.7, bty = 'n',
paste("mean=", mean_data, "; sd=", sd_data))
}
# Reset the plot window
par(mfrow = c(1, 1))
}
# Define start and end dates of interest
begin_dates <- c("2007-01-01", "2008-06-06",
"2009-10-10", "2011-03-03")
end_dates <- c("2008-06-05", "2009-09-09",
"2010-12-30", "2013-01-06")
# Create plots
plot_4_ranges(prices, begin_dates,
end_dates, "SPY prices for:")
################
# Stationarity #
################
# Compute log returns
returns <- diff(log(prices))
# Use the same function as before to plot returns rather than prices
plot_4_ranges(returns, begin_dates, end_dates, "SPY log prices for:")
######################################
# Determining stationarity with urca #
######################################
# Get SPY data and let's confirm that it is non-stationary
require(quantmod)
getSymbols("SPY")
spy <- SPY$SPY.Adjusted
# Use the default settings
require(urca)
test <- ur.kpss(as.numeric(spy))
# The output is an S4 object
class(test)
## [1] "ur.kpss"
## attr(,"package")
## [1] "urca"
# Extract the test statistic
test@teststat
## [1] 11.63543
# Look at the critical values
test@cval
## 10pct 5pct 2.5pct 1pct
## critical values 0.347 0.463 0.574 0.739
spy_returns <- diff(log(spy))
# Test on the returns
test_returns <- ur.kpss(as.numeric(spy_returns))
test_returns@teststat
## [1] 0.336143
test_returns@cval
## 10pct 5pct 2.5pct 1pct
## critical values 0.347 0.463 0.574 0.739
test_post_2013 <- ur.kpss(as.numeric(spy_returns['2013::']))
test_post_2013@teststat
## [1] 0.06936672
############################
# Assumptions of normality #
############################
# Plot histogram and density
mu <- mean(returns, na.rm = TRUE)
sigma <- sd(returns, na.rm = TRUE)
x <- seq(-5 * sigma, 5 * sigma, length = nrow(returns))
hist(returns, breaks = 100,
main = "Histogram of returns for SPY",
cex.main = 0.8, prob=TRUE)
lines(x, dnorm(x, mu, sigma), col = "red", lwd = 2)
# Set plotting window
par(mfrow = c(1, 2))
# SPY data
qqnorm(as.numeric(returns),
main = "SPY empirical returns qqplot()",
cex.main = 0.8)
qqline(as.numeric(returns), lwd = 2)
grid()
# Normal random data
normal_data <- rnorm(nrow(returns), mean = mu, sd = sigma)
qqnorm(normal_data, main = "Normal returns", cex.main = 0.8)
qqline(normal_data, lwd = 2)
grid()
answer <- shapiro.test(as.numeric(returns))
answer[[2]]
## [1] 5.118396e-34
set.seed(129)
normal_numbers <- rnorm(5000, 0, 1)
ans <- shapiro.test(normal_numbers)
ans[[2]]
## [1] 0.9963835
# Corrupt a single data point
normal_numbers[50] <- 1000
ans <- shapiro.test(normal_numbers)
ans[[2]]
## [1] 1.775666e-95
###############
# Correlation #
###############
sv <- as.xts(returns_matrix[, c(1, 6)])
head(sv)
## SPY.Close VXX.Close
## 2009-02-02 -0.003022794 -0.003160468
## 2009-02-03 0.013949240 -0.047941603
## 2009-02-04 -0.004908132 0.003716543
## 2009-02-05 0.014770965 -0.006134680
cor(sv)
## SPY.Close VXX.Close
## SPY.Close 1.0000000 -0.4603908
## VXX.Close -0.4603908 1.0000000
##################
# Filtering data #
##################
# Find the outliers
outliers <- which(sv[, 2] > 1.0)
# If any outliers exist, remove them
if(length(outliers) > 0) {
sv <- sv[-outliers, ]
}
cor(sv)
## SPY.Close VXX.Close
## SPY.Close 1.0000000 -0.8066466
## VXX.Close -0.8066466 1.0000000
##############
# R formulas #
##############
# Create a formula
my_formula <- as.formula("y ~ x")
# What is the output?
my_formula
## y ~ x
# What is the class of my_formula?
class(my_formula)
## [1] "formula"
# Create a linear regression object
reg <- lm(VXX.Close ~ SPY.Close, data = sv)
# Here is the output
summary(reg)
## Call:
## lm(formula = VXX.Close ~ SPY.Close, data = sv)
## Residuals:
## Min 1Q Median 3Q Max
## -0.085607 -0.012830 -0.000865 0.012188 0.116349
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.0024365 0.0006641 -3.669 0.000254 ***
## SPY.Close -2.5848492 0.0552193 -46.811 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1
## Residual standard error: 0.02287 on 1187 degrees of freedom
## Multiple R-squared: 0.6486,Adjusted R-squared: 0.6483
## F-statistic: 2191 on 1 and 1187 DF, p-value: < 2.2e-16
b <- reg$coefficients[1]
a <- reg$coefficients[2]
par(mfrow = c(2, 2))
plot(reg$residuals,
main = "Residuals through time",
xlab = "Days", ylab = "Residuals")
hist(reg$residuals, breaks = 100,
main = "Distribution of residuals",
xlab = "Residuals")
qqnorm(reg$residuals)
qqline(reg$residuals)
acf(reg$residuals, main = "Autocorrelation")
vxx_lag_1 <- lag(VXX$VXX.Close, k = 1)
head(vxx_lag_1)
## VXX.Close
## 2009-01-30 NA
## 2009-02-02 104.58
## 2009-02-03 104.25
## 2009-02-04 99.37
## 2009-02-05 99.74
## 2009-02-06 99.13
head(VXX$VXX.Close)
## VXX.Close
## 2009-01-30 104.58
## 2009-02-02 104.25
## 2009-02-03 99.37
## 2009-02-04 99.74
## 2009-02-05 99.13
## 2009-02-06 97.70
# Merge returns with lagged returns
sv <- merge(sv, lag(sv))
# Scatter plot of lagged SPY vs. VXX
plot(as.numeric(sv[, 3]), as.numeric(sv[, 2]),
main = "Scatter plot SPY lagged vs. VXX.",
xlab = "SPY lagged",
ylab = "VXX"
cex.main = 0.8,
cex.axis = 0.8,
cex.lab = 0.8)
grid()
reg2 <- lm(VXX.Close ~ SPY.Close.1, data = sv)
summary(reg2)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.004140 0.001121 -3.694 0.000231 ***
## SPY.Close.1 0.104119 0.093154 1.118 0.263918
## Residual standard error: 0.03857 on 1186 degrees of freedom
## (1 observation deleted due to missingness)
## Multiple R-squared: 0.001052,Adjusted R-squared: 0.00021
## F-statistic: 1.249 on 1 and 1186 DF, p-value: 0.2639
ccf(as.numeric(sv[, 1]), as.numeric(sv[, 2]),
main = "Cross correlation between SPY and VXX",
ylab = "Cross correlation", xlab = "Lag", cex.main = 0.8,
cex.lab = 0.8, cex.axis = 0.8)
###################################
# The linear in linear regression #
###################################
x <- seq(1:100)
y <- x ^ 2
# Generate the plot
plot(x, y)
# Fit the regression
reg_parabola <- lm(y ~ x)
# Superimpose the best fit line on the plot
abline(reg_parabola, lwd = 2)
# Look at the results
summary(reg_parabola)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -1717.000 151.683 -11.32 <2e-16 ***
## x 101.000 2.608 38.73 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1
## Residual standard error: 752.7 on 98 degrees of freedom
## Multiple R-squared: 0.9387,Adjusted R-squared: 0.9381
## F-statistic: 1500 on 1 and 98 DF, p-value: < 2.2e-16
plot(x, sqrt(y))
reg_transformed <- lm(sqrt(y) ~ x)
abline(reg_transformed)
summary(reg_transformed)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.684e-14 5.598e-15 -1.015e+01 <2e-16 ***
## x 1.000e+00 9.624e-17 1.039e+16 <2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1
## Residual standard error: 2.778e-14 on 98 degrees of freedom
## Multiple R-squared: 1,Adjusted R-squared: 1
## F-statistic: 1.08e+32 on 1 and 98 DF, p-value: < 2.2e-16
##############
# Volatility #
##############
# Generate 1000 IID numbers from a normal distribution.
z <- rnorm(1000, 0, 1)
# Autocorrelation of returns and squared returns
par(mfrow = c(2, 1))
acf(z, main = "returns", cex.main = 0.8,
cex.lab = 0.8, cex.axis = 0.8)
grid()
acf(z ^ 2, main = "returns squared",
cex.lab = 0.8, cex.axis = 0.8)
grid()
par(mfrow = c(1, 1))
acf(sv[, 1] ^ 2, main = "Actual returns squared",
cex.main = 0.8, cex.lab = 0.8, cex.axis = 0.8)
grid()
par(mfrow = c(1, 2))
acf(sv[, 1]^3)
acf(abs(sv[, 1])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/undim.R
\name{undim}
\alias{undim}
\alias{undim.default}
\title{Removes the dimension of an object}
\usage{
undim(x, ...)
}
\arguments{
\item{x}{An object with or without dimensions}
\item{...}{Not used.}
}
\value{
The object with the dimension attribute removed.
}
\description{
Removes the dimension of an object
}
\details{
This function does \code{attr(x, "dim") <- NULL}, which
automatically also does \code{attr(x, "dimnames") <- NULL}.
However, other attributes such as names attributes are preserved,
which is not the case if one do \code{dim(x) <- NULL}.
}
|
/man/undim.Rd
|
no_license
|
EdwardBetts/listenv
|
R
| false
| true
| 646
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/undim.R
\name{undim}
\alias{undim}
\alias{undim.default}
\title{Removes the dimension of an object}
\usage{
undim(x, ...)
}
\arguments{
\item{x}{An object with or without dimensions}
\item{...}{Not used.}
}
\value{
The object with the dimension attribute removed.
}
\description{
Removes the dimension of an object
}
\details{
This function does \code{attr(x, "dim") <- NULL}, which
automatically also does \code{attr(x, "dimnames") <- NULL}.
However, other attributes such as names attributes are preserved,
which is not the case if one do \code{dim(x) <- NULL}.
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot.cumulative_animated_lexical_classification}
\alias{plot.cumulative_animated_lexical_classification}
\title{Plots a cumulative_animated_lexical_classification Object}
\usage{
\method{plot}{cumulative_animated_lexical_classification}(x, ...)
}
\arguments{
\item{x}{The cumulative_animated_lexical_classification object.}
\item{\ldots}{ignored}
}
\description{
Plots a cumulative_animated_lexical_classification object.
}
|
/man/plot.cumulative_animated_lexical_classification.Rd
|
no_license
|
joffrevillanueva/qdap
|
R
| false
| false
| 485
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot.cumulative_animated_lexical_classification}
\alias{plot.cumulative_animated_lexical_classification}
\title{Plots a cumulative_animated_lexical_classification Object}
\usage{
\method{plot}{cumulative_animated_lexical_classification}(x, ...)
}
\arguments{
\item{x}{The cumulative_animated_lexical_classification object.}
\item{\ldots}{ignored}
}
\description{
Plots a cumulative_animated_lexical_classification object.
}
|
envOne <- new.env()
typeof(envOne)
ls()
ls(envOne)
assign("bubba",12,envir=envOne)
ls()
ls(envOne)
envOne$bubba
get("bubba",envOne)
bubba
|
/book/packt/R.Object-oriented.Programming/6682OS_07_Codes/OLD/chapter_7_ex20.R
|
no_license
|
xenron/sandbox-da-r
|
R
| false
| false
| 152
|
r
|
envOne <- new.env()
typeof(envOne)
ls()
ls(envOne)
assign("bubba",12,envir=envOne)
ls()
ls(envOne)
envOne$bubba
get("bubba",envOne)
bubba
|
library(forecast)
library(itsmr)
library(tseries)
# for making data readable in R
#write.table(data_excess_cc,file="data_excess_cc.csv",sep = ",",col.names=TRUE)
table<-read.csv("data.csv",header=TRUE,sep=",")
head(table)
# for stationarity
KPassValue = kpss.test(table$avgloss) # if p-value is smaller than 0.05(standard) then its not
p_value =KPassValue$p.value
if(p_value <0.05)
{
print("Non Standard values")
}
#stationary and vice versa.
# for arima
Ar<- arar( table$avgloss,h=10,opt=2 )
# opt means it will give values and graph both.
# in summary pred value gives the forecasted value
# for checking how effectively forecasted values are we have to see
#arar$ pred value with actual value.
|
/arar.r
|
no_license
|
vishvnath-github/gitRepo
|
R
| false
| false
| 709
|
r
|
library(forecast)
library(itsmr)
library(tseries)
# for making data readable in R
#write.table(data_excess_cc,file="data_excess_cc.csv",sep = ",",col.names=TRUE)
table<-read.csv("data.csv",header=TRUE,sep=",")
head(table)
# for stationarity
KPassValue = kpss.test(table$avgloss) # if p-value is smaller than 0.05(standard) then its not
p_value =KPassValue$p.value
if(p_value <0.05)
{
print("Non Standard values")
}
#stationary and vice versa.
# for arima
Ar<- arar( table$avgloss,h=10,opt=2 )
# opt means it will give values and graph both.
# in summary pred value gives the forecasted value
# for checking how effectively forecasted values are we have to see
#arar$ pred value with actual value.
|
setClass ("LocalFilePreparedTablePatientHistoryProvider",
contains="PatientHistoryProvider"
)
#----------------------------------------------------------------------------------------------------
LocalFilePreparedTablePatientHistoryProvider <- function(path)
{
tokens <<- strsplit(path, "://")[[1]]
if(!length(tokens) == 2){
printf("Oncoscape LocalFilePreparedTablePatientHistoryProvider error. Manifest line ill-formed: '%s'", path);
stop()
}
protocol <- tokens[1]
path <- tokens[2]
if(protocol == "tbl")
full.path <- system.file(package="Oncoscape", "extdata", path)
else
stop(sprintf("LocalFilePreparedTablePatientHistoryProvider, unrecognized protocol: '%s'", protocol))
# check first for truly local file, then look in package extdata/
if(file.exists(path)) {
full.path <- path
}
else {
full.path <- system.file(package="Oncoscape", "extdata", path)
}
printf("reading patient history table from local file: %s", full.path)
standard.name <- "tbl.patientHistory"
if(!file.exists(full.path)){
printf("Oncoscape LocalFilePreparedTablePatientHistoryProvider error, file not found: '%s'", full.path);
stop()
}
eval(parse(text=sprintf("%s <<- %s", standard.name, load(full.path))))
printf("loaded %s from %s, %d x %d", standard.name, full.path,
nrow(tbl.patientHistory), ncol(tbl.patientHistory))
this <- new ("LocalFilePreparedTablePatientHistoryProvider", table=tbl.patientHistory, events=list())
this
} # LocalFilePreparedTablePatientHistoryProvider
#----------------------------------------------------------------------------------------------------
setMethod("show", "LocalFilePreparedTablePatientHistoryProvider",
function(object) {
msg <- sprintf("LocalFilePreparedTablePatientHistoryProvider")
cat(msg, "\n", sep="")
msg <- sprintf("tbl dimensions: %d x %d", nrow(object@table), ncol(object@table))
cat(msg, "\n", sep="")
msg <- sprintf("events list length: %d", length(object@events))
cat(msg, "\n", sep="")
}) # show
#---------------------------------------------------------------------------------------------------
# setMethod ("getPatientData", "LocalFilePreparedTableDataProvider",
#
# function(self, patients=NA, events=NA) {
#
# if(all(is.na(patients)))
# patients <- patientIDs(self)
#
# if(all(is.na(events)))
# events <- patientEventNames(self)
#
# unrecognized.events <- setdiff(events, patientEventNames(self))
# if(length(unrecognized.events) > 0){
# warning(sprintf("unrecognized events skipped: %s",
# paste(unrecognized.events, collapse=", ")));
# }
#
# recognized.events <- intersect(events, patientEventNames(self));
# recognized.patients <- intersect(patients, patientIDs(self))
# result <- vector("list", length(recognized.events) * length(recognized.patients))
# i = 0;
# for(event in recognized.events){
# for(patient in recognized.patients){
# tbl.tmp <- subset(self@data[[event]], PatientId==patient)
# #printf("events for pt %s and event %s: %d", patient, event, nrow(tbl.tmp))
# rows <- split(tbl.tmp, rownames(tbl.tmp))
# rows <- lapply(rows, as.list)
# for(row in rows){
# row[["TableName"]] <- event
# i = i + 1
# result[[i]] <- row
# } # for row
# }# for patient
# } # for event
# invisible(result)
# }); # getPatientData
#
#---------------------------------------------------------------------------------------------------
setMethod("getTable", "LocalFilePreparedTablePatientHistoryProvider",
function(self) {
self@table
})
#---------------------------------------------------------------------------------------------------
setMethod("getEvents", "LocalFilePreparedTablePatientHistoryProvider",
function(self) {
self@events
})
#---------------------------------------------------------------------------------------------------
|
/hbolouri/oncoDev/Oncoscape/R/LocalFilePreparedTableHistoryProvider.R
|
permissive
|
oncoscape/webapp-R-package
|
R
| false
| false
| 4,272
|
r
|
setClass ("LocalFilePreparedTablePatientHistoryProvider",
contains="PatientHistoryProvider"
)
#----------------------------------------------------------------------------------------------------
LocalFilePreparedTablePatientHistoryProvider <- function(path)
{
tokens <<- strsplit(path, "://")[[1]]
if(!length(tokens) == 2){
printf("Oncoscape LocalFilePreparedTablePatientHistoryProvider error. Manifest line ill-formed: '%s'", path);
stop()
}
protocol <- tokens[1]
path <- tokens[2]
if(protocol == "tbl")
full.path <- system.file(package="Oncoscape", "extdata", path)
else
stop(sprintf("LocalFilePreparedTablePatientHistoryProvider, unrecognized protocol: '%s'", protocol))
# check first for truly local file, then look in package extdata/
if(file.exists(path)) {
full.path <- path
}
else {
full.path <- system.file(package="Oncoscape", "extdata", path)
}
printf("reading patient history table from local file: %s", full.path)
standard.name <- "tbl.patientHistory"
if(!file.exists(full.path)){
printf("Oncoscape LocalFilePreparedTablePatientHistoryProvider error, file not found: '%s'", full.path);
stop()
}
eval(parse(text=sprintf("%s <<- %s", standard.name, load(full.path))))
printf("loaded %s from %s, %d x %d", standard.name, full.path,
nrow(tbl.patientHistory), ncol(tbl.patientHistory))
this <- new ("LocalFilePreparedTablePatientHistoryProvider", table=tbl.patientHistory, events=list())
this
} # LocalFilePreparedTablePatientHistoryProvider
#----------------------------------------------------------------------------------------------------
setMethod("show", "LocalFilePreparedTablePatientHistoryProvider",
function(object) {
msg <- sprintf("LocalFilePreparedTablePatientHistoryProvider")
cat(msg, "\n", sep="")
msg <- sprintf("tbl dimensions: %d x %d", nrow(object@table), ncol(object@table))
cat(msg, "\n", sep="")
msg <- sprintf("events list length: %d", length(object@events))
cat(msg, "\n", sep="")
}) # show
#---------------------------------------------------------------------------------------------------
# setMethod ("getPatientData", "LocalFilePreparedTableDataProvider",
#
# function(self, patients=NA, events=NA) {
#
# if(all(is.na(patients)))
# patients <- patientIDs(self)
#
# if(all(is.na(events)))
# events <- patientEventNames(self)
#
# unrecognized.events <- setdiff(events, patientEventNames(self))
# if(length(unrecognized.events) > 0){
# warning(sprintf("unrecognized events skipped: %s",
# paste(unrecognized.events, collapse=", ")));
# }
#
# recognized.events <- intersect(events, patientEventNames(self));
# recognized.patients <- intersect(patients, patientIDs(self))
# result <- vector("list", length(recognized.events) * length(recognized.patients))
# i = 0;
# for(event in recognized.events){
# for(patient in recognized.patients){
# tbl.tmp <- subset(self@data[[event]], PatientId==patient)
# #printf("events for pt %s and event %s: %d", patient, event, nrow(tbl.tmp))
# rows <- split(tbl.tmp, rownames(tbl.tmp))
# rows <- lapply(rows, as.list)
# for(row in rows){
# row[["TableName"]] <- event
# i = i + 1
# result[[i]] <- row
# } # for row
# }# for patient
# } # for event
# invisible(result)
# }); # getPatientData
#
#---------------------------------------------------------------------------------------------------
setMethod("getTable", "LocalFilePreparedTablePatientHistoryProvider",
function(self) {
self@table
})
#---------------------------------------------------------------------------------------------------
setMethod("getEvents", "LocalFilePreparedTablePatientHistoryProvider",
function(self) {
self@events
})
#---------------------------------------------------------------------------------------------------
|
rm(list=ls())
library(cl)
library(oce)
library(fields)
library(xtable)
source('filterAndSpectrumParameters.R')
plot <- FALSE
mround <- function(x,base){
base*round(x/base)
}
cohci <- function(x, ci=0.95, ci.lty=3, ci.col='blue'){
nser <- NCOL(x$spec)
gg <- 2/x$df
se <- sqrt(gg/2)
z <- -qnorm((1 - ci)/2)
coh <- pmin(0.99999, sqrt(x$coh))
lines(x$freq, (tanh(atanh(coh) + z * se))^2, lty = ci.lty, col = ci.col)
lines(x$freq, (pmax(0, tanh(atanh(coh) - z * se)))^2, lty = ci.lty, col = ci.col)
cohciupper <- (tanh(atanh(coh) + z * se))^2
cohcilower <- (pmax(0, tanh(atanh(coh) - z * se)))^2
invisible(list(cohciupper = cohciupper, cohcilower=cohcilower))
}
phaseci <- function(x, ci=0.95, ci.lty=3, ci.col='blue'){
nser <- NCOL(x$spec)
gg <- 2/x$df
coh <- sqrt(x$coh)
cl <- asin(pmin(0.9999, qt(ci, 2/gg - 2) * sqrt(gg *(coh^{-2} - 1)/(2 * (1 - gg)))))
lines(x$freq, x$phase + cl, lty = ci.lty, col = ci.col)
lines(x$freq, x$phase - cl, lty = ci.lty, col = ci.col)
phaseciupper <- x$phase + cl
phasecilower <- x$phase - cl
invisible(list(phaseciupper=phaseciupper, phasecilower=phasecilower))
}
##order microcat data to get upper depths for ADCP
d <- m1mc
depth <- unlist(lapply(d, function(stn) ifelse(length(stn[['pressure']]) != 0, mean(stn[['pressure']], na.rm=TRUE), stn[['depthMax']])))
o <- order(depth)
Hyddepth <- depth[o]
#adcp data
d <- m1adcp
depth <- unlist(lapply(d, function(stn) ifelse(length(stn[['pressure']]) != 0, mean(stn[['pressure']], na.rm=TRUE), stn[['depthMax']])))
look1 <- which.min(abs(Hyddepth[1] - depth))
look2 <- which.min(abs(Hyddepth[2] - depth))
adpd <- d[c(look1,look2)]
##order current meter data
d <- m1rcm
depth <- unlist(lapply(d, function(stn) ifelse(length(stn[['pressure']]) != 0, mean(stn[['pressure']], na.rm=TRUE), stn[['depthMax']])))
o <- order(depth)
depth <- depth[o]
mcd <- d[o]
data <- c(adpd, mcd) #adcp data and current meter
#####
coherencyStats <- function(data, type='u') {
p <- unlist(lapply(data, function(stn) ifelse(length(stn[['pressure']]) !=0, rep(round(mean(stn[['pressure']], na.rm=TRUE)), length(stn[['pressure']])), rep(round(stn[['depthMax']]), length(stn[['time']])))))
t <- lapply(data, function(stn) trunc(stn[['time']],'hours')) #adcp is every hour and 3 min
len <- unlist(lapply(t, function(k) length(k)))
look <- which.max(len)
deftime <- t[[look]]
tmat <- lapply(t, function(k) intersect(deftime,k))
r1 <- unlist(lapply(tmat, function(k) which(deftime == k[1])))
r2 <- unlist(lapply(tmat, function(k) which(deftime == k[length(k)])))
for (i in 1:length(data)){
u <- rep(NA, length(deftime))
if(type=='u'){
u[r1[i]:r2[i]] <- RCMvelocity(data[[i]])$u}
if(type=='v'){
u[r1[i]:r2[i]] <- RCMvelocity(data[[i]])$v}
if(type=='U'){
u[r1[i]:r2[i]] <- sqrt(RCMvelocity(data[[i]])$u^2 + RCMvelocity(data[[i]])$v^2)}
if (i ==1){
m <- u
}
else{
m <- cbind(m, u)}
}
colnames(m) <- p
deftime <- as.POSIXct(deftime)
for(i in 1:length(data)){
#print(p[i])
#check <- i == seq(2,length(data),2)
#print(length(check[check]))
u <- m[,i]
tm <- tidem(deftime, m[,i])
ut <- u - predict(tm)
notna <- which(!is.na(ut))
na <- which(is.na(ut))
ut <- fillGap(ut)
wna <- which(is.na(ut))
ut[wna] <- ut[(min(wna) - 1)]
utb <- butterfilter(ut, deltat=deltat, cutoff=cutoff)
utb[na] <- NA
utb[wna] <- NA
ub <- rep(NA, length(deftime))
ub[r1[i]:r2[i]] <- ut
if(i == 1){
b <- ub
}
else{
b <- cbind(b,ub)
}
ubf <- rep(NA, length(deftime))
ubf[r1[i]:r2[i]] <- utb
if(i == 1){
bfd <- ubf
}
else{
bfd <- cbind(bfd,ubf)
}
}
colnames(b) <- p
coh <- cohcil <- cohciu <- phase <- phasecil <- phaseciu <- vector(length=(length(b[1,])-1))
for (i in 2:length(b[1,])){
naunique <- intersect(which(!is.na(b[,i])), which(!is.na(b[,(i-1)])))
naunique2 <- intersect(which(!is.na(bfd[,i])), which(!is.na(bfd[,(i-1)])))
cf <- ccf(bfd[naunique2,(i)], bfd[naunique2,(i-1)], plot=FALSE, lag.max=60*24)
sp <- spectrum(cbind(b[naunique,(i)], b[naunique,(i-1)]), spans=spans, plot=FALSE)
par(mfrow=c(3,1), mar=c(3,3,1,1))
if(i == length(b[1,])){
if(!interactive() | plot == TRUE) {png(paste('002_SpecCohPhaseUNFC',i,'.png',sep=""), height=4, width=7, units='in', res=200, pointsize=11)}
par(mar=c(3,3.2,1,1))
layoutmat <- cbind(c(1,1,2,2,3,3), c(1,1,2,2,3,3), c(4,4,4,5,5,5))
layout(layoutmat)
}
plot(sp$freq, sp$spec[,1], type='l', xlim=c(0,1/2/24), xlab="", ylab="")
mtext(expression(paste('variance [m'^2,'s'^-2,'cph'^-1,']')), side=2, line=1.8, cex=2/3)
lines(sp$freq, sp$spec[,2], col='red')
legend('topright', lty=1, col=c('black', 'red'), legend=c(paste(p[i],'m'), paste(p[(i-1)],'m')))
sp1max <- which.max(sp$spec[,1])
sp2max <- which.max(sp$spec[,2])
abline(v=sp$freq[sp1max], lty=2, col='black')
abline(v=sp$freq[sp2max], lty=2, col='black')
#mtext(paste(type,colnames(b)[i],'m and',colnames(b)[(i-1)],'m'), adj=1)
plot(sp$freq, sp$coh, type='l', xlim=c(0,1/2/24), xlab="", ylab="")
cohci(sp)
spcohci <- cohci(sp)
mtext('Squared coherency', side=2, line=2, cex=2/3)
abline(v=sp$freq[sp1max], lty=2, col='black')
abline(v=sp$freq[sp2max], lty=2, col='black')
#print(sp$freq[sp1max])
plot(sp$freq, sp$phase, type='l', xlim=c(0,1/2/24), xlab="", ylab="")
spphaseci <- phaseci(sp)
mtext('Phase [rad]', side=2, line=2, cex=2/3)
mtext('Frequency [cph]',side=1, line=2, cex=2/3)
abline(v=sp$freq[sp1max], lty=2, col='black')
abline(v=sp$freq[sp2max], lty=2, col='black')
freqlook <- which.min(abs(sp$freq - 0.002)) #20.83days
coh[(i-1)] <- sp$coh[freqlook]
cohcil[(i-1)] <- spcohci$cohcilower[freqlook]
cohciu[(i-1)] <- spcohci$cohciupper[freqlook]
phase[(i-1)] <- sp$phase[freqlook]
phasecil[(i-1)] <- spphaseci$phasecilower[freqlook]
phaseciu[(i-1)] <- spphaseci$phaseciupper[freqlook]
}
invisible(list(coh=coh, cohcil=cohcil, cohciu=cohciu, phase=phase, phasecil=phasecil, phaseciu=phaseciu, p=p))
}
ci <- coherencyStats(data, type='U')
par(mar=c(3,3,1,1))
plot(ci$coh,ci$p[-(length(ci$p))], ylim=rev(range(ci$p[-(length(ci$p))])), xlim=c(0,1), xlab="", ylab="")
for (i in 1:length(ci$coh)){
lines(c(ci$cohcil[i], ci$coh[i]),c(ci$p[i], ci$p[i]))
lines(c(ci$cohciu[i], ci$coh[i]),c(ci$p[i], ci$p[i]))
}
points(ci$coh,ci$p[-(length(ci$p))], pch=21, bg='white')
mtext('Squared coherency', side=1, line=2, cex=2/3)
mtext('Pressure [dbar]', side=2, line=2, cex=2/3)
plot(ci$phase,ci$p[-(length(ci$p))], ylim=rev(range(ci$p[-(length(ci$p))])), xlim=c(min(ci$phasecil),max(ci$phaseciu)), xlab="", ylab="")
for (i in 1:length(ci$coh)){
lines(c(ci$phasecil[i], ci$phase[i]),c(ci$p[i], ci$p[i]))
lines(c(ci$phaseciu[i], ci$phase[i]),c(ci$p[i], ci$p[i]))
}
points(ci$phase,ci$p[-(length(ci$p))], pch=21, bg='white')
mtext('Phase [rad]', side=1, line=2, cex=2/3)
mtext('Pressure [dbar]', side=2, line=2, cex=2/3)
if(!interactive()) dev.off()
save(ci, file='FCcoherency.rda')
|
/src/002_coherency.R
|
no_license
|
dfo-mar-odis/Publication-FlemishCap-2018
|
R
| false
| false
| 6,901
|
r
|
rm(list=ls())
library(cl)
library(oce)
library(fields)
library(xtable)
source('filterAndSpectrumParameters.R')
plot <- FALSE
mround <- function(x,base){
base*round(x/base)
}
cohci <- function(x, ci=0.95, ci.lty=3, ci.col='blue'){
nser <- NCOL(x$spec)
gg <- 2/x$df
se <- sqrt(gg/2)
z <- -qnorm((1 - ci)/2)
coh <- pmin(0.99999, sqrt(x$coh))
lines(x$freq, (tanh(atanh(coh) + z * se))^2, lty = ci.lty, col = ci.col)
lines(x$freq, (pmax(0, tanh(atanh(coh) - z * se)))^2, lty = ci.lty, col = ci.col)
cohciupper <- (tanh(atanh(coh) + z * se))^2
cohcilower <- (pmax(0, tanh(atanh(coh) - z * se)))^2
invisible(list(cohciupper = cohciupper, cohcilower=cohcilower))
}
phaseci <- function(x, ci=0.95, ci.lty=3, ci.col='blue'){
nser <- NCOL(x$spec)
gg <- 2/x$df
coh <- sqrt(x$coh)
cl <- asin(pmin(0.9999, qt(ci, 2/gg - 2) * sqrt(gg *(coh^{-2} - 1)/(2 * (1 - gg)))))
lines(x$freq, x$phase + cl, lty = ci.lty, col = ci.col)
lines(x$freq, x$phase - cl, lty = ci.lty, col = ci.col)
phaseciupper <- x$phase + cl
phasecilower <- x$phase - cl
invisible(list(phaseciupper=phaseciupper, phasecilower=phasecilower))
}
##order microcat data to get upper depths for ADCP
d <- m1mc
depth <- unlist(lapply(d, function(stn) ifelse(length(stn[['pressure']]) != 0, mean(stn[['pressure']], na.rm=TRUE), stn[['depthMax']])))
o <- order(depth)
Hyddepth <- depth[o]
#adcp data
d <- m1adcp
depth <- unlist(lapply(d, function(stn) ifelse(length(stn[['pressure']]) != 0, mean(stn[['pressure']], na.rm=TRUE), stn[['depthMax']])))
look1 <- which.min(abs(Hyddepth[1] - depth))
look2 <- which.min(abs(Hyddepth[2] - depth))
adpd <- d[c(look1,look2)]
##order current meter data
d <- m1rcm
depth <- unlist(lapply(d, function(stn) ifelse(length(stn[['pressure']]) != 0, mean(stn[['pressure']], na.rm=TRUE), stn[['depthMax']])))
o <- order(depth)
depth <- depth[o]
mcd <- d[o]
data <- c(adpd, mcd) #adcp data and current meter
#####
## Cross-spectral statistics between vertically adjacent instruments.
##
## For each instrument, one velocity component ('u', 'v', or speed 'U') is
## detided (tidem), gap-filled, low-pass filtered (butterfilter), and then
## the squared coherency and phase between each instrument and its upper
## neighbour are estimated with spectrum().  Values (plus confidence
## intervals from cohci()/phaseci()) are extracted at ~0.002 cph.
##
## NOTE(review): relies on objects from the enclosing script -- RCMvelocity,
## tidem, fillGap, butterfilter, cohci, phaseci, and the globals spans,
## deltat, cutoff, plot -- confirm they are defined before this is called.
##
## Arguments:
##   data : list of instrument records, ordered top to bottom; each element
##          carries 'time' and either 'pressure' or 'depthMax'
##   type : 'u' (east), 'v' (north) or 'U' (speed) component to analyse
## Returns (invisibly): list with coherency/phase estimates and their CI
## bounds per adjacent pair, plus the per-sample pressure labels p.
coherencyStats <- function(data, type='u') {
    # pressure label repeated once per sample of each instrument
    p <- unlist(lapply(data, function(stn) ifelse(length(stn[['pressure']]) !=0, rep(round(mean(stn[['pressure']], na.rm=TRUE)), length(stn[['pressure']])), rep(round(stn[['depthMax']]), length(stn[['time']])))))
    t <- lapply(data, function(stn) trunc(stn[['time']],'hours')) #adcp is every hour and 3 min
    # use the instrument with the longest record as the common time base
    len <- unlist(lapply(t, function(k) length(k)))
    look <- which.max(len)
    deftime <- t[[look]]
    # first/last index of each instrument's record on the common time base
    tmat <- lapply(t, function(k) intersect(deftime,k))
    r1 <- unlist(lapply(tmat, function(k) which(deftime == k[1])))
    r2 <- unlist(lapply(tmat, function(k) which(deftime == k[length(k)])))
    # build matrix m: one column per instrument, aligned on deftime, NA-padded
    for (i in 1:length(data)){
        u <- rep(NA, length(deftime))
        if(type=='u'){
            u[r1[i]:r2[i]] <- RCMvelocity(data[[i]])$u}
        if(type=='v'){
            u[r1[i]:r2[i]] <- RCMvelocity(data[[i]])$v}
        if(type=='U'){
            u[r1[i]:r2[i]] <- sqrt(RCMvelocity(data[[i]])$u^2 + RCMvelocity(data[[i]])$v^2)}
        if (i ==1){
            m <- u
        }
        else{
            m <- cbind(m, u)}
    }
    colnames(m) <- p
    deftime <- as.POSIXct(deftime)
    # detide, gap-fill and low-pass filter each column;
    # b = detided series, bfd = detided + Butterworth-filtered series
    for(i in 1:length(data)){
        #print(p[i])
        #check <- i == seq(2,length(data),2)
        #print(length(check[check]))
        u <- m[,i]
        tm <- tidem(deftime, m[,i])
        ut <- u - predict(tm)            # remove tidal fit
        notna <- which(!is.na(ut))
        na <- which(is.na(ut))
        ut <- fillGap(ut)
        # fillGap leaves trailing NAs: pad them with the last filled value
        wna <- which(is.na(ut))
        ut[wna] <- ut[(min(wna) - 1)]
        utb <- butterfilter(ut, deltat=deltat, cutoff=cutoff)
        # restore NA where the original record had gaps / padding
        utb[na] <- NA
        utb[wna] <- NA
        ub <- rep(NA, length(deftime))
        ub[r1[i]:r2[i]] <- ut
        if(i == 1){
            b <- ub
        }
        else{
            b <- cbind(b,ub)
        }
        ubf <- rep(NA, length(deftime))
        ubf[r1[i]:r2[i]] <- utb
        if(i == 1){
            bfd <- ubf
        }
        else{
            bfd <- cbind(bfd,ubf)
        }
    }
    colnames(b) <- p
    coh <- cohcil <- cohciu <- phase <- phasecil <- phaseciu <- vector(length=(length(b[1,])-1))
    # cross-spectra between each instrument (column i) and its upper
    # neighbour (column i-1), using only their jointly valid samples
    for (i in 2:length(b[1,])){
        naunique <- intersect(which(!is.na(b[,i])), which(!is.na(b[,(i-1)])))
        naunique2 <- intersect(which(!is.na(bfd[,i])), which(!is.na(bfd[,(i-1)])))
        # NOTE(review): cf is computed but never used below -- leftover?
        cf <- ccf(bfd[naunique2,(i)], bfd[naunique2,(i-1)], plot=FALSE, lag.max=60*24)
        sp <- spectrum(cbind(b[naunique,(i)], b[naunique,(i-1)]), spans=spans, plot=FALSE)
        par(mfrow=c(3,1), mar=c(3,3,1,1))
        # the deepest pair is routed into the multi-panel png figure
        if(i == length(b[1,])){
            if(!interactive() | plot == TRUE) {png(paste('002_SpecCohPhaseUNFC',i,'.png',sep=""), height=4, width=7, units='in', res=200, pointsize=11)}
            par(mar=c(3,3.2,1,1))
            layoutmat <- cbind(c(1,1,2,2,3,3), c(1,1,2,2,3,3), c(4,4,4,5,5,5))
            layout(layoutmat)
        }
        # panel 1: the two autospectra, with their peak frequencies marked
        plot(sp$freq, sp$spec[,1], type='l', xlim=c(0,1/2/24), xlab="", ylab="")
        mtext(expression(paste('variance [m'^2,'s'^-2,'cph'^-1,']')), side=2, line=1.8, cex=2/3)
        lines(sp$freq, sp$spec[,2], col='red')
        legend('topright', lty=1, col=c('black', 'red'), legend=c(paste(p[i],'m'), paste(p[(i-1)],'m')))
        sp1max <- which.max(sp$spec[,1])
        sp2max <- which.max(sp$spec[,2])
        abline(v=sp$freq[sp1max], lty=2, col='black')
        abline(v=sp$freq[sp2max], lty=2, col='black')
        #mtext(paste(type,colnames(b)[i],'m and',colnames(b)[(i-1)],'m'), adj=1)
        # panel 2: squared coherency with confidence interval overlay
        plot(sp$freq, sp$coh, type='l', xlim=c(0,1/2/24), xlab="", ylab="")
        cohci(sp)
        spcohci <- cohci(sp)
        mtext('Squared coherency', side=2, line=2, cex=2/3)
        abline(v=sp$freq[sp1max], lty=2, col='black')
        abline(v=sp$freq[sp2max], lty=2, col='black')
        #print(sp$freq[sp1max])
        # panel 3: phase with confidence interval overlay
        plot(sp$freq, sp$phase, type='l', xlim=c(0,1/2/24), xlab="", ylab="")
        spphaseci <- phaseci(sp)
        mtext('Phase [rad]', side=2, line=2, cex=2/3)
        mtext('Frequency [cph]',side=1, line=2, cex=2/3)
        abline(v=sp$freq[sp1max], lty=2, col='black')
        abline(v=sp$freq[sp2max], lty=2, col='black')
        # extract statistics at the low-frequency band of interest
        freqlook <- which.min(abs(sp$freq - 0.002)) #20.83days
        coh[(i-1)] <- sp$coh[freqlook]
        cohcil[(i-1)] <- spcohci$cohcilower[freqlook]
        cohciu[(i-1)] <- spcohci$cohciupper[freqlook]
        phase[(i-1)] <- sp$phase[freqlook]
        phasecil[(i-1)] <- spphaseci$phasecilower[freqlook]
        phaseciu[(i-1)] <- spphaseci$phaseciupper[freqlook]
    }
    invisible(list(coh=coh, cohcil=cohcil, cohciu=cohciu, phase=phase, phasecil=phasecil, phaseciu=phaseciu, p=p))
}
## Summary profiles: squared coherency and phase of speed U between adjacent
## instruments (at ~0.002 cph), plotted against pressure with their CIs.
ci <- coherencyStats(data, type='U')
par(mar=c(3,3,1,1))
plot(ci$coh,ci$p[-(length(ci$p))], ylim=rev(range(ci$p[-(length(ci$p))])), xlim=c(0,1), xlab="", ylab="")
for (i in 1:length(ci$coh)){
    # horizontal whiskers: lower/upper confidence bounds around each estimate
    lines(c(ci$cohcil[i], ci$coh[i]),c(ci$p[i], ci$p[i]))
    lines(c(ci$cohciu[i], ci$coh[i]),c(ci$p[i], ci$p[i]))
}
points(ci$coh,ci$p[-(length(ci$p))], pch=21, bg='white')
mtext('Squared coherency', side=1, line=2, cex=2/3)
mtext('Pressure [dbar]', side=2, line=2, cex=2/3)
plot(ci$phase,ci$p[-(length(ci$p))], ylim=rev(range(ci$p[-(length(ci$p))])), xlim=c(min(ci$phasecil),max(ci$phaseciu)), xlab="", ylab="")
for (i in 1:length(ci$coh)){
    lines(c(ci$phasecil[i], ci$phase[i]),c(ci$p[i], ci$p[i]))
    lines(c(ci$phaseciu[i], ci$phase[i]),c(ci$p[i], ci$p[i]))
}
points(ci$phase,ci$p[-(length(ci$p))], pch=21, bg='white')
mtext('Phase [rad]', side=1, line=2, cex=2/3)
mtext('Pressure [dbar]', side=2, line=2, cex=2/3)
if(!interactive()) dev.off()  # close the png opened inside coherencyStats()
save(ci, file='FCcoherency.rda')
|
#' Iterative robust model-based imputation (IRMI)
#'
#' In each step of the iteration, one variable is used as a response variable
#' and the remaining variables serve as the regressors.
#'
#' The method works sequentially and iteratively. The method can deal with a
#' mixture of continuous, semi-continuous, ordinal and nominal variables
#' including outliers.
#'
#' A full description of the method can be found in the mentioned reference.
#'
#' @param x data.frame or matrix
#' @param eps threshold for convergence
#' @param maxit maximum number of iterations
#' @param mixed column index of the semi-continuous variables
#' @param mixed.constant vector with length equal to the number of
#' semi-continuous variables specifying the point of the semi-continuous
#' distribution with non-zero probability
#' @param count column index of count variables
#' @param step a stepwise model selection is applied when the parameter is set
#' to TRUE
#' @param robust if TRUE, robust regression methods will be applied
#' @param takeAll takes information of (initialised) missings in the response
#' as well for regression imputation.
#' @param noise irmi has the option to add a random error term to the imputed
#' values, this creates the possibility for multiple imputation. The error term
#' has mean 0 and variance corresponding to the variance of the regression
#' residuals.
#' @param noise.factor amount of noise.
#' @param force if TRUE, the algorithm tries to find a solution in any case,
#' possible by using different robust methods automatically.
#' @param robMethod regression method when the response is continuous.
#' @param force.mixed if TRUE, the algorithm tries to find a solution in any
#' case, possible by using different robust methods automatically.
#' @param addMixedFactors if TRUE add additional factor variable for each mixed variable as X variable in the regression
#' @param modelFormulas a named list with the name of variables for the rhs of the formulas, which must contain a rhs formula for each variable with missing values, it should look like list(y1=c("x1","x2"),y2=c("x1","x3"))
#'
#' if factor variables for the mixed variables should be created for the
#' regression models
#' @param mi number of multiple imputations.
#' @param trace Additional information about the iterations when trace equals
#' TRUE.
#' @param init.method Method for initialization of missing values (kNN or
#' median)
#' @param multinom.method Method for estimating the multinomial models
#' (current default and only available method is multinom)
#' @return the imputed data set.
#' @author Matthias Templ, Alexander Kowarik
#' @seealso \code{\link[mi]{mi}}
#' @references M. Templ, A. Kowarik, P. Filzmoser (2011) Iterative stepwise
#' regression imputation using standard and robust methods. \emph{Journal of
#' Computational Statistics and Data Analysis}, Vol. 55, pp. 2793-2806.
#' @references A. Kowarik, M. Templ (2016) Imputation with
#' R package VIM. \emph{Journal of
#' Statistical Software}, 74(7), 1-16.
#' @keywords manip
#' @examples
#'
#' data(sleep)
#' irmi(sleep)
#'
#' data(testdata)
#' imp_testdata1 <- irmi(testdata$wna,mixed=testdata$mixed)
#'
#' # mixed.constant != 0 (-10)
#' testdata$wna$m1[testdata$wna$m1==0] <- -10
#' testdata$wna$m2 <- log(testdata$wna$m2+0.001)
#' imp_testdata2 <- irmi(testdata$wna,mixed=testdata$mixed,mixed.constant=c(-10,log(0.001)))
#' imp_testdata2$m2 <- exp(imp_testdata2$m2)-0.001
#'
#' #example with fixed formulas for the variables with missing
#' form=list(
#' NonD=c("BodyWgt","BrainWgt"),
#' Dream=c("BodyWgt","BrainWgt"),
#' Sleep=c("BrainWgt"),
#' Span=c("BodyWgt"),
#' Gest=c("BodyWgt","BrainWgt")
#' )
#' irmi(sleep,modelFormulas=form,trace=TRUE)
#'
#' # Example with ordered variable
#' td <- testdata$wna
#' td$c1 <- as.ordered(td$c1)
#' irmi(td)
#'
#' @export irmi
#' @S3method irmi data.frame
#' @S3method irmi survey.design
#' @S3method irmi default
# S3 generic for IRMI imputation; see the roxygen block above for the
# full parameter documentation.
irmi <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  # dispatch on the class of x (data.frame, survey.design, or default)
  UseMethod("irmi", x)
}
# data.frame method: delegates directly to the workhorse irmi_work().
irmi.data.frame <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  irmi_work(x, eps, maxit, mixed, mixed.constant, count, step,
      robust, takeAll, noise, noise.factor, force,
      robMethod, force.mixed, mi, addMixedFactors,
      trace,init.method,modelFormulas=modelFormulas,multinom.method=multinom.method)
}
# survey.design method: imputes the $variables data.frame in place and
# refreshes the stored call, returning the updated design object.
irmi.survey.design <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  x$variables <- irmi_work(x$variables, eps, maxit, mixed, mixed.constant, count, step,
      robust, takeAll, noise, noise.factor, force,
      robMethod, force.mixed, mi, addMixedFactors,
      trace,init.method,modelFormulas=modelFormulas,multinom.method=multinom.method)
  # record the user's original call (one frame up) on the design object
  x$call <- sys.call(-1)
  x
}
# default method: coerce the input (e.g. a matrix) to a data.frame and
# delegate to the workhorse irmi_work().
irmi.default <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  irmi_work(as.data.frame(x), eps, maxit, mixed, mixed.constant, count, step,
      robust, takeAll, noise, noise.factor, force,
      robMethod, force.mixed, mi, addMixedFactors,
      trace,init.method,modelFormulas=modelFormulas,multinom.method=multinom.method)
}
## Workhorse for irmi(): sequential, iterative regression imputation.
## One variable at a time serves as the response, all remaining variables as
## regressors; the cycle repeats until the change between two iterations
## drops below `eps` or `maxit` is reached.  Optional noise / multiple
## imputation passes follow.  See the roxygen block above irmi() for the
## parameter documentation.  Returns the imputed data.frame (invisibly),
## or a list of `mi` imputed data.frames when mi > 1.
`irmi_work` <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom"){
  #Authors: Alexander Kowarik and Matthias Templ, Statistics Austria, GPL 2 or newer, version: 15. Nov. 2012
  #object mixed conversion into the right format (vector of variable names of type mixed)
  #TODO: Data sets with variables "y" might fail
  if(trace){
    cat("Method for multinomial models:",multinom.method,"\n")
  }
  ## ---- input validation and normalisation of 'mixed' / 'count' ----
  if(!is.data.frame(x)){
    if(is.matrix(x))
      x <- as.data.frame(x)
    else
      stop("data frame must be provided")
  }
  if(!is.null(mixed.constant)&&!is.null(mixed)){
    if(length(mixed)!=length(mixed.constant))
      stop("The length of 'mixed' and 'mixed.constant' differ.")
  }
  # 'mixed' may be given as logical mask, numeric index or character names;
  # normalise to character column names
  if(!is.null(mixed)){
    if(!is.character(mixed)){
      if(is.logical(mixed)){
        if(length(mixed)!=length(colnames(x)))
          stop("the mixed parameter is not defined correct.")
        mixed <- colnames(x)[mixed]
      } else if(is.numeric(mixed)){
        if(max(mixed)>length(colnames(x)))
          stop("the mixed parameter is not defined correct.")
        mixed <- colnames(x)[mixed]
      }
    }else if(!all(mixed%in%colnames(x))){
      stop("Not all mixed variables are found in the colnames of the input dataset.")
    }
  }
  # same normalisation for 'count'
  if(!is.null(count)){
    if(!is.character(count)){
      if(is.logical(count)){
        if(length(count)!=length(colnames(x)))
          stop("the count parameter is not defined correct.")
        count <- colnames(x)[count]
      }else if(is.numeric(count)){
        if(max(count)>length(colnames(x)))
          stop("the count parameter is not defined correct.")
        count <- colnames(x)[count]
      }
    }else if(!all(count%in%colnames(x))){
      stop("Not all count variables are found in the colnames of the input dataset.")
    }
  }
  ## ---- determine the measurement type of every column ----
  class1 <- function(x) class(x)[1]
  types <- lapply(x,class1)
  #  if(any(types=="ordered")){
  #    for(i in which(types=="ordered")){
  #      msg <- paste(names(x)[i]," is defined as ordered,but irmi cannot deal with ordered variables
  #              at the moment, therefore the ordered attribute is set to FALSE \n",sep="")
  #      cat(msg)
  #      x[,i] <- factor(x[,i],ordered=FALSE)
  #      types[i] <- "factor"
  #    }
  #  }
  types[colnames(x)%in%mixed] <- "mixed"
  types[colnames(x)%in%count] <- "count"
  attributes(types)$names <-NULL
  types <- unlist(types)
  # character columns cannot be modelled directly -> coerce to factor
  if(any(types=="character")){
    chrInd <- which(types=="character")
    warning("At least one character variable is converted into a factor")
    for(ind in chrInd){
      x[,ind] <- as.factor(x[,ind])
      types[ind] <- "factor"
    }
  }
  #determine factor type: dichotomous or polytomous
  #detect problematic factors
  indFac <- which(types == "factor")
  for(ind in indFac){
    #get number of levels
    fac_nlevels = nlevels(x[[ind]])
    if (fac_nlevels == 2)
      types[ind] <- "binary"
    else if (fac_nlevels > 2)
      types[ind] <- "nominal"
    else stop(sprintf("factor with less than 2 levels detected! - `%s`", names(x)[ind]))
  }
  # per-variable overview (type + number of missings), printed when trace=TRUE
  missingSummary <- cbind(types,apply(x,2,function(x)sum(is.na(x))))
  colnames(missingSummary) <- c("type","#missing")
  #  save(x, file="xtest.RData")
  N <- n <- dim(x)[1]
  P <- dim(x)[2]
  ## error management:
  if(dim(x)[2] < 2) stop("Less than 2 variables included in x.")
  if(step&&robust)
    stop("robust stepwise is not yet implemented")
  # NOTE(review): execution continues after this message -- intentional?
  if(!any(is.na(x))) cat("No missings in x. Nothing to impute")
  if(any(apply(x, 1, function(x) all(is.na(x))))) stop("Unit non-responses included in x.")
  ## mixed into logical vector:
  if(!is.logical(mixed) & !is.null(mixed)){
    ind <- rep(FALSE, P)
    ind[mixed] <- TRUE
    mixedlog <- ind
  } else mixedlog <- mixed
  if(!is.character(mixed)){
    mixed <- colnames(x)[mixed]
  }
  if(!is.character(count)){
    count <- colnames(x)[count]
  }
  #  if(!is.null(mixed) && length(mixed) != P) stop(paste("Length of mixed must either be NULL or", P))
  ## count into logical vector:
  #if(!is.logical(count) & !is.null(count)){
  #  ind <- rep(FALSE, P)
  #  ind[which(colnames(x) == count)] <- TRUE
  #  countlog <- ind
  #} else countlog <- count
  #  if(is.null(mixed)) mixed <- rep(FALSE, P)
  #  if(is.null(count)) count <- rep(FALSE, P)
  #  if(!is.null(count) && length(count) != P) stop(paste("Length of mixed must either be NULL or", P))
  #if(any(countlog == mixedlog) && countlog == TRUE) stop(paste("you declined variable", which(countlog==mixedlog && countlog==TRUE), "to be both, count and mixed"))
  # a variable may not be declared both count and mixed
  if(length(Inter(list(count,mixed)))>0) stop(paste("you declined a variable to be both, count and mixed"))
  #for(i in which(countlog)){
  #  class(x[,i]) <- c("count", "numeric")
  #	}
  ## check for factors in x
  factors <- vector()
  for(i in 1:ncol(x)){
    factors <- c(factors,is.factor(x[,i]))
  }
  ## Recode the levels of a factor to 1:number of levels
  ## (original levels are remembered and restored before returning)
  if(any(factors)){
    factors <- colnames(x)[factors]
    origLevels <- list()
    for(f in 1:length(factors)){
      origLevels[[f]] <- levels(x[,factors[f]])
      levels(x[,factors[f]]) <- 0:(length(origLevels[[f]])-1)
    }
  } else factors <- character(0)
  VarswithNA <-vector()
  ## index for missingness
  w2 <- is.na(x)
  ## variables that include missings
  for(i in seq(P)){
    if(anyNA(x[,i]))
      VarswithNA <- c(VarswithNA,i)
  }
  ## remember the number of decimals of count variables, since the
  ## mean imputation in initialise() would otherwise introduce fractions
  ndigitsCount <- apply(x[,types=="count", drop=FALSE], 2,
      function(x){
        x <- as.character(x)
        max(unlist(lapply(strsplit(x, "\\."), function(x) ifelse(length(x) > 1, nchar(strsplit(x, "\\.")[2]), 0))))
      })
  ## initialise: rough first imputation of all missings (kNN or median)
  #for( j in 1:ncol(x) ) {
  #print(paste("HIER:", j))
  x <- initialise(x,mixed=mixed,method=init.method,mixed.constant=mixed.constant)
  #}
  ## round count variables:
  j <- 0
  for( i in which(types=="count")){
    j <- j+1
    x[,i] <- round(x[,i], ndigitsCount[j])
  }
  if(trace) print(head(x))
  mixedTF <- FALSE
  mixedConstant <- 0
  ### outer loop: iterate until the imputed values stabilise
  d <- 99999
  it <- 0
  while(d > eps && it < maxit){
    it = it + 1
    if(trace)
      cat("Iteration",it,"\n")
    xSave <- x
    ## inner loop: re-impute each variable with missings in turn
    for(i in VarswithNA){
      if(trace){
        print(paste("inner loop:",i))
        if(Sys.info()[1] == "Windows") flush.console()
      }
      yPart <- x[, i, drop=FALSE]
      wy <- which(w2[,i])           # rows originally missing in variable i
      xPart <- x[, -i, drop=FALSE]
      ## --- Start Additonal xvars for mixed vars
      ## optionally add a 0/1 indicator regressor per mixed variable,
      ## flagging whether the value equals the semi-continuous constant
      if(!is.null(mixed)&&addMixedFactors){
        if(any(names(xPart)%in%mixed)){
          mixedIndex <- which(names(xPart)%in%mixed)
          for(ii in 1:length(mixedIndex)){
            namenew <- paste(names(xPart)[mixedIndex[ii]],"ADDMIXED",sep="")
            if(is.null(mixed.constant))
              xPart[,namenew] <- as.numeric(xPart[,mixedIndex[ii]]==0)
            else
              xPart[,namenew] <- as.numeric(xPart[,mixedIndex[ii]]==mixed.constant[ii])
          }
        }
      } ## end additional xvars for mixed vars ---
      if(!takeAll){
        dataForReg <- data.frame(cbind(yPart[-wy,], xPart[-wy,])) ## rows where y has no missings
      } else{
        dataForReg <- data.frame(cbind(yPart, xPart))
      }
      # is the current response a semi-continuous variable, and if so,
      # at which constant does it spike?
      if(!is.null(mixed)){
        if(names(x)[i] %in% mixed){
          mixedTF <- TRUE
          if(is.null(mixed.constant)){
            mixedConstant <- 0
          }else{
            mixedConstant <- mixed.constant[which(mixed==names(x)[i])]
          }
        } else{
          mixedTF <- FALSE
        }
      }
      colnames(dataForReg)[1] <- "y"
      new.dat <- data.frame(cbind(rep(1,length(wy)), xPart[wy,,drop=FALSE]))
      #print(attributes(dataForReg$y)$cn)
      if(trace){
        print(types[[i]])
      }
      # map the variable type onto the model family used by getM()
      if( types[i]=="integer"||types[i]=="numeric" || types[i] =="mixed"){ ## todo: move outside the loop!!
        meth = "numeric"
      } else if( types[i]=="binary" ){
        meth = "bin"
      } else if( types[i]=="nominal" ){
        meth = "factor"
      } else if( types[i]=="count"){
        meth = "count"
      }else if( types[i]=="ordered" ){
        meth = "ordered"
      }
      ## replace initialised missings:
      if(length(wy) > 0){
        #idataForReg <<- dataForReg
        #indata <<- new.dat[,-1,drop=FALSE]
        #imeth <<- meth
        #ii <<- i
        #iindex <<- wy
        #imixedTF<<- mixedTF
        #ifactors <<- factors
        #istep <<- step
        #irobust <<- robust
        #inoise <<- FALSE
        #itypes <<- types
        #debug(getM)
        if(trace)
          print(meth)
        #print(lapply(dataForReg, class))
        #if(i==10) stop("ZUR KONTROLLE i=10")
        # choose the user-supplied rhs formula for this response, if any;
        # otherwise regress on all remaining variables
        if(!is.null(modelFormulas)){
          TFform <- names(modelFormulas)==colnames(x)[i]
          if(any(TFform))
            activeFormula <- modelFormulas[[which(TFform)]]
          else
            activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        }else
          activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        if(trace){
          print(paste("formula used:",paste(colnames(x)[i],"~",paste(activeFormula,collapse="+"))))
          if(Sys.info()[1] == "Windows") flush.console()
        }
        x[wy,i] <- getM(xReg=dataForReg, ndata=new.dat[,-1,drop=FALSE], type=meth,
            index=wy, mixedTF=mixedTF,mixedConstant=mixedConstant, factors=factors, step=step,
            robust=robust, noise=FALSE, force=force, robMethod,form=activeFormula,multinom.method=multinom.method)
        #if(!testdigits(x$x5)) stop()
      }
    } ## end inner loop
    ## convergence criterion: squared change of continuous/mixed values
    ## plus the number of changed categorical values
    d <- 0
    if(any(types%in%c("numeric","mixed")))
      d <- sum((xSave[,types%in%c("numeric","mixed")] - x[,types%in%c("numeric","mixed")])^2, na.rm=TRUE) #todo: handle factors differently.
    if(any(!types%in%c("numeric","mixed")))
      d <- d + sum(xSave[,!types%in%c("numeric","mixed")]!=x[,!types%in%c("numeric","mixed")])
    flush.console()
    if(trace){
      print(paste("it =",it,", Wert =",d))
      print(paste("eps", eps))
      print(paste("test:", d > eps))
    }
  } ## end outer loop
  if( it > 1 ){
    d <- 0
    if(any(types%in%c("numeric","mixed")))
      d <- sum((xSave[,types%in%c("numeric","mixed")] - x[,types%in%c("numeric","mixed")])^2, na.rm=TRUE) #todo: handle factors differently.
    if(any(!types%in%c("numeric","mixed")))
      d <- d + sum(xSave[,!types%in%c("numeric","mixed")]!=x[,!types%in%c("numeric","mixed")])
    if(trace){
      if( it < maxit ){
        print(paste(d, "<", eps, "= eps")); print(paste(" --> finished after", it, "iterations"))
      } else if (it == maxit){
        print("not converged...");print(paste(d, "<", eps, "= eps"))
      }
    }
  }
  ### Add NOISE:
  ### A last run with building the model and adding noise...
  ### (adds a random residual-scaled error term to the imputed values)
  if(noise && mi==1){
    for(i in seq(P)){
      flush.console()
      yPart <- x[, i, drop=FALSE]
      wy <- which(w2[,i])
      xPart <- x[, -i, drop=FALSE]
      if(!takeAll){
        dataForReg <- data.frame(cbind(yPart[-wy,], xPart[-wy,])) ## rows where y has no missings
      }else{
        dataForReg <- data.frame(cbind(yPart, xPart))
      }
      if(!is.null(mixed)){
        if(names(x)[i] %in% mixed){
          mixedTF <- TRUE
          if(is.null(mixed.constant)){
            mixedConstant <- 0
          }else{
            mixedConstant <- mixed.constant[which(mixed==names(x)[i])]
          }
        }else{
          mixedTF <- FALSE
        }
      }
      colnames(dataForReg)[1] <- "y"
      new.dat <- data.frame(cbind(rep(1,length(wy)), xPart[wy,,drop=FALSE]))
      if( types[i]=="numeric" || types[i] =="mixed"){ ## todo: move outside the loop!!
        meth = "numeric"
      } else if( types[i]=="binary" ){
        meth = "bin"
      } else if( types[i]=="nominal" ){
        meth = "factor"
      } else if( types[i]=="count"){
        meth = "count"
      } else if( types[i]=="ordered"){
        meth = "ordered"
      }
      if(!is.null(modelFormulas)){
        TFform <- names(modelFormulas)==colnames(x)[i]
        if(any(TFform))
          activeFormula <- modelFormulas[[which(TFform)]]
        else
          activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
      }else
        activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
      if(length(wy) > 0) x[wy,i] <- getM(xReg=dataForReg, ndata=new.dat[,-1,drop=FALSE],
            type=meth, index=wy,mixedTF=mixedTF,mixedConstant=mixedConstant,factors=factors,
            step=step,robust=robust,noise=TRUE,noise.factor=noise.factor,force=force,robMethod,form=activeFormula,multinom.method=multinom.method)
    }
  }
  ## End NOISE
  #if(!testdigits(x$x5)) stop("s121212121212asasa\n")
  ## Begin multiple imputation: repeat the final noisy imputation 'mi'
  ## times, each time starting from the converged solution
  if(mi>1&&!noise){
    cat("Noise option is set automatically to TRUE")
    noise <- TRUE
  }
  if(mi>1){
    mimp <- list()
    xSave1 <- x
    for(m in 1:mi){
      for(i in seq(P)){
        flush.console()
        yPart <- x[, i, drop=FALSE]
        wy <- which(w2[,i])
        xPart <- x[, -i, drop=FALSE]
        if(!takeAll){
          dataForReg <- data.frame(cbind(yPart[-wy,], xPart[-wy,])) ## rows where y has no missings
        }else{
          dataForReg <- data.frame(cbind(yPart, xPart))
        }
        if(!is.null(mixed)){
          if(names(x)[i] %in% mixed){
            mixedTF <- TRUE
            if(is.null(mixed.constant))
              mixedConstant <- 0
            else
              mixedConstant <- mixed.constant[which(mixed==names(x)[i])]
          }else{
            mixedTF <- FALSE
          }
        }
        colnames(dataForReg)[1] <- "y"
        new.dat <- data.frame(cbind(rep(1,length(wy)), xPart[wy,,drop=FALSE]))
        # NOTE(review): 'class(...) ==' comparisons error for multi-class
        # columns (e.g. ordered factors) in R >= 4.2 -- confirm intended types
        if( class(dataForReg$y) == "numeric" ) meth = "numeric" else if( class(dataForReg$y) == "factor" & length(levels(dataForReg$y))==2) meth = "bin" else meth = "factor"
        ## replace initialised missings:
        if(!is.null(modelFormulas)){
          TFform <- names(modelFormulas)==colnames(x)[i]
          if(any(TFform))
            activeFormula <- modelFormulas[[which(TFform)]]
          else
            activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        }else
          activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        if(length(wy) > 0) x[wy,i] <- getM(xReg=dataForReg, ndata=new.dat[,-1,drop=FALSE], type=meth, index=wy,mixedTF=mixedTF,mixedConstant=mixedConstant,
              factors=factors,step=step,robust=robust,noise=TRUE,
              noise.factor=noise.factor,force=force,robMethod,form=activeFormula,multinom.method=multinom.method)
      }
      mimp[[m]] <- x
      x <- xSave1
    }
    x <- mimp
  }
  ## End Multiple Imputation
  ## Recode factors to their original coding (levels were mapped to 0..k-1)
  if(length(factors)>0){
    for(f in 1:length(factors)){
      #      cat("vorher\n")
      #      print(str(x))
      #      print(origLevels[[f]])
      if(mi>1){
        for(mii in 1:mi)
          levels(x[[mii]][,factors[f]]) <- origLevels[[f]]
      }else{
        levels(x[,factors[f]]) <- origLevels[[f]]
      }
      #      cat("nachher\n")
    }
  }
  if(trace){
    cat("Imputation performed on the following data set:\n")
    print(missingSummary)
  }
  invisible(x)
}
### utility functions
anyNA <- function(X) any(is.na(X))
Unit <- function(A) UseMethod("Unit")
Unit.list <- function(A){
  # Pool all elements of the list into one vector (same sequential c() calls
  # as a growing loop), then return the sorted unique values as character.
  pooled <- Reduce(c, A, vector())
  levels(as.factor(pooled))
}
Inter <- function(A) UseMethod("Inter")
Inter.list <- function(A){
  # Sorted unique values (as character) that occur in every element of A.
  candidates <- Unit(A)
  in_all <- vapply(
    candidates,
    function(val) all(vapply(A, function(grp) val %in% grp, logical(1))),
    logical(1)
  )
  levels(as.factor(candidates[in_all]))
}
#' Initialization of missing values
#'
#' Rough estimation of missing values in a vector according to its type.
#'
#' Missing values are imputed with the mean for vectors of class
#' \code{"numeric"}, with the median for vectors of class \code{"integer"}, and
#' with the mode for vectors of class \code{"factor"}. Hence, \code{x} should
#' be prepared in the following way: assign class \code{"numeric"} to numeric
#' vectors, assign class \code{"integer"} to ordinal vectors, and assign class
#' \code{"factor"} to nominal or binary vectors.
#'
#' @param x a vector.
#' @param mixed a character vector containing the names of variables of type
#' mixed (semi-continuous).
#' @param method Method used for Initialization (median or kNN)
#' @param mixed.constant vector with length equal to the number of
#' semi-continuous variables specifying the point of the semi-continuous
#' distribution with non-zero probability
#' @return the initialized vector.
#' @note The function is used internally by some imputation algorithms.
#' @author Matthias Templ, modifications by Andreas Alfons
#' @keywords manip
#' @export initialise
`initialise` <- function(x,mixed,method="kNN",mixed.constant=NULL){
  # Rough column-wise initialisation of missing values.
  # method "median": numeric/integer columns get the median, factors get the
  # mode; any other method value falls through to kNN() imputation.
  if(method=="median"){
    for( j in seq_len(ncol(x)) ) {
      xx <- x[,j]
      # BUG FIX: the former 'class(xx) == "numeric"' compared against the
      # full class vector and errors (length > 1 condition) e.g. for ordered
      # factors in R >= 4.2.  is.numeric() also subsumes the old, identical
      # "integer" branch.
      if(is.numeric(xx)) {
        xx <- as.vector(impute(as.matrix(xx), "median"))
      } else if(is.factor(xx)) {
        # mode imputation: replace NA with the most frequent level;
        # preserve level order for ordered factors (previously lost)
        ord <- is.ordered(xx)
        lev <- levels(xx)
        xx <- as.character(xx)
        xx[which(is.na(xx))] <- names(which.max(table(xx)))
        xx <- if(ord) factor(xx, levels = lev, ordered = TRUE) else as.factor(xx)
      }
      x[,j] <- xx
    }
  }else{
    x <- invisible(kNN(x,imp_var=FALSE,mixed=mixed,mixed.constant=mixed.constant))
  }
  return(x)
}
## switch function to automatically select methods
## Dispatch on the response's measurement type and delegate to the matching
## model-based imputation routine; returns the vector of imputed values.
## NOTE(review): the trailing comma leaves an empty unnamed switch
## alternative -- harmless while all callers pass a known type, but an
## unknown type would hit it; confirm.
## NOTE(review): the default multinom.method="mnlogit" differs from the
## "multinom" default used by all callers -- callers always pass it
## explicitly, so the inconsistency is currently inert.
getM <- function(xReg, ndata, type, index,mixedTF,mixedConstant,factors,step,robust,noise,noise.factor=1,force=FALSE, robMethod="MM",form=NULL,multinom.method="mnlogit") {
  switch(type,
      numeric = useLM(xReg, ndata, index,mixedTF,mixedConstant,factors,step,robust,noise,noise.factor,force,robMethod,form=form),
      factor = useMN(xReg, ndata, index,factors,step,robust,form=form,multinom.method=multinom.method),
      bin = useB(xReg, ndata, index,factors,step,robust,form=form),
      count = useGLMcount(xReg, ndata, index, factors, step, robust,form=form),
      ordered = useOrd(xReg, ndata, index,factors,step,robust,form=form),
  )
}
### LM+GLM --- useLM start
### LM+GLM --- useLM start
## Continuous / semi-continuous response.  For a semi-continuous ("mixed")
## response a two-step model is used: a binomial GLM first decides whether a
## missing value equals the mixed constant; a (possibly robust) linear model
## then predicts the continuous part for the remaining cases.  Optionally a
## residual-scaled noise term is added to the predictions.
## Returns the vector of imputed values for the rows indexed by wy.
useLM <- function(xReg, ndata, wy, mixedTF,mixedConstant, factors, step, robust, noise, noise.factor, force, robMethod,form){
  n <- nrow(xReg)
  # keep only factor regressors that are actually present in xReg
  factors <- Inter(list(colnames(xReg),factors))
  ## for semicontinuous variables
  if(mixedTF){
    # drop factor regressors with an empty level (they would break the fit)
    delFactors <- vector()
    if(length(factors)>0){
      for(f in 1:length(factors)){
        if(any(summary(xReg[,factors[f]])==0)){
          xReg <- xReg[,-which(colnames(xReg)==factors[f])]
          ndata <- ndata[,-which(colnames(ndata)==factors[f])]
          delFactors <- c(delFactors,factors[f])
        }
      }
    }
    # step 1: binary model for "equals the mixed constant" vs "continuous"
    xReg1 <- xReg
    xReg1$y[xReg$y==mixedConstant] <- 0
    xReg1$y[xReg$y!=mixedConstant] <- 1
    form <- form[form%in%names(xReg1)]
    # NOTE(review): 'class(form)!="formula"' errors for multi-element class
    # vectors in R >= 4.2; in practice form arrives here as a character
    # vector, so only the first branch is ever taken -- confirm.
    if(class(form)!="formula")
      form <- as.formula(paste("y ~",paste(form,collapse="+")))
    else
      form <- y~.
    # NOTE(review): the robust branch fits the same non-robust binomial GLM
    # (robust alternatives are disabled) -- intentional fallback?
    if(!robust)
      glm.bin <- glm(form , data=xReg1, family="binomial")
    else{
      glm.bin <- glm(form , data=xReg1, family="binomial")
    }
    # if VGAM will be chosen instead of multinom:
    #    op <- options() # all commented out, because VGAM was dropped!
    #    options(show.error.messages=FALSE)
    #    try(detach(package:VGAM))
    #    options(op)
    if(step)
      glm.bin <- stepAIC(glm.bin,trace=-1)
    ## imputation: classify each missing row at the 0.5 threshold
    imp <- predict(glm.bin, newdata=ndata, type="response")
    imp[imp < 0.5] <- 0
    imp[imp >= 0.5] <- 1
    # step 2 is fit on the continuous part only
    xReg <- xReg[xReg$y != mixedConstant,]
    # re-check for factors that became degenerate after subsetting
    factors2 <- factors[!factors%in%delFactors]
    if(length(factors2) > 0){
      for(f in 1:length(factors2)){
        if(any(summary(xReg[,factors2[f]])==0)){
          xReg <- xReg[,-which(colnames(xReg)==factors2[f])]
          ndata <- ndata[,-which(colnames(ndata)==factors2[f])]
        }
      }
    }
    ## for continuous variables:
  } else{
    if(length(factors)>0){
      delFactors <- vector()
      for(f in 1:length(factors)){
        if(any(summary(xReg[,factors[f]])==0)){
          xReg <- xReg[,-which(colnames(xReg)==factors[f])]
          ndata <- ndata[,-which(colnames(ndata)==factors[f])]
          delFactors <- c(delFactors,factors[f])
        }
      }
    }
    # purely continuous: every missing row goes through the linear model
    imp <- rep(1,nrow(ndata))
  }
  ##Two-Step: (re)build the formula against the surviving regressors
  if(class(form)!="formula"){
    form <- form[form%in%names(xReg)]
    if(length(form)>0)
      form <- as.formula(paste("y ~",paste(form,collapse="+")))
    else
      form <- y~.
  }else{
    formVars <- all.vars(form)[-1]
    if(any(!formVars%in%colnames(xReg))){
      formVars <- formVars[formVars%in%colnames(xReg)]
      form <- as.formula(paste("y ~",paste(formVars,collapse="+")))
    }
  }
  # fit the continuous-part model; robust variants per robMethod, with a
  # cascading fallback (MM -> lmrob -> M -> plain glm) when force=TRUE
  if(!robust){
    glm.num <- glm(form, data=xReg, family="gaussian")
    #cat("not ROBUST!!!!!!!!\n")
  } else{
    if(exists("glm.num"))
      rm(glm.num)
    if(force){
      try(glm.num <- rlm(form , data=xReg,method="MM"),silent=TRUE)
      if(!exists("glm.num")){
        try(glm.num <- lmrob(form , data=xReg),silent=TRUE)
        if(!exists("glm.num")){
          glm.num <- rlm(form , data=xReg,method="M")
          if(!exists("glm.num")){
            glm.num <- glm(form, data=xReg, family="gaussian")
          }
        }
      }
    } else{
      if(robMethod=="lmrob"){
        glm.num <- lmrob(form , data=xReg)
      }else if(robMethod=="lqs"){
        glm.num <- lqs(form , data=xReg)
      }else{
        glm.num <- rlm(form , data=xReg,method=robMethod)
      }
    }
  }
  #  op <- options() # all commented out, because VGAM was dropped
  #  options(show.error.messages=FALSE)
  #  try(detach(package:VGAM))
  #  options(op)
  if(step){
    glm.num <- stepAIC(glm.num,trace=-1)
  }
  # predictions for the continuous rows, optionally with noise drawn from
  # the residual scale (inflated by a small-sample consistency factor)
  if(noise){
    if(!robust){
      consistencyFactor <- sqrt((nrow(ndata[imp==1,,drop=FALSE])/n + 1))#*n/(n+1)
      nout <- nrow(ndata[imp==1,,drop=FALSE])
      p.glm.num <- predict(glm.num, newdata=ndata[imp==1,,drop=FALSE],se.fit=TRUE)
      if(is.nan(p.glm.num$residual.scale)){
        warning("The residual scale could not be computed, probably due to a rank deficient model. It is set to 1\n")
        p.glm.num$residual.scale <- 1
      }
      imp2 <- p.glm.num$fit+noise.factor*rnorm(length(p.glm.num$fit),0,p.glm.num$residual.scale*consistencyFactor)
    } else{
      nout <- nrow(ndata[imp==1,,drop=FALSE])
      consistencyFactor <- sqrt((nrow(ndata[imp==1,,drop=FALSE])/n + 1))#*(n)/(n+1))
      p.glm.num <- predict(glm.num, newdata=ndata[imp==1,,drop=FALSE])
      if(is.nan(glm.num$s)){
        warning("The residual scale could not be computed, probably due to a rank deficient model. It is set to 1\n")
        glm.num$s <- 1
      }
      imp2 <- p.glm.num + noise.factor*rnorm(length(p.glm.num),0,glm.num$s*consistencyFactor)
    }
  } else
    imp2 <- predict(glm.num, newdata=ndata[imp==1,,drop=FALSE])
  # merge: mixed-constant rows get the constant, others the model prediction
  imp3 <- imp
  imp3[imp==0] <- mixedConstant
  imp3[imp==1] <- imp2
  return(imp3)
  #  library(VGAM, warn.conflicts = FALSE, verbose=FALSE)
  # -end useLM-
}
## count data as response
useGLMcount <- function(xReg, ndata, wy, factors, step, robust,form){
factors <- Inter(list(colnames(xReg),factors))
if(length(factors)>0){
for(f in 1:length(factors)){
if(any(summary(xReg[,factors[f]])==0)){
xReg <- xReg[,-which(colnames(xReg)==factors[f])]
ndata <- ndata[,-which(colnames(ndata)==factors[f])]
}
}
}
form <- form[form%in%names(xReg)]
if(length(form)>0)
form <- as.formula(paste("y ~",paste(form,collapse="+")))
else
form <- y~.
if(robust){
#glmc <- glm(y~ ., data=xReg, family=poisson)
glmc <- glmrob(form, data=xReg, family=poisson)
glmc$rank<-ncol(xReg)
#glmc$coef <- glmcR$coef
} else {
glmc <- glm(form, data=xReg, family=poisson)
}
if(step & robust) stop("both step and robust equals TRUE not provided")
if(step){
glmc <- stepAIC(glmc, trace=-1)
}
imp2 <- round(predict(glmc, newdata=ndata,type="response"))
#iin[[length(iin)+1]]<<-imp2
return(imp2)
}
# categorical response
useMN <- function(xReg, ndata, wy, factors, step, robust,form,multinom.method){
factors <- Inter(list(colnames(xReg),factors))
if(length(factors)>0){
for(f in 1:length(factors)){
if(any(summary(xReg[,factors[f]])==0)){
xReg <- xReg[,-which(colnames(xReg)==factors[f])]
ndata <- ndata[,-which(colnames(ndata)==factors[f])]
}
}
}
form <- form[form%in%names(xReg)]
if(length(form)>0)
form <- as.formula(paste("y ~",paste(form,collapse="+")))
else
form <- y~.
if(multinom.method=="multinom"){
co <- capture.output(multimod <- multinom(form, data=xReg,summ=2,maxit=50,trace=FALSE))
if(step){
multimod <- stepAIC(multimod,xReg)
}
imp <- predict(multimod, newdata=ndata)
}else{
stop("multinom is the only implemented method at the moment!\n")
}
return(imp)
}
# ordered response
useOrd <- function(xReg, ndata, wy, factors, step, robust,form){
factors <- Inter(list(colnames(xReg),factors))
if(length(factors)>0){
for(f in 1:length(factors)){
if(any(summary(xReg[,factors[f]])==0)){
xReg <- xReg[,-which(colnames(xReg)==factors[f])]
ndata <- ndata[,-which(colnames(ndata)==factors[f])]
}
}
}
form <- form[form%in%names(xReg)]
if(length(form)>0)
form <- as.formula(paste("y ~",paste(form,collapse="+")))
else
form <- y~.
co <- capture.output(multimod <- polr(form, data=xReg))
if(step){
multimod <- stepAIC(multimod,xReg)
}
imp <- predict(multimod, newdata=ndata)
return(imp)
}
# binary response
useB <- function(xReg, ndata, wy,factors,step,robust,form){
factors <- Inter(list(colnames(xReg),factors))
#TODO: Faktoren mit 2 Levels und nicht Levels 0 1, funktionieren NICHT!!!!
if(length(factors)>0){
for(f in 1:length(factors)){
if(any(summary(xReg[,factors[f]])==0)){
xReg <- xReg[,-which(colnames(xReg)==factors[f])]
ndata <- ndata[,-which(colnames(ndata)==factors[f])]
}
}
}
form <- form[form%in%names(xReg)]
if(length(form)>0)
form <- as.formula(paste("y ~",paste(form,collapse="+")))
else
form <- y~.
if(!robust)
glm.bin <- glm(form , data=xReg, family="binomial")
else{
# glm.bin <- BYlogreg(x0=xReg[,-1], xReg[,1]) ## BYlogreg kann niemals funken
glm.bin <- glm(form , data=xReg, family="binomial")
# if(exists("glm.bin"))
# rm(glm.bin)
# try(glm.bin <- glmrob(y ~ . , data=xReg, family="binomial"),silent=TRUE)
# if(exists("glm.bin"))
# glm.bin$rank <- ncol(xReg)
# else
# glm.bin <- glm(y ~ . , data=xReg, family="binomial")
}
# op <- options() # Alles auskommentiert, weil VGAM draussen
# options(show.error.messages=FALSE)
# try(detach(package:VGAM))
# options(op)
if(step)
glm.bin <- stepAIC(glm.bin,trace=-1)
imp <- predict(glm.bin, newdata=ndata, type="response")
imp[imp < 0.5] <- 0
imp[imp >= 0.5] <- 1
# library(VGAM, warn.conflicts = FALSE, verbose=FALSE)
return(imp)
}
|
/R/irmi.R
|
no_license
|
ashimb9/VIM
|
R
| false
| false
| 34,489
|
r
|
#' Iterative robust model-based imputation (IRMI)
#'
#' In each step of the iteration, one variable is used as a response variable
#' and the remaining variables serve as the regressors.
#'
#' The method works sequentially and iteratively. The method can deal with a
#' mixture of continuous, semi-continuous, ordinal and nominal variables
#' including outliers.
#'
#' A full description of the method can be found in the mentioned reference.
#'
#' @param x data.frame or matrix
#' @param eps threshold for convergence
#' @param maxit maximum number of iterations
#' @param mixed column index of the semi-continuous variables
#' @param mixed.constant vector with length equal to the number of
#' semi-continuous variables specifying the point of the semi-continuous
#' distribution with non-zero probability
#' @param count column index of count variables
#' @param step a stepwise model selection is applied when the parameter is set
#' to TRUE
#' @param robust if TRUE, robust regression methods will be applied
#' @param takeAll takes information of (initialised) missings in the response
#' as well for regression imputation.
#' @param noise irmi has the option to add a random error term to the imputed
#' values, this creates the possibility for multiple imputation. The error term
#' has mean 0 and variance corresponding to the variance of the regression
#' residuals.
#' @param noise.factor amount of noise.
#' @param force if TRUE, the algorithm tries to find a solution in any case,
#' possible by using different robust methods automatically.
#' @param robMethod regression method when the response is continuous.
#' @param force.mixed if TRUE, the algorithm tries to find a solution in any
#' case, possible by using different robust methods automatically.
#' @param addMixedFactors if TRUE, an additional 0/1 indicator variable
#' (value equal to the mixed constant or not) is added for each mixed variable
#' as explanatory (X) variable in the regression models
#' @param modelFormulas a named list with the names of the variables for the
#' rhs of the formulas, which must contain a rhs formula for each variable with
#' missing values, e.g. list(y1=c("x1","x2"),y2=c("x1","x3"))
#'
#' @param mi number of multiple imputations.
#' @param trace Additional information about the iterations when trace equals
#' TRUE.
#' @param init.method Method for initialization of missing values (kNN or
#' median)
#' @param multinom.method Method for estimating the multinomial models
#' (current default and only available method is multinom)
#' @return the imputed data set.
#' @author Matthias Templ, Alexander Kowarik
#' @seealso \code{\link[mi]{mi}}
#' @references M. Templ, A. Kowarik, P. Filzmoser (2011) Iterative stepwise
#' regression imputation using standard and robust methods. \emph{Journal of
#' Computational Statistics and Data Analysis}, Vol. 55, pp. 2793-2806.
#' @references A. Kowarik, M. Templ (2016) Imputation with
#' R package VIM. \emph{Journal of
#' Statistical Software}, 74(7), 1-16.
#' @keywords manip
#' @examples
#'
#' data(sleep)
#' irmi(sleep)
#'
#' data(testdata)
#' imp_testdata1 <- irmi(testdata$wna,mixed=testdata$mixed)
#'
#' # mixed.constant != 0 (-10)
#' testdata$wna$m1[testdata$wna$m1==0] <- -10
#' testdata$wna$m2 <- log(testdata$wna$m2+0.001)
#' imp_testdata2 <- irmi(testdata$wna,mixed=testdata$mixed,mixed.constant=c(-10,log(0.001)))
#' imp_testdata2$m2 <- exp(imp_testdata2$m2)-0.001
#'
#' #example with fixed formulas for the variables with missing
#' form=list(
#' NonD=c("BodyWgt","BrainWgt"),
#' Dream=c("BodyWgt","BrainWgt"),
#' Sleep=c("BrainWgt"),
#' Span=c("BodyWgt"),
#' Gest=c("BodyWgt","BrainWgt")
#' )
#' irmi(sleep,modelFormulas=form,trace=TRUE)
#'
#' # Example with ordered variable
#' td <- testdata$wna
#' td$c1 <- as.ordered(td$c1)
#' irmi(td)
#'
#' @export irmi
#' @S3method irmi data.frame
#' @S3method irmi survey.design
#' @S3method irmi default
# S3 generic for IRMI imputation; dispatches on the class of `x`
# (data.frame, survey.design, or default). All arguments are documented
# in the roxygen block above.
irmi <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  UseMethod("irmi", x)
}
# irmi method for data.frame input: delegates directly to the workhorse.
irmi.data.frame <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  irmi_work(x, eps, maxit, mixed, mixed.constant, count, step,
      robust, takeAll, noise, noise.factor, force,
      robMethod, force.mixed, mi, addMixedFactors,
      trace,init.method,modelFormulas=modelFormulas,multinom.method=multinom.method)
}
# irmi method for survey.design objects: imputes only the $variables slot,
# keeps the design information intact, and records the original call.
irmi.survey.design <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  x$variables <- irmi_work(x$variables, eps, maxit, mixed, mixed.constant, count, step,
      robust, takeAll, noise, noise.factor, force,
      robMethod, force.mixed, mi, addMixedFactors,
      trace,init.method,modelFormulas=modelFormulas,multinom.method=multinom.method)
  # store the user's original call (one frame up) on the design object
  x$call <- sys.call(-1)
  x
}
# Fallback irmi method: coerces the input (e.g. a matrix) to a data.frame
# and delegates to the workhorse.
irmi.default <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom") {
  irmi_work(as.data.frame(x), eps, maxit, mixed, mixed.constant, count, step,
      robust, takeAll, noise, noise.factor, force,
      robMethod, force.mixed, mi, addMixedFactors,
      trace,init.method,modelFormulas=modelFormulas,multinom.method=multinom.method)
}
## Internal workhorse of irmi(): iterative (robust) model-based imputation.
## All arguments are documented on the exported wrapper irmi(). The function
## (1) validates and normalises the input, (2) detects the type of every
## column, (3) initialises missings (kNN or median/mode), (4) iterates
## regression imputation variable by variable until the change between two
## passes falls below `eps` (or maxit is reached), and (5) optionally adds
## noise or produces `mi` multiply imputed data sets.
## Returns the imputed data.frame invisibly (a list of data.frames if mi > 1).
`irmi_work` <- function(x, eps=5, maxit=100, mixed=NULL,mixed.constant=NULL, count=NULL, step=FALSE,
    robust=FALSE, takeAll=TRUE,
    noise=TRUE, noise.factor=1, force=FALSE,
    robMethod="MM", force.mixed=TRUE, mi=1,
    addMixedFactors=FALSE, trace=FALSE,init.method="kNN",modelFormulas=NULL,multinom.method="multinom"){
  #Authors: Alexander Kowarik and Matthias Templ, Statistics Austria, GPL 2 or newer, version: 15. Nov. 2012
  #object mixed conversion into the right format (vector of variable names of type mixed)
  #TODO: Data sets with variables "y" might fail
  if(trace){
    cat("Method for multinomial models:",multinom.method,"\n")
  }
  ## ---- input checks ----
  if(!is.data.frame(x)){
    if(is.matrix(x))
      x <- as.data.frame(x)
    else
      stop("data frame must be provided")
  }
  if(!is.null(mixed.constant)&&!is.null(mixed)){
    if(length(mixed)!=length(mixed.constant))
      stop("The length of 'mixed' and 'mixed.constant' differ.")
  }
  ## 'mixed' may be logical, numeric index or character; normalise to names
  if(!is.null(mixed)){
    if(!is.character(mixed)){
      if(is.logical(mixed)){
        if(length(mixed)!=length(colnames(x)))
          stop("the mixed parameter is not defined correct.")
        mixed <- colnames(x)[mixed]
      } else if(is.numeric(mixed)){
        if(max(mixed)>length(colnames(x)))
          stop("the mixed parameter is not defined correct.")
        mixed <- colnames(x)[mixed]
      }
    }else if(!all(mixed%in%colnames(x))){
      stop("Not all mixed variables are found in the colnames of the input dataset.")
    }
  }
  ## 'count' is normalised to variable names in the same way
  if(!is.null(count)){
    if(!is.character(count)){
      if(is.logical(count)){
        if(length(count)!=length(colnames(x)))
          stop("the count parameter is not defined correct.")
        count <- colnames(x)[count]
      }else if(is.numeric(count)){
        if(max(count)>length(colnames(x)))
          stop("the count parameter is not defined correct.")
        count <- colnames(x)[count]
      }
    }else if(!all(count%in%colnames(x))){
      stop("Not all count variables are found in the colnames of the input dataset.")
    }
  }
  ## ---- type detection per column ----
  # class1(): first entry of the class vector (e.g. "ordered" for ordered factors)
  class1 <- function(x) class(x)[1]
  types <- lapply(x,class1)
  # if(any(types=="ordered")){
  #   for(i in which(types=="ordered")){
  #     msg <- paste(names(x)[i]," is defined as ordered,but irmi cannot deal with ordered variables
  #             at the moment, therefore the ordered attribute is set to FALSE \n",sep="")
  #     cat(msg)
  #     x[,i] <- factor(x[,i],ordered=FALSE)
  #     types[i] <- "factor"
  #   }
  # }
  ## user-declared mixed/count columns override the detected class
  types[colnames(x)%in%mixed] <- "mixed"
  types[colnames(x)%in%count] <- "count"
  attributes(types)$names <-NULL
  types <- unlist(types)
  ## character columns cannot be modelled directly; coerce them to factors
  if(any(types=="character")){
    chrInd <- which(types=="character")
    warning("At least one character variable is converted into a factor")
    for(ind in chrInd){
      x[,ind] <- as.factor(x[,ind])
      types[ind] <- "factor"
    }
  }
  #determine factor type: dichotomous or polytomous
  #detect problematic factors
  indFac <- which(types == "factor")
  for(ind in indFac){
    #get number of levels
    fac_nlevels = nlevels(x[[ind]])
    if (fac_nlevels == 2)
      types[ind] <- "binary"
    else if (fac_nlevels > 2)
      types[ind] <- "nominal"
    else stop(sprintf("factor with less than 2 levels detected! - `%s`", names(x)[ind]))
  }
  ## overview of variable types and missing counts (printed when trace=TRUE)
  missingSummary <- cbind(types,apply(x,2,function(x)sum(is.na(x))))
  colnames(missingSummary) <- c("type","#missing")
  #  save(x, file="xtest.RData")
  N <- n <- dim(x)[1]
  P <- dim(x)[2]
  ## error management:
  if(dim(x)[2] < 2) stop("Less than 2 variables included in x.")
  if(step&&robust)
    stop("robust stepwise is not yet implemented")
  if(!any(is.na(x))) cat("No missings in x. Nothing to impute")
  if(any(apply(x, 1, function(x) all(is.na(x))))) stop("Unit non-responses included in x.")
  ## mixed into logical vector:
  if(!is.logical(mixed) & !is.null(mixed)){
    ind <- rep(FALSE, P)
    ind[mixed] <- TRUE
    mixedlog <- ind
  } else mixedlog <- mixed
  if(!is.character(mixed)){
    mixed <- colnames(x)[mixed]
  }
  if(!is.character(count)){
    count <- colnames(x)[count]
  }
  #  if(!is.null(mixed) && length(mixed) != P) stop(paste("Length of mixed must either be NULL or", P))
  ## count into logical vector:
  #if(!is.logical(count) & !is.null(count)){
  #  ind <- rep(FALSE, P)
  #  ind[which(colnames(x) == count)] <- TRUE
  #  countlog <- ind
  #} else countlog <- count
  #  if(is.null(mixed)) mixed <- rep(FALSE, P)
  #  if(is.null(count)) count <- rep(FALSE, P)
  #  if(!is.null(count) && length(count) != P) stop(paste("Length of mixed must either be NULL or", P))
  #if(any(countlog == mixedlog) && countlog == TRUE) stop(paste("you declined variable", which(countlog==mixedlog && countlog==TRUE), "to be both, count and mixed"))
  if(length(Inter(list(count,mixed)))>0) stop(paste("you declined a variable to be both, count and mixed"))
  #for(i in which(countlog)){
  #  class(x[,i]) <- c("count", "numeric")
  #  }
  ## check for factors in x
  factors <- vector()
  for(i in 1:ncol(x)){
    factors <- c(factors,is.factor(x[,i]))
  }
  ## Recode the levels of a factor to 1:number of levels
  ## (original levels are stored and restored at the very end)
  if(any(factors)){
    factors <- colnames(x)[factors]
    origLevels <- list()
    for(f in 1:length(factors)){
      origLevels[[f]] <- levels(x[,factors[f]])
      levels(x[,factors[f]]) <- 0:(length(origLevels[[f]])-1)
    }
  } else factors <- character(0)
  VarswithNA <-vector()
  ## index for missingness
  w2 <- is.na(x)
  ## variables that include missings
  for(i in seq(P)){
    if(anyNA(x[,i]))
      VarswithNA <- c(VarswithNA,i)
  }
  ## record the number of digits of count variables, so they can be rounded
  ## back after the mean imputation done in initialise():
  ndigitsCount <- apply(x[,types=="count", drop=FALSE], 2,
      function(x){
        x <- as.character(x)
        max(unlist(lapply(strsplit(x, "\\."), function(x) ifelse(length(x) > 1, nchar(strsplit(x, "\\.")[2]), 0))))
      })
  ## initialise all missing values (kNN or median/mode)
  #for( j in 1:ncol(x) ) {
  #print(paste("HIER:", j))
  x <- initialise(x,mixed=mixed,method=init.method,mixed.constant=mixed.constant)
  #}
  ## round count variables:
  j <- 0
  for( i in which(types=="count")){
    j <- j+1
    x[,i] <- round(x[,i], ndigitsCount[j])
  }
  if(trace) print(head(x))
  mixedTF <- FALSE
  mixedConstant <- 0
  ### outer loop: iterate until the change between two passes drops below eps
  d <- 99999
  it <- 0
  while(d > eps && it < maxit){
    it = it + 1
    if(trace)
      cat("Iteration",it,"\n")
    xSave <- x
    ## inner loop: re-impute each variable with missings in turn
    for(i in VarswithNA){
      if(trace){
        print(paste("inner loop:",i))
        if(Sys.info()[1] == "Windows") flush.console()
      }
      # current response (yPart), its missing positions (wy) and regressors
      yPart <- x[, i, drop=FALSE]
      wy <- which(w2[,i])
      xPart <- x[, -i, drop=FALSE]
      ## --- Start Additonal xvars for mixed vars
      ## optionally add a 0/1 indicator (value equals the mixed constant)
      ## for each mixed regressor
      if(!is.null(mixed)&&addMixedFactors){
        if(any(names(xPart)%in%mixed)){
          mixedIndex <- which(names(xPart)%in%mixed)
          for(ii in 1:length(mixedIndex)){
            namenew <- paste(names(xPart)[mixedIndex[ii]],"ADDMIXED",sep="")
            if(is.null(mixed.constant))
              xPart[,namenew] <- as.numeric(xPart[,mixedIndex[ii]]==0)
            else
              xPart[,namenew] <- as.numeric(xPart[,mixedIndex[ii]]==mixed.constant[ii])
          }
        }
      } ## end additional xvars for mixed vars ---
      if(!takeAll){
        dataForReg <- data.frame(cbind(yPart[-wy,], xPart[-wy,])) ## part where y has no missings
      } else{
        dataForReg <- data.frame(cbind(yPart, xPart))
      }
      ## is the current response a mixed variable? if so, with which constant?
      if(!is.null(mixed)){
        if(names(x)[i] %in% mixed){
          mixedTF <- TRUE
          if(is.null(mixed.constant)){
            mixedConstant <- 0
          }else{
            mixedConstant <- mixed.constant[which(mixed==names(x)[i])]
          }
        } else{
          mixedTF <- FALSE
        }
      }
      colnames(dataForReg)[1] <- "y"
      new.dat <- data.frame(cbind(rep(1,length(wy)), xPart[wy,,drop=FALSE]))
      #print(attributes(dataForReg$y)$cn)
      if(trace){
        print(types[[i]])
      }
      ## map the detected variable type to the model used by getM()
      if( types[i]=="integer"||types[i]=="numeric" || types[i] =="mixed"){ ## todo: move this outside the loop!!
        meth = "numeric"
      } else if( types[i]=="binary" ){
        meth = "bin"
      } else if( types[i]=="nominal" ){
        meth = "factor"
      } else if( types[i]=="count"){
        meth = "count"
      }else if( types[i]=="ordered" ){
        meth = "ordered"
      }
      ## replace initialised missings:
      if(length(wy) > 0){
        #idataForReg <<- dataForReg
        #indata <<- new.dat[,-1,drop=FALSE]
        #imeth <<- meth
        #ii <<- i
        #iindex <<- wy
        #imixedTF<<- mixedTF
        #ifactors <<- factors
        #istep <<- step
        #irobust <<- robust
        #inoise <<- FALSE
        #itypes <<- types
        #debug(getM)
        if(trace)
          print(meth)
        #print(lapply(dataForReg, class))
        #if(i==10) stop("ZUR KONTROLLE i=10")
        ## use the user-supplied rhs for this variable, or all other columns
        if(!is.null(modelFormulas)){
          TFform <- names(modelFormulas)==colnames(x)[i]
          if(any(TFform))
            activeFormula <- modelFormulas[[which(TFform)]]
          else
            activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        }else
          activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        if(trace){
          print(paste("formula used:",paste(colnames(x)[i],"~",paste(activeFormula,collapse="+"))))
          if(Sys.info()[1] == "Windows") flush.console()
        }
        x[wy,i] <- getM(xReg=dataForReg, ndata=new.dat[,-1,drop=FALSE], type=meth,
            index=wy, mixedTF=mixedTF,mixedConstant=mixedConstant, factors=factors, step=step,
            robust=robust, noise=FALSE, force=force, robMethod,form=activeFormula,multinom.method=multinom.method)
        #if(!testdigits(x$x5)) stop()
      }
    } ## end inner loop
    ## convergence measure: squared change of continuous/mixed columns plus
    ## the number of changed cells of all other columns
    d <- 0
    if(any(types%in%c("numeric","mixed")))
      d <- sum((xSave[,types%in%c("numeric","mixed")] - x[,types%in%c("numeric","mixed")])^2, na.rm=TRUE) #todo: treat factors differently.
    if(any(!types%in%c("numeric","mixed")))
      d <- d + sum(xSave[,!types%in%c("numeric","mixed")]!=x[,!types%in%c("numeric","mixed")])
    flush.console()
    if(trace){
      print(paste("it =",it,", Wert =",d))
      print(paste("eps", eps))
      print(paste("test:", d > eps))
    }
  } ## end outer loop
  ## recompute and (optionally) report the final convergence measure
  if( it > 1 ){
    d <- 0
    if(any(types%in%c("numeric","mixed")))
      d <- sum((xSave[,types%in%c("numeric","mixed")] - x[,types%in%c("numeric","mixed")])^2, na.rm=TRUE) #todo: treat factors differently.
    if(any(!types%in%c("numeric","mixed")))
      d <- d + sum(xSave[,!types%in%c("numeric","mixed")]!=x[,!types%in%c("numeric","mixed")])
    if(trace){
      if( it < maxit ){
        print(paste(d, "<", eps, "= eps")); print(paste(" --> finished after", it, "iterations"))
      } else if (it == maxit){
        print("not converged...");print(paste(d, "<", eps, "= eps"))
      }
    }
  }
  ### Add NOISE:
  ### A last run with building the model and adding noise...
  if(noise && mi==1){
    for(i in seq(P)){
      flush.console()
      yPart <- x[, i, drop=FALSE]
      wy <- which(w2[,i])
      xPart <- x[, -i, drop=FALSE]
      if(!takeAll){
        dataForReg <- data.frame(cbind(yPart[-wy,], xPart[-wy,])) ## part where y has no missings
      }else{
        dataForReg <- data.frame(cbind(yPart, xPart))
      }
      if(!is.null(mixed)){
        if(names(x)[i] %in% mixed){
          mixedTF <- TRUE
          if(is.null(mixed.constant)){
            mixedConstant <- 0
          }else{
            mixedConstant <- mixed.constant[which(mixed==names(x)[i])]
          }
        }else{
          mixedTF <- FALSE
        }
      }
      colnames(dataForReg)[1] <- "y"
      new.dat <- data.frame(cbind(rep(1,length(wy)), xPart[wy,,drop=FALSE]))
      if( types[i]=="numeric" || types[i] =="mixed"){ ## todo: move this outside the loop!!
        meth = "numeric"
      } else if( types[i]=="binary" ){
        meth = "bin"
      } else if( types[i]=="nominal" ){
        meth = "factor"
      } else if( types[i]=="count"){
        meth = "count"
      } else if( types[i]=="ordered"){
        meth = "ordered"
      }
      if(!is.null(modelFormulas)){
        TFform <- names(modelFormulas)==colnames(x)[i]
        if(any(TFform))
          activeFormula <- modelFormulas[[which(TFform)]]
        else
          activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
      }else
        activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
      if(length(wy) > 0) x[wy,i] <- getM(xReg=dataForReg, ndata=new.dat[,-1,drop=FALSE],
            type=meth, index=wy,mixedTF=mixedTF,mixedConstant=mixedConstant,factors=factors,
            step=step,robust=robust,noise=TRUE,noise.factor=noise.factor,force=force,robMethod,form=activeFormula,multinom.method=multinom.method)
    }
  }
  ## End NOISE
  #if(!testdigits(x$x5)) stop("s121212121212asasa\n")
  ## Begin multiple imputation
  if(mi>1&&!noise){
    cat("Noise option is set automatically to TRUE")
    noise <- TRUE
  }
  if(mi>1){
    ## draw `mi` imputed data sets, each a noisy re-imputation starting from
    ## the converged single imputation
    mimp <- list()
    xSave1 <- x
    for(m in 1:mi){
      for(i in seq(P)){
        flush.console()
        yPart <- x[, i, drop=FALSE]
        wy <- which(w2[,i])
        xPart <- x[, -i, drop=FALSE]
        if(!takeAll){
          dataForReg <- data.frame(cbind(yPart[-wy,], xPart[-wy,])) ## part where y has no missings
        }else{
          dataForReg <- data.frame(cbind(yPart, xPart))
        }
        if(!is.null(mixed)){
          if(names(x)[i] %in% mixed){
            mixedTF <- TRUE
            if(is.null(mixed.constant))
              mixedConstant <- 0
            else
              mixedConstant <- mixed.constant[which(mixed==names(x)[i])]
          }else{
            mixedTF <- FALSE
          }
        }
        colnames(dataForReg)[1] <- "y"
        new.dat <- data.frame(cbind(rep(1,length(wy)), xPart[wy,,drop=FALSE]))
        # NOTE(review): class() may return a vector of length > 1 (e.g. for
        # ordered factors), which makes this if() condition fragile under
        # R >= 4.2 -- confirm; the inner loop above uses types[i] instead
        if( class(dataForReg$y) == "numeric" ) meth = "numeric" else if( class(dataForReg$y) == "factor" & length(levels(dataForReg$y))==2) meth = "bin" else meth = "factor"
        ## replace initialised missings:
        if(!is.null(modelFormulas)){
          TFform <- names(modelFormulas)==colnames(x)[i]
          if(any(TFform))
            activeFormula <- modelFormulas[[which(TFform)]]
          else
            activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        }else
          activeFormula <- names(dataForReg)[names(dataForReg)!="y"]
        if(length(wy) > 0) x[wy,i] <- getM(xReg=dataForReg, ndata=new.dat[,-1,drop=FALSE], type=meth, index=wy,mixedTF=mixedTF,mixedConstant=mixedConstant,
              factors=factors,step=step,robust=robust,noise=TRUE,
              noise.factor=noise.factor,force=force,robMethod,form=activeFormula,multinom.method=multinom.method)
      }
      mimp[[m]] <- x
      x <- xSave1
    }
    x <- mimp
  }
  ## End Multiple Imputation
  ## Recode factors to their original coding
  if(length(factors)>0){
    for(f in 1:length(factors)){
      #    cat("vorher\n")
      #    print(str(x))
      #    print(origLevels[[f]])
      if(mi>1){
        for(mii in 1:mi)
          levels(x[[mii]][,factors[f]]) <- origLevels[[f]]
      }else{
        levels(x[,factors[f]]) <- origLevels[[f]]
      }
      #    cat("nachher\n")
    }
  }
  if(trace){
    cat("Imputation performed on the following data set:\n")
    print(missingSummary)
  }
  ## invisibly return the imputed data (a list of data sets if mi > 1)
  invisible(x)
}
### utility functions
# TRUE if any element of X is NA.
# NOTE(review): this masks base::anyNA() (available since R 3.1.0); the local
# definition is behaviourally equivalent and kept for compatibility.
anyNA <- function(X) any(is.na(X))
# Union over a list of vectors, returned as the sorted unique character
# values (via factor levels).
Unit <- function(A) UseMethod("Unit")
Unit.list <- function(A){ # Units a list of vectors into one vector
  # concatenate all elements in one call instead of growing the result with
  # c() inside a loop (which is O(n^2))
  a <- do.call(c, A)
  levels(as.factor(a))
}
# Intersection over a list of vectors, returned as the sorted unique
# character values (via factor levels).
Inter <- function(A) UseMethod("Inter")
Inter.list <- function(A){ # common entries from a list of vectors
  a <- Unit(A)
  TF <- rep(TRUE,length(a))
  # seq_along() instead of 1:length(a): avoids the bogus c(1, 0) iteration
  # when 'a' is empty (all inputs NULL/empty)
  for(i in seq_along(a)){
    for(j in seq_along(A)){
      TF[i] <- TF[i] && a[i] %in% A[[j]]
    }
  }
  levels(as.factor(a[TF]))
}
#' Initialization of missing values
#'
#' Rough estimation of missing values in a vector according to its type.
#'
#' Missing values are imputed with the mean for vectors of class
#' \code{"numeric"}, with the median for vectors of class \code{"integer"}, and
#' with the mode for vectors of class \code{"factor"}. Hence, \code{x} should
#' be prepared in the following way: assign class \code{"numeric"} to numeric
#' vectors, assign class \code{"integer"} to ordinal vectors, and assign class
#' \code{"factor"} to nominal or binary vectors.
#'
#' @param x a vector.
#' @param mixed a character vector containing the names of variables of type
#' mixed (semi-continous).
#' @param method Method used for Initialization (median or kNN)
#' @param mixed.constant vector with length equal to the number of
#' semi-continuous variables specifying the point of the semi-continuous
#' distribution with non-zero probability
#' @return the initialized vector.
#' @note The function is used internally by some imputation algorithms.
#' @author Matthias Templ, modifications by Andreas Alfons
#' @keywords manip
#' @export initialise
`initialise` <- function(x,mixed,method="kNN",mixed.constant=NULL){
  if(method=="median"){
    for( j in seq_len(ncol(x)) ) {
      xx <- x[,j]
      # inherits() instead of class(xx) == "...": class() can return a vector
      # (e.g. c("ordered", "factor")), which is an error in if() from R 4.2 on
      if(inherits(xx, "numeric")) {xx <- as.vector(impute(as.matrix(xx), "median"))}
      if(inherits(xx, "integer")) {xx <- as.vector(impute(as.matrix(xx), "median"))}
      if(inherits(xx, "factor")) {
        # mode imputation; assigning the (existing) modal level directly
        # preserves the original levels and any ordering of the factor
        xx[which(is.na(xx))] <- names(which.max(table(xx)))
      }
      x[,j] <- xx
    }
  }else{
    # default: k nearest neighbour initialisation (VIM::kNN)
    x <- invisible(kNN(x,imp_var=FALSE,mixed=mixed,mixed.constant=mixed.constant))
  }
  return(x)
}
## switch function to automatically select the imputation routine matching
## the response type of the currently imputed variable.
## NOTE(review): the default multinom.method="mnlogit" differs from the
## "multinom" default of the irmi() wrappers; all internal callers pass the
## argument explicitly, so the default is kept for interface stability.
getM <- function(xReg, ndata, type, index,mixedTF,mixedConstant,factors,step,robust,noise,noise.factor=1,force=FALSE, robMethod="MM",form=NULL,multinom.method="mnlogit") {
  switch(type,
      numeric = useLM(xReg, ndata, index,mixedTF,mixedConstant,factors,step,robust,noise,noise.factor,force,robMethod,form=form),
      factor = useMN(xReg, ndata, index,factors,step,robust,form=form,multinom.method=multinom.method),
      bin = useB(xReg, ndata, index,factors,step,robust,form=form),
      count = useGLMcount(xReg, ndata, index, factors, step, robust,form=form),
      ordered = useOrd(xReg, ndata, index,factors,step,robust,form=form),
      # fail loudly for unknown types instead of silently returning NULL
      stop("getM: unknown response type '", type, "'")
  )
}
### LM+GLM --- useLM start
## Imputation of a continuous or semi-continuous (mixed) response.
## For mixed responses a two-step model is used: a logistic model first
## decides whether an observation equals the mixed constant, then a linear
## model (robust on request) predicts the continuous part. With noise=TRUE a
## random error term (scaled by the residual scale and a consistency factor)
## is added, enabling proper/multiple imputation.
## Returns the imputed values for the observations in ndata.
useLM <- function(xReg, ndata, wy, mixedTF,mixedConstant, factors, step, robust, noise, noise.factor, force, robMethod,form){
  n <- nrow(xReg)
  factors <- Inter(list(colnames(xReg),factors))
  ## for semicontinuous variables
  if(mixedTF){
    delFactors <- vector()
    if(length(factors)>0){
      for(f in 1:length(factors)){
        # factor regressors with an empty level would break the model fit
        if(any(summary(xReg[,factors[f]])==0)){
          xReg <- xReg[,-which(colnames(xReg)==factors[f])]
          ndata <- ndata[,-which(colnames(ndata)==factors[f])]
          delFactors <- c(delFactors,factors[f])
        }
      }
    }
    ## step 1: binary response "equals mixed constant" (0) vs "continuous" (1)
    xReg1 <- xReg
    xReg1$y[xReg$y==mixedConstant] <- 0
    xReg1$y[xReg$y!=mixedConstant] <- 1
    form <- form[form%in%names(xReg1)]
    # NOTE(review): unlike useB/useMN this branch tests class(form) instead of
    # length(form); if all requested regressors were dropped above, the
    # paste() below yields "y ~ " and as.formula() fails -- confirm intended
    if(class(form)!="formula")
      form <- as.formula(paste("y ~",paste(form,collapse="+")))
    else
      form <- y~.
    if(!robust)
      glm.bin <- glm(form , data=xReg1, family="binomial")
    else{
      # NOTE(review): the "robust" branch currently fits the same plain GLM
      glm.bin <- glm(form , data=xReg1, family="binomial")
    }
    # if VGAM will be chosen instead of multinom:
    #    op <- options() # all commented out because VGAM was dropped!
    #    options(show.error.messages=FALSE)
    #    try(detach(package:VGAM))
    #    options(op)
    if(step)
      glm.bin <- stepAIC(glm.bin,trace=-1)
    ## imputation: classify with a 0.5 probability cutoff
    imp <- predict(glm.bin, newdata=ndata, type="response")
    imp[imp < 0.5] <- 0
    imp[imp >= 0.5] <- 1
    ## step 2 uses only the observations with a continuous (non-constant) y
    xReg <- xReg[xReg$y != mixedConstant,]
    factors2 <- factors[!factors%in%delFactors]
    if(length(factors2) > 0){
      # subsetting may have emptied further factor levels; drop those too
      for(f in 1:length(factors2)){
        if(any(summary(xReg[,factors2[f]])==0)){
          xReg <- xReg[,-which(colnames(xReg)==factors2[f])]
          ndata <- ndata[,-which(colnames(ndata)==factors2[f])]
        }
      }
    }
  ## for continuous variables:
  } else{
    if(length(factors)>0){
      delFactors <- vector()
      for(f in 1:length(factors)){
        if(any(summary(xReg[,factors[f]])==0)){
          xReg <- xReg[,-which(colnames(xReg)==factors[f])]
          ndata <- ndata[,-which(colnames(ndata)==factors[f])]
          delFactors <- c(delFactors,factors[f])
        }
      }
    }
    # purely continuous response: every ndata row gets a continuous prediction
    imp <- rep(1,nrow(ndata))
  }
  ##Two-Step
  ## build (or repair) the formula for the continuous model
  if(class(form)!="formula"){
    form <- form[form%in%names(xReg)]
    if(length(form)>0)
      form <- as.formula(paste("y ~",paste(form,collapse="+")))
    else
      form <- y~.
  }else{
    # drop rhs variables no longer present in xReg from an existing formula
    formVars <- all.vars(form)[-1]
    if(any(!formVars%in%colnames(xReg))){
      formVars <- formVars[formVars%in%colnames(xReg)]
      form <- as.formula(paste("y ~",paste(formVars,collapse="+")))
    }
  }
  if(!robust){
    glm.num <- glm(form, data=xReg, family="gaussian")
    #cat("not ROBUST!!!!!!!!\n")
  } else{
    if(exists("glm.num"))
      rm(glm.num)
    if(force){
      # force=TRUE: try increasingly permissive robust fits until one succeeds
      try(glm.num <- rlm(form , data=xReg,method="MM"),silent=TRUE)
      if(!exists("glm.num")){
        try(glm.num <- lmrob(form , data=xReg),silent=TRUE)
        if(!exists("glm.num")){
          glm.num <- rlm(form , data=xReg,method="M")
          if(!exists("glm.num")){
            glm.num <- glm(form, data=xReg, family="gaussian")
          }
        }
      }
    } else{
      # user-selected robust regression method
      if(robMethod=="lmrob"){
        glm.num <- lmrob(form , data=xReg)
      }else if(robMethod=="lqs"){
        glm.num <- lqs(form , data=xReg)
      }else{
        glm.num <- rlm(form , data=xReg,method=robMethod)
      }
    }
  }
  #  op <- options() # all commented out because VGAM was dropped
  #  options(show.error.messages=FALSE)
  #  try(detach(package:VGAM))
  #  options(op)
  if(step){
    glm.num <- stepAIC(glm.num,trace=-1)
  }
  if(noise){
    ## add random error ~ N(0, residual scale * consistency factor) to the
    ## predictions of the continuous part
    if(!robust){
      consistencyFactor <- sqrt((nrow(ndata[imp==1,,drop=FALSE])/n + 1))#*n/(n+1)
      nout <- nrow(ndata[imp==1,,drop=FALSE])
      p.glm.num <- predict(glm.num, newdata=ndata[imp==1,,drop=FALSE],se.fit=TRUE)
      if(is.nan(p.glm.num$residual.scale)){
        warning("The residual scale could not be computed, probably due to a rank deficient model. It is set to 1\n")
        p.glm.num$residual.scale <- 1
      }
      imp2 <- p.glm.num$fit+noise.factor*rnorm(length(p.glm.num$fit),0,p.glm.num$residual.scale*consistencyFactor)
    } else{
      nout <- nrow(ndata[imp==1,,drop=FALSE])
      consistencyFactor <- sqrt((nrow(ndata[imp==1,,drop=FALSE])/n + 1))#*(n)/(n+1))
      p.glm.num <- predict(glm.num, newdata=ndata[imp==1,,drop=FALSE])
      if(is.nan(glm.num$s)){
        warning("The residual scale could not be computed, probably due to a rank deficient model. It is set to 1\n")
        glm.num$s <- 1
      }
      imp2 <- p.glm.num + noise.factor*rnorm(length(p.glm.num),0,glm.num$s*consistencyFactor)
    }
  } else
    imp2 <- predict(glm.num, newdata=ndata[imp==1,,drop=FALSE])
  ## combine: constant part (step-1 prediction 0) and continuous predictions
  imp3 <- imp
  imp3[imp==0] <- mixedConstant
  imp3[imp==1] <- imp2
  return(imp3)
  #  library(VGAM, warn.conflicts = FALSE, verbose=FALSE)
  # -end useLM-
}
## count data as response: imputation via a Poisson GLM (glmrob if robust).
## xReg:    data.frame with the response in column 'y' plus the regressors.
## ndata:   regressor values of the observations to impute.
## wy:      indices of the missing responses (not used directly here).
## factors: names of factor regressors; factors with an empty level are
##          dropped from model and prediction data (unestimable otherwise).
## step:    stepwise model selection via stepAIC (non-robust fits only).
## robust:  robust Poisson fit via glmrob.
## form:    character vector of regressor names for the model formula.
## Returns the rounded predicted counts for ndata.
useGLMcount <- function(xReg, ndata, wy, factors, step, robust,form){
  # unsupported combination: check before any work is done (&& instead of &)
  if(step && robust) stop("both step and robust equals TRUE not provided")
  factors <- Inter(list(colnames(xReg),factors))
  for(fac in factors){
    # a factor regressor with an empty level would break the model fit
    if(any(summary(xReg[,fac])==0)){
      xReg <- xReg[,-which(colnames(xReg)==fac)]
      ndata <- ndata[,-which(colnames(ndata)==fac)]
    }
  }
  # build the formula from the requested regressors still present in xReg
  form <- form[form%in%names(xReg)]
  if(length(form)>0)
    form <- as.formula(paste("y ~",paste(form,collapse="+")))
  else
    form <- y~.
  if(robust){
    glmc <- glmrob(form, data=xReg, family=poisson)
    # glmrob does not set $rank; fake it so predict() behaves as for glm()
    glmc$rank<-ncol(xReg)
  } else {
    glmc <- glm(form, data=xReg, family=poisson)
  }
  if(step){
    glmc <- stepAIC(glmc, trace=-1)
  }
  round(predict(glmc, newdata=ndata,type="response"))
}
# categorical response: impute a nominal (polytomous) variable via
# multinomial log-linear regression (nnet::multinom)
useMN <- function(xReg, ndata, wy, factors, step, robust,form,multinom.method){
  factors <- Inter(list(colnames(xReg),factors))
  # drop factor regressors containing an empty level -- they would make
  # the multinomial model unestimable
  for(fac in factors){
    if(any(summary(xReg[,fac])==0)){
      xReg  <- xReg[,-which(colnames(xReg)==fac)]
      ndata <- ndata[,-which(colnames(ndata)==fac)]
    }
  }
  # assemble the model formula from the requested regressors still present
  keep <- form[form %in% names(xReg)]
  form <- if(length(keep) > 0){
    as.formula(paste("y ~", paste(keep, collapse="+")))
  } else {
    y ~ .
  }
  if(multinom.method != "multinom"){
    stop("multinom is the only implemented method at the moment!\n")
  }
  # capture.output() swallows the fitting trace of multinom()
  co <- capture.output(
    multimod <- multinom(form, data=xReg, summ=2, maxit=50, trace=FALSE)
  )
  if(step){
    multimod <- stepAIC(multimod, xReg)
  }
  predict(multimod, newdata=ndata)
}
# ordered response: impute an ordered factor via proportional-odds
# logistic regression (MASS::polr)
useOrd <- function(xReg, ndata, wy, factors, step, robust,form){
  factors <- Inter(list(colnames(xReg),factors))
  # factor regressors with an empty level cannot enter the model
  for(fac in factors){
    if(any(summary(xReg[,fac])==0)){
      xReg  <- xReg[,-which(colnames(xReg)==fac)]
      ndata <- ndata[,-which(colnames(ndata)==fac)]
    }
  }
  # model formula from the requested regressors that survived the drop above
  keep <- form[form %in% names(xReg)]
  form <- if(length(keep) > 0){
    as.formula(paste("y ~", paste(keep, collapse="+")))
  } else {
    y ~ .
  }
  # capture.output() silences polr's fitting output
  co <- capture.output(multimod <- polr(form, data=xReg))
  if(step){
    multimod <- stepAIC(multimod, xReg)
  }
  predict(multimod, newdata=ndata)
}
# binary response: impute a dichotomous (0/1) variable via logistic regression
useB <- function(xReg, ndata, wy,factors,step,robust,form){
  factors <- Inter(list(colnames(xReg),factors))
  #TODO: factors with 2 levels that are not coded 0/1 do NOT work!
  # skip factor regressors with an empty level (unestimable model otherwise)
  for(fac in factors){
    if(any(summary(xReg[,fac])==0)){
      xReg  <- xReg[,-which(colnames(xReg)==fac)]
      ndata <- ndata[,-which(colnames(ndata)==fac)]
    }
  }
  # model formula from the requested regressors still present in xReg
  keep <- form[form %in% names(xReg)]
  form <- if(length(keep) > 0){
    as.formula(paste("y ~", paste(keep, collapse="+")))
  } else {
    y ~ .
  }
  # NOTE: the original's robust branch fits the same plain binomial GLM
  # (the robust alternative is commented out), so one fit covers both cases.
  glm.bin <- glm(form, data=xReg, family="binomial")
  if(step)
    glm.bin <- stepAIC(glm.bin, trace=-1)
  # dichotomise the predicted probabilities with a 0.5 cutoff
  pr <- predict(glm.bin, newdata=ndata, type="response")
  pr[pr < 0.5] <- 0
  pr[pr >= 0.5] <- 1
  pr
}
|
# RSelenium syntax cheat sheet
install.packages("RSelenium")
library(RSelenium)
# Connect to a running Selenium server and create a driver object.
remDr <- remoteDriver(remoteServerAddr='localhost', port =4445L, browserName='chrome')
remDr$open()       # open the browser
remDr$getStatus()  # check the browser/server status
#-- Request a page
remDr$navigate("http://www.google.com/ncr")
# Current page URL
remDr$getCurrentUrl()
# Navigate to another site
remDr$navigate("http://www.naver.com")
# Move through the browser history
remDr$goBack()     # go back
remDr$goForward()  # go forward
# Reload the current page
remDr$refresh()
# Find an element on the page
webElem <- remDr$findElement(using = "name", value='q')
# FIX: the method name was misspelled "getElementAtrribute" in the original.
webElem$getElementAttribute('name')
webElem$getElementAttribute('class')
webElem$getElementAttribute('id')
# Using a CSS selector
webElem <- remDr$findElement(using = "css","input[name='q']")
# FIX: findElement() has no `name` argument; pass the selector as `value`.
webElem <- remDr$findElement(using = "css", value = "input[name='q']")
# Using XPath
webElem <- remDr$findElement(using='xpath', 'Xpath copy해서 붙여넣기')
# Send text to an element
webElem <- remDr$findElement(using = "name", value='q')
webElem$sendKeysToElement(list("R Cran"))
## Send a keypress to an element
webElem <- remDr$findElement(using = "name", value='q')
webElem$sendKeysToElement(list("R Cran","\uE007")) # \uE007 is the Enter key
webElem$sendKeysToElement(list("R Cran", key = 'enter'))
webElems <- remDr$findElements(using ='css selector',"h3.r")
webElems
# getElementText(): fetch the text of each matched element
resHeaders <- unlist(lapply(webElems, function(x) {x$getElementText()}))
resHeaders
# Index of the element (among all selector matches) whose text starts
# with 'Available CRAN Packages for'
webElem <- webElems[[which(resHeaders=='Available CRAN Packages for')]]
# Click the matching hyperlink
webElem$clickElement()
remDr$getCurrentUrl()
remDr$getTitle()
# Close the browser
remDr$close()
|
/210317 Selenium.R
|
no_license
|
yschoi9930/RStudy
|
R
| false
| false
| 1,917
|
r
|
# RSelenium syntax cheat sheet
install.packages("RSelenium")
library(RSelenium)
# Connect to a running Selenium server and create a driver object.
remDr <- remoteDriver(remoteServerAddr='localhost', port =4445L, browserName='chrome')
remDr$open()       # open the browser
remDr$getStatus()  # check the browser/server status
#-- Request a page
remDr$navigate("http://www.google.com/ncr")
# Current page URL
remDr$getCurrentUrl()
# Navigate to another site
remDr$navigate("http://www.naver.com")
# Move through the browser history
remDr$goBack()     # go back
remDr$goForward()  # go forward
# Reload the current page
remDr$refresh()
# Find an element on the page
webElem <- remDr$findElement(using = "name", value='q')
# FIX: the method name was misspelled "getElementAtrribute" in the original.
webElem$getElementAttribute('name')
webElem$getElementAttribute('class')
webElem$getElementAttribute('id')
# Using a CSS selector
webElem <- remDr$findElement(using = "css","input[name='q']")
# FIX: findElement() has no `name` argument; pass the selector as `value`.
webElem <- remDr$findElement(using = "css", value = "input[name='q']")
# Using XPath
webElem <- remDr$findElement(using='xpath', 'Xpath copy해서 붙여넣기')
# Send text to an element
webElem <- remDr$findElement(using = "name", value='q')
webElem$sendKeysToElement(list("R Cran"))
## Send a keypress to an element
webElem <- remDr$findElement(using = "name", value='q')
webElem$sendKeysToElement(list("R Cran","\uE007")) # \uE007 is the Enter key
webElem$sendKeysToElement(list("R Cran", key = 'enter'))
webElems <- remDr$findElements(using ='css selector',"h3.r")
webElems
# getElementText(): fetch the text of each matched element
resHeaders <- unlist(lapply(webElems, function(x) {x$getElementText()}))
resHeaders
# Index of the element (among all selector matches) whose text starts
# with 'Available CRAN Packages for'
webElem <- webElems[[which(resHeaders=='Available CRAN Packages for')]]
# Click the matching hyperlink
webElem$clickElement()
remDr$getCurrentUrl()
remDr$getTitle()
# Close the browser
remDr$close()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{pricing}
\alias{pricing}
\title{AWS Price List Service}
\usage{
pricing(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
AWS Price List Service API (AWS Price List Service) is a centralized and
convenient way to programmatically query Amazon Web Services for
services, products, and pricing information. The AWS Price List Service
uses standardized product attributes such as \code{Location},
\verb{Storage Class}, and \verb{Operating System}, and provides prices at the SKU
level. You can use the AWS Price List Service to build cost control and
scenario planning tools, reconcile billing data, forecast future spend
for budgeting purposes, and provide cost benefit analysis that compare
your internal workloads with AWS.
Use \code{GetServices} without a service code to retrieve the service codes
for all AWS services, then \code{GetServices} with a service code to retrieve
the attribute names for that service. After you have the service code
and attribute names, you can use \code{GetAttributeValues} to see what values
are available for an attribute. With the service code and an attribute
name and value, you can use \code{GetProducts} to find specific products that
you're interested in, such as an \code{AmazonEC2} instance, with a
\verb{Provisioned IOPS} \code{volumeType}.
Service Endpoint
AWS Price List Service API provides the following two endpoints:
\itemize{
\item https://api.pricing.us-east-1.amazonaws.com
\item https://api.pricing.ap-south-1.amazonaws.com
}
}
\section{Service syntax}{
\preformatted{svc <- pricing(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=pricing_describe_services]{describe_services} \tab Returns the metadata for one service or a list of the metadata for all services\cr
\link[=pricing_get_attribute_values]{get_attribute_values} \tab Returns a list of attribute values\cr
\link[=pricing_get_products]{get_products} \tab Returns a list of all products that match the filter criteria
}
}
\examples{
\dontrun{
svc <- pricing()
svc$describe_services(
FormatVersion = "aws_v1",
MaxResults = 1L,
ServiceCode = "AmazonEC2"
)
}
}
|
/cran/paws/man/pricing.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 2,511
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{pricing}
\alias{pricing}
\title{AWS Price List Service}
\usage{
pricing(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
AWS Price List Service API (AWS Price List Service) is a centralized and
convenient way to programmatically query Amazon Web Services for
services, products, and pricing information. The AWS Price List Service
uses standardized product attributes such as \code{Location},
\verb{Storage Class}, and \verb{Operating System}, and provides prices at the SKU
level. You can use the AWS Price List Service to build cost control and
scenario planning tools, reconcile billing data, forecast future spend
for budgeting purposes, and provide cost benefit analysis that compare
your internal workloads with AWS.
Use \code{GetServices} without a service code to retrieve the service codes
for all AWS services, then \code{GetServices} with a service code to retrieve
the attribute names for that service. After you have the service code
and attribute names, you can use \code{GetAttributeValues} to see what values
are available for an attribute. With the service code and an attribute
name and value, you can use \code{GetProducts} to find specific products that
you're interested in, such as an \code{AmazonEC2} instance, with a
\verb{Provisioned IOPS} \code{volumeType}.
Service Endpoint
AWS Price List Service API provides the following two endpoints:
\itemize{
\item https://api.pricing.us-east-1.amazonaws.com
\item https://api.pricing.ap-south-1.amazonaws.com
}
}
\section{Service syntax}{
\preformatted{svc <- pricing(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=pricing_describe_services]{describe_services} \tab Returns the metadata for one service or a list of the metadata for all services\cr
\link[=pricing_get_attribute_values]{get_attribute_values} \tab Returns a list of attribute values\cr
\link[=pricing_get_products]{get_products} \tab Returns a list of all products that match the filter criteria
}
}
\examples{
\dontrun{
svc <- pricing()
svc$describe_services(
FormatVersion = "aws_v1",
MaxResults = 1L,
ServiceCode = "AmazonEC2"
)
}
}
|
# Compute and plot 1-to-n year forward rates from previously calculated
# spot rates, using F1n = (Rn * Tn - R1 * T1) / (Tn - T1).
spotRate <- read.csv('spotRate.csv')
show(spotRate)
#for calculating forward rate, we will use the calculated spot rate before
#and using the formula for the 1-year forward rate F1n = (Rn * Tn - R1 * T1) / (Tn - T1)
spotRate_y1 <- spotRate$X1Y
spotRate_y2 <- spotRate$X2Y
spotRate_y3 <- spotRate$X3Y
spotRate_y4 <- spotRate$X4Y
spotRate_y5 <- spotRate$X5Y
# Forward rates from year 1 to years 2..5: F12, F13, F14, F15
F12 <- (spotRate_y2 * 2 - spotRate_y1 * 1) / (2 - 1)
F13 <- (spotRate_y3 * 3 - spotRate_y1 * 1) / (3 - 1)
F14 <- (spotRate_y4 * 4 - spotRate_y1 * 1) / (4 - 1)
F15 <- (spotRate_y5 * 5 - spotRate_y1 * 1) / (5 - 1)
F12
F13
F14
F15
years <- c(2,3,4,5)
# NOTE(review): plotting assumes each F1n vector has length 4 so it lines
# up with `years` -- verify against the number of rows in spotRate.csv.
# FIX: corrected misspelled labels ("foward" -> "forward").
plot(years, F12 ,type = "o",col = "red", xlab = "year", ylab = "forward rate", main = "forward curve")
lines(years, F12, type = "o", col = "orange")
lines(years, F13, type = "o", col = "black")
lines(years,F14, type = "o", col = "green")
lines(years,F15, type = "o", col = "cyan")
|
/forwardCal.R
|
no_license
|
zyws11/apm466
|
R
| false
| false
| 902
|
r
|
# Compute and plot 1-to-n year forward rates from previously calculated
# spot rates, using F1n = (Rn * Tn - R1 * T1) / (Tn - T1).
spotRate <- read.csv('spotRate.csv')
show(spotRate)
#for calculating forward rate, we will use the calculated spot rate before
#and using the formula for the 1-year forward rate F1n = (Rn * Tn - R1 * T1) / (Tn - T1)
spotRate_y1 <- spotRate$X1Y
spotRate_y2 <- spotRate$X2Y
spotRate_y3 <- spotRate$X3Y
spotRate_y4 <- spotRate$X4Y
spotRate_y5 <- spotRate$X5Y
# Forward rates from year 1 to years 2..5: F12, F13, F14, F15
F12 <- (spotRate_y2 * 2 - spotRate_y1 * 1) / (2 - 1)
F13 <- (spotRate_y3 * 3 - spotRate_y1 * 1) / (3 - 1)
F14 <- (spotRate_y4 * 4 - spotRate_y1 * 1) / (4 - 1)
F15 <- (spotRate_y5 * 5 - spotRate_y1 * 1) / (5 - 1)
F12
F13
F14
F15
years <- c(2,3,4,5)
# NOTE(review): plotting assumes each F1n vector has length 4 so it lines
# up with `years` -- verify against the number of rows in spotRate.csv.
# FIX: corrected misspelled labels ("foward" -> "forward").
plot(years, F12 ,type = "o",col = "red", xlab = "year", ylab = "forward rate", main = "forward curve")
lines(years, F12, type = "o", col = "orange")
lines(years, F13, type = "o", col = "black")
lines(years,F14, type = "o", col = "green")
lines(years,F15, type = "o", col = "cyan")
|
# Unit tests for area.my_square().  The original file had several syntax
# errors: test_that() was written as test_that("desc"){...} instead of
# test_that("desc", {...}); unescaped nested double quotes broke the
# expect_error() message strings; one expect_identical() had a misplaced
# closing parenthesis; and "expected_error" was a typo for expect_error.
# The expected-message strings quoted R internals verbatim (and did not
# match the actual errors), so the error expectations below check only
# that an error is raised -- TODO confirm exact messages if needed.
test_that("area_square calculates the area of a given square correctly", {
  # expected outputs
  expect_identical(area.my_square(my_square(3)), 9)
  expect_identical(area.my_square(my_square(10)), 100)
  expect_identical(area.my_square(my_square(7)), 49)
  # unusual inputs
  expect_identical(area.my_square(my_square(0)), 0)
  # expected errors
  expect_error(area.my_square(my_square(c())))
  expect_error(area.my_square(my_square(c(1, 2))))
  expect_error(area.my_square(my_square("hello")))
})
|
/tests/testthat/tests_area_squared.R
|
no_license
|
psarana/area_square
|
R
| false
| false
| 719
|
r
|
# Unit tests for area.my_square().  The original file had several syntax
# errors: test_that() was written as test_that("desc"){...} instead of
# test_that("desc", {...}); unescaped nested double quotes broke the
# expect_error() message strings; one expect_identical() had a misplaced
# closing parenthesis; and "expected_error" was a typo for expect_error.
# The expected-message strings quoted R internals verbatim (and did not
# match the actual errors), so the error expectations below check only
# that an error is raised -- TODO confirm exact messages if needed.
test_that("area_square calculates the area of a given square correctly", {
  # expected outputs
  expect_identical(area.my_square(my_square(3)), 9)
  expect_identical(area.my_square(my_square(10)), 100)
  expect_identical(area.my_square(my_square(7)), 49)
  # unusual inputs
  expect_identical(area.my_square(my_square(0)), 0)
  # expected errors
  expect_error(area.my_square(my_square(c())))
  expect_error(area.my_square(my_square(c(1, 2))))
  expect_error(area.my_square(my_square("hello")))
})
|
# Plot total PM2.5 emissions in Baltimore City (fips == "24510") by year.
NEI <- readRDS("exdata%2Fdata%2FNEI_data/summarySCC_PM25.rds")
baltimore <- NEI[NEI$fips=="24510",]
# Sum emissions within each year.
baltimoreSum <- tapply(baltimore$Emissions,baltimore$year,sum)
png("plot2.png")
# FIX: corrected misspelled labels ("Emmissions" -> "Emissions",
# "Boltimore" -> "Baltimore").
barplot(baltimoreSum,xlab = "years", ylab = "Total Emissions",main="Emission in Baltimore")
dev.off()
|
/ExploratoryDataAnalysis/Project/plot2.R
|
no_license
|
AbdelrahmanElsehaily/Data-Science-Specialization-Coursera
|
R
| false
| false
| 276
|
r
|
# Plot total PM2.5 emissions in Baltimore City (fips == "24510") by year.
NEI <- readRDS("exdata%2Fdata%2FNEI_data/summarySCC_PM25.rds")
baltimore <- NEI[NEI$fips=="24510",]
# Sum emissions within each year.
baltimoreSum <- tapply(baltimore$Emissions,baltimore$year,sum)
png("plot2.png")
# FIX: corrected misspelled labels ("Emmissions" -> "Emissions",
# "Boltimore" -> "Baltimore").
barplot(baltimoreSum,xlab = "years", ylab = "Total Emissions",main="Emission in Baltimore")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yandextexttranslater.R
\name{yandex_translate}
\alias{yandex_translate}
\title{Translates text to the specified language}
\usage{
yandex_translate(yandex_api_key, text = "", lang = "")
}
\arguments{
\item{yandex_api_key}{yandex API key}
\item{text}{The text to translate. The maximum size of the text being passed is 10000 characters.}
\item{lang}{The translation direction. You can use any of the following ways to set it:
As a pair of language codes separated by a hyphen ("from"-"to").
For example, en-ru indicates translating from English to Russian.
As the final language code (for example, ru). In this case, the service tries to detect the source language automatically.}
}
\value{
translated text (character type)
See \url{https://tech.yandex.com/translate/doc/dg/reference/translate-docpage/} for more details
}
\description{
Translates text to the specified language
}
\examples{
translated_text = yandex_translate(yandex_api_key, text="voglio mangiare cena", lang="it-en")
}
|
/man/yandex_translate.Rd
|
permissive
|
kpolimis/yandextexttranslater
|
R
| false
| true
| 1,066
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yandextexttranslater.R
\name{yandex_translate}
\alias{yandex_translate}
\title{Translates text to the specified language}
\usage{
yandex_translate(yandex_api_key, text = "", lang = "")
}
\arguments{
\item{yandex_api_key}{yandex API key}
\item{text}{The text to translate. The maximum size of the text being passed is 10000 characters.}
\item{lang}{The translation direction. You can use any of the following ways to set it:
As a pair of language codes separated by a hyphen ("from"-"to").
For example, en-ru indicates translating from English to Russian.
As the final language code (for example, ru). In this case, the service tries to detect the source language automatically.}
}
\value{
translated text (character type)
See \url{https://tech.yandex.com/translate/doc/dg/reference/translate-docpage/} for more details
}
\description{
Translates text to the specified language
}
\examples{
translated_text = yandex_translate(yandex_api_key, text="voglio mangiare cena", lang="it-en")
}
|
# Fuzzer-generated regression input for grattan:::anyOutside(): scalar
# bounds a = b = 0 and an integer vector x mixing large negative, large
# positive, and zero entries.
testlist <- list(a = 0L, b = 0L, x = c(-66561L, -10872294L, 1593777664L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
# Print the structure of the result (its shape is not known from here).
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130048-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 354
|
r
|
# Fuzzer-generated regression input for grattan:::anyOutside(): scalar
# bounds a = b = 0 and an integer vector x mixing large negative, large
# positive, and zero entries.
testlist <- list(a = 0L, b = 0L, x = c(-66561L, -10872294L, 1593777664L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
# Print the structure of the result (its shape is not known from here).
str(result)
|
asfdjngasd
faslkfjnas
fdasfgasfg
asdf
sadf
sadf
sad
f
sad
fas
df
asd
f
sadf
sa
df
sd
|
/fasfd.R
|
no_license
|
ksimpkinson/test2
|
R
| false
| false
| 84
|
r
|
asfdjngasd
faslkfjnas
fdasfgasfg
asdf
sadf
sadf
sad
f
sad
fas
df
asd
f
sadf
sa
df
sd
|
# Attach every package named in `libraryList`.
#
# Returns TRUE when all packages attach successfully (or when the list is
# empty/NULL).  On the first failure it prints "library <name> not found"
# and returns FALSE without attempting the remaining packages.
libraryCheck <- function(libraryList = NULL) {
  if (length(libraryList) == 0) {
    return(TRUE)
  }
  for (pkg in libraryList) {
    # logical.return = TRUE makes library() report failure instead of erroring.
    attached <- library(pkg, logical.return = TRUE, character.only = TRUE)
    if (!attached) {
      print(paste('library', pkg, 'not found'))
      return(FALSE)
    }
  }
  TRUE
}
|
/trfzp/functions.r
|
no_license
|
gogbajbobo/R_project
|
R
| false
| false
| 406
|
r
|
# Attach every package named in `libraryList`.
#
# Returns TRUE when all packages attach successfully (or when the list is
# empty/NULL).  On the first failure it prints "library <name> not found"
# and returns FALSE without attempting the remaining packages.
libraryCheck <- function(libraryList = NULL) {
  if (length(libraryList) == 0) {
    return(TRUE)
  }
  for (pkg in libraryList) {
    # logical.return = TRUE makes library() report failure instead of erroring.
    attached <- library(pkg, logical.return = TRUE, character.only = TRUE)
    if (!attached) {
      print(paste('library', pkg, 'not found'))
      return(FALSE)
    }
  }
  TRUE
}
|
library(stringr)
# Simulated panel: 10 firms x 500 daily observations.
# NOTE(review): the y1/y2/y3 samples of length 50 and x1 of length 2 are
# silently recycled across the 5000 rows by data.frame() -- confirm intended.
testData <<-data.frame(firm=rep(LETTERS[1:10],each=500),
                      date=as.Date("2012-01-01")+1:500,
                      y1=sample(1:100,50),y2=sample(1:100,50),y3=sample(1:100,50),x1=sample(1:2))
testData$X<-NULL
testData$date<-as.Date(testData$date)
testData$date <- format(testData$date,'%Y-%m-%d')
# First day of each observation's month, as "YYYY-MM-01".
# FIX: `start = -2` was being passed to paste0() instead of str_sub(),
# which appended "-2" to the year string and corrupted the parsed dates;
# it now selects the last two characters of the '%y' year as intended.
testData$MONTHDATE <- as.Date(paste(paste0('20', str_sub(strftime(testData$date,'%y'), start = -2)),strftime(testData$date,'%m'),'01',sep='-'))
testData$MONTHDATE <- format(testData$MONTHDATE,'%Y-%m-%d')
# Week-of-year number (00-53, weeks starting Monday, "%W") of a date string.
nweek <- function(x, format="%Y-%m-%d")
{as.integer(format(strptime(x, format=format), "%W"))
}
testData$WEEKDATE<-nweek(testData$date)
testData$YEARWEEK<-paste(strftime(testData$date,'%y'),testData$WEEKDATE,sep='-')
# Subset testData by firm and a trailing date window measured back from the
# most recent date in the data ("Last Month" = 30 days, "Last Quarter" = 90,
# "Last Year" = 356 days as written -- TODO confirm 356 vs 365 is intended).
dataToPlot<-function(firmName="All", dateRange="All")
{
  td <- max(as.Date(testData$date))
  if(firmName=='All'){
    if (dateRange=='Last Month'){plotData <-testData[testData$date>=(td-30),]}
    else if (dateRange=='Last Quarter'){plotData <-testData[testData$date>=(td-90),]}
    else if (dateRange=='Last Year'){plotData <-testData[testData$date>=(td-356),]}
    else if (dateRange=='All') {plotData <-testData}
  }
  else{
    if (dateRange=='Last Month'){plotData <-testData[(testData$date>=(td-30)&(testData$firm==firmName)),]}
    else if (dateRange=='Last Quarter'){plotData <-testData[(testData$date>=(td-90)&(testData$firm==firmName)),]}
    else if (dateRange=='Last Year'){plotData <-testData[(testData$date>=(td-356)&(testData$firm==firmName)),]}
    else if (dateRange=='All') {plotData <-testData[testData$firm==firmName,]}
  }
}
|
/global.R
|
no_license
|
tswenzel/CourserashinyApp
|
R
| false
| false
| 1,605
|
r
|
library(stringr)
# Simulated panel: 10 firms x 500 daily observations.
# NOTE(review): the y1/y2/y3 samples of length 50 and x1 of length 2 are
# silently recycled across the 5000 rows by data.frame() -- confirm intended.
testData <<-data.frame(firm=rep(LETTERS[1:10],each=500),
                      date=as.Date("2012-01-01")+1:500,
                      y1=sample(1:100,50),y2=sample(1:100,50),y3=sample(1:100,50),x1=sample(1:2))
testData$X<-NULL
testData$date<-as.Date(testData$date)
testData$date <- format(testData$date,'%Y-%m-%d')
# First day of each observation's month, as "YYYY-MM-01".
# FIX: `start = -2` was being passed to paste0() instead of str_sub(),
# which appended "-2" to the year string and corrupted the parsed dates;
# it now selects the last two characters of the '%y' year as intended.
testData$MONTHDATE <- as.Date(paste(paste0('20', str_sub(strftime(testData$date,'%y'), start = -2)),strftime(testData$date,'%m'),'01',sep='-'))
testData$MONTHDATE <- format(testData$MONTHDATE,'%Y-%m-%d')
# Week-of-year number (00-53, weeks starting Monday, "%W") of a date string.
nweek <- function(x, format="%Y-%m-%d")
{as.integer(format(strptime(x, format=format), "%W"))
}
testData$WEEKDATE<-nweek(testData$date)
testData$YEARWEEK<-paste(strftime(testData$date,'%y'),testData$WEEKDATE,sep='-')
# Subset testData by firm and a trailing date window measured back from the
# most recent date in the data ("Last Month" = 30 days, "Last Quarter" = 90,
# "Last Year" = 356 days as written -- TODO confirm 356 vs 365 is intended).
dataToPlot<-function(firmName="All", dateRange="All")
{
  td <- max(as.Date(testData$date))
  if(firmName=='All'){
    if (dateRange=='Last Month'){plotData <-testData[testData$date>=(td-30),]}
    else if (dateRange=='Last Quarter'){plotData <-testData[testData$date>=(td-90),]}
    else if (dateRange=='Last Year'){plotData <-testData[testData$date>=(td-356),]}
    else if (dateRange=='All') {plotData <-testData}
  }
  else{
    if (dateRange=='Last Month'){plotData <-testData[(testData$date>=(td-30)&(testData$firm==firmName)),]}
    else if (dateRange=='Last Quarter'){plotData <-testData[(testData$date>=(td-90)&(testData$firm==firmName)),]}
    else if (dateRange=='Last Year'){plotData <-testData[(testData$date>=(td-356)&(testData$firm==firmName)),]}
    else if (dateRange=='All') {plotData <-testData[testData$firm==firmName,]}
  }
}
|
# ---
# 02.2 - Regressões: Paper 21/09/2020 (1a Versão do Paper)
# Regressões para diferntes modelos
# Atitudes Populistas e Voto em Bolsonaro em 2018 (2020)
# ---
# Eduardo Ryô Tamaki
# Mestrando em Ciência Política da UFMG
# e-mail: eduardo.rtamaki@gmail.com
# ---
# 22/09/2020
# ---
## PREAMBULO -------------------------------------------------------------------
library(tidyverse)
library(here)
library(stargazer)
source(here::here("00 - Tratando e Criando as Variaveis.R"), encoding = "UTF-8")
######################################
# Regressões para o Paper 21/09/2020 #
######################################
# Var.: pop_2c - Categórica
# pop_ed_sart - Tipologia
# pop_goertz - Goertziana (Minimo)
# pop_ad - Adição
## ## ## ## ## ## ## ##
# pop_2c - Categórica #
## ## ## ## ## ## ## ##
# Modelo 1:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação, Renda
mod_cat_1 <- glm(voto_b ~ pop_2c + sexo + id + ed + fx_renda,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp <- exp(mod_cat_1$coefficients)
# P-valor:
mod_cat_p <- list(summary(mod_cat_1)$coefficients[,4])
##
# Modelo 2:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Ideologia
mod_cat_2 <- glm(voto_b ~ pop_2c + sexo + id + ed + fx_renda + ideo2.2,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp2 <- exp(mod_cat_2$coefficients)
# P-valor:
mod_cat_p2 <- list(summary(mod_cat_2)$coefficients[,4])
##
# Modelo 3:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação com Ideologia
mod_cat_3 <- glm(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp3 <- exp(mod_cat_3$coefficients)
# P-valor:
mod_cat_p3 <- list(summary(mod_cat_3)$coefficients[,4])
##
# Modelo 4:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação com Ideologia +
# PC2
mod_cat_4 <- glm(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda +
PC2,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp4 <- exp(mod_cat_4$coefficients)
# P-valor:
mod_cat_p4 <- list(summary(mod_cat_4)$coefficients[,4])
##
# Modelo 5 (Apendice):
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes Populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação e Renda +
# Interação com Ideologia +
# PC2 +
# Antipt
mod_cat_5 <- glm(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed +
fx_renda +
PC2 +
antipt,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp5 <- exp(mod_cat_5$coefficients)
# P-valor:
mod_cat_p5 <- list(summary(mod_cat_5)$coefficients[,4])
##
# Modelo 6 (Apendice):
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes Populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação e Renda +
# Interação com Ideologia +
# PC2 +
# Antipt +
# Corrupção
mod_cat_6 <- glm(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed +
fx_renda +
PC2 +
antipt +
corrup1,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp6 <- exp(mod_cat_6$coefficients)
# P-valor:
mod_cat_p6 <- list(summary(mod_cat_6)$coefficients[,4])
##
# Modelo 7 (Apendice):
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes Populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação e Renda +
# Interação com Ideologia +
# PC2 +
# Antipt +
# Corrupção +
# Religião (Ser Evangélico)
mod_cat_7 <- glm(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed +
fx_renda +
PC2 +
antipt +
corrup1 +
relig,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_cat_exp7 <- exp(mod_cat_7$coefficients)
# P-valor:
mod_cat_p7 <- list(summary(mod_cat_7)$coefficients[,4])
##############################
# ## ## ## ## ## ## ## ## #
# pop_ed_sart - Tipologia #
# ## ## ## ## ## ## ## ## #
# Modelo 1:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Tipologia +
# Demographic Controls: Sexo, Idade, Educação, Renda
mod_tip_1 <- glm(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_tip_exp <- exp(mod_tip_1$coefficients)
# P-valor:
mod_tip_p <- list(summary(mod_tip_1)$coefficients[,4])
##
# Modelo 2:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Categórica +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# PC2
mod_tip_2 <- glm(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda + PC2,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_tip_exp2 <- exp(mod_tip_2$coefficients)
# P-valor:
mod_tip_p2 <- list(summary(mod_tip_2)$coefficients[,4])
##
# Modelo 3 (Apendice):
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes Populistas Tipologia +
# Demographic Controls: Sexo, Idade, Educação e Renda +
# Interação com Ideologia +
# PC2 +
# Antipt +
mod_tip_3 <- glm(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda +
PC2 +
antipt,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_tip_exp3 <- exp(mod_tip_3$coefficients)
# P-valor:
mod_tip_p3 <- list(summary(mod_tip_3)$coefficients[,4])
##
# Modelo 4 (Apendice):
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes Populistas Tipologia +
# Demographic Controls: Sexo, Idade, Educação e Renda +
# Interação com Ideologia +
# PC2 +
# Antipt +
# Corrupção
mod_tip_4 <- glm(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda +
PC2 +
antipt +
corrup1,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_tip_exp4 <- exp(mod_tip_4$coefficients)
# P-valor:
mod_tip_p4 <- list(summary(mod_tip_4)$coefficients[,4])
##
# Modelo 5 (Apendice):
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes Populistas Tipologia +
# Demographic Controls: Sexo, Idade, Educação e Renda +
# Interação com Ideologia +
# PC2 +
# Antipt +
# Corrupção +
# Religião (Ser Evangélico)
mod_tip_5 <- glm(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda +
PC2 +
antipt +
corrup1 +
relig,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_tip_exp5 <- exp(mod_tip_5$coefficients)
# P-valor:
mod_tip_p5 <- list(summary(mod_tip_5)$coefficients[,4])
##############################
## ## ## ## ## ## ## ## ##
# pop_goertz - Goertziana #
## ## ## ## ## ## ## ## ##
# Modelo 1:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda
mod_gtz_1 <- glm(voto_b ~ pop_gz + sexo + id + ed + fx_renda,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp <- exp(mod_gtz_1$coefficients)
# P-valor:
mod_gtz_p <- list(summary(mod_gtz_1)$coefficients[,4])
# Modelo 2:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Ideologia
mod_gtz_2 <- glm(voto_b ~ pop_gz + sexo + id + ed + fx_renda + ideo2.2,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp2 <- exp(mod_gtz_2$coefficients)
# P-valor:
mod_gtz_p2 <- list(summary(mod_gtz_2)$coefficients[,4])
# Modelo 3:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação Ideologia
mod_gtz_3 <- glm(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp3 <- exp(mod_gtz_3$coefficients)
# P-valor:
mod_gtz_p3 <- list(summary(mod_gtz_3)$coefficients[,4])
# Modelo 4:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação Ideologia +
# PC2
mod_gtz_4 <- glm(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
PC2,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp4 <- exp(mod_gtz_4$coefficients)
# P-valor:
mod_gtz_p4 <- list(summary(mod_gtz_4)$coefficients[,4])
# Modelo 5:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação Ideologia +
# PC2 +
# Antipt
mod_gtz_5 <- glm(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
PC2 +
antipt,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp5 <- exp(mod_gtz_5$coefficients)
# P-valor:
mod_gtz_p5 <- list(summary(mod_gtz_5)$coefficients[,4])
# Modelo 6:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação Ideologia +
# PC2 +
# Antipt #
# Corrupção
mod_gtz_6 <- glm(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
PC2 +
antipt +
corrup1,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp6 <- exp(mod_gtz_6$coefficients)
# P-valor:
mod_gtz_p6 <- list(summary(mod_gtz_6)$coefficients[,4])
# Modelo 7:
# Var. Dep.: Voto em Bolsonaro
# Var. Ind.: Atitudes populistas Goertziana +
# Demographic Controls: Sexo, Idade, Educação, Renda +
# Interação Ideologia +
# PC2 +
# Antipt #
# Corrupção +
# Religião (Ser Evangélico)
mod_gtz_7 <- glm(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
PC2 +
antipt +
corrup1 +
relig,
data = e19,
na.action = na.omit,
family = binomial(link = "logit"),
maxit = 100)
# Exponencial:
mod_gtz_exp7 <- exp(mod_gtz_7$coefficients)
# P-valor:
mod_gtz_p7 <- list(summary(mod_gtz_7)$coefficients[,4])
##############################
## Populist attitudes: additive measure (pop_ad_n), full sample (e19) ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-7: demographics -> ideology -> ideology interaction ->
# PC2 -> anti-PT -> corruption -> religion (evangelical). For each model
# we keep the fitted object, the odds ratios (exp of coefficients) and the
# Wald p-values under the names the rest of the project expects.

# Local helper shared by every model in this section: binomial logit with
# listwise deletion and up to 100 IWLS iterations. Redefined identically in
# each section so every section stands alone.
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: additive measure + demographics (sex, age, education, income).
mod_ad_1 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda, e19)
mod_ad_exp <- exp(coef(mod_ad_1))
mod_ad_p <- list(summary(mod_ad_1)$coefficients[, 4])

# Model 2: adds ideology (ideo2.2).
mod_ad_2 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda + ideo2.2,
                      e19)
mod_ad_exp2 <- exp(coef(mod_ad_2))
mod_ad_p2 <- list(summary(mod_ad_2)$coefficients[, 4])

# Model 3: interaction between populist attitudes and ideology.
mod_ad_3 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda,
                      e19)
mod_ad_exp3 <- exp(coef(mod_ad_3))
mod_ad_p3 <- list(summary(mod_ad_3)$coefficients[, 4])

# Model 4: adds PC2.
mod_ad_4 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2, e19)
mod_ad_exp4 <- exp(coef(mod_ad_4))
mod_ad_p4 <- list(summary(mod_ad_4)$coefficients[, 4])

# Model 5 (appendix): adds anti-PT sentiment (the original comment said
# "corruption", but the variable added here is antipt).
mod_ad_5 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2 + antipt, e19)
mod_ad_exp5 <- exp(coef(mod_ad_5))
mod_ad_p5 <- list(summary(mod_ad_5)$coefficients[, 4])

# Model 6 (appendix): adds corruption (corrup1).
mod_ad_6 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2 + antipt + corrup1, e19)
mod_ad_exp6 <- exp(coef(mod_ad_6))
mod_ad_p6 <- list(summary(mod_ad_6)$coefficients[, 4])

# Model 7 (appendix): adds religion (being evangelical).
mod_ad_7 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2 + antipt + corrup1 + relig, e19)
mod_ad_exp7 <- exp(coef(mod_ad_7))
mod_ad_p7 <- list(summary(mod_ad_7)$coefficients[, 4])
############################################################
## Right-wing voters only (e19_dir) ##
############################################################
# e19_dir: sample restricted to right-wing respondents (ideological
# self-placement 6-10, where 10 is the far right). The sections below
# re-run the main specifications on this subsample.

## Populist attitudes: categorical measure (pop_2c), right-wing subsample ##
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-5: demographics -> PC2 -> anti-PT -> corruption ->
# religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: categorical measure + demographics (sex, age, education, income).
mod_dir_cat_1 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda,
                           e19_dir)
mod_dir_cat_exp <- exp(coef(mod_dir_cat_1))
mod_dir_cat_p <- list(summary(mod_dir_cat_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_dir_cat_2 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda + PC2,
                           e19_dir)
mod_dir_cat_exp2 <- exp(coef(mod_dir_cat_2))
mod_dir_cat_p2 <- list(summary(mod_dir_cat_2)$coefficients[, 4])

# Model 3: adds anti-PT sentiment.
mod_dir_cat_3 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda +
                             PC2 + antipt, e19_dir)
mod_dir_cat_exp3 <- exp(coef(mod_dir_cat_3))
mod_dir_cat_p3 <- list(summary(mod_dir_cat_3)$coefficients[, 4])

# Model 4: adds corruption (corrup1).
mod_dir_cat_4 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1, e19_dir)
mod_dir_cat_exp4 <- exp(coef(mod_dir_cat_4))
mod_dir_cat_p4 <- list(summary(mod_dir_cat_4)$coefficients[, 4])

# Model 5: adds religion (being evangelical).
mod_dir_cat_5 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1 + relig, e19_dir)
mod_dir_cat_exp5 <- exp(coef(mod_dir_cat_5))
mod_dir_cat_p5 <- list(summary(mod_dir_cat_5)$coefficients[, 4])
##############################
## Populist attitudes: Goertzian measure (pop_gz), right-wing subsample ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit on e19_dir. Covariates
# accumulate across models 1-5: demographics -> PC2 -> anti-PT ->
# corruption -> religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: Goertzian measure + demographics (sex, age, education, income).
mod_dir_gtz_1 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda,
                           e19_dir)
mod_dir_gtz_exp <- exp(coef(mod_dir_gtz_1))
mod_dir_gtz_p <- list(summary(mod_dir_gtz_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_dir_gtz_2 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda + PC2,
                           e19_dir)
mod_dir_gtz_exp2 <- exp(coef(mod_dir_gtz_2))
mod_dir_gtz_p2 <- list(summary(mod_dir_gtz_2)$coefficients[, 4])

# Model 3: adds anti-PT sentiment.
mod_dir_gtz_3 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda +
                             PC2 + antipt, e19_dir)
mod_dir_gtz_exp3 <- exp(coef(mod_dir_gtz_3))
mod_dir_gtz_p3 <- list(summary(mod_dir_gtz_3)$coefficients[, 4])

# Model 4: adds corruption (corrup1).
mod_dir_gtz_4 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1, e19_dir)
mod_dir_gtz_exp4 <- exp(coef(mod_dir_gtz_4))
mod_dir_gtz_p4 <- list(summary(mod_dir_gtz_4)$coefficients[, 4])

# Model 5: adds religion (being evangelical).
mod_dir_gtz_5 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1 + relig, e19_dir)
mod_dir_gtz_exp5 <- exp(coef(mod_dir_gtz_5))
mod_dir_gtz_p5 <- list(summary(mod_dir_gtz_5)$coefficients[, 4])
##############################
## Populist attitudes: additive measure (pop_ad_n), right-wing subsample ##
## (the original banner mislabeled this section "pop_goertz - Adicao") ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit on e19_dir. Covariates
# accumulate across models 1-5: demographics -> PC2 -> anti-PT ->
# corruption -> religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: additive measure + demographics (sex, age, education, income).
mod_dir_ad_1 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda,
                          e19_dir)
mod_dir_ad_exp <- exp(coef(mod_dir_ad_1))
mod_dir_ad_p <- list(summary(mod_dir_ad_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_dir_ad_2 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda + PC2,
                          e19_dir)
mod_dir_ad_exp2 <- exp(coef(mod_dir_ad_2))
mod_dir_ad_p2 <- list(summary(mod_dir_ad_2)$coefficients[, 4])

# Model 3: adds anti-PT sentiment.
mod_dir_ad_3 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda +
                            PC2 + antipt, e19_dir)
mod_dir_ad_exp3 <- exp(coef(mod_dir_ad_3))
mod_dir_ad_p3 <- list(summary(mod_dir_ad_3)$coefficients[, 4])

# Model 4: adds corruption (corrup1).
mod_dir_ad_4 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda +
                            PC2 + antipt + corrup1, e19_dir)
mod_dir_ad_exp4 <- exp(coef(mod_dir_ad_4))
mod_dir_ad_p4 <- list(summary(mod_dir_ad_4)$coefficients[, 4])

# Model 5: adds religion (being evangelical).
mod_dir_ad_5 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda +
                            PC2 + antipt + corrup1 + relig, e19_dir)
mod_dir_ad_exp5 <- exp(coef(mod_dir_ad_5))
mod_dir_ad_p5 <- list(summary(mod_dir_ad_5)$coefficients[, 4])
|
/Scripts/Brazil/02.2 - Regressoes _ 1a Versao Paper 21.09.R
|
no_license
|
Ttytamaki/Populist_Attitudes
|
R
| false
| false
| 27,463
|
r
|
# ---
# 02.2 - Regressões: Paper 21/09/2020 (1a Versão do Paper)
# Regressões para diferntes modelos
# Atitudes Populistas e Voto em Bolsonaro em 2018 (2020)
# ---
# Eduardo Ryô Tamaki
# Mestrando em Ciência Política da UFMG
# e-mail: eduardo.rtamaki@gmail.com
# ---
# 22/09/2020
# ---
## PREAMBULO -------------------------------------------------------------------
library(tidyverse)
library(here)
library(stargazer)
source(here::here("00 - Tratando e Criando as Variaveis.R"), encoding = "UTF-8")
######################################
# Regressões para o Paper 21/09/2020 #
######################################
# Var.: pop_2c - Categórica
# pop_ed_sart - Tipologia
# pop_goertz - Goertziana (Minimo)
# pop_ad - Adição
##############################
## Populist attitudes: categorical measure (pop_2c), full sample (e19) ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-7: demographics -> ideology -> ideology interaction ->
# PC2 -> anti-PT -> corruption -> religion (evangelical). For each model
# we keep the fitted object, the odds ratios (exp of coefficients) and the
# Wald p-values under the names the rest of the project expects.

# Local helper shared by every model in this section: binomial logit with
# listwise deletion and up to 100 IWLS iterations. Redefined identically in
# each section so every section stands alone.
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: categorical measure + demographics (sex, age, education, income).
mod_cat_1 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda, e19)
mod_cat_exp <- exp(coef(mod_cat_1))
mod_cat_p <- list(summary(mod_cat_1)$coefficients[, 4])

# Model 2: adds ideology (ideo2.2).
mod_cat_2 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda + ideo2.2,
                       e19)
mod_cat_exp2 <- exp(coef(mod_cat_2))
mod_cat_p2 <- list(summary(mod_cat_2)$coefficients[, 4])

# Model 3: interaction between populist attitudes and ideology.
mod_cat_3 <- fit_logit(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda,
                       e19)
mod_cat_exp3 <- exp(coef(mod_cat_3))
mod_cat_p3 <- list(summary(mod_cat_3)$coefficients[, 4])

# Model 4: adds PC2.
mod_cat_4 <- fit_logit(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2, e19)
mod_cat_exp4 <- exp(coef(mod_cat_4))
mod_cat_p4 <- list(summary(mod_cat_4)$coefficients[, 4])

# Model 5 (appendix): adds anti-PT sentiment.
mod_cat_5 <- fit_logit(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2 + antipt, e19)
mod_cat_exp5 <- exp(coef(mod_cat_5))
mod_cat_p5 <- list(summary(mod_cat_5)$coefficients[, 4])

# Model 6 (appendix): adds corruption (corrup1).
mod_cat_6 <- fit_logit(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2 + antipt + corrup1, e19)
mod_cat_exp6 <- exp(coef(mod_cat_6))
mod_cat_p6 <- list(summary(mod_cat_6)$coefficients[, 4])

# Model 7 (appendix): adds religion (being evangelical).
mod_cat_7 <- fit_logit(voto_b ~ pop_2c * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2 + antipt + corrup1 + relig, e19)
mod_cat_exp7 <- exp(coef(mod_cat_7))
mod_cat_p7 <- list(summary(mod_cat_7)$coefficients[, 4])
##############################
## Populist attitudes: Sartorian typology (pop_ed_sart), full sample (e19) ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-5: demographics -> PC2 -> anti-PT -> corruption ->
# religion (evangelical). Note: unlike the other sections, the typology
# models never enter ideology or its interaction.

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: typology + demographics (sex, age, education, income).
mod_tip_1 <- fit_logit(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda, e19)
mod_tip_exp <- exp(coef(mod_tip_1))
mod_tip_p <- list(summary(mod_tip_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_tip_2 <- fit_logit(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda + PC2,
                       e19)
mod_tip_exp2 <- exp(coef(mod_tip_2))
mod_tip_p2 <- list(summary(mod_tip_2)$coefficients[, 4])

# Model 3 (appendix): adds anti-PT sentiment.
mod_tip_3 <- fit_logit(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda +
                         PC2 + antipt, e19)
mod_tip_exp3 <- exp(coef(mod_tip_3))
mod_tip_p3 <- list(summary(mod_tip_3)$coefficients[, 4])

# Model 4 (appendix): adds corruption (corrup1).
mod_tip_4 <- fit_logit(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda +
                         PC2 + antipt + corrup1, e19)
mod_tip_exp4 <- exp(coef(mod_tip_4))
mod_tip_p4 <- list(summary(mod_tip_4)$coefficients[, 4])

# Model 5 (appendix): adds religion (being evangelical).
mod_tip_5 <- fit_logit(voto_b ~ pop_ed_sart + sexo + id + ed + fx_renda +
                         PC2 + antipt + corrup1 + relig, e19)
mod_tip_exp5 <- exp(coef(mod_tip_5))
mod_tip_p5 <- list(summary(mod_tip_5)$coefficients[, 4])
##############################
## Populist attitudes: Goertzian measure (pop_gz), full sample (e19) ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-7: demographics -> ideology -> ideology interaction ->
# PC2 -> anti-PT -> corruption -> religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: Goertzian measure + demographics (sex, age, education, income).
mod_gtz_1 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda, e19)
mod_gtz_exp <- exp(coef(mod_gtz_1))
mod_gtz_p <- list(summary(mod_gtz_1)$coefficients[, 4])

# Model 2: adds ideology (ideo2.2).
mod_gtz_2 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda + ideo2.2,
                       e19)
mod_gtz_exp2 <- exp(coef(mod_gtz_2))
mod_gtz_p2 <- list(summary(mod_gtz_2)$coefficients[, 4])

# Model 3: interaction between populist attitudes and ideology.
mod_gtz_3 <- fit_logit(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda,
                       e19)
mod_gtz_exp3 <- exp(coef(mod_gtz_3))
mod_gtz_p3 <- list(summary(mod_gtz_3)$coefficients[, 4])

# Model 4: adds PC2.
mod_gtz_4 <- fit_logit(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2, e19)
mod_gtz_exp4 <- exp(coef(mod_gtz_4))
mod_gtz_p4 <- list(summary(mod_gtz_4)$coefficients[, 4])

# Model 5: adds anti-PT sentiment.
mod_gtz_5 <- fit_logit(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2 + antipt, e19)
mod_gtz_exp5 <- exp(coef(mod_gtz_5))
mod_gtz_p5 <- list(summary(mod_gtz_5)$coefficients[, 4])

# Model 6: adds corruption (corrup1).
mod_gtz_6 <- fit_logit(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2 + antipt + corrup1, e19)
mod_gtz_exp6 <- exp(coef(mod_gtz_6))
mod_gtz_p6 <- list(summary(mod_gtz_6)$coefficients[, 4])

# Model 7: adds religion (being evangelical).
mod_gtz_7 <- fit_logit(voto_b ~ pop_gz * ideo2.2 + sexo + id + ed + fx_renda +
                         PC2 + antipt + corrup1 + relig, e19)
mod_gtz_exp7 <- exp(coef(mod_gtz_7))
mod_gtz_p7 <- list(summary(mod_gtz_7)$coefficients[, 4])
##############################
## Populist attitudes: additive measure (pop_ad_n), full sample (e19) ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-7: demographics -> ideology -> ideology interaction ->
# PC2 -> anti-PT -> corruption -> religion (evangelical). For each model
# we keep the fitted object, the odds ratios (exp of coefficients) and the
# Wald p-values under the names the rest of the project expects.

# Local helper shared by every model in this section: binomial logit with
# listwise deletion and up to 100 IWLS iterations. Redefined identically in
# each section so every section stands alone.
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: additive measure + demographics (sex, age, education, income).
mod_ad_1 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda, e19)
mod_ad_exp <- exp(coef(mod_ad_1))
mod_ad_p <- list(summary(mod_ad_1)$coefficients[, 4])

# Model 2: adds ideology (ideo2.2).
mod_ad_2 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda + ideo2.2,
                      e19)
mod_ad_exp2 <- exp(coef(mod_ad_2))
mod_ad_p2 <- list(summary(mod_ad_2)$coefficients[, 4])

# Model 3: interaction between populist attitudes and ideology.
mod_ad_3 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda,
                      e19)
mod_ad_exp3 <- exp(coef(mod_ad_3))
mod_ad_p3 <- list(summary(mod_ad_3)$coefficients[, 4])

# Model 4: adds PC2.
mod_ad_4 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2, e19)
mod_ad_exp4 <- exp(coef(mod_ad_4))
mod_ad_p4 <- list(summary(mod_ad_4)$coefficients[, 4])

# Model 5 (appendix): adds anti-PT sentiment (the original comment said
# "corruption", but the variable added here is antipt).
mod_ad_5 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2 + antipt, e19)
mod_ad_exp5 <- exp(coef(mod_ad_5))
mod_ad_p5 <- list(summary(mod_ad_5)$coefficients[, 4])

# Model 6 (appendix): adds corruption (corrup1).
mod_ad_6 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2 + antipt + corrup1, e19)
mod_ad_exp6 <- exp(coef(mod_ad_6))
mod_ad_p6 <- list(summary(mod_ad_6)$coefficients[, 4])

# Model 7 (appendix): adds religion (being evangelical).
mod_ad_7 <- fit_logit(voto_b ~ pop_ad_n * ideo2.2 + sexo + id + ed + fx_renda +
                        PC2 + antipt + corrup1 + relig, e19)
mod_ad_exp7 <- exp(coef(mod_ad_7))
mod_ad_p7 <- list(summary(mod_ad_7)$coefficients[, 4])
############################################################
## Right-wing voters only (e19_dir) ##
############################################################
# e19_dir: sample restricted to right-wing respondents (ideological
# self-placement 6-10, where 10 is the far right). The sections below
# re-run the main specifications on this subsample.

## Populist attitudes: categorical measure (pop_2c), right-wing subsample ##
# DV: vote for Bolsonaro (voto_b), binomial logit. Covariates accumulate
# across models 1-5: demographics -> PC2 -> anti-PT -> corruption ->
# religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: categorical measure + demographics (sex, age, education, income).
mod_dir_cat_1 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda,
                           e19_dir)
mod_dir_cat_exp <- exp(coef(mod_dir_cat_1))
mod_dir_cat_p <- list(summary(mod_dir_cat_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_dir_cat_2 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda + PC2,
                           e19_dir)
mod_dir_cat_exp2 <- exp(coef(mod_dir_cat_2))
mod_dir_cat_p2 <- list(summary(mod_dir_cat_2)$coefficients[, 4])

# Model 3: adds anti-PT sentiment.
mod_dir_cat_3 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda +
                             PC2 + antipt, e19_dir)
mod_dir_cat_exp3 <- exp(coef(mod_dir_cat_3))
mod_dir_cat_p3 <- list(summary(mod_dir_cat_3)$coefficients[, 4])

# Model 4: adds corruption (corrup1).
mod_dir_cat_4 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1, e19_dir)
mod_dir_cat_exp4 <- exp(coef(mod_dir_cat_4))
mod_dir_cat_p4 <- list(summary(mod_dir_cat_4)$coefficients[, 4])

# Model 5: adds religion (being evangelical).
mod_dir_cat_5 <- fit_logit(voto_b ~ pop_2c + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1 + relig, e19_dir)
mod_dir_cat_exp5 <- exp(coef(mod_dir_cat_5))
mod_dir_cat_p5 <- list(summary(mod_dir_cat_5)$coefficients[, 4])
##############################
## Populist attitudes: Goertzian measure (pop_gz), right-wing subsample ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit on e19_dir. Covariates
# accumulate across models 1-5: demographics -> PC2 -> anti-PT ->
# corruption -> religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: Goertzian measure + demographics (sex, age, education, income).
mod_dir_gtz_1 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda,
                           e19_dir)
mod_dir_gtz_exp <- exp(coef(mod_dir_gtz_1))
mod_dir_gtz_p <- list(summary(mod_dir_gtz_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_dir_gtz_2 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda + PC2,
                           e19_dir)
mod_dir_gtz_exp2 <- exp(coef(mod_dir_gtz_2))
mod_dir_gtz_p2 <- list(summary(mod_dir_gtz_2)$coefficients[, 4])

# Model 3: adds anti-PT sentiment.
mod_dir_gtz_3 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda +
                             PC2 + antipt, e19_dir)
mod_dir_gtz_exp3 <- exp(coef(mod_dir_gtz_3))
mod_dir_gtz_p3 <- list(summary(mod_dir_gtz_3)$coefficients[, 4])

# Model 4: adds corruption (corrup1).
mod_dir_gtz_4 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1, e19_dir)
mod_dir_gtz_exp4 <- exp(coef(mod_dir_gtz_4))
mod_dir_gtz_p4 <- list(summary(mod_dir_gtz_4)$coefficients[, 4])

# Model 5: adds religion (being evangelical).
mod_dir_gtz_5 <- fit_logit(voto_b ~ pop_gz + sexo + id + ed + fx_renda +
                             PC2 + antipt + corrup1 + relig, e19_dir)
mod_dir_gtz_exp5 <- exp(coef(mod_dir_gtz_5))
mod_dir_gtz_p5 <- list(summary(mod_dir_gtz_5)$coefficients[, 4])
##############################
## Populist attitudes: additive measure (pop_ad_n), right-wing subsample ##
## (the original banner mislabeled this section "pop_goertz - Adicao") ##
##############################
# DV: vote for Bolsonaro (voto_b), binomial logit on e19_dir. Covariates
# accumulate across models 1-5: demographics -> PC2 -> anti-PT ->
# corruption -> religion (evangelical).

# Local helper shared by every model in this section (redefined identically
# per section so every section stands alone).
fit_logit <- function(formula, data) {
  glm(formula,
      data = data,
      na.action = na.omit,
      family = binomial(link = "logit"),
      maxit = 100)
}

# Model 1: additive measure + demographics (sex, age, education, income).
mod_dir_ad_1 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda,
                          e19_dir)
mod_dir_ad_exp <- exp(coef(mod_dir_ad_1))
mod_dir_ad_p <- list(summary(mod_dir_ad_1)$coefficients[, 4])

# Model 2: adds PC2.
mod_dir_ad_2 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda + PC2,
                          e19_dir)
mod_dir_ad_exp2 <- exp(coef(mod_dir_ad_2))
mod_dir_ad_p2 <- list(summary(mod_dir_ad_2)$coefficients[, 4])

# Model 3: adds anti-PT sentiment.
mod_dir_ad_3 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda +
                            PC2 + antipt, e19_dir)
mod_dir_ad_exp3 <- exp(coef(mod_dir_ad_3))
mod_dir_ad_p3 <- list(summary(mod_dir_ad_3)$coefficients[, 4])

# Model 4: adds corruption (corrup1).
mod_dir_ad_4 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda +
                            PC2 + antipt + corrup1, e19_dir)
mod_dir_ad_exp4 <- exp(coef(mod_dir_ad_4))
mod_dir_ad_p4 <- list(summary(mod_dir_ad_4)$coefficients[, 4])

# Model 5: adds religion (being evangelical).
mod_dir_ad_5 <- fit_logit(voto_b ~ pop_ad_n + sexo + id + ed + fx_renda +
                            PC2 + antipt + corrup1 + relig, e19_dir)
mod_dir_ad_exp5 <- exp(coef(mod_dir_ad_5))
mod_dir_ad_p5 <- list(summary(mod_dir_ad_5)$coefficients[, 4])
|
# Learning non-negative matrix factorization (NMF) -- 2013-08-26.

## Demo 1: NMFN package on a toy matrix. ##
X <- matrix(1:12, 3, 4)
X
library(NMFN)
# Fix: the original `method=multiplicative update` was a syntax error
# (unquoted words). NMFN selects the multiplicative-update solver with
# method = "nnmf_mm". The second argument is k, the number of
# factors/components in the W and H matrices; k = 10 > min(dim(X)) is
# overcomplete -- presumably just for experimentation, confirm if reused.
Z.MM <- nnmf(X, 10, method = "nnmf_mm")
Z.MM
Z.MM$W %*% Z.MM$H  # reconstruction W %*% H; should approximate X

## Demo 2: Bioconductor NMF package on the Golub expression set. ##
library(NMF)
data(esGolub)
esGolub
esGolub <- esGolub[1:200, ]  # keep the first 200 genes so the fit is fast
esGolub$Sample <- NULL       # drop the phenotype column before factorizing
RES <- nmf(esGolub, 3)       # rank-3 factorization
RES
fit(RES)                     # extract the fitted NMF model object
|
/BACKUP.SCRIPTS/R_SCRIPTS/NMF.R
|
no_license
|
LOBUTO/CANCER.GENOMICS
|
R
| false
| false
| 337
|
r
|
# Learning non-negative matrix factorization (NMF) -- 2013-08-26.

## Demo 1: NMFN package on a toy matrix. ##
X <- matrix(1:12, 3, 4)
X
library(NMFN)
# Fix: the original `method=multiplicative update` was a syntax error
# (unquoted words). NMFN selects the multiplicative-update solver with
# method = "nnmf_mm". The second argument is k, the number of
# factors/components in the W and H matrices; k = 10 > min(dim(X)) is
# overcomplete -- presumably just for experimentation, confirm if reused.
Z.MM <- nnmf(X, 10, method = "nnmf_mm")
Z.MM
Z.MM$W %*% Z.MM$H  # reconstruction W %*% H; should approximate X

## Demo 2: Bioconductor NMF package on the Golub expression set. ##
library(NMF)
data(esGolub)
esGolub
esGolub <- esGolub[1:200, ]  # keep the first 200 genes so the fit is fast
esGolub$Sample <- NULL       # drop the phenotype column before factorizing
RES <- nmf(esGolub, 3)       # rank-3 factorization
RES
fit(RES)                     # extract the fitted NMF model object
|
# Aggregate IPEDS completions data (2011-2018) into per-field / per-school
# counts of Black or African American doctoral recipients.
# AWLEVEL == 17 is "Doctor's degree - research/scholarship";
# CBKAAT = Black or African American total, CTOTALT = grand total.
library(shiny)
library(readr)
library(dplyr)

# One completions file per year, 2011-2018.
files <- paste0("data/C", 2011:2018, "_A.csv")
# Stack the yearly files; bind_rows() fills columns missing from some years
# with NA (same behavior as plyr::rbind.fill, without the plyr dependency).
dat <- bind_rows(lapply(files, read_csv))

# Lookup tables: CIP field-of-study titles and institution names.
cip <- read_csv("data/CIPCode2020.csv")
institutions <- read_csv("data/hd2018.csv")
d <- left_join(dat, cip, by = c("CIPCODE" = "CIPCode"))
d <- left_join(d, institutions, by = "UNITID")

# Shared summary used by all four aggregations below: count of Black
# doctoral recipients, their share of all recipients, and the denominator.
# `df` may be grouped; the summary is computed per group.
award_summary <- function(df) {
  df %>%
    summarize(
      Total = sum(CBKAAT[AWLEVEL == 17], na.rm = TRUE),
      Pct = Total / sum(CTOTALT[AWLEVEL == 17], na.rm = TRUE),
      Denom = sum(CTOTALT[AWLEVEL == 17], na.rm = TRUE)
    )
}

# Per field x institution.
tab <- d %>%
  group_by(CIPTitle, INSTNM) %>%
  award_summary() %>%
  filter(Denom > 0)

# Marginal totals per field (all institutions combined).
margin_field <- d %>%
  group_by(CIPTitle) %>%
  award_summary() %>%
  mutate(INSTNM = "Overall") %>%
  filter(Denom > 0)

# Marginal totals per institution (all fields combined).
margin_school <- d %>%
  group_by(INSTNM) %>%
  award_summary() %>%
  mutate(CIPTitle = "Overall") %>%
  filter(Denom > 0)

# Grand total across all fields and institutions (no Denom filter,
# matching the original behavior).
overall_margin <- d %>%
  award_summary() %>%
  mutate(CIPTitle = "Overall", INSTNM = "Overall")

# Combine, sort largest programs first, drop rows with missing labels.
all <- bind_rows(tab, margin_field, margin_school, overall_margin) %>%
  arrange(-Denom) %>%
  filter(!is.na(CIPTitle), !is.na(INSTNM))

# `path=` is deprecated in readr >= 1.4 (renamed `file=`); passing the
# destination positionally works with both old and new readr versions.
write_csv(all, "phd_diversity/data/aggregated_data.csv")
|
/prep_data.R
|
no_license
|
ddimmery/PhD_diversity
|
R
| false
| false
| 1,904
|
r
|
# Aggregate IPEDS completions data (2011-2018) into per-field / per-school
# counts of Black or African American doctoral recipients.
# AWLEVEL == 17 is "Doctor's degree - research/scholarship";
# CBKAAT = Black or African American total, CTOTALT = grand total.
library(shiny)
library(readr)
library(dplyr)

# One completions file per year, 2011-2018.
files <- paste0("data/C", 2011:2018, "_A.csv")
# Stack the yearly files; bind_rows() fills columns missing from some years
# with NA (same behavior as plyr::rbind.fill, without the plyr dependency).
dat <- bind_rows(lapply(files, read_csv))

# Lookup tables: CIP field-of-study titles and institution names.
cip <- read_csv("data/CIPCode2020.csv")
institutions <- read_csv("data/hd2018.csv")
d <- left_join(dat, cip, by = c("CIPCODE" = "CIPCode"))
d <- left_join(d, institutions, by = "UNITID")

# Shared summary used by all four aggregations below: count of Black
# doctoral recipients, their share of all recipients, and the denominator.
# `df` may be grouped; the summary is computed per group.
award_summary <- function(df) {
  df %>%
    summarize(
      Total = sum(CBKAAT[AWLEVEL == 17], na.rm = TRUE),
      Pct = Total / sum(CTOTALT[AWLEVEL == 17], na.rm = TRUE),
      Denom = sum(CTOTALT[AWLEVEL == 17], na.rm = TRUE)
    )
}

# Per field x institution.
tab <- d %>%
  group_by(CIPTitle, INSTNM) %>%
  award_summary() %>%
  filter(Denom > 0)

# Marginal totals per field (all institutions combined).
margin_field <- d %>%
  group_by(CIPTitle) %>%
  award_summary() %>%
  mutate(INSTNM = "Overall") %>%
  filter(Denom > 0)

# Marginal totals per institution (all fields combined).
margin_school <- d %>%
  group_by(INSTNM) %>%
  award_summary() %>%
  mutate(CIPTitle = "Overall") %>%
  filter(Denom > 0)

# Grand total across all fields and institutions (no Denom filter,
# matching the original behavior).
overall_margin <- d %>%
  award_summary() %>%
  mutate(CIPTitle = "Overall", INSTNM = "Overall")

# Combine, sort largest programs first, drop rows with missing labels.
all <- bind_rows(tab, margin_field, margin_school, overall_margin) %>%
  arrange(-Denom) %>%
  filter(!is.na(CIPTitle), !is.na(INSTNM))

# `path=` is deprecated in readr >= 1.4 (renamed `file=`); passing the
# destination positionally works with both old and new readr versions.
write_csv(all, "phd_diversity/data/aggregated_data.csv")
|
\name{cuGraphicsResourceSetMapFlags}
\alias{cuGraphicsResourceSetMapFlags}
\title{Set usage flags for mapping a graphics resource}
\description{ Set \code{flags} for mapping the graphics resource \code{resource}.}
\usage{cuGraphicsResourceSetMapFlags(resource, flags)}
\arguments{
\item{resource}{Registered resource to set flags for}
\item{flags}{Parameters for resource mapping}
}
\seealso{\code{\link{cuGraphicsMapResources}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.htm}}
\keyword{programming}
\concept{GPU}
|
/man/cuGraphicsResourceSetMapFlags.Rd
|
no_license
|
xfbingshan/RCUDA
|
R
| false
| false
| 544
|
rd
|
\name{cuGraphicsResourceSetMapFlags}
\alias{cuGraphicsResourceSetMapFlags}
\title{Set usage flags for mapping a graphics resource}
\description{ Set \code{flags} for mapping the graphics resource \code{resource}.}
\usage{cuGraphicsResourceSetMapFlags(resource, flags)}
\arguments{
\item{resource}{Registered resource to set flags for}
\item{flags}{Parameters for resource mapping}
}
\seealso{\code{\link{cuGraphicsMapResources}}}
\references{\url{http://docs.nvidia.com/cuda/cuda-driver-api/index.htm}}
\keyword{programming}
\concept{GPU}
|
# Experiment: DT table inside Shiny where empty cells are excluded from cell
# selection, and the selected cell indices are pushed back to Shiny.
library(DT)
library(shiny)

# Working data comes from the surrounding tidycells dev session
# (na_replace_this_df / cdd are defined elsewhere).
dat <- na_replace_this_df(cdd)

# JS row callback: tag every empty <td> with the 'notselectable' class so the
# Select extension's selector ("td:not(.notselectable)") skips them.
rowCallback <- c(
  "function(row, data, displayNum, displayIndex){",
  paste0("  var indices = [",paste0(seq(nrow(dat))-1, collapse = ", "),"];"),
  "  if(indices.indexOf(displayIndex) > -1){",
  "    $(row).find('td:empty').addClass('notselectable');",
  "  }",
  "}"
)

# Earlier prototype kept for reference (multi-select, server = FALSE):
# shinyApp(
#   ui = fluidPage(
#     DTOutput("table")
#   ),
#   server = function(input, output, session) {
#     output[["table"]] <- renderDT({
#       dat %>%
#         datatable(options = list(
#           rowCallback = JS(rowCallback),
#           select = list(style = "multi", selector = "td:not(.notselectable)", items = "cell")
#         ),
#         extensions = "Select", selection = "none"
#         )
#     }, server = FALSE)
#   }
# )

# JS callback: after any click in the table body, collect the indices of all
# currently selected cells and send them to Shiny as
# input$<table-id>_cells_selected_raw.
callback <- c(
  "var id = $(table.table().node()).closest('.datatables').attr('id');",
  "table.on('click', 'tbody', function(){",
  "  setTimeout(function(){",
  "    var indexes = table.cells({selected:true}).indexes();",
  "    var indices = Array(indexes.length);",
  "    for(var i = 0; i < indices.length; ++i){",
  "      indices[i] = indexes[i];",
  "    }",
  "    Shiny.setInputValue(id + '_cells_selected_raw', indices);",
  "  }, 0);",
  "});"
)

shinyApp(
  ui = fluidPage(
    DTOutput("table")
  ),
  server = function(input, output, session) {
    output[["table"]] <- renderDT({
      dat %>%
        datatable(
          callback = JS(callback),
          style = "bootstrap",
          options = list(
            rowCallback = JS(rowCallback),
            select = list(style = "os", selector = "td:not(.notselectable)", items = "cell")
          ),
          extensions = "Select", selection = "none"
        )
    }, server = TRUE)  # was `T`; spell out TRUE (T/F are reassignable)
    observe({
      cat("\014")  # clear the RStudio console between updates
      io <- input[["table_cells_selected_raw"]]
      if(!is.null(io)){
        # io is a flat vector of per-cell index triples from DataTables;
        # reshape to a matrix, convert the 0-based row index to 1-based,
        # and drop the third component before printing.
        m <- matrix(io, ncol = 3, byrow = TRUE)  # was `byrow = T`
        colnames(m) <- names(io)[seq(3)]
        m[,1] <- m[,1]+1
        m <- m[,-3]
        print(m)
      }
    })
  }
)
|
/00_nightly_only/dev/shiny_dt_experiments.R
|
permissive
|
bedantaguru/tidycells_nightly
|
R
| false
| false
| 2,094
|
r
|
# Experiment: DT table inside Shiny where empty cells are excluded from cell
# selection, and the selected cell indices are pushed back to Shiny.
library(DT)
library(shiny)

# Working data comes from the surrounding tidycells dev session
# (na_replace_this_df / cdd are defined elsewhere).
dat <- na_replace_this_df(cdd)

# JS row callback: tag every empty <td> with the 'notselectable' class so the
# Select extension's selector ("td:not(.notselectable)") skips them.
rowCallback <- c(
  "function(row, data, displayNum, displayIndex){",
  paste0("  var indices = [",paste0(seq(nrow(dat))-1, collapse = ", "),"];"),
  "  if(indices.indexOf(displayIndex) > -1){",
  "    $(row).find('td:empty').addClass('notselectable');",
  "  }",
  "}"
)

# Earlier prototype kept for reference (multi-select, server = FALSE):
# shinyApp(
#   ui = fluidPage(
#     DTOutput("table")
#   ),
#   server = function(input, output, session) {
#     output[["table"]] <- renderDT({
#       dat %>%
#         datatable(options = list(
#           rowCallback = JS(rowCallback),
#           select = list(style = "multi", selector = "td:not(.notselectable)", items = "cell")
#         ),
#         extensions = "Select", selection = "none"
#         )
#     }, server = FALSE)
#   }
# )

# JS callback: after any click in the table body, collect the indices of all
# currently selected cells and send them to Shiny as
# input$<table-id>_cells_selected_raw.
callback <- c(
  "var id = $(table.table().node()).closest('.datatables').attr('id');",
  "table.on('click', 'tbody', function(){",
  "  setTimeout(function(){",
  "    var indexes = table.cells({selected:true}).indexes();",
  "    var indices = Array(indexes.length);",
  "    for(var i = 0; i < indices.length; ++i){",
  "      indices[i] = indexes[i];",
  "    }",
  "    Shiny.setInputValue(id + '_cells_selected_raw', indices);",
  "  }, 0);",
  "});"
)

shinyApp(
  ui = fluidPage(
    DTOutput("table")
  ),
  server = function(input, output, session) {
    output[["table"]] <- renderDT({
      dat %>%
        datatable(
          callback = JS(callback),
          style = "bootstrap",
          options = list(
            rowCallback = JS(rowCallback),
            select = list(style = "os", selector = "td:not(.notselectable)", items = "cell")
          ),
          extensions = "Select", selection = "none"
        )
    }, server = TRUE)  # was `T`; spell out TRUE (T/F are reassignable)
    observe({
      cat("\014")  # clear the RStudio console between updates
      io <- input[["table_cells_selected_raw"]]
      if(!is.null(io)){
        # io is a flat vector of per-cell index triples from DataTables;
        # reshape to a matrix, convert the 0-based row index to 1-based,
        # and drop the third component before printing.
        m <- matrix(io, ncol = 3, byrow = TRUE)  # was `byrow = T`
        colnames(m) <- names(io)[seq(3)]
        m[,1] <- m[,1]+1
        m <- m[,-3]
        print(m)
      }
    })
  }
)
|
#' Construct copy-number single sample plot
#'
#' Given a data frame construct a plot to display raw copy number calls for a
#' single sample.
#' @name cnView
#' @param x Object of class data frame with rows representing copy number calls
#' from a single sample. The data frame must contain columns with the following
#' names "chromosome", "coordinate", "cn", and optionally "p_value"
#' (see details).
#' @param y Object of class data frame with rows representing cytogenetic bands
#' for a chromosome. The data frame must contain columns with the following
#' names "chrom", "chromStart", "chromEnd", "name", "gieStain" for plotting the
#' ideogram (optional: see details).
#' @param z Object of class data frame with row representing copy number segment
#' calls. The data frame must contain columns with the following names
#' "chromosome", "start", "end", "segmean" (optional: see details)
#' @param genome Character string specifying a valid UCSC genome (see details).
#' @param chr Character string specifying which chromosome to plot one of
#' "chr..." or "all"
#' @param CNscale Character string specifying if copy number calls supplied are
#' relative (i.e.copy neutral == 0) or absolute (i.e. copy neutral ==2). One of
#' "relative" or "absolute"
#' @param ideogram_txtAngle Integer specifying the angle of cytogenetic labels
#' on the ideogram subplot.
#' @param ideogram_txtSize Integer specifying the size of cytogenetic labels on
#' the ideogram subplot.
#' @param plotLayer Valid ggplot2 layer to be added to the copy number plot.
#' @param ideogramLayer Valid ggplot2 layer to be added to the ideogram
#' sub-plot.
#' @param out Character vector specifying the the object to output, one of
#' "data", "grob", or "plot", defaults to "plot" (see returns).
#' @param segmentColor Character string specifying the color of segment lines. Used only if
#' Z is not null.
#' @details cnView is able to plot in two modes specified via the `chr`
#' parameter, these modes are single chromosome view in which an ideogram is
#' displayed and genome view where chromosomes are faceted. For the single
#' chromosome view cytogenetic band information is required giving the
#' coordinate, stain, and name of each band. As a convenience cnView stores this
#' information for the following genomes "hg19", "hg38", "mm9", "mm10", and
#' "rn5". If the genome assembly supplied to the `genome` parameter is not one
#' of the 5 afore mentioned genome assemblies cnView will attempt to query the
#' UCSC MySQL database to retrieve this information. Alternatively the user can
#' manually supply this information as a data frame to the `y` parameter, input
#' to the `y` parameter take precedence of input to `genome`.
#'
#' cnView is also able to represent p-values for copy-number calls if they are
#' supplied via the "p_value" column in the argument supplied to x. The presence
#' of this column in x will set a transparency value to copy-number calls with
#' calls of less significance becoming more transparent.
#'
#' If it is available cnView can plot copy-number segment calls on top of raw
#' calls supplied to parameter `x` via the parameter `z`.
#' @examples
#' # Create data
#' chromosome <- 'chr14'
#' coordinate <- sort(sample(0:106455000, size=2000, replace=FALSE))
#' cn <- c(rnorm(300, mean=3, sd=.2), rnorm(700, mean=2, sd=.2), rnorm(1000, mean=3, sd=.2))
#' data <- as.data.frame(cbind(chromosome, coordinate, cn))
#'
#' # Plot raw copy number calls
#' cnView(data, chr='chr14', genome='hg19', ideogram_txtSize=4)
#' @return One of the following, a list of dataframes containing data to be
#' plotted, a grob object, or a plot.
#' @importFrom stats aggregate
#' @export
cnView <- function(x, y=NULL, z=NULL, genome='hg19', chr='chr1',
                   CNscale="absolute", ideogram_txtAngle=45,
                   ideogram_txtSize=5, plotLayer=NULL, ideogramLayer=NULL,
                   out="plot", segmentColor=NULL)
{
    # Perform a basic quality check; cnView_qual validates/coerces x, y, z
    input <- cnView_qual(x, y, z, genome, CNscale=CNscale)
    x <- input[[1]]
    y <- input[[2]]
    z <- input[[3]]

    # Obtain cytogenetic band information: prefer user-supplied y, then the
    # preloaded GenVisR data, and finally a live UCSC MySQL query
    preloaded <- c("hg38", "hg19", "mm10", "mm9", "rn5")
    if(is.null(y) && any(genome == preloaded))
    {
        message("genome specified is preloaded, retrieving data...")
        cytobands <- GenVisR::cytoGeno[GenVisR::cytoGeno$genome == genome,]
        cytobands <- cytobands[,-which(colnames(cytobands) == "genome")]
    } else if(is.null(y)) {
        # Obtain data for UCSC genome and extract relevant columns
        memo <- paste0("attempting to query UCSC mySQL database for chromosome",
                       " positions and cytogenetic information")
        message(memo)
        cytobands <- suppressWarnings(multi_cytobandRet(genome=genome))
    } else {
        # (fixed missing space between "for" and "position" in this message)
        memo <- paste0("Detected argument supplied to y.. using y for ",
                       "position and cytogenetic information")
        message(memo)
        cytobands <- y
    }

    # Create dummy data spanning each chromosome's min/max coordinate so the
    # plot panels get the correct x-axis dimensions
    fakeStart <- stats::aggregate(data=cytobands, FUN=min, chromStart~chrom)
    colnames(fakeStart) <- c("chromosome", "coordinate")
    fakeEnd <- stats::aggregate(data=cytobands, FUN=max, chromEnd~chrom)
    colnames(fakeEnd) <- c("chromosome", "coordinate")
    dummyData <- rbind(fakeStart, fakeEnd)
    dummyData$chromosome <- as.factor(dummyData$chromosome)
    dummyData <- multi_subsetChr(dummyData, chr)

    # Plot all chromosomes at once if specified
    if(chr == 'all')
    {
        # Forward CNscale, user plot layers and segment color here as well;
        # previously these arguments were silently ignored in the
        # genome-wide view
        p1 <- cnView_buildMain(x, dummyData, z=z, chr=chr, CNscale=CNscale,
                               layers=plotLayer, segmentColor=segmentColor)
    } else {
        # plot the ideogram for the selected chromosome
        chromosome_plot <- ideoView(cytobands, chromosome=chr,
                                    txtAngle=ideogram_txtAngle,
                                    txtSize=ideogram_txtSize,
                                    plotLayer=ideogramLayer)

        # restrict copy-number (and, if present, segment) calls to the
        # selected chromosome
        x <- multi_subsetChr(x, chr)
        if(!is.null(z))
        {
            z <- multi_subsetChr(z, chr)
        }

        # build the copy-number plot
        CN_plot <- cnView_buildMain(x, dummyData, z=z, chr=chr, CNscale=CNscale,
                                    layers=plotLayer, segmentColor=segmentColor)
    }

    # Decide what to output; for the single-chromosome view align the
    # ideogram and copy-number sub-plots first
    dataOut <- list(main=x, dummyData=dummyData, segments=z, cytobands=cytobands)
    if(!exists("p1", inherits=FALSE))
    {
        p1 <- multi_align(chromosome_plot, CN_plot)
        output <- multi_selectOut(data=dataOut, plot=p1, draw=TRUE, out=out)
    } else {
        output <- multi_selectOut(data=dataOut, plot=p1, draw=FALSE, out=out)
    }
    return(output)
}
|
/R/cnView.R
|
permissive
|
griffithlab/GenVisR
|
R
| false
| false
| 6,770
|
r
|
#' Construct copy-number single sample plot
#'
#' Given a data frame construct a plot to display raw copy number calls for a
#' single sample.
#' @name cnView
#' @param x Object of class data frame with rows representing copy number calls
#' from a single sample. The data frame must contain columns with the following
#' names "chromosome", "coordinate", "cn", and optionally "p_value"
#' (see details).
#' @param y Object of class data frame with rows representing cytogenetic bands
#' for a chromosome. The data frame must contain columns with the following
#' names "chrom", "chromStart", "chromEnd", "name", "gieStain" for plotting the
#' ideogram (optional: see details).
#' @param z Object of class data frame with row representing copy number segment
#' calls. The data frame must contain columns with the following names
#' "chromosome", "start", "end", "segmean" (optional: see details)
#' @param genome Character string specifying a valid UCSC genome (see details).
#' @param chr Character string specifying which chromosome to plot one of
#' "chr..." or "all"
#' @param CNscale Character string specifying if copy number calls supplied are
#' relative (i.e.copy neutral == 0) or absolute (i.e. copy neutral ==2). One of
#' "relative" or "absolute"
#' @param ideogram_txtAngle Integer specifying the angle of cytogenetic labels
#' on the ideogram subplot.
#' @param ideogram_txtSize Integer specifying the size of cytogenetic labels on
#' the ideogram subplot.
#' @param plotLayer Valid ggplot2 layer to be added to the copy number plot.
#' @param ideogramLayer Valid ggplot2 layer to be added to the ideogram
#' sub-plot.
#' @param out Character vector specifying the the object to output, one of
#' "data", "grob", or "plot", defaults to "plot" (see returns).
#' @param segmentColor Character string specifying the color of segment lines. Used only if
#' Z is not null.
#' @details cnView is able to plot in two modes specified via the `chr`
#' parameter, these modes are single chromosome view in which an ideogram is
#' displayed and genome view where chromosomes are faceted. For the single
#' chromosome view cytogenetic band information is required giving the
#' coordinate, stain, and name of each band. As a convenience cnView stores this
#' information for the following genomes "hg19", "hg38", "mm9", "mm10", and
#' "rn5". If the genome assembly supplied to the `genome` parameter is not one
#' of the 5 afore mentioned genome assemblies cnView will attempt to query the
#' UCSC MySQL database to retrieve this information. Alternatively the user can
#' manually supply this information as a data frame to the `y` parameter, input
#' to the `y` parameter take precedence of input to `genome`.
#'
#' cnView is also able to represent p-values for copy-number calls if they are
#' supplied via the "p_value" column in the argument supplied to x. The presence
#' of this column in x will set a transparency value to copy-number calls with
#' calls of less significance becoming more transparent.
#'
#' If it is available cnView can plot copy-number segment calls on top of raw
#' calls supplied to parameter `x` via the parameter `z`.
#' @examples
#' # Create data
#' chromosome <- 'chr14'
#' coordinate <- sort(sample(0:106455000, size=2000, replace=FALSE))
#' cn <- c(rnorm(300, mean=3, sd=.2), rnorm(700, mean=2, sd=.2), rnorm(1000, mean=3, sd=.2))
#' data <- as.data.frame(cbind(chromosome, coordinate, cn))
#'
#' # Plot raw copy number calls
#' cnView(data, chr='chr14', genome='hg19', ideogram_txtSize=4)
#' @return One of the following, a list of dataframes containing data to be
#' plotted, a grob object, or a plot.
#' @importFrom stats aggregate
#' @export
cnView <- function(x, y=NULL, z=NULL, genome='hg19', chr='chr1',
                   CNscale="absolute", ideogram_txtAngle=45,
                   ideogram_txtSize=5, plotLayer=NULL, ideogramLayer=NULL,
                   out="plot", segmentColor=NULL)
{
    # Perform a basic quality check; cnView_qual validates/coerces x, y, z
    input <- cnView_qual(x, y, z, genome, CNscale=CNscale)
    x <- input[[1]]
    y <- input[[2]]
    z <- input[[3]]

    # Obtain cytogenetic band information: prefer user-supplied y, then the
    # preloaded GenVisR data, and finally a live UCSC MySQL query
    preloaded <- c("hg38", "hg19", "mm10", "mm9", "rn5")
    if(is.null(y) && any(genome == preloaded))
    {
        message("genome specified is preloaded, retrieving data...")
        cytobands <- GenVisR::cytoGeno[GenVisR::cytoGeno$genome == genome,]
        cytobands <- cytobands[,-which(colnames(cytobands) == "genome")]
    } else if(is.null(y)) {
        # Obtain data for UCSC genome and extract relevant columns
        memo <- paste0("attempting to query UCSC mySQL database for chromosome",
                       " positions and cytogenetic information")
        message(memo)
        cytobands <- suppressWarnings(multi_cytobandRet(genome=genome))
    } else {
        # (fixed missing space between "for" and "position" in this message)
        memo <- paste0("Detected argument supplied to y.. using y for ",
                       "position and cytogenetic information")
        message(memo)
        cytobands <- y
    }

    # Create dummy data spanning each chromosome's min/max coordinate so the
    # plot panels get the correct x-axis dimensions
    fakeStart <- stats::aggregate(data=cytobands, FUN=min, chromStart~chrom)
    colnames(fakeStart) <- c("chromosome", "coordinate")
    fakeEnd <- stats::aggregate(data=cytobands, FUN=max, chromEnd~chrom)
    colnames(fakeEnd) <- c("chromosome", "coordinate")
    dummyData <- rbind(fakeStart, fakeEnd)
    dummyData$chromosome <- as.factor(dummyData$chromosome)
    dummyData <- multi_subsetChr(dummyData, chr)

    # Plot all chromosomes at once if specified
    if(chr == 'all')
    {
        # Forward CNscale, user plot layers and segment color here as well;
        # previously these arguments were silently ignored in the
        # genome-wide view
        p1 <- cnView_buildMain(x, dummyData, z=z, chr=chr, CNscale=CNscale,
                               layers=plotLayer, segmentColor=segmentColor)
    } else {
        # plot the ideogram for the selected chromosome
        chromosome_plot <- ideoView(cytobands, chromosome=chr,
                                    txtAngle=ideogram_txtAngle,
                                    txtSize=ideogram_txtSize,
                                    plotLayer=ideogramLayer)

        # restrict copy-number (and, if present, segment) calls to the
        # selected chromosome
        x <- multi_subsetChr(x, chr)
        if(!is.null(z))
        {
            z <- multi_subsetChr(z, chr)
        }

        # build the copy-number plot
        CN_plot <- cnView_buildMain(x, dummyData, z=z, chr=chr, CNscale=CNscale,
                                    layers=plotLayer, segmentColor=segmentColor)
    }

    # Decide what to output; for the single-chromosome view align the
    # ideogram and copy-number sub-plots first
    dataOut <- list(main=x, dummyData=dummyData, segments=z, cytobands=cytobands)
    if(!exists("p1", inherits=FALSE))
    {
        p1 <- multi_align(chromosome_plot, CN_plot)
        output <- multi_selectOut(data=dataOut, plot=p1, draw=TRUE, out=out)
    } else {
        output <- multi_selectOut(data=dataOut, plot=p1, draw=FALSE, out=out)
    }
    return(output)
}
|
\name{assortment.continuous}
\alias{assortment.continuous}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Assortment on continuous vertex values
}
\description{
Calculates the assortativity coefficient for weighted and unweighted graphs with numerical vertex values
}
\usage{
assortment.continuous(graph, vertex_values, weighted = TRUE, SE = FALSE, M = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{graph}{
An adjacency matrix, as an N x N matrix. Can be weighted or binary.
}
\item{vertex_values}{
Values on which to calculate assortment, vector of N numbers
}
\item{weighted}{
Flag: TRUE to use weighted edges, FALSE to turn edges into binary (even if weights are given)
}
\item{SE}{
Calculate standard error using the Jackknife method.
}
\item{M}{
Binning value for Jackknife, where M edges are removed rather than single edges. This helps speed up the estimate for large networks with many edges.
}
}
\value{
This function returns a named list, with two elements:
$r the assortativity coefficient
$SE the standard error
}
\references{
Newman (2003) Mixing patterns in networks. Physical Review E (67)
Farine, D.R. (2014) Measuring phenotypic assortment in animal social networks: weighted associations are more robust than binary edges. Animal Behaviour 89: 141-153.
}
\author{
Damien Farine dfarine@orn.mpg.de
}
\examples{
# DIRECTED NETWORK EXAMPLE
# Create a random directed network
N <- 20
dyads <- expand.grid(ID1=1:20,ID2=1:20)
dyads <- dyads[which(dyads$ID1 != dyads$ID2),]
weights <- rbeta(nrow(dyads),1,15)
network <- matrix(0, nrow=N, ncol=N)
network[cbind(dyads$ID1,dyads$ID2)] <- weights
# Create random continuous trait values
traits <- rnorm(N)
# Test for assortment as binary network
assortment.continuous(network,traits,weighted=FALSE)
# Test for assortment as weighted network
assortment.continuous(network,traits,weighted=TRUE)
# UNDIRECTED NETWORK EXAMPLE
# Create a random undirected network
N <- 20
dyads <- expand.grid(ID1=1:20,ID2=1:20)
dyads <- dyads[which(dyads$ID1 < dyads$ID2),]
weights <- rbeta(nrow(dyads),1,15)
network <- matrix(0, nrow=N, ncol=N)
network[cbind(dyads$ID1,dyads$ID2)] <- weights
network[cbind(dyads$ID2,dyads$ID1)] <- weights
# Create random continuous trait values
traits <- rnorm(N)
# Test for assortment as binary network
assortment.continuous(network,traits,weighted=FALSE)
# Test for assortment as weighted network
assortment.continuous(network,traits,weighted=TRUE)
}
|
/man/assortment.continuous.Rd
|
no_license
|
cjw326/assortnet
|
R
| false
| false
| 2,554
|
rd
|
\name{assortment.continuous}
\alias{assortment.continuous}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Assortment on continuous vertex values
}
\description{
Calculates the assortativity coefficient for weighted and unweighted graphs with numerical vertex values
}
\usage{
assortment.continuous(graph, vertex_values, weighted = TRUE, SE = FALSE, M = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{graph}{
An adjacency matrix, as an N x N matrix. Can be weighted or binary.
}
\item{vertex_values}{
Values on which to calculate assortment, vector of N numbers
}
\item{weighted}{
Flag: TRUE to use weighted edges, FALSE to turn edges into binary (even if weights are given)
}
\item{SE}{
Calculate standard error using the Jackknife method.
}
\item{M}{
Binning value for Jackknife, where M edges are removed rather than single edges. This helps speed up the estimate for large networks with many edges.
}
}
\value{
This function returns a named list, with two elements:
$r the assortativity coefficient
$SE the standard error
}
\references{
Newman (2003) Mixing patterns in networks. Physical Review E (67)
Farine, D.R. (2014) Measuring phenotypic assortment in animal social networks: weighted associations are more robust than binary edges. Animal Behaviour 89: 141-153.
}
\author{
Damien Farine dfarine@orn.mpg.de
}
\examples{
# DIRECTED NETWORK EXAMPLE
# Create a random directed network
N <- 20
dyads <- expand.grid(ID1=1:20,ID2=1:20)
dyads <- dyads[which(dyads$ID1 != dyads$ID2),]
weights <- rbeta(nrow(dyads),1,15)
network <- matrix(0, nrow=N, ncol=N)
network[cbind(dyads$ID1,dyads$ID2)] <- weights
# Create random continuous trait values
traits <- rnorm(N)
# Test for assortment as binary network
assortment.continuous(network,traits,weighted=FALSE)
# Test for assortment as weighted network
assortment.continuous(network,traits,weighted=TRUE)
# UNDIRECTED NETWORK EXAMPLE
# Create a random undirected network
N <- 20
dyads <- expand.grid(ID1=1:20,ID2=1:20)
dyads <- dyads[which(dyads$ID1 < dyads$ID2),]
weights <- rbeta(nrow(dyads),1,15)
network <- matrix(0, nrow=N, ncol=N)
network[cbind(dyads$ID1,dyads$ID2)] <- weights
network[cbind(dyads$ID2,dyads$ID1)] <- weights
# Create random continuous trait values
traits <- rnorm(N)
# Test for assortment as binary network
assortment.continuous(network,traits,weighted=FALSE)
# Test for assortment as weighted network
assortment.continuous(network,traits,weighted=TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{druid.query.segmentMetadata}
\alias{druid.query.segmentMetadata}
\title{Query segment metadata}
\usage{
druid.query.segmentMetadata(url = druid.url(), dataSource, intervals,
verbose = F, ...)
}
\arguments{
\item{url}{URL to connect to druid, defaults to druid.url()}
\item{dataSource}{name of the data source to query}
\item{intervals}{time period to retrieve data for}
\item{verbose}{prints out the JSON query sent to Druid}
}
\description{
Query segment metadata
}
|
/man/druid.query.segmentMetadata.Rd
|
permissive
|
JesseKolb/RDruid
|
R
| false
| true
| 565
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{druid.query.segmentMetadata}
\alias{druid.query.segmentMetadata}
\title{Query segment metadata}
\usage{
druid.query.segmentMetadata(url = druid.url(), dataSource, intervals,
verbose = F, ...)
}
\arguments{
\item{url}{URL to connect to druid, defaults to druid.url()}
\item{dataSource}{name of the data source to query}
\item{intervals}{time period to retrieve data for}
\item{verbose}{prints out the JSON query sent to Druid}
}
\description{
Query segment metadata
}
|
# Plot 2: total PM2.5 emissions from all sources in Baltimore City
# (fips == "24510") by year, written to plot2.png.

# Loading provided datasets from local machine
NEI <- readRDS("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-courseProject/summarySCC_PM25.rds")
SCC <- readRDS("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-courseProject/Source_Classification_Code.rds")

# Baltimore City records only.
baltimore <- NEI[NEI$fips=="24510",]
# Total emissions per year across all sources.
TotalPMBaltimore <- aggregate(Emissions ~ year, baltimore,FUN=sum)

# Open the PNG device *before* plotting. The original code drew the barplot
# on the screen device, then opened the PNG and immediately closed it,
# producing an empty plot2.png.
png(file="C:/Users/lsharifi/Desktop/Rot2/coursera/A4-courseProject/plot2.png", width=480, height=480)
barplot(
  TotalPMBaltimore$Emissions,
  names.arg=TotalPMBaltimore$year,
  xlab="Year",
  ylab="PM2.5 Emissions (Tons)",
  main="Total PM2.5 Emissions From All Baltimore City Sources"
)
dev.off()
|
/Plot2.R
|
no_license
|
layaSharifi/Exploratory-Data-Analysis-CourseProject
|
R
| false
| false
| 726
|
r
|
# Plot 2: total PM2.5 emissions from all sources in Baltimore City
# (fips == "24510") by year, written to plot2.png.

# Loading provided datasets from local machine
NEI <- readRDS("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-courseProject/summarySCC_PM25.rds")
SCC <- readRDS("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-courseProject/Source_Classification_Code.rds")

# Baltimore City records only.
baltimore <- NEI[NEI$fips=="24510",]
# Total emissions per year across all sources.
TotalPMBaltimore <- aggregate(Emissions ~ year, baltimore,FUN=sum)

# Open the PNG device *before* plotting. The original code drew the barplot
# on the screen device, then opened the PNG and immediately closed it,
# producing an empty plot2.png.
png(file="C:/Users/lsharifi/Desktop/Rot2/coursera/A4-courseProject/plot2.png", width=480, height=480)
barplot(
  TotalPMBaltimore$Emissions,
  names.arg=TotalPMBaltimore$year,
  xlab="Year",
  ylab="PM2.5 Emissions (Tons)",
  main="Total PM2.5 Emissions From All Baltimore City Sources"
)
dev.off()
|
# Parse positional command-line arguments passed by the MetaPop pipeline.
options <- commandArgs(trailingOnly = T)
#universal
directory_name <- options[1]
threads <- as.numeric(options[2])
library_location <- options[3]
# Fall back to the default R library path(s) when none was supplied.
if(library_location == ""){
  library_location <- .libPaths()
}
ref_fasta <- options[4]
ref_genes <- options[5]
print(ref_fasta)
print(ref_genes)
#Component specific
min_cov <- as.numeric(options[6])
min_dep <- as.numeric(options[7])
sub_samp <- as.numeric(options[8])
plot_all <- options[9]
# "1" enables plotting of every contig; anything else disables it.
if(plot_all == "1"){
  plot_all <- T
}else{
  plot_all <- F
}
snp_scale <- options[10]
# All subsequent relative paths (MetaPop/...) resolve against this directory.
setwd(directory_name)
# Parse a FASTA file and return a data.table with one row per sequence:
#   seq_ids  - sequence ID (header text after ">", truncated at first space)
#   lengths  - total sequence length in characters
# Assumes the first line of the file is a header line (standard FASTA).
get_names_and_lengths_from_fasta <- function(file){
  original_assemblies <- readLines(file)
  # Logical mask of header lines (those starting with ">").
  header_lines = substr(original_assemblies, 1, 1) == ">"
  seq_ids = original_assemblies[header_lines]
  char_counts = nchar(original_assemblies)
  #char_counts[header_lines] = 0
  rm(original_assemblies)
  # Single pass over the file: each header line closes out the length of the
  # previous record; non-header lines accumulate into the current record.
  lengths = rep(0, sum(header_lines))
  current_length = 0
  headers_count = 1
  for(i in 2:length(header_lines)){
    if(header_lines[i]){
      lengths[headers_count] = current_length
      headers_count = headers_count + 1
      current_length = 0
    }else{
      current_length = current_length + char_counts[i]
    }
  }
  #Final iter
  # Close out the last record (no trailing header triggers it in the loop).
  lengths[headers_count] = current_length
  # Strip the leading ">" then keep only the first space-delimited token.
  seq_ids = substr(seq_ids, 2, nchar(seq_ids ))
  seq_ids <- unlist(lapply(seq_ids , function(x){
    res = strsplit(x, split = ' ')[[1]]
    if(length(res) > 1){
      res = res[1]
    }
    return(res)
  }))
  result = data.table(seq_ids, lengths)
  return(result)
}
# Load analysis/plotting libraries from the user-specified library location.
#suppressMessages(suppressWarnings(library(Biostrings, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(gggenes, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(cowplot, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(doParallel, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(RColorBrewer, lib.loc = library_location)))
# SNP positions in codons code
# Per-sample totals of SNPs falling in the 1st/2nd/3rd codon position.
codon_pos_count <- fread("MetaPop/10.Microdiversity/global_codon_position_summary.tsv", sep = "\t")
codon_pos_count <- codon_pos_count[, list(sum(first_pos), sum(second_pos), sum(third_pos)), by = source]
# Overall SNP count per sample (all codon positions combined).
codon_pos_count_all <- melt.data.table(codon_pos_count, id.vars = c("source"))
codon_pos_count_all <- codon_pos_count_all[, sum(value), by = source]
# Bar chart of raw SNP counts per sample.
p_all <- ggplot(codon_pos_count_all, aes(x = source, y = V1)) +
  geom_bar(stat = "identity", position = "stack") +
  ylab("Count of SNPs in sample") +
  xlab("Sample of origin") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 90),
        axis.title.x = element_blank(),
        axis.title = element_text(size = 16),
        axis.text.y = element_text(size = 14),
        axis.line = element_line("black"),
        axis.ticks.y = element_line("black"),
        legend.title = element_text(size = 16),
        legend.text = element_text(size = 14))+
  scale_y_continuous(labels = scales::comma)
# Convert per-position counts to within-sample proportions.
codon_pos_count[, sum := V1+V2+V3,]
codon_pos_count[, V1 := V1/sum]
codon_pos_count[, V2 := V2/sum]
codon_pos_count[, V3 := V3/sum]
codon_pos_count[, sum := NULL]
colnames(codon_pos_count)[2:4] = c("First Pos.", "Second Pos.", "Third Pos.")
codon_pos_count <- melt.data.table(codon_pos_count, id.vars = c("source"))
# Stacked bar chart of the proportion of SNPs by codon position per sample.
p <- ggplot(codon_pos_count, aes(x = source, y = value, fill = variable)) +
  geom_bar(stat = "identity", position = "stack") +
  ylab("Proportion of SNPs by position in codon") +
  xlab("Sample of origin") +
  theme_minimal() +
  scale_fill_manual("Position\nof SNP in\nCodon", labels = c("Pos. 1", "Pos. 2", "Pos. 3"), values = c("grey50", "#2ca9e1", "#FF0000")) +
  theme(axis.text.x = element_text(angle = 90),
        axis.title.x = element_blank(),
        axis.title = element_text(size = 16),
        axis.text.y = element_text(size = 14),
        axis.line = element_line("black"),
        axis.ticks.y = element_line("black"),
        legend.title = element_text(size = 16),
        legend.text = element_text(size = 14))
# Write both plots into a single PDF.
pdf("MetaPop/12.Visualizations/Third_Pos_SNP_summary.pdf", 11, 11)
print(p)
print(p_all)
dev.off()
#FST
# ---- Fixation index (Fst) heatmaps ----
# For every contig with pairwise Fst values between samples, builds a
# lower-triangle sample-by-sample heatmap. Skipped entirely when the
# microdiversity step produced no fixation_index.tsv.
if(file.exists("MetaPop/10.Microdiversity/fixation_index.tsv")){
fixation_data <- fread("MetaPop/10.Microdiversity/fixation_index.tsv", sep = "\t")
fixation_data_filtered <- fixation_data[!is.na(fixation_data$fst),]
contigs_names <- unique(fixation_data_filtered$contig)
namesave = unique(fixation_data_filtered$contig)
# Split into one data.table of pairwise Fst per contig.
fixation_data <- fixation_data_filtered[, list(list(.SD)), by = contig]$V1
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "fixation_data"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# In parallel: symmetrise each contig's pairwise table (mirror each pair and
# add a zero diagonal), then keep only the lower triangle for plotting.
fixation_data <- foreach(x = 1:length(fixation_data)) %dopar% {
x <- fixation_data[[x]]
tmp_contigs <- unique(c(x$row_samp, x$col_samp))
x2 <- x[, list(col_samp, row_samp, fst) ]
colnames(x2)[1:2] = c("row_samp", "col_samp")
missing <- data.table(row_samp = tmp_contigs, col_samp = tmp_contigs, fst = 0)
x <- rbind(x, x2, missing)
organize_rows_and_cols <- sort(unique(c(x$row_samp, x$col_samp)))
x$row_samp <- factor(x$row_samp, levels = organize_rows_and_cols)
x$col_samp <- factor(x$col_samp, levels = organize_rows_and_cols)
#Silly, but works as a method of ensuring that the plotting output is correct in shape.
x2 <- dcast(x, col_samp ~ row_samp, value.var ="fst")
# NOTE(review): data.table does not retain assigned rownames, so the
# rownames(x2) read-back a few lines below may return "1","2",... rather
# than sample names — confirm the col_samp axis labels on rendered plots.
rownames(x2) = x2$col_samp
x2[, col_samp := NULL]
x2[upper.tri(x2)] <- NA
x2[, col_samp := rownames(x2)]
x <- melt.data.table(x2, na.rm = T, id.vars = c("col_samp"))
colnames(x)[2:3] = c("row_samp", "fst")
return(x)
}
stopCluster(cl)
names(fixation_data) = namesave
# Batch contigs into groups of ~`threads`; each batch's heatmaps are built
# in parallel, then printed serially into one shared PDF.
groups <- (1:length(fixation_data))%/%threads
unique_groups <- unique(groups)
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "groups", "unique_groups"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location))))
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(RColorBrewer, lib.loc = library_location))))
registerDoParallel(cl)
pdf("MetaPop/12.Visualizations/fst_genome_heatmap_plots.pdf", width = 17, height = 11)
for(k in unique_groups){
heatmaps <- foreach(i = fixation_data[groups==k], j = names(fixation_data)[groups==k]) %dopar% {
# One heatmap per contig, titled with the contig name; Fst is mapped
# on a fixed 0-1 scale so colours are comparable across contigs.
fst_heatmap <- ggplot(i, aes(y = row_samp, x = col_samp, fill = fst))+
geom_raster()+
theme_classic()+
theme(plot.title = element_text(size = 18, face = "bold"),
axis.text.x = element_text(angle = 90, hjust = 0.5),
axis.text.y = element_text(vjust = 0.5),
plot.margin = unit(c(0,0,0,0), "cm"),
legend.title = element_text(size = 16),
legend.text = element_text(size = 14))+
ylab("")+
xlab("")+
ggtitle(j)+
scale_fill_gradient2(low = "#2ca9e1", mid="grey80", high="#ff0000", na.value = "black", midpoint = 0.5, limits= c(0,1))+
guides(fill = guide_legend(title = "Fst"))
return(fst_heatmap)
}
for(i in heatmaps){
print(i)
}
}
stopCluster(cl)
dev.off()
}
#main genomes
# Parse gene coordinates from a Prodigal-style gene FASTA. Deflines look
# like ">contig_1 # start # end # strand # ...". Each record becomes one
# row; the parent contig name is the gene ID minus its trailing
# "_<gene number>" suffix. Returns a data.table with columns
# contig_gene, start, end, OC (strand) and parent_contig.
parse_genes <- function(file){
fasta_lines <- readLines(file)
defline_mask <- substr(fasta_lines, 1, 1) == ">"
deflines <- fasta_lines[defline_mask]
deflines <- substr(deflines, 2, nchar(deflines))
# Each defline splits into exactly 5 fields on runs of '#', space, or tab.
fields <- strsplit(deflines, "[# \t]+")
gene_table <- data.table(matrix(unlist(fields), ncol = 5, byrow = T))
names(gene_table)[1:4] = c("contig_gene", "start", "end", "OC")
gene_table$start <- as.numeric(gene_table$start)
gene_table$end <- as.numeric(gene_table$end)
# The fifth field (remaining annotation text) is unused.
gene_table <- gene_table[, -5]
gene_table$parent_contig <- gsub("_\\d+$", "", gene_table$contig_gene)
return(gene_table)
}
# ---- Local-scale per-contig microdiversity panels ----
# For each retained contig and each sample it appears in, draws a 4-row
# panel: average depth of coverage (250 bp bins), gene arrows coloured by
# pN/pS, per-gene pi/theta points, and per-gene Tajima's D with shaded
# selection regions. Contigs are batched into PDFs by order of magnitude
# of their length so page width can scale with contig size.
if(snp_scale=="local" | snp_scale == "both"){
print("Creating local scale SNP plots...")
genes = parse_genes(ref_genes)
gene_microdiv <- fread("MetaPop/10.Microdiversity/local_gene_microdiversity.tsv", sep = "\t")
gene_microdiv$parent_contig <- gsub("_\\d+$", "", gene_microdiv$contig_gene)
setkeyv(gene_microdiv, c("parent_contig", "source"))
# Unless plotting everything, keep only the 3 contigs per sample with the
# most genes under positive selection (pN/pS > 1).
if(!plot_all){
highest_selected_contigs <- gene_microdiv[, valuable <- sum(pNpS_ratio > 1, na.rm = T), by = key(gene_microdiv)]
highest_selected_contigs <- highest_selected_contigs[order(highest_selected_contigs$source, V1),]
retained_contigs <- unique(highest_selected_contigs[, tail(parent_contig, 3), by = source]$V1)
gene_microdiv <- gene_microdiv[gene_microdiv$parent_contig %in% retained_contigs,]
}
num_src <- length(unique(gene_microdiv$source))
genes <- genes[genes$parent_contig %in% gene_microdiv$parent_contig,]
genes$OC <- as.numeric(genes$OC)
# Reverse-strand genes (OC < 0) get their start/end swapped so the gene
# arrows point in the transcription direction.
reverser <- genes[,list(start = ifelse(OC < 0, end, start), end = ifelse(OC < 0, start, end))]
genes$start <- reverser$start
genes$end <- reverser$end
rm(reverser)
# Placeholder columns, filled per-sample inside the plotting closure below.
genes$pnps <- NA
genes$pi <- NA
genes$theta <- NA
genes$tajD <- NA
genes[, midpt := (start+end)/2]
# Split both tables into per-contig named lists for fast lookup.
unique_contigs <- genes[, list(list(.SD)), by = parent_contig]$V1
names(unique_contigs) = unique(genes$parent_contig)
rm(genes)
NS = unique(gene_microdiv$parent_contig)
gene_microdiv <- gene_microdiv[, list(list(.SD)), by = parent_contig]$V1
names(gene_microdiv) = NS
rm(NS)
fastaLengths = get_names_and_lengths_from_fasta(ref_fasta)
colnames(fastaLengths) = c("contig", "num_bases")
fastaLengths <- fastaLengths[ contig %in% names(gene_microdiv),]
# bigness = order of magnitude of contig length; used to batch PDFs.
fastaLengths[, bigness := round(log10(num_bases))]
setkey(fastaLengths, "bigness")
unique_contigs <- unique_contigs[fastaLengths$contig]
depth_info <- list.files(path = "MetaPop/04.Depth_per_Pos", full.names = T)
# Recover sample names by stripping the fixed directory prefix (25 chars)
# and the fixed file-name suffix — assumes MetaPop's naming convention.
depth_names <- substr(depth_info, 26, nchar(depth_info)-20)
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "depth_info"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# Bin per-position depths into 250 bp windows (mean depth per window).
depth_bins <- foreach(x = depth_info) %dopar% {
tmp <- fread(x, sep = "\t", header = F)
colnames(tmp) = c("contig", "pos", "depth")
tmp[,pos := ((pos %/% 250))*250,]
setkeyv(tmp, c("contig", "pos"))
contig_names <- unique(tmp$contig)
tmp <- tmp[, sum(depth)/250, by = key(tmp)]
}
stopCluster(cl)
names(depth_bins) = depth_names
size_category <- fastaLengths[, list(list(contig)), by = bigness]
#Tajima's D plot needs a legend to specify the annotations, but without coloring the points. The easiest way to do this is to make a fake one and save it for later.
taj_d_legend_plot <- ggplot(data = data.table(dots = c(1,2,3)), aes(x = dots, fill = factor(dots))) +
geom_bar()+
scale_fill_manual(name = "Tajima's D\nSelection", values = alpha(c("red","grey50", "lightblue"), 0.35), labels = c("Positive", "Neutral", "Purifying"))+
theme(legend.text = element_text(size = 14),
legend.title = element_text(size = 14))
taj_d_legend <- get_legend(taj_d_legend_plot)
cl <- makeCluster(min(detectCores(), threads, length(unique_contigs)))
clusterExport(cl, varlist = c("library_location", "depth_info", "depth_names", "depth_bins"))
clusterEvalQ(cl, library(data.table, lib.loc = library_location))
clusterEvalQ(cl, library(ggplot2, lib.loc = library_location))
clusterEvalQ(cl, library(gggenes, lib.loc = library_location))
clusterEvalQ(cl, library(cowplot, lib.loc = library_location))
registerDoParallel(cl)
# One PDF per contig-size category; pages are built in parallel batches of
# ~`threads` contigs, then printed serially.
for(z in size_category$bigness){
current_conts <- unlist(size_category$V1[size_category$bigness == z])
groups <- (1:length(current_conts)) %/% threads
unique_groups <- unique(groups)
pdf(paste0("MetaPop/12.Visualizations/local_contigs_bp_cat_",as.character(format(10^z, scientific = F)),"_microdiversity_viz.pdf"), width = 13 + (2^(z+1)), height = 11)
for(k in unique_groups){
all_contigs <- foreach(i = 1:length(unique_contigs[current_conts][groups == k])) %dopar% {
name <- names(unique_contigs[current_conts][groups == k])[i]
sub <- unique_contigs[[name]]
sub_data <- gene_microdiv[[name]]
full_len <- fastaLengths[contig == name, num_bases]
#Extremely high pNpS is largely irrelevant to answering the question of selection. This fix keeps scaling consistent, but always displays high pNpS as pos. select.
sub_data$pNpS_ratio <- ifelse(sub_data$pNpS_ratio > 2, 2, sub_data$pNpS_ratio)
sub_data$pNpS_ratio <- ifelse(!sub_data$snps_present, 0, sub_data$pNpS_ratio)
# Build one full panel per sample in which this contig has data.
a_contig <- lapply(unique(sub_data$source), function(j){
depth_dat <- depth_bins[[j]][depth_bins[[j]]$contig == name,]
# Pad windows with no reads to zero depth so the step plot spans
# the whole contig.
missing_bins <- seq(0, full_len, by = 250)
missing_bins <- missing_bins[!missing_bins %in% depth_dat$pos]
if(length(missing_bins) > 0){
missing_bins <- data.table(contig = name, pos = missing_bins, V1 = 0)
depth_dat <- rbind(depth_dat, missing_bins)
}
# Copy this sample's per-gene statistics onto the gene table.
sub$pnps[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pNpS_ratio[sub_data$source == j]
sub$pi[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pi[sub_data$source == j]
sub$theta[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$theta[sub_data$source == j]
sub$tajD[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$taj_D[sub_data$source == j]
pnps_fill <- any(!is.na(sub$pnps))
pi_fill <- any(!is.na(sub$pi))
theta_fill <- any(!is.na(sub$theta))
tajD_fill <- any(!is.na(sub$tajD))
# Infinite Tajima's D values would break the y-axis limits below.
if(tajD_fill){
sub[is.infinite(tajD), tajD := NA]
}
labs <- c("Purifying (0)", "Neutral (1)", "Positive (>1)")
brk <- c(0, 1, 2)
# Row 2: gene arrows by strand, filled by (capped) pN/pS.
genes_plot <- ggplot(sub, aes(fill=pnps, xmin = start, xmax = end, y = OC/2)) +
geom_gene_arrow() +
xlab("")+
ylab("Strand") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_y_continuous(breaks = c(-0.5, 0.5), labels = c("-", "+"), limits = c(-.75, 0.75)) +
scale_fill_gradient2(low="lightblue", mid = "white", high="red", na.value="black", breaks = brk, labels = labs, name = "pN/pS Ratio", limits = c(0, 2), midpoint = 1)+
theme_genes() +
ggtitle("pN/pS Selection by Gene")+
theme(axis.text.y = element_text(size = 20, vjust = 0.38),
axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
title = element_text(size = 14)) +
scale_color_manual(values = 'black', labels = 'No pNpS') +
guides(color = guide_legend(override.aes = list(fill = "black")))
#Adds in pi, theta, and tajima's D points. Offset by their section of the plot.
# Legends are harvested and re-attached as a separate column later.
gene_legend <- get_legend(genes_plot)
genes_plot <- genes_plot + theme(legend.position = "none",
axis.line.x.bottom = element_line("black"))
# Row 1: binned average depth of coverage.
depth_by_pos_plot <- ggplot(depth_dat, aes(x = pos, y = V1))+
geom_step() +
theme_minimal()+
ylab("Depth of Coverage")+
xlab("") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ggtitle("Avg. Depth of Coverage over Contig")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
# Row 3: pi and theta per gene at the gene midpoint.
thet_dat <- melt.data.table(sub, id.vars = "midpt", measure.vars = c("pi", "theta"))
pi_and_theta_plot <- ggplot(thet_dat, aes(x = midpt, y = value, color = variable))+
geom_point(size = 2) +
theme_minimal() +
xlab("")+
ylab("Pi and Theta")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_color_manual("Nucleotide\nDiversity\nMeasure", labels = c("Pi", "Theta"), values = c("#af8dc3", "#7fbf7b")) +
ggtitle("Pi and Theta Nucleotide Diversity by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
thet_legend <- get_legend(pi_and_theta_plot)
pi_and_theta_plot <- pi_and_theta_plot + theme(legend.position = "none")
#Needs a fill legend for the colors.
# Row 4: Tajima's D per gene; shaded bands mark purifying (< -2),
# neutral, and positive (> 2) selection regions.
taj_d_plot <- ggplot(sub, aes(x = midpt))+
geom_point(aes(y = tajD), size = 2) +
theme_minimal() +
xlab("")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ylab("Tajima's D") +
ylim(c(min(sub$tajD), max(sub$tajD)))+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = min(-2, max(sub$tajD)), fill = "lightblue", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(-2, min(sub$tajD)), ymax = min(2, max(sub$tajD)), fill = "grey50", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(2, min(sub$tajD)), ymax = Inf, fill = "red", alpha = 0.35) +
ggtitle("Tajima's D Selection by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
taj_d_plot <- taj_d_plot + theme(legend.position = "none")
# Force all four rows onto the same x-range so positions line up.
taj_d_plot <- taj_d_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
pi_and_theta_plot <- pi_and_theta_plot + coord_cartesian(xlim = c(0,max(sub$end)))
depth_by_pos_plot <- depth_by_pos_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- genes_plot + coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- plot_grid( depth_by_pos_plot,genes_plot, pi_and_theta_plot, taj_d_plot, align = "vh", ncol = 1, axis = "x")
legends <- plot_grid(NULL, gene_legend, thet_legend, taj_d_legend, ncol = 1,nrow = 4, align = "vh")
# Legend column shrinks as pages get wider (larger contigs).
legend_area <- 0.1-(0.01 * z)
title <- ggdraw() +
draw_label(
paste("Contig:", name, "Sample:", j),
fontface = 'bold',
x = 0,
hjust = 0
) +
theme(
# add margin on the left of the drawing canvas,
# so title is aligned with left edge of first plot
plot.margin = margin(0, 0, 0, 7)
)
genes_plot <- plot_grid(genes_plot, legends, ncol = 2, rel_widths = c(1-legend_area, legend_area))
genes_plot <- ggdraw(add_sub(genes_plot, "Contig Position (bp)", vpadding=grid::unit(0,"lines"),y=6, x=0.5, vjust=4.5))
genes_plot <- plot_grid(
title, genes_plot,
ncol = 1,
# rel_heights values control vertical title margins
rel_heights = c(0.07, 1)
)
return(genes_plot)
})
return(a_contig)
}
# Print each contig's per-sample panels into the open PDF device.
silent <- suppressWarnings(lapply(all_contigs, function(f){
suppressWarnings(lapply(f, print))
return(NA)
}))
}
dev.off()
}
}
# ---- Global-scale per-contig microdiversity panels ----
# Mirrors the local-scale section but reads the global microdiversity table
# and takes depth from MetaPop/03.Breadth_and_Depth. For each retained
# contig and sample: depth of coverage, gene arrows coloured by pN/pS,
# per-gene pi/theta, and per-gene Tajima's D with shaded selection regions.
if(snp_scale=="global" | snp_scale == "both"){
print("Creating global scale SNP plots...")
genes = parse_genes(ref_genes)
gene_microdiv <- fread("MetaPop/10.Microdiversity/global_gene_microdiversity.tsv", sep = "\t")
gene_microdiv$parent_contig <- gsub("_\\d+$", "", gene_microdiv$contig_gene)
setkeyv(gene_microdiv, c("parent_contig", "source"))
# Unless plotting everything, keep only the 3 contigs per sample with the
# most genes under positive selection (pN/pS > 1).
if(!plot_all){
highest_selected_contigs <- gene_microdiv[, valuable <- sum(pNpS_ratio > 1, na.rm = T), by = key(gene_microdiv)]
highest_selected_contigs <- highest_selected_contigs[order(highest_selected_contigs$source, V1),]
retained_contigs <- unique(highest_selected_contigs[, tail(parent_contig, 3), by = source]$V1)
gene_microdiv <- gene_microdiv[gene_microdiv$parent_contig %in% retained_contigs,]
}
num_src <- length(unique(gene_microdiv$source))
genes <- genes[genes$parent_contig %in% gene_microdiv$parent_contig,]
genes$OC <- as.numeric(genes$OC)
# Reverse-strand genes (OC < 0) get start/end swapped so arrows point in
# the transcription direction.
reverser <- genes[,list(start = ifelse(OC < 0, end, start), end = ifelse(OC < 0, start, end))]
genes$start <- reverser$start
genes$end <- reverser$end
rm(reverser)
# Placeholder columns, filled per-sample inside the plotting closure below.
genes$pnps <- NA
genes$pi <- NA
genes$theta <- NA
genes$tajD <- NA
genes[, midpt := (start+end)/2]
unique_contigs <- genes[, list(list(.SD)), by = parent_contig]$V1
names(unique_contigs) = unique(genes$parent_contig)
rm(genes)
NS = unique(gene_microdiv$parent_contig)
gene_microdiv <- gene_microdiv[, list(list(.SD)), by = parent_contig]$V1
names(gene_microdiv) = NS
rm(NS)
fastaLengths = get_names_and_lengths_from_fasta(ref_fasta)
# BUGFIX: this previously named the column "length", but all downstream
# reads (the log10 binning just below and the full_len lookup in the plot
# loop) use num_bases — matching the local-scale section.
colnames(fastaLengths) = c("contig", "num_bases")
fastaLengths <- fastaLengths[ contig %in% names(gene_microdiv),]
# bigness = order of magnitude of contig length; used to batch PDFs.
fastaLengths[, bigness := round(log10(num_bases))]
setkey(fastaLengths, "bigness")
unique_contigs <- unique_contigs[fastaLengths$contig]
depth_info <- list.files(path = "MetaPop/03.Breadth_and_Depth", full.names = T)
# Recover sample names by stripping the fixed directory prefix (29 chars)
# and the fixed file-name suffix — assumes MetaPop's naming convention.
depth_names <- substr(depth_info, 30, nchar(depth_info)-22)
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "depth_info"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# Bin per-position depths into 250 bp windows (mean depth per window).
depth_bins <- foreach(x = depth_info) %dopar% {
tmp <- fread(x, sep = "\t")
colnames(tmp) = c("contig", "pos", "depth")
tmp[,pos := ((pos %/% 250))*250,]
setkeyv(tmp, c("contig", "pos"))
contig_names <- unique(tmp$contig)
tmp <- tmp[, sum(depth)/250, by = key(tmp)]
}
stopCluster(cl)
names(depth_bins) = depth_names
size_category <- fastaLengths[, list(list(contig)), by = bigness]
#Tajima's D plot needs a legend to specify the annotations, but without coloring the points. The easiest way to do this is to make a fake one and save it for later.
taj_d_legend_plot <- ggplot(data = data.table(dots = c(1,2,3)), aes(x = dots, fill = factor(dots))) +
geom_bar()+
scale_fill_manual(name = "Tajima's D\nSelection", values = alpha(c("red","grey50", "lightblue"), 0.35), labels = c("Positive", "Neutral", "Purifying"))+
theme(legend.text = element_text(size = 14),
legend.title = element_text(size = 14))
taj_d_legend <- get_legend(taj_d_legend_plot)
cl <- makeCluster(min(detectCores(), threads, length(unique_contigs)))
# BUGFIX: previously also exported "groups"/"unique_groups", which do not
# exist yet unless an earlier optional section happened to define them
# (error when only the global scale runs and no Fst file exists). They are
# created inside the loop below and auto-exported by foreach, matching the
# local-scale section.
clusterExport(cl, varlist = c("library_location", "depth_info", "depth_names", "depth_bins"))
clusterEvalQ(cl, library(data.table, lib.loc = library_location))
clusterEvalQ(cl, library(ggplot2, lib.loc = library_location))
clusterEvalQ(cl, library(gggenes, lib.loc = library_location))
clusterEvalQ(cl, library(cowplot, lib.loc = library_location))
registerDoParallel(cl)
# One PDF per contig-size category; pages are built in parallel batches of
# ~`threads` contigs, then printed serially.
for(z in size_category$bigness){
current_conts <- unlist(size_category$V1[size_category$bigness == z])
groups <- (1:length(current_conts)) %/% threads
unique_groups <- unique(groups)
pdf(paste0("MetaPop/12.Visualizations/global_contigs_bp_cat_",as.character(format(10^z, scientific = F)),"_microdiversity_viz.pdf"), width = 13 + (2^(z+1)), height = 11)
for(k in unique_groups){
all_contigs <- foreach(i = 1:length(unique_contigs[current_conts][groups == k])) %dopar% {
name <- names(unique_contigs[current_conts][groups == k])[i]
sub <- unique_contigs[[name]]
sub_data <- gene_microdiv[[name]]
full_len <- fastaLengths[contig == name, num_bases]
#Extremely high pNpS is largely irrelevant to answering the question of selection. This fix keeps scaling consistent, but always displays high pNpS as pos. select.
sub_data$pNpS_ratio <- ifelse(sub_data$pNpS_ratio > 2, 2, sub_data$pNpS_ratio)
sub_data$pNpS_ratio <- ifelse(!sub_data$snps_present, 0, sub_data$pNpS_ratio)
# Build one full panel per sample in which this contig has data.
a_contig <- lapply(unique(sub_data$source), function(j){
depth_dat <- depth_bins[[j]][depth_bins[[j]]$contig == name,]
# Pad windows with no reads to zero depth so the step plot spans
# the whole contig.
missing_bins <- seq(0, full_len, by = 250)
missing_bins <- missing_bins[!missing_bins %in% depth_dat$pos]
if(length(missing_bins) > 0){
missing_bins <- data.table(contig = name, pos = missing_bins, V1 = 0)
depth_dat <- rbind(depth_dat, missing_bins)
}
# Copy this sample's per-gene statistics onto the gene table.
sub$pnps[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pNpS_ratio[sub_data$source == j]
sub$pi[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pi[sub_data$source == j]
sub$theta[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$theta[sub_data$source == j]
sub$tajD[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$taj_D[sub_data$source == j]
pnps_fill <- any(!is.na(sub$pnps))
pi_fill <- any(!is.na(sub$pi))
theta_fill <- any(!is.na(sub$theta))
tajD_fill <- any(!is.na(sub$tajD))
# Infinite Tajima's D values would break the y-axis limits below.
if(tajD_fill){
sub[is.infinite(tajD), tajD := NA]
}
labs <- c("Purifying (0)", "Neutral (1)", "Positive (>1)")
brk <- c(0, 1, 2)
# Row 2: gene arrows by strand, filled by (capped) pN/pS.
genes_plot <- ggplot(sub, aes(fill=pnps, xmin = start, xmax = end, y = OC/2)) +
geom_gene_arrow() +
xlab("")+
ylab("Strand") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_y_continuous(breaks = c(-0.5, 0.5), labels = c("-", "+"), limits = c(-.75, 0.75)) +
scale_fill_gradient2(low="lightblue", mid = "white", high="red", na.value="black", breaks = brk, labels = labs, name = "pN/pS Ratio", limits = c(0, 2), midpoint = 1)+
theme_genes() +
ggtitle("pN/pS Selection by Gene")+
theme(axis.text.y = element_text(size = 20, vjust = 0.38),
axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
title = element_text(size = 14)) +
scale_color_manual(values = 'black', labels = 'No pNpS') +
guides(color = guide_legend(override.aes = list(fill = "black")))
#Adds in pi, theta, and tajima's D points. Offset by their section of the plot.
# Legends are harvested and re-attached as a separate column later.
gene_legend <- get_legend(genes_plot)
genes_plot <- genes_plot + theme(legend.position = "none",
axis.line.x.bottom = element_line("black"))
# Row 1: binned average depth of coverage.
depth_by_pos_plot <- ggplot(depth_dat, aes(x = pos, y = V1))+
geom_step() +
theme_minimal()+
ylab("Depth of Coverage")+
xlab("") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ggtitle("Avg. Depth of Coverage over Contig")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
# Row 3: pi and theta per gene at the gene midpoint.
thet_dat <- melt.data.table(sub, id.vars = "midpt", measure.vars = c("pi", "theta"))
pi_and_theta_plot <- ggplot(thet_dat, aes(x = midpt, y = value, color = variable))+
geom_point(size = 2) +
theme_minimal() +
xlab("")+
ylab("Pi and Theta")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_color_manual("Nucleotide\nDiversity\nMeasure", labels = c("Pi", "Theta"), values = c("#af8dc3", "#7fbf7b")) +
ggtitle("Pi and Theta Nucleotide Diversity by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
thet_legend <- get_legend(pi_and_theta_plot)
pi_and_theta_plot <- pi_and_theta_plot + theme(legend.position = "none")
#Needs a fill legend for the colors.
# Row 4: Tajima's D per gene; shaded bands mark purifying (< -2),
# neutral, and positive (> 2) selection regions.
taj_d_plot <- ggplot(sub, aes(x = midpt))+
geom_point(aes(y = tajD), size = 2) +
theme_minimal() +
xlab("")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ylab("Tajima's D") +
ylim(c(min(sub$tajD), max(sub$tajD)))+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = min(-2, max(sub$tajD)), fill = "lightblue", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(-2, min(sub$tajD)), ymax = min(2, max(sub$tajD)), fill = "grey50", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(2, min(sub$tajD)), ymax = Inf, fill = "red", alpha = 0.35) +
ggtitle("Tajima's D Selection by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
taj_d_plot <- taj_d_plot + theme(legend.position = "none")
# Force all four rows onto the same x-range so positions line up.
taj_d_plot <- taj_d_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
pi_and_theta_plot <- pi_and_theta_plot + coord_cartesian(xlim = c(0,max(sub$end)))
depth_by_pos_plot <- depth_by_pos_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- genes_plot + coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- plot_grid( depth_by_pos_plot,genes_plot, pi_and_theta_plot, taj_d_plot, align = "vh", ncol = 1, axis = "x")
legends <- plot_grid(NULL, gene_legend, thet_legend, taj_d_legend, ncol = 1,nrow = 4, align = "vh")
# Legend column shrinks as pages get wider (larger contigs).
legend_area <- 0.1-(0.01 * z)
title <- ggdraw() +
draw_label(
paste("Contig:", name, "Sample:", j),
fontface = 'bold',
x = 0,
hjust = 0
) +
theme(
# add margin on the left of the drawing canvas,
# so title is aligned with left edge of first plot
plot.margin = margin(0, 0, 0, 7)
)
genes_plot <- plot_grid(genes_plot, legends, ncol = 2, rel_widths = c(1-legend_area, legend_area))
genes_plot <- ggdraw(add_sub(genes_plot, "Contig Position (bp)", vpadding=grid::unit(0,"lines"),y=6, x=0.5, vjust=4.5))
genes_plot <- plot_grid(
title, genes_plot,
ncol = 1,
# rel_heights values control vertical title margins
rel_heights = c(0.07, 1)
)
return(genes_plot)
})
return(a_contig)
}
# Print each contig's per-sample panels into the open PDF device.
silent <- suppressWarnings(lapply(all_contigs, function(f){
suppressWarnings(lapply(f, print))
return(NA)
}))
}
dev.off()
}
}
|
/metapop/metapop_r/MetaPop_Microdiversity_Visualizations.R
|
permissive
|
metaGmetapop/metapop
|
R
| false
| false
| 34,107
|
r
|
# ---- Command-line interface ----
# Invoked by the MetaPop pipeline with positional arguments:
#   1 working dir, 2 threads, 3 R library location, 4 reference FASTA,
#   5 reference genes FASTA, 6 min coverage, 7 min depth, 8 subsample size,
#   9 plot-all flag ("1" = plot every contig), 10 SNP scale
#   ("local"/"global"/"both").
options <- commandArgs(trailingOnly = T)
#universal
directory_name <- options[1]
threads <- as.numeric(options[2])
library_location <- options[3]
# Empty string means "use R's default library search path".
if(library_location == ""){
library_location <- .libPaths()
}
ref_fasta <- options[4]
ref_genes <- options[5]
print(ref_fasta)
print(ref_genes)
#Component specific
min_cov <- as.numeric(options[6])
min_dep <- as.numeric(options[7])
sub_samp <- as.numeric(options[8])
plot_all <- options[9]
# "1" -> plot every contig; anything else -> only top contigs per sample.
if(plot_all == "1"){
plot_all <- T
}else{
plot_all <- F
}
snp_scale <- options[10]
# All subsequent relative paths ("MetaPop/...") resolve against this dir.
setwd(directory_name)
#' Compute sequence names and lengths from a FASTA file.
#'
#' Reads the file once and, for every ">" defline, sums the character counts
#' of the sequence lines that follow it (sequences may span multiple lines).
#' Only the first whitespace-delimited token of each defline is kept as the
#' sequence ID.
#'
#' @param file Path to a FASTA file.
#' @return A data.table with columns `seq_ids` (character) and `lengths`
#'   (numeric), one row per record. Callers rename these columns.
get_names_and_lengths_from_fasta <- function(file){
original_assemblies <- readLines(file)
header_lines = substr(original_assemblies, 1, 1) == ">"
seq_ids = original_assemblies[header_lines]
# Guard: a file with no records returns an empty table instead of producing
# mismatched column lengths below.
if(length(seq_ids) == 0){
return(data.table(seq_ids = character(0), lengths = numeric(0)))
}
char_counts = nchar(original_assemblies)
#char_counts[header_lines] = 0
rm(original_assemblies)
lengths = rep(0, sum(header_lines))
current_length = 0
headers_count = 1
# Walk every line after the first; each new header closes out the running
# length of the previous record. seq_len(...)[-1] (instead of 2:length())
# avoids the backwards 2:1 sequence when the file holds a single line.
for(i in seq_len(length(header_lines))[-1]){
if(header_lines[i]){
lengths[headers_count] = current_length
headers_count = headers_count + 1
current_length = 0
}else{
current_length = current_length + char_counts[i]
}
}
#Final iter
lengths[headers_count] = current_length
# Strip the leading ">" and truncate each ID at its first space.
seq_ids = substr(seq_ids, 2, nchar(seq_ids ))
seq_ids <- unlist(lapply(seq_ids , function(x){
res = strsplit(x, split = ' ')[[1]]
if(length(res) > 1){
res = res[1]
}
return(res)
}))
result = data.table(seq_ids, lengths)
return(result)
}
#suppressMessages(suppressWarnings(library(Biostrings, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(gggenes, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(cowplot, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(doParallel, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(RColorBrewer, lib.loc = library_location)))
# ---- SNP position-in-codon summary ----
# Summarises, per sample, how many SNPs fall on the first, second, and third
# positions of their codons, then renders two bar charts into one PDF:
# (1) total SNP count per sample, (2) per-sample proportions by codon position.
# SNP positions in codons code
codon_pos_count <- fread("MetaPop/10.Microdiversity/global_codon_position_summary.tsv", sep = "\t")
# Collapse to per-sample totals; V1/V2/V3 hold counts at codon pos. 1/2/3.
codon_pos_count <- codon_pos_count[, list(sum(first_pos), sum(second_pos), sum(third_pos)), by = source]
codon_pos_count_all <- melt.data.table(codon_pos_count, id.vars = c("source"))
# Overall SNP count per sample, pooling all three codon positions.
codon_pos_count_all <- codon_pos_count_all[, sum(value), by = source]
p_all <- ggplot(codon_pos_count_all, aes(x = source, y = V1)) +
geom_bar(stat = "identity", position = "stack") +
ylab("Count of SNPs in sample") +
xlab("Sample of origin") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90),
axis.title.x = element_blank(),
axis.title = element_text(size = 16),
axis.text.y = element_text(size = 14),
axis.line = element_line("black"),
axis.ticks.y = element_line("black"),
legend.title = element_text(size = 16),
legend.text = element_text(size = 14))+
scale_y_continuous(labels = scales::comma)
# Convert raw per-position counts into within-sample proportions.
codon_pos_count[, sum := V1+V2+V3,]
codon_pos_count[, V1 := V1/sum]
codon_pos_count[, V2 := V2/sum]
codon_pos_count[, V3 := V3/sum]
codon_pos_count[, sum := NULL]
colnames(codon_pos_count)[2:4] = c("First Pos.", "Second Pos.", "Third Pos.")
codon_pos_count <- melt.data.table(codon_pos_count, id.vars = c("source"))
# Stacked proportions per sample, one fill colour per codon position.
p <- ggplot(codon_pos_count, aes(x = source, y = value, fill = variable)) +
geom_bar(stat = "identity", position = "stack") +
ylab("Proportion of SNPs by position in codon") +
xlab("Sample of origin") +
theme_minimal() +
scale_fill_manual("Position\nof SNP in\nCodon", labels = c("Pos. 1", "Pos. 2", "Pos. 3"), values = c("grey50", "#2ca9e1", "#FF0000")) +
theme(axis.text.x = element_text(angle = 90),
axis.title.x = element_blank(),
axis.title = element_text(size = 16),
axis.text.y = element_text(size = 14),
axis.line = element_line("black"),
axis.ticks.y = element_line("black"),
legend.title = element_text(size = 16),
legend.text = element_text(size = 14))
# Both plots go into a single PDF, proportions first.
pdf("MetaPop/12.Visualizations/Third_Pos_SNP_summary.pdf", 11, 11)
print(p)
print(p_all)
dev.off()
#FST
# ---- Fixation index (Fst) heatmaps ----
# For every contig with pairwise Fst values between samples, builds a
# lower-triangle sample-by-sample heatmap. Skipped entirely when the
# microdiversity step produced no fixation_index.tsv.
if(file.exists("MetaPop/10.Microdiversity/fixation_index.tsv")){
fixation_data <- fread("MetaPop/10.Microdiversity/fixation_index.tsv", sep = "\t")
fixation_data_filtered <- fixation_data[!is.na(fixation_data$fst),]
contigs_names <- unique(fixation_data_filtered$contig)
namesave = unique(fixation_data_filtered$contig)
# Split into one data.table of pairwise Fst per contig.
fixation_data <- fixation_data_filtered[, list(list(.SD)), by = contig]$V1
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "fixation_data"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# In parallel: symmetrise each contig's pairwise table (mirror each pair and
# add a zero diagonal), then keep only the lower triangle for plotting.
fixation_data <- foreach(x = 1:length(fixation_data)) %dopar% {
x <- fixation_data[[x]]
tmp_contigs <- unique(c(x$row_samp, x$col_samp))
x2 <- x[, list(col_samp, row_samp, fst) ]
colnames(x2)[1:2] = c("row_samp", "col_samp")
missing <- data.table(row_samp = tmp_contigs, col_samp = tmp_contigs, fst = 0)
x <- rbind(x, x2, missing)
organize_rows_and_cols <- sort(unique(c(x$row_samp, x$col_samp)))
x$row_samp <- factor(x$row_samp, levels = organize_rows_and_cols)
x$col_samp <- factor(x$col_samp, levels = organize_rows_and_cols)
#Silly, but works as a method of ensuring that the plotting output is correct in shape.
x2 <- dcast(x, col_samp ~ row_samp, value.var ="fst")
# NOTE(review): data.table does not retain assigned rownames, so the
# rownames(x2) read-back a few lines below may return "1","2",... rather
# than sample names — confirm the col_samp axis labels on rendered plots.
rownames(x2) = x2$col_samp
x2[, col_samp := NULL]
x2[upper.tri(x2)] <- NA
x2[, col_samp := rownames(x2)]
x <- melt.data.table(x2, na.rm = T, id.vars = c("col_samp"))
colnames(x)[2:3] = c("row_samp", "fst")
return(x)
}
stopCluster(cl)
names(fixation_data) = namesave
# Batch contigs into groups of ~`threads`; each batch's heatmaps are built
# in parallel, then printed serially into one shared PDF.
groups <- (1:length(fixation_data))%/%threads
unique_groups <- unique(groups)
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "groups", "unique_groups"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location))))
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(RColorBrewer, lib.loc = library_location))))
registerDoParallel(cl)
pdf("MetaPop/12.Visualizations/fst_genome_heatmap_plots.pdf", width = 17, height = 11)
for(k in unique_groups){
heatmaps <- foreach(i = fixation_data[groups==k], j = names(fixation_data)[groups==k]) %dopar% {
# One heatmap per contig, titled with the contig name; Fst is mapped
# on a fixed 0-1 scale so colours are comparable across contigs.
fst_heatmap <- ggplot(i, aes(y = row_samp, x = col_samp, fill = fst))+
geom_raster()+
theme_classic()+
theme(plot.title = element_text(size = 18, face = "bold"),
axis.text.x = element_text(angle = 90, hjust = 0.5),
axis.text.y = element_text(vjust = 0.5),
plot.margin = unit(c(0,0,0,0), "cm"),
legend.title = element_text(size = 16),
legend.text = element_text(size = 14))+
ylab("")+
xlab("")+
ggtitle(j)+
scale_fill_gradient2(low = "#2ca9e1", mid="grey80", high="#ff0000", na.value = "black", midpoint = 0.5, limits= c(0,1))+
guides(fill = guide_legend(title = "Fst"))
return(fst_heatmap)
}
for(i in heatmaps){
print(i)
}
}
stopCluster(cl)
dev.off()
}
#main genomes
# Parse Prodigal-style gene headers from a gene FASTA file.
# Each header looks like ">contig_gene # start # end # strand # attrs";
# returns a data.table with columns contig_gene, start, end, OC (orientation)
# and parent_contig (gene ID with its trailing "_<number>" removed).
parse_genes = function(file){
  fasta_lines <- readLines(file)
  is_header <- substr(fasta_lines, 1, 1) == ">"
  header_text <- substr(fasta_lines[is_header], 2, nchar(fasta_lines[is_header]))
  # Split each header on runs of '#', spaces, or tabs into its 5 fields.
  field_list <- strsplit(header_text, "[# \t]+")
  gene_table <- data.table(matrix(unlist(field_list), ncol = 5, byrow = T))
  names(gene_table)[1:4] = c("contig_gene", "start", "end", "OC")
  gene_table$start <- as.numeric(gene_table$start)
  gene_table$end <- as.numeric(gene_table$end)
  # Drop the trailing attributes column; it is not used downstream.
  gene_table <- gene_table[, -5]
  gene_table$parent_contig <- gsub("_\\d+$", "", gene_table$contig_gene)
  return(gene_table)
}
# Local-scale microdiversity visualization.
# For each retained contig (bucketed by order of magnitude of contig length,
# one PDF per bucket), draw per-sample panels of: binned depth of coverage,
# per-gene pN/pS (gene arrows), pi/theta nucleotide diversity, and Tajima's D.
if(snp_scale=="local" | snp_scale == "both"){
print("Creating local scale SNP plots...")
genes = parse_genes(ref_genes)
gene_microdiv <- fread("MetaPop/10.Microdiversity/local_gene_microdiversity.tsv", sep = "\t")
gene_microdiv$parent_contig <- gsub("_\\d+$", "", gene_microdiv$contig_gene)
setkeyv(gene_microdiv, c("parent_contig", "source"))
# Unless everything is plotted, keep only the 3 contigs per sample with the
# most genes under positive selection (pN/pS > 1).
if(!plot_all){
highest_selected_contigs <- gene_microdiv[, valuable <- sum(pNpS_ratio > 1, na.rm = T), by = key(gene_microdiv)]
highest_selected_contigs <- highest_selected_contigs[order(highest_selected_contigs$source, V1),]
retained_contigs <- unique(highest_selected_contigs[, tail(parent_contig, 3), by = source]$V1)
gene_microdiv <- gene_microdiv[gene_microdiv$parent_contig %in% retained_contigs,]
}
num_src <- length(unique(gene_microdiv$source))
genes <- genes[genes$parent_contig %in% gene_microdiv$parent_contig,]
genes$OC <- as.numeric(genes$OC)
# Swap start/end for reverse-strand genes (OC < 0) so gggenes arrows point
# in the correct direction.
reverser <- genes[,list(start = ifelse(OC < 0, end, start), end = ifelse(OC < 0, start, end))]
genes$start <- reverser$start
genes$end <- reverser$end
rm(reverser)
# Placeholder columns filled per-sample inside the plotting closure below.
genes$pnps <- NA
genes$pi <- NA
genes$theta <- NA
genes$tajD <- NA
genes[, midpt := (start+end)/2]
# Convert to per-contig named lists for cheap lookup inside parallel workers.
unique_contigs <- genes[, list(list(.SD)), by = parent_contig]$V1
names(unique_contigs) = unique(genes$parent_contig)
rm(genes)
NS = unique(gene_microdiv$parent_contig)
gene_microdiv <- gene_microdiv[, list(list(.SD)), by = parent_contig]$V1
names(gene_microdiv) = NS
rm(NS)
fastaLengths = get_names_and_lengths_from_fasta(ref_fasta)
colnames(fastaLengths) = c("contig", "num_bases")
fastaLengths <- fastaLengths[ contig %in% names(gene_microdiv),]
# "bigness" = order of magnitude of contig length; controls PDF page width.
fastaLengths[, bigness := round(log10(num_bases))]
setkey(fastaLengths, "bigness")
unique_contigs <- unique_contigs[fastaLengths$contig]
depth_info <- list.files(path = "MetaPop/04.Depth_per_Pos", full.names = T)
# NOTE(review): sample names recovered by fixed-offset substring of the path;
# assumes the exact "MetaPop/04.Depth_per_Pos/" prefix and suffix lengths.
depth_names <- substr(depth_info, 26, nchar(depth_info)-20)
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "depth_info"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# Average per-position depth into 250 bp windows, one table per sample.
depth_bins <- foreach(x = depth_info) %dopar% {
tmp <- fread(x, sep = "\t", header = F)
colnames(tmp) = c("contig", "pos", "depth")
tmp[,pos := ((pos %/% 250))*250,]
setkeyv(tmp, c("contig", "pos"))
contig_names <- unique(tmp$contig)
tmp <- tmp[, sum(depth)/250, by = key(tmp)]
}
stopCluster(cl)
names(depth_bins) = depth_names
size_category <- fastaLengths[, list(list(contig)), by = bigness]
#Tajima's D plot needs a legend to specify the annotations, but without coloring the points. The easiest way to do this is to make a fake one and save it for later.
taj_d_legend_plot <- ggplot(data = data.table(dots = c(1,2,3)), aes(x = dots, fill = factor(dots))) +
  geom_bar()+
  scale_fill_manual(name = "Tajima's D\nSelection", values = alpha(c("red","grey50", "lightblue"), 0.35), labels = c("Positive", "Neutral", "Purifying"))+
  theme(legend.text = element_text(size = 14),
        legend.title = element_text(size = 14))
taj_d_legend <- get_legend(taj_d_legend_plot)
cl <- makeCluster(min(detectCores(), threads, length(unique_contigs)))
clusterExport(cl, varlist = c("library_location", "depth_info", "depth_names", "depth_bins"))
clusterEvalQ(cl, library(data.table, lib.loc = library_location))
clusterEvalQ(cl, library(ggplot2, lib.loc = library_location))
clusterEvalQ(cl, library(gggenes, lib.loc = library_location))
clusterEvalQ(cl, library(cowplot, lib.loc = library_location))
registerDoParallel(cl)
# One PDF per size bucket; contigs processed in worker-sized batches (groups).
for(z in size_category$bigness){
current_conts <- unlist(size_category$V1[size_category$bigness == z])
groups <- (1:length(current_conts)) %/% threads
unique_groups <- unique(groups)
pdf(paste0("MetaPop/12.Visualizations/local_contigs_bp_cat_",as.character(format(10^z, scientific = F)),"_microdiversity_viz.pdf"), width = 13 + (2^(z+1)), height = 11)
for(k in unique_groups){
# Each worker builds the full list of per-sample panel grids for one contig.
all_contigs <- foreach(i = 1:length(unique_contigs[current_conts][groups == k])) %dopar% {
name <- names(unique_contigs[current_conts][groups == k])[i]
sub <- unique_contigs[[name]]
sub_data <- gene_microdiv[[name]]
full_len <- fastaLengths[contig == name, num_bases]
#Extremely high pNpS is largely irrelevant to answering the question of selection. This fix keeps scaling consistent, but always displays high pNpS as pos. select.
sub_data$pNpS_ratio <- ifelse(sub_data$pNpS_ratio > 2, 2, sub_data$pNpS_ratio)
sub_data$pNpS_ratio <- ifelse(!sub_data$snps_present, 0, sub_data$pNpS_ratio)
a_contig <- lapply(unique(sub_data$source), function(j){
# Pad depth bins with zero-coverage windows so the step plot spans the contig.
depth_dat <- depth_bins[[j]][depth_bins[[j]]$contig == name,]
missing_bins <- seq(0, full_len, by = 250)
missing_bins <- missing_bins[!missing_bins %in% depth_dat$pos]
if(length(missing_bins) > 0){
missing_bins <- data.table(contig = name, pos = missing_bins, V1 = 0)
depth_dat <- rbind(depth_dat, missing_bins)
}
# Fill this sample's statistics into the contig's gene table by gene ID.
sub$pnps[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pNpS_ratio[sub_data$source == j]
sub$pi[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pi[sub_data$source == j]
sub$theta[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$theta[sub_data$source == j]
sub$tajD[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$taj_D[sub_data$source == j]
pnps_fill <- any(!is.na(sub$pnps))
pi_fill <- any(!is.na(sub$pi))
theta_fill <- any(!is.na(sub$theta))
tajD_fill <- any(!is.na(sub$tajD))
if(tajD_fill){
sub[is.infinite(tajD), tajD := NA]
}
labs <- c("Purifying (0)", "Neutral (1)", "Positive (>1)")
brk <- c(0, 1, 2)
# Panel 1: gene arrows colored by pN/pS (black = no pN/pS computable).
genes_plot <- ggplot(sub, aes(fill=pnps, xmin = start, xmax = end, y = OC/2)) +
geom_gene_arrow() +
xlab("")+
ylab("Strand") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_y_continuous(breaks = c(-0.5, 0.5), labels = c("-", "+"), limits = c(-.75, 0.75)) +
scale_fill_gradient2(low="lightblue", mid = "white", high="red", na.value="black", breaks = brk, labels = labs, name = "pN/pS Ratio", limits = c(0, 2), midpoint = 1)+
theme_genes() +
ggtitle("pN/pS Selection by Gene")+
theme(axis.text.y = element_text(size = 20, vjust = 0.38),
axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
title = element_text(size = 14)) +
scale_color_manual(values = 'black', labels = 'No pNpS') +
guides(color = guide_legend(override.aes = list(fill = "black")))
#Adds in pi, theta, and tajima's D points. Offset by their section of the plot.
gene_legend <- get_legend(genes_plot)
genes_plot <- genes_plot + theme(legend.position = "none",
axis.line.x.bottom = element_line("black"))
# Panel 2: binned average depth of coverage.
depth_by_pos_plot <- ggplot(depth_dat, aes(x = pos, y = V1))+
geom_step() +
theme_minimal()+
ylab("Depth of Coverage")+
xlab("") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ggtitle("Avg. Depth of Coverage over Contig")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
# Panel 3: pi and theta per gene, plotted at gene midpoints.
thet_dat <- melt.data.table(sub, id.vars = "midpt", measure.vars = c("pi", "theta"))
pi_and_theta_plot <- ggplot(thet_dat, aes(x = midpt, y = value, color = variable))+
geom_point(size = 2) +
theme_minimal() +
xlab("")+
ylab("Pi and Theta")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_color_manual("Nucleotide\nDiversity\nMeasure", labels = c("Pi", "Theta"), values = c("#af8dc3", "#7fbf7b")) +
ggtitle("Pi and Theta Nucleotide Diversity by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
thet_legend <- get_legend(pi_and_theta_plot)
pi_and_theta_plot <- pi_and_theta_plot + theme(legend.position = "none")
#Needs a fill legend for the colors.
# Panel 4: Tajima's D with shaded bands (|D| > 2 = selection regions).
taj_d_plot <- ggplot(sub, aes(x = midpt))+
geom_point(aes(y = tajD), size = 2) +
theme_minimal() +
xlab("")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ylab("Tajima's D") +
ylim(c(min(sub$tajD), max(sub$tajD)))+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = min(-2, max(sub$tajD)), fill = "lightblue", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(-2, min(sub$tajD)), ymax = min(2, max(sub$tajD)), fill = "grey50", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(2, min(sub$tajD)), ymax = Inf, fill = "red", alpha = 0.35) +
ggtitle("Tajima's D Selection by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
taj_d_plot <- taj_d_plot + theme(legend.position = "none")
# Force all four panels onto the same x range so they align vertically.
taj_d_plot <- taj_d_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
pi_and_theta_plot <- pi_and_theta_plot + coord_cartesian(xlim = c(0,max(sub$end)))
depth_by_pos_plot <- depth_by_pos_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- genes_plot + coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- plot_grid( depth_by_pos_plot,genes_plot, pi_and_theta_plot, taj_d_plot, align = "vh", ncol = 1, axis = "x")
legends <- plot_grid(NULL, gene_legend, thet_legend, taj_d_legend, ncol = 1,nrow = 4, align = "vh")
# Legend column shrinks as page width grows with contig size.
legend_area <- 0.1-(0.01 * z)
title <- ggdraw() +
  draw_label(
    paste("Contig:", name, "Sample:", j),
    fontface = 'bold',
    x = 0,
    hjust = 0
  ) +
  theme(
    # add margin on the left of the drawing canvas,
    # so title is aligned with left edge of first plot
    plot.margin = margin(0, 0, 0, 7)
  )
genes_plot <- plot_grid(genes_plot, legends, ncol = 2, rel_widths = c(1-legend_area, legend_area))
genes_plot <- ggdraw(add_sub(genes_plot, "Contig Position (bp)", vpadding=grid::unit(0,"lines"),y=6, x=0.5, vjust=4.5))
genes_plot <- plot_grid(
  title, genes_plot,
  ncol = 1,
  # rel_heights values control vertical title margins
  rel_heights = c(0.07, 1)
)
return(genes_plot)
})
return(a_contig)
}
# Print each assembled page into the open PDF device.
silent <- suppressWarnings(lapply(all_contigs, function(f){
suppressWarnings(lapply(f, print))
return(NA)
}))
}
dev.off()
}
}
# Global-scale microdiversity visualization (mirror of the local-scale block,
# but reading global_gene_microdiversity.tsv and the 03.Breadth_and_Depth files).
# Fix: the contig-length column was named "length" but read as "num_bases"
# below (and in the worker at fastaLengths[contig == name, num_bases]), which
# raised "object 'num_bases' not found"; renamed to match the local block.
if(snp_scale=="global" | snp_scale == "both"){
print("Creating global scale SNP plots...")
genes = parse_genes(ref_genes)
gene_microdiv <- fread("MetaPop/10.Microdiversity/global_gene_microdiversity.tsv", sep = "\t")
gene_microdiv$parent_contig <- gsub("_\\d+$", "", gene_microdiv$contig_gene)
setkeyv(gene_microdiv, c("parent_contig", "source"))
# Unless everything is plotted, keep the 3 contigs per sample with the most
# positively selected genes (pN/pS > 1).
if(!plot_all){
highest_selected_contigs <- gene_microdiv[, valuable <- sum(pNpS_ratio > 1, na.rm = T), by = key(gene_microdiv)]
highest_selected_contigs <- highest_selected_contigs[order(highest_selected_contigs$source, V1),]
retained_contigs <- unique(highest_selected_contigs[, tail(parent_contig, 3), by = source]$V1)
gene_microdiv <- gene_microdiv[gene_microdiv$parent_contig %in% retained_contigs,]
}
num_src <- length(unique(gene_microdiv$source))
genes <- genes[genes$parent_contig %in% gene_microdiv$parent_contig,]
genes$OC <- as.numeric(genes$OC)
# Swap start/end for reverse-strand genes so gggenes arrows point correctly.
reverser <- genes[,list(start = ifelse(OC < 0, end, start), end = ifelse(OC < 0, start, end))]
genes$start <- reverser$start
genes$end <- reverser$end
rm(reverser)
genes$pnps <- NA
genes$pi <- NA
genes$theta <- NA
genes$tajD <- NA
genes[, midpt := (start+end)/2]
unique_contigs <- genes[, list(list(.SD)), by = parent_contig]$V1
names(unique_contigs) = unique(genes$parent_contig)
rm(genes)
NS = unique(gene_microdiv$parent_contig)
gene_microdiv <- gene_microdiv[, list(list(.SD)), by = parent_contig]$V1
names(gene_microdiv) = NS
rm(NS)
fastaLengths = get_names_and_lengths_from_fasta(ref_fasta)
# BUGFIX: was c("contig", "length"); downstream code reads num_bases.
colnames(fastaLengths) = c("contig", "num_bases")
fastaLengths <- fastaLengths[ contig %in% names(gene_microdiv),]
fastaLengths[, bigness := round(log10(num_bases))]
setkey(fastaLengths, "bigness")
unique_contigs <- unique_contigs[fastaLengths$contig]
depth_info <- list.files(path = "MetaPop/03.Breadth_and_Depth", full.names = T)
# NOTE(review): fixed-offset substring assumes the exact
# "MetaPop/03.Breadth_and_Depth/" prefix and suffix lengths -- confirm.
depth_names <- substr(depth_info, 30, nchar(depth_info)-22)
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "depth_info"), envir = environment())
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# Average per-position depth into 250 bp windows, one table per sample.
# NOTE(review): assumes these files are 3 columns (contig, pos, depth).
depth_bins <- foreach(x = depth_info) %dopar% {
tmp <- fread(x, sep = "\t")
colnames(tmp) = c("contig", "pos", "depth")
tmp[,pos := ((pos %/% 250))*250,]
setkeyv(tmp, c("contig", "pos"))
contig_names <- unique(tmp$contig)
tmp <- tmp[, sum(depth)/250, by = key(tmp)]
}
stopCluster(cl)
names(depth_bins) = depth_names
size_category <- fastaLengths[, list(list(contig)), by = bigness]
#Tajima's D plot needs a legend to specify the annotations, but without coloring the points. The easiest way to do this is to make a fake one and save it for later.
taj_d_legend_plot <- ggplot(data = data.table(dots = c(1,2,3)), aes(x = dots, fill = factor(dots))) +
  geom_bar()+
  scale_fill_manual(name = "Tajima's D\nSelection", values = alpha(c("red","grey50", "lightblue"), 0.35), labels = c("Positive", "Neutral", "Purifying"))+
  theme(legend.text = element_text(size = 14),
        legend.title = element_text(size = 14))
taj_d_legend <- get_legend(taj_d_legend_plot)
cl <- makeCluster(min(detectCores(), threads, length(unique_contigs)))
# NOTE(review): "groups"/"unique_groups" exported here are stale values from
# earlier code; the foreach below re-exports the loop-local versions. Kept
# as-is to avoid changing worker environments.
clusterExport(cl, varlist = c("library_location", "groups", "unique_groups", "depth_info", "depth_names", "depth_bins"))
clusterEvalQ(cl, library(data.table, lib.loc = library_location))
clusterEvalQ(cl, library(ggplot2, lib.loc = library_location))
clusterEvalQ(cl, library(gggenes, lib.loc = library_location))
clusterEvalQ(cl, library(cowplot, lib.loc = library_location))
registerDoParallel(cl)
# One PDF per contig-size bucket; batches of contigs per worker round.
for(z in size_category$bigness){
current_conts <- unlist(size_category$V1[size_category$bigness == z])
groups <- (1:length(current_conts)) %/% threads
unique_groups <- unique(groups)
pdf(paste0("MetaPop/12.Visualizations/global_contigs_bp_cat_",as.character(format(10^z, scientific = F)),"_microdiversity_viz.pdf"), width = 13 + (2^(z+1)), height = 11)
for(k in unique_groups){
all_contigs <- foreach(i = 1:length(unique_contigs[current_conts][groups == k])) %dopar% {
name <- names(unique_contigs[current_conts][groups == k])[i]
sub <- unique_contigs[[name]]
sub_data <- gene_microdiv[[name]]
full_len <- fastaLengths[contig == name, num_bases]
#Extremely high pNpS is largely irrelevant to answering the question of selection. This fix keeps scaling consistent, but always displays high pNpS as pos. select.
sub_data$pNpS_ratio <- ifelse(sub_data$pNpS_ratio > 2, 2, sub_data$pNpS_ratio)
sub_data$pNpS_ratio <- ifelse(!sub_data$snps_present, 0, sub_data$pNpS_ratio)
a_contig <- lapply(unique(sub_data$source), function(j){
# Pad depth bins with zero-coverage windows so the step plot spans the contig.
depth_dat <- depth_bins[[j]][depth_bins[[j]]$contig == name,]
missing_bins <- seq(0, full_len, by = 250)
missing_bins <- missing_bins[!missing_bins %in% depth_dat$pos]
if(length(missing_bins) > 0){
missing_bins <- data.table(contig = name, pos = missing_bins, V1 = 0)
depth_dat <- rbind(depth_dat, missing_bins)
}
# Fill this sample's statistics into the contig's gene table by gene ID.
sub$pnps[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pNpS_ratio[sub_data$source == j]
sub$pi[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$pi[sub_data$source == j]
sub$theta[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$theta[sub_data$source == j]
sub$tajD[match(sub_data$contig_gene[sub_data$source == j], sub$contig_gene)] <- sub_data$taj_D[sub_data$source == j]
pnps_fill <- any(!is.na(sub$pnps))
pi_fill <- any(!is.na(sub$pi))
theta_fill <- any(!is.na(sub$theta))
tajD_fill <- any(!is.na(sub$tajD))
if(tajD_fill){
sub[is.infinite(tajD), tajD := NA]
}
labs <- c("Purifying (0)", "Neutral (1)", "Positive (>1)")
brk <- c(0, 1, 2)
# Panel 1: gene arrows colored by pN/pS (black = no pN/pS computable).
genes_plot <- ggplot(sub, aes(fill=pnps, xmin = start, xmax = end, y = OC/2)) +
geom_gene_arrow() +
xlab("")+
ylab("Strand") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_y_continuous(breaks = c(-0.5, 0.5), labels = c("-", "+"), limits = c(-.75, 0.75)) +
scale_fill_gradient2(low="lightblue", mid = "white", high="red", na.value="black", breaks = brk, labels = labs, name = "pN/pS Ratio", limits = c(0, 2), midpoint = 1)+
theme_genes() +
ggtitle("pN/pS Selection by Gene")+
theme(axis.text.y = element_text(size = 20, vjust = 0.38),
axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
title = element_text(size = 14)) +
scale_color_manual(values = 'black', labels = 'No pNpS') +
guides(color = guide_legend(override.aes = list(fill = "black")))
#Adds in pi, theta, and tajima's D points. Offset by their section of the plot.
gene_legend <- get_legend(genes_plot)
genes_plot <- genes_plot + theme(legend.position = "none",
axis.line.x.bottom = element_line("black"))
# Panel 2: binned average depth of coverage.
depth_by_pos_plot <- ggplot(depth_dat, aes(x = pos, y = V1))+
geom_step() +
theme_minimal()+
ylab("Depth of Coverage")+
xlab("") +
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ggtitle("Avg. Depth of Coverage over Contig")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
# Panel 3: pi and theta per gene at gene midpoints.
thet_dat <- melt.data.table(sub, id.vars = "midpt", measure.vars = c("pi", "theta"))
pi_and_theta_plot <- ggplot(thet_dat, aes(x = midpt, y = value, color = variable))+
geom_point(size = 2) +
theme_minimal() +
xlab("")+
ylab("Pi and Theta")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
scale_color_manual("Nucleotide\nDiversity\nMeasure", labels = c("Pi", "Theta"), values = c("#af8dc3", "#7fbf7b")) +
ggtitle("Pi and Theta Nucleotide Diversity by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
thet_legend <- get_legend(pi_and_theta_plot)
pi_and_theta_plot <- pi_and_theta_plot + theme(legend.position = "none")
#Needs a fill legend for the colors.
# Panel 4: Tajima's D with shaded selection bands (|D| > 2).
taj_d_plot <- ggplot(sub, aes(x = midpt))+
geom_point(aes(y = tajD), size = 2) +
theme_minimal() +
xlab("")+
scale_x_continuous(expand = c(0,0), labels = scales::comma)+
ylab("Tajima's D") +
ylim(c(min(sub$tajD), max(sub$tajD)))+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = min(-2, max(sub$tajD)), fill = "lightblue", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(-2, min(sub$tajD)), ymax = min(2, max(sub$tajD)), fill = "grey50", alpha = 0.35)+
annotate("rect", xmin = -Inf, xmax = Inf, ymin = max(2, min(sub$tajD)), ymax = Inf, fill = "red", alpha = 0.35) +
ggtitle("Tajima's D Selection by Gene")+
theme(axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 14),
legend.text = element_text(size = 14),
legend.title = element_text(size = 14),
axis.line = element_line("black"),
title = element_text(size = 14),
axis.ticks = element_line("black"))
taj_d_plot <- taj_d_plot + theme(legend.position = "none")
# Force all four panels onto the same x range so they align vertically.
taj_d_plot <- taj_d_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
pi_and_theta_plot <- pi_and_theta_plot + coord_cartesian(xlim = c(0,max(sub$end)))
depth_by_pos_plot <- depth_by_pos_plot+ coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- genes_plot + coord_cartesian(xlim = c(0,max(sub$end)))
genes_plot <- plot_grid( depth_by_pos_plot,genes_plot, pi_and_theta_plot, taj_d_plot, align = "vh", ncol = 1, axis = "x")
legends <- plot_grid(NULL, gene_legend, thet_legend, taj_d_legend, ncol = 1,nrow = 4, align = "vh")
legend_area <- 0.1-(0.01 * z)
title <- ggdraw() +
  draw_label(
    paste("Contig:", name, "Sample:", j),
    fontface = 'bold',
    x = 0,
    hjust = 0
  ) +
  theme(
    # add margin on the left of the drawing canvas,
    # so title is aligned with left edge of first plot
    plot.margin = margin(0, 0, 0, 7)
  )
genes_plot <- plot_grid(genes_plot, legends, ncol = 2, rel_widths = c(1-legend_area, legend_area))
genes_plot <- ggdraw(add_sub(genes_plot, "Contig Position (bp)", vpadding=grid::unit(0,"lines"),y=6, x=0.5, vjust=4.5))
genes_plot <- plot_grid(
  title, genes_plot,
  ncol = 1,
  # rel_heights values control vertical title margins
  rel_heights = c(0.07, 1)
)
return(genes_plot)
})
return(a_contig)
}
# Print each assembled page into the open PDF device.
silent <- suppressWarnings(lapply(all_contigs, function(f){
suppressWarnings(lapply(f, print))
return(NA)
}))
}
dev.off()
}
}
|
# Example script for sdmvspecies::pickMedian -- builds a virtual species
# raster by picking the median value of each environmental layer.
library(sdmvspecies)
### Name: pickMedian
### Title: pickMedian
### Aliases: pickMedian
### ** Examples
# load the sdmvspecies and raster libraries
library("sdmvspecies")
library("raster")
# find the package's installed location
package.dir <- system.file(package="sdmvspecies")
# show where sdmvspecies is installed
package.dir
# find the bundled env dir under the package's location
env.dir <- paste(package.dir, "/external/env/", sep="")
# show the env dir
env.dir
# get the environment raster files (.bil format)
files <- list.files(path=env.dir, pattern="*.bil$", full.names=TRUE)
# make a raster stack from all layers
env.stack <- stack(files)
# run pickMedian on the stack
species.raster <- pickMedian(env.stack)
# plot the resulting map
plot(species.raster)
|
/data/genthat_extracted_code/sdmvspecies/examples/pickMedian.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 685
|
r
|
# Example script for sdmvspecies::pickMedian -- builds a virtual species
# raster by picking the median value of each environmental layer.
library(sdmvspecies)
### Name: pickMedian
### Title: pickMedian
### Aliases: pickMedian
### ** Examples
# load the sdmvspecies and raster libraries
library("sdmvspecies")
library("raster")
# find the package's installed location
package.dir <- system.file(package="sdmvspecies")
# show where sdmvspecies is installed
package.dir
# find the bundled env dir under the package's location
env.dir <- paste(package.dir, "/external/env/", sep="")
# show the env dir
env.dir
# get the environment raster files (.bil format)
files <- list.files(path=env.dir, pattern="*.bil$", full.names=TRUE)
# make a raster stack from all layers
env.stack <- stack(files)
# run pickMedian on the stack
species.raster <- pickMedian(env.stack)
# plot the resulting map
plot(species.raster)
|
# STEP - 1 START #
# Setup: install/load packages, read the Titanic data, and make a 70/30
# train/test split of the training file.
install.packages("titanic")
install.packages("rpart.plot")
install.packages("randomForest")
install.packages("DAAG")
library(titanic)
library(rpart.plot)
library(gmodels)
library(Hmisc)
library(pROC)
library(ResourceSelection)
library(car)
library(caret)
library(dplyr)
library(InformationValue)
library(rpart)
library(randomForest)
library("DAAG")
cat("\014") # Clearing the screen
getwd()
# NOTE(review): hard-coded absolute path; will fail on any other machine.
setwd("C:/08072017/AMMA 2017/Data/Assignment_2_Monica") #This working directory is the folder where all the bank data is stored
titanic_train_2<-read.csv('train.csv')
titanic_train<-titanic_train_2
titanic_train_3 <- read.csv('train.csv')
#titanic test
titanic_test_const <-read.csv('test-3.csv')
#splitting titanic train into 70,30
set.seed(1234) # for reproducibility
titanic_train$rand <- runif(nrow(titanic_train))
# Rows with rand <= 0.7 become the working training set; the rest become
# the hold-out test set.
titanic_train_start <- titanic_train[titanic_train$rand <= 0.7,]
titanic_test_start <- titanic_train[titanic_train$rand > 0.7,]
# number of survived vs number of dead
CrossTable(titanic_train$Survived)
# removing NA row entries (keep only rows complete in the model columns)
#titanic_train <- titanic_train_start
titanic_train <- titanic_train[!apply(titanic_train[,c("Pclass", "Sex", "SibSp", "Parch", "Fare", "Age")], 1, anyNA),]
titanic_train_NA_allcols <- titanic_train_2[!apply(titanic_train_2[,c("Pclass", "Sex", "SibSp", "Parch", "Fare", "Age")], 1, anyNA),]
nrow(titanic_train_2)
# replacing NA by mean
# Fix: Age contains NAs, so mean() without na.rm = TRUE always returned NA.
mean_age = mean(titanic_train_2$Age, na.rm = TRUE)
titanic_train_mean_monica <- titanic_train_start
titanic_train_mean_monica2 <- titanic_train_start
# Mean-impute Age in both working copies of the training split.
titanic_train_mean_monica$Age[is.na(titanic_train_mean_monica$Age)] = mean(titanic_train_mean_monica$Age, na.rm = TRUE)
titanic_train_mean_monica2$Age[is.na(titanic_train_mean_monica2$Age)] = mean(titanic_train_mean_monica2$Age, na.rm = TRUE)
# STEP - 1 END #
# STEP - 2 START #
########## Build model from mean imputed into the data set ##########
# Logistic regression of survival on the candidate predictors.
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Parch + Fare + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
#lm
# Ordinary least squares fit used only to compute VIF diagnostics below.
fit.train.mean <- lm(formula = Survived ~ Pclass + Sex + SibSp + Parch + Fare + Age,
data=titanic_train_mean_monica2) # plain lm fit; used for vif() only
summary(fit.train.mean)
#vif - remove those variables which have high vif >5
vif(fit.train.mean)
#removing insignificant variables (dropped one at a time after inspecting
#the glm summaries)
titanic_train_mean_monica$Parch<-NULL
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Fare + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
summary(full.model.titanic.mean)
titanic_train_mean_monica$Fare<-NULL
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
summary(full.model.titanic.mean)
#Testing performance on Train set: confusion table at a 0.5 cutoff.
titanic_train_mean_monica$prob = predict(full.model.titanic.mean, type=c("response"))
titanic_train_mean_monica$Survived.pred = ifelse(titanic_train_mean_monica$prob>=.5,'pred_yes','pred_no')
table(titanic_train_mean_monica$Survived.pred,titanic_train_mean_monica$Survived)
#Testing performance on test set
# Fix: the original called nrow(titanic_test) on an object that is never
# defined, and nrow(titanic_test2_mean_monica) before creating it. Create
# the test copy first, then inspect its size.
titanic_test2_mean_monica <- titanic_test_start
nrow(titanic_test2_mean_monica)
#imputation by replacing NAs by means in the test set
titanic_test2_mean_monica$Age[is.na(titanic_test2_mean_monica$Age)] = mean(titanic_test2_mean_monica$Age, na.rm = TRUE)
# Score the hold-out set and build the confusion table at a 0.5 cutoff.
titanic_test2_mean_monica$prob = predict(full.model.titanic.mean, newdata=titanic_test2_mean_monica, type=c("response"))
titanic_test2_mean_monica$Survived.pred = ifelse(titanic_test2_mean_monica$prob>=.5,'pred_yes','pred_no')
table(titanic_test2_mean_monica$Survived.pred,titanic_test2_mean_monica$Survived)
########## END - Model with mean included instead of NA #########
# STEP - 2 END #
# STEP - 3 START #
### Testing for Jack n Rose's survival ###
# Score two hand-made passenger rows (Book1.csv) with the fitted model.
df.jackrose <- read.csv('Book1.csv')
df.jackrose$prob = predict(full.model.titanic.mean, newdata=df.jackrose, type=c("response"))
df.jackrose$Survived.pred = ifelse(df.jackrose$prob>=.5,'pred_yes','pred_no')
head(df.jackrose)
# Jack dies, Rose survives
### END - Testing on Jack n Rose ###
# STEP - 3 END #
# STEP - 4 START #
## START K-fold cross validation ##
# Defining the K Fold CV function here
# Fit a GLM on `dataset` and run k-fold cross-validation on it via
# DAAG::CVbinary. Returns the CVbinary result object.
Kfold_func <- function(dataset, formula, family, k)
{
  fitted_glm <- glm(formula = formula, data = dataset, family = family)
  CVbinary(fitted_glm, nfolds = k, print.details = TRUE)
}
#Defining the function to calculate Mean Squared Error here
# Fits an OLS model of `formula` on `dataset`, prints a label and the mean
# squared residual, and (invisibly) returns the MSE.
MeanSquareError_func <- function(dataset, formula)
{
  fitted_model <- lm(formula = formula, data = dataset)
  mse_value <- mean(summary(fitted_model)$residuals^2)
  print("Mean squared error")
  print(mse_value)
}
#Performing KFold CV on Training set by calling the KFOLD CV function here
Kfoldobj <- Kfold_func(titanic_train_mean_monica,Survived ~ Pclass + Sex + SibSp + Age,binomial,10)
#Calling the Mean Squared Error function on the training set here
MSE_Train <-MeanSquareError_func(titanic_train_mean_monica,Survived ~ Pclass + Sex + SibSp + Age)
#confusion matrix on training set (cross-validated fitted values vs truth)
table(titanic_train_mean_monica$Survived,round(Kfoldobj$cvhat))
print("Estimate of Accuracy")
print(Kfoldobj$acc.cv)
#Performing KFold CV on test set by calling the KFOLD CV function here
Kfoldobj.test <- Kfold_func(titanic_test2_mean_monica,Survived ~ Pclass + Sex + SibSp + Age,binomial,10)
#Calling the Mean Squared Error function on the test set here
MSE_Test <-MeanSquareError_func(titanic_test2_mean_monica,Survived ~ Pclass + Sex + SibSp + Age)
#Confusion matrix on test set (cross-validated fitted values vs truth)
table(titanic_test2_mean_monica$Survived,round(Kfoldobj.test$cvhat))
print("Estimate of Accuracy")
print(Kfoldobj.test$acc.cv)
## END K-FOLD CROSS VALIDATION ##
# STEP - 4 END #
|
/Assignment 2 - K Fold.R
|
no_license
|
monicachandil/AMMA2017-18
|
R
| false
| false
| 6,067
|
r
|
# STEP - 1 START #
install.packages("titanic")
install.packages("rpart.plot")
install.packages("randomForest")
install.packages("DAAG")
library(titanic)
library(rpart.plot)
library(gmodels)
library(Hmisc)
library(pROC)
library(ResourceSelection)
library(car)
library(caret)
library(dplyr)
library(InformationValue)
library(rpart)
library(randomForest)
library("DAAG")
cat("\014") # Clearing the screen
getwd()
setwd("C:/08072017/AMMA 2017/Data/Assignment_2_Monica") #This working directory is the folder where all the bank data is stored
titanic_train_2<-read.csv('train.csv')
titanic_train<-titanic_train_2
titanic_train_3 <- read.csv('train.csv')
#titanic test
titanic_test_const <-read.csv('test-3.csv')
#splitting titanic train into 70,30
set.seed(1234) # for reproducibility
titanic_train$rand <- runif(nrow(titanic_train))
titanic_train_start <- titanic_train[titanic_train$rand <= 0.7,]
titanic_test_start <- titanic_train[titanic_train$rand > 0.7,]
# number of survived vs number of dead
CrossTable(titanic_train$Survived)
# removing NA row entries
#titanic_train <- titanic_train_start
titanic_train <- titanic_train[!apply(titanic_train[,c("Pclass", "Sex", "SibSp", "Parch", "Fare", "Age")], 1, anyNA),]
# --- Mean-imputation pipeline for the Titanic training data ---
# NOTE(review): depends on objects created earlier in the file
# (titanic_train_2, titanic_train_start, titanic_test, titanic_test_start).
titanic_train_NA_allcols <- titanic_train_2[!apply(titanic_train_2[,c("Pclass", "Sex", "SibSp", "Parch", "Fare", "Age")], 1, anyNA),]
nrow(titanic_train_2)
# replacing NA by mean
# NOTE(review): mean() is called without na.rm = TRUE here, so mean_age is NA
# whenever Age contains missing values; mean_age is also never used below.
mean_age = mean(titanic_train_2$Age)
titanic_train_mean_monica <- titanic_train_start
titanic_train_mean_monica2 <- titanic_train_start
# impute missing ages with the column mean (na.rm = TRUE used correctly here)
titanic_train_mean_monica$Age[is.na(titanic_train_mean_monica$Age)] = mean(titanic_train_mean_monica$Age, na.rm = TRUE)
titanic_train_mean_monica2$Age[is.na(titanic_train_mean_monica2$Age)] = mean(titanic_train_mean_monica2$Age, na.rm = TRUE)
# STEP - 1 END #
# STEP - 2 START #
########## Build model from mean imputed into the data set ##########
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Parch + Fare + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
#lm
# ordinary least squares fit used only to obtain VIFs below
fit.train.mean <- lm(formula = Survived ~ Pclass + Sex + SibSp + Parch + Fare + Age,
data=titanic_train_mean_monica2) # NOTE(review): this is lm (OLS); the binomial comment was copy-pasted from the glm call above
summary(fit.train.mean)
#vif - remove those variables which have high vif >5
vif(fit.train.mean)
#removing insignificant variables
titanic_train_mean_monica$Parch<-NULL
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Fare + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
summary(full.model.titanic.mean)
titanic_train_mean_monica$Fare<-NULL
full.model.titanic.mean <- glm(formula = Survived ~ Pclass + Sex + SibSp + Age,
data=titanic_train_mean_monica, family = binomial) #family = binomial implies that the type of regression is logistic
summary(full.model.titanic.mean)
#Testing performance on Train set
# classify with a 0.5 probability cutoff
titanic_train_mean_monica$prob = predict(full.model.titanic.mean, type=c("response"))
titanic_train_mean_monica$Survived.pred = ifelse(titanic_train_mean_monica$prob>=.5,'pred_yes','pred_no')
table(titanic_train_mean_monica$Survived.pred,titanic_train_mean_monica$Survived)
#Testing performance on test set
nrow(titanic_test)
# NOTE(review): titanic_test2_mean_monica is first assigned on the next line,
# so this nrow() call fails unless the object already exists from an earlier run.
nrow(titanic_test2_mean_monica)
titanic_test2_mean_monica <- titanic_test_start
#imputation by replacing NAs by means in the test set
titanic_test2_mean_monica$Age[is.na(titanic_test2_mean_monica$Age)] = mean(titanic_test2_mean_monica$Age, na.rm = TRUE)
titanic_test2_mean_monica$prob = predict(full.model.titanic.mean, newdata=titanic_test2_mean_monica, type=c("response"))
titanic_test2_mean_monica$Survived.pred = ifelse(titanic_test2_mean_monica$prob>=.5,'pred_yes','pred_no')
table(titanic_test2_mean_monica$Survived.pred,titanic_test2_mean_monica$Survived)
########## END - Model with mean included instead of NA #########
# STEP - 2 END #
# STEP - 3 START #
### Testing for Jack n Rose's survival ###
df.jackrose <- read.csv('Book1.csv')
df.jackrose$prob = predict(full.model.titanic.mean, newdata=df.jackrose, type=c("response"))
df.jackrose$Survived.pred = ifelse(df.jackrose$prob>=.5,'pred_yes','pred_no')
head(df.jackrose)
# Jack dies, Rose survives
### END - Testing on Jack n Rose ###
# STEP - 3 END #
# STEP - 4 START #
## START K-fold cross validation ##
# K-fold cross-validation helper: fit a glm with the given formula/family
# and cross-validate it with DAAG::CVbinary(); returns the CVbinary result.
Kfold_func <- function(dataset, formula, family, k)
{
  fitted_model <- glm(formula = formula, data = dataset, family = family)
  CVbinary(fitted_model, nfolds = k, print.details = TRUE)
}
# Compute and print the in-sample mean squared error of a linear model.
# Fits lm(formula, data), prints a label followed by the MSE of the
# residuals, and returns the MSE (invisibly, via print()).
MeanSquareError_func <- function(dataset, formula)
{
  fitted <- lm(formula = formula, data = dataset)
  mse <- mean(summary(fitted)$residuals^2)
  print("Mean squared error")
  print(mse)
}
#Performing KFold CV on Training set by calling the KFOLD CV function here
Kfoldobj <- Kfold_func(titanic_train_mean_monica,Survived ~ Pclass + Sex + SibSp + Age,binomial,10)
#Calling the Mean Squared Error function on the training set here
MSE_Train <-MeanSquareError_func(titanic_train_mean_monica,Survived ~ Pclass + Sex + SibSp + Age)
#confusion matrix on training set
# cvhat holds cross-validated predicted probabilities; round() maps them to 0/1
table(titanic_train_mean_monica$Survived,round(Kfoldobj$cvhat))
print("Estimate of Accuracy")
print(Kfoldobj$acc.cv)
#Performing KFold CV on test set by calling the KFOLD CV function here
Kfoldobj.test <- Kfold_func(titanic_test2_mean_monica,Survived ~ Pclass + Sex + SibSp + Age,binomial,10)
#Calling the Mean Squared Error function on the test set here
MSE_Test <-MeanSquareError_func(titanic_test2_mean_monica,Survived ~ Pclass + Sex + SibSp + Age)
#Confusion matrix on test set
table(titanic_test2_mean_monica$Survived,round(Kfoldobj.test$cvhat))
print("Estimate of Accuracy")
print(Kfoldobj.test$acc.cv)
## END K-FOLD CROSS VALIDATION ##
# STEP - 4 END #
|
get_ndx_scores <-
function(weights, geno, names) {
  # Return the sorted, unique positions in `names` matched by the entries
  # of `geno` whose corresponding weight is non-zero.
  #
  # Args:
  #   weights: numeric vector of scores, same length as `geno`.
  #   geno:    vector of genotype identifiers.
  #   names:   vector of reference names to index into.
  #
  # Returns: sorted integer vector of unique matched positions (NAs dropped).
  scored <- as.vector(weights != 0)
  # which() drops NA flags, matching the old subset(geno, scored == TRUE);
  # plain logical indexing would have kept NA entries.
  scored_geno <- geno[which(scored)]
  cols <- match(scored_geno, names)
  u <- as.vector(na.omit(unique(cols)))
  sort(u)
}
|
/R/get_ndx_scores.R
|
no_license
|
cran/SpATS
|
R
| false
| false
| 219
|
r
|
get_ndx_scores <-
function(weights, geno, names) {
  # Sorted, unique, NA-free indices into `names` for the genotypes whose
  # weight is non-zero.
  nonzero <- as.vector(weights != 0)
  kept <- subset(geno, nonzero == TRUE)
  matched <- match(kept, names)
  sort(as.vector(na.omit(unique(matched))))
}
|
\name{Utils-class}
\Rdversion{1.1}
\docType{class}
\alias{Utils-class}
\alias{Utils}
\title{Class \code{Utils}}
\description{
A convenience object for working with various aspects of web requests and responses.
}
\seealso{
\code{\link{Multipart}}.
}
\examples{
Utils$bytesize('foo')
Utils$escape('foo bar')
Utils$unescape('foo+bar')
Utils$escape_html('foo <bar>')
Utils$escape('foo <bar>')
Utils$escape('foo\n<bar>')
Utils$status_code('OK')
Utils$status_code('Found')
Utils$status_code('Not Found')
x <- Utils$parse_query('foo=1&bar=baz')
x
Utils$rfc2822(Sys.time())
Utils$timezero()
Utils$build_query(x)
rm(x)
}
\keyword{classes}
\section{Methods}{
\describe{
\item{\code{bytesize(string=NULL)}:}{ Returns size in bytes for \code{string}, a character vector. }
  \item{\code{unescape(s=NULL)}:}{ returns the url decoded value of the character vector \code{s}. Also replaces \code{'+'} with the space character. }
  \item{\code{status_code(status=NULL)}:}{ returns integer value for the given HTTP \code{status}, which can either be numeric or a character vector describing the status. Returns \code{as.integer(500)} if \code{status} is NULL.}
\item{\code{escape_html(string=NULL)}:}{ replaces \code{"&"}, \code{"<"}, \code{">"}, \code{"'"}, and \code{'"'} with entity equivalents. }
\item{\code{raw.match(needle=NULL, haystack=NULL, all=TRUE)}:}{ returns index position of \code{needle} in \code{haystack}. All matched indexes are returned by default. \code{needle} is either a raw vector or character string. \code{haystack} is a raw vector.}
  \item{\code{parse_query(qs=NULL, d=DEFAULT_SEP)}:}{ Creates a named list from the query string \code{qs}. \code{d} is the separator value and defaults to \code{'[&;] *'}.}
\item{\code{rfc2822(ts=NULL)}:}{ Formats \code{ts} in RFC2822 time. \code{ts} must be a \code{\link{POSIXt}} object.}
\item{\code{escape(s=NULL)}:}{ Transforms any non-printable characters found in \code{s} to their percent-encoded equivalents.}
\item{\code{build_query(params=NULL)}:}{ Creates a query string from the named list given in \code{params}. }
\item{\code{timezero()}:}{ Returns a \code{POSIXct} object set to UNIX epoch. }
\item{\code{set_cookie_header(header, key, value, expires, path, domain, secure, httpOnly)}:}{ Sets an HTTP cookie header in the environment \code{header}. All arguments except \code{expires} are length 1 character vectors, while \code{expires} must be a \code{POSIXct} object. }
\item{\code{delete_cookie_header(header, key, value, expires, path, domain, secure, httpOnly)}:}{ Deletes the HTTP cookie header. }
}
}
|
/Rook/man/Utils-class.Rd
|
no_license
|
cadar/rRack
|
R
| false
| false
| 2,625
|
rd
|
\name{Utils-class}
\Rdversion{1.1}
\docType{class}
\alias{Utils-class}
\alias{Utils}
\title{Class \code{Utils}}
\description{
A convenience object for working with various aspects of web requests and responses.
}
\seealso{
\code{\link{Multipart}}.
}
\examples{
Utils$bytesize('foo')
Utils$escape('foo bar')
Utils$unescape('foo+bar')
Utils$escape_html('foo <bar>')
Utils$escape('foo <bar>')
Utils$escape('foo\n<bar>')
Utils$status_code('OK')
Utils$status_code('Found')
Utils$status_code('Not Found')
x <- Utils$parse_query('foo=1&bar=baz')
x
Utils$rfc2822(Sys.time())
Utils$timezero()
Utils$build_query(x)
rm(x)
}
\keyword{classes}
\section{Methods}{
\describe{
\item{\code{bytesize(string=NULL)}:}{ Returns size in bytes for \code{string}, a character vector. }
  \item{\code{unescape(s=NULL)}:}{ returns the url decoded value of the character vector \code{s}. Also replaces \code{'+'} with the space character. }
  \item{\code{status_code(status=NULL)}:}{ returns integer value for the given HTTP \code{status}, which can either be numeric or a character vector describing the status. Returns \code{as.integer(500)} if \code{status} is NULL.}
\item{\code{escape_html(string=NULL)}:}{ replaces \code{"&"}, \code{"<"}, \code{">"}, \code{"'"}, and \code{'"'} with entity equivalents. }
\item{\code{raw.match(needle=NULL, haystack=NULL, all=TRUE)}:}{ returns index position of \code{needle} in \code{haystack}. All matched indexes are returned by default. \code{needle} is either a raw vector or character string. \code{haystack} is a raw vector.}
  \item{\code{parse_query(qs=NULL, d=DEFAULT_SEP)}:}{ Creates a named list from the query string \code{qs}. \code{d} is the separator value and defaults to \code{'[&;] *'}.}
\item{\code{rfc2822(ts=NULL)}:}{ Formats \code{ts} in RFC2822 time. \code{ts} must be a \code{\link{POSIXt}} object.}
\item{\code{escape(s=NULL)}:}{ Transforms any non-printable characters found in \code{s} to their percent-encoded equivalents.}
\item{\code{build_query(params=NULL)}:}{ Creates a query string from the named list given in \code{params}. }
\item{\code{timezero()}:}{ Returns a \code{POSIXct} object set to UNIX epoch. }
\item{\code{set_cookie_header(header, key, value, expires, path, domain, secure, httpOnly)}:}{ Sets an HTTP cookie header in the environment \code{header}. All arguments except \code{expires} are length 1 character vectors, while \code{expires} must be a \code{POSIXct} object. }
\item{\code{delete_cookie_header(header, key, value, expires, path, domain, secure, httpOnly)}:}{ Deletes the HTTP cookie header. }
}
}
|
#'Lineup proportion
#'
#'Computes the proportion of mock witnesses identifying a particular lineup member
#'@param lineup_table A table of lineup choices
#'@param target_pos A scalar, representing target position in lineup. Must be declared by user
#'@return Returns a proportion indicating the frequency with which a lineup
#' member was selected
#'@references Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
#' empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
#' 3}(4), 285-293.
#'@examples
#'#Data: six-person lineup, 100 mock witnesses
#'# (the previous example used round(runif(100, 1)), which produced a
#'# constant vector of 1s and so a one-cell table)
#'lineup_vec <- sample(1:6, 100, replace = TRUE)
#'lineup_table <- table(lineup_vec)
#'
#'#Call:
#'lineup_prop_tab(lineup_table, 3)
#'lineup_prop_tab(table(lineup_vec), 2)
#'
#'@export
lineup_prop_tab <- function(lineup_table, target_pos){
  # count for the target position divided by the total number of choices
  lineup_table[target_pos]/sum(lineup_table)
}
|
/R/lineup_prop_tab.R
|
no_license
|
tmnaylor/r4lineups
|
R
| false
| false
| 859
|
r
|
#'Lineup proportion
#'
#'Computes the proportion of mock witnesses identifying a particular lineup member
#'@param lineup_table A table of lineup choices
#'@param target_pos A scalar, representing target position in lineup. Must be declared by user
#'@return Returns a proportion indicating the frequency with which a lineup
#' member was selected
#'@references Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
#' empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
#' 3}(4), 285-293.
#'@examples
#'#Data:
#'lineup_vec <- round(runif(100, 1))
#'lineup_table <- table(lineup_vec)
#'
#'#Call:
#'lineup_prop_tab(lineup_table, 3)
#'lineup_prop_tab(table(lineup_vec), 2)
#'
#'@export
lineup_prop_tab <- function(lineup_table, target_pos){
  # prop.table() divides every cell by the table total, so selecting the
  # target cell yields the identification proportion.
  prop.table(lineup_table)[target_pos]
}
|
# --- DESeq2 differential expression workflow: Infected vs Uninfected ---
# NOTE(review): interactive analysis script; it installs packages mid-script
# and relies on Bioconductor (DESeq2, apeglm, vsn) plus pheatmap.
library(DESeq2)
library(ggplot2)
#count matrix input
cts <- read.csv("RawCounts.csv", sep = ",")
coldata <- read.csv("Infected_Uninfected.csv", sep = ",", row.names = 1)
head(cts,2)
coldata
#making sure they are in order
all(rownames(coldata) %in% colnames(cts))
all(rownames(coldata) == colnames(cts))
cts <- cts[, rownames(coldata)]
all(rownames(coldata) == colnames(cts))
#constructing DESeqDataSet
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ Condition)
dds
#factor levels
dds$Condition <- factor(dds$Condition, levels = c("Uninfected","Infected"))
dds$Condition <- relevel(dds$Condition, ref = "Uninfected")
#differential expression analysis
dds <- DESeq(dds)
res <-results(dds)
res
resultsNames(dds)
#exporting DESeq2 results without any filtering
write.csv(as.data.frame(res),
file="Uninfected_Infected_nofilter.csv")
#apeglm method for effect size shrinkage
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("apeglm", version = "3.8")
library(apeglm)
#Log fold change shrinkage for visualization and ranking
resLFC <- lfcShrink(dds, coef="Condition_Infected_vs_Uninfected", type="apeglm")
resLFC
#p-values and adjusted p-values
resOrdered <- res[order(res$pvalue),]
summary(res)
# count of raw p-values below 0.05
# NOTE(review): the original comment said "adjusted p-values" but the code
# uses res$pvalue, not res$padj - confirm which was intended.
sum(res$pvalue < 0.05, na.rm=TRUE)
#MA-plot (red dot means the adjusted p value is less than 0.1)
plotMA(res, ylim=c(-2,2))
#shrunken log 2 fold changes
plotMA(resLFC, ylim=c(-2,2))
#plot counts
plotCounts(dds, gene=which.min(res$padj), intgroup="Condition")
#plot counts ggplot
d <- plotCounts(dds, gene=which.min(res$padj), intgroup="Condition",
returnData=TRUE)
library("ggplot2")
ggplot(d, aes(x=Condition, y=count)) +
geom_point(position=position_jitter(w=0.1,h=0)) +
scale_y_log10(breaks=c(25,100,400))
#exporting results to CSV files
write.csv(as.data.frame(resOrdered),
file="condition_treated_results.csv")
colData(dds)
#extracting transformed values
vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds, blind=FALSE)
head(assay(vsd), 3)
#download vsn
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("vsn", version = "3.8")
#effects of transformations on the variance
ntd <- normTransform(dds)
library("vsn")
meanSdPlot(assay(ntd))
#heatmap of the count matrix (top 20 genes by mean normalized count)
install.packages("pheatmap")
library("pheatmap")
library("ggplot2")
select <- order(rowMeans(counts(dds,normalized=TRUE)),
decreasing=TRUE)[1:20]
df <- as.data.frame(colData(dds) [,c("Condition")])
# NOTE(review): `select` is an integer vector, so colnames(select) is NULL;
# this rownames() assignment likely does not do what was intended - verify.
rownames(df) <-colnames(select)
pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
cluster_cols=FALSE, annotation_col=df)
#PCA analysis
pcaData <- plotPCA(vsd, intgroup=c("Condition"), returnData=TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
ggplot(pcaData, aes(PC1, PC2, color=Condition)) +
geom_point(size=3) +
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
coord_fixed()
plotPCA(vsd, intgroup=c("Condition"))
#likelihood ratio test
dds <- DESeq(dds, test="LRT", reduced=~1)
res <- results(dds)
resApeT <- lfcShrink(dds, coef=2, type="apeglm", lfcThreshold=1)
plotMA(resApeT, ylim=c(-3,3), cex=.8)
abline(h=c(-1,1), col="dodgerblue", lwd=2)
#outliers box plot (Cook's distances per sample)
par(mar=c(8,5,2,2))
boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
#reset par
par(mfrow=c(1,1))
# Make a basic volcano plot
with(res, plot(log2FoldChange, -log10(pvalue), pch=20, main="Volcano plot", xlim=c(-7,5)))
#adding colors to volcano plot
with(subset(res, padj<.01 ), points(log2FoldChange, -log10(pvalue), pch=20, col="red"))
with(subset(res, padj>.01 ), points(log2FoldChange, -log10(pvalue), pch=20, col="black"))
|
/DESeq2 attempt1.R
|
no_license
|
leekun4/CMSE890
|
R
| false
| false
| 3,886
|
r
|
# --- DESeq2 differential expression workflow: Infected vs Uninfected ---
# NOTE(review): interactive analysis script; it installs packages mid-script
# and relies on Bioconductor (DESeq2, apeglm, vsn) plus pheatmap.
library(DESeq2)
library(ggplot2)
#count matrix input
cts <- read.csv("RawCounts.csv", sep = ",")
coldata <- read.csv("Infected_Uninfected.csv", sep = ",", row.names = 1)
head(cts,2)
coldata
#making sure they are in order
all(rownames(coldata) %in% colnames(cts))
all(rownames(coldata) == colnames(cts))
cts <- cts[, rownames(coldata)]
all(rownames(coldata) == colnames(cts))
#constructing DESeqDataSet
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design = ~ Condition)
dds
#factor levels
dds$Condition <- factor(dds$Condition, levels = c("Uninfected","Infected"))
dds$Condition <- relevel(dds$Condition, ref = "Uninfected")
#differential expression analysis
dds <- DESeq(dds)
res <-results(dds)
res
resultsNames(dds)
#exporting DESeq2 results without any filtering
write.csv(as.data.frame(res),
file="Uninfected_Infected_nofilter.csv")
#apeglm method for effect size shrinkage
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("apeglm", version = "3.8")
library(apeglm)
#Log fold change shrinkage for visualization and ranking
resLFC <- lfcShrink(dds, coef="Condition_Infected_vs_Uninfected", type="apeglm")
resLFC
#p-values and adjusted p-values
resOrdered <- res[order(res$pvalue),]
summary(res)
# count of raw p-values below 0.05
# NOTE(review): the original comment said "adjusted p-values" but the code
# uses res$pvalue, not res$padj - confirm which was intended.
sum(res$pvalue < 0.05, na.rm=TRUE)
#MA-plot (red dot means the adjusted p value is less than 0.1)
plotMA(res, ylim=c(-2,2))
#shrunken log 2 fold changes
plotMA(resLFC, ylim=c(-2,2))
#plot counts
plotCounts(dds, gene=which.min(res$padj), intgroup="Condition")
#plot counts ggplot
d <- plotCounts(dds, gene=which.min(res$padj), intgroup="Condition",
returnData=TRUE)
library("ggplot2")
ggplot(d, aes(x=Condition, y=count)) +
geom_point(position=position_jitter(w=0.1,h=0)) +
scale_y_log10(breaks=c(25,100,400))
#exporting results to CSV files
write.csv(as.data.frame(resOrdered),
file="condition_treated_results.csv")
colData(dds)
#extracting transformed values
vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds, blind=FALSE)
head(assay(vsd), 3)
#download vsn
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("vsn", version = "3.8")
#effects of transformations on the variance
ntd <- normTransform(dds)
library("vsn")
meanSdPlot(assay(ntd))
#heatmap of the count matrix (top 20 genes by mean normalized count)
install.packages("pheatmap")
library("pheatmap")
library("ggplot2")
select <- order(rowMeans(counts(dds,normalized=TRUE)),
decreasing=TRUE)[1:20]
df <- as.data.frame(colData(dds) [,c("Condition")])
# NOTE(review): `select` is an integer vector, so colnames(select) is NULL;
# this rownames() assignment likely does not do what was intended - verify.
rownames(df) <-colnames(select)
pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
cluster_cols=FALSE, annotation_col=df)
#PCA analysis
pcaData <- plotPCA(vsd, intgroup=c("Condition"), returnData=TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
ggplot(pcaData, aes(PC1, PC2, color=Condition)) +
geom_point(size=3) +
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
coord_fixed()
plotPCA(vsd, intgroup=c("Condition"))
#likelihood ratio test
dds <- DESeq(dds, test="LRT", reduced=~1)
res <- results(dds)
resApeT <- lfcShrink(dds, coef=2, type="apeglm", lfcThreshold=1)
plotMA(resApeT, ylim=c(-3,3), cex=.8)
abline(h=c(-1,1), col="dodgerblue", lwd=2)
#outliers box plot (Cook's distances per sample)
par(mar=c(8,5,2,2))
boxplot(log10(assays(dds)[["cooks"]]), range=0, las=2)
#reset par
par(mfrow=c(1,1))
# Make a basic volcano plot
with(res, plot(log2FoldChange, -log10(pvalue), pch=20, main="Volcano plot", xlim=c(-7,5)))
#adding colors to volcano plot
with(subset(res, padj<.01 ), points(log2FoldChange, -log10(pvalue), pch=20, col="red"))
with(subset(res, padj>.01 ), points(log2FoldChange, -log10(pvalue), pch=20, col="black"))
|
# "tolower with error handling": returns NA instead of raising when
# tolower() fails (e.g. on invalid multibyte strings).
tryTolower <- function(x) {
  result <- NA
  attempt <- tryCatch(tolower(x), error = function(e) e)
  if (!inherits(attempt, "error")) {
    result <- tolower(x)
  }
  return(result)
}
## Taken from http://stackoverflow.com/questions/15155814/check-if-r-package-is-installed-then-load-library
# Load package `x`, installing it first from the St Andrews CRAN mirror if it
# is not available; stops with an error if installation also fails.
# (The error message is Portuguese: "Pacote não encontrado!" = "Package not found!")
carregarPacote <- function(x) {
if (!require(x,character.only = TRUE)) {
install.packages(x,dependencies=TRUE, repos='http://star-www.st-andrews.ac.uk/cran/')
if(!require(x,character.only = TRUE)) stop("Pacote não encontrado!")
}
}
# Left-pad each element of `str` with "0" characters up to `len.out` chars.
#
# Args:
#   str:       character vector to pad.
#   len.out:   target width (only the first element is used, as before).
#   num.zeros: number of zeros to prepend; defaults to len.out[1] - nchar(str).
#
# Returns: character vector, same length as `str`.
#
# Improvements over the rep()-based original: num.zeros is clamped at zero,
# so strings already at/over the target width are returned unchanged instead
# of erroring, and strrep()/paste0() keep the function vectorized over str.
zeroPad <- function(str, len.out, num.zeros = len.out[1] - nchar(str)) {
  pad <- strrep("0", pmax(0L, num.zeros))
  paste0(pad, str)
}
#' Test whether an ODBC connection is still open.
#'
#' Returns TRUE if RODBC::odbcGetInfo() succeeds on the connection,
#' FALSE otherwise (including when RODBC is unavailable).
#'
#' @param con the ODBC connection to test.
isConnectionOpen <- function(con) {
  # Bug fix: the original body queried a global `conn` instead of the `con`
  # argument, so the result ignored the connection actually passed in.
  tryCatch({odbcGetInfo(con);TRUE},error=function(...)FALSE)
}
# Replace accented (Latin) characters in `textos` with unaccented ASCII
# counterparts using a lookup table and gsubfn::gsubfn().
# see http://stackoverflow.com/questions/20495598/replace-accented-characters-in-r-with-non-accented-counterpart-utf-8-encoding
# NOTE(review): 'ý' appears twice in the table; the duplicate entry is redundant.
removerAcento = function(textos) {
unwanted_array = list( 'S'='S', 's'='s', 'Z'='Z', 'z'='z', 'À'='A', 'Á'='A', 'Â'='A', 'Ã'='A', 'Ä'='A', 'Å'='A', 'Æ'='A', 'Ç'='C', 'È'='E', 'É'='E',
'Ê'='E', 'Ë'='E', 'Ì'='I', 'Í'='I', 'Î'='I', 'Ï'='I', 'Ñ'='N', 'Ò'='O', 'Ó'='O', 'Ô'='O', 'Õ'='O', 'Ö'='O', 'Ø'='O', 'Ù'='U',
'Ú'='U', 'Û'='U', 'Ü'='U', 'Ý'='Y', 'Þ'='B', 'ß'='Ss', 'à'='a', 'á'='a', 'â'='a', 'ã'='a', 'ä'='a', 'å'='a', 'æ'='a', 'ç'='c',
'è'='e', 'é'='e', 'ê'='e', 'ë'='e', 'ì'='i', 'í'='i', 'î'='i', 'ï'='i', 'ð'='o', 'ñ'='n', 'ò'='o', 'ó'='o', 'ô'='o', 'õ'='o',
'ö'='o', 'ø'='o', 'ù'='u', 'ú'='u', 'û'='u', 'ý'='y', 'ý'='y', 'þ'='b')
require(gsubfn)
resultado = gsubfn(paste(names(unwanted_array),collapse='|'), unwanted_array, textos)
resultado
}
# Read an entire file into a single character string.
lerArquivo <- function(arquivo) {
  n_bytes <- file.info(arquivo)$size
  readChar(arquivo, n_bytes)
}
# Run `query` against the ODBC data source `dsn`, retrying with exponential
# backoff (starting at `time` seconds, doubling on each failure).
# NOTE(review): the retry recursion has no depth limit, so a permanently
# failing query retries forever with ever longer waits - confirm intended.
executarSql = function(query, dsn, uid, pwd, time = 5) {
require(RODBC)
resultado = tryCatch({
options(stringsAsFactors = FALSE)
if (exists('ch')) {
if (!isConnectionOpen(ch)) {
## open connection to the database
ch = odbcConnect(dsn = dsn, uid=uid, pwd=pwd)
}
} else {
## open connection to the database
ch = odbcConnect(dsn = dsn, uid=uid, pwd=pwd)
}
##fetch data
resultado = sqlQuery(ch, query)
## close database connection
close(ch)
return(resultado)
}, error = function(e) {
print(paste0("Erro ao executar query: ", query, "\n\t", e))
## retry the query after `time` seconds.
Sys.sleep(time)
## before retrying, double the waiting time (time*2)
time = time*2
resultado = executarSql(query, dsn, uid, pwd, time)
return(resultado)
})
resultado
}
# Append objects (passed by name via ... or `list`) to an existing .RData
# file, preserving everything already stored in it.
resave <- function(..., list = character(), file) {
  existing <- load(file)
  new.names <- c(list, as.character(substitute(list(...)))[-1L])
  # pull each object from the caller's frame into this one so save() sees it
  for (nm in new.names) {
    assign(nm, get(nm, envir = parent.frame()))
  }
  save(list = unique(c(existing, new.names)), file = file)
}
|
/R/utils.R
|
no_license
|
fernandosjp/hudar
|
R
| false
| false
| 3,399
|
r
|
# "tolower with error handling": returns NA instead of raising when
# tolower() fails (e.g. on invalid multibyte strings).
tryTolower <- function(x) {
  result <- NA
  attempt <- tryCatch(tolower(x), error = function(e) e)
  if (!inherits(attempt, "error")) {
    result <- tolower(x)
  }
  return(result)
}
## Taken from http://stackoverflow.com/questions/15155814/check-if-r-package-is-installed-then-load-library
# Load package `x`, installing it first from the St Andrews CRAN mirror if it
# is not available; stops with an error if installation also fails.
# (The error message is Portuguese: "Pacote não encontrado!" = "Package not found!")
carregarPacote <- function(x) {
if (!require(x,character.only = TRUE)) {
install.packages(x,dependencies=TRUE, repos='http://star-www.st-andrews.ac.uk/cran/')
if(!require(x,character.only = TRUE)) stop("Pacote não encontrado!")
}
}
# Left-pad each element of `str` with "0" characters up to `len.out` chars.
#
# Args:
#   str:       character vector to pad.
#   len.out:   target width (only the first element is used, as before).
#   num.zeros: number of zeros to prepend; defaults to len.out[1] - nchar(str).
#
# Returns: character vector, same length as `str`.
#
# Improvements over the rep()-based original: num.zeros is clamped at zero,
# so strings already at/over the target width are returned unchanged instead
# of erroring, and strrep()/paste0() keep the function vectorized over str.
zeroPad <- function(str, len.out, num.zeros = len.out[1] - nchar(str)) {
  pad <- strrep("0", pmax(0L, num.zeros))
  paste0(pad, str)
}
#' Test whether an ODBC connection is still open.
#'
#' Returns TRUE if RODBC::odbcGetInfo() succeeds on the connection,
#' FALSE otherwise (including when RODBC is unavailable).
#'
#' @param con the ODBC connection to test.
isConnectionOpen <- function(con) {
  # Bug fix: the original body queried a global `conn` instead of the `con`
  # argument, so the result ignored the connection actually passed in.
  tryCatch({odbcGetInfo(con);TRUE},error=function(...)FALSE)
}
# Replace accented (Latin) characters in `textos` with unaccented ASCII
# counterparts using a lookup table and gsubfn::gsubfn().
# see http://stackoverflow.com/questions/20495598/replace-accented-characters-in-r-with-non-accented-counterpart-utf-8-encoding
# NOTE(review): 'ý' appears twice in the table; the duplicate entry is redundant.
removerAcento = function(textos) {
unwanted_array = list( 'S'='S', 's'='s', 'Z'='Z', 'z'='z', 'À'='A', 'Á'='A', 'Â'='A', 'Ã'='A', 'Ä'='A', 'Å'='A', 'Æ'='A', 'Ç'='C', 'È'='E', 'É'='E',
'Ê'='E', 'Ë'='E', 'Ì'='I', 'Í'='I', 'Î'='I', 'Ï'='I', 'Ñ'='N', 'Ò'='O', 'Ó'='O', 'Ô'='O', 'Õ'='O', 'Ö'='O', 'Ø'='O', 'Ù'='U',
'Ú'='U', 'Û'='U', 'Ü'='U', 'Ý'='Y', 'Þ'='B', 'ß'='Ss', 'à'='a', 'á'='a', 'â'='a', 'ã'='a', 'ä'='a', 'å'='a', 'æ'='a', 'ç'='c',
'è'='e', 'é'='e', 'ê'='e', 'ë'='e', 'ì'='i', 'í'='i', 'î'='i', 'ï'='i', 'ð'='o', 'ñ'='n', 'ò'='o', 'ó'='o', 'ô'='o', 'õ'='o',
'ö'='o', 'ø'='o', 'ù'='u', 'ú'='u', 'û'='u', 'ý'='y', 'ý'='y', 'þ'='b')
require(gsubfn)
resultado = gsubfn(paste(names(unwanted_array),collapse='|'), unwanted_array, textos)
resultado
}
# Read an entire file into a single character string.
lerArquivo <- function(arquivo) {
  n_bytes <- file.info(arquivo)$size
  readChar(arquivo, n_bytes)
}
# Run `query` against the ODBC data source `dsn`, retrying with exponential
# backoff (starting at `time` seconds, doubling on each failure).
# NOTE(review): the retry recursion has no depth limit, so a permanently
# failing query retries forever with ever longer waits - confirm intended.
executarSql = function(query, dsn, uid, pwd, time = 5) {
require(RODBC)
resultado = tryCatch({
options(stringsAsFactors = FALSE)
if (exists('ch')) {
if (!isConnectionOpen(ch)) {
## open connection to the database
ch = odbcConnect(dsn = dsn, uid=uid, pwd=pwd)
}
} else {
## open connection to the database
ch = odbcConnect(dsn = dsn, uid=uid, pwd=pwd)
}
##fetch data
resultado = sqlQuery(ch, query)
## close database connection
close(ch)
return(resultado)
}, error = function(e) {
print(paste0("Erro ao executar query: ", query, "\n\t", e))
## retry the query after `time` seconds.
Sys.sleep(time)
## before retrying, double the waiting time (time*2)
time = time*2
resultado = executarSql(query, dsn, uid, pwd, time)
return(resultado)
})
resultado
}
# Append objects (passed by name via ... or `list`) to an existing .RData
# file, preserving everything already stored in it.
resave <- function(..., list = character(), file) {
  existing <- load(file)
  new.names <- c(list, as.character(substitute(list(...)))[-1L])
  # pull each object from the caller's frame into this one so save() sees it
  for (nm in new.names) {
    assign(nm, get(nm, envir = parent.frame()))
  }
  save(list = unique(c(existing, new.names)), file = file)
}
|
# File:
# This file creates a data set. It is intended to be sourced
# by the RMarkdown file 'Sourcing Files.Rmd'.
# Mother/Father are 0/1 indicator vectors; Mother contains one missing value.
Mother <- c(0,0,0,1,1,NA)
Father <- c(0,1,1,0,0,1)
df <- data.frame(Mother, Father)
df
str(df) #both Mother and Father columns are numeric
|
/Sourcing_Files/File to be sourced.R
|
no_license
|
BarryDeCicco/EDA_Descriptives_Tables
|
R
| false
| false
| 255
|
r
|
# File:
# This file creates a data set. It is intended to be sourced
# by the RMarkdown file 'Sourcing Files.Rmd'.
# Mother/Father are 0/1 indicator vectors; Mother contains one missing value.
Mother <- c(0,0,0,1,1,NA)
Father <- c(0,1,1,0,0,1)
df <- data.frame(Mother, Father)
df
str(df) #both Mother and Father columns are numeric
|
##### Import results from Experiment Factory's
##### bis-bas-survey and save as csv file
# NOTE(review): the header says "bis-bas-survey" but the code reads
# bis11-survey-results.json - confirm which survey is meant.
# load libraries
library(jsonlite)
library(stringr)
library(tidyverse)
# get IDs: subject folders are named "<id>_finished"
id_list <- list.dirs(
path = "data/expfactory",
full.names = FALSE) %>%
str_subset("_finished") %>%
str_remove_all("_finished")
# create empty tibble
dataset <- tibble()
for (id in id_list) {
# import json file
data_list <- read_json(
paste0("data/expfactory/", id, "_finished/bis11-survey-results.json"))
# convert list to tibble
tbl <- as_tibble(data_list)
# move variable names into one column and values into another
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider() in tidyr >= 1.0 - consider migrating.
tbl <- gather(tbl, key = varname, value = value)
# edit variable names in column 1 (strip "data" prefix and brackets)
tbl$varname <- tbl$varname %>%
str_remove_all(pattern = "data") %>%
str_remove_all(pattern = "\\[") %>%
str_remove_all(pattern = "\\]")
# separate question number and variable name
tbl <- tbl %>%
extract(varname, c("question_num", "varname"), "([0-9]+)([a-z].*)")
# move variables from varname to separate columns
tbl <- tbl %>%
spread(varname, value)
# add subject id
tbl <- tbl %>%
mutate(id = id)
# reorder columns
tbl <- tbl %>%
select(id, question_num, name, text, options, value)
# sort rows by question number
tbl$question_num <- as.integer(tbl$question_num)
tbl <- tbl %>%
arrange(question_num)
# add tbl to dataset
# NOTE(review): grows a tibble inside a loop; collecting tibbles in a
# list and calling bind_rows() once at the end would scale better.
dataset <- bind_rows(dataset, tbl)
}
# save as csv file
# NOTE(review): readr >= 1.4 prefers `file =` over the deprecated `path =`.
write_csv(x = dataset, path = "bis11-survey-results-all.csv")
|
/R/bis11_survey.R
|
no_license
|
mrweiler/dos08
|
R
| false
| false
| 1,538
|
r
|
##### Import results from Experiment Factory's
##### bis-bas-survey and save as csv file
# NOTE(review): the header says "bis-bas-survey" but the code reads
# bis11-survey-results.json - confirm which survey is meant.
# load libraries
library(jsonlite)
library(stringr)
library(tidyverse)
# get IDs: subject folders are named "<id>_finished"
id_list <- list.dirs(
path = "data/expfactory",
full.names = FALSE) %>%
str_subset("_finished") %>%
str_remove_all("_finished")
# create empty tibble
dataset <- tibble()
for (id in id_list) {
# import json file
data_list <- read_json(
paste0("data/expfactory/", id, "_finished/bis11-survey-results.json"))
# convert list to tibble
tbl <- as_tibble(data_list)
# move variable names into one column and values into another
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider() in tidyr >= 1.0 - consider migrating.
tbl <- gather(tbl, key = varname, value = value)
# edit variable names in column 1 (strip "data" prefix and brackets)
tbl$varname <- tbl$varname %>%
str_remove_all(pattern = "data") %>%
str_remove_all(pattern = "\\[") %>%
str_remove_all(pattern = "\\]")
# separate question number and variable name
tbl <- tbl %>%
extract(varname, c("question_num", "varname"), "([0-9]+)([a-z].*)")
# move variables from varname to separate columns
tbl <- tbl %>%
spread(varname, value)
# add subject id
tbl <- tbl %>%
mutate(id = id)
# reorder columns
tbl <- tbl %>%
select(id, question_num, name, text, options, value)
# sort rows by question number
tbl$question_num <- as.integer(tbl$question_num)
tbl <- tbl %>%
arrange(question_num)
# add tbl to dataset
# NOTE(review): grows a tibble inside a loop; collecting tibbles in a
# list and calling bind_rows() once at the end would scale better.
dataset <- bind_rows(dataset, tbl)
}
# save as csv file
# NOTE(review): readr >= 1.4 prefers `file =` over the deprecated `path =`.
write_csv(x = dataset, path = "bis11-survey-results-all.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaultRegressionFunctions.R
\name{gwasLMEHFuncsf}
\alias{gwasLMEHFuncsf}
\title{run a LMEH for a GWAS in Parallel}
\usage{
gwasLMEHFuncsf(i, g, pgx, Model, interactionTerm, genderVar, dbPath, dbTable,
overwriteEntry = FALSE, tempFile = tempfile(), randomTerm,
heterogeneousTerm)
}
\arguments{
\item{g}{vector object of numeric genotype calls for a single snp}
\item{pgx}{The data.frame containing the phenotypes used in the model}
\item{Model}{The original base model for the GWAS, excluding snp and snp interaction terms}
\item{interactionTerm}{The name of the variable to be included as the interaction term with the snp}
\item{dbPath}{The path to database to save results}
\item{overwriteEntry}{Boolean, passed to \code{\link[AxioSerializer]{writeObjectToTable}} whether to overwrite saved objects in the table specified}
\item{randomTerm}{The name of the random variable to include in results}
\item{heterogeneousTerm}{The name of the heterogeneous variable to include in results}
}
\value{
a P value, and saves lme models to the db specified in dbPath
}
\description{
A function called by runGWAS to perform a LMEH for each snp, designed for SnowFall parallel processing, and input snp is a "CNSet" or "SnpSuperSet"
}
|
/axioPackages/AxioGWAS/man/gwasLMEHFuncSF.Rd
|
no_license
|
jjsayleraxio/Axio_rstudio
|
R
| false
| true
| 1,345
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaultRegressionFunctions.R
\name{gwasLMEHFuncsf}
\alias{gwasLMEHFuncsf}
\title{run a LMEH for a GWAS in Parallel}
\usage{
gwasLMEHFuncsf(i, g, pgx, Model, interactionTerm, genderVar, dbPath, dbTable,
overwriteEntry = FALSE, tempFile = tempfile(), randomTerm,
heterogeneousTerm)
}
\arguments{
\item{g}{vector object of numeric genotype calls for a single snp}
\item{pgx}{The data.frame containing the phenotypes used in the model}
\item{Model}{The original base model for the GWAS, excluding snp and snp interaction terms}
\item{interactionTerm}{The name of the variable to be included as the interaction term with the snp}
\item{dbPath}{The path to database to save results}
\item{overwriteEntry}{Boolean, passed to \code{\link[AxioSerializer]{writeObjectToTable}} whether to overwrite saved objects in the table specified}
\item{randomTerm}{The name of the random variable to include in results}
\item{heterogeneousTerm}{The name of the heterogeneous variable to include in results}
}
\value{
a P value, and saves lme models to the db specified in dbPath
}
\description{
A function called by runGWAS to perform a LMEH for each snp, designed for SnowFall parallel processing, and input snp is a "CNSet" or "SnpSuperSet"
}
|
library(data.table)
# Input-validation bounds for the Shiny UI: reject absurdly large parameter
# values (MAX_VAL) and observation counts (MAX_OBS) before building priors.
MAX_VAL <- 1e6
MAX_OBS <- 1e8
# Build a conjugate prior distribution from user-supplied expectations.
#
# Args:
#   dist_name: "binomial", "normal", or anything else (treated as the
#     Poisson/gamma case in the final branch).
#   expected_* / upper_bound_*: point estimate and upper bound for the
#     relevant parameter(s); only the pair(s) matching dist_name are used.
#
# Returns: a prior object from the matching get_supported_* helper
# (defined elsewhere in this package), or a character error message for
# the UI when validation fails.
create_prior_dist <- function(dist_name
, expected_rate
, upper_bound_rate
, expected_mean
, upper_bound_mean
, expected_sd
, upper_bound_sd
, expected_lambda
, upper_bound_lambda
) {
if (dist_name == 'binomial') {
in_vals <- c(expected_rate, upper_bound_rate)
if (any(is.na(in_vals))) {
return('Please enter data')
}
if (!all(data.table::between(in_vals, 0, 1))) {
return('Rates must be between 0 and 1')
}
# beta prior with ~5% of mass beyond the stated upper bound
return(get_supported_beta(mu = expected_rate, bound = upper_bound_rate, desired_support = 0.05))
} else if (dist_name == 'normal') {
means <- c(expected_mean, upper_bound_mean)
sds <- c(expected_sd, upper_bound_sd)
if (any(is.na(c(means, sds)))) {
return('Please enter data')
}
if (!all(data.table::between(means, -MAX_VAL, MAX_VAL))) {
return('Average must be less extreme')
}
# incbounds = FALSE: standard deviations must be strictly positive
if (!all(data.table::between(sds, 0, MAX_VAL, incbounds = FALSE))) {
return('Standard deviations must be less extreme')
}
return(get_supported_normal_gamma(mu = expected_mean, bound_mu = upper_bound_mean
, sigma = expected_sd, bound_sigma = upper_bound_sd
, desired_support = 0.05))
} else {
# fallback branch: Poisson rate (lambda) with a gamma prior
in_vals <- c(expected_lambda, upper_bound_lambda)
if (any(is.na(in_vals))) {
return('Please enter data')
}
if (!all(data.table::between(in_vals, 0, MAX_VAL))) {
return('Values must be less extreme')
}
return(get_supported_gamma(mu = expected_lambda, bound = upper_bound_lambda, desired_support = 0.05))
}
}
# Validate observed A/B test results and package them as a data.table.
#
# Only the arguments matching `dist_name` are used:
#   binomial: obs_* (observation counts) and rate_* (observed rates)
#   normal:   count_*, mean_*, sd_* (per-variant summary statistics)
#   other:    num_* (sessions) and total_* (event counts)
#
# Returns: a two-row data.table (one row per variant) on success, or a
# character error message for the UI when validation fails.
create_results_dt <- function(dist_name
, obs_a, obs_b, rate_a, rate_b
, count_a, count_b, mean_a, mean_b, sd_a, sd_b
, num_a, num_b, total_a, total_b
) {
if (dist_name == 'binomial') {
obs <- c(obs_a, obs_b)
rates <- c(rate_a, rate_b)
if (any(is.na(c(obs, rates)))) {
return('Please enter data')
}
if (!all(data.table::between(obs, 0, MAX_OBS))) {
return('Please use less extreme number of observations')
}
if (!all(data.table::between(rates, 0, 1))) {
return('Rates must be between 0 and 1')
}
return(data.table::data.table('variant' = c('a', 'b')
, 'num_obs' = obs
, 'observed_rate' = rates))
} else if (dist_name == 'normal') {
counts <- c(count_a, count_b)
means <- c(mean_a, mean_b)
sds <- c(sd_a, sd_b)
if (any(is.na(c(counts, means, sds)))) {
return('Please enter data')
}
if (!all(data.table::between(counts, 0, MAX_OBS))) {
return('Please use less extreme number of observations')
}
if (!all(data.table::between(means, -MAX_VAL, MAX_VAL))) {
return('Average must be less extreme')
}
# incbounds=FALSE: standard deviations must be strictly positive
if (!all(data.table::between(sds, 0, MAX_VAL, incbounds=FALSE))) {
return('Standard deviations must be less extreme')
}
return(data.table::data.table('variant' = c('a', 'b')
, 'num_obs' = counts
, 'avg' = means
, 'std_dev' = sds))
} else {
nums <- c(num_a, num_b)
totals <- c(total_a, total_b)
if (any(is.na(c(nums, totals)))) {
return('Please enter data')
}
if (!all(data.table::between(nums, 0, MAX_VAL))) {
return('Please use less extreme number of sessions')
}
if (!all(data.table::between(totals, 0, MAX_VAL))) {
return('Please use less extreme number of observations')
}
return(data.table::data.table('variant' = c('a', 'b')
, 'num_sessions' = c(num_a, num_b)
, 'observed_count' = c(total_a, total_b)))
}
}
# Produce a human-readable one-line description of a prior/posterior
# distribution: its family, parameter values, and first two moments.
#
# `dist` may also be a character error message from the validation helpers,
# in which case an empty string is returned so the UI shows nothing.
create_description <- function(dist_name, dist) {
    if (is.character(dist)) {
        return('')
    }
    moments <- abayes::compute_moments(dist)
    if (dist_name == 'binomial') {
        paste0('The distribution is a beta distribution with parameters: alpha = '
               , round(dist[['alpha']], 2), ', beta = ', round(dist[['beta']], 2)
               , '. Mean: ', signif(moments[['mu']], 3), ' and Standard Deviation: ', signif(moments[['sigma']], 3))
    } else if (dist_name == 'normal') {
        # For the normal-gamma, report the moments of the mean parameter x.
        x_moments <- moments[['x']]
        paste0('The distribution is a normal gamma distribution with parameters: mu = '
               , round(dist[['mu']], 2), ', lambda = ', round(dist[['lambda']], 2)
               , ', alpha = ', round(dist[['alpha']], 2), ', beta = ', round(dist[['beta']], 2)
               , '. Mean: ', signif(x_moments[['mu']], 3), ' and Standard Deviation: ', signif(x_moments[['sigma']], 3))
    } else {
        paste0('The distribution is a gamma distribution with parameters: alpha = '
               , round(dist[['alpha']], 2), ', beta = ', round(dist[['beta']], 2)
               , '. Mean: ', signif(moments[['mu']], 3), ' and Standard Deviation: ', signif(moments[['sigma']], 3))
    }
}
# Summarise simulated posterior comparisons into the decision table shown in
# the UI: per-variant risk, probability of being larger, and the effect-size
# interval for B relative to A (interval columns are '-' for variant A).
create_metric_dt <- function(posteriors, method) {
    metrics <- abayes::get_metrics(posteriors = posteriors, sim_batch_size = 1e5, method = method)
    # Uniform 3-significant-figure character formatting for all cells.
    fmt <- function(x) as.character(signif(x, 3))
    data.table::data.table(
        'Variant' = c('A', 'B')
        , 'Risk Of Choosing Variant' = fmt(c(metrics[['loss_a']], metrics[['loss_b']]))
        , 'Prob Variant is Larger' = fmt(c(1 - metrics[['prob_b_gt_a']], metrics[['prob_b_gt_a']]))
        , '95% CI Lower Bound' = c('-', fmt(metrics[['effect_lower']]))
        , 'Expected Effect Size (B - A)' = c('-', fmt(metrics[['effect_expected']]))
        , '95% CI Upper Bound' = c('-', fmt(metrics[['effect_upper']]))
    )
}
# Render the prior/posterior distributions for the selected family.
#
# `dists` is either a list of distribution objects or a character error
# message produced by the validation helpers above; in the error case an
# otherwise empty plot is drawn with the message annotated in the centre.
#
# NOTE(review): ggplot2 functions (ggplot, geom_point, xlim, ...) are used
# unqualified here — presumably attached/imported elsewhere in the app;
# confirm before reusing this file standalone.
plot_dists <- function(dists, dist_name) {
    if (is.character(dists)) {
        df <- data.frame()
        # Blank canvas carrying only the validation message.
        return(ggplot(df) + geom_point() + xlim(0, 1) + ylim(0, 1) +
                   annotate('text', x = 0.5, y = 0.5, label = dists, size = 5))
    } else {
        if (dist_name == 'binomial') {
            return(plot_beta(betas = dists, title = 'What We Believe About the Rate'))
        } else if (dist_name == 'normal') {
            return(plot_normal(normals = dists))
        } else {
            return(plot_gamma(gammas = dists, title = 'What We Believe About the Expected Count'))
        }
    }
}
|
/inst/abayes/server_funs.R
|
permissive
|
sambrilleman/abayes
|
R
| false
| false
| 7,716
|
r
|
library(data.table)
# Validation bounds shared by the server helpers below.
MAX_VAL <- 1e6 # most extreme parameter value accepted from the UI
MAX_OBS <- 1e8 # most extreme observation count accepted from the UI
# Build a prior distribution for the chosen likelihood family.
#
# dist_name selects the branch ('binomial', 'normal', anything else falls
# through to the Poisson/gamma branch); only the matching argument group is
# evaluated, so the others may be left missing by callers.
#
# Returns a distribution object from the matching get_supported_* helper,
# or a character error message that the Shiny UI displays verbatim.
create_prior_dist <- function(dist_name
                              , expected_rate
                              , upper_bound_rate
                              , expected_mean
                              , upper_bound_mean
                              , expected_sd
                              , upper_bound_sd
                              , expected_lambda
                              , upper_bound_lambda
) {
    if (dist_name == 'binomial') {
        in_vals <- c(expected_rate, upper_bound_rate)
        if (any(is.na(in_vals))) {
            return('Please enter data')
        }
        if (!all(data.table::between(in_vals, 0, 1))) {
            return('Rates must be between 0 and 1')
        }
        # desired_support = 0.05 controls how much mass sits beyond the bound.
        return(get_supported_beta(mu = expected_rate, bound = upper_bound_rate, desired_support = 0.05))
    } else if (dist_name == 'normal') {
        means <- c(expected_mean, upper_bound_mean)
        sds <- c(expected_sd, upper_bound_sd)
        if (any(is.na(c(means, sds)))) {
            return('Please enter data')
        }
        if (!all(data.table::between(means, -MAX_VAL, MAX_VAL))) {
            return('Average must be less extreme')
        }
        # incbounds = FALSE: a standard deviation of exactly zero is rejected.
        if (!all(data.table::between(sds, 0, MAX_VAL, incbounds = FALSE))) {
            return('Standard deviations must be less extreme')
        }
        return(get_supported_normal_gamma(mu = expected_mean, bound_mu = upper_bound_mean
                                          , sigma = expected_sd, bound_sigma = upper_bound_sd
                                          , desired_support = 0.05))
    } else {
        in_vals <- c(expected_lambda, upper_bound_lambda)
        if (any(is.na(in_vals))) {
            return('Please enter data')
        }
        if (!all(data.table::between(in_vals, 0, MAX_VAL))) {
            return('Values must be less extreme')
        }
        return(get_supported_gamma(mu = expected_lambda, bound = upper_bound_lambda, desired_support = 0.05))
    }
}
# Validate the observed A/B data for the chosen likelihood family and pack it
# into the two-row (one per variant) data.table expected downstream.
#
# dist_name selects which argument group is used:
#   'binomial' -> obs_* / rate_*        (observation counts, observed rates)
#   'normal'   -> count_* / mean_* / sd_*
#   otherwise  -> num_* / total_*       (session counts and event totals)
# Unused arguments are never evaluated (lazy evaluation), so callers only
# need to supply the group matching dist_name.
#
# Returns a data.table, or a character error message for display in the UI.
create_results_dt <- function(dist_name
                              , obs_a, obs_b, rate_a, rate_b
                              , count_a, count_b, mean_a, mean_b, sd_a, sd_b
                              , num_a, num_b, total_a, total_b
) {
    if (dist_name == 'binomial') {
        obs <- c(obs_a, obs_b)
        rates <- c(rate_a, rate_b)
        if (any(is.na(c(obs, rates)))) {
            return('Please enter data')
        }
        if (!all(data.table::between(obs, 0, MAX_OBS))) {
            return('Please use less extreme number of observations')
        }
        if (!all(data.table::between(rates, 0, 1))) {
            return('Rates must be between 0 and 1')
        }
        return(data.table::data.table('variant' = c('a', 'b')
                                      , 'num_obs' = obs
                                      , 'observed_rate' = rates))
    } else if (dist_name == 'normal') {
        counts <- c(count_a, count_b)
        means <- c(mean_a, mean_b)
        sds <- c(sd_a, sd_b)
        if (any(is.na(c(counts, means, sds)))) {
            return('Please enter data')
        }
        if (!all(data.table::between(counts, 0, MAX_OBS))) {
            return('Please use less extreme number of observations')
        }
        if (!all(data.table::between(means, -MAX_VAL, MAX_VAL))) {
            return('Average must be less extreme')
        }
        # incbounds=FALSE: a standard deviation of exactly zero is rejected.
        if (!all(data.table::between(sds, 0, MAX_VAL, incbounds=FALSE))) {
            return('Standard deviations must be less extreme')
        }
        return(data.table::data.table('variant' = c('a', 'b')
                                      , 'num_obs' = counts
                                      , 'avg' = means
                                      , 'std_dev' = sds))
    } else {
        nums <- c(num_a, num_b)
        totals <- c(total_a, total_b)
        if (any(is.na(c(nums, totals)))) {
            return('Please enter data')
        }
        if (!all(data.table::between(nums, 0, MAX_VAL))) {
            return('Please use less extreme number of sessions')
        }
        if (!all(data.table::between(totals, 0, MAX_VAL))) {
            return('Please use less extreme number of observations')
        }
        return(data.table::data.table('variant' = c('a', 'b')
                                      , 'num_sessions' = c(num_a, num_b)
                                      , 'observed_count' = c(total_a, total_b)))
    }
}
# Produce a human-readable one-line description of a distribution: its
# family, parameter values, and first two moments.
#
# `dist` may also be a character error message from the validation helpers,
# in which case an empty string is returned so the UI shows nothing.
create_description <- function(dist_name, dist) {
    if (is.character(dist)) {
        return('')
    } else {
        if (dist_name == 'binomial') {
            moments <- abayes::compute_moments(dist)
            mu <- moments[['mu']]; sigma <- moments[['sigma']]
            return(paste0('The distribution is a beta distribution with parameters: alpha = '
                          , round(dist[['alpha']], 2), ', beta = ', round(dist[['beta']], 2)
                          , '. Mean: ', signif(mu, 3), ' and Standard Deviation: ', signif(sigma, 3)))
        } else if (dist_name == 'normal') {
            moments <- abayes::compute_moments(dist)
            x_mu <- moments[['x']][['mu']]; x_sigma <- moments[['x']][['sigma']]
            # NOTE(review): the tau (precision) moments below are extracted
            # but never used in the description text — confirm intended.
            tau_mu <- moments[['tau']][['mu']]; tau_sigma <- moments[['tau']][['sigma']]
            return(paste0('The distribution is a normal gamma distribution with parameters: mu = '
                          , round(dist[['mu']], 2), ', lambda = ', round(dist[['lambda']], 2)
                          , ', alpha = ', round(dist[['alpha']], 2), ', beta = ', round(dist[['beta']], 2)
                          , '. Mean: ', signif(x_mu, 3), ' and Standard Deviation: ', signif(x_sigma, 3)))
        } else {
            moments <- abayes::compute_moments(dist)
            mu <- moments[['mu']]; sigma <- moments[['sigma']]
            return(paste0('The distribution is a gamma distribution with parameters: alpha = '
                          , round(dist[['alpha']], 2), ', beta = ', round(dist[['beta']], 2)
                          , '. Mean: ', signif(mu, 3), ' and Standard Deviation: ', signif(sigma, 3)))
        }
    }
}
# Summarise simulated posterior comparisons into the decision table shown in
# the UI: per-variant risk, probability of being larger, and the effect-size
# interval for B relative to A (interval columns show '-' for variant A).
create_metric_dt <- function(posteriors, method) {
    metrics <- abayes::get_metrics(posteriors = posteriors, sim_batch_size = 1e5, method = method)
    # Build with short temporary names, then rename to the display headers.
    dt <- data.table::data.table(x = c('A', 'B')
                                 , y = as.character(signif(c(metrics[['loss_a']], metrics[['loss_b']]), 3))
                                 , z = as.character(signif(c(1 - metrics[['prob_b_gt_a']], metrics[['prob_b_gt_a']]), 3))
                                 , w = c('-', as.character(signif(metrics[['effect_lower']], 3)))
                                 , a = c('-', as.character(signif(metrics[['effect_expected']], 3)))
                                 , b = c('-', as.character(signif(metrics[['effect_upper']], 3)))
    )
    data.table::setnames(dt, c('Variant', 'Risk Of Choosing Variant', 'Prob Variant is Larger'
                               , '95% CI Lower Bound', 'Expected Effect Size (B - A)', '95% CI Upper Bound'))
    return(dt)
}
# Render the prior/posterior distributions for the selected family.
# A character `dists` means validation failed upstream: show the message on
# an otherwise empty canvas instead of a distribution plot.
plot_dists <- function(dists, dist_name) {
    if (is.character(dists)) {
        empty <- data.frame()
        message_plot <- ggplot(empty) + geom_point() + xlim(0, 1) + ylim(0, 1) +
            annotate('text', x = 0.5, y = 0.5, label = dists, size = 5)
        return(message_plot)
    }
    if (dist_name == 'binomial') {
        plot_beta(betas = dists, title = 'What We Believe About the Rate')
    } else if (dist_name == 'normal') {
        plot_normal(normals = dists)
    } else {
        plot_gamma(gammas = dists, title = 'What We Believe About the Expected Count')
    }
}
|
#
# Shiny web application: looks up stored passwords for a given user.
# Run via the 'Run App' button in RStudio or shiny::runApp().
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
source("fnc_Process.R")  # presumably defines query_passwords() used below
# UI: username/passphrase inputs in a sidebar, result table in the main panel.
ui <- fluidPage(
   # Application title
   titlePanel("Citi Analytics Password Query"),
   # Sidebar with the credential inputs
   sidebarLayout(
      sidebarPanel(
        textInput("username", h3("User Name")),
        textInput("passphrase", h3("Passphrase"))
      ),
      # Table of passwords returned for the supplied credentials
      mainPanel(
         tableOutput("pwTable")
      )
   )
)
# Server: re-runs the lookup whenever either input changes.
server <- function(input, output) {
  # Earlier eventReactive variant (fires only on passphrase changes),
  # kept for reference:
  # PW <- eventReactive(input$passphrase, {
  #   d1 <- query_passwords(input$username, input$passphrase)
  # })
  #
  # Reactive wrapper caches the query result between renders.
  PW <- reactive({
    d1 <- query_passwords(input$username, input$passphrase)
    return(d1)
  })
  output$pwTable <- renderTable({
    PW()
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
/codes/pw_query_shiny/old/app.R
|
permissive
|
ytk77/simple_keyserver
|
R
| false
| false
| 1,171
|
r
|
#
# Shiny web application: looks up stored passwords for a given user.
# Run via the 'Run App' button in RStudio, or with shiny::runApp().
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
source("fnc_Process.R")
# UI: credential inputs on the left, the matching password table on the right.
ui <- fluidPage(
    titlePanel("Citi Analytics Password Query"),
    sidebarLayout(
        sidebarPanel(
            textInput("username", h3("User Name")),
            textInput("passphrase", h3("Passphrase"))
        ),
        mainPanel(
            tableOutput("pwTable")
        )
    )
)
# Server: re-query whenever either credential field changes; the reactive
# caches the result between renders.
server <- function(input, output) {
    password_table <- reactive({
        query_passwords(input$username, input$passphrase)
    })
    output$pwTable <- renderTable({
        password_table()
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Generic constructor coercing various inputs to a .UDStack utilisation
# distribution object.
setGeneric("UDStack", function(x,...){standardGeneric("UDStack")})
# Base case: wrap a RasterStack, recording the estimation method.
setMethod("UDStack",
          signature="RasterStack",
          definition=function(x,method="unknown",...){
              new(".UDStack",x, method=method)
          })
# Coerce a RasterBrick via stack() and re-dispatch.
# NOTE(review): callGeneric(stack(x), ...) does not forward `method` — the
# RasterStack method will fall back to its default "unknown"; confirm this
# is intended.
setMethod("UDStack",
          signature="RasterBrick",
          definition=function(x,method,...){
              callGeneric(stack(x),...)
          })
# Coerce a list of layers via stack() and re-dispatch (same `method` caveat).
setMethod("UDStack",
          signature="list",
          definition=function(x,method,...){
              callGeneric(stack(x),...)
          })
# Normalise each layer of a .UDBurstStack to sum to one, then re-dispatch.
setMethod("UDStack",
          signature=".UDBurstStack",
          definition=function(x,method="unknown",...){
              callGeneric(x/cellStats(x,sum), ...)
          })
|
/R/UDStack.R
|
no_license
|
guzhongru/move
|
R
| false
| false
| 712
|
r
|
# Generic constructor coercing various inputs to a .UDStack utilisation
# distribution object. All coercion methods funnel into the RasterStack
# method, which wraps the stack and records the estimation method.
setGeneric("UDStack", function(x,...){standardGeneric("UDStack")})
setMethod("UDStack",
          signature="RasterStack",
          definition=function(x,method="unknown",...){
              new(".UDStack",x, method=method)
          })
setMethod("UDStack",
          signature="RasterBrick",
          definition=function(x,method="unknown",...){
              # Forward `method` explicitly: a plain callGeneric(stack(x), ...)
              # silently drops it, so the RasterStack method always received
              # its default "unknown". The added default keeps old calls valid.
              callGeneric(stack(x), method=method, ...)
          })
setMethod("UDStack",
          signature="list",
          definition=function(x,method="unknown",...){
              callGeneric(stack(x), method=method, ...)
          })
setMethod("UDStack",
          signature=".UDBurstStack",
          definition=function(x,method="unknown",...){
              # Normalise each layer to sum to one before re-dispatching.
              callGeneric(x/cellStats(x,sum), method=method, ...)
          })
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEntries.R
\name{getEntries}
\alias{getEntries}
\title{Scrapes a page of entries}
\usage{
getEntries(returns)
}
\arguments{
\item{returns}{a list returned by getPages}
}
\value{
returns a list of scraped entries from one page in a raw
character string.
}
\description{
this function uses the information returned by getPages
in order to scrape a single page of data about stories
}
\examples{
getEntries(list(noPages=FALSE, page_use="/cartoon/Invader-Zim/?&srt=1&r=103&p=",
page_num=11, url="https://www.fanfiction.net/cartoon/Invader-Zim/", max.entries=NA))
}
|
/man/getEntries.Rd
|
no_license
|
ekmaus19/absentfan
|
R
| false
| true
| 642
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getEntries.R
\name{getEntries}
\alias{getEntries}
\title{Scrapes a page of entries}
\usage{
getEntries(returns)
}
\arguments{
\item{returns}{a list returned by getPages}
}
\value{
returns a list of scraped entries from one page in a raw
character string.
}
\description{
this function uses the information returned by getPages
in order to scrape a single page of data about stories
}
\examples{
getEntries(list(noPages=FALSE, page_use="/cartoon/Invader-Zim/?&srt=1&r=103&p=",
page_num=11, url="https://www.fanfiction.net/cartoon/Invader-Zim/", max.entries=NA))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_estimates.R
\name{ml_binom}
\alias{ml_binom}
\alias{ml_multinom}
\title{Maximum-likelihood Estimate}
\usage{
ml_binom(k, n, A, b, map, strategy, n.fit = 3, start, progress = FALSE,
...)
ml_multinom(k, options, A, b, V, n.fit = 3, start, progress = FALSE,
...)
}
\arguments{
\item{k}{vector of observed response frequencies.}
\item{n}{the number of choices per item type.
If \code{k=n=0}, Bayesian inference relies on the prior distribution only.}
\item{A}{a matrix with one row for each linear inequality constraint and one
column for each of the free parameters. The parameter space is defined
as all probabilities \code{x} that fulfill the order constraints \code{A*x <= b}.}
\item{b}{a vector of the same length as the number of rows of \code{A}.}
\item{map}{optional: numeric vector of the same length as \code{k} with integers
mapping the frequencies \code{k} to the free parameters/columns of \code{A}/\code{V},
thereby allowing for equality constraints (e.g., \code{map=c(1,1,2,2)}).
Reversed probabilities \code{1-p} are coded by negative integers.
Guessing probabilities of .50 are encoded by zeros. The default assumes
different parameters for each item type: \code{map=1:ncol(A)}}
\item{strategy}{a list that defines the predictions of a strategy, see \code{\link{strategy_multiattribute}}.}
\item{n.fit}{number of calls to \link[stats]{constrOptim}.}
\item{start}{only relevant if \code{steps} is defined or \code{cmin>0}:
a vector with starting values in the interior of the polytope.
If missing, an approximate maximum-likelihood estimate is used.}
\item{progress}{whether a progress bar should be shown (if \code{cpu=1}).}
\item{...}{further arguments passed to the function
\code{\link[stats]{constrOptim}}. To ensure high accuracy, the number of
maximum iterations should be sufficiently large (e.g., by setting
\code{control = list(maxit = 1e6, reltol=.Machine$double.eps^.6), outer.iterations = 1000}.}
\item{options}{number of observable categories/probabilities for each item
type/multinomial distribution, e.g., \code{c(3,2)} for a ternary and binary item.}
\item{V}{a matrix of vertices (one per row) that define the polytope of
admissible parameters as the convex hull over these points
(if provided, \code{A} and \code{b} are ignored).
Similar as for \code{A}, columns of \code{V} omit the last value for each
multinomial condition (e.g., a1,a2,a3,b1,b2 becomes a1,a2,b1).
Note that this method is comparatively slow since it solves linear-programming problems
to test whether a point is inside a polytope (Fukuda, 2004) or to run the Gibbs sampler.}
}
\value{
the list returned by the optimizer \code{\link[stats]{constrOptim}},
including the input arguments (e.g., \code{k}, \code{options}, \code{A}, \code{V}, etc.).
\itemize{
\item If the Ab-representation was used, \code{par} provides the ML estimate for
the probability vector \eqn{\theta}.
\item If the V-representation was used, \code{par} provides the estimates for the
(usually not identifiable) mixture weights \eqn{\alpha} that define the convex
hull of the vertices in \eqn{V}, while \code{p} provides the ML estimates for
the probability parameters. Because the weights must sum to one, the
\eqn{\alpha}-parameter for the last row of the matrix \eqn{V} is dropped.
If the unconstrained ML estimate is inside the convex hull, the mixture weights
\eqn{\alpha} are not estimated and replaced by missings (\code{NA}).
}
}
\description{
Get ML estimate for product-binomial/multinomial model with linear inequality constraints.
}
\details{
First, it is checked whether the unconstrained maximum-likelihood estimator
(e.g., for the binomial: \code{k/n}) is inside the constrained parameter space.
Only if this is not the case, nonlinear optimization with convex linear-inequality
constrained is used to estimate (A) the probability parameters \eqn{\theta}
for the Ab-representation or (B) the mixture weights \eqn{\alpha} for the V-representation.
}
\examples{
# predicted linear order: p1 < p2 < p3 < .50
# (cf. WADDprob in ?strategy_multiattribute)
A <- matrix(c(1, -1, 0,
0, 1, -1,
0, 0, 1),
ncol = 3, byrow = TRUE)
b <- c(0, 0, .50)
ml_binom(k = c(4,1,23), n = 40, A, b)[1:2]
ml_multinom(k = c(4,36, 1,39, 23,17),
options = c(2,2,2), A, b)[1:2]
# probabilistic strategy: A,A,A,B [e1<e2<e3<e4<.50]
strat <- list(pattern = c(-1, -2, -3, 4),
c = .5, ordered = TRUE, prior = c(1,1))
ml_binom(c(7,3,1, 19), 20, strategy = strat)[1:2]
# vertex representation (one prediction per row)
V <- matrix(c(
# strict weak orders
0, 1, 0, 1, 0, 1, # a < b < c
1, 0, 0, 1, 0, 1, # b < a < c
0, 1, 0, 1, 1, 0, # a < c < b
0, 1, 1, 0, 1, 0, # c < a < b
1, 0, 1, 0, 1, 0, # c < b < a
1, 0, 1, 0, 0, 1, # b < c < a
0, 0, 0, 1, 0, 1, # a ~ b < c
0, 1, 0, 0, 1, 0, # a ~ c < b
1, 0, 1, 0, 0, 0, # c ~ b < a
0, 1, 0, 1, 0, 0, # a < b ~ c
1, 0, 0, 0, 0, 1, # b < a ~ c
0, 0, 1, 0, 1, 0, # c < a ~ b
0, 0, 0, 0, 0, 0 # a ~ b ~ c
), byrow = TRUE, ncol = 6)
ml_multinom(k = c(4,1,5, 1,9,0, 7,2,1), n.fit = 1,
options = c(3,3,3), V = V)
}
|
/multinomineq/man/ml_binom.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| true
| 5,385
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_estimates.R
\name{ml_binom}
\alias{ml_binom}
\alias{ml_multinom}
\title{Maximum-likelihood Estimate}
\usage{
ml_binom(k, n, A, b, map, strategy, n.fit = 3, start, progress = FALSE,
...)
ml_multinom(k, options, A, b, V, n.fit = 3, start, progress = FALSE,
...)
}
\arguments{
\item{k}{vector of observed response frequencies.}
\item{n}{the number of choices per item type.
If \code{k=n=0}, Bayesian inference relies on the prior distribution only.}
\item{A}{a matrix with one row for each linear inequality constraint and one
column for each of the free parameters. The parameter space is defined
as all probabilities \code{x} that fulfill the order constraints \code{A*x <= b}.}
\item{b}{a vector of the same length as the number of rows of \code{A}.}
\item{map}{optional: numeric vector of the same length as \code{k} with integers
mapping the frequencies \code{k} to the free parameters/columns of \code{A}/\code{V},
thereby allowing for equality constraints (e.g., \code{map=c(1,1,2,2)}).
Reversed probabilities \code{1-p} are coded by negative integers.
Guessing probabilities of .50 are encoded by zeros. The default assumes
different parameters for each item type: \code{map=1:ncol(A)}}
\item{strategy}{a list that defines the predictions of a strategy, see \code{\link{strategy_multiattribute}}.}
\item{n.fit}{number of calls to \link[stats]{constrOptim}.}
\item{start}{only relevant if \code{steps} is defined or \code{cmin>0}:
a vector with starting values in the interior of the polytope.
If missing, an approximate maximum-likelihood estimate is used.}
\item{progress}{whether a progress bar should be shown (if \code{cpu=1}).}
\item{...}{further arguments passed to the function
\code{\link[stats]{constrOptim}}. To ensure high accuracy, the number of
maximum iterations should be sufficiently large (e.g., by setting
\code{control = list(maxit = 1e6, reltol=.Machine$double.eps^.6), outer.iterations = 1000}.}
\item{options}{number of observable categories/probabilities for each item
type/multinomial distribution, e.g., \code{c(3,2)} for a ternary and binary item.}
\item{V}{a matrix of vertices (one per row) that define the polytope of
admissible parameters as the convex hull over these points
(if provided, \code{A} and \code{b} are ignored).
Similar as for \code{A}, columns of \code{V} omit the last value for each
multinomial condition (e.g., a1,a2,a3,b1,b2 becomes a1,a2,b1).
Note that this method is comparatively slow since it solves linear-programming problems
to test whether a point is inside a polytope (Fukuda, 2004) or to run the Gibbs sampler.}
}
\value{
the list returned by the optimizer \code{\link[stats]{constrOptim}},
including the input arguments (e.g., \code{k}, \code{options}, \code{A}, \code{V}, etc.).
\itemize{
\item If the Ab-representation was used, \code{par} provides the ML estimate for
the probability vector \eqn{\theta}.
\item If the V-representation was used, \code{par} provides the estimates for the
(usually not identifiable) mixture weights \eqn{\alpha} that define the convex
hull of the vertices in \eqn{V}, while \code{p} provides the ML estimates for
the probability parameters. Because the weights must sum to one, the
\eqn{\alpha}-parameter for the last row of the matrix \eqn{V} is dropped.
If the unconstrained ML estimate is inside the convex hull, the mixture weights
\eqn{\alpha} are not estimated and replaced by missings (\code{NA}).
}
}
\description{
Get ML estimate for product-binomial/multinomial model with linear inequality constraints.
}
\details{
First, it is checked whether the unconstrained maximum-likelihood estimator
(e.g., for the binomial: \code{k/n}) is inside the constrained parameter space.
Only if this is not the case, nonlinear optimization with convex linear-inequality
constrained is used to estimate (A) the probability parameters \eqn{\theta}
for the Ab-representation or (B) the mixture weights \eqn{\alpha} for the V-representation.
}
\examples{
# predicted linear order: p1 < p2 < p3 < .50
# (cf. WADDprob in ?strategy_multiattribute)
A <- matrix(c(1, -1, 0,
0, 1, -1,
0, 0, 1),
ncol = 3, byrow = TRUE)
b <- c(0, 0, .50)
ml_binom(k = c(4,1,23), n = 40, A, b)[1:2]
ml_multinom(k = c(4,36, 1,39, 23,17),
options = c(2,2,2), A, b)[1:2]
# probabilistic strategy: A,A,A,B [e1<e2<e3<e4<.50]
strat <- list(pattern = c(-1, -2, -3, 4),
c = .5, ordered = TRUE, prior = c(1,1))
ml_binom(c(7,3,1, 19), 20, strategy = strat)[1:2]
# vertex representation (one prediction per row)
V <- matrix(c(
# strict weak orders
0, 1, 0, 1, 0, 1, # a < b < c
1, 0, 0, 1, 0, 1, # b < a < c
0, 1, 0, 1, 1, 0, # a < c < b
0, 1, 1, 0, 1, 0, # c < a < b
1, 0, 1, 0, 1, 0, # c < b < a
1, 0, 1, 0, 0, 1, # b < c < a
0, 0, 0, 1, 0, 1, # a ~ b < c
0, 1, 0, 0, 1, 0, # a ~ c < b
1, 0, 1, 0, 0, 0, # c ~ b < a
0, 1, 0, 1, 0, 0, # a < b ~ c
1, 0, 0, 0, 0, 1, # b < a ~ c
0, 0, 1, 0, 1, 0, # c < a ~ b
0, 0, 0, 0, 0, 0 # a ~ b ~ c
), byrow = TRUE, ncol = 6)
ml_multinom(k = c(4,1,5, 1,9,0, 7,2,1), n.fit = 1,
options = c(3,3,3), V = V)
}
|
library(foreach)
library(data.table)
# Thread data.table across all logical cores for the joins/aggregations below.
ncores <- parallel::detectCores(logical = T)
data.table::setDTthreads(ncores)
# Project helpers — presumably defines max.grp.col_() and cuminc.cox() used
# below; confirm in analysis/Utils.R.
source(here::here("analysis","Utils.R"))
###########################################################
# Long-format time-varying cohort, one row per patient per interval.
dt.tv <- data.table::setDT(arrow::read_feather(here::here("output","cohort_long.feather")))
# The four index procedures analysed in this sub-group script.
procedures.sub <- c('Colectomy','Cholecystectomy',
                    'HipReplacement','KneeReplacement')
# Model covariates: procedure indicators plus patient/admission factors.
covariates <- c(procedures.sub,'age.cat','sex','bmi.cat','imd5','wave',
                'vaccination.status.factor','region','Current.Cancer','Emergency','LOS.bin','Charl12','recentCOVID','previousCOVID')
# Drop every column not needed for modelling to reduce memory use.
drop.vars <- names(dt.tv)[!(names(dt.tv) %in% c(covariates, 'patient_id', 'tstart','tstop','start','end','event','postop.covid.cohort','end.fu'))]
dt.tv[,(drop.vars) := NULL]
# Convert 0/1 procedure indicator columns to logical flags.
dt.tv[,(procedures.sub) := lapply(.SD,function(x) x==1), .SDcols = (procedures.sub)]
data.table::setkey(dt.tv,patient_id,tstart,tstop)
# Distinct event codes within the post-op COVID cohort; the lowest code is
# dropped — presumably the censoring level. TODO confirm coding of `event`.
n.type.events <- sort(unique(dt.tv[(postop.covid.cohort) ,event]))[-1]
# Flag rows belonging to any of the four sub-group procedures.
dt.tv[, sub.op := (is.finite(Colectomy) & Colectomy ==T) |
        (is.finite(Cholecystectomy) & Cholecystectomy == T) |
        (is.finite(HipReplacement) & HipReplacement == T) |
        (is.finite(KneeReplacement) & KneeReplacement == T)]
data.table::setkey(dt.tv,patient_id,tstart,tstop)
# Project helper (Utils.R): aggregates sub.op within patient/follow-up groups
# by taking the group maximum — NOTE(review): confirm semantics in Utils.R.
max.grp.col_(dt = 'dt.tv',
             max.var.name = 'sub.op',
             aggregate.cols = 'sub.op',
             id.vars = c("patient_id","end.fu"))
# One cause-specific Cox model per event type, with procedure x wave and
# emergency x wave interactions, fitted on the sub-group cohort.
# NOTE(review): HipReplacement is omitted from the formula — presumably the
# reference procedure; confirm this is intentional.
post.op.covid.model.waves.sub <-
  lapply(n.type.events, function(i) survival::coxph(survival::Surv(start,end,event==i) ~ Colectomy*wave + Cholecystectomy*wave + KneeReplacement*wave + age.cat + sex + bmi.cat + imd5 +
                                                      vaccination.status.factor + region + Current.Cancer + Emergency*wave + Charl12 + recentCOVID + previousCOVID, id = patient_id,
                                                    data = dt.tv[(postop.covid.cohort) & sub.op == T], model = T))
# Export exponentiated coefficients (hazard ratios) for the first event type.
data.table::fwrite(broom::tidy(post.op.covid.model.waves.sub[[1]], exponentiate= T, conf.int = T), file = here::here("output","postopcovidmodelwavessub.csv"))
# Prediction grid: 8 rows per procedure = 4 waves x (elective, emergency),
# with all other covariates fixed at typical reference values ('F', wave
# patterns, 2nd BMI level, middle IMD quintile, 3 vaccine doses, etc.).
new.data.postop <- data.table::data.table(
  'start' = rep(0,8*length(procedures.sub)),
  'end' = rep(30,8*length(procedures.sub)),   # 30-day prediction horizon
  'event' = rep(F,8*length(procedures.sub)),
  'Colectomy' = c(rep(T,8),rep(F,24)),
  'Cholecystectomy'=c(rep(F,8),rep(T,8),rep(F,16)),
  'HipReplacement'=c(rep(F,16),rep(T,8),rep(F,8)),
  'KneeReplacement'=c(rep(F,24),rep(T,8)),
  'age.cat' = rep('(50,70]',8*length(procedures.sub)),
  'sex' = rep('F',8*length(procedures.sub)),
  'bmi.cat' = rep(levels(dt.tv$bmi.cat)[2],8*length(procedures.sub)),
  'imd5' = rep(levels(dt.tv$imd5)[3], 8*length(procedures.sub)),
  'wave' = rep(paste0('Wave_',1:4),times = 2*length(procedures.sub)),
  'vaccination.status.factor' = rep('3',8*length(procedures.sub)),
  'region' = rep("East Midlands",8*length(procedures.sub)),
  'Current.Cancer' = rep(T,8*length(procedures.sub)),
  'Emergency' = rep(c(rep(F,4),rep(T,4)), times = length(procedures.sub)),
  'LOS.bin' = rep(F,8*length(procedures.sub)),
  'Charl12' = rep('Single',8*length(procedures.sub)),
  'recentCOVID' = rep(F,8*length(procedures.sub)),
  'previousCOVID' = rep(F,8*length(procedures.sub)),
  'patient_id' = 1:(8*length(procedures.sub)))
# 30-day adjusted cumulative incidence from the project cuminc.cox() helper,
# reshaped so columns are waves and rows are elective/emergency x procedure
# (row order follows the construction of new.data.postop above).
cuminc.adjusted.waves.sub <-
  matrix(cuminc.cox(n.type.events = n.type.events,
                    dt = 'dt.tv[(postop.covid.cohort) & sub.op == T]',
                    model = 'post.op.covid.model.waves.sub',
                    newdata = 'new.data.postop',
                    day = 30), byrow = T, ncol = 4)
colnames(cuminc.adjusted.waves.sub) <- paste0('Wave_',1:4)
rownames(cuminc.adjusted.waves.sub) <- paste0(c('Elective','Emergency'),rep(procedures.sub, each = 2))
# NOTE(review): fwrite is given a matrix — confirm the exported CSV retains
# the row labels as intended.
data.table::fwrite(cuminc.adjusted.waves.sub, file = here::here("output","postopcovid_adjusted_waves_sub.csv"))
# Line plot of cumulative incidence by wave, coloured by procedure, with a
# dashed line for emergency admissions.
# NOTE(review): the pattern 'Emergency*' makes the trailing 'y' optional —
# presumably a plain 'Emergency' prefix match was intended; behaviour is the
# same for these row names.
adjusted.waves.sub.plot <- ggplot2::ggplot(data.table::melt(data.table::data.table(cuminc.adjusted.waves.sub, keep.rownames = T),
                                                            id.vars = 'rn',
                                                            variable.name = 'Wave',
                                                            value.name = '30 Day COVID Cumulative Incidence (%)')[, `:=`(Emergency = grepl('Emergency*',rn),
                                                                                                                        Operation = gsub('Emergency|Elective', '',rn))],
                                           ggplot2::aes(x = Wave,
                                                        y = `30 Day COVID Cumulative Incidence (%)`,
                                                        group = rn,
                                                        colour = Operation,
                                                        linetype = Emergency)) +
  ggplot2::geom_line()
ggplot2::ggsave(plot = adjusted.waves.sub.plot, here::here('output','adjusted_waves_sub_plot.png'),dpi = 'retina', width = 7, height = 5, units = 'in', device = 'png' )
#############################################################################################
# Parallel backend for the foreach code below (physical cores only).
# NOTE(review): no stopCluster() appears in the visible portion of this
# script — confirm the workers are released at the end.
library(doParallel)
ncores <- parallel::detectCores(logical = F)
cl <- parallel::makeCluster(ncores)
doParallel::registerDoParallel(cl)
data.table::setkey(dt.tv,"patient_id","tstart","tstop")
# Recompute the sub-group flag (same definition as earlier in the script).
dt.tv[, sub.op := (is.finite(Colectomy) & Colectomy ==T) |
        (is.finite(Cholecystectomy) & Cholecystectomy == T) |
        (is.finite(HipReplacement) & HipReplacement == T) |
        (is.finite(KneeReplacement) & KneeReplacement == T)]
# Main-effects (no wave interaction) cause-specific Cox models for the
# sub-group cohort; unlike the waves model above this one includes LOS.bin.
# NOTE(review): HipReplacement is again omitted from the formula —
# presumably the reference procedure; confirm intentional.
post.op.covid.model.sub <-
  lapply(n.type.events, function(i) survival::coxph(survival::Surv(start,end,event==i) ~ Colectomy + Cholecystectomy + KneeReplacement +
                                                      age.cat + sex + bmi.cat + imd5 + wave +
                                                      vaccination.status.factor + region + Current.Cancer +
                                                      Emergency + LOS.bin + Charl12 + recentCOVID + previousCOVID,
                                                    id = patient_id,
                                                    data = dt.tv[(postop.covid.cohort) & sub.op == T], model = T))
# Export exponentiated coefficients (hazard ratios) for the first event type.
data.table::fwrite(broom::tidy(post.op.covid.model.sub[[1]], exponentiate= T, conf.int = T), file = here::here("output","postopcovidmodelsub.csv"))
adjusted.cuminc.sub <- data.table::as.data.table(foreach::foreach(predi = 1:length(covariates), .combine = 'rbind', .inorder = T) %do% {
newdata.rows <- length(unique(dt.tv[!is.na(get(covariates[predi])) ,get(covariates[predi])]))
newdata.pred <- data.table::data.table('start' = rep(0,newdata.rows),
'end' = rep(30,newdata.rows),
'event' = rep(F,newdata.rows),
'patient_id' = 1:newdata.rows,
'Colectomy' = c(rep(T,newdata.rows)),
'Cholecystectomy'=c(rep(F,newdata.rows)),
'HipReplacement'=c(rep(F,newdata.rows)),
'KneeReplacement'=c(rep(F,newdata.rows)),
'age.cat' = rep('(50,70]',newdata.rows),
'sex' = rep('F',newdata.rows),
'bmi.cat' = rep(levels(dt.tv$bmi.cat)[2],newdata.rows),
'imd5' = rep(levels(dt.tv$imd5)[3], newdata.rows),
'wave' = rep(paste0('Wave_',3),times = newdata.rows),
'vaccination.status.factor' = rep('3',newdata.rows),
'region' = rep("East Midlands",newdata.rows),
'Current.Cancer' = rep(T,newdata.rows),
'LOS.bin' = rep(F,newdata.rows),
'Emergency' = rep(F,newdata.rows),
'Charl12' = rep('Single',newdata.rows),
'recentCOVID' = rep(F,newdata.rows),
'previousCOVID' = rep(F,newdata.rows)
)
if ( predi <= length(procedures.sub)) {
newdata.pred[,(procedures.sub) := F]
newdata.pred[,(procedures.sub[predi]) := c(F,T)]
} else {
# newdata.pred <- data.table::data.table('start' = rep(0,newdata.rows),
# 'end' = rep(30,newdata.rows),
# 'event' = rep(F,newdata.rows),
# 'patient_id' = 1:newdata.rows)
# if ( predi > length(procedures.sub)) {
# newdata.pred[,(procedures.sub) := lapply(procedures.sub, function(x) x == procedures.sub[which.max(dt.tv[,lapply(.SD,sum,na.rm = T), .SDcols = c(procedures.sub)])])] } else {
# newdata.pred[,(procedures.sub) := lapply(procedures.sub, function(x) x == covariates[predi] & patient_id > 1)]
# }
# newdata.pred[,(covariates[-c(1:length(procedures.sub))]) := lapply(((length(procedures.sub)+1):length(covariates)), function(i.c) {
# if(is.factor(dt.tv[!is.na(get(covariates[i.c])),get(covariates[i.c])])) {
# as.character(rep(max.category(i.c),newdata.rows))
# } else if(is.logical(dt.tv[!is.na(get(covariates[i.c])),get(covariates[i.c])])) {
# as.logical(rep(max.category(i.c),newdata.rows))
# } else if(is.numeric(dt.tv[!is.na(get(covariates[i.c])),get(covariates[i.c])])) {
# is.numeric(rep(max.category(i.c),newdata.rows))
# } else {
# rep(max.category(i.c),newdata.rows)
# }
# })]
# #names(newdata.pred) <- c('start','end','event', covariates,'patient_id')
if(is.factor(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])])) {
newdata.pred[,(covariates[predi]) := as.character(sort(unique(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])], na.rm = T)))]
} else if(is.logical(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])])) {
newdata.pred[,(covariates[predi]) := as.logical(sort(unique(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])], na.rm = T)))]
} else if(is.numeric(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])])) {
newdata.pred[,(covariates[predi]) := is.numeric(sort(unique(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])], na.rm = T)))]
} else {
newdata.pred[,(covariates[predi]) := sort(unique(dt.tv[!is.na(get(covariates[predi])),get(covariates[predi])], na.rm = T))]
}
}
# samples <- foreach::foreach(i = 1:1000, .combine = cbind, .multicombine = T, .inorder = F, .verbose = F,
# .packages = c('data.table','survival'),
# .export = c('n.type.events','dt.tv', 'post.op.covid.model.sub','newdata.pred')) %dopar% {
# cuminc.cox(n.type.events = n.type.events,
# dt = 'dt.tv[patient_id %in% sample(unique(patient_id), replace = T) & (postop.covid.cohort) & sub.op == T]',
# model = 'post.op.covid.model.sub',
# newdata = 'newdata.pred',
# day = 30)}
# t.samples <- t(apply(samples,1,quantile,c(0.25,0.5,0.75)))
# boot.IQR <-apply(t.samples,1,function(x) paste0(x[2],' (',x[1],',',x[3],')'))
death.risk.30day <- predict(object = post.op.covid.model.sub[[3]],
newdata = newdata.pred,, type = 'expected',se.fit = T)
readmit.risk.30day <- predict(object = post.op.covid.model.sub[[2]],
newdata = newdata.pred,, type = 'expected',se.fit = T)
covid.risk.30day <- predict(object = post.op.covid.model.sub[[1]],
newdata = newdata.pred,, type = 'expected',se.fit = T)
cbind(matrix(paste0(round((1- exp(-covid.risk.30day$fit))*100,3),
' (', round((1 - exp(-(covid.risk.30day$fit - 1.96*covid.risk.30day$se.fit)))*100,3),',',
round((1 - exp(-(covid.risk.30day$fit + 1.96*covid.risk.30day$se.fit)))*100,3),')'),nrow =newdata.rows),
cuminc.cox(n.type.events = n.type.events,
dt = 'dt.tv[(postop.covid.cohort) & sub.op == T]',
model = 'post.op.covid.model.sub',
newdata = 'newdata.pred',
day = 30),
matrix(paste0(round((1- exp(-readmit.risk.30day$fit))*100,3),
' (', round((1 - exp(-(readmit.risk.30day$fit - 1.96*readmit.risk.30day$se.fit)))*100,3),',',
round((1 - exp(-(readmit.risk.30day$fit + 1.96*readmit.risk.30day$se.fit)))*100,3),')'),nrow =newdata.rows),
matrix(paste0(round((1- exp(-death.risk.30day$fit))*100,3),
' (', round((1 - exp(-(death.risk.30day$fit - 1.96*death.risk.30day$se.fit)))*100,3),',',
round((1 - exp(-(death.risk.30day$fit + 1.96*death.risk.30day$se.fit)))*100,3),')'),nrow =newdata.rows)
)
})
save(cuminc.adjusted.waves.sub,post.op.covid.model.sub,adjusted.cuminc.sub, file = here::here("output","postopcovid_adjusted_sub.RData"))
# Take out baseline no procedures.sub groups that are not observeds
data.table::fwrite(adjusted.cuminc.sub, file = here::here("output","postopcovid_adjusted_sub.csv"))
|
/analysis/Table_postopcovid_adjusted_sub.R
|
permissive
|
opensafely/PostOpCovid
|
R
| false
| false
| 16,179
|
r
|
# Set up multithreaded data.table processing and load shared helpers
# (cuminc.cox, max.grp.col_ come from Utils.R).
library(foreach)
library(data.table)
ncores <- parallel::detectCores(logical = T)
data.table::setDTthreads(ncores)
source(here::here("analysis","Utils.R"))
###########################################################
# Long-format (time-varying) post-operative cohort: one row per
# patient-interval.
dt.tv <- data.table::setDT(arrow::read_feather(here::here("output","cohort_long.feather")))
# The four sub-study index procedures.
procedures.sub <- c('Colectomy','Cholecystectomy',
                    'HipReplacement','KneeReplacement')
# Model covariates: procedure indicators plus patient/admission factors.
covariates <- c(procedures.sub,'age.cat','sex','bmi.cat','imd5','wave',
                'vaccination.status.factor','region','Current.Cancer','Emergency','LOS.bin','Charl12','recentCOVID','previousCOVID')
# Drop columns not used for modelling to cut memory use.
drop.vars <- names(dt.tv)[!(names(dt.tv) %in% c(covariates, 'patient_id', 'tstart','tstop','start','end','event','postop.covid.cohort','end.fu'))]
dt.tv[,(drop.vars) := NULL]
# Recode 0/1 procedure columns to logical flags.
dt.tv[,(procedures.sub) := lapply(.SD,function(x) x==1), .SDcols = (procedures.sub)]
data.table::setkey(dt.tv,patient_id,tstart,tstop)
# Distinct event codes in the cohort; [-1] drops the smallest code
# (presumably the censoring code -- confirm against event coding).
n.type.events <- sort(unique(dt.tv[(postop.covid.cohort) ,event]))[-1]
# Flag rows belonging to patients undergoing any sub-study procedure.
dt.tv[, sub.op := (is.finite(Colectomy) & Colectomy ==T) |
        (is.finite(Cholecystectomy) & Cholecystectomy == T) |
        (is.finite(HipReplacement) & HipReplacement == T) |
        (is.finite(KneeReplacement) & KneeReplacement == T)]
data.table::setkey(dt.tv,patient_id,tstart,tstop)
# Propagate sub.op to every row of each patient/follow-up episode
# (helper from Utils.R; modifies dt.tv by reference).
max.grp.col_(dt = 'dt.tv',
             max.var.name = 'sub.op',
             aggregate.cols = 'sub.op',
             id.vars = c("patient_id","end.fu"))
# Cause-specific Cox models (one per event type) with procedure-by-wave
# interactions. NOTE(review): HipReplacement is absent from the formula,
# so it acts as the baseline procedure -- confirm this omission is intended.
post.op.covid.model.waves.sub <-
  lapply(n.type.events, function(i) survival::coxph(survival::Surv(start,end,event==i) ~ Colectomy*wave + Cholecystectomy*wave + KneeReplacement*wave + age.cat + sex + bmi.cat + imd5 +
                                                      vaccination.status.factor + region + Current.Cancer + Emergency*wave + Charl12 + recentCOVID + previousCOVID, id = patient_id,
                                                    data = dt.tv[(postop.covid.cohort) & sub.op == T], model = T))
# Export tidy hazard ratios for the first event type's model only.
data.table::fwrite(broom::tidy(post.op.covid.model.waves.sub[[1]], exponentiate= T, conf.int = T), file = here::here("output","postopcovidmodelwavessub.csv"))
# Prediction grid: for each of the 4 sub-study procedures, 8 reference
# patients spanning every wave (1-4) x admission type (elective first,
# then emergency). All remaining covariates are held at fixed reference
# levels (female, aged (50,70], wave-varying, vaccinated, East Midlands).
n.pred <- 8 * length(procedures.sub)
new.data.postop <- data.table::data.table(
  'start' = rep(0, n.pred),
  'end' = rep(30, n.pred),
  'event' = rep(FALSE, n.pred),
  'Colectomy' = rep(c(TRUE, FALSE, FALSE, FALSE), each = 8),
  'Cholecystectomy' = rep(c(FALSE, TRUE, FALSE, FALSE), each = 8),
  'HipReplacement' = rep(c(FALSE, FALSE, TRUE, FALSE), each = 8),
  'KneeReplacement' = rep(c(FALSE, FALSE, FALSE, TRUE), each = 8),
  'age.cat' = rep('(50,70]', n.pred),
  'sex' = rep('F', n.pred),
  'bmi.cat' = rep(levels(dt.tv$bmi.cat)[2], n.pred),
  'imd5' = rep(levels(dt.tv$imd5)[3], n.pred),
  'wave' = rep(paste0('Wave_', 1:4), times = 2 * length(procedures.sub)),
  'vaccination.status.factor' = rep('3', n.pred),
  'region' = rep("East Midlands", n.pred),
  'Current.Cancer' = rep(TRUE, n.pred),
  'Emergency' = rep(rep(c(FALSE, TRUE), each = 4), times = length(procedures.sub)),
  'LOS.bin' = rep(FALSE, n.pred),
  'Charl12' = rep('Single', n.pred),
  'recentCOVID' = rep(FALSE, n.pred),
  'previousCOVID' = rep(FALSE, n.pred),
  'patient_id' = 1:n.pred)
# Adjusted 30-day cumulative incidence for each grid row, reshaped to a
# 4-wave-wide matrix (rows = elective/emergency x procedure).
cuminc.adjusted.waves.sub <-
  matrix(cuminc.cox(n.type.events = n.type.events,
                    dt = 'dt.tv[(postop.covid.cohort) & sub.op == T]',
                    model = 'post.op.covid.model.waves.sub',
                    newdata = 'new.data.postop',
                    day = 30), byrow = T, ncol = 4)
colnames(cuminc.adjusted.waves.sub) <- paste0('Wave_',1:4)
rownames(cuminc.adjusted.waves.sub) <- paste0(c('Elective','Emergency'),rep(procedures.sub, each = 2))
# NOTE(review): fwrite does not write matrix rownames, so the
# Elective/Emergency-procedure labels are lost in the CSV -- confirm
# whether they should be added as a column first.
data.table::fwrite(cuminc.adjusted.waves.sub, file = here::here("output","postopcovid_adjusted_waves_sub.csv"))
# Line plot: incidence by wave, coloured by operation, dashed for emergency.
# NOTE(review): the pattern 'Emergency*' means "Emergenc then >=0 y";
# plain 'Emergency' (or grepl fixed = TRUE) expresses the intent better.
adjusted.waves.sub.plot <- ggplot2::ggplot(data.table::melt(data.table::data.table(cuminc.adjusted.waves.sub, keep.rownames = T),
                                                            id.vars = 'rn',
                                                            variable.name = 'Wave',
                                                            value.name = '30 Day COVID Cumulative Incidence (%)')[, `:=`(Emergency = grepl('Emergency*',rn),
                                                                                                                        Operation = gsub('Emergency|Elective', '',rn))],
                                           ggplot2::aes(x = Wave,
                                                        y = `30 Day COVID Cumulative Incidence (%)`,
                                                        group = rn,
                                                        colour = Operation,
                                                        linetype = Emergency)) +
  ggplot2::geom_line()
ggplot2::ggsave(plot = adjusted.waves.sub.plot, here::here('output','adjusted_waves_sub_plot.png'),dpi = 'retina', width = 7, height = 5, units = 'in', device = 'png' )
#############################################################################################
# Register a parallel backend (physical cores only). NOTE(review): the
# cluster is never stopped and the loop below uses %do% (serial); either
# parallelise the loop or add parallel::stopCluster(cl) at script end.
library(doParallel)
ncores <- parallel::detectCores(logical = F)
cl <- parallel::makeCluster(ncores)
doParallel::registerDoParallel(cl)
data.table::setkey(dt.tv,"patient_id","tstart","tstop")
# Recompute the sub-study flag (same definition as above).
dt.tv[, sub.op := (is.finite(Colectomy) & Colectomy ==T) |
        (is.finite(Cholecystectomy) & Cholecystectomy == T) |
        (is.finite(HipReplacement) & HipReplacement == T) |
        (is.finite(KneeReplacement) & KneeReplacement == T)]
# Main-effects cause-specific Cox models, one per event type.
# NOTE(review): HipReplacement is again omitted (reference procedure).
post.op.covid.model.sub <-
  lapply(n.type.events, function(i) survival::coxph(survival::Surv(start,end,event==i) ~ Colectomy + Cholecystectomy + KneeReplacement +
                                                      age.cat + sex + bmi.cat + imd5 + wave +
                                                      vaccination.status.factor + region + Current.Cancer +
                                                      Emergency + LOS.bin + Charl12 + recentCOVID + previousCOVID,
                                                    id = patient_id,
                                                    data = dt.tv[(postop.covid.cohort) & sub.op == T], model = T))
# Export tidy hazard ratios for the first event type's model.
data.table::fwrite(broom::tidy(post.op.covid.model.sub[[1]], exponentiate= T, conf.int = T), file = here::here("output","postopcovidmodelsub.csv"))
# For each covariate, predict adjusted 30-day cumulative incidence of each
# event type across that covariate's observed levels, holding everything
# else at fixed reference values. (Large blocks of commented-out bootstrap
# code removed; see version control history if needed.)
adjusted.cuminc.sub <- data.table::as.data.table(foreach::foreach(predi = seq_along(covariates), .combine = 'rbind', .inorder = T) %do% {
  # One prediction row per observed level of the covariate under study.
  newdata.rows <- length(unique(dt.tv[!is.na(get(covariates[predi])) ,get(covariates[predi])]))
  # Reference patient template: elective Colectomy, female, aged (50,70],
  # wave 3, vaccinated, East Midlands.
  newdata.pred <- data.table::data.table('start' = rep(0, newdata.rows),
                                         'end' = rep(30, newdata.rows),
                                         'event' = rep(FALSE, newdata.rows),
                                         'patient_id' = seq_len(newdata.rows),
                                         'Colectomy' = rep(TRUE, newdata.rows),
                                         'Cholecystectomy' = rep(FALSE, newdata.rows),
                                         'HipReplacement' = rep(FALSE, newdata.rows),
                                         'KneeReplacement' = rep(FALSE, newdata.rows),
                                         'age.cat' = rep('(50,70]', newdata.rows),
                                         'sex' = rep('F', newdata.rows),
                                         'bmi.cat' = rep(levels(dt.tv$bmi.cat)[2], newdata.rows),
                                         'imd5' = rep(levels(dt.tv$imd5)[3], newdata.rows),
                                         'wave' = rep(paste0('Wave_', 3), times = newdata.rows),
                                         'vaccination.status.factor' = rep('3', newdata.rows),
                                         'region' = rep("East Midlands", newdata.rows),
                                         'Current.Cancer' = rep(TRUE, newdata.rows),
                                         'LOS.bin' = rep(FALSE, newdata.rows),
                                         'Emergency' = rep(FALSE, newdata.rows),
                                         'Charl12' = rep('Single', newdata.rows),
                                         'recentCOVID' = rep(FALSE, newdata.rows),
                                         'previousCOVID' = rep(FALSE, newdata.rows)
  )
  if (predi <= length(procedures.sub)) {
    # Procedure covariates: row 1 = no sub-study procedure, row 2 = the
    # procedure of interest.
    newdata.pred[, (procedures.sub) := FALSE]
    newdata.pred[, (procedures.sub[predi]) := c(FALSE, TRUE)]
  } else {
    # Other covariates: one row per observed level, coerced to the
    # column's native type.
    observed.levels <- sort(unique(dt.tv[!is.na(get(covariates[predi])), get(covariates[predi])], na.rm = TRUE))
    if (is.factor(dt.tv[!is.na(get(covariates[predi])), get(covariates[predi])])) {
      newdata.pred[, (covariates[predi]) := as.character(observed.levels)]
    } else if (is.logical(dt.tv[!is.na(get(covariates[predi])), get(covariates[predi])])) {
      newdata.pred[, (covariates[predi]) := as.logical(observed.levels)]
    } else if (is.numeric(dt.tv[!is.na(get(covariates[predi])), get(covariates[predi])])) {
      # Bug fix: original used `is.numeric(sort(...))`, which assigned a
      # single TRUE/FALSE instead of the numeric level values.
      newdata.pred[, (covariates[predi]) := as.numeric(observed.levels)]
    } else {
      newdata.pred[, (covariates[predi]) := observed.levels]
    }
  }
  # Expected cumulative hazards at 30 days (stray `,,` empty arguments
  # removed from the original predict() calls). Indices follow
  # n.type.events ordering; names suggest 1 = COVID, 2 = readmission,
  # 3 = death -- confirm against event coding.
  death.risk.30day <- predict(object = post.op.covid.model.sub[[3]],
                              newdata = newdata.pred, type = 'expected', se.fit = TRUE)
  readmit.risk.30day <- predict(object = post.op.covid.model.sub[[2]],
                                newdata = newdata.pred, type = 'expected', se.fit = TRUE)
  covid.risk.30day <- predict(object = post.op.covid.model.sub[[1]],
                              newdata = newdata.pred, type = 'expected', se.fit = TRUE)
  # Cumulative incidence (%) with normal-theory 95% CIs, alongside the
  # competing-risks estimate from cuminc.cox().
  cbind(matrix(paste0(round((1 - exp(-covid.risk.30day$fit)) * 100, 3),
                      ' (', round((1 - exp(-(covid.risk.30day$fit - 1.96 * covid.risk.30day$se.fit))) * 100, 3), ',',
                      round((1 - exp(-(covid.risk.30day$fit + 1.96 * covid.risk.30day$se.fit))) * 100, 3), ')'), nrow = newdata.rows),
        cuminc.cox(n.type.events = n.type.events,
                   dt = 'dt.tv[(postop.covid.cohort) & sub.op == T]',
                   model = 'post.op.covid.model.sub',
                   newdata = 'newdata.pred',
                   day = 30),
        matrix(paste0(round((1 - exp(-readmit.risk.30day$fit)) * 100, 3),
                      ' (', round((1 - exp(-(readmit.risk.30day$fit - 1.96 * readmit.risk.30day$se.fit))) * 100, 3), ',',
                      round((1 - exp(-(readmit.risk.30day$fit + 1.96 * readmit.risk.30day$se.fit))) * 100, 3), ')'), nrow = newdata.rows),
        matrix(paste0(round((1 - exp(-death.risk.30day$fit)) * 100, 3),
                      ' (', round((1 - exp(-(death.risk.30day$fit - 1.96 * death.risk.30day$se.fit))) * 100, 3), ',',
                      round((1 - exp(-(death.risk.30day$fit + 1.96 * death.risk.30day$se.fit))) * 100, 3), ')'), nrow = newdata.rows)
  )
})
# Persist models and both adjusted-incidence tables.
save(cuminc.adjusted.waves.sub, post.op.covid.model.sub, adjusted.cuminc.sub, file = here::here("output","postopcovid_adjusted_sub.RData"))
# Take out baseline no procedures.sub groups that are not observeds
data.table::fwrite(adjusted.cuminc.sub, file = here::here("output","postopcovid_adjusted_sub.csv"))
|
### plot temperature data from Panama and Bermuda
## created by Nyssa Silbiger
## edited on 11/30/2018
#############
library(tidyverse)
library(lubridate)
library(sf)
library(gridExtra)
library(grid)
# Panama temperature
PData<-read.csv('Data/TemperatureData/PanamaTemperature.csv')
# remove the missing data (keep only rows flagged 'good' by the QC column)
PData<-PData[PData$chk_note=='good',]
# make dates
PData$datetime<-parse_date_time(PData$datetime, c('dmy_hms','dmy_hm')) # they are in multiple formats
PData$date<-dmy(PData$date)
# Bermuda Data -- headerless station files for 2016/2017; columns follow
# the NDBC standard-meteorological layout assigned below.
BData2016<-read.table('Data/TemperatureData/bepb6h2016.txt')
colnames(BData2016)<-c('YY', 'MM', 'DD', 'hh', 'mm', 'WDIR', 'WSPD', 'GST', 'WVHT',
                       'DPD', 'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE')
BData2017<-read.table('Data/TemperatureData/bepb6h2017.txt')
colnames(BData2017)<-c('YY', 'MM', 'DD', 'hh', 'mm', 'WDIR', 'WSPD', 'GST', 'WVHT',
                       'DPD', 'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE')
BData<-rbind(BData2016,BData2017)
# make dates
BData$Date.Time<-mdy_hm(paste(BData$MM, BData$DD, BData$YY, BData$hh, ':', BData$mm))
# remove the bad temp data (999 is the missing-value sentinel for WTMP)
bad<-which(BData$WTMP==999)
BData<-BData[-bad,]
# Two-year water-temperature time series for both sites.
# NOTE(review): ylab here is a formula ('Temperature'~degree~C), which base
# plot renders as plotmath -- confirm the axis label displays as intended.
png('Output/MSFigures/TempTimeSeries.png', width = 5, height = 4.5, units = 'in', res = 300)
par(mar=c(4.1,4.1,3.1,2.1))
plot(PData$datetime[PData$date>"2016-01-01" & PData$date < "2017-12-31" ],
     PData$wt[PData$date>"2016-01-01"& PData$date < "2017-12-31" ],
     type = 'l', col = 'tomato', xlab = "", ylab = 'Temperature'~degree~C,
     ylim = c(min(BData$WTMP, na.rm=TRUE), max(PData$wt, na.rm=TRUE)))
lines(BData$Date.Time, BData$WTMP, col = 'skyblue')
legend('bottomright',c('Panama','Bermuda'),
       lty=1, col = c('tomato','skyblue'), bty = 'n')
dev.off()
# Restrict Panama data to the same two-year window and add month/year columns.
PData<-PData %>%
  filter(date>"2016-01-01"& date < "2017-12-31")
PData$MM<-month(PData$datetime, label = FALSE)
PData$Y<-year(PData$datetime)
## Make maps for Figure 2
map.world <- map_data("world")
# Subset the world map to the countries of interest.
# Bug fix: `region == c(...)` recycles the vector element-wise and silently
# drops most matching rows; `%in%` is the set-membership test intended here.
# Also corrected misspelled names ('Hondurus' -> 'Honduras',
# 'Columbia' -> 'Colombia'), which matched nothing in map_data("world").
Atlantic<-map.world %>%
  filter(region %in% c('Bermuda','Canada', 'USA', 'Mexico',
                       'Guatemala','Honduras', 'El Salvador','Belize',
                       'Panama', 'Colombia'))
# make some maps
# shapefiles from http://datapages.com/gis-map-publishing-program/gis-open-files/global-framework/global-heat-flow-database/shapefiles-list
# http://www.diva-gis.org/gdata
# Country outlines read as sf objects.
aoi_boundary_Bermuda <- st_read(
  "Data/Mapping/BMU_adm/BMU_adm0.shp")
aoi_boundary_Panama <- st_read(
  "Data/Mapping/PAN_adm/PAN_adm0.shp")
# plot of Bermuda
berm<-ggplot() +
  geom_sf(data = aoi_boundary_Bermuda, size = 2, color = "black", fill = "lightblue") +
  ggtitle("Bermuda") +
  coord_sf()+
  theme_light()
ggsave('Output/MSFigures/berm_map.png', berm, device = 'png', width = 5, height = 5)
# plot of Panama
pan<-ggplot() +
  geom_sf(data = aoi_boundary_Panama, size = 2, color = "black", fill = "tomato") +
  ggtitle("Panama") +
  coord_sf()+
  theme_light()
ggsave('Output/MSFigures/pan_map.png', pan, device = 'png', width = 5, height = 5)
# North Atlantic overview map (western hemisphere, 0-50 N).
NAO<-ggplot() +
  geom_polygon(data = map.world, aes(x = long, y = lat, group = group))+
  coord_sf(xlim = c(-100,0), ylim = c(0,50))+
  xlab("")+
  ylab("")+
  theme_light()
ggsave('Output/MSFigures/NAO_map.pdf', NAO, device = 'pdf', width = 10, height = 5)
# calculate average yearly max for year site
# (results are printed to the console, not stored)
BData %>%
  group_by(YY) %>%
  summarise(max = max(WTMP, na.rm=T)) %>% # take yearly max
  summarise(mean = mean(max)) # take average max
PData %>%
  group_by(Y) %>%
  summarise(max = max(wt, na.rm=T))%>% # take yearly max
  summarise(mean = mean(max)) # take average max
|
/Scripts/TemperatureScript.R
|
no_license
|
njsilbiger/Comparative_thermal_tolerance_of_Ofranski
|
R
| false
| false
| 3,606
|
r
|
### plot temperature data from Panama and Bermuda
## created by Nyssa Silbiger
## edited on 11/30/2018
#############
library(tidyverse)
library(lubridate)
library(sf)
library(gridExtra)
library(grid)
# Panama temperature
PData<-read.csv('Data/TemperatureData/PanamaTemperature.csv')
# remove the missing data
PData<-PData[PData$chk_note=='good',]
# make dates
PData$datetime<-parse_date_time(PData$datetime, c('dmy_hms','dmy_hm')) # they are in multiple formats
PData$date<-dmy(PData$date)
# Bermuda Data
BData2016<-read.table('Data/TemperatureData/bepb6h2016.txt')
colnames(BData2016)<-c('YY', 'MM', 'DD', 'hh', 'mm', 'WDIR', 'WSPD', 'GST', 'WVHT',
'DPD', 'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE')
BData2017<-read.table('Data/TemperatureData/bepb6h2017.txt')
colnames(BData2017)<-c('YY', 'MM', 'DD', 'hh', 'mm', 'WDIR', 'WSPD', 'GST', 'WVHT',
'DPD', 'APD', 'MWD', 'PRES', 'ATMP', 'WTMP', 'DEWP', 'VIS', 'TIDE')
BData<-rbind(BData2016,BData2017)
# make dates
BData$Date.Time<-mdy_hm(paste(BData$MM, BData$DD, BData$YY, BData$hh, ':', BData$mm))
# remove the bad temp data
bad<-which(BData$WTMP==999)
BData<-BData[-bad,]
png('Output/MSFigures/TempTimeSeries.png', width = 5, height = 4.5, units = 'in', res = 300)
par(mar=c(4.1,4.1,3.1,2.1))
plot(PData$datetime[PData$date>"2016-01-01" & PData$date < "2017-12-31" ],
PData$wt[PData$date>"2016-01-01"& PData$date < "2017-12-31" ],
type = 'l', col = 'tomato', xlab = "", ylab = 'Temperature'~degree~C,
ylim = c(min(BData$WTMP, na.rm=TRUE), max(PData$wt, na.rm=TRUE)))
lines(BData$Date.Time, BData$WTMP, col = 'skyblue')
legend('bottomright',c('Panama','Bermuda'),
lty=1, col = c('tomato','skyblue'), bty = 'n')
dev.off()
PData<-PData %>%
filter(date>"2016-01-01"& date < "2017-12-31")
PData$MM<-month(PData$datetime, label = FALSE)
PData$Y<-year(PData$datetime)
## Make maps for Figure 2
map.world <- map_data("world")
# Subset the world map to the countries of interest.
# Bug fix: `region == c(...)` recycles the vector element-wise and silently
# drops most matching rows; `%in%` is the set-membership test intended here.
# Also corrected misspelled names ('Hondurus' -> 'Honduras',
# 'Columbia' -> 'Colombia'), which matched nothing in map_data("world").
Atlantic<-map.world %>%
  filter(region %in% c('Bermuda','Canada', 'USA', 'Mexico',
                       'Guatemala','Honduras', 'El Salvador','Belize',
                       'Panama', 'Colombia'))
# make some maps
# shapefiles from http://datapages.com/gis-map-publishing-program/gis-open-files/global-framework/global-heat-flow-database/shapefiles-list
# http://www.diva-gis.org/gdata
aoi_boundary_Bermuda <- st_read(
"Data/Mapping/BMU_adm/BMU_adm0.shp")
aoi_boundary_Panama <- st_read(
"Data/Mapping/PAN_adm/PAN_adm0.shp")
# plot of Bermuda
berm<-ggplot() +
geom_sf(data = aoi_boundary_Bermuda, size = 2, color = "black", fill = "lightblue") +
ggtitle("Bermuda") +
coord_sf()+
theme_light()
ggsave('Output/MSFigures/berm_map.png', berm, device = 'png', width = 5, height = 5)
pan<-ggplot() +
geom_sf(data = aoi_boundary_Panama, size = 2, color = "black", fill = "tomato") +
ggtitle("Panama") +
coord_sf()+
theme_light()
ggsave('Output/MSFigures/pan_map.png', pan, device = 'png', width = 5, height = 5)
NAO<-ggplot() +
geom_polygon(data = map.world, aes(x = long, y = lat, group = group))+
coord_sf(xlim = c(-100,0), ylim = c(0,50))+
xlab("")+
ylab("")+
theme_light()
ggsave('Output/MSFigures/NAO_map.pdf', NAO, device = 'pdf', width = 10, height = 5)
# calculate average yearly max for year site
BData %>%
group_by(YY) %>%
summarise(max = max(WTMP, na.rm=T)) %>% # take yearly max
summarise(mean = mean(max)) # take average max
PData %>%
group_by(Y) %>%
summarise(max = max(wt, na.rm=T))%>% # take yearly max
summarise(mean = mean(max)) # take average max
|
# Package-private environment holding the configured error-email recipient.
.email_to <- new.env(parent = emptyenv())
#' @export
#' @rdname errors
on_error_email_to <- function(recipient) {
  .email_to$email <- recipient
  invisible(recipient)
}
#' @export
#' @rdname errors
email_to <- function() {
  .email_to$email
}
#' @export
#' @rdname errors
gmail_id <- function() {
  Sys.getenv("GMAIL_ID")
}
#' @export
#' @rdname errors
gmail_secret <- function() {
  Sys.getenv("GMAIL_SECRET")
}
#' @export
#' @rdname errors
gmail_email <- function() {
  # NOTE(review): `gmail()` is not defined in this file -- presumably a
  # package-internal helper; confirm it exists and returns the full address.
  gmail("ser.twitteracct")
}
# Wrap an error message in a timestamped HTML heading for the email body.
build_error_html <- function(.error) {
  paste("<h2>Error in SER code:", Sys.time(), "</h2> \n", .error)
}
#' Email errors in code
#'
#' `email_on_error()` will email on error. Use it with `action_safely()` to wrap
#' a function using the emailer. Set the email recipient globally with
#' `on_error_email_to()` and retrieve it with `email_to()`.
#'
#' @param .e the error message
#' @param recipient an email address.
#' @param .f a function to wrap in the email error catching function
#' @param .msg optional extra context appended to the email subject line.
#'
#' @return a character vector containing the error email
#' @export
#'
#' @rdname errors
email_on_error <- function(.e, recipient = email_to(), .msg = NULL) {
  # NOTE(review): authorize_gmailr() is defined elsewhere in the package.
  authorize_gmailr()
  email_msg <- build_error_html(.e)
  gmailr::mime() %>%
    gmailr::to(recipient) %>%
    gmailr::from("ser.twitteracct@gmail.com") %>%
    gmailr::subject(paste("Error in SER code:", Sys.time(), .msg)) %>%
    gmailr::html_body(email_msg) %>%
    gmailr::send_message()
  # Return the original condition so callers can still inspect it.
  .e
}
#' @export
#' @rdname errors
action_safely <- function(.f, .msg = NULL) {
  # Returns a wrapped version of .f that emails (rather than raises) on error.
  function(...) {
    tryCatch(
      .f(...),
      error = purrr::partial(email_on_error, .msg = .msg)
    )
  }
}
|
/R/error_email.R
|
permissive
|
SERTwitter/ser
|
R
| false
| false
| 1,651
|
r
|
.email_to <- new.env(parent = emptyenv())
#' @export
#' @rdname errors
on_error_email_to <- function(recipient) {
.email_to$email <- recipient
invisible(recipient)
}
#' @export
#' @rdname errors
email_to <- function() {
.email_to$email
}
#' @export
#' @rdname errors
gmail_id <- function() {
Sys.getenv("GMAIL_ID")
}
#' @export
#' @rdname errors
gmail_secret <- function() {
Sys.getenv("GMAIL_SECRET")
}
#' @export
#' @rdname errors
gmail_email <- function() {
gmail("ser.twitteracct")
}
build_error_html <- function(.error) {
paste("<h2>Error in SER code:", Sys.time(), "</h2> \n", .error)
}
#' Email errors in code
#'
#' `email_on_error()` will email on error. Use it with `action_safely()` to wrap
#' a function using the emailer. Set the email recipient globally with
#' `on_error_email_to()` and retrieve it with `email_to()`.
#'
#' @param .e the error message
#' @param recipient an email address.
#' @param .f a function to wrap in the email error catching function
#'
#' @return a character vector containing the error email
#' @export
#'
#' @rdname errors
email_on_error <- function(.e, recipient = email_to(), .msg = NULL) {
authorize_gmailr()
email_msg <- build_error_html(.e)
gmailr::mime() %>%
gmailr::to(recipient) %>%
gmailr::from("ser.twitteracct@gmail.com") %>%
gmailr::subject(paste("Error in SER code:", Sys.time(), .msg)) %>%
gmailr::html_body(email_msg) %>%
gmailr::send_message()
.e
}
#' @export
#' @rdname errors
action_safely <- function(.f, .msg = NULL) {
function(...) {
tryCatch(
.f(...),
error = purrr::partial(email_on_error, .msg = .msg)
)
}
}
|
# Read the full dataset ('?' marks missing values), then keep only
# 1-2 February 2007.
x <- read.table('household_power_consumption.txt', header = TRUE, sep = ';', na.strings = '?')
x <- x[x$Date == '1/2/2007' | x$Date == '2/2/2007', ]
# Combine date and time into a single timestamp. as.POSIXct avoids
# storing a list-based POSIXlt column inside the data frame.
x$Date_time <- as.POSIXct(strptime(paste(x$Date, x$Time), "%d/%m/%Y %H:%M:%S"))
# Plot the required graph: Global Active Power over the two days.
png(filename = 'plot2.png', units = 'px', height = 480, width = 480)
plot(x$Date_time, x$Global_active_power,
     ylab = 'Global Active Power (kilowatts)', xlab = '', type = "l")
dev.off()
|
/plot2.R
|
no_license
|
ahmedshawky1/ExData_Plotting1
|
R
| false
| false
| 411
|
r
|
#Read Data
x<-read.table('household_power_consumption.txt',header=TRUE,sep=';',na.strings='?')
x<-x[x$Date=='1/2/2007' | x$Date=='2/2/2007',]
x$Date_time <-strptime(paste(x$Date , x$Time) , "%d/%m/%Y %H:%M:%S")
#plot Req graph
png(filename='plot2.png', units='px', height=480,width=480)
plot(x$Date_time,x$Global_active_power,ylab='Global Active Power (kilowatts)', xlab='' , type="l")
dev.off()
|
# Extracted package example for groupedstats::grouped_robustslr.
library(groupedstats)
### Name: grouped_robustslr
### Title: Function to run robust simple linear regression (slr) on
###   multiple variables across multiple grouping variables.
### Aliases: grouped_robustslr
### ** Examples
# in case of just one grouping variable
# Fits a robust slr of each dep.var on the corresponding indep.var,
# separately within each level of Species.
groupedstats::grouped_robustslr(
  data = iris,
  dep.vars = c(Sepal.Length, Petal.Length),
  indep.vars = c(Sepal.Width, Petal.Width),
  grouping.vars = Species
)
|
/data/genthat_extracted_code/groupedstats/examples/grouped_robustslr.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 438
|
r
|
library(groupedstats)
### Name: grouped_robustslr
### Title: Function to run robust simple linear regression (slr) on
### multiple variables across multiple grouping variables.
### Aliases: grouped_robustslr
### ** Examples
# in case of just one grouping variable
groupedstats::grouped_robustslr(
data = iris,
dep.vars = c(Sepal.Length, Petal.Length),
indep.vars = c(Sepal.Width, Petal.Width),
grouping.vars = Species
)
|
# Install-if-missing loading. NOTE(review): require() inside install-check
# is conventional here, but library() would fail louder on install failure.
if(!require(dplyr)) {install.packages("dplyr"); library(dplyr)}
if(!require(tm)) {install.packages("tm"); library(tm)}
# Provides `all_data` (and presumably `percent()` used below -- confirm).
source("read-in-data.R")
#look at ballot designations
#figure out the most successful full designations
designation_winners <- count(filter(all_data, Won), Ballot.Designation)
designation_losers <- count(filter(all_data, !Won), Ballot.Designation)
designation_success <- full_join(designation_winners, designation_losers, by = "Ballot.Designation")
# Designations appearing only among winners or only among losers get NA
# counts from the full join; treat those as zero.
designation_success[is.na(designation_success)] <- 0
designation_success <- transmute(designation_success, Ballot.Designation,
                                 Tries = n.x+n.y,
                                 Success.Rate = n.x/Tries) %>%
  filter(Tries > 1) %>%
  arrange(desc(Success.Rate),desc(Tries))
#figure out the most successful terms in designations
# Normalise separators so tm tokenises on whitespace.
all_data$Designation.Cleaned <- gsub("/|,", " ", all_data$Ballot.Designation)
# NOTE(review): VectorSource is given a one-column data frame here
# (select(...)), not a character vector -- confirm tm treats each
# designation as a separate document as intended.
term_winners <- Corpus(VectorSource(select(filter(all_data, Won), Designation.Cleaned))) %>%
  TermDocumentMatrix() %>%
  as.matrix() %>%
  data.frame()
term_winners <- cbind(rownames(term_winners), term_winners)
colnames(term_winners) <- c("Term", "n")
term_losers <- Corpus(VectorSource(select(filter(all_data, !Won), Designation.Cleaned))) %>%
  TermDocumentMatrix() %>%
  as.matrix() %>%
  data.frame()
term_losers <- cbind(rownames(term_losers), term_losers)
colnames(term_losers) <- c("Term", "n")
term_success <- full_join(term_winners, term_losers, by = "Term")
term_success[is.na(term_success)] <- 0
term_success <- transmute(term_success, Term,
                          Tries = n.x+n.y,
                          Success.Rate = n.x/Tries) %>%
  filter(Tries > 1) %>%
  arrange(desc(Success.Rate),desc(Tries))
# NOTE(review): percent() is not loaded in this script (likely
# scales::percent via read-in-data.R) -- confirm; also note
# designation_success above is left unformatted, inconsistently.
term_success$Success.Rate <- percent(term_success$Success.Rate)
|
/explore-ballot-designation.R
|
no_license
|
ccowens/Recent-School-Board-Elections-in-WCCUSD
|
R
| false
| false
| 1,813
|
r
|
if(!require(dplyr)) {install.packages("dplyr"); library(dplyr)}
if(!require(tm)) {install.packages("tm"); library(tm)}
source("read-in-data.R")
#look at ballot designations
#figure out the most successful full designations
designation_winners <- count(filter(all_data, Won), Ballot.Designation)
designation_losers <- count(filter(all_data, !Won), Ballot.Designation)
designation_success <- full_join(designation_winners, designation_losers, by = "Ballot.Designation")
designation_success[is.na(designation_success)] <- 0
designation_success <- transmute(designation_success, Ballot.Designation,
Tries = n.x+n.y,
Success.Rate = n.x/Tries) %>%
filter(Tries > 1) %>%
arrange(desc(Success.Rate),desc(Tries))
#figure out the most successful terms in designations
all_data$Designation.Cleaned <- gsub("/|,", " ", all_data$Ballot.Designation)
term_winners <- Corpus(VectorSource(select(filter(all_data, Won), Designation.Cleaned))) %>%
TermDocumentMatrix() %>%
as.matrix() %>%
data.frame()
term_winners <- cbind(rownames(term_winners), term_winners)
colnames(term_winners) <- c("Term", "n")
term_losers <- Corpus(VectorSource(select(filter(all_data, !Won), Designation.Cleaned))) %>%
TermDocumentMatrix() %>%
as.matrix() %>%
data.frame()
term_losers <- cbind(rownames(term_losers), term_losers)
colnames(term_losers) <- c("Term", "n")
term_success <- full_join(term_winners, term_losers, by = "Term")
term_success[is.na(term_success)] <- 0
term_success <- transmute(term_success, Term,
Tries = n.x+n.y,
Success.Rate = n.x/Tries) %>%
filter(Tries > 1) %>%
arrange(desc(Success.Rate),desc(Tries))
term_success$Success.Rate <- percent(term_success$Success.Rate)
|
# Script used to add observed catch and observed biomass to species params
# data frame `nsParams`
library(mizerHowTo)
time_averaged_catches <- readr::read_csv("data-raw/time-averaged-catches.csv")
time_averaged_SSB <- readr::read_csv("data-raw/time-averaged-SSB.csv")
library(dplyr)
# Join observed catch and SSB (column names suggest 2014-19 averages --
# confirm) onto the package's species parameter table, renaming to the
# column names used downstream.
nsParams <- nsParams %>%
  left_join(time_averaged_catches, by = "species") %>%
  left_join(time_averaged_SSB, by = "species") %>%
  rename(catch_observed = Catch_1419_tonnes,
         biomass_observed = SSB_1419)
# NOTE(review): `sp` is not defined anywhere in this script -- this line
# likely intends `nsParams$w_mat` (maturity size as cutoff); confirm.
nsParams$cutoff_size <- sp$w_mat
# Overwrite the packaged nsParams dataset with the augmented table.
usethis::use_data(nsParams, overwrite = TRUE)
|
/data-raw/add_observations.R
|
no_license
|
abesolberg/mizerHowTo
|
R
| false
| false
| 579
|
r
|
# Script used to add observed catch and observed biomass to species params
# data frame `nsParams`
library(mizerHowTo)
time_averaged_catches <- readr::read_csv("data-raw/time-averaged-catches.csv")
time_averaged_SSB <- readr::read_csv("data-raw/time-averaged-SSB.csv")
library(dplyr)
nsParams <- nsParams %>%
left_join(time_averaged_catches, by = "species") %>%
left_join(time_averaged_SSB, by = "species") %>%
rename(catch_observed = Catch_1419_tonnes,
biomass_observed = SSB_1419)
nsParams$cutoff_size <- sp$w_mat
usethis::use_data(nsParams, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_de_cluster.R
\name{sc_de_cluster}
\alias{sc_de_cluster}
\title{Find differentially expressed genes across clusters}
\usage{
sc_de_cluster(
sce_list,
cluster_resolution,
main_result_path,
supplemental_result_path,
cache_path,
result_name
)
}
\arguments{
\item{sce_list}{list of SingleCellExperiment objects}
\item{cluster_resolution}{cluster resolution to use}
\item{main_result_path}{path for storing main results}
\item{supplemental_result_path}{path for storing supplemental results}
\item{cache_path}{path for caching results}
\item{result_name}{unique result name used in caching}
}
\value{
}
\description{
presto::wilcoxauc is used for differential expression on each individual cluster, and metap::minimump is used to combine p-values across samples (similar to FindConservedMarkers in Seurat).
}
|
/man/sc_de_cluster.Rd
|
no_license
|
keshav-motwani/tregPaper
|
R
| false
| true
| 902
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_de_cluster.R
\name{sc_de_cluster}
\alias{sc_de_cluster}
\title{Find differentially expressed genes across clusters}
\usage{
sc_de_cluster(
sce_list,
cluster_resolution,
main_result_path,
supplemental_result_path,
cache_path,
result_name
)
}
\arguments{
\item{sce_list}{list of SingleCellExperiment objects}
\item{cluster_resolution}{cluster resolution to use}
\item{main_result_path}{path for storing main results}
\item{supplemental_result_path}{path for storing supplemental results}
\item{cache_path}{path for caching results}
\item{result_name}{unique result name used in caching}
}
\value{
}
\description{
presto::wilcoxauc is used for differential expression on each individual cluster, and metap::minimump is used to combine p-values across samples (similar to FindConservedMarkers in Seurat).
}
|
# Bivariate analysis of each predictor against the target variable (Attrition).
# Density plots are used for continuous variables as they are lucid and easy to
# compare; bar charts are used for categorical variables.
# NOTE: attrition_rate below uses mean(Attrition == "yes") * 100, the idiomatic
# equivalent of sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100 (NA propagates
# identically in both forms).
################### A) Employee Background #####################

# Age
master %>%
  ggplot(aes(x = Age, fill = Attrition)) +
  geom_density(alpha = 0.5) +
  ggtitle("Attrition with Age")
# Younger employees (up to ~30 years) have a higher attrition rate

# Gender
master %>% group_by(Gender) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = Gender, fill = Attrition)) +
  geom_bar(col = "black", position = "fill") +
  ggtitle("Attrition with Gender")
# Males have a slightly higher attrition rate than females

# MaritalStatus
master %>% group_by(MaritalStatus) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = MaritalStatus, fill = Attrition)) +
  geom_bar(col = "black", position = "fill") +
  ggtitle("Attrition with MaritalStatus")
# Singles have a high attrition rate.
# They are more likely to leave probably because they do not have a spouse or
# kids to worry about, hence it is less risky to leave

# Education
master %>% group_by(Education) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = Education, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with Education")
# Almost 19% attrition rate for employees who have studied till +2
# Probably left for higher studies
# Maximum attrition for employees who have completed a Bachelor's degree
# Either for higher studies or for better job opportunities

# Education vs income
master %>% group_by(Education) %>%
  summarise(avg = median(MonthlyIncome))
# Surprisingly, the difference is not very significant

# EducationField
master %>% group_by(EducationField) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = EducationField, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with EducationField") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# Attrition rate is almost 50% for employees who have studied Human Resources
# Life Sciences and Medical also have high attrition
# Is this because HR employees have low salary?
master %>% group_by(EducationField, Attrition) %>%
  summarise(avg_income = median(MonthlyIncome))
# No

# Employees who have studied HR: do they have the same job role?
HR_edu <- master[which(master$EducationField == "human resources"), ]
# No. Employees who have studied HR belong to the same Department, but their job
# role is not that of HR. This may be the reason for attrition, as these
# employees did not work in the fields they specialize in

# TotalWorkingYears
master %>%
  ggplot(aes(x = TotalWorkingYears, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with TotalWorkingYears")
# Higher attrition for employees whose total working years are less than 6-7
# This is because it gets riskier with age to quit a job due to reasons such as family

# NumCompaniesWorked
master %>%
  ggplot(aes(x = NumCompaniesWorked, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with NumCompaniesWorked")
# Attrition tends to be higher for those employees who have worked in 5-7 companies
###################### B) Position and Experience #####################

# Department
master %>% group_by(Department) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = Department, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with Department") +
  theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Here again, attrition rate is highest for employees working in the HR department

# JobRole
master %>% group_by(JobRole) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = JobRole, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with JobRole") +
  theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Attrition is high for Research Scientists and Sales Executives
# Highest attrition rates are observed for Research Director
master %>% group_by(JobRole) %>%
  summarize(avg = median(MonthlyIncome))
# Median salary is almost the same for all job roles, thus does not seem to be a factor

# Cross-checking with job satisfaction
master %>%
  ggplot(aes(x = JobRole, fill = as.factor(JobSatisfaction))) +
  geom_bar(col = "black") +
  ggtitle("Attrition with JobRole") +
  theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Yes, these are the job roles with the highest number of "Low Satisfaction" employees

# JobLevel
master %>% group_by(JobLevel) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = JobLevel, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with JobLevel")
# Attrition is high for entry or junior/associate level jobs
# Maybe due to low job security or a search for better job opportunities / higher studies

# YearsWithCurrManager
master %>%
  ggplot(aes(x = YearsWithCurrManager, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with YearsWithCurrManager")
# Attrition is high for employees who have been with the same manager for less
# than 1.5 years or for 5-6 years.

# YearsAtCompany
master %>%
  ggplot(aes(x = YearsAtCompany, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with YearsAtCompany")
# Attrition is very high for employees who have been in the company for less than 2-3 years
############################## C) Payment/Salary ####################

# MonthlyIncome
master %>%
  ggplot(aes(x = MonthlyIncome, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with Monthly income") +
  theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Monthly income has not made much of a difference in the attrition
# However, employees with salary between 10,000-20,000 are more likely to resign

# PercentSalaryHike
master %>%
  ggplot(aes(x = PercentSalaryHike, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with PercentSalaryHike")
# Almost the same for both groups

# StockOptionLevel
master %>% group_by(StockOptionLevel) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = StockOptionLevel, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with StockOptionLevel")
# High attrition for employees having stock option level of 1 and 2
# Less hassle for those employees who do not own a stock in the company to resign
##################### D) Travel and Work Time #########################

# DistanceFromHome
master %>%
  ggplot(aes(x = DistanceFromHome, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with Distance From Home")
# Attrition rate sees a slight increase between 10-20 kms

# BusinessTravel
master %>% group_by(BusinessTravel) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = BusinessTravel, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with BusinessTravel") +
  theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Employees who travel frequently have a much higher attrition rate

# Overtime
master %>% group_by(Overtime) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = Overtime, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with Overtime")
# Attrition rate is higher for employees working overtime

# NumOvertimeDays
master %>%
  ggplot(aes(x = NumOvertimeDays, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with Num of OvertimeDays")
# Employees who have worked overtime more than 150 days have a very high
# attrition rate compared to employees who have worked overtime for fewer days

# Average_HoursWorked
master %>%
  ggplot(aes(x = Average_HoursWorked, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with Average_HoursWorked")
# Attrition is very high for employees who work more than 8 hours a day

# LeaveTaken
master %>%
  ggplot(aes(x = LeaveTaken, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with LeaveTaken")
# Attrition is high for those employees who have taken leave for 6-8 days and 14-16 days
################## E) Employee Satisfaction ##########################

# EnvironmentSatisfaction
master %>% group_by(EnvironmentSatisfaction) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = EnvironmentSatisfaction, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with EnvironmentSatisfaction")
# Attrition is higher for those employees who have low environment satisfaction

# JobSatisfaction
master %>% group_by(JobSatisfaction) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = JobSatisfaction, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with JobSatisfaction")
# Attrition is higher for those employees who have low job satisfaction too

# WorkLifeBalance
master %>% group_by(WorkLifeBalance) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = WorkLifeBalance, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with WorkLifeBalance")
# Again, attrition is higher for those employees who have low work-life balance
################## F) Employee Performance ##########################

# JobInvolvement
master %>% group_by(JobInvolvement) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = JobInvolvement, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with JobInvolvement")
# Again, as expected, employees with low involvement in the job are more likely to resign

# PerformanceRating
master %>% group_by(PerformanceRating) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = PerformanceRating, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with PerformanceRating") +
  theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Employees with higher performance rating have higher attrition rates

# EmployeeRating
master %>%
  ggplot(aes(x = EmployeeRating, fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with EmployeeRating")
# Attrition is very high for employees whose rating is below 13;
# after 13 it decreases gradually
################## G) Employee Development ##########################

# YearsSinceLastPromotion
master %>%
  ggplot(aes(x = YearsSinceLastPromotion , fill = Attrition)) +
  geom_density(alpha = 0.7) +
  ggtitle("Attrition with YearsSinceLastPromotion ")
# Employees with 0-1 and 6-7 years since last promotion have higher attrition rates

# TrainingTimesLastYear
master %>% group_by(TrainingTimesLastYear) %>%
  summarize(attrition_rate = mean(Attrition == "yes") * 100)
master %>%
  ggplot(aes(x = TrainingTimesLastYear, fill = Attrition)) +
  geom_bar(col = "black") +
  ggtitle("Attrition with TrainingTimesLastYear")
# Employees who received more than 5 trainings in the last year have a very low attrition rate

# Clean up the workspace, keeping only the data frames needed downstream.
# NOTE(review): selectively rm()-ing globals is fragile; kept to preserve the
# original multi-script workflow (the next script expects master/master1).
rm(list = ls()[! ls() %in% c("master","master1")])
dev.off()
############### Please go over to "Model_Building.R" ###############
|
/4_Bivariate Analysis.R
|
no_license
|
siddata01/HR-Analytics-Employee-Attrition-
|
R
| false
| false
| 12,338
|
r
|
# Multivariate analysis against the target variable
# Will be using Density Graphs to do this analysis for cont, variable as it is lucid and easily comprehendable
# will use histograms to do analysis for categorical var.
################### A) Employee Background #####################
#Age
master %>%
ggplot(aes(x = Age, fill = Attrition)) +
geom_density(alpha = 0.5) +
ggtitle("Attrition with Age")
# Younger employees upto 30 years have a higher attrition rate
#Gender
master %>% group_by(Gender) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = Gender, fill = Attrition)) +
geom_bar(col = "black", position = "fill") +
ggtitle("Attrition with Gender")
# Males have a slightly higher attrition rate than females
#MaritalStatus
master %>% group_by(MaritalStatus) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = MaritalStatus, fill = Attrition)) +
geom_bar(col = "black", position = "fill") +
ggtitle("Attrition with MaritalStatus")
# Singles have a high attrition rate.
# They are more likely to leave probably because they do not have spouse or kids to worry about, hence less risky to leave
#Education
master %>% group_by(Education) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = Education, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with Education")
# almost 19% attrition rate for employees who have studied till +2
# Probably have left for higher studies
# Maximum attrition for employees who have completed Bachelor's
# Either for higher studies or for better job opportunities
#education vs income
master %>% group_by(Education) %>%
summarise(avg = median(MonthlyIncome))
#Surprisingly, the difference is not very significant
#EducationField
master %>% group_by(EducationField) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = EducationField, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with EducationField") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
#Attrition Rate is almost 50% for employees who have studied Human Resources
# Life Sciences and Medical also have high attrition
# Is this because HR employees have low salary
master %>% group_by(EducationField, Attrition) %>%
summarise(avg_income = median(MonthlyIncome))
# No
#Employees who have studied HR, do they have the same job role?
HR_edu <- master[which(master$EducationField == "human resources"),]
#No, Employees who have studied HR, belong to the same Department, but their job role is not that of HR.
#This maybe the reason for attrition as these employees did not work in fields they specialize in
#TotalWorkingYears
master %>%
ggplot(aes(x = TotalWorkingYears, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with TotalWorkingYears")
# Higher Attrition for employees whose Total working years is less than 6-7
# This is because it gets riskier with age to quit a job due to reasons such as family
#NumCompaniesWorked
master %>%
ggplot(aes(x = NumCompaniesWorked, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with NumCompaniesWorked")
# attrition tends to be higher for those employees who have worked in 5-7 companies
###################### B) Position and Experience #####################
#Department
master %>% group_by(Department) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = Department, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with Department") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Here again, Attrition Rate is highest for employees working in the HR department
#JobRole
master %>% group_by(JobRole) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = JobRole, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with JobRole") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Attrition is high for Research Scientist and Sales Executives
# Highest attrition rates are observed for Research Director
master %>% group_by(JobRole) %>%
summarize(avg = median(MonthlyIncome))
# median Salary is almost same for all job roles, thus,does not seem to be a factor
#checking with job satisfaction
master %>%
ggplot(aes(x = JobRole, fill = as.factor(JobSatisfaction))) +
geom_bar(col = "black") +
ggtitle("Attrition with JobRole") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))
# yes, these are the job roles with the highest number of "Low Satisfaction" employees
# JobLevel
master %>% group_by(JobLevel) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = JobLevel, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with JobLevel")
# Attrition is high for entry or junior/associate level jobs
# maybe due to low job security or in search better job opportunities/ higher studies
#YearsWithCurrManager
master %>%
ggplot(aes(x = YearsWithCurrManager, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with YearsWithCurrManager")
#Attrition is high for employees if he/she is with the same manager for less than 1.5 years or 5-6 years.
# YearsAtCompany
master %>%
ggplot(aes(x = YearsAtCompany, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with YearsAtCompany")
# Attrition is very high for employees who have been in the company for less than 2-3 years
############################## C) Payment/Salary ####################
# MonthlyIncome
master %>%
ggplot(aes(x = MonthlyIncome, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with Monthly income") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))
# Monthly income has not made much of a difference in the attrition
# However employees with salary between 10,000-20,000 are more likely to resign
#PercentSalaryHike
master %>%
ggplot(aes(x = PercentSalaryHike, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with PercentSalaryHike")
# Almost the same
#StockOptionLevel
master %>% group_by(StockOptionLevel) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = StockOptionLevel, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with StockOptionLevel")
# high attrition for employees having Stock option level of 1 and 2
# Less hassle for those Employees who do not own a stock in the company to resign
##################### D) Travel and Work Time #########################
#Distance From Home
master %>%
ggplot(aes(x = DistanceFromHome, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with Distance From Home")
#Attrition rate sees a slight increase between 10-20 kms
#BusinessTravel
master %>% group_by(BusinessTravel) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = BusinessTravel, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with BusinessTravel") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))
# employees who travel frequently have a much higher attrition rate
#Overtime
master %>% group_by(Overtime) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = Overtime, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with Overtime")
# Attrition rate is higher for employees working overtime
#NumOvertimeDays
master %>%
ggplot(aes(x = NumOvertimeDays, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with Num of OvertimeDays")
# Employees who have worked overtime more than 150 days have very high attrition rate
# as compared to employees who have worked overtime for less than 150 days
#Average_HoursWorked
master %>%
ggplot(aes(x = Average_HoursWorked, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with Average_HoursWorked")
# Attrition is very high for employees who work more than 8 hours a day
#LeaveTaken
master %>%
ggplot(aes(x = LeaveTaken, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with LeaveTaken")
# attrition is high for those employees who have taken leave for 6-8 days and 14-16 days
################## E) Employee Satisfaction ##########################
# EnvironmentSatisfaction
master %>% group_by(EnvironmentSatisfaction) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = EnvironmentSatisfaction, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with EnvironmentSatisfaction")
# Attrition is higher for those employees who have low Environment Satisfaction
#JobSatisfaction
master %>% group_by(JobSatisfaction) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = JobSatisfaction, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with JobSatisfaction")
# Attrition is higher for those employees who have low Job satisfaction too
#WorkLifeBalance
master %>% group_by(WorkLifeBalance) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = WorkLifeBalance, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with WorkLifeBalance")
# Again, Attrition is higher for those employees who have low work life balance
################## F) Employee Performance ##########################
# JobInvolvement
master %>% group_by(JobInvolvement) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = JobInvolvement, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with JobInvolvement")
#Again, as obvious, employees with low involvement in the job are more likely to resign
# PerformanceRating
master %>% group_by(PerformanceRating) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = PerformanceRating, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with PerformanceRating") +
theme(axis.text.x = element_text(angle = 30, hjust = 1))
# employees with higher performance rating have higher attrition rates
# EmployeeRating
master %>%
ggplot(aes(x = EmployeeRating, fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with EmployeeRating")
# attrition is very high for employees whose rating is below 13.
# after 13 it decreases gradually
################## G) Employee Development ##########################
# YearsSinceLastPromotion
master %>%
ggplot(aes(x = YearsSinceLastPromotion , fill = Attrition)) +
geom_density(alpha = 0.7) +
ggtitle("Attrition with YearsSinceLastPromotion ")
# employees with 0-1 and 6-7 years since last promotion have higher attrition rates
# TrainingTimesLastYear
master %>% group_by(TrainingTimesLastYear) %>%
summarize(attrition_rate = (sum(ifelse(Attrition == "yes", 1, 0)) / n() * 100))
master %>%
ggplot(aes(x = TrainingTimesLastYear, fill = Attrition)) +
geom_bar(col = "black") +
ggtitle("Attrition with TrainingTimesLastYear")
# employees who have received more than 5 trainings in the last year have very low attrition rate
rm(list = ls()[! ls() %in% c("master","master1")])
dev.off()
#clear console
############### Please go over to "Model_Building.R"###############
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizeResults.R
\name{plotRt}
\alias{plotRt}
\title{Creates a plot of the effective reproductive number}
\usage{
plotRt(
rData,
includeRtAvg = FALSE,
includeRtCI = FALSE,
includeRtAvgCI = FALSE
)
}
\arguments{
\item{rData}{A list that is the output of \code{\link{estimateR}}. It should contain
the dataframes \code{RtDf}, \code{RtAvgDf}, and vectors \code{timeFrame} and \code{rangeForAvg}}
\item{includeRtAvg}{A logical. If TRUE, a horizontal line will be drawn for the average
Rt value over \code{rangeForAvg} and verticle lines will be drawn at the
\code{rangeForAvg} values.}
\item{includeRtCI}{A logical. If TRUE, error bars will be added to the Rt values
representing the bootstrap confidence intervals.}
\item{includeRtAvgCI}{A logical. If TRUE, horizontal lines will be drawn around the Rt average
line representing the bootstrap confidence interval.}
}
\description{
The function \code{plotRt} creates a plot of the effective reproductive number (Rt) over
the course of the outbreak. Using various options, the plot can include the overall average
Rt value for the outbreak and the confidence intervals.
}
\details{
The main input \code{rData} should be the output of \code{\link{estimateR}} with the
time-level reproductive numbers, overall average, range used to calculate that average,
and time frame.
The options \code{includeRtCI} and \code{includeRtAvgCI} add confidence interval bounds
to the plot. If set to true, \code{rData} should be from a call of \code{\link{estimateR}}
with \code{bootSamples > 0} so that confidence intervals are available.
If \code{includeRtAvgCI} is set to \code{TRUE}, a line for the point estimate of the average
Rt value will be drawn even if \code{includeRtAvg} is set to \code{FALSE}.
}
\examples{
## Use the nbResults data frame included in the package which has the results
# of the nbProbabilities() function on a TB-like outbreak.
## Getting initial estimates of the reproductive number
# (without specifying rangeForAvg and without confidence intervals)
rInitial <- estimateR(nbResults, dateVar = "infectionDate",
indIDVar = "individualID", pVar = "pScaled",
timeFrame = "months")
## Finding the stable portion of the outbreak for rangeForAvg using the plot
plotRt(rInitial)
cut1 <- 25
cut2 <- 125
## Finding the final reproductive number estimates with confidence intervals
# NOTE should run with bootSamples > 10.
rFinal <- estimateR(nbResults, dateVar = "infectionDate",
indIDVar = "individualID", pVar = "pScaled",
timeFrame = "months", rangeForAvg = c(cut1, cut2),
bootSamples = 10, alpha = 0.05)
## Plotting the final result
plotRt(rFinal, includeRtAvg = TRUE, includeRtCI = TRUE, includeRtAvgCI = TRUE)
}
\seealso{
\code{\link{nbProbabilities}} \code{\link{estimateR}}
}
|
/man/plotRt.Rd
|
no_license
|
cran/nbTransmission
|
R
| false
| true
| 3,001
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualizeResults.R
\name{plotRt}
\alias{plotRt}
\title{Creates a plot of the effective reproductive number}
\usage{
plotRt(
rData,
includeRtAvg = FALSE,
includeRtCI = FALSE,
includeRtAvgCI = FALSE
)
}
\arguments{
\item{rData}{A list that is the output of \code{\link{estimateR}}. It should contain
the dataframes \code{RtDf}, \code{RtAvgDf}, and vectors \code{timeFrame} and \code{rangeForAvg}}
\item{includeRtAvg}{A logical. If TRUE, a horizontal line will be drawn for the average
Rt value over \code{rangeForAvg} and vertical lines will be drawn at the
\code{rangeForAvg} values.}
\item{includeRtCI}{A logical. If TRUE, error bars will be added to the Rt values
representing the bootstrap confidence intervals.}
\item{includeRtAvgCI}{A logical. If TRUE, horizontal lines will be drawn around the Rt average
line representing the bootstrap confidence interval.}
}
\description{
The function \code{plotRt} creates a plot of the effective reproductive number (Rt) over
the course of the outbreak. Using various options, the plot can include the overall average
Rt value for the outbreak and the confidence intervals.
}
\details{
The main input \code{rData} should be the output of \code{\link{estimateR}} with the
time-level reproductive numbers, overall average, range used to calculate that average,
and time frame.
The options \code{includeRtCI} and \code{includeRtAvgCI} add confidence interval bounds
to the plot. If set to true, \code{rData} should be from a call of \code{\link{estimateR}}
with \code{bootSamples > 0} so that confidence intervals are available.
If \code{includeRtAvgCI} is set to \code{TRUE}, a line for the point estimate of the average
Rt value will be drawn even if \code{includeRtAvg} is set to \code{FALSE}.
}
\examples{
## Use the nbResults data frame included in the package which has the results
# of the nbProbabilities() function on a TB-like outbreak.
## Getting initial estimates of the reproductive number
# (without specifying rangeForAvg and without confidence intervals)
rInitial <- estimateR(nbResults, dateVar = "infectionDate",
indIDVar = "individualID", pVar = "pScaled",
timeFrame = "months")
## Finding the stable portion of the outbreak for rangeForAvg using the plot
plotRt(rInitial)
cut1 <- 25
cut2 <- 125
## Finding the final reproductive number estimates with confidence intervals
# NOTE should run with bootSamples > 10.
rFinal <- estimateR(nbResults, dateVar = "infectionDate",
indIDVar = "individualID", pVar = "pScaled",
timeFrame = "months", rangeForAvg = c(cut1, cut2),
bootSamples = 10, alpha = 0.05)
## Plotting the final result
plotRt(rFinal, includeRtAvg = TRUE, includeRtCI = TRUE, includeRtAvgCI = TRUE)
}
\seealso{
\code{\link{nbProbabilities}} \code{\link{estimateR}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/games_objects.R
\name{InstanceAndroidDetails}
\alias{InstanceAndroidDetails}
\title{InstanceAndroidDetails Object}
\usage{
InstanceAndroidDetails(enablePiracyCheck = NULL, packageName = NULL,
preferred = NULL)
}
\arguments{
\item{enablePiracyCheck}{Flag indicating whether the anti-piracy check is enabled}
\item{packageName}{Android package name which maps to Google Play URL}
\item{preferred}{Indicates that this instance is the default for new installations}
}
\value{
InstanceAndroidDetails object
}
\description{
InstanceAndroidDetails Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
This is a JSON template for the Android instance details resource.
}
|
/googlegamesv1.auto/man/InstanceAndroidDetails.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 780
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/games_objects.R
\name{InstanceAndroidDetails}
\alias{InstanceAndroidDetails}
\title{InstanceAndroidDetails Object}
\usage{
InstanceAndroidDetails(enablePiracyCheck = NULL, packageName = NULL,
preferred = NULL)
}
\arguments{
\item{enablePiracyCheck}{Flag indicating whether the anti-piracy check is enabled}
\item{packageName}{Android package name which maps to Google Play URL}
\item{preferred}{Indicates that this instance is the default for new installations}
}
\value{
InstanceAndroidDetails object
}
\description{
InstanceAndroidDetails Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
This is a JSON template for the Android instance details resource.
}
|
## Script to average and aggregate all CMIP5 model projections
## Relies on data downloaded from the CMIP5 website:
##    http://cmip-pcmdi.llnl.gov/cmip5/
## Authors: Andrew Tredennick and Peter Adler
## Email: atredenn@gmail.com
## Date created: 10-10-2013

### Clean the workspace and set working dir
# NOTE(review): rm(list=ls()) and setwd() in scripts are generally discouraged;
# kept here to preserve the original workflow, which assumes this relative path.
rm(list=ls())
setwd("../../data/climate/PROJECTIONS/CMIP5/")

####
####  Load necessary libraries -------------------------------------------------
####
library(reshape2)
library(grid)

####
####  Read in and format precip projections -----------------------------------
####
ppt <- as.data.frame(read.csv("pr.csv", header=FALSE))
tmp <- read.table("COLS_pr.txt")
tmp <- as.character(tmp[,1])
colnames(ppt) <- c("Year", "Month", tmp)
ppt <- melt(ppt, id.vars=c("Year", "Month"))
ppt[,4] <- as.numeric(ppt[,4]) # NAs coerced for a couple December 2099 null values

# Column names are of the form "model.rep.scenario": split them into a
# three-column matrix and bind onto the long data frame.
tmp <- unlist(strsplit(x=as.character(ppt$variable), split=".", fixed=TRUE))
tmp <- matrix(tmp, nrow=length(tmp)/3, ncol=3, byrow=TRUE)
colnames(tmp) <- c("model", "rep", "scenario")
ppt <- cbind(ppt, tmp)
ppt$period <- cut(ppt$Year, breaks=c(1950,2000,2050,2100),
                  include.lowest=TRUE, labels=c("past","present","future"))
# Summer = July-September; everything else is "fall2spr".
ppt$season <- ifelse(ppt$Month > 6 & ppt$Month < 10, "summer", "fall2spr")
pptMeans <- aggregate(value~period+season+scenario+model, data=ppt, FUN=mean)

# Keep only models that provide all three RCP scenarios.
allmods <- unique(pptMeans$model)
keeps <- character(length(allmods))
my_scens <- c("rcp45", "rcp60", "rcp85")
for(i in seq_along(allmods)){
  tmp <- subset(pptMeans, model==allmods[i])
  tmp.scns <- unique(tmp$scenario)
  flag <- sum(!(my_scens %in% tmp.scns))  # number of missing scenarios
  keeps[i] <- if (flag > 0) "no" else "yes"
}
modelkeeps <- data.frame(model=allmods,
                         allscenarios=keeps)
my_mods <- modelkeeps[which(modelkeeps$allscenarios=="yes"), "model"]
ppt_projs <- subset(pptMeans, model %in% my_mods)

# Average across the retained models, then convert the mean daily rate to a
# seasonal total (92 summer days vs. the remaining 273 days of the year).
pptMeans <- aggregate(value~period+season+scenario, data=ppt_projs, FUN=mean)
colnames(pptMeans) <- c("period","season","scenario","value")
pptMeans$days <- ifelse(pptMeans$season=="summer", 92, 365-92)
pptMeans$ppt <- pptMeans$value * pptMeans$days
pptMeans <- reshape(pptMeans[,c("period","season","scenario","ppt")],
                    idvar=c("season","scenario"), timevar="period", direction="wide")
pptMeans$change <- (pptMeans$ppt.future - pptMeans$ppt.past) / pptMeans$ppt.past
####
#### Read in and formate temperature projections ------------------------------
####
Tavg <- as.data.frame(read.csv("tas.csv", header=FALSE))
tmp<-read.table("COLS_tas.txt")
tmp<-as.character(tmp[,1])
colnames(Tavg) <- c("Year", "Month",tmp)
Tavg <- melt(Tavg, id.vars=c("Year", "Month"))
tmp<-unlist(strsplit(x=as.character(ppt$variable),split=".",fixed=T))
tmp<-matrix(tmp,nrow=length(tmp)/3,ncol=3,byrow=T)
colnames(tmp)<-c("model","rep","scenario")
Tavg<-cbind(Tavg,tmp)
Tavg$period<-cut(Tavg$Year,breaks=c(1950,2000,2050,2100),
include.lowest=T,labels=c("past","present","future"))
Tavg$season<-ifelse(Tavg$Month > 3 & ppt$Month < 7,"spring","other")
TavgMeans<-aggregate(as.numeric(value)~period+season+scenario+model,data=Tavg,FUN=mean)
allmods <- unique(TavgMeans$model)
keeps <- character(length(allmods))
my_scens <- c("rcp45", "rcp60", "rcp85")
for(i in 1:length(allmods)){
tmp <- subset(TavgMeans, model==allmods[i])
tmp.scns <- unique(tmp$scenario)
flag <- length(which(my_scens %in% tmp.scns == FALSE))
ifelse(flag>0, keeps[i]<-"no", keeps[i]<-"yes")
}
modelkeeps <- data.frame(model=allmods,
allscenarios=keeps)
my_mods <- modelkeeps[which(modelkeeps$allscenarios=="yes"),"model"]
temp_projs <- subset(TavgMeans, model %in% my_mods)
colnames(temp_projs)[5] <- "value"
TavgMeans<-aggregate(value~period+season+scenario,data=temp_projs,FUN=mean)
colnames(TavgMeans) <- c("period","season","scenario","value")
TavgMeans<-reshape(TavgMeans[,c("period","season","scenario","value")],
idvar=c("season","scenario"),timevar="period",direction="wide")
TavgMeans$change<-TavgMeans$value.future-TavgMeans$value.past
####
#### Write files for use in simulations ---------------------------------------
####
write.csv(pptMeans, "../../../precipitation_projections.csv")
write.csv(TavgMeans, "../../../temperature_projections.csv")
|
/scripts/climate_formatting/Climate_Proj_Calcs_CMIP5.R
|
no_license
|
georgewoolsey/Tredennick_sageAbundance
|
R
| false
| false
| 4,217
|
r
|
## Script to average and aggregate all CMIP5 model projections
## Relies on data downloaded from the CMIP5 website:
## http://cmip-pcmdi.llnl.gov/cmip5/
## Authors: Andrew Tredennick and Peter Adler
## Email: atredenn@gmail.com
## Date created: 10-10-2013
### Clean the workspace and set working dir
rm(list=ls())
setwd("../../data/climate/PROJECTIONS/CMIP5/")
####
#### Load necessary libraries -------------------------------------------------
####
library(reshape2)
library(grid)
####
#### Read in and format precip projections ------------------------------------
####
# pr.csv holds monthly precipitation rates; its data columns are
# "model.rep.scenario" strings listed in COLS_pr.txt.
ppt <- as.data.frame(read.csv("pr.csv", header=FALSE))
tmp<-read.table("COLS_pr.txt")
tmp<-as.character(tmp[,1])
colnames(ppt) <- c("Year", "Month",tmp)
ppt <- melt(ppt, id.vars=c("Year", "Month"))
ppt[,4] <- as.numeric(ppt[,4]) #NAs coerced for a couple December 2099 null values
# Split the "model.rep.scenario" labels into three identifier columns
tmp<-unlist(strsplit(x=as.character(ppt$variable),split=".",fixed=T))
tmp<-matrix(tmp,nrow=length(tmp)/3,ncol=3,byrow=T)
colnames(tmp)<-c("model","rep","scenario")
ppt<-cbind(ppt,tmp)
# Bin years into past/present/future periods and months into seasons
ppt$period<-cut(ppt$Year,breaks=c(1950,2000,2050,2100),
                include.lowest=T,labels=c("past","present","future"))
ppt$season<-ifelse(ppt$Month > 6 & ppt$Month < 10,"summer","fall2spr")
pptMeans<-aggregate(value~period+season+scenario+model,data=ppt,FUN=mean)
# Keep only the models that provide data for all three RCP scenarios
allmods <- unique(pptMeans$model)
keeps <- character(length(allmods))
my_scens <- c("rcp45", "rcp60", "rcp85")
for(i in seq_along(allmods)){
  tmp <- subset(pptMeans, model==allmods[i])
  tmp.scns <- unique(tmp$scenario)
  flag <- length(which(my_scens %in% tmp.scns == FALSE))
  keeps[i] <- if(flag>0) "no" else "yes"
}
modelkeeps <- data.frame(model=allmods,
                         allscenarios=keeps)
my_mods <- modelkeeps[which(modelkeeps$allscenarios=="yes"),"model"]
ppt_projs <- subset(pptMeans, model %in% my_mods)
# Average across the retained models, convert the mean daily rate to a
# seasonal total, and compute the fractional change from past to future
pptMeans<-aggregate(value~period+season+scenario,data=ppt_projs,FUN=mean)
colnames(pptMeans) <- c("period","season","scenario","value")
pptMeans$days<-ifelse(pptMeans$season=="summer",92,365-92)
pptMeans$ppt<-pptMeans$value*pptMeans$days
pptMeans<-reshape(pptMeans[,c("period","season","scenario","ppt")],
                  idvar=c("season","scenario"),timevar="period",direction="wide")
pptMeans$change<-(pptMeans$ppt.future-pptMeans$ppt.past)/pptMeans$ppt.past
####
#### Read in and format temperature projections -------------------------------
####
Tavg <- as.data.frame(read.csv("tas.csv", header=FALSE))
tmp<-read.table("COLS_tas.txt")
tmp<-as.character(tmp[,1])
colnames(Tavg) <- c("Year", "Month",tmp)
Tavg <- melt(Tavg, id.vars=c("Year", "Month"))
# BUG FIX: split Tavg's own column labels (the original split ppt$variable,
# which is only correct if both files happen to share an identical layout)
tmp<-unlist(strsplit(x=as.character(Tavg$variable),split=".",fixed=T))
tmp<-matrix(tmp,nrow=length(tmp)/3,ncol=3,byrow=T)
colnames(tmp)<-c("model","rep","scenario")
Tavg<-cbind(Tavg,tmp)
Tavg$period<-cut(Tavg$Year,breaks=c(1950,2000,2050,2100),
                 include.lowest=T,labels=c("past","present","future"))
# BUG FIX: the upper bound of the season test used ppt$Month; use Tavg$Month
Tavg$season<-ifelse(Tavg$Month > 3 & Tavg$Month < 7,"spring","other")
TavgMeans<-aggregate(as.numeric(value)~period+season+scenario+model,data=Tavg,FUN=mean)
# Keep only the models that provide data for all three RCP scenarios
allmods <- unique(TavgMeans$model)
keeps <- character(length(allmods))
my_scens <- c("rcp45", "rcp60", "rcp85")
for(i in seq_along(allmods)){
  tmp <- subset(TavgMeans, model==allmods[i])
  tmp.scns <- unique(tmp$scenario)
  flag <- length(which(my_scens %in% tmp.scns == FALSE))
  keeps[i] <- if(flag>0) "no" else "yes"
}
modelkeeps <- data.frame(model=allmods,
                         allscenarios=keeps)
my_mods <- modelkeeps[which(modelkeeps$allscenarios=="yes"),"model"]
temp_projs <- subset(TavgMeans, model %in% my_mods)
colnames(temp_projs)[5] <- "value"
# Average across the retained models; temperature change is absolute (degrees)
TavgMeans<-aggregate(value~period+season+scenario,data=temp_projs,FUN=mean)
colnames(TavgMeans) <- c("period","season","scenario","value")
TavgMeans<-reshape(TavgMeans[,c("period","season","scenario","value")],
                   idvar=c("season","scenario"),timevar="period",direction="wide")
TavgMeans$change<-TavgMeans$value.future-TavgMeans$value.past
####
#### Write files for use in simulations ---------------------------------------
####
write.csv(pptMeans, "../../../precipitation_projections.csv")
write.csv(TavgMeans, "../../../temperature_projections.csv")
|
#' two_factor_search
#'
#' Takes test group and control group (as 4 column data.frame) with
#' exactly two categorical variables and one continuous variable for each
#' observation. Performs 1-1 matching on both factors and a continuous value.
#' Begins with first test, removing selected controls as it travels down.
#' Then reperforms search going from the last test up to the first test.
#' Forwards and backwards selections evaluated based on which has lower sum
#' of absolute deviations from continuous values.
#'
#' The tests are the first parameter, controls are second.
#' Make sure that order in each data.frame input is FIRST FACTOR,
#' SECOND FACTOR, OBSERVATION ID, and CONTINUOUS VALUE.
#'
#' @param raw_tests The data.frame containing the tests
#' @param raw_controls The data.frame containing the controls
#' @export
#' @author Nicholas Sun <nicholas.sun@rutgers.edu>
#' @examples
#' raw_tests <- read.csv("raw_tests.csv")
#' raw_controls <- read.csv("raw_controls.csv")
#' two_factor_search(raw_tests, raw_controls)
two_factor_search <- function(raw_tests, raw_controls) {
  # 1-1 matching of tests to controls within each (factor1, factor2) cell,
  # assigning every test the closest remaining control by continuous value.
  # Matching runs forwards (first test down) and backwards (last test up);
  # the pass with the smaller sum of absolute deviations is kept.
  #
  # raw_tests / raw_controls: data.frames whose columns are, in order,
  #   FIRST FACTOR, SECOND FACTOR, OBSERVATION ID, CONTINUOUS VALUE.
  # Returns a data.frame pairing every test with its matched control.
  colnames(raw_tests) <- c("factor1","factor2","id","sales")
  colnames(raw_controls) <- c("factor1","factor2","id","sales")
  # Strip "$" from the continuous values and coerce to numeric.
  # BUG FIX: the original discarded both gsub() results (and the control-side
  # call mistakenly cleaned testvalues again), so "$"-formatted input was
  # never actually usable.
  testvalues <- as.numeric(gsub("$", "", raw_tests$sales, fixed = TRUE))
  testfactors <- raw_tests$factor1
  testfactors2 <- raw_tests$factor2
  testid <- raw_tests$id
  controlvalues <- as.numeric(gsub("$", "", raw_controls$sales, fixed = TRUE))
  controlfactors <- raw_controls$factor1
  controlfactors2 <- raw_controls$factor2
  controlid <- raw_controls$id
  mastertests <- data.frame(testfactors, testfactors2, testid, testvalues)
  mastercontrols <- data.frame(controlfactors, controlfactors2, controlid, controlvalues)
  output_df <- data.frame(district = character(0), SIC = character(0), test_id= numeric(0), test_value= numeric(0), SIC = character(0), control_id= numeric(0),control_value=numeric(0))
  subset_dim <- data.frame(test_size = numeric(0), control_size = numeric(0))
  # Outer loop over the first factor; inner loop over the second factor.
  for (q in unique(mastertests$testfactors)){
    a <- mastertests[mastertests$testfactors==q,]
    b <- mastercontrols[mastercontrols$controlfactors==q,]
    t.values <- a$testvalues
    t.id <- a$testid
    t.testfactors2 <- a$testfactors2
    c.values <- b$controlvalues
    c.id <- b$controlid
    c.testfactors2 <- b$controlfactors2
    factor1tests <- data.frame(t.testfactors2, t.id, t.values)
    factor1controls <- data.frame(c.testfactors2, c.id, c.values)
    factor1output <- data.frame(district = character(0), SIC = character(0), test_id= numeric(0), test_value= numeric(0), SIC = character(0), control_id= numeric(0),control_value=numeric(0))
    for (w in unique(factor1tests$t.testfactors2)){
      c <- factor1tests[factor1tests$t.testfactors2==w,]
      d <- factor1controls[factor1controls$c.testfactors2==w,]
      t2.values <- c$t.values
      t2.id <- c$t.id
      t2.testfactors2 <- c$t.testfactors2
      c2.values <- d$c.values
      c2.id <- d$c.id
      c2.controlfactors2 <- d$c.testfactors2
      factor2tests <- data.frame(t2.testfactors2, t2.id, t2.values)
      factor2controls <- data.frame(c2.controlfactors2, c2.id, c2.values)
      # Record subset sizes (diagnostic only; not returned)
      add.subset_dim <- c(length(t2.id), length(c2.id))
      subset_dim <- rbind(subset_dim, add.subset_dim)
      len_tests <- length(c$t.id)
      district2 <- rep(q, len_tests)
      # Forward pass: match tests in order; chosen controls are "removed" by
      # overwriting them with an out-of-range sentinel.
      ri <- c()
      variance <- c()
      controlvalues2 <- c2.values
      max <- 10*max(controlvalues2)  # sentinel; assumes positive values -- TODO confirm
      for (i in seq_len(len_tests)){
        z <- which.min(abs(controlvalues2 - factor2tests$t2.values[i]))
        variance[i] <- (controlvalues2[z] - factor2tests$t2.values[i])
        ri <- c(ri, z)
        controlvalues2[z] <- max
      }
      f.controls <- factor2controls[ri,]
      # Backward pass: same greedy matching, starting from the last test.
      controlvalues2 <- c2.values
      b.ri <- c()
      b.variance <- c()
      for (i in seq_len(len_tests)){
        z <- which.min(abs(controlvalues2 - factor2tests$t2.values[len_tests + 1 - i]))
        b.variance[i] <- (controlvalues2[z] - factor2tests$t2.values[len_tests + 1 - i])
        b.ri <- c(b.ri, z)
        controlvalues2[z] <- max
      }
      b.ri <- rev(b.ri)
      b.controls <- factor2controls[b.ri,]
      # Keep whichever pass matched more tightly overall (ties go backward).
      if (sum(abs(variance)) < sum(abs(b.variance))){
        final <- data.frame(district2,factor2tests, f.controls)
      } else {
        final <- data.frame(district2,factor2tests, b.controls)
      }
      factor1output <- rbind(factor1output,final)
    }
    output_df <- rbind(output_df, factor1output)
  }
  colnames(output_df) <- c("FACTOR 1", "FACTOR 2", "Test_ID", "Test_Sales","Control_FACTOR2", "Control_ID","Control_Sales")
  return(output_df)
}
|
/R/two_factor_search.R
|
no_license
|
njjms/matchr
|
R
| false
| false
| 5,088
|
r
|
#' two_factor_search
#'
#' Takes test group and control group (as 4 column data.frame) with
#' exactly two categorical variables and one continuous variable for each
#' observation. Performs 1-1 matching on both factors and a continuous value.
#' Begins with first test, removing selected controls as it travels down.
#' Then reperforms search going from the last test up to the first test.
#' Forwards and backwards selections evaluated based on which has lower sum
#' of absolute deviations from continuous values.
#'
#' The tests are the first parameter, controls are second.
#' Make sure that order in each data.frame input is FIRST FACTOR,
#' SECOND FACTOR, OBSERVATION ID, and CONTINUOUS VALUE.
#'
#' @param raw_tests The data.frame containing the tests
#' @param raw_controls The data.frame containing the controls
#' @export
#' @author Nicholas Sun <nicholas.sun@rutgers.edu>
#' @examples
#' raw_tests <- read.csv("raw_tests.csv")
#' raw_controls <- read.csv("raw_controls.csv")
#' two_factor_search(raw_tests, raw_controls)
two_factor_search <- function(raw_tests, raw_controls) {
  # 1-1 matching of tests to controls within each (factor1, factor2) cell,
  # assigning every test the closest remaining control by continuous value.
  # Matching runs forwards (first test down) and backwards (last test up);
  # the pass with the smaller sum of absolute deviations is kept.
  #
  # raw_tests / raw_controls: data.frames whose columns are, in order,
  #   FIRST FACTOR, SECOND FACTOR, OBSERVATION ID, CONTINUOUS VALUE.
  # Returns a data.frame pairing every test with its matched control.
  colnames(raw_tests) <- c("factor1","factor2","id","sales")
  colnames(raw_controls) <- c("factor1","factor2","id","sales")
  # Strip "$" from the continuous values and coerce to numeric.
  # BUG FIX: the original discarded both gsub() results (and the control-side
  # call mistakenly cleaned testvalues again), so "$"-formatted input was
  # never actually usable.
  testvalues <- as.numeric(gsub("$", "", raw_tests$sales, fixed = TRUE))
  testfactors <- raw_tests$factor1
  testfactors2 <- raw_tests$factor2
  testid <- raw_tests$id
  controlvalues <- as.numeric(gsub("$", "", raw_controls$sales, fixed = TRUE))
  controlfactors <- raw_controls$factor1
  controlfactors2 <- raw_controls$factor2
  controlid <- raw_controls$id
  mastertests <- data.frame(testfactors, testfactors2, testid, testvalues)
  mastercontrols <- data.frame(controlfactors, controlfactors2, controlid, controlvalues)
  output_df <- data.frame(district = character(0), SIC = character(0), test_id= numeric(0), test_value= numeric(0), SIC = character(0), control_id= numeric(0),control_value=numeric(0))
  subset_dim <- data.frame(test_size = numeric(0), control_size = numeric(0))
  # Outer loop over the first factor; inner loop over the second factor.
  for (q in unique(mastertests$testfactors)){
    a <- mastertests[mastertests$testfactors==q,]
    b <- mastercontrols[mastercontrols$controlfactors==q,]
    t.values <- a$testvalues
    t.id <- a$testid
    t.testfactors2 <- a$testfactors2
    c.values <- b$controlvalues
    c.id <- b$controlid
    c.testfactors2 <- b$controlfactors2
    factor1tests <- data.frame(t.testfactors2, t.id, t.values)
    factor1controls <- data.frame(c.testfactors2, c.id, c.values)
    factor1output <- data.frame(district = character(0), SIC = character(0), test_id= numeric(0), test_value= numeric(0), SIC = character(0), control_id= numeric(0),control_value=numeric(0))
    for (w in unique(factor1tests$t.testfactors2)){
      c <- factor1tests[factor1tests$t.testfactors2==w,]
      d <- factor1controls[factor1controls$c.testfactors2==w,]
      t2.values <- c$t.values
      t2.id <- c$t.id
      t2.testfactors2 <- c$t.testfactors2
      c2.values <- d$c.values
      c2.id <- d$c.id
      c2.controlfactors2 <- d$c.testfactors2
      factor2tests <- data.frame(t2.testfactors2, t2.id, t2.values)
      factor2controls <- data.frame(c2.controlfactors2, c2.id, c2.values)
      # Record subset sizes (diagnostic only; not returned)
      add.subset_dim <- c(length(t2.id), length(c2.id))
      subset_dim <- rbind(subset_dim, add.subset_dim)
      len_tests <- length(c$t.id)
      district2 <- rep(q, len_tests)
      # Forward pass: match tests in order; chosen controls are "removed" by
      # overwriting them with an out-of-range sentinel.
      ri <- c()
      variance <- c()
      controlvalues2 <- c2.values
      max <- 10*max(controlvalues2)  # sentinel; assumes positive values -- TODO confirm
      for (i in seq_len(len_tests)){
        z <- which.min(abs(controlvalues2 - factor2tests$t2.values[i]))
        variance[i] <- (controlvalues2[z] - factor2tests$t2.values[i])
        ri <- c(ri, z)
        controlvalues2[z] <- max
      }
      f.controls <- factor2controls[ri,]
      # Backward pass: same greedy matching, starting from the last test.
      controlvalues2 <- c2.values
      b.ri <- c()
      b.variance <- c()
      for (i in seq_len(len_tests)){
        z <- which.min(abs(controlvalues2 - factor2tests$t2.values[len_tests + 1 - i]))
        b.variance[i] <- (controlvalues2[z] - factor2tests$t2.values[len_tests + 1 - i])
        b.ri <- c(b.ri, z)
        controlvalues2[z] <- max
      }
      b.ri <- rev(b.ri)
      b.controls <- factor2controls[b.ri,]
      # Keep whichever pass matched more tightly overall (ties go backward).
      if (sum(abs(variance)) < sum(abs(b.variance))){
        final <- data.frame(district2,factor2tests, f.controls)
      } else {
        final <- data.frame(district2,factor2tests, b.controls)
      }
      factor1output <- rbind(factor1output,final)
    }
    output_df <- rbind(output_df, factor1output)
  }
  colnames(output_df) <- c("FACTOR 1", "FACTOR 2", "Test_ID", "Test_Sales","Control_FACTOR2", "Control_ID","Control_Sales")
  return(output_df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/augbin_2t_1a_fit.R
\name{predict.augbin_2t_1a_fit}
\alias{predict.augbin_2t_1a_fit}
\title{Predict probability of success for given tumour size measurements.}
\usage{
\method{predict}{augbin_2t_1a_fit}(
object,
y1_lower = -Inf,
y1_upper = Inf,
y2_lower = -Inf,
y2_upper = log(0.7),
probs = c(0.025, 0.975),
newdata = NULL,
...
)
}
\arguments{
\item{object}{Object of class \code{augbin_2t_1a_fit}.}
\item{y1_lower}{numeric, minimum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 1 to baseline.
Defaults to negative infinity.}
\item{y1_upper}{numeric, maximum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 1 to baseline.
Defaults to positive infinity.}
\item{y2_lower}{numeric, minimum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 2 to baseline.}
\item{y2_upper}{numeric, maximum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 2 to baseline.
Defaults to log(0.7).}
\item{probs}{pair of probabilities to use to calculate the credible interval
for the probability of success.}
\item{newdata}{data for which to infer the probability of success.
A dataframe-like object with baseline tumour sizes in first column, and first
and second post-baseline tumour sizes in columns 2 and 3. Omitted by default.
When omitted, newdata is set to be the \code{object$tumour_size}.}
\item{...}{Extra args passed onwards.}
}
\value{
Object of class \code{\link[tibble]{tibble}}
}
\description{
This method simply forwards to \code{\link{prob_success}}.
}
|
/man/predict.augbin_2t_1a_fit.Rd
|
no_license
|
brockk/trialr
|
R
| false
| true
| 1,727
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/augbin_2t_1a_fit.R
\name{predict.augbin_2t_1a_fit}
\alias{predict.augbin_2t_1a_fit}
\title{Predict probability of success for given tumour size measurements.}
\usage{
\method{predict}{augbin_2t_1a_fit}(
object,
y1_lower = -Inf,
y1_upper = Inf,
y2_lower = -Inf,
y2_upper = log(0.7),
probs = c(0.025, 0.975),
newdata = NULL,
...
)
}
\arguments{
\item{object}{Object of class \code{augbin_2t_1a_fit}.}
\item{y1_lower}{numeric, minimum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 1 to baseline.
Defaults to negative infinity.}
\item{y1_upper}{numeric, maximum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 1 to baseline.
Defaults to positive infinity.}
\item{y2_lower}{numeric, minimum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 2 to baseline.}
\item{y2_upper}{numeric, maximum threshold to constitute success,
scrutinising the log of the tumour size ratio comparing time 2 to baseline.
Defaults to log(0.7).}
\item{probs}{pair of probabilities to use to calculate the credible interval
for the probability of success.}
\item{newdata}{data for which to infer the probability of success.
A dataframe-like object with baseline tumour sizes in first column, and first
and second post-baseline tumour sizes in columns 2 and 3. Omitted by default.
When omitted, newdata is set to be the \code{object$tumour_size}.}
\item{...}{Extra args passed onwards.}
}
\value{
Object of class \code{\link[tibble]{tibble}}
}
\description{
This method simply forwards to \code{\link{prob_success}}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{plotPCA}
\alias{plotPCA}
\title{Perform a PCA analysis and plot a PCA figure}
\usage{
plotPCA(para, pcaMethod = "svdImpute", valueID = "valueNorm",
label = "order", rmQC = TRUE, batch = FALSE, scale = "none",
center = FALSE, saveRds = TRUE, ...)
}
\arguments{
\item{para}{A \code{metaXpara} object}
\item{pcaMethod}{See \code{\link{pca}} in \pkg{pcaMethods}}
\item{valueID}{The name of the column which will be used}
\item{label}{The label used for plot PCA figure, default is "order"}
\item{rmQC}{A logical indicates whether remove QC data}
\item{batch}{A logical indicates whether output batch information}
\item{scale}{Scaling, see \code{\link{pca}} in \pkg{pcaMethods}}
\item{center}{Centering, see \code{\link{pca}} in \pkg{pcaMethods}}
\item{saveRds}{Boolean, setting the argument to TRUE to save some objects to
disk for debug. Only useful for developer. Default is TRUE.}
\item{...}{Additional parameter}
}
\value{
none
}
\description{
Perform a PCA analysis and plot a PCA figure
}
\examples{
para <- new("metaXpara")
pfile <- system.file("extdata/MTBLS79.txt",package = "metaX")
sfile <- system.file("extdata/MTBLS79_sampleList.txt",package = "metaX")
rawPeaks(para) <- read.delim(pfile,check.names = FALSE)
sampleListFile(para) <- sfile
para <- reSetPeaksData(para)
para <- missingValueImpute(para)
para <- transformation(para,valueID = "value")
metaX::plotPCA(para,valueID="value",scale="uv",center=TRUE)
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
|
/R/metax/man/plotPCA.Rd
|
no_license
|
gigascience/cuddel-gsk-dataset
|
R
| false
| true
| 1,581
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{plotPCA}
\alias{plotPCA}
\title{Perform a PCA analysis and plot a PCA figure}
\usage{
plotPCA(para, pcaMethod = "svdImpute", valueID = "valueNorm",
label = "order", rmQC = TRUE, batch = FALSE, scale = "none",
center = FALSE, saveRds = TRUE, ...)
}
\arguments{
\item{para}{A \code{metaXpara} object}
\item{pcaMethod}{See \code{\link{pca}} in \pkg{pcaMethods}}
\item{valueID}{The name of the column which will be used}
\item{label}{The label used for plot PCA figure, default is "order"}
\item{rmQC}{A logical indicates whether remove QC data}
\item{batch}{A logical indicates whether output batch information}
\item{scale}{Scaling, see \code{\link{pca}} in \pkg{pcaMethods}}
\item{center}{Centering, see \code{\link{pca}} in \pkg{pcaMethods}}
\item{saveRds}{Boolean, setting the argument to TRUE to save some objects to
disk for debug. Only useful for developer. Default is TRUE.}
\item{...}{Additional parameter}
}
\value{
none
}
\description{
Perform a PCA analysis and plot a PCA figure
}
\examples{
para <- new("metaXpara")
pfile <- system.file("extdata/MTBLS79.txt",package = "metaX")
sfile <- system.file("extdata/MTBLS79_sampleList.txt",package = "metaX")
rawPeaks(para) <- read.delim(pfile,check.names = FALSE)
sampleListFile(para) <- sfile
para <- reSetPeaksData(para)
para <- missingValueImpute(para)
para <- transformation(para,valueID = "value")
metaX::plotPCA(para,valueID="value",scale="uv",center=TRUE)
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wiod.R
\docType{data}
\name{industries}
\alias{industries}
\title{WIOD industries}
\description{
the names of the industries
}
|
/man/industries.Rd
|
no_license
|
desval/wiod
|
R
| false
| true
| 206
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wiod.R
\docType{data}
\name{industries}
\alias{industries}
\title{WIOD industries}
\description{
the names of the industries
}
|
library(dplyr)

# Master metadata sheet, ordered by its numeric label
metadata <- readxl::read_excel("data-raw/metadata.xlsx", sheet = "all") %>%
  dplyr::arrange(numeric_label) %>%
  dplyr::select(-digit_1, -digit_2)

# UK 2010 metadata: replace "." and " & " separators with "-" in the
# row/column labels, then trim whitespace from the column labels
metadata_uk_2010 <- readxl::read_excel(path = file.path("data-raw", "metadata_uk_2010.xlsx")) %>%
  dplyr::mutate(
    uk_col = gsub("\\.", "-", as.character(uk_col)),
    uk_col = gsub(" & ", "-", as.character(uk_col)),
    uk_row = gsub("\\.", "-", as.character(uk_row)),
    uk_row = gsub(" & ", "-", as.character(uk_row)),
    uk_col = trimws(uk_col, "both")
  ) %>%
  dplyr::select(-digit_1, -digit_2, -digit_3_5)

load(file.path("not_included", "uk_2010_data.rda"))

# Not executed here: fully reproducible path that re-downloads the raw data
completely_reproducible <- function() {
  uk_2010_data <- iotables_download(source = "uk_2010")
  uk_test_results <- iotables:::uk_2010_results_get()
}

uk_test_results <- iotables:::uk_2010_results_get()

usethis::use_data(metadata, metadata_uk_2010, uk_test_results,
                  uk_2010_data,
                  internal = FALSE, overwrite = TRUE)
|
/data-raw/create_metadata.R
|
permissive
|
vero1166/iotables
|
R
| false
| false
| 1,109
|
r
|
library(dplyr)

# Master metadata sheet, ordered by its numeric label
metadata <- readxl::read_excel("data-raw/metadata.xlsx", sheet = "all") %>%
  dplyr::arrange(numeric_label) %>%
  dplyr::select(-digit_1, -digit_2)

# UK 2010 metadata: replace "." and " & " separators with "-" in the
# row/column labels, then trim whitespace from the column labels
metadata_uk_2010 <- readxl::read_excel(path = file.path("data-raw", "metadata_uk_2010.xlsx")) %>%
  dplyr::mutate(
    uk_col = gsub("\\.", "-", as.character(uk_col)),
    uk_col = gsub(" & ", "-", as.character(uk_col)),
    uk_row = gsub("\\.", "-", as.character(uk_row)),
    uk_row = gsub(" & ", "-", as.character(uk_row)),
    uk_col = trimws(uk_col, "both")
  ) %>%
  dplyr::select(-digit_1, -digit_2, -digit_3_5)

load(file.path("not_included", "uk_2010_data.rda"))

# Not executed here: fully reproducible path that re-downloads the raw data
completely_reproducible <- function() {
  uk_2010_data <- iotables_download(source = "uk_2010")
  uk_test_results <- iotables:::uk_2010_results_get()
}

uk_test_results <- iotables:::uk_2010_results_get()

usethis::use_data(metadata, metadata_uk_2010, uk_test_results,
                  uk_2010_data,
                  internal = FALSE, overwrite = TRUE)
|
# Backbone extraction via locally adaptive network sparsification (LANS);
# nonparametric significance filter of Foti et al. (2011), PLOS ONE.
backboneNetwork <- function(g, alpha, evalFunc) {
  # g        - weighted (possibly directed) igraph graph with a "weight" edge
  #            attribute and an "id" vertex attribute
  # alpha    - significance level for retaining links
  # evalFunc - 1 returns a directed backbone; any other value symmetrizes the
  #            result with mode "max" ("soft" backbone)
  # Returns the sparsified igraph graph.
  # Weighted adjacency matrix of the input graph
  A <- as.matrix(get.adjacency(g, attr = "weight"))
  # Row-normalize: p[i, j] is the fraction of node i's total edge weight
  # carried by link (i, j)
  p <- A / rowSums(A)
  # Empirical CDF of a row's nonzero probabilities: for each entry, the
  # fraction of nonzero entries that are <= that entry.
  F_hat <- function(Q) {
    n_pos <- length(which(Q > 0))  # loop-invariant denominator, hoisted
    vapply(seq_along(Q), function(j) {
      length(which(Q != 0 & Q <= Q[j])) / n_pos
    }, numeric(1))
  }
  nv <- length(V(g))
  sigMatrix <- matrix(nrow = nv, ncol = nv)
  for (i in seq_len(nv)) {
    sigMatrix[i, ] <- F_hat(p[i, ])
  }
  # Link (i, j) survives when its probability sits in the top alpha tail of
  # row i's empirical CDF; NA rows (isolated nodes) are dropped.
  sigMatrix2 <- sigMatrix >= 1 - alpha
  mode(sigMatrix2) <- "numeric"
  sigMatrix2[is.na(sigMatrix2)] <- 0
  # Zero out the insignificant links in the original weighted matrix
  B <- sigMatrix2 * A
  if (evalFunc == 1) {
    # directed backbone
    h <- graph.adjacency(B, mode = c("directed"), weighted = TRUE)
  } else {
    # soft (symmetrized) backbone
    h <- graph.adjacency(B, mode = c("max"), weighted = TRUE)
  }
  V(h)$id <- V(g)$id
  return(h)
}
#Alpha<-function(Q){
# x<-vector()
# for(j in 1:length(Q)){
# x[j]<-(1-Q[j])**(length(which(Q>0))-1)
# }
# return(x)
#}
#sigMatrix<-matrix(nrow = length(V(g)), ncol=length(V(g)))
#for(i in 1:length(V(g))){
# sigMatrix[i,]<-Alpha(p[i,])
#}
#sigMatrix2<-sigMatrix < alpha
#mode(sigMatrix2)<-"numeric"
#sigMatrix2[is.na(sigMatrix2)] <- 0
#Now multiply the original adjacency matrix with sigMatrix to get rid of the insignificant links
#B<-sigMatrix2*A
#Now create a graph from the new matrix.
#h<-graph.adjacency(B,mode=c("lower"),weighted=TRUE)
#V(h)$id<-V(g)$id
|
/old/IBSE_definitions/analyses/R_scripts/backboneExtraction.r
|
no_license
|
jbruun/reviewECIBSE
|
R
| false
| false
| 2,387
|
r
|
# Backbone extraction via locally adaptive network sparsification (LANS);
# nonparametric significance filter of Foti et al. (2011), PLOS ONE.
backboneNetwork <- function(g, alpha, evalFunc) {
  # g        - weighted (possibly directed) igraph graph with a "weight" edge
  #            attribute and an "id" vertex attribute
  # alpha    - significance level for retaining links
  # evalFunc - 1 returns a directed backbone; any other value symmetrizes the
  #            result with mode "max" ("soft" backbone)
  # Returns the sparsified igraph graph.
  # Weighted adjacency matrix of the input graph
  A <- as.matrix(get.adjacency(g, attr = "weight"))
  # Row-normalize: p[i, j] is the fraction of node i's total edge weight
  # carried by link (i, j)
  p <- A / rowSums(A)
  # Empirical CDF of a row's nonzero probabilities: for each entry, the
  # fraction of nonzero entries that are <= that entry.
  F_hat <- function(Q) {
    n_pos <- length(which(Q > 0))  # loop-invariant denominator, hoisted
    vapply(seq_along(Q), function(j) {
      length(which(Q != 0 & Q <= Q[j])) / n_pos
    }, numeric(1))
  }
  nv <- length(V(g))
  sigMatrix <- matrix(nrow = nv, ncol = nv)
  for (i in seq_len(nv)) {
    sigMatrix[i, ] <- F_hat(p[i, ])
  }
  # Link (i, j) survives when its probability sits in the top alpha tail of
  # row i's empirical CDF; NA rows (isolated nodes) are dropped.
  sigMatrix2 <- sigMatrix >= 1 - alpha
  mode(sigMatrix2) <- "numeric"
  sigMatrix2[is.na(sigMatrix2)] <- 0
  # Zero out the insignificant links in the original weighted matrix
  B <- sigMatrix2 * A
  if (evalFunc == 1) {
    # directed backbone
    h <- graph.adjacency(B, mode = c("directed"), weighted = TRUE)
  } else {
    # soft (symmetrized) backbone
    h <- graph.adjacency(B, mode = c("max"), weighted = TRUE)
  }
  V(h)$id <- V(g)$id
  return(h)
}
#Alpha<-function(Q){
# x<-vector()
# for(j in 1:length(Q)){
# x[j]<-(1-Q[j])**(length(which(Q>0))-1)
# }
# return(x)
#}
#sigMatrix<-matrix(nrow = length(V(g)), ncol=length(V(g)))
#for(i in 1:length(V(g))){
# sigMatrix[i,]<-Alpha(p[i,])
#}
#sigMatrix2<-sigMatrix < alpha
#mode(sigMatrix2)<-"numeric"
#sigMatrix2[is.na(sigMatrix2)] <- 0
#Now multiply the original adjacency matrix with sigMatrix to get rid of the insignificant links
#B<-sigMatrix2*A
#Now create a graph from the new matrix.
#h<-graph.adjacency(B,mode=c("lower"),weighted=TRUE)
#V(h)$id<-V(g)$id
|
\name{snqProfitEla}
\alias{snqProfitEla}
\title{Price Elasticities of SNQ Profit function}
\description{
Calculates the Price Elasticities of a Symmetric Normalized Quadratic (SNQ)
profit function.
}
\usage{ snqProfitEla( beta, prices, quant, weights,
scalingFactors = rep( 1, length( weights ) ),
coefVcov = NULL, df = NULL )}
\arguments{
\item{beta}{matrix of estimated \eqn{\beta} coefficients.}
\item{prices}{vector of netput prices at which the elasticities
should be calculated.}
\item{quant}{vector of netput quantities at which the elasticities
should be calculated.}
\item{weights}{vector of weights of prices used for normalization.}
\item{scalingFactors}{factors to scale prices (and quantities).}
\item{coefVcov}{variance covariance matrix of the coefficients (optional).}
\item{df}{degrees of freedom to calculate P-values of the elasticities
(optional).}
}
\note{
A price elasticity is defined as
\deqn{E_{ij} = \frac{ \displaystyle \frac{ \partial q_i }{ q_i } }
{ \displaystyle \frac{ \partial p_j }{ p_j } } =
\frac{ \partial q_i }{ \partial p_j } \cdot \frac{ p_j }{ q_i } }
Thus, e.g. \eqn{E_{ij}=0.5} means that if the price of netput j (\eqn{p_j})
increases by 1\%, the quantity of netput i (\eqn{q_i}) will
increase by 0.5\%.
}
\value{
   a list of class \code{snqProfitEla} containing the following elements:
\item{ela}{matrix of the price elasticities.}
\item{vcov}{variance covariance matrix of the price elasticities.}
\item{stEr}{standard errors of the price elasticities.}
\item{tval}{t-values of the price elasticities.}
\item{pval}{P-values of the price elasticities.}
}
\seealso{\code{\link{snqProfitEst}}.}
\author{Arne Henningsen}
\examples{
# just a stupid simple example
snqProfitEla( matrix(101:109,3,3), c(1,1,1), c(1,-1,-1), c(0.4,0.3,0.3) )
# now with real data
data( germanFarms )
germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput
germanFarms$qVarInput <- -germanFarms$vVarInput / germanFarms$pVarInput
germanFarms$qLabor <- -germanFarms$qLabor
germanFarms$time <- c( 0:19 )
priceNames <- c( "pOutput", "pVarInput", "pLabor" )
quantNames <- c( "qOutput", "qVarInput", "qLabor" )
estResult <- snqProfitEst( priceNames, quantNames, c("land","time"), data=germanFarms )
estResult$ela # price elasticities at mean prices and mean quantities
# price elasticities at the last observation (1994/95)
snqProfitEla( estResult$coef$beta, estResult$data[ 20, priceNames ],
estResult$data[ 20, quantNames ], estResult$weights,
estResult$scalingFactors )
}
\keyword{models}
|
/branches/translogIsoquant/man/snqProfitEla.Rd
|
no_license
|
scfmolina/micecon
|
R
| false
| false
| 2,674
|
rd
|
\name{snqProfitEla}
\alias{snqProfitEla}
\title{Price Elasticities of SNQ Profit function}
\description{
Calculates the Price Elasticities of a Symmetric Normalized Quadratic (SNQ)
profit function.
}
\usage{ snqProfitEla( beta, prices, quant, weights,
scalingFactors = rep( 1, length( weights ) ),
coefVcov = NULL, df = NULL )}
\arguments{
\item{beta}{matrix of estimated \eqn{\beta} coefficients.}
\item{prices}{vector of netput prices at which the elasticities
should be calculated.}
\item{quant}{vector of netput quantities at which the elasticities
should be calculated.}
\item{weights}{vector of weights of prices used for normalization.}
\item{scalingFactors}{factors to scale prices (and quantities).}
\item{coefVcov}{variance covariance matrix of the coefficients (optional).}
\item{df}{degrees of freedom to calculate P-values of the elasticities
(optional).}
}
\note{
A price elasticity is defined as
\deqn{E_{ij} = \frac{ \displaystyle \frac{ \partial q_i }{ q_i } }
{ \displaystyle \frac{ \partial p_j }{ p_j } } =
\frac{ \partial q_i }{ \partial p_j } \cdot \frac{ p_j }{ q_i } }
Thus, e.g. \eqn{E_{ij}=0.5} means that if the price of netput j (\eqn{p_j})
increases by 1\%, the quantity of netput i (\eqn{q_i}) will
increase by 0.5\%.
}
\value{
   a list of class \code{snqProfitEla} containing the following elements:
\item{ela}{matrix of the price elasticities.}
\item{vcov}{variance covariance matrix of the price elasticities.}
\item{stEr}{standard errors of the price elasticities.}
\item{tval}{t-values of the price elasticities.}
\item{pval}{P-values of the price elasticities.}
}
\seealso{\code{\link{snqProfitEst}}.}
\author{Arne Henningsen}
\examples{
# just a stupid simple example
snqProfitEla( matrix(101:109,3,3), c(1,1,1), c(1,-1,-1), c(0.4,0.3,0.3) )
# now with real data
data( germanFarms )
germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput
germanFarms$qVarInput <- -germanFarms$vVarInput / germanFarms$pVarInput
germanFarms$qLabor <- -germanFarms$qLabor
germanFarms$time <- c( 0:19 )
priceNames <- c( "pOutput", "pVarInput", "pLabor" )
quantNames <- c( "qOutput", "qVarInput", "qLabor" )
estResult <- snqProfitEst( priceNames, quantNames, c("land","time"), data=germanFarms )
estResult$ela # price elasticities at mean prices and mean quantities
# price elasticities at the last observation (1994/95)
snqProfitEla( estResult$coef$beta, estResult$data[ 20, priceNames ],
estResult$data[ 20, quantNames ], estResult$weights,
estResult$scalingFactors )
}
\keyword{models}
|
# VI method for hist() calls: draws the histogram and passes the returned
# "histogram" object to VI() for a textual (non-visual) description.
# If the caller supplied custom axis labels, a note is printed afterwards so
# the reader knows the default axis text was replaced.
VI.hist <- function(...) {
  dots <- list(...)
  VI(hist(...))
  # Print one replacement notice followed by a blank separator line.
  report <- function(note) {
    cat(note)
    cat("\n")
  }
  if (!is.null(dots$xlab)) {
    report(paste0("N.B. The default text for the x axis has been replaced by: ",
                  dots$xlab, "\n"))
  }
  if (!is.null(dots$ylab)) {
    report(paste0("N.B. The default text for the y axis has been replaced by: ",
                  dots$ylab, "\n"))
  }
}
|
/R/NonMethodFunctions.R
|
no_license
|
cran/BrailleR
|
R
| false
| false
| 468
|
r
|
# S3 method that makes hist() output accessible: the histogram is drawn and
# the resulting "histogram" object is described in text form by VI().
VI.hist =
function(...) {
# Capture the caller's arguments so any axis-label overrides can be inspected.
args = list(...)
# Draw the plot; hist() returns the histogram object that VI() describes.
VI(hist(...))
# If a custom x-axis label was supplied, tell the (non-visual) reader that
# the default axis text was replaced.
if (!is.null(args$xlab)) {
cat(paste("N.B. The default text for the x axis has been replaced by: ",
args$xlab, "\n", sep = ""))
cat("\n")
}
# Same notification for a custom y-axis label.
if (!is.null(args$ylab)) {
cat(paste("N.B. The default text for the y axis has been replaced by: ",
args$ylab, "\n", sep = ""))
cat("\n")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.