content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text
|---|---|---|---|---|---|---|---|---|---|
testlist <- list(A = structure(c(1.38997190089718e-309, -Inf, 3.81571422914747e-236 ), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125695-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 208 | r |
library(GenomicRanges)
library(patchwork)
library(gghighlight)
library(tidyverse)
source("assem_finch_code/17_pixy/pixy_manhattan_fnc.R")
#pixy_contrast <- "data/pixy/fuliginosa_fortis/output/"
#df.tmp <- read.table("data/pixy/four_species/pixy_four_geospiza_6_fst.txt", header = T)
#df.tmp %>% filter(window_pos_1 == 24780001)
df.pixy_out <- input_pixy_V2("data/pixy/four_species/")
#70 windows with < 100 SNPs
df.pixy_out <- df.pixy_out %>% filter(SNPs > 100)
write.csv(df.pixy_out,"output/pixy/Four_geospiza_processed_Pixy.csv")
all_contrasts <- gsub("fst_","",grep("fst_",names(df.pixy_out), value = T))
#make a pdf for each contrast per chromosome
for (contrast_name in all_contrasts){
print(contrast_name)
plot_pixy_V2(df.pixy_out, contrast_name)
}
# load and process 1kbp windows -------------------------------------------
#df.pixy_out_1kb <- input_pixy_V2("data/pixy/four_species_1kb/")
#write.csv(df.pixy_out_1kb,"output/pixy/Four_geospiza_processed_Pixy_1kb.csv")
#
#df.pixy_out_1kb.pi <- df.pixy_out_1kb %>% select(CHROM, BIN_START, BIN_END, contains("pi"))
#write.csv(df.pixy_out_1kb.pi,"output/pixy/Four_geospiza_processed_Pixy_1kb_pi.csv")
# manhattan plot ----------------------------------------------------------
p1 <- manc(df.pixy_out, "fst_fortis_fuliginosa") + ggtitle("fst_fortis_fuliginosa")
p2 <- manc(df.pixy_out, "fst_fortis_magnirostris") + ggtitle("fst_fortis_magnirostris")
p3 <- manc(df.pixy_out, "fst_fortis_scandens") + ggtitle("fst_fortis_scandens")
p4 <- manc(df.pixy_out, "fst_fuliginosa_magnirostris") + ggtitle("fst_fuliginosa_magnirostris")
p5 <- manc(df.pixy_out, "fst_fuliginosa_scandens") + ggtitle("fst_fuliginosa_scandens")
p6 <- manc(df.pixy_out, "fst_magnirostris_scandens") + ggtitle("fst_magnirostris_scandens")
p1 / p2 / p3 / p4 / p5 / p6
ggsave("output/pixy/six_fst_plots.png", height = 11, width = 8.5)
# overlay -----------------------------------------------------------------
df.overlay <- df.pixy_out %>% select(CHROM, BIN_START, BIN_END, chr_ordered, row, chr_labels,
fst_fortis_fuliginosa, fst_fortis_magnirostris, fst_fuliginosa_magnirostris)
dfZ <- df.overlay %>% filter(CHROM == "chrZ") %>%
mutate(zfst_fortis_fuliginosa = (fst_fortis_fuliginosa - mean(fst_fortis_fuliginosa, na.rm = T)) / sd(fst_fortis_fuliginosa, na.rm =T),
zfst_fortis_magnirostris = (fst_fortis_magnirostris - mean(fst_fortis_magnirostris, na.rm = T)) / sd(fst_fortis_magnirostris, na.rm =T),
zfst_fuliginosa_magnirostris = (fst_fuliginosa_magnirostris - mean(fst_fuliginosa_magnirostris, na.rm = T)) / sd(fst_fuliginosa_magnirostris, na.rm =T))
dfA <- df.overlay %>% filter(CHROM != "chrZ") %>%
mutate(zfst_fortis_fuliginosa = (fst_fortis_fuliginosa - mean(fst_fortis_fuliginosa, na.rm = T)) / sd(fst_fortis_fuliginosa, na.rm =T),
zfst_fortis_magnirostris = (fst_fortis_magnirostris - mean(fst_fortis_magnirostris, na.rm = T)) / sd(fst_fortis_magnirostris, na.rm =T),
zfst_fuliginosa_magnirostris = (fst_fuliginosa_magnirostris - mean(fst_fuliginosa_magnirostris, na.rm = T)) / sd(fst_fuliginosa_magnirostris, na.rm =T))
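#hypothetical refactor (not in the original script): the two mutate() blocks
#above are identical; with dplyr >= 1.0 a small helper plus across() would
#avoid the duplication, e.g.:
# zscale <- function(x) (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
# df.overlay %>%
#   group_by(is_Z = CHROM == "chrZ") %>%
#   mutate(across(starts_with("fst_"), zscale, .names = "z{.col}")) %>%
#   ungroup()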
df.overlayZFst <- rbind(dfA, dfZ)
df.overlayZFst <- df.overlayZFst %>% select(CHROM, BIN_START, BIN_END, chr_ordered, row, chr_labels,
zfst_fortis_fuliginosa, zfst_fortis_magnirostris, zfst_fuliginosa_magnirostris)
df.overlay.long <- df.overlayZFst %>% pivot_longer(cols = -c("CHROM","BIN_START","BIN_END","chr_ordered","row","chr_labels"),
names_to = "contrast", values_to = "Zfst")
df.overlay.long <- df.overlay.long %>%
  mutate(contrast_label = ifelse(contrast == "zfst_fortis_fuliginosa", "fortis vs. fuliginosa",
ifelse(contrast == "zfst_fortis_magnirostris", "fortis vs. magnirostris",
ifelse(contrast == "zfst_fuliginosa_magnirostris", "fuliginosa vs. magnirostris", NA))))
chr_breaks <- df.overlay.long %>% filter(chr_ordered != "chrunknown" & !is.na(row)) %>%
mutate(chr_ordered = factor(chr_ordered, levels = chr_order)) %>%
group_by(chr_ordered, chr_labels) %>%
dplyr::summarise(chr_breaks = mean(row),
chr_ends = max(row),.groups = "keep")
p.overlay <- df.overlay.long %>%
#filter(CHROM!="chrZ") %>% #for now remove Z because not plotted with GEMMA
ggplot() +
geom_point(aes(x = row, y = Zfst, color = contrast_label), alpha = 0.5, size=1.5,shape=20,stroke=0.2) +
theme_bw() +
theme(legend.position=c(0.9,0.8),
#panel.border=element_blank(),
panel.border = element_blank(), axis.line.x = element_line(), axis.line.y = element_line(),
axis.title.x=element_blank(),
#axis.text.x = element_text(angle = 45, color = "black"),
#axis.text.x = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank(),
axis.title.y = element_text(size=10),
axis.text = element_text(size=10),
axis.ticks.x=element_blank(),
axis.ticks.y=element_line(size=0.2))+
#legend.background = element_rect(fill=NA)) +
labs(y=expression(ZF[ST])) +
scale_color_manual(values = c("#fc8d62","#66c2a5","#8da0cb"), name = "Contrast") +
scale_alpha(guide = 'none') +
scale_x_continuous(breaks = chr_breaks$chr_breaks,
labels = function(labels) {
sapply(seq_along(labels), function(i) paste0(ifelse(i %% 2 == 0, '', '\n'), chr_breaks$chr_labels[i]))
}) +
labs(y=expression(ZF[ST])) +
geom_vline(data = chr_breaks, aes(xintercept = chr_ends), alpha = 0.1) +
guides(colour = guide_legend(override.aes = list(size=10)),
shape = guide_legend(override.aes = list(size = 10)))
p.overlay
ggsave("output/pixy/three_species_overlay_Zfst.png", width = 12, height = 4, dpi = 300)
p.overlay + scale_y_reverse() + theme(legend.position=c(0.9,0.3))
ggsave("output/pixy/three_species_overlay_Zfst_reverse.png", width = 12, height = 4, dpi = 300)
# lines? ------------------------------------------------------------------
drop.these.chr.labs <- c("chr21","chr22","chr23","chr24","chr25","chr26", "chr27","chr28", "chr29")
chr_breaks <- chr_breaks %>% mutate(chr_labels = ifelse(chr_ordered %in% drop.these.chr.labs, "", chr_labels))
p.overlay2 <- df.overlay.long %>%
mutate(contrast_label = gsub("vs.","-", contrast_label)) %>%
#filter(CHROM!="chrZ") %>% #for now remove Z because not plotted with GEMMA
ggplot() +
geom_line(aes(x = row, y = Zfst, color = contrast_label), alpha = 0.5, size=.5) +
theme_bw() +
theme(legend.position=c(0.9,0.8),
#panel.border=element_blank(),
panel.border = element_blank(), axis.line.x = element_line(), axis.line.y = element_line(),
axis.title.x=element_blank(),
#axis.text.x = element_text(angle = 45, color = "black"),
#axis.text.x = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank(),
axis.title.y = element_text(size=6),
axis.text = element_text(size=6),
axis.ticks.x=element_blank(),
axis.ticks.y=element_line(size=0.2),
legend.text = element_text(face = "italic", size = 6),
legend.title = element_text(size = 6),
legend.margin=margin(t=-10))+
#legend.background = element_rect(fill=NA)) +
labs(y=expression(ZF[ST])) +
scale_color_manual(values = c("#fc8d62","#66c2a5","#8da0cb"), name = "Contrast") +
scale_alpha(guide = 'none') +
scale_x_continuous(breaks = chr_breaks$chr_breaks,
labels = function(labels) {
sapply(seq_along(labels), function(i) paste0(ifelse(i %% 2 == 0, '', '\n'), chr_breaks$chr_labels[i]))
}) +
labs(y=expression(italic(ZF[ST]))) +
geom_vline(data = chr_breaks, aes(xintercept = chr_ends), alpha = 0.1) +
  guides(colour = guide_legend(override.aes = list(size=5)),
         shape = guide_legend(override.aes = list(size = 5)))
p.overlay2
ggsave("output/pixy/three_species_overlay_Zfst_lines.png", width = 12, height = 4, dpi = 300)
p.overlay2 + scale_y_reverse() + theme(legend.position="bottom")
ggsave("output/pixy/three_species_overlay_Zfst_reverse_lines.pdf", width = 183, height = 61, dpi = 300, units = "mm")
| /17_pixy/plot_pixy_4species.R | no_license | erikenbody/Darwins_finch_comparative_genomics | R | false | false | 8,509 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Geno.R
\name{GRAB.ReadGeno}
\alias{GRAB.ReadGeno}
\title{Read in genotype data}
\usage{
GRAB.ReadGeno(
GenoFile,
GenoFileIndex = NULL,
SampleIDs = NULL,
control = NULL,
sparse = FALSE
)
}
\arguments{
\item{GenoFile}{a character of genotype file. See \code{Details} section for more information.}
\item{GenoFileIndex}{additional index file(s) corresponding to the \code{GenoFile}. See \code{Details} section for more information.}
\item{SampleIDs}{a character vector of sample IDs to extract. The default is NULL, that is, to use all samples in GenoFile.}
\item{control}{a list of parameters to decide which markers to extract. If not specified, the first 10 markers will be extracted. For more details, please check \code{?GRAB.control}.}
\item{sparse}{a logical value (default: FALSE) indicating whether a sparse genotype matrix should be returned.}
}
\value{
An R list containing an R genotype matrix (each row corresponds to one sample and each column to one marker) and an R SNP information matrix.
}
\description{
The GRAB package provides functions to read genotype data in multiple formats (including Plink, BGEN, and VCF) into R.
}
\details{
We support three genotype formats: Plink, BGEN, and VCF.
Users do not need to specify the genotype format; the GRAB package checks the filename extension for that purpose.
If \code{GenoFileIndex} is not specified, then GRAB uses the same prefix as \code{GenoFile}.
\describe{
\item{Plink}{
\itemize{
\item \code{GenoFile}: "prefix.bed".
\item \code{GenoFileIndex}: c("prefix.bim", "prefix.fam").
}
}
\item{BGEN}{
\itemize{
\item \code{GenoFile}: "prefix.bgen";
\item \code{GenoFileIndex}: "prefix.bgen.bgi" or c("prefix.bgen.bgi", "prefix.bgen.samples").
\item IMPORTANT NOTE: If only one element is given for \code{GenoFileIndex}, then we assume it should be "prefix.bgen.bgi".
If BGEN file does not include sample identifiers, then "prefix.bgen.samples" is required, which should be a file with only one column whose column name is "GRAB_BGEN_SAMPLE" (case insensitive).
One example is \code{system.file("extdata", "example_bgen_1.2_8bits.bgen.samples", package = "GRAB")}.
If you are not sure if sample identifiers are in BGEN file, you can try function \code{?checkIfSampleIDsExist}.
}
}
\item{VCF}{Not available now. \code{GenoFile}: "prefix.vcf"; \code{GenoFileIndex}: "prefix.vcf.tbi"}
}
}
\examples{
## The below is raw data
RawFile = system.file("extdata", "example.raw", package = "GRAB")
GenoMat = data.table::fread(RawFile)
head(GenoMat[,1:15])
## The below is for Plink format
PlinkFile = system.file("extdata", "example.bed", package = "GRAB")
GenoList = GRAB.ReadGeno(PlinkFile)
GenoMat = GenoList$GenoMat
markerInfo = GenoList$markerInfo
head(GenoMat)
markerInfo
## The below is for BGEN format (Note the different REF/ALT order for BGEN and Plink formats)
BGENFile = system.file("extdata", "example_bgen_1.2_8bits.bgen", package = "GRAB")
GenoList = GRAB.ReadGeno(BGENFile)
GenoMat = GenoList$GenoMat
markerInfo = GenoList$markerInfo
head(GenoMat)
markerInfo
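## Hypothetical (not in the original examples): the sparse argument documented
## above could be used to request a sparse genotype matrix
# GenoList = GRAB.ReadGeno(PlinkFile, sparse = TRUE)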
## The below is to demonstrate parameters in control
PlinkFile = system.file("extdata", "example.bed", package = "GRAB")
IDsToIncludeFile = system.file("extdata", "example.IDsToIncludeFile.txt", package = "GRAB")
RangesToIncludeFile = system.file("extdata", "example.RangesToIncludeFile.txt", package = "GRAB")
GenoList = GRAB.ReadGeno(PlinkFile,
control = list(IDsToIncludeFile = IDsToIncludeFile,
RangesToIncludeFile = RangesToIncludeFile,
AlleleOrder = "ref-first"))
GenoMat = GenoList$GenoMat
head(GenoMat)
markerInfo = GenoList$markerInfo
markerInfo
}
| /man/GRAB.ReadGeno.Rd | no_license | JulongWei/GRAB | R | false | true | 3,834 | rd |
#git version
library(data.table)
library(glmnet)
library(ade4)
library(ROCR)
source("analyze_functions.R")
#leave these fixed
#nperm <- 100
if (!exists("run_external")){
nperm <- 100
nparc <- 50
fs_feat <- "FS_L_Hippo_Vol"
#fs_feat <- "FS_InterCranial_Vol"
#options: both, male, female
keep.sex <- "both"
#keep.sex <- "male"
regress.icv <- T
  ceu_cut <- 0.0
  #cv.method is used below but was never set in this script; "standard" is an
  #assumed default (it may be supplied externally when run_external exists)
  cv.method <- "standard"
}
#strings for output
bstring <- "regress"
icvstring <- "noICVreg"
if (regress.icv){
icvstring <- "ICVreg"
}
pstring <- "noperm"
if (nperm > 0)
pstring <- paste("perm",nperm, sep="")
ofname <- paste("../results/", paste("FS", nparc, fs_feat, bstring, keep.sex, icvstring, paste("CEU",ceu_cut,sep=""), pstring, sep="_"), ".RData", sep="")
message(ofname)
rdata.fname <- list()
rdata.fname[[1]] <- paste("../data/netmats1_", nparc, ".RData", sep="")
rdata.fname[[2]] <- paste("../data/netmats2_", nparc, ".RData", sep="")
#FreeSurfer Data
fs.fname <- "../data/unrestricted_hcp_freesurfer.csv"
#important for cross-validation
fam.fname <- paste("../data/MEGA_Chip.fam")
message("loading freeSurfer data")
fs.info <- fread(fs.fname, data.table=F)
rownames(fs.info) <- fs.info$Subject
ddim <- 0
if ( nparc != ddim){
message("(re-)loading .RData")
load(rdata.fname[[1]])
ddim <- nparc
}
##add sex info
sex <- subject.info[rownames(ethn.info),"Gender"]
ethn.info <- data.frame(ethn.info, sex)
if (keep.sex != "both"){
if (keep.sex == "male"){
keep <- rownames(subset(ethn.info, sex=="M"))
} else {
keep <- rownames(subset(ethn.info, sex=="F"))
}
} else {
keep <- rownames(ethn.info)
}
##remove subjects with CEU ancestry below ceu_cut
#suggested settings either 0.0 to include all ethn
#or 0.9 to include only CEU subjects
keep.ceu <- rownames(subset(ethn.info[keep,], CEU>=ceu_cut))
fs2 <- fs.info[keep.ceu, fs_feat]
if (regress.icv){
myicv <- fs.info[keep.ceu, "FS_InterCranial_Vol"]
message("hello world")
dummy <- lm(fs2 ~ myicv)
fs2.bak <- fs2
dummy2 <- dummy$res
fs2 <- rep(NA, length(fs2.bak))
fs2[as.integer(names(dummy2))] <- dummy2
}
names(fs2) <- keep.ceu
#outcome
Y <- fs2[rownames(mydata.scale)]
fam.info <- read.table(fam.fname)
myfam.map <- fam.info[,1:2]
rownames(myfam.map) <- myfam.map[,2]
myfam.use <- myfam.map[rownames(mydata.scale),]
excl <- is.na(Y)
mydata.scale2 <- mydata.scale[!excl,]
Y2 <- Y[!excl]
myfam.use2 <- myfam.use[!excl,]
#mean 0 and sd 1
Y2 <- scale(Y2)
#netmat1 data
mydata.scale2.nm1 <- mydata.scale2
#netmat2 data
load(rdata.fname[[2]])
mydata.scale2.nm2 <- mydata.scale[!excl,]
#nperm counts repetitions rather than permutations; the name is a misnomer
bench <- c()
for(rep in 1:nperm){
message(rep, " of ", nperm)
##fix the cv for runs with nm1 and nm2
##this allows for a paired t-test/wilcoxon test
if (cv.method == "standard"){
message("sampling standard folds")
mycv <- getCVfold(mydata.scale2, 10)
}
if (cv.method == "famaware"){
message("sampling family-aware folds")
mycv <- getFamCVfold(mydata.scale2, 10, myfam.use2)
}
#nested CV
nested.cv.nm1 <- doubleCV(Y2, mydata.scale2.nm1, myfold=mycv, fam="gaussian", measure="mse", lop="min")
nested.cv.nm2 <- doubleCV(Y2, mydata.scale2.nm2, myfold=mycv, fam="gaussian", measure="mse", lop="min")
xperf <- c(cmp.CV.perf(nested.cv.nm1), cmp.CV.perf(nested.cv.nm2))
bench <- rbind(bench, xperf)
print(bench)
save(bench, file=ofname)
}
### END ###
| /scripts/fs_feat_pred_benchmark.R | no_license | andrealtmann/hcp-genetics | R | false | false | 3,435 | r |
context("methods work for a single species data set")
test_that("project methods",{
data(inter)
data(NS_species_params_gears)
# Multiple species params
params <- MizerParams(NS_species_params_gears, inter=inter)
  # We choose the largest species for our single-species test
single_params <- MizerParams(NS_species_params_gears[12,])
# Multiple species, single effort sim
sim1 <- project(params, effort = 1, t_max = 10)
n_mult <- sim1@n[11,,]
n_single <- matrix(sim1@n[11,12,],nrow=1)
dimnames(n_single) <- list(sp = "Sprat", w = dimnames(sim1@n)$w)
npp <- sim1@n_pp[11,]
# Multiple species, mult effort sim
effort <- array(abs(rnorm(40)),dim=c(10,4))
single_effort <- array(abs(rnorm(10)),dim=c(10,1))
expect_that(length(dim(getPhiPrey(params,n_mult,npp))), is_identical_to(length(dim(getPhiPrey(single_params,n_single,npp)))))
expect_that(length(dim(getFeedingLevel(params,n_mult,npp))), is_identical_to(length(dim(getFeedingLevel(single_params,n_single,npp)))))
expect_that(length(dim(getPredRate(params,n_mult,npp))), is_identical_to(length(dim(getPredRate(single_params,n_single,npp)))))
expect_that(length(dim(getM2(params,n_mult,npp))), is_identical_to(length(dim(getM2(single_params,n_single,npp)))))
expect_that(length(getM2Background(params,n_mult,npp)), is_identical_to(length(getM2Background(single_params,n_single,npp))))
expect_that(length(dim(getFMortGear(params,effort = 1))), is_identical_to(length(dim(getFMortGear(single_params,effort = 1)))))
expect_that(length(dim(getFMortGear(params,effort = effort))), is_identical_to(length(dim(getFMortGear(single_params,effort = single_effort)))))
expect_that(length(dim(getFMort(params,effort = 1))), is_identical_to(length(dim(getFMort(single_params,effort = 1)))))
expect_that(length(dim(getFMort(params,effort = effort))), is_identical_to(length(dim(getFMort(single_params,effort = single_effort)))))
expect_that(length(dim(getZ(params,n_mult,npp,effort=1))), is_identical_to(length(dim(getZ(single_params,n_single,npp, effort=1)))))
expect_that(length(dim(getEReproAndGrowth(params,n_mult,npp))), is_identical_to(length(dim(getEReproAndGrowth(single_params,n_single,npp)))))
expect_that(length(dim(getESpawning(params,n_mult,npp))), is_identical_to(length(dim(getESpawning(single_params,n_single,npp)))))
expect_that(length(dim(getEGrowth(params,n_mult,npp))), is_identical_to(length(dim(getEGrowth(single_params,n_single,npp)))))
expect_that(length(dim(getRDI(params,n_mult,npp))), is_identical_to(length(dim(getRDI(single_params,n_single,npp)))))
expect_that(length(dim(getRDD(params,n_mult,npp))), is_identical_to(length(dim(getRDD(single_params,n_single,npp)))))
})
test_that("summary methods",{
data(inter)
data(NS_species_params_gears)
# Multiple species params
params <- MizerParams(NS_species_params_gears, inter=inter)
single_params <- MizerParams(NS_species_params_gears[1,])
# Multiple species, single effort sim
sim1 <- project(params, effort = 1, t_max = 10)
n_mult <- sim1@n[11,,]
n_single <- matrix(sim1@n[11,1,],nrow=1)
dimnames(n_single) <- list(sp = "Sprat", w = dimnames(sim1@n)$w)
npp <- sim1@n_pp[11,]
sim1s <- project(single_params, effort = 1, t_max = 10)
expect_that(length(dim(get_size_range_array(params))), is_identical_to(length(dim(get_size_range_array(single_params)))))
expect_that(length(dim(getSSB(sim1))), is_identical_to(length(dim(getSSB(sim1s)))))
expect_that(length(dim(getBiomass(sim1))), is_identical_to(length(dim(getBiomass(sim1s)))))
expect_that(length(dim(getN(sim1))), is_identical_to(length(dim(getN(sim1s)))))
expect_that(length(dim(getFMortGear(sim1))), is_identical_to(length(dim(getFMortGear(sim1s)))))
expect_that(length(dim(getYieldGear(sim1))), is_identical_to(length(dim(getYieldGear(sim1s)))))
expect_that(length(dim(getYield(sim1))), is_identical_to(length(dim(getYield(sim1s)))))
expect_that(length(getProportionOfLargeFish(sim1, threshold_w = 1)), is_identical_to(length(getProportionOfLargeFish(sim1s, threshold_w = 1))))
expect_that(length(getMeanWeight(sim1, min_w = 1)), is_identical_to(length(getMeanWeight(sim1s, min_w = 1))))
expect_that(length(getMeanMaxWeight(sim1, min_w = 1)), is_identical_to(length(getMeanMaxWeight(sim1s, min_w = 1))))
})
| /tests/testthat/test-single_species.R | no_license | maxlindmark/mizer-rewiring | R | false | false | 4,379 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lax-internal.R
\name{lax-internal}
\alias{lax-internal}
\alias{adj_object}
\alias{return_level_gev}
\alias{gev_rl_CI}
\alias{gev_rl_prof}
\alias{return_level_bingp}
\alias{bingp_rl_CI}
\alias{bingp_rl_prof}
\alias{box_cox_deriv}
\alias{ismev_ppp}
\alias{kgaps_loglik}
\title{Internal lax functions}
\usage{
adj_object(x, cluster = NULL, use_vcov = TRUE, ...)
return_level_gev(x, m, level, npy, prof, inc, type)
gev_rl_CI(x, m, level, npy, type)
gev_rl_prof(x, m, level, npy, inc, type, rl_sym)
return_level_bingp(x, m, level, npy, prof, inc, type, npy_given)
bingp_rl_CI(x, m, level, npy, type, u)
bingp_rl_prof(x, m, level, npy, inc, type, rl_sym, u)
box_cox_deriv(x, lambda = 1, lambda_tol = 1/50, poly_order = 3)
ismev_ppp(a, npy)
kgaps_loglik(theta, N0, N1, sum_qs, n_kgaps)
}
\description{
Internal lax functions
}
\details{
These functions are not intended to be called by the user.
}
\keyword{internal}
| /man/lax-internal.Rd | no_license | paulnorthrop/lax | R | false | true | 997 | rd |
plot3<-function() {
# Constants
file_name<-"household_power_consumption.txt";
sep<-";";
# Read Column Names
column_names<-colnames(read.table(file=file_name,sep=sep,header=TRUE,nrow=1));
  # The first data row is 16/12/2006 17:24:00; 66,636 one-minute rows precede
  # 1/2/2007, so we skip the header plus those rows (skip = 66637)
  # and we have to read 2 days of data = 2 X 60 X 24 = 2880 minutes (nrows)
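  # (hypothetical check, not in the original:) the skip count can be derived
  # rather than hard-coded, e.g.
  # skip_rows <- 1 + as.numeric(difftime(strptime("1/2/2007", "%d/%m/%Y"),
  #                                      strptime("16/12/2006 17:24:00", "%d/%m/%Y %H:%M:%S"),
  #                                      units = "mins"))  # header + 66636 data rows = 66637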
df<-read.table(file=file_name,
sep=sep,
na.strings="?",
colClasses="character",
col.names=column_names,
nrows=2880,
skip=66637);
# Cast as numeric -
df$Sub_metering_1<-as.numeric(df$Sub_metering_1);
df$Sub_metering_2<-as.numeric(df$Sub_metering_2);
df$Sub_metering_3<-as.numeric(df$Sub_metering_3);
# Add new column for referring date-time together
df$DateTime<-strptime(paste(df$Date, df$Time, sep=" "), format="%d/%m/%Y %H:%M:%S")
# Open device for drawing
# Default unit for png is pixels
png(filename="plot3.png",
height=480,
width=480,
bg="transparent");
# Draw empty plot3, with only the x and y labels
plot(df$DateTime,df$Sub_metering_1,
ylab="Energy sub metering",
xlab="",
type="n");
# Add line plots with different colors
lines(df$DateTime,df$Sub_metering_1);
lines(df$DateTime,df$Sub_metering_2,col="red");
lines(df$DateTime,df$Sub_metering_3,col="blue");
# Add legend
legend("topright",
lty=1,
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"));
# Save plot
dev.off();
}
| /plot3.R | no_license | kapil-malik/ExData_Plotting1 | R | false | false | 1,534 | r |
## STEP 0: load required packages
# load the reshape2 package (will be used in STEP 5)
library(reshape2)
## STEP 1: Merges the training and the test sets to create one data set
# read data into data frames
subject_train <- read.table("./train/subject_train.txt")
subject_test <- read.table("./test/subject_test.txt")
X_train <- read.table("./train/X_train.txt")
X_test <- read.table("./test/X_test.txt")
y_train <- read.table("./train/y_train.txt")
y_test <- read.table("./test/y_test.txt")
# add column name for subject files
names(subject_train) <- "subjectID"
names(subject_test) <- "subjectID"
# add column names for measurement files
featureNames <- read.table("features.txt")
names(X_train) <- featureNames$V2
names(X_test) <- featureNames$V2
# add column name for label files
names(y_train) <- "activity"
names(y_test) <- "activity"
# combine files into one dataset
train <- cbind(subject_train, y_train, X_train)
test <- cbind(subject_test, y_test, X_test)
combined <- rbind(train, test)
## STEP 2: Extracts only the measurements on the mean and standard
## deviation for each measurement.
# determine which columns contain "mean()" or "std()"
meanstdcols <- grepl("mean\\(\\)", names(combined)) |
grepl("std\\(\\)", names(combined))
# ensure that we also keep the subjectID and activity columns
meanstdcols[1:2] <- TRUE
# remove unnecessary columns
combined <- combined[, meanstdcols]
## STEP 3: Uses descriptive activity names to name the activities
## in the data set.
## STEP 4: Appropriately labels the data set with descriptive
## activity names.
# convert the activity column from integer to factor
combined$activity <- factor(combined$activity, labels=c("Walking",
"Walking Upstairs", "Walking Downstairs", "Sitting", "Standing", "Laying"))
## STEP 5: Creates a second, independent tidy data set with the
## average of each variable for each activity and each subject.
# create the tidy data set
melted <- melt(combined, id=c("subjectID","activity"))
tidy <- dcast(melted, subjectID+activity ~ variable, mean)
# write the tidy data set to a file
write.table(tidy, "tidy.txt", row.names=FALSE)
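# optional sanity check (not part of the assignment script): read the tidy
# data set back in and inspect it
# tidy_check <- read.table("tidy.txt", header = TRUE)
# str(tidy_check)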
| /run_analysis.R | no_license | vamsimails/Getting-and-Cleaning-Data-Course-Project | R | false | false | 2,189 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Example_data.R
\docType{data}
\name{Example_processed_romics_object}
\alias{Example_processed_romics_object}
\title{Example_unprocessed_romics_object}
\format{
A romics_object
}
\usage{
Example_processed_romics_object
}
\description{
This dataset contains proteomics results of Bacillus cereus grown in 4 different media:
- Luria Bertani (lb)
- modified AOAC (maoac)
- autoclaved soil extract from INRAE Avignon, France (soil)
- autoclaved zucchini puree (zucchini)
}
\details{
This romics_object was created by using the function :
romicsCreateObject(Example_romics_data,Example_romics_metadata, main_factor = "media")
and transformed using different romics functions.
To see the transformation steps employed, use the following code:
romicsCreatePipeline(Example_processed_romics_object)
}
\keyword{datasets}
| /man/Example_processed_romics_object.Rd | permissive | PNNL-Comp-Mass-Spec/RomicsProcessor | R | false | true | 1,316 | rd |
library(adaptTest)
### Name: pathCEF
### Title: Function to plot several conditional error functions running
### through a "path" of given points
### Aliases: pathCEF
### ** Examples
## Compare the tests by Bauer and Koehne (1994), Lehmacher and Wassmer (1999) and Vandemeulebroecke (2006)
oldmfcol <- par(mfcol=c(1,3))
pathCEF(typ="b", main="BK 94")
pathCEF(typ="l", main="LW 99")
pathCEF(typ="v", main="V 06")
par(oldmfcol)
| /data/genthat_extracted_code/adaptTest/examples/pathCEF.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 435 | r |
#
# ppd = function(a, b, y_star) {
# b^a/(b+y_star)^(a+1)
# }
#
# y_star=seq(from=0,to=120,by=.1)
#
# plot(y_star,ppd(9,390,y_star),type="l")
#
#
#
#
# a = 3
# b = 200
# Gamma = rgamma(n=1000, shape=a, rate=b)
# IG = 1/Gamma
# mean(IG)
# Normal = rnorm(n=1000,mean = 500,sd = sqrt(IG/0.1))
# posterior = rnorm(n=1000,mean = Normal,sd = IG)
# mean(posterior)
z <- rgamma(1000, shape=16.5, rate=6022.9)
sig2 <- 1/z
mu <- rnorm(1000, mean=609.3, sd=sqrt(sig2/27.1))
#95% equal-tailed credible interval for mu: quantiles of the simulated values
quantile(x=mu, probs=c(0.025, 0.975))
#http://www.randomservices.org/random/data/Challenger2.txt
# 23 previous space shuttle launches before the Challenger disaster
# T is the temperature in Fahrenheit, I is the O-ring damage index
oring=read.table("http://www.randomservices.org/random/data/Challenger2.txt",header=T)
#use the attach command to be able to access the labels.
attach(oring)# with labels T and I
#note: attach masks T (= TRUE)
plot(T,I)
#lm: linear model
#lm(y~x)
oring.lm=lm(I~T)
summary(oring.lm)
# add fitted line to scatterplot
lines(T,fitted(oring.lm))
# 95% posterior interval for the slope
#qt gives quantile function: qt(p, df, ncp, lower.tail = TRUE, log.p = FALSE)
-0.24337 - 0.06349*qt(p=.975,df=21)
-0.24337 + 0.06349*qt(.975,21)
# note that these are the same as the frequentist confidence intervals
#when we're using the standard reference prior for Bayesian analysis.
# the Challenger launch was at 31 degrees Fahrenheit
# how much o-ring damage would we predict?
# y-hat = intercept + slope*t
18.36508-0.24337*31 #10.82052
coef(oring.lm) #get coefficience information
coef(oring.lm)[1] + coef(oring.lm)[2]*31
# posterior prediction interval (same as frequentist) for y*|y (95% credibility interval)
predict(oring.lm,data.frame(T=31),interval="predict")
10.82052-2.102*qt(.975,21)*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))
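# upper end of the same 95% interval (mirrors the lower-bound line above)
10.82052+2.102*qt(.975,21)*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))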
# posterior probability that the predicted damage index at 31F is greater than zero;
# using the reference prior allows us to use all the standard regression tools.
1-pt((0-10.82052)/(2.102*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))),21)
##multiple regression example.
#http://www.randomservices.org/random/data/Galton.txt
# Galton's seminal data on predicting the height of children from the
# heights of the parents, all in inches
heights=read.table("http://www.randomservices.org/random/data/Galton.txt",header=T)
attach(heights)# add data frame with labels
names(heights)
#lm is used to fit linear models. It can be used to carry out regression,
#single stratum analysis of variance and analysis of covariance
pairs(heights)
summary(lm(Height~Father+Mother+Gender+Kids))
summary(lm(Height~Father+Mother+Gender))
heights.lm=lm(Height~Father+Mother+Gender)
resid(heights.lm) #List of residuals
plot(density(resid(heights.lm))) #A density plot
qqnorm(resid(heights.lm)) # A quantile normal plot - good for checking normality
qqline(resid(heights.lm))
# each extra inch taller a father is is correlated with 0.4 inch extra
#height in the child
# each extra inch taller a mother is is correlated with 0.3 inch extra
#height in the child
# a male child is on average 5.2 inches taller than a female child
# 95% posterior interval for the difference in height by gender
5.226 - 0.144*qt(.975,894)
5.226 + 0.144*qt(.975,894)
# posterior prediction interval (same as frequentist) for y*|y (95% credibility interval)
predict(heights.lm,data.frame(Father=68,Mother=64,Gender="M"),interval="predict")
predict(heights.lm,data.frame(Father=68,Mother=64,Gender="F"),interval="predict")
#########################################################################
dat=read.table("http://www.stat.ufl.edu/~winner/data/pgalpga2008.dat",header=F)
datF = subset(dat, V3==1, select=1:2) #V3 is the third column; V3==1 marks female golfers (LPGA tour)
datM = subset(dat, V3==2, select=1:2)
attach(datF)
plot(datF)
datF.lm=lm(V2~V1)
summary(datF.lm)#posterior mean estimate of the slope parameter
#Find the 95% posterior predictive interval for V2 when V1 = 260:
predict(datF.lm,data.frame(V1=260),interval="predict")
dat=read.table("http://www.stat.ufl.edu/~winner/data/pgalpga2008.dat",header=F)
dat$V3[dat$V3 == 1] <- 0
dat$V3[dat$V3 == 2] <- 1
summary(dat)
attach(dat)
dat.lm=lm(V2~V1+V3)
summary(dat.lm)
plot(fitted(dat.lm), residuals(dat.lm))
#The residuals appear to be random and lack any patterns or trends.
#However, there is at least one outlier (extreme observation) that we may want to investigate.
#Outliers can strongly influence model estimates.
#replace B with b in column nm
junk <- data.frame(x <- rep(LETTERS[1:4], 3), y <- letters[1:12])
colnames(junk) <- c("nm", "val")
typeof(junk$val)# integer
#Easier to convert nm to characters and then make the change:
junk$nm <- as.character(junk$nm)
junk$nm[junk$nm == "B"] <- "b"
#EDIT: And if indeed you need to maintain nm as factors, add this in the end:
junk$nm <- as.factor(junk$nm)
#convert back to factor in case that was an important part of the data structure.
within(junk, levels(nm)[levels(nm) == "B"] <- "b")# method 1
# add an additional level "b" to the factor attributes.
levels(junk$nm) <- c(levels(junk$nm), "b") # method 2: levels find all letters in the column
junk$nm[junk$nm == "B"] <- "b"
| /Capstone Project/Regression.R | no_license | jpark77/Bayesian-Statistics-Techniques-and-Models-from-UCSC-on-Coursera | R | false | false | 5,308 | r | #
# ppd = function(a, b, y_star) {
# b^a/(b+y_star)^(a+1)
# }
#
# y_star=seq(from=0,to=120,by=.1)
#
# plot(y_star,ppd(9,390,y_star),type="l")
#
#
#
#
# a = 3
# b = 200
# Gamma = rgamma(n=1000, shape=a, rate=b)
# IG = 1/Gamma
# mean(IG)
# Normal = rnorm(n=1000,mean = 500,sd = sqrt(IG/0.1))
# posterior = rnorm(n=1000,mean = Normal,sd = IG)
# mean(posterior)
z <- rgamma(1000, shape=16.5, rate=6022.9)
sig2 <- 1/z
mu <- rnorm(1000, mean=609.3, sd=sqrt(sig2/27.1))
#95% equal-tailed credibility for mu: quantile/percentiles of the simulated value;
quantile(x=mu, probs=c(0.025, 0.975))
#http://www.randomservices.org/random/data/Challenger2.txt
# 23 previous space shuttle launches before the Challenger disaster
# T is the temperature in Fahrenheit, I is the O-ring damage index
oring=read.table("http://www.randomservices.org/random/data/Challenger2.txt",header=T)
#use the attach command to be able to access the labels.
attach(oring)# with label T and I
#note: attach() masks T (which normally means TRUE)
plot(T,I)
#lm: linear model
#lm(y~x)
oring.lm=lm(I~T)
summary(oring.lm)
# add fitted line to scatterplot
lines(T,fitted(oring.lm))
# 95% posterior interval for the slope
#qt gives quantile function: qt(p, df, ncp, lower.tail = TRUE, log.p = FALSE)
-0.24337 - 0.06349*qt(p=.975,df=21)
-0.24337 + 0.06349*qt(.975,21)
# note that these are the same as the frequentist confidence intervals
#when we're using the standard reference prior for Bayesian analysis.
# the Challenger launch was at 31 degrees Fahrenheit
# how much o-ring damage would we predict?
# y-hat = intercept + slope*t
18.36508-0.24337*31 #10.82052
coef(oring.lm) #get the coefficient estimates
coef(oring.lm)[1] + coef(oring.lm)[2]*31
# posterior prediction interval (same as frequentist) for y*|y (95% credibility interval)
predict(oring.lm,data.frame(T=31),interval="predict")
10.82052-2.102*qt(.975,21)*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))
# posterior predictive probability that the damage index at 31 degrees is greater than zero
# using the reference prior, allows us to use all the standard regression tools.
1-pt((0-10.82052)/(2.102*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))),21)
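# Monte-Carlo check of the same predictive interval (a sketch; under the
# reference prior, y*|y is a shifted and scaled t with 21 df):
se.pred <- 2.102*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))
y.star <- 10.82052 + se.pred*rt(100000, df=21)
quantile(y.star, c(.025,.975)) # should be close to the predict() interval above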
##multiple regression example.
#http://www.randomservices.org/random/data/Galton.txt
# Galton's seminal data on predicting the height of children from the
# heights of the parents, all in inches
heights=read.table("http://www.randomservices.org/random/data/Galton.txt",header=T)
attach(heights)# add data frame with labels
names(heights)
#lm is used to fit linear models. It can be used to carry out regression,
#single stratum analysis of variance and analysis of covariance
pairs(heights)
summary(lm(Height~Father+Mother+Gender+Kids))
summary(lm(Height~Father+Mother+Gender))
heights.lm=lm(Height~Father+Mother+Gender)
resid(heights.lm) #List of residuals
plot(density(resid(heights.lm))) #A density plot
qqnorm(resid(heights.lm)) # A quantile normal plot - good for checking normality
qqline(resid(heights.lm))
# each extra inch of the father's height is correlated with 0.4 inch extra
#height in the child
# each extra inch of the mother's height is correlated with 0.3 inch extra
#height in the child
# a male child is on average 5.2 inches taller than a female child
# 95% posterior interval for the difference in height by gender
5.226 - 0.144*qt(.975,894)
5.226 + 0.144*qt(.975,894)
# posterior prediction interval (same as frequentist) for y*|y (95% credibility interval)
predict(heights.lm,data.frame(Father=68,Mother=64,Gender="M"),interval="predict")
predict(heights.lm,data.frame(Father=68,Mother=64,Gender="F"),interval="predict")
#########################################################################
dat=read.table("http://www.stat.ufl.edu/~winner/data/pgalpga2008.dat",header=F)
datF = subset(dat, V3==1, select=1:2) # V3 is the third column; V3==1 selects female golfers (LPGA tour)
datM = subset(dat, V3==2, select=1:2)
attach(datF)
plot(datF)
datF.lm=lm(V2~V1)
summary(datF.lm)#posterior mean estimate of the slope parameter
#Find the fitted V2 and a 95% posterior predictive interval for V2 when V1 = 260:
predict(datF.lm,data.frame(V1=260),interval="predict")
dat=read.table("http://www.stat.ufl.edu/~winner/data/pgalpga2008.dat",header=F)
dat$V3[dat$V3 == 1] <- 0
dat$V3[dat$V3 == 2] <- 1
summary(dat)
attach(dat)
dat.lm=lm(V2~V1+V3)
summary(dat.lm)
plot(fitted(dat.lm), residuals(dat.lm))
#The residuals appear to be random and lack any patterns or trends.
#However, there is at least one outlier (extreme observation) that we may want to investigate.
#Outliers can strongly influence model estimates.
#replace B with b in column nm
junk <- data.frame(x <- rep(LETTERS[1:4], 3), y <- letters[1:12])
colnames(junk) <- c("nm", "val")
typeof(junk$val)# integer
#Easier to convert nm to characters and then make the change:
junk$nm <- as.character(junk$nm)
junk$nm[junk$nm == "B"] <- "b"
#EDIT: And if indeed you need to maintain nm as factors, add this at the end:
junk$nm <- as.factor(junk$nm)
#convert back to factor in case that was an important part of the data structure.
within(junk, levels(nm)[levels(nm) == "B"] <- "b")# method 1
# add an additional level "b" to the factor attributes.
levels(junk$nm) <- c(levels(junk$nm), "b") # method 2: levels find all letters in the column
junk$nm[junk$nm == "B"] <- "b"
|
#'---
#' title: Merge Split Counts
#' author: Luise Schuller
#' wb:
#' py:
#' - |
#' def getSplitCountFiles(dataset):
#' ids = sa.getIDsByGroup(dataset, assay="RNA")
#' file_stump = cfg.getProcessedDataDir() + f"/aberrant_splicing/datasets/cache/raw-{dataset}/sample_tmp/splitCounts/"
#' return expand(file_stump + "sample_{sample_id}.done", sample_id=ids)
#' log:
#' - snakemake: '`sm str(tmp_dir / "AS" / "{dataset}" / "01_2_splitReadsMerge.Rds")`'
#' params:
#' - setup: '`sm cfg.AS.getWorkdir() + "/config.R"`'
#' - workingDir: '`sm cfg.getProcessedDataDir() + "/aberrant_splicing/datasets"`'
#' threads: 20
#' input:
#' - sample_counts: '`sm lambda w: getSplitCountFiles(w.dataset)`'
#' output:
#' - countsJ: '`sm cfg.getProcessedDataDir() +
#' "/aberrant_splicing/datasets/savedObjects/raw-{dataset}/rawCountsJ.h5"`'
#' - gRangesSplitCounts: '`sm cfg.getProcessedDataDir() +
#' "/aberrant_splicing/datasets/cache/raw-{dataset}/gRanges_splitCounts.rds"`'
#' - gRangesNonSplitCounts: '`sm cfg.getProcessedDataDir() +
#' "/aberrant_splicing/datasets/cache/raw-{dataset}/gRanges_NonSplitCounts.rds"`'
#' - spliceSites: '`sm cfg.getProcessedDataDir() +
#' "/aberrant_splicing/datasets/cache/raw-{dataset}/spliceSites_splitCounts.rds"`'
#' type: script
#'---
saveRDS(snakemake, snakemake@log$snakemake)
source(snakemake@params$setup, echo=FALSE)
dataset <- snakemake@wildcards$dataset
workingDir <- snakemake@params$workingDir
params <- snakemake@config$aberrantSplicing
minExpressionInOneSample <- params$minExpressionInOneSample
register(MulticoreParam(snakemake@threads))
# Limit number of threads for DelayedArray operations
setAutoBPPARAM(MulticoreParam(snakemake@threads))
# Read FRASER object
fds <- loadFraserDataSet(dir=workingDir, name=paste0("raw-", dataset))
# If samples are recounted, remove the merged ones
splitCountsDir <- file.path(workingDir, "savedObjects",
paste0("raw-", dataset), 'splitCounts')
if(params$recount == TRUE & dir.exists(splitCountsDir)){
unlink(splitCountsDir, recursive = TRUE)
}
# Get and merge splitReads for all sample ids
splitCounts <- getSplitReadCountsForAllSamples(fds=fds,
recount=FALSE)
# Extract, annotate and save granges
splitCountRanges <- rowRanges(splitCounts)
# Annotate granges from the split counts
splitCountRanges <- FRASER:::annotateSpliceSite(splitCountRanges)
saveRDS(splitCountRanges, snakemake@output$gRangesSplitCounts)
# Create ranges for non split counts
# Subset by minExpression
maxCount <- rowMaxs(assay(splitCounts, "rawCountsJ"))
passed <- maxCount >= minExpressionInOneSample
# extract granges after filtering
splitCountRanges <- splitCountRanges[passed,]
saveRDS(splitCountRanges, snakemake@output$gRangesNonSplitCounts)
# Extract spliceSiteCoordinates: extract donor and acceptor sites
# take either filtered or full fds
spliceSiteCoords <- FRASER:::extractSpliceSiteCoordinates(splitCountRanges, fds)
saveRDS(spliceSiteCoords, snakemake@output$spliceSites)
message(date(), ": ", dataset, " total no. splice junctions = ",
length(splitCounts))
| /drop/modules/aberrant-splicing-pipeline/Counting/01_2_countRNA_splitReads_merge.R | permissive | mumichae/drop | R | false | false | 3,255 | r |
|
alpha <- 0.05
# Read data from question as x
x <- c(18.5, 3.5, 5.3, 3.3, 9.4, 10.9, 21.6, 53.4, 5.3, 33.7)
len <- length(x)
# Exact Confidence Interval
CI_exact <- c(qgamma(alpha/2, len) / (len * mean(x)), qgamma(1 - alpha/2, len) / (len * mean(x)))
# CI from LRT
f <- function(lambda) lambda*mean(x)-log(lambda)-(0.5*qchisq(1-alpha, 1)+len*log(mean(x))+len)/len
CI_LRT <- c(uniroot(f, c(0,0.05), tol=0.0001)$root, uniroot(f, c(0.05,0.15), tol=0.0001)$root)
# CI from Wald Tests
CI_Wald <- c((1 - qnorm(1 - alpha/2)/sqrt(len)) / mean(x), (1 + qnorm(1 - alpha/2)/sqrt(len)) / mean(x))
# CI from Score Tests
CI_Score <- c((1 - qnorm(1 - alpha/2)/sqrt(len)) / mean(x), (1 + qnorm(1 - alpha/2)/sqrt(len)) / mean(x))
## Plot f over a fine grid to locate its two roots (the LRT interval endpoints)
dd <- seq(0, 0.25, by=0.000001)
tt <- f(dd)
df <- data.frame(dd,tt)
g <- ggplot(df,aes(dd,tt))
g <- g+geom_line(col='red') #red line
g | /assignment4/code/Q1.R | no_license | ShawnNew/McMaster_Stat743_Assignments | R | false | false | 895 | r |
# setwd("D:/Coursera/4. Exploratory Data Analysis (Exdata-004)/Code/Project 2/ExData-Peer_Assgnment2/Data/")
# NEI <- readRDS("summarySCC_PM25.rds")
# SCC <- readRDS("Source_Classification_Code.rds")
# setwd("D:/Coursera/4. Exploratory Data Analysis (Exdata-004)/Code/Project 2/ExData-Peer_Assgnment2/")
# dir()
# Aggregate by sum the total emissions by year
require(dplyr)
library(ggplot2)
# install.packages("gridExtra")
library(gridExtra)
# Leave only emissions from coal combustion-related sources
filtered_Coal_Combusion <- SCC[grepl("Fuel Comb.*Coal", SCC$EI.Sector),]$SCC
NEI_Col_Comb <- NEI[NEI$SCC %in% filtered_Coal_Combusion,]
# Left plot
ggp <- ggplot(
NEI_Col_Comb,
aes(factor(year),
Emissions/1000)) +
geom_bar(stat="identity", fill="darkgreen", alpha = .75, ymin=0) +
guides(fill=FALSE) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (000' tons)"))
# Right plot
pp <- qplot(year, Emissions, data=combustionNEI, geom=c("smooth"), method="lm", ylab=expression("Average amt. of PM"[2.5]*" emitted, in tons"))
# join plot
grid_ggp_n_pp <- grid.arrange(ggp, pp, ncol=2, main = "PM2.5 Coal Combustion Source Emissions Across US from 1999-2008")
dev.copy(png, file="plot4.png", height=480, width=480, bg="transparent")
dev.off() | /Plot4.R | no_license | Gnolam/ExData-Peer_Assgnment2 | R | false | false | 1,299 | r |
# setwd("D:/Coursera/4. Exploratory Data Analysis (Exdata-004)/Code/Project 2/ExData-Peer_Assgnment2/Data/")
# NEI <- readRDS("summarySCC_PM25.rds")
# SCC <- readRDS("Source_Classification_Code.rds")
# setwd("D:/Coursera/4. Exploratory Data Analysis (Exdata-004)/Code/Project 2/ExData-Peer_Assgnment2/")
# dir()
# Aggregate by sum the total emissions by year
require(dplyr)
library(ggplot2)
# install.packages("gridExtra")
library(gridExtra)
# Leave only emissions from coal combustion-related sources
filtered_Coal_Combusion <- SCC[grepl("Fuel Comb.*Coal", SCC$EI.Sector),]$SCC
NEI_Col_Comb <- NEI[NEI$SCC %in% filtered_Coal_Combusion,]
# Left plot
ggp <- ggplot(
NEI_Col_Comb,
aes(factor(year),
Emissions/1000)) +
geom_bar(stat="identity", fill="darkgreen", alpha = .75, ymin=0) +
guides(fill=FALSE) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (000' tons)"))
# Right plot
pp <- qplot(year, Emissions, data=combustionNEI, geom=c("smooth"), method="lm", ylab=expression("Average amt. of PM"[2.5]*" emitted, in tons"))
# join plot
grid_ggp_n_pp <- grid.arrange(ggp, pp, ncol=2, main = "PM2.5 Coal Combustion Source Emissions Across US from 1999-2008")
dev.copy(png, file="plot4.png", height=480, width=480, bg="transparent")
dev.off() |
test_that("commas() works", {
skip_if_remote_src()
expect_equal(
    # `strsplit()` creates a vector that is 1 longer than the number of split characters found
length(strsplit(commas(fact()$fact), ",")[[1]]),
MAX_COMMAS
)
expect_identical(
length(strsplit(commas(fact()$fact), cli::symbol$ellipsis, fixed = TRUE)[[1]]) - 1L,
1L
)
})
test_that("default_local_src() works", {
expect_identical(
default_local_src(),
src_df(env = .GlobalEnv)
)
})
| /tests/testthat/test-format.R | permissive | nzgwynn/dm | R | false | false | 478 | r |
|
library(ape)
testtree <- read.tree("1725_27.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1725_27_unrooted.txt") | /codeml_files/newick_trees_processed/1725_27/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r |
library(GEOquery)
library(limma)
library(Biobase)
library(FNN)
library(gplots)
library(RColorBrewer)
library(factoextra)
library(sva)
gset <- getGEO("GSE63742", GSEMatrix =TRUE, AnnotGPL=TRUE)
if (length(gset) > 1) idx <- grep("GPL1355", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
fvarLabels(gset) <- make.names(fvarLabels(gset))
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
(qx[6]-qx[1] > 50 && qx[2] > 0) ||
(qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC) { ex[which(ex <= 0)] <- NaN
exprs(gset) <- log2(ex) }
##VVVV I'll need a better way to do this... VVVV
gsms <- "000XXXXXXXXXXXXXXXXXXXXX111XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
sml <- c()
for (i in 1:nchar(gsms)) { sml[i] <- substr(gsms,i,i) }
sel <- which(sml != "X")
sml <- sml[sel]
gset <- gset[ ,sel]
##^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#thankfully all of this is fast
sml <- paste("G", sml, sep="") # set group names
fl <- as.factor(sml)
gset$description <- fl
design <- model.matrix(~ description + 0, gset)
colnames(design) <- levels(fl)
fit <- lmFit(gset, design)
cont.matrix <- makeContrasts(G1-G0, levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2, 0.01)
tT <- topTable(fit2, adjust="fdr", sort.by="B", number=250)
tT <- subset(tT, select=c("ID","adj.P.Val","P.Value","t","B","logFC","Gene.symbol"))
#Need to change these so GSEs can be readily compared and organized
#i.e. all GSEs will have a "trmt" variable
varLabels(gset)
gset$Time <- factor(sapply(pData(gset)[["time:ch1"]],function(x) strsplit(x, "\\s+")[[1]][1], USE.NAMES = F))
gset$Treatment <- factor(pData(gset)[["treatment:ch1"]])
a <- grep("///", fData(gset)$Gene.symbol)
fData(gset)$Gene.symbol[a] <- sapply(a, function(x) strsplit(fData(gset)$Gene.symbol[x],"///")[[1]][2])
experimentData(gset)@title <- "Expression data from rat liver tissue during liver regeneration after partial hepatectomy"
save(gset,file="Data/GSE63742.RData")
gset1 <- gset
###GSE33785
gset <- getGEO("GSE33785", GSEMatrix =TRUE, AnnotGPL=TRUE)
if (length(gset) > 1) idx <- grep("GPL6247", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
fvarLabels(gset) <- make.names(fvarLabels(gset))
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
(qx[6]-qx[1] > 50 && qx[2] > 0) ||
(qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC) { ex[which(ex <= 0)] <- NaN
exprs(gset) <- log2(ex) }
gset$Treatment <- factor(gset$`diet:ch1`)
gset$Time <- factor(sapply(pData(gset)[["timepoint:ch1"]],function(x) strsplit(x, "\\s+")[[1]][1], USE.NAMES = F))
experimentData(gset)@title <- "Gene expression in the chronic ethanol-treated rat liver during liver regeneration"
#fData(gset)$Gene.symbol <- fData(gset)$`Gene symbol`
a <- grep("///", fData(gset)$Gene.symbol)
fData(gset)$Gene.symbol[a] <- sapply(a, function(x) strsplit(fData(gset)$Gene.symbol[x],"///")[[1]][2])
gset$ID <- gset$`animal id:ch1`
save(gset,file="Data/GSE33785.RData")
gset2 <- gset
###GSE97429
gset <- getGEO("GSE97429", GSEMatrix =TRUE, AnnotGPL=FALSE)
if (length(gset) > 1) idx <- grep("GPL6247", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
fvarLabels(gset) <- make.names(fvarLabels(gset))
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
(qx[6]-qx[1] > 50 && qx[2] > 0) ||
(qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC) { ex[which(ex <= 0)] <- NaN
exprs(gset) <- log2(ex) }
unique(pData(gset)$`tissue:ch1`)
nrow(pData(gset))
gset$Treatment <- factor(gset$`treatment:ch1`)
gset$Time <- 0
gset$Time <- gset$Time + 24*(gset$`treatment:ch1` != "none")
gset$Time <- factor(gset$Time)
experimentData(gset)@title <- "Gene expression in the liver remnant is significantly affected by the size of partial hepatectomy - an experimental rat study"
fData(gset)$Gene.symbol <- sapply(fData(gset)$`gene_assignment`,function(x) {
if(length(strsplit(x,"//")[[1]] > 1)){
strsplit(x,"//")[[1]][2]}
else{NA}
}
, USE.NAMES = F)
fData(gset)$Gene.symbol <- trimws(fData(gset)$Gene.symbol)
#there appear to be negative values in this dataset
head(exprs(gset)[,gset$Treatment=='none'])
#gset3 is RMA normalized (log2) and median transformed...whatever that last part means
hist(exprs(gset)[,1],breaks = 20)
hist(exprs(gset1)[,1],breaks = 20)
#The data is also centered...
median(exprs(gset1)[,1])
#7.09
#this is so sloppy but w/e...
hist(7+exprs(gset)[,1],breaks = 20)
exprs(gset) <- 7+exprs(gset)
save(gset,file="Data/GSE97429.RData")
gset3 <- gset
#GSE67022
gset <- getGEO("GSE67022", GSEMatrix =TRUE, AnnotGPL=FALSE)
if (length(gset) > 1) idx <- grep("GPL6247", attr(gset, "names")) else idx <- 1
gset <- gset[[idx]]
fvarLabels(gset) <- make.names(fvarLabels(gset))
ex <- exprs(gset)
qx <- as.numeric(quantile(ex, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
(qx[6]-qx[1] > 50 && qx[2] > 0) ||
(qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC) { ex[which(ex <= 0)] <- NaN
exprs(gset) <- log2(ex) }
varLabels(gset)
gset$`time point:ch1`
a <- sapply(pData(gset)[["injected with:ch1"]],function(x) strsplit(x, "\\s+")[[1]][1], USE.NAMES = F)
for(i in grep(',',a)){
a[i] <- substr(a[i],1,nchar(a[i])-1)
}
gset$Treatment <- factor(paste(a,gset$`molecule subtype:ch1`,sep="."))
gset$Time <- factor(gset$`time point:ch1`)
fData(gset)$Gene.symbol <- sapply(fData(gset)$`gene_assignment`,function(x) {
if(length(strsplit(x,"//")[[1]] > 1)){
strsplit(x,"//")[[1]][2]}
else{NA}
}
, USE.NAMES = F)
fData(gset)$Gene.symbol <- trimws(fData(gset)$Gene.symbol)
experimentData(gset)@title <- "Regulation of Rat Hepatic Translation by mTOR"
save(gset,file="Data/GSE67022.RData")
gset4 <- gset
#for some reason gset4 can't be put in hm.data
grep(tT$Gene.symbol[8],fData(test1.cut)$Gene.symbol)
str(fData(test1.cut)$Gene.symbol)
str(fData(gset1)$Gene.symbol)
match(tT$Gene.symbol[8],fData(test1.cut)$Gene.symbol)
exprs(test1.cut)[13670,]
#ALSO treatment/level selection on gset4 appears to be inaccurate
###############################
#Making a combined variable
###############################
test.cut$dvar <- factor(match(paste(test.cut$Treatment,test.cut$Time),unique(paste(test.cut$Treatment,test.cut$Time))))
fData(test.cut)$logFC <- as.numeric(apply(exprs(test.cut),1,function(x) mean(x[test.cut$geo_accession[test.cut$dvar==2]]) - mean(x[test.cut$geo_accession[test.cut$dvar==1]])))
fData(test.cut)$`Gene symbol`[fData(test.cut)$`Gene symbol` == ""] <- NA
#fData(test.cut)$`Gene symbol`[which(fData(test.cut)$`Gene symbol` %in% tT$Gene.symbol)]
#tT2 <- subset(fData(test.cut)[which(fData(test.cut)$`Gene symbol` %in% tT$Gene.symbol),], select = c("Gene symbol","logFC"))
tT$logFC.d2 <- fData(test.cut)$logFC[match(tT$Gene.symbol,fData(test.cut)$`Gene symbol`)]
tT2 <- subset(tT, select = c("Gene.symbol","logFC","logFC.d2"))
p <- heatmap.2(as.matrix(tT2[,-1]), trace = 'none', dendrogram = 'row', labCol = c("dataset2","dataset1"),srtCol = 0, adjCol = c(0.5,0.6), cexCol = 3, labRow = NA)
###################################
#individual sample heat map
###################################
c1 <- which(gset1$Treatment == "partial hepatectomy" & gset1$Time == "0h")
c2 <- which(gset1$Treatment == "partial hepatectomy" & gset1$Time == "24h")
test1.cut <- gset1[,c(c1,c2)]
c1 <- which(gset2$Treatment == "Carbohydrate" & gset2$Time == 0)
c2 <- which(gset2$Treatment == "Carbohydrate" & gset2$Time == 24)
test2.cut <- gset2[,c(c1,c2)]
#c3 <- which(test2.cut$`animal id:ch1` %in% names(which(table(test2.cut$`animal id:ch1`)>1)))
if(any(table(test2.cut$ID)>1)){
c3 <- which(test2.cut$ID %in% names(which(table(test2.cut$ID)>1)))
test2.cut <- test2.cut[,c3]
}
test2.cut <- test2.cut[,c3]
#fData(test2.cut)$Gene.symbol <- fData(test2.cut)$`Gene symbol`
#match tT to test.cut
tT <- tT[tT$Gene.symbol != "",]
test1.sig <- exprs(test1.cut)[match(tT$Gene.symbol,fData(test1.cut)$Gene.symbol),]
test2.sig <- exprs(test2.cut)[match(tT$Gene.symbol,fData(test2.cut)$Gene.symbol),]
#combine and trim data
hm.data <- cbind(test1.sig,test2.sig)
a <- apply(hm.data,1,function(x) sum(!is.na(x)) == length(x))
hm.data <- hm.data[a,]
d1lab <- sapply(rownames(pData(test1.cut)), function(x) paste("d1",pData(test1.cut)[x,"Treatment"],pData(test1.cut)[x,"Time"],sep = "/"),USE.NAMES = F)
d2lab <- sapply(rownames(pData(test2.cut)), function(x) paste("d2",pData(test2.cut)[x,"Treatment"],pData(test2.cut)[x,"Time"],sep = "/"),USE.NAMES = F)
invisible(q <- heatmap.2(hm.data,
trace = 'none',
Colv = NULL,
dendrogram = 'row',
srtCol = 90,
adjCol = c(0.8,0.6),
labCol = c(d1lab,d2lab),
cexCol = 1,
labRow = NA,
margins = c(7,1),
keysize = 1))
kl.data <- hm.data[p$rowInd,]
rownames(pData(test1.cut))[test1.cut$Time=="24h"]
a <- rowMeans(kl.data[,4:6])-rowMeans(kl.data[,1:3])
b <- rowMeans(kl.data[,10:12])-rowMeans(kl.data[,7:9])
c<-KL.divergence(a,b,100)
###################################
#individual sample heat map
###################################
rbind(subset(pData(test1.cut), select = c("Treatment","Time", "geo_accession")),
subset(pData(test2.cut), select = c("Treatment","Time", "geo_accession")))
test2.cut.match <- exprs(test2.cut)[match(fData(test1.cut)$Gene.symbol,fData(test2.cut)$Gene.symbol),]
a <- rowMeans(test2.cut.match[,1:3])
b <- exprs(test2.cut.match)[,4:6]
#b <- rowMeans(exprs(test2.cut)[,4:6]) - rowMeans(exprs(test2.cut[,1:3]))
ma.data <- cbind(exprs(test1.cut),test2.cut.match)
par(mfrow=c(ceiling(ncol(ma.data)/3),3))
for (i in 1:ncol(ma.data)){
plotMA(ma.data,i)
}
###################################
#Clustering
###################################
#basically need to start from the heat map data
#try clustering on treatment condition means
test1.cut$dvar <- factor(match(paste(test1.cut$Treatment,test1.cut$Time),unique(paste(test1.cut$Treatment,test1.cut$Time))))
test2.cut$dvar <- factor(match(paste(test2.cut$Treatment,test2.cut$Time),unique(paste(test2.cut$Treatment,test2.cut$Time))))
a <- rownames(pData(test1.cut))[test1.cut$dvar==1]
b <- rownames(pData(test1.cut))[test1.cut$dvar==2]
c <- rownames(pData(test2.cut))[test2.cut$dvar==1]
d <- rownames(pData(test2.cut))[test2.cut$dvar==2]
cl.data <- cbind(rowMedians(hm.data[,a]),rowMedians(hm.data[,b]),rowMedians(hm.data[,c]),rowMedians(hm.data[,d]))
#not doing any scaling...
#Trying k-means; nstart = 5 restarts from 5 random initializations and keeps the best fit
wss <- sapply(2:15, function(x) {kmeans(cl.data,x,nstart = 5)$tot.withinss})
plot(2:15, wss,
type="b", pch = 19, frame = FALSE,
xlab="Number of clusters K",
ylab="Total within-clusters sum of squares")
#I think k=6 looks good
cl.test <- kmeans(cl.data,6,nstart = 5)
selcol2 <- brewer.pal(6,"Set1")
#make a new heat map, ordered the same as the "main" one, but add colors for the clusters
heatmap.2(cl.data[p$rowInd,],
trace = 'none',
Colv = NULL,
Rowv = NULL,
dendrogram = 'none',
srtCol = 90,
adjCol = c(0.8,0.6),
cexCol = 1,
labRow = NA,
margins = c(7,1),
keysize = 1,
RowSideColors = selcol2[cl.test$cluster[p$rowInd]])
#Trying with hclust
cl.fit <- hclust(dist(cl.data))
fviz_nbclust(cl.data, hcut, method = "wss")
cl.test <- cutree(cl.fit, k=5)
selcol <- brewer.pal(5,"Set1")
heatmap.2(cl.data[cl.fit$order,],
trace = 'none',
Colv = NULL,
dendrogram = 'row',
srtCol = 90,
adjCol = c(0.8,0.6),
#cexCol = 1,
#labRow = NA,
lmat = rbind(c(0,0,4),c(3,1,2),c(5,0,0)),
lhei = c(0.2,4,0.2),
lwid = c(1,0.2,4),
margins = c(2,20),
key = F,
RowSideColors = selcol[cl.test[cl.fit$order]],
labRow = NA)
ggplot(data.frame(x=1:5,y=rep(1,5)),aes(x=x, y=y))+
geom_bar(fill = selcol, stat = "identity", show.legend = F)+
coord_equal()+
xlab("Clusters")+
geom_text(aes(label=1:5), size=10, nudge_y = -0.5)+
theme(panel.background = element_rect(fill = "white"),
axis.ticks.y = element_blank(),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
axis.ticks.x = element_blank(),
axis.text.x = element_blank(),
axis.title.x = element_text(size=20))
probeID <- rownames(hm.data)[cl.fit$order[cl.test==2]]
fData(test1.cut)[probeID,"Gene.symbol"]
#try clustering on fold change
###################################
#ComBat normalization
###################################
#need to match the datasets based on some common criteria--in this case gene symbols. Maybe also try Entrez/refseq/Ensembl/...
#collapse datasets
gset1.1 <- test1.cut[fData(test1.cut)$Gene.symbol != "",]
gset2.1 <- test2.cut[fData(test2.cut)$Gene.symbol != "",]
pData(gset1.1)$batch <- 1
pData(gset2.1)$batch <- 2
pheno_ <- rbind(subset(pData(gset1.1),select=c("Treatment", "Time", "batch")), subset(pData(gset2.1),select=c("Treatment","Time","batch")))
match.gl <- fData(gset2.1)$Gene.symbol[match(unique(fData(gset1.1)$Gene.symbol),fData(gset2.1)$Gene.symbol)]
match.gl <- match.gl[!is.na(match.gl)]
tT.trim <- tT[tT$Gene.symbol != "",]
exprs_1 <- cbind(exprs(gset1.1)[tT.trim$ID,],exprs(gset2.1)[match(tT.trim$Gene.symbol,fData(gset2.1)$Gene.symbol),])
a <- apply(exprs_1, 1, function(x) sum(!is.na(x)) == length(x))
exprs_1 <- exprs_1[a,]
match.gl <- match.gl[which(!(match.gl %in% tT.trim$Gene.symbol))]
exprs_2 <- cbind(exprs(gset1.1)[match(match.gl,fData(gset1.1)$Gene.symbol),],exprs(gset2.1)[match(match.gl,fData(gset2.1)$Gene.symbol),])
exprs_ <- rbind(exprs_1, exprs_2)
batch = pheno_$batch
modcombat = model.matrix(~1, data=pheno_)
combat_edata = ComBat(dat=exprs_, batch=batch, mod=modcombat, par.prior=TRUE, prior.plots=TRUE)
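# Quick visual check (sketch): PCA of samples before/after ComBat; the two
# batches should overlap more after correction.
pca_raw <- prcomp(t(na.omit(exprs_)))
pca_adj <- prcomp(t(na.omit(combat_edata)))
par(mfrow=c(1,2))
plot(pca_raw$x[,1:2], col=batch, pch=19, main="before ComBat")
plot(pca_adj$x[,1:2], col=batch, pch=19, main="after ComBat")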
###################################
#random stuff
###################################
d <- model.matrix(~ dvar + 0, test2.cut)
colnames(d) <- paste("G",levels(test2.cut$dvar),sep="")
fit <- lmFit(test2.cut, d)
cont.matrix <- makeContrasts(G2-G1, levels=d)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2, 0.01)
tT <- topTable(fit2, adjust="fdr", sort.by="B", number=250)
tT <- subset(tT, select=c("ID","Gene.symbol","logFC"))
head(sort(table(fData(gset)$Gene.symbol), decreasing = T),20)
sum(table(fData(gset)$Gene.symbol)>2)
length(table(fData(gset)$Gene.symbol))
grep("Cdk",fData(gset)$Gene.symbol,value = T)
sum(exprs(gset)<0)
| /data_preprocess.R | no_license | qu4drupole/liver_trxn_shinyapp | R | false | false | 14,710 | r |
|
#UI Output/Namespace Section----
datatable2UI <- function(id) {
ns <- NS(id)
uiOutput(ns("datatable2_ui"))
}
#Module Section----
datatable2 <- function(input, output, session) {
#UI Section----
output$datatable2_ui <- renderUI({
tagList(
fluidRow(
column(6,
htmlOutput(session$ns("datatable2_header"), class = 'shuttle-box-2'))
),
div(
fluidRow(
box(
DT::dataTableOutput(session$ns("standard_table"))
),
box(width = 2,
h4("Cell Clicked Info"),
tags$i("Click on any cell in the datatable."),
verbatimTextOutput(session$ns("cell_info"))
)
)
)
)
})
#Server Section----
output$standard_table <- DT::renderDataTable({
iris %>%
DT::datatable(.,
class = 'cell-border stripe',
selection = 'none',
rownames=FALSE)
})
output$cell_info <- renderPrint({
input$standard_table_cell_clicked
})
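  # `input$standard_table_cell_clicked` is a list with `row`, `col`, and `value`
  # elements (DT's click payload); a downstream reactive might use just the
  # value, e.g. (sketch):
  # clicked_value <- reactive(input$standard_table_cell_clicked$value)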
# Project description document
output$datatable2_header <- renderUI({includeMarkdown(paste0("base/gemini/datatable/datatable2.md"))})
} | /base/gemini/datatable/datatable2.R | no_license | dlotto/flagship | R | false | false | 1,215 | r |
\name{smooth.basisPar}
\alias{smooth.basisPar}
\title{
Smooth Data Using a Directly Specified Roughness Penalty
}
\description{
Smooth (argvals, y) data with roughness penalty defined by the
remaining arguments. This function acts as a wrapper for those who want
to bypass the step of setting up a functional parameter object before invoking
function \code{smooth.basis}. This function simply does this setup for the
user. See the help file for functions \code{smooth.basis} and \code{fdPar}
for further details, and more complete descriptions of the arguments.
}
\usage{
smooth.basisPar(argvals, y, fdobj=NULL, Lfdobj=NULL,
lambda=0, estimate=TRUE, penmat=NULL,
wtvec=NULL, fdnames=NULL, covariates=NULL,
method="chol", dfscale=1)
}
\arguments{
\item{argvals}{
a set of argument values corresponding to the observations in array
\code{y}. In most applications these values will be common to all curves
and all variables, and therefore be defined as a vector or as a matrix
with a single column. But it is possible that these argument values
will vary from one curve to another, and in this case \code{argvals} will
be input as a matrix with rows corresponding to observation points and
columns corresponding to curves. Argument values can even vary from one
variable to another, in which case they are input as an array with
dimensions corresponding to observation points, curves and variables,
respectively. Note, however, that the number of observation points per
curve and per variable may NOT vary. If it does, then curves and variables
must be smoothed individually rather than by a single call to this function.
The default value for \code{argvals} are the integers 1 to \code{n}, where
\code{n} is the size of the first dimension in argument \code{y}.
}
\item{y}{
    a set of values of curves at discrete sampling points or
argument values. If the set is supplied as a matrix object, the rows must
correspond to argument values and columns to replications, and it will be
assumed that there is only one variable per observation. If
\code{y} is a three-dimensional array, the first dimension
corresponds to argument values, the second to replications, and the
third to variables within replications. If \code{y} is a vector,
only one replicate and variable are assumed. If the data
come from a single replication but multiple vectors, such as data
on coordinates for a single space curve, then be sure to coerce
the data into an array object by using the \code{as.array} function
with one as the central dimension length.
}
\item{fdobj}{
One of the following:
\itemize{
\item{fd}{
a functional data object (class \code{fd})
}
\item{basisfd}{
a functional basis object (class \code{basisfd}), which is
converted to a functional data object with the identity matrix
as the coefficient matrix.
}
\item{fdPar}{
a functional parameter object (class \code{fdPar})
}
\item{integer}{
a positive integer giving the order of a B-spline basis, which is
further converted to a functional data object with the identity
matrix as the coefficient matrix.
}
\item{matrix or array}{replaced by fd(fdobj)}
\item{NULL}{
Defaults to fdobj = create.bspline.basis(argvals).
}
}
}
\item{Lfdobj}{
either a nonnegative integer or a linear differential operator
object.
If \code{NULL}, Lfdobj depends on fdobj[['basis']][['type']]:
\itemize{
\item{bspline}{
Lfdobj <- int2Lfd(max(0, norder-2)), where norder =
norder(fdobj).
}
\item{fourier}{
Lfdobj = a harmonic acceleration operator:
\code{Lfdobj <- vec2Lfd(c(0,(2*pi/diff(rng))^2,0), rng)}
where rng = fdobj[['basis']][['rangeval']].
}
\item{anything else}{Lfdobj <- int2Lfd(0)}
}
}
\item{lambda}{
a nonnegative real number specifying the amount of smoothing
to be applied to the estimated functional parameter.
}
\item{estimate}{
a logical value: if \code{TRUE}, the functional parameter is
estimated, otherwise, it is held fixed.
}
\item{penmat}{
a roughness penalty matrix. Including this can eliminate the need
to compute this matrix over and over again in some types of
calculations.
}
\item{wtvec}{
typically a vector of length \code{n} that is the length of \code{argvals}
    containing weights for the values to be smoothed. However, it may also
be a symmetric matrix of order \code{n}. If \code{wtvec} is a vector,
all values must be positive, and if it is a symmetric matrix, this must
be positive definite. Defaults to all weights equal to 1.
}
\item{fdnames}{
a list of length 3 containing character vectors of names for the
following:
\itemize{
\item{args}{
name for each observation or point in time at which data are
collected for each 'rep', unit or subject.
}
\item{reps}{
name for each 'rep', unit or subject.
}
\item{fun}{
name for each 'fun' or (response) variable measured repeatedly
(per 'args') for each 'rep'.
}
}
}
\item{covariates}{
    the observed values in \code{y} are assumed to be primarily determined by
    the height of the curve being estimated, but from time to time
certain values can also be influenced by other known variables. For
example, multi-year sets of climate variables may be also determined by
    the presence or absence of an El Nino event, or a volcanic eruption.
One or more of these covariates can be supplied as an \code{n} by
\code{p} matrix, where \code{p} is the number of such covariates. When
such covariates are available, the smoothing is called "semi-parametric."
Matrices or arrays of regression coefficients are then estimated that
    define the impacts of each of these covariates for each curve and each
variable.
}
\item{method}{
by default the function uses the usual textbook equations for computing
the coefficients of the basis function expansions. But, as in regression
analysis, a price is paid in terms of rounding error for such
computations since they involved cross-products of basis function
values. Optionally, if \code{method} is set equal to the string "qr",
the computation uses an algorithm based on the qr-decomposition which
is more accurate, but will require substantially more computing time
when \code{n} is large, meaning more than 500 or so. The default
is "chol", referring the Choleski decomposition of a symmetric positive
definite matrix.
}
\item{dfscale}{
the generalized cross-validation or "gcv" criterion that is often used
to determine the size of the smoothing parameter involves the
    subtraction of a measure of degrees of freedom from \code{n}. Chong
Gu has argued that multiplying this degrees of freedom measure by
a constant slightly greater than 1, such as 1.2, can produce better
decisions about the level of smoothing to be used. The default value
is, however, 1.0.
}
}
\details{
1. if(is.null(fdobj))fdobj <- create.bspline.basis(argvals). Else
if(is.integer(fdobj)) fdobj <- create.bspline.basis(argvals, norder =
fdobj)
2. fdPar
3. smooth.basis
}
\value{
The output of a call to \code{smooth.basis}, which is an object of
class \code{fdSmooth}, being a list of length 8 with the following
components:
\item{fd}{
a functional data object that smooths the data.
}
\item{df}{
a degrees of freedom measure of the smooth
}
\item{gcv}{
the value of the generalized cross-validation or GCV criterion. If
there are multiple curves, this is a vector of values, one per
curve. If the smooth is multivariate, the result is a matrix of gcv
values, with columns corresponding to variables.
}
% \item{coef}{
% the coefficient matrix or array for the basis function expansion of
% the smoothing function
% }
\item{SSE}{
the error sums of squares. SSE is a vector or a matrix of the same
size as 'gcv'.
}
\item{penmat}{
the penalty matrix.
}
\item{y2cMap}{
the matrix mapping the data to the coefficients.
}
\item{argvals, y}{input arguments}
}
\references{
Ramsay, James O., and Silverman, Bernard W. (2006), \emph{Functional
Data Analysis, 2nd ed.}, Springer, New York.
Ramsay, James O., and Silverman, Bernard W. (2002), \emph{Applied
Functional Data Analysis}, Springer, New York.
}
\seealso{
\code{\link{df2lambda}},
\code{\link{fdPar}},
\code{\link{lambda2df}},
\code{\link{lambda2gcv}},
\code{\link{plot.fd}},
\code{\link{project.basis}},
\code{\link{smooth.basis}},
\code{\link{smooth.fd}},
\code{\link{smooth.monotone}},
\code{\link{smooth.pos}}
}
\examples{
# see smooth.basis
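#  A minimal sketch (not run): smooth one girl's height record from the Berkeley
#  growth data with an order-6 B-spline basis and a light curvature penalty.
\dontrun{
girl1 <- smooth.basisPar(growth$age, growth$hgtf[, 1], fdobj=6,
                         Lfdobj=int2Lfd(2), lambda=0.01)
plot(girl1$fd)
}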
}
\keyword{smooth}
| /man/smooth.basisPar.Rd | no_license | cran/fda | R | false | false | 8,949 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rev_edge_dir.R
\name{rev_edge_dir}
\alias{rev_edge_dir}
\title{Reverse the direction of all edges in a graph}
\usage{
rev_edge_dir(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
Using a directed graph as input,
reverse the direction of all edges in that graph.
}
\examples{
# Create a graph with a directed tree
graph <-
create_graph() \%>\%
add_balanced_tree(2, 2)
# Inspect the graph's edges
graph \%>\% get_edges()
#> [1] "1 -> 2" "1 -> 3" "2 -> 4" "2 -> 5" "3 -> 6"
#> [6] "3 -> 7"
# Reverse the edge directions such that edges
# are directed toward the root of the tree
graph <- graph \%>\% rev_edge_dir
# Inspect the graph's edges after their reversal
graph \%>\% get_edges()
#> [1] "2 -> 1" "3 -> 1" "4 -> 2" "5 -> 2" "6 -> 3"
#> [6] "7 -> 3"
}
| /man/rev_edge_dir.Rd | no_license | yflyzhang/DiagrammeR | R | false | true | 936 | rd |
library(plotprotein)
### Name: site_data
### Title: downloading protein site
### Aliases: site_data
### Keywords: site_data file
### ** Examples
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function ()
{
library(XML)
library(plyr)
protein = read.table("Protein.txt", sep = "\t", stringsAsFactors = F)
name = protein[2]
url_p = "http://www.uniprot.org/uniprot/"
url_s = "#showFeatures"
url_w = paste(url_p, name, url_s, sep = "")
url = url_w
doc <- htmlParse(url)
position_s = xpathSApply (doc, "//table[@id= 'sitesAnno_section']
/tr/td/ a[@class = 'position tooltipped']",
xmlValue)
name_s = xpathSApply (doc, "//table[@id= 'sitesAnno_section']/tr/td/span[@property='text']",
xmlValue)
s_s <- c()
for (i in 1:length(position_s)) {
        s_s[i] <- gsub(pattern = "\\D", replacement = "x", position_s[i])  # mask every non-digit with "x"
}
s_s <- strsplit(s_s, "xxx")
d1_s <- laply(s_s, function(x) x[1])
d2_s <- laply(s_s, function(x) x[2])
r1_site = d1_s
r2_site = name_s
dfrm_site = data.frame(r1_site, r2_site)
    write.table(dfrm_site, file = "Site.txt", sep = "\t", quote = FALSE,
        row.names = F, col.names = F)
}
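## Hedged usage sketch (assumptions: a tab-separated "Protein.txt" with a
## UniProt accession in its second column sits in the working directory, and
## the UniProt page layout still matches the XPath queries above):
## site_data()
## read.table("Site.txt", sep = "\t")  # inspect the scraped sites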
| /data/genthat_extracted_code/plotprotein/examples/site_data.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,340 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codegurureviewer_operations.R
\name{codegurureviewer_list_recommendations}
\alias{codegurureviewer_list_recommendations}
\title{Returns the list of all recommendations for a completed code review}
\usage{
codegurureviewer_list_recommendations(NextToken, MaxResults,
CodeReviewArn)
}
\arguments{
\item{NextToken}{Pagination token.}
\item{MaxResults}{The maximum number of results that are returned per call. The default is
100.}
\item{CodeReviewArn}{[required] The Amazon Resource Name (ARN) of the
\href{https://docs.aws.amazon.com/codeguru/latest/reviewer-api/API_CodeReview.html}{\code{CodeReview}}
object.}
}
\description{
Returns the list of all recommendations for a completed code review.
}
\section{Request syntax}{
\preformatted{svc$list_recommendations(
NextToken = "string",
MaxResults = 123,
CodeReviewArn = "string"
)
}
}
\keyword{internal}
| /paws/man/codegurureviewer_list_recommendations.Rd | permissive | jcheng5/paws | R | false | true | 942 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{vehicle_ownership}
\alias{vehicle_ownership}
\title{Vehicle ownership in the CMAP seven county region}
\format{
A tibble. 40 rows and 3 variables
\describe{
\item{county}{Char. Name of county}
\item{number_of_veh}{Char. Number of vehicles owned by household}
\item{pct}{Numeric. Share of households with the given number of vehicles (values between 0 and 1)}
}
}
\source{
CMAP Travel Inventory Survey Data Summary \url{https://www.cmap.illinois.gov/documents/10180/77659/Travel+Inventory+Survey+Data+Summary_weighted_V2.pdf/d4b33cdd-1c44-4322-b32f-2f54b85207cb}
}
\usage{
vehicle_ownership
}
\description{
A test dataset containing vehicle ownership rates in the seven county region
of northeastern Illinois.
}
\examples{
# A stacked bar chart
ggplot(vehicle_ownership,
aes(x = county, y = pct, fill = number_of_veh)) +
geom_bar(position = position_stack(), stat = "identity")
}
\keyword{datasets}
| /man/vehicle_ownership.Rd | permissive | CMAP-REPOS/cmapplot | R | false | true | 1,025 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{vehicle_ownership}
\alias{vehicle_ownership}
\title{Vehicle ownership in the CMAP seven county region}
\format{
A tibble. 40 rows and 3 variables
\describe{
\item{county}{Char. Name of county}
\item{number_of_veh}{Char. Number of vehicles owned by household}
\item{pct}{Numeric. Share of households with the given number of vehicles (values between 0 and 1)}
}
}
\source{
CMAP Travel Inventory Survey Data Summary \url{https://www.cmap.illinois.gov/documents/10180/77659/Travel+Inventory+Survey+Data+Summary_weighted_V2.pdf/d4b33cdd-1c44-4322-b32f-2f54b85207cb}
}
\usage{
vehicle_ownership
}
\description{
A test dataset containing vehicle ownership rates in the seven county region
of northeastern Illinois.
}
\examples{
# A stacked bar chart
ggplot(vehicle_ownership,
aes(x = county, y = pct, fill = number_of_veh)) +
geom_bar(position = position_stack(), stat = "identity")
}
\keyword{datasets}
|
suppressMessages(library(DESeq2))
suppressMessages(library(pheatmap))
suppressMessages(library(gplots))
suppressMessages(library(UpSetR))
suppressMessages(library(ggplot2))
suppressMessages(library(grid))
suppressMessages(library(plyr))
require(graphics)
require(grDevices)
suppressMessages(library(RColorBrewer))
suppressMessages(library(viridis))
suppressMessages(library(org.Mm.eg.db))
suppressMessages(library(rentrez))
suppressMessages(library(goseq))
suppressMessages(library(dendextend))
suppressMessages(library(GOSemSim))
suppressMessages(library(reshape2))
suppressMessages(library(GO.db))
suppressMessages(library(annotate))
suppressMessages(library(moe430a.db))
suppressMessages(library(WGCNA))
allowWGCNAThreads()
options(stringsAsFactors = FALSE)
suppressMessages(library(igraph))
suppressMessages(library(clusterProfiler))
suppressMessages(library(threejs))
suppressMessages(library(htmlwidgets))
suppressMessages(library(pathview))
suppressMessages(library(xlsx))
suppressMessages(library(limma))
suppressMessages(library(reshape))
suppressMessages(library(plotly))
#suppressMessages(library(RamiGO))
suppressMessages(library(ggfortify))
suppressMessages(library(gridExtra))
suppressMessages(library(sinaplot))
suppressMessages(library(dplyr))
suppressMessages(library(tibble))
suppressMessages(library(factoextra))
suppressMessages(library(tidyr))
MF_GO = godata('org.Mm.eg.db', ont="MF")
CC_GO = godata('org.Mm.eg.db', ont="CC")
BP_GO = godata('org.Mm.eg.db', ont="BP") | /src/load_libraries.R | no_license | anuprulez/amma | R | false | false | 1,523 | r |
library(caret)
d <- read.csv("/home/sathvik/EC8/ML/Lab/Lab7/test.csv")
ctrl <- trainControl(method = "repeatedcv",
number = 10, repeats = 10)
# mtry defines how many features are randomly selected at each split.
grid_rf <- expand.grid(.mtry = 8)
set.seed(300)
m_rf <- train(smoker ~ ., data = d[1:1003,], method = "rf",metric = "Kappa",
trControl = ctrl, tuneGrid = grid_rf)
m_rf
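# Hedged extension (an assumption, not part of the original exercise): let
# caret tune mtry over a small grid instead of fixing it at 8, reusing the
# same repeated-CV control object.
grid_rf_tune <- expand.grid(.mtry = c(2, 4, 8, 16))
set.seed(300)
m_rf_tune <- train(smoker ~ ., data = d[1:1003,], method = "rf", metric = "Kappa",
                   trControl = ctrl, tuneGrid = grid_rf_tune)
m_rf_tune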
| /Lab7/TT_PT_07_Q_05_Num.R | no_license | sathvikyesprabhu/EC8-ml-lab | R | false | false | 415 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miscFunctions.R
\name{calculateSpeeds}
\alias{calculateSpeeds}
\title{Calculate speeds based on lead/lag}
\usage{
calculateSpeeds(dataset, idCol, timestampCol, x, y)
}
\arguments{
\item{dataset}{the dataset to calculate speeds for}
\item{idCol}{name of the column containing the animal identifier}
\item{timestampCol}{name of the column containing timestamps}
\item{x}{name of the column containing longitude values}
\item{y}{name of the column containing latitude values}
}
\value{
a dataset with speeds calculated
}
\description{
Supporting function for removeSpeedOutliers
}
| /man/calculateSpeeds.Rd | permissive | kaijagahm/vultureUtils | R | false | true | 660 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miscFunctions.R
\name{calculateSpeeds}
\alias{calculateSpeeds}
\title{Calculate speeds based on lead/lag}
\usage{
calculateSpeeds(dataset, idCol, timestampCol, x, y)
}
\arguments{
\item{dataset}{the dataset to remove outliers from}
\item{idCol}{name of the column containing the animal identifier}
\item{timestampCol}{name of the column containing timestamps}
\item{x}{name of the column containing longitude values}
\item{y}{name of the column containing latitude values}
}
\value{
a dataset with speeds calculated
}
\description{
Supporting function for removeSpeedOutliers
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kaplan_meier.R
\name{kaplan_meier}
\alias{kaplan_meier}
\title{Kaplan-Meier}
\usage{
kaplan_meier(times, delta)
}
\arguments{
\item{times}{Survival times (possibly censored)}
\item{delta}{Corresponding observation vector (0 is censored, 1 is actually observed)}
}
\value{
\code{kaplan_meier} Vector of Kaplan-Meier estimates
}
\description{
Non-parametric (Kaplan-Meier) estimates of the survival curve
}
\examples{
# Illustrative call on simulated data (hypothetical values; the return is a
# vector of Kaplan-Meier survival estimates, as described under Value)
times <- c(3, 5, 7, 2, 18, 16, 2, 9, 16, 5)
delta <- c(1, 1, 0, 1, 1, 1, 0, 1, 1, 0)  # 0 = censored, 1 = observed
surv_hat <- kaplan_meier(times, delta)
}
\keyword{survival}
| /man/kaplan_meier.Rd | no_license | vegarsti/fhtboost | R | false | true | 560 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_agent.R
\name{create_agent}
\alias{create_agent}
\title{Create a pointblank agent object}
\usage{
create_agent(
tbl,
name = NULL,
actions = NULL,
end_fns = NULL,
embed_report = FALSE,
reporting_lang = NULL
)
}
\arguments{
\item{tbl}{The input table. This can be a data frame, a tibble, or a
\code{tbl_dbi} object.}
\item{name}{An optional name for the validation plan that the agent will
eventually carry out during the interrogation process. If no value is
provided, a name will be generated based on the current system time.}
\item{actions}{A list containing threshold levels so that all validation
steps can react accordingly when exceeding the set levels. This is to be
created with the \code{\link[=action_levels]{action_levels()}} helper function. Should an action levels
list be used for specific validation step, any default set here will be
overridden.}
\item{end_fns}{A list of functions that should be performed at the end of an
interrogation.}
\item{embed_report}{An option to embed a \strong{gt}-based validation report into
the \code{ptblank_agent} object. If \code{FALSE} (the default) then the table object
will be not generated and available with the agent upon returning from the
interrogation.}
\item{reporting_lang}{The language to use for automatic creation of briefs
(short descriptions for each validation step). By default, \code{NULL} will
create English (\code{"en"}) text. Other options include French (\code{"fr"}),
German (\code{"de"}), Italian (\code{"it"}), and Spanish (\code{"es"}).}
}
\value{
A \code{ptblank_agent} object.
}
\description{
Creates an \emph{agent} object, which is used in a \emph{data quality reporting}
workflow. The overall aim of this workflow is to generate some reporting on
the state of data quality for a target table. We can supply as many
validation step functions as the user wishes to write to get validation
coverage on that table. We supply a single table to \code{create_agent()}, which
becomes the sole focus of the data quality analysis. After application of
one or more validation step functions, we need to use the \code{\link[=interrogate]{interrogate()}}
function to complete the process; the validation step functions, when called
on an agent, are merely instructions up to that point. The \emph{agent} then has
information on how the interrogation went. Reporting of the interrogation can
be performed with the \code{\link[=get_agent_report]{get_agent_report()}} function. If we just need to know
whether all validations completely passed (i.e., all steps had no failing
test units), the \code{\link[=all_passed]{all_passed()}} function should be used.
}
\section{Function ID}{
1-2
}
\examples{
# Create a simple table with a
# column of numerical values
tbl <-
dplyr::tibble(a = c(5, 7, 8, 7))
# Create a pointblank `agent` object
agent <- create_agent(tbl = tbl)
# Then, as with any `ptblank_agent`
# object, we can add validation steps
# to the validation plan and then
# eventually use `interrogate()`
# to perform the validations; here,
# with a single validation step, we
# expect that values in column `a`
# are always greater than 4
agent <-
agent \%>\%
col_vals_gt(vars(a), 4) \%>\%
interrogate()
}
\seealso{
Other Planning and Prep:
\code{\link{action_levels}()},
\code{\link{col_schema}()},
\code{\link{scan_data}()},
\code{\link{validate_rmd}()}
}
\concept{Planning and Prep}
| /man/create_agent.Rd | permissive | bradweiner/pointblank | R | false | true | 3,479 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{netshort1m}
\alias{netshort1m}
\title{A one metre resolution matrix of net shortwave radiation}
\format{
A matrix with 1000 rows and 1000 columns.
}
\usage{
netshort1m
}
\description{
A dataset containing net shortwave radiation values (\ifelse{html}{\out{MJ m<sup>-2</sup> hr<sup>-1</sup>}}{\eqn{MJ m^{-2} hr^{-1}}})
for 2010-05-24 11:00 GMT across the area bounded by 169000, 170000, 12000, 13000
(xmin, xmax, ymin, ymax) using the Ordnance Survey GB Grid Reference
system (CRS: +init=epsg:27700), as produced by \code{\link[=shortwaveveg]{shortwaveveg()}}
}
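\examples{
# Hedged sketch (assumes base graphics suffices for a quick look at the grid):
image(netshort1m, main = "Net shortwave radiation", useRaster = TRUE)
}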
\keyword{datasets}
| /man/netshort1m.Rd | no_license | ilyamaclean/microclima | R | false | true | 685 | rd |
## write.tree.R (2010-12-07)
## Write Tree File in Parenthetic Format
## Copyright 2002-2010 Emmanuel Paradis, Daniel Lawson, and Klaus Schliep
## This file is/was part of the R-package `ape'.
## [Modified from the ape package version by Greg Jordan]
#' Checks a tree for a valid label, optionally removing bad characters on the way. This is currently a no-op,
#' to avoid interfering with the NHX annotation support.
#'
#' @param x, string input
#' @return string
checkLabel <- function(x, ...)
{
x
}
#' Converts a phylo object into a Newick-format string representation. Taken mostly from the codebase of \code{\link{ape}}.
#'
#' @param phy, input phylo object
#' @param digits, the number of digits to use when formatting branch lengths
#' @return string, the phylo object in a string representation
tree.to.string <- function(phy, digits = 10)
{
brl <- !is.null(phy$edge.length)
nodelab <- !is.null(phy$node.label)
phy$tip.label <- checkLabel(phy$tip.label)
if (nodelab) phy$node.label <- checkLabel(phy$node.label)
f.d <- paste("%.", digits, "g", sep = "")
cp <- function(x){
STRING[k] <<- x
k <<- k + 1
}
add.internal <- function(i) {
cp("(")
desc <- kids[[i]]
for (j in desc) {
if (j > n) add.internal(j)
else add.terminal(ind[j])
if (j != desc[length(desc)]) cp(",")
}
cp(")")
if (nodelab && i > n) cp(phy$node.label[i - n]) # fixed by Naim Matasci (2010-12-07)
if (brl && !is.na(phy$edge.length[ind[i]])) {
cp(":")
cp(sprintf(f.d, phy$edge.length[ind[i]]))
}
}
add.terminal <- function(i) {
cp(phy$tip.label[phy$edge[i, 2]])
if (brl && !is.na(phy$edge.length[i])) {
cp(":")
cp(sprintf(f.d, phy$edge.length[i]))
}
}
n <- length(phy$tip.label)
## borrowed from phangorn:
parent <- phy$edge[, 1]
children <- phy$edge[, 2]
kids <- vector("list", n + phy$Nnode)
for (i in 1:length(parent))
kids[[parent[i]]] <- c(kids[[parent[i]]], children[i])
ind <- match(1:max(phy$edge), phy$edge[, 2])
LS <- 4*n + 5
if (brl) LS <- LS + 4*n
if (nodelab) LS <- LS + n
STRING <- character(LS)
k <- 1
cp("(")
getRoot <- function(phy)
phy$edge[, 1][!match(phy$edge[, 1], phy$edge[, 2], 0)][1]
root <- getRoot(phy) # replaced n+1 with root - root has not be n+1
desc <- kids[[root]]
for (j in desc) {
if (j > n) add.internal(j)
else add.terminal(ind[j])
if (j != desc[length(desc)]) cp(",")
}
if (is.null(phy$root.edge)) {
cp(")")
if (nodelab) cp(phy$node.label[1])
cp(";")
}
else {
cp(")")
if (nodelab) cp(phy$node.label[1])
cp(":")
cp(sprintf(f.d, phy$root.edge))
cp(";")
}
paste(STRING, collapse = "")
}
#' Turns a phylo object into a Newick format string representation, including NHX annotations
#' if the tree contains any tags.
#'
#' @method as.character phylo
#' @param phylo input phylo object
#' @param digits integer, the number of digits to output when formatting branch lengths
#' @param ignore.tags boolean, whether or not to ignore tags when creating the Newick / NHX string. When FALSE,
#' tags will be included in the string as NHX annotations.
#' @return character string representing the phylo object
#' @export
as.character.phylo <- function(phylo, digits=10, ignore.tags=F) {
if (!ignore.tags && tree.has.tags(phylo)) {
phylo <- tree.tags.to.labels(phylo)
}
tree.to.string(phylo, digits=digits)
}
#' Converts all tags from a tree into NHX annotations and applies those annotations to each node's label.
#' @param phylo input phylo object
#' @return a phylo object, where tags have been concatenated to each node's label
tree.tags.to.labels <- function(phylo) {
tree.foreach(phylo, function(x, node) {
tags <- tree.get.tags(x, node)
if (length(tags) > 0) {
nhx.str <- tags.to.nhx.string(tags)
cur.lbl <- tree.label.for.node(x, node)
phylo <<- tree.set.label(phylo, node, paste(cur.lbl, nhx.str, sep=''))
}
})
phylo
}
#' Writes the phylo object to disk in NHX format.
#'
#' @method write.nhx phylo
#' @param phylo input phylo object
#' @param f the file or connection to write to
write.nhx <- function(phylo, f) {
str <- as.character(phylo)
cat(str, file=f)
}
#' Converts a list of tags to an NHX annotation string.
#'
#' @param tags list of tags
#' @return character containing [&&NHX:key=val] style annotations for all tags
tags.to.nhx.string <- function(tags) {
  # Check the keys and values themselves (not the joined "key=val" strings,
  # which always contain '=') for characters that would corrupt the NHX block.
  if (any(grepl("[\\[\\]\\:\\;\\,\\=]", c(names(tags), unlist(tags))))) {
    warning("Bad characters within NHX annotations -- probably not a good thing!")
  }
  key.vals <- paste(names(tags), tags, sep='=')
keyval.s <- paste(key.vals, collapse=':')
paste('[&&NHX:', keyval.s, ']', sep='', collapse='')
}
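# Illustrative check (hypothetical tags, not from the original source):
# tags.to.nhx.string(list(S = "Hsap", D = "N"))
# should yield "[&&NHX:S=Hsap:D=N]"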
| /R/write.tree.r | no_license | gjuggler/ggphylo | R | false | false | 4,959 | r |
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -89841283.7755342, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615836205-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r |
# Load Libraries
library(dplyr)
library(reshape2)
library(ggplot2)
library(ggthemes)
library(ggrepel)
library(RColorBrewer)
library(ChannelAttribution)
library(markovchain)
# Note: this simulation assumes every path ends in a conversion; if not all
# clients convert, a separate dataset identifying the converted ids is needed.
# simulating the "real" data
set.seed(354)
df2 <- data.frame(client_id = sample(c(1:1000), 5000, replace = TRUE),
date = sample(c(1:32), 5000, replace = TRUE),
channel = sample(c(0:9), 5000, replace = TRUE,
prob = c(0.1, 0.15, 0.05, 0.07, 0.11, 0.07, 0.13, 0.1, 0.06, 0.16)))
df2$date <- as.Date(df2$date, origin = "2015-01-01")
df2$channel <- paste0('channel_', df2$channel)
View(df2)
# aggregating channels to the paths for each customer
df2 <- df2 %>%
arrange(client_id, date) %>%
group_by(client_id) %>%
summarise(path = paste(channel, collapse = ' > '),
# assume that all paths were finished with conversion
conv = 1,
conv_null = 0) %>%
ungroup()
View(df2)
# calculating the models (Markov and heuristics)
mod2 <- markov_model(df2,
var_path = 'path',
var_conv = 'conv',
var_null = 'conv_null',
out_more = TRUE)
h_mod2 <- heuristic_models(df2, var_path = 'path', var_conv = 'conv')
all_models <- merge(h_mod2, mod2$result, by.x = 'channel_name', by.y = 'channel_name')
colnames(all_models)[5] <- "markov model"
############## visualizations ##############
# transition matrix heatmap for "real" data
df_plot_trans <- mod2$transition_matrix
cols <- c("#e7f0fa", "#c9e2f6", "#95cbee", "#0099dc", "#4ab04a", "#ffd73e", "#eec73a",
"#e29421", "#e29421", "#f05336", "#ce472e")
t <- max(df_plot_trans$transition_probability)
ggplot(df_plot_trans, aes(y = channel_from, x = channel_to, fill = transition_probability)) +
theme_minimal() +
geom_tile(colour = "white", width = .9, height = .9) +
scale_fill_gradientn(colours = cols, limits = c(0, t),
breaks = seq(0, t, by = t/4),
labels = c("0", round(t/4*1, 2), round(t/4*2, 2), round(t/4*3, 2), round(t/4*4, 2)),
guide = guide_colourbar(ticks = T, nbin = 50, barheight = .5, label = T, barwidth = 10)) +
geom_text(aes(label = round(transition_probability, 2)), fontface = "bold", size = 4) +
theme(legend.position = 'bottom',
legend.direction = "horizontal",
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(size = 20, face = "bold", vjust = 2, color = 'black', lineheight = 0.8),
axis.title.x = element_text(size = 24, face = "bold"),
axis.title.y = element_text(size = 24, face = "bold"),
axis.text.y = element_text(size = 8, face = "bold", color = 'black'),
axis.text.x = element_text(size = 8, angle = 90, hjust = 0.5, vjust = 0.5, face = "plain")) +
ggtitle("Transition matrix heatmap")
# Comparision plot
plot_data <- melt(all_models, id.vars = "channel_name", variable.name = "model")
ggplot(plot_data, aes(channel_name, value, fill = model)) +
geom_bar(stat='identity', position='dodge') +
ggtitle("Model Comparison") +
theme(axis.title.x = element_text(vjust = -2)) +
theme(axis.title.y = element_text(vjust = +2)) +
theme(title = element_text(size = 16)) +
theme(plot.title=element_text(size = 20)) +
ylab("")
| /scripts/simulation.R | no_license | jasonchanhku/markov | R | false | false | 3,470 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clEnrich_one.R
\name{clEnrich_one}
\alias{clEnrich_one}
\title{Gene set enrichment for clusters (for one clustering method and a certain
number of clusters)}
\source{
Gene sets are from
1. http://www.broadinstitute.org/gsea/msigdb/index.jsp
2. http://amp.pharm.mssm.edu/Enrichr/
}
\usage{
clEnrich_one(genecl_obj, method, nCluster, annofile = NULL,
sampleLabel = NULL, TermFreq = 0)
}
\arguments{
\item{genecl_obj}{a genecl or cogena object}
\item{method}{as clMethods in genecl function}
\item{nCluster}{as nClust in cogena function}
\item{annofile}{gene set annotation file}
\item{sampleLabel}{sample label. Make sure the label of interest comes after
the control label in the factor level order. See details.}
\item{TermFreq}{a value from [0,1) to filter low-frequency gene sets}
}
\value{
a list containing the enrichment score for each clustering methods
and cluster numbers included in the genecl_obj
}
\description{
Gene set enrichment for clusters sourced from the coExp function. The
enrichment scores are based on -log(p), with p from a hypergeometric test.
}
\details{
Gene sets available (see the vignette for more):
\itemize{
\item c2.cp.kegg.v5.0.symbols.gmt.xz (From Msigdb)
\item c2.cp.reactome.v5.0.symbols.gmt.xz (From Msigdb)
\item c5.bp.v5.0.symbols.gmt.xz (From Msigdb)
\item c2.cp.biocarta.v5.0.symbols.gmt.xz (From Msigdb)
\item c2.all.v5.0.symbols.gmt.xz (From Msigdb)
\item c2.cp.v5.0.symbols.gmt.xz (From Msigdb)
\item c5.mf.v5.0.symbols.gmt.xz (From Msigdb)
}
}
\examples{
# annotation
annoGMT <- "c2.cp.kegg.v5.0.symbols.gmt.xz"
annofile <- system.file("extdata", annoGMT, package="cogena")
data(Psoriasis)
clMethods <- c("hierarchical","kmeans","diana","fanny","som","model","sota","pam","clara","agnes")
genecl_result <- coExp(DEexprs, nClust=2:3, clMethods=c("hierarchical","kmeans"),
metric="correlation", method="complete", ncore=2, verbose=TRUE)
clen_res <- clEnrich_one(genecl_result, "kmeans", "3", annofile=annofile, sampleLabel=sampleLabel)
clen_res1 <- clEnrich_one(clen_res, "hierarchical", "2", annofile=annofile, sampleLabel=sampleLabel)
}
| /man/clEnrich_one.Rd | no_license | meowjiang/cogena | R | false | true | 2,167 | rd |
# TODO: Add comment
#
# Author: Brad
# File: Download_by_CIKs.R
# Version: 1.0
# Date: 02.5.2014
# Purpose: This program reads the company.idx files and then downloads
# the filings into separate year directories by CIK
###############################################################################
###############################################################################
# INITIAL SETUP;
cat("SECTION: INITIAL SETUP", "\n")
###############################################################################
# Clear workspace
rm(list = ls(all = TRUE))
# Limit history to not exceed 500 lines
Sys.setenv(R_HISTSIZE = 500)
repo <- c("http://cran.us.r-project.org")
options(repos = structure(repo))
options(install.packages.check.source = FALSE)
# String as factors is False -- used for read.csv
options(StringsAsFactors = FALSE)
# Default maxprint option
options(max.print = 500)
# options(max.print=99999)
# Memory limit
#memory.limit(size = 8183)
# Set location (1=HOME,2=WORK,3=CORALSEA FROM HOME,4=CORALSEA FROM WORK) Location <- 1
Location <- 1
if (Location == 1) {
#setwd("C:/Research_temp3/")
input_directory <- normalizePath("C:/Users/S.Brad/Dropbox/Research/Fund_Letters/Data",winslash="\\", mustWork=TRUE)
output_directory <- normalizePath("F:/Research_temp3",winslash="\\", mustWork=TRUE)
function_directory <- normalizePath("C:/Users/S.Brad/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
treetag_directory <- normalizePath("C:/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 2) {
#setwd("C:/Research_temp3/")
input_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research/Fund_Letters/Data",winslash="\\", mustWork=TRUE)
output_directory <- normalizePath("C:/Research_temp3",winslash="\\", mustWork=TRUE)
function_directory <- normalizePath("C:/Users/bdaughdr/Dropbox/Research_Methods/R",winslash="\\", mustWork=TRUE)
treetag_directory <- normalizePath("C:/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 3) {
#setwd("//tsclient/C/Research_temp3/")
input_directory <- normalizePath("H:/Research/Mutual_Fund_Letters/Data", winslash = "\\", mustWork = TRUE)
#output_directory <- normalizePath("//tsclient/C/Research_temp3", winslash = "\\", mustWork = TRUE)
output_directory <- normalizePath("C:/Users/bdaughdr/Documents/Research_temp3",winslash="\\", mustWork=TRUE)
function_directory <- normalizePath("//tsclient/C/Users/S.Brad/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
treetag_directory <- normalizePath("//tsclient/C/TreeTagger",winslash="\\", mustWork=TRUE)
} else if (Location == 4) {
#setwd("//tsclient/C/Research_temp3/")
input_directory <- normalizePath("H:/Research/Mutual_Fund_Letters/Data", winslash = "\\", mustWork = TRUE)
#output_directory <- normalizePath("//tsclient/C/Research_temp3", winslash = "\\", mustWork = TRUE)
output_directory <- normalizePath("C:/Users/bdaughdr/Documents/Research_temp3",winslash="\\", mustWork=TRUE)
function_directory <- normalizePath("//tsclient/C/Users/bdaughdr/Dropbox/Research_Methods/R", winslash = "\\", mustWork = TRUE)
treetag_directory <- normalizePath("//tsclient/C/TreeTagger",winslash="\\", mustWork=TRUE)
} else {
cat("ERROR ASSIGNING DIRECTORIES", "\n")
}
rm(Location)
###############################################################################
# FUNCTIONS;
cat("SECTION: FUNCTIONS", "\n")
###############################################################################
source(file=paste(function_directory,"functions_db.R",sep="\\"),echo=FALSE)
#source(file=paste(function_directory,"functions_statistics.R",sep="\\"),echo=FALSE)
#source(file=paste(function_directory,"functions_text_analysis.R",sep="\\"),echo=FALSE)
source(file=paste(function_directory,"functions_utilities.R",sep="\\"),echo=FALSE)
file_check_http <- function(url_dir,filename){
require(gdata)   # trim(), unknownToNA()
require(XML)     # readHTMLTable()
#url_dir <- "http://www.sec.gov/Archives/edgar/data/933996/000113542805000004/"
#filename <- "0001135428-05-000004.txt"
output_directory1 <- try(readHTMLTable(url_dir), silent=T)
if (inherits(output_directory1, "try-error"))
{
filenames <- ""
filenames2 <- as.data.frame(filenames,stringsAsFactors=FALSE)
colnames(filenames2)[1] <- "file"
} else
{
output_directory2 <- do.call(rbind, output_directory1)
output_directory3 <- data.frame(lapply(output_directory2, as.character), stringsAsFactors=FALSE)
colnames(output_directory3) <- c("Trash","Name","Last.modified","Size","Description")
directory_df <- output_directory3
for(i in 1:ncol(directory_df))
{
directory_df[,i] <- iconv(directory_df[,i], "latin1", "ASCII", sub="")
}
for(i in which(sapply(directory_df,class)=="character"))
{
directory_df[[i]] = trim(directory_df[[i]])
}
for (i in 1:ncol(directory_df))
{
directory_df[,i] <- unknownToNA(directory_df[,i], unknown=c("",".","n/a","na","NA",NA,"null","NULL",NULL,"nan","NaN",NaN,
NA_integer_,"NA_integer_",NA_complex_,"NA_complex_",
NA_character_,"NA_character_",NA_real_,"NA_real_"),force=TRUE)
directory_df[,i] <- ifelse(is.na(directory_df[,i]),NA, directory_df[,i])
}
#Remove all NA cols
directory_df <- directory_df[,colSums(is.na(directory_df[1:nrow(directory_df),]))<nrow(directory_df)]
#Remove all NA rows
directory_df <- directory_df[rowSums(is.na(directory_df[,1:ncol(directory_df)]))<ncol(directory_df),]
#Remove parent directory row
directory_df <- directory_df[!(directory_df[,"Name"]=="Parent Directory"),]
#Remove NA names row
directory_df <- directory_df[!(is.na(directory_df[,"Name"])),]
#Reorder row numbers
row.names(directory_df) <- seq(nrow(directory_df))
for(i in which(sapply(directory_df,class)=="character"))
{
directory_df[[i]] = trim(directory_df[[i]])
}
for (i in 1:ncol(directory_df))
{
#i <- 1
#i <- 2
directory_df[,i] <- unknownToNA(directory_df[,i], unknown=c("",".","n/a","na","NA",NA,"null","NULL",NULL,"nan","NaN",NaN,
NA_integer_,"NA_integer_",NA_complex_,"NA_complex_",
NA_character_,"NA_character_",NA_real_,"NA_real_"),force=TRUE)
}
for(i in which(sapply(directory_df,class)=="character"))
{
#i <- 1
directory_df[[i]] <- ifelse(is.na(directory_df[[i]]),NA, directory_df[[i]])
}
filenames <- directory_df[,"Name"]
filenames2 <- as.data.frame(filenames,stringsAsFactors=FALSE)
colnames(filenames2)[1] <- "file"
}
if (filename %in% filenames2[,"file"])
{
return(TRUE)
} else
{
return(FALSE)
}
}
file_check_ftp <- function(url_dir,filename){
#url_dir <- "ftp://ftp.sec.gov/edgar/data/933996/000113542805000004/"
#filename <- "0001135428-05-000004.txt"
output_directory1 <- try(getURL(url_dir, ssl.verifypeer = FALSE, ftp.use.epsv = FALSE, dirlistonly = TRUE), silent=T)
if (inherits(output_directory1, "try-error"))
{
filenames <- ""
filenames2 <- as.data.frame(filenames,stringsAsFactors=FALSE)
colnames(filenames2)[1] <- "file"
} else
{
filenames <- data.frame(output_directory1,
stringsAsFactors=FALSE)
filenames2 <- data.frame(strsplit(filenames[,1], "\r*\n")[[1]],
stringsAsFactors=FALSE)
colnames(filenames2)[1] <- "file"
}
if (filename %in% filenames2[,"file"])
{
return(TRUE)
} else
{
return(FALSE)
}
}
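#Example usage (accession taken from the commented test URLs above; both helpers
#return TRUE/FALSE, so they can guard the download.file calls further below):
#file_check_http("http://www.sec.gov/Archives/edgar/data/933996/000113542805000004/",
#                "0001135428-05-000004.txt")
#file_check_ftp("ftp://ftp.sec.gov/edgar/data/933996/000113542805000004/",
#               "0001135428-05-000004.txt")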
###############################################################################
# LIBRARIES;
cat("SECTION: LIBRARIES", "\n")
###############################################################################
#Load External Packages
external_packages <- c("gdata","ibdreg","plyr","RCurl","XML")
invisible(unlist(sapply(external_packages,load_external_packages, repo_str=repo, simplify=FALSE, USE.NAMES=FALSE)))
installed_packages <- list_installed_packages(external_packages)
###############################################################################
#PARAMETERS;
###############################################################################
#If using windows, set to "\\" - if mac (or unix), set to "/";
slash <- "\\"
#First year you want index files for:
startyear <- 1993
#startyear <- 2006
#Last year you want index files for:
endyear <- 2013
#endyear <- 2012
#First qtr you want index files for (usually 1):
startqtr <- 1
#Last qtr you want index files for (usually 4):
endqtr <- 4
#Output folder:
indexfolder <- "full-index"
#downloadfolder <- "N-1"
#downloadfolder <- "DEF 14A"
#downloadfolder <- "MF_All"
#downloadfolder <- "MF_SemiAnnual_Reports"
#downloadfolder <- "MF_Annual_Reports"
#downloadfolder <- "MF_Shareholder_Reports_N-CSR-A"
#downloadfolder <- "MF_Shareholder_Reports_N-CSRS-A"
#downloadfolder <- "MF_Shareholder_Reports_N-CSR"
#downloadfolder <- "MF_Shareholder_Reports_N-CSRS"
downloadfolder <- "MF_Shareholder_Reports_N-SAR-B"
#The sub directory you are going to download filings to
txtfolder <- "txt"
headerfolder <- "header"
#The file that will contain the filings you want to download.
outfile <- "filings.csv"
outfile_comb <- "filings_list_comb.csv"
#Download address
address_ftp <- "ftp://ftp.sec.gov/"
address_http <- "http://www.sec.gov/Archives/"
#Specify the filing form types you are looking for.
#These are matched exactly against form_type in a SQL "in" clause below,
#so "NT" late-filing notices are excluded automatically, and amended filings
#can be excluded by leaving out the "/A" variants (e.g., list "N-CSR" but not "N-CSR/A").
#formget <- c("N-1","N-1/A","N-1A","N-1A/A","N-1A EL","N-1A EL/A","497","497J","497K","497K1","497K2","497K3A","497K3B",
# "N-CSR","N-CSR/A","N-CSRS","N-CSRS/A","N-MFP","N-Q","N-SAR","NSAR-A","NSAR-B","NSAR-B/A","N-PX","485APOS","485BPOS","N-30B-2",
# "N-14","N-14/A")
#formget <- c("N-1","N-1/A","N-1A","N-1A/A","N-1A EL","N-1A EL/A","497","497J","497K","497K1","497K2","497K3A","497K3B")
#formget <- c("N-1A","N-1A/A","N-14","N-14/A","497K","497K1","497K2","497K3A","497K3B","NSAR-A","NSAR-B","N-Q","N-PX")
#formget <- NULL
#formget <- c("N-1A","497K1")
#formget <- c("NSAR-B")
#formget <- c("NSAR-A")
#formget <- c("N-CSR/A")
#formget <- c("N-CSRS/A")
#formget <- c("N-CSR")
#formget <- c("N-CSRS")
formget <- c("NSAR-B")
formget_collapse <- paste("'",formget,"'",sep="")
formget_collapse <- paste(formget_collapse,collapse=",")
formget_collapse <- paste("(",formget_collapse,")",sep="")
yr_qtr_comb <- expand.grid(yr = seq(startyear, endyear, 1), qtr = seq(1, 4, 1))
yr_qtr_comb <- yr_qtr_comb[order(yr_qtr_comb[,"yr"],yr_qtr_comb[,"qtr"]),]
row.names(yr_qtr_comb) <- seq(nrow(yr_qtr_comb))
yr_qtr_comb[,"qtr"] <- ifelse((yr_qtr_comb[,"yr"]==startyear & yr_qtr_comb[,"qtr"] < startqtr),NA,yr_qtr_comb[,"qtr"])
yr_qtr_comb[,"qtr"] <- ifelse((yr_qtr_comb[,"yr"]==endyear & yr_qtr_comb[,"qtr"] > endqtr),NA,yr_qtr_comb[,"qtr"])
yr_qtr_comb <- yr_qtr_comb[(!is.na(yr_qtr_comb[,"qtr"])),]
row.names(yr_qtr_comb) <- seq(nrow(yr_qtr_comb))
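#yr_qtr_comb now has one row per requested year/quarter, e.g. 84 rows covering
#1993 Q1 through 2013 Q4 with the defaults above.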
#Check to see if output directory exists. If not, create it.
create_directory(output_directory,remove=1)
###############################################################################
cat("SECTION: SQLITE DATABASES", "\n")
###############################################################################
#Check to see if download folder exists. If not, create it.
index_folder_path <- paste(output_directory, indexfolder, sep = slash, collapse = slash)
create_directory(index_folder_path,remove=1)
index_combined_db <- paste(index_folder_path,"\\","index_combined.s3db",sep="")
index_combined_db_tables <- ListTables(index_combined_db)
index_combined_db_fields <- ListFields(index_combined_db)
###############################################################################
cat("Get CIKs \n")
###############################################################################
#Check to see if download folder exists. If not, create it.
download_folder_path <- paste(output_directory, downloadfolder, sep = slash, collapse = slash)
create_directory(download_folder_path,remove=1)
#CIKs <- read.table(file=paste(index_folder_path,"\\","summary","\\","CIK_list.csv",sep=""), header = TRUE, na.strings="NA",stringsAsFactors=FALSE,
# sep = ",", quote = "\"",dec = ".", fill = TRUE, comment.char = "")
CIKs <- read.table(file=paste(download_folder_path,"\\","CIK_list.csv",sep=""), header = TRUE, na.strings="NA",stringsAsFactors=FALSE,
sep = ",", quote = "\"",dec = ".", fill = TRUE, comment.char = "")
#Output CIK list
CIKs[,"cik_no_pad"] <- as.numeric(CIKs[,"cik_no_pad"])
CIKs[,"cik_no_pad"] <- round(CIKs[,"cik_no_pad"], digits = 0)
CIKs <- CIKs[(!is.na(CIKs[,"cik_no_pad"])),]
CIKs_u <- unique(CIKs)
row.names(CIKs_u) <- seq(nrow(CIKs_u))
#Pad CIK to 10 digits (sprintf's 0 flag is ignored for %s, so pad with spaces
#first and then replace the padding spaces with zeros)
CIKs_u[,"cik"] <- format(CIKs_u[,"cik_no_pad"], trim=TRUE, digits = 0, scientific = 999)
CIKs_u[,"cik"] <- sprintf("%010s",CIKs_u[,"cik"])
CIKs_u[,"cik"] <- gsub(" ", "0", CIKs_u[,"cik"])
CIKs_u <- CIKs_u[order(CIKs_u[,"cik"]),]
row.names(CIKs_u) <- seq(nrow(CIKs_u))
rm(CIKs)
invisible(gc(verbose = FALSE, reset = TRUE))
ExportTable(index_combined_db,"CIKs_u",CIKs_u)
index_combined_db_tables <- ListTables(index_combined_db)
index_combined_db_fields <- ListFields(index_combined_db)
###############################################################################
cat("Download files \n")
###############################################################################
filings_all <- ddply(.data=yr_qtr_comb, .variables=c("yr"),
.fun = function(x, input_db, path_output, sub_path_txt, sub_path_header, outfile, forms, address_prefix_http,address_prefix_ftp){
#x <- yr_qtr_comb[(yr_qtr_comb[,"yr"]==1993 & yr_qtr_comb[,"qtr"]==1),]
#x <- yr_qtr_comb[(yr_qtr_comb[,"yr"]==1994 & yr_qtr_comb[,"qtr"]==1),]
#x <- yr_qtr_comb[(yr_qtr_comb[,"yr"]==2005 & yr_qtr_comb[,"qtr"]==1),]
#x <- yr_qtr_comb[(yr_qtr_comb[,"yr"]==2012 & yr_qtr_comb[,"qtr"]==4),]
#input_db <- index_combined_db
#path_output <- download_folder_path
#sub_path_txt <- txtfolder
#sub_path_header <- headerfolder
#outfile <- outfile
#forms <- formget_collapse
#address_prefix_http <- address_http
#address_prefix_ftp <- address_ftp
yr <- unique(x[,"yr"])
cat("\n",yr,"\n")
#Check to see if yr folder exists. If not, create it.
#cat("\n")
yr_folder_path <- paste(path_output, yr, sep = slash, collapse = slash)
create_directory(yr_folder_path,remove=1)
#Check to see if output folders exists. If not, create it.
#cat("\n")
output_folder_path_txt <- paste(path_output, yr, sub_path_txt, sep = slash, collapse = slash)
create_directory(output_folder_path_txt,remove=1)
output_folder_path_header <- paste(path_output, yr, sub_path_header, sep = slash, collapse = slash)
create_directory(output_folder_path_header,remove=1)
if(forms=="('')") {
cat("All forms","\n")
query_filings_yr <- ""
query_filings_yr <- paste(query_filings_yr, "select * ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "from index_combined ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "where yr=", yr, " ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "and cik in (select distinct cik_no_pad ", sep=" ")
query_filings_yr <- paste(query_filings_yr, " from CIKs_u ) ", sep=" ")
query_filings_yr <- gsub(" {2,}", " ", query_filings_yr)
query_filings_yr <- gsub("^\\s+|\\s+$", "", query_filings_yr)
} else {
cat("Certain forms","\n")
query_filings_yr <- ""
query_filings_yr <- paste(query_filings_yr, "select * ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "from index_combined ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "where yr=", yr, " ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "and cik in (select distinct cik_no_pad ", sep=" ")
query_filings_yr <- paste(query_filings_yr, " from CIKs_u ) ", sep=" ")
query_filings_yr <- paste(query_filings_yr, "and form_type in ", forms, " ", sep=" ")
query_filings_yr <- gsub(" {2,}", " ", query_filings_yr)
query_filings_yr <- gsub("^\\s+|\\s+$", "", query_filings_yr)
}
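#With the defaults above, query_filings_yr ends up as e.g.:
#"select * from index_combined where yr= 1993 and cik in
# (select distinct cik_no_pad from CIKs_u ) and form_type in ('NSAR-B')"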
rm(yr)
filings_temp <- data.frame(runsql(query_filings_yr,input_db),stringsAsFactors=FALSE)
rm(query_filings_yr)
if(nrow(filings_temp)==0) {
cat("No Matches","\n")
filings_temp2 <- data.frame(matrix(NA, ncol=(ncol(filings_temp)+5), nrow=nrow(filings_temp),
dimnames=list(c(), c(colnames(filings_temp),c("accession_number","filepath","file_header","file_txt","file_index_htm")))),
stringsAsFactors=FALSE)
filings_temp3 <- filings_temp2[,!(colnames(filings_temp2) %in% c("fullfilename_txt","fullfilename_htm"))]
} else {
cat("Matches","\n")
filings_temp2 <- data.frame(filings_temp,accession_number=NA,filepath=NA,
file_header=NA,file_txt=NA,file_index_htm=NA,stringsAsFactors=FALSE)
filings_temp2[,"file_txt"] <- gsub(".*/", "", filings_temp2[,"fullfilename_txt"])
filings_temp2[,"file_index_htm"] <- gsub(".*/", "", filings_temp2[,"fullfilename_htm"])
filings_temp2[,"accession_number"] <- gsub("-index.htm", "", filings_temp2[,"file_index_htm"])
filings_temp2[,"file_header"] <- paste(filings_temp2[,"accession_number"],
".hdr.sgml",
sep="")
filings_temp2[,"filepath"] <- gsub("-", "", filings_temp2[,"accession_number"])
filings_temp2[,"filepath"] <- paste("edgar/data",
filings_temp2[,"cik"],
filings_temp2[,"filepath"],sep="/")
filings_temp3 <- filings_temp2[,!(colnames(filings_temp2) %in% c("fullfilename_txt","fullfilename_htm"))]
#Get name of all txt files already downloaded
old_txt <- data.frame(file=list.files(output_folder_path_txt),stringsAsFactors=FALSE)
old_txt2 <- ddply(.data=old_txt, .variables=c("file"), .fun = function(x,folder){
filepath <- paste(folder,x,sep="\\")
output <- data.frame(filepath=filepath,file.info(filepath),stringsAsFactors=FALSE)
}, folder=output_folder_path_txt,
.progress = "none", .inform = FALSE, .parallel = FALSE, .paropts = NULL, .id = NA)
rm(old_txt)
old_header <- data.frame(file=list.files(output_folder_path_header),stringsAsFactors=FALSE)
old_header2 <- ddply(.data=old_header, .variables=c("file"), .fun = function(x,folder){
filepath <- paste(folder,x,sep="\\")
output <- data.frame(filepath=filepath,file.info(filepath),stringsAsFactors=FALSE)
}, folder=output_folder_path_header,
.progress = "none", .inform = FALSE, .parallel = FALSE, .paropts = NULL, .id = NA)
rm(old_header)
files_to_download <- data.frame(filepath=filings_temp3[,"filepath"],
fullfilename_txt=filings_temp3[,"file_txt"],
#exists_txt=NA,
already_downloaded_txt=NA,
fullfilename_header=filings_temp3[,"file_header"],
#exists_header=NA,
already_downloaded_header=NA,
stringsAsFactors=FALSE)
files_to_download2 <- files_to_download
#checks to see what files on the current index listing are not in the directory
files_to_download2[,"already_downloaded_txt"] <- ifelse(files_to_download2[,"fullfilename_txt"] %in% old_txt2[,"file"], 1, 0)
files_to_download2[,"already_downloaded_header"] <- ifelse(files_to_download2[,"fullfilename_header"] %in% old_header2[,"file"], 1, 0)
files_to_download_trim <- files_to_download2[(files_to_download2[,"already_downloaded_txt"]==0 |
files_to_download2[,"already_downloaded_header"]==0) ,]
filings_downloaded <- ddply(.data=files_to_download_trim, .variables=c("filepath","fullfilename_txt","fullfilename_header"),
.fun = function(y,sub_path_output_txt,sub_path_output_header,address_prefix_ftp,address_prefix_http){
#y <- files_to_download_trim[1,]
#sub_path_output_txt <- output_folder_path_txt
#sub_path_output_header <- output_folder_path_header
#address_prefix_ftp <- address_prefix_ftp
#address_prefix_http <- address_prefix_http
filepath <- unique(y[,"filepath"])
file_txt <- unique(y[,"fullfilename_txt"])
flag_txt <- unique(y[,"already_downloaded_txt"])
file_header <- unique(y[,"fullfilename_header"])
flag_header <- unique(y[,"already_downloaded_header"])
if(flag_txt==0) {
fileout_txt <- paste(sub_path_output_txt,file_txt,sep=slash)
filepath_http <- paste(address_prefix_http,filepath,"/",sep="")
filepath_ftp <- paste(address_prefix_ftp,filepath,"/",sep="")
#cat(fileout_txt,"\n")
if (file_check_http(filepath_http,file_txt)) {
#cat("Exists on HTTP mirror","\n")
download.file(paste(filepath_http,file_txt,sep=""), fileout_txt, quiet = TRUE, mode = "wb",cacheOK = TRUE)
} else if (file_check_ftp(filepath_ftp,file_txt)) {
#cat("Exists on FTP mirror","\n")
download.file(paste(filepath_ftp,file_txt,sep=""), fileout_txt, quiet = TRUE, mode = "wb",cacheOK = TRUE)
} else {
#cat("Doesn't Exists on either mirror","\n")
}
rm(fileout_txt,filepath_http,filepath_ftp)
}
if(flag_header==0) {
fileout_header <- paste(sub_path_output_header,file_header,sep=slash)
filepath_http <- paste(address_prefix_http,filepath,"/",sep="")
filepath_ftp <- paste(address_prefix_ftp,filepath,"/",sep="")
#cat(fileout_header,"\n")
if (file_check_http(filepath_http,file_header)) {
#cat("Exists on HTTP mirror","\n")
download.file(paste(filepath_http,file_header,sep=""), fileout_header, quiet = TRUE, mode = "wb",cacheOK = TRUE)
} else if (file_check_ftp(filepath_ftp,file_header)) {
#cat("Exists on FTP mirror","\n")
download.file(paste(filepath_ftp,file_header,sep=""), fileout_header, quiet = TRUE, mode = "wb",cacheOK = TRUE)
} else {
#cat("Doesn't Exists on either mirror","\n")
}
rm(fileout_header,filepath_http,filepath_ftp)
}
rm(filepath,file_txt,flag_txt,file_header,flag_header)
},
sub_path_output_txt=output_folder_path_txt,sub_path_output_header=output_folder_path_header,
address_prefix_ftp=address_prefix_ftp, address_prefix_http=address_prefix_http,
.progress = "none",.inform = FALSE, .drop = TRUE, .parallel = FALSE, .paropts = NULL)
rm(old_txt2,old_header2,files_to_download,files_to_download_trim,filings_downloaded)
}
write.table(filings_temp3,file=paste(yr_folder_path,outfile,sep="\\"),na="",sep=",",quote=TRUE,row.names=FALSE,append=FALSE)
rm(yr_folder_path,output_folder_path_txt,output_folder_path_header)
rm(filings_temp,filings_temp2)
return(filings_temp3)
},
input_db=index_combined_db, path_output=download_folder_path, sub_path_txt=txtfolder, sub_path_header=headerfolder,
outfile=outfile, forms=formget_collapse, address_prefix_http=address_http, address_prefix_ftp=address_ftp,
.progress = "text",.inform = FALSE, .drop = TRUE, .parallel = FALSE, .paropts = NULL)
###############################################################################
cat("Output Combined Files \n")
###############################################################################
write.table(filings_all,file=paste(download_folder_path,"\\",outfile_comb,sep=""), append=FALSE, na="NA",
sep = ",", quote = TRUE,dec = ".", qmethod = "double", col.names=TRUE, row.names = FALSE)
###############################################################################
cat("Remove temp files \n")
###############################################################################
DeleteTable(index_combined_db,"CIKs_u")
index_combined_db_tables <- ListTables(index_combined_db)
index_combined_db_fields <- ListFields(index_combined_db) | /Download_by_CIKs.R | no_license | sbraddaughdrill/Download_Edgar_Filings | R | false | false | 32,724 | r |
with(a2dc75c79930f45678a0c0db34b231d0d, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';FRAME909970$DATA_COLLECTION_TIME[FRAME909970$DATA_COLLECTION_TIME == 0] <- 0;}); | /1c4fa71c-191c-4da9-8102-b247ffddc5d3/R/Temp/aJRIa8T3N9uN9.R | no_license | ayanmanna8/test | R | false | false | 276 | r |
#' API to get a list of files in Google Drive
#' @export
#' @param teamDriveId - in case you want to search within a team drive
#' @param path - This should be the ID of the folder, since searching by folder name doesn't always work as expected.
#' @param type - object type that you want to include in your query result.
listItemsInGoogleDrive <- function(teamDriveId = NULL, path = NULL, type = c("csv", "tsv", "txt", "folder", "xls", "xlsx")){
if (!requireNamespace("googledrive")) {
stop("package googledrive must be installed.")
}
token = getGoogleTokenForDrive()
googledrive::drive_set_token(token)
# "~/" is special case for listing under My Drive so do not call googledriev::as_id for "~/".
if (!is.null(path) && path != "~/") {
path = googledrive::as_id(path)
}
# If a team drive ID is provided, search documents within that team drive.
# Check is.null first so the comparison is never run on a NULL default.
if (!is.null(teamDriveId) && teamDriveId != "") {
teamDriveId = googledrive::as_id(teamDriveId)
}
# To improve performance, only get id, name, mimeType, modifiedTime, size, parents for each file.
googledrive::drive_ls(path = path, type = type, team_drive = teamDriveId, pageSize = 1000, fields = "files/id, files/name, files/mimeType, files/modifiedTime, files/size, files/parents, nextPageToken")
}
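# Example usage (the folder ID is hypothetical):
# listItemsInGoogleDrive(teamDriveId = NULL, path = "1AbCFakeFolderId", type = "csv")
# listItemsInGoogleDrive(teamDriveId = NULL, path = "~/") # list items under My Drive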
#' API to get a folder details in Google Drive
#' @export
getGoogleDriveFolderDetails <- function(teamDriveId = NULL , path = NULL) {
if(!requireNamespace("googledrive")) {
stop("package googledrive must be installed.")
}
token = getGoogleTokenForDrive()
googledrive::drive_set_token(token)
if (!is.null(path)) {
path = googledrive::as_id(path)
}
df <- NULL
# If a team drive ID is provided, search documents within that team drive.
# Check is.null first so the comparison is never run on a NULL default.
if (!is.null(teamDriveId) && teamDriveId != "") {
teamDriveId = googledrive::as_id(teamDriveId)
}
df <- googledrive::drive_get(team_drive = teamDriveId, id = path)
dfdetails <- NULL
if (nrow(df) == 1) {
dfdetails <- df %>% googledrive::drive_reveal("path")
}
dfdetails
}
#'API that imports a CSV file from Google Drive.
#'@export
getCSVFileFromGoogleDrive <- function(fileId, delim, quote = '"',
escape_backslash = FALSE, escape_double = TRUE,
col_names = TRUE, col_types = NULL,
locale = readr::default_locale(),
na = c("", "NA"), quoted_na = TRUE,
comment = "", trim_ws = FALSE,
skip = 0, n_max = Inf, guess_max = min(1000, n_max),
progress = interactive()) {
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "csv")
exploratory::read_delim_file(filePath, delim = delim, quote = quote,
escape_backslash = escape_backslash, escape_double = escape_double,
col_names = col_names, col_types = col_types,
locale = locale,
na = na, quoted_na = quoted_na,
comment = comment, trim_ws = trim_ws,
skip = skip, n_max = n_max, guess_max = guess_max,
progress = progress)
}
#'API that imports multiple CSV files with the same structure and merges them into a single data frame
#'
#'For the col_types parameter, character is forced by default so that merging the CSV based data frames doesn't error out due to column data type mismatches.
#'Once the data frame merging is done, readr::type_convert is called from Exploratory Desktop to restore the column data types.
#'@export
getCSVFilesFromGoogleDrive <- function(fileIds, fileNames, delim, quote = '"',
escape_backslash = FALSE, escape_double = TRUE,
col_names = TRUE, col_types = readr::cols(.default = readr::col_character()),
locale = readr::default_locale(),
na = c("", "NA"), quoted_na = TRUE,
comment = "", trim_ws = FALSE,
skip = 0, n_max = Inf, guess_max = min(1000, n_max),
progress = interactive()) {
# set name to the files so that it can be used for the "id" column created by purrr:map_dfr.
files <- setNames(as.list(fileIds), fileNames)
df <- purrr::map_dfr(files, exploratory::getCSVFileFromGoogleDrive, delim = delim, quote = quote,
escape_backslash = escape_backslash, escape_double = escape_double,
col_names = col_names, col_types = col_types,
locale = locale,
na = na, quoted_na = quoted_na,
comment = comment, trim_ws = trim_ws,
skip = skip, n_max = n_max, guess_max = guess_max,
progress = progress, .id = "exp.file.id") %>% mutate(exp.file.id = basename(exp.file.id)) # extract file name from full path with basename and create file.id column.
id_col <- avoid_conflict(colnames(df), "id")
# copy internal exp.file.id to the id column.
df[[id_col]] <- df[["exp.file.id"]]
# drop internal column and move the id column to the very beginning.
df %>% dplyr::select(!!rlang::sym(id_col), dplyr::everything(), -exp.file.id)
}
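# Example usage (file IDs and names are hypothetical): merge two CSV files that
# share the same structure; the resulting data frame gets an "id" column holding
# the file names.
# getCSVFilesFromGoogleDrive(fileIds = c("1AbCFakeId1", "1AbCFakeId2"),
#                            fileNames = c("jan.csv", "feb.csv"), delim = ",")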
#'API that searches Google Drive and imports the matching CSV files
#'@export
searchAndGetCSVFilesFromGoogleDrive <- function(folderId = NULL, searchKeyword = "", delim, quote = '"',
escape_backslash = FALSE, escape_double = TRUE,
col_names = TRUE, col_types = readr::cols(.default = readr::col_character()),
locale = readr::default_locale(),
na = c("", "NA"), quoted_na = TRUE,
comment = "", trim_ws = FALSE,
skip = 0, n_max = Inf, guess_max = min(1000, n_max),
progress = interactive()) {
items <- exploratory::listItemsInGoogleDrive(path = folderId, type = c("csv", "tsv", "txt"))
if (searchKeyword != "") {
# search condition is case insensitive. (ref: https://www.regular-expressions.info/modifiers.html, https://stackoverflow.com/questions/5671719/case-insensitive-search-of-a-list-in-r)
items <- items %>% filter(stringr::str_detect(name, stringr::str_c("(?i)", searchKeyword)))
}
exploratory::getCSVFilesFromGoogleDrive(items$id, items$name, delim = delim, quote = quote, escape_backslash = escape_backslash, escape_double = escape_double, col_names = col_names,
col_types = col_types, locale = locale, na = na, quoted_na = quoted_na, comment = comment, trim_ws = trim_ws, skip = skip, n_max = n_max,
guess_max = guess_max, progress = progress)
}
#'API that imports a Excel file from Google Drive.
#'@export
getExcelFileFromGoogleDrive <- function(fileId, sheet = 1, col_names = TRUE, col_types = NULL, na = "", skip = 0, trim_ws = TRUE, n_max = Inf, use_readxl = NULL, detectDates = FALSE, skipEmptyRows = FALSE, skipEmptyCols = FALSE, check.names = FALSE, tzone = NULL, convertDataTypeToChar = FALSE, ...) {
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "xlsx")
exploratory::read_excel_file(path = filePath, sheet = sheet, col_names = col_names, col_types = col_types,
na = na, skip = skip, trim_ws = trim_ws, n_max = n_max, use_readxl = use_readxl,
detectDates = detectDates, skipEmptyRows = skipEmptyRows, skipEmptyCols = skipEmptyCols,
check.names = check.names, tzone = tzone, convertDataTypeToChar = convertDataTypeToChar, ...)
}
#'API that imports multiple Excel files from Google Drive
#'@export
getExcelFilesFromGoogleDrive <- function(fileIds, fileNames, sheet = 1, col_names = TRUE, col_types = NULL, na = "", skip = 0, trim_ws = TRUE, n_max = Inf, use_readxl = NULL, detectDates = FALSE, skipEmptyRows = FALSE, skipEmptyCols = FALSE, check.names = FALSE, convertDataTypeToChar = TRUE, tzone = NULL, ...) {
# set name to the files so that it can be used for the "id" column created by purrr:map_dfr.
files <- setNames(as.list(fileIds), fileNames)
df <- purrr::map_dfr(files, exploratory::getExcelFileFromGoogleDrive, sheet = sheet,
col_names = col_names, col_types = col_types, na = na, skip = skip, trim_ws = trim_ws, n_max = n_max, use_readxl = use_readxl,
detectDates = detectDates, skipEmptyRows = skipEmptyRows, skipEmptyCols = skipEmptyCols, check.names = FALSE,
tzone = tzone, convertDataTypeToChar = convertDataTypeToChar, .id = "exp.file.id") %>% mutate(exp.file.id = basename(exp.file.id)) # extract file name from full path with basename and create file.id column.
id_col <- avoid_conflict(colnames(df), "id")
# copy internal exp.file.id to the id column.
df[[id_col]] <- df[["exp.file.id"]]
# drop internal column and move the id column to the very beginning.
df %>% dplyr::select(!!rlang::sym(id_col), dplyr::everything(), -exp.file.id)
}
#'API that searches Google Drive and imports the matching Excel files
#'@export
searchAndGetExcelFilesFromGoogleDrive <- function(folderId = NULL, searchKeyword = "", sheet = 1, col_names = TRUE, col_types = NULL, na = "", skip = 0, trim_ws = TRUE, n_max = Inf, use_readxl = NULL, detectDates = FALSE, skipEmptyRows = FALSE, skipEmptyCols = FALSE, check.names = FALSE, convertDataTypeToChar = TRUE, tzone = NULL, ...) {
# List Excel files under the folder; filter by the search keyword below if one is given.
items <- exploratory::listItemsInGoogleDrive(path = folderId, type = c("xls", "xlsx"))
if (searchKeyword != "") {
# search condition is case insensitive. (ref: https://www.regular-expressions.info/modifiers.html, https://stackoverflow.com/questions/5671719/case-insensitive-search-of-a-list-in-r)
items <- items %>% filter(stringr::str_detect(name, stringr::str_c("(?i)", searchKeyword)))
}
exploratory::getExcelFilesFromGoogleDrive(items$id, items$name, sheet = sheet, col_names = col_names, col_types = col_types, na = na, skip = skip,
trim_ws = trim_ws, n_max = n_max, use_readxl = use_readxl, detectDates = detectDates, skipEmptyRows = skipEmptyRows,
skipEmptyCols = skipEmptyCols, check.names = check.names, convertDataTypeToChar = convertDataTypeToChar, tzone = tzone, ...)
}
#'Wrapper for readxl::excel_sheets to support Google Drive Excel file
#'@export
getExcelSheetsFromGoogleDriveExcelFile <- function(fileId){
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "xlsx")
readxl::excel_sheets(filePath)
}
#'Wrapper for readr::guess_encoding to support Google Drive csv file
#'@export
guessFileEncodingForGoogleDriveFile <- function(fileId, n_max = 1e4, threshold = 0.20){
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "csv")
readr::guess_encoding(filePath, n_max, threshold)
}
#' API to download remote data file (excel, csv) from Google Drive and cache it if necessary
#' it uses tempfile https://stat.ethz.ch/R-manual/R-devel/library/base/html/tempfile.html
#' and an R variable named after the hash of the fileId and type is assigned the path given
#' by tempfile, so later calls can reuse the already downloaded copy.
token <- exploratory::getGoogleTokenForDrive()
googledrive::drive_set_token(token)
shouldCacheFile <- getOption("tam.should.cache.datafile")
filepath <- NULL
hash <- digest::digest(stringr::str_c(fileId, type), "md5", serialize = FALSE)
tryCatch({
filepath <- eval(as.name(hash))
}, error = function(e){
# if filePath hash is not set as global variable yet, it raises error that says object not found
# which can be ignored
filepath <- NULL
})
# Check if cached excel/csv exists for the filepath
if (!is.null(shouldCacheFile) && isTRUE(shouldCacheFile) && !is.null(filepath)) {
filepath
} else {
tmp <- tempfile(fileext = stringr::str_c(".", type))
# In case of using Rserve on linux, somehow it doesn't create a temporary
# directory specified by tempdir() which is used as a part of temp file
# path generated by tempfile(). So if you try to use that temp file path,
# dump some data into it for example, it will fail because no such path
# found. This function fails with the same reason at download.file below.
#
# It works fine from the R command line on linux, and it works
# fine all the time on Mac and Windows regardless Rserv or not.
#
# The following command is harmless even if you have the directory already.
# http://stackoverflow.com/questions/4216753/check-existence-of-directory-and-create-if-doesnt-exist
dir.create(tempdir(), showWarnings = FALSE)
# download file to temporary location
googledrive::drive_download(googledrive::as_id(fileId), overwrite = TRUE, path = tmp)
# cache file
if (!is.null(shouldCacheFile) && isTRUE(shouldCacheFile)) {
assign(hash, tmp, envir = .GlobalEnv)
}
tmp
}
}
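# Example usage (the file ID is hypothetical). With caching enabled, a repeated
# call for the same fileId/type returns the cached temp path without re-downloading:
# options(tam.should.cache.datafile = TRUE)
# path <- downloadDataFileFromGoogleDrive("1AbCFakeFileId", type = "csv")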
#' API to clear Google Drive cache file
#' @param fileId - ID of the Google Drive file whose cached copy should be removed
#' @param type - file extension used when the cache entry was created (e.g. "csv")
#' @export
clearGoogleDriveCacheFile <- function(fileId, type = "csv"){
options(tam.should.cache.datafile = FALSE)
hash <- digest::digest(stringr::str_c(fileId, type), "md5", serialize = FALSE)
tryCatch({
filepath <- eval(as.name(hash))
do.call(rm, c(as.name(hash)),envir = .GlobalEnv)
unlink(filepath)
}, error = function(e){
})
}
| /R/google_drive.R | permissive | toru-takahashi/exploratory_func | R | false | false | 13,766 | r |
comment = "", trim_ws = FALSE,
skip = 0, n_max = Inf, guess_max = min(1000, n_max),
progress = interactive()) {
items <- exploratory::listItemsInGoogleDrive(path = folderId, type = c("csv", "tsv", "txt"))
if (searchKeyword != "") {
# search condition is case insensitive. (ref: https://www.regular-expressions.info/modifiers.html, https://stackoverflow.com/questions/5671719/case-insensitive-search-of-a-list-in-r)
items <- items %>% filter(stringr::str_detect(name, stringr::str_c("(?i)", searchKeyword)))
}
exploratory::getCSVFilesFromGoogleDrive(items$id, items$name, delim = delim, quote = quote, escape_backslash = escape_backslash, escape_double = escape_double, col_names = col_names,
col_types = col_types, locale = locale, na = na, quoted_na = quoted_na, comment = comment, trim_ws = trim_ws, skip = skip, n_max = n_max,
guess_max = guess_max, progress = progress)
}
#'API that imports a Excel file from Google Drive.
#'@export
getExcelFileFromGoogleDrive <- function(fileId, sheet = 1, col_names = TRUE, col_types = NULL, na = "", skip = 0, trim_ws = TRUE, n_max = Inf, use_readxl = NULL, detectDates = FALSE, skipEmptyRows = FALSE, skipEmptyCols = FALSE, check.names = FALSE, tzone = NULL, convertDataTypeToChar = FALSE, ...) {
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "xlsx")
exploratory::read_excel_file(path = filePath, sheet = sheet, col_names = col_names, col_types = col_types,
na = na, skip = skip, trim_ws = trim_ws, n_max = n_max, use_readxl = use_readxl,
detectDates = detectDates, skipEmptyRows = skipEmptyRows, skipEmptyCols = skipEmptyCols,
check.names = FALSE, tzone = tzone, convertDataTypeToChar = convertDataTypeToChar, ...)
}
#'API that imports multiple Excel files from Google Drive
#'@export
getExcelFilesFromGoogleDrive <- function(fileIds, fileNames, sheet = 1, col_names = TRUE, col_types = NULL, na = "", skip = 0, trim_ws = TRUE, n_max = Inf, use_readxl = NULL, detectDates = FALSE, skipEmptyRows = FALSE, skipEmptyCols = FALSE, check.names = FALSE, convertDataTypeToChar = TRUE, tzone = NULL, ...) {
# set name to the files so that it can be used for the "id" column created by purrr:map_dfr.
files <- setNames(as.list(fileIds), fileNames)
df <- purrr::map_dfr(files, exploratory::getExcelFileFromGoogleDrive, sheet = sheet,
col_names = col_names, col_types = col_types, na = na, skip = skip, trim_ws = trim_ws, n_max = n_max, use_readxl = use_readxl,
detectDates = detectDates, skipEmptyRows = skipEmptyRows, skipEmptyCols = skipEmptyCols, check.names = FALSE,
tzone = tzone, convertDataTypeToChar = convertDataTypeToChar, .id = "exp.file.id") %>% mutate(exp.file.id = basename(exp.file.id)) # extract file name from full path with basename and create file.id column.
id_col <- avoid_conflict(colnames(df), "id")
# copy internal exp.file.id to the id column.
df[[id_col]] <- df[["exp.file.id"]]
# drop internal column and move the id column to the very beginning.
df %>% dplyr::select(!!rlang::sym(id_col), dplyr::everything(), -exp.file.id)
}
#'API that imports multiple Excel files from Google Drive
#'@export
searchAndGetExcelFilesFromGoogleDrive <- function(folderId = NULL, searchKeyword = "", sheet = 1, col_names = TRUE, col_types = NULL, na = "", skip = 0, trim_ws = TRUE, n_max = Inf, use_readxl = NULL, detectDates = FALSE, skipEmptyRows = FALSE, skipEmptyCols = FALSE, check.names = FALSE, convertDataTypeToChar = TRUE, tzone = NULL, ...) {
# set name to the files so that it can be used for the "id" column created by purrr:map_dfr.
items <- exploratory::listItemsInGoogleDrive(path = folderId, type = c("xls", "xlsx"))
if (searchKeyword != "") {
# search condition is case insensitive. (ref: https://www.regular-expressions.info/modifiers.html, https://stackoverflow.com/questions/5671719/case-insensitive-search-of-a-list-in-r)
items <- items %>% filter(stringr::str_detect(name, stringr::str_c("(?i)", searchKeyword)))
}
exploratory::getExcelFilesFromGoogleDrive(items$id, items$name, sheet = sheet, col_names = col_names, col_types = col_types, na = na, skip = skip,
trim_ws = trim_ws, n_max = n_max, use_readxl = use_readxl, detectDates = detectDates, skipEmptyRows = skipEmptyRows,
skipEmptyCols = skipEmptyCols, check.names = check.names, convertDataTypeToChar = convertDataTypeToChar, tzone = tzone, ...)
}
#'Wrapper for readxl::excel_sheets to support Google Drive Excel file
#'@export
getExcelSheetsFromGoogleDriveExcelFile <- function(fileId){
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "xlsx")
readxl::excel_sheets(filePath)
}
#'Wrapper for readr::guess_encoding to support Google Drive csv file
#'@export
guessFileEncodingForGoogleDriveFile <- function(fileId, n_max = 1e4, threshold = 0.20){
filePath <- downloadDataFileFromGoogleDrive(fileId = fileId, type = "csv")
readr::guess_encoding(filePath, n_max, threshold)
}
#' API to download remote data file (excel, csv) from Google Drive and cache it if necessary
#' it uses tempfile https://stat.ethz.ch/R-manual/R-devel/library/base/html/tempfile.html
#' and a R variable with name of hashed region, bucket, key, secret, fileName are assigned to the path given by tempfile.
downloadDataFileFromGoogleDrive <- function(fileId, type = "csv"){
token <- exploratory::getGoogleTokenForDrive()
googledrive::drive_set_token(token)
shouldCacheFile <- getOption("tam.should.cache.datafile")
filepath <- NULL
hash <- digest::digest(stringr::str_c(fileId, type), "md5", serialize = FALSE)
tryCatch({
filepath <- eval(as.name(hash))
}, error = function(e){
# if filePath hash is not set as global variable yet, it raises error that says object not found
# which can be ignored
filepath <- NULL
})
# Check if cached excel/csv exists for the filepath
if (!is.null(shouldCacheFile) && isTRUE(shouldCacheFile) && !is.null(filepath)) {
filepath
} else {
tmp <- tempfile(fileext = stringr::str_c(".", type))
# In case of using Rserve on linux, somehow it doesn't create a temporary
# directory specified by tempdir() which is used as a part of temp file
# path generated by tempfile(). So if you try to use that temp file path,
# dump some data into it for example, it will fail because no such path
# found. This function fails with the same reason at download.file below.
#
# It works fine from the R command line on linux, and it works
# fine all the time on Mac and Windows regardless Rserv or not.
#
# The following command is harmless even if you have the directory already.
# http://stackoverflow.com/questions/4216753/check-existence-of-directory-and-create-if-doesnt-exist
dir.create(tempdir(), showWarnings = FALSE)
# download file to temporary location
googledrive::drive_download(googledrive::as_id(fileId), overwrite = TRUE, path = tmp)
# cache file
if (!is.null(shouldCacheFile) && isTRUE(shouldCacheFile)) {
assign(hash, tmp, envir = .GlobalEnv)
}
tmp
}
}
#' API to clear Google Drive cache file
#' @param fileId
#' @param type
#' @export
clearGoogleDriveCacheFile <- function(fileId, type = "csv"){
options(tam.should.cache.datafile = FALSE)
hash <- digest::digest(stringr::str_c(fileId, type), "md5", serialize = FALSE)
tryCatch({
filepath <- eval(as.name(hash))
do.call(rm, c(as.name(hash)),envir = .GlobalEnv)
unlink(filepath)
}, error = function(e){
})
}
|
testlist <- list(A = structure(c(2.31639962212499e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613115159-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 251 | r | testlist <- list(A = structure(c(2.31639962212499e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
library(ggplot2)
# applies to all graphs
total_df <- read.csv("https://covid.ourworldindata.org/data/owid-covid-data.csv", header=TRUE)
total_df$date<-as.Date(as.character(total_df$date), "%Y-%m-%d")
caption<-str_replace_all(toString(c("https://ourworldindata.org -", format(Sys.Date(), format="%m/%d/%y"))), ",", "")
# Testing Ratio
top_10 <- c("United States", "Italy", "Spain", "Germany", "China", "France", "Iran", "United Kingdom", "Switzerland", "Turkey")
total_df <- subset(total_df, total_df$location%in%top_10)
total_df <- subset(total_df, total_df$date > "2020-02-26")
req_columns <- c("location", "date", "total_cases", "total_tests")
test_ratios <- subset(total_df[req_columns], total_df$total_tests > 0)
test_ratios["ratio"] <- round(test_ratios$total_cases/test_ratios$total_tests * 100, 2)
countries<- c(str_split(unique(test_ratios$location), ","))
for (country in countries)
{
test_ratios[,country] <- ""
}
for (row in 1:nrow(test_ratios))
{
test_ratios[row, paste(test_ratios[row, "location"])] <- test_ratios[row, "ratio"]
}
test_ratios <- select(test_ratios,-c("location", "total_cases", "total_tests", "ratio"))
write.csv(test_ratios, "TestRatios.csv", row.names = FALSE)
#ggplot(test_ratios, aes(y = test_ratios$ratio, x = test_ratios$date)) +
# labs(title="Percentage of Positive Cases Over Time", y = "Test (Pos/Total) in %", x = "", caption = caption)
#ggsave("virus_ratios.png", units="in", width=10, height=5, dpi=300) | /virusAnalysis.R | no_license | paulcollet/CV-19-Project | R | false | false | 1,494 | r | library(ggplot2)
# applies to all graphs
total_df <- read.csv("https://covid.ourworldindata.org/data/owid-covid-data.csv", header=TRUE)
total_df$date<-as.Date(as.character(total_df$date), "%Y-%m-%d")
caption<-str_replace_all(toString(c("https://ourworldindata.org -", format(Sys.Date(), format="%m/%d/%y"))), ",", "")
# Testing Ratio
top_10 <- c("United States", "Italy", "Spain", "Germany", "China", "France", "Iran", "United Kingdom", "Switzerland", "Turkey")
total_df <- subset(total_df, total_df$location%in%top_10)
total_df <- subset(total_df, total_df$date > "2020-02-26")
req_columns <- c("location", "date", "total_cases", "total_tests")
test_ratios <- subset(total_df[req_columns], total_df$total_tests > 0)
test_ratios["ratio"] <- round(test_ratios$total_cases/test_ratios$total_tests * 100, 2)
countries<- c(str_split(unique(test_ratios$location), ","))
for (country in countries)
{
test_ratios[,country] <- ""
}
for (row in 1:nrow(test_ratios))
{
test_ratios[row, paste(test_ratios[row, "location"])] <- test_ratios[row, "ratio"]
}
test_ratios <- select(test_ratios,-c("location", "total_cases", "total_tests", "ratio"))
write.csv(test_ratios, "TestRatios.csv", row.names = FALSE)
#ggplot(test_ratios, aes(y = test_ratios$ratio, x = test_ratios$date)) +
# labs(title="Percentage of Positive Cases Over Time", y = "Test (Pos/Total) in %", x = "", caption = caption)
#ggsave("virus_ratios.png", units="in", width=10, height=5, dpi=300) |
#' Take region names and a comparison date and does diff-in-diff
#'
#' @param target.region: The region of interest, matching a backpage domain
#' @param comparison.region.set: A set of regions (e.g. c('nova','abilene')) to compare to
#' @param event.date: a YYYY-MM-DD date string for the actual event date
#' @param logged: A boolean as to whether to perform a log transform on the data first
#' @return A list with dd, main_diff, comparison_diff, pre, and post values
diffindiff<-function(target.region, comparison.region.set, event.date, logged=FALSE, normalize=FALSE){
data=twolines(target.region=target.region, comparison.region.set=comparison.region.set)
data$date<-as.Date(data$MonthDate, "%Y-%m-%d")
data$MonthDate <-NULL
ed<-as.Date(event.date, "%Y-%m-%d")
data$post = data$date > ed
data <- data[data$date > as.Date("2013-09-01","%Y-%m-%d"),]
data <- data[data$date < as.Date("2014-06-01","%Y-%m-%d"),]
data<-melt(data, id=c("date","post"), variable.name="group", value.name="counts")
if (logged){
data$counts<-log(1+data$counts)
}
pre.target.avg<-mean(data[data$post==FALSE & data$group == "Target",'counts'])
pre.comparison.avg<-mean(data[data$post==FALSE & data$group == "Comparison",'counts'])
if (normalize){
data$counts[data$group == "Comparison"] <- data$counts[data$group == "Comparison"] * pre.target.avg/pre.comparison.avg
}
data <- within(data, group <- relevel(group, ref = "Comparison")) # Set comparison as base group
model<-lm(counts ~ post*group, data=data)
msum<-summary(model)
df<-msum$df[2]
print(summary(model))
# The idea for this model is for the results to be in the order:
# 1: (Intercept)
# 2: postTRUE
# 3: groupTarget
# 4: postTRUE:groupTarget
model.results<-coef(summary(model))
vcov.matrix<-vcov(model)
dd<-list(
b=model.results[4,'Estimate'],
se=model.results[4, "Std. Error"],
t=model.results[4,"t value"]
)
dd$p<-2*pt(-abs(dd$t),df=df-1)
# The diff-in-diff estimate is just 4: postTRUE:groupTarget
# target.change<-
# list(
# b=model.results[1,'Estimate'],
# se=model.results[1, "Std. Error"],
# t=model.results[1,"t value"]
# )# The pre-post difference in the target variable
target.change.vec<-c(0,1,0,1)
# The target change is the sum of the 2nd and 4th variables (set target=True and change post from 1 to 0)
b.target<-target.change.vec %*% model.results[,'Estimate']
se.target<-sqrt(target.change.vec %*% vcov.matrix %*% target.change.vec)
target.change<-list(
b=b.target[1,1],
se=se.target[1,1],
t=b.target[1,1]/se.target[1,1]
)
target.change$p<-2*pt(-abs(target.change$t),df=df-1)
# Note: we have to compute p manually since non-unit vectors will have covariance terms in SE
# and hence won't be in the main results
comparison.vec<-c(0,1,0,0)
# The comparison group is the sum of the 1st and 4th variables
b.comparison<-comparison.vec %*% model.results[,'Estimate']
se.comparison<-sqrt(comparison.vec %*% vcov.matrix %*% comparison.vec)
comparison.change<-list(
b=b.comparison[1,1],
se=se.comparison[1,1],
t=b.comparison[1,1]/se.comparison[1,1]
)
comparison.change$p<-2*pt(-abs(comparison.change$t),df=df-1)
# Note: we have to compute p manually since non-unit vectors will have covariance terms in SE
# and hence won't be in the main results
#comparison.change<-mean(data[data$group == "comparison" & data$post,'counts']) - mean(data[data$group == "comparison" & data$post == FALSE,'counts'])
comparison<-data[data$group == "Comparison",c('date','counts')]
comparison$date <- strftime(comparison$date,"%Y-%m-%d")
target<-data[data$group == "Target",c('date','counts')]
target$date <- strftime(target$date,"%Y-%m-%d")
data<-reshape2::dcast(data=data, formula=date~group, value=counts)
return(list(data=data,
comparison=comparison,
target=target,
#model=model,
diff_in_diff=dd,
target_diff=target.change,
comparison_diff=comparison.change))
} | /opencpu-app/rlines/R/diffindiff.R | permissive | giantoak/tempus | R | false | false | 4,107 | r | #' Take region names and a comparison date and does diff-in-diff
#'
#' @param target.region: The region of interest, matching a backpage domain
#' @param comparison.region.set: A set of regions (e.g. c('nova','abilene')) to compare to
#' @param event.date: a YYYY-MM-DD date string for the actual event date
#' @param logged: A boolean as to whether to perform a log transform on the data first
#' @return A list with dd, main_diff, comparison_diff, pre, and post values
diffindiff<-function(target.region, comparison.region.set, event.date, logged=FALSE, normalize=FALSE){
data=twolines(target.region=target.region, comparison.region.set=comparison.region.set)
data$date<-as.Date(data$MonthDate, "%Y-%m-%d")
data$MonthDate <-NULL
ed<-as.Date(event.date, "%Y-%m-%d")
data$post = data$date > ed
data <- data[data$date > as.Date("2013-09-01","%Y-%m-%d"),]
data <- data[data$date < as.Date("2014-06-01","%Y-%m-%d"),]
data<-melt(data, id=c("date","post"), variable.name="group", value.name="counts")
if (logged){
data$counts<-log(1+data$counts)
}
pre.target.avg<-mean(data[data$post==FALSE & data$group == "Target",'counts'])
pre.comparison.avg<-mean(data[data$post==FALSE & data$group == "Comparison",'counts'])
if (normalize){
data$counts[data$group == "Comparison"] <- data$counts[data$group == "Comparison"] * pre.target.avg/pre.comparison.avg
}
data <- within(data, group <- relevel(group, ref = "Comparison")) # Set comparison as base group
model<-lm(counts ~ post*group, data=data)
msum<-summary(model)
df<-msum$df[2]
print(summary(model))
# The idea for this model is for the results to be in the order:
# 1: (Intercept)
# 2: postTRUE
# 3: groupTarget
# 4: postTRUE:groupTarget
model.results<-coef(summary(model))
vcov.matrix<-vcov(model)
dd<-list(
b=model.results[4,'Estimate'],
se=model.results[4, "Std. Error"],
t=model.results[4,"t value"]
)
dd$p<-2*pt(-abs(dd$t),df=df-1)
# The diff-in-diff estimate is just 4: postTRUE:groupTarget
# target.change<-
# list(
# b=model.results[1,'Estimate'],
# se=model.results[1, "Std. Error"],
# t=model.results[1,"t value"]
# )# The pre-post difference in the target variable
target.change.vec<-c(0,1,0,1)
# The target change is the sum of the 2nd and 4th variables (set target=True and change post from 1 to 0)
b.target<-target.change.vec %*% model.results[,'Estimate']
se.target<-sqrt(target.change.vec %*% vcov.matrix %*% target.change.vec)
target.change<-list(
b=b.target[1,1],
se=se.target[1,1],
t=b.target[1,1]/se.target[1,1]
)
target.change$p<-2*pt(-abs(target.change$t),df=df-1)
# Note: we have to compute p manually since non-unit vectors will have covariance terms in SE
# and hence won't be in the main results
comparison.vec<-c(0,1,0,0)
# The comparison group is the sum of the 1st and 4th variables
b.comparison<-comparison.vec %*% model.results[,'Estimate']
se.comparison<-sqrt(comparison.vec %*% vcov.matrix %*% comparison.vec)
comparison.change<-list(
b=b.comparison[1,1],
se=se.comparison[1,1],
t=b.comparison[1,1]/se.comparison[1,1]
)
comparison.change$p<-2*pt(-abs(comparison.change$t),df=df-1)
# Note: we have to compute p manually since non-unit vectors will have covariance terms in SE
# and hence won't be in the main results
#comparison.change<-mean(data[data$group == "comparison" & data$post,'counts']) - mean(data[data$group == "comparison" & data$post == FALSE,'counts'])
comparison<-data[data$group == "Comparison",c('date','counts')]
comparison$date <- strftime(comparison$date,"%Y-%m-%d")
target<-data[data$group == "Target",c('date','counts')]
target$date <- strftime(target$date,"%Y-%m-%d")
data<-reshape2::dcast(data=data, formula=date~group, value=counts)
return(list(data=data,
comparison=comparison,
target=target,
#model=model,
diff_in_diff=dd,
target_diff=target.change,
comparison_diff=comparison.change))
} |
library("microbenchmark")
library("locfit")
source("histogram_base.R")
Pnm = function(obj){
q = obj$splits
w = obj$weights
k = obj$k
n = length(data)
Pn = c(0,sapply(q,function(j) sum(data<=j)/n),1)
qAug = c(0,q,1)
dis = sapply(1:k,function(i) log(qAug[i+1]-qAug[i]) - log(w[i]))
probs = sapply(1:k,function(i) Pn[i+1]-Pn[i])
-sum(probs*dis)
}
nobs = 200
ks = 10:100
greeds200 = sapply(ks,function(x) mean(microbenchmark(histogram(rbeta(nobs,2,7),k=x,method = "greedy"))$time))
exacts200 = sapply(ks,function(x) mean(microbenchmark(histogram(rbeta(nobs,2,7),k=x,method = "exact"))$time))
plot(locfit(I(log(exacts200))~ks),ylim=c(10,18),col="red",bty="l",ylab="log mean time",xlab="k",type="l")
grid()
lines(locfit(I(log(greeds200))~ks),col="blue")
k = 5
ns = (1:20)*25
greeds5 = sapply(ns,function(x) mean(microbenchmark(histogram(rbeta(x,2,7),k=k,method = "greedy"))$time))
exacts5 = sapply(ns,function(x) mean(microbenchmark(histogram(rbeta(x,2,7),k=k,method = "exact"))$time))
plot(locfit(I(log(exacts5))~ns),ylim=c(10,18),col="red",bty="l",ylab="log mean time",xlab="n",type="l")
grid()
lines(locfit(I(log(greeds5))~ns),col="blue")
nobs = 300
data = rbeta(nobs,1,9)
tdat = rbeta(600000,1,9)
ks = 10:100
greeds300 = sapply(ks,function(x) Pnm(histogram(data,k=x,method = "greedy")))
exacts300 = sapply(ks,function(x) Pnm(histogram(data,k=x,method = "exact")))
trues = sapply(ks,function(x) Pnm(histogram(tdat,k=x,method = "greedy")))
plot(ks,exacts300,col="red",bty="l",ylab="Objective",xlab="k",
ylim=c(min(greeds300,exacts300),max(greeds300,exacts300)))
points(ks,greeds300,col="blue")
points(ks,trues,col="green")
grid()
true = histogram(rbeta(1000000,3,1),k=k,method = "greedy")$splits[j]
library("RColorBrewer")
cols = brewer.pal(4,"Set2")
ns = (1:20)*50
k = 11
j = 5
greeds = rep(NA,length(ns))
exacts = rep(NA,length(ns))
smooths = rep(NA,length(ns))
for (i in 1:length(ns)){
greeds[i] = mean((replicate(100,histogram(rbeta(ns[i],3,1),k=k,method = "greedy")$splits[j])-true)^2)
exacts[i] = mean((replicate(100,histogram(rbeta(ns[i],3,1),k=k,method = "exact")$splits[j])-true)^2)
smooths[i] = mean((replicate(100,histogram(rbeta(ns[i],3,1),k=k,method = "smoothed")$splits[j])-true)^2)
print(i)
}
plot(ns,ns^(2/3)*exacts,bty="l",ylab="Objective",xlab="n",
ylim=c(min(ns^(2/3)*greeds,ns^(2/3)*exacts)-0.01,max(ns^(2/3)*greeds,ns^(2/3)*exacts)),
main="Mean squared error",col=cols[1],pch=15)
points(ns,ns^(2/3)*greeds,col=cols[2],pch=16)
points(ns,ns^(2/3)*smooths,col=cols[3],pch=17)
grid()
legend("topright",c("Exact","Coordinate search","Smoothed"),
col=cols[1:3],pch=c(15,16,17),bty="n")
data = rbeta(100,2,7)
k = 6
plot(histogram(data,method="exact",weights="KL",k=k))
rug(data)
| /OppgaveR/algorithmSpeed.R | no_license | JonasMoss/cuberoot | R | false | false | 2,788 | r | library("microbenchmark")
library("locfit")
source("histogram_base.R")
Pnm = function(obj){
q = obj$splits
w = obj$weights
k = obj$k
n = length(data)
Pn = c(0,sapply(q,function(j) sum(data<=j)/n),1)
qAug = c(0,q,1)
dis = sapply(1:k,function(i) log(qAug[i+1]-qAug[i]) - log(w[i]))
probs = sapply(1:k,function(i) Pn[i+1]-Pn[i])
-sum(probs*dis)
}
nobs = 200
ks = 10:100
greeds200 = sapply(ks,function(x) mean(microbenchmark(histogram(rbeta(nobs,2,7),k=x,method = "greedy"))$time))
exacts200 = sapply(ks,function(x) mean(microbenchmark(histogram(rbeta(nobs,2,7),k=x,method = "exact"))$time))
plot(locfit(I(log(exacts200))~ks),ylim=c(10,18),col="red",bty="l",ylab="log mean time",xlab="k",type="l")
grid()
lines(locfit(I(log(greeds200))~ks),col="blue")
k = 5
ns = (1:20)*25
greeds5 = sapply(ns,function(x) mean(microbenchmark(histogram(rbeta(x,2,7),k=k,method = "greedy"))$time))
exacts5 = sapply(ns,function(x) mean(microbenchmark(histogram(rbeta(x,2,7),k=k,method = "exact"))$time))
plot(locfit(I(log(exacts5))~ns),ylim=c(10,18),col="red",bty="l",ylab="log mean time",xlab="n",type="l")
grid()
lines(locfit(I(log(greeds5))~ns),col="blue")
nobs = 300
data = rbeta(nobs,1,9)
tdat = rbeta(600000,1,9)
ks = 10:100
greeds300 = sapply(ks,function(x) Pnm(histogram(data,k=x,method = "greedy")))
exacts300 = sapply(ks,function(x) Pnm(histogram(data,k=x,method = "exact")))
trues = sapply(ks,function(x) Pnm(histogram(tdat,k=x,method = "greedy")))
plot(ks,exacts300,col="red",bty="l",ylab="Objective",xlab="k",
ylim=c(min(greeds300,exacts300),max(greeds300,exacts300)))
points(ks,greeds300,col="blue")
points(ks,trues,col="green")
grid()
true = histogram(rbeta(1000000,3,1),k=k,method = "greedy")$splits[j]
library("RColorBrewer")
cols = brewer.pal(4,"Set2")
ns = (1:20)*50
k = 11
j = 5
greeds = rep(NA,length(ns))
exacts = rep(NA,length(ns))
smooths = rep(NA,length(ns))
for (i in 1:length(ns)){
greeds[i] = mean((replicate(100,histogram(rbeta(ns[i],3,1),k=k,method = "greedy")$splits[j])-true)^2)
exacts[i] = mean((replicate(100,histogram(rbeta(ns[i],3,1),k=k,method = "exact")$splits[j])-true)^2)
smooths[i] = mean((replicate(100,histogram(rbeta(ns[i],3,1),k=k,method = "smoothed")$splits[j])-true)^2)
print(i)
}
plot(ns,ns^(2/3)*exacts,bty="l",ylab="Objective",xlab="n",
ylim=c(min(ns^(2/3)*greeds,ns^(2/3)*exacts)-0.01,max(ns^(2/3)*greeds,ns^(2/3)*exacts)),
main="Mean squared error",col=cols[1],pch=15)
points(ns,ns^(2/3)*greeds,col=cols[2],pch=16)
points(ns,ns^(2/3)*smooths,col=cols[3],pch=17)
grid()
legend("topright",c("Exact","Coordinate search","Smoothed"),
col=cols[1:3],pch=c(15,16,17),bty="n")
data = rbeta(100,2,7)
k = 6
plot(histogram(data,method="exact",weights="KL",k=k))
rug(data)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/label_eurostat.R
\name{label_eurostat}
\alias{label_eurostat}
\alias{label_eurostat_tables}
\alias{label_eurostat_vars}
\title{label_eurostat}
\usage{
label_eurostat(x, dic = NULL, code = NULL, eu_order = FALSE)
label_eurostat_vars(x)
label_eurostat_tables(x)
}
\arguments{
\item{x}{A character or a factor vector or a data.frame.}
\item{dic}{A string (vector) naming eurostat dictionary or dictionaries.
If \code{NULL} (default) dictionry names taken from column names of
the data.frame.}
\item{code}{For data.frames names of the column for which also codes
should be retained. The suffix "_code" is added to code column names.}
\item{eu_order}{Logical. Should Eurostat ordering used for label levels.
Affects only factors.}
}
\value{
a vector or a data.frame.
}
\description{
Get definitions for Eurostat codes from Eurostat dictionaries.
A character or a factor vector of codes returns a corresponding vector of
definitions. \code{label_eurostat} labels also for data.frames
from \code{\link{get_eurostat}}.
For vectors a dictionary name have to be supplied.
For data.frames dictonary names are taken from column names.
"time" and "values" columns are returned as they were, so you can supply
data.frame from \code{\link{get_eurostat}} and get data.frame with
definitions instead of codes.
}
\section{Functions}{
\itemize{
\item \code{label_eurostat_vars}: Get definitions for variable (column) names. For
objects other than characters or factors definitions are get for names.
\item \code{label_eurostat_tables}: Get definitions for table names
}}
\examples{
\dontrun{
lp <- get_eurostat("nama_aux_lp")
lpl <- label_eurostat(lp)
str(lpl)
lpl_order <- label_eurostat(lp, eu_order = TRUE)
lpl_code <- label_eurostat(lp, code = "unit")
label_eurostat_vars(names(lp))
label_eurostat_tables("nama_aux_lp")
}
}
\author{
Janne Huovari <janne.huovari@ptt.fi>
}
| /man/label_eurostat.Rd | no_license | YohanRobinson/eurostat | R | false | true | 1,970 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/label_eurostat.R
\name{label_eurostat}
\alias{label_eurostat}
\alias{label_eurostat_tables}
\alias{label_eurostat_vars}
\title{label_eurostat}
\usage{
label_eurostat(x, dic = NULL, code = NULL, eu_order = FALSE)
label_eurostat_vars(x)
label_eurostat_tables(x)
}
\arguments{
\item{x}{A character or a factor vector or a data.frame.}
\item{dic}{A string (vector) naming eurostat dictionary or dictionaries.
If \code{NULL} (default) dictionry names taken from column names of
the data.frame.}
\item{code}{For data.frames names of the column for which also codes
should be retained. The suffix "_code" is added to code column names.}
\item{eu_order}{Logical. Should Eurostat ordering used for label levels.
Affects only factors.}
}
\value{
a vector or a data.frame.
}
\description{
Get definitions for Eurostat codes from Eurostat dictionaries.
A character or a factor vector of codes returns a corresponding vector of
definitions. \code{label_eurostat} labels also for data.frames
from \code{\link{get_eurostat}}.
For vectors a dictionary name have to be supplied.
For data.frames dictonary names are taken from column names.
"time" and "values" columns are returned as they were, so you can supply
data.frame from \code{\link{get_eurostat}} and get data.frame with
definitions instead of codes.
}
\section{Functions}{
\itemize{
\item \code{label_eurostat_vars}: Get definitions for variable (column) names. For
objects other than characters or factors definitions are get for names.
\item \code{label_eurostat_tables}: Get definitions for table names
}}
\examples{
\dontrun{
lp <- get_eurostat("nama_aux_lp")
lpl <- label_eurostat(lp)
str(lpl)
lpl_order <- label_eurostat(lp, eu_order = TRUE)
lpl_code <- label_eurostat(lp, code = "unit")
label_eurostat_vars(names(lp))
label_eurostat_tables("nama_aux_lp")
}
}
\author{
Janne Huovari <janne.huovari@ptt.fi>
}
|
#Sampling.r
#Darren Green
#14/01/2021
#pop <- list(N=50, R=5) # population size, expected number of reactors
#test <- list(sens=0.95, spec=0.98) # test parameters
#target <- list(sens=0.95, spec=0.95) # intended herd values
dbetabinom <- function(k,N,eta,theta) {
eta <- eta + 1e-10
theta <- theta + 1e-10
#distribution of +units out of a population of size N based on beta distributed prevalence
x <- exp(
lgamma(N+1)+lgamma(k+eta)+lgamma(N-k+theta)+lgamma(eta+theta)
-lgamma(k+1)-lgamma(N-k+1)-lgamma(N+eta+theta)-lgamma(eta)-lgamma(theta)
)
return(ifelse(is.na(x),0,x))
}
sdbeta <- function(a,b) sqrt(a)*sqrt(b)/(a+b)/sqrt(a+b+1)
sdbeta2 <- function(p,n) sqrt(p*(1-p)/(n+1))
dtails <- function(d,x) {
n <- length(d)
up <- array(0,n)
down <- up
for (i in 1:n) up[i] <- sum(d[1:i])
lo <- n
for (i in 1:n) {
if (up[i]>=x) {lo <- i-1; break}
}
for (i in n:1) down[i] <- sum(d[i:n])
hi <- 1
for (i in n:1) {
if (down[i]>=x) {hi <- i-1; break}
}
return (c(lo,hi))
}
dt <- function(n.pos, n.neg, test) {
# distribution of + tests out of n.pos/n.neg +/- samples.
t.convolution <- array(0, n.pos + n.neg + 1)
if (test$Testtype=="BetaBinomial") {
t.pos <- dbetabinom(0:n.pos, n.pos, test$beta*test$betaN, (1-test$beta)*test$betaN)
t.neg <- dbetabinom(0:n.neg, n.neg, (1-test$alpha)*test$alphaN, test$alpha*test$alphaN)
pos.tails <- dtails(t.pos, 1-test$pConv)
neg.tails <- dtails(t.neg, 1-test$pConv)
lo.pos <- pos.tails[1]
hi.pos <- pos.tails[2]
lo.neg <- neg.tails[1]
hi.neg <- neg.tails[2]
} else {
t.pos <- dbinom(0:n.pos, n.pos, test$beta)
t.neg <- dbinom(0:n.neg, n.neg, 1 - test$alpha)
lo.pos <- qbinom(1-test$pConv, n.pos, test$beta)
hi.pos <- qbinom(test$pConv, n.pos, test$beta)
lo.neg <- qbinom(1-test$pConv, n.neg, 1 - test$alpha)
hi.neg <- qbinom(test$pConv, n.neg, 1 - test$alpha)
}
for (i in lo.pos:hi.pos)
for (j in lo.neg:hi.neg) {
t.convolution[i + j + 1] <- t.convolution[i + j + 1] + t.pos[i + 1] * t.neg[j + 1]
}
return(t.convolution)
}
dT <- function(test, pop, n) {
# distribution of + tests out of sampled population.
T <- array(0, n + 1)
if (test$Sampletype=="Hypergeometric") {
dn <- dhyper(0:n, pop$R, pop$N - pop$R, n)
} else {
dn <- dbinom(0:n, n, pop$R/pop$N)
}
# distribution of + samples out of sampled pop
for (m in 0:n) {
dtm <- dt(m, n - m, test)
T <- T + dtm * dn[m + 1]
}
return(T)
}
herd.test <- function(test, pop, n, cutpoint) {
dTn <- dT(test, pop, n)
B <- sum(dTn[(cutpoint + 2):(n + 1)])
dT0n <- dbinom(0:n, n, 1 - test$alpha)
A <- 1 - sum(dT0n[(cutpoint + 2):(n + 1)])
return(list(B=B, A=A))
}
herd.test.all <- function(test, pop, n) {
dTn <- dT(test, pop, n)
dT0n <- dbinom(0:n, n, 1 - test$alpha)
B <- sapply(0:(n - 1), function(cutpoint) {
sum(dTn[(cutpoint + 2):(n + 1)])
})
A <- sapply(0:(n - 1), function(cutpoint) {
1 - sum(dT0n[(cutpoint + 2):(n + 1)])
})
return(cbind(B,A))
}
DrawROC <- function() {
if (!length(D$H))
plot.new()
else {
plot(
# 1 - D$H$A, D$H$B,
NULL,
type = "o",
xlim = c(0, 1), ylim = c(0, 1),
xlab = "1-specificity", ylab = "sensitivity",
main = paste("ROC for n =", D$Hn),
cex.lab=1.5, cex.axis=1.5, cex.main=1.5
)
rect(0,0,1-D$HAreq,1,col="#ddffdd",border=NA)
rect(1-D$HAreq,D$HBreq,1,1,col="#ddddff",border=NA)
rect(0,D$HBreq,1-D$HAreq,1,col="#ddffff",border=NA)
lines(y = c(D$HBreq, D$HBreq), x = c(0, 1), col = "blue")
lines(x = c(1 - D$HAreq, 1 - D$HAreq), y = c(0, 1), col = "blue")
lines(y = c(0, 1), x = c(0, 1), col = "red")
lines(x=1-D$H$A, y=D$H$B)
points(x=1-D$H$A, y=D$H$B)
lines(x = c(1-D$H$A[1],1), y=c(D$H$B[1],1), lty=3)
text(1 - D$H$A, D$H$B, labels = 0:(D$Hn - 1), cex = 1.5, pos = 4, col = "black")
}
}
imperfect.testing <- function(test, pop, target, batch=FALSE) {
warn <- ""
lo.n <- 1
hi.n <- pop$N
n <- 1
cutpoint <- 0
repeat {
h <- herd.test(test, pop, n, cutpoint)
if (batch==F) setProgress(value = n)
if (h$B < target$Breq) {
lo.n <- n
} else {
hi.n <- n
}
n <- ceiling(exp(0.5 * (log(lo.n) + log(hi.n))) - 1e-5)
if (hi.n - lo.n < 2) {
h <- herd.test(test, pop, n, cutpoint)
if (h$B < target$Breq) {
warn <- "Cannot achieve desired sensitivity by sampling every unit."
warning(warn)
res <- c(0, 0, 0, 0, pop$N, pop$R)
D$results <- res
D$warn <- warn
D$H <- NULL
return(res)
}
if (h$A < target$Areq) {
cutpoint <- cutpoint + 1
if (cutpoint > pop$N) {
warn <- "Cannot achieve desired specificity by sampling every unit."
warning(warn)
res <- c(0, 0, 0, 0, pop$N, pop$R)
D$results <- res
D$H <- NULL
return(res)
}
hi.n <- pop$N
} else {
res <- c(n, cutpoint, h$B, h$A, pop$N, pop$R)
D$results <- res
D$H <- as.data.frame(herd.test.all(test, pop, n))
D$HBreq <- target$Breq
D$HAreq <- target$Areq
D$Hn <- n
D$warn <- warn
# cat(sum(tdist(test, pop, n)))
return(res)
}
}
}
}
OutOfRange <- function(x, min, max, name) {
if (x > max)
showModal(modalDialog(title = "Error", paste(name, "is too high.")))
else if (x < min)
showModal(modalDialog(title = "Error", paste(name, "is too low.")))
else
return(F)
return(T)
}
RunModel <- function() {
D$err <- F
if (OutOfRange(input$N, 1, 25000, "Population size")) {
D$err <- T
return()
}
if (OutOfRange(input$R, 0, input$N, "Number of reactors")) {
D$err <- T
return()
}
if (OutOfRange(input$beta, 0, 1, "Test sensitivity")) {
D$err <- T
return()
}
if (OutOfRange(input$alpha, 0, 1, "Test specificity")) {
D$err <- T
return()
}
if (OutOfRange(input$Breq, 0, 0.98, "Herd sensitivity")) {
D$err <- T
return()
}
if (OutOfRange(input$Areq, 0, 0.98, "Herd specificity")) {
D$err <- T
return()
}
if (OutOfRange(input$betaN, 1, Inf, "Sensitivity sample size")) {
D$err <- T
return()
}
if (OutOfRange(input$alphaN, 1, Inf, "Specificity sample size")) {
D$err <- T
return()
}
if (OutOfRange(input$pConv, 0.99, 1, "Coverage for convolution")) {
D$err <- T
return()
}
pop <- list(N = input$N, R = input$R)
pop$N <- floor(pop$N)
if (pop$R < 1) {
pop$R = floor(pop$R*pop$N)
} else pop$R = floor(pop$R)
if (pop$R/pop$N * input$beta < 1 - input$alpha) {
showModal(modalDialog(title = "Error", paste("Pre-run check: Specificity is too low.")))
D$err <- T
return()
}
imperfect.testing(
list(beta = input$beta, alpha = input$alpha, Testtype = input$Testtype,
betaN = input$betaN, alphaN = input$alphaN, pConv = input$pConv, Sampletype=input$Sampletype),
pop,
list(Breq = input$Breq, Areq = input$Areq),
batch=F
)
}
| /Sampling.r | no_license | pinkmongoose/ShinySampleSize | R | false | false | 7,310 | r | #Sampling.r
#Darren Green
#14/01/2021
#pop <- list(N=50, R=5) # population size, expected number of reactors
#test <- list(sens=0.95, spec=0.98) # test parameters
#target <- list(sens=0.95, spec=0.95) # intended herd values
dbetabinom <- function(k,N,eta,theta) {
eta <- eta + 1e-10
theta <- theta + 1e-10
#distribution of +units out of a population of size N based on beta distributed prevalence
x <- exp(
lgamma(N+1)+lgamma(k+eta)+lgamma(N-k+theta)+lgamma(eta+theta)
-lgamma(k+1)-lgamma(N-k+1)-lgamma(N+eta+theta)-lgamma(eta)-lgamma(theta)
)
return(ifelse(is.na(x),0,x))
}
sdbeta <- function(a,b) sqrt(a)*sqrt(b)/(a+b)/sqrt(a+b+1)
sdbeta2 <- function(p,n) sqrt(p*(1-p)/(n+1))
dtails <- function(d,x) {
n <- length(d)
up <- array(0,n)
down <- up
for (i in 1:n) up[i] <- sum(d[1:i])
lo <- n
for (i in 1:n) {
if (up[i]>=x) {lo <- i-1; break}
}
for (i in n:1) down[i] <- sum(d[i:n])
hi <- 1
for (i in n:1) {
if (down[i]>=x) {hi <- i-1; break}
}
return (c(lo,hi))
}
dt <- function(n.pos, n.neg, test) {
# distribution of + tests out of n.pos/n.neg +/- samples.
t.convolution <- array(0, n.pos + n.neg + 1)
if (test$Testtype=="BetaBinomial") {
t.pos <- dbetabinom(0:n.pos, n.pos, test$beta*test$betaN, (1-test$beta)*test$betaN)
t.neg <- dbetabinom(0:n.neg, n.neg, (1-test$alpha)*test$alphaN, test$alpha*test$alphaN)
pos.tails <- dtails(t.pos, 1-test$pConv)
neg.tails <- dtails(t.neg, 1-test$pConv)
lo.pos <- pos.tails[1]
hi.pos <- pos.tails[2]
lo.neg <- neg.tails[1]
hi.neg <- neg.tails[2]
} else {
t.pos <- dbinom(0:n.pos, n.pos, test$beta)
t.neg <- dbinom(0:n.neg, n.neg, 1 - test$alpha)
lo.pos <- qbinom(1-test$pConv, n.pos, test$beta)
hi.pos <- qbinom(test$pConv, n.pos, test$beta)
lo.neg <- qbinom(1-test$pConv, n.neg, 1 - test$alpha)
hi.neg <- qbinom(test$pConv, n.neg, 1 - test$alpha)
}
for (i in lo.pos:hi.pos)
for (j in lo.neg:hi.neg) {
t.convolution[i + j + 1] <- t.convolution[i + j + 1] + t.pos[i + 1] * t.neg[j + 1]
}
return(t.convolution)
}
dT <- function(test, pop, n) {
# distribution of + tests out of sampled population.
T <- array(0, n + 1)
if (test$Sampletype=="Hypergeometric") {
dn <- dhyper(0:n, pop$R, pop$N - pop$R, n)
} else {
dn <- dbinom(0:n, n, pop$R/pop$N)
}
# distribution of + samples out of sampled pop
for (m in 0:n) {
dtm <- dt(m, n - m, test)
T <- T + dtm * dn[m + 1]
}
return(T)
}
herd.test <- function(test, pop, n, cutpoint) {
dTn <- dT(test, pop, n)
B <- sum(dTn[(cutpoint + 2):(n + 1)])
dT0n <- dbinom(0:n, n, 1 - test$alpha)
A <- 1 - sum(dT0n[(cutpoint + 2):(n + 1)])
return(list(B=B, A=A))
}
herd.test.all <- function(test, pop, n) {
dTn <- dT(test, pop, n)
dT0n <- dbinom(0:n, n, 1 - test$alpha)
B <- sapply(0:(n - 1), function(cutpoint) {
sum(dTn[(cutpoint + 2):(n + 1)])
})
A <- sapply(0:(n - 1), function(cutpoint) {
1 - sum(dT0n[(cutpoint + 2):(n + 1)])
})
return(cbind(B,A))
}
DrawROC <- function() {
if (!length(D$H))
plot.new()
else {
plot(
# 1 - D$H$A, D$H$B,
NULL,
type = "o",
xlim = c(0, 1), ylim = c(0, 1),
xlab = "1-specificity", ylab = "sensitivity",
main = paste("ROC for n =", D$Hn),
cex.lab=1.5, cex.axis=1.5, cex.main=1.5
)
rect(0,0,1-D$HAreq,1,col="#ddffdd",border=NA)
rect(1-D$HAreq,D$HBreq,1,1,col="#ddddff",border=NA)
rect(0,D$HBreq,1-D$HAreq,1,col="#ddffff",border=NA)
lines(y = c(D$HBreq, D$HBreq), x = c(0, 1), col = "blue")
lines(x = c(1 - D$HAreq, 1 - D$HAreq), y = c(0, 1), col = "blue")
lines(y = c(0, 1), x = c(0, 1), col = "red")
lines(x=1-D$H$A, y=D$H$B)
points(x=1-D$H$A, y=D$H$B)
lines(x = c(1-D$H$A[1],1), y=c(D$H$B[1],1), lty=3)
text(1 - D$H$A, D$H$B, labels = 0:(D$Hn - 1), cex = 1.5, pos = 4, col = "black")
}
}
imperfect.testing <- function(test, pop, target, batch=FALSE) {
warn <- ""
lo.n <- 1
hi.n <- pop$N
n <- 1
cutpoint <- 0
repeat {
h <- herd.test(test, pop, n, cutpoint)
if (batch==F) setProgress(value = n)
if (h$B < target$Breq) {
lo.n <- n
} else {
hi.n <- n
}
n <- ceiling(exp(0.5 * (log(lo.n) + log(hi.n))) - 1e-5)
if (hi.n - lo.n < 2) {
h <- herd.test(test, pop, n, cutpoint)
if (h$B < target$Breq) {
warn <- "Cannot achieve desired sensitivity by sampling every unit."
warning(warn)
res <- c(0, 0, 0, 0, pop$N, pop$R)
D$results <- res
D$warn <- warn
D$H <- NULL
return(res)
}
if (h$A < target$Areq) {
cutpoint <- cutpoint + 1
if (cutpoint > pop$N) {
warn <- "Cannot achieve desired specificity by sampling every unit."
warning(warn)
res <- c(0, 0, 0, 0, pop$N, pop$R)
D$results <- res
D$H <- NULL
return(res)
}
hi.n <- pop$N
} else {
res <- c(n, cutpoint, h$B, h$A, pop$N, pop$R)
D$results <- res
D$H <- as.data.frame(herd.test.all(test, pop, n))
D$HBreq <- target$Breq
D$HAreq <- target$Areq
D$Hn <- n
D$warn <- warn
# cat(sum(tdist(test, pop, n)))
return(res)
}
}
}
}
OutOfRange <- function(x, min, max, name) {
if (x > max)
showModal(modalDialog(title = "Error", paste(name, "is too high.")))
else if (x < min)
showModal(modalDialog(title = "Error", paste(name, "is too low.")))
else
return(F)
return(T)
}
RunModel <- function() {
D$err <- F
if (OutOfRange(input$N, 1, 25000, "Population size")) {
D$err <- T
return()
}
if (OutOfRange(input$R, 0, input$N, "Number of reactors")) {
D$err <- T
return()
}
if (OutOfRange(input$beta, 0, 1, "Test sensitivity")) {
D$err <- T
return()
}
if (OutOfRange(input$alpha, 0, 1, "Test specificity")) {
D$err <- T
return()
}
if (OutOfRange(input$Breq, 0, 0.98, "Herd sensitivity")) {
D$err <- T
return()
}
if (OutOfRange(input$Areq, 0, 0.98, "Herd specificity")) {
D$err <- T
return()
}
if (OutOfRange(input$betaN, 1, Inf, "Sensitivity sample size")) {
D$err <- T
return()
}
if (OutOfRange(input$alphaN, 1, Inf, "Specificity sample size")) {
D$err <- T
return()
}
if (OutOfRange(input$pConv, 0.99, 1, "Coverage for convolution")) {
D$err <- T
return()
}
pop <- list(N = input$N, R = input$R)
pop$N <- floor(pop$N)
if (pop$R < 1) {
pop$R = floor(pop$R*pop$N)
} else pop$R = floor(pop$R)
if (pop$R/pop$N * input$beta < 1 - input$alpha) {
showModal(modalDialog(title = "Error", paste("Pre-run check: Specificity is too low.")))
D$err <- T
return()
}
imperfect.testing(
list(beta = input$beta, alpha = input$alpha, Testtype = input$Testtype,
betaN = input$betaN, alphaN = input$alphaN, pConv = input$pConv, Sampletype=input$Sampletype),
pop,
list(Breq = input$Breq, Areq = input$Areq),
batch=F
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_parameter_estimation_survival.R
\name{plot_return_residual_cox}
\alias{plot_return_residual_cox}
\title{Plotting and return the residuals after cox proportional hazard model}
\usage{
plot_return_residual_cox(
param_to_be_estimated,
indep_var,
covariates,
fit,
dataset
)
}
\arguments{
\item{param_to_be_estimated}{parameter to be estimated}
\item{indep_var}{independent variable}
\item{covariates}{covariates}
\item{fit}{fit object from coxph method}
\item{dataset}{data used for cox ph model}
}
\value{
plot and the residuals
}
\description{
Plotting and return the residuals after cox proportional hazard model
}
\examples{
\donttest{
data_for_survival <- survival::lung
surv_estimated <- use_coxph_survival("status", data_for_survival, "sex",
covariates = c("ph.ecog"), "time")
plot_return_residual_cox("status", "sex", covariates = c("ph.ecog"),
surv_estimated$fit,data_for_survival )
}
}
| /man/plot_return_residual_cox.Rd | no_license | sheejamk/packDAMipd | R | false | true | 1,001 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_parameter_estimation_survival.R
\name{plot_return_residual_cox}
\alias{plot_return_residual_cox}
\title{Plotting and return the residuals after cox proportional hazard model}
\usage{
plot_return_residual_cox(
param_to_be_estimated,
indep_var,
covariates,
fit,
dataset
)
}
\arguments{
\item{param_to_be_estimated}{parameter to be estimated}
\item{indep_var}{independent variable}
\item{covariates}{covariates}
\item{fit}{fit object from coxph method}
\item{dataset}{data used for cox ph model}
}
\value{
plot and the residuals
}
\description{
Plotting and return the residuals after cox proportional hazard model
}
\examples{
\donttest{
data_for_survival <- survival::lung
surv_estimated <- use_coxph_survival("status", data_for_survival, "sex",
covariates = c("ph.ecog"), "time")
plot_return_residual_cox("status", "sex", covariates = c("ph.ecog"),
surv_estimated$fit,data_for_survival )
}
}
|
library(testthat)
library(ores)
test_check("ores")
| /tests/testthat.R | no_license | Ironholds/ores | R | false | false | 52 | r | library(testthat)
library(ores)
test_check("ores")
|
\name{ UCEC }
\alias{ UCEC }
\docType{data}
\title{ Uterine Corpus Endometrial Carcinoma }
\description{
A document describing the TCGA cancer code
}
\details{
\preformatted{
> experiments( UCEC )
ExperimentList class object of length 14:
[1] UCEC_CNASeq-20160128: RaggedExperiment with 36400 rows and 213 columns
[2] UCEC_CNASNP-20160128: RaggedExperiment with 619412 rows and 1083 columns
[3] UCEC_CNVSNP-20160128: RaggedExperiment with 127094 rows and 1078 columns
[4] UCEC_GISTIC_AllByGene-20160128: SummarizedExperiment with 24776 rows and 539 columns
[5] UCEC_GISTIC_Peaks-20160128: RangedSummarizedExperiment with 98 rows and 539 columns
[6] UCEC_GISTIC_ThresholdedByGene-20160128: SummarizedExperiment with 24776 rows and 539 columns
[7] UCEC_miRNASeqGene-20160128: SummarizedExperiment with 1046 rows and 433 columns
[8] UCEC_mRNAArray-20160128: SummarizedExperiment with 17814 rows and 54 columns
[9] UCEC_Mutation-20160128: RaggedExperiment with 184861 rows and 248 columns
[10] UCEC_RNASeq2GeneNorm-20160128: SummarizedExperiment with 20501 rows and 380 columns
[11] UCEC_RNASeqGene-20160128: SummarizedExperiment with 20502 rows and 269 columns
[12] UCEC_RPPAArray-20160128: SummarizedExperiment with 208 rows and 440 columns
[13] UCEC_Methylation_methyl27-20160128: SummarizedExperiment with 27578 rows and 118 columns
[14] UCEC_Methylation_methyl450-20160128: SummarizedExperiment with 485577 rows and 466 columns
> rownames( UCEC )
CharacterList of length 14
[["UCEC_CNASeq-20160128"]] character(0)
[["UCEC_CNASNP-20160128"]] character(0)
[["UCEC_CNVSNP-20160128"]] character(0)
[["UCEC_GISTIC_AllByGene-20160128"]] ACAP3 ... WASIR1|ENSG00000185203.7
[["UCEC_GISTIC_Peaks-20160128"]] character(0)
[["UCEC_GISTIC_ThresholdedByGene-20160128"]] ACAP3 ...
[["UCEC_miRNASeqGene-20160128"]] hsa-let-7a-1 hsa-let-7a-2 ... hsa-mir-99b
[["UCEC_mRNAArray-20160128"]] ELMO2 CREB3L1 RPS11 ... SNRPD2 AQP7 CTSC
[["UCEC_Mutation-20160128"]] character(0)
[["UCEC_RNASeq2GeneNorm-20160128"]] A1BG A1CF A2BP1 ... ZZZ3 psiTPTE22 tAKR
...
<4 more elements>
> colnames( UCEC )
CharacterList of length 14
[["UCEC_CNASeq-20160128"]] TCGA-A5-A0G5-01A-11D-A043-02 ...
[["UCEC_CNASNP-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_CNVSNP-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_GISTIC_AllByGene-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_GISTIC_Peaks-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_GISTIC_ThresholdedByGene-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01...
[["UCEC_miRNASeqGene-20160128"]] TCGA-2E-A9G8-01A-11R-A404-13 ...
[["UCEC_mRNAArray-20160128"]] TCGA-A5-A0G2-01A-11R-A040-07 ...
[["UCEC_Mutation-20160128"]] TCGA-A5-A0G3-01A-11W-A062-09 ...
[["UCEC_RNASeq2GeneNorm-20160128"]] TCGA-A5-A0G1-01A-11R-A118-07 ...
...
<4 more elements>
Sizes of each ExperimentList element:
assay size.Mb
1 UCEC_CNASeq-20160128 1 Mb
2 UCEC_CNASNP-20160128 16.9 Mb
3 UCEC_CNVSNP-20160128 3.7 Mb
4 UCEC_GISTIC_AllByGene-20160128 4.9 Mb
5 UCEC_GISTIC_Peaks-20160128 0.1 Mb
6 UCEC_GISTIC_ThresholdedByGene-20160128 4.9 Mb
7 UCEC_miRNASeqGene-20160128 0.1 Mb
8 UCEC_mRNAArray-20160128 1.1 Mb
9 UCEC_Mutation-20160128 73.1 Mb
10 UCEC_RNASeq2GeneNorm-20160128 1.3 Mb
11 UCEC_RNASeqGene-20160128 1.3 Mb
12 UCEC_RPPAArray-20160128 0.1 Mb
13 UCEC_Methylation_methyl27-20160128 4.9 Mb
14 UCEC_Methylation_methyl450-20160128 75.1 Mb
---------------------------
Available sample meta-data:
---------------------------
days_to_death:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
50.0 367.0 709.0 881.8 1063.0 3423.0 457
days_to_last_followup:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
-6.0 543.0 948.5 1195.1 1753.2 6859.0 92
tumor_tissue_site:
endometrial other specify
547 1
date_of_initial_pathologic_diagnosis:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
1995 2007 2009 2009 2010 2013 9
days_to_last_known_alive:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
145 404 729 1405 2096 4144 535
radiation_therapy:
no yes NA's
295 228 25
histological_type:
endometrioid endometrial adenocarcinoma
411
mixed serous and endometrioid
22
serous endometrial adenocarcinoma
115
residual_tumor:
r0 r1 r2 rx NA's
376 22 16 41 93
Including an additional 1779 columns
}}
\keyword{datasets}
| /man/UCEC.Rd | no_license | inambioinfo/curatedTCGAData | R | false | false | 4,774 | rd | \name{ UCEC }
\alias{ UCEC }
\docType{data}
\title{ Uterine Corpus Endometrial Carcinoma }
\description{
A document describing the TCGA cancer code
}
\details{
\preformatted{
> experiments( UCEC )
ExperimentList class object of length 14:
[1] UCEC_CNASeq-20160128: RaggedExperiment with 36400 rows and 213 columns
[2] UCEC_CNASNP-20160128: RaggedExperiment with 619412 rows and 1083 columns
[3] UCEC_CNVSNP-20160128: RaggedExperiment with 127094 rows and 1078 columns
[4] UCEC_GISTIC_AllByGene-20160128: SummarizedExperiment with 24776 rows and 539 columns
[5] UCEC_GISTIC_Peaks-20160128: RangedSummarizedExperiment with 98 rows and 539 columns
[6] UCEC_GISTIC_ThresholdedByGene-20160128: SummarizedExperiment with 24776 rows and 539 columns
[7] UCEC_miRNASeqGene-20160128: SummarizedExperiment with 1046 rows and 433 columns
[8] UCEC_mRNAArray-20160128: SummarizedExperiment with 17814 rows and 54 columns
[9] UCEC_Mutation-20160128: RaggedExperiment with 184861 rows and 248 columns
[10] UCEC_RNASeq2GeneNorm-20160128: SummarizedExperiment with 20501 rows and 380 columns
[11] UCEC_RNASeqGene-20160128: SummarizedExperiment with 20502 rows and 269 columns
[12] UCEC_RPPAArray-20160128: SummarizedExperiment with 208 rows and 440 columns
[13] UCEC_Methylation_methyl27-20160128: SummarizedExperiment with 27578 rows and 118 columns
[14] UCEC_Methylation_methyl450-20160128: SummarizedExperiment with 485577 rows and 466 columns
> rownames( UCEC )
CharacterList of length 14
[["UCEC_CNASeq-20160128"]] character(0)
[["UCEC_CNASNP-20160128"]] character(0)
[["UCEC_CNVSNP-20160128"]] character(0)
[["UCEC_GISTIC_AllByGene-20160128"]] ACAP3 ... WASIR1|ENSG00000185203.7
[["UCEC_GISTIC_Peaks-20160128"]] character(0)
[["UCEC_GISTIC_ThresholdedByGene-20160128"]] ACAP3 ...
[["UCEC_miRNASeqGene-20160128"]] hsa-let-7a-1 hsa-let-7a-2 ... hsa-mir-99b
[["UCEC_mRNAArray-20160128"]] ELMO2 CREB3L1 RPS11 ... SNRPD2 AQP7 CTSC
[["UCEC_Mutation-20160128"]] character(0)
[["UCEC_RNASeq2GeneNorm-20160128"]] A1BG A1CF A2BP1 ... ZZZ3 psiTPTE22 tAKR
...
<4 more elements>
> colnames( UCEC )
CharacterList of length 14
[["UCEC_CNASeq-20160128"]] TCGA-A5-A0G5-01A-11D-A043-02 ...
[["UCEC_CNASNP-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_CNVSNP-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_GISTIC_AllByGene-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_GISTIC_Peaks-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01 ...
[["UCEC_GISTIC_ThresholdedByGene-20160128"]] TCGA-2E-A9G8-01A-11D-A402-01...
[["UCEC_miRNASeqGene-20160128"]] TCGA-2E-A9G8-01A-11R-A404-13 ...
[["UCEC_mRNAArray-20160128"]] TCGA-A5-A0G2-01A-11R-A040-07 ...
[["UCEC_Mutation-20160128"]] TCGA-A5-A0G3-01A-11W-A062-09 ...
[["UCEC_RNASeq2GeneNorm-20160128"]] TCGA-A5-A0G1-01A-11R-A118-07 ...
...
<4 more elements>
Sizes of each ExperimentList element:
assay size.Mb
1 UCEC_CNASeq-20160128 1 Mb
2 UCEC_CNASNP-20160128 16.9 Mb
3 UCEC_CNVSNP-20160128 3.7 Mb
4 UCEC_GISTIC_AllByGene-20160128 4.9 Mb
5 UCEC_GISTIC_Peaks-20160128 0.1 Mb
6 UCEC_GISTIC_ThresholdedByGene-20160128 4.9 Mb
7 UCEC_miRNASeqGene-20160128 0.1 Mb
8 UCEC_mRNAArray-20160128 1.1 Mb
9 UCEC_Mutation-20160128 73.1 Mb
10 UCEC_RNASeq2GeneNorm-20160128 1.3 Mb
11 UCEC_RNASeqGene-20160128 1.3 Mb
12 UCEC_RPPAArray-20160128 0.1 Mb
13 UCEC_Methylation_methyl27-20160128 4.9 Mb
14 UCEC_Methylation_methyl450-20160128 75.1 Mb
---------------------------
Available sample meta-data:
---------------------------
days_to_death:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
50.0 367.0 709.0 881.8 1063.0 3423.0 457
days_to_last_followup:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
-6.0 543.0 948.5 1195.1 1753.2 6859.0 92
tumor_tissue_site:
endometrial other specify
547 1
date_of_initial_pathologic_diagnosis:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
1995 2007 2009 2009 2010 2013 9
days_to_last_known_alive:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
145 404 729 1405 2096 4144 535
radiation_therapy:
no yes NA's
295 228 25
histological_type:
endometrioid endometrial adenocarcinoma
411
mixed serous and endometrioid
22
serous endometrial adenocarcinoma
115
residual_tumor:
r0 r1 r2 rx NA's
376 22 16 41 93
Including an additional 1779 columns
}}
\keyword{datasets}
|
library(tidyverse)
library(synapser)
library(data.table)
library(magrittr)
library(preprocessCore)
home_dir <- "/home/aelamb/repos/Tumor-Deconvolution-Challenge/"
tmp_dir <- "/home/aelamb/tmp/tumor_deconvolution/GSE65135/plots/"
setwd(home_dir)
source("scripts/utils.R")
setwd(tmp_dir)
synLogin()
save_scatter_plot <- function(df, cell_type, dataset, width = 5, height = 5){
path <- str_c(dataset, "_", cell_type, ".svg")
svg(path, width, height)
create_scatter_plot(df, cell_type)
dev.off()
}
create_scatter_plot <- function(df, cell_type){
obj <- cor.test(df$cibersort_predicted_fraction, df$ground_truth_fraction)
p <- obj$p.value %>%
round(4)
r <- obj$estimate %>%
round(4)
title <- str_c(cell_type, " R=", r, " P=", p)
p <- df %>%
ggplot(aes(x = ground_truth_fraction, y = cibersort_predicted_fraction)) +
geom_point(size = 2) +
geom_smooth(method = 'lm') +
geom_abline() +
theme_bw() +
ggtitle(title) +
ylab("Cibersort predicted fraction") +
xlab("Flow cytometry fraction")
print(p)
}
# ---------------------------------------
gt1 <- create_df_from_synapse_id("syn15664977") %>%
gather(key = "cell_type", value = "ground_truth_fraction", -sample) %>%
mutate(ground_truth_fraction = ground_truth_fraction / 100)
cs1 <- create_df_from_synapse_id("syn15665004") %>%
select(-c(`P-value`, Correlation, RMSE)) %>%
set_colnames(., str_replace_all(colnames(.), " ", "_")) %>%
select(-c(`T_cells_regulatory_(Tregs)`)) %>%
mutate(NK_cells = NK_cells_resting + NK_cells_activated) %>%
select(-c(NK_cells_resting,NK_cells_activated)) %>%
dplyr::rename(Gamma_delta_T_cells = T_cells_gamma_delta) %>%
dplyr::rename(Naive_B_cells = B_cells_naive) %>%
dplyr::rename(Memory_B_cells = B_cells_memory) %>%
dplyr::rename(CD8_T_cells = T_cells_CD8) %>%
dplyr::rename(Activated_memory_CD4_T_cells = T_cells_CD4_memory_activated) %>%
dplyr::rename(Naive_CD4_T_cells = T_cells_CD4_naive) %>%
dplyr::rename(Resting_memory_CD4_T_cells = T_cells_CD4_memory_resting) %>%
select(-c(Plasma_cells, Macrophages_M0, Macrophages_M1, Macrophages_M2,
Dendritic_cells_resting, Dendritic_cells_activated,
Mast_cells_resting, Mast_cells_activated, Eosinophils,
Neutrophils, T_cells_follicular_helper)) %>%
gather(key = "cell_type", value = "cibersort_predicted_fraction", -sample)
combined_df1 <- inner_join(gt1, cs1)
cor_df1 <- combined_df1 %>%
select(-sample) %>%
group_by(cell_type) %>%
dplyr::summarise(c = cor(ground_truth_fraction, cibersort_predicted_fraction))
cell_types1 <- combined_df1 %>%
use_series(cell_type) %>%
unique %>%
sort
combined_df1 %>%
split(.$cell_type) %>%
walk2(cell_types1, save_scatter_plot, dataset = "GSE65133")
# ---------------------------------------
gt2 <- create_df_from_synapse_id("syn15664984") %>%
gather(key = "cell_type", value = "ground_truth_fraction", -sample) %>%
mutate(ground_truth_fraction = ground_truth_fraction / 100)
cs2 <- create_df_from_synapse_id("syn15665021") %>%
select(-c(`P-value`, Correlation, RMSE)) %>%
set_colnames(., str_replace_all(colnames(.), " ", "_")) %>%
dplyr::rename(Tregs = `T_cells_regulatory_(Tregs)`) %>%
select(sample, Tregs) %>%
gather(key = "cell_type", value = "cibersort_predicted_fraction", -sample)
combined_df2 <- inner_join(gt2, cs2)
cor_df2 <- combined_df2 %>%
select(-sample) %>%
group_by(cell_type) %>%
dplyr::summarise(c = cor(ground_truth_fraction, cibersort_predicted_fraction))
cell_types2 <- combined_df2 %>%
use_series(cell_type) %>%
unique %>%
sort
combined_df2 %>%
split(.$cell_type) %>%
walk2(cell_types2, save_scatter_plot, dataset = "GSE65134")
# ---------------------------------------
gt3 <- create_df_from_synapse_id("syn15664994") %>%
gather(key = "cell_type", value = "ground_truth_fraction", -sample) %>%
mutate(ground_truth_fraction = ground_truth_fraction / 100)
cs3 <- create_df_from_synapse_id("syn15665023") %>%
select(-c(`P-value`, Correlation, RMSE)) %>%
set_colnames(., str_replace_all(colnames(.), " ", "_")) %>%
    mutate(B_cells = B_cells_naive + B_cells_memory) %>%
mutate(CD4_T_cells = T_cells_CD4_naive + T_cells_CD4_memory_resting + T_cells_CD4_memory_activated) %>%
mutate(CD8_T_cells = T_cells_CD8) %>%
select(sample, B_cells, CD4_T_cells, CD8_T_cells) %>%
gather(key = "cell_type", value = "cibersort_predicted_fraction", -sample)
combined_df3 <- inner_join(gt3, cs3)
cor_df3 <- combined_df3 %>%
select(-sample) %>%
group_by(cell_type) %>%
dplyr::summarise(c = cor(ground_truth_fraction, cibersort_predicted_fraction))
cell_types3 <- combined_df3 %>%
use_series(cell_type) %>%
unique %>%
sort
combined_df3 %>%
split(.$cell_type) %>%
walk2(cell_types3, save_scatter_plot, dataset = "GSE65135")
# -----
get_mean_expr <- function(df){
df %>%
df_to_matrix("Hugo") %>%
rowMeans() %>%
data.frame() %>%
as_data_frame() %>%
set_colnames("mean_expr")
}
save_hist_plot <- function(df, title, width = 3, height = 5){
path <- str_c(title, ".svg")
svg(path, width, height)
create_hist_plot(df, title)
dev.off()
}
create_hist_plot <- function(df, title){
title <- str_replace_all(title, "_", " ")
p <- df %>%
get_mean_expr %>%
ggplot(aes(x = mean_expr)) +
geom_histogram(bins = 60) +
theme_bw() +
ggtitle(title) +
xlab("Expression")
print(p)
}
expr_gse65133 <- create_df_from_synapse_id("syn15664975")
expr_gse65134 <- create_df_from_synapse_id("syn15664995")
expr_gse65135 <- create_df_from_synapse_id("syn15665001")
save_hist_plot(expr_gse65133, "GSE65133_expression")
save_hist_plot(expr_gse65134, "GSE65134_expression")
save_hist_plot(expr_gse65135, "GSE65135_expression")
log_expr_gse65133 <- create_df_from_synapse_id("syn15667753")
log_expr_gse65134 <- create_df_from_synapse_id("syn15667757")
log_expr_gse65135 <- create_df_from_synapse_id("syn15667761")
save_hist_plot(log_expr_gse65133, "GSE65133_log2_expression")
save_hist_plot(log_expr_gse65134, "GSE65134_log2_expression")
save_hist_plot(log_expr_gse65135, "GSE65135_log2_expression")
/analysis/misc/cibersort_sets.R | no_license | Sage-Bionetworks/Tumor-Deconvolution-Challenge | R | false | false | 6,434 | r |
#
# This script builds the plot that answers the following question:
#
# 3. Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have seen decreases
# in emissions from 1999–2008 for Baltimore City? Which have seen increases
# in emissions from 1999–2008? Use the ggplot2 plotting system to make a plot
# to answer this question.
#
library(reshape2)
library(ggplot2)
doPlot3 <- function()
{
# read the National Emissions Inventory (NEI) table
NEI <- readRDS("data/summarySCC_PM25.rds")
# filter the data to just contain Baltimore data
NEI_Baltimore = NEI[NEI$fips == "24510", ]
# reshape the data so that its ID is based on the year and source type
NEImelt = melt(NEI_Baltimore, id = c("year", "type"),
measure.vars="Emissions")
# cast the data based on year using totals
NEImelt_total_by_year = dcast(NEImelt, year + type ~ variable, sum)
# Plot the results
png("plot3.png", width = 720)
gplot = qplot(year, Emissions, data=NEImelt_total_by_year, facets=.~type,
geom=c("point","smooth"), method="lm")
print(gplot)
dev.off()
/exdata-013/ExData_Plotting2/plot3.R | no_license | skdb2015/datasciencecoursera | R | false | false | 1,235 | r |
\name{compare.periods}
\alias{compare.periods}
\alias{plot.compflows}
\title{ Compare residual variability across time periods }
\description{
The function finds the seasonal signal in the first of two time periods within the data series. This signal is used to calculate residual flows in the first time period and "historical residuals", the residuals when the second period is compared to the signal in the first period.
The output of the function gives event information for all residual events greater than \eqn{\sigma} as calculated from the functions \code{sigmaHighFlows} and \code{sigmaLowFlows}.
}
\usage{
compare.periods(p1, p2, x, plot=T)
\method{plot}{compflows} (x, ...)
}
\arguments{
\item{p1}{ Character vector specifying start and end date of the first period. "YYYY-MM-DD" format.}
\item{p2}{ Character vector specifying start and end date of the second period. "YYYY-MM-DD" format. }
\item{x}{ Matrix with first column specifying dates and second column specifying raw discharge data. }
\item{plot}{Logical; defaults to TRUE. If TRUE, the seasonal signal for both periods will be plotted.}
\item{...}{Other parameters. }
}
\value{
Object of S3 class \code{compflows} with the following items:
\item{sigma.low}{Estimate of \eqn{\sigma_{lf}} from period 1 }
\item{sigma.high}{Estimate of \eqn{\sigma_{hf}} from period 1}
\item{p1.levents}{ Matrix of low flow events for period 1 }
\item{p1.hevents}{ Matrix of high flow events for period 1 }
\item{p2.levents}{ Matrix of low flow events for period 2 }
\item{p2.hevents}{ Matrix of high flow events for period 2 }
}
\examples{
# load data
data("sycamore")
# compare for periods from 1960 to 1979 and 1980 to 1999
compare.periods(c("1960-01-01", "1979-12-31"),
c("1980-01-01", "1999-12-31"), sycamore)
}
\seealso{
\code{\link{sigmaHighFlows}}
\code{\link{sigmaLowFlows}}
}
/man/compperiods.Rd | no_license | cran/discharge | R | false | false | 1,897 | rd |
#clear
cat("\014")
x=gun_crime_data # assumes the gun_crime_data data frame has already been loaded into the session
attach(x)
x[is.na(x)]<-0
View(x)
xx<-cbind(pop,ampct,metpct,unrate,prison,totemployed,employ,military,rpci,domestic_gun_production,gallup_owngun,pgs,totmajor,murder,rape,robbery,assault,burg_test,burglary,hg,gun,majorcrime,mc_noburg,gunsamm,amrmms,amhms,phg,pgun,mc_noassault,r1524,r2544,r4564)
y<-prcomp(x,scale=TRUE)
y
summary(y)
#str(y)
#head(y$scores)
yy<-princomp(x,scores = TRUE,cor = TRUE)
summary(yy)
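# proportion of variance explained per component and its cumulative sum,
# computed from the princomp fit above (standard PCA arithmetic)
pve <- yy$sdev^2 / sum(yy$sdev^2)
round(cumsum(pve), 3)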
#plots
#pie
slices <- c(10, 12,5, 16, 8)
lbls <- c( "gun", "unrate", "phg", "military", "hg")
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/pie.jpeg",width=500,height=500)
pie(slices, labels = lbls, main="Pie Chart of Guns and Crime")
dev.off()
#dot
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/dot.jpeg",width=500,height=500)
dotchart(xx,labels=row.names(xx),cex=.7,main="Guns")
dev.off()
#scree
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/scree_prcomp.jpeg",width=500,height=500)
plot(y,type="lines")
dev.off()
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/scree_princomp.jpeg",width=500,height=500)
plot(yy,type="lines")
dev.off()
#bi
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/bi_prcomp.jpeg",width=500,height=500)
biplot(y)
dev.off()
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/bi_princomp.jpeg",width=500,height=500)
biplot(yy)
dev.off()
r=(r1524+r2544+r4564)/3
#Decision Trees
d<-ctree(majorcrime~military,data=x)
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/gdt1.jpeg",width=500,height=500)
plot(d)
dev.off()
d1<-ctree(majorcrime~military+unrate,data=x)
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/gdt3.jpeg",width=500,height=500)
plot(d1)
dev.off()
d4<-ctree(majorcrime~military+unrate+gun,data=x)
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/gdt5.jpeg",width=500,height=500)
plot(d4)
dev.off()
d3<-ctree(majorcrime~military+unrate+gun+hg,data=x)
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/gdt4.jpeg",width=500,height=500)
plot(d3)
dev.off()
d5<-ctree(majorcrime~military+unrate+gun+hg+phg,data=x)
jpeg(filename = "C:/Users/VINOD JAIN/Documents/final/gdt6.jpeg",width=500,height=500)
plot(d5)
dev.off()
/prediction/gunsncrime.R | no_license | vaaishalijain/Data-Reduction-and-Prediction | R | false | false | 2,252 | r |
# Load data
# install.packages('mlbench')
library (mlbench)
data(BreastCancer, package="mlbench")
bc <- BreastCancer[complete.cases(BreastCancer), ] # keep complete rows
bc
#Test if "bc" is a dataframe
is.data.frame(bc)
#verifY data
str(bc)
head(bc)
ncol(bc)
nrow(bc)
tail(bc)
#plot(bc$class, col = "green",breaks = 20)
# remove id column
bc <- bc[,-1]
head(bc)
# convert to numeric
for(i in 1:9) {
bc[, i] <- as.numeric(as.character(bc[, i]))
}
head(bc)
# Change Y values to 1's and 0's and variable class as categorical(factor)
bc$Class <- ifelse(bc$Class == "malignant", 1, 0)
bc$Class <- factor(bc$Class, levels = c(0, 1))
bc[1:10,]
# Prep Training and Test data.
set.seed(1)
index <- sample(1:nrow(bc),683*0.6 ) #en vez de 200 va el 60 por ciento de la cantidad de datos 0.6 *6 y pico
head(index)
testData<-bc[index, ]
nrow(testData)
head(testData)
trainData<-bc[-index, ]
nrow(trainData)
# Class distribution of train and sample data
table(trainData$Class)
table(testData$Class)
# Build Logistic Model using training data
logitmod <- glm(Class ~ Cl.thickness + Cell.size + Cell.shape + Marg.adhesion , family = "binomial", trainData)
summary(logitmod)
anova(logitmod)
# fit neural network
library(neuralnet)
# neuralnet() expects a formula, not a fitted glm object, so restate the model formula
nn=neuralnet(Class ~ Cl.thickness + Cell.size + Cell.shape + Marg.adhesion,
             data=bc, hidden=3, act.fct = "logistic",
             linear.output = FALSE)
nn
# plot neural network
plot(nn)
# predict using test data
pred <- predict(logitmod, newdata = testData, type = "response")
head(pred)
# Recode factors using 50% as probabilistic threshold
y_pred_num <- ifelse(pred > 0.5, 1, 0)
y_pred <- factor(y_pred_num, levels=c(0, 1))
y_act <- testData$Class
class(y_pred)
head(y_pred)
head(y_act)
# Accuracy and contigency analysis
table(y_act, y_pred)
mean(y_pred == y_act)
mean(y_pred != y_act)
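# Sensitivity and specificity from the confusion matrix (standard definitions;
# uses y_act and y_pred computed above)
cm <- table(y_act, y_pred)
sensitivity <- cm["1", "1"] / sum(cm["1", ])
specificity <- cm["0", "0"] / sum(cm["0", ])
c(sensitivity = sensitivity, specificity = specificity)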
#PLOT ROC CURVE- PERFORMANCE
library(ROCR)
pred1 <- prediction(pred, testData$Class)
perf <- performance(pred1,"tpr","fpr")
plot(perf)
#Area under the ROC curve (AUC)
auc <- as.numeric(performance(pred1,"auc")@y.values)
plot(perf,type='o', main = paste('Area Under the Curve =',round(auc,2)))
abline(a=0, b= 1)
/New Variable Parte 1.R | no_license | adrisilvestre/ProyectoFinalAI | R | false | false | 2,170 | r |
context("get_feature_names()")
test_that("get_feature_names() works for \"C50\" objects", {
skip_if_not_installed("C50")
fit <- C50::C5.0(
Species ~ ., data = iris
)
expect_equal(get_feature_names(fit), names(iris)[1L:4L])
})
test_that("get_feature_names() works for \"constparty\" objects", {
skip_if_not_installed("partykit")
suppressWarnings(fit <- partykit::ctree(
mpg ~ ., data = mtcars
))
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"earth\" objects", {
skip_if_not_installed("earth")
fit <- earth::earth(
mpg ~ ., data = mtcars
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"gbm\" objects", {
skip_if_not_installed("gbm")
fit <- gbm::gbm(
mpg ~ ., data = mtcars, distribution = "gaussian", n.trees = 1,
interaction.depth = 1, n.minobsinnode = 1
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"nnet\" objects", {
skip_if_not_installed("nnet")
fit <- nnet::nnet(mpg ~ ., data = mtcars, size = 1, trace = FALSE)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"randomForest\" objects", {
skip_if_not_installed("randomForest")
fit <- randomForest::randomForest(
mpg ~ ., data = mtcars, ntree = 1, mtry = 1
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"ppr\" objects", {
fit <- stats::ppr(mpg ~ ., data = mtcars, nterms = 1)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"lm\" objects", {
fit <- stats::lm(
mpg ~ ., data = mtcars
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"nls\" objects", {
fit <- stats::nls(
mpg ~ SSlogis(log(wt), Asym, xmid, scal), data = mtcars
)
expect_setequal(get_feature_names(fit), "wt")
})
test_that("get_feature_names() works for \"rpart\" objects", {
skip_if_not_installed("rpart")
fit <- rpart::rpart(
mpg ~ ., data = mtcars
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"train\" objects", {
skip_if_not_installed("caret")
fit <- caret::train(
mpg ~ ., data = mtcars, method = "lm"
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"glmnet\" objects", {
skip_if_not_installed("glmnet")
fit <- glmnet::glmnet(
x = stats::model.matrix(mpg ~ ., data = mtcars)[, -1], y = mtcars$mpg,
nlambda = 1
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
})
test_that("get_feature_names() works for \"ranger\" objects", {
skip_if_not_installed("ranger")
for (.write.forest in c(TRUE, FALSE)) {
for (.importance in c("none", "impurity")) {
if (!.write.forest && .importance == "none") next
fit <- ranger::ranger(
mpg ~ ., data = mtcars, num.trees = 1, importance = .importance,
write.forest = .write.forest
)
expect_setequal(get_feature_names(fit), names(mtcars)[-1L])
}
}
fit <- ranger::ranger(
mpg ~ ., data = mtcars, num.trees = 1, importance = "none",
write.forest = FALSE
)
  expect_error(get_feature_names(fit))
})
/tests/testthat/test_get_feature_names.R | no_license | vnijs/vip | R | false | false | 3,430 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topTable.R
\name{topTable}
\alias{topTable}
\title{Show table of results for top clusters or cluster-marker combinations}
\usage{
topTable(res, d_counts = NULL, d_medians = NULL, order = TRUE,
order_by = "p_adj", all = FALSE, top_n = 20, show_counts = FALSE,
show_props = FALSE, show_meds = FALSE, sort_cols = TRUE,
format_vals = FALSE, digits = 3)
}
\arguments{
\item{res}{Output object from either the \code{\link{diffcyt}} wrapper function or one
of the individual differential testing functions (\code{\link{testDA_edgeR}},
\code{\link{testDA_voom}}, \code{\link{testDA_GLMM}}, \code{\link{testDS_limma}}, or
\code{\link{testDS_LMM}}). If the output object is from the wrapper function, the
objects \code{res} and \code{d_counts} will be automatically extracted.
Alternatively, these can be provided directly.}
\item{d_counts}{(Optional) \code{\link{SummarizedExperiment}} object containing cluster
cell counts, from \code{\link{calcCounts}}. (If the output object from the wrapper
function is provided, this will be automatically extracted.)}
\item{d_medians}{(Optional) \code{\link{SummarizedExperiment}} object containing
cluster medians (median marker expression for each cluster-sample combination), from
\code{\link{calcMedians}}. Assumed to contain a logical vector
\code{id_state_markers} in the meta-data (accessed with
\code{metadata(d_medians)$id_state_markers}), which identifies the set of 'cell
state' markers in the list of \code{assays}. (If the output object from the wrapper
function is provided, this will be automatically extracted.)}
\item{order}{Whether to order results by values in column \code{order_by} (default:
column \code{p_adj} containing adjusted p-values). Default = TRUE.}
\item{order_by}{Name of column to use to order rows by values, if \code{order = TRUE}.
Default = \code{"p_adj"} (adjusted p-values); other options include \code{"p_val"},
\code{"cluster_id"}, and \code{"marker_id"}.}
\item{all}{Whether to display all clusters or cluster-marker combinations (instead of
top \code{top_n}). Default = FALSE.}
\item{top_n}{Number of clusters or cluster-marker combinations to display (if \code{all
= FALSE}). Default = 20.}
\item{show_counts}{Whether to display cluster cell counts by sample (from
\code{d_counts}). Default = FALSE.}
\item{show_props}{Whether to display cluster cell count proportions by sample
(calculated from \code{d_counts}). Default = FALSE.}
\item{show_meds}{Whether to display median expression values for each cluster-marker
combination (from \code{d_medians}). Default = FALSE.}
\item{sort_cols}{Whether to sort columns of counts, proportions, and medians; by levels
of factor \code{sample_id} in \code{colData} of \code{d_medians} (requires object
\code{d_medians} to be provided). Default = TRUE.}
\item{format_vals}{Whether to display rounded values in numeric columns. This improves
readability of the summary table, but should not be used when exact numeric values
are required for subsequent steps (e.g. plotting). Default = FALSE.}
\item{digits}{Number of significant digits to show, if \code{format_vals = TRUE}.
Default = 3. (Note: for percentages shown if \code{show_props = TRUE}, \code{digits =
1} is used.)}
}
\value{
Returns a \code{\link{DataFrame}} table of results for the \code{top_n}
clusters or cluster-marker combinations, ordered by values in column \code{order_by}
(default: adjusted p-values). Optionally, cluster counts, proportions, and median
expression by cluster-marker combination are also included.
}
\description{
Show table of results for top (most highly significant) clusters or cluster-marker
combinations
}
\details{
Summary function to display table of results for top (most highly significant) detected
clusters or cluster-marker combinations.
The differential testing functions return results in the form of p-values and adjusted
p-values for each cluster (DA tests) or cluster-marker combination (DS tests), which
can be used to rank the clusters or cluster-marker combinations by their evidence for
differential abundance or differential states. The p-values and adjusted p-values are
stored in the \code{rowData} of the output \code{\link{SummarizedExperiment}} object
generated by the testing functions.
This function displays a summary table of results. By default, the \code{top_n}
clusters or cluster-marker combinations are shown, ordered by adjusted p-values.
Optionally, cluster counts, proportions, and median expression by cluster-marker
combination can also be included. The \code{format_vals} and \code{digits} arguments
can be used to display rounded values to improve readability of the summary table.
}
\examples{
# For a complete workflow example demonstrating each step in the 'diffcyt' pipeline,
# see the package vignette.
# Function to create random data (one sample)
d_random <- function(n = 20000, mean = 0, sd = 1, ncol = 20, cofactor = 5) {
d <- sinh(matrix(rnorm(n, mean, sd), ncol = ncol)) * cofactor
colnames(d) <- paste0("marker", sprintf("\%02d", 1:ncol))
d
}
# Create random data (without differential signal)
set.seed(123)
d_input <- list(
sample1 = d_random(),
sample2 = d_random(),
sample3 = d_random(),
sample4 = d_random()
)
# Add differential abundance (DA) signal
ix_DA <- 801:900
ix_cols_type <- 1:10
d_input[[3]][ix_DA, ix_cols_type] <- d_random(n = 1000, mean = 2, ncol = 10)
d_input[[4]][ix_DA, ix_cols_type] <- d_random(n = 1000, mean = 2, ncol = 10)
# Add differential states (DS) signal
ix_DS <- 901:1000
ix_cols_DS <- 19:20
d_input[[1]][ix_DS, ix_cols_type] <- d_random(n = 1000, mean = 3, ncol = 10)
d_input[[2]][ix_DS, ix_cols_type] <- d_random(n = 1000, mean = 3, ncol = 10)
d_input[[3]][ix_DS, c(ix_cols_type, ix_cols_DS)] <- d_random(n = 1200, mean = 3, ncol = 12)
d_input[[4]][ix_DS, c(ix_cols_type, ix_cols_DS)] <- d_random(n = 1200, mean = 3, ncol = 12)
experiment_info <- data.frame(
sample_id = factor(paste0("sample", 1:4)),
group_id = factor(c("group1", "group1", "group2", "group2")),
stringsAsFactors = FALSE
)
marker_info <- data.frame(
channel_name = paste0("channel", sprintf("\%03d", 1:20)),
marker_name = paste0("marker", sprintf("\%02d", 1:20)),
marker_class = factor(c(rep("type", 10), rep("state", 10)),
levels = c("type", "state", "none")),
stringsAsFactors = FALSE
)
# Create design matrix
design <- createDesignMatrix(experiment_info, cols_design = "group_id")
# Create contrast matrix
contrast <- createContrast(c(0, 1))
# Test for differential abundance (DA) of clusters (using default method 'diffcyt-DA-edgeR')
out_DA <- diffcyt(d_input, experiment_info, marker_info,
design = design, contrast = contrast,
analysis_type = "DA", method_DA = "diffcyt-DA-edgeR",
seed_clustering = 123, verbose = FALSE)
# Test for differential states (DS) within clusters (using default method 'diffcyt-DS-limma')
out_DS <- diffcyt(d_input, experiment_info, marker_info,
design = design, contrast = contrast,
analysis_type = "DS", method_DS = "diffcyt-DS-limma",
seed_clustering = 123, plot = FALSE, verbose = FALSE)
# Display results for top DA clusters
topTable(out_DA, format_vals = TRUE)
# Display results for top DS cluster-marker combinations
topTable(out_DS, format_vals = TRUE)
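# Optionally include cluster cell counts per sample and order by raw p-values
# (arguments as documented above)
topTable(out_DA, format_vals = TRUE, show_counts = TRUE, order_by = "p_val")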
}
/man/topTable.Rd | permissive | gjhanchem/diffcyt | R | false | true | 7,450 | rd |
## These functions cache the inverse of a matrix so the (potentially costly)
## inversion is computed only once and reused afterwards.
## makeCacheMatrix creates a special "matrix" object that stores a matrix
## together with a cached copy of its inverse (NULL until first computed).
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(solveMatrix) inv <<- solveMatrix
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix, using the cached value when available and computing
## (and caching) it otherwise.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
inv
}
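## Example usage: the second call returns the cached inverse
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)   # computes and caches the inverse (diag(0.5, 2))
cacheSolve(m)   # prints "getting cached data" and returns the cached value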
/cachematrix.R | no_license | dolphy17/ProgrammingAssignment2 | R | false | false | 747 | r |
#' Wrapper for using analytical equations with PKPD regimens
#'
#' In development. Needs to be optimized significantly to be useful in production.
#'
#' @param analytical analytical equation, taking parameters `amt`, `parameters`, and `t`, and returning a vector of values for `y`
#' @param design design dataset created by `sim_ode`
#' @param parameters list of parameters
#' @export
analytical_eqn_wrapper <- function(analytical, design = NULL, parameters) {
doses <- which(design$evid == 1)
dat <- data.frame(time = design$t, y = 0)
scale <- parameters$V
for (i in 1:length(doses)) {
if(i < length(doses)) {
i_end <- doses[i+1]
if(i > 1) {
amt <- dat$y[doses[i]] * scale + design$dose[doses[i]]
t <- design$t[doses[i]:i_end] - design$t[doses[i]]
} else {
amt <- design$dose[doses[i]]
t <- design$t[doses[i]:i_end]
}
} else {
i_end <- length(design$t)
amt <- dat$y[doses[i]] * scale + design$dose[doses[i]]
t <- design$t[doses[i]:i_end] - design$t[doses[i]]
}
y <- analytical(amt, parameters, t)
dat[doses[i]:i_end,]$y <- y
}
dat <- data.frame(dat)
dat$time <- as.num(dat$time)
dat$y <- as.num(dat$y)
dat$obs <- as.num(dat$y)
class(dat) <- c("PKPDsim_data", class(dat))
return(dat)
}
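# --- Example 'analytical' function matching the expected signature ----------
# A minimal sketch assuming a one-compartment IV bolus model with parameters
# CL (clearance) and V (volume): C(t) = (amt / V) * exp(-(CL / V) * t).
# Illustrative only; not part of the package API.
analytical_1cmt_iv_bolus <- function(amt, parameters, t) {
  (amt / parameters$V) * exp(-(parameters$CL / parameters$V) * t)
}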
/R/analytical_eqn_wrapper.R | permissive | shanmdphd/PKPDsim | R | false | false | 1,302 | r |
# Load data file
expDB <- read.csv("D:/GitHub/RM1_UC/R_Code/expDB.dat", header=FALSE)
# Deleting column Num_Shifts
expDB <- expDB[, -5]
# Naming columns
colnames(expDB) <- c("Sort_Method", "Sequence_Size", "Prob_Fail", "Size_Larg_Sort_Array")
# Changing the col "Sort_Method" to categorical
expDB[, 1] <- as.factor(expDB[, 1])
list_sort <- c("Bubble_Sort","Insertion_Sort","Merge_Sort","Quick_Sort")
levels(expDB[, 1]) <- list_sort
# Show level
levels(expDB[, 1])
# Changing the col "Sequence_Size" from numerical to categorical
Num_Sequence_Size <- expDB[, 2]
expDB <- cbind(expDB, Num_Sequence_Size)
expDB[, 2] <- as.factor(expDB[, 2])
list_n <- c("100","500","1000","2500","5000","7500","10000")
levels(expDB[, 2]) <- list_n
# Show level
levels(expDB[, 2])
# Changing the col "Prob_Fail" from numerical to categorical
Num_Prob_Fail <- expDB[, 3]
expDB <- cbind(expDB, Num_Prob_Fail)
expDB[, 3] <- as.factor(expDB[, 3])
# Change the levels of the "Prob_Fail" based on categorical association
list_exp <- c("0.25%","0.5%","0.75%","1%","1.25%","1.5%","1.75%")
levels(expDB[, 3]) <- list_exp
# Show level
levels(expDB[, 3])
aux_sort <- "Quick_Sort"
################# How to save multiple plots in one page ######
### http://rstudio-pubs-static.s3.amazonaws.com/2852_379274d7c5734f979e106dcf019ec46c.html
##
# Site with several examples barcharts with ggplot
# https://www.r-graph-gallery.com/48-grouped-barplot-with-ggplot2.html
# Load ggplot2 library
##### Show Barplots of largest sort array considering probabilities of fail for each sequence size
library(grid)
library(ggplot2)
plots <- list()
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "100") )
plots[[1]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "500") )
plots[[2]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "1000") )
plots[[3]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "2500") )
plots[[4]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "5000") )
plots[[5]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "7500") )
plots[[6]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Sequence_Size == "10000") )
plots[[7]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
grid.newpage()
pushViewport(viewport(layout = grid.layout(4, 2)))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
print(plots[[1]], vp = vplayout(1, 1)) # key is to define vplayout
print(plots[[2]], vp = vplayout(1, 2))
print(plots[[3]], vp = vplayout(2, 1)) # key is to define vplayout
print(plots[[4]], vp = vplayout(2, 2))
print(plots[[5]], vp = vplayout(3, 1)) # key is to define vplayout
print(plots[[6]], vp = vplayout(3, 2))
print(plots[[7]], vp = vplayout(4, 1)) # key is to define vplayout
#### All in one plot
s1 <- subset(expDB, (Sort_Method == aux_sort))
ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
##### Show Barplots of largest sort array considering sequences sizes for each probability of fail
library(grid)
library(ggplot2)
plots <- list()
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "0.25%") )
plots[[1]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "0.5%") )
plots[[2]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "0.75%") )
plots[[3]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "1%") )
plots[[4]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "1.25%") )
plots[[5]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "1.5%") )
plots[[6]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
s1 <- subset(expDB, (Sort_Method == aux_sort) & (Prob_Fail == "1.75%") )
plots[[7]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
grid.newpage()
pushViewport(viewport(layout = grid.layout(4, 2)))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
print(plots[[1]], vp = vplayout(1, 1)) # key is to define vplayout
print(plots[[2]], vp = vplayout(1, 2))
print(plots[[3]], vp = vplayout(2, 1)) # key is to define vplayout
print(plots[[4]], vp = vplayout(2, 2))
print(plots[[5]], vp = vplayout(3, 1)) # key is to define vplayout
print(plots[[6]], vp = vplayout(3, 2))
print(plots[[7]], vp = vplayout(4, 1)) # key is to define vplayout
#### All in one plot
s1 <- subset(expDB, (Sort_Method == aux_sort))
ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
#### All in one plot in one page
library(grid)
library(ggplot2)
plots <- list()
s1 <- subset(expDB, (Sort_Method == aux_sort))
plots[[1]] <- ggplot(s1, aes(fill=Prob_Fail, y=Size_Larg_Sort_Array, x=Sequence_Size)) +
geom_bar(position="dodge", stat="identity")
plots[[2]] <- ggplot(s1, aes(fill=Sequence_Size, y=Size_Larg_Sort_Array, x=Prob_Fail)) +
geom_bar(position="dodge", stat="identity")
grid.newpage()
pushViewport(viewport(layout = grid.layout(2, 1)))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
print(plots[[1]], vp = vplayout(1, 1)) # key is to define vplayout
print(plots[[2]], vp = vplayout(2, 1))
####################
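# Alternative: arrange the last two summary plots with gridExtra and save them
# to a single file (sketch; assumes the 'plots' list built above)
library(gridExtra)
g <- arrangeGrob(plots[[1]], plots[[2]], nrow = 2)
ggsave("summary_barplots.png", g, width = 8, height = 10)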
| /R_Code/EDA_After_Loads_Bar_Plot.R | no_license | afonsoblneto/RM1_UC | R | false | false | 6,838 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_dfToMatrix.R
\name{dfToMatrix}
\alias{dfToMatrix}
\title{given data in long form, convert it to wide form;
will improve the function later before being used again}
\usage{
dfToMatrix(df, fieldname, locf = F)
}
\description{
given data in long form, convert it to wide form;
will improve the function later before being used again
}
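\examples{
## Illustrative sketch only (editor's addition): the exact semantics are not
## documented, so the argument values below are hypothetical.
## wide <- dfToMatrix(df = long_df, fieldname = "value", locf = FALSE)
}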
| /man/dfToMatrix.Rd | no_license | davidlamcm/Rtoolbox | R | false | true | 416 | rd |
library(tidyverse)
library(gridExtra)
generate_data <- function(n, p) {
covariates <- matrix(rnorm(n*p), nrow = n, ncol = p)
responses <- rnorm(n)
return(list(covariates = covariates, responses = responses))
}
model_select <- function(covariates, responses, cutoff) {
reg_m <- lm(responses ~ covariates)
p_values <- summary(reg_m)$coefficients[,"Pr(>|t|)"][-1]
index_p <- which(p_values <= cutoff)
if (length(index_p) == 0) {
return(vector())
} else {
new_reg <- lm(responses ~ covariates[, index_p])
return (summary(new_reg)$coefficients[,"Pr(>|t|)"][-1])
}
}
# helper function which combines generate_data, and model_select
generate_and_select <- function(n, p, cutoff) {
gen_data <- generate_data(n, p)
p_vals <- model_select(gen_data$covariates, gen_data$responses, cutoff)
return(p_vals)
}
run_simulation <- function(n_trials, n, p, cutoff) {
collected_p <- replicate(n_trials, generate_and_select(n, p, cutoff))
p_values <- as.numeric(unlist(collected_p))
  ggplot(data.frame(p_values)) + geom_histogram(aes(x = p_values)) +
    labs(x = "p values",
         title = paste0("p-values after selection (n = ", n, ", p = ", p, ", cutoff = ", cutoff, ")"))
}
gghist <- list()
index <- 1
for (n in c(100, 1000, 10000)) {
for (p in c(10, 20, 50)) {
gghist[[index]] <- run_simulation(1000, n, p, 0.05)
index <- index + 1
}
}
grid.arrange(grobs = gghist)
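## Editor's note (hedged): selecting covariates and re-fitting on the same data
## biases the second-stage p-values toward 0, so the histograms should look
## clearly non-uniform. Quick single-run check:
## summary(generate_and_select(n = 1000, p = 20, cutoff = 0.05))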
| /lab_12_simulation.R | no_license | sthkindacrazy/36-350 | R | false | false | 1,351 | r |
if(!exists("LOADED")){
LOADED = TRUE
library(shiny)
library(data.table)
library(seqsetvis)
options(mc.cores = 16)
source("functions.R")
source("functions_tsne.R")
load("dataset1.save")
n_points = 16
piles_img_res = make_tsne_img(profiles_dt = tsne_input$bw_dt,
position_dt = tsne_res, #force_rewrite = TRUE,
apply_norm = FALSE,
ylim = c(0,10),
# xrng = zoom_x,
# yrng = zoom_y,
n_points = n_points,
line_colors = c(
"H3K4me3" = "forestgreen",
"H3K27me3" = "firebrick")
)
p_basic = make_img_plots(img_results = list(piles_img_res),
qcell = NULL,
min_size = 0,
N_ceiling = NULL,
as_facet = FALSE)[[1]]
message(getwd())
tsne_res
tsne_input$tsne_mat = NULL
UI_CELLS = unique(tsne_input$bw_dt$cell)
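    ## Each stable order() pass below moves one cell line to the front, so after
    ## all four passes the display order is H7, CD34, mm1s, Kasumi1, then the rest.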
UI_CELLS = UI_CELLS[order(UI_CELLS != "Kasumi1")]
UI_CELLS = UI_CELLS[order(UI_CELLS != "mm1s")]
UI_CELLS = UI_CELLS[order(UI_CELLS != "CD34")]
UI_CELLS = UI_CELLS[order(UI_CELLS != "H7")]
UI_MARKS = unique(tsne_input$bw_dt$mark)
UI_MARKS = UI_MARKS[order(UI_MARKS != "H3K4me3")]
n_tp = 5000
set.seed(1)
UI_TP = sample(unique(tsne_res$id), n_tp / length(UI_CELLS))
tsne_tp = tsne_res[id %in% UI_TP]
GLOBAL_VIEW_POINTS = "points"
GLOBAL_VIEW_PROFILES_FAST = "profiles (fast)"
GLOBAL_VIEW_PROFILES_SLOW = "profiles (slow)"
GLOBAL_VIEW_DENSITY = "density"
tsne_res$bx = mybin(tsne_res$tx, n_points = 16)
tsne_res$by = mybin(tsne_res$ty, n_points = 16)
szt = tsne_res[, .N, .(bx, by)]
szt[, frac := N / max(N)]
szt = szt[frac > .2]
mdt = merge(tsne_input$bw_dt, tsne_res[, .(id, cell, bx, by)], by = c("id", "cell"))
mdt = mdt[, .(y = mean(y)), by = .(bx, by, x, mark)]
mdt = merge(mdt, szt)
mdt$gx = mybin_centers(tsne_res$tx, n_points = 16)[mdt$bx]
mdt$gy = mybin_centers(tsne_res$ty, n_points = 16)[mdt$by]
# mdt = piles_img_res$summary_profiles_dt
glyph_df = GGally::glyphs(mdt, x_major = "gx", x_minor = "x", y_major = "gy", y_minor = "y")
ggplot(glyph_df, aes(gx, gy, group = paste(gid, mark), color = mark)) +
geom_path() +
scale_color_manual(values = c("input" = "blue",
"H3K4me3" = "forestgreen",
"H3K27me3" = "red"))
qgr = tsne_input$query_gr
names(qgr) = qgr$id
} | /shiny_chiptsne/app_setup.R | no_license | jrboyd/waldron | R | false | false | 2,846 | r |
setwd("D:/2018.07 R_DataAnalysis/2018SUMMER_R/data")
#import data
library(sf)
library(ggplot2)
worldmap <- st_read("TM_WORLD_BORDERS-0.3.shp",stringsAsFactors=FALSE,quiet=TRUE)
names(worldmap)[3] <- 'Country.Code'
worldmap <- st_transform(worldmap, "+init=esri:54030")
raw_gender <- read.csv('Gender_wewant.csv')
gender_map <- merge(worldmap, raw_gender, by="Country.Code")
library(stringr)
for (i in 15:(ncol(gender_map)-1)){   # parentheses added: `15:ncol(x)-1` parses as `(15:ncol(x))-1`
  names(gender_map)[i] <- str_sub(names(gender_map)[i],-5,-2)
  if((i >= 17) && (i < 41)){   # `&&` instead of `==`; equivalent for this pair of tests, but clearer
    gender_map[i+1] <- (gender_map[[i+1]] + gender_map[[i]] + gender_map[[i-1]] + gender_map[[i-2]]) /4
  }
}
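## Editor's note (hedged): each averaged column is written back into gender_map
## before later iterations read it, so later windows reuse already-smoothed
## values -- closer to exponential smoothing than a plain 4-year moving average.
## Kept as in the original analysis.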
library(tidyr)
library(dplyr)   # must be attached before the %>% / filter() / select() calls below
ath <- read.csv("athlete_wewant.csv")
x <- gender_map %>%
  filter(Series.Name == 'Adjusted net enrollment rate, primary, female (% of primary school age children)') %>%
  select('2010')
#plotting
gender_map%>%
filter(Series.Name == 'Adjusted net enrollment rate, primary, female (% of primary school age children)')%>%
select('2010')%>%
ggplot() +
geom_sf(aes(fill=`2010`))+
scale_fill_gradient(low = "#FFE4F3", high = "#F62018")
| /final_pre/part2.R | no_license | cartus0910/2018SUMMER_R | R | false | false | 1,116 | r |
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 4.09969241722868e-118, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615766845-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r |
## Editor's note: the loop below appears to be an abandoned first draft -- it
## compares the numeric wage column against unit strings, uses `==` where an
## assignment was meant, and leaves one condition unfinished (`<=` with no
## value), so it cannot run. It is kept commented out verbatim; the working
## unit-to-annual conversion follows.
# for (i in 1:length(visaFull)){
#   if (visaFull$wage_offer_from_9089[i] <= 100){
#     visaFull$pw_unit_of_pay_9089[i] == "Hour"
#   } else if (visaFull$wage_offer_from_9089[i] > 100 & visaFull$wage_offer_from_9089[i] <=){
#     visaFull$pw_unit_of_pay_9089[i] = visaFull$pw_unit_of_pay_9089[i] * 52
#   } else if (visaFull$wage_offer_from_9089[i] == "Bi-Weekly"){
#     visaFull$pw_unit_of_pay_9089[i] = visaFull$pw_unit_of_pay_9089[i] * 26
#   } else if (visaFull$wage_offer_from_9089[i] == "Week"){
#     visaFull$pw_unit_of_pay_9089[i] = visaFull$pw_unit_of_pay_9089[i] * 52
#   } else if (visaFull$wage_offer_from_9089[i] == "Month"){
#     visaFull$pw_unit_of_pay_9089[i] = visaFull$pw_unit_of_pay_9089[i] * 12
#   } else if (visaFull$wage_offer_from_9089[i] == "Year") {
#     visaFull$pw_unit_of_pay_9089[i] = visaFull$pw_unit_of_pay_9089[i]
#   }
# }
for (i in seq_len(nrow(visaFull))){   # length(visaFull) counts columns; rows are wanted here
  if (visaFull$pw_unit_of_pay_9089[i] == "Hour"){
    visaFull$wage_offer_from_9089[i] = visaFull$wage_offer_from_9089[i] * 40 * 52
  } else if (visaFull$pw_unit_of_pay_9089[i] == "Week"){
    visaFull$wage_offer_from_9089[i] = visaFull$wage_offer_from_9089[i] * 52
  } else if (visaFull$pw_unit_of_pay_9089[i] == "Bi-Weekly"){   # duplicated "Week" branch removed
    visaFull$wage_offer_from_9089[i] = visaFull$wage_offer_from_9089[i] * 26
  } else if (visaFull$pw_unit_of_pay_9089[i] == "Month"){
    visaFull$wage_offer_from_9089[i] = visaFull$wage_offer_from_9089[i] * 12
  } else if (visaFull$pw_unit_of_pay_9089[i] == "Year") {
    visaFull$wage_offer_from_9089[i] = visaFull$wage_offer_from_9089[i]   # already annual
  }
}
index <- which(visaFull$pw_unit_of_pay_9089 == "hr")
visaFull[index, 'pw_unit_of_pay_9089'] <- "Hour"
index <- which(visaFull$pw_unit_of_pay_9089 == "mth")
visaFull[index, 'pw_unit_of_pay_9089'] <- "Month"
index <- which(visaFull$pw_unit_of_pay_9089 == "wk")
visaFull[index, 'pw_unit_of_pay_9089'] <- "Week"
index <- which(visaFull$pw_unit_of_pay_9089 == "bi")
visaFull[index, 'pw_unit_of_pay_9089'] <- "Bi-Weekly"
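## Editor's notes (hedged):
## 1. The abbreviation recoding above runs AFTER the conversion loop, so rows
##    labelled "hr"/"mth"/"wk"/"bi" are never annualised; the recode block
##    likely belongs before the loop.
## 2. A vectorised alternative to the per-row loop, assuming the same labels:
# mult <- c(Hour = 40 * 52, Week = 52, `Bi-Weekly` = 26, Month = 12, Year = 1)
# visaFull$wage_offer_from_9089 <- visaFull$wage_offer_from_9089 *
#   mult[visaFull$pw_unit_of_pay_9089]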
| /Bayesian Statistics/DataSets/Visas/project_deletes.R | no_license | jpcrum/Bayesian-Statistics-for-Data-Science | R | false | false | 2,152 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app_plotchart_aws.R
\name{chartAggrAWSDataSel}
\alias{chartAggrAWSDataSel}
\title{Get aggregated data.}
\usage{
chartAggrAWSDataSel(tstep, net_aws, var_hgt, pars, start, end, aws_dir)
}
\arguments{
\item{tstep}{the time step of the data.}
\item{net_aws}{a vector of the network code and AWS ID, form <network code>_<AWS ID>. AWS network code, 1: vaisala, 2: adcon, 3: koica}
\item{var_hgt}{the variable code and observation height, form <var code>_<height>.}
\item{pars}{parameters.}
\item{start}{start time.}
\item{end}{end time.}
\item{aws_dir}{full path to the directory containing ADT.\cr
Example: "D:/NMA_AWS_v2"}
}
\value{
a JSON object
}
\description{
Get aggregated data to display on chart for multiple AWS.
}
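\examples{
## Illustrative only (editor's sketch): the argument values are hypothetical
## and simply follow the formats described above.
## chartAggrAWSDataSel("hourly", c("1_301", "3_12"), "TT_2", pars = "mean",
##                     start = "2021-01-01 00:00", end = "2021-01-31 23:00",
##                     aws_dir = "D:/NMA_AWS_v2")
}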
| /man/chartAggrAWSDataSel.Rd | no_license | rijaf-iri/mtoadtNMA | R | false | true | 804 | rd |
#************ROracle Oracle Linux+W64 (W32 needs Rtools and build from source) *****************
library(ROracle) #install.packages("ROracle",type='source') fails!
drv <- dbDriver("Oracle")
conn <- dbConnect(drv, "echiu", "whse4Emily","(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(COMMUNITY=tcp.world)(PROTOCOL=TCP)(Host=whse)(Port=1525)))(CONNECT_DATA=(SID=whse)))" )
stoprate<-dbGetQuery(conn,"select PROCESS_DAY,BEAM_ID as BEAM,TYPE2,TYPE3,TYPE10,TERMCNT_FROM_WAREHOUSE as Nom,HT1100_CNT as Denom, TERMCNT_FROM_WAREHOUSE/HT1100_CNT as Rate from QA_REBOOT_NOM join QA_REBOOT_DENOM on COLLECTION_START_TIME_EST=PROCESS_DAY and BEAM_ID=ABSOLUTE_BEAM_ID
where PROCESS_DAY>=SYSDATE-31 order by PROCESS_DAY,BEAM")
colnames(stoprate)<-c("Date","BEAM","Type2","Type3","Type10","Nom","Denom","Rate")
d <- dbReadTable(conn, "QA_CASE_CATEGORY");d
dbDisconnect(conn)
#************RJDBC Oracle: Linux+W64+W32*****************
library(RJDBC)
JdbcPath=if (.Platform$OS.type=="unix") "/usr/lib/oracle/12.1/client64/lib/ojdbc7.jar" else "C:\\OraHome_1\\ojdbc6.jar"
drv <- JDBC("oracle.jdbc.OracleDriver", classPath=JdbcPath)
conn <- dbConnect(drv, "jdbc:oracle:thin:@139.85.128.102:1525:whse", "echiu", "whse4Emily")
e <- dbReadTable(conn, "QA_CASE_CATEGORY");e
dbDisconnect(conn)
#************RJDBC MSSQL(MykeBass and Mine): Linux+W64+W32*****************
library(RJDBC)
JdbcPath=if (.Platform$OS.type=="unix") "/etc/sqljdbc_3.0/sqljdbc4.jar" else "C:\\Program Files\\R\\R-3.1.1\\library\\RJDBC\\java\\sqljdbc4.jar"
drv <- JDBC("com.microsoft.sqlserver.jdbc.SQLServerDriver", classPath=JdbcPath)
conn <- dbConnect(drv, "jdbc:sqlserver://echiult7\\SQLEVAL:1433","BLRptAcct","ReadM*0nly") #databbaseName='etl' does not help, only works from echiult7
#conn <- dbConnect(drv, "jdbc:sqlserver://expbitssql.hughes.com", "BLRptAcct", "ReadM*0nly")
f <- dbReadTable(conn, "information_schema.tables");f
g <- dbGetQuery(conn, "select top 10 * from etl.dbo.PS_SITE");g
dbDisconnect(conn)
#************RODBC Oracle: W32+W64*****************
require(RODBC)
OdbcConn=if (.Platform$r_arch=="i386") "whseprd" else "whseprd64"
conn<-odbcConnect(OdbcConn,uid='echiu',pwd='whse4Emily')
g <- sqlQuery(conn, "select * from QA_CASE_CATEGORY");g
close(conn)
#************RODBC MSSQL: W32+W64 (Linux fails at configure long 77 problem) *****************
require(RODBC)
#OdbcConn<-if (.Platform$r_arch=="i386") "SQLEVAL" else "SQLEVAL64"
#conn<-odbcConnect(OdbcConn,uid='echiu',pwd='P@ssw0rd2')
#g <- sqlquery(conn, "select top 10 * from etl.dbo.PS_SITE");g
OdbcConn=if (.Platform$r_arch=="i386") "EXPBITSSQL32" else "EXPBITSSQL64"
conn<-odbcConnect(OdbcConn,uid='BLRptAcct',pwd='ReadM*0nly')
g<-sqlFetch(conn,"information_schema.tables");g
close(conn)
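## Editor's sketch (hedged): the examples above hardcode credentials; pulling
## them from environment variables keeps the script shareable. Variable names
## here are illustrative.
# conn <- odbcConnect(OdbcConn, uid = Sys.getenv("WHSE_USER"), pwd = Sys.getenv("WHSE_PWD"))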
#######################csv #####################################
setwd("./data/EOC")
#system("cut -f $(head -1 EOC.csv |sed 's/,/\'$'\n/g' |grep -ni 'Churn_In_Range_IND' |cut -f1 -d:) -d, EOC.csv >premodel.csv") #for one column
csvfile<-"/mnt/R/input/EOC.csv"
outputfile<-"premodel.csv"
nrows<-197378
colName<-c("AccountNumber", "ActivationDate", "Churn_Week", "Churn_In_Range_IND", "Churn_Before_Range_IND", "Churn_After_Range_IND","Churn_Date", "Express_Repair_PRESENT")
system.time(header<-read.csv(csvfile, header=FALSE, stringsAsFactors=FALSE, nrows=1))
colNum<-paste0(which(header %in% colName),collapse=',')
systemCmd<-paste("cut -d, -f",colNum, csvfile,">",outputfile )
system(systemCmd)
system.time(EOC <- read.csv(outputfile, nrows = nrows, stringsAsFactors = FALSE))  # reuse nrows defined above
########################Excel##########################################
library(openxlsx)
churnOct<-read.xlsx("/mnt/R/data/reboot_study/Sunil.xlsx",sheet=1)
churnNov<-read.xlsx("/mnt/R/data/reboot_study/Sunil.xlsx",sheet=2)
churnDec<-read.xlsx("/mnt/R/data/reboot_study/Sunil.xlsx",sheet=3)
churnwhat<-read.xlsx("/mnt/R/data/reboot_study/morethan1sheet.xlsx",sheet=5)
| /Rconnect.R | no_license | emailechiu/initial | R | false | false | 3,856 | r |
require("datasets")
data("iris") # load Iris Dataset
str(iris) #view structure of dataset
## ---- results="hide"----------------------------------------------------
iris.new<- iris[,c(1,2,3,4)]
iris.class<- iris[,"Species"]
## ---- results="hide"----------------------------------------------------
normalize <- function(x){
return ((x-min(x))/(max(x)-min(x)))
}
iris.new$Sepal.Length<- normalize(iris.new$Sepal.Length)
iris.new$Sepal.Width<- normalize(iris.new$Sepal.Width)
iris.new$Petal.Length<- normalize(iris.new$Petal.Length)
iris.new$Petal.Width<- normalize(iris.new$Petal.Width)
head(iris.new)
## ---- message=FALSE-----------------------------------------------------
result<- kmeans(iris.new,3)
result$size # gives no. of records in each cluster
result$centers
## ---- message=FALSE-----------------------------------------------------
result<- kmeans(iris.new,3)
result$size # gives no. of records in each cluster
result$centers
## ---- message=FALSE, echo= FALSE-----------------------------------------
par(mfrow=c(2,2), mar=c(5,4,2,2))
plot(iris.new[c(1,2)], col=result$cluster)
plot(iris.new[c(1,2)], col=iris.class)
plot(iris.new[c(3,4)], col=result$cluster)
plot(iris.new[c(3,4)], col=iris.class)
## ---- results="hide"----------------------------------------------------
data(wine, package='rattle')
head(wine)
wine.stand <- scale(wine[-1]) # To standarize
# K-Means
k.means.fit <- kmeans(wine.stand, 3) # k = 3
## ---- results="hide"----------------------------------------------------
k.means.fit$cluster
k.means.fit$size
## ---- results="hide", message=FALSE, fig.keep=FALSE----------------------
wssplot <- function(data, nc=15, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")}
## ---- echo=FALSE---------------------------------------------------------
wssplot <- function(data, nc=15, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")}
wssplot(wine.stand, nc=6)
## ---- echo=FALSE---------------------------------------------------------
require(cluster)
silhouette_score <- function(k){
km <- kmeans(wine.stand, centers = k, nstart=25)
ss <- silhouette(km$cluster, dist(wine.stand))
mean(ss[, 3])
}
k <- 2:10
avg_sil <- sapply(k, silhouette_score)
plot(k, type='b', avg_sil, xlab='Number of clusters',
ylab='Average Silhouette Scores', frame=FALSE)
## ---- message= FALSE, echo=FALSE, eval=TRUE, message = FALSE, warning=FALSE----
require(jpeg)
require(RCurl)
url <-"https://raw.githubusercontent.com/mages/diesunddas/"
url <- paste(url, "master/Blog/LloydsBuilding.jpg", sep="")
readImage <- readJPEG(getURLContent(url, binary=TRUE))
dm <- dim(readImage)
rgbImage <- data.frame(
x=rep(1:dm[2], each=dm[1]),
y=rep(dm[1]:1, dm[2]),
r.value=as.vector(readImage[,,1]),
g.value=as.vector(readImage[,,2]),
b.value=as.vector(readImage[,,3]))
plot(y ~ x, data=rgbImage, main="Lloyd building",
col = rgb(rgbImage[c("r.value", "g.value", "b.value")]),
asp = 1, pch = ".")
## ---- message= FALSE, echo=FALSE, eval=TRUE, message = FALSE, warning=FALSE----
kColors <- 5
kMeans <- kmeans(rgbImage[, c("r.value", "g.value", "b.value")],
centers = kColors)
clusterColour <- rgb(kMeans$centers[kMeans$cluster, ])
plot(y ~ x, data=rgbImage, main="Lloyd building",
col = clusterColour, asp = 1, pch = ".",
axes=FALSE, ylab="",
xlab="k-means cluster analysis of 5 colours")
| /.Rproj.user/A86A8592/sources/per/t/DD9AD1D7-contents | no_license | christy-jacob41/Tweet-Sentiments-Data-Science | R | false | false | 3,954 | ## ---- results="hide"----------------------------------------------------
require("datasets")
data("iris") # load Iris Dataset
str(iris) #view structure of dataset
## ---- results="hide"----------------------------------------------------
iris.new<- iris[,c(1,2,3,4)]
iris.class<- iris[,"Species"]
## ---- results="hide"----------------------------------------------------
normalize <- function(x){
return ((x-min(x))/(max(x)-min(x)))
}
iris.new$Sepal.Length<- normalize(iris.new$Sepal.Length)
iris.new$Sepal.Width<- normalize(iris.new$Sepal.Width)
iris.new$Petal.Length<- normalize(iris.new$Petal.Length)
iris.new$Petal.Width<- normalize(iris.new$Petal.Width)
head(iris.new)
## ---- message=FALSE-----------------------------------------------------
result<- kmeans(iris.new,3)
result$size # gives no. of records in each cluster
result$centers
## ---- message=FALSE-----------------------------------------------------
result<- kmeans(iris.new,3)
result$size # gives no. of records in each cluster
result$centers
## ---- message=FALSE, echo= FALSE-----------------------------------------
par(mfrow=c(2,2), mar=c(5,4,2,2))
plot(iris.new[c(1,2)], col=result$cluster)
plot(iris.new[c(1,2)], col=iris.class)
plot(iris.new[c(3,4)], col=result$cluster)
plot(iris.new[c(3,4)], col=iris.class)
## ---- results="hide"----------------------------------------------------
data(wine, package='rattle')
head(wine)
wine.stand <- scale(wine[-1]) # To standarize
# K-Means
k.means.fit <- kmeans(wine.stand, 3) # k = 3
## ---- results="hide"----------------------------------------------------
k.means.fit$cluster
k.means.fit$size
## ---- results="hide", message=FALSE, fig.keep=FALSE----------------------
wssplot <- function(data, nc=15, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")}
## ---- echo=FALSE---------------------------------------------------------
wssplot <- function(data, nc=15, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")}
wssplot(wine.stand, nc=6)
## ---- echo=FALSE---------------------------------------------------------
require(cluster)
silhouette_score <- function(k){
km <- kmeans(wine.stand, centers = k, nstart=25)
ss <- silhouette(km$cluster, dist(wine.stand))
mean(ss[, 3])
}
k <- 2:10
avg_sil <- sapply(k, silhouette_score)
plot(k, type='b', avg_sil, xlab='Number of clusters',
ylab='Average Silhouette Scores', frame=FALSE)
## ---- message= FALSE, echo=FALSE, eval=TRUE, message = FALSE, warning=FALSE----
require(jpeg)
require(RCurl)
url <-"https://raw.githubusercontent.com/mages/diesunddas/"
url <- paste(url, "master/Blog/LloydsBuilding.jpg", sep="")
readImage <- readJPEG(getURLContent(url, binary=TRUE))
dm <- dim(readImage)
rgbImage <- data.frame(
x=rep(1:dm[2], each=dm[1]),
y=rep(dm[1]:1, dm[2]),
r.value=as.vector(readImage[,,1]),
g.value=as.vector(readImage[,,2]),
b.value=as.vector(readImage[,,3]))
plot(y ~ x, data=rgbImage, main="Lloyd building",
col = rgb(rgbImage[c("r.value", "g.value", "b.value")]),
asp = 1, pch = ".")
## ---- message= FALSE, echo=FALSE, eval=TRUE, message = FALSE, warning=FALSE----
kColors <- 5
kMeans <- kmeans(rgbImage[, c("r.value", "g.value", "b.value")],
centers = kColors)
clusterColour <- rgb(kMeans$centers[kMeans$cluster, ])
plot(y ~ x, data=rgbImage, main="Lloyd building",
col = clusterColour, asp = 1, pch = ".",
axes=FALSE, ylab="",
xlab="k-means cluster analysis of 5 colours")
| |
# Bootstrap to look at the distribution of the loss measure
library(readr)
# NOTE: the order of the arguments matters, since the loss is not symmetric:
# cat_ref   - reference labels first
# cat_clust - labels obtained from the clustering
# m_perda   - symmetric weight matrix; if no argument is given, uniform loss is used
perda_gen <- function(cat_ref,cat_clust,m_perda) {
  tab = table(cat_ref,cat_clust)
  # Total number of observations
  N = sum(tab)
  # Number of clusters
  k = dim(tab)[2]
  # Number of categories in the true labels
  m = dim(tab)[1]
  # If no weight matrix was passed, use uniform weights
  if(missing(m_perda)){
    m_perda = matrix(rep(1,m^2),nrow=m) - diag(m)
  }
  # Start at 0 and accumulate one term at a time
  perda = 0
  # Loop over clusters
  for (i in 1:k){
    L_c = 0
    # Cluster size
    n_c = sum(tab[,i])
    # Loop over pairs; test every pair j <= l (if l = j the loss is 0)
    for(j in 1:m){
      for(l in j:m){
        # If the cluster has 0 or 1 elements, add 0 since no pairs can be formed
        if(n_c == 1 | n_c == 0) {L_c = L_c + 0}
        else {
          # Add the loss term for the event of finding songs of types j and l
          # in the same cluster, multiplied by its cost
          L_c = L_c + tab[j,i]*tab[l,i]/choose(n_c,2)*m_perda[j,l]
        }
      }
    }
    # Add the cluster's term to the total loss
    perda = perda + L_c
  }
  # Divide by the number of clusters to reflect choosing a cluster uniformly
  perda = perda/k
  # Return the value of the function
  return(perda)
}
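## Tiny illustrative check (editor's addition): two pure clusters give loss 0
## under the uniform cost matrix.
# perda_gen(c("a","a","b","b"), c(1,1,2,2))   # expected: 0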
# With the uniform cost matrix this loss function can also be interpreted as
# the probability that two songs drawn at random from a cluster
# belong to different genres.
# Number of bootstrap resamples
p = 10000
y2 = read_csv("mfcc_R_H_C_J_F_2729_obs.csv")
y2 = y2$genres
y1 = read_csv("mfcc_hip_cla_old.csv")
y1 = y1$genre_top
#y = sample(1:n_gen,4000,replace = TRUE)
perda_dist3 = rep(0,p)
perda_dist5 = rep(0,p)
set.seed(1000)
for (i in 1:p){
print(i)
perda_dist3[i] = perda_gen(y1,sample(1:3,length(y1),replace = TRUE))
perda_dist5[i] = perda_gen(as.vector(y2),sample(1:5,length(y2),replace = TRUE))
#imp_dist[i] = imp_index(y,sample(1:n_gen,length(y),replace = TRUE))
#vi_dist[i] = vi.dist(y,sample(1:3,length(y),replace = TRUE))
}
par(mfrow=c(1,2))
hist(perda_dist3,breaks=100)
hist(perda_dist5,breaks=100)
#hist(vi_dist,breaks=100)
| /funcao_perda.R | no_license | Lucas-Schwengber/Music-Clust-Dados-Testes | R | false | false | 2,500 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.r
\name{query_pval_file}
\alias{query_pval_file}
\title{Query pval from vcf file}
\usage{
query_pval_file(pval, vcffile, id = NULL, build = "GRCh37")
}
\arguments{
\item{pval}{P-value threshold (NOT -log10)}
\item{vcffile}{Path to tabix indexed vcf file}
\item{id}{If multiple GWAS datasets in the vcf file, the name (sample ID) from which to perform the filter}
\item{build}{Default="GRCh37"}
}
\value{
VCF object
}
\description{
Query pval from vcf file
}
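\examples{
## Illustrative only (editor's sketch); the file path is hypothetical:
## vcf <- query_pval_file(5e-8, "mygwas.vcf.gz")
}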
| /man/query_pval_file.Rd | permissive | MRCIEU/gwasvcf | R | false | true | 545 | rd |
/Lupus Stan/Stan Model simple SSL.R | no_license | nelfilho/MIXEIV | R | false | false | 968 | r | ||
### 2c
n = 10
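## Normal-normal conjugate update: prior N(180, 40^2), sampling sd 20, so the
## posterior precision is 1/40^2 + n/20^2 and the posterior mean is the
## precision-weighted average; the predictive variance adds back 20^2.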
theta = c((1/(40^2)*180+n/(20^2)*150)/(1/(40^2)+n/(20^2)), 1/(1/40^2+n/20^2))
yhat = c((1/(40^2)*180+n/(20^2)*150)/(1/(40^2)+n/(20^2)), 1/(1/40^2+n/20^2)+20^2)
qnorm(c(0.025, 0.975), mean = theta[1], sd = sqrt(theta[2]))
qnorm(c(0.025, 0.975), mean = yhat[1], sd = sqrt(yhat[2]))
### 2d
n = 100
theta = c((1/(40^2)*180+n/(20^2)*150)/(1/(40^2)+n/(20^2)), 1/(1/40^2+n/20^2))
yhat = c((1/(40^2)*180+n/(20^2)*150)/(1/(40^2)+n/(20^2)), 1/(1/40^2+n/20^2)+20^2)
qnorm(c(0.025, 0.975), mean = theta[1], sd = sqrt(theta[2]))
qnorm(c(0.025, 0.975), mean = yhat[1], sd = sqrt(yhat[2]))
### 3c
r = 5
Y = c(7,10,5,8,6,12,6,9,7,5)
alpha = c(1,.5,5,1,5)
beta = c(1,.5,5,5,1)
par(mfrow = c(3,1))
for(i in 1:length(alpha)){
theta = seq(0,1,0.01)
credible_interval = qbeta(c(0.025,0.975), r*length(Y)+alpha[i], sum(Y)+beta[i])
posterior_mean = (r*length(Y)+alpha[i])/(r*length(Y)+alpha[i]+sum(Y)+beta[i])
plot(theta, dbeta(theta, r*length(Y)+alpha[i], sum(Y)+beta[i]),
type = "l", col = "blue", xlab = expression(theta),
ylab = "Density",
main = paste("alpha = ", alpha[i],"and", "beta = ", beta[i],
"- blue is posterior and red is prior"),
sub = paste("The Credible Interval is", "(",credible_interval[1],",",
credible_interval[2], ")",
"and the posterior mean is", posterior_mean))
lines(theta, dbeta(theta, alpha[i], beta[i]), col = "red")
}
| /HW1/hw 1.R | no_license | arifyali/Bayesian-Statistics-Mathematics-640-Spring-2016 | R | false | false | 1,442 | r |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/train.R
\name{predict.MOA_trainedmodel}
\alias{predict.MOA_trainedmodel}
\title{Predict using a MOA classifier, MOA regressor or MOA recommender on a new dataset}
\usage{
\method{predict}{MOA_trainedmodel}(object, newdata, type = "response",
transFUN = object$transFUN, na.action = na.fail, ...)
}
\arguments{
\item{object}{an object of class \code{MOA_trainedmodel}, as returned by \code{\link{trainMOA}}}
\item{newdata}{a data.frame with the same structure and the same levels as used in \code{trainMOA} for MOA classifier, MOA regressor,
a data.frame with at least the user/item columns which were used in \code{\link{trainMOA}} when training
the MOA recommendation engine}
\item{type}{a character string, either 'response' or 'votes'}
\item{transFUN}{a function which is used on \code{newdata}
before applying \code{\link{model.frame}}.
Useful if you want to change the results \code{get_points} on the datastream
(e.g. for making sure the factor levels are the same in each chunk of processing, some data cleaning, ...).
Defaults to \code{transFUN} available in \code{object}.}
\item{na.action}{passed on to model.frame when constructing the model.matrix from \code{newdata}. Defaults to \code{na.fail}.}
\item{...}{other arguments, currently not used yet}
}
\value{
A matrix of votes or a vector with the predicted class for a MOA classifier or MOA regressor.
A vector of predicted ratings for a MOA recommender.
}
\description{
Predict using a MOA classifier, MOA regressor or MOA recommender on a new dataset.\cr
Make sure the new dataset has the same structure
and the same levels as \code{get_points} returns on the datastream which was used in \code{trainMOA}
}
\examples{
## Hoeffdingtree
hdt <- HoeffdingTree(numericEstimator = "GaussianNumericAttributeClassObserver")
data(iris)
## Make a training set
iris <- factorise(iris)
traintest <- list()
traintest$trainidx <- sample(nrow(iris), size=nrow(iris)/2)
traintest$trainingset <- iris[traintest$trainidx, ]
traintest$testset <- iris[-traintest$trainidx, ]
irisdatastream <- datastream_dataframe(data=traintest$trainingset)
## Train the model
hdtreetrained <- trainMOA(model = hdt,
Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
data = irisdatastream)
## Score the model on the holdoutset
scores <- predict(hdtreetrained,
newdata=traintest$testset[, c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width")],
type="response")
str(scores)
table(scores, traintest$testset$Species)
scores <- predict(hdtreetrained, newdata=traintest$testset, type="votes")
head(scores)
## Prediction based on recommendation engine
require(recommenderlab)
data(MovieLense)
x <- getData.frame(MovieLense)
x$itemid <- as.integer(as.factor(x$item))
x$userid <- as.integer(as.factor(x$user))
x$rating <- as.numeric(x$rating)
x <- head(x, 2000)
movielensestream <- datastream_dataframe(data=x)
movielensestream$get_points(3)
ctrl <- MOAoptions(model = "BRISMFPredictor", features = 10)
brism <- BRISMFPredictor(control=ctrl)
mymodel <- trainMOA(model = brism, rating ~ userid + itemid,
data = movielensestream, chunksize = 1000, trace=TRUE)
overview <- summary(mymodel$model)
str(overview)
predict(mymodel, head(x, 10), type = "response")
x <- expand.grid(userid=overview$users[1:10], itemid=overview$items)
predict(mymodel, x, type = "response")
}
\seealso{
\code{\link{trainMOA}}
}
| /RMOA/pkg/man/predict.MOA_trainedmodel.Rd | no_license | eralpdogu/RMOA | R | false | false | 3,402 | rd |
rm(list = ls())
graphics.off()
library(ggplot2)
library(dplyr)
library(RColorBrewer)
library(wesanderson)
#brewer.pal.info
#display.brewer.all()
#display.brewer.all(type="seq")
#display.brewer.all(type="div")
##data_df_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_240_kbps.csv')
##data_df_1$data_rate <- "240 kbps"
##data_df_1$shaping <- "Without Shaping"
#data_df_1_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_240_kbps.csv')
#data_df_1_1$data_rate <- "240 kbps"
##data_df_1_1$shaping <- "With Shaping"
##data_df_2 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_120_kbps.csv')
##data_df_2$data_rate <- "120 kbps"
##data_df_2$shaping <- "Without Shaping"
#data_df_2_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_120_kbps.csv')
#data_df_2_1$data_rate <- "120 kbps"
##data_df_2_1$shaping <- "With Shaping"
##data_df_3 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_60_kbps.csv')
##data_df_3$data_rate <- "60 kbps"
##data_df_3$shaping <- "Without Shaping"
#data_df_3_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_60_kbps.csv')
#data_df_3_1$data_rate <- "60 kbps"
##data_df_3_1$shaping <- "With Shaping"
##data_df_4 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_30_kbps.csv')
##data_df_4$data_rate <- "30 kbps"
##data_df_4$shaping <- "Without Shaping"
#data_df_4_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_30_kbps.csv')
#data_df_4_1$data_rate <- "30 kbps"
##data_df_4_1$shaping <- "With Shaping"
##data_df_5 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_15_kbps.csv')
##data_df_5$data_rate <- "15 kbps"
##data_df_5$shaping <- "Without Shaping"
#data_df_5_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_15_kbps.csv')
#data_df_5_1$data_rate <- "15 kbps"
##data_df_5_1$shaping <- "With Shaping"
#
##data_df <- rbind(data_df_1,data_df_1_1,data_df_2,data_df_2_1,data_df_3,data_df_3_1,data_df_4,data_df_4_1,data_df_5,data_df_5_1)
#data_df <- rbind(data_df_1_1,data_df_2_1,data_df_3_1,data_df_4_1,data_df_5_1)
data_df_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_240_kbps.csv')
data_df_1$data_rate <- "240 kbps"
data_df_2 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_120_kbps.csv')
data_df_2$data_rate <- "120 kbps"
data_df_3 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_60_kbps.csv')
data_df_3$data_rate <- "60 kbps"
data_df_4 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_30_kbps.csv')
data_df_4$data_rate <- "30 kbps"
data_df_5 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_15_kbps.csv')
data_df_5$data_rate <- "15 kbps"
data_df <- rbind(data_df_1,data_df_2,data_df_3,data_df_4,data_df_5)
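# Editorial sketch (assumes all five CSVs share identical columns): the five
# read.csv blocks and the rbind above could be collapsed into a single lapply:
# data_rates <- c(240, 120, 60, 30, 15)
# base_dir <- "C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end"
# data_df <- do.call(rbind, lapply(data_rates, function(rate) {
#   df <- read.csv(file.path(base_dir, paste0("h3_data_", rate, "_kbps.csv")))
#   df$data_rate <- paste(rate, "kbps")
#   df
# }))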
data_df$flow_id <- factor(data_df$flow_id, levels = c("6", "7", "8", "9", "10"),
labels = c("Medical Evacuation", "Obstacle Alert", "Picture", "Chat", "FFT"))
colnames(data_df)[which(names(data_df) == "flow_id")] <- "Messages"
gg <- ggplot()
gg <- gg + geom_line(data = data_df, mapping = aes(x = packet_seq_no, y = packet_delay_in_secs, color=Messages, linetype=Messages))
gg <- gg + geom_point(data = data_df, mapping = aes(x = packet_seq_no, y = packet_delay_in_secs, color=Messages, shape=Messages), size = 3, stroke=1.3, alpha = 1)
gg <- gg + scale_linetype_manual(values=c("longdash", "twodash", "dashed","dotdash","dotted"))
gg <- gg + scale_shape_manual(values=c(1, 2, 3, 4, 5))
#gg <- gg + scale_color_manual(values=c('#1e2240', '#607dab','#b5c9d5', '#FFE77AFF', '#2C5F2DFF'))
#gg <- gg + scale_color_manual(values=c('#1f1f1f','#660000', '#EA9999', '#E69138', '#B4A7D6'))
gg <- gg + scale_color_manual(values=c('grey40','tomato4', 'tomato', 'goldenrod', '#2C5F2DFF'))
gg <- gg + facet_grid( . ~ factor(data_rate, levels=c('15 kbps','30 kbps','60 kbps','120 kbps','240 kbps')), scales = "free")
gg <- gg + coord_cartesian()
gg <- gg + xlab("Packet")
gg <- gg + ylab("End-to-End Delay (sec)")
#gg <- gg + guides(shape = guide_legend(override.aes = list(size = 3)))
gg <- gg + theme(axis.text.x = element_text(size = 23, angle = 45, vjust = 0.5, hjust = 0.7),
axis.text.y = element_text(size = 24),
axis.title = element_text(size = 28),
axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0)),
axis.title.x = element_text(margin = margin(t = 15, r = 0, b = 0, l = 0)),
strip.background = element_rect(colour = "black", fill = "#f0f0f0"),
strip.text = element_text(face = "bold", size = 22),
legend.position = "bottom",
legend.background = element_rect(fill="transparent"),
legend.title = element_text(size = 23, face = "bold", colour = "black"),
legend.text=element_text(size=20, colour = "black"),
legend.title.align = 0.5)
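# note: theme_set(theme_bw()) below runs before print(gg), so theme_bw() becomes the
# base theme and the theme() overrides above are layered on top at render time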
theme_get()
theme_set(theme_bw())
print(gg)
ggsave(filename = "C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/plots/with_fifo_qdisc_no_timeout.png",plot=last_plot(), device="png", units = "mm", width = 400, height = 200, dpi = 600)
ggsave(filename = "C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/thesis_plots/with_fifo_qdisc_no_timeout_uhf.eps",plot=last_plot(), device="eps", units = "mm", width = 400, height = 180, dpi = 600)
| /experiments/r_ggplot2/r_and_python_scripts_thesis/plot_with_fifo_qdisc_combined_data_rates_uhf.R | no_license | sharath-maligera/sdn-tactical-network | R | false | false | 6,849 | r | rm(list = ls())
graphics.off()
library(ggplot2)
library(dplyr)
library(RColorBrewer)
library(wesanderson)
#brewer.pal.info
#display.brewer.all()
#display.brewer.all(type="seq")
#display.brewer.all(type="div")
##data_df_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_240_kbps.csv')
##data_df_1$data_rate <- "240 kbps"
##data_df_1$shaping <- "Without Shaping"
#data_df_1_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_240_kbps.csv')
#data_df_1_1$data_rate <- "240 kbps"
##data_df_1_1$shaping <- "With Shaping"
##data_df_2 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_120_kbps.csv')
##data_df_2$data_rate <- "120 kbps"
##data_df_2$shaping <- "Without Shaping"
#data_df_2_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_120_kbps.csv')
#data_df_2_1$data_rate <- "120 kbps"
##data_df_2_1$shaping <- "With Shaping"
##data_df_3 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_60_kbps.csv')
##data_df_3$data_rate <- "60 kbps"
##data_df_3$shaping <- "Without Shaping"
#data_df_3_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_60_kbps.csv')
#data_df_3_1$data_rate <- "60 kbps"
##data_df_3_1$shaping <- "With Shaping"
##data_df_4 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_30_kbps.csv')
##data_df_4$data_rate <- "30 kbps"
##data_df_4$shaping <- "Without Shaping"
#data_df_4_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_30_kbps.csv')
#data_df_4_1$data_rate <- "30 kbps"
##data_df_4_1$shaping <- "With Shaping"
##data_df_5 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS/h3_data_15_kbps.csv')
##data_df_5$data_rate <- "15 kbps"
##data_df_5$shaping <- "Without Shaping"
#data_df_5_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_ETS_both_end/h3_data_15_kbps.csv')
#data_df_5_1$data_rate <- "15 kbps"
##data_df_5_1$shaping <- "With Shaping"
#
##data_df <- rbind(data_df_1,data_df_1_1,data_df_2,data_df_2_1,data_df_3,data_df_3_1,data_df_4,data_df_4_1,data_df_5,data_df_5_1)
#data_df <- rbind(data_df_1_1,data_df_2_1,data_df_3_1,data_df_4_1,data_df_5_1)
data_df_1 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_240_kbps.csv')
data_df_1$data_rate <- "240 kbps"
data_df_2 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_120_kbps.csv')
data_df_2$data_rate <- "120 kbps"
data_df_3 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_60_kbps.csv')
data_df_3$data_rate <- "60 kbps"
data_df_4 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_30_kbps.csv')
data_df_4$data_rate <- "30 kbps"
data_df_5 <- read.csv(file = 'C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/data/no_adaptive_no_changing_FIFO_both_end/h3_data_15_kbps.csv')
data_df_5$data_rate <- "15 kbps"
data_df <- rbind(data_df_1,data_df_2,data_df_3,data_df_4,data_df_5)
data_df$flow_id <- factor(data_df$flow_id, levels = c("6", "7", "8", "9", "10"),
labels = c("Medical Evacuation", "Obstacle Alert", "Picture", "Chat", "FFT"))
colnames(data_df)[which(names(data_df) == "flow_id")] <- "Messages"
gg <- ggplot()
gg <- gg + geom_line(data = data_df, mapping = aes(x = packet_seq_no, y = packet_delay_in_secs, color=Messages, linetype=Messages))
gg <- gg + geom_point(data = data_df, mapping = aes(x = packet_seq_no, y = packet_delay_in_secs, color=Messages, shape=Messages), size = 3, stroke=1.3, alpha = 1)
gg <- gg + scale_linetype_manual(values=c("longdash", "twodash", "dashed","dotdash","dotted"))
gg <- gg + scale_shape_manual(values=c(1, 2, 3, 4, 5))
#gg <- gg + scale_color_manual(values=c('#1e2240', '#607dab','#b5c9d5', '#FFE77AFF', '#2C5F2DFF'))
#gg <- gg + scale_color_manual(values=c('#1f1f1f','#660000', '#EA9999', '#E69138', '#B4A7D6'))
gg <- gg + scale_color_manual(values=c('grey40','tomato4', 'tomato', 'goldenrod', '#2C5F2DFF'))
gg <- gg + facet_grid( . ~ factor(data_rate, levels=c('15 kbps','30 kbps','60 kbps','120 kbps','240 kbps')), scales = "free")
gg <- gg + coord_cartesian()
gg <- gg + xlab("Packet")
gg <- gg + ylab("End-to-End Delay (sec)")
#gg <- gg + guides(shape = guide_legend(override.aes = list(size = 3)))
gg <- gg + theme(axis.text.x = element_text(size = 23, angle = 45, vjust = 0.5, hjust = 0.7),
axis.text.y = element_text(size = 24),
axis.title = element_text(size = 28),
axis.title.y = element_text(margin = margin(t = 0, r = 10, b = 0, l = 0)),
axis.title.x = element_text(margin = margin(t = 15, r = 0, b = 0, l = 0)),
strip.background = element_rect(colour = "black", fill = "#f0f0f0"),
strip.text = element_text(face = "bold", size = 22),
legend.position = "bottom",
legend.background = element_rect(fill="transparent"),
legend.title = element_text(size = 23, face = "bold", colour = "black"),
legend.text=element_text(size=20, colour = "black"),
legend.title.align = 0.5)
theme_get()
theme_set(theme_bw())
print(gg)
ggsave(filename = "C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/plots/with_fifo_qdisc_no_timeout.png",plot=last_plot(), device="png", units = "mm", width = 400, height = 200, dpi = 600)
ggsave(filename = "C:/Users/Sharath/PycharmProjects/mininet-wifi/sdn-tactical-network/experiments/r_ggplot2/thesis_plots/with_fifo_qdisc_no_timeout_uhf.eps",plot=last_plot(), device="eps", units = "mm", width = 400, height = 180, dpi = 600)
|
arg <- commandArgs(trailingOnly=T)
# i1 = 2, ..., 30: generates 29 different genotype datasets for non-causal SNPs
i1 <- as.numeric(arg[[1]])
#generate independent causal SNPs
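# seeding with the task index i1 makes each of the 29 datasets reproducible and distinct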
set.seed(i1)
n.sub <- 110000
n.snp <- 5000
MAF <- 0.25
genotype <- matrix(rbinom(n.sub * n.snp, 2, MAF), n.sub, n.snp)
#genotype of M
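# standardize: a binomial(2, MAF) genotype has mean 2*MAF and variance 2*MAF*(1-MAF)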
genotype_s <- (genotype-2*MAF)/(sqrt(2*MAF*(1-MAF)))
save(genotype_s,file = paste0("/data/zhangh24/MR_MA/result/simulation/prs/noncau_genotype_M_",i1,".rdata"))
n.sub <- 100000
n.snp <- 5000
genotype2 <- matrix(rbinom(n.sub * n.snp, 2, MAF), n.sub, n.snp)
#genotype of Y
genotype_s2 <- (genotype2-2*MAF)/(sqrt(2*MAF*(1-MAF)))
save(genotype_s2,file = paste0("/data/zhangh24/MR_MA/result/simulation/prs/noncau_genotype_Y_",i1,".rdata"))
| /code/simulation/PRS/generate_noncau_genotype.R | no_license | andrewhaoyu/MR_MA | R | false | false | 733 | r | arg <- commandArgs(trailingOnly=T)
#i1 = 2,..30 generate 29 different genotype datasets for non causal snps
i1 <- as.numeric(arg[[1]])
#generate independent causal SNPs
set.seed(i1)
n.sub <- 110000
n.snp <- 5000
MAF <- 0.25
genotype <- matrix(rbinom(n.sub*n.snp,2,0.25),n.sub,n.snp)
#genotype of M
genotype_s <- (genotype-2*MAF)/(sqrt(2*MAF*(1-MAF)))
save(genotype_s,file = paste0("/data/zhangh24/MR_MA/result/simulation/prs/noncau_genotype_M_",i1,".rdata"))
n.sub <- 100000
n.snp <- 5000
genotype2 <- matrix(rbinom(n.sub*n.snp,2,0.25),n.sub,n.snp)
#genotype of Y
genotype_s2 <- (genotype2-2*MAF)/(sqrt(2*MAF*(1-MAF)))
save(genotype_s2,file = paste0("/data/zhangh24/MR_MA/result/simulation/prs/noncau_genotype_Y_",i1,".rdata"))
|
################################################################################
# Description: Fit the population model for a given location and drop age
# - combine all demographic and population matrices and other information into
# TMB data and parameter list
# - create map object to optionally fix different demographic components
# - fit model or load previously fit model
# - save tmb model fit object
# - extract model fit results
# - run prior and posterior ccmpp
# - split to get under 1 populations using gbd under5 mortality proportions
# - save outputs
# - calculate absolute and percent error and save
################################################################################
library(data.table)
library(readr)
library(assertable)
library(mortdb, lib = "FILEPATH/r-pkg")
library(mortcore, lib = "FILEPATH/r-pkg")
rm(list = ls())
SGE_TASK_ID <- Sys.getenv("SGE_TASK_ID")
USER <- Sys.getenv("USER")
code_dir <- paste0("FILEPATH", "/population/modeling/popReconstruct/")
Sys.umask(mode = "0002")
# Get arguments -----------------------------------------------------------
# load step specific settings into Global Environment
parser <- argparse::ArgumentParser()
parser$add_argument("--pop_vid", type = "character",
help = "The version number for this run of population, used to read in settings file")
parser$add_argument('--task_map_dir', type="character",
help="The filepath to the task map file that specifies other arguments to run for")
parser$add_argument("--test", type = "character",
help = "Whether this is a test run of the process")
args <- parser$parse_args()
if (interactive()) { # set interactive defaults, should never commit changes away from the test version to be safe
args$pop_vid <- "99999"
args$task_map_dir <- "FILEPATH"
args$test <- "T"
}
args$test <- as.logical(args$test)
list2env(args, .GlobalEnv); rm(args)
# use task id to get arguments from task map
task_map <- fread(task_map_dir)
loc_id <- task_map[task_id == SGE_TASK_ID, location_id]
drop_above_age <- task_map[task_id == SGE_TASK_ID, drop_above_age]
copy_results <- task_map[task_id == SGE_TASK_ID, as.logical(copy_results)]
# load model run specific settings into Global Environment
settings_dir <- paste0("FILEPATH", pop_vid, "/run_settings.rdata")
load(settings_dir)
list2env(settings, envir = environment())
source(paste0(code_dir, "functions/ccmpp.R"))
# read in population modeling location hierarchy
location_hierarchy <- fread(paste0(output_dir, "/database/all_location_hierarchies.csv"))
location_hierarchy <- location_hierarchy[location_set_name == "population" & location_id == loc_id]
ihme_loc <- location_hierarchy[, ihme_loc_id]
super_region <- location_hierarchy[, super_region_name]
# read in age groups
age_groups <- fread(paste0(output_dir, "/database/age_groups.csv"))
# under1 pop
under1_pop <- fread(paste0(output_dir, "/database/under1_pop.csv"))[location_id == loc_id]
ccmp_age_int <- 1
knockouts <- F
if (!copy_results) {
# Load inputs -------------------------------------------------------------
# load demographic data
asfr_data <- f_mean <- fread(file = paste0(output_dir, "/inputs/asfr.csv"))[location_id == loc_id]
srb_data <- srb_mean <- fread(file = paste0(output_dir, "/inputs/srb.csv"))[location_id == loc_id]
survival_data <- s_mean <- fread(file = paste0(output_dir, "/inputs/survival.csv"))[location_id == loc_id]
migration_data <- g_mean <- fread(file = paste0(output_dir, "/inputs/migration.csv"))[location_id == loc_id]
# load census data
census_data <- fread(file = paste0(output_dir, "/inputs/population.csv"))[location_id == loc_id]
# drop age groups above the age where model pooling begins, except in the baseline year
census_data[year_id != year_start & age_group_years_start > drop_above_age, drop := T]
n0_mean <- census_data[outlier_type == "not outliered" & year_id == year_start,
list(location_id = loc_id, year_id, sex_id, age_group_years_start, mean)]
# load model estimates and overwrite the input prior wherever the component was estimated in the model
# (these files were saved by the model-fitting step for any component not fixed during estimation)
if (!fix_baseline) n0_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/baseline_pop_drop", drop_above_age, ".csv"))
if (!fix_migration) g_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/migration_proportion_drop", drop_above_age, ".csv"))
if (!fix_fertility) f_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/asfr_drop", drop_above_age, ".csv"))
if (!fix_survival) s_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/survival_ratio_drop", drop_above_age, ".csv"))
if (!fix_srb) srb_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/srb_drop", drop_above_age, ".csv"))
# Run initial inputs through ccmpp (prior) --------------------------------
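# ccmpp = cohort component method of population projection, applied here to the raw (prior) inputs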
prior <- ccmpp_female_male(census_data[outlier_type == "not outliered" & year_id == year_start,
list(location_id = loc_id, year_id, sex_id, age_group_years_start, mean)],
survival_data, migration_data,
asfr_data, srb_data,
years = years[c(-length(years))],
age_int = ccmp_age_int, value_name = "mean")
prior <- prior[, list(location_id = loc_id, year_id, sex_id, age_group_years_start, variable, mean)]
prior[variable == "deaths", variable := "deaths_cohort"]
prior[variable == "net_migrants", variable := "net_migrants_cohort"]
# create period counts
prior_deaths <- cohort_to_period_counts(prior[variable == "deaths_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
prior_deaths[, variable := "deaths"]
prior_net_migrants <- cohort_to_period_counts(prior[variable == "net_migrants_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
prior_net_migrants[, variable := "net_migrants"]
prior <- rbind(prior, prior_deaths, prior_net_migrants, use.names = T)
prior <- mortcore::age_start_to_age_group_id(prior, id_vars = c("location_id", "year_id", "sex_id", "variable"), keep_age_group_id_only = F)
setcolorder(prior, c("location_id", "year_id", "sex_id", "age_group_id", "age_group_years_start",
"age_group_years_end", "variable", "mean"))
setkeyv(prior, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable"))
# Run ccmpp to get unraked population -------------------------------------
counts <- ccmpp_female_male(n0_mean, s_mean, g_mean, f_mean, srb_mean,
migration_type = "proportion",
years = years[-length(years)], age_int = ccmp_age_int,
value_name = "mean")
counts[, location_id := loc_id]
setcolorder(counts, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable", "mean"))
setkeyv(counts, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable"))
# create period counts
deaths <- cohort_to_period_counts(counts[variable == "deaths_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
deaths[, variable := "deaths"]
deaths[, age_group_years_start := as.integer(age_group_years_start)]
net_migrants <- cohort_to_period_counts(counts[variable == "net_migrants_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
net_migrants[, variable := "net_migrants"]
net_migrants[, age_group_years_start := as.integer(age_group_years_start)]
counts <- rbind(counts, deaths, net_migrants, use.names = T)
counts[, age_group_years_start := as.integer(age_group_years_start)]
counts <- mortcore::age_start_to_age_group_id(counts, id_vars = c("location_id", "year_id", "sex_id", "variable"), keep_age_group_id_only = F)
# Split under 1 populations -----------------------------------------------
# split under five age groups using gbd under 5 population process proportions
under1_pop <- under1_pop[, prop := mean/sum(mean), by = c("location_id", "year_id", "sex_id")]
under1_pop <- merge(under1_pop, counts[variable == "population" & age_group_id == 28, list(location_id, year_id, sex_id, under1_mean = mean)],
by = c("location_id", "year_id", "sex_id"), allow.cartesian = T)
under1_pop[, mean := prop * under1_mean]
under1_pop[, c("prop", "under1_mean") := NULL]
under1_pop <- merge(under1_pop, age_groups[, list(age_group_id, age_group_years_start, age_group_years_end)], all.x = T, by = "age_group_id")
under1_pop[, variable := "population"]
counts <- rbind(counts, under1_pop, use.names = T)
counts <- counts[!(variable == "population" & age_group_id == 28)]
setcolorder(counts, c("location_id", "year_id", "sex_id", "age_group_id", "age_group_years_start",
"age_group_years_end", "variable", "mean"))
setkeyv(counts, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable"))
# Aggregate migration estimates -------------------------------------------
aggregate_migrants <- function(counts_to_aggregate) {
# aggregate net migrant counts
net_migrants_reporting <- mortcore::agg_results(counts_to_aggregate[variable == "net_migrants", list(location_id, year_id, sex_id, age_group_id, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_id"),
value_vars = "mean", agg_hierarchy = F, agg_sex = T, age_aggs = age_groups[(reporting_migration), age_group_id])
# aggregate population
pop_net_migrants_reporting <- mortcore::agg_results(counts_to_aggregate[variable == "population", list(location_id, year_id, sex_id, age_group_id, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_id"),
value_vars = "mean", agg_hierarchy = F, agg_sex = T, age_aggs = age_groups[(reporting_migration), age_group_id])
pop_net_migrants_reporting <- pop_net_migrants_reporting[age_group_id %in% unique(net_migrants_reporting[, age_group_id])]
setnames(pop_net_migrants_reporting, "mean", "pop")
# merge together and calculate net migrant proportions
net_migration_proportion_reporting <- merge(pop_net_migrants_reporting, net_migrants_reporting,
by = c("location_id", "year_id", "sex_id", "age_group_id"), all = T)
net_migration_proportion_reporting <- net_migration_proportion_reporting[, list(mean = mean / pop),
by = c("location_id", "year_id", "sex_id", "age_group_id")]
net_migrants_reporting[, measure_id := 19] # measure id 19 is for continuous counts
net_migration_proportion_reporting[, measure_id := 18] # measure id 18 is for proportions
net_migration <- rbind(net_migration_proportion_reporting, net_migrants_reporting)
setcolorder(net_migration, c("location_id", "year_id", "sex_id", "age_group_id", "measure_id", "mean"))
setkeyv(net_migration, c("location_id", "year_id", "sex_id", "age_group_id", "measure_id"))
return(net_migration)
}
posterior_net_migration <- aggregate_migrants(counts)
prior_net_migration <- aggregate_migrants(prior)
# Calculate errors --------------------------------------------------------
# aggregate populations in each census year to the census age groups
# split the predicted population counts into the census age groups by year and sex
census_data <- census_data[outlier_type %in% c("not outliered", if (knockouts) "knockout")]
census_years <- unique(census_data$year_id)
pop_census_ages <- lapply(census_years, function(year) {
data <- census_data[year_id == year]
census_age_groups <- unique(data$age_group_id)
pop <- mortcore::agg_results(counts[variable == "population" & year_id == year, list(location_id, year_id, sex_id, age_group_id, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_id"), value_vars = "mean", agg_hierarchy = F,
age_aggs = census_age_groups)
pop <- pop[age_group_id %in% census_age_groups]
return(pop)
})
pop_census_ages <- rbindlist(pop_census_ages)
# calculate percent and absolute difference between estimates and census data
setnames(pop_census_ages, "mean", "estimate")
setnames(census_data, c("mean"), c("data"))
error <- merge(pop_census_ages, census_data, by = c("location_id", "year_id", "sex_id", "age_group_id"), all = T)
error[, abs_error := (estimate - data)]
error[, pct_error := ((estimate - data) / data) * 100]
setkeyv(error, c("location_id", "year_id"))
# determine the year of the previous census
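# data.table rolling join: roll = +Inf carries the most recent prior census year forward to each estimate year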
censuses_used <- unique(census_data[outlier_type == "not outliered", list(location_id, year_id, previous_census_year = year_id)])
setkeyv(censuses_used, c("location_id", "year_id"))
error <- censuses_used[error, roll = +Inf]
error[, years_to_previous_census := year_id - previous_census_year]
# determine the year of the next census
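# rolling join with roll = -Inf pulls the next later census year backward instead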
censuses_used <- unique(census_data[outlier_type == "not outliered", list(location_id, year_id, next_census_year = year_id)])
setkeyv(censuses_used, c("location_id", "year_id"))
error <- censuses_used[error, roll = -Inf]
error[, years_to_next_census := next_census_year - year_id]
error[, c("next_census_year", "previous_census_year") := NULL]
setcolorder(error, c(census_data_id_vars, "sex_id", "age_group_id", "age_group_years_start", "age_group_years_end", "n", "drop", "years_to_previous_census", "years_to_next_census", "estimate", "data", "abs_error", "pct_error"))
setkeyv(error, c(census_data_id_vars, "sex_id", "age_group_id", "age_group_years_start", "age_group_years_end", "n", "drop",
"years_to_previous_census", "years_to_next_census"))
# Check outputs -----------------------------------------------------------
population_id_vars <- list(location_id = loc_id, year_id = years, sex_id = 1:2, age_group_id = age_groups[(most_detailed), age_group_id])
assertable::assert_ids(counts[variable == "population"], population_id_vars)
assertable::assert_values(counts[variable == "population"], colnames = "mean", test = "gt", test_val = 0)
# Save outputs ------------------------------------------------------------
# save net migration counts and proportions
readr::write_csv(posterior_net_migration, path = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_posterior_drop", drop_above_age, ".csv"))
readr::write_csv(prior_net_migration, path = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_prior_drop", drop_above_age, ".csv"))
# save absolute and percent errors
readr::write_csv(error, path = paste0(output_dir, loc_id, "/outputs/model_fit/errors_drop", drop_above_age, ".csv"))
# save output derived counts of population, births, migrants, deaths
readr::write_csv(prior, path = paste0(output_dir, loc_id, "/outputs/model_fit/counts_prior_drop", drop_above_age, ".csv"))
readr::write_csv(counts, path = paste0(output_dir, loc_id, "/outputs/model_fit/counts_drop", drop_above_age, ".csv"))
} else {
# Copy over outputs from previous version ---------------------------------
# useful if testing just post model fit code and want to reduce run time from fitting the whole population model
# or if just rerunning a subset of locations
copy_output_dir <- paste0("FILEPATH", copy_vid)  # assumes copy_vid is defined in the run settings
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/net_migration_posterior_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_posterior_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/net_migration_prior_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_prior_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/errors_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/errors_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/counts_prior_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/counts_prior_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/counts_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/counts_drop", drop_above_age, ".csv"), overwrite = T)
}
| /gbd_2019/fert_pop_code/population/modeling/03_predict_from_model/03b_predict_model.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 17,706 | r | ################################################################################
# Description: Fit the population model for a given location and drop age
# - combine all demographic and population matrices and other information into
# TMB data and parameter list
# - create map object to optionally fix different demographic components
# - fit model or load previously fit model
# - save tmb model fit object
# - extract model fit results
# - run prior and posterior ccmpp
# - split to get under 1 populations using gbd under5 mortality proportions
# - save outputs
# - calculate absolute and error and save
################################################################################
library(data.table)
library(readr)
library(assertable)
library(mortdb, lib = "FILEPATH/r-pkg")
library(mortcore, lib = "FILEPATH/r-pkg")
rm(list = ls())
SGE_TASK_ID <- Sys.getenv("SGE_TASK_ID")
USER <- Sys.getenv("USER")
code_dir <- paste0("FILEPATH", "/population/modeling/popReconstruct/")
Sys.umask(mode = "0002")
# Get arguments -----------------------------------------------------------
# load step specific settings into Global Environment
parser <- argparse::ArgumentParser()
parser$add_argument("--pop_vid", type = "character",
help = "The version number for this run of population, used to read in settings file")
parser$add_argument('--task_map_dir', type="character",
help="The filepath to the task map file that specifies other arguments to run for")
parser$add_argument("--test", type = "character",
help = "Whether this is a test run of the process")
args <- parser$parse_args()
if (interactive()) { # set interactive defaults, should never commit changes away from the test version to be safe
args$pop_vid <- "99999"
args$task_map_dir <- "FILEPATH"
args$test <- "T"
}
args$test <- as.logical(args$test)
list2env(args, .GlobalEnv); rm(args)
# use task id to get arguments from task map
task_map <- fread(task_map_dir)
loc_id <- task_map[task_id == SGE_TASK_ID, location_id]
drop_above_age <- task_map[task_id == SGE_TASK_ID, drop_above_age]
copy_results <- task_map[task_id == SGE_TASK_ID, as.logical(copy_results)]
# load model run specific settings into Global Environment
settings_dir <- paste0("FILEPATH", pop_vid, "/run_settings.rdata")
load(settings_dir)
list2env(settings, envir = environment())
source(paste0(code_dir, "functions/ccmpp.R"))
# read in population modeling location hierarchy
location_hierarchy <- fread(paste0(output_dir, "/database/all_location_hierarchies.csv"))
location_hierarchy <- location_hierarchy[location_set_name == "population" & location_id == loc_id]
ihme_loc <- location_hierarchy[, ihme_loc_id]
super_region <- location_hierarchy[, super_region_name]
# read in age group
age_groups <- fread(paste0(output_dir, "/database/age_groups.csv"))
# under1 pop
under1_pop <- fread(paste0(output_dir, "/database/under1_pop.csv"))[location_id == loc_id]
ccmp_age_int <- 1
knockouts <- F
if (!copy_results) {
# Load inputs -------------------------------------------------------------
# load demographic data
asfr_data <- f_mean <- fread(file = paste0(output_dir, "/inputs/asfr.csv"))[location_id == loc_id]
srb_data <- srb_mean <- fread(file = paste0(output_dir, "/inputs/srb.csv"))[location_id == loc_id]
survival_data <- s_mean <- fread(file = paste0(output_dir, "/inputs/survival.csv"))[location_id == loc_id]
migration_data <- g_mean <- fread(file = paste0(output_dir, "/inputs/migration.csv"))[location_id == loc_id]
# load census data
census_data <- fread(file = paste0(output_dir, "/inputs/population.csv"))[location_id == loc_id]
# drop age groups above the modeling pooling age except for the baseline year
census_data[year_id != year_start & age_group_years_start > drop_above_age, drop := T]
n0_mean <- census_data[outlier_type == "not outliered" & year_id == year_start,
list(location_id = loc_id, year_id, sex_id, age_group_years_start, mean)]
# load model estimates and overwrite the input prior if the value was estimated in the model
# save demographic input parameters if not fixed during model estimation
if (!fix_baseline) n0_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/baseline_pop_drop", drop_above_age, ".csv"))
if (!fix_migration) g_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/migration_proportion_drop", drop_above_age, ".csv"))
if (!fix_fertility) f_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/asfr_drop", drop_above_age, ".csv"))
if (!fix_survival) s_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/survival_ratio_drop", drop_above_age, ".csv"))
if (!fix_srb) srb_mean <- fread(paste0(output_dir, loc_id, "/outputs/model_fit/srb_drop", drop_above_age, ".csv"))
# Run initial inputs through ccmpp (prior) --------------------------------
prior <- ccmpp_female_male(census_data[outlier_type == "not outliered" & year_id == year_start,
list(location_id = loc_id, year_id, sex_id, age_group_years_start, mean)],
survival_data, migration_data,
asfr_data, srb_data,
years = years[c(-length(years))],
age_int = ccmp_age_int, value_name = "mean")
prior <- prior[, list(location_id = loc_id, year_id, sex_id, age_group_years_start, variable, mean)]
prior[variable == "deaths", variable := "deaths_cohort"]
prior[variable == "net_migrants", variable := "net_migrants_cohort"]
# create period counts
prior_deaths <- cohort_to_period_counts(prior[variable == "deaths_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
prior_deaths[, variable := "deaths"]
prior_net_migrants <- cohort_to_period_counts(prior[variable == "net_migrants_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
prior_net_migrants[, variable := "net_migrants"]
prior <- rbind(prior, prior_deaths, prior_net_migrants, use.names = T)
prior <- mortcore::age_start_to_age_group_id(prior, id_vars = c("location_id", "year_id", "sex_id", "variable"), keep_age_group_id_only = F)
setcolorder(prior, c("location_id", "year_id", "sex_id", "age_group_id", "age_group_years_start",
"age_group_years_end", "variable", "mean"))
setkeyv(prior, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable"))
# Run ccmpp to get unraked population -------------------------------------
counts <- ccmpp_female_male(n0_mean, s_mean, g_mean, f_mean, srb_mean,
migration_type = "proportion",
years = years[-length(years)], age_int = ccmp_age_int,
value_name = "mean")
counts[, location_id := loc_id]
setcolorder(counts, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable", "mean"))
setkeyv(counts, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable"))
# create period counts
deaths <- cohort_to_period_counts(counts[variable == "deaths_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
deaths[, variable := "deaths"]
deaths[, age_group_years_start := as.integer(age_group_years_start)]
net_migrants <- cohort_to_period_counts(counts[variable == "net_migrants_cohort", list(location_id, year_id, sex_id, age_group_years_start, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_years_start"),
use_age_int = ccmp_age_int, value_name = "mean")
net_migrants[, variable := "net_migrants"]
net_migrants[, age_group_years_start := as.integer(age_group_years_start)]
counts <- rbind(counts, deaths, net_migrants, use.names = T)
counts[, age_group_years_start := as.integer(age_group_years_start)]
counts <- mortcore::age_start_to_age_group_id(counts, id_vars = c("location_id", "year_id", "sex_id", "variable"), keep_age_group_id_only = F)
# Split under 1 populations -----------------------------------------------
# split under five age groups using gbd under 5 population process proportions
under1_pop <- under1_pop[, prop := mean/sum(mean), by = c("location_id", "year_id", "sex_id")]
under1_pop <- merge(under1_pop, counts[variable == "population" & age_group_id == 28, list(location_id, year_id, sex_id, under1_mean = mean)],
by = c("location_id", "year_id", "sex_id"), allow.cartesian = T)
under1_pop[, mean := prop * under1_mean]
under1_pop[, c("prop", "under1_mean") := NULL]
under1_pop <- merge(under1_pop, age_groups[, list(age_group_id, age_group_years_start, age_group_years_end)], all.x = T, by = "age_group_id")
under1_pop[, variable := "population"]
counts <- rbind(counts, under1_pop, use.names = T)
counts <- counts[!(variable == "population" & age_group_id == 28)]
setcolorder(counts, c("location_id", "year_id", "sex_id", "age_group_id", "age_group_years_start",
"age_group_years_end", "variable", "mean"))
setkeyv(counts, c("location_id", "year_id", "sex_id", "age_group_years_start", "variable"))
# Aggregate migration estimates -------------------------------------------
aggregate_migrants <- function(counts_to_aggregate) {
# aggregate net migrant counts
net_migrants_reporting <- mortcore::agg_results(counts_to_aggregate[variable == "net_migrants", list(location_id, year_id, sex_id, age_group_id, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_id"),
value_vars = "mean", agg_hierarchy = F, agg_sex = T, age_aggs = age_groups[(reporting_migration), age_group_id])
# aggregate population
pop_net_migrants_reporting <- mortcore::agg_results(counts_to_aggregate[variable == "population", list(location_id, year_id, sex_id, age_group_id, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_id"),
value_vars = "mean", agg_hierarchy = F, agg_sex = T, age_aggs = age_groups[(reporting_migration), age_group_id])
pop_net_migrants_reporting <- pop_net_migrants_reporting[age_group_id %in% unique(net_migrants_reporting[, age_group_id])]
setnames(pop_net_migrants_reporting, "mean", "pop")
# merge together and calculate net migrant proportions
net_migration_proportion_reporting <- merge(pop_net_migrants_reporting, net_migrants_reporting,
by = c("location_id", "year_id", "sex_id", "age_group_id"), all = T)
net_migration_proportion_reporting <- net_migration_proportion_reporting[, list(mean = mean / pop),
by = c("location_id", "year_id", "sex_id", "age_group_id")]
net_migrants_reporting[, measure_id := 19] # measure id 19 is for continuous counts
net_migration_proportion_reporting[, measure_id := 18] # measure id 18 is for proportions
net_migration <- rbind(net_migration_proportion_reporting, net_migrants_reporting)
setcolorder(net_migration, c("location_id", "year_id", "sex_id", "age_group_id", "measure_id", "mean"))
setkeyv(net_migration, c("location_id", "year_id", "sex_id", "age_group_id", "measure_id"))
return(net_migration)
}
posterior_net_migration <- aggregate_migrants(counts)
prior_net_migration <- aggregate_migrants(prior)
# Calculate errors --------------------------------------------------------
# aggregate populations in each census year to the census age groups
# split the predicted population counts into the census age groups by year and sex
census_data <- census_data[outlier_type %in% c("not outliered", if (knockouts) "knockout")]
census_years <- unique(census_data$year_id)
pop_census_ages <- lapply(census_years, function(year) {
data <- census_data[year_id == year]
census_age_groups <- unique(data$age_group_id)
pop <- mortcore::agg_results(counts[variable == "population" & year_id == year, list(location_id, year_id, sex_id, age_group_id, mean)],
id_vars = c("location_id", "year_id", "sex_id", "age_group_id"), value_vars = "mean", agg_hierarchy = F,
age_aggs = census_age_groups)
pop <- pop[age_group_id %in% census_age_groups]
return(pop)
})
pop_census_ages <- rbindlist(pop_census_ages)
# calculate percent and absolute difference between estimates and census data
setnames(pop_census_ages, "mean", "estimate")
setnames(census_data, c("mean"), c("data"))
error <- merge(pop_census_ages, census_data, by = c("location_id", "year_id", "sex_id", "age_group_id"), all = T)
error[, abs_error := (estimate - data)]
error[, pct_error := ((estimate - data) / data) * 100]
setkeyv(error, c("location_id", "year_id"))
# determine the year of the previous census
censuses_used <- unique(census_data[outlier_type == "not outliered", list(location_id, year_id, previous_census_year = year_id)])
setkeyv(censuses_used, c("location_id", "year_id"))
error <- censuses_used[error, roll = +Inf]
error[, years_to_previous_census := year_id - previous_census_year]
# determine the year of the next census
censuses_used <- unique(census_data[outlier_type == "not outliered", list(location_id, year_id, next_census_year = year_id)])
setkeyv(censuses_used, c("location_id", "year_id"))
error <- censuses_used[error, roll = -Inf]
error[, years_to_next_census := next_census_year - year_id]
error[, c("next_census_year", "previous_census_year") := NULL]
setcolorder(error, c(census_data_id_vars, "sex_id", "age_group_id", "age_group_years_start", "age_group_years_end", "n", "drop", "years_to_previous_census", "years_to_next_census", "estimate", "data", "abs_error", "pct_error"))
setkeyv(error, c(census_data_id_vars, "sex_id", "age_group_id", "age_group_years_start", "age_group_years_end", "n", "drop",
"years_to_previous_census", "years_to_next_census"))
# Check outputs -----------------------------------------------------------
population_id_vars <- list(location_id = loc_id, year_id = years, sex_id = 1:2, age_group_id = age_groups[(most_detailed), age_group_id])
assertable::assert_ids(counts[variable == "population"], population_id_vars)
assertable::assert_values(counts[variable == "population"], colnames = "mean", test = "gt", test_val = 0)
# Save outputs ------------------------------------------------------------
# save net migration counts and proportions
readr::write_csv(posterior_net_migration, path = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_posterior_drop", drop_above_age, ".csv"))
readr::write_csv(prior_net_migration, path = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_prior_drop", drop_above_age, ".csv"))
# save absolute and percent errors
readr::write_csv(error, path = paste0(output_dir, loc_id, "/outputs/model_fit/errors_drop", drop_above_age, ".csv"))
# save output derived counts of population, births, migrants, deaths
readr::write_csv(prior, path = paste0(output_dir, loc_id, "/outputs/model_fit/counts_prior_drop", drop_above_age, ".csv"))
readr::write_csv(counts, path = paste0(output_dir, loc_id, "/outputs/model_fit/counts_drop", drop_above_age, ".csv"))
} else {
# Copy over outputs from previous version ---------------------------------
# useful if testing just post model fit code and want to reduce run time from fitting the whole population model
# or if just rerunning a subset of locations
copy_output_dir <- "FILEPATH + copy_vid"
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/net_migration_posterior_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_posterior_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/net_migration_prior_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/net_migration_prior_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/errors_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/errors_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/counts_prior_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/counts_prior_drop", drop_above_age, ".csv"), overwrite = T)
file.copy(from = paste0(copy_output_dir, loc_id, "/outputs/model_fit/counts_drop", drop_above_age, ".csv"),
to = paste0(output_dir, loc_id, "/outputs/model_fit/counts_drop", drop_above_age, ".csv"), overwrite = T)
}
|
#!/usr/bin/env Rscript
source("/home/oracle/scripts/parse_epitope2.r")
source("/home/oracle/scripts/insert_epidata2.r")
args <- commandArgs(TRUE)
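# args[1] is handed straight to insert_epidata() (presumably a database identifier or connection string);
# args[2] is the fusion .xls file to parse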
epidata <- read.fusion_xls(args[2])
insert_epidata(args[1],epidata)
| /db_related/scripts/pi_epidata.r | permissive | IHIW/bioinformatics | R | false | false | 211 | r | #!/usr/bin/env Rscript
##############################################################################
# This function is a wrapper for univariate surveillance algorithms
# using the old disProg and survRes object
#
# An sts object is given and a pre-specified algorithm is run
# by successively creating a disProg object for each region,
# running the algorithm and then assigning the slots of the resulting survRes
# object to an sts object.
###################################################################################
###Apply other algorithms by wrapping up a suitable package.
#Wrapper function to call algo.farrington for each time series in an sts object
wrap.algo <- function(sts, algo, control,
control.hook=function(k, control) return(control),
verbose=TRUE,...) {
#Number of time series
nAreas <- ncol(sts@observed)
nTimePoints <- nrow(sts@observed)
nAlarm <- length(control$range)
#Create alarm matrix having same size as sts
sts@alarm <- matrix(NA,ncol=nAreas,nrow=nTimePoints,dimnames=dimnames(sts@observed))
sts@upperbound <- matrix(NA,ncol=nAreas,nrow=nTimePoints,dimnames=dimnames(sts@observed))
#Loop over all regions
for (k in 1:nAreas) {
if (verbose) {
cat("Running ",algo," on area ",k," out of ",nAreas,"\n")
}
##Create an old S3 disProg object
disProg.k <- sts2disProg(sts[,k])
#Use the univariate algorithm (possibly preprocess control object)
kcontrol <- control.hook(k, control)
survRes.k <- do.call(algo,args = list(disProg.k, control=kcontrol))
#Transfer results to the S4 object
if (!is.null(survRes.k)) {
sts@alarm[control$range,k] <- survRes.k$alarm
sts@upperbound[control$range,k] <- survRes.k$upperbound
#Control object needs only to be set once
sts@control <- survRes.k$control
}
}
  #Reduce sts object to only those observations in range
sts@observed <- sts@observed[control$range,,drop=FALSE]
sts@state <- sts@state[control$range,,drop=FALSE]
sts@populationFrac <- sts@populationFrac[control$range,,drop=FALSE]
sts@alarm <- sts@alarm[control$range,,drop=FALSE]
sts@upperbound <- sts@upperbound[control$range,,drop=FALSE]
#Set correct theta0t matrix for all
sts@control$theta0t <- control$theta0t
#Fix the corresponding start entry
start <- sts@start
new.sampleNo <- start[2] + min(control$range) - 1
start.year <- start[1] + (new.sampleNo - 1) %/% sts@freq
start.sampleNo <- (new.sampleNo - 1) %% sts@freq + 1
sts@start <- c(start.year,start.sampleNo)
sts@epoch <- sts@epoch[control$range]
sts@epochAsDate <- sts@epochAsDate
#Ensure dimnames in the new object
sts <- fix.dimnames(sts)
return(sts)
}
#Farrington wrapper
farrington <- function(sts, control=list(range=NULL, b=3, w=3, reweight=TRUE, verbose=FALSE,alpha=0.01),...) {
wrap.algo(sts,algo="algo.farrington",control=control,...)
}
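#Example use (sketch, mirroring the rogerson example further below): apply the
#Farrington algorithm to every univariate series of an sts object
# data("ha")
# farrington(disProg2sts(ha), control = list(range = 200:290, b = 3, w = 3, alpha = 0.01))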
#Bayes wrapper (this can be implemented more efficiently)
bayes <- function(sts, control = list(range = range, b = 0, w = 6, actY = TRUE,alpha=0.05),...) {
if (sts@epochAsDate) {
warning("algo.bayes currently can't handle Date entries. Computing reference values based on freq")
}
wrap.algo(sts,algo="algo.bayes",control=control)
}
#RKI wrapper
rki <- function(sts, control = list(range = range, b = 2, w = 4, actY = FALSE),...) {
if (sts@epochAsDate) {
warning("algo.rki currently can't handle Date entries. Computing reference values based on freq")
}
wrap.algo(sts,algo="algo.rki",control=control,...)
}
#outbreakP wrapper
outbreakP <- function(sts, control=list(range = range, k=100,
ret=c("cases","value"),maxUpperboundCases=1e5),...) {
wrap.algo(sts,algo="algo.outbreakP",control=control,...)
}
#HMM wrapper
hmm <- function(sts, control=list(range=NULL, noStates=2, trend=TRUE, noHarmonics=1,covEffectEqual=FALSE),...) {
if (sts@epochAsDate) {
warning("algo.hmm currently can't handle Date entries. Computing reference values based on freq")
}
wrap.algo(sts,algo="algo.hmm",control=control,...)
}
#Cusum wrapper
cusum <- function(sts, control = list(range=range, k=1.04, h=2.26, m=NULL, trans="standard",alpha=NULL),...) {
wrap.algo(sts,algo="algo.cusum",control=control,...)
}
#GLRpois wrapper
glrpois <- function(sts, control = list(range=range,c.ARL=5, S=1,
beta=NULL, Mtilde=1, M=-1, change="intercept",theta=NULL),...) {
wrap.algo(sts,algo="algo.glrpois",control=control,...)
}
#GLRnb wrapper
glrnb <- function(sts, control = list(range=range,c.ARL=5, mu0=NULL, alpha=0,
Mtilde=1, M=-1, change="intercept",theta=NULL,dir=c("inc","dec"),
ret=c("cases","value")), ...) {
wrap.algo(sts,algo="algo.glrnb",control=control,...)
}
#### TODO: wrap.algo itself is still undocumented for end users
#Rogerson wrapper
# theta0t now has to be a matrix
#library(surveillance)
#data("ha")
#rogerson(disProg2sts(ha),control=list(range=200:290,ARL0=100,s=1,theta0t=matrix(1,nrow=91,ncol=12)))
rogerson <- function(sts, control = list(range=range, theta0t=NULL,
ARL0=NULL, s=NULL, hValues=NULL,
distribution=c("poisson","binomial"),
nt=NULL, FIR=FALSE,limit=NULL, digits=1),...) {
if (sts@epochAsDate) {
warning("algo.rogerson currently can't handle Date entries. Computing reference values based on freq")
}
#Hook function to find right theta0t vector
control.hook = function(k,control) {
#Extract values relevant for the k'th component
control$theta0t <- control$theta0t[,k]
if (is.null(control[["nt",exact=TRUE]])) {
control$nt <- sts@populationFrac[control$range,k]
} else {
if (!all.equal(sts@populationFrac[control$range,k],control$nt[,k])) {
warning("Warning: nt slot of control specified, but specified population differs.")
} else {
control$nt <- control$nt[,k]
}
}
#If no hValues given then compute them
if (is.null(control[["hValues",exact=TRUE]])) {
#This code does not appear to work once n is big.
# control$hValues <- hValues(theta0 = unique(control$theta0t), ARL0=control$ARL0, s=control$s , distr = control$distribution, n=mean(control$nt))$hValues
control$hValues <- hValues(theta0 = unique(control$theta0t), ARL0=control$ARL0, s=control$s , distr = control$distribution)$hValues
}
return(control)
}
#WrapIt
wrap.algo(sts,algo="algo.rogerson",control=control,control.hook=control.hook,...)
}
| /R/wrap_univariate.R | no_license | beansrowning/surveillance | R | false | false | 6,557 | r | ##############################################################################
# This function is a wrapper for univariate surveillance algorithms
# using the old disProg and survRes object
#
# An sts object is given and a pre specified algorithms is ran
# by successively creating a disProg object for each region,
# running the algo and then assign the slots of the resulting survRes
# object to an sts object.
###################################################################################
###Apply other algorithms by wrapping up a suitable package.
#Wrapper function to call algo.farrington for each time series in an sts object
wrap.algo <- function(sts, algo, control,
control.hook=function(k, control) return(control),
verbose=TRUE,...) {
#Number of time series
nAreas <- ncol(sts@observed)
nTimePoints <- nrow(sts@observed)
nAlarm <- length(control$range)
#Create alarm matrix having same size as sts
sts@alarm <- matrix(NA,ncol=nAreas,nrow=nTimePoints,dimnames=dimnames(sts@observed))
sts@upperbound <- matrix(NA,ncol=nAreas,nrow=nTimePoints,dimnames=dimnames(sts@observed))
#Loop over all regions
for (k in 1:nAreas) {
if (verbose) {
cat("Running ",algo," on area ",k," out of ",nAreas,"\n")
}
##Create an old S3 disProg object
disProg.k <- sts2disProg(sts[,k])
#Use the univariate algorithm (possibly preprocess control object)
kcontrol <- control.hook(k, control)
survRes.k <- do.call(algo,args = list(disProg.k, control=kcontrol))
#Transfer results to the S4 object
if (!is.null(survRes.k)) {
sts@alarm[control$range,k] <- survRes.k$alarm
sts@upperbound[control$range,k] <- survRes.k$upperbound
#Control object needs only to be set once
sts@control <- survRes.k$control
}
}
#Reduce sts object to only those obervations in range
sts@observed <- sts@observed[control$range,,drop=FALSE]
sts@state <- sts@state[control$range,,drop=FALSE]
sts@populationFrac <- sts@populationFrac[control$range,,drop=FALSE]
sts@alarm <- sts@alarm[control$range,,drop=FALSE]
sts@upperbound <- sts@upperbound[control$range,,drop=FALSE]
#Set correct theta0t matrix for all
sts@control$theta0t <- control$theta0t
#Fix the corresponding start entry
start <- sts@start
new.sampleNo <- start[2] + min(control$range) - 1
start.year <- start[1] + (new.sampleNo - 1) %/% sts@freq
start.sampleNo <- (new.sampleNo - 1) %% sts@freq + 1
sts@start <- c(start.year,start.sampleNo)
sts@epoch <- sts@epoch[control$range]
sts@epochAsDate <- sts@epochAsDate
#Ensure dimnames in the new object
sts <- fix.dimnames(sts)
return(sts)
}
#Farrington wrapper
farrington <- function(sts, control=list(range=NULL, b=3, w=3, reweight=TRUE, verbose=FALSE,alpha=0.01),...) {
wrap.algo(sts,algo="algo.farrington",control=control,...)
}
#Bayes wrapper (this can be implemented more efficiently)
bayes <- function(sts, control = list(range = range, b = 0, w = 6, actY = TRUE,alpha=0.05),...) {
if (sts@epochAsDate) {
warning("algo.bayes currently can't handle Date entries. Computing reference values based on freq")
}
wrap.algo(sts,algo="algo.bayes",control=control)
}
#RKI wrapper
rki <- function(sts, control = list(range = range, b = 2, w = 4, actY = FALSE),...) {
if (sts@epochAsDate) {
warning("algo.rki currently can't handle Date entries. Computing reference values based on freq")
}
wrap.algo(sts,algo="algo.rki",control=control,...)
}
#outbreakP wrapper
outbreakP <- function(sts, control=list(range = range, k=100,
ret=c("cases","value"),maxUpperboundCases=1e5),...) {
wrap.algo(sts,algo="algo.outbreakP",control=control,...)
}
#HMM wrapper
hmm <- function(sts, control=list(range=NULL, noStates=2, trend=TRUE, noHarmonics=1,covEffectEqual=FALSE),...) {
if (sts@epochAsDate) {
warning("algo.hmm currently can't handle Date entries. Computing reference values based on freq")
}
wrap.algo(sts,algo="algo.hmm",control=control,...)
}
#Cusum wrapper
cusum <- function(sts, control = list(range=range, k=1.04, h=2.26, m=NULL, trans="standard",alpha=NULL),...) {
wrap.algo(sts,algo="algo.cusum",control=control,...)
}
#GLRpois wrapper
glrpois <- function(sts, control = list(range=range,c.ARL=5, S=1,
beta=NULL, Mtilde=1, M=-1, change="intercept",theta=NULL),...) {
wrap.algo(sts,algo="algo.glrpois",control=control,...)
}
#GLRnb wrapper
glrnb <- function(sts, control = list(range=range,c.ARL=5, mu0=NULL, alpha=0,
Mtilde=1, M=-1, change="intercept",theta=NULL,dir=c("inc","dec"),
ret=c("cases","value")), ...) {
wrap.algo(sts,algo="algo.glrnb",control=control,...)
}
#### this code definitely needs some more documentation -- wrap.algo atm is
# 100% without docu
#Rogerson wrapper
# theta0t now has to be a matrix
#library(surveillance)
#data("ha")
#rogerson(disProg2sts(ha),control=list(range=200:290,ARL0=100,s=1,theta0t=matrix(1,nrow=91,ncol=12)))
rogerson <- function(sts, control = list(range=range, theta0t=NULL,
ARL0=NULL, s=NULL, hValues=NULL,
distribution=c("poisson","binomial"),
nt=NULL, FIR=FALSE,limit=NULL, digits=1),...) {
if (sts@epochAsDate) {
warning("algo.rogerson currently can't handle Date entries. Computing reference values based on freq")
}
#Hook function to find right theta0t vector
control.hook = function(k,control) {
#Extract values relevant for the k'th component
control$theta0t <- control$theta0t[,k]
if (is.null(control[["nt",exact=TRUE]])) {
control$nt <- sts@populationFrac[control$range,k]
} else {
if (!all.equal(sts@populationFrac[control$range,k],control$nt[,k])) {
warning("Warning: nt slot of control specified, but specified population differs.")
} else {
control$nt <- control$nt[,k]
}
}
#If no hValues given then compute them
if (is.null(control[["hValues",exact=TRUE]])) {
#This code does not appear to work once n is big.
# control$hValues <- hValues(theta0 = unique(control$theta0t), ARL0=control$ARL0, s=control$s , distr = control$distribution, n=mean(control$nt))$hValues
control$hValues <- hValues(theta0 = unique(control$theta0t), ARL0=control$ARL0, s=control$s , distr = control$distribution)$hValues
}
return(control)
}
#WrapIt
wrap.algo(sts,algo="algo.rogerson",control=control,control.hook=control.hook,...)
}
|
heat_loss <- c(86, 80, 77, 78, 84, 75, 33, 38, 43)
temperature <- c(20,20,20,40,40,40,60,60,60)
plot(temperature,heat_loss)
# Apply the lm() function.
relation <- lm(heat_loss~temperature)
print(summary(relation))
# linear regression model : y= 109-1.07*x
anova(relation)
# y, fitted y-hat, and the residuals y - y-hat for the nine observations
cbind( temperature,heat_loss,predict(relation),resid(relation))
plot(predict(relation),resid(relation))
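# Optional check (not part of the original exercise): use the fitted line to
# predict heat loss at an intermediate temperature, with a 95% confidence
# interval; the temperature of 30 below is an arbitrary illustrative value.
predict(relation, newdata = data.frame(temperature = 30), interval = "confidence")
# by hand: roughly 109 - 1.07*30 = 76.9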
| /An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH11/EX11.10/Ex11_10.r | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 432 | r |
|
################################
##
## Written by Bob Angell, University of Utah School of Medicine, Biomedical Informatics, 2018
## This code may be freely used if proper attribution is used.
##
## Contact Information: bob.angell@utah.edu; angellr@mac.com; mobile: 801-706-2520
##
################################
################################
##
## This code will create a single graph plotting three ROC (AUC) curves. Below, one needs to read
## in the three datasets for this analysis. I have provided (3) anonymized datasets as examples.
##
################################
library(pROC)
library(Hmisc)
library(data.table)
library(ROCR)
library(grDevices)
ResultsOfRun1 <- Neuropathy
setnames(ResultsOfRun1, "Neuropathy", "ReferenceState")
setnames(ResultsOfRun1, "ProbNeuropathy", "Probability")
describe(ResultsOfRun1)
ResultsOfRun2 <- Retinopathy
setnames(ResultsOfRun2, "Retinopathy", "ReferenceState")
setnames(ResultsOfRun2, "ProbRetinopathy", "Probability")
describe(ResultsOfRun2)
ResultsOfRun3 <- Nephropathy
setnames(ResultsOfRun3, "Nephropathy", "ReferenceState")
setnames(ResultsOfRun3, "ProbNephropathy", "Probability")
describe(ResultsOfRun3)
###############################
##
## Now, lets create the 3 ROC models
##
###############################
roc.model1 = roc(ResultsOfRun1$ReferenceState, ResultsOfRun1$Probability, levels=c("Absent", "Present"))
roc.model2 = roc(ResultsOfRun2$ReferenceState, ResultsOfRun2$Probability, levels=c("Absent", "Present"))
roc.model3 = roc(ResultsOfRun3$ReferenceState, ResultsOfRun3$Probability, levels=c("Absent", "Present"))
###############################
##
## Calculate the area under the receiver operating curve for the 3 models.
##
###############################
auc(roc.model1)
auc(roc.model2)
auc(roc.model3)
###############################
##
## Calculate the 95% confidence intervals for the 3 models.
##
###############################
ci.auc(roc.model1)
ci.auc(roc.model2)
ci.auc(roc.model3)
#######################################################################################
## Various Plotting Options
## Handy if test set has small numbers.
#######################################################################################
plot.roc(roc.model1, col = "black") ## no smoothing
lines(smooth(roc.model1, method="density"), col = "purple") ## Density
lines(smooth(roc.model1, method="binormal"), col = "red") ## Binormal smoothing
plot.roc(roc.model2, col = "black") ## no smoothing
lines(smooth(roc.model2, method="density"), col = "purple") ## Density
lines(smooth(roc.model2, method="binormal"), col = "red") ## Binormal smoothing
plot.roc(roc.model3, col = "black") ## no smoothing
lines(smooth(roc.model3, method="density"), col = "purple") ## Density
lines(smooth(roc.model3, method="binormal"), col = "red") ## Binormal smoothing
##############################################
##
## Regular Plotting Options
## ROC and 95% Confidence Levels.
##
##############################################
##Plot simple ROC model to get a look
plot.roc(roc.model1, col = "black", print.auc=TRUE, grid=c(0.1, 0.1), grid.col=c("green", "red"))
plot.roc(roc.model2, col = "black", print.auc=TRUE, grid=c(0.1, 0.1), grid.col=c("green", "red"))
plot.roc(roc.model3, col = "black", print.auc=TRUE, grid=c(0.1, 0.1), grid.col=c("green", "red"))
##find AUC for model and find confidence interval for AUC
auc(roc.model1)
ci.auc(roc.model1)
auc(roc.model2)
ci.auc(roc.model2)
auc(roc.model3)
ci.auc(roc.model3)
###############################
##
## This will graph each model and draw 95% CI around the curve.
##
###############################
##find and plot confidence intervals
shape.cis1 <- ci.sp(roc.model1, sensitivities=seq(0, 1, .01), boot.n=500)
print(shape.cis1)
plot(shape.cis1, type="shape", col="yellow")
shape.cis2 <- ci.sp(roc.model2, sensitivities=seq(0, 1, .01), boot.n=500)
print(shape.cis2)
plot(shape.cis2, type="shape", col="pink")
shape.cis3 <- ci.sp(roc.model3, sensitivities=seq(0, 1, .01), boot.n=500)
print(shape.cis3)
plot(shape.cis3, type="shape", col="salmon")
###############################
##
## Table of Confidence Intervals
## ROC and 95% Confidence Levels.
##
###############################
ci(roc.model1)
auc(roc.model1)
thresholds <- ci.thresholds(roc.model1, boot.stratified=TRUE, conf.level=0.95, boot.n=2000, thresholds=seq(0, 1, 0.01))
print(thresholds)
ci(roc.model2)
auc(roc.model2)
thresholds <- ci.thresholds(roc.model2, boot.stratified=TRUE, conf.level=0.95, boot.n=2000, thresholds=seq(0, 1, 0.01))
print(thresholds)
ci(roc.model3)
auc(roc.model3)
thresholds <- ci.thresholds(roc.model3, boot.stratified=TRUE, conf.level=0.95, boot.n=2000, thresholds=seq(0, 1, 0.01))
print(thresholds)
###############################
###############################
##
## Set the colors we want for the 3 models.
##
###############################
color1 <- adjustcolor("black", alpha.f = 1.0)
color2 <- adjustcolor("red", alpha.f = 1.0)
color3 <- adjustcolor("blue", alpha.f = 1.0)
color4 <- adjustcolor("lightsalmon", alpha.f = 0.8)
color5 <- adjustcolor("pink", alpha.f = 0.8)
color6 <- adjustcolor("yellow", alpha.f = 0.8)
color7 <- adjustcolor("red", alpha.f = 0.6)
color8 <- adjustcolor("green", alpha.f = 0.6)
###############################
##
## Here is where the magic happens with 3-in-1 ... this is a nice graph.
##
###############################
plot(smooth(roc.model1), col = color1, print.auc=TRUE, print.auc.y = .6, lwd=3, main = "aROC Microvasculature Model Results",grid=c(0.1, 0.1), grid.col=c(color7, color8))
plot(smooth(roc.model2), col = color2, print.auc=TRUE, print.auc.y = .5, lwd=3, add = TRUE)
plot(smooth(roc.model3), col = color3, print.auc=TRUE, print.auc.y = .4, lwd=3, add = TRUE)
plot(legend("bottomright", legend=c("Neuropathy", "Retinopathy", "Nephropathy"),col=c(par("fg"), color2, color3, color1), lwd=3), add=TRUE)
| /PublicROC.R | no_license | angellr/aROC-code | R | false | false | 5,911 | r |
|
topbar <-
fluidRow(
column(4,
actionButton("toggleSidebar", "Hide sidebar", icon=icon("eye-slash"),
width="100%")),
disabled(column(3,
actionButton("doPlot",
label="Submit",
style="color: #fff; background-color: #337ab7; border-color: #2e6da4;",
width="100%"))),
column(1,
## Add WashU link to region
uiOutput("WashULink")),
column(1,
## Add UCSC link to region
uiOutput("UCSCLink")),
column(3,
verbatimTextOutput("coordinates", placeholder=T))
)
| /ui_elements/mainPanelTopbar.R | no_license | mireia-bioinfo/PIRB_shinyApp | R | false | false | 650 | r | topbar <-
#' Example offspring 3n genotypes
#'
#' @format A matrix with 50 rows (one row = one offspring) and 100 columns (one column = one marker)
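#'
#' @examples
#' # illustrative usage (added as an example; assumes the package data is installed):
#' data(APIS_offspring3n)
#' dim(APIS_offspring3n) # 50 offspring x 100 markers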
"APIS_offspring3n"
| /R/APIS_offspring3n.R | no_license | cran/APIS | R | false | false | 157 | r | #' Example offspring 3n genotypes
|
setwd("D:\\2021.3.metagenome/Alpha_beta/")
library(Hmisc)
library(psych)
species<-read.csv('species.abundunce.xls',sep=',',row.names=1,encoding="UTF-8",check.names = F)
#genus <- otu[,-1]
# If starting from an absolute abundance table, convert it to relative abundance as follows (optional):
#species<-species[order(rowSums(species),decreasing = T),]
#species <- head(species,50)
species <- t(species)
species<-species/rowSums(species)
genus<-t(species)
write.table(genus,file='species.txt',sep='\t')
#otu<-t(otu)
#otu<-otu/rowSums(otu)
#otu<-t(otu)
#write.table(otu,file='otu1.txt',sep='\t')
# Next, filter the relative abundance table (optional):
# drop low-abundance or low-frequency taxa
genus <- genus[which(rowSums(genus) >= 0.005), ] # keep only genera whose summed relative abundance exceeds 0.005
# e.g. keep only genera detected in at least 5 samples
genus1 <- genus
genus1[genus1>0] <- 1
genus <- genus[which(rowSums(genus1) >= 5), ]
# Test whether pairs of genera co-vary in abundance, using the Spearman correlation as an example
genus_corr <- rcorr(t(genus), type = 'spearman')
#genus_corr = corr.test(genus,use="pairwise",method="spearman",adjust="fdr",alpha=0.05)
occor_r = genus_corr$r # correlation coefficient (r) matrix
occor_p = genus_corr$P # p-value matrix
# Adjust the p-values for multiple testing with the BH (FDR) method, keeping
# the matrix shape, and use the adjusted values in the filter below
occor_p_adj <- occor_p
occor_p_adj[] <- p.adjust(occor_p, method = 'BH')
occor_p_adj[is.na(occor_p_adj)] <- 1 # rcorr() leaves NA on the diagonal; NA is not allowed in logical subsetting
# Set correlations with adjusted p > 0.05 or |r| < 0.6 to 0
occor_r[occor_p_adj > 0.05 | abs(occor_r) < 0.6] = 0
diag(occor_r) <- 0 # set the diagonal (self-correlation) to 0
# This yields the network file in adjacency-matrix form (the genus correlation matrix)
write.csv(occor_r,file='genus_ccor.csv') # export as csv
bian <- read.csv('Gephi.bian.csv')
bian[which(bian$Weight > 0),'pn'] <- 'p'
bian[which(bian$Weight < 0),'pn'] <- 'n'
head(bian)
write.csv(bian,file='Gephi.bian.pn.csv',row.names = F)
##!!!!! NOTE: delete the first column (the row names) before importing into Gephi
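# Optional sanity check before importing into Gephi (assumes the edge list
# uses the usual Gephi columns Source/Target/Weight; adjust if yours differ):
# library(igraph)
# g <- graph_from_data_frame(bian[, c('Source', 'Target')], directed = FALSE)
# E(g)$weight <- abs(bian$Weight)
# vcount(g); ecount(g) # node and edge counts should match what Gephi reports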
| /2021.4.17.gephi.r | no_license | joybio/scripts_for_Metagenome | R | false | false | 2,014 | r | setwd("D:\\2021.3.metagenome/Alpha_beta/")
|
setwd("E:/Dropbox/projects/ownership/calibration")
rm(list = ls())
library(Rcpp)
source("functions/solve_value_function.R")
source("functions/solve_steadystate.R")
params = list(tau_res = 50, tau_max = 0.25, theta = 0.85, inv_frac = 0.4,
delta = 0.95, max_runs = 500, Vtol = 10^-3, quiet = 1)
tau_res = params$tau_res
tau_max = params$tau_max
theta = params$theta
inv_frac = params$inv_frac
delta = params$delta
quiet = params$quiet
dir_string = "lognormal_gambeta"
load(paste("models/", dir_string, "/matched_data.RData", sep = ""))
valdata = data.table(tau = numeric(0), gam = numeric(0), Fgam = numeric(0), V = numeric(0), EV = numeric(0),
wtp = numeric(0), best_saleprob = numeric(0), best_p = numeric(0), ss = numeric(0), val_ss = numeric(0),
buyer_dist = numeric(0), seller_dist = numeric(0))
for(tau_try in (0:tau_res) / tau_res * tau_max) {
print(paste("TAU_TRY:", tau_try))
data = rawdata
data = solve_value_function(tau = tau_try, data = data, transmat = transmat, params = params)
data = solve_steadystate(data = data, transmat = transmat, efficient = 0)
data[, tau := tau_try]
setcolorder(data, names(valdata))
valdata = rbindlist(list(valdata, data))
}
taxdata = valdata[, .(
avg_gam = sum(val_ss * gam),
sale_freq = sum(ss * best_saleprob),
avg_transaction_price = sum(seller_dist * best_p),
avg_offered_price = sum(ss * best_p),
avg_pctile_markup = sum(ss * ((1-best_saleprob) - Fgam))
), by = tau]
slp(taxdata, tau, avg_gam)
slp(taxdata, tau, sale_freq)
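# slp() is assumed to be defined in one of the sourced helper files above;
# if it is not available, a quick equivalent look with base graphics would be:
# plot(taxdata$tau, taxdata$avg_gam, type = "l", xlab = "tau", ylab = "avg_gam")
# plot(taxdata$tau, taxdata$sale_freq, type = "l", xlab = "tau", ylab = "sale_freq")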
| /testcode/run_lognormal_gambeta.R | no_license | anthonyleezhang/dlcode | R | false | false | 1,572 | r | setwd("E:/Dropbox/projects/ownership/calibration")
|
#!/usr/bin/env Rscript
library(methods)
library(Matrix)
library(MASS)
#library(Rcpp)
library(lme4)
# Read in your data as an R dataframe
basedir <- c("/seastor/helenhelen/ISR_2015")
resultdir <- paste(basedir,"/me/tmap/results/mem",sep="/")
setwd(resultdir)
r.itemInfo <- matrix(data=NA, nrow=2, ncol=4)
## read data
#get data for each trial
item_file <- paste(basedir,"/me/tmap/data/item/mem.txt",sep="")
item_data <- read.table(item_file,header=FALSE)
load(paste("/home/helenhelen/DQ/project/gitrepo/ISR_2015/ROI_based/me/col_names.Rda",sep=""))
colnames(item_data) <- col_names
item_data$subid <- as.factor(item_data$subid)
item_data$pid <- as.factor(item_data$pid)
subdata <- item_data
itemInfo_actmean <- lmer(RIFG_rsadiff~LpPHG_actmean+(1+LpPHG_actmean|subid),REML=FALSE,data=subdata)
itemInfo_actmean.null <- lmer(RIFG_rsadiff~1+(1+LpPHG_actmean|subid),REML=FALSE,data=subdata)
itemInfo_di <- lmer(RIFG_rsadiff~LpPHG_actmean+(1+LpPHG_rsadiff|subid),REML=FALSE,data=subdata)
itemInfo_di.null <- lmer(RIFG_rsadiff~1+(1+LpPHG_rsadiff|subid),REML=FALSE,data=subdata)
mainEffect.itemInfo_actmean <- anova(itemInfo_actmean,itemInfo_actmean.null)
r.itemInfo[1,1]=mainEffect.itemInfo_actmean[2,6]
r.itemInfo[1,2]=mainEffect.itemInfo_actmean[2,7]
r.itemInfo[1,3]=mainEffect.itemInfo_actmean[2,8]
r.itemInfo[1,4]=fixef(itemInfo_actmean)[2];
mainEffect.itemInfo_di <- anova(itemInfo_di,itemInfo_di.null)
r.itemInfo[2,1]=mainEffect.itemInfo_di[2,6]
r.itemInfo[2,2]=mainEffect.itemInfo_di[2,7]
r.itemInfo[2,3]=mainEffect.itemInfo_di[2,8]
r.itemInfo[2,4]=fixef(itemInfo_di)[2];
write.matrix(r.itemInfo,file="itemInfo_RIFG_LpPHG.txt",sep="\t")
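# Optional check (not in the original script): Wald confidence intervals for
# the fixed effects, to complement the likelihood-ratio tests above.
confint(itemInfo_actmean, method = "Wald")
confint(itemInfo_di, method = "Wald")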
| /ROI_based/me/mem/itemInfo_RIFG_LpPHG.R | no_license | QQXiao/ISR_2015 | R | false | false | 1,635 | r | #!usr/bin/env Rscript
library(methods)
library(Matrix)
library(MASS)
#library(Rcpp)
library(lme4)
# Read in your data as an R dataframe
basedir <- c("/seastor/helenhelen/ISR_2015")
resultdir <- paste(basedir,"/me/tmap/results/mem",sep="/")
setwd(resultdir)
r.itemInfo <- matrix(data=NA, nr=2, nc=4)
## read data
#get data for each trial
item_file <- paste(basedir,"/me/tmap/data/item/mem.txt",sep="")
item_data <- read.table(item_file,header=FALSE)
load(paste("/home/helenhelen/DQ/project/gitrepo/ISR_2015/ROI_based/me/col_names.Rda",sep=""))
colnames(item_data) <- col_names
item_data$subid <- as.factor(item_data$subid)
item_data$pid <- as.factor(item_data$pid)
subdata <- item_data
itemInfo_actmean <- lmer(RIFG_rsadiff~LpPHG_actmean+(1+LpPHG_actmean|subid),REML=FALSE,data=subdata)
itemInfo_actmean.null <- lmer(RIFG_rsadiff~1+(1+LpPHG_actmean|subid),REML=FALSE,data=subdata)
itemInfo_di <- lmer(RIFG_rsadiff~LpPHG_actmean+(1+LpPHG_rsadiff|subid),REML=FALSE,data=subdata)
itemInfo_di.null <- lmer(RIFG_rsadiff~1+(1+LpPHG_rsadiff|subid),REML=FALSE,data=subdata)
mainEffect.itemInfo_actmean <- anova(itemInfo_actmean,itemInfo_actmean.null)
r.itemInfo[1,1]=mainEffect.itemInfo_actmean[2,6]
r.itemInfo[1,2]=mainEffect.itemInfo_actmean[2,7]
r.itemInfo[1,3]=mainEffect.itemInfo_actmean[2,8]
r.itemInfo[1,4]=fixef(itemInfo_actmean)[2];
mainEffect.itemInfo_di <- anova(itemInfo_di,itemInfo_di.null)
r.itemInfo[2,1]=mainEffect.itemInfo_di[2,6]
r.itemInfo[2,2]=mainEffect.itemInfo_di[2,7]
r.itemInfo[2,3]=mainEffect.itemInfo_di[2,8]
r.itemInfo[2,4]=fixef(itemInfo_di)[2];
write.matrix(r.itemInfo,file="itemInfso_RIFG_LpPHG.txt",sep="\t")
|
### R provides a wide range of functions for obtaining summary statistics.
### One method of obtaining descriptive statistics is to use the sapply( ) function with a specified summary statistic.
# get means for variables in data frame mydata
# excluding missing values
sapply(mydata, mean, na.rm=TRUE)
#### Possible functions used in sapply include mean, sd, var, min, max, median, range, and quantile.
#### There are also numerous R functions designed to provide a range of descriptive statistics at once. For example
summary(mydata)
# mean,median,25th and 75th quartiles,min,max
fivenum(x)
# Tukey min,lower-hinge, median,upper-hinge,max
###Using the Hmisc package
library(Hmisc)
describe(mydata)
# n, nmiss, unique, mean, 5,10,25,50,75,90,95th percentiles
# 5 lowest and 5 highest scores
### Using the pastecs package
library(pastecs)
stat.desc(mydata)
# nbr.val, nbr.null, nbr.na, min max, range, sum,
# median, mean, SE.mean, CI.mean, var, std.dev, coef.var
### Using the psych package
library(psych)
describe(mydata)
# item name ,item number, nvalid, mean, sd,
# median, mad, min, max, skew, kurtosis, se
### Summary Statistics by Group
### A simple way of generating summary statistics by grouping variable is available in the psych package.
library(psych)
describeBy(mydata, group) # describe.by() is the older, deprecated name
### The doBy package provides much of the functionality of SAS PROC SUMMARY. It defines the desired table using a model formula and a function. Here is a simple example.
library(doBy)
summaryBy(mpg + wt ~ cyl + vs, data = mtcars,
FUN = function(x) { c(m = mean(x), s = sd(x)) } )
# produces mpg.m wt.m mpg.s wt.s for each
# combination of the levels of cyl and vs
# the same pattern applied to another data frame (assumes a data frame
# data.r with columns pec and Rcost)
Per.Rcost=summaryBy(pec~Rcost, data = data.r, FUN = list(mean, max, min, median, sd))
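### A concrete, self-contained illustration of the grouped summaries above,
### using the built-in mtcars data (added here as an example; not part of
### the original notes)
library(psych)
describeBy(mtcars$mpg, group = mtcars$cyl)
# n, mean, sd, median, ... of mpg separately for 4-, 6- and 8-cylinder cars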
| /DESCRIPTIVE Statistics.r | no_license | chunhuayu/R-Learning | R | false | false | 1,770 | r | ### R provides a wide range of functions for obtaining summary statistics.
### One method of obtaining descriptive statistics is to use the sapply( ) function with a specified summary statistic.
# get means for variables in data frame mydata
# excluding missing values
sapply(mydata, mean, na.rm=TRUE)
#### Possible functions used in sapply include mean, sd, var, min, max, median, range, and quantile.
#### There are also numerous R functions designed to provide a range of descriptive statistics at once. For example
summary(mydata)
# mean,median,25th and 75th quartiles,min,max
fivenum(x)
# Tukey min,lower-hinge, median,upper-hinge,max
###Using the Hmisc package
library(Hmisc)
describe(mydata)
# n, nmiss, unique, mean, 5,10,25,50,75,90,95th percentiles
# 5 lowest and 5 highest scores
### Using the pastecs package
library(pastecs)
stat.desc(mydata)
# nbr.val, nbr.null, nbr.na, min max, range, sum,
# median, mean, SE.mean, CI.mean, var, std.dev, coef.var
### Using the psych package
library(psych)
describe(mydata)
# item name ,item number, nvalid, mean, sd,
# median, mad, min, max, skew, kurtosis, se
### Summary Statistics by Group
### A simple way of generating summary statistics by grouping variable is available in the psych package.
library(psych)
describe.by(mydata, group,...)
### The doBy package provides much of the functionality of SAS PROC SUMMARY. It defines the desired table using a model formula and a function. Here is a simple example.
### library(doBy)
summaryBy(mpg + wt ~ cyl + vs, data = mtcars,
FUN = function(x) { c(m = mean(x), s = sd(x)) } )
Per.Rcost=summaryBy(pec~Rcost, data = data.r, FUN = list(mean, max, min, median, sd))
# produces mpg.m wt.m mpg.s wt.s for each
# combination of the levels of cyl and vs
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_normal.R
\name{is_normal}
\alias{is_normal}
\title{is_normal}
\usage{
is_normal(dataframe_column)
}
\arguments{
\item{dataframe_column}{A column vector}
}
\value{
boolean
}
\description{
Takes a continuous variable from a dataframe
and determines whether the values are normally distributed;
outputs TRUE if they are, FALSE if not.
}
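\examples{
# illustrative call (added as an example; assumes a numeric column):
is_normal(iris$Sepal.Length)
}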
| /man/is_normal.Rd | no_license | melehya/descripstat | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_normal.R
\name{is_normal}
\alias{is_normal}
\title{is_normal}
\usage{
is_normal(dataframe_column)
}
\arguments{
\item{dataframe_column}{A column vector}
}
\value{
boolean
}
\description{
Takes a continuous variable from a dataframe
and determines if the values are normally distributed
outputs TRUE if they are, FALSE if not
}
|
context("Likelihood estimation")
test_that("LikelihoodMDP", {
set.seed(1234)
s1 <- 0
s2 <- 1
N.fixed <- 50
T.fixed <- 5
p <- 2
q <- 3
x <- matrix(runif(T.fixed * N.fixed * p), nrow = T.fixed)
z <- matrix(runif(T.fixed * N.fixed * q, 1, 3), nrow = T.fixed)
x.empty <- matrix(0, nrow = T.fixed, ncol = N.fixed) # for p = 0 case
z.empty <- matrix(0, nrow = T.fixed, ncol = N.fixed) # for q = 0 case
theta1 <- GenerateMDPTheta(M = 1, s = s1)
theta2 <- GenerateMDPTheta(M = 1, s = s2)
theta3 <- GenerateMDPTheta(M = 2, s = s1)
theta4 <- GenerateMDPTheta(M = 2, s = s2)
theta5 <- GenerateMDPTheta(M = 3, s = s1, p = p)
theta6 <- GenerateMDPTheta(M = 3, s = s2, p = p)
theta7 <- GenerateMDPTheta(M = 3, s = s1, q = q)
theta8 <- GenerateMDPTheta(M = 3, s = s2, q = q)
theta9 <- GenerateMDPTheta(M = 3, s = s1, p = p, q = q)
theta10 <- GenerateMDPTheta(M = 3, s = s2, p = p, q = q)
sample1 <- GenerateMDPSample(theta1, N = N.fixed, T = T.fixed)
sample2 <- GenerateMDPSample(theta2, N = N.fixed, T = T.fixed)
sample3 <- GenerateMDPSample(theta3, N = N.fixed, T = T.fixed)
sample4 <- GenerateMDPSample(theta4, N = N.fixed, T = T.fixed)
sample5 <- GenerateMDPSample(theta5, N = N.fixed, T = T.fixed, x = x)
sample6 <- GenerateMDPSample(theta6, N = N.fixed, T = T.fixed, x = x)
sample7 <- GenerateMDPSample(theta7, N = N.fixed, T = T.fixed, z = z)
sample8 <- GenerateMDPSample(theta8, N = N.fixed, T = T.fixed, z = z)
sample9 <- GenerateMDPSample(theta9, N = N.fixed, T = T.fixed, x = x, z = z)
sample10 <- GenerateMDPSample(theta10, N = N.fixed, T = T.fixed, x = x, z = z)
LikelihoodMDP.wrapper <- function(sample, x, z)
{
theta <- (sample$MDP.model)$theta
M <- length(theta$alpha)
rho <- t(as.matrix(rep(0,M)))
beta <- t(as.matrix(rep(0,M)))
gamma <- as.matrix(0)
if (!is.null(theta$rho))
rho <- theta$rho
if (!is.null(theta$beta))
beta <- theta$beta
if (!is.null(theta$gamma))
gamma <- theta$gamma
likelihood <- LikelihoodMDP(sample$y.sample,
sample$y.lagged, x, z,
c(theta$alpha), c(theta$mu), c(theta$sigma),
rho, beta, c(gamma))
likelihood.initial.fixed <- NULL
if (is.null(sample$MDP.model$theta$rho)) # if s = 0
likelihood.initial.fixed <- LikelihoodMDPInitialFixed(sample$y.sample,
sample$y.lagged, x, z,
c(theta$alpha), c(theta$mu), c(theta$sigma),
rho, beta, c(gamma))
list(likelihood = likelihood,
likelihood.initial.fixed = likelihood.initial.fixed,
sample = sample)
}
SanityCheck <- function(wrapped)
{
    # sanity check: the returned log-likelihood should be negative
    expect_lt(wrapped$likelihood, 0)
# check if likelihoods are same for the case when s = 0
if (is.null(wrapped$sample$MDP.model$theta$rho))
expect_equal(wrapped$likelihood, wrapped$likelihood.initial.fixed)
}
SanityCheck(LikelihoodMDP.wrapper(sample1, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample2, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample3, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample4, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample5, x, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample6, x, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample7, x.empty, z))
SanityCheck(LikelihoodMDP.wrapper(sample8, x.empty, z))
SanityCheck(LikelihoodMDP.wrapper(sample9, x, z))
SanityCheck(LikelihoodMDP.wrapper(sample10, x, z))
})
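# To run just this spec interactively (assumes testthat and the package under
# test are available):
# testthat::test_file("tests/testthat/test_likelihood.R")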
| /tests/testthat/test_likelihood.R | no_license | chiyahn/mixPanel | R | false | false | 3,662 | r | context("Likelihood estimation")
test_that("LikelihoodMDP", {
set.seed(1234)
s1 <- 0
s2 <- 1
N.fixed <- 50
T.fixed <- 5
p <- 2
q <- 3
x <- matrix(runif(T.fixed * N.fixed * p), nrow = T.fixed)
z <- matrix(runif(T.fixed * N.fixed * q, 1, 3), nrow = T.fixed)
x.empty <- matrix(0, nrow = T.fixed, ncol = N.fixed) # for p = 0 case
z.empty <- matrix(0, nrow = T.fixed, ncol = N.fixed) # for q = 0 case
theta1 <- GenerateMDPTheta(M = 1, s = s1)
theta2 <- GenerateMDPTheta(M = 1, s = s2)
theta3 <- GenerateMDPTheta(M = 2, s = s1)
theta4 <- GenerateMDPTheta(M = 2, s = s2)
theta5 <- GenerateMDPTheta(M = 3, s = s1, p = p)
theta6 <- GenerateMDPTheta(M = 3, s = s2, p = p)
theta7 <- GenerateMDPTheta(M = 3, s = s1, q = q)
theta8 <- GenerateMDPTheta(M = 3, s = s2, q = q)
theta9 <- GenerateMDPTheta(M = 3, s = s1, p = p, q = q)
theta10 <- GenerateMDPTheta(M = 3, s = s2, p = p, q = q)
sample1 <- GenerateMDPSample(theta1, N = N.fixed, T = T.fixed)
sample2 <- GenerateMDPSample(theta2, N = N.fixed, T = T.fixed)
sample3 <- GenerateMDPSample(theta3, N = N.fixed, T = T.fixed)
sample4 <- GenerateMDPSample(theta4, N = N.fixed, T = T.fixed)
sample5 <- GenerateMDPSample(theta5, N = N.fixed, T = T.fixed, x = x)
sample6 <- GenerateMDPSample(theta6, N = N.fixed, T = T.fixed, x = x)
sample7 <- GenerateMDPSample(theta7, N = N.fixed, T = T.fixed, z = z)
sample8 <- GenerateMDPSample(theta8, N = N.fixed, T = T.fixed, z = z)
sample9 <- GenerateMDPSample(theta9, N = N.fixed, T = T.fixed, x = x, z = z)
sample10 <- GenerateMDPSample(theta10, N = N.fixed, T = T.fixed, x = x, z = z)
LikelihoodMDP.wrapper <- function(sample, x, z)
{
theta <- (sample$MDP.model)$theta
M <- length(theta$alpha)
rho <- t(as.matrix(rep(0,M)))
beta <- t(as.matrix(rep(0,M)))
gamma <- as.matrix(0)
if (!is.null(theta$rho))
rho <- theta$rho
if (!is.null(theta$beta))
beta <- theta$beta
if (!is.null(theta$gamma))
gamma <- theta$gamma
likelihood <- LikelihoodMDP(sample$y.sample,
sample$y.lagged, x, z,
c(theta$alpha), c(theta$mu), c(theta$sigma),
rho, beta, c(gamma))
likelihood.initial.fixed <- NULL
if (is.null(sample$MDP.model$theta$rho)) # if s = 0
likelihood.initial.fixed <- LikelihoodMDPInitialFixed(sample$y.sample,
sample$y.lagged, x, z,
c(theta$alpha), c(theta$mu), c(theta$sigma),
rho, beta, c(gamma))
list(likelihood = likelihood,
likelihood.initial.fixed = likelihood.initial.fixed,
sample = sample)
}
SanityCheck <- function(wrapped)
{
# check if likelihood is valid.
expect_less_than(wrapped$likelihood, 0)
# check if likelihoods are same for the case when s = 0
if (is.null(wrapped$sample$MDP.model$theta$rho))
expect_equal(wrapped$likelihood, wrapped$likelihood.initial.fixed)
}
SanityCheck(LikelihoodMDP.wrapper(sample1, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample2, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample3, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample4, x.empty, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample5, x, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample6, x, z.empty))
SanityCheck(LikelihoodMDP.wrapper(sample7, x.empty, z))
SanityCheck(LikelihoodMDP.wrapper(sample8, x.empty, z))
SanityCheck(LikelihoodMDP.wrapper(sample9, x, z))
SanityCheck(LikelihoodMDP.wrapper(sample10, x, z))
})
|
\name{NISTvoltTOabvolt}
\alias{NISTvoltTOabvolt}
\title{Convert volt to abvolt}
\usage{NISTvoltTOabvolt(volt)}
\description{\code{NISTvoltTOabvolt} converts from volt (V) to abvolt }
\arguments{
\item{volt}{volt (V) }
}
\value{abvolt }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTvoltTOabvolt(10)
}
\keyword{programming} | /man/NISTvoltTOabvolt.Rd | no_license | cran/NISTunits | R | false | false | 701 | rd |
suppressPackageStartupMessages({
library(shiny)
library(shinyFiles)
library(SummarizedExperiment)
library(MultiAssayExperiment)
library(TCGAbiolinks)
library(ggplot2)
library(shinyBS)
library(stringr)
library(ggrepel)
library(plotly)
library(pathview)
library(htmlwidgets)
library(ELMER)
library(readr)
library(data.table)
library(grid)
library(maftools)
library(dplyr)
library(minfi)
library(TCGAbiolinksGUI.data)
library(caret)
data(maf.tumor)
data(GDCdisease)
options(shiny.maxRequestSize=-1) # Remove limit of upload
options(shiny.deprecation.messages=FALSE)
options(warn =-1)
})
getDataCategory <- function(legacy){
    data.category.harmonized <- sort(c("Transcriptome Profiling",
"Copy Number Variation",
"Simple Nucleotide Variation",
"DNA Methylation"))
data.category.legacy <- sort(c("Copy number variation",
"Simple Nucleotide Variation",
"Raw Sequencing Data",
"Protein expression",
"Gene expression",
"DNA methylation",
"Raw microarray data",
# "Structural Rearrangement", # Controlled
"Other"))
if(legacy) return(data.category.legacy)
    return(data.category.harmonized)
}
createTable <- function(df,tableType = "TCGAbiolinks"){
DT::datatable(df,
extensions = c('Buttons',"FixedHeader"),
class = 'cell-border stripe',
options = list(dom = 'Blfrtip',
buttons =
list('colvis', list(
extend = 'collection',
buttons = list(list(extend='csv',
filename = tableType),
list(extend='excel',
filename = tableType),
list(extend='pdf',
title = "",
filename= tableType)),
text = 'Download'
)),
fixedHeader = TRUE,
pageLength = 20,
scrollX = TRUE,
lengthMenu = list(c(10, 20, -1), c('10', '20', 'All'))
),
filter = 'top'
)
}
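# Illustrative usage (not called anywhere at this point in the app; any data
# frame works as input):
# createTable(head(mtcars), tableType = "example")
# renders a DT table with csv/excel/pdf export buttons named "example.*"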
getFileType <- function(legacy, data.category){
file.type <- NULL
if(grepl("Copy number variation",data.category, ignore.case = TRUE) & legacy)
file.type <- c("nocnv_hg18.seg",
"nocnv_hg19.seg",
"hg19.seg",
"hg18.seg")
if(grepl("Gene expression", data.category, ignore.case = TRUE) & legacy)
file.type <- c("normalized_results",
"results")
if(data.category == "Raw microarray data") file.type <- c("idat")
return(file.type)
}
getExpStrategy <- function(legacy, platform){
experimental.strategy <- NULL
# These are the cases we need to distinguish
if(grepl("Illumina HiSeq", platform, ignore.case = TRUE) & legacy)
experimental.strategy <- c("Total RNA-Seq",
"RNA-Seq",
"miRNA-Seq")
if(grepl("Illumina GA", platform, ignore.case = TRUE) & legacy)
experimental.strategy <- c("RNA-Seq",
"miRNA-Seq")
return(experimental.strategy)
}
getWorkFlow <- function(legacy, data.category){
workflow <- NULL
if(data.category == "Transcriptome Profiling" & !legacy)
workflow <- c("HTSeq - Counts",
"HTSeq - FPKM-UQ",
"HTSeq - FPKM")
return(workflow)
}
getPlatform <- function(legacy, data.category){
platform <- NULL
if(!legacy & data.category != "DNA Methylation" ) return(platform) # platform is not used for harmonized
if(grepl("Copy number variation",data.category, ignore.case = TRUE)) platform <- "Affymetrix SNP Array 6.0"
if(data.category == "Protein expression") platform <- "MDA RPPA Core"
if(data.category == "Gene expression") platform <- c("Illumina HiSeq",
"HT_HG-U133A",
"AgilentG4502A_07_2",
"AgilentG4502A_07_1",
"HuEx-1_0-st-v2")
if(data.category == "DNA methylation") platform <- c("Illumina Human Methylation 450",
"Illumina Human Methylation 27",
"Illumina DNA Methylation OMA003 CPI",
"Illumina DNA Methylation OMA002 CPI",
"Illumina Hi Seq")
if(data.category == "DNA Methylation") platform <- c("Illumina Human Methylation 450",
"Illumina Human Methylation 27")
if(data.category == "Raw microarray data") platform <- c("Illumina Human Methylation 450",
"Illumina Human Methylation 27")
return(platform)
}
getDataType <- function(legacy, data.category){
data.type <- NULL
if(data.category == "Transcriptome Profiling" & !legacy)
data.type <- c("Gene Expression Quantification",
"Isoform Expression Quantification",
"miRNA Expression Quantification")
if(grepl("Copy number variation",data.category, ignore.case = TRUE) & !legacy)
data.type <- c("Copy Number Segment",
"Masked Copy Number Segment")
if(data.category == "Gene expression" & !legacy)
data.type <- c("Gene Expression Quantification",
"Isoform Expression Quantification",
"miRNA Expression Quantification")
if(data.category == "Gene expression" & legacy)
data.type <- c("Gene expression quantification",
"miRNA gene quantification",
"Exon junction quantification",
"Exon quantification",
"miRNA isoform quantification")
if(data.category == "Raw microarray data" & legacy)
data.type <- c("Raw intensities")
return(data.type)
}
table.code <- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
names(table.code) <- c("Primary solid Tumor","Recurrent Solid Tumor",
"Primary Blood Derived Cancer - Peripheral Blood",
"Recurrent Blood Derived Cancer - Bone Marrow",
"Additional - New Primary",
"Metastatic","Additional Metastatic",
"Human Tumor Original Cells",
"Primary Blood Derived Cancer - Bone Marrow",
"Blood Derived Normal","Solid Tissue Normal",
"Buccal Cell Normal","EBV Immortalized Normal",
"Bone Marrow Normal","Control Analyte",
"Recurrent Blood Derived Cancer - Peripheral Blood",
"Cell Lines","Primary Xenograft Tissue",
"Cell Line Derived Xenograft Tissue")
tcga.code <- c("Primary solid Tumor","Recurrent Solid Tumor",
"Primary Blood Derived Cancer - Peripheral Blood",
"Recurrent Blood Derived Cancer - Bone Marrow",
"Additional - New Primary",
"Metastatic","Additional Metastatic",
"Human Tumor Original Cells",
"Primary Blood Derived Cancer - Bone Marrow",
"Blood Derived Normal","Solid Tissue Normal",
"Buccal Cell Normal","EBV Immortalized Normal",
"Bone Marrow Normal","Control Analyte",
"Recurrent Blood Derived Cancer - Peripheral Blood",
"Cell Lines","Primary Xenograft Tissue",
"Cell Line Derived Xenograft Tissue")
names(tcga.code) <- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
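# Example of how these two lookup tables are meant to be used: the sample type
# is encoded in characters 14-15 of a TCGA barcode (illustrative only):
# tcga.code[substr("TCGA-02-0001-01C-01D-0182-01", 14, 15)] # "Primary solid Tumor"
# table.code["Solid Tissue Normal"]                         # "11"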
getMatchedPlatform <- function(query){
matched <- NULL
for(plat in query$Platform){
aux <- query[query$Platform == plat,]
if(is.null(matched)){
matched <- unlist(str_split(aux$barcode,","))
matched <- substr(matched,1,15)
} else {
barcode <- unlist(str_split(aux$barcode,","))
barcode <- substr(barcode,1,15)
matched <- intersect(matched, barcode)
}
}
return(matched)
}
getMatchedType <- function(barcode,type){
code <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
"TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
"CELL","XP","XCL")
names(code) <- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
type <- code[type]
groups <- t(combn(type,2))
matched <- NULL
for(i in 1:nrow(groups)) {
if(is.null(matched)){
matched <- TCGAquery_MatchedCoupledSampleTypes(unique(barcode),
c(groups[i,1], groups[i,2]))
matched <- substr(matched,1,15)
} else {
aux <- TCGAquery_MatchedCoupledSampleTypes(unique(barcode),
c(groups[i,1], groups[i,2]))
aux <- substr(aux,1,15)
matched <- intersect(matched, aux)
}
}
return(matched)
}
# Parse text area input into a character vector;
# entries may be separated by "," ";" or newline
parse.textarea.input <- function(text){
sep <- NULL
if(grepl(";",text)) sep <- ";"
if(grepl(",",text)) sep <- ","
if(grepl("\n",text)) sep <- "\n"
if(is.null(sep)) {
text <- text
} else {
text <- unlist(stringr::str_split(text,sep))
}
return (text)
}
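# Examples of the accepted separators (barcodes below are illustrative only):
# parse.textarea.input("TCGA-A5-A0GI,TCGA-A5-A0GH")  # -> c("TCGA-A5-A0GI", "TCGA-A5-A0GH")
# parse.textarea.input("TCGA-A5-A0GI;TCGA-A5-A0GH")  # same result with ";"
# parse.textarea.input("TCGA-A5-A0GI\nTCGA-A5-A0GH") # same result with newlines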
get.volumes <- function(directory = NULL){
if(is.null(directory) || identical(directory, character(0))) {
volumes <- c(wd="./",home=Sys.getenv("HOME"), getVolumes()(), temp=tempdir())
} else {
volumes <- c(home=Sys.getenv("HOME"), getVolumes()(), temp=tempdir(),wd="./")
path <- parseDirPath(volumes, directory)
names(path) <- path
volumes <- c(path, home=Sys.getenv("HOME"), getVolumes()(), temp=tempdir(),wd="./")
}
return(volumes)
}
#' @title Server side
#' @description Server side
#' @param input - input signal
#' @param output - output signal
#' @importFrom downloader download
#' @import pathview ELMER TCGAbiolinks SummarizedExperiment shiny ggrepel UpSetR
#' @keywords internal
TCGAbiolinksGUIServer <- function(input, output, session) {
session$onSessionEnded(stopApp)
server.path <- ifelse(system.file("app", package = "TCGAbiolinksGUI") == "",
"server",
file.path(system.file("app", package = "TCGAbiolinksGUI"),"server"))
source(file.path(server.path, "getmolecular.R"), local = TRUE)$value
source(file.path(server.path, "getsubtype.R"), local = TRUE)$value
source(file.path(server.path, "getmutation.R"), local = TRUE)$value
source(file.path(server.path, "getclinical.R"), local = TRUE)$value
source(file.path(server.path, "dnametidat.R"), local = TRUE)$value
source(file.path(server.path, "survival.R"), local = TRUE)$value
source(file.path(server.path, "volcano.R"), local = TRUE)$value
source(file.path(server.path, "heatmap.R"), local = TRUE)$value
source(file.path(server.path, "dmr.R"), local = TRUE)$value
source(file.path(server.path, "meanMet.R"), local = TRUE)$value
source(file.path(server.path, "gliomaclassifier.R"), local = TRUE)$value
source(file.path(server.path, "dea.R"), local = TRUE)$value
source(file.path(server.path, "pathview.R"), local = TRUE)$value
source(file.path(server.path, "eaplot.R"), local = TRUE)$value
source(file.path(server.path, "oncoprint.R"), local = TRUE)$value
source(file.path(server.path, "maftools.R"), local = TRUE)$value
source(file.path(server.path, "starburst.R"), local = TRUE)$value
source(file.path(server.path, "elmer.R"), local = TRUE)$value
source(file.path(server.path, "manageSE.R"), local = TRUE)$value
source(file.path(server.path, "getinference.R"), local = TRUE)$value
# Directory management
# Config
    # By default, create a TCGAbiolinksGUI directory under the user home and use it as the working directory
dir.create(paste0(Sys.getenv("HOME"),"/TCGAbiolinksGUI"), showWarnings = FALSE)
setwd(file.path(Sys.getenv("HOME"), "TCGAbiolinksGUI"))
shinyDirChoose(input, 'workingDir',
roots=get.volumes(),
session=session,
restrictions=system.file(package='base'))
shinyjs::hide("greetbox-outer")
observe({
if(!is.null(input$test)) stopApp() # stop shiny
})
# Configuration tab
output$wd <- renderPrint({
path <- parseDirPath(get.volumes(isolate({input$workingDir})), input$workingDir)
if(identical(path, character(0))) path <- getwd()
return(path)
})
observe({
output$downloadDataBut <- downloadHandler(
filename <- function() {
as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$downloadfile)$datapath)
},
content <- function(file){
file.copy(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$downloadfile)$datapath),
file)
}
)
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# File selection
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
observe({
shinyFileChoose(input,
'downloadfile',
roots = get.volumes(input$workingDir),
session = session,
restrictions = system.file(package='base'),
filetypes = c('csv', 'rda'))
})
hide("loading-content", TRUE, "fade")
}
| /inst/app/server.R | no_license | d1gges/TCGAbiolinksGUI | R | false | false | 15,001 | r | suppressPackageStartupMessages({
library(shiny)
library(shinyFiles)
library(SummarizedExperiment)
library(MultiAssayExperiment)
library(TCGAbiolinks)
library(ggplot2)
library(shinyBS)
library(stringr)
library(ggrepel)
library(plotly)
library(pathview)
library(htmlwidgets)
library(ELMER)
library(readr)
library(data.table)
library(grid)
library(maftools)
library(dplyr)
library(minfi)
library(TCGAbiolinksGUI.data)
library(caret)
data(maf.tumor)
data(GDCdisease)
options(shiny.maxRequestSize=-1) # Remove limit of upload
options(shiny.deprecation.messages=FALSE)
options(warn =-1)
})
getDataCategory <- function(legacy){
data.category.hamonirzed <- sort(c("Transcriptome Profiling",
"Copy Number Variation",
"Simple Nucleotide Variation",
"DNA Methylation"))
data.category.legacy <- sort(c("Copy number variation",
"Simple Nucleotide Variation",
"Raw Sequencing Data",
"Protein expression",
"Gene expression",
"DNA methylation",
"Raw microarray data",
# "Structural Rearrangement", # Controlled
"Other"))
if(legacy) return(data.category.legacy)
return(data.category.hamonirzed)
}
createTable <- function(df,tableType = "TCGAbiolinks"){
DT::datatable(df,
extensions = c('Buttons',"FixedHeader"),
class = 'cell-border stripe',
options = list(dom = 'Blfrtip',
buttons =
list('colvis', list(
extend = 'collection',
buttons = list(list(extend='csv',
filename = tableType),
list(extend='excel',
filename = tableType),
list(extend='pdf',
title = "",
filename= tableType)),
text = 'Download'
)),
fixedHeader = TRUE,
pageLength = 20,
scrollX = TRUE,
lengthMenu = list(c(10, 20, -1), c('10', '20', 'All'))
),
filter = 'top'
)
}
getFileType <- function(legacy, data.category){
file.type <- NULL
if(grepl("Copy number variation",data.category, ignore.case = TRUE) & legacy)
file.type <- c("nocnv_hg18.seg",
"nocnv_hg19.seg",
"hg19.seg",
"hg18.seg")
if(grepl("Gene expression", data.category, ignore.case = TRUE) & legacy)
file.type <- c("normalized_results",
"results")
if(data.category == "Raw microarray data") file.type <- c("idat")
return(file.type)
}
getExpStrategy <- function(legacy, platform){
experimental.strategy <- NULL
# These are the cases we need to distinguish
if(grepl("Illumina HiSeq", platform, ignore.case = TRUE) & legacy)
experimental.strategy <- c("Total RNA-Seq",
"RNA-Seq",
"miRNA-Seq")
if(grepl("Illumina GA", platform, ignore.case = TRUE) & legacy)
experimental.strategy <- c("RNA-Seq",
"miRNA-Seq")
return(experimental.strategy)
}
getWorkFlow <- function(legacy, data.category){
workflow <- NULL
if(data.category == "Transcriptome Profiling" & !legacy)
workflow <- c("HTSeq - Counts",
"HTSeq - FPKM-UQ",
"HTSeq - FPKM")
return(workflow)
}
getPlatform <- function(legacy, data.category){
platform <- NULL
if(!legacy & data.category != "DNA Methylation" ) return(platform) # platform is not used for harmonized
if(grepl("Copy number variation",data.category, ignore.case = TRUE)) platform <- "Affymetrix SNP Array 6.0"
if(data.category == "Protein expression") platform <- "MDA RPPA Core"
if(data.category == "Gene expression") platform <- c("Illumina HiSeq",
"HT_HG-U133A",
"AgilentG4502A_07_2",
"AgilentG4502A_07_1",
"HuEx-1_0-st-v2")
if(data.category == "DNA methylation") platform <- c("Illumina Human Methylation 450",
"Illumina Human Methylation 27",
"Illumina DNA Methylation OMA003 CPI",
"Illumina DNA Methylation OMA002 CPI",
"Illumina Hi Seq")
if(data.category == "DNA Methylation") platform <- c("Illumina Human Methylation 450",
"Illumina Human Methylation 27")
if(data.category == "Raw microarray data") platform <- c("Illumina Human Methylation 450",
"Illumina Human Methylation 27")
return(platform)
}
getDataType <- function(legacy, data.category){
data.type <- NULL
if(data.category == "Transcriptome Profiling" & !legacy)
data.type <- c("Gene Expression Quantification",
"Isoform Expression Quantification",
"miRNA Expression Quantification")
if(grepl("Copy number variation",data.category, ignore.case = TRUE) & !legacy)
data.type <- c("Copy Number Segment",
"Masked Copy Number Segment")
if(data.category == "Gene expression" & !legacy)
data.type <- c("Gene Expression Quantification",
"Isoform Expression Quantification",
"miRNA Expression Quantification")
if(data.category == "Gene expression" & legacy)
data.type <- c("Gene expression quantification",
"miRNA gene quantification",
"Exon junction quantification",
"Exon quantification",
"miRNA isoform quantification")
if(data.category == "Raw microarray data" & legacy)
data.type <- c("Raw intensities")
return(data.type)
}
table.code <- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
names(table.code) <- c("Primary solid Tumor","Recurrent Solid Tumor",
"Primary Blood Derived Cancer - Peripheral Blood",
"Recurrent Blood Derived Cancer - Bone Marrow",
"Additional - New Primary",
"Metastatic","Additional Metastatic",
"Human Tumor Original Cells",
"Primary Blood Derived Cancer - Bone Marrow",
"Blood Derived Normal","Solid Tissue Normal",
"Buccal Cell Normal","EBV Immortalized Normal",
"Bone Marrow Normal","Control Analyte",
"Recurrent Blood Derived Cancer - Peripheral Blood",
"Cell Lines","Primary Xenograft Tissue",
"Cell Line Derived Xenograft Tissue")
tcga.code <- c("Primary solid Tumor","Recurrent Solid Tumor",
"Primary Blood Derived Cancer - Peripheral Blood",
"Recurrent Blood Derived Cancer - Bone Marrow",
"Additional - New Primary",
"Metastatic","Additional Metastatic",
"Human Tumor Original Cells",
"Primary Blood Derived Cancer - Bone Marrow",
"Blood Derived Normal","Solid Tissue Normal",
"Buccal Cell Normal","EBV Immortalized Normal",
"Bone Marrow Normal","Control Analyte",
"Recurrent Blood Derived Cancer - Peripheral Blood",
"Cell Lines","Primary Xenograft Tissue",
"Cell Line Derived Xenograft Tissue")
names(tcga.code) <- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
getMatchedPlatform <- function(query){
matched <- NULL
for(plat in query$Platform){
aux <- query[query$Platform == plat,]
if(is.null(matched)){
matched <- unlist(str_split(aux$barcode,","))
matched <- substr(matched,1,15)
} else {
barcode <- unlist(str_split(aux$barcode,","))
barcode <- substr(barcode,1,15)
matched <- intersect(matched, barcode)
}
}
return(matched)
}
getMatchedType <- function(barcode,type){
code <- c("TP","TR","TB","TRBM","TAP","TM","TAM","THOC",
"TBM","NB","NT","NBC","NEBV","NBM","CELLC","TRB",
"CELL","XP","XCL")
names(code) <- c('01','02','03','04','05','06','07','08','09','10',
'11','12','13','14','20','40','50','60','61')
type <- code[type]
groups <- t(combn(type,2))
matched <- NULL
for(i in 1:nrow(groups)) {
if(is.null(matched)){
matched <- TCGAquery_MatchedCoupledSampleTypes(unique(barcode),
c(groups[i,1], groups[i,2]))
matched <- substr(matched,1,15)
} else {
aux <- TCGAquery_MatchedCoupledSampleTypes(unique(barcode),
c(groups[i,1], groups[i,2]))
aux <- substr(aux,1,15)
matched <- intersect(matched, aux)
}
}
return(matched)
}
# Parses the text area input.
# Possible separators: "," ";" or a newline.
parse.textarea.input <- function(text){
sep <- NULL
if(grepl(";",text)) sep <- ";"
if(grepl(",",text)) sep <- ","
if(grepl("\n",text)) sep <- "\n"
if(!is.null(sep)) {
text <- unlist(stringr::str_split(text,sep))
}
return(text)
}
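# Illustrative calls (inputs are hypothetical):
# parse.textarea.input("TCGA-AB-0001;TCGA-AB-0002") returns c("TCGA-AB-0001", "TCGA-AB-0002")
# parse.textarea.input("TCGA-AB-0001") is returned unchanged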
get.volumes <- function(directory = NULL){
if(is.null(directory) || identical(directory, character(0))) {
volumes <- c(wd="./",home=Sys.getenv("HOME"), getVolumes()(), temp=tempdir())
} else {
volumes <- c(home=Sys.getenv("HOME"), getVolumes()(), temp=tempdir(),wd="./")
path <- parseDirPath(volumes, directory)
names(path) <- path
volumes <- c(path, home=Sys.getenv("HOME"), getVolumes()(), temp=tempdir(),wd="./")
}
return(volumes)
}
#' @title Server side
#' @description Server-side logic for the TCGAbiolinksGUI Shiny application
#' @param input - input signal
#' @param output - output signal
#' @importFrom downloader download
#' @import pathview ELMER TCGAbiolinks SummarizedExperiment shiny ggrepel UpSetR
#' @keywords internal
TCGAbiolinksGUIServer <- function(input, output, session) {
session$onSessionEnded(stopApp)
server.path <- ifelse(system.file("app", package = "TCGAbiolinksGUI") == "",
"server",
file.path(system.file("app", package = "TCGAbiolinksGUI"),"server"))
source(file.path(server.path, "getmolecular.R"), local = TRUE)$value
source(file.path(server.path, "getsubtype.R"), local = TRUE)$value
source(file.path(server.path, "getmutation.R"), local = TRUE)$value
source(file.path(server.path, "getclinical.R"), local = TRUE)$value
source(file.path(server.path, "dnametidat.R"), local = TRUE)$value
source(file.path(server.path, "survival.R"), local = TRUE)$value
source(file.path(server.path, "volcano.R"), local = TRUE)$value
source(file.path(server.path, "heatmap.R"), local = TRUE)$value
source(file.path(server.path, "dmr.R"), local = TRUE)$value
source(file.path(server.path, "meanMet.R"), local = TRUE)$value
source(file.path(server.path, "gliomaclassifier.R"), local = TRUE)$value
source(file.path(server.path, "dea.R"), local = TRUE)$value
source(file.path(server.path, "pathview.R"), local = TRUE)$value
source(file.path(server.path, "eaplot.R"), local = TRUE)$value
source(file.path(server.path, "oncoprint.R"), local = TRUE)$value
source(file.path(server.path, "maftools.R"), local = TRUE)$value
source(file.path(server.path, "starburst.R"), local = TRUE)$value
source(file.path(server.path, "elmer.R"), local = TRUE)$value
source(file.path(server.path, "manageSE.R"), local = TRUE)$value
source(file.path(server.path, "getinference.R"), local = TRUE)$value
# Directory management
# Config
# By default we create a TCGAbiolinksGUI folder in the user's home directory
dir.create(paste0(Sys.getenv("HOME"),"/TCGAbiolinksGUI"), showWarnings = FALSE)
setwd(file.path(Sys.getenv("HOME"), "TCGAbiolinksGUI"))
shinyDirChoose(input, 'workingDir',
roots=get.volumes(),
session=session,
restrictions=system.file(package='base'))
shinyjs::hide("greetbox-outer")
observe({
if(!is.null(input$test)) stopApp() # stop shiny
})
# Configuration tab
output$wd <- renderPrint({
path <- parseDirPath(get.volumes(isolate({input$workingDir})), input$workingDir)
if(identical(path, character(0))) path <- getwd()
return(path)
})
observe({
output$downloadDataBut <- downloadHandler(
filename <- function() {
as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$downloadfile)$datapath)
},
content <- function(file){
file.copy(as.character(parseFilePaths(get.volumes(isolate({input$workingDir})), input$downloadfile)$datapath),
file)
}
)
})
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
# File selection
#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=--=
observe({
shinyFileChoose(input,
'downloadfile',
roots = get.volumes(input$workingDir),
session = session,
restrictions = system.file(package='base'),
filetypes = c('csv', 'rda'))
})
hide("loading-content", TRUE, "fade")
}
|
#### This code sums inpatient visits for DOC patients
#### Date: September 28, 2014
#### Author: Mark Dakkak
Inpatient_Sum <- function(Inpatient_Clean_Table){
## Load packages
require(data.table)
## Pull subset
Inpatient_Clean_Table <- Inpatient_Clean_Table[, list(Patient.Identifier, ED_Visit, Hosp_Admission, Total.Hospital.LOS.in.days)]
## Create frequency table
inpatientvisits_freq <- Inpatient_Clean_Table[, lapply(.SD,sum), by = Patient.Identifier]
## Convert class of patient identifier
inpatientvisits_freq$Patient.Identifier <- as.character(inpatientvisits_freq$Patient.Identifier)
## Return table
inpatientvisits_freq
} | /PDF_Reports/Inpatient_Sum.R | no_license | marksendak/AMIA_SDC_2015 | R | false | false | 748 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geoCoord.R
\name{geocode}
\alias{geocode}
\title{Geocode}
\usage{
geocode(address, api = c("google", "baidu"), key = "", ocs = c("WGS-84",
"GCJ-02", "BD-09"), output = c("latlng", "latlngc", "latlnga"),
messaging = FALSE, time = 0)
}
\arguments{
\item{address}{a character vector specifying a location of interest (e.g.,
"Tsinghua Univeristy").}
\item{api}{use Google or Baidu Maps API. When using Baidu Maps API, the address must be in Chinese.}
\item{key}{an api key must be provided when calling baidu maps api.
While it's unnecessary for calling google maps api.}
\item{ocs}{output coordinate systems including WGS-84, GCJ-02 and BD-09, which
are the GCSs of Google Earth, Google Map in China and Baidu Map, respectively.
For addresses outside of China, ocs is automatically set to 'WGS-84' and other values
are ignored.}
\item{output}{lat/lng coordinates, or lat/lng coordinates with location type (Google Map) | confidence
(Baidu Map), or lat/lng coordinates with formatted address and address components (only available for the Google Map API).}
\item{messaging}{turn messaging on/off. The default value is FALSE.}
\item{time}{the time interval to geocode, in seconds. Default value is zero.
When you geocode multiple addresses, set a proper time interval to avoid
exceeding usage limits. For details see
\url{https://developers.google.com/maps/documentation/business/articles/usage_limits}}
}
\value{
a data.frame with variables lat/lng or lat/lng/conf
}
\description{
geocodes an address using Google or Baidu Maps API. Note that in most cases by
using this function you are agreeing to the Google Maps API Terms of Service
at \url{https://developers.google.com/maps/terms} or the Baidu Maps API Terms
of Use at \url{http://developer.baidu.com/map/law.htm}.
}
\details{
Note that the Google Maps API is limited to 2,500 queries a day.
}
\section{GIS}{
}
\examples{
\dontrun{
geocode('Tsinghua University', api = 'google', ocs = 'GCJ-02')
geocode('Tsinghua University', api = 'google', ocs = 'WGS-84',
messaging = TRUE)
geocode('Beijing railway station', api = 'google', ocs = 'WGS-84',
output = 'latlngc')
geocode('Beijing railway station', api = 'google', ocs = 'WGS-84',
output = 'latlnga')
geocode(c('Tsinghua University', 'Beijing railway station'), api = 'google',
ocs = 'GCJ-02')
geocode(c('Tsinghua University', 'Beijing railway station'), api = 'google',
ocs = 'WGS-84', output = 'latlngc', messaging = TRUE)
geocode(c('Tsinghua University', 'Beijing railway station'), api = 'google',
ocs = 'WGS-84', output = 'latlnga', messaging = TRUE)
geocode(c('Tsinghua University', 'Beijing railway station'), api = 'google',
ocs = 'WGS-84', output = 'latlngc', messaging = TRUE, time = 2)
geocode('Beijing railway station', api = 'baidu', key = 'your baidu maps api key',
ocs = 'BD-09')
geocode('Beijing railway station', api = 'baidu', key = 'your baidu maps api key',
ocs = 'GCJ-02', messaging = TRUE)
geocode('Beijing railway station', api = 'baidu', key = 'your baidu maps api key',
ocs = 'BD-09', output = 'latlngc')
geocode(c('Tsinghua University', 'Beijing railway station'), api = 'baidu',
key = 'your baidu maps api key', ocs = 'BD-09')
geocode(c('Tsinghua University', 'Beijing railway station'), api = 'baidu',
key = 'your baidu maps api key', ocs = 'WGS-84', output = 'latlngc')
}
}
\author{
Jun Cai (\email{cai-j12@mails.tsinghua.edu.cn}), PhD student from
Center for Earth System Science, Tsinghua University
}
\seealso{
\code{\link{revgeocode}}, \code{\link{geohost}}.
Google Maps API at \url{http://code.google.com/apis/maps/documentation/geocoding/}
and Baidu Maps API at \url{http://developer.baidu.com/map/webservice-geocoding.htm}
}
| /man/geocode.Rd | permissive | land23/recharts | R | false | true | 3,793 | rd |
#' Parallelized Monte Carlo simulation using \code{doParallel} package.
#'
#' Equivalent to \code{wtc.sig}
#'
#' @examples
#' # Not run: library(foreach)
#' # library(doParallel)
#' # cl <- makeCluster(4, outfile="") # number of cores. Notice 'outfile'
#' # registerDoParallel(cl)
#' # wtc_sig_parallel(your parameters go here)
#' # stopCluster(cl)
#'
#' @inheritParams wtc.sig
#' @export
#' @import foreach
wtc_sig_parallel <- function(nrands = 300, lag1, dt, ntimesteps, pad = TRUE,
dj = 1 / 12, s0, J1, max.scale = NULL,
mother = "morlet", sig.level = 0.95, quiet = TRUE) {
if (nrands < 1) {
return(NA)
}
mr1 <- get_minroots(lag1[1])
mr2 <- get_minroots(lag1[2])
ntseq <- seq_len(ntimesteps)
d1 <- cbind(ntseq, ar1_ma0_sim(mr1, lag1[1], ntimesteps))
wt1 <- wt(d = d1, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, do.sig = FALSE)
s.inv <- 1 / t(wt1$scale)
s.inv <- matrix(rep(s.inv, ntimesteps), nrow = NROW(wt1$wave))
NUMC <- getDoParWorkers()
CLEN <- ceiling(nrands / NUMC)
if (!quiet) {
prog.bar <- txtProgressBar(min = 0, max = NUMC, style = 3)
}
CID <- NULL # this is only necessary for R CMD check --as-cran
rand.rsq <- foreach(CID = seq_len(NUMC),
.final = function(x) {
array(unlist(x),
dim = c(NROW(wt1$wave), NCOL(wt1$wave), nrands))
}) %dopar% {
if (CID == NUMC) {
CLEN <- nrands - CLEN * (NUMC - 1)
}
out <- vector("list", CLEN)
for (r in seq_len(CLEN)) {
# Generate time series
d1 <- cbind(ntseq, ar1_ma0_sim(mr1, lag1[1], ntimesteps))
d2 <- cbind(ntseq, ar1_ma0_sim(mr2, lag1[2], ntimesteps))
# Wavelet transforms
wt1 <- wt(d = d1, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, do.sig = FALSE)
wt2 <- wt(d = d2, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
max.scale = max.scale, mother = mother, do.sig = FALSE)
# Smoothed cross wavelet transform
smooth.CW <- smooth.wavelet(s.inv * wt1$wave * Conj(wt2$wave),
dt, dj, wt1$scale)
sw1 <- smooth.wavelet(s.inv * (abs(wt1$wave) ^ 2), dt, dj, wt1$scale)
sw2 <- smooth.wavelet(s.inv * (abs(wt2$wave) ^ 2), dt, dj, wt2$scale)
#rand.rsq[, , r] <- abs(smooth.CW) ^ 2 / (sw1 * sw2)
out[[r]] <- abs(smooth.CW) ^ 2 / (sw1 * sw2)
}
# returned to the combine function
return(out)
}
# TODO: the progressbar should be implemented properly
if (!quiet) {
setTxtProgressBar(prog.bar, NUMC)
}
if (!quiet) {
close(prog.bar)
}
# The original slow implementation was using "apply" and "quantile" functions
# apply(rand.rsq, MARGIN = c(1,2), quantile, sig.level, na.rm = TRUE)
# This has been replaced with a C++ implementation taken from WGCNA package
j <- NULL # this is only necessary for R CMD check --as-cran
foreach(j = seq_len(ncol(rand.rsq)), .combine = cbind) %dopar% {
# TODO: can be faster if we remove as.matrix()
rcpp_row_quantile(as.matrix(rand.rsq[,j,]), sig.level)
}
}
| /R/wtc_sig_parallel.R | no_license | Tavpritesh/biwavelet | R | false | false | 3,232 | r |
# nolint start
# Create the data
data <- Data(
x = c(0.1, 0.5, 1.5, 3, 6, 10, 10, 10),
y = c(0, 0, 0, 0, 0, 0, 1, 0),
cohort = c(0, 1, 2, 3, 4, 5, 5, 5),
doseGrid =
c(
0.1, 0.5, 1.5, 3, 6,
seq(from = 10, to = 80, by = 2)
)
)
# Initialize the CRM model used to model the data
model <- LogisticLogNormal(
mean = c(-0.85, 1),
cov =
matrix(c(1, -0.5, -0.5, 1),
nrow = 2
),
ref_dose = 56
)
# Set-up some MCMC parameters and generate samples from the posterior
options <- McmcOptions(
burnin = 100,
step = 2,
samples = 2000
)
set.seed(94)
samples <- mcmc(data, model, options)
# Define the rule for dose increments and calculate the maximum dose allowed
myIncrements <- IncrementsRelative(
intervals = c(0, 20),
increments = c(1, 0.33)
)
nextMaxDose <- maxDose(myIncrements,
data = data
)
# Define the rule which will be used to select the next best dose
# based on the class 'NextBestNCRM'
myNextBest <- NextBestNCRM(
target = c(0.2, 0.35),
overdose = c(0.35, 1),
max_overdose_prob = 0.25
)
# Calculate the next best dose
doseRecommendation <- nextBest(myNextBest,
doselimit = nextMaxDose,
samples = samples, model = model, data = data
)
# Rule for having cohort of size 1 for doses <30
# and having cohort of size 3 for doses >=30
mySize1 <- CohortSizeRange(
intervals = c(0, 30),
cohort_size = c(1, 3)
)
# Rule for having cohort of size 1 until no DLT were observed
# and having cohort of size 3 as soon as 1 DLT is observed
mySize2 <- CohortSizeDLT(
intervals = c(0, 1),
cohort_size = c(1, 3)
)
# Combining the two rules for cohort size by taking the minimum of the sample sizes
# of the single rules
mySize <- minSize(mySize1, mySize2)
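# (If the maximum were wanted instead, crmPack also provides a maxSize()
# combination rule, assuming the installed version exports it.)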
# Determine the cohort size for the next cohort
size(mySize, dose = doseRecommendation$value, data = data)
# nolint end
| /examples/Rules-method-size-CohortSizeMin.R | no_license | Roche/crmPack | R | false | false | 1,854 | r |
seqelatex <- function(seqe){
xx <- gsub("-([0-9.]+)-?", " \\\\xrightarrow{\\1} ", as.character(seqe))
xx <- gsub("-", " \\\\rightarrow ", xx)
xx <- gsub("\\(([^\\)]+)\\)", "(\\\\mbox{\\1})", xx)
return(paste("$",xx, "$", sep=""))
}
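# Illustrative call (assuming an event sequence whose character form is
# "(A)-2-(B)"): seqelatex() returns "$(\mbox{A}) \xrightarrow{2} (\mbox{B})$"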
seqelatex.small <- function(seqe){
xx <- gsub("-([0-9.]+)-?", " \\\\xrightarrow{\\1} ", as.character(seqe))
xx <- gsub("-", " \\\\rightarrow ", xx)
xx <- gsub("\\(([^\\)]+)\\)", "(\\\\mbox{\\1})", xx)
return(paste("{\\small $",xx, "$}", sep=""))
}
seqelatex.script <- function(seqe){
xx <- gsub("-([0-9.]+)-?", " \\\\xrightarrow{\\1} ", as.character(seqe))
xx <- gsub("-", " \\\\rightarrow ", xx)
xx <- gsub("\\(([^\\)]+)\\)", "(\\\\mbox{\\1})", xx)
return(paste("{\\scriptsize $",xx, "$}", sep=""))
}
##plotsubseqelist<-function(x, freq=NULL, cex=1,file="tplot.tex", standAlone =TRUE, ...){
## tikz(file=file, standAlone = standAlone)
## slegend <- seqelatex(x$subseq)
## if(is.null(freq)) {
## freq<-x$data[,1]
## }
## barpos <- barplot(freq, names.arg=c(""), ...)
## text(x=barpos, y=0.02, labels=slegend, srt=90, adj=c(0,0.5), cex=cex)
## dev.off()
##}
| /TraMineRextras/R/eventLatex-gr.R | no_license | ingted/R-Examples | R | false | false | 1,140 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDiffContours.R
\name{plotDiffContours}
\alias{plotDiffContours}
\title{Plots the difference between two years from a contour plot created by plotContours}
\usage{
plotDiffContours(eList, year0, year1, qBottom = NA, qTop = NA,
maxDiff = NA, whatSurface = 3, tcl = 0.1, qUnit = 2, span = 60,
pval = 0.05, printTitle = TRUE, plotPercent = FALSE, vert1 = NA,
vert2 = NA, horiz = NA, flowDuration = TRUE, yTicks = NA,
tick.lwd = 2, lwd = 1, cex.main = 0.95, cex.axis = 1,
customPar = FALSE, color.palette = colorRampPalette(c("blue", "white",
"red")), ...)
}
\arguments{
\item{eList}{named list with at least the Daily and INFO dataframes, and surfaces matrix}
\item{year0}{numeric value for the calendar year that is the first year of the pair of years for the analysis, should be a whole number}
\item{year1}{numeric value for the calendar year that is the second year of the pair of years for the analysis, should be a whole number}
\item{qBottom}{numeric value for the bottom edge of the graph, expressed in the units of discharge that are being used (as specified in qUnit). NA will choose a "pretty" lower limit nearest to the 5\% of discharge. If yTicks are specified, then the first value of yTicks becomes the lowest discharge shown on the figure.}
\item{qTop}{numeric value for the top edge of the graph, expressed in the units of discharge that are being used (as specified in qUnit). NA will choose a "pretty" upper limit nearest to the 95\% of discharge. If yTicks are specified, then the last value of yTicks becomes the highest discharge shown on the figure.}
\item{maxDiff}{numeric value which is the absolute value of the largest change in concentration that will be shown on the figure. Alternatively,
a vector with the minimum and maximum values in the change in concentration scale. If NA, the scale will be set from 5\% to 95\% of the concentration difference.}
\item{whatSurface}{numeric value, can only accept 1, 2, or 3; whatSurface=1 is yHat (log concentration), whatSurface=2 is SE (standard error of log concentration), and whatSurface=3 is ConcHat (unbiased estimate of concentration), default = 3}
\item{tcl}{numeric, length of tick marks in inches, default is 0.1}
\item{qUnit}{object of qUnit class. \code{\link{printqUnitCheatSheet}}, or numeric represented the short code, or character representing the descriptive name.}
\item{span}{numeric, it is the half-width (in days) of the smoothing window for computing the flow duration information, default = 60}
\item{pval}{numeric, the probability value for the lower flow frequency line on the graph}
\item{printTitle}{logical variable if TRUE title is printed, if FALSE not printed}
\item{plotPercent}{logical. If TRUE, plots percent difference, if FALSE, plots absolute differences. Defaults to FALSE.}
\item{vert1}{numeric, the location in time for a black vertical line on the figure, yearStart < vert1 < yearEnd, default is NA (vertical line is not drawn)}
\item{vert2}{numeric, the location in time for a black vertical line on the figure, yearStart < vert2 < yearEnd, default is NA (vertical line is not drawn)}
\item{horiz}{numeric, the location in discharge for a black horizontal line on the figure, qBottom < horiz < qTop, default is NA (no horizontal line is drawn)}
\item{flowDuration}{logical variable if TRUE plot the flow duration lines (5 and 95 flow percentiles), if FALSE do not plot them, default = TRUE}
\item{yTicks}{vector of yTick labels and marks that will be plotted in log space. (for example yTicks = c(3, 5, 10, 20, 50, 100, 200, 400). The first and last values determine the range of the y axis. If NA, the tick marks will be automatically generated.}
\item{tick.lwd}{line width for axis ticks, default is 2}
\item{lwd}{numeric, line width of flowDuration curve, default is 1}
\item{cex.main}{magnification to be used for main titles relative to the current setting of cex}
\item{cex.axis}{magnification to be used for axis annotation relative to the current setting of cex}
\item{customPar}{logical defaults to FALSE. If TRUE, par() should be set by user before calling this function
(for example, adjusting margins with par(mar=c(5,5,5,5))). If customPar FALSE, EGRET chooses the best margins.}
\item{color.palette}{a function that creates a color palette for the contour plot. Default goes from blue to white to red
using the function \code{colorRampPalette(c("blue","white","red"))}. A few preset options are heat.colors, topo.colors, and terrain.colors.}
\item{\dots}{arbitrary functions sent to the generic plotting function. See ?par for details on possible parameters}
}
\description{
These plots are normally used for plotting changes in the estimated concentration surface (whatSurface=3) but can be used to explore the
changes in estimated surfaces for the log of concentration or for the standard error (in log space) which is what determines the bias correction.
Although there are a lot of optional arguments to this function, most are set to a logical default.
Data come from named list, which contains a Sample dataframe with the sample data,
a Daily dataframe with the daily flow data,
and an INFO dataframe with metadata.
}
\examples{
year0<-2001
year1<-2009
qBottom<-0.33
qTop<-22
maxDiff<-0.5
eList <- Choptank_eList
plotDiffContours(eList, year0,year1)
plotDiffContours(eList, year0,year1,maxDiff=maxDiff)
plotDiffContours(eList, year0,year1,qBottom,qTop,maxDiff)
yTicksModified <- c(.1,1,10,25)
plotDiffContours(eList, year0, year1,qBottom,qTop,maxDiff,
yTicks=yTicksModified,flowDuration=FALSE)
colors <-colorRampPalette(c("blue","white","red"))
plotDiffContours(eList, year0,year1,qBottom,qTop,maxDiff,
color.palette=colors,flowDuration=FALSE)
colors2 <- heat.colors # Some other options: topo.colors, terrain.colors, cm.colors
plotDiffContours(eList, year0,year1,qBottom,qTop,maxDiff,
lwd=2,color.palette=colors2,flowDuration=FALSE)
plotDiffContours(eList, year0,year1,qBottom,qTop,maxDiff,cex.lab=2,flowDuration=FALSE)
par(mar=c(5,8,5,8))
plotDiffContours(eList, year0,year1,qBottom,qTop,maxDiff,
customPar=TRUE,flowDuration=FALSE)
}
\keyword{graphics}
\keyword{statistics}
\keyword{water-quality}
| /man/plotDiffContours.Rd | permissive | billpine/EGRET | R | false | true | 6,281 | rd |
library(mapdeck)
library(dplyr)
library(geosphere)
source("load_data.R")
key = key_data
trip_data_head <- as.data.frame(head(trip_data,200000))
mapdeck(token = key, style = mapdeck_style('dark'), pitch = 35) %>%
add_heatmap(
data = trip_data,
lat = "start_station_latitude",
lon = "start_station_longitude",
, layer_id = "grid_layer"
)
trip_data_with_distance <-
trip_data %>%
mutate(distance = purrr::pmap_dbl(list(start_station_longitude, start_station_latitude,
end_station_longitude, end_station_latitude),
function(a,b,c,d) {
distm(c(a,b),c(c,d))
}))
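# Note: geosphere::distm uses the Haversine formula by default, so the
# distance column above is in metres.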
feather::write_feather(trip_data_with_distance,"data/trip_data.feather")
grouped_data <-
trip_data %>%
group_by(start_station_id,end_station_id) %>%
count() %>%
inner_join(trip_data %>%
distinct(start_station_id,end_station_id,start_station_latitude,start_station_longitude,end_station_latitude,end_station_longitude) %>%
select(start_station_id,end_station_id,start_station_latitude,start_station_longitude,end_station_latitude,end_station_longitude))
| /sample_map.R | no_license | nipper/blue_bikes | R | false | false | 1,239 | r |
# Loads RDS
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
# Samples data for testing
NEIsample <- NEI[sample(nrow(NEI), size = 1000, replace = F), ]
# Aggregates
Emissions <- aggregate(NEI[, 'Emissions'], by = list(NEI$year), FUN = sum)
Emissions$PM <- round(Emissions[, 2] / 1000, 2)
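# NEI emissions are reported in tons; dividing by 1000 converts to the
# kilotons shown on the y-axis below.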
png(filename = "Plot1.png")
barplot(Emissions$PM, names.arg = Emissions$Group.1, main = expression('Total Emission of PM'[2.5]), xlab = 'Year', ylab = expression(paste('PM', ''[2.5], ' in Kilotons')))
dev.off()
| /Plot1.R | no_license | ngarora/PmEmissionData_Plotting | R | false | false | 550 | r |
|
# object for prediction result
library(tidyverse)
RFres = R6::R6Class("RFres",public = list(
selectedMeasure = c("Accuracy","Neg Pred Value","Precision","Sensitivity","Specificity"),
ifolder = NULL,
files = NULL,
tks = NULL,
tmp = NULL,
dfres = NULL,
dfplot = NULL,
threshod = NULL,
initialize = function(inputFolder,keyword){
self$ifolder = inputFolder
self$files = list.files(inputFolder,keyword)
self$tks = self$files %>% str_extract("(?<=_).*(?=\\.)")
self$threshod = self$tks[1] %>% str_extract("(?<=_).*")
},
showAllMeasure = function(){
df = read_rds(paste0(self$ifolder,self$files[1]))
measure = df %>% pull(measure) %>% unique()
print(measure)
invisible(self)
},
setMeasure = function(measures){
self$selectedMeasure = measures
},
avgPerToken = function(tkfile = self$files[7],tkname = tks[7]){
df = read_rds(paste0(self$ifolder,tkfile))
dfavg = df %>% group_by(horizon,modelType,measure) %>%
summarise(avg = mean(value, na.rm = TRUE)) %>% ungroup()
dfavg = dfavg %>% filter(measure %in% self$selectedMeasure)
dfavg$tkname = tkname
return(dfavg)
},
avgAllToken = function(){
self$dfres = map2_dfr(self$files,self$tks,self$avgPerToken)
tmp = self$dfres %>% group_by(measure,horizon,modelType) %>% summarise(value = mean(avg,na.rm = TRUE)) %>% ungroup()
self$dfplot = tmp %>% mutate(horizon = str_remove(horizon,"flag") %>% as.factor(),modelType = as.factor(modelType))
invisible(self)
},
plotPerMeasure = function(inputMeasure = "Accuracy",plotting = FALSE){
dfplot = self$dfplot %>% filter(measure == inputMeasure)
yrange = range(dfplot$value)
g = ggplot(data = dfplot,aes(x=horizon,y=value,fill = modelType)) + geom_bar(colour="black", position="dodge",stat = "identity") +
coord_cartesian(ylim=c(floor(yrange[1]*10)/10,ceiling(yrange[2]*10)/10)) +
scale_fill_brewer() + xlab("Prediction horizon (unit: day)") + ylab(inputMeasure)+
theme_minimal() + theme(text = element_text(size=22),legend.position="bottom")
# theme(legend.position="bottom",legend.text = element_text(size=12),axis.text=element_text(size=12),axis.title=element_text(size=12,face="bold"))
if(plotting){
print(g)
}
return(g)
},
outputPlots = function(outputFolder,prefix = "6",plotting = FALSE){
dir.create(outputFolder,showWarnings = FALSE)
walk(self$selectedMeasure,function(j){
g = self$plotPerMeasure(j,plotting)
gfile = paste0(outputFolder,prefix,str_remove(j," "),"_",self$threshod,".png")
ggsave(gfile,g,width=8,height = 6)
})
invisible(self)
}
))
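# Hypothetical usage sketch (folder and keyword are assumptions):
# res <- RFres$new(inputFolder = "results/", keyword = "pred")
# res$avgAllToken()$outputPlots(outputFolder = "plots/")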
| /ob_RFres.R | no_license | yitao416/EthereumCurves | R | false | false | 2,675 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class.Constructors.R
\name{make.region}
\alias{make.region}
\title{Creates a Region object}
\usage{
make.region(
region.name = "region",
strata.name = character(0),
units = character(0),
shape = NULL,
dist.for.win = FALSE
)
}
\arguments{
\item{region.name}{the region name}
\item{strata.name}{the strata names (character vector, same length as the
number of areas in the shapefile / sf object). If not supplied "A", "B",
"C", ... will be assigned. The strata names should be provided in the
order they appear in the shapefile. See details.}
\item{units}{measurement units; either \code{"m"} for metres or \code{"km"} for
kilometres. If the shapefile has a projection file associated with it the units
will be taken from there.}
\item{shape}{shapefile path to .shp file or an sf object of class sf, sfc or sfg.}
\item{dist.for.win}{logical indicating if the region is being created via
Distance for Windows (default = FALSE). See details.}
}
\value{
object of class Region
}
\description{
This creates an instance of the Region class which defines the study
area for the survey.
}
\details{
The strata names should be provided in the order the strata are
presented in the shapefile or sf shape object. This can be simply checked
after creating the region by plotting it and checking that the key correctly
identifies the strata. Note that the order Distance for Windows displays the
strata in sometimes differs from the order in which they are stored in the
shapefile. If running from Distance for Windows then this will be checked
and if they don't match a warning will be displayed saying that they are
being re-ordered.
}
\examples{
# A basic study rectangular study region
region <- make.region()
plot(region)
#Load the region from a projected shapefile
shapefile.name <- system.file("extdata", "TrackExample.shp", package = "dssd")
region <- make.region(region.name = "study area",
shape = shapefile.name)
plot(region)
#Load a multi strata unprojected shapefile
shapefile.name <- system.file("extdata", "AreaRStrata.shp", package = "dssd")
# Need to load shapefile first as it is not projected
sf.shape <- sf::read_sf(shapefile.name)
# Check current coordinate reference system
sf::st_crs(sf.shape)
# Define a European Albers Equal Area projection
proj4string <- "+proj=aea +lat_1=43 +lat_2=62 +lat_0=30 +lon_0=-9 +x_0=0 +
y_0=0 +ellps=intl +units=km"
# Project the study area on to a flat plane
projected.shape <- sf::st_transform(sf.shape, crs = proj4string)
# Create region with default strata names
region <- make.region(region.name = "study area",
shape = projected.shape)
# By plotting the region we can verify the order of the strata
plot(region)
}
\author{
Laura Marshall
}
| /man/make.region.Rd | no_license | cran/dssd | R | false | true | 2,924 | rd |
rstudioapi_call <- function(action, ...) {
request_response("rstudioapi", action = action, args = list(...))
}
rstudioapi_patch_hook <- function(api_env) {
patch_rstudioapi_fn <-
function(old, new) {
if (namespace_has(old, "rstudioapi")) {
assignInNamespace(
x = old,
value = new,
ns = "rstudioapi"
)
}
}
## make assignments to functions found in api_env namespace
## that have function with the same name in {rstudioapi} namespace
api_list <- as.list(api_env)
mapply(
patch_rstudioapi_fn,
names(api_list),
api_list
)
}
make_rs_range <- function(vsc_selection) {
# vscode positions are zero indexed
# rstudioapi is one indexed
rstudioapi::document_range(
start = rstudioapi::document_position(
row = vsc_selection$start$line + 1,
column = vsc_selection$start$character + 1
),
end = rstudioapi::document_position(
row = vsc_selection$end$line + 1,
column = vsc_selection$end$character + 1
)
)
}
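# For example, a VS Code selection from line 0, character 0 to line 0,
# character 5 maps to the rstudioapi range (1, 1) - (1, 6).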
extract_document_ranges <- function(vsc_selections) {
lapply(vsc_selections, make_rs_range)
}
to_content_lines <- function(contents, ranges) {
content_lines <- strsplit(contents, "\n")[[1]]
# edge case handling: The cursor is at the start of a new empty line,
# and that line is the final line.
range_end_row <- unlist(lapply(ranges, function(range) range$end["row"]))
last_row <- max(range_end_row)
if (last_row == length(content_lines) + 1) {
content_lines <- c(content_lines, "")
}
content_lines
}
extract_range_text <- function(range, content_lines) {
if (!range_has_text(range)) {
return("")
}
content_rows <-
content_lines[(range$start["row"]):(range$end["row"])]
content_rows[length(content_rows)] <-
substring(
content_rows[length(content_rows)],
1,
range$end["column"] - 1
# it's a minus 1 here because the selection end point is the number
# of the first unselected column. I.e. range 1 - 2 is all of
# columns >= 1 and < 2, which is column 1.
)
content_rows[1] <-
substring(
content_rows[1],
range$start["column"]
)
paste0(content_rows, collapse = "\n")
}
range_has_text <- function(range) {
(range$end["row"] - range$start["row"]) +
(range$end["column"] - range$start["column"]) > 0
}
make_rs_document_selection <- function(ranges, range_texts) {
selection_data <-
mapply(
function(range, text) {
list(
range = range,
text = text
)
},
ranges,
range_texts,
SIMPLIFY = FALSE
)
structure(selection_data,
class = "document_selection"
)
}
make_rs_document_context <-
function(vsc_editor_context) {
document_ranges <-
extract_document_ranges(vsc_editor_context$selection)
content_lines <-
to_content_lines(vsc_editor_context$contents, document_ranges)
document_range_texts <-
lapply(
document_ranges,
extract_range_text,
content_lines
)
document_selection <-
make_rs_document_selection(
document_ranges,
document_range_texts
)
structure(list(
id = vsc_editor_context$id$external,
path = vsc_editor_context$path,
contents = content_lines,
selections = document_selection
),
class = "document_context"
)
}
is_positionable <- function(p) is.numeric(p) && length(p) == 2
is_rangable <- function(r) is.numeric(r) && length(r) == 4
normalise_pos_or_range_arg <- function(location) {
# This is necessary due to the loose constraints of the location argument
# in rstudioapi::insertText and rstudioapi::modifyRange. These
# functions can take single vectors coerable to postition or range OR a
# list where each element may be a location, range, or a vector coercable
# to such. I prefer to normalise the argument to a list of either formal
# positions or ranges.
if (rstudioapi::is.document_position(location)) {
list(location)
} else if (is_positionable(location)) {
list(rstudioapi::as.document_position(location))
} else if (rstudioapi::is.document_range(location)) {
list(location)
} else if (is_rangable(location)) {
list(rstudioapi::as.document_range(location))
} else if (is.list(location)) {
lapply(
location,
function(a_location) {
if (rstudioapi::is.document_position(a_location) ||
rstudioapi::is.document_range(a_location)) {
a_location
} else if (is_positionable(a_location)) {
rstudioapi::as.document_position(a_location)
} else if (is_rangable((a_location))) {
rstudioapi::as.document_range(a_location)
} else {
stop(
"object in location list was not a",
" document_position or document_range"
)
}
}
)
} else {
stop("location object was not a document_position or document_range")
}
}
normalise_text_arg <- function(text, location_length) {
if (length(text) == location_length) {
text
} else if (length(text) == 1 && location_length > 1) {
rep(text, location_length)
} else {
stop(
"text vector needs to be of length 1 or",
" the same length as location list"
)
}
}
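# Illustrative behaviour:
# normalise_text_arg("x", 3) returns c("x", "x", "x")
# normalise_text_arg(c("a", "b"), 2) returns c("a", "b")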
update_addin_registry <- function(addin_registry) {
pkgs <- .packages(all.available = TRUE)
addin_files <- vapply(pkgs, function(pkg) {
system.file("rstudio/addins.dcf", package = pkg)
}, character(1L))
addin_files <- addin_files[file.exists(addin_files)]
addin_descriptions <-
mapply(
function(package, package_dcf) {
addin_description <-
as.data.frame(read.dcf(package_dcf),
stringsAsFactors = FALSE
)
addin_description$package <- package
names(addin_description) <- c(
"name",
"description",
"binding",
"interactive",
"package"
)
addin_description
},
names(addin_files),
addin_files,
SIMPLIFY = FALSE
)
addin_descriptions_flat <-
do.call(
function(...) rbind(..., make.row.names = FALSE),
addin_descriptions
)
jsonlite::write_json(addin_descriptions_flat, addin_registry, pretty = TRUE)
}
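# Hypothetical call (the registry path is an assumption):
# update_addin_registry(file.path(tempdir(), "addins.json"))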
namespace_has <- function(obj, namespace) {
attempt <- try(getFromNamespace(obj, namespace), silent = TRUE)
!inherits(attempt, "try-error")
}
| /R/rstudioapi_util.R | permissive | dcangst/vscode-R | R | false | false | 7,345 | r | rstudioapi_call <- function(action, ...) {
request_response("rstudioapi", action = action, args = list(...))
}
rstudioapi_patch_hook <- function(api_env) {
patch_rstudioapi_fn <-
function(old, new) {
if (namespace_has(old, "rstudioapi")) {
assignInNamespace(
x = old,
value = new,
ns = "rstudioapi"
)
}
}
## make assignments to functions found in api_env namespace
## that have function with the same name in {rstudioapi} namespace
api_list <- as.list(api_env)
mapply(
patch_rstudioapi_fn,
names(api_list),
api_list
)
}
make_rs_range <- function(vsc_selection) {
# vscode positions are zero indexed
# rstudioapi is one indexed
rstudioapi::document_range(
start = rstudioapi::document_position(
row = vsc_selection$start$line + 1,
column = vsc_selection$start$character + 1
),
end = rstudioapi::document_position(
row = vsc_selection$end$line + 1,
column = vsc_selection$end$character + 1
)
)
}
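# Illustrative sketch (not part of the original file): converting a
# zero-indexed VS Code selection into a one-indexed rstudioapi range; the
# selection values are made up. Guarded by `if (FALSE)`.
if (FALSE) {
  vsc_selection <- list(
    start = list(line = 0, character = 0),
    end   = list(line = 0, character = 5)
  )
  make_rs_range(vsc_selection)
  # -> range from document_position(1, 1) to document_position(1, 6)
}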
extract_document_ranges <- function(vsc_selections) {
lapply(vsc_selections, make_rs_range)
}
to_content_lines <- function(contents, ranges) {
content_lines <- strsplit(contents, "\n")[[1]]
# edge case handling: The cursor is at the start of a new empty line,
# and that line is the final line.
range_end_row <- unlist(lapply(ranges, function(range) range$end["row"]))
last_row <- max(range_end_row)
if (last_row == length(content_lines) + 1) {
content_lines <- c(content_lines, "")
}
content_lines
}
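# Illustrative sketch (not part of the original file): the trailing empty
# line is kept when a selection ends on a new final line. Guarded by
# `if (FALSE)`; assumes {rstudioapi} is installed.
if (FALSE) {
  rng <- rstudioapi::document_range(
    rstudioapi::document_position(3, 1),  # cursor on the empty final line
    rstudioapi::document_position(3, 1)
  )
  to_content_lines("a\nb\n", list(rng))  # -> c("a", "b", "")
}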
extract_range_text <- function(range, content_lines) {
if (!range_has_text(range)) {
return("")
}
content_rows <-
content_lines[(range$start["row"]):(range$end["row"])]
content_rows[length(content_rows)] <-
substring(
content_rows[length(content_rows)],
1,
range$end["column"] - 1
# it's a minus 1 here because the selection end point is the number
# of the first unselected column. I.e. range 1 - 2 is all of
# columns >= 1 and < 2, which is column 1.
)
content_rows[1] <-
substring(
content_rows[1],
range$start["column"]
)
paste0(content_rows, collapse = "\n")
}
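# Illustrative sketch (not part of the original file): extracting the text a
# range covers from split content lines; the end column is the first
# unselected column, as the comment above explains. Guarded by `if (FALSE)`;
# assumes {rstudioapi} is installed.
if (FALSE) {
  rng <- rstudioapi::document_range(
    rstudioapi::document_position(1, 1),
    rstudioapi::document_position(1, 6)
  )
  extract_range_text(rng, c("hello world"))  # -> "hello"
}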
range_has_text <- function(range) {
(range$end["row"] - range$start["row"]) +
(range$end["column"] - range$start["column"]) > 0
}
make_rs_document_selection <- function(ranges, range_texts) {
selection_data <-
mapply(
function(range, text) {
list(
range = range,
text = text
)
},
ranges,
range_texts,
SIMPLIFY = FALSE
)
structure(selection_data,
class = "document_selection"
)
}
make_rs_document_context <-
function(vsc_editor_context) {
document_ranges <-
extract_document_ranges(vsc_editor_context$selection)
content_lines <-
to_content_lines(vsc_editor_context$contents, document_ranges)
document_range_texts <-
lapply(
document_ranges,
extract_range_text,
content_lines
)
document_selection <-
make_rs_document_selection(
document_ranges,
document_range_texts
)
structure(list(
id = vsc_editor_context$id$external,
path = vsc_editor_context$path,
contents = content_lines,
selections = document_selection
),
class = "document_context"
)
}
is_positionable <- function(p) is.numeric(p) && length(p) == 2
is_rangable <- function(r) is.numeric(r) && length(r) == 4
normalise_pos_or_range_arg <- function(location) {
# This is necessary due to the loose constraints of the location argument
# in rstudioapi::insertText and rstudioapi::modifyRange. These
# functions can take a single vector coercible to a position or range, OR a
# list where each element may be a position, a range, or a vector coercible
# to such. I prefer to normalise the argument to a list of either formal
# positions or ranges.
if (rstudioapi::is.document_position(location)) {
list(location)
} else if (is_positionable(location)) {
list(rstudioapi::as.document_position(location))
} else if (rstudioapi::is.document_range(location)) {
list(location)
} else if (is_rangable(location)) {
list(rstudioapi::as.document_range(location))
} else if (is.list(location)) {
lapply(
location,
function(a_location) {
if (rstudioapi::is.document_position(a_location) ||
rstudioapi::is.document_range(a_location)) {
a_location
} else if (is_positionable(a_location)) {
rstudioapi::as.document_position(a_location)
} else if (is_rangable(a_location)) {
rstudioapi::as.document_range(a_location)
} else {
stop(
"object in location list was not a",
" document_position or document_range"
)
}
}
)
} else {
stop("location object was not a document_position or document_range")
}
}
normalise_text_arg <- function(text, location_length) {
if (length(text) == location_length) {
text
} else if (length(text) == 1 && location_length > 1) {
rep(text, location_length)
} else {
stop(
"text vector needs to be of length 1 or",
" the same length as location list"
)
}
}
update_addin_registry <- function(addin_registry) {
pkgs <- .packages(all.available = TRUE)
addin_files <- vapply(pkgs, function(pkg) {
system.file("rstudio/addins.dcf", package = pkg)
}, character(1L))
addin_files <- addin_files[file.exists(addin_files)]
addin_descriptions <-
mapply(
function(package, package_dcf) {
addin_description <-
as.data.frame(read.dcf(package_dcf),
stringsAsFactors = FALSE
)
addin_description$package <- package
names(addin_description) <- c(
"name",
"description",
"binding",
"interactive",
"package"
)
addin_description
},
names(addin_files),
addin_files,
SIMPLIFY = FALSE
)
addin_descriptions_flat <-
do.call(
function(...) rbind(..., make.row.names = FALSE),
addin_descriptions
)
jsonlite::write_json(addin_descriptions_flat, addin_registry, pretty = TRUE)
}
namespace_has <- function(obj, namespace) {
attempt <- try(getFromNamespace(obj, namespace), silent = TRUE)
!inherits(attempt, "try-error")
}
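# Illustrative sketch (not part of the original file): namespace_has probes
# whether an object resolves in a namespace without erroring. Guarded by
# `if (FALSE)`.
if (FALSE) {
  namespace_has("insertText", "rstudioapi")      # TRUE if {rstudioapi} is installed
  namespace_has("noSuchFunction", "rstudioapi")  # FALSE
}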
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package-rct.R
\name{rmt_write_rct}
\alias{rmt_write_rct}
\title{Write an MT3DMS Chemical Reaction Package file}
\usage{
rmt_write_rct(
rct,
file = {
cat("Please select rct file to overwrite or provide new filename ...\\n")
file.choose()
},
btn = {
cat("Please select corresponding btn file ...\\n")
rmt_read_btn(file.choose())
},
iprn = -1,
...
)
}
\arguments{
\item{rct}{an \code{RMT3DMS} rct object}
\item{file}{filename to write to; typically '*.rct'}
\item{btn}{an \code{RMT3DMS} btn object}
\item{iprn}{format code for printing arrays in the listing file; defaults to -1 (no printing)}
\item{...}{arguments passed to \code{rmti_write_array}.}
}
\value{
\code{NULL}
}
\description{
Write an MT3DMS Chemical Reaction Package file
}
\seealso{
\code{\link{rmt_read_rct}}, \code{\link{rmt_create_rct}}
}
| /man/rmt_write_rct.Rd | no_license | cneyens/RMT3DMS | R | false | true | 930 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package-rct.R
\name{rmt_write_rct}
\alias{rmt_write_rct}
\title{Write an MT3DMS Chemical Reaction Package file}
\usage{
rmt_write_rct(
rct,
file = {
cat("Please select rct file to overwrite or provide new filename ...\\n")
file.choose()
},
btn = {
cat("Please select corresponding btn file ...\\n")
rmt_read_btn(file.choose())
},
iprn = -1,
...
)
}
\arguments{
\item{rct}{an \code{RMT3DMS} rct object}
\item{file}{filename to write to; typically '*.rct'}
\item{btn}{an \code{RMT3DMS} btn object}
\item{iprn}{format code for printing arrays in the listing file; defaults to -1 (no printing)}
\item{...}{arguments passed to \code{rmti_write_array}.}
}
\value{
\code{NULL}
}
\description{
Write an MT3DMS Chemical Reaction Package file
}
\seealso{
\code{\link{rmt_read_rct}}, \code{\link{rmt_create_rct}}
}
|
#' Generate Random Logical Vector
#'
#' Generate a random logical (\code{TRUE}/\code{FALSE}) vector.
#'
#' @inheritParams r_sample_logical
#' @return Returns a random logical vector of elements.
#' @keywords logical valid true false
#' @export
#' @include utils.R r_sample_logical.R
#' @family variable functions
#' @examples
#' valid(10)
#' 100*table(valid(n <- 1000))/n
valid <- hijack(r_sample_logical,
name = "Valid"
)
| /R/valid.R | no_license | couthcommander/wakefield | R | false | false | 427 | r | #' Generate Random Logical Vector
#'
#' Generate a random logical (\code{TRUE}/\code{FALSE}) vector.
#'
#' @inheritParams r_sample_logical
#' @return Returns a random logical vector of elements.
#' @keywords logical valid true false
#' @export
#' @include utils.R r_sample_logical.R
#' @family variable functions
#' @examples
#' valid(10)
#' 100*table(valid(n <- 1000))/n
valid <- hijack(r_sample_logical,
name = "Valid"
)
|
library(downloader)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv"
filename <- basename(url)
download(url, destfile=filename)
x <- unlist( read.csv(filename) )
# make averages5
set.seed(1)
n <- 1000
averages5 <- vector("numeric",n)
for(i in 1:n){
X <- sample(x,5)
averages5[i] <- mean(X)
}
# make averages50
set.seed(1)
n <- 1000
averages50 <- vector("numeric",n)
for(i in 1:n){
X <- sample(x,50)
averages50[i] <- mean(X)
}
# X still holds the last sample of 50 drawn in the loop above
hist(X)
plot(density(x))
# proportion of the 1000 sample averages falling in (23, 25]
mean(averages50 <= 25) - mean(averages50 <= 23)
# normal approximation with mean 23.9 and sd 0.43
pnorm(25, 23.9, 0.43) - pnorm(23, 23.9, 0.43)
qqnorm(x)
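# Illustrative additions (not in the original script): draw the reference
# line on the Q-Q plot above and put the empirical and normal-approximation
# proportions side by side.
qqline(x)
c(empirical = mean(averages50 <= 25) - mean(averages50 <= 23),
  normal = pnorm(25, 23.9, 0.43) - pnorm(23, 23.9, 0.43))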
| /ProjectTraining/NormalDistributions.R | no_license | lchi91/r-training | R | false | false | 642 | r | library(downloader)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv"
filename <- basename(url)
download(url, destfile=filename)
x <- unlist( read.csv(filename) )
# make averages5
set.seed(1)
n <- 1000
averages5 <- vector("numeric",n)
for(i in 1:n){
X <- sample(x,5)
averages5[i] <- mean(X)
}
# make averages50
set.seed(1)
n <- 1000
averages50 <- vector("numeric",n)
for(i in 1:n){
X <- sample(x,50)
averages50[i] <- mean(X)
}
# X still holds the last sample of 50 drawn in the loop above
hist(X)
plot(density(x))
# proportion of the 1000 sample averages falling in (23, 25]
mean(averages50 <= 25) - mean(averages50 <= 23)
# normal approximation with mean 23.9 and sd 0.43
pnorm(25, 23.9, 0.43) - pnorm(23, 23.9, 0.43)
qqnorm(x)
|