content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
############## ICP exp in inhibitory immune cell ###############
#################### FANTOM data ###############################

# Attached packages: curl (curl_escape for sample keys), ggbeeswarm and
# magrittr (%>% pipe) for the plotting pipelines below.
library(curl)
library(ggbeeswarm)
library(magrittr)

# Project directory layout ------------------------------------------------
basic_path <- "/home/huff/project"
immune_path <- file.path(basic_path, "immune_checkpoint")
result_path <- file.path(immune_path, "result_20171025", "ICP_exp_patthern-byratio")
fantom_path <- file.path(basic_path, "data/FANTOM5/extra")
gene_list_path <- file.path(immune_path, "checkpoint/20171021_checkpoint")
# load image --------------------------------------------------------------
# Restores objects from a previous run of this script. NOTE(review): some
# objects loaded here (e.g. `my_theme`) are used below *before* they are
# re-defined later in this file — confirm the .Rdata always provides them.
load(file.path(result_path,"pattern_validation","FANTOM5.validation.Rdata"))
# load data ---------------------------------------------------------------
# FANTOM5 per-sample ICP expression (cell lines + immune cells); stromal
# cells are excluded from all downstream analyses.
ICP_fantom.gene_exp.Immune_cell.combine <-
readr::read_rds(file.path(immune_path,"genelist_data","FANTOM5","ICP_fantom.gene_exp.cell_line.Immune_cell.raw.exp.rds.gz")) %>%
dplyr::filter(Group != "Stromal Cell")
# dplyr::filter(`Characteristics[Tissue]` != "blood")
# FANTOM5 sample sheet. The `sample` key is rebuilt as the URL-escaped sample
# description joined to the source name, upper-cased, so it matches the key
# used in the expression table above; rows without a match are dropped.
# (The misspelled column name `Charateristics [description]` is as shipped in
# the FANTOM5 file.)
fantom_sample_info <- readr::read_tsv(file.path(fantom_path,"HumanSamples2.0.classification.txt")) %>%
# dplyr::filter(`Characteristics [Category]` %in% c("primary cells")) %>%
dplyr::mutate(sample = toupper(paste(curl_escape(`Charateristics [description]`),`Source Name`,sep="."))) %>%
dplyr::select(sample,`Charateristics [description]`,`Characteristics[Tissue]`,`Characteristics [Cell type]`,`Characteristics [Category]`) %>%
dplyr::filter(sample %in% unique(ICP_fantom.gene_exp.Immune_cell.combine$sample))
# ICP exp in each cell types ----------------------------------------------
# Entrez-id <-> gene-symbol mapping for the checkpoint gene list.
gene_list <- read.table(file.path(gene_list_path, "all.entrez_id-gene_id"), header = TRUE)
# Per-gene expression-site classification (immune vs tumor) with log2(I/T)
# fold change, produced by an earlier step of the pipeline.
ICP_exp_site <- readr::read_tsv(file.path(result_path,"pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv"))
# Derive the annotated gene list from the table just read instead of
# re-reading the same TSV a second time (the original read the file twice).
gene_list_exp_site <- ICP_exp_site %>%
dplyr::select(entrez_ID, symbol, Exp_site, `log2FC(I/T)`) %>%
dplyr::inner_join(gene_list, by = "symbol")
# Immune cell types treated as inhibitory (myeloid lineage); every other
# immune cell type is classified as activating below.
inhibitory_immune_cell <- c("monocyte", "neutrophil", "monoblast")
# Build the master analysis table: one row per (sample, gene) with a `Role`
# label. The ifelse() cascade below is strictly order-dependent — each step
# rewrites `Role`/`Group` based on the value left by the previous step.
ICP_fantom.gene_exp.Immune_cell.combine %>%
dplyr::inner_join(fantom_sample_info,by="sample") %>%
dplyr::mutate(`Characteristics[Tissue].x` = ifelse(is.na(`Characteristics[Tissue].x`),"notsure",`Characteristics[Tissue].x`)) %>% # cell lines tissue type, not include immune cell. Cause replaced by "PrimaryCell" in ICP_fantom.gene_exp.Immune_cell.combine
dplyr::filter(`Characteristics[Tissue].x`!="notsure") %>%
# Blood-derived samples get Group == "blood" regardless of their prior Group.
dplyr::mutate(Group = ifelse(`Characteristics[Tissue].x`=="blood","blood",Group)) %>%
# dplyr::mutate(Group = ifelse(`Characteristics [Category]`=="cell lines" & Group!="blood","Tumor Cell",Group)) %>%
# Step 1: every sample starts as an immune cell, inhibitory if its cell type
# is in `inhibitory_immune_cell`, activating otherwise.
dplyr::mutate(Role = ifelse(`Characteristics [Cell type]` %in% inhibitory_immune_cell, "Immune cells(Inhibitory)", "Immune cells(Activate)")) %>%
# Step 2: tumor-cell samples override the immune label.
dplyr::mutate(Role = ifelse(Group=="Tumor Cell","Tumor cells(non-blood)",Role)) %>%
# Step 3/4: blood samples become "Tumor cells(blood)-<role>" and are then
# renamed to "Immune cells of blood tumor(...)".
dplyr::mutate(Role = ifelse(Group=="blood",paste("Tumor cells(blood)",Role,sep="-"),Role)) %>%
dplyr::mutate(Role = ifelse(Role =="Tumor cells(blood)-Immune cells(Activate)","Immune cells of blood tumor(Activate)",Role)) %>%
dplyr::mutate(Role = ifelse(Role =="Tumor cells(blood)-Immune cells(Inhibitory)","Immune cells of blood tumor(Inhibitory)",Role)) %>%
# Attach the expression-site classification and a log2(TPM + 1) column;
# genes without a confident classification are dropped.
dplyr::inner_join(ICP_exp_site,by="symbol") %>%
dplyr::mutate(log2gene_tpm = log2(gene_tpm+1) ) %>%
dplyr::filter(Exp_site!="Not_sure") -> ready_for_analysis
# exp profile between immune and tumor, blood activate and inhibit immne cells---------------------------
ready_for_draw <- ready_for_analysis

# Order genes by expression-site class (immune-specific first, rank 5..1)
# and within a class by log2 fold change; this ordering drives the facet
# order of the boxplot below.
site_rank_map <- data.frame(
  Exp_site = c("Only_exp_on_Immune", "Mainly_exp_on_Immune",
               "Both_exp_on_Tumor_Immune", "Mainly_exp_on_Tumor",
               "Only_exp_on_Tumor"),
  rank = c(5, 4, 3, 2, 1)
)
symbol_rank <- ICP_exp_site %>%
  dplyr::inner_join(site_rank_map, by = "Exp_site") %>%
  dplyr::filter(Exp_site != "Not_sure") %>%
  dplyr::arrange(rank, `log2FC(I/T)`) %>%
  dplyr::pull(symbol)

# Re-level the symbol factor so the plot facets follow the ranked order,
# then echo the levels for a quick sanity check.
ready_for_draw$symbol <- factor(ready_for_draw$symbol, levels = unique(symbol_rank))
levels(ready_for_draw$symbol)

# One row per gene: the Exp_site class colours each facet's background via
# geom_rect(); dummy Role / log2gene_tpm values keep the aesthetics mapped.
color_bac <- ready_for_draw %>%
  dplyr::select(symbol, Exp_site) %>%
  unique()
color_bac$Role <- color_bac$log2gene_tpm <- 1
# Horizontal boxplots of log2(TPM) per Role, one facet per gene, facet
# background tinted by the gene's expression-site class.
# NOTE(review): rotate() is a ggpubr function but ggpubr is never attached
# with library() in this file — presumably it is available via the loaded
# .Rdata session or an interactive session; confirm.
ready_for_draw %>%
ggplot(aes(x=Role,y=log2gene_tpm)) +
geom_boxplot() +
rotate() +
# Full-panel background rectangle per facet, coloured by Exp_site.
geom_rect(data=color_bac,aes(fill = Exp_site),xmin = -Inf,xmax = Inf,ymin = -Inf,ymax = Inf,alpha = 0.1) +
facet_wrap(~symbol,scales = "free_x") +
ylab("log2(TPM)") +
scale_fill_manual(
# values = site_cplor,
values = c("yellow", "green","pink","blue", "red"),
# values = c("#008B00", "#00EE00", "#CD8500", "#FF4500"),
breaks = c("Only_exp_on_Immune", "Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor")
) +
theme(
panel.background = element_rect(fill = "white",colour = "black"),
axis.text.y = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.title.y = element_blank(),
# legend.position = "none",
legend.text = element_text(size = 10),
legend.title = element_text(size = 12),
legend.background = element_blank(),
legend.key = element_rect(fill = "white", colour = "black"),
plot.title = element_text(size = 20),
axis.text = element_text(colour = "black"),
strip.background = element_rect(fill = "white",colour = "black"),
strip.text = element_text(size = 10),
text = element_text(color = "black")
)
# NOTE(review): these ggsave() calls rely on last_plot(), which is only set
# when the plot above is printed (Rscript/interactive, not source()) —
# consider assigning the plot and passing it explicitly.
ggsave(file.path(result_path,"pattern_validation","1.FANTOM5.ICP_exp_in_tumor(no-blood-tissue)_Activate-Inhibitory-immune.png"),device = "png",height = 10,width = 20)
ggsave(file.path(result_path,"pattern_validation","1.FANTOM5.ICP_exp_in_tumor(no-blood-tissue)_Activate-Inhibitory-immune.pdf"),device = "pdf",height = 10,width = 20)
# tSNE --------------------------------------------------------------------
library("Rtsne")
# Wide matrix for t-SNE: one row per sample, one column per gene; collapse
# roles to immune vs solid-tumour and drop blood-tumour samples entirely.
ready_for_analysis %>%
dplyr::select(sample,gene_tpm,symbol,Group,Role) %>%
dplyr::mutate(Role = ifelse(Role %in% c("Immune cells of blood tumor(Activate)","Immune cells of blood tumor(Inhibitory)"),"Blood tumor cells",Role)) %>%
dplyr::mutate(Role = ifelse(Role %in% c("Tumor cells(non-blood)"),"Solid tumor cells",Role)) %>%
dplyr::filter(!Role %in% c("Blood tumor cells")) %>%
tidyr::spread(key="symbol",value="gene_tpm") -> data_for_tSNE
# normalization
# Columns 1:3 are the metadata columns (sample, Group, Role) left by spread();
# the remainder are the gene-expression columns fed to normalize_input().
data_for_tSNE.matx.normalize <- normalize_input(data_for_tSNE[,-c(1:3)] %>% as.matrix())
# do tSNE (exact t-SNE: theta = 0, no PCA pre-reduction)
tsne_res <- Rtsne(data_for_tSNE.matx.normalize,dims=2,pca=FALSE,theta=0.0)
# Show the objects in the 2D tsne representation (quick base-graphics check)
plot(tsne_res$Y,col=factor(data_for_tSNE$Role), asp=1)
# ggplot version of the embedding, coloured by cell type.
# NOTE(review): geom_jitter() adds random noise to the t-SNE coordinates —
# geom_point() would show the true embedding; confirm the jitter is intended.
# NOTE(review): `my_theme` is defined later in this file; at this point it
# comes from the load()ed .Rdata image — confirm.
tsne_res$Y %>%
as.data.frame() %>%
dplyr::as.tbl() %>%
# dplyr::rename("tSNE 1"="V1","tSNE 2"="V2") %>%
dplyr::mutate(sample = data_for_tSNE$sample,
Cell_type = data_for_tSNE$Role) %>%
ggplot(aes(x=V1,y= V2)) +
geom_jitter(aes(color=Cell_type),size=1) +
xlab("tSNE 1") +
ylab("tSNE 2") +
ggpubr::color_palette("jco") +
my_theme +
theme(
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()
)
ggsave(filename = file.path(result_path,"pattern_validation","1.3.FANTOME5.ICP_exp-T-I_tSNE.png"),device = "png",width = 6,height = 3)
ggsave(filename = file.path(result_path,"pattern_validation","1.3.FANTOME5.ICP_exp-T-I_tSNE.pdf"),device = "pdf",width = 6,height = 3)
# PCA analysis of blood tumor immune cells and primary immune cells -------
library("FactoMineR")
library("factoextra")
# Wide matrix: one row per sample (rowname), first column Role, remaining
# columns the per-gene TPM values.
ready_for_analysis %>%
dplyr::select(sample,Role,gene_tpm,symbol) %>%
tidyr::spread(key="symbol",value="gene_tpm") -> ready_for_PCA
ready_for_PCA.df <- as.data.frame(ready_for_PCA[,-c(1)])
rownames(ready_for_PCA.df) <- ready_for_PCA$sample
# Scaled PCA on the gene columns only (Role dropped with [,-1]).
res.pca <- PCA(ready_for_PCA.df[,-1], scale.unit = T,graph = FALSE)
# Eigenvalues / explained variance per principal component; per-variable
# loadings, cos2 and contributions.
eig.val <- get_eigenvalue(res.pca)
var <- get_pca_var(res.pca)
# PCA biplot
# http://www.sthda.com/english/articles/31-principal-component-methods-in-r-practical-guide/112-pca-principal-component-analysis-essentials/
fviz_pca_biplot(res.pca,
# Fill individuals by groups
geom.ind = "point",
pointshape = 21,
pointsize = 2.5,
labelsize = 2,
fill.ind = ready_for_PCA.df$Role,
col.ind = "black",
# Color variable by groups.
# NOTE(review): col.var must be ordered like the PCA's variable (gene)
# columns, but this vector is sorted by rank/log2FC — confirm the ordering
# actually matches, otherwise variables are coloured with the wrong class.
col.var = ICP_exp_site %>%
dplyr::inner_join(data.frame(Exp_site = c("Only_exp_on_Immune","Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor" ),
rank = c(5,4,3,2,1)), by = "Exp_site") %>%
dplyr::filter(Exp_site!="Not_sure") %>%
dplyr::arrange(rank,`log2FC(I/T)`) %>%
.$Exp_site,
# gradient.cols =c("yellow", "green","pink","blue", "red"),
legend.title = list(fill = "Sample class", color = "Exp_site"),
repel = TRUE # Avoid label overplotting
)+
ggpubr::fill_palette("npg")+ # Individual fill color
ggpubr::color_palette("jco") # Variable colors
ggsave(file.path(result_path,"pattern_validation","2.PCA_analysis_of_ICPs_in_different_cell_types.png"),device = "png",height = 6,width = 8)
ggsave(file.path(result_path,"pattern_validation","2.PCA_analysis_of_ICPs_in_different_cell_types.pdf"),device = "pdf",height = 6,width = 8)
# vertical comparison of ICPs in tumor and immune -------------------------

# Colour / rank lookup for the five expression-site classes; used by
# fn_plot_ICP_exp_in_dataset() to colour the gene-axis labels.
strip_color <- data.frame(
  Exp_site = c("Only_exp_on_Immune", "Mainly_exp_on_Immune",
               "Both_exp_on_Tumor_Immune", "Mainly_exp_on_Tumor",
               "Only_exp_on_Tumor"),
  site_cplor = c("blue", "green", "orange", "pink", "red"),
  rank = c(5, 4, 3, 2, 1)
)

# Shared ggplot theme: white panels with black borders, black text,
# boxed facet strips.
my_theme <- theme(
  panel.background = element_rect(fill = "white", colour = "black"),
  panel.grid.major = element_blank(),
  axis.text.y = element_text(size = 10, colour = "black"),
  axis.text.x = element_text(size = 10, colour = "black"),
  # legend.position = "none",
  legend.text = element_text(size = 10),
  legend.title = element_text(size = 12),
  legend.background = element_blank(),
  legend.key = element_rect(fill = "white", colour = "black"),
  plot.title = element_text(size = 20),
  axis.text = element_text(colour = "black"),
  strip.background = element_rect(fill = "white", colour = "black"),
  strip.text = element_text(size = 10),
  text = element_text(color = "black")
)
# Draw one boxplot of ICP expression per gene, genes ordered by their median
# raw expression, with gene-axis labels coloured by expression-site class.
#
# .data    : long tibble with at least columns `symbol` and `Exp` (raw TPM).
# ylab     : y-axis label (the function plots log2(Exp + 1)).
# facet    : one-sided formula given as a string, e.g. "~cell_source".
# title    : plot title.
# filename : basename (no extension); .pdf and .png are written under
#            result_path/pattern_validation.
# Uses globals: gene_list_exp_site, strip_color, my_theme, result_path.
# Returns the ggplot object invisibly.
fn_plot_ICP_exp_in_dataset <- function(.data, ylab, facet, title, filename) {
  # Rank genes by median raw expression, then attach each gene's
  # expression-site class and strip colour for the axis-label colouring.
  .symbol_rank <- .data %>%
    dplyr::group_by(symbol) %>%
    dplyr::mutate(mid = median(Exp)) %>% # median == quantile(Exp, 0.5)
    dplyr::ungroup() %>%                 # drop grouping before join/arrange
    dplyr::select(symbol, mid) %>%
    unique() %>%
    dplyr::arrange(mid) %>%
    dplyr::inner_join(gene_list_exp_site, by = "symbol") %>%
    dplyr::inner_join(strip_color, by = "Exp_site")
  .p <- .data %>%
    dplyr::mutate(Exp = log2(Exp + 1)) %>%
    ggplot(aes(x = symbol, y = Exp)) +
    geom_boxplot(outlier.colour = "grey", outlier.size = 0.5) +
    facet_wrap(as.formula(facet)) +
    rotate() +
    ggtitle(title) +
    ylab(ylab) +
    xlab("Symbol") +
    scale_x_discrete(limits = .symbol_rank$symbol) +
    my_theme +
    theme(
      axis.text.y = element_text(size = 10, colour = .symbol_rank$site_cplor),
      plot.title = element_text(size = 12)
    )
  # BUG FIX: ggsave() defaults to last_plot(), which is only updated when a
  # plot is *printed*. Inside this function the plot above was never printed,
  # so the original saved whatever plot happened to be displayed last.
  # Pass the plot explicitly.
  ggsave(file.path(result_path, "pattern_validation", paste(filename, "pdf", sep = ".")),
         plot = .p, device = "pdf", width = 4, height = 10)
  ggsave(file.path(result_path, "pattern_validation", paste(filename, "png", sep = ".")),
         plot = .p, device = "png", width = 4, height = 10)
  invisible(.p)
}
# Shared preprocessing for the three plots below: expose raw TPM as `Exp`,
# collapse Role into an immune-vs-tumour `cell_source`, and (attempt to)
# exclude blood-tumour immune samples.
# NOTE(review): the Role values filtered here were already renamed to
# "Immune cells of blood tumor(...)" when ready_for_analysis was built, so
# this filter never matches and blood-tumour immune samples fall into the
# "Tumor cell" source — confirm whether that is intended.
icp_exp_base <- ready_for_analysis %>%
  dplyr::mutate(Exp = gene_tpm) %>%
  dplyr::mutate(cell_source = ifelse(Role %in% c("Immune cells(Activate)", "Immune cells(Inhibitory)"),
                                     "Immune cell", "Tumor cell")) %>%
  dplyr::filter(!Role %in% c("Tumor cells(blood)-Immune cells(Activate)",
                             "Tumor cells(blood)-Immune cells(Inhibitory)"))

# immune and tumor cell together
icp_exp_base %>%
  fn_plot_ICP_exp_in_dataset(ylab = "log2(TPM)", facet = "~cell_source",
                             title = "FANTOM 5, ICP expression",
                             filename = "1.1.FANTOM5.ICP_exp.T-I")
# only tumor cell
icp_exp_base %>%
  dplyr::filter(cell_source == "Tumor cell") %>%
  fn_plot_ICP_exp_in_dataset(ylab = "log2(TPM)", facet = "~cell_source",
                             title = "FANTOM 5, ICP expression",
                             filename = "1.1.1.FANTOM5.ICP_exp.Tumor")
# only immune cell
icp_exp_base %>%
  dplyr::filter(cell_source == "Immune cell") %>%
  fn_plot_ICP_exp_in_dataset(ylab = "log2(TPM)", facet = "~cell_source",
                             title = "FANTOM 5, ICP expression",
                             filename = "1.1.2.FANTOM5.ICP_exp.Immune")
# correlation of ICP mean exp between immune and tumor --------------------
# One row per gene: log2 mean expression in tumour cell lines vs immune cells.
ready_for_analysis %>%
dplyr::select(symbol,mean_cell_line,mean_immune_exp,Exp_site) %>%
unique() %>%
dplyr::mutate(mean_immune_exp=log2(mean_immune_exp+1),mean_cell_line=log2(mean_cell_line+1))-> ready_for_cor
# Overall Spearman correlation; x/y place the label inside the plot area.
broom::tidy(
cor.test(ready_for_cor$mean_immune_exp,ready_for_cor$mean_cell_line,method = "spearman")
) %>%
dplyr::mutate(y=(max(ready_for_cor$mean_immune_exp)-min(ready_for_cor$mean_immune_exp))*0.75+min(ready_for_cor$mean_immune_exp),x=min(ready_for_cor$mean_cell_line)+(max(ready_for_cor$mean_cell_line)-min(ready_for_cor$mean_cell_line))*0.4) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2))) -> cor_anno
# Per-Exp_site Spearman correlations (only classes with > 5 genes).
ready_for_cor %>%
tidyr::nest(data = -Exp_site) %>% # modern tidyr syntax (was deprecated nest(-Exp_site))
dplyr::mutate(spm = purrr::map(data,.f=function(.x){
if(nrow(.x)>5){
broom::tidy(
cor.test(.x$mean_immune_exp,.x$mean_cell_line,method = "spearman")) %>%
dplyr::mutate(y=(max(.x$mean_immune_exp)-min(.x$mean_immune_exp))*0.85+min(.x$mean_immune_exp),x=cor_anno$x) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2)))
}else{
tibble::tibble()
}
})) %>%
dplyr::select(-data) %>%
tidyr::unnest(spm) -> cor_anno.specific # explicit cols (bare unnest() is deprecated)
# Scatter of immune vs tumour mean expression with per-class and overall fits.
p_mean_cor <- ready_for_cor %>%
dplyr::filter(Exp_site!="Not_sure") %>%
ggplot(aes(x=mean_cell_line,y=mean_immune_exp)) +
geom_jitter(aes(color=Exp_site),size=0.5) +
geom_smooth(se= FALSE, method = "lm", aes(color=Exp_site,group=Exp_site)) +
geom_smooth(se = FALSE, method = "lm",color= "black") +
geom_text(aes(x=x,y=y,label = label),data=cor_anno,color="black") +
geom_text(aes(x=x,y=y,label = label,color=Exp_site),data = cor_anno.specific) +
scale_color_manual(values=c("#CD661D", "#008B00", "#FF69B4", "#1874CD","#CD3333")) +
my_theme +
xlab("log2(Mean expression in tumor cells)") + # typo fix: "exppression"
ylab("log2(Mean expression in immune cells)")
p_mean_cor
# Pass the plot explicitly: ggsave()'s default last_plot() is unreliable when
# this script is source()d (top-level values are not auto-printed there).
ggsave(file.path(result_path,"pattern_validation","1.2.FANTOM5-T-I-meanExp.correlation.pdf"),plot = p_mean_cor,device = "pdf",height = 6,width = 8)
ggsave(file.path(result_path,"pattern_validation","1.2.FANTOM5-T-I-meanExp.correlation.png"),plot = p_mean_cor,device = "png",height = 6,width = 8)
# correlation of ICP rank between immune and tumor --------------------
# Rank genes by mean expression within each compartment.
# BUG FIX: the original computed Tumor_rank with rank() but Immune_rank as
# 1:nrow() after arrange(), so ties were broken differently in the two
# compartments; rank() is now used for both (arrange() no longer needed).
ready_for_cor %>%
dplyr::mutate(Tumor_rank=rank(mean_cell_line)) %>%
dplyr::mutate(Immune_rank=rank(mean_immune_exp)) -> ready_for_rank.cor
# Overall Spearman correlation of the two rank vectors, with label position.
broom::tidy(
cor.test(ready_for_rank.cor$Immune_rank,ready_for_rank.cor$Tumor_rank,method = "spearman")
) %>%
dplyr::mutate(y=(max(ready_for_rank.cor$Immune_rank)-min(ready_for_rank.cor$Immune_rank))*0.75+min(ready_for_rank.cor$Immune_rank),x=min(ready_for_rank.cor$Tumor_rank)+(max(ready_for_rank.cor$Tumor_rank)-min(ready_for_rank.cor$Tumor_rank))*0.4) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2))) -> cor_anno
# Per-Exp_site rank correlations (only classes with > 5 genes).
ready_for_rank.cor %>%
tidyr::nest(data = -Exp_site) %>% # modern tidyr syntax (was deprecated nest(-Exp_site))
dplyr::mutate(spm = purrr::map(data,.f=function(.x){
if(nrow(.x)>5){
broom::tidy(
cor.test(.x$Immune_rank,.x$Tumor_rank,method = "spearman")) %>%
dplyr::mutate(y=(max(.x$Immune_rank)-min(.x$Immune_rank))*0.85+min(.x$Immune_rank),x=cor_anno$x) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2)))
}else{
tibble::tibble()
}
})) %>%
dplyr::select(-data) %>%
tidyr::unnest(spm) -> cor_anno.specific # explicit cols (bare unnest() is deprecated)
p_rank_cor <- ready_for_rank.cor %>%
dplyr::filter(Exp_site!="Not_sure") %>%
ggplot(aes(x=Tumor_rank,y=Immune_rank)) +
geom_jitter(aes(color=Exp_site),size=0.5) +
geom_smooth(se= FALSE, method = "lm", aes(color=Exp_site,group=Exp_site)) +
geom_smooth(se = FALSE, method = "lm",color= "black") +
geom_text(aes(x=x,y=y,label = label),data=cor_anno,color="black") +
geom_text(aes(x=x,y=y,label = label,color=Exp_site),data = cor_anno.specific) +
scale_color_manual(values=c("#CD661D", "#008B00", "#FF69B4", "#1874CD","#CD3333")) +
my_theme +
# BUG FIX: the axes show ranks, not log2 mean expression — the labels were
# copied from the mean-expression plot above (which also had a typo).
xlab("Rank of mean expression in tumor cells") +
ylab("Rank of mean expression in immune cells")
p_rank_cor
# BUG FIX: the original saved to the same file names as the mean-expression
# correlation plot, silently overwriting it; use distinct names here and
# pass the plot explicitly instead of relying on last_plot().
ggsave(file.path(result_path,"pattern_validation","1.2.1.FANTOM5-T-I-rankExp.correlation.pdf"),plot = p_rank_cor,device = "pdf",height = 6,width = 8)
ggsave(file.path(result_path,"pattern_validation","1.2.1.FANTOM5-T-I-rankExp.correlation.png"),plot = p_rank_cor,device = "png",height = 6,width = 8)
# heatmap -----------------------------------------------------------------
library(ComplexHeatmap)
library(circlize)
library(dendextend)
# NOTE(review): this OVERWRITES the earlier gene_list_exp_site, and reads it
# from a hard-coded "byMeanUQ" results directory (not result_path). That
# table's Exp_site labels ("Immune cell dominate" etc.) differ from the
# by-ratio labels used above — confirm this cross-over is intentional.
gene_list_exp_site <- readr::read_tsv(file.path("/home/huff/project/immune_checkpoint/result_20171025/ICP_exp_patthern-byMeanUQ","pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv")) %>%
dplyr::select(symbol,Exp_site)
# Collapse roles and drop blood-tumour samples, as in the t-SNE section.
ready_for_analysis %>%
dplyr::select(sample,gene_tpm,symbol,Group,Role) %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::mutate(Role = ifelse(Role %in% c("Immune cells of blood tumor(Activate)","Immune cells of blood tumor(Inhibitory)"),"Blood tumor cells",Role)) %>%
dplyr::mutate(Role = ifelse(Role %in% c("Tumor cells(non-blood)"),"Solid tumor cells",Role)) %>%
dplyr::filter(!Role %in% c("Blood tumor cells")) -> ready_for_analysis_no_bloodtumor
# Samples x genes expression matrix for the heatmap.
ready_for_analysis_no_bloodtumor %>%
dplyr::select(sample,symbol,gene_tpm) %>%
dplyr::arrange(sample) %>%
tidyr::spread(key="symbol",value="gene_tpm") -> for_heatmap
for_heatmap.m <- as.matrix(for_heatmap[,-1])
rownames(for_heatmap.m) <- for_heatmap$sample
# Per-gene Exp_site annotation, converted to a named character vector
# (rownames set, then the symbol column dropped) for HeatmapAnnotation.
ready_for_analysis_no_bloodtumor %>%
dplyr::select(symbol,Exp_site) %>%
unique()%>%
dplyr::arrange(symbol) %>%
as.data.frame() -> symbol_anno
rownames(symbol_anno) <- symbol_anno[,1]
symbol_anno <- symbol_anno[,-1]
top_anno <- HeatmapAnnotation(`ICG type`=symbol_anno,
col = list(`ICG type`=c("Immune cell dominate" = "#7FFF00",
"Immune and tumor cell almost" = "#CD950C",
"Tumor cell dominate" = "#FF3030")),
height = unit(0.2, "cm"),
show_annotation_name =T)
# Quick standalone preview of the annotation.
# NOTE(review): the hard-coded 1:69 looks like the number of genes — confirm
# it matches ncol(for_heatmap.m), otherwise this preview will error.
draw(top_anno,1:69)
# Per-sample Role annotation, converted to a named character vector
# (rownames set, then the sample column dropped) for rowAnnotation.
ready_for_analysis_no_bloodtumor %>%
dplyr::select(sample,Role) %>%
unique() %>%
dplyr::arrange(sample) %>%
as.data.frame() -> sample_anno
rownames(sample_anno) <- sample_anno[,1]
sample_anno <- sample_anno[,-1]
side_anno <- rowAnnotation(`Sample group`=sample_anno,
col = list(`Sample group`=c("Immune cells(Activate)" = "#1C86EE",
"Immune cells(Inhibitory)" = "#FFB90F",
"Solid tumor cells" = "#919191")),
width = unit(0.2, "cm"),
show_annotation_name = TRUE)
# Quick standalone preview of the annotation.
# NOTE(review): the hard-coded 1:197 looks like the number of samples —
# confirm it matches nrow(for_heatmap.m).
draw(side_anno,1:197)
# z-score each gene across samples; apply(t(m), 1, scale) returns a
# samples x genes matrix, so sample rownames are restored below.
for_heatmap.m.scaled <- apply(t(for_heatmap.m),1,scale)
rownames(for_heatmap.m.scaled) <- rownames(for_heatmap.m)
# Pre-computed dendrograms, only needed if the commented color_branches()
# options below are re-enabled.
col_dend <- hclust(dist(t(for_heatmap.m.scaled))) # column clustering
row_dend <- hclust(dist((for_heatmap.m.scaled))) # row clustering
he <- Heatmap(for_heatmap.m.scaled,
col = colorRamp2(c(-2, 0, 17), c("blue", "white", "red")),
row_names_gp = gpar(fontsize = 8),
show_row_names = FALSE,
show_column_names = TRUE,
show_row_dend = TRUE, # whether show row clusters.
show_column_dend = TRUE,
top_annotation = top_anno,
left_annotation = side_anno,
row_names_side = c("left"),
# cluster_columns = color_branches(col_dend, k = 3), # add color on the column tree branches
# cluster_rows = color_branches(row_dend, k = 3),
heatmap_legend_param = list(title = c("Exp.")))
# BUG FIX: a bare `he` is only auto-printed interactively or under Rscript;
# when this script is source()d the expression is not printed and the PDF
# below would be empty. Draw the heatmap explicitly.
draw(he)
pdf(file.path("/home/huff/project/immune_checkpoint/result_20171025/ICP_exp_patthern-byratio.new","pattern_validation","68ICP_exp_cluster_heatmap.pdf"),width = 12,height = 6)
draw(he)
dev.off()
# save image --------------------------------------------------------------
# Persist the whole workspace; the load() at the top of this script restores
# it on the next run.
save.image(file.path(result_path,"pattern_validation","FANTOM5.validation.Rdata"))
| /Exp_pattern/2.2.ICP_exp_in_inihbitory_immune_cells.R | no_license | mywanuo/immune-cp | R | false | false | 22,066 | r | ############## ICP exp in inhibitory immune cell ###############
#################### FANTOM data ###############################
library(curl)
library(ggbeeswarm)
library(magrittr)
# data path ---------------------------------------------------------------
basic_path <- "/home/huff/project"
immune_path <- file.path(basic_path,"immune_checkpoint")
result_path <- file.path(immune_path,"result_20171025","ICP_exp_patthern-byratio")
fantom_path <- file.path(basic_path,"data/FANTOM5/extra")
gene_list_path <-file.path(immune_path,"checkpoint/20171021_checkpoint")
# load image --------------------------------------------------------------
load(file.path(result_path,"pattern_validation","FANTOM5.validation.Rdata"))
# load data ---------------------------------------------------------------
ICP_fantom.gene_exp.Immune_cell.combine <-
readr::read_rds(file.path(immune_path,"genelist_data","FANTOM5","ICP_fantom.gene_exp.cell_line.Immune_cell.raw.exp.rds.gz")) %>%
dplyr::filter(Group != "Stromal Cell")
# dplyr::filter(`Characteristics[Tissue]` != "blood")
fantom_sample_info <- readr::read_tsv(file.path(fantom_path,"HumanSamples2.0.classification.txt")) %>%
# dplyr::filter(`Characteristics [Category]` %in% c("primary cells")) %>%
dplyr::mutate(sample = toupper(paste(curl_escape(`Charateristics [description]`),`Source Name`,sep="."))) %>%
dplyr::select(sample,`Charateristics [description]`,`Characteristics[Tissue]`,`Characteristics [Cell type]`,`Characteristics [Category]`) %>%
dplyr::filter(sample %in% unique(ICP_fantom.gene_exp.Immune_cell.combine$sample))
# ICP exp in each cell types ----------------------------------------------
gene_list <- read.table(file.path(gene_list_path, "all.entrez_id-gene_id"),header=T)
ICP_exp_site <- readr::read_tsv(file.path(result_path,"pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv"))
gene_list_exp_site <- readr::read_tsv(file.path(result_path,"pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv")) %>%
dplyr::select(entrez_ID,symbol,Exp_site,`log2FC(I/T)`) %>%
dplyr::inner_join(gene_list,by="symbol")
inhibitory_immune_cell<- c("monocyte","neutrophil","monoblast")
ICP_fantom.gene_exp.Immune_cell.combine %>%
dplyr::inner_join(fantom_sample_info,by="sample") %>%
dplyr::mutate(`Characteristics[Tissue].x` = ifelse(is.na(`Characteristics[Tissue].x`),"notsure",`Characteristics[Tissue].x`)) %>% # cell lines tissue type, not include immune cell. Cause replaced by "PrimaryCell" in ICP_fantom.gene_exp.Immune_cell.combine
dplyr::filter(`Characteristics[Tissue].x`!="notsure") %>%
dplyr::mutate(Group = ifelse(`Characteristics[Tissue].x`=="blood","blood",Group)) %>%
# dplyr::mutate(Group = ifelse(`Characteristics [Category]`=="cell lines" & Group!="blood","Tumor Cell",Group)) %>%
dplyr::mutate(Role = ifelse(`Characteristics [Cell type]` %in% inhibitory_immune_cell, "Immune cells(Inhibitory)", "Immune cells(Activate)")) %>%
dplyr::mutate(Role = ifelse(Group=="Tumor Cell","Tumor cells(non-blood)",Role)) %>%
dplyr::mutate(Role = ifelse(Group=="blood",paste("Tumor cells(blood)",Role,sep="-"),Role)) %>%
dplyr::mutate(Role = ifelse(Role =="Tumor cells(blood)-Immune cells(Activate)","Immune cells of blood tumor(Activate)",Role)) %>%
dplyr::mutate(Role = ifelse(Role =="Tumor cells(blood)-Immune cells(Inhibitory)","Immune cells of blood tumor(Inhibitory)",Role)) %>%
dplyr::inner_join(ICP_exp_site,by="symbol") %>%
dplyr::mutate(log2gene_tpm = log2(gene_tpm+1) ) %>%
dplyr::filter(Exp_site!="Not_sure") -> ready_for_analysis
# exp profile between immune and tumor, blood activate and inhibit immne cells---------------------------
ready_for_draw <- ready_for_analysis
ICP_exp_site %>%
dplyr::inner_join(data.frame(Exp_site = c("Only_exp_on_Immune","Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor" ),
rank = c(5,4,3,2,1)), by = "Exp_site") %>%
dplyr::filter(Exp_site!="Not_sure") %>%
dplyr::arrange(rank,`log2FC(I/T)`) %>%
.$symbol -> symbol_rank
ready_for_draw <- within(ready_for_draw, symbol <- factor(symbol, levels = unique(symbol_rank))) # change symbol's factor level to rank the facets on plot
with(ready_for_draw, levels(symbol))
ready_for_draw %>%
dplyr::select(symbol,Exp_site) %>%
# dplyr::filter(symbol %in% c("CD276","CTLA4")) %>%
unique() -> color_bac
color_bac$Role <- color_bac$log2gene_tpm <- 1
ready_for_draw %>%
ggplot(aes(x=Role,y=log2gene_tpm)) +
geom_boxplot() +
rotate() +
geom_rect(data=color_bac,aes(fill = Exp_site),xmin = -Inf,xmax = Inf,ymin = -Inf,ymax = Inf,alpha = 0.1) +
facet_wrap(~symbol,scales = "free_x") +
ylab("log2(TPM)") +
scale_fill_manual(
# values = site_cplor,
values = c("yellow", "green","pink","blue", "red"),
# values = c("#008B00", "#00EE00", "#CD8500", "#FF4500"),
breaks = c("Only_exp_on_Immune", "Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor")
) +
theme(
panel.background = element_rect(fill = "white",colour = "black"),
axis.text.y = element_text(size = 10),
axis.text.x = element_text(size = 10),
axis.title.y = element_blank(),
# legend.position = "none",
legend.text = element_text(size = 10),
legend.title = element_text(size = 12),
legend.background = element_blank(),
legend.key = element_rect(fill = "white", colour = "black"),
plot.title = element_text(size = 20),
axis.text = element_text(colour = "black"),
strip.background = element_rect(fill = "white",colour = "black"),
strip.text = element_text(size = 10),
text = element_text(color = "black")
)
ggsave(file.path(result_path,"pattern_validation","1.FANTOM5.ICP_exp_in_tumor(no-blood-tissue)_Activate-Inhibitory-immune.png"),device = "png",height = 10,width = 20)
ggsave(file.path(result_path,"pattern_validation","1.FANTOM5.ICP_exp_in_tumor(no-blood-tissue)_Activate-Inhibitory-immune.pdf"),device = "pdf",height = 10,width = 20)
# tSNE --------------------------------------------------------------------
library("Rtsne")
ready_for_analysis %>%
dplyr::select(sample,gene_tpm,symbol,Group,Role) %>%
dplyr::mutate(Role = ifelse(Role %in% c("Immune cells of blood tumor(Activate)","Immune cells of blood tumor(Inhibitory)"),"Blood tumor cells",Role)) %>%
dplyr::mutate(Role = ifelse(Role %in% c("Tumor cells(non-blood)"),"Solid tumor cells",Role)) %>%
dplyr::filter(!Role %in% c("Blood tumor cells")) %>%
tidyr::spread(key="symbol",value="gene_tpm") -> data_for_tSNE
# normalization
data_for_tSNE.matx.normalize <- normalize_input(data_for_tSNE[,-c(1:3)] %>% as.matrix())
# do tSNE
tsne_res <- Rtsne(data_for_tSNE.matx.normalize,dims=2,pca=FALSE,theta=0.0)
# Show the objects in the 2D tsne representation
plot(tsne_res$Y,col=factor(data_for_tSNE$Role), asp=1)
tsne_res$Y %>%
as.data.frame() %>%
dplyr::as.tbl() %>%
# dplyr::rename("tSNE 1"="V1","tSNE 2"="V2") %>%
dplyr::mutate(sample = data_for_tSNE$sample,
Cell_type = data_for_tSNE$Role) %>%
ggplot(aes(x=V1,y= V2)) +
geom_jitter(aes(color=Cell_type),size=1) +
xlab("tSNE 1") +
ylab("tSNE 2") +
ggpubr::color_palette("jco") +
my_theme +
theme(
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()
)
ggsave(filename = file.path(result_path,"pattern_validation","1.3.FANTOME5.ICP_exp-T-I_tSNE.png"),device = "png",width = 6,height = 3)
ggsave(filename = file.path(result_path,"pattern_validation","1.3.FANTOME5.ICP_exp-T-I_tSNE.pdf"),device = "pdf",width = 6,height = 3)
# PCA analysis of blood tumor immune cells and primary immune cells -------
library("FactoMineR")
library("factoextra")
# Reshape to a samples x genes matrix; first column after `sample` is Role,
# kept so it can be used as the grouping fill below.
ready_for_analysis %>%
dplyr::select(sample,Role,gene_tpm,symbol) %>%
tidyr::spread(key="symbol",value="gene_tpm") -> ready_for_PCA
ready_for_PCA.df <- as.data.frame(ready_for_PCA[,-c(1)])
rownames(ready_for_PCA.df) <- ready_for_PCA$sample
# PCA on the gene columns only (drop Role); scale.unit standardises genes.
res.pca <- PCA(ready_for_PCA.df[,-1], scale.unit = T,graph = FALSE)
# As described in previous sections, the eigenvalues measure the amount of variation retained by each principal component. Eigenvalues are large for the first PCs and small for the subsequent PCs. That is, the first PCs corresponds to the directions with the maximum amount of variation in the data set. We examine the eigenvalues to determine the number of principal components to be considered. The eigenvalues and the proportion of variances (i.e., information) retained by the principal components (PCs) can be extracted using the function get_eigenvalue() [factoextra package].
eig.val <- get_eigenvalue(res.pca)
var <- get_pca_var(res.pca)
# PCA biplot
# http://www.sthda.com/english/articles/31-principal-component-methods-in-r-practical-guide/112-pca-principal-component-analysis-essentials/
# NOTE(review): col.var is passed a vector of class labels built from
# ICP_exp_site; its element order must line up with the variable order in
# res.pca for the colouring to be correct -- verify this alignment.
fviz_pca_biplot(res.pca,
# Fill individuals by groups
geom.ind = "point",
pointshape = 21,
pointsize = 2.5,
labelsize = 2,
fill.ind = ready_for_PCA.df$Role,
col.ind = "black",
# Color variable by groups
col.var = ICP_exp_site %>%
dplyr::inner_join(data.frame(Exp_site = c("Only_exp_on_Immune","Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor" ),
rank = c(5,4,3,2,1)), by = "Exp_site") %>%
dplyr::filter(Exp_site!="Not_sure") %>%
dplyr::arrange(rank,`log2FC(I/T)`) %>%
.$Exp_site,
# gradient.cols =c("yellow", "green","pink","blue", "red"),
legend.title = list(fill = "Sample class", color = "Exp_site"),
repel = TRUE # Avoid label overplotting
)+
ggpubr::fill_palette("npg")+ # Indiviual fill color
ggpubr::color_palette("jco") # Variable colors
ggsave(file.path(result_path,"pattern_validation","2.PCA_analysis_of_ICPs_in_different_cell_types.png"),device = "png",height = 6,width = 8)
ggsave(file.path(result_path,"pattern_validation","2.PCA_analysis_of_ICPs_in_different_cell_types.pdf"),device = "pdf",height = 6,width = 8)
# vertical comparison of ICPs in tumor and immune -------------------------
# Lookup table mapping the five expression-site classes to a display colour
# and an ordering rank (5 = immune-only ... 1 = tumor-only).
strip_color <- data.frame(Exp_site = c("Only_exp_on_Immune","Mainly_exp_on_Immune","Both_exp_on_Tumor_Immune","Mainly_exp_on_Tumor","Only_exp_on_Tumor" ),
site_cplor = c("blue", "green", "orange", "pink","red"),
rank = c(5,4,3,2,1))
# Shared ggplot2 theme for all figures produced in this script.
my_theme <- theme(
panel.background = element_rect(fill = "white",colour = "black"),
panel.grid.major=element_blank(),
axis.text.y = element_text(size = 10,colour = "black"),
axis.text.x = element_text(size = 10,colour = "black"),
# legend.position = "none",
legend.text = element_text(size = 10),
legend.title = element_text(size = 12),
legend.background = element_blank(),
legend.key = element_rect(fill = "white", colour = "black"),
plot.title = element_text(size = 20),
axis.text = element_text(colour = "black"),
strip.background = element_rect(fill = "white",colour = "black"),
strip.text = element_text(size = 10),
text = element_text(color = "black")
)
# Box-plot of per-gene ICP expression for one dataset/facet.
#
# .data    : long-format tibble with at least columns `symbol` and `Exp`
#            (raw expression; log2(Exp+1) is applied for plotting)
# ylab     : y-axis label (shown on x after rotate()/coord_flip)
# facet    : one-sided formula string passed to facet_wrap(), e.g. "~cell_source"
# title    : plot title
# filename : basename (no extension) under result_path/pattern_validation
#
# Genes are ordered by their median raw expression; axis labels are coloured
# by the gene's expression-site class (colours from the global strip_color).
# Relies on globals: gene_list_exp_site, strip_color, my_theme, result_path.
fn_plot_ICP_exp_in_dataset <- function(.data, ylab, facet, title, filename) {
  # Per-gene median expression + expression-site class and its colour.
  .data %>%
    dplyr::group_by(symbol) %>%
    dplyr::mutate(mid = quantile(Exp, 0.5)) %>%
    dplyr::arrange(mid) %>%
    dplyr::select(symbol, mid) %>%
    unique() %>%
    dplyr::inner_join(gene_list_exp_site, by = "symbol") %>%
    dplyr::inner_join(strip_color, by = "Exp_site") -> .symbol_rank
  .data %>%
    dplyr::mutate(Exp = log2(Exp + 1)) %>%
    ggplot(aes(x = symbol, y = Exp)) +
    geom_boxplot(outlier.colour = "grey", outlier.size = 0.5) +
    facet_wrap(as.formula(facet)) +
    rotate() +
    ggtitle(title) +
    ylab(ylab) +
    xlab("Symbol") +
    scale_x_discrete(limits = .symbol_rank$symbol) +
    my_theme +
    theme(
      axis.text.y = element_text(size = 10, colour = .symbol_rank$site_cplor),
      plot.title = element_text(size = 12)
    ) -> .p
  # BUG FIX: inside a function the plot is never printed, so ggplot2's
  # last_plot() is not updated and ggsave() without `plot =` would silently
  # save whichever plot was rendered last. Pass the plot object explicitly.
  ggsave(file.path(result_path, "pattern_validation", paste(filename, "pdf", sep = ".")),
         plot = .p, device = "pdf", width = 4, height = 10)
  ggsave(file.path(result_path, "pattern_validation", paste(filename, "png", sep = ".")),
         plot = .p, device = "png", width = 4, height = 10)
}
# immune and tumor cell together
# Three views of the same data: tumor+immune combined, tumor only, immune
# only. Blood-tumor/immune hybrid roles are excluded from all three.
ready_for_analysis %>%
dplyr::mutate(Exp = gene_tpm) %>%
dplyr::mutate(cell_source = ifelse(Role %in% c("Immune cells(Activate)","Immune cells(Inhibitory)"),"Immune cell","Tumor cell")) %>%
dplyr::filter(! Role %in% c("Tumor cells(blood)-Immune cells(Activate)","Tumor cells(blood)-Immune cells(Inhibitory)")) %>%
fn_plot_ICP_exp_in_dataset(ylab="log2(TPM)",facet="~cell_source",title="FANTOM 5, ICP expression",filename="1.1.FANTOM5.ICP_exp.T-I")
# only tumor cell
ready_for_analysis %>%
dplyr::mutate(Exp = gene_tpm) %>%
dplyr::mutate(cell_source = ifelse(Role %in% c("Immune cells(Activate)","Immune cells(Inhibitory)"),"Immune cell","Tumor cell")) %>%
dplyr::filter(! Role %in% c("Tumor cells(blood)-Immune cells(Activate)","Tumor cells(blood)-Immune cells(Inhibitory)")) %>%
dplyr::filter(cell_source == "Tumor cell") %>%
fn_plot_ICP_exp_in_dataset(ylab="log2(TPM)",facet="~cell_source",title="FANTOM 5, ICP expression",filename="1.1.1.FANTOM5.ICP_exp.Tumor")
# only immune cell
ready_for_analysis %>%
dplyr::mutate(Exp = gene_tpm) %>%
dplyr::mutate(cell_source = ifelse(Role %in% c("Immune cells(Activate)","Immune cells(Inhibitory)"),"Immune cell","Tumor cell")) %>%
dplyr::filter(! Role %in% c("Tumor cells(blood)-Immune cells(Activate)","Tumor cells(blood)-Immune cells(Inhibitory)")) %>%
dplyr::filter(cell_source == "Immune cell") %>%
fn_plot_ICP_exp_in_dataset(ylab="log2(TPM)",facet="~cell_source",title="FANTOM 5, ICP expression",filename="1.1.2.FANTOM5.ICP_exp.Immune")
# correlation of ICP mean exp between immune and tumor --------------------
# One row per gene: log2 mean expression in tumor cell lines vs immune cells.
ready_for_analysis %>%
dplyr::select(symbol,mean_cell_line,mean_immune_exp,Exp_site) %>%
unique() %>%
dplyr::mutate(mean_immune_exp=log2(mean_immune_exp+1),mean_cell_line=log2(mean_cell_line+1))-> ready_for_cor
# Overall spearman correlation; (x, y) place the annotation inside the plot.
broom::tidy(
cor.test(ready_for_cor$mean_immune_exp,ready_for_cor$mean_cell_line,method = "spearman")
) %>%
dplyr::mutate(y=(max(ready_for_cor$mean_immune_exp)-min(ready_for_cor$mean_immune_exp))*0.75+min(ready_for_cor$mean_immune_exp),x=min(ready_for_cor$mean_cell_line)+(max(ready_for_cor$mean_cell_line)-min(ready_for_cor$mean_cell_line))*0.4) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2))) -> cor_anno
# Per-expression-site correlations; groups with <= 5 genes are skipped.
ready_for_cor %>%
tidyr::nest(-Exp_site) %>%
dplyr::mutate(spm = purrr::map(data,.f=function(.x){
if(nrow(.x)>5){
broom::tidy(
cor.test(.x$mean_immune_exp,.x$mean_cell_line,method = "spearman")) %>%
dplyr::mutate(y=(max(.x$mean_immune_exp)-min(.x$mean_immune_exp))*0.85+min(.x$mean_immune_exp),x=cor_anno$x) %>%
dplyr::mutate(label = paste("r = ",signif(estimate,2),", p = ",signif(p.value,2)))
}else{
tibble::tibble()
}
})) %>%
dplyr::select(-data) %>%
tidyr::unnest() -> cor_anno.specific
# Scatter of log2 mean expression: tumor cells (x) vs immune cells (y),
# one point per gene, coloured by expression-site class, with per-class
# and overall linear fits and the spearman statistics annotated.
ready_for_cor %>%
  dplyr::filter(Exp_site != "Not_sure") %>%
  ggplot(aes(x = mean_cell_line, y = mean_immune_exp)) +
  geom_jitter(aes(color = Exp_site), size = 0.5) +
  geom_smooth(se = F, method = "lm", aes(color = Exp_site, group = Exp_site)) +
  geom_smooth(se = F, method = "lm", color = "black") +
  geom_text(aes(x = x, y = y, label = label), data = cor_anno, color = "black") +
  geom_text(aes(x = x, y = y, label = label, color = Exp_site), data = cor_anno.specific) +
  scale_color_manual(values = c("#CD661D", "#008B00", "#FF69B4", "#1874CD", "#CD3333")) +
  my_theme +
  # BUG FIX: corrected the "exppression" typo in both axis labels.
  xlab("log2(Mean expression in tumor cells)") +
  ylab("log2(Mean expression in immune cells)")
ggsave(file.path(result_path, "pattern_validation", "1.2.FANTOM5-T-I-meanExp.correlation.pdf"), device = "pdf", height = 6, width = 8)
ggsave(file.path(result_path, "pattern_validation", "1.2.FANTOM5-T-I-meanExp.correlation.png"), device = "png", height = 6, width = 8)
# correlation of ICP rank between immune and tumor --------------------
# Rank-based version of the comparison above; ranks are robust to the
# heavy-tailed TPM distribution.
ready_for_cor %>%
  dplyr::mutate(
    Tumor_rank = rank(mean_cell_line),
    # BUG FIX: Immune_rank was previously assigned as 1:nrow() after an
    # arrange(), which breaks ties arbitrarily while Tumor_rank used rank()
    # (ties averaged). Use rank() for both axes so they are comparable.
    Immune_rank = rank(mean_immune_exp)
  ) -> ready_for_rank.cor
# Overall spearman correlation plus an in-plot annotation position.
broom::tidy(
  cor.test(ready_for_rank.cor$Immune_rank, ready_for_rank.cor$Tumor_rank, method = "spearman")
) %>%
  dplyr::mutate(y = (max(ready_for_rank.cor$Immune_rank) - min(ready_for_rank.cor$Immune_rank)) * 0.75 + min(ready_for_rank.cor$Immune_rank), x = min(ready_for_rank.cor$Tumor_rank) + (max(ready_for_rank.cor$Tumor_rank) - min(ready_for_rank.cor$Tumor_rank)) * 0.4) %>%
  dplyr::mutate(label = paste("r = ", signif(estimate, 2), ", p = ", signif(p.value, 2))) -> cor_anno
# Per-expression-site correlations; groups with <= 5 genes are skipped.
ready_for_rank.cor %>%
  tidyr::nest(-Exp_site) %>%
  dplyr::mutate(spm = purrr::map(data, .f = function(.x) {
    if (nrow(.x) > 5) {
      broom::tidy(
        cor.test(.x$Immune_rank, .x$Tumor_rank, method = "spearman")) %>%
        dplyr::mutate(y = (max(.x$Immune_rank) - min(.x$Immune_rank)) * 0.85 + min(.x$Immune_rank), x = cor_anno$x) %>%
        dplyr::mutate(label = paste("r = ", signif(estimate, 2), ", p = ", signif(p.value, 2)))
    } else {
      tibble::tibble()
    }
  })) %>%
  dplyr::select(-data) %>%
  tidyr::unnest() -> cor_anno.specific
ready_for_rank.cor %>%
  dplyr::filter(Exp_site != "Not_sure") %>%
  ggplot(aes(x = Tumor_rank, y = Immune_rank)) +
  geom_jitter(aes(color = Exp_site), size = 0.5) +
  geom_smooth(se = F, method = "lm", aes(color = Exp_site, group = Exp_site)) +
  geom_smooth(se = F, method = "lm", color = "black") +
  geom_text(aes(x = x, y = y, label = label), data = cor_anno, color = "black") +
  geom_text(aes(x = x, y = y, label = label, color = Exp_site), data = cor_anno.specific) +
  scale_color_manual(values = c("#CD661D", "#008B00", "#FF69B4", "#1874CD", "#CD3333")) +
  my_theme +
  # BUG FIX: the axes show ranks, not log2 mean expression; the old labels
  # were copy-pasted from the mean-expression plot (and misspelled).
  xlab("Expression rank in tumor cells") +
  ylab("Expression rank in immune cells")
# BUG FIX: the old file names were identical to the mean-expression plot's
# and silently overwrote it; write the rank plots under their own name.
ggsave(file.path(result_path, "pattern_validation", "1.2.FANTOM5-T-I-rankExp.correlation.pdf"), device = "pdf", height = 6, width = 8)
ggsave(file.path(result_path, "pattern_validation", "1.2.FANTOM5-T-I-rankExp.correlation.png"), device = "png", height = 6, width = 8)
# heatmap -----------------------------------------------------------------
library(ComplexHeatmap)
library(circlize)
library(dendextend)
# Per-gene expression-site class used for the column annotation.
gene_list_exp_site <- readr::read_tsv(file.path("/home/huff/project/immune_checkpoint/result_20171025/ICP_exp_patthern-byMeanUQ","pattern_info","ICP_exp_pattern_in_immune_tumor_cell-by-FC-pvalue.tsv")) %>%
dplyr::select(symbol,Exp_site)
# Collapse blood-tumor roles and drop them; keep immune + solid tumor cells.
ready_for_analysis %>%
dplyr::select(sample,gene_tpm,symbol,Group,Role) %>%
dplyr::inner_join(gene_list_exp_site,by="symbol") %>%
dplyr::mutate(Role = ifelse(Role %in% c("Immune cells of blood tumor(Activate)","Immune cells of blood tumor(Inhibitory)"),"Blood tumor cells",Role)) %>%
dplyr::mutate(Role = ifelse(Role %in% c("Tumor cells(non-blood)"),"Solid tumor cells",Role)) %>%
dplyr::filter(!Role %in% c("Blood tumor cells")) -> ready_for_analysis_no_bloodtumor
# Samples x genes expression matrix for the heatmap body.
ready_for_analysis_no_bloodtumor %>%
dplyr::select(sample,symbol,gene_tpm) %>%
dplyr::arrange(sample) %>%
tidyr::spread(key="symbol",value="gene_tpm") -> for_heatmap
for_heatmap.m <- as.matrix(for_heatmap[,-1])
rownames(for_heatmap.m) <- for_heatmap$sample
# Column (gene) annotation: expression-site class, ordered by symbol to
# match the alphabetical column order produced by spread() above.
ready_for_analysis_no_bloodtumor %>%
dplyr::select(symbol,Exp_site) %>%
unique()%>%
dplyr::arrange(symbol) %>%
as.data.frame() -> symbol_anno
rownames(symbol_anno) <- symbol_anno[,1]
symbol_anno <- symbol_anno[,-1]
top_anno <- HeatmapAnnotation(`ICG type`=symbol_anno,
col = list(`ICG type`=c("Immune cell dominate" = "#7FFF00",
"Immune and tumor cell almost" = "#CD950C",
"Tumor cell dominate" = "#FF3030")),
height = unit(0.2, "cm"),
show_annotation_name =T)
# NOTE(review): 69 is presumably the number of genes -- confirm it matches
# ncol(for_heatmap.m).
draw(top_anno,1:69)
# Row (sample) annotation: sample group.
ready_for_analysis_no_bloodtumor %>%
dplyr::select(sample,Role) %>%
unique()%>%
dplyr::arrange(sample) %>%
as.data.frame() -> sample_anno
rownames(sample_anno) <- sample_anno[,1]
sample_anno <- sample_anno[,-1]
side_anno <- rowAnnotation(`Sample group`=sample_anno,
col = list(`Sample group`=c("Immune cells(Activate)" = "#1C86EE",
"Immune cells(Inhibitory)" = "#FFB90F",
"Solid tumor cells" = "#919191")),
width = unit(0.2, "cm"),
show_annotation_name =T)
# NOTE(review): 197 is presumably the number of samples -- confirm it
# matches nrow(for_heatmap.m).
draw(side_anno,1:197)
# z-score each gene across samples; apply() over rows of t(m) returns a
# samples x genes matrix, so rownames are reassigned from the sample matrix.
for_heatmap.m.scaled <- apply(t(for_heatmap.m),1,scale)
rownames(for_heatmap.m.scaled) <- rownames(for_heatmap.m)
col_dend = hclust(dist(t(for_heatmap.m.scaled))) # column clustering
row_dend = hclust(dist((for_heatmap.m.scaled))) # row clustering
# NOTE(review): the colour ramp tops out at 17 although the matrix is
# z-scored (typically within a few units of 0) -- confirm the upper bound.
he = Heatmap(for_heatmap.m.scaled,
col = colorRamp2(c(-2, 0, 17), c("blue", "white", "red")),
row_names_gp = gpar(fontsize = 8),
show_row_names = FALSE,
show_column_names = TRUE,
show_row_dend = T, # whether show row clusters.
show_column_dend = T,
top_annotation = top_anno,
left_annotation =side_anno,
row_names_side = c("left"),
# cluster_columns = color_branches(col_dend, k = 3), # add color on the column tree branches
# cluster_rows = color_branches(row_dend, k = 3),
heatmap_legend_param = list(title = c("Exp.")))
he
# NOTE(review): a bare `he` only auto-prints interactively; under source()
# nothing is drawn into the PDF -- draw(he) would be the robust call.
pdf(file.path("/home/huff/project/immune_checkpoint/result_20171025/ICP_exp_patthern-byratio.new","pattern_validation","68ICP_exp_cluster_heatmap.pdf"),width = 12,height = 6)
he
dev.off()
# save image --------------------------------------------------------------
save.image(file.path(result_path,"pattern_validation","FANTOM5.validation.Rdata"))
|
# Set up problem: maximize
# 100x1 +60x2 subject to
# 2x1 + x2 <= 59
# x1 + 2x2 <= 40
#
f.obj <- c(100, 60)  # objective-function coefficients (per unit of x1, x2)
f.con <- matrix (c(2,1,1,2), nrow=2, byrow=TRUE)  # constraint matrix, one row per constraint
f.dir <- c("<=", "<=")  # direction of each constraint
f.rhs <- c(59, 40)  # right-hand side of each constraint
#
# Now run.
#
lp ("max", f.obj, f.con, f.dir, f.rhs) | /Unit07/Tables and Chairs w lp.R | no_license | cmadding/MSDS7374 | R | false | false | 258 | r | # Set up problem: maximize
# 100x1 +60x2 subject to
# 2x1 + x2 <= 59
# x1 + 2x2 <= 40
#
f.obj <- c(100, 60)
f.con <- matrix (c(2,1,1,2), nrow=2, byrow=TRUE)
f.dir <- c("<=", "<=")
f.rhs <- c(59, 40)
#
# Now run.
#
lp ("max", f.obj, f.con, f.dir, f.rhs) |
# Instacart basket-prediction pipeline: feature engineering + xgboost CV.
# NOTE(review): rm(list = ls()) wipes the caller's workspace when sourced;
# discouraged -- prefer running the script in a fresh R session.
rm(list=ls())
gc()
library(data.table)
library(dplyr)
library(tidyr)
library(xgboost)
library(stringr)
library(ModelMetrics)
library(ggplot2)
#setwd("D:/Eigene Dateien/sonstiges/Kaggle/instacart/scripts")
# F1 score for one user's basket prediction.
#
# y:    0/1 vector of true reorder labels (one entry per candidate product)
# pred: 0/1 vector of predicted labels, same length as y
#
# Returns a scalar in [0, 1]. Special cases:
#  * nothing predicted AND nothing reordered -> 1 (the competition's
#    "None" convention for empty baskets)
#  * precision and recall both 0 -> 0 (avoids 0/0 = NaN)
f1 <- function (y, pred)
{
  tp <- sum(pred == 1 & y == 1)  # true positives
  fp <- sum(pred == 1 & y == 0)  # false positives
  fn <- sum(pred == 0 & y == 1)  # false negatives
  # Scalar conditions: plain if/else instead of ifelse(), && for
  # short-circuiting. tp + fp == 0 <=> no reorders predicted;
  # tp + fn == 0 <=> no products actually reordered.
  precision <- if (tp + fp == 0) 0 else tp / (tp + fp)
  recall <- if (tp + fn == 0) 0 else tp / (tp + fn)
  if (all(pred == 0) && all(y == 0)) {
    1
  } else if (precision == 0 && recall == 0) {
    0
  } else {
    2 * precision * recall / (precision + recall)
  }
}
# Load Data ---------------------------------------------------------------
path <- "../input"
aisles <- fread(file.path(path, "aisles.csv"))
departments <- fread(file.path(path, "departments.csv"))
opp <- fread(file.path(path, "order_products__prior.csv"))
opt <- fread(file.path(path, "order_products__train.csv"))
ord <- fread(file.path(path, "orders.csv"))
products <- fread(file.path(path, "products.csv"))
# add user_id to train orders
opt$user_id <- ord$user_id[match(opt$order_id, ord$order_id)]
# Per-user totals from the train order (used later for threshold analysis).
train_info <- opt[,.(sum_products = .N, sum_reordered=sum(reordered)),user_id]
# join products with order info for all prior orders
setkey(opp,order_id)
setkey(ord,order_id)
op <- merge(ord,opp,all=FALSE) # inner join filter
rm(opp)
gc()
# Get the only reorderes ------------------------------
# Users whose prior baskets (beyond the first order) consist purely of
# reorders; kept aside for possible special handling at submission time.
reorder_users <- op[order_number>1 & .N>2,.(mean_reordered = mean(reordered), n=.N), user_id][mean_reordered==1,user_id]
gc()
# Take subset of Data ----------------------------------------------------
# All test users are kept; training is limited to the first n_users train
# users to fit in RAM.
test_users <- unique(ord[eval_set=="test", user_id])
train_users <- unique(ord[eval_set=="train", user_id]) #& !user_id %in% reorder_users
n_users <- 15000
all_train_users <- train_users[1:n_users]
all_users <- c(all_train_users, test_users)
setkeyv(op,c("user_id","product_id", "order_number"))
op[,last_prior_order := max(order_number),.(user_id)]
ord[,last_order := max(order_number),.(user_id)] # auch train/test orders mit drin
#op<-op[last_prior_order-order_number <= 2] #last order = last prior order
#ord<-ord[last_order-order_number <= 3]
setkey(ord, user_id, order_number)
# Cumulative days since the user's first order; order_day_year positions
# each order within the user's final 365-day window.
ord[order(order_number), ':=' (order_days_sum = cumsum(ifelse(is.na(days_since_prior_order),0,days_since_prior_order))),user_id][,':=' (order_days_max=max(order_days_sum)),user_id][, ':=' (order_day_year = 365-(order_days_max-order_days_sum)),user_id]
op <- merge(op, ord[,.(user_id, order_number, order_days_sum, order_days_max, order_day_year)], all.x=T)
# Per user-product: purchase index, first/second/third order numbers and
# total purchase count (used for reorder-gap features below).
op[order(user_id, product_id, order_number), ':=' (
product_time = 1:.N,
first_order = min(order_number),
second_order = order_number[2],
third_order = order_number[3],
up_sum_order = .N), .(user_id,product_id)]
# Lagged cumulative-day value of the previous purchase of the same product
# (NA for the first purchase).
op[(reordered==1 | product_time==1),':=' (order_days_lag=c(NA,order_days_sum[-.N])), .(user_id, product_id)]
# Products ----------------------------------------------------------------
# Global per-product statistics computed BEFORE subsetting users, so they
# reflect the full prior history.
# NOTE(review): mean(uniqueN(user_id)) is mean() of a scalar -- the mean()
# wrappers on prod_popularity/prod_season appear redundant; confirm.
prd <- op[, .(
prod_orders = .N,
prod_maxorders = max(product_time),
prod_reorders = sum(reordered),
prod_first_orders = sum(product_time==1),
prod_second_orders = sum(product_time==2),
prod_add_to_cart = mean(add_to_cart_order),
prod_inpercent_orders=mean(up_sum_order/last_prior_order),
prod_inpercent_afterfirst = mean(up_sum_order/(last_prior_order-first_order+1)),
prod_popularity = mean(uniqueN(user_id)),
prod_season = mean(order_day_year),
prod_orders_till_first_reorder = mean(second_order-first_order,na.rm=T),
prod_orders_till_second_reorder = mean(third_order-second_order,na.rm=T)
), product_id][,':=' (
prod_reorder_probability = prod_second_orders / prod_first_orders,
prod_reorder_times = 1 + prod_reorders / prod_first_orders,
prod_reorder_ratio = prod_reorders / prod_orders,
prod_reorders = NULL,
prod_first_orders = NULL,
prod_second_orders = NULL
)]
# do the subsetting after product features were created
op<-op[user_id %in% all_users]
opt<-opt[user_id %in% all_users]
ord<-ord[user_id %in% all_users]
#products[, ':=' (prod_organic = ifelse(str_detect(str_to_lower(product_name),'organic'),1,0))]
#products[, ':=' (product_name = NULL)]
# Attach aisle/department ids to both product features and order lines.
setkey(products,product_id)
setkey(prd, product_id)
setkey(op, product_id)
prd <- merge(prd, products[,.(product_id, aisle_id, department_id)], all.x=TRUE)
op <- merge(op, products[,.(product_id, aisle_id, department_id)], all.x=TRUE)
rm(products)
gc()
# Users -------------------------------------------------------------------
# Order-level user statistics from the prior orders.
users <- ord[eval_set=="prior", .(user_orders=.N,
user_period=sum(days_since_prior_order, na.rm = T),
user_mean_days_since_prior = mean(days_since_prior_order, na.rm = T),
user_std_days_since_prior_order = sd(days_since_prior_order, na.rm=T)
), user_id]
# Product-level user statistics (diversity of products/aisles/departments).
us <- op[,.(
user_total_products = .N,
user_reorder_ratio = sum(reordered == 1) / sum(order_number > 1),
user_distinct_products = uniqueN(product_id),
user_distinct_aisles = uniqueN(aisle_id),
user_distinct_depts = uniqueN(department_id)
), user_id][,':=' (user_pct_distinct_products = user_distinct_products / user_total_products,
user_pct_distinct_aisles = user_distinct_aisles / user_total_products,
user_pct_distinct_depts = user_distinct_depts / user_total_products)]
users <- merge(users, us, all=FALSE)
# Basket-size mean/sd per user (first aggregate per order, then per user).
us <- op[,.(user_order_products = .N),.(user_id,order_id)][,.(
user_order_products_mean=mean(user_order_products),
user_order_products_sd=sd(user_order_products)
), user_id]
users <- merge(users, us, all=FALSE)
# Same statistics restricted to the user's last 3 prior orders.
# NOTE(review): the inner aggregation groups by user_id only, so
# user_order_products_3 is the TOTAL over the last 3 orders, not a per-order
# count; grouping by .(user_id, order_id) may have been intended -- confirm.
us <- op[(last_prior_order-order_number)<=2, .(
user_order_products_3 = .N,
user_reorder_ratio_3=mean(reordered)
), .(user_id)][,.(
user_order_products_mean_last3 = mean(user_order_products_3),
user_reorder_ratio_last3=mean(user_reorder_ratio_3)
), user_id]
users <- merge(users, us, all=FALSE)
# Recent-vs-overall trend factors and overall activity rate.
users[, ':=' (
user_recent_orders_factor = user_order_products_mean_last3/user_order_products_mean,
user_recent_reorder_factor = user_reorder_ratio_last3 / user_reorder_ratio,
user_activity = ifelse(user_period==0,0,user_total_products/user_period),
user_order_products_mean_last3 = NULL,
user_reorder_ratio_last3 = NULL
)]
# Context of the user's train/test order (the order to be predicted).
us <- ord[eval_set != "prior", .(
user_id,
order_id,
eval_set,
train_time_since_last_order = days_since_prior_order,
train_dow = order_dow,
train_how = order_hour_of_day,
train_ordernum = order_number,
train_season = order_day_year)]
setkey(users, user_id)
setkey(us, user_id)
users <- merge(users, us, all=FALSE)
rm(us)
gc()
# Departments -------------------------------------------------------------
# Per-department aggregates over all prior order lines, plus the average
# number of products from the department per order. (Currently not merged
# into `data` -- see the commented-out merge further below.)
dep <- op[, .(
  dept_total_products = .N,
  dept_total_orders = uniqueN(order_id),
  dept_total_users = uniqueN(user_id),
  dept_reorder_times = sum(reordered),
  dept_reorder_ratio = mean(reordered)
), by = department_id]
dep[, dept_products_per_order := dept_total_products / dept_total_orders]
# Database ----------------------------------------------------------------
# to do days until the product is first reordered
# One row per (user, product) candidate pair with user-product interaction
# features; user, product and department features are merged in below.
data <- op[, .(
up_orders = .N,
up_first_order = min(order_number),
up_last_order = max(order_number),
up_last_order_dow = order_dow[order_number==max(order_number)],
up_last_order_hod = order_hour_of_day[order_number==max(order_number)],
up_avg_cart_position = mean(add_to_cart_order),
up_avg_days_since_reorder = mean(order_days_sum-order_days_lag,na.rm=T)),
.(user_id, product_id)]
setkey(users,user_id)
setkey(data,user_id)
data <- merge(data,users,all=FALSE)
# Attach the cumulative-day value of the user's last purchase of the
# product, then convert it into days since that purchase.
data<-merge(data, ord[,.(user_id, order_number, order_days_sum)], all.x=TRUE, by.x=c("user_id", "up_last_order"), by.y=c("user_id", "order_number"))
data[,':=' (
up_days_since_last_reorder = user_period-order_days_sum+train_time_since_last_order,
order_days_sum=NULL
)]
setkey(prd,product_id)
setkey(data,product_id)
data <- merge(data,prd,all=FALSE)
rm(op, ord)
# How unusual the current gap is relative to the product's typical gap.
data[,':=' (
up_diff_train_typical = abs(train_time_since_last_order-up_avg_days_since_reorder),
up_perc_diff_train_typical = train_time_since_last_order/up_avg_days_since_reorder
)]
#setkey(dep,department_id)
#setkey(data,department_id)
#data <- merge(data,dep,all=FALSE)
rm(prd, users, dep)
gc()
data[,':=' (up_order_rate = up_orders / user_orders,
up_orders_since_last_order = user_orders - up_last_order,
up_inpercent_afterfirst = up_orders / (user_orders - up_first_order + 1),
up_popularity = up_orders / user_total_products)]
# merge in train order
# Label: reordered = 1 if the pair appears in the user's train order
# (NA -> 0 below; test rows keep NA and are dropped from `test`).
setkey(opt, user_id, product_id)
setkey(data, user_id, product_id)
data <- merge(data, opt[,.(user_id, product_id, reordered)], all.x=TRUE)
rm(opt)
gc()
# do it the data.table way
# Train / Test datasets ---------------------------------------------------
train <- data[eval_set == "train"]
train_userid <- train[,user_id]
train[,':=' (eval_set=NULL, product_id=NULL)]
train[is.na(reordered), ':=' (reordered=0)]
test <-data[eval_set == "test"]
test[,':=' (eval_set=NULL, reordered=NULL)]
rm(data)
gc()
# Model fitting ---------------------------------------------------------
colnames(train)
# xgboost hyper-parameters: logistic objective on the reordered label,
# AUC reported during boosting.
params <- list(
  "objective" = "reg:logistic",
  "eval_metric" = "auc",
  "eta" = 0.1,
  "max_depth" = 6,
  "min_child_weight" = 10,
  "gamma" = 0.7,
  "subsample" = 0.95,
  "colsample_bytree" = 0.95
)
# Get the folds ---------------------------------
# 131,209 users in total
users_per_fold <- 5000
n_fold <- 3
# Validation folds are built on USERS (not rows) so all candidate products
# of one user land in the same fold.
val_users_random <- sample(unique(train_userid), size = n_fold * users_per_fold, replace = FALSE)
if (n_fold == 1) {
  val_user_groups <- 1
} else {
  # seq_along()/seq_len() instead of 1:length() -- safe for empty input.
  val_user_groups <- cut(seq_along(val_users_random), n_fold, labels = FALSE)
}
# Row indices of each fold's validation users.
folds <- list()
for (i in seq_len(n_fold)) {
  folds[[i]] <- which(train_userid %in% val_users_random[val_user_groups == i])
}
# Do the CV ------------------------------------
# Incremental boosting: every calc_f1_every_n rounds the per-user F1 on the
# held-out fold is computed at a fixed classification threshold, so the best
# number of rounds can be picked by F1 rather than AUC.
threshold <- 0.20
n_rounds <- 80
calc_f1_every_n <- 10
res<-list()
res$f1 <- matrix(0,n_rounds/calc_f1_every_n,n_fold)
res$mean_reordered <- matrix(0,n_rounds/calc_f1_every_n,n_fold)
for (i in 1:length(folds)) {
cat('Training on fold', i,'...\n')
cv_train <- train[-folds[[i]],]
cv_val <- train[folds[[i]],]
dtrain <- xgb.DMatrix(data.matrix(select(cv_train,-user_id,-order_id,-reordered)),label=cv_train$reordered)
dval <- xgb.DMatrix(data.matrix(select(cv_val,-user_id,-order_id,-reordered)),label=cv_val$reordered)
watchlist <- list(train=dtrain, val=dval)
# NOTE(review): this shadows the global `train_users` defined during data
# preparation -- harmless here, but rename to avoid confusion.
train_users <- cv_train$user_id
for (j in 1:(n_rounds/calc_f1_every_n)){
if (j==1){
bst <- xgb.train(params,dtrain,calc_f1_every_n, watchlist=watchlist) # first boosting iteration
} else {
bst <- xgb.train(params,dtrain,calc_f1_every_n, watchlist=watchlist, xgb_model=bst) # incremental boost
}
pred<-predict(bst,dval)
y <- getinfo(dval,'label')
valid_users <- cv_val$user_id
valid_orders <- cv_val$order_id
dt <- data.table(user_id=valid_users, order_id=valid_orders, y=y, pred=pred, ypred=(pred>threshold)*1)
# Mean over users of the per-user F1 (the competition metric).
f1_score <- dt[,.(f1score = f1(y,ypred)), user_id][,.(f1_mean=mean(f1score))]
cat('val-f1: ', f1_score$f1_mean, 'mean sum_pred: ',dt[,.(sp = sum(ypred)),user_id][,.(mean_sp = mean(sp))]$mean_sp, '\n')
res$f1[j,i] <- f1_score$f1_mean
# NOTE(review): this stores the same fold constant at every j.
res$mean_reordered[j,i] <- mean(cv_val$reordered)
}
}
results <- data.frame(m=rowMeans(res$f1),sd=apply(res$f1,1,sd),res$f1, res$mean_reordered)
results
# Pick the round count with the best mean validation F1.
best_iter <- which.max(results$m)*calc_f1_every_n
n_rounds <- best_iter
# Fit the Model to all training data -------------------------------------
# NOTE(review): only user_id and reordered are dropped here, so order_id is
# kept as a feature, while the CV above also dropped order_id -- the final
# model is trained on a different feature set than the one n_rounds was
# tuned on. Fixing this also requires dropping order_id from dtest below;
# confirm and change both together.
dtrain <- xgb.DMatrix(as.matrix(train %>% select(-user_id,-reordered)), label = train$reordered)
watchlist <- list(train = dtrain)
model <- xgb.train(data = dtrain, params = params, nrounds = n_rounds, watchlist=watchlist)
importance <- xgb.importance(colnames(dtrain), model = model)
#xgb.ggplot.importance(importance)+theme(axis.text.y = element_text(hjust = 0))
ggplot(importance,aes(y=Gain,x=reorder(Feature,Gain)))+geom_bar(stat="identity")+coord_flip()
# Look at predictions ---------------------------------------------------------------
# Exploratory: in-sample predictions on the training data, ranked per user.
train<-as.data.table(train)
setkey(train, user_id)
train <- merge(train,train_info, all.x=TRUE)
train$prediction <- predict(model,dtrain)
train <- train[order(user_id,-prediction)]
train[,':=' (top = 1:.N), user_id]
# Predicted basket size (sum of probabilities) vs true reorder count.
ttmp <- train[,.(sp=sum(prediction),sr=sum(reordered)),user_id]
ggplot(ttmp,aes(sp,sr))+geom_point()+geom_abline(slope=1)
cor(ttmp$sp,ttmp$sr)
# choose a threshold -----------------------------------------------------------
# NOTE(review): the three assignments below each overwrite `pred`; only the
# last one takes effect. The first references user_order_products_2 /
# user_reorder_rate_2, which are not created anywhere in this script, and
# the last uses the true label sum_reordered (leakage -- exploratory only).
train[, ':=' (pred=ifelse(top<=user_order_products_2*user_reorder_rate_2,1,0))]
train[, ':=' (pred=ifelse(prediction>0.18,1,0))] # fixed threshold
train[, ':=' (pred=ifelse(top<=sum_reordered,1,0))]
summary(glm(reordered ~ prediction, data=train))
tmp <- train %>% group_by(user_id) %>% summarise(f1=f1(reordered, pred), sum_predicted=sum(pred)) %>% left_join(train_info)
tmp <- tmp %>% mutate(diff_basket_size = sum_reordered-sum_predicted)
#ggplot(tmp,aes(x=sum_predicted,y=f1))+geom_point()+geom_smooth()
#ggplot(tmp,aes(x=sum_reordered/sum_products,y=f1))+geom_point()+geom_smooth()
tmp %>% ungroup() %>% summarize(meanf1 = mean(f1)) %>% .[[1]]
# NOTE(review): cv_val/dval are leftovers from the LAST CV fold; this block
# only works interactively after the CV loop has run.
cv_val$prediction <- predict(model, dval)
# Threshold ---------------------------------------------------------------
cv_val <- cv_val[order(user_id,-prediction)]
cv_val[,':=' (top = 1:.N), user_id]
ttmp <- cv_val[,.(sp=sum(prediction),sr=sum(reordered)),user_id]
ggplot(ttmp,aes(sp,sr))+geom_point()+geom_abline(slope=1)+geom_smooth(method="lm")
cor(ttmp$sp,ttmp$sr)
# NOTE(review): `val` is never defined in this script (and neither is a
# `pred` column on it) -- the next two statements error as written.
summary(glm(reordered ~ prediction, data=val))
tmp <- val %>% group_by(user_id) %>% summarise(f1=f1(reordered, pred), sum_predicted=sum(pred), avg_size=mean(user_average_basket)) %>% left_join(train_info)
tmp <- tmp %>% mutate(diff_basket_size = sum_reordered-sum_predicted)
#ggplot(tmp,aes(x=sum_predicted,y=f1))+geom_point()+geom_smooth()
#ggplot(tmp,aes(x=sum_reordered/sum_products,y=f1))+geom_point()+geom_smooth()
tmp %>% ungroup() %>% summarize(meanf1 = mean(f1)) %>% .[[1]]
rm(importance)
gc()
# Apply model to test data ------------------------------------------------
# NOTE(review): order_id is NOT dropped here, mirroring the final-fit
# matrix above; if order_id is removed there it must be removed here too.
dtest <- xgb.DMatrix(as.matrix(test[,-c("user_id","product_id"),with=FALSE]))
test$pred <- predict(model, dtest)
# Threshold ---------------------------------------------------------------
# Adaptive basket sizing: rank products per user, estimate basket size as
# the sum of probabilities (+5 for large baskets), keep the top-k.
test <- test[order(user_id,-pred)]
test[,':=' (top = 1:.N), user_id]
test[,':=' (pred_basket = sum(pred)), user_id]
test[pred_basket>30,':=' (adapted_basket = pred_basket+5)]
test[pred_basket<=30,':=' (adapted_basket = pred_basket)]
test[, ':=' (reordered=ifelse(top<=adapted_basket,1,0))]
#test[, ':=' (reordered=ifelse(top<=user_average_basket*user_reorder_ratio,1,0))]
#close_orders <- test %>% group_by(order_id) %>% summarize(m=mean(reordered),mx=max(reordered),s=sum(reordered>threshold)) %>% filter(between(m,0.9*threshold,1.1*threshold) & s <= 5 & mx <= 0.35) %>% select(order_id) %>% .[[1]]
# NOTE(review): this overwrites the adaptive-basket labels above with a
# fixed 0.2 cutoff, making the adapted_basket logic dead code -- confirm
# which strategy is intended before submitting.
test[,reordered:=(pred>0.2)*1]
# all reorderes to 1 -----------------------------------------------------
#test[user_id %in% reorder_users, ':=' (reordered=1)]
# One row per order: space-separated product ids predicted as reordered.
submission <- test[reordered==1,.(products = paste(product_id, collapse = " ")), order_id]
# add None to close orders -----------------------------------------------
#new_submission <- submission %>% mutate(products = ifelse(order_id %in% close_orders, str_c(products,'None', collapse = " "),products))
# Orders with no predicted reorders get the explicit "None" answer.
missing <- data.table(
order_id = unique(test$order_id[!test$order_id %in% submission$order_id]),
products = "None"
)
submission <- rbindlist(list(submission, missing))
# add none to close orders
# NOTE(review): `dt` is a leftover from the last CV fold, and close_orders
# is computed but never used in the submission -- dead exploratory code.
tmpp <- dt[order(-pred)][,omp := 1-pred][,p_none := prod(omp), order_id]
close_orders <- tmpp[p_none>0.2]$order_id
submission[]
fwrite(submission[order(order_id)], file = "submit.csv")
| /scripts/smallRAM_cv_all_features.R | no_license | pspachtholz/instacart | R | false | false | 16,524 | r | rm(list=ls())
gc()
library(data.table)
library(dplyr)
library(tidyr)
library(xgboost)
library(stringr)
library(ModelMetrics)
library(ggplot2)
#setwd("D:/Eigene Dateien/sonstiges/Kaggle/instacart/scripts")
# F1 score for one user's basket prediction.
#
# y:    0/1 vector of true reorder labels (one entry per candidate product)
# pred: 0/1 vector of predicted labels, same length as y
#
# Returns a scalar in [0, 1]. Special cases:
#  * nothing predicted AND nothing reordered -> 1 (the competition's
#    "None" convention for empty baskets)
#  * precision and recall both 0 -> 0 (avoids 0/0 = NaN)
f1 <- function (y, pred)
{
  tp <- sum(pred == 1 & y == 1)  # true positives
  fp <- sum(pred == 1 & y == 0)  # false positives
  fn <- sum(pred == 0 & y == 1)  # false negatives
  # Scalar conditions: plain if/else instead of ifelse(), && for
  # short-circuiting. tp + fp == 0 <=> no reorders predicted;
  # tp + fn == 0 <=> no products actually reordered.
  precision <- if (tp + fp == 0) 0 else tp / (tp + fp)
  recall <- if (tp + fn == 0) 0 else tp / (tp + fn)
  if (all(pred == 0) && all(y == 0)) {
    1
  } else if (precision == 0 && recall == 0) {
    0
  } else {
    2 * precision * recall / (precision + recall)
  }
}
# Load Data ---------------------------------------------------------------
path <- "../input"
aisles <- fread(file.path(path, "aisles.csv"))
departments <- fread(file.path(path, "departments.csv"))
opp <- fread(file.path(path, "order_products__prior.csv"))
opt <- fread(file.path(path, "order_products__train.csv"))
ord <- fread(file.path(path, "orders.csv"))
products <- fread(file.path(path, "products.csv"))
# add user_id to train orders
opt$user_id <- ord$user_id[match(opt$order_id, ord$order_id)]
train_info <- opt[,.(sum_products = .N, sum_reordered=sum(reordered)),user_id]
# join products with order info for all prior orders
setkey(opp,order_id)
setkey(ord,order_id)
op <- merge(ord,opp,all=FALSE) # inner join filter
rm(opp)
gc()
# Get the only reorderes ------------------------------
reorder_users <- op[order_number>1 & .N>2,.(mean_reordered = mean(reordered), n=.N), user_id][mean_reordered==1,user_id]
gc()
# Take subset of Data ----------------------------------------------------
test_users <- unique(ord[eval_set=="test", user_id])
train_users <- unique(ord[eval_set=="train", user_id]) #& !user_id %in% reorder_users
n_users <- 15000
all_train_users <- train_users[1:n_users]
all_users <- c(all_train_users, test_users)
setkeyv(op,c("user_id","product_id", "order_number"))
op[,last_prior_order := max(order_number),.(user_id)]
ord[,last_order := max(order_number),.(user_id)] # auch train/test orders mit drin
#op<-op[last_prior_order-order_number <= 2] #last order = last prior order
#ord<-ord[last_order-order_number <= 3]
setkey(ord, user_id, order_number)
ord[order(order_number), ':=' (order_days_sum = cumsum(ifelse(is.na(days_since_prior_order),0,days_since_prior_order))),user_id][,':=' (order_days_max=max(order_days_sum)),user_id][, ':=' (order_day_year = 365-(order_days_max-order_days_sum)),user_id]
op <- merge(op, ord[,.(user_id, order_number, order_days_sum, order_days_max, order_day_year)], all.x=T)
op[order(user_id, product_id, order_number), ':=' (
product_time = 1:.N,
first_order = min(order_number),
second_order = order_number[2],
third_order = order_number[3],
up_sum_order = .N), .(user_id,product_id)]
op[(reordered==1 | product_time==1),':=' (order_days_lag=c(NA,order_days_sum[-.N])), .(user_id, product_id)]
# Products ----------------------------------------------------------------
# Product-level aggregates, deliberately computed on the FULL prior history:
# the user subsetting happens only after these features exist (see below).
prd <- op[, .(
prod_orders = .N,
prod_maxorders = max(product_time),
prod_reorders = sum(reordered),
prod_first_orders = sum(product_time==1),
prod_second_orders = sum(product_time==2),
prod_add_to_cart = mean(add_to_cart_order),
prod_inpercent_orders=mean(up_sum_order/last_prior_order),
prod_inpercent_afterfirst = mean(up_sum_order/(last_prior_order-first_order+1)),
prod_popularity = mean(uniqueN(user_id)),  # NOTE(review): uniqueN() is a per-group scalar here, so mean() is a no-op
prod_season = mean(order_day_year),
prod_orders_till_first_reorder = mean(second_order-first_order,na.rm=T),
prod_orders_till_second_reorder = mean(third_order-second_order,na.rm=T)
), product_id][,':=' (
# Derived reorder ratios; the raw counts are dropped right after.
prod_reorder_probability = prod_second_orders / prod_first_orders,
prod_reorder_times = 1 + prod_reorders / prod_first_orders,
prod_reorder_ratio = prod_reorders / prod_orders,
prod_reorders = NULL,
prod_first_orders = NULL,
prod_second_orders = NULL
)]
# do the subsetting after product features were created
# all_users is defined earlier in the original script (outside this chunk).
op<-op[user_id %in% all_users]
opt<-opt[user_id %in% all_users]
ord<-ord[user_id %in% all_users]
#products[, ':=' (prod_organic = ifelse(str_detect(str_to_lower(product_name),'organic'),1,0))]
#products[, ':=' (product_name = NULL)]
setkey(products,product_id)
setkey(prd, product_id)
setkey(op, product_id)
# Attach aisle/department ids to both the product features and order-products.
prd <- merge(prd, products[,.(product_id, aisle_id, department_id)], all.x=TRUE)
op <- merge(op, products[,.(product_id, aisle_id, department_id)], all.x=TRUE)
rm(products)
gc()
# Users -------------------------------------------------------------------
# Order-cadence features from prior orders only (each user's single train or
# test order has eval_set != "prior" and is handled separately below).
users <- ord[eval_set=="prior", .(user_orders=.N,
user_period=sum(days_since_prior_order, na.rm = T),
user_mean_days_since_prior = mean(days_since_prior_order, na.rm = T),
user_std_days_since_prior_order = sd(days_since_prior_order, na.rm=T)
), user_id]
# Product-variety features per user.
us <- op[,.(
user_total_products = .N,
user_reorder_ratio = sum(reordered == 1) / sum(order_number > 1),
user_distinct_products = uniqueN(product_id),
user_distinct_aisles = uniqueN(aisle_id),
user_distinct_depts = uniqueN(department_id)
), user_id][,':=' (user_pct_distinct_products = user_distinct_products / user_total_products,
user_pct_distinct_aisles = user_distinct_aisles / user_total_products,
user_pct_distinct_depts = user_distinct_depts / user_total_products)]
users <- merge(users, us, all=FALSE)
# Basket size: products per order, then mean/sd per user.
us <- op[,.(user_order_products = .N),.(user_id,order_id)][,.(
user_order_products_mean=mean(user_order_products),
user_order_products_sd=sd(user_order_products)
), user_id]
users <- merge(users, us, all=FALSE)
# Recency features over the user's last 3 prior orders.
# NOTE(review): the first aggregation groups by user_id only, so
# user_order_products_3 is the TOTAL over the last 3 orders and the outer
# mean() averages a single value per user. Grouping by .(user_id, order_id)
# first (as in the basket-size block above) was probably intended -- confirm.
us <- op[(last_prior_order-order_number)<=2, .(
user_order_products_3 = .N,
user_reorder_ratio_3=mean(reordered)
), .(user_id)][,.(
user_order_products_mean_last3 = mean(user_order_products_3),
user_reorder_ratio_last3=mean(user_reorder_ratio_3)
), user_id]
users <- merge(users, us, all=FALSE)
# Trend ratios (recent vs overall behavior); the raw last-3 columns are dropped.
users[, ':=' (
user_recent_orders_factor = user_order_products_mean_last3/user_order_products_mean,
user_recent_reorder_factor = user_reorder_ratio_last3 / user_reorder_ratio,
user_activity = ifelse(user_period==0,0,user_total_products/user_period),
user_order_products_mean_last3 = NULL,
user_reorder_ratio_last3 = NULL
)]
# Attach each user's single train/test order (context of the order to predict).
us <- ord[eval_set != "prior", .(
user_id,
order_id,
eval_set,
train_time_since_last_order = days_since_prior_order,
train_dow = order_dow,
train_how = order_hour_of_day,
train_ordernum = order_number,
train_season = order_day_year)]
setkey(users, user_id)
setkey(us, user_id)
users <- merge(users, us, all=FALSE)
rm(us)
gc()
# Departments -------------------------------------------------------------
# Department-level aggregates. NOTE(review): the merge of 'dep' into 'data'
# is commented out below, so these features are computed but never used.
dep <- op[,.(dept_total_products = .N,
dept_total_orders = uniqueN(order_id),
dept_total_users = uniqueN(user_id),
dept_reorder_times = sum(reordered),
dept_reorder_ratio = mean(reordered)), department_id][,':='
(dept_products_per_order = dept_total_products / dept_total_orders)]
# Database ----------------------------------------------------------------
# to do days until the product is first reordered
# One row per (user, product) candidate pair with purchase-history features.
data <- op[, .(
up_orders = .N,
up_first_order = min(order_number),
up_last_order = max(order_number),
up_last_order_dow = order_dow[order_number==max(order_number)],
up_last_order_hod = order_hour_of_day[order_number==max(order_number)],
up_avg_cart_position = mean(add_to_cart_order),
up_avg_days_since_reorder = mean(order_days_sum-order_days_lag,na.rm=T)),
.(user_id, product_id)]
setkey(users,user_id)
setkey(data,user_id)
data <- merge(data,users,all=FALSE)
# Look up the day-offset of the pair's LAST purchase (join on up_last_order).
data<-merge(data, ord[,.(user_id, order_number, order_days_sum)], all.x=TRUE, by.x=c("user_id", "up_last_order"), by.y=c("user_id", "order_number"))
# Days between the last purchase of this product and the order to predict.
data[,':=' (
up_days_since_last_reorder = user_period-order_days_sum+train_time_since_last_order,
order_days_sum=NULL
)]
setkey(prd,product_id)
setkey(data,product_id)
data <- merge(data,prd,all=FALSE)
rm(op, ord)
# How far the current gap deviates from the pair's typical reorder cadence.
data[,':=' (
up_diff_train_typical = abs(train_time_since_last_order-up_avg_days_since_reorder),
up_perc_diff_train_typical = train_time_since_last_order/up_avg_days_since_reorder
)]
#setkey(dep,department_id)
#setkey(data,department_id)
#data <- merge(data,dep,all=FALSE)
rm(prd, users, dep)
gc()
data[,':=' (up_order_rate = up_orders / user_orders,
up_orders_since_last_order = user_orders - up_last_order,
up_inpercent_afterfirst = up_orders / (user_orders - up_first_order + 1),
up_popularity = up_orders / user_total_products)]
# merge in train order
# Attach the train-order label; pairs absent from opt stay NA (filled below).
setkey(opt, user_id, product_id)
setkey(data, user_id, product_id)
data <- merge(data, opt[,.(user_id, product_id, reordered)], all.x=TRUE)
rm(opt)
gc()
# do it the data.table way
# Train / Test datasets ---------------------------------------------------
train <- data[eval_set == "train"]
train_userid <- train[,user_id]  # kept aside for user-level CV fold assignment
train[,':=' (eval_set=NULL, product_id=NULL)]
# Pairs never reordered in the train order are the negative class.
train[is.na(reordered), ':=' (reordered=0)]
test <-data[eval_set == "test"]
test[,':=' (eval_set=NULL, reordered=NULL)]
rm(data)
gc()
# Model fitting ---------------------------------------------------------
colnames(train)

# xgboost hyper-parameters. "reg:logistic" yields probabilities in [0, 1];
# AUC is monitored during boosting, while the custom F1 loop below picks
# the number of rounds.
params <- list(
  "objective" = "reg:logistic",
  "eval_metric" = "auc",
  "eta" = 0.1,
  "max_depth" = 6,
  "min_child_weight" = 10,
  "gamma" = 0.7,
  "subsample" = 0.95,
  "colsample_bytree" = 0.95
)

# Get the folds ---------------------------------
# 131,209 users in total. Sample a pool of n_fold * users_per_fold distinct
# users and split it into n_fold disjoint validation groups, so all rows of
# a user land in the same fold (no user leakage between train and val).
users_per_fold <- 5000
n_fold <- 3

val_users_random <- sample(unique(train_userid), size = n_fold*users_per_fold, replace = FALSE)
if (n_fold == 1) {
  val_user_groups <- 1
} else {
  # seq_along() instead of 1:length(x): safe even for empty input.
  val_user_groups <- cut(seq_along(val_users_random), n_fold, labels = FALSE)
}

# Preallocate the list instead of growing it inside the loop.
folds <- vector("list", n_fold)
for (i in seq_len(n_fold)) {
  # Row indices of train whose user belongs to validation group i.
  folds[[i]] <- which(train_userid %in% val_users_random[val_user_groups == i])
}
# Do the CV ------------------------------------
# Incremental boosting: train calc_f1_every_n rounds at a time (passing the
# previous booster via xgb_model) and score per-user F1 on the held-out fold
# after each step, to pick the best number of rounds.
threshold <- 0.20
n_rounds <- 80
calc_f1_every_n <- 10
res<-list()
res$f1 <- matrix(0,n_rounds/calc_f1_every_n,n_fold)
res$mean_reordered <- matrix(0,n_rounds/calc_f1_every_n,n_fold)
for (i in 1:length(folds)) {
cat('Training on fold', i,'...\n')
cv_train <- train[-folds[[i]],]
cv_val <- train[folds[[i]],]
# user_id / order_id / the label are dropped from the feature matrix.
dtrain <- xgb.DMatrix(data.matrix(select(cv_train,-user_id,-order_id,-reordered)),label=cv_train$reordered)
dval <- xgb.DMatrix(data.matrix(select(cv_val,-user_id,-order_id,-reordered)),label=cv_val$reordered)
watchlist <- list(train=dtrain, val=dval)
train_users <- cv_train$user_id
for (j in 1:(n_rounds/calc_f1_every_n)){
if (j==1){
bst <- xgb.train(params,dtrain,calc_f1_every_n, watchlist=watchlist) # first boosting iteration
} else {
bst <- xgb.train(params,dtrain,calc_f1_every_n, watchlist=watchlist, xgb_model=bst) # incremental boost
}
pred<-predict(bst,dval)
y <- getinfo(dval,'label')
valid_users <- cv_val$user_id
valid_orders <- cv_val$order_id
# Per-user F1 at the fixed probability threshold; f1() is a helper defined
# elsewhere in the original script.
dt <- data.table(user_id=valid_users, order_id=valid_orders, y=y, pred=pred, ypred=(pred>threshold)*1)
f1_score <- dt[,.(f1score = f1(y,ypred)), user_id][,.(f1_mean=mean(f1score))]
cat('val-f1: ', f1_score$f1_mean, 'mean sum_pred: ',dt[,.(sp = sum(ypred)),user_id][,.(mean_sp = mean(sp))]$mean_sp, '\n')
res$f1[j,i] <- f1_score$f1_mean
# NOTE(review): mean(cv_val$reordered) does not depend on j, so every row
# of this column is identical -- recorded per step anyway.
res$mean_reordered[j,i] <- mean(cv_val$reordered)
}
}
# Mean/sd of F1 across folds per boosting step; best step decides n_rounds.
results <- data.frame(m=rowMeans(res$f1),sd=apply(res$f1,1,sd),res$f1, res$mean_reordered)
results
best_iter <- which.max(results$m)*calc_f1_every_n
n_rounds <- best_iter
# Fit the Model to all training data -------------------------------------
# NOTE(review): unlike the CV folds above, order_id is NOT excluded here, so
# it enters the final model as a feature -- confirm this is intentional.
dtrain <- xgb.DMatrix(as.matrix(train %>% select(-user_id,-reordered)), label = train$reordered)
watchlist <- list(train = dtrain)
model <- xgb.train(data = dtrain, params = params, nrounds = n_rounds, watchlist=watchlist)
# Feature importance plot (gain per feature, sorted).
importance <- xgb.importance(colnames(dtrain), model = model)
#xgb.ggplot.importance(importance)+theme(axis.text.y = element_text(hjust = 0))
ggplot(importance,aes(y=Gain,x=reorder(Feature,Gain)))+geom_bar(stat="identity")+coord_flip()
# Look at predictions ---------------------------------------------------------------
# In-sample predictions; train_info is defined elsewhere in the original script.
train<-as.data.table(train)
setkey(train, user_id)
train <- merge(train,train_info, all.x=TRUE)
train$prediction <- predict(model,dtrain)
train <- train[order(user_id,-prediction)]
train[,':=' (top = 1:.N), user_id]  # rank of each product within the user
# Summed probabilities vs actual number of reorders per user.
ttmp <- train[,.(sp=sum(prediction),sr=sum(reordered)),user_id]
ggplot(ttmp,aes(sp,sr))+geom_point()+geom_abline(slope=1)
cor(ttmp$sp,ttmp$sr)
# choose a threshold -----------------------------------------------------------
# NOTE(review): the next three lines each overwrite 'pred'; only the LAST
# assignment (top <= sum_reordered) takes effect. Comment out the unwanted
# rules when experimenting.
train[, ':=' (pred=ifelse(top<=user_order_products_2*user_reorder_rate_2,1,0))]
train[, ':=' (pred=ifelse(prediction>0.18,1,0))] # fixed threshold
train[, ':=' (pred=ifelse(top<=sum_reordered,1,0))]
summary(glm(reordered ~ prediction, data=train))
# Per-user F1 of the chosen rule; f1() and train_info come from elsewhere in
# the original script.
tmp <- train %>% group_by(user_id) %>% summarise(f1=f1(reordered, pred), sum_predicted=sum(pred)) %>% left_join(train_info)
tmp <- tmp %>% mutate(diff_basket_size = sum_reordered-sum_predicted)
#ggplot(tmp,aes(x=sum_predicted,y=f1))+geom_point()+geom_smooth()
#ggplot(tmp,aes(x=sum_reordered/sum_products,y=f1))+geom_point()+geom_smooth()
tmp %>% ungroup() %>% summarize(meanf1 = mean(f1)) %>% .[[1]]
# NOTE(review): cv_val/dval here are leftovers from the LAST CV fold, so this
# "validation" was partly seen by the final model -- treat with care.
cv_val$prediction <- predict(model, dval)
# Threshold ---------------------------------------------------------------
cv_val <- cv_val[order(user_id,-prediction)]
cv_val[,':=' (top = 1:.N), user_id]
ttmp <- cv_val[,.(sp=sum(prediction),sr=sum(reordered)),user_id]
ggplot(ttmp,aes(sp,sr))+geom_point()+geom_abline(slope=1)+geom_smooth(method="lm")
cor(ttmp$sp,ttmp$sr)
# NOTE(review): 'val' is not defined anywhere in this script -- presumably
# cv_val was meant, and a 'pred' column would have to be created first (as
# was done for 'train' above). Confirm before running.
summary(glm(reordered ~ prediction, data=val))
tmp <- val %>% group_by(user_id) %>% summarise(f1=f1(reordered, pred), sum_predicted=sum(pred), avg_size=mean(user_average_basket)) %>% left_join(train_info)
tmp <- tmp %>% mutate(diff_basket_size = sum_reordered-sum_predicted)
#ggplot(tmp,aes(x=sum_predicted,y=f1))+geom_point()+geom_smooth()
#ggplot(tmp,aes(x=sum_reordered/sum_products,y=f1))+geom_point()+geom_smooth()
tmp %>% ungroup() %>% summarize(meanf1 = mean(f1)) %>% .[[1]]
rm(importance)
gc()
# Apply model to test data ------------------------------------------------
dtest <- xgb.DMatrix(as.matrix(test[,-c("user_id","product_id"),with=FALSE]))
test$pred <- predict(model, dtest)
# Threshold ---------------------------------------------------------------
# Rank products per user, estimate basket size as the summed probabilities,
# and pad very large baskets.
test <- test[order(user_id,-pred)]
test[,':=' (top = 1:.N), user_id]
test[,':=' (pred_basket = sum(pred)), user_id]
test[pred_basket>30,':=' (adapted_basket = pred_basket+5)]
test[pred_basket<=30,':=' (adapted_basket = pred_basket)]
test[, ':=' (reordered=ifelse(top<=adapted_basket,1,0))]
#test[, ':=' (reordered=ifelse(top<=user_average_basket*user_reorder_ratio,1,0))]
#close_orders <- test %>% group_by(order_id) %>% summarize(m=mean(reordered),mx=max(reordered),s=sum(reordered>threshold)) %>% filter(between(m,0.9*threshold,1.1*threshold) & s <= 5 & mx <= 0.35) %>% select(order_id) %>% .[[1]]
# NOTE(review): this line overwrites the adaptive-basket labels above with a
# fixed 0.2 probability cut, making the adapted_basket logic dead code.
test[,reordered:=(pred>0.2)*1]
# all reorderes to 1 -----------------------------------------------------
#test[user_id %in% reorder_users, ':=' (reordered=1)]
# One submission row per order: space-separated predicted product ids.
submission <- test[reordered==1,.(products = paste(product_id, collapse = " ")), order_id]
# add None to close orders -----------------------------------------------
#new_submission <- submission %>% mutate(products = ifelse(order_id %in% close_orders, str_c(products,'None', collapse = " "),products))
# Orders with no predicted reorders must still appear in the file as "None".
missing <- data.table(
order_id = unique(test$order_id[!test$order_id %in% submission$order_id]),
products = "None"
)
submission <- rbindlist(list(submission, missing))
# add none to close orders
# NOTE(review): 'dt' is the per-fold table left over from the CV loop, not
# the test predictions, and 'close_orders' is computed but never applied to
# the submission below -- confirm whether this step is still wanted.
tmpp <- dt[order(-pred)][,omp := 1-pred][,p_none := prod(omp), order_id]
close_orders <- tmpp[p_none>0.2]$order_id
submission[]
fwrite(submission[order(order_id)], file = "submit.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stload.R
\name{STLoad}
\alias{STLoad}
\title{STLoad}
\usage{
STLoad(f, species = c("hs", "mm"))
}
\arguments{
\item{f}{path to ST data file}
\item{species}{either "hs" or "mm", used for gene annotation}
}
\description{
Load ST data file, extract coordinates, and annotate genes
}
| /man/STLoad.Rd | no_license | VanAndelInstitute/telescope | R | false | true | 359 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stload.R
\name{STLoad}
\alias{STLoad}
\title{STLoad}
\usage{
STLoad(f, species = c("hs", "mm"))
}
\arguments{
\item{f}{path to ST data file}
\item{species}{either "hs" or "mm", used for gene annotation}
}
\description{
Load ST data file, extract coordinates, and annotate genes
}
|
#' @include FacebookGenericCollection.R
#' @export
#'
#' @title
#' Build a collection of Facebook users
#'
#' @description
#' Connect to Facebook Graph API, get public information from a list of Facebook users
#' and build a \code{\link{FacebookUsersCollection-class}} instance.
#'
#' @details
#' \code{FacebookUsersCollection} is the constructor for the \code{\link{FacebookUsersCollection-class}}.
#' It returns data about users.
#'
#' After version 2.0 of the Facebook API, only id, name, and picture are available
#' through the API as public information. All the remaining fields will be missing unless the Application asks
#' for specific permissions.
#'
#' @template nesting-fields
#'
#' @section Valid sources:
#' Instead of a character vector, one of these collections can also be passed as parameter in \code{id}:
#' \itemize{
#' \item{\code{\link{FacebookUsersCollection-class}} will build a collection with
#' the friends of the users of the source collection. It assumes these users
#' have granted the \code{user_friends} permission to the current application.}
#' \item{\code{\link{FacebookPostsCollection-class}} will build a collection from
#' the authors of the posts of the source collection.}
#' \item{\code{\link{FacebookCommentsCollection-class}} will build a collection from
#' the authors of the comments of the source collection.}
#' \item{\code{\link{FacebookLikesCollection-class}} will build a collection from
#' the authors of the likes of the source collection.}
#' \item{\code{\link{FacebookUsersCollection-class}} will build a collection with
#' the posts written on the walls of the users in the source collection.}
#' \item{\code{\link{FacebookGroupsCollection-class}} will build a collection with
#' the members of the groups in the source collection.}
#' \item{\code{\link{FacebookMixedCollection-class}} will build a collection with
#' only the user elements of the source collection.}
#' }
#'
#' Be careful when binding this kind of collection starting from a \code{\link{FacebookPostsCollection}}, \code{\link{FacebookCommentsCollection}}
#' or a \code{\link{FacebookLikesCollection}}.
#'
#' In Facebook, one can publish, comment or like acting as a user or as a page. But since users and pages have different sets of fields
#' and you won't know in advance if the author is a page or not, the constructor of this collection would fail due to inconsistent fields.
#'
#' To avoid this, if \code{id} is an instance of one of the aforementioned collections, a serialization query is performed
#' first to filter out the pages and retain only the users. Finally, the real collection is built on this valid subset of user IDs only.
#' This requires more queries and, usually, more time.
#'
#' @author
#' Gabriele Baldassarre \url{https://gabrielebaldassarre.com}
#'
#' @seealso \code{\link{FacebookPostsCollection}},
#' \code{\link{FacebookCommentsCollection}},
#' \code{\link{FacebookLikesCollection}},
#' \code{\link{facebook.search}}
#'
#' @inheritParams FacebookGenericCollection
#'
#' @param n If \code{id} is an iterable collection, then \code{n} is the maximum number of users to be pulled for each element of the source collection
#' in \code{id}. It can be set to \code{Inf} to pull out any accessible user and assumes the default value from the value
#' of \code{facebook.maxitems} global option if missing. If \code{id} is not a collection or cannot be iterated, the parameter is ignored.
#'
#' @return A collection of users in a \code{\link{FacebookUsersCollection-class}} object.
#'
#' @examples \dontrun{
#' ## See examples for fbOAuth to know how token was created.
#' load("fb_oauth")
#'
#' ## Getting information about 9th Circle Games' Facebook Page
#' fb.pages <- FacebookPagesCollection(id = c("9thcirclegames",
#' "NathanNeverSergioBonelliEditore"),
#' token = fb_oauth)
#'
#' ## Getting the commenters of the latest 10 posts
#' fb.comments <- fb.pages %>% FacebookPostsCollection(n = 10) %>%
#' FacebookCommentsCollection(fields=c("id",
#' "from.fields(id,name)"),
#' n = Inf)
#'
#' ## Build a collection of users from who actually commented those posts
#' fb.commenting.users <- fb.comments %>% FacebookUsersCollection()
#'
#' ## Convert the collection to a data frame
#' fb.commenting.df <- as.data.frame(fb.commenting.users)
#' }
#'
#' @family Facebook Collection Constructors
#' @importFrom plyr create_progress_bar progress_none
FacebookUsersCollection <- function(id,
                                    token = NULL,
                                    parameters = list(),
                                    fields = c("id",
                                               "name",
                                               "first_name",
                                               "last_name",
                                               "gender",
                                               "locale",
                                               "picture.fields(url).type(large)"),
                                    n = getOption("facebook.maxitems"),
                                    metadata = FALSE,
                                    .progress = create_progress_bar()){

  # Fall back to pulling only the ID when no usable field was requested.
  # '||' (scalar, short-circuiting) replaces the elementwise '|' originally
  # used inside 'if'; both operands are length one, so behavior is unchanged.
  if(length(fields)==0 || all(nchar(fields)==0)){
    message("You've specified no fields. Only the ID will be pulled into the collection.")
    fields <- "id"
  }

  # Per-element page size: the requested n, capped at the Graph API
  # pagination limit. min() replaces the original anonymous-function
  # comparison and handles n = Inf transparently.
  real.n <- min(n, getOption("facebook.pagination"))

  # Field spec used when 'id' is an atomic vector or a user-convertible
  # collection: the requested fields plus a zero-length friends summary.
  e.fields <- paste(paste0(fields, collapse=","), "friends.summary(true).limit(0)", sep=",")

  # Builds the nested-field spec that pulls a connection ('friends' or
  # 'members') of each source element. The redundant sep="" arguments
  # passed to paste0() in the original were no-ops and are dropped.
  nested.fields <- function(connection) {
    paste0(connection, ".fields(", paste0(fields, collapse=","), ").limit(", real.n, ").summary(true)")
  }

  # Friends of the users in the source collection (requires the
  # user_friends permission, see the roxygen docs above).
  if(is(id, "FacebookUsersCollection")){
    return(new("FacebookUsersCollection", id = id, token = token, parameters = parameters, fields = nested.fields("friends"), n = n, metadata = metadata, .progress = .progress))
  }

  # Members of the groups in the source collection.
  if(is(id, "FacebookGroupsCollection")){
    return(new("FacebookUsersCollection", id = id, token = token, parameters = parameters, fields = nested.fields("members"), n = n, metadata = metadata, .progress = .progress))
  }

  # Posts/comments/likes: build the collection from the content authors.
  if(is(id, "FacebookPostsCollection") || is(id, "FacebookCommentsCollection") || is(id, "FacebookLikesCollection")){
    indexes.df <- as.data.frame(id)
    if(!("from.id" %in% colnames(indexes.df))){
      stop(paste0("you cannot build a users collection from a ", class(id), " if the source has not the 'from.id' field inside."))
    }
    # Authors may be pages, whose field set differs from users'. Serialize
    # the IDs first (metadata = TRUE exposes the node type) and keep only
    # genuine users before building the real collection.
    users.id <- new("FacebookMixedCollection",
                    id = unique(indexes.df$from.id),
                    token = id@token,
                    parameters = parameters,
                    fields = "id",
                    n = n,
                    metadata = TRUE)
    users <- new("FacebookUsersCollection",
                 id = unique(users.id[which(users.id@type=="user")]@id),
                 token = id@token,
                 parameters = parameters,
                 fields = e.fields,
                 n = n,
                 metadata = metadata,
                 .progress = .progress)
    users@parent.collection <- id
    return(users)
  }

  # Any other collection type cannot be converted into users.
  if(is(id, "FacebookGenericCollection")){
    stop(paste0("you cannot build a users collection starting from a ", class(id), "."))
  }

  # Atomic character IDs.
  return(new("FacebookUsersCollection",
             id = id,
             token = token,
             parameters = parameters,
             fields = e.fields,
             metadata = metadata,
             .progress = .progress))
}
| /R/FacebookUsersCollection.R | no_license | cran/facebook.S4 | R | false | false | 8,205 | r | #' @include FacebookGenericCollection.R
#' @export
#'
#' @title
#' Build a collection of Facebook users
#'
#' @description
#' Connect to Facebook Graph API, get public information from a list of Facebook users
#' and build a \code{\link{FacebookUsersCollection-class}} instance.
#'
#' @details
#' \code{FacebookUsersCollection} is the constructor for the \code{\link{FacebookUsersCollection-class}}.
#' It returns data about users.
#'
#' After version 2.0 of the Facebook API, only id, name, and picture are available
#' through the API as public information. All the remaining fields will be missing unless the Application asks
#' for specific permissions.
#'
#' @template nesting-fields
#'
#' @section Valid sources:
#' Instead of a character vector, one of these collections can also be passed as parameter in \code{id}:
#' \itemize{
#' \item{\code{\link{FacebookUsersCollection-class}} will build a collection with
#' the friends of the users of the source collection. It assumes these users
#' have granted the \code{user_friends} permission to the current application.}
#' \item{\code{\link{FacebookPostsCollection-class}} will build a collection from
#' the authors of the posts of the source collection.}
#' \item{\code{\link{FacebookCommentsCollection-class}} will build a collection from
#' the authors of the comments of the source collection.}
#' \item{\code{\link{FacebookLikesCollection-class}} will build a collection from
#' the authors of the likes of the source collection.}
#' \item{\code{\link{FacebookUsersCollection-class}} will build a collection with
#' the posts written on the walls of the users in the source collection.}
#' \item{\code{\link{FacebookGroupsCollection-class}} will build a collection with
#' the members of the groups in the source collection.}
#' \item{\code{\link{FacebookMixedCollection-class}} will build a collection with
#' only the user elements of the source collection.}
#' }
#'
#' Be careful when binding this kind of collection starting from a \code{\link{FacebookPostsCollection}}, \code{\link{FacebookCommentsCollection}}
#' or a \code{\link{FacebookLikesCollection}}.
#'
#' In Facebook, one can publish, comment or like acting as a user or as a page. But since users and pages have different sets of fields
#' and you won't know in advance if the author is a page or not, the constructor of this collection would fail due to inconsistent fields.
#'
#' To avoid this, if \code{id} is an instance of one of the aforementioned collections, a serialization query is performed
#' first to filter out the pages and retain only the users. Finally, the real collection is built on this valid subset of user IDs only.
#' This requires more queries and, usually, more time.
#'
#' @author
#' Gabriele Baldassarre \url{https://gabrielebaldassarre.com}
#'
#' @seealso \code{\link{FacebookPostsCollection}},
#' \code{\link{FacebookCommentsCollection}},
#' \code{\link{FacebookLikesCollection}},
#' \code{\link{facebook.search}}
#'
#' @inheritParams FacebookGenericCollection
#'
#' @param n If \code{id} is an iterable collection, then \code{n} is the maximum number of users to be pulled for each element of the source collection
#' in \code{id}. It can be set to \code{Inf} to pull out any accessible user and assumes the default value from the value
#' of \code{facebook.maxitems} global option if missing. If \code{id} is not a collection or cannot be iterated, the parameter is ignored.
#'
#' @return A collection of users in a \code{\link{FacebookUsersCollection-class}} object.
#'
#' @examples \dontrun{
#' ## See examples for fbOAuth to know how token was created.
#' load("fb_oauth")
#'
#' ## Getting information about 9th Circle Games' Facebook Page
#' fb.pages <- FacebookPagesCollection(id = c("9thcirclegames",
#' "NathanNeverSergioBonelliEditore"),
#' token = fb_oauth)
#'
#' ## Getting the commenters of the latest 10 posts
#' fb.comments <- fb.pages %>% FacebookPostsCollection(n = 10) %>%
#' FacebookCommentsCollection(fields=c("id",
#' "from.fields(id,name)"),
#' n = Inf)
#'
#' ## Build a collection of users from who actually commented those posts
#' fb.commenting.users <- fb.comments %>% FacebookUsersCollection()
#'
#' ## Convert the collection to a data frame
#' fb.commenting.df <- as.data.frame(fb.commenting.users)
#' }
#'
#' @family Facebook Collection Constructors
#' @importFrom plyr create_progress_bar progress_none
FacebookUsersCollection <- function(id,
                                    token = NULL,
                                    parameters = list(),
                                    fields = c("id",
                                               "name",
                                               "first_name",
                                               "last_name",
                                               "gender",
                                               "locale",
                                               "picture.fields(url).type(large)"),
                                    n = getOption("facebook.maxitems"),
                                    metadata = FALSE,
                                    .progress = create_progress_bar()){

  # Fall back to pulling only the ID when no usable field was requested.
  # '||' (scalar, short-circuiting) replaces the elementwise '|' originally
  # used inside 'if'; both operands are length one, so behavior is unchanged.
  if(length(fields)==0 || all(nchar(fields)==0)){
    message("You've specified no fields. Only the ID will be pulled into the collection.")
    fields <- "id"
  }

  # Per-element page size: the requested n, capped at the Graph API
  # pagination limit. min() replaces the original anonymous-function
  # comparison and handles n = Inf transparently.
  real.n <- min(n, getOption("facebook.pagination"))

  # Field spec used when 'id' is an atomic vector or a user-convertible
  # collection: the requested fields plus a zero-length friends summary.
  e.fields <- paste(paste0(fields, collapse=","), "friends.summary(true).limit(0)", sep=",")

  # Builds the nested-field spec that pulls a connection ('friends' or
  # 'members') of each source element. The redundant sep="" arguments
  # passed to paste0() in the original were no-ops and are dropped.
  nested.fields <- function(connection) {
    paste0(connection, ".fields(", paste0(fields, collapse=","), ").limit(", real.n, ").summary(true)")
  }

  # Friends of the users in the source collection (requires the
  # user_friends permission, see the roxygen docs above).
  if(is(id, "FacebookUsersCollection")){
    return(new("FacebookUsersCollection", id = id, token = token, parameters = parameters, fields = nested.fields("friends"), n = n, metadata = metadata, .progress = .progress))
  }

  # Members of the groups in the source collection.
  if(is(id, "FacebookGroupsCollection")){
    return(new("FacebookUsersCollection", id = id, token = token, parameters = parameters, fields = nested.fields("members"), n = n, metadata = metadata, .progress = .progress))
  }

  # Posts/comments/likes: build the collection from the content authors.
  if(is(id, "FacebookPostsCollection") || is(id, "FacebookCommentsCollection") || is(id, "FacebookLikesCollection")){
    indexes.df <- as.data.frame(id)
    if(!("from.id" %in% colnames(indexes.df))){
      stop(paste0("you cannot build a users collection from a ", class(id), " if the source has not the 'from.id' field inside."))
    }
    # Authors may be pages, whose field set differs from users'. Serialize
    # the IDs first (metadata = TRUE exposes the node type) and keep only
    # genuine users before building the real collection.
    users.id <- new("FacebookMixedCollection",
                    id = unique(indexes.df$from.id),
                    token = id@token,
                    parameters = parameters,
                    fields = "id",
                    n = n,
                    metadata = TRUE)
    users <- new("FacebookUsersCollection",
                 id = unique(users.id[which(users.id@type=="user")]@id),
                 token = id@token,
                 parameters = parameters,
                 fields = e.fields,
                 n = n,
                 metadata = metadata,
                 .progress = .progress)
    users@parent.collection <- id
    return(users)
  }

  # Any other collection type cannot be converted into users.
  if(is(id, "FacebookGenericCollection")){
    stop(paste0("you cannot build a users collection starting from a ", class(id), "."))
  }

  # Atomic character IDs.
  return(new("FacebookUsersCollection",
             id = id,
             token = token,
             parameters = parameters,
             fields = e.fields,
             metadata = metadata,
             .progress = .progress))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fviz_cluster.R
\name{fviz_cluster}
\alias{fviz_cluster}
\title{Visualize Clustering Results}
\usage{
fviz_cluster(object, data = NULL, choose.vars = NULL, stand = TRUE,
axes = c(1, 2), geom = c("point", "text"), repel = FALSE,
show.clust.cent = TRUE, ellipse = TRUE, ellipse.type = "convex",
ellipse.level = 0.95, ellipse.alpha = 0.2, shape = NULL,
pointsize = 1.5, labelsize = 12, main = "Cluster plot", xlab = NULL,
ylab = NULL, outlier.color = "black", outlier.shape = 19,
ggtheme = theme_grey(), ...)
}
\arguments{
\item{object}{an object of class "partition" created by the functions pam(),
clara() or fanny() in cluster package; "kmeans" [in stats package]; "dbscan"
[in fpc package]; "Mclust" [in mclust]; "hkmeans", "eclust" [in factoextra].
Possible value are also any list object with data and cluster components
(e.g.: object = list(data = mydata, cluster = myclust)).}
\item{data}{the data that has been used for clustering. Required only when
object is a class of kmeans or dbscan.}
\item{choose.vars}{a character vector containing variables to be considered for plotting.}
\item{stand}{logical value; if TRUE, data is standardized before principal
component analysis}
\item{axes}{a numeric vector of length 2 specifying the dimensions to be plotted.}
\item{geom}{a text specifying the geometry to be used for the graph. Allowed
values are the combination of c("point", "text"). Use "point" (to show only
points); "text" to show only labels; c("point", "text") to show both types.}
\item{repel}{a boolean, whether to use ggrepel to avoid overplotting text
labels or not.}
\item{show.clust.cent}{logical; if TRUE, shows cluster centers}
\item{ellipse}{logical value; if TRUE, draws outline around points of
each cluster}
\item{ellipse.type}{Character specifying frame type. Possible
values are 'convex', 'confidence' or types supported by
\code{\link[ggplot2]{stat_ellipse}} including one of c("t", "norm",
"euclid").}
\item{ellipse.level}{the size of the concentration ellipse in
normal probability. Passed for \code{ggplot2::stat_ellipse} 's level.
Ignored in 'convex'. Default value is 0.95.}
\item{ellipse.alpha}{Alpha for frame specifying the transparency
level of fill color. Use alpha = 0 for no fill color.}
\item{shape}{the shape of points.}
\item{pointsize}{the size of points}
\item{labelsize}{font size for the labels}
\item{main}{plot main title.}
\item{xlab, ylab}{character vector specifying x and y axis labels,
respectively. Use xlab = FALSE and ylab = FALSE to hide xlab and ylab,
respectively.}
\item{outlier.color, outlier.shape}{the color and the shape of outliers.
Outliers can be detected only in DBSCAN clustering.}
\item{ggtheme}{function, ggplot2 theme name. Default value is theme_pubr().
Allowed values include ggplot2 official themes: theme_gray(), theme_bw(),
theme_minimal(), theme_classic(), theme_void(), ....}
\item{...}{other arguments to be passed to the functions
\code{\link[ggpubr]{ggscatter}} and \code{\link[ggpubr]{ggpar}}.}
}
\value{
A ggplot object.
}
\description{
Provides ggplot2-based elegant visualization of partitioning
methods including kmeans [stats package]; pam, clara and fanny [cluster
package]; dbscan [fpc package]; Mclust [mclust package]; HCPC [FactoMineR];
hkmeans [factoextra]. Observations are represented by points in the plot,
using principal components if ncol(data) > 2. An ellipse is drawn around
each cluster.
}
\examples{
set.seed(123)
# Data preparation
# +++++++++++++++
data("iris")
head(iris)
# Remove species column (5) and scale the data
iris.scaled <- scale(iris[, -5])
# K-means clustering
# +++++++++++++++++++++
km.res <- kmeans(iris.scaled, 3, nstart = 10)
# Visualize kmeans clustering
# use repel = TRUE to avoid overplotting
fviz_cluster(km.res, iris[, -5], ellipse.type = "norm")
# Change the color palette and theme
fviz_cluster(km.res, iris[, -5],
palette = "Set2", ggtheme = theme_minimal())
\dontrun{
# Show points only
fviz_cluster(km.res, iris[, -5], geom = "point")
# Show text only
fviz_cluster(km.res, iris[, -5], geom = "text")
# PAM clustering
# ++++++++++++++++++++
require(cluster)
pam.res <- pam(iris.scaled, 3)
# Visualize pam clustering
fviz_cluster(pam.res, geom = "point", ellipse.type = "norm")
# Hierarchical clustering
# ++++++++++++++++++++++++
# Use hcut() which compute hclust and cut the tree
hc.cut <- hcut(iris.scaled, k = 3, hc_method = "complete")
# Visualize dendrogram
fviz_dend(hc.cut, show_labels = FALSE, rect = TRUE)
# Visualize cluster
fviz_cluster(hc.cut, ellipse.type = "convex")
}
}
\seealso{
\code{\link{fviz_silhouette}}, \code{\link{hcut}},
\code{\link{hkmeans}}, \code{\link{eclust}}, \code{\link{fviz_dend}}
}
\author{
Alboukadel Kassambara \email{alboukadel.kassambara@gmail.com}
}
| /man/fviz_cluster.Rd | no_license | nikolayvoronchikhin/factoextra | R | false | true | 4,885 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fviz_cluster.R
\name{fviz_cluster}
\alias{fviz_cluster}
\title{Visualize Clustering Results}
\usage{
fviz_cluster(object, data = NULL, choose.vars = NULL, stand = TRUE,
axes = c(1, 2), geom = c("point", "text"), repel = FALSE,
show.clust.cent = TRUE, ellipse = TRUE, ellipse.type = "convex",
ellipse.level = 0.95, ellipse.alpha = 0.2, shape = NULL,
pointsize = 1.5, labelsize = 12, main = "Cluster plot", xlab = NULL,
ylab = NULL, outlier.color = "black", outlier.shape = 19,
ggtheme = theme_grey(), ...)
}
\arguments{
\item{object}{an object of class "partition" created by the functions pam(),
clara() or fanny() in cluster package; "kmeans" [in stats package]; "dbscan"
[in fpc package]; "Mclust" [in mclust]; "hkmeans", "eclust" [in factoextra].
Possible value are also any list object with data and cluster components
(e.g.: object = list(data = mydata, cluster = myclust)).}
\item{data}{the data that has been used for clustering. Required only when
object is a class of kmeans or dbscan.}
\item{choose.vars}{a character vector containing variables to be considered for plotting.}
\item{stand}{logical value; if TRUE, data is standardized before principal
component analysis}
\item{axes}{a numeric vector of length 2 specifying the dimensions to be plotted.}
\item{geom}{a text specifying the geometry to be used for the graph. Allowed
values are the combination of c("point", "text"). Use "point" (to show only
points); "text" to show only labels; c("point", "text") to show both types.}
\item{repel}{a boolean, whether to use ggrepel to avoid overplotting text
labels or not.}
\item{show.clust.cent}{logical; if TRUE, shows cluster centers}
\item{ellipse}{logical value; if TRUE, draws outline around points of
each cluster}
\item{ellipse.type}{Character specifying frame type. Possible
values are 'convex', 'confidence' or types supported by
\code{\link[ggplot2]{stat_ellipse}} including one of c("t", "norm",
"euclid").}
\item{ellipse.level}{the size of the concentration ellipse in
normal probability. Passed for \code{ggplot2::stat_ellipse} 's level.
Ignored in 'convex'. Default value is 0.95.}
\item{ellipse.alpha}{Alpha for frame specifying the transparency
level of fill color. Use alpha = 0 for no fill color.}
\item{shape}{the shape of points.}
\item{pointsize}{the size of points}
\item{labelsize}{font size for the labels}
\item{main}{plot main title.}
\item{xlab, ylab}{character vector specifying x and y axis labels,
respectively. Use xlab = FALSE and ylab = FALSE to hide xlab and ylab,
respectively.}
\item{outlier.color, outlier.shape}{the color and the shape of outliers.
Outliers can be detected only in DBSCAN clustering.}
\item{ggtheme}{function, ggplot2 theme name. Default value is theme_pubr().
Allowed values include ggplot2 official themes: theme_gray(), theme_bw(),
theme_minimal(), theme_classic(), theme_void(), ....}
\item{...}{other arguments to be passed to the functions
\code{\link[ggpubr]{ggscatter}} and \code{\link[ggpubr]{ggpar}}.}
}
\value{
A ggplot object.
}
\description{
Provides ggplot2-based elegant visualization of partitioning
methods including kmeans [stats package]; pam, clara and fanny [cluster
package]; dbscan [fpc package]; Mclust [mclust package]; HCPC [FactoMineR];
hkmeans [factoextra]. Observations are represented by points in the plot,
using principal components if ncol(data) > 2. An ellipse is drawn around
each cluster.
}
\examples{
set.seed(123)
# Data preparation
# +++++++++++++++
data("iris")
head(iris)
# Remove species column (5) and scale the data
iris.scaled <- scale(iris[, -5])
# K-means clustering
# +++++++++++++++++++++
km.res <- kmeans(iris.scaled, 3, nstart = 10)
# Visualize kmeans clustering
# use repel = TRUE to avoid overplotting
fviz_cluster(km.res, iris[, -5], ellipse.type = "norm")
# Change the color palette and theme
fviz_cluster(km.res, iris[, -5],
palette = "Set2", ggtheme = theme_minimal())
\dontrun{
# Show points only
fviz_cluster(km.res, iris[, -5], geom = "point")
# Show text only
fviz_cluster(km.res, iris[, -5], geom = "text")
# PAM clustering
# ++++++++++++++++++++
require(cluster)
pam.res <- pam(iris.scaled, 3)
# Visualize pam clustering
fviz_cluster(pam.res, geom = "point", ellipse.type = "norm")
# Hierarchical clustering
# ++++++++++++++++++++++++
# Use hcut() which compute hclust and cut the tree
hc.cut <- hcut(iris.scaled, k = 3, hc_method = "complete")
# Visualize dendrogram
fviz_dend(hc.cut, show_labels = FALSE, rect = TRUE)
# Visualize cluster
fviz_cluster(hc.cut, ellipse.type = "convex")
}
}
\seealso{
\code{\link{fviz_silhouette}}, \code{\link{hcut}},
\code{\link{hkmeans}}, \code{\link{eclust}}, \code{\link{fviz_dend}}
}
\author{
Alboukadel Kassambara \email{alboukadel.kassambara@gmail.com}
}
|
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# EU strategy objective 4 (so4) - Mitigating and adapting to climate change
# +001 DOWNLOAD DATA
# +002 PREPARE & READ DATA
# 4.1.a.3 - Net GHG emissions (emissions and removals) from agriculture
# 4.1.a.6 - Net GHG emissions (emissions and removals) from LULUCF
# 4.1.b.3 - Water exploitation index (WEI)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Load configuration variables
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
library(dplyr)
library(tidyr)
library(purrr)
#install.packages("corrr")
library(corrr)
library(ggplot2)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +001 DOWNLOAD DATA
#BitBucket repository : Bioeconomy Knowledge Centre / biomon-data
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#clone repository
#on the command prompt:
#git clone https://citnet.tech.ec.europa.eu/CITnet/stash/scm/beo/biomon-data.git
#cd folder-in-which-you-download-data
#git checkout --track origin/develop
# +002 PREPARE & READ DATA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# - 4.1.a.3 - Net GHG emissions (emissions and removals) from agriculture
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
GHG_agri <- read.csv(paste0(cordir_in,"output_data/indicators/4.1.a.3/4.1.a.3.csv"), stringsAsFactors = FALSE)
GHG_agri
names(GHG_agri)
names(GHG_agri)[3] <- "GHG_agri"
names(GHG_agri)[4] <- "unit_GHG_agri"
unique(GHG_agri$geo_code)
#If the dataframe is not taken entirely, R needs to be forced to read further
options(max.print = 1000 * ncol(GHG_agri))
GHG_agri <- GHG_agri %>%
filter(geo_code != "EU27_2020") %>% filter(geo_code != "EU28")
levels(as.factor(GHG_agri$unit.GHG_agri))
# - 4.1.a.6 - Net GHG emissions (emissions and removals) from LULUCF
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
GHG_lulucf <- read.csv(paste0(cordir_in,"output_data/indicators/4.1.a.6/4.1.a.6.csv"), stringsAsFactors = FALSE)
GHG_lulucf
names(GHG_lulucf)
names(GHG_lulucf)[3] <- "GHG_lulucf"
names(GHG_lulucf)[4] <- "unit_GHG_lulucf"
unique(GHG_lulucf$geo_code)
options(max.print = 1000 * ncol(GHG_lulucf))
GHG_lulucf <- GHG_lulucf %>%
filter(geo_code != "EU27_2020") %>% filter(geo_code != "EU28")
levels(as.factor(GHG_lulucf$unit_GHG_lulucf))
# - 4.1.b.3 - Water exploitation index (WEI)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
WEI <- read.csv(paste0(cordir_in, "output_data/indicators/4.1.b.3/4.1.b.3.csv"), stringsAsFactors = FALSE)
str(WEI)
names(WEI)
names(WEI)[3] <- "WEI"
names(WEI)[4] <- "unit_WEI"
unique(WEI$geo_code)
options(max.print = 1000 * ncol(WEI))
WEI <- WEI %>%
filter(geo_code != "EU27_2020")
levels(as.factor(WEI$unit_WEI)) | /Documents/clone/r_scripts/s1_in_so4.R | no_license | sarahwertz/JRC-bioeconomy-MonitSyst-indicators | R | false | false | 2,975 | r | # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# EU strategy objective 4 (so4) - Mitigating and adapting to climate change
# +001 DOWNLOAD DATA
# +002 PREPARE & READ DATA
# 4.1.a.3 - Net GHG emissions (emissions and removals) from agriculture
# 4.1.a.6 - Net GHG emissions (emissions and removals) from LULUCF
# 4.1.b.3 - Water exploitation index (WEI)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Load configuration variables
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
library(dplyr)
library(tidyr)
library(purrr)
#install.packages("corrr")
library(corrr)
library(ggplot2)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# +001 DOWNLOAD DATA
#BitBucket repository : Bioeconomy Knowledge Centre / biomon-data
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#clone repository
#on the command prompt:
#git clone https://citnet.tech.ec.europa.eu/CITnet/stash/scm/beo/biomon-data.git
#cd folder-in-which-you-download-data
#git checkout --track origin/develop
# +002 PREPARE & READ DATA
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# - 4.1.a.3 - Net GHG emissions (emissions and removals) from agriculture
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
GHG_agri <- read.csv(paste0(cordir_in,"output_data/indicators/4.1.a.3/4.1.a.3.csv"), stringsAsFactors = FALSE)
GHG_agri
names(GHG_agri)
names(GHG_agri)[3] <- "GHG_agri"
names(GHG_agri)[4] <- "unit_GHG_agri"
unique(GHG_agri$geo_code)
#If the dataframe is not taken entirely, R needs to be forced to read further
options(max.print = 1000 * ncol(GHG_agri))
GHG_agri <- GHG_agri %>%
filter(geo_code != "EU27_2020") %>% filter(geo_code != "EU28")
levels(as.factor(GHG_agri$unit.GHG_agri))
# - 4.1.a.6 - Net GHG emissions (emissions and removals) from LULUCF
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
GHG_lulucf <- read.csv(paste0(cordir_in,"output_data/indicators/4.1.a.6/4.1.a.6.csv"), stringsAsFactors = FALSE)
GHG_lulucf
names(GHG_lulucf)
names(GHG_lulucf)[3] <- "GHG_lulucf"
names(GHG_lulucf)[4] <- "unit_GHG_lulucf"
unique(GHG_lulucf$geo_code)
options(max.print = 1000 * ncol(GHG_lulucf))
GHG_lulucf <- GHG_lulucf %>%
filter(geo_code != "EU27_2020") %>% filter(geo_code != "EU28")
levels(as.factor(GHG_lulucf$unit_GHG_lulucf))
# - 4.1.b.3 - Water exploitation index (WEI)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
WEI <- read.csv(paste0(cordir_in, "output_data/indicators/4.1.b.3/4.1.b.3.csv"), stringsAsFactors = FALSE)
str(WEI)
names(WEI)
names(WEI)[3] <- "WEI"
names(WEI)[4] <- "unit_WEI"
unique(WEI$geo_code)
options(max.print = 1000 * ncol(WEI))
WEI <- WEI %>%
filter(geo_code != "EU27_2020")
levels(as.factor(WEI$unit_WEI)) |
# R script that plots cumulative proportion of variance and save a figure in pdf format
args <- commandArgs(TRUE)
library(RColorBrewer)
N <- length(args)
L <- length(t(read.table(as.character(args[1]))))
if (N < 3) {
cl <- colorRampPalette(brewer.pal(3, "Dark2"))(3)
}
if (N >= 3) {
cl <- colorRampPalette(brewer.pal(N, "Dark2"))(N)
}
lt <- 1
# line-type
LG <- c()
for (k in 1:length(args)) {
LG[k] <- strsplit(x = args[k], split = "_")[[1]][1]
}
Titl <- paste(strsplit(x = args[1], split = "_")[[1]][2:3], sep = "-")
pdf(file = "propVarPlot.pdf", width = 5, height = 5)
plot(1:L, 100 * cumsum(t(read.table(as.character(args[1])))), type = "l", col = cl[1], xlab = "Component", ylab = "Cumulative sum (%)", main = Titl, lwd = 2, ylim = c(0, 100), lty = lt, log = "x")
for (j in 2:N) {
points(1:L, 100 * cumsum(t(read.table(as.character(args[j])))), type = "l", col = cl[j], lwd = 2, lty = lt, log = "x")
}
legend("bottomright", legend = LG, col = cl, bty = "n", lty = lt, cex = 1, lwd = 2)
| /propVarPlot.r | permissive | pmb59/KLTepigenome | R | false | false | 999 | r | # R script that plots cumulative proportion of variance and save a figure in pdf format
args <- commandArgs(TRUE)
library(RColorBrewer)
N <- length(args)
L <- length(t(read.table(as.character(args[1]))))
if (N < 3) {
cl <- colorRampPalette(brewer.pal(3, "Dark2"))(3)
}
if (N >= 3) {
cl <- colorRampPalette(brewer.pal(N, "Dark2"))(N)
}
lt <- 1
# line-type
LG <- c()
for (k in 1:length(args)) {
LG[k] <- strsplit(x = args[k], split = "_")[[1]][1]
}
Titl <- paste(strsplit(x = args[1], split = "_")[[1]][2:3], sep = "-")
pdf(file = "propVarPlot.pdf", width = 5, height = 5)
plot(1:L, 100 * cumsum(t(read.table(as.character(args[1])))), type = "l", col = cl[1], xlab = "Component", ylab = "Cumulative sum (%)", main = Titl, lwd = 2, ylim = c(0, 100), lty = lt, log = "x")
for (j in 2:N) {
points(1:L, 100 * cumsum(t(read.table(as.character(args[j])))), type = "l", col = cl[j], lwd = 2, lty = lt, log = "x")
}
legend("bottomright", legend = LG, col = cl, bty = "n", lty = lt, cex = 1, lwd = 2)
|
# Expand Grid
expand.grid(height = seq(60, 80, 5), weight = seq(100, 300, 50),
sex = c("Male","Female"))
x <- seq(0, 10, length.out = 100)
y <- seq(-1, 1, length.out = 20)
d1 <- expand.grid(x = x, y = y)
d1
d2 <- expand.grid(x = x, y = y, KEEP.OUT.ATTRS = FALSE)
d2
object.size(d1) - object.size(d2)
##-> 5992 or 8832 (on 32- / 64-bit platform)
| /23-functions/54d-expandgrid.R | no_license | DUanalytics/rAnalytics | R | false | false | 358 | r | # Expand Grid
expand.grid(height = seq(60, 80, 5), weight = seq(100, 300, 50),
sex = c("Male","Female"))
x <- seq(0, 10, length.out = 100)
y <- seq(-1, 1, length.out = 20)
d1 <- expand.grid(x = x, y = y)
d1
d2 <- expand.grid(x = x, y = y, KEEP.OUT.ATTRS = FALSE)
d2
object.size(d1) - object.size(d2)
##-> 5992 or 8832 (on 32- / 64-bit platform)
|
#-----------------------------------------------------------------------------------------------------------------#
# Function testConvexity
#-----------------------------------------------------------------------------------------------------------------#
# Test if one of the class is convex and proiveds the hyperplanes used to build the LSVM
#' @export
testConvexity <- function(X,Y){
XS <- X[which(Y > 0), ]
XF <- X[which(Y <= 0), ]
d <- dim(X)[2]
if(is.null(dim(XF))){
rXF <- 1
XF <- matrix(XF, nrow = 1)
}else{
rXF <- dim(XF)[1]
}
if(is.null(dim(XS))){
rXS <- 1
XS <- matrix(XS, nrow = 1)
}else{
rXS <- dim(XS)[1]
}
A.low <- matrix(0, ncol = d + 1, nrow = rXS)
test.low <- TRUE
i <- 1
#test if each points of XS can be separated from all points of XF by a hyperplane
while(test.low){
res.low <- SVMLinearMonotone(rbind(XS[i,], XF), c(1, rep(-1, rXF)))
A.low[i, ] <- res.low
if( length(res.low) == 1){
test.low <- FALSE
}
i <- i + 1
if(i > rXS){
cat('The set associated to the label -1 is convexe \n')
return( list(-1, A.low) )
}
}
#test if each points of XF can be separated from all points of XS by a hyperplane
A.up <- matrix(0, ncol = d + 1, nrow = rXF)
test.up <- TRUE
i <- 1
while(test.up){
res.up <- SVMLinearMonotone(rbind(XF[i,], XS), c(-1, rep(1, rXS)) )
A.up[i, ] <- res.up
if( length(res.up) == 1){
test.up <- FALSE
}
i <- i + 1
if(i > rXF){
test.up <- FALSE
cat('The set associated to the label -1 is concave \n')
return(list(1, A.up))
}
}
cat('The problem is not convex!')
return(0)
}
| /R/testConvexity.R | no_license | ClementWalter/mistral | R | false | false | 1,693 | r | #-----------------------------------------------------------------------------------------------------------------#
# Function testConvexity
#-----------------------------------------------------------------------------------------------------------------#
# Test if one of the class is convex and proiveds the hyperplanes used to build the LSVM
#' @export
testConvexity <- function(X,Y){
XS <- X[which(Y > 0), ]
XF <- X[which(Y <= 0), ]
d <- dim(X)[2]
if(is.null(dim(XF))){
rXF <- 1
XF <- matrix(XF, nrow = 1)
}else{
rXF <- dim(XF)[1]
}
if(is.null(dim(XS))){
rXS <- 1
XS <- matrix(XS, nrow = 1)
}else{
rXS <- dim(XS)[1]
}
A.low <- matrix(0, ncol = d + 1, nrow = rXS)
test.low <- TRUE
i <- 1
#test if each points of XS can be separated from all points of XF by a hyperplane
while(test.low){
res.low <- SVMLinearMonotone(rbind(XS[i,], XF), c(1, rep(-1, rXF)))
A.low[i, ] <- res.low
if( length(res.low) == 1){
test.low <- FALSE
}
i <- i + 1
if(i > rXS){
cat('The set associated to the label -1 is convexe \n')
return( list(-1, A.low) )
}
}
#test if each points of XF can be separated from all points of XS by a hyperplane
A.up <- matrix(0, ncol = d + 1, nrow = rXF)
test.up <- TRUE
i <- 1
while(test.up){
res.up <- SVMLinearMonotone(rbind(XF[i,], XS), c(-1, rep(1, rXS)) )
A.up[i, ] <- res.up
if( length(res.up) == 1){
test.up <- FALSE
}
i <- i + 1
if(i > rXF){
test.up <- FALSE
cat('The set associated to the label -1 is concave \n')
return(list(1, A.up))
}
}
cat('The problem is not convex!')
return(0)
}
|
ScaleColour <- proto(ScaleDiscrete, expr={
objname <- "colour"
doc <- FALSE
common <- c()
})
ScaleHue <- proto(ScaleColour, expr={
aliases <- c("scale_colour_discrete", "scale_fill_discrete", "scale_color_hue", "scale_color_discrete")
new <- function(., name=NULL, h=c(0,360) + 15, l=65, c=100, limits=NULL, breaks = NULL, labels=NULL, h.start = 0, direction = 1, formatter = identity, legend = TRUE, variable) {
b_and_l <- check_breaks_and_labels(breaks, labels)
.$proto(name=name, h=h, l=l, c=c, .input=variable, .output=variable, .labels = b_and_l$labels, breaks = b_and_l$breaks, direction = direction, start = h.start, limits = limits, formatter = formatter, legend = legend)
}
output_set <- function(.) {
rotate <- function(x) (x + .$start) %% 360 * .$direction
n <- length(.$input_set())
if ((diff(.$h) %% 360) < 1) {
.$h[2] <- .$h[2] - 360 / n
}
grDevices::hcl(
h = rotate(seq(.$h[1], .$h[2], length = n)),
c =.$c,
l =.$l
)
}
max_levels <- function(.) Inf
doc <- TRUE
common <- c("colour", "fill")
# Documentation -----------------------------------------------
objname <- "hue"
desc <- "Qualitative colour scale with evenly spaced hues"
icon <- function(.) {
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=hcl(seq(0, 360, length=6)[-6], c=100, l=65), col=NA)
)
}
desc_params <- list(
h = "range of hues to use, in [0, 360]",
l = "luminance (lightness), in [0, 100]",
c = "chroma (intensity of colour)",
h.start = "hue to start at",
direction = "direction to travel around the colour wheel, 1 = clockwise, -1 = counter-clockwise"
)
examples <- function(.) {
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
(d <- qplot(carat, price, data=dsamp, colour=clarity))
# Change scale label
d + scale_colour_hue()
d + scale_colour_hue("clarity")
d + scale_colour_hue(expression(clarity[beta]))
# Adjust luminosity and chroma
d + scale_colour_hue(l=40, c=30)
d + scale_colour_hue(l=70, c=30)
d + scale_colour_hue(l=70, c=150)
d + scale_colour_hue(l=80, c=150)
# Change range of hues used
d + scale_colour_hue(h=c(0, 90))
d + scale_colour_hue(h=c(90, 180))
d + scale_colour_hue(h=c(180, 270))
d + scale_colour_hue(h=c(270, 360))
# Vary opacity
# (only works with pdf, quartz and cairo devices)
d <- ggplot(dsamp, aes(carat, price, colour = clarity))
d + geom_point(alpha = 0.9)
d + geom_point(alpha = 0.5)
d + geom_point(alpha = 0.2)
}
})
ScaleBrewer <- proto(ScaleColour, expr={
doc <- TRUE
new <- function(., name=NULL, palette=1, type="qual", na.colour = "grey80", limits=NULL, breaks = NULL, labels=NULL, formatter = identity, variable, legend = TRUE) {
b_and_l <- check_breaks_and_labels(breaks, labels)
.$proto(name=name, palette=palette, type=type, .input=variable, .output=variable, .labels = b_and_l$labels, breaks = b_and_l$breaks, limits= limits, formatter = formatter, legend = legend, na.colour = na.colour)
}
aliases <- c("scale_color_brewer")
output_set <- function(.) {
missing <- is.na(.$input_set())
n <- sum(!missing)
palette <- RColorBrewer::brewer.pal(n, .$pal_name())[1:n]
missing_colour(palette, missing, .$na.colour)
}
pal_name <- function(.) {
if (is.character(.$palette)) {
if (!.$palette %in% RColorBrewer:::namelist) {
warning("Unknown palette ", .$palette)
.$palette <- "Greens"
}
return(.$palette)
}
switch(.$type,
div = RColorBrewer:::divlist,
qual = RColorBrewer:::quallist,
seq = RColorBrewer:::seqlist
)[.$palette]
}
max_levels <- function(.) {
RColorBrewer:::maxcolors[RColorBrewer:::namelist == .$pal_name()]
}
# Documentation -----------------------------------------------
objname <- "brewer"
desc <- "Sequential, diverging and qualitative colour scales from colorbrewer.org"
desc_params <- list(
palette = "Either numeric or character. If numeric, selects the nth palette of type type. If character, selects the named palette. Get a complete list of all parameters by running \\code{RColorBrewer::display.brewer.all(n=8, exact.n=FALSE)}",
type = "Type of scale. One of 'div' (diverging), 'qual' (qualitative, the default), 'seq' (sequential), or 'all' (all). Only used when palette is numeric."
)
details <- "<p>See <a href='http://colorbrewer.org'>colorbrewer.org</a> for more info</p>"
common <- c("colour", "fill")
icon <- function(.) {
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=RColorBrewer::brewer.pal(5, "PuOr"), col=NA)
)
}
examples <- function(.) {
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
(d <- qplot(carat, price, data=dsamp, colour=clarity))
# Change scale label
d + scale_colour_brewer()
d + scale_colour_brewer("clarity")
d + scale_colour_brewer(expression(clarity[beta]))
# Select brewer palette to use, see ?brewer.pal for more details
d + scale_colour_brewer(type="seq")
d + scale_colour_brewer(type="seq", palette=3)
RColorBrewer::display.brewer.all(n=8, exact.n=FALSE)
d + scale_colour_brewer(palette="Blues")
d + scale_colour_brewer(palette="Set1")
# scale_fill_brewer works just the same as
# scale_colour_brewer but for fill colours
ggplot(diamonds, aes(x=price, fill=cut)) +
geom_histogram(position="dodge", binwidth=1000) +
scale_fill_brewer()
}
}) | /R/scale-discrete-colour.r | no_license | genome-vendor/r-cran-ggplot2 | R | false | false | 5,617 | r | ScaleColour <- proto(ScaleDiscrete, expr={
objname <- "colour"
doc <- FALSE
common <- c()
})
ScaleHue <- proto(ScaleColour, expr={
aliases <- c("scale_colour_discrete", "scale_fill_discrete", "scale_color_hue", "scale_color_discrete")
new <- function(., name=NULL, h=c(0,360) + 15, l=65, c=100, limits=NULL, breaks = NULL, labels=NULL, h.start = 0, direction = 1, formatter = identity, legend = TRUE, variable) {
b_and_l <- check_breaks_and_labels(breaks, labels)
.$proto(name=name, h=h, l=l, c=c, .input=variable, .output=variable, .labels = b_and_l$labels, breaks = b_and_l$breaks, direction = direction, start = h.start, limits = limits, formatter = formatter, legend = legend)
}
output_set <- function(.) {
rotate <- function(x) (x + .$start) %% 360 * .$direction
n <- length(.$input_set())
if ((diff(.$h) %% 360) < 1) {
.$h[2] <- .$h[2] - 360 / n
}
grDevices::hcl(
h = rotate(seq(.$h[1], .$h[2], length = n)),
c =.$c,
l =.$l
)
}
max_levels <- function(.) Inf
doc <- TRUE
common <- c("colour", "fill")
# Documentation -----------------------------------------------
objname <- "hue"
desc <- "Qualitative colour scale with evenly spaced hues"
icon <- function(.) {
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=hcl(seq(0, 360, length=6)[-6], c=100, l=65), col=NA)
)
}
desc_params <- list(
h = "range of hues to use, in [0, 360]",
l = "luminance (lightness), in [0, 100]",
c = "chroma (intensity of colour)",
h.start = "hue to start at",
direction = "direction to travel around the colour wheel, 1 = clockwise, -1 = counter-clockwise"
)
examples <- function(.) {
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
(d <- qplot(carat, price, data=dsamp, colour=clarity))
# Change scale label
d + scale_colour_hue()
d + scale_colour_hue("clarity")
d + scale_colour_hue(expression(clarity[beta]))
# Adjust luminosity and chroma
d + scale_colour_hue(l=40, c=30)
d + scale_colour_hue(l=70, c=30)
d + scale_colour_hue(l=70, c=150)
d + scale_colour_hue(l=80, c=150)
# Change range of hues used
d + scale_colour_hue(h=c(0, 90))
d + scale_colour_hue(h=c(90, 180))
d + scale_colour_hue(h=c(180, 270))
d + scale_colour_hue(h=c(270, 360))
# Vary opacity
# (only works with pdf, quartz and cairo devices)
d <- ggplot(dsamp, aes(carat, price, colour = clarity))
d + geom_point(alpha = 0.9)
d + geom_point(alpha = 0.5)
d + geom_point(alpha = 0.2)
}
})
ScaleBrewer <- proto(ScaleColour, expr={
doc <- TRUE
new <- function(., name=NULL, palette=1, type="qual", na.colour = "grey80", limits=NULL, breaks = NULL, labels=NULL, formatter = identity, variable, legend = TRUE) {
b_and_l <- check_breaks_and_labels(breaks, labels)
.$proto(name=name, palette=palette, type=type, .input=variable, .output=variable, .labels = b_and_l$labels, breaks = b_and_l$breaks, limits= limits, formatter = formatter, legend = legend, na.colour = na.colour)
}
aliases <- c("scale_color_brewer")
output_set <- function(.) {
missing <- is.na(.$input_set())
n <- sum(!missing)
palette <- RColorBrewer::brewer.pal(n, .$pal_name())[1:n]
missing_colour(palette, missing, .$na.colour)
}
pal_name <- function(.) {
if (is.character(.$palette)) {
if (!.$palette %in% RColorBrewer:::namelist) {
warning("Unknown palette ", .$palette)
.$palette <- "Greens"
}
return(.$palette)
}
switch(.$type,
div = RColorBrewer:::divlist,
qual = RColorBrewer:::quallist,
seq = RColorBrewer:::seqlist
)[.$palette]
}
max_levels <- function(.) {
RColorBrewer:::maxcolors[RColorBrewer:::namelist == .$pal_name()]
}
# Documentation -----------------------------------------------
objname <- "brewer"
desc <- "Sequential, diverging and qualitative colour scales from colorbrewer.org"
desc_params <- list(
palette = "Either numeric or character. If numeric, selects the nth palette of type type. If character, selects the named palette. Get a complete list of all parameters by running \\code{RColorBrewer::display.brewer.all(n=8, exact.n=FALSE)}",
type = "Type of scale. One of 'div' (diverging), 'qual' (qualitative, the default), 'seq' (sequential), or 'all' (all). Only used when palette is numeric."
)
details <- "<p>See <a href='http://colorbrewer.org'>colorbrewer.org</a> for more info</p>"
common <- c("colour", "fill")
icon <- function(.) {
rectGrob(c(0.1, 0.3, 0.5, 0.7, 0.9), width=0.21,
gp=gpar(fill=RColorBrewer::brewer.pal(5, "PuOr"), col=NA)
)
}
examples <- function(.) {
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
(d <- qplot(carat, price, data=dsamp, colour=clarity))
# Change scale label
d + scale_colour_brewer()
d + scale_colour_brewer("clarity")
d + scale_colour_brewer(expression(clarity[beta]))
# Select brewer palette to use, see ?brewer.pal for more details
d + scale_colour_brewer(type="seq")
d + scale_colour_brewer(type="seq", palette=3)
RColorBrewer::display.brewer.all(n=8, exact.n=FALSE)
d + scale_colour_brewer(palette="Blues")
d + scale_colour_brewer(palette="Set1")
# scale_fill_brewer works just the same as
# scale_colour_brewer but for fill colours
ggplot(diamonds, aes(x=price, fill=cut)) +
geom_histogram(position="dodge", binwidth=1000) +
scale_fill_brewer()
}
}) |
DATA_DIR <- file.path("..", "data")
INSTACART_DATA_DIR <- file.path("..", "instacart_data")
FOOTBALL_DATA_DIR <- file.path("..", "football_data")
| /rocalabern/configuration.R | no_license | sbartek/big_data_with_R | R | false | false | 148 | r | DATA_DIR <- file.path("..", "data")
INSTACART_DATA_DIR <- file.path("..", "instacart_data")
FOOTBALL_DATA_DIR <- file.path("..", "football_data")
|
# Copyright 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
extract_codes <- function(x) {
setdiff(unique(unlist(lapply(x, function(y) {
stringr::str_extract_all(y, "EMS_[[:alnum:]][[:alnum:]_]{3,3}")
}))), NA)
}
| /R/functions_shinywqg.R | permissive | aaoliver/shinyrems | R | false | false | 746 | r | # Copyright 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
extract_codes <- function(x) {
setdiff(unique(unlist(lapply(x, function(y) {
stringr::str_extract_all(y, "EMS_[[:alnum:]][[:alnum:]_]{3,3}")
}))), NA)
}
|
setwd("//OHFS07/Home/dbasak/My Documents/CapitalOne Data Challenge -Debasmita/CapitalOne")
library(jsonlite)
library(plyr)
hmda_init <- function() {
loan <- read.csv('2012_to_2014_loans_data.CSV',header=TRUE)
inst <- read.csv('2012_to_2014_institutions_data.csv',header=TRUE)
dt_merged <- join(loan, inst, type = "left")
dt_merged$Loan_Amount_Bucket <- cut(dt_merged$Loan_Amount_000, br=c(0,20000,40000,60000,80000,100000), labels = c("upto 20k","20k to 40k", "40k to 60k","60k to 80k","80k to 100k") )
dt_merged$Applicant_Income_000 <- as.numeric(as.character(dt_merged$Applicant_Income_000))
dt_merged$FFIEC_Median_Family_Income <- as.numeric(as.character(dt_merged$FFIEC_Median_Family_Income))
##Create field income category
dt_merged$IncomeCategory <- NA
dt_merged$pct_medianIncome = dt_merged$Applicant_Income_000*1000/dt_merged$FFIEC_Median_Family_Income
i <- !is.na(dt_merged$pct_medianIncome) & dt_merged$pct_medianIncome <= 0.80
dt_merged[i,"IncomeCategory"] ="LMI"
i <- !is.na(dt_merged$pct_medianIncome) & dt_merged$pct_medianIncome > 0.80 & dt_merged$pct_medianIncome <= 1.2
dt_merged[i,"IncomeCategory"] ="MUM"
i <- !is.na(dt_merged$pct_medianIncome) & dt_merged$pct_medianIncome > 1.20
dt_merged[i,"IncomeCategory"] ="UI"
return(dt_merged)
}
hmda_to_json <- function(data, state_prm="All", conv_conf_prm="All") {
data_subset<-data
if (state_prm != "All")
{
data_subset <- subset(data_subset,state=state_prm)
}
if(conv_conf_prm != "All")
{
data_subset <- subset(data_subset,Conventional_Conforming_Flag=conv_conf_prm)
}
hmda_json <- toJSON(data_subset)
write(hmda_json, file="hmda_json.JSON")
}
| /HMDA_API.R | no_license | Deb23/Home-Mortgage-Analysis | R | false | false | 1,742 | r | setwd("//OHFS07/Home/dbasak/My Documents/CapitalOne Data Challenge -Debasmita/CapitalOne")
library(jsonlite)
library(plyr)
hmda_init <- function() {
loan <- read.csv('2012_to_2014_loans_data.CSV',header=TRUE)
inst <- read.csv('2012_to_2014_institutions_data.csv',header=TRUE)
dt_merged <- join(loan, inst, type = "left")
dt_merged$Loan_Amount_Bucket <- cut(dt_merged$Loan_Amount_000, br=c(0,20000,40000,60000,80000,100000), labels = c("upto 20k","20k to 40k", "40k to 60k","60k to 80k","80k to 100k") )
dt_merged$Applicant_Income_000 <- as.numeric(as.character(dt_merged$Applicant_Income_000))
dt_merged$FFIEC_Median_Family_Income <- as.numeric(as.character(dt_merged$FFIEC_Median_Family_Income))
##Create field income category
dt_merged$IncomeCategory <- NA
dt_merged$pct_medianIncome = dt_merged$Applicant_Income_000*1000/dt_merged$FFIEC_Median_Family_Income
i <- !is.na(dt_merged$pct_medianIncome) & dt_merged$pct_medianIncome <= 0.80
dt_merged[i,"IncomeCategory"] ="LMI"
i <- !is.na(dt_merged$pct_medianIncome) & dt_merged$pct_medianIncome > 0.80 & dt_merged$pct_medianIncome <= 1.2
dt_merged[i,"IncomeCategory"] ="MUM"
i <- !is.na(dt_merged$pct_medianIncome) & dt_merged$pct_medianIncome > 1.20
dt_merged[i,"IncomeCategory"] ="UI"
return(dt_merged)
}
hmda_to_json <- function(data, state_prm="All", conv_conf_prm="All") {
data_subset<-data
if (state_prm != "All")
{
data_subset <- subset(data_subset,state=state_prm)
}
if(conv_conf_prm != "All")
{
data_subset <- subset(data_subset,Conventional_Conforming_Flag=conv_conf_prm)
}
hmda_json <- toJSON(data_subset)
write(hmda_json, file="hmda_json.JSON")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgboost-fit.R
\name{rf.cv_pam}
\alias{rf.cv_pam}
\title{Runs xgb.train.ped on cross-validation sets}
\usage{
rf.cv_pam(
params = list(),
data,
nrounds = 1L,
nfold = 4,
cv_indices,
ped_params = list(),
nthread = 1L,
verbose = FALSE,
print_every_n = 1L,
early_stopping_rounds = NULL,
...
)
}
\arguments{
\item{params}{the list of parameters.
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
Below is a shorter summary:
1. General Parameters
\itemize{
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
}
2. Booster Parameters
2.1. Parameter for Tree Booster
\itemize{
\item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
\item \code{max_depth} maximum depth of a tree. Default: 6
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
\item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}
2.2. Parameter for Linear Booster
\itemize{
\item \code{lambda} L2 regularization term on weights. Default: 0
\item \code{lambda_bias} L2 regularization term on bias. Default: 0
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
}
3. Task Parameters
\itemize{
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:squarederror} Regression with squared loss (Default).
\item \code{reg:logistic} logistic regression.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{num_class} set the number of classes. To use only with multiclass objectives.
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
\item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
}
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
}}
\item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
\code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.}
\item{nrounds}{max number of boosting iterations.}
\item{nfold}{Number of cross-validation folds.}
\item{ped_params}{List of parameters used to transform data into PED format.}
\item{verbose}{If 0, xgboost will stay silent. If 1, it will print information about performance.
If 2, some additional information will be printed out.
Note that setting \code{verbose > 0} automatically engages the
\code{cb.print.evaluation(period=1)} callback function.}
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
Default is 1 which means all messages are printed. This parameter is passed to the
\code{\link[xgboost]{cb.print.evaluation}} callback.}
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
doesn't improve for \code{k} rounds.
Setting this parameter engages the \code{\link[xgboost]{cb.early.stop}} callback.}
\item{...}{other parameters to pass to \code{params}.}
}
\description{
Runs xgb.train.ped on cross-validation sets
}
| /man/rf.cv_pam.Rd | permissive | stefan-de/pem.xgb | R | false | true | 6,414 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgboost-fit.R
\name{rf.cv_pam}
\alias{rf.cv_pam}
\title{Runs xgb.train.ped on cross-validation sets}
\usage{
rf.cv_pam(
params = list(),
data,
nrounds = 1L,
nfold = 4,
cv_indices,
ped_params = list(),
nthread = 1L,
verbose = FALSE,
print_every_n = 1L,
early_stopping_rounds = NULL,
...
)
}
\arguments{
\item{params}{the list of parameters.
The complete list of parameters is available at \url{http://xgboost.readthedocs.io/en/latest/parameter.html}.
Below is a shorter summary:
1. General Parameters
\itemize{
\item \code{booster} which booster to use, can be \code{gbtree} or \code{gblinear}. Default: \code{gbtree}.
}
2. Booster Parameters
2.1. Parameter for Tree Booster
\itemize{
\item \code{eta} control the learning rate: scale the contribution of each tree by a factor of \code{0 < eta < 1} when it is added to the current approximation. Used to prevent overfitting by making the boosting process more conservative. Lower value for \code{eta} implies larger value for \code{nrounds}: low \code{eta} value means model more robust to overfitting but slower to compute. Default: 0.3
\item \code{gamma} minimum loss reduction required to make a further partition on a leaf node of the tree. the larger, the more conservative the algorithm will be.
\item \code{max_depth} maximum depth of a tree. Default: 6
\item \code{min_child_weight} minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression mode, this simply corresponds to minimum number of instances needed to be in each node. The larger, the more conservative the algorithm will be. Default: 1
\item \code{subsample} subsample ratio of the training instance. Setting it to 0.5 means that xgboost randomly collected half of the data instances to grow trees and this will prevent overfitting. It makes computation shorter (because less data to analyse). It is advised to use this parameter with \code{eta} and increase \code{nrounds}. Default: 1
\item \code{colsample_bytree} subsample ratio of columns when constructing each tree. Default: 1
\item \code{num_parallel_tree} Experimental parameter. number of trees to grow per round. Useful to test Random Forest through Xgboost (set \code{colsample_bytree < 1}, \code{subsample < 1} and \code{round = 1}) accordingly. Default: 1
\item \code{monotone_constraints} A numerical vector consists of \code{1}, \code{0} and \code{-1} with its length equals to the number of features in the training data. \code{1} is increasing, \code{-1} is decreasing and \code{0} is no constraint.
\item \code{interaction_constraints} A list of vectors specifying feature indices of permitted interactions. Each item of the list represents one permitted interaction where specified features are allowed to interact with each other. Feature index values should start from \code{0} (\code{0} references the first column). Leave argument unspecified for no interaction constraints.
}
2.2. Parameter for Linear Booster
\itemize{
\item \code{lambda} L2 regularization term on weights. Default: 0
\item \code{lambda_bias} L2 regularization term on bias. Default: 0
\item \code{alpha} L1 regularization term on weights. (there is no L1 reg on bias because it is not important). Default: 0
}
3. Task Parameters
\itemize{
\item \code{objective} specify the learning task and the corresponding learning objective, users can pass a self-defined function to it. The default objective options are below:
\itemize{
\item \code{reg:squarederror} Regression with squared loss (Default).
\item \code{reg:logistic} logistic regression.
\item \code{binary:logistic} logistic regression for binary classification. Output probability.
\item \code{binary:logitraw} logistic regression for binary classification, output score before logistic transformation.
\item \code{num_class} set the number of classes. To use only with multiclass objectives.
\item \code{multi:softmax} set xgboost to do multiclass classification using the softmax objective. Class is represented by a number and should be from 0 to \code{num_class - 1}.
\item \code{multi:softprob} same as softmax, but prediction outputs a vector of ndata * nclass elements, which can be further reshaped to ndata, nclass matrix. The result contains predicted probabilities of each data point belonging to each class.
\item \code{rank:pairwise} set xgboost to do ranking task by minimizing the pairwise loss.
}
\item \code{base_score} the initial prediction score of all instances, global bias. Default: 0.5
\item \code{eval_metric} evaluation metrics for validation data. Users can pass a self-defined function to it. Default: metric will be assigned according to objective(rmse for regression, and error for classification, mean average precision for ranking). List is provided in detail section.
}}
\item{data}{training dataset. \code{xgb.train} accepts only an \code{xgb.DMatrix} as the input.
\code{xgboost}, in addition, also accepts \code{matrix}, \code{dgCMatrix}, or name of a local data file.}
\item{nrounds}{max number of boosting iterations.}
\item{nfold}{Number of cross-validation folds.}
\item{ped_params}{List of parameters used to transform data into PED format.}
\item{verbose}{If 0, xgboost will stay silent. If 1, it will print information about performance.
If 2, some additional information will be printed out.
Note that setting \code{verbose > 0} automatically engages the
\code{cb.print.evaluation(period=1)} callback function.}
\item{print_every_n}{Print each n-th iteration evaluation messages when \code{verbose>0}.
Default is 1 which means all messages are printed. This parameter is passed to the
\code{\link[xgboost]{cb.print.evaluation}} callback.}
\item{early_stopping_rounds}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
doesn't improve for \code{k} rounds.
Setting this parameter engages the \code{\link[xgboost]{cb.early.stop}} callback.}
\item{...}{other parameters to pass to \code{params}.}
}
\description{
Runs xgb.train.ped on cross-validation sets
}
|
############### LR Test function ###############
# This function takes three arguments:
# First argument: fitted restricted model
# Second argument: fitted unrestricted model
# Third argument: model type (GARCH vs DCC)
# Performs a LR test and outputs the degrees of freedom, likelihoods, LR statistic, and p-value
LR_test <- function(restricted, unrestricted, model = "GARCH") {
# Specifying the degrees of freedom. Model type determines if "fit" or "mfit"
if (model == "GARCH") {
df <- length(unrestricted@fit$coef) - length(restricted@fit$coef)
} else if (model == "DCC") {
df <- length(unrestricted@mfit$coef) - length(restricted@mfit$coef)
} else {
return("Supports GARCH and DCC models")
}
# Creating the statistic
lr <- 2*(likelihood(unrestricted) - likelihood(restricted))
# Finding its p-value
p.value <- 1 - pchisq(lr, df)
# Output
cat("Degrees of freedom:", df, "\n",
"Likelihood of unrestricted model:", likelihood(unrestricted), "\n",
"Likelihood of restricted model:", likelihood(restricted), "\n",
"LR: 2*(Lu-Lr):", lr, "\n",
"p-value:", p.value
)
} | /Seminar5/LR_test.R | no_license | lnsongxf/financial_risk_forecasting | R | false | false | 1,206 | r | ############### LR Test function ###############
# This function takes three arguments:
# First argument: fitted restricted model
# Second argument: fitted unrestricted model
# Third argument: model type (GARCH vs DCC)
# Performs a LR test and outputs the degrees of freedom, likelihoods, LR statistic, and p-value
# Performs a likelihood-ratio (LR) test of a restricted model against an
# unrestricted one. Prints the degrees of freedom, the two likelihoods,
# the LR statistic and its p-value, and invisibly returns them as a list.
LR_test <- function(restricted, unrestricted, model = "GARCH") {
  # Degrees of freedom = difference in number of estimated coefficients.
  # The slot holding the fit differs by model type ("fit" for GARCH
  # objects, "mfit" for DCC objects).
  if (model == "GARCH") {
    df <- length(unrestricted@fit$coef) - length(restricted@fit$coef)
  } else if (model == "DCC") {
    df <- length(unrestricted@mfit$coef) - length(restricted@mfit$coef)
  } else {
    # Fail loudly on an unsupported model type instead of silently
    # returning an instructional string (previous behavior).
    stop("Supports GARCH and DCC models", call. = FALSE)
  }
  # LR statistic: twice the log-likelihood gain of the unrestricted model.
  # `likelihood()` is the extractor from rugarch/rmgarch -- assumes those
  # packages are attached.
  lr <- 2*(likelihood(unrestricted) - likelihood(restricted))
  # Upper-tail chi-square probability; lower.tail = FALSE avoids the
  # cancellation error of 1 - pchisq(lr, df) for very small p-values.
  p.value <- pchisq(lr, df, lower.tail = FALSE)
  # Output
  cat("Degrees of freedom:", df, "\n",
      "Likelihood of unrestricted model:", likelihood(unrestricted), "\n",
      "Likelihood of restricted model:", likelihood(restricted), "\n",
      "LR: 2*(Lu-Lr):", lr, "\n",
      "p-value:", p.value
  )
  # Invisibly return the test quantities so callers can reuse them.
  invisible(list(df = df, lr = lr, p.value = p.value))
}
# Page no. : 576 - 580
# Coefficient of Determination
# Simple linear regression of quarterly sales on student population for
# ten restaurants, followed by the ANOVA sums of squares, r^2, and the
# sample correlation coefficient.
restaurant <- c(1,2,3,4,5,6,7,8,9,10)
student_population <- c(2,6,8,8,12,16,20,20,22,26)
quartely_sales <- c(58,105,88,118,117,137,157,169,149,202)
DF <- data.frame(restaurant, student_population, quartely_sales)
# Least-squares fit and its ANOVA decomposition
regressor <- lm(quartely_sales ~ student_population, data = DF)
res <- summary(regressor)
table <- anova(regressor)
# Row 1 of the ANOVA table is the regression term, row 2 the residuals
sums_of_squares <- table[["Sum Sq"]]
SSE <- sums_of_squares[2] # Sum of Squares due to Error
cat("Value of SSE is",SSE)
SSR <- sums_of_squares[1] # Sum of Squares due to Regression
cat("Value of SSR is",SSR)
SST <- SSR + SSE # Total Sum of Squares
cat("Value of SST is",SST)
r_sq <- res$r.squared # Coefficient of Determination
corrcoeff <- sqrt(r_sq) # Correlation Coefficient
cat("Value of Coefficient of Determination is",r_sq)
cat("Value of correlation Coefficient is",corrcoeff)
| /Statistics_For_Business_And_Economics_by_Anderson_Sweeney_And_Williams/CH14/EX14.2a/Ex14_2a.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 930 | r | # Page no. : 576 - 580
# Coefficient of Determination
restaurant <- c(1,2,3,4,5,6,7,8,9,10)
student_population <- c(2,6,8,8,12,16,20,20,22,26)
quartely_sales <- c(58,105,88,118,117,137,157,169,149,202)
DF <- data.frame(restaurant, student_population, quartely_sales)
regressor <- lm(quartely_sales ~ student_population, data = DF)
res <- summary(regressor)
table <- anova(regressor)
SSE <- table$`Sum Sq`[2] # Sum of Squares due to Error
cat("Value of SSE is",SSE)
SSR <- table$`Sum Sq`[1] # Sum of Squares due to Regression
cat("Value of SSR is",SSR)
SST <- SSE + SSR # Total Sum of Squares
cat("Value of SST is",SST)
r_sq <- res$r.squared # Coefficient of Determination
corrcoeff <- sqrt(r_sq) # Correlation Coefficient
cat("Value of Coefficient of Determination is",r_sq)
cat("Value of correlation Coefficient is",corrcoeff)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Multiplot.R
\name{Multiplot}
\alias{Multiplot}
\title{Prints Multiple Plots from a List}
\usage{
Multiplot(..., plotlist = NULL, file, cols = 1, layout = NULL)
}
\arguments{
\item{...}{Additional grid parameters}
\item{plotlist}{List of ggplot objects}
\item{file}{A file object}
\item{cols}{Int for number of columns to display}
\item{layout}{a grid object}
}
\value{
A list of plots
}
\description{
Function taken from the R Cookbook:
http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
}
| /man/Multiplot.Rd | no_license | ckornafel/expldata | R | false | true | 589 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Multiplot.R
\name{Multiplot}
\alias{Multiplot}
\title{Prints Multiple Plots from a List}
\usage{
Multiplot(..., plotlist = NULL, file, cols = 1, layout = NULL)
}
\arguments{
\item{...}{Additional grid parameters}
\item{plotlist}{List of ggplot objects}
\item{file}{A file object}
\item{cols}{Int for number of columns to display}
\item{layout}{a grid object}
}
\value{
A list of plots
}
\description{
Function taken from the R Cookbook:
http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleValues.R
\name{getNodeLabelPosition}
\alias{getNodeLabelPosition}
\title{Get Node Label Position}
\usage{
getNodeLabelPosition(
node.names = NULL,
network = NULL,
base.url = .defaultBaseUrl
)
}
\arguments{
\item{node.names}{List of node names or SUIDs. Default is NULL for all nodes.}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
Named list of property values
}
\description{
Retrieve the actual label position of specified nodes.
}
\examples{
\donttest{
getNodeLabelPosition('Node 1')
}
}
| /man/getNodeLabelPosition.Rd | permissive | cytoscape/RCy3 | R | false | true | 891 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleValues.R
\name{getNodeLabelPosition}
\alias{getNodeLabelPosition}
\title{Get Node Label Position}
\usage{
getNodeLabelPosition(
node.names = NULL,
network = NULL,
base.url = .defaultBaseUrl
)
}
\arguments{
\item{node.names}{List of node names or SUIDs. Default is NULL for all nodes.}
\item{network}{(optional) Name or SUID of the network. Default is the
"current" network active in Cytoscape.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
Named list of property values
}
\description{
Retrieve the actual label position of specified nodes.
}
\examples{
\donttest{
getNodeLabelPosition('Node 1')
}
}
|
# Resolves the clustering routine used throughout the stability analysis.
# A user-supplied `clusteringFunction` takes precedence; otherwise
# `clusteringMethod` selects one of the built-in wrappers. Returns a list
# with `fun(data, k)` (the routine with `clusteringOptions` spliced in)
# and `name` (the method label; "Unknow" for custom functions).
GetClusteringAlgorithm <- function(clusteringMethod = "kmeans", clusteringFunction = NULL, clusteringOptions = NULL, ...){
  if (is.function(clusteringFunction)) {
    # Custom routine supplied by the caller; its method name is unknown.
    name <- "Unknow"
  } else {
    name <- clusteringMethod
    # Map the method label onto its wrapper; error out on anything else.
    clusteringFunction <- switch(
      clusteringMethod,
      kmeans = kmeansWrapper,
      pam = pamWrapper,
      hclust = hclustWrapper,
      stop("clusteringMethod not found. Please pass a clusteringFunction instead")
    )
  }
  list(
    fun = function(data, k) do.call(clusteringFunction, c(list(data = data, k = k), clusteringOptions)),
    name = name
  )
}
# Resolves the perturbation routine used during stability assessment.
# A user-supplied `perturbFunction` takes precedence; otherwise
# `perturbMethod` selects Gaussian-noise perturbation or subsampling.
# Returns a list with `fun(data)` (routine with `perturbOptions` spliced
# in) and `name` (the method label; "Unknow" for custom functions).
GetPerturbationAlgorithm <- function(data = data, perturbMethod = "noise", perturbFunction = NULL, perturbOptions = NULL, ...){
  if (is.function(perturbFunction)) {
    # Custom routine supplied by the caller; its method name is unknown.
    name <- "Unknow"
  } else {
    name <- perturbMethod
    if (perturbMethod == "noise") {
      if (is.null(perturbOptions)) {
        perturbOptions <- list()
      }
      noise <- perturbOptions$noise
      if (is.null(noise)) {
        # Derive the noise level from the data when not supplied.
        noise <- GetNoise(data, noisePercent = perturbOptions$noisePercent)
      }
      # Capture the precomputed noise level in a closure so every call
      # perturbs with the same magnitude.
      perturbFunction <- function(data, ...) {
        AddNoisePerturb(data = data, noise = noise)
      }
    } else if (perturbMethod == "subsampling") {
      perturbFunction <- SubSampling
    } else {
      stop("perturbMethod not found. Please pass a perturbFunction instead")
    }
  }
  list(
    fun = function(data) do.call(perturbFunction, c(list(data = data), perturbOptions)),
    name = name
  )
}
# Clusters `data` into `clus` groups and converts the partition into a
# binary connectivity matrix: entry (i, j) is 1L when samples i and j
# fall in the same cluster. Row/column names mirror rownames(data).
# Returns list(matrix = connectivity matrix, groups = cluster labels).
BuildConnectivityMatrix <- function(data, clus, clusteringAlgorithm) {
  n <- nrow(data)
  cluster <- clusteringAlgorithm(data, clus)
  S <- matrix(0L, n, n, dimnames = list(rownames(data), rownames(data)))
  # Mark every within-cluster pair (diagonal included) with 1L.
  for (grp in seq_len(clus)) {
    members <- which(cluster == grp)
    S[members, members] <- 1L
  }
  list(matrix = S, groups = cluster)
}
# Converts a vector of cluster labels into a binary co-membership
# (connectivity) matrix: entry (i, j) is 1L when samples i and j share a
# label (diagonal included).
#
# Replaces the original per-cluster loop (O(k * n^2), and broken for
# non-positive labels because `1:max(cluster)` yields c(1, 0) when
# max(cluster) < 1) with a single vectorized equality test that works
# for any label scheme.
ClusterToConnectivity <- function(cluster){
  1L * outer(cluster, cluster, `==`)
}
# Clusters `data` once for every cluster count k in `clusRange` and
# converts each partition into a binary connectivity matrix.
#
# Arguments:
#   data                - samples-by-features matrix; rownames (if any)
#                         are copied onto the connectivity matrices
#   clusRange           - vector of cluster counts k to evaluate
#   clusteringAlgorithm - function(data, k) returning cluster labels
#   showProgress        - draw a text progress bar while iterating
#   ncore               - accepted for interface symmetry with the
#                         perturbed variant; not used in this function
#
# Returns: list(origS = per-k connectivity matrices,
#               groupings = per-k cluster label vectors), indexed by k,
# so positions below min(clusRange) are left NULL.
GetOriginalSimilarity <- function(data, clusRange, clusteringAlgorithm, showProgress = F, ncore) {
  groupings <- list()
  origS <- list()
  if (showProgress) {
    pb <- txtProgressBar(min = 0, max = length(clusRange), style = 3)
  }
  # One pre-drawn seed per k keeps each clustering run reproducible.
  seeds = abs(round(rnorm(max(clusRange))*10^6))
  for (clus in clusRange){
    set.seed(seeds[clus])
    groupings[[clus]] <- clusteringAlgorithm(data, clus)
    origS[[clus]] <- ClusterToConnectivity(groupings[[clus]])
    rownames(origS[[clus]]) <- colnames(origS[[clus]]) <- rownames(data)
    if (showProgress) setTxtProgressBar(pb, clus)
  }
  list(origS = origS, groupings = groupings)
}
# Builds perturbed connectivity matrices for every cluster count in
# `clusRange`, repeatedly perturbing and re-clustering `data` until the
# stopping criteria fire or `iterMax` iterations per k are exhausted.
#
# Arguments:
#   data                    - samples-by-features matrix
#   clusRange               - vector of cluster counts k to evaluate
#   iterMax / iterMin       - max iterations per k / initial batch size
#   origS                   - per-k original connectivity matrices,
#                             used to score each iteration via CalcAUC
#   clusteringAlgorithm     - function(data, k) -> cluster labels
#   perturbedFunction       - function(data) -> list carrying $data and
#                             $ConnectivityMatrixHandler (see callers)
#   stoppingCriteriaHandler - callback; returns 1 to stop one k, 2 to
#                             stop the whole run
#   showProgress            - draw a text progress bar
#   ncore                   - worker count for the unix doParallel backend
#
# Returns: list indexed by k; each element is a list of perturbed
# connectivity matrices, one per completed iteration.
GetPerturbedSimilarity <- function(data, clusRange, iterMax, iterMin, origS, clusteringAlgorithm, perturbedFunction, stoppingCriteriaHandler, showProgress = F, ncore) {
  pertS <- list()
  currentIter <- rep(0,max(clusRange))
  # Work queue: one entry per (k, iteration); batches are taken from the
  # front of this vector below.
  jobs <- rep(clusRange, iterMax)
  maxJob = length(jobs)
  seeds = list()
  for (clus in clusRange) {
    pertS[[clus]] <- list()
    # Pre-drawn per-k seeds keep results reproducible regardless of the
    # order in which parallel jobs finish.
    seeds[[clus]] <- round(rnorm(max(iterMax,1000))*10^6)
  }
  if (showProgress) pb <- txtProgressBar(min = 0, max = maxJob, style = 3)
  # kProgress[k]: jobs dispatched so far for k; -1 marks k as finished.
  kProgress = rep(0, max(clusRange))
  step = iterMin
  # The doParallel backend is only registered on unix; elsewhere the
  # sequential %do% branch below is used.
  if(.Platform$OS.type == "unix") doParallel::registerDoParallel(min(ncore, step))
  while (length(jobs) > 0) {
    # Batch size: iterMin jobs per k on the first pass, 10 per k after.
    jobLength = step*length(clusRange)
    step = 10
    if (jobLength > length(jobs)){
      jobLength = length(jobs)
    }
    # Materialise the batch as (iter, k) job descriptors.
    currentJobs = list()
    count = 1
    for (clus in jobs[1:jobLength]){
      kProgress[clus] = kProgress[clus] + 1
      currentJobs[[count]] <- list(iter = kProgress[clus], clus = clus)
      count = count + 1
    }
    # Remove the dispatched jobs from the queue.
    if (jobLength < length(jobs)){
      jobs <- jobs[(jobLength+1):length(jobs)]
    }
    else jobs <- c()
    job <- NULL
    if(.Platform$OS.type == "unix")
    {
      # Parallel branch: perturb, re-cluster, and score each job.
      rets <- foreach(job = currentJobs) %dopar% {
        clus = job$clus
        set.seed(seeds[[clus]][job$iter])
        perturbedRet <- perturbedFunction(data = data)
        cMatrix <- BuildConnectivityMatrix(data = perturbedRet$data, clus, clusteringAlgorithm)
        connectivityMatrix = perturbedRet$ConnectivityMatrixHandler(connectivityMatrix = cMatrix$matrix, iter = job$iter, k = clus)
        list(
          connectivityMatrix = connectivityMatrix,
          clus = clus,
          auc = CalcAUC(origS[[clus]], connectivityMatrix)$area
        )
      }
    } else {
      # Sequential fallback: identical body, %do% instead of %dopar%.
      rets <- foreach(job = currentJobs) %do% {
        clus = job$clus
        set.seed(seeds[[clus]][job$iter])
        perturbedRet <- perturbedFunction(data = data)
        cMatrix <- BuildConnectivityMatrix(data = perturbedRet$data, clus, clusteringAlgorithm)
        connectivityMatrix = perturbedRet$ConnectivityMatrixHandler(connectivityMatrix = cMatrix$matrix, iter = job$iter, k = clus)
        list(
          connectivityMatrix = connectivityMatrix,
          clus = clus,
          auc = CalcAUC(origS[[clus]], connectivityMatrix)$area
        )
      }
    }
    allStop = F
    # Collect finished jobs and consult the stopping criteria per k.
    for(ret in rets){
      # NOTE(review): this guard reads kProgress[clus] while `clus` still
      # holds its value from the previous loop iteration (ret$clus is
      # only assigned on the next line). It looks like it should test
      # kProgress[ret$clus] -- confirm intended behavior before changing.
      if (kProgress[clus] != -1){
        clus = ret$clus
        currentIter[clus] <- currentIter[clus] + 1
        pertS[[clus]][[currentIter[clus]]] <- ret$connectivityMatrix
        stop <- stoppingCriteriaHandler(iter = currentIter[clus], k = clus, auc = ret$auc)
        if (stop == 2) {
          # Stop code 2: abort the entire run after this batch.
          allStop <- T
        }
        else if (stop == 1) {
          # Stop code 1: this k is done; drop its remaining queued jobs.
          if (showProgress) setTxtProgressBar(pb, getTxtProgressBar(pb) + length(jobs[jobs == clus]))
          jobs <- jobs[jobs != clus]
          kProgress[clus] <- -1
        }
      }
      if (showProgress) setTxtProgressBar(pb, getTxtProgressBar(pb) + 1)
    }
    if (allStop) break()
    # Re-poll the stopping criteria (type = 1) until no further k can be
    # retired; retiring one k may allow another to stop.
    countDone <- -1
    while(countDone != length(which(kProgress[clusRange] == -1))){
      countDone <- length(which(kProgress[clusRange] == -1))
      for (clus in clusRange){
        if (kProgress[clus] != -1){
          stop <- stoppingCriteriaHandler(k = clus, type = 1)
          if (stop == 1) {
            if (showProgress) setTxtProgressBar(pb, getTxtProgressBar(pb) + length(jobs[jobs == clus]))
            jobs <- jobs[jobs != clus]
            kProgress[clus] <- -1
          }
        }
      }
    }
  }
  if(.Platform$OS.type == "unix") doParallel::stopImplicitCluster()
  if (showProgress) {
    setTxtProgressBar(pb, maxJob)
    cat("\n")
  }
  pertS
}
# Area under the empirical CDF of the absolute entry-wise differences
# between an original and a perturbed connectivity matrix (diagonal
# ignored). An area of 1 means the two matrices agree perfectly.
#
# Returns list(area = AUC, entry = CDF evaluation points,
#              cdf = CDF values at those points).
#
# Rewritten to preallocate via vapply instead of growing `B` element by
# element, and to compute the left Riemann sum in one vectorized step.
CalcAUC <- function(orig, pert) {
  N <- nrow(orig)
  S <- abs(orig - pert)
  diag(S) <- 0
  # Evaluation grid: a sentinel just below zero (kept for visual purposes
  # in downstream CDF plots), zero itself, the distinct nonzero
  # differences, and 1 as an upper bound when not already present.
  A <- c(-10^(-5), 0, sort(unique(as.numeric(S[S != 0]))))
  if (max(A) < 1)
    A <- c(A, 1)
  # Empirical CDF of the N*N differences at each grid point.
  B <- vapply(A, function(a) sum(S <= a) / (N * N), numeric(1))
  # Left Riemann sum of the CDF over the grid.
  area <- sum(B[-length(B)] * diff(A))
  list(area = area, entry = A, cdf = B)
}
CalcPerturbedDiscrepancy <- function(origS, pertS, clusRange) {
diff <- NULL
for (clus in clusRange) {
diff[clus] <- sum(abs(origS[[clus]] - pertS[[clus]]))
}
AUC <- NULL
entries <- list()
cdfs <- list()
for (clus in clusRange) {
ret <- CalcAUC(origS[[clus]], pertS[[clus]]);
entries[[clus]] <- ret$entry
cdfs[[clus]] <- ret$cdf
AUC[clus] <- ret$area
}
list(Diff = round(diff, digits = 10), Entry = entries, CDF = cdfs, AUC = round(AUC, digits = 10))
} | /fuzzedpackages/PINSPlus/R/perturbation-clustering-helpers.R | no_license | akhikolla/testpackages | R | false | false | 8,808 | r | GetClusteringAlgorithm <- function(clusteringMethod = "kmeans", clusteringFunction = NULL, clusteringOptions = NULL, ...){
name = clusteringMethod
if (!is.function(clusteringFunction)) {
switch(
clusteringMethod,
kmeans = {
clusteringFunction <- kmeansWrapper
},
pam = {
clusteringFunction <- pamWrapper
},
hclust = {
clusteringFunction <- hclustWrapper
},
{
stop("clusteringMethod not found. Please pass a clusteringFunction instead")
}
)
}
else {
name = "Unknow"
}
list(
fun = function(data, k) do.call(clusteringFunction, c(list(data = data, k = k), clusteringOptions)),
name = name
)
}
GetPerturbationAlgorithm <- function(data = data, perturbMethod = "noise", perturbFunction = NULL, perturbOptions = NULL, ...){
name = perturbMethod
if (!is.function(perturbFunction)) {
switch(
perturbMethod,
noise = {
if (is.null(perturbOptions))
perturbOptions <- list()
noise = perturbOptions$noise
if (is.null(noise))
noise <- GetNoise(data, noisePercent = perturbOptions$noisePercent)
# get perturbed similarity
perturbFunction <- function(data, ...) {
AddNoisePerturb(data = data, noise = noise)
}
},
subsampling = {
perturbFunction <- SubSampling
},
{
stop("perturbMethod not found. Please pass a perturbFunction instead")
}
)
}
else {
name = "Unknow"
}
list(
fun = function(data) do.call(perturbFunction, c(list(data = data), perturbOptions)),
name = name
)
}
BuildConnectivityMatrix <- function(data, clus, clusteringAlgorithm) {
rowNum <- nrow(data)
S <- matrix(0L, rowNum, rowNum)
cluster <- clusteringAlgorithm(data, clus)
for (j in 1:clus) {
idx <- cluster == j
S[idx, idx] <- 1L
}
rownames(S) <- rownames(data)
colnames(S) <- rownames(data)
list(matrix = S, groups = cluster)
}
ClusterToConnectivity <- function(cluster){
S <- matrix(0L, length(cluster), length(cluster))
for (j in 1:max(cluster)) {
idx <- cluster == j
S[idx, idx] <- 1L
}
S
}
GetOriginalSimilarity <- function(data, clusRange, clusteringAlgorithm, showProgress = F, ncore) {
groupings <- list()
origS <- list()
if (showProgress) {
pb <- txtProgressBar(min = 0, max = length(clusRange), style = 3)
}
seeds = abs(round(rnorm(max(clusRange))*10^6))
for (clus in clusRange){
set.seed(seeds[clus])
groupings[[clus]] <- clusteringAlgorithm(data, clus)
origS[[clus]] <- ClusterToConnectivity(groupings[[clus]])
rownames(origS[[clus]]) <- colnames(origS[[clus]]) <- rownames(data)
if (showProgress) setTxtProgressBar(pb, clus)
}
list(origS = origS, groupings = groupings)
}
GetPerturbedSimilarity <- function(data, clusRange, iterMax, iterMin, origS, clusteringAlgorithm, perturbedFunction, stoppingCriteriaHandler, showProgress = F, ncore) {
pertS <- list()
currentIter <- rep(0,max(clusRange))
jobs <- rep(clusRange, iterMax)
maxJob = length(jobs)
seeds = list()
for (clus in clusRange) {
pertS[[clus]] <- list()
seeds[[clus]] <- round(rnorm(max(iterMax,1000))*10^6)
}
if (showProgress) pb <- txtProgressBar(min = 0, max = maxJob, style = 3)
kProgress = rep(0, max(clusRange))
step = iterMin
if(.Platform$OS.type == "unix") doParallel::registerDoParallel(min(ncore, step))
while (length(jobs) > 0) {
jobLength = step*length(clusRange)
step = 10
if (jobLength > length(jobs)){
jobLength = length(jobs)
}
currentJobs = list()
count = 1
for (clus in jobs[1:jobLength]){
kProgress[clus] = kProgress[clus] + 1
currentJobs[[count]] <- list(iter = kProgress[clus], clus = clus)
count = count + 1
}
if (jobLength < length(jobs)){
jobs <- jobs[(jobLength+1):length(jobs)]
}
else jobs <- c()
job <- NULL
if(.Platform$OS.type == "unix")
{
rets <- foreach(job = currentJobs) %dopar% {
clus = job$clus
set.seed(seeds[[clus]][job$iter])
perturbedRet <- perturbedFunction(data = data)
cMatrix <- BuildConnectivityMatrix(data = perturbedRet$data, clus, clusteringAlgorithm)
connectivityMatrix = perturbedRet$ConnectivityMatrixHandler(connectivityMatrix = cMatrix$matrix, iter = job$iter, k = clus)
list(
connectivityMatrix = connectivityMatrix,
clus = clus,
auc = CalcAUC(origS[[clus]], connectivityMatrix)$area
)
}
} else {
rets <- foreach(job = currentJobs) %do% {
clus = job$clus
set.seed(seeds[[clus]][job$iter])
perturbedRet <- perturbedFunction(data = data)
cMatrix <- BuildConnectivityMatrix(data = perturbedRet$data, clus, clusteringAlgorithm)
connectivityMatrix = perturbedRet$ConnectivityMatrixHandler(connectivityMatrix = cMatrix$matrix, iter = job$iter, k = clus)
list(
connectivityMatrix = connectivityMatrix,
clus = clus,
auc = CalcAUC(origS[[clus]], connectivityMatrix)$area
)
}
}
allStop = F
for(ret in rets){
if (kProgress[clus] != -1){
clus = ret$clus
currentIter[clus] <- currentIter[clus] + 1
pertS[[clus]][[currentIter[clus]]] <- ret$connectivityMatrix
stop <- stoppingCriteriaHandler(iter = currentIter[clus], k = clus, auc = ret$auc)
if (stop == 2) {
allStop <- T
}
else if (stop == 1) {
if (showProgress) setTxtProgressBar(pb, getTxtProgressBar(pb) + length(jobs[jobs == clus]))
jobs <- jobs[jobs != clus]
kProgress[clus] <- -1
}
}
if (showProgress) setTxtProgressBar(pb, getTxtProgressBar(pb) + 1)
}
if (allStop) break()
countDone <- -1
while(countDone != length(which(kProgress[clusRange] == -1))){
countDone <- length(which(kProgress[clusRange] == -1))
for (clus in clusRange){
if (kProgress[clus] != -1){
stop <- stoppingCriteriaHandler(k = clus, type = 1)
if (stop == 1) {
if (showProgress) setTxtProgressBar(pb, getTxtProgressBar(pb) + length(jobs[jobs == clus]))
jobs <- jobs[jobs != clus]
kProgress[clus] <- -1
}
}
}
}
}
if(.Platform$OS.type == "unix") doParallel::stopImplicitCluster()
if (showProgress) {
setTxtProgressBar(pb, maxJob)
cat("\n")
}
pertS
}
CalcAUC <- function(orig, pert) {
N <- nrow(orig)
S <- abs(orig - pert)
diag(S) <- 0
# added -10^(-5) for visual purposes
# A <- c(-10^(-5), sort(unique(as.numeric(S))))
A <- c(-10^(-5), 0, sort(unique(as.numeric(S[S!=0]))))
if (max(A) < 1)
A <- c(A, 1)
B <- NULL
for (i in 1:length(A)) {
B[i] <- sum(S <= A[i])/(N * N)
}
area <- 0
for (i in (2:length(A))) {
area <- area + B[i - 1] * (A[i] - A[i - 1])
}
list(area = area, entry = A, cdf = B)
}
# Summarises, for every cluster count in `clusRange`, how much the
# perturbed connectivity matrices deviate from the original ones.
# Returns Diff (total absolute entry-wise difference), the AUC of the
# difference CDF, and the CDF evaluation points/values, all indexed by k.
CalcPerturbedDiscrepancy <- function(origS, pertS, clusRange) {
  diff <- NULL
  AUC <- NULL
  entries <- list()
  cdfs <- list()
  # Single pass over k. Indexing by `clus` (not by position) deliberately
  # leaves NA/NULL gaps when clusRange does not start at 1.
  for (clus in clusRange) {
    diff[clus] <- sum(abs(origS[[clus]] - pertS[[clus]]))
    auc_ret <- CalcAUC(origS[[clus]], pertS[[clus]])
    entries[[clus]] <- auc_ret$entry
    cdfs[[clus]] <- auc_ret$cdf
    AUC[clus] <- auc_ret$area
  }
  list(Diff = round(diff, digits = 10), Entry = entries, CDF = cdfs, AUC = round(AUC, digits = 10))
}
# Internal backend for validate(): fits each formula on `train_data` and
# evaluates it on `test_data` (or on the partitions indicated by
# `partitions_col`), returning one result row per formula.
#
# `family` must be "gaussian", "binomial" or "multinomial"; `err_nc`
# makes non-convergence an error, while `rm_nc` silently drops
# non-converged models from the returned results instead.
# Returns the tibble produced by validate_list() with fitted models
# attached (return_models = TRUE).
call_validate <- function(train_data,
                          test_data,
                          formulas,
                          partitions_col,
                          family,
                          control,
                          REML,
                          cutoff,
                          positive,
                          metrics,
                          preprocessing,
                          err_nc,
                          rm_nc,
                          parallel,
                          verbose) {

  # Set default arguments
  link <- default_link(NULL, family = family)
  control <- default_control(control, family = family, link = link)
  # The shorthand metrics = "all" becomes list("all" = TRUE).
  if (checkmate::test_string(x = metrics, pattern = "^all$")) {
    metrics <- list("all" = TRUE)
  }

  # Check arguments ####
  # All assertions are pooled and reported together at the end.
  assert_collection <- checkmate::makeAssertCollection()
  checkmate::assert_choice(
    x = preprocessing,
    choices = c(
      "standardize",
      "scale",
      "center",
      "range"
    ),
    null.ok = TRUE,
    add = assert_collection
  )
  checkmate::assert_choice(
    x = family,
    choices = c(
      "gaussian",
      "binomial",
      "multinomial"
    ),
    add = assert_collection
  )
  checkmate::assert_list(
    x = metrics,
    types = "logical",
    any.missing = FALSE,
    names = "named",
    add = assert_collection
  )
  checkmate::assert_flag(x = rm_nc, add = assert_collection)
  checkmate::assert_flag(x = REML, add = assert_collection)
  checkmate::reportAssertions(assert_collection)
  # End of argument checks ####

  # Add AIC, AICc, BIC, r2m, r2c (unless disabled by user)
  metrics <- basics_update_metrics(metrics = metrics, family = family)

  # Delegate the actual fitting/evaluation to the shared validation
  # engine, wiring in the lm/lmer model and predict functions.
  results <- validate_list(
    train_data = train_data,
    test_data = test_data,
    partitions_col = partitions_col,
    formulas = formulas,
    model_fn = basics_model_fn,
    predict_fn = basics_predict_fn,
    preprocess_fn = basics_pick_preprocess_fn(
      preprocessing = preprocessing
    ),
    hyperparameters = basics_hparams(
      REML = REML, control = control,
      model_verbose = verbose,
      family = family
    ),
    family = family,
    cutoff = cutoff,
    positive = positive,
    metrics = metrics,
    info_cols = list(
      "Singular Fit Messages" = TRUE,
      "HParams" = FALSE,
      "Model" = TRUE
    ),
    err_nc = err_nc,
    rm_nc = FALSE, # Done below instead
    return_models = TRUE,
    verbose = verbose,
    parallel_ = parallel,
    caller = "validate()"
  )

  # If asked to remove non-converged models from results
  if (isTRUE(rm_nc)) {
    results <- results[results[["Convergence Warnings"]] == 0, ]
  }

  results
}
#' @importFrom plyr ldply
#' @importFrom dplyr %>%
#' @importFrom tidyr separate
# Internal backend for cross_validate(): cross-validates each formula on
# `data` using the fold columns in `fold_cols`, returning one result row
# per formula.
#
# `family` must be "gaussian", "binomial" or "multinomial"; `rm_nc`
# drops non-converged models from the returned results.
# Returns the tibble produced by cross_validate_list().
call_cross_validate <- function(data,
                                formulas,
                                fold_cols,
                                family,
                                control,
                                REML,
                                cutoff,
                                positive,
                                metrics,
                                preprocessing,
                                rm_nc,
                                parallel,
                                verbose) {

  # Set default arguments
  link <- default_link(NULL, family = family)
  control <- default_control(control, family = family, link = link)
  # The shorthand metrics = "all" becomes list("all" = TRUE).
  if (checkmate::test_string(x = metrics, pattern = "^all$")) {
    metrics <- list("all" = TRUE)
  }

  # Check arguments ####
  # All assertions are pooled and reported together at the end.
  assert_collection <- checkmate::makeAssertCollection()
  checkmate::assert_choice(
    x = preprocessing,
    choices = c(
      "standardize",
      "scale",
      "center",
      "range"
    ),
    null.ok = TRUE,
    add = assert_collection
  )
  checkmate::assert_choice(
    x = family,
    choices = c(
      "gaussian",
      "binomial",
      "multinomial"
    ),
    add = assert_collection
  )
  checkmate::assert_list(
    x = metrics,
    types = "logical",
    any.missing = FALSE,
    names = "named",
    add = assert_collection
  )
  checkmate::assert_flag(x = REML, add = assert_collection)
  checkmate::assert_flag(x = rm_nc, add = assert_collection)
  checkmate::reportAssertions(assert_collection)
  # End of argument checks ####

  # Add AIC, AICc, BIC, r2m, r2c (unless disabled by user)
  metrics <- basics_update_metrics(metrics = metrics, family = family)

  # Delegate the actual cross-validation to the shared engine, wiring in
  # the lm/lmer model and predict functions.
  results <- cross_validate_list(
    data = data,
    formulas = formulas,
    model_fn = basics_model_fn,
    predict_fn = basics_predict_fn,
    preprocess_fn = basics_pick_preprocess_fn(
      preprocessing = preprocessing
    ),
    hyperparameters = basics_hparams(
      REML = REML, control = control,
      model_verbose = verbose,
      family = family
    ),
    family = family,
    fold_cols = fold_cols,
    cutoff = cutoff,
    positive = positive,
    metrics = metrics,
    info_cols = list(
      "Singular Fit Messages" = TRUE,
      "HParams" = FALSE
    ),
    rm_nc = FALSE, # Done below instead
    verbose = verbose,
    parallel_ = parallel,
    caller = "cross_validate()"
  )

  # If asked to remove non-converged models from results
  if (isTRUE(rm_nc)) {
    results <- results[results[["Convergence Warnings"]] == 0, ]
  }

  results
}
### Basics, shared between validate and cross_validate
basics_update_metrics <- function(metrics, family) {
  # Enable the information criteria (AIC, AICc, BIC) by default for
  # gaussian models. For cross_validate we expect AIC to be comparable,
  # although with REML = TRUE we might not be able to compare models
  # with and without random effects. TODO (add to documentation)
  if (family != "gaussian") {
    return(metrics)
  }
  info_criteria <- c("AIC", "AICc", "BIC")
  unspecified <- setdiff(info_criteria, names(metrics))
  # Only auto-enable when more than one criterion is unspecified -
  # presumably a single omission is a deliberate user choice.
  if (is.list(metrics) && length(unspecified) > 1) {
    for (criterion in unspecified) {
      metrics[[criterion]] <- TRUE
    }
  }
  metrics
}
basics_hparams <- function(REML, control, model_verbose, family) {
  # Bundle the settings shared by the basics model/predict functions
  # into a named list. `control` is wrapped in list() so the (possibly
  # list-valued) control object is kept as a single element.
  # `is_special_fn` is a flag consumed downstream.
  hparams <- list()
  hparams[["REML"]] <- REML
  hparams[["control"]] <- list(control)
  hparams[["model_verbose"]] <- model_verbose
  hparams[["family"]] <- family
  hparams[["is_special_fn"]] <- TRUE
  hparams
}
basics_model_fn <- function(train_data, formula, hyperparameters) {
  # Model function shared by validate() and cross_validate().
  # Fits lm()/glm() when `formula` has no random effects and
  # lme4::lmer()/lme4::glmer() when it does, chosen by the `family`
  # hyperparameter.
  #
  # train_data: data frame to fit the model on.
  # formula: model formula; may contain lme4-style random effect terms.
  # hyperparameters: named list created by basics_hparams().
  # Returns: the fitted model object.

  # Extract hyperparameters
  REML <- hyperparameters[["REML"]]
  control <- hyperparameters[["control"]][[1]]
  model_verbose <- hyperparameters[["model_verbose"]]
  family_ <- hyperparameters[["family"]]
  # Check for random effects
  contains_random_effects <- rand_effects(formula)
  # Identify model function
  if (family_ == "gaussian") {
    model_type <- "lm"
  } else if (family_ == "binomial") {
    model_type <- "glm"
  } else {
    # Previously an unsupported family fell through to an obscure
    # "object 'model_type' not found" error; fail meaningfully instead.
    stop(paste0("basics_model_fn() does not support family '", family_, "'."))
  }
  if (isTRUE(contains_random_effects)) model_type <- paste0(model_type, "er")
  # Fitting the correct model
  # Checks the model_type and fits the model on the train_data
  # (messages below had a stray trailing apostrophe; removed)
  if (model_type == "lm") {
    message_if_model_verbose(model_verbose,
      msg = "Used lm() to fit the model."
    )
    return(lm(formula = formula, data = train_data))
  } else if (model_type == "lmer") {
    message_if_model_verbose(model_verbose,
      msg = "Used lme4::lmer() to fit the model."
    )
    return(lme4::lmer(
      formula = formula, data = train_data,
      REML = REML, control = control
    ))
  } else if (model_type == "glm") {
    message_if_model_verbose(model_verbose,
      msg = "Used glm() to fit the model."
    )
    return(glm(
      formula = formula, data = train_data,
      family = family_
    ))
  } else if (model_type == "glmer") {
    message_if_model_verbose(
      model_verbose,
      msg = "Used lme4::glmer() to fit the model."
    )
    # Fit the model using glmer() and return it
    return(lme4::glmer(
      formula = formula,
      data = train_data,
      family = family_,
      control = control
    ))
  }
}
basics_predict_fn <- function(test_data, model, formula, hyperparameters, train_data) {
  # Prediction function shared by validate() and cross_validate().
  # Selects the predict function matching the family and the presence
  # of random effects, then applies it to `test_data`.
  # (The unused extraction of hyperparameters[["link"]] was removed.)
  #
  # Returns: whatever the selected predict function returns.
  family_ <- hyperparameters[["family"]]
  contains_random_effects <- rand_effects(formula)
  # Note: The same predict_fn can be used for (g)lm and (g)lmer
  # so this conditional selection is technically unnecessary
  if (family_ == "gaussian") {
    fn_name <- if (isTRUE(contains_random_effects)) "lmer" else "lm"
  } else if (family_ == "binomial") {
    fn_name <- if (isTRUE(contains_random_effects)) "glmer_binomial" else "glm_binomial"
  } else {
    # Previously an unsupported family left `p_fn` undefined;
    # fail with a meaningful message instead.
    stop(paste0("basics_predict_fn() does not support family '", family_, "'."))
  }
  p_fn <- predict_functions(fn_name)
  p_fn(
    test_data = test_data, model = model,
    formula = formula, hyperparameters = hyperparameters
  )
}
basics_pick_preprocess_fn <- function(preprocessing) {
  # Maps the name of a preprocessing method to its preprocess function.
  #
  # preprocessing: one of "standardize", "scale", "center", "range",
  #   or NULL for no preprocessing.
  # Returns: the matching preprocess function, or NULL when
  #   `preprocessing` is NULL.
  if (is.null(preprocessing)) {
    return(NULL)
  }
  # Every known name maps directly to preprocess_functions(), so the
  # previous repetitive if/else dispatch collapses to one validated call.
  if (!preprocessing %in% c("standardize", "scale", "center", "range")) {
    stop("'preprocessing' was not found.")
  }
  preprocess_functions(preprocessing)
}
message_if_model_verbose <- function(model_verbose, msg, caller = "cross_validate()") {
  # Emits "<caller>: <msg>" as a message, but only in verbose mode.
  # Invisibly returns NULL either way.
  if (!isTRUE(model_verbose)) {
    return(invisible(NULL))
  }
  message(paste0(caller, ": ", msg))
}
| /R/call_validate.R | permissive | LudvigOlsen/cvms | R | false | false | 9,522 | r | call_validate <- function(train_data,
test_data,
formulas,
partitions_col,
family,
control,
REML,
cutoff,
positive,
metrics,
preprocessing,
err_nc,
rm_nc,
parallel,
verbose) {
# Set default arguments
link <- default_link(NULL, family = family)
control <- default_control(control, family = family, link = link)
if (checkmate::test_string(x = metrics, pattern = "^all$")) {
metrics <- list("all" = TRUE)
}
# Check arguments ####
assert_collection <- checkmate::makeAssertCollection()
checkmate::assert_choice(
x = preprocessing,
choices = c(
"standardize",
"scale",
"center",
"range"
),
null.ok = TRUE,
add = assert_collection
)
checkmate::assert_choice(
x = family,
choices = c(
"gaussian",
"binomial",
"multinomial"
),
add = assert_collection
)
checkmate::assert_list(
x = metrics,
types = "logical",
any.missing = FALSE,
names = "named",
add = assert_collection
)
checkmate::assert_flag(x = rm_nc, add = assert_collection)
checkmate::assert_flag(x = REML, add = assert_collection)
checkmate::reportAssertions(assert_collection)
# End of argument checks ####
# Add AIC, AICc, BIC, r2m, r2c (unless disabled by user)
metrics <- basics_update_metrics(metrics = metrics, family = family)
results <- validate_list(
train_data = train_data,
test_data = test_data,
partitions_col = partitions_col,
formulas = formulas,
model_fn = basics_model_fn,
predict_fn = basics_predict_fn,
preprocess_fn = basics_pick_preprocess_fn(
preprocessing = preprocessing
),
hyperparameters = basics_hparams(
REML = REML, control = control,
model_verbose = verbose,
family = family
),
family = family,
cutoff = cutoff,
positive = positive,
metrics = metrics,
info_cols = list(
"Singular Fit Messages" = TRUE,
"HParams" = FALSE,
"Model" = TRUE
),
err_nc = err_nc,
rm_nc = FALSE, # Done below instead
return_models = TRUE,
verbose = verbose,
parallel_ = parallel,
caller = "validate()"
)
# If asked to remove non-converged models from results
if (isTRUE(rm_nc)) {
results <- results[results[["Convergence Warnings"]] == 0, ]
}
results
}
#' @importFrom plyr ldply
#' @importFrom dplyr %>%
#' @importFrom tidyr separate
call_cross_validate <- function(data,
                                formulas,
                                fold_cols,
                                family,
                                control,
                                REML,
                                cutoff,
                                positive,
                                metrics,
                                preprocessing,
                                rm_nc,
                                parallel,
                                verbose) {
  # Internal workhorse for cross_validate().
  # Fills in defaults, validates the arguments, and delegates the actual
  # cross-validation to cross_validate_list() with the shared "basics"
  # model/predict/preprocess functions defined below in this file.
  #
  # data: data frame containing the fold column(s) named in `fold_cols`.
  # formulas: model formulas to cross-validate.
  # fold_cols: name(s) of the fold column(s) (forwarded to cross_validate_list()).
  # family: one of "gaussian", "binomial", "multinomial".
  # control: lme4-style control object; defaulted below when NULL.
  # REML: flag; whether to fit lmer() models with REML.
  # cutoff, positive: binomial classification settings (forwarded).
  # metrics: the string "all" or a named list of logical flags.
  # preprocessing: optional preprocessing name (see assertion below) or NULL.
  # rm_nc: flag; whether to remove non-converged models from the results.
  # parallel, verbose: forwarded to cross_validate_list().
  # Returns: the results from cross_validate_list(), with non-converged
  #   models removed when `rm_nc` is TRUE.

  # Set default arguments
  link <- default_link(NULL, family = family)
  control <- default_control(control, family = family, link = link)
  # "all" is shorthand for enabling every available metric
  if (checkmate::test_string(x = metrics, pattern = "^all$")) {
    metrics <- list("all" = TRUE)
  }
  # Check arguments ####
  # All assertions are collected and reported together, so their order
  # determines the order of messages in the combined error.
  assert_collection <- checkmate::makeAssertCollection()
  checkmate::assert_choice(
    x = preprocessing,
    choices = c(
      "standardize",
      "scale",
      "center",
      "range"
    ),
    null.ok = TRUE,
    add = assert_collection
  )
  checkmate::assert_choice(
    x = family,
    choices = c(
      "gaussian",
      "binomial",
      "multinomial"
    ),
    add = assert_collection
  )
  checkmate::assert_list(
    x = metrics,
    types = "logical",
    any.missing = FALSE,
    names = "named",
    add = assert_collection
  )
  checkmate::assert_flag(x = REML, add = assert_collection)
  checkmate::assert_flag(x = rm_nc, add = assert_collection)
  checkmate::reportAssertions(assert_collection)
  # End of argument checks ####
  # Add AIC, AICc, BIC, r2m, r2c (unless disabled by user)
  metrics <- basics_update_metrics(metrics = metrics, family = family)
  results <- cross_validate_list(
    data = data,
    formulas = formulas,
    model_fn = basics_model_fn,
    predict_fn = basics_predict_fn,
    preprocess_fn = basics_pick_preprocess_fn(
      preprocessing = preprocessing
    ),
    hyperparameters = basics_hparams(
      REML = REML, control = control,
      model_verbose = verbose,
      family = family
    ),
    family = family,
    fold_cols = fold_cols,
    cutoff = cutoff,
    positive = positive,
    metrics = metrics,
    info_cols = list(
      "Singular Fit Messages" = TRUE,
      "HParams" = FALSE
    ),
    rm_nc = FALSE, # Done below instead
    verbose = verbose,
    parallel_ = parallel,
    caller = "cross_validate()"
  )
  # If asked to remove non-converged models from results
  if (isTRUE(rm_nc)) {
    results <- results[results[["Convergence Warnings"]] == 0, ]
  }
  results
}
### Basics, shared between validate and cross_validate
basics_update_metrics <- function(metrics, family) {
  # Enable the information criteria (AIC, AICc, BIC) by default for
  # gaussian models. For cross_validate we expect AIC to be comparable,
  # although with REML = TRUE we might not be able to compare models
  # with and without random effects. TODO (add to documentation)
  if (family != "gaussian") {
    return(metrics)
  }
  info_criteria <- c("AIC", "AICc", "BIC")
  unspecified <- setdiff(info_criteria, names(metrics))
  # Only auto-enable when more than one criterion is unspecified -
  # presumably a single omission is a deliberate user choice.
  if (is.list(metrics) && length(unspecified) > 1) {
    for (criterion in unspecified) {
      metrics[[criterion]] <- TRUE
    }
  }
  metrics
}
basics_hparams <- function(REML, control, model_verbose, family) {
  # Bundle the settings shared by the basics model/predict functions
  # into a named list. `control` is wrapped in list() so the (possibly
  # list-valued) control object is kept as a single element.
  # `is_special_fn` is a flag consumed downstream.
  hparams <- list()
  hparams[["REML"]] <- REML
  hparams[["control"]] <- list(control)
  hparams[["model_verbose"]] <- model_verbose
  hparams[["family"]] <- family
  hparams[["is_special_fn"]] <- TRUE
  hparams
}
basics_model_fn <- function(train_data, formula, hyperparameters) {
  # Model function shared by validate() and cross_validate().
  # Fits lm()/glm() when `formula` has no random effects and
  # lme4::lmer()/lme4::glmer() when it does, chosen by the `family`
  # hyperparameter.
  #
  # train_data: data frame to fit the model on.
  # formula: model formula; may contain lme4-style random effect terms.
  # hyperparameters: named list created by basics_hparams().
  # Returns: the fitted model object.

  # Extract hyperparameters
  REML <- hyperparameters[["REML"]]
  control <- hyperparameters[["control"]][[1]]
  model_verbose <- hyperparameters[["model_verbose"]]
  family_ <- hyperparameters[["family"]]
  # Check for random effects
  contains_random_effects <- rand_effects(formula)
  # Identify model function
  if (family_ == "gaussian") {
    model_type <- "lm"
  } else if (family_ == "binomial") {
    model_type <- "glm"
  } else {
    # Previously an unsupported family fell through to an obscure
    # "object 'model_type' not found" error; fail meaningfully instead.
    stop(paste0("basics_model_fn() does not support family '", family_, "'."))
  }
  if (isTRUE(contains_random_effects)) model_type <- paste0(model_type, "er")
  # Fitting the correct model
  # Checks the model_type and fits the model on the train_data
  # (messages below had a stray trailing apostrophe; removed)
  if (model_type == "lm") {
    message_if_model_verbose(model_verbose,
      msg = "Used lm() to fit the model."
    )
    return(lm(formula = formula, data = train_data))
  } else if (model_type == "lmer") {
    message_if_model_verbose(model_verbose,
      msg = "Used lme4::lmer() to fit the model."
    )
    return(lme4::lmer(
      formula = formula, data = train_data,
      REML = REML, control = control
    ))
  } else if (model_type == "glm") {
    message_if_model_verbose(model_verbose,
      msg = "Used glm() to fit the model."
    )
    return(glm(
      formula = formula, data = train_data,
      family = family_
    ))
  } else if (model_type == "glmer") {
    message_if_model_verbose(
      model_verbose,
      msg = "Used lme4::glmer() to fit the model."
    )
    # Fit the model using glmer() and return it
    return(lme4::glmer(
      formula = formula,
      data = train_data,
      family = family_,
      control = control
    ))
  }
}
basics_predict_fn <- function(test_data, model, formula, hyperparameters, train_data) {
  # Prediction function shared by validate() and cross_validate().
  # Selects the predict function matching the family and the presence
  # of random effects, then applies it to `test_data`.
  # (The unused extraction of hyperparameters[["link"]] was removed.)
  #
  # Returns: whatever the selected predict function returns.
  family_ <- hyperparameters[["family"]]
  contains_random_effects <- rand_effects(formula)
  # Note: The same predict_fn can be used for (g)lm and (g)lmer
  # so this conditional selection is technically unnecessary
  if (family_ == "gaussian") {
    fn_name <- if (isTRUE(contains_random_effects)) "lmer" else "lm"
  } else if (family_ == "binomial") {
    fn_name <- if (isTRUE(contains_random_effects)) "glmer_binomial" else "glm_binomial"
  } else {
    # Previously an unsupported family left `p_fn` undefined;
    # fail with a meaningful message instead.
    stop(paste0("basics_predict_fn() does not support family '", family_, "'."))
  }
  p_fn <- predict_functions(fn_name)
  p_fn(
    test_data = test_data, model = model,
    formula = formula, hyperparameters = hyperparameters
  )
}
basics_pick_preprocess_fn <- function(preprocessing) {
  # Maps the name of a preprocessing method to its preprocess function.
  #
  # preprocessing: one of "standardize", "scale", "center", "range",
  #   or NULL for no preprocessing.
  # Returns: the matching preprocess function, or NULL when
  #   `preprocessing` is NULL.
  if (is.null(preprocessing)) {
    return(NULL)
  }
  # Every known name maps directly to preprocess_functions(), so the
  # previous repetitive if/else dispatch collapses to one validated call.
  if (!preprocessing %in% c("standardize", "scale", "center", "range")) {
    stop("'preprocessing' was not found.")
  }
  preprocess_functions(preprocessing)
}
message_if_model_verbose <- function(model_verbose, msg, caller = "cross_validate()") {
  # Emits "<caller>: <msg>" as a message, but only in verbose mode.
  # Invisibly returns NULL either way.
  if (!isTRUE(model_verbose)) {
    return(invisible(NULL))
  }
  message(paste0(caller, ": ", msg))
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/optFUN.R
\name{gmv_opt}
\alias{gmv_opt}
\title{GMV/QU QP Optimization}
\usage{
gmv_opt(R, constraints, moments, lambda, target, lambda_hhi, conc_groups,
solver = "quadprog", control = NULL)
}
\arguments{
\item{R}{xts object of asset returns}
\item{constraints}{object of constraints in the portfolio object extracted with \code{get_constraints}}
\item{moments}{object of moments computed based on objective functions}
\item{lambda}{risk_aversion parameter}
\item{target}{target return value}
\item{lambda_hhi}{concentration aversion parameter}
\item{conc_groups}{list of vectors specifying the groups of the assets.}
\item{solver}{solver to use}
\item{control}{list of solver control parameters}
}
\description{
This function is called by optimize.portfolio to solve minimum variance or
maximum quadratic utility problems
}
\author{
Ross Bennett
}
| /man/gmv_opt.Rd | no_license | arturochian/PortfolioAnalytics-1 | R | false | false | 945 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/optFUN.R
\name{gmv_opt}
\alias{gmv_opt}
\title{GMV/QU QP Optimization}
\usage{
gmv_opt(R, constraints, moments, lambda, target, lambda_hhi, conc_groups,
solver = "quadprog", control = NULL)
}
\arguments{
\item{R}{xts object of asset returns}
\item{constraints}{object of constraints in the portfolio object extracted with \code{get_constraints}}
\item{moments}{object of moments computed based on objective functions}
\item{lambda}{risk_aversion parameter}
\item{target}{target return value}
\item{lambda_hhi}{concentration aversion parameter}
\item{conc_groups}{list of vectors specifying the groups of the assets.}
\item{solver}{solver to use}
\item{control}{list of solver control parameters}
}
\description{
This function is called by optimize.portfolio to solve minimum variance or
maximum quadratic utility problems
}
\author{
Ross Bennett
}
|
/soluzioni r/svolgimento_LAB_04.07.14.R | no_license | Bakapo/ScriptR | R | false | false | 956 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PF.R
\name{PF_EM}
\alias{PF_EM}
\title{EM Estimation with Particle Filters and Smoothers}
\usage{
PF_EM(
formula,
data,
model = "logit",
by,
max_T,
id,
a_0,
Q_0,
Q,
order = 1,
control = PF_control(...),
trace = 0,
seed = NULL,
type = "RW",
fixed = NULL,
random = NULL,
Fmat,
fixed_effects,
G,
theta,
J,
K,
psi,
phi,
...
)
}
\arguments{
\item{formula}{\code{\link[survival]{coxph}} like formula with \code{\link[survival]{Surv}(tstart, tstop, event)} on the left hand site of \code{~}.}
\item{data}{\code{data.frame} or environment containing the outcome and covariates.}
\item{model}{either \code{'logit'} for binary outcomes with the logistic
link function, \code{'cloglog'} for binary outcomes with the inverse cloglog
link function, or \code{'exponential'} for piecewise constant exponential distributed arrival times.}
\item{by}{interval length of the bins in which parameters are fixed.}
\item{max_T}{end of the last interval interval.}
\item{id}{vector of ids for each row of the in the design matrix.}
\item{a_0}{vector \eqn{a_0} for the initial coefficient vector for the first iteration (optional). Default is estimates from static model (see \code{\link{static_glm}}).}
\item{Q_0}{covariance matrix for the prior distribution.}
\item{Q}{initial covariance matrix for the state equation.}
\item{order}{order of the random walk.}
\item{control}{see \code{\link{PF_control}}.}
\item{trace}{argument to get progress information. Zero will yield no info and larger integer values will yield incrementally more information.}
\item{seed}{seed to set at the start of every EM iteration. See
\code{\link{set.seed}}.}
\item{type}{type of state model. Either \code{"RW"} for a [R]andom [W]alk or
"VAR" for [V]ector [A]uto[R]egression.}
\item{fixed}{two-sided \code{\link{formula}} to be used
with \code{random} instead of \code{formula}. It is of the form
\code{Surv(tstart, tstop, event) ~ x} or
\code{Surv(tstart, tstop, event) ~ - 1} for no fixed effects.}
\item{random}{one-sided \code{\link{formula}} to be used
with \code{fixed} instead of \code{formula}. It is of the form
\code{~ z}.}
\item{Fmat}{starting value for \eqn{F} when \code{type = "VAR"}. See
'Details' in \code{\link{PF_EM}}.}
\item{fixed_effects}{starting values for fixed effects if any. See
\code{\link{ddFixed}}.}
\item{G, theta, J, K, psi, phi}{parameters for a restricted \code{type = "VAR"} model.
See the vignette mentioned in 'Details' of \code{\link{PF_EM}} and the
examples linked to in 'See Also'.}
\item{...}{optional way to pass arguments to \code{control}.}
}
\value{
An object of class \code{PF_EM}.
}
\description{
Method to estimate the hyper parameters with an EM algorithm.
}
\details{
Estimates a state model of the form
\deqn{\alpha_t = F \alpha_{t - 1} + R\epsilon_t, \qquad \epsilon_t \sim N(0, Q)}
where \eqn{F\in{\rm I\!R}^{p\times p}} has full rank,
\eqn{\alpha_t\in {\rm I\!R}^p}, \eqn{\epsilon_t\in {\rm I\!R}^r},
\eqn{r \leq p}, and \eqn{R = (e_{l_1},e_{l_2},\dots,e_{l_r})}
where \eqn{e_k} is column from the \eqn{p} dimensional identity matrix and
\eqn{l_1<l_2<\dots<l_r}. The time zero state is drawn from
\deqn{\alpha_0\sim N(a_0, Q_0)}
with \eqn{Q_0 \in {\rm I\!R}^{p\times p}}. The latent states,
\eqn{\alpha_t}, are related to the output through the linear predictors
\deqn{\eta_{it} = X_t(R^+\alpha_t) + Z_t\beta}
where \eqn{X_t\in{\rm I\!R}^{n_t\times r}} and
\eqn{Z_t\in{\rm I\!R}^{n_t\times c}} are design matrices and the outcome for an
individual \eqn{i} at time \eqn{t} is distributed according
to an exponential family member given \eqn{\eta_{it}}. \eqn{\beta} are
constant coefficients.
See \code{vignette("Particle_filtering", "dynamichazard")} for details.
}
\section{Warning}{
The function is still under development so the output and API may change.
}
\examples{
\dontrun{
#####
# Fit model with lung data set from survival
# Warning: long-ish computation time
library(dynamichazard)
.lung <- lung[!is.na(lung$ph.ecog), ]
# standardize
.lung$age <- scale(.lung$age)
# fit
set.seed(43588155)
pf_fit <- PF_EM(
Surv(time, status == 2) ~ ddFixed(ph.ecog) + age,
data = .lung, by = 50, id = 1:nrow(.lung),
Q_0 = diag(1, 2), Q = diag(.5^2, 2),
max_T = 800,
control = PF_control(
N_fw_n_bw = 500, N_first = 2500, N_smooth = 5000,
n_max = 50, eps = .001, Q_tilde = diag(.2^2, 2), est_a_0 = FALSE,
n_threads = max(parallel::detectCores(logical = FALSE), 1)))
# Plot state vector estimates
plot(pf_fit, cov_index = 1)
plot(pf_fit, cov_index = 2)
# Plot log-likelihood
plot(pf_fit$log_likes)
}
\dontrun{
######
# example with fixed intercept
# prepare data
temp <- subset(pbc, id <= 312, select=c(id, sex, time, status, edema, age))
pbc2 <- tmerge(temp, temp, id=id, death = event(time, status))
pbc2 <- tmerge(pbc2, pbcseq, id=id, albumin = tdc(day, albumin),
protime = tdc(day, protime), bili = tdc(day, bili))
pbc2 <- pbc2[, c("id", "tstart", "tstop", "death", "sex", "edema",
"age", "albumin", "protime", "bili")]
pbc2 <- within(pbc2, {
log_albumin <- log(albumin)
log_protime <- log(protime)
log_bili <- log(bili)
})
# standardize
for(c. in c("age", "log_albumin", "log_protime", "log_bili"))
pbc2[[c.]] <- drop(scale(pbc2[[c.]]))
# fit model with extended Kalman filter
ddfit <- ddhazard(
Surv(tstart, tstop, death == 2) ~ ddFixed_intercept() + ddFixed(age) +
ddFixed(edema) + ddFixed(log_albumin) + ddFixed(log_protime) + log_bili,
pbc2, Q_0 = 100, Q = 1e-2, by = 100, id = pbc2$id,
model = "exponential", max_T = 3600,
control = ddhazard_control(eps = 1e-5, NR_eps = 1e-4, n_max = 1e4))
summary(ddfit)
# fit model with particle filter
set.seed(88235076)
pf_fit <- PF_EM(
Surv(tstart, tstop, death == 2) ~ ddFixed_intercept() + ddFixed(age) +
ddFixed(edema) + ddFixed(log_albumin) + ddFixed(log_protime) + log_bili,
pbc2, Q_0 = 2^2, Q = ddfit$Q * 100, # use estimate from before
by = 100, id = pbc2$id,
model = "exponential", max_T = 3600,
control = PF_control(
N_fw_n_bw = 500, N_smooth = 2500, N_first = 1000, eps = 1e-3,
method = "AUX_normal_approx_w_cloud_mean", est_a_0 = FALSE,
Q_tilde = as.matrix(.1^2),
n_max = 25, # just take a few iterations as an example
n_threads = max(parallel::detectCores(logical = FALSE), 1)))
# compare results
plot(ddfit)
plot(pf_fit)
sqrt(ddfit$Q * 100)
sqrt(pf_fit$Q)
rbind(ddfit$fixed_effects, pf_fit$fixed_effects)
}
\dontrun{
#####
# simulation example with `random` and `fixed` argument and a restricted
# model
# g groups with k individuals in each
g <- 3L
k <- 400L
# matrices for state equation
p <- g + 1L
G <- matrix(0., p^2, 2L)
for(i in 1:p)
G[i + (i - 1L) * p, 1L + (i == p)] <- 1L
theta <- c(.9, .8)
# coefficients in transition density
(F. <- matrix(as.vector(G \%*\% theta), 4L, 4L))
J <- matrix(0., ncol = 2L, nrow = p)
J[-p, 1L] <- J[p, 2L] <- 1
psi <- c(log(c(.3, .1)))
K <- matrix(0., p * (p - 1L) / 2L, 2L)
j <- 0L
for(i in (p - 1L):1L){
j <- j + i
K[j, 2L] <- 1
}
K[K[, 2L] < 1, 1L] <- 1
phi <- log(-(c(.8, .3) + 1) / (c(.8, .3) - 1))
V <- diag(exp(drop(J \%*\% psi)))
C <- diag(1, ncol(V))
C[lower.tri(C)] <- 2/(1 + exp(-drop(K \%*\% phi))) - 1
C[upper.tri(C)] <- t(C)[upper.tri(C)]
(Q <- V \%*\% C \%*\% V) # covariance matrix in transition density
cov2cor(Q)
Q_0 <- get_Q_0(Q, F.) # time-invariant covariance matrix
beta <- c(rep(-6, g), 0) # all groups have the same long run mean intercept
# simulate state variables
set.seed(56219373)
n_periods <- 300L
alphas <- matrix(nrow = n_periods + 1L, ncol = p)
alphas[1L, ] <- rnorm(p) \%*\% chol(Q_0)
for(i in 1:n_periods + 1L)
alphas[i, ] <- F. \%*\% alphas[i - 1L, ] + drop(rnorm(p) \%*\% chol(Q))
alphas <- t(t(alphas) + beta)
# plot state variables
matplot(alphas, type = "l", lty = 1)
# simulate individuals' outcome
n_obs <- g * k
df <- lapply(1:n_obs, function(i){
# find the group
grp <- (i - 1L) \%/\% (n_obs / g) + 1L
# left-censoring
tstart <- max(0L, sample.int((n_periods - 1L) * 2L, 1) - n_periods + 1L)
# covariates
x <- c(1, rnorm(1))
# outcome (stop time and event indicator)
osa <- NULL
oso <- NULL
osx <- NULL
y <- FALSE
for(tstop in (tstart + 1L):n_periods){
sigmoid <- 1 / (1 + exp(- drop(x \%*\% alphas[tstop + 1L, c(grp, p)])))
if(sigmoid > runif(1)){
y <- TRUE
break
}
if(.01 > runif(1L) && tstop < n_periods){
# sample new covariate
osa <- c(osa, tstart)
tstart <- tstop
oso <- c(oso, tstop)
osx <- c(osx, x[2])
x[2] <- rnorm(1)
}
}
cbind(
tstart = c(osa, tstart), tstop = c(oso, tstop),
x = c(osx, x[2]), y = c(rep(FALSE, length(osa)), y), grp = grp,
id = i)
})
df <- data.frame(do.call(rbind, df))
df$grp <- factor(df$grp)
# fit model. Start with "cheap" iterations
fit <- PF_EM(
fixed = Surv(tstart, tstop, y) ~ x, random = ~ grp + x - 1,
data = df, model = "logit", by = 1L, max_T = max(df$tstop),
Q_0 = diag(1.5^2, p), id = df$id, type = "VAR",
G = G, theta = c(.5, .5), J = J, psi = log(c(.1, .1)),
K = K, phi = log(-(c(.4, 0) + 1) / (c(.4, 0) - 1)),
control = PF_control(
N_fw_n_bw = 100L, N_smooth = 100L, N_first = 500L,
method = "AUX_normal_approx_w_cloud_mean",
nu = 5L, # sample from multivariate t-distribution
n_max = 100L, averaging_start = 50L,
smoother = "Fearnhead_O_N", eps = 1e-4, covar_fac = 1.2,
n_threads = 4L # depends on your cpu(s)
),
trace = 1L)
plot(fit$log_likes) # log-likelihood approximation at each iterations
# take more iterations with more particles
cl <- fit$call
ctrl <- cl[["control"]]
ctrl[c("N_fw_n_bw", "N_smooth", "N_first", "n_max",
"averaging_start")] <- list(500L, 2000L, 5000L, 200L, 30L)
cl[["control"]] <- ctrl
cl[c("phi", "psi", "theta")] <- list(fit$phi, fit$psi, fit$theta)
fit_extra <- eval(cl)
plot(fit_extra$log_likes) # log-likelihood approximation at each iteration
# check estimates
sqrt(diag(fit_extra$Q))
sqrt(diag(Q))
cov2cor(fit_extra$Q)
cov2cor(Q)
fit_extra$F
F.
# plot predicted state variables
for(i in 1:p){
plot(fit_extra, cov_index = i)
abline(h = 0, lty = 2)
lines(1:nrow(alphas) - 1, alphas[, i] - beta[i], lty = 3)
}
}
}
\seealso{
\code{\link{PF_forward_filter}} to get a more precise estimate of
the final log-likelihood.
See the examples at https://github.com/boennecd/dynamichazard/tree/master/examples.
}
| /dynamichazard/man/PF_EM.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 10,472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PF.R
\name{PF_EM}
\alias{PF_EM}
\title{EM Estimation with Particle Filters and Smoothers}
\usage{
PF_EM(
formula,
data,
model = "logit",
by,
max_T,
id,
a_0,
Q_0,
Q,
order = 1,
control = PF_control(...),
trace = 0,
seed = NULL,
type = "RW",
fixed = NULL,
random = NULL,
Fmat,
fixed_effects,
G,
theta,
J,
K,
psi,
phi,
...
)
}
\arguments{
\item{formula}{\code{\link[survival]{coxph}} like formula with \code{\link[survival]{Surv}(tstart, tstop, event)} on the left hand site of \code{~}.}
\item{data}{\code{data.frame} or environment containing the outcome and covariates.}
\item{model}{either \code{'logit'} for binary outcomes with the logistic
link function, \code{'cloglog'} for binary outcomes with the inverse cloglog
link function, or \code{'exponential'} for piecewise constant exponential distributed arrival times.}
\item{by}{interval length of the bins in which parameters are fixed.}
\item{max_T}{end of the last interval.}
\item{id}{vector of ids for each row of the in the design matrix.}
\item{a_0}{vector \eqn{a_0} for the initial coefficient vector for the first iteration (optional). Default is estimates from static model (see \code{\link{static_glm}}).}
\item{Q_0}{covariance matrix for the prior distribution.}
\item{Q}{initial covariance matrix for the state equation.}
\item{order}{order of the random walk.}
\item{control}{see \code{\link{PF_control}}.}
\item{trace}{argument to get progress information. Zero will yield no info and larger integer values will yield incrementally more information.}
\item{seed}{seed to set at the start of every EM iteration. See
\code{\link{set.seed}}.}
\item{type}{type of state model. Either \code{"RW"} for a [R]andom [W]alk or
"VAR" for [V]ector [A]uto[R]egression.}
\item{fixed}{two-sided \code{\link{formula}} to be used
with \code{random} instead of \code{formula}. It is of the form
\code{Surv(tstart, tstop, event) ~ x} or
\code{Surv(tstart, tstop, event) ~ - 1} for no fixed effects.}
\item{random}{one-sided \code{\link{formula}} to be used
with \code{fixed} instead of \code{formula}. It is of the form
\code{~ z}.}
\item{Fmat}{starting value for \eqn{F} when \code{type = "VAR"}. See
'Details' in \code{\link{PF_EM}}.}
\item{fixed_effects}{starting values for fixed effects if any. See
\code{\link{ddFixed}}.}
\item{G, theta, J, K, psi, phi}{parameters for a restricted \code{type = "VAR"} model.
See the vignette mentioned in 'Details' of \code{\link{PF_EM}} and the
examples linked to in 'See Also'.}
\item{...}{optional way to pass arguments to \code{control}.}
}
\value{
An object of class \code{PF_EM}.
}
\description{
Method to estimate the hyper parameters with an EM algorithm.
}
\details{
Estimates a state model of the form
\deqn{\alpha_t = F \alpha_{t-1} + R\epsilon_t, \qquad \epsilon_t \sim N(0, Q)}
where \eqn{F\in{\rm I\!R}^{p\times p}} has full rank,
\eqn{\alpha_t\in {\rm I\!R}^p}, \eqn{\epsilon_t\in {\rm I\!R}^r},
\eqn{r \leq p}, and \eqn{R = (e_{l_1},e_{l_2},\dots,e_{l_r})}
where \eqn{e_k} is column from the \eqn{p} dimensional identity matrix and
\eqn{l_1<l_2<\dots<l_r}. The time zero state is drawn from
\deqn{\alpha_0\sim N(a_0, Q_0)}
with \eqn{Q_0 \in {\rm I\!R}^{p\times p}}. The latent states,
\eqn{\alpha_t}, are related to the output through the linear predictors
\deqn{\eta_{it} = X_t(R^+\alpha_t) + Z_t\beta}
where \eqn{X_t\in{\rm I\!R}^{n_t\times r}} and
\eqn{Z_t\in{\rm I\!R}^{n_t\times c}} are design matrices and the outcome for an
individual \eqn{i} at time \eqn{t} is distributed according
to an exponential family member given \eqn{\eta_{it}}. \eqn{\beta} are
constant coefficients.
See \code{vignette("Particle_filtering", "dynamichazard")} for details.
}
\section{Warning}{
The function is still under development so the output and API may change.
}
\examples{
\dontrun{
#####
# Fit model with lung data set from survival
# Warning: long-ish computation time
library(dynamichazard)
.lung <- lung[!is.na(lung$ph.ecog), ]
# standardize
.lung$age <- scale(.lung$age)
# fit
set.seed(43588155)
pf_fit <- PF_EM(
Surv(time, status == 2) ~ ddFixed(ph.ecog) + age,
data = .lung, by = 50, id = 1:nrow(.lung),
Q_0 = diag(1, 2), Q = diag(.5^2, 2),
max_T = 800,
control = PF_control(
N_fw_n_bw = 500, N_first = 2500, N_smooth = 5000,
n_max = 50, eps = .001, Q_tilde = diag(.2^2, 2), est_a_0 = FALSE,
n_threads = max(parallel::detectCores(logical = FALSE), 1)))
# Plot state vector estimates
plot(pf_fit, cov_index = 1)
plot(pf_fit, cov_index = 2)
# Plot log-likelihood
plot(pf_fit$log_likes)
}
\dontrun{
######
# example with fixed intercept
# prepare data
temp <- subset(pbc, id <= 312, select=c(id, sex, time, status, edema, age))
pbc2 <- tmerge(temp, temp, id=id, death = event(time, status))
pbc2 <- tmerge(pbc2, pbcseq, id=id, albumin = tdc(day, albumin),
protime = tdc(day, protime), bili = tdc(day, bili))
pbc2 <- pbc2[, c("id", "tstart", "tstop", "death", "sex", "edema",
"age", "albumin", "protime", "bili")]
pbc2 <- within(pbc2, {
log_albumin <- log(albumin)
log_protime <- log(protime)
log_bili <- log(bili)
})
# standardize
for(c. in c("age", "log_albumin", "log_protime", "log_bili"))
pbc2[[c.]] <- drop(scale(pbc2[[c.]]))
# fit model with extended Kalman filter
ddfit <- ddhazard(
Surv(tstart, tstop, death == 2) ~ ddFixed_intercept() + ddFixed(age) +
ddFixed(edema) + ddFixed(log_albumin) + ddFixed(log_protime) + log_bili,
pbc2, Q_0 = 100, Q = 1e-2, by = 100, id = pbc2$id,
model = "exponential", max_T = 3600,
control = ddhazard_control(eps = 1e-5, NR_eps = 1e-4, n_max = 1e4))
summary(ddfit)
# fit model with particle filter
set.seed(88235076)
pf_fit <- PF_EM(
Surv(tstart, tstop, death == 2) ~ ddFixed_intercept() + ddFixed(age) +
ddFixed(edema) + ddFixed(log_albumin) + ddFixed(log_protime) + log_bili,
pbc2, Q_0 = 2^2, Q = ddfit$Q * 100, # use estimate from before
by = 100, id = pbc2$id,
model = "exponential", max_T = 3600,
control = PF_control(
N_fw_n_bw = 500, N_smooth = 2500, N_first = 1000, eps = 1e-3,
method = "AUX_normal_approx_w_cloud_mean", est_a_0 = FALSE,
Q_tilde = as.matrix(.1^2),
n_max = 25, # just take a few iterations as an example
n_threads = max(parallel::detectCores(logical = FALSE), 1)))
# compare results
plot(ddfit)
plot(pf_fit)
sqrt(ddfit$Q * 100)
sqrt(pf_fit$Q)
rbind(ddfit$fixed_effects, pf_fit$fixed_effects)
}
\dontrun{
#####
# simulation example with `random` and `fixed` argument and a restricted
# model
# g groups with k individuals in each
g <- 3L
k <- 400L
# matrices for state equation
p <- g + 1L
G <- matrix(0., p^2, 2L)
for(i in 1:p)
G[i + (i - 1L) * p, 1L + (i == p)] <- 1L
theta <- c(.9, .8)
# coefficients in transition density
(F. <- matrix(as.vector(G \%*\% theta), 4L, 4L))
J <- matrix(0., ncol = 2L, nrow = p)
J[-p, 1L] <- J[p, 2L] <- 1
psi <- c(log(c(.3, .1)))
K <- matrix(0., p * (p - 1L) / 2L, 2L)
j <- 0L
for(i in (p - 1L):1L){
j <- j + i
K[j, 2L] <- 1
}
K[K[, 2L] < 1, 1L] <- 1
phi <- log(-(c(.8, .3) + 1) / (c(.8, .3) - 1))
V <- diag(exp(drop(J \%*\% psi)))
C <- diag(1, ncol(V))
C[lower.tri(C)] <- 2/(1 + exp(-drop(K \%*\% phi))) - 1
C[upper.tri(C)] <- t(C)[upper.tri(C)]
(Q <- V \%*\% C \%*\% V) # covariance matrix in transition density
cov2cor(Q)
Q_0 <- get_Q_0(Q, F.) # time-invariant covariance matrix
beta <- c(rep(-6, g), 0) # all groups have the same long run mean intercept
# simulate state variables
set.seed(56219373)
n_periods <- 300L
alphas <- matrix(nrow = n_periods + 1L, ncol = p)
alphas[1L, ] <- rnorm(p) \%*\% chol(Q_0)
for(i in 1:n_periods + 1L)
alphas[i, ] <- F. \%*\% alphas[i - 1L, ] + drop(rnorm(p) \%*\% chol(Q))
alphas <- t(t(alphas) + beta)
# plot state variables
matplot(alphas, type = "l", lty = 1)
# simulate individuals' outcome
n_obs <- g * k
df <- lapply(1:n_obs, function(i){
# find the group
grp <- (i - 1L) \%/\% (n_obs / g) + 1L
# left-censoring
tstart <- max(0L, sample.int((n_periods - 1L) * 2L, 1) - n_periods + 1L)
# covariates
x <- c(1, rnorm(1))
# outcome (stop time and event indicator)
osa <- NULL
oso <- NULL
osx <- NULL
y <- FALSE
for(tstop in (tstart + 1L):n_periods){
sigmoid <- 1 / (1 + exp(- drop(x \%*\% alphas[tstop + 1L, c(grp, p)])))
if(sigmoid > runif(1)){
y <- TRUE
break
}
if(.01 > runif(1L) && tstop < n_periods){
# sample new covariate
osa <- c(osa, tstart)
tstart <- tstop
oso <- c(oso, tstop)
osx <- c(osx, x[2])
x[2] <- rnorm(1)
}
}
cbind(
tstart = c(osa, tstart), tstop = c(oso, tstop),
x = c(osx, x[2]), y = c(rep(FALSE, length(osa)), y), grp = grp,
id = i)
})
df <- data.frame(do.call(rbind, df))
df$grp <- factor(df$grp)
# fit model. Start with "cheap" iterations
fit <- PF_EM(
fixed = Surv(tstart, tstop, y) ~ x, random = ~ grp + x - 1,
data = df, model = "logit", by = 1L, max_T = max(df$tstop),
Q_0 = diag(1.5^2, p), id = df$id, type = "VAR",
G = G, theta = c(.5, .5), J = J, psi = log(c(.1, .1)),
K = K, phi = log(-(c(.4, 0) + 1) / (c(.4, 0) - 1)),
control = PF_control(
N_fw_n_bw = 100L, N_smooth = 100L, N_first = 500L,
method = "AUX_normal_approx_w_cloud_mean",
nu = 5L, # sample from multivariate t-distribution
n_max = 100L, averaging_start = 50L,
smoother = "Fearnhead_O_N", eps = 1e-4, covar_fac = 1.2,
n_threads = 4L # depends on your cpu(s)
),
trace = 1L)
plot(fit$log_likes) # log-likelihood approximation at each iterations
# take more iterations with more particles
cl <- fit$call
ctrl <- cl[["control"]]
ctrl[c("N_fw_n_bw", "N_smooth", "N_first", "n_max",
"averaging_start")] <- list(500L, 2000L, 5000L, 200L, 30L)
cl[["control"]] <- ctrl
cl[c("phi", "psi", "theta")] <- list(fit$phi, fit$psi, fit$theta)
fit_extra <- eval(cl)
plot(fit_extra$log_likes) # log-likelihood approximation at each iteration
# check estimates
sqrt(diag(fit_extra$Q))
sqrt(diag(Q))
cov2cor(fit_extra$Q)
cov2cor(Q)
fit_extra$F
F.
# plot predicted state variables
for(i in 1:p){
plot(fit_extra, cov_index = i)
abline(h = 0, lty = 2)
lines(1:nrow(alphas) - 1, alphas[, i] - beta[i], lty = 3)
}
}
}
\seealso{
\code{\link{PF_forward_filter}} to get a more precise estimate of
the final log-likelihood.
See the examples at https://github.com/boennecd/dynamichazard/tree/master/examples.
}
|
\name{whop.eg.genename}
\alias{whop.eg.genename}
\title{
Find the gene name for a given Entrez identifier
}
\description{
Find the gene name for a given Entrez identifier
}
\usage{
whop.eg.genename(id, db)
}
\arguments{
\item{id}{Entrez identifier}
\item{db}{Organism database name, if not using currently activated one}
}
\value{
Gene names
}
\author{
Ulrich Wittelsbuerger
}
| /man/whop.eg.genename.Rd | no_license | cran/WhopGenome | R | false | false | 381 | rd | \name{whop.eg.genename}
\alias{whop.eg.genename}
\title{
Find the gene name for a given Entrez identifier
}
\description{
Find the gene name for a given Entrez identifier
}
\usage{
whop.eg.genename(id, db)
}
\arguments{
\item{id}{Entrez identifier}
\item{db}{Organism database name, if not using currently activated one}
}
\value{
Gene names
}
\author{
Ulrich Wittelsbuerger
}
|
这是一个基于ES6,Webpack,webpack-dev-server, vue, iview, arcgis api for javascript开发webgis应用的基础框架。
配置了开发和部署两种模式,开发模式实现热加载。 | /README.RD | no_license | whuln/webgis-iview | R | false | false | 198 | rd | 这是一个基于ES6,Webpack,webpack-dev-server, vue, iview, arcgis api for javascript开发webgis应用的基础框架。
配置了开发和部署两种模式,开发模式实现热加载。 |
# Equipment-failure classification pipeline:
# clean the raw training set, select columns by correlation with `target`,
# impute missing values per class, then fit and evaluate a linear SVM.
# Depends on `equip_failures_training_set` already being loaded in the session.
#install.packages("corrplot")
library("corrplot")
library("caret")
library("kernlab")
library("e1071")
library("outliers")
library("dplyr")
library("tidyverse")
library("tidyr")
# Fix the RNG so the data partition and SVM tuning are reproducible.
set.seed(999)
clean_data <- data.frame(Try = c(1:length(equip_failures_training_set$id)))
# Coerce every raw column to numeric (non-numeric entries become NA).
for (i in c(1:length(equip_failures_training_set))) {
clean_data[,i] <- as.numeric(unlist(equip_failures_training_set[,i]))
}
# Carry the original column names over to the numeric copy.
colnames(clean_data) <- colnames(equip_failures_training_set)
# Pairwise-complete correlation matrix of all numeric columns.
cor_clean_data <- cor(clean_data, use = "complete.obs")
# Selecting the correlation with only the target variable and plotting the graph.
#corrplot(as.matrix(head(cor_clean_data["target",order(cor_clean_data["target",],decreasing = T)], 10)))
# Build a data frame of each column's correlation with `target`,
# remembering the original column position in `row_id`.
cor_table <- as.matrix(cor_clean_data["target",])
cor_table <- as.data.frame(cor_table)
cor_table$row_id <- c(1:length(cor_table$V1))
cor_table <- cor_table[order(cor_table$V1, decreasing = T), ]
cor_table <- cor_table[complete.cases(cor_table),]
# Keep only columns whose |correlation| with target exceeds 0.05.
cor_table_1 <- cor_table[cor_table$V1 > 0.05,]
cor_table_2 <- cor_table[cor_table$V1 < -0.05,]
cor_table_3 <- rbind(cor_table_1,cor_table_2)
# Selecting only the required columns from the clean data.
# NOTE(review): order(cor_table_3$row_id) returns a permutation of
# 1..nrow(cor_table_3), not the stored column positions; presumably
# clean_data[, cor_table_3$row_id] was intended -- confirm.
clean_data <- clean_data[,order(cor_table_3$row_id)]
# Split by class so imputation can use per-class medians.
target_0 <- clean_data[clean_data$target==0,]
target_1 <- clean_data[clean_data$target==1,]
# Replace NA values with the per-class column median
# (sensor2_measure is deliberately skipped; its NAs are dropped later).
for (i in c(3:length(target_0))) {
if (colnames(target_0)[i] == "sensor2_measure"){
next()
}
target_0[,i][is.na(target_0[,i])] <- median(target_0[,i],na.rm = T)
}
for (i in c(3:length(target_1))) {
if (colnames(target_0)[i] == "sensor2_measure"){
next()
}
target_1[,i][is.na(target_1[,i])] <- median(target_1[,i],na.rm = T)
}
# Recombine the two per-class tables.
# NOTE(review): the first assignment is immediately overwritten (dead code).
clean_data <- target_0
clean_data <- rbind(target_0,target_1)
# Move `target` to the first column.
clean_data <- clean_data %>% select(target, everything())
# Drop the id column; it carries no predictive information.
clean_data$id <- NULL
# Drop rows still containing NA (i.e. missing sensor2_measure),
# then drop the sensor2_measure column itself.
clean_data <- clean_data[complete.cases(clean_data),]
clean_data$sensor2_measure <- NULL
# Remove near-zero-variance columns: they have negligible effect on the model.
nearZeroVarianceList <- nearZeroVar(clean_data)
# NOTE(review): dropping element 1 presumably protects the target column,
# but nearZeroVar() returns column indices in column order -- confirm that
# the first flagged column really is `target`.
nearZeroVarianceList <- nearZeroVarianceList[-1]
clean_data <- clean_data[,-nearZeroVarianceList]
# 90/10 train/test split for a rough hold-out evaluation.
trainset <- createDataPartition(y = clean_data$target, p=0.9 , list = F)
training <- clean_data[trainset,]
testing <- clean_data[-trainset,]
# caret's SVM interface requires a factor outcome for classification.
training[["target"]] <- factor(training[["target"]])
testing[["target"]] <- factor(testing[["target"]])
# 4-fold cross-validation, repeated twice, for tuning.
trainControlList <- trainControl(method = "repeatedcv", number = 4, repeats = 2)
# Train the linear SVM (features centred and scaled first).
svm_Linear_Grid <- train(target ~., data = training, method = "svmLinear",
trControl=trainControlList,
preProcess = c("center", "scale"),
tuneLength = 10)
# Predict the hold-out set with the trained SVM.
test_pred <- predict(svm_Linear_Grid, newdata = testing)
# Rough accuracy of the model on the hold-out set.
table(test_pred,testing$target)
rough_accuracy <- confusionMatrix(test_pred, testing$target)
rough_accuracy$overall["Accuracy"]
# Variable-importance ranking, to report the influential sensors.
dependent_var_12 <- caret::varImp(svm_Linear_Grid)
| /Equip_Failure.R | no_license | ritesh-suhag/Equipment_Failure_Detection | R | false | false | 4,006 | r | #install.packages("corrplot")
library("corrplot")
library("caret")
library("kernlab")
library("e1071")
library("outliers")
library("dplyr")
library("tidyverse")
library("tidyr")
set.seed(999)
clean_data <- data.frame(Try = c(1:length(equip_failures_training_set$id)))
#Changing into numeric values
for (i in c(1:length(equip_failures_training_set))) {
clean_data[,i] <- as.numeric(unlist(equip_failures_training_set[,i]))
}
#Making the column headers same
colnames(clean_data) <- colnames(equip_failures_training_set)
#Forming correlation heatmap
cor_clean_data <- cor(clean_data, use = "complete.obs")
#Selecting the correlation with only the target variable and plotting the graph.
#corrplot(as.matrix(head(cor_clean_data["target",order(cor_clean_data["target",],decreasing = T)], 10)))
#Forming a data frame of the correlations of target column with the rest of the columns.
cor_table <- as.matrix(cor_clean_data["target",])
cor_table <- as.data.frame(cor_table)
cor_table$row_id <- c(1:length(cor_table$V1))
cor_table <- cor_table[order(cor_table$V1, decreasing = T), ]
cor_table <- cor_table[complete.cases(cor_table),]
cor_table_1 <- cor_table[cor_table$V1 > 0.05,]
cor_table_2 <- cor_table[cor_table$V1 < -0.05,]
cor_table_3 <- rbind(cor_table_1,cor_table_2)
#Selecting only the required columns from the clean data.
clean_data <- clean_data[,order(cor_table_3$row_id)]
#Breaking the tables according to the fault location to get better
target_0 <- clean_data[clean_data$target==0,]
target_1 <- clean_data[clean_data$target==1,]
#Replacing all NA values in columns with 0/mean/median according to repective target values.
for (i in c(3:length(target_0))) {
if (colnames(target_0)[i] == "sensor2_measure"){
next()
}
target_0[,i][is.na(target_0[,i])] <- median(target_0[,i],na.rm = T)
}
for (i in c(3:length(target_1))) {
if (colnames(target_0)[i] == "sensor2_measure"){
next()
}
target_1[,i][is.na(target_1[,i])] <- median(target_1[,i],na.rm = T)
}
#Combing the broken tables back together.
clean_data <- target_0
clean_data <- rbind(target_0,target_1)
#Making target as the first column.
clean_data <- clean_data %>% select(target, everything())
#Taking out Id column because we don't require it in prediction model
clean_data$id <- NULL
#We remove all the NULL values in the sensor2_measure columns.
clean_data <- clean_data[complete.cases(clean_data),]
clean_data$sensor2_measure <- NULL
#Finding out columns with near 0 variance to take out of prediction model, as they would have least
#affect on the prediction model.
nearZeroVarianceList <- nearZeroVar(clean_data)
nearZeroVarianceList <- nearZeroVarianceList[-1]
clean_data <- clean_data[,-nearZeroVarianceList]
#We split the data into test and train dataset to first evaluate the accuracy of
#prediction model
#90% is used to train the predictive model, and 10% is used for testing processes.
trainset <- createDataPartition(y = clean_data$target, p=0.9 , list = F)
training <- clean_data[trainset,]
testing <- clean_data[-trainset,]
#Making the target column as a factor in both training and test dataset as
#required by the SVM.
training[["target"]] <- factor(training[["target"]])
testing[["target"]] <- factor(testing[["target"]])
trainControlList <- trainControl(method = "repeatedcv", number = 4, repeats = 2)
#Training the SVM linear model.
svm_Linear_Grid <- train(target ~., data = training, method = "svmLinear",
trControl=trainControlList,
preProcess = c("center", "scale"),
tuneLength = 10)
#Predicting the test dataset, using the SVM model trained.
test_pred <- predict(svm_Linear_Grid, newdata = testing)
#Trying to find out rough accuracy of the model across the test dataset.
table(test_pred,testing$target)
rough_accuracy <- confusionMatrix(test_pred, testing$target)
rough_accuracy$overall["Accuracy"]
#Storing the dependent variables to show to the client.
dependent_var_12 <- caret::varImp(svm_Linear_Grid)
|
# San Francisco crime classification: libraries for mapping, plotting,
# clustering, and the LiblineaR linear model used below.
library(ggmap)
library(ggplot2)
library(dplyr)
library(caret)
library(e1071)
library(dbscan)
library(MASS)
library(ggExtra)
library(LiblineaR)
library(readr)
# Path to the project root (all relative paths below assume it).
path <- "./crime_classification/"
setwd(path)
# Load the train/test data sets.
data_train <- read.csv("./data/train.csv")
data_test <- read.csv("./data/test.csv")
# Base map of San Francisco (currently disabled).
#map <- get_map("San Francisco", zoom = 12, color = "bw")
# Derive calendar features from the raw `Dates` timestamp column.
#
# Adds character columns Years ("%Y"), Month ("%m"), DayOfMonth ("%d") and
# Hour ("%H"), and converts DayOfWeek into an ordered factor running
# Monday..Sunday. Returns the augmented data frame.
make_vars_date <- function(crime_df) {
  # Parse the timestamp once instead of re-parsing it for every component.
  parsed <- strptime(crime_df$Dates, "%Y-%m-%d %H:%M:%S")
  crime_df$Years      <- strftime(parsed, "%Y")
  crime_df$Month      <- strftime(parsed, "%m")
  crime_df$DayOfMonth <- strftime(parsed, "%d")
  crime_df$Hour       <- strftime(parsed, "%H")
  # Order the weekdays chronologically so plots and models see Mon..Sun.
  crime_df$DayOfWeek <- factor(crime_df$DayOfWeek,
                               levels = c("Monday", "Tuesday",
                                          "Wednesday", "Thursday",
                                          "Friday", "Saturday", "Sunday"),
                               ordered = TRUE)
  return(crime_df)
}
# One-hot encode the temporal and spatial factors for LiblineaR.
#
# Expects the columns added by make_vars_date() plus PdDistrict. Each
# categorical variable is expanded into 0/1 indicator columns via
# model.matrix(~ x - 1); the "- 1" drops the intercept so every level gets
# its own column. A PdDistrict x Years interaction (pY) is added as well.
# Returns a data frame containing only the indicator columns.
make_training_factors <- function(df) {
# Prefix levels ("Yr.", "Hr.", "Mon.") so the dummy column names are
# readable and syntactically valid after model.matrix().
df$Years <- paste("Yr", df$Years, sep = ".")
df$Years <- factor(df$Years)
y <- as.data.frame(model.matrix(~df$Years - 1))
names(y) <- levels(df$Years)
df$Hour=paste("Hr", df$Hour,sep = ".")
df$Hour = factor(df$Hour)
h <- as.data.frame(model.matrix(~df$Hour - 1))
names(h) <- levels(df$Hour)
# DayOfWeek is already a factor (ordered by make_vars_date).
dow <- as.data.frame(model.matrix(~df$DayOfWeek - 1))
names(dow) <- levels(df$DayOfWeek)
df$Month=paste("Mon",df$Month,sep = ".")
df$Month = factor(df$Month)
m <- as.data.frame(model.matrix(~df$Month - 1))
names(m) <- levels(df$Month)
district <- as.data.frame(model.matrix(~df$PdDistrict - 1))
names(district) <- levels(df$PdDistrict)
# District x year interaction, e.g. "NORTHERN.Yr.2014".
df$pY=paste(df$PdDistrict,df$Years,sep = ".")
df$pY = factor(df$pY)
pY <- as.data.frame(model.matrix(~df$pY - 1))
names(pY) <- levels(df$pY)
# Assemble the full design matrix as one data frame.
train <- data.frame(y, dow, h, district, m, pY)
return(train)
}
# Multi-class logarithmic loss (the competition's evaluation metric).
#
# `act`  : matrix of 0/1 class indicators (rows = observations, cols = classes).
# `pred` : matrix of predicted probabilities with the same dimensions.
# Probabilities are clamped to [eps, 1 - eps] so log() never returns -Inf.
# Returns the negative mean (over rows of `act`) binary log-likelihood.
MultiLogLoss <- function(act, pred) {
  eps <- 1e-15
  # Vectorised clamp: pmin/pmax keep the matrix shape and avoid the
  # per-element sapply() round trips of the original.
  pred <- pmin(pmax(pred, eps), 1 - eps)
  ll <- sum(act * log(pred) + (1 - act) * log(1 - pred))
  -ll / nrow(act)
}
# Fit the model and write the submission file.
set.seed(22)
# Add the derived calendar features to both sets.
data_train <- make_vars_date(data_train)
data_test <- make_vars_date(data_test)
train <- data_train
target <- data_train$Category
# Expand the categorical features into the one-hot design matrix.
train <- make_training_factors(train)
# type = 7: presumably L2-regularised logistic regression (dual form);
# confirm against ?LiblineaR before changing.
model <- LiblineaR(train, target, type = 7, verbose = FALSE)
test <- data_test
Id <- test$Id
test <- make_training_factors(test)
# Class probabilities, with columns reordered to match levels(target).
submit <- data.frame(predict(model, test, proba = TRUE)$probabilities[, levels(target)])
write.csv(file = "submit.csv", x = submit)
| /logistic.r | no_license | cyuss/crime_classification | R | false | false | 3,362 | r | library(ggmap)
library(ggplot2)
library(dplyr)
library(caret)
library(e1071)
library(dbscan)
library(MASS)
library(ggExtra)
library(LiblineaR)
library(readr)
# chemin vers le projet
path <- "./crime_classification/"
setwd(path)
# charger les données
data_train <- read.csv("./data/train.csv")
data_test <- read.csv("./data/test.csv")
# charger la carte géographique de San Francisco
#map <- get_map("San Francisco", zoom = 12, color = "bw")
# Add calendar features (Years, Month, DayOfMonth, Hour as character
# columns) parsed from `Dates`, and turn DayOfWeek into an ordered
# Monday..Sunday factor. Returns the augmented data frame.
make_vars_date <- function(crime_df) {
  # Extract one time component (as character) from the Dates column.
  time_part <- function(fmt) {
    strftime(strptime(crime_df$Dates, "%Y-%m-%d %H:%M:%S"), fmt)
  }
  crime_df$Years      <- time_part("%Y")
  crime_df$Month      <- time_part("%m")
  crime_df$DayOfMonth <- time_part("%d")
  crime_df$Hour       <- time_part("%H")
  week_levels <- c("Monday", "Tuesday", "Wednesday", "Thursday",
                   "Friday", "Saturday", "Sunday")
  crime_df$DayOfWeek <- factor(crime_df$DayOfWeek, levels = week_levels,
                               ordered = TRUE)
  return(crime_df)
}
# One-hot encode the temporal and spatial factors for LiblineaR.
#
# Expects the columns added by make_vars_date() plus PdDistrict. Each
# categorical variable is expanded into 0/1 indicator columns via
# model.matrix(~ x - 1); the "- 1" drops the intercept so every level gets
# its own column. A PdDistrict x Years interaction (pY) is added as well.
# Returns a data frame containing only the indicator columns.
make_training_factors <- function(df) {
# Prefix levels ("Yr.", "Hr.", "Mon.") so the dummy column names are
# readable and syntactically valid after model.matrix().
df$Years <- paste("Yr", df$Years, sep = ".")
df$Years <- factor(df$Years)
y <- as.data.frame(model.matrix(~df$Years - 1))
names(y) <- levels(df$Years)
df$Hour=paste("Hr", df$Hour,sep = ".")
df$Hour = factor(df$Hour)
h <- as.data.frame(model.matrix(~df$Hour - 1))
names(h) <- levels(df$Hour)
# DayOfWeek is already a factor (ordered by make_vars_date).
dow <- as.data.frame(model.matrix(~df$DayOfWeek - 1))
names(dow) <- levels(df$DayOfWeek)
df$Month=paste("Mon",df$Month,sep = ".")
df$Month = factor(df$Month)
m <- as.data.frame(model.matrix(~df$Month - 1))
names(m) <- levels(df$Month)
district <- as.data.frame(model.matrix(~df$PdDistrict - 1))
names(district) <- levels(df$PdDistrict)
# District x year interaction, e.g. "NORTHERN.Yr.2014".
df$pY=paste(df$PdDistrict,df$Years,sep = ".")
df$pY = factor(df$pY)
pY <- as.data.frame(model.matrix(~df$pY - 1))
names(pY) <- levels(df$pY)
# Assemble the full design matrix as one data frame.
train <- data.frame(y, dow, h, district, m, pY)
return(train)
}
# fonction pour calcul de la fonction de perte
# Multi-class logarithmic loss: negative mean (over rows of `act`) binary
# log-likelihood of `pred` against the 0/1 indicator matrix `act`.
# Probabilities are clamped away from 0 and 1 so the logs stay finite.
MultiLogLoss <- function(act, pred) {
  tiny <- 1e-15
  n_rows <- nrow(pred)
  # Clamp every probability into [tiny, 1 - tiny], preserving the shape.
  clamp <- function(p) {
    matrix(vapply(p, function(v) min(1 - tiny, max(tiny, v)), numeric(1)),
           nrow = n_rows)
  }
  pred <- clamp(pred)
  loglik <- sum(act * log(pred) + (1 - act) * log(1 - pred))
  return(loglik * -1 / nrow(act))
}
# modele
set.seed(22)
data_train <- make_vars_date(data_train)
data_test <- make_vars_date(data_test)
train <- data_train
target <- data_train$Category
train <- make_training_factors(train)
model <- LiblineaR(train, target, type = 7, verbose = FALSE)
test <- data_test
Id <- test$Id
test <- make_training_factors(test)
submit <- data.frame(predict(model, test, proba = TRUE)$probabilities[, levels(target)])
write.csv(file = "submit.csv", x = submit)
|
library(Lock5withR)
### Name: HomesForSaleNY
### Title: Home for Sale in New York
### Aliases: HomesForSaleNY
### Keywords: datasets
### ** Examples
data(HomesForSaleNY)
| /data/genthat_extracted_code/Lock5withR/examples/HomesForSaleNY.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 179 | r | library(Lock5withR)
### Name: HomesForSaleNY
### Title: Home for Sale in New York
### Aliases: HomesForSaleNY
### Keywords: datasets
### ** Examples
data(HomesForSaleNY)
|
# Plot cumulative confirmed COVID-19 cases for India from the JHU CSSE
# global time-series CSV (fetched over the network at run time).
library(tidyverse)
# Helper sourced from GitHub: formats large numbers on the y-axis labels.
source("https://raw.githubusercontent.com/alvinndo/covid-viz-india/main/functions/addUnits.R")
# Time series data from India, starting 2020-01-22.
# read.csv() prepends "X" to the date column headers (e.g. "X1.22.20"),
# which is why pivot_longer() matches on starts_with("X") and strips it.
timeseries_india <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv") %>%
filter(Country.Region == "India") %>%
pivot_longer(cols = starts_with("X"),
names_to = "date",
names_prefix = "X",
values_to = "cases") %>%
select(Country.Region, date, cases) %>%
rename(country = "Country.Region") %>%
# Column headers use dot-separated US dates, e.g. "1.22.20".
mutate(date = as.Date(date, "%m.%d.%y"))
# Scatter plot of cumulative cases over time, one point per day.
ggplot(timeseries_india, aes(x = date, y = cases)) +
geom_point() +
scale_x_date(date_labels = "%Y %b",
date_breaks = "1 month") +
scale_y_continuous(labels = addUnits,
breaks = scales::pretty_breaks(n = 20)) +
# Tilt the month labels so they do not overlap.
theme(axis.text.x = element_text(angle = 60)) +
labs(title = "Cumulative Covid-19 Cases in India")
rm(timeseries_india, addUnits) | /timeseries/timeseries_india.R | permissive | alvinndo/covid-viz-india | R | false | false | 1,131 | r | library(tidyverse)
# Function to add units on Y-axis labels
source("https://raw.githubusercontent.com/alvinndo/covid-viz-india/main/functions/addUnits.R")
# Time series data from India. Starting from 1-22-20
timeseries_india <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv") %>%
filter(Country.Region == "India") %>%
pivot_longer(cols = starts_with("X"),
names_to = "date",
names_prefix = "X",
values_to = "cases") %>%
select(Country.Region, date, cases) %>%
rename(country = "Country.Region") %>%
mutate(date = as.Date(date, "%m.%d.%y"))
# Creating plot
ggplot(timeseries_india, aes(x = date, y = cases)) +
geom_point() +
scale_x_date(date_labels = "%Y %b",
date_breaks = "1 month") +
scale_y_continuous(labels = addUnits,
breaks = scales::pretty_breaks(n = 20)) +
theme(axis.text.x = element_text(angle = 60)) +
labs(title = "Cumulative Covid-19 Cases in India")
# Clean-up
rm(timeseries_india, addUnits) |
### --- Revisiting R basics - Chapter 2 --- ###
#Set the working directory path to where you store the data:
setwd("...")
getwd()
#Set repositories:
setRepositories(addURLs = c(CRAN = "https://cran.r-project.org/"))
getOption('repos')
#R data structures
##Vectors:
vector1 <- rnorm(10)
vector1
set.seed(123)
vector2 <- rnorm(10, mean=3, sd=2)
vector2
vector3 <- c(6, 8, 7, 1, 2, 3, 9, 6, 7, 6)
vector3
length(vector3)
class(vector3)
mode(vector3)
vector4 <- c("poor", "good", "good", "average", "average", "good", "poor", "good", "average", "good")
vector4
class(vector4)
mode(vector4)
levels(vector4)
vector4 <- factor(vector4, levels = c("poor", "average", "good"))
vector4
vector4.ord <- ordered(vector4, levels = c("good", "average", "poor"))
vector4.ord
class(vector4.ord)
mode(vector4.ord)
levels(vector4.ord)
str(vector4.ord)
vector5 <- c(TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE)
vector5
length(vector5)
##Scalars:
a1 <- 5
a1
a2 <- 4
a2
a3 <- a1 + a2
a3
#Matrices:
y <- matrix(1:20, nrow=5, ncol=4) #the colon operator is to generate a sequence of numbers
y
rows <- c("R1", "R2", "R3", "R4", "R5")
columns <- c("C1", "C2", "C3", "C4")
z <- matrix(1:20, nrow=5, ncol=4, byrow=TRUE, dimnames=list(rows, columns))
z
y[4,2]
y[,3]
z[c(2, 3, 5), 1]
##Arrays:
array1 <- array(1:20, dim=c(2,2,5)) #dim - a numeric vector giving the maximal index for each dimension = 1 row, 2 columns, 3 dimensions
array1
dim(array1)
array1[2, 1, 4]
which(array1==11, arr.ind=TRUE)
##Data frames:
subjectID <- c(1:10)
age <- c(37,23,42,25,22,25,48,19,22,38)
gender <- c("male", "male", "male", "male", "male", "female", "female", "female", "female", "female")
lifesat <- c(9,7,8,10,4,10,8,7,8,9)
health <- c("good", "average", "average", "good", "poor", "average", "good", "poor", "average", "good")
paid <- c(T, F, F, T, T, T, F, F, F, T)
dataset <- data.frame(subjectID, age, gender, lifesat, health, paid)
dataset
str(dataset)
dataset[,2:4] #or
dataset[, c("age", "gender", "lifesat")]
subset(dataset[c("age", "gender", "lifesat")])
subset(dataset, age > 30 & lifesat >= 8)
subset(dataset, paid==TRUE, select=c("age", "gender"))
##Lists:
simple.vector1 <- c(1, 29, 21, 3, 4, 55)
simple.matrix <- matrix(1:24, nrow=4, ncol=6, byrow=TRUE)
simple.scalar1 <- 5
simple.scalar2 <- "The List"
simple.vector2 <- c("easy", "moderate", "difficult")
simple.list <- list(name=simple.scalar2, matrix=simple.matrix, vector=simple.vector1, scalar=simple.scalar1, difficulty=simple.vector2)
simple.list
str(simple.list)
simple.list[[2]][1,3]
#Exporting R data objects:
ls()
save.image(file = "workspace.RData")
save.image(file = "workspace2.RData", compress = "xz")
save(dataset, simple.list, file = "two_objects.RData")
rm(list=ls())
load("workspace2.RData")
dump(ls(), file = "dump.R", append = FALSE)
dput(dataset, file = "dput.txt")
cat(age, file="age.txt", sep=",", fill=TRUE, labels=NULL, append=TRUE)
cat(age, file="age.csv", sep=",", fill=TRUE, labels=NULL, append=TRUE)
write(age, file="agedata.csv", ncolumns=2, append=TRUE, sep=",")
write(y, file="matrix_y.tab", ncolumns=2, append=FALSE, sep="\t")
install.packages("MASS")
library(MASS)
write.matrix(y, file="ymatrix.txt", sep=",")
write.table(dataset, file="dataset1.txt", append=TRUE, sep=",", na="NA", col.names=TRUE, row.names=FALSE, dec=".") #in this case as the subjectID has the same values as the row.names it's better to assign FALSE to row.names to deduplicate. In write.table() function you can also set fileEncoding and qmethod (quoting method) arguments
install.packages("WriteXLS")
library(WriteXLS)
WriteXLS("dataset", "dataset1.xlsx", SheetNames=NULL,
row.names=FALSE, col.names=TRUE, AdjWidth=TRUE,
envir=parent.frame())
install.packages("openxlsx")
library(openxlsx)
write.xlsx(simple.list, file = "simple_list.xlsx")
library(foreign)
write.foreign(dataset, "datafile.txt", "codefile.txt", package="SPSS")
install.packages("rio")
library(rio)
export(dataset, format = "stata")
export(dataset, "dataset1.csv", col.names = TRUE, na = "NA")
### --- Applied Data Science in R - Chapter 2 --- ###
#Importing data from different formats:
#setwd("...") #set working directory to the location where you saved the data
library(foreign)
grel <- read.spss("NILT2012GR.sav", to.data.frame=TRUE)
head(grel, n=5)
str(grel)
#Store a subset of the dataset to the object named grel.data
#with the following variables only:
#serial - 1 #serial number of respondent
#househld - 2 #How many people are there in your household?
#rage - 3 #Age of respondent
#rsex - 5 #Gender of respondent
#nkids - 7#Number of children aged less than 18 living in the household
#nelderly - 8 #Number of people aged 65 or more living in the household
#rmarstat - 10 #Marital/civil partnership status
#placeliv - 12 #Would you describe the place where you live as (1) a big city, (2) the suburbs of a big city, (3) a small city or town, (4) a country village, (5) a farm or country home
#eqnow1 - 16 #Are Catholics generally treated unfairly?
#eqnow2 - 17 #Are Protestants generally treated unfairly?
#eqnow3 - 18 #Are Gays/Lesbians/Bisexuals generally treated unfairly?
#eqnow4 - 19 #Are Disabled people generally treated unfairly?
#tenshort - 38 #Housing tenure status
#highqual - 39 #Highest educational qualification
#work - 41 #Work status
#ansseca - 47 #Social class
#religcat - 52 #Do you regard yourself as belonging to any particular religion?
#chattnd2 - 55 #How often do you attend religious services?
#persinc2 - 60 #Personal income before tax and NI deductions?
#orient - 64 #Sexual orientation
#ruhappy - 66 #Are you happy?
#contegrp - 76 #Number of ethnic groups respondent has regular contact with?
#uprejmeg - 77 #How prejudiced are you againts people of minority ethnic communities?
#umworker - 80 #Are you a migrant worker?
#mil10yrs - 81 #How good the settlement of migrants in the last 10 years has been for NI?
#miecono - 82 #How good for NI's economy that migrants come from other countries?
#micultur - 83 #How much has the cultural life in NI become enriched by migrants?
#target1a - 105 #Whether agree that NI is a normal civic society in which all individuals are equal etc?
#target2a - 106 #... NI is a place free from displays of sectarian aggression?
#target3a - 107 #...towns and city centres in NI are safe and welcoming?
#target4a - 108 #...schools in NI are effective at preparing pupils for life in a diverse society?
#target5a - 109 #...schools in NI are effective at encouraging understanding of the complexity of our history?
#target6a - 110 #...the government is actively encouraging integrated schools?
#target7a - 111 #...the government is actively encouraging schools of different religions to mix with each other by sharing facilities?
#target8a - 112 #...the government is actively encouraging shared communities where people of all backgrounds can live, work, learn and play together?
grel.data <- subset(grel[, c(1:3, 5, 7:8, 10, 12, 16:19, 38:39, 41, 47, 52, 55, 60, 64, 66, 76:77, 80:83, 105:112)])
str(grel.data)
#Exploratory Data Analysis:
library(psych)
describe(grel.data)
mean(grel.data$rage, na.rm=TRUE)
median(grel.data$rage, na.rm=TRUE)
library(modeest)
mlv(grel.data$rage, method="mfv", na.rm=TRUE)
var(grel.data$rage, na.rm=TRUE) #variance
sd(grel.data$rage, na.rm=TRUE) #standard deviation
range(grel.data$rage, na.rm=TRUE) #range
cent.tend <- function(data) {
library(modeest)
data <- as.numeric(data)
m <- mean(data, na.rm=TRUE)
me <- median(data, na.rm=TRUE)
mo <- mlv(data, method="mfv", na.rm=TRUE)[1]
stats <- data.frame(c(m, me, mo), row.names="Totals:")
names(stats)[1:3] <- c("Mean", "Median", "Mode")
return(stats)
}
cent.tend(grel.data$rage)
summary(grel.data[c("rage", "persinc2")])
#Data aggregations and contingency tables:
aggregate(grel.data[c("rage", "persinc2", "househld")], by=list(rsex=grel.data$rsex), FUN=mean, na.rm=TRUE)
stats <- function(x, na.omit=TRUE) {
if(na.omit)
x <- x[!is.na(x)]
n <- length(x)
m <- mean(x)
s <- sd(x)
r <- range(x)
return(c(n=n, mean=m, stdev=s, range=r))
}
library(doBy)
summaryBy(rage+persinc2+househld~rsex, data=grel.data, FUN=stats)
library(psych)
describeBy(grel.data[c("rage", "persinc2", "househld")], grel.data$rsex) #describeBy() does not allow to specify an arbitrary function; you can write a list of grouping variables; if you prefer a matrix output (instead of a list) add parameter mat=TRUE, e.g.:
describeBy(grel.data[c("rage", "persinc2", "househld")], grel.data$rsex, mat=TRUE)
table(grel.data$househld)
table(grel.data$uprejmeg)
library(Hmisc)
Hmisc::describe(grel.data)
attach(grel.data)
table(uprejmeg, househld)
detach(grel.data)
xtabs(~uprejmeg+househld+rsex, data=grel.data)
xTab <- xtabs(~uprejmeg+househld+rsex, data=grel.data)
ftable(xTab)
#Hypothesis testing and statistical inference
##Tests of differences
###Independent t-test example:
grel.data$ruhappy <- as.numeric(grel.data$ruhappy)
table(grel.data$ruhappy)
library(car)
grel.data$ruhappy <- recode(grel.data$ruhappy, "5=NA")
table(grel.data$ruhappy)
aggregate(grel.data$ruhappy, by=list(rsex=grel.data$rsex), FUN=mean, na.rm=TRUE)
t.test(ruhappy ~ rsex, data=grel.data, alternative="two.sided")
library(doBy)
stats <- function(x, na.omit=TRUE){
if(na.omit)
x <- x[!is.na(x)]
m <- mean(x)
n <- length(x)
s <- sd(x)
return(c(n=n, mean=m, stdev=s))
}
sum.By <- summaryBy(ruhappy~rsex, data=grel.data, FUN=stats)
sum.By
attach(sum.By)
n.harm <- (2*ruhappy.n[1]*ruhappy.n[2])/(ruhappy.n[1]+ruhappy.n[2])
detach(sum.By)
n.harm
pooledsd.ruhappy <- sd(grel.data$ruhappy, na.rm=TRUE)
pooledsd.ruhappy
power.t.test(n=round(n.harm, 0), delta=sum.By$ruhappy.mean[1]-sum.By$ruhappy.mean[2], sd=pooledsd.ruhappy, sig.level=.03843, type="two.sample", alternative="two.sided")
d <- (sum.By$ruhappy.mean[1]-sum.By$ruhappy.mean[2])/pooledsd.ruhappy #effect size = means difference divided by pooled standard deviation
d
###ANOVA example:
levels(grel.data$placeliv)
library(car)
qqPlot(lm(ruhappy ~ placeliv, data=grel.data), simulate=TRUE, main="Q-Q Plot", labels=FALSE) #The data fall quite well within the 95% Confidence Intervals - this suggests that the normality assumption has been met.
bartlett.test(ruhappy~placeliv, data=grel.data)
ruhappy.means <- aggregate(grel.data$ruhappy, by=list(grel.data$placeliv), FUN=mean, na.rm=TRUE)
ruhappy.means
fit <- aov(ruhappy~placeliv, data=grel.data)
summary(fit)
library(gplots)
plotmeans(grel.data$ruhappy~grel.data$placeliv, xlab="Place of living", ylab="Happiness", main="Means Plot with 95% CI")
TukeyHSD(fit)
opar <- par(no.readonly = TRUE)
par(las=2)
par(mar=c(6,27,5,2)) #adjust margin areas according to your settings to fit all labels
plot(TukeyHSD(fit))
par(opar)
##Tests of relationships
###Pearson's r correlations example:
grel.data$miecono <- as.numeric(grel.data$miecono)
library(car)
grel.data$miecono <- recode(grel.data$miecono, "1=0;2=1;3=2;4=3;5=4;6=5;7=6;8=7;9=8;10=9;11=10")
table(grel.data$miecono)
cov(grel.data$ruhappy, grel.data$miecono, method="pearson", use = "complete.obs")
cor.test(grel.data$ruhappy, grel.data$miecono, alternative="two.sided", method="pearson")
N.compl <- sum(complete.cases(grel.data$ruhappy, grel.data$miecono))
N.compl
cor.grel <- cor(grel.data$ruhappy, grel.data$miecono, method="pearson", use = "complete.obs")
cor.grel
adjusted.cor <- sqrt(1 - (((1 - (cor.grel^2))*(N.compl - 1))/(N.compl - 2)))
adjusted.cor
library(pwr)
pwr.cor <- pwr.r.test(n=N.compl, r=cor.grel, sig.level=0.0000006649, alternative="two.sided")
pwr.cor
###Multiple regression example:
reg.data <- subset(grel.data, select = c(ruhappy, rage, persinc2, househld, contegrp))
str(reg.data)
library(psych)
corr.test(reg.data[1:5], reg.data[1:5], use="complete", method="pearson", alpha=.05)
library(car)
scatterplotMatrix(reg.data[1:5], spread=FALSE, lty.smooth=2, main="Scatterplot Matrix")
library(corrgram)
corrgram(reg.data[1:5], order=TRUE, lower.panel=panel.shade, upper.panel=panel.pie, text.panel=panel.txt) #for the pie graphs - the filled portion of the pie indicates the magnitude of the correlation; for the panel.shade - the depth of the shading indicates the magnitude of the correlation
attach(reg.data)
regress1 <- lm(ruhappy~rage+persinc2+househld+contegrp)
detach(reg.data)
regress1
summary(regress1)
### --- The end of Chapter 2 --- ### | /Chapter 2/BDA_Ch2.R | permissive | PacktPublishing/Big-Data-Analytics-with-R | R | false | false | 12,400 | r | ### --- Revisiting R basics - Chapter 2 --- ###
#Set the working directory path to where you store the data:
setwd("...")
getwd()
#Set repositories:
setRepositories(addURLs = c(CRAN = "https://cran.r-project.org/"))
getOption('repos')
#R data structures
##Vectors:
vector1 <- rnorm(10)
vector1
set.seed(123)
vector2 <- rnorm(10, mean=3, sd=2)
vector2
vector3 <- c(6, 8, 7, 1, 2, 3, 9, 6, 7, 6)
vector3
length(vector3)
class(vector3)
mode(vector3)
vector4 <- c("poor", "good", "good", "average", "average", "good", "poor", "good", "average", "good")
vector4
class(vector4)
mode(vector4)
levels(vector4)
vector4 <- factor(vector4, levels = c("poor", "average", "good"))
vector4
vector4.ord <- ordered(vector4, levels = c("good", "average", "poor"))
vector4.ord
class(vector4.ord)
mode(vector4.ord)
levels(vector4.ord)
str(vector4.ord)
vector5 <- c(TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE)
vector5
length(vector5)
##Scalars:
a1 <- 5
a1
a2 <- 4
a2
a3 <- a1 + a2
a3
#Matrices:
y <- matrix(1:20, nrow=5, ncol=4) #the colon operator is to generate a sequence of numbers
y
rows <- c("R1", "R2", "R3", "R4", "R5")
columns <- c("C1", "C2", "C3", "C4")
z <- matrix(1:20, nrow=5, ncol=4, byrow=TRUE, dimnames=list(rows, columns))
z
y[4,2]
y[,3]
z[c(2, 3, 5), 1]
##Arrays:
array1 <- array(1:20, dim=c(2,2,5)) #dim - a numeric vector giving the maximal index for each dimension = 1 row, 2 columns, 3 dimensions
array1
dim(array1)
array1[2, 1, 4]
which(array1==11, arr.ind=TRUE)
##Data frames:
subjectID <- c(1:10)
age <- c(37,23,42,25,22,25,48,19,22,38)
gender <- c("male", "male", "male", "male", "male", "female", "female", "female", "female", "female")
lifesat <- c(9,7,8,10,4,10,8,7,8,9)
health <- c("good", "average", "average", "good", "poor", "average", "good", "poor", "average", "good")
paid <- c(T, F, F, T, T, T, F, F, F, T)
dataset <- data.frame(subjectID, age, gender, lifesat, health, paid)
dataset
str(dataset)
dataset[,2:4] #or
dataset[, c("age", "gender", "lifesat")]
subset(dataset[c("age", "gender", "lifesat")])
subset(dataset, age > 30 & lifesat >= 8)
subset(dataset, paid==TRUE, select=c("age", "gender"))
##Lists:
simple.vector1 <- c(1, 29, 21, 3, 4, 55)
simple.matrix <- matrix(1:24, nrow=4, ncol=6, byrow=TRUE)
simple.scalar1 <- 5
simple.scalar2 <- "The List"
simple.vector2 <- c("easy", "moderate", "difficult")
simple.list <- list(name=simple.scalar2, matrix=simple.matrix, vector=simple.vector1, scalar=simple.scalar1, difficulty=simple.vector2)
simple.list
str(simple.list)
simple.list[[2]][1,3]
#Exporting R data objects:
ls()
save.image(file = "workspace.RData")
save.image(file = "workspace2.RData", compress = "xz")
save(dataset, simple.list, file = "two_objects.RData")
rm(list=ls())
load("workspace2.RData")
dump(ls(), file = "dump.R", append = FALSE)
dput(dataset, file = "dput.txt")
cat(age, file="age.txt", sep=",", fill=TRUE, labels=NULL, append=TRUE)
cat(age, file="age.csv", sep=",", fill=TRUE, labels=NULL, append=TRUE)
write(age, file="agedata.csv", ncolumns=2, append=TRUE, sep=",")
write(y, file="matrix_y.tab", ncolumns=2, append=FALSE, sep="\t")
install.packages("MASS")
library(MASS)
write.matrix(y, file="ymatrix.txt", sep=",")
write.table(dataset, file="dataset1.txt", append=TRUE, sep=",", na="NA", col.names=TRUE, row.names=FALSE, dec=".") #in this case as the subjectID has the same values as the row.names it's better to assign FALSE to row.names to deduplicate. In write.table() function you can also set fileEncoding and qmethod (quoting method) arguments
install.packages("WriteXLS")
library(WriteXLS)
WriteXLS("dataset", "dataset1.xlsx", SheetNames=NULL,
row.names=FALSE, col.names=TRUE, AdjWidth=TRUE,
envir=parent.frame())
install.packages("openxlsx")
library(openxlsx)
write.xlsx(simple.list, file = "simple_list.xlsx")
library(foreign)
write.foreign(dataset, "datafile.txt", "codefile.txt", package="SPSS")
install.packages("rio")
library(rio)
export(dataset, format = "stata")
export(dataset, "dataset1.csv", col.names = TRUE, na = "NA")
### --- Applied Data Science in R - Chapter 2 --- ###
#Importing data from different formats:
#setwd("...") #set working directory to the location where you saved the data
library(foreign)
grel <- read.spss("NILT2012GR.sav", to.data.frame=TRUE)
head(grel, n=5)
str(grel)
#Store a subset of the dataset to the object named grel.data
#with the following variables only:
#serial - 1 #serial number of respondent
#househld - 2 #How many people are there in your household?
#rage - 3 #Age of respondent
#rsex - 5 #Gender of respondent
#nkids - 7#Number of children aged less than 18 living in the household
#nelderly - 8 #Number of people aged 65 or more living in the household
#rmarstat - 10 #Marital/civil partnership status
#placeliv - 12 #Would you describe the place where you live as (1) a big city, (2) the suburbs of a big city, (3) a small city or town, (4) a country village, (5) a farm or country home
#eqnow1 - 16 #Are Catholics generally treated unfairly?
#eqnow2 - 17 #Are Protestants generally treated unfairly?
#eqnow3 - 18 #Are Gays/Lesbians/Bisexuals generally treated unfairly?
#eqnow4 - 19 #Are Disabled people generally treated unfairly?
#tenshort - 38 #Housing tenure status
#highqual - 39 #Highest educational qualification
#work - 41 #Work status
#ansseca - 47 #Social class
#religcat - 52 #Do you regard yourself as belonging to any particular religion?
#chattnd2 - 55 #How often do you attend religious services?
#persinc2 - 60 #Personal income before tax and NI deductions?
#orient - 64 #Sexual orientation
#ruhappy - 66 #Are you happy?
#contegrp - 76 #Number of ethnic groups respondent has regular contact with?
#uprejmeg - 77 #How prejudiced are you againts people of minority ethnic communities?
#umworker - 80 #Are you a migrant worker?
#mil10yrs - 81 #How good the settlement of migrants in the last 10 years has been for NI?
#miecono - 82 #How good for NI's economy that migrants come from other countries?
#micultur - 83 #How much has the cultural life in NI become enriched by migrants?
#target1a - 105 #Whether agree that NI is a normal civic society in which all individuals are equal etc?
#target2a - 106 #... NI is a place free from displays of sectarian aggression?
#target3a - 107 #...towns and city centres in NI are safe and welcoming?
#target4a - 108 #...schools in NI are effective at preparing pupils for life in a diverse society?
#target5a - 109 #...schools in NI are effective at encouraging understanding of the complexity of our history?
#target6a - 110 #...the government is actively encouraging integrated schools?
#target7a - 111 #...the government is actively encouraging schools of different religions to mix with each other by sharing facilities?
#target8a - 112 #...the government is actively encouraging shared communities where people of all backgrounds can live, work, learn and play together?
grel.data <- subset(grel[, c(1:3, 5, 7:8, 10, 12, 16:19, 38:39, 41, 47, 52, 55, 60, 64, 66, 76:77, 80:83, 105:112)])
str(grel.data)
#Exploratory Data Analysis:
library(psych)
describe(grel.data)
mean(grel.data$rage, na.rm=TRUE)
median(grel.data$rage, na.rm=TRUE)
library(modeest)
mlv(grel.data$rage, method="mfv", na.rm=TRUE)
var(grel.data$rage, na.rm=TRUE) #variance
sd(grel.data$rage, na.rm=TRUE) #standard deviation
range(grel.data$rage, na.rm=TRUE) #range
cent.tend <- function(data) {
library(modeest)
data <- as.numeric(data)
m <- mean(data, na.rm=TRUE)
me <- median(data, na.rm=TRUE)
mo <- mlv(data, method="mfv", na.rm=TRUE)[1]
stats <- data.frame(c(m, me, mo), row.names="Totals:")
names(stats)[1:3] <- c("Mean", "Median", "Mode")
return(stats)
}
cent.tend(grel.data$rage)
summary(grel.data[c("rage", "persinc2")])
#Data aggregations and contingency tables:
aggregate(grel.data[c("rage", "persinc2", "househld")], by=list(rsex=grel.data$rsex), FUN=mean, na.rm=TRUE)
stats <- function(x, na.omit=TRUE) {
if(na.omit)
x <- x[!is.na(x)]
n <- length(x)
m <- mean(x)
s <- sd(x)
r <- range(x)
return(c(n=n, mean=m, stdev=s, range=r))
}
library(doBy)
summaryBy(rage+persinc2+househld~rsex, data=grel.data, FUN=stats)
library(psych)
describeBy(grel.data[c("rage", "persinc2", "househld")], grel.data$rsex) #describeBy() does not allow to specify an arbitrary function; you can write a list of grouping variables; if you prefer a matrix output (instead of a list) add parameter mat=TRUE, e.g.:
describeBy(grel.data[c("rage", "persinc2", "househld")], grel.data$rsex, mat=TRUE)
table(grel.data$househld)
table(grel.data$uprejmeg)
library(Hmisc)
Hmisc::describe(grel.data)
attach(grel.data)
table(uprejmeg, househld)
detach(grel.data)
xtabs(~uprejmeg+househld+rsex, data=grel.data)
xTab <- xtabs(~uprejmeg+househld+rsex, data=grel.data)
ftable(xTab)
#Hypothesis testing and statistical inference
##Tests of differences
###Independent t-test example:
grel.data$ruhappy <- as.numeric(grel.data$ruhappy)
table(grel.data$ruhappy)
library(car)
grel.data$ruhappy <- recode(grel.data$ruhappy, "5=NA")
table(grel.data$ruhappy)
aggregate(grel.data$ruhappy, by=list(rsex=grel.data$rsex), FUN=mean, na.rm=TRUE)
t.test(ruhappy ~ rsex, data=grel.data, alternative="two.sided")
library(doBy)
stats <- function(x, na.omit=TRUE){
if(na.omit)
x <- x[!is.na(x)]
m <- mean(x)
n <- length(x)
s <- sd(x)
return(c(n=n, mean=m, stdev=s))
}
sum.By <- summaryBy(ruhappy~rsex, data=grel.data, FUN=stats)
sum.By
attach(sum.By)
n.harm <- (2*ruhappy.n[1]*ruhappy.n[2])/(ruhappy.n[1]+ruhappy.n[2])
detach(sum.By)
n.harm
pooledsd.ruhappy <- sd(grel.data$ruhappy, na.rm=TRUE)
pooledsd.ruhappy
power.t.test(n=round(n.harm, 0), delta=sum.By$ruhappy.mean[1]-sum.By$ruhappy.mean[2], sd=pooledsd.ruhappy, sig.level=.03843, type="two.sample", alternative="two.sided")
d <- (sum.By$ruhappy.mean[1]-sum.By$ruhappy.mean[2])/pooledsd.ruhappy #effect size = means difference divided by pooled standard deviation
d
###ANOVA example:
levels(grel.data$placeliv)
library(car)
qqPlot(lm(ruhappy ~ placeliv, data=grel.data), simulate=TRUE, main="Q-Q Plot", labels=FALSE) #The data fall quite well within the 95% Confidence Intervals - this suggests that the normality assumption has been met.
bartlett.test(ruhappy~placeliv, data=grel.data)
ruhappy.means <- aggregate(grel.data$ruhappy, by=list(grel.data$placeliv), FUN=mean, na.rm=TRUE)
ruhappy.means
fit <- aov(ruhappy~placeliv, data=grel.data)
summary(fit)
library(gplots)
plotmeans(grel.data$ruhappy~grel.data$placeliv, xlab="Place of living", ylab="Happiness", main="Means Plot with 95% CI")
TukeyHSD(fit)
opar <- par(no.readonly = TRUE)
par(las=2)
par(mar=c(6,27,5,2)) #adjust margin areas according to your settings to fit all labels
plot(TukeyHSD(fit))
par(opar)
##Tests of relationships
###Pearson's r correlations example:
grel.data$miecono <- as.numeric(grel.data$miecono)
library(car)
grel.data$miecono <- recode(grel.data$miecono, "1=0;2=1;3=2;4=3;5=4;6=5;7=6;8=7;9=8;10=9;11=10")
table(grel.data$miecono)
cov(grel.data$ruhappy, grel.data$miecono, method="pearson", use = "complete.obs")
cor.test(grel.data$ruhappy, grel.data$miecono, alternative="two.sided", method="pearson")
N.compl <- sum(complete.cases(grel.data$ruhappy, grel.data$miecono))
N.compl
cor.grel <- cor(grel.data$ruhappy, grel.data$miecono, method="pearson", use = "complete.obs")
cor.grel
adjusted.cor <- sqrt(1 - (((1 - (cor.grel^2))*(N.compl - 1))/(N.compl - 2)))
adjusted.cor
library(pwr)
pwr.cor <- pwr.r.test(n=N.compl, r=cor.grel, sig.level=0.0000006649, alternative="two.sided")
pwr.cor
###Multiple regression example:
reg.data <- subset(grel.data, select = c(ruhappy, rage, persinc2, househld, contegrp))
str(reg.data)
library(psych)
corr.test(reg.data[1:5], reg.data[1:5], use="complete", method="pearson", alpha=.05)
library(car)
scatterplotMatrix(reg.data[1:5], spread=FALSE, lty.smooth=2, main="Scatterplot Matrix")
library(corrgram)
corrgram(reg.data[1:5], order=TRUE, lower.panel=panel.shade, upper.panel=panel.pie, text.panel=panel.txt) #for the pie graphs - the filled portion of the pie indicates the magnitude of the correlation; for the panel.shade - the depth of the shading indicates the magnitude of the correlation
attach(reg.data)
regress1 <- lm(ruhappy~rage+persinc2+househld+contegrp)
detach(reg.data)
regress1
summary(regress1)
### --- The end of Chapter 2 --- ### |
#' Make the ovary collection table
#'
#' @details The header in the file is used as the header in the table. If you wish to place a newline
#' in any header label, insert a '\n' in the label
#' @param ovary.samples The values as read in from the ovary samples CSV file ("ovary-samples.csv")
#' using [readr::read_csv()]
#' @param xcaption The caption
#' @param xlabel The latex label to use
#' @param font.size Size of the font for the table
#' @param space.size Size of the vertical spaces for the table
#'
#' @return An [xtable] object
#' @export
make.maturity.samples.table <- function(ovary.samples,
xcaption = "default",
xlabel = "default",
font.size = 10,
space.size = 10){
tab <- ovary.samples %>% as.data.frame
tabnew <- tab %>%
select(-Year) %>%
mutate(Total = rowSums(.))
tabsums <- tabnew %>%
summarize_all(.funs = ~{if(is.numeric(.)) sum(.) else "Total"})
yr_col <- c(tab$Year, "Total") %>%
enframe %>%
select(-name) %>%
rename(Year = value)
names(yr_col) <- latex.bold(names(yr_col))
tab <- bind_rows(tabnew, tabsums)
names(tab) <- map_chr(names(tab), ~{latex.mlc(str_split(.x, "\\\\n")[[1]])})
tab <- bind_cols(yr_col, map_dfr(tab, function(x) f(x)))
tab[nrow(tab),] <- as.list(latex.bold(tab[nrow(tab),]))
tab[-nrow(tab), ncol(tab)] <- latex.bold(tab[-nrow(tab), ncol(tab)] %>%
pull())
size.string <- latex.size.str(font.size, space.size)
print(xtable(tab, caption = xcaption,
label = xlabel,
align = get.align(ncol(tab),
first.left = FALSE,
just = "r"),
digits = rep(0, ncol(tab) + 1)),
caption.placement = "top",
include.rownames = FALSE,
table.placement = "H",
sanitize.text.function = function(x){x},
size = size.string)
}
make.maturity.ogives.table <- function(maturity.ogives,
xcaption = "default",
xlabel = "default",
font.size = 10,
space.size = 10){
## Returns an xtable
##
## maturity.ogives - data as read in from the csv file
## xcaption - caption to appear in the calling document
## xlabel - the label used to reference the table in latex
## font.size - size of the font for the table
## space.size - size of the vertical spaces for the table
tab <- maturity.ogives
corder <- sapply(c("age", "n.ovaries", "maturity", "avg.wt",
"new.fecundity"), grep, x = colnames(tab), ignore.case = TRUE)
tab <- tab[, corder]
## format all non-year-column values with a thousands seperator
colnames(tab) <-
c(latex.bold("Age"),
latex.mlc(c("Number of",
"samples")),
latex.mlc(c("Maturity",
"ogive")),
latex.mlc(c("Mean",
"weight")),
latex.mlc(c("Mean",
"fecundity")))
## Make the size string for font and space size
size.string <- latex.size.str(font.size, space.size)
print(xtable(tab,
caption = xcaption,
label = xlabel,
align = get.align(ncol(tab),
first.left = FALSE,
just = "r"),
digits = c(0, 0, 0, 3, 3, 3)),
caption.placement = "top",
include.rownames = FALSE,
table.placement = "H",
sanitize.text.function = function(x){x},
size = size.string)
}
| /R/tables-maturity.R | no_license | aaronmberger-nwfsc/hake-assessment | R | false | false | 3,743 | r | #' Make the ovary collection table
#'
#' @details The header in the file is used as the header in the table. If you wish to place a newline
#' in any header label, insert a '\n' in the label
#' @param ovary.samples The values as read in from the ovary samples CSV file ("ovary-samples.csv")
#' using [readr::read_csv()]
#' @param xcaption The caption
#' @param xlabel The latex label to use
#' @param font.size Size of the font for the table
#' @param space.size Size of the vertical spaces for the table
#'
#' @return An [xtable] object
#' @export
make.maturity.samples.table <- function(ovary.samples,
xcaption = "default",
xlabel = "default",
font.size = 10,
space.size = 10){
tab <- ovary.samples %>% as.data.frame
tabnew <- tab %>%
select(-Year) %>%
mutate(Total = rowSums(.))
tabsums <- tabnew %>%
summarize_all(.funs = ~{if(is.numeric(.)) sum(.) else "Total"})
yr_col <- c(tab$Year, "Total") %>%
enframe %>%
select(-name) %>%
rename(Year = value)
names(yr_col) <- latex.bold(names(yr_col))
tab <- bind_rows(tabnew, tabsums)
names(tab) <- map_chr(names(tab), ~{latex.mlc(str_split(.x, "\\\\n")[[1]])})
tab <- bind_cols(yr_col, map_dfr(tab, function(x) f(x)))
tab[nrow(tab),] <- as.list(latex.bold(tab[nrow(tab),]))
tab[-nrow(tab), ncol(tab)] <- latex.bold(tab[-nrow(tab), ncol(tab)] %>%
pull())
size.string <- latex.size.str(font.size, space.size)
print(xtable(tab, caption = xcaption,
label = xlabel,
align = get.align(ncol(tab),
first.left = FALSE,
just = "r"),
digits = rep(0, ncol(tab) + 1)),
caption.placement = "top",
include.rownames = FALSE,
table.placement = "H",
sanitize.text.function = function(x){x},
size = size.string)
}
make.maturity.ogives.table <- function(maturity.ogives,
xcaption = "default",
xlabel = "default",
font.size = 10,
space.size = 10){
## Returns an xtable
##
## maturity.ogives - data as read in from the csv file
## xcaption - caption to appear in the calling document
## xlabel - the label used to reference the table in latex
## font.size - size of the font for the table
## space.size - size of the vertical spaces for the table
tab <- maturity.ogives
corder <- sapply(c("age", "n.ovaries", "maturity", "avg.wt",
"new.fecundity"), grep, x = colnames(tab), ignore.case = TRUE)
tab <- tab[, corder]
## format all non-year-column values with a thousands seperator
colnames(tab) <-
c(latex.bold("Age"),
latex.mlc(c("Number of",
"samples")),
latex.mlc(c("Maturity",
"ogive")),
latex.mlc(c("Mean",
"weight")),
latex.mlc(c("Mean",
"fecundity")))
## Make the size string for font and space size
size.string <- latex.size.str(font.size, space.size)
print(xtable(tab,
caption = xcaption,
label = xlabel,
align = get.align(ncol(tab),
first.left = FALSE,
just = "r"),
digits = c(0, 0, 0, 3, 3, 3)),
caption.placement = "top",
include.rownames = FALSE,
table.placement = "H",
sanitize.text.function = function(x){x},
size = size.string)
}
|
plot2 <- function(){
#Load in the SCC data
SCC <- readRDS("summarySCC_PM25.rds");
#Separate Baltimore data and clean up
BaltMD <- SCC[SCC$fips == "24510",];
BaltMD$year <- as.factor(BaltMD$year);
rm(SCC)
#Calculate the total emissions for each
#year in Baltimore. TotalBalt will be a
#numeric vector with the years as names.
TotalBalt <- sapply(split(BaltMD$Emissions,BaltMD$year),sum);
#open graphics device and plot a simple
#line graph to confirm whether emissions
#are decreasing.
png("plot2.png",w=480,h=480);
plot(names(TotalBalt),TotalBalt/1000,type="l",
main = "Emission Trend in Baltimore, MD",
xlab = "Years",
ylab = "Emissions in kilotonnes",
axes=FALSE);
axis(1,at=names(TotalBalt),label=as.character(names(TotalBalt)));
axis(2,at=c(0,1.1,2.2,3.3),labels=c(0,1.1,2.2,3.3));
dev.off();
} | /CourseProject2/plot2.R | no_license | SatelliteEyes/ExploreDataCoursera | R | false | false | 827 | r | plot2 <- function(){
#Load in the SCC data
SCC <- readRDS("summarySCC_PM25.rds");
#Separate Baltimore data and clean up
BaltMD <- SCC[SCC$fips == "24510",];
BaltMD$year <- as.factor(BaltMD$year);
rm(SCC)
#Calculate the total emissions for each
#year in Baltimore. TotalBalt will be a
#numeric vector with the years as names.
TotalBalt <- sapply(split(BaltMD$Emissions,BaltMD$year),sum);
#open graphics device and plot a simple
#line graph to confirm whether emissions
#are decreasing.
png("plot2.png",w=480,h=480);
plot(names(TotalBalt),TotalBalt/1000,type="l",
main = "Emission Trend in Baltimore, MD",
xlab = "Years",
ylab = "Emissions in kilotonnes",
axes=FALSE);
axis(1,at=names(TotalBalt),label=as.character(names(TotalBalt)));
axis(2,at=c(0,1.1,2.2,3.3),labels=c(0,1.1,2.2,3.3));
dev.off();
} |
source('~/scripts/Cascade/tools.r')
library(plyr)
require(scales)
# Aggregate hourly adoption/activity counts into weekly totals and plot the
# weekly number of new adoptions over time.
#
# Args:
#   daily_activation: CSV (no header) of new-adoption counts with columns
#                     year, week, day, hour, count.
#   daily_activities: CSV (no header) of gifting-activity counts, same layout.
# Side effects: prints intermediate frames for inspection; writes
#   raw_stat_v2/overall_activities_weekly.pdf via save_ggplot (from tools.r).
temporal_analysis_weekly <- function (
daily_activation = 'raw_stat_v2/daily_activation.csv',
daily_activities = 'iheart_cascade/activities_stat.txt'){
# Tag each source with an activity_type code: 1 = new adoption, 0 = activity.
daily_activation <- as.data.frame(read.csv(daily_activation, header=FALSE))
daily_activation[,6] <- 1
colnames(daily_activation) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
daily_act <- as.data.frame(read.csv(daily_activities, header=FALSE))
daily_act[,6] <- 0
colnames(daily_act) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
activities <- rbind(daily_activation, daily_act)
# Week 0 of 2010 (days >= 5) is really the tail of week 52 of 2009; fold
# those rows back so the weekly grouping does not split that week in two.
activities[activities$year==2010 & activities$week==00 & activities$day>=5, c('year','week')] <- data.frame(year=2009,week=52)
print(activities[activities$year==2009 & activities$week==52 & activities$day>=5,])
# Collapse hourly rows to one weekly total per activity type.
activities.weekly <- ddply(activities, c('activity_type', 'year', 'week'), summarise, weekly_count = sum (count))
print(tail(activities.weekly))
# Per activity type: build a "year-week-1" string (1 = Monday anchor) and
# parse it with "%Y-%U-%w" so weeks sort and plot chronologically.
activities.df <- ddply(activities.weekly, c('activity_type'), function(one_partition){
one_partition$week = do.call(paste, c(as.data.frame(cbind(one_partition$year, one_partition$week, 1)), sep="-"))
one_partition$rtime = as.POSIXct(strptime(one_partition$week, format = "%Y-%U-%w"))
one_partition = one_partition[order(one_partition$rtime),]
# one_partition$cum_count = cumsum(one_partition$daily_count)
# one_partition$cdf_val = one_partition$cum_count / max(one_partition$cum_count)
# one_partition$pdf_val = one_partition$daily_count / max(one_partition$cum_count)
one_partition
})
print(head(activities.df))
activities.df$activity_type <- factor(activities.df$activity_type)
# Plot adoptions only (activity_type == 1). x is shifted forward one week
# (+7*86400 s) so each point sits at the end of the week it summarises --
# NOTE(review): presumed intent; confirm against the published figure.
plot <- ggplot(activities.df[activities.df$activity_type==1,], aes(x = (rtime+7*86400), y = (weekly_count))) +
geom_line(size=1) +
geom_point() +
# scale_y_log10() +
scale_y_continuous(label=scientific_format())+
scale_x_datetime(breaks = date_breaks("3 week"), minor_breaks = date_breaks("1 week"),
labels = date_format("%Y-%U"))+
xlab('') + ylab('Number of new adoptions')
# plot <- ggplot(activities.df, aes(x = (rtime+7*86400), y = (weekly_count))) +
# geom_line(aes(group = activity_type, linetype = activity_type, colour = activity_type), size=1) +
# geom_point() +
## scale_y_log10() +
# scale_x_datetime(breaks = date_breaks("3 week"),
# labels = date_format("%Y-%U"))+
# scale_linetype_manual(values=c(1,6), name='', breaks=0:1,
# labels=c( 'Total number of gifting activities','Number of new activations/adoptions')) +
# scale_colour_manual(values=c("black", "black"), name='', breaks=0:1,
# labels=c( 'Total number of gifting activities','Number of new activations/adoptions'))+
# xlab('') + ylab('Count')
# save_ggplot comes from tools.r; opts() is pre-0.9.2 ggplot2 syntax.
save_ggplot(plot, 'raw_stat_v2/overall_activities_weekly.pdf', 24,
opts(axis.text.x = element_text(angle = 90, hjust = 0), legend.position=c(.45, .2)), 10)
}
# Plot daily counts of births (first-time-invited users), adoptions, and
# total gifting activities over 2009-2010, plus an hour-of-week profile.
#
# Args: three headerless CSVs with columns year, week, day, hour, count.
# Side effects: prints intermediate frames; writes
#   raw_stat_v2/overall_activities.pdf and raw_stat_v2/daily_activities.pdf.
temporal_analysis <- function (daily_born = 'raw_stat_v2/daily_born.csv',
daily_activation = 'raw_stat_v2/daily_activation.csv',
daily_activities = 'iheart_cascade/activities_stat.txt'){
# Tag each source with an activity_type code:
#   1 = born (first-time invited), 2 = activation/adoption, 0 = activity.
daily_born <- as.data.frame(read.csv(daily_born, header=FALSE))
daily_born[,6] <- 1
colnames(daily_born) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
daily_activation <- as.data.frame(read.csv(daily_activation, header=FALSE))
daily_activation[,6] <- 2
colnames(daily_activation) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
daily_act <- as.data.frame(read.csv(daily_activities, header=FALSE))
daily_act[,6] <- 0
colnames(daily_act) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
activities <- rbind(daily_born, daily_activation, daily_act)
# Collapse hourly rows to one daily total per activity type.
activities.daily <- ddply(activities, c('activity_type', 'year', 'week', 'day'), summarise, daily_count = sum (count))
print(tail(activities.daily))
# Per activity type: parse "year-week-day" with "%Y-%U-%w" into POSIXct,
# sort chronologically, and attach cumulative / CDF / PDF columns.
activities.df <- ddply(activities.daily, c('activity_type'), function(one_partition){
one_partition$date = do.call(paste, c(as.data.frame(cbind(one_partition$year, one_partition$week, one_partition$day)), sep="-"))
one_partition$week = do.call(paste, c(as.data.frame(cbind(one_partition$year, one_partition$week)), sep="-"))
one_partition$rtime = as.POSIXct(strptime(one_partition$date, format = "%Y-%U-%w"))
one_partition = one_partition[order(one_partition$rtime),]
one_partition$cum_count = cumsum(one_partition$daily_count)
one_partition$cdf_val = one_partition$cum_count / max(one_partition$cum_count)
one_partition$pdf_val = one_partition$daily_count / max(one_partition$cum_count)
one_partition
})
print(head(activities.df))
# Every 28th date starting from the 4th -- candidate axis tick positions;
# printed for manual inspection only (the plot uses date_breaks instead).
dates <- unique(activities.df$date)
weeks <- unique(activities.df[activities.df$date%in%dates[seq(4, length(dates), 28)], ]$week)
rtimes <- unique(activities.df[activities.df$date%in%dates[seq(4, length(dates), 28)], ]$rtime)
# print(activities.df[activities.df$week %in% weeks[seq(length(weeks),15, -1)],])
print(rtimes)
print(weeks)
# activities.df$rtime <- as.Date(activities.df$rtime, format="%Y-%m-%d")
activities.df$activity_type <- factor(activities.df$activity_type)
# Daily counts for all three series on a log y-axis, one line per type.
plot <- ggplot(activities.df, aes(x = (rtime), y = (daily_count))) +
geom_line(aes(group = activity_type, linetype = activity_type, colour = activity_type), size=1) +
scale_y_log10(limits = c(10^4, 10^8)) +
scale_x_datetime(breaks = date_breaks("1 months"),
labels = date_format("%b"))+
scale_linetype_manual(values=c(1,1,6), name='', breaks=0:2,
labels=c( 'Total number of gifting activities','Number of first-time-invited users','Number of new activations/adoptions')) +
scale_colour_manual(values=c("black", "gray55", "black"), name='', breaks=0:2,
labels=c( 'Total number of gifting activities','Number of first-time-invited users','Number of new activations/adoptions'))+
xlab('2009 - 2010') + ylab('Count')
# plot <- change_plot_attributes_fancy(plot, "", 0:2, c( 'Number of activities', 'Number of first time invited users','Number of new activations/adoptions'),
# "2009 - 2010", "Count")
save_ggplot(plot, 'raw_stat_v2/overall_activities.pdf', 24,
opts(axis.text.x = element_text(angle = 90, hjust = 0), legend.position=c(.45, .9)), 10)
# activities$am_pm <- activities$hour < 12
# activities$am_pm[activities$am_pm == TRUE] <- 'AM'
# activities$am_pm[activities$am_pm == FALSE] <- 'PM'
# Second figure: hour-of-week profile (day 0-6, hour 0-23) per activity type.
activities.hourly <- ddply(activities, c('activity_type', 'day', 'hour'), summarise, hourly_act = sum (count))
print(head(activities.hourly))
activities.df <- ddply(activities.hourly, c('activity_type'), function(one_partition){
one_partition = one_partition[order(one_partition$day, one_partition$hour),]
one_partition$day_hour = do.call(paste, c(as.data.frame(cbind(one_partition$day, one_partition$hour)), sep=":"))
one_partition$rtime = as.POSIXct(strptime(one_partition$day_hour, format = "%w:%H"))
one_partition$cum_count = cumsum(one_partition$hourly_act)
one_partition$cdf_val = one_partition$cum_count / max(one_partition$cum_count)
one_partition$pdf_val = one_partition$hourly_act / max(one_partition$cum_count)
one_partition
})
print(head(activities.df, 50))
activities.df$activity_type <- factor(activities.df$activity_type)
# x is the discrete "day:hour" label; ticks are placed at hours 0 and 11 of
# each day and relabelled as Sun:AM ... Sat:PM.
plot <- ggplot(activities.df, aes(x = (day_hour), y = (hourly_act))) +
geom_line(aes(group = activity_type, colour = activity_type, shape = activity_type), size=.5)+
# scale_y_log10()+
scale_x_discrete(breaks=c('0:0','0:11', '1:0','1:11', '2:0','2:11', '3:0','3:11', '4:0','4:11', '5:0','5:11', '6:0','6:11'),
labels=c('Sun:AM','Sun:PM', 'Mon:AM','Mon:PM', 'Tue:AM','Tue:PM', 'Wed:AM','Wed:PM', 'Thu:AM','Thu:PM', 'Fri:AM','Fri:PM', 'Sat:AM', 'Sat:PM'))
plot <- change_plot_attributes_fancy(plot, "Activity type", 0:2, c('Born', 'Adoption', 'Activity' ),
"Day:Hour", "User count")
save_ggplot(plot, 'raw_stat_v2/daily_activities.pdf', 24,
opts(axis.text.x = element_text(angle = 90, hjust = 0), legend.position=c(.9, .7)))
}
# Explore cascade burstiness and lifetime and how they relate to cascade size.
#
# Input file columns (no header): root, size, depth, width, first_day,
# last_day, burstiness -- one row per cascade.
# Side effects: writes six PDFs under iheart_cascade/ via save_ggplot.
# Returns the (size, burstiness) subset of multi-day cascades -- note that
# `evolution.df` is reassigned several times and the LAST value is returned.
burstiness_analysis <- function(file='iheart_cascade/top_size.csv_all_evolution.csv'){
evolution <- as.data.frame(read.csv(file, header=FALSE))
colnames(evolution) <- c('root', 'size', 'depth', 'width', 'first_day', 'last_day', 'burstiness')
# Lifetime in days, inclusive of both end days.
evolution$lifetime <- evolution$last_day-evolution$first_day+1
# Burstiness distribution, restricted to cascades lasting more than one day.
evolution.df <- as.data.frame(table(evolution[evolution$first_day != evolution$last_day, ]$burstiness))
colnames(evolution.df) <- c('burstiness','count')
# table() turned burstiness into a factor; recover the numeric values.
evolution.df$burstiness <- as.numeric(levels(evolution.df$burstiness))[evolution.df$burstiness]
evolution.df <- evolution.df[order(evolution.df$burstiness), ]
evolution.df$cum_sum <- cumsum(evolution.df$count)
evolution.df$cdf <- evolution.df$cum_sum/max(evolution.df$cum_sum)
# Burstiness histogram (with smoother) and empirical CDF.
plot <- ggplot(evolution.df, aes(x = burstiness, y = count)) + geom_point(size= 0.8) + xlab('Burstiness') + ylab('Count')+
geom_smooth(se=FALSE)
# scale_x_log10() + scale_y_log10()
save_ggplot(plot, 'iheart_cascade/burstiness_count.pdf')
plot <- ggplot(evolution.df,
aes(x = burstiness, y = cdf)) + geom_line() + xlab('Burstiness') + ylab('Empirical CDF') +
scale_y_continuous(breaks=c(0,.2,.4,.6,.8,1.0))
# scale_x_log10() + scale_y_log10()
save_ggplot(plot, 'iheart_cascade/burstiness_cdf.pdf')
# Lifetime distribution (all cascades), as an empirical PDF on log-log axes.
evolution.life <- as.data.frame(table(evolution$lifetime))
colnames(evolution.life) <- c('life_time','count')
evolution.life$life_time <- as.numeric(levels(evolution.life$life_time))[evolution.life$life_time]
evolution.life <- evolution.life[order(evolution.life$life_time), ]
evolution.life$cum_sum <- cumsum(evolution.life$count)
evolution.life$cdf <- evolution.life$cum_sum/max(evolution.life$cum_sum)
evolution.life$pdf <- evolution.life$count/max(evolution.life$cum_sum)
plot <- ggplot(evolution.life, aes(x = life_time, y = pdf)) +
geom_point() + xlab('Lifetime') + ylab('Empirical PDF') + scale_y_log10()+
scale_x_log10(breaks=c(1, 7, 2*7, 4*7, 3*4*7, 12*4*7),
labels=c('1d', '1w','2w', '4w', '3m', '1y'))
save_ggplot(plot, 'iheart_cascade/lifetime_pdf.pdf')
# Mean lifetime per cascade size.
evolution.df <- ddply(evolution, c('size'), summarise, avg_life = mean (lifetime))
plot <- ggplot(evolution.df, aes(x = size, y = avg_life)) + geom_point() +
xlab('Cascade size') + ylab('Average lifetime') +
scale_x_log10() + #breaks=c(1, 10, 100, 1000, 10000), labels=c('1', '10','100', '1000', '10000')
scale_y_log10(breaks=c(1, 7, 2*7, 4*7, 3*4*7, 12*4*7),
labels=c('1d', '1w','2w', '4w', '3m', '1y'))
save_ggplot(plot, 'iheart_cascade/size_vs_life.pdf')
# Mean size per burstiness value (multi-day cascades only); columns 2 and 7
# of `evolution` are size and burstiness.
evolution.df <- evolution[evolution$first_day != evolution$last_day, c(2,7)]
evolution.df <- ddply(evolution.df, c('burstiness'), summarise, avg_size = mean (size))
plot <- ggplot(evolution.df, aes(x = burstiness, y = avg_size)) + geom_point() +
xlab('Burstiness') + ylab('Average cascade size') + scale_y_log10(breaks=c(10, 100, 1000, 10000, 100000))
save_ggplot(plot, 'iheart_cascade/burstiness_vs_size.pdf')
# Raw size-vs-burstiness scatter (this subset is also the return value).
evolution.df <- evolution[evolution$first_day != evolution$last_day, c(2,7)]
# evolution.df <- ddply(evolution.df, c('size'), summarise, avg_burst = mean (burstiness))
plot <- ggplot(evolution.df, aes(x = size, y = burstiness)) + geom_point(size = 0.5) +
xlab('Size') + ylab('Burstiness') + scale_x_log10()
save_ggplot(plot, 'iheart_cascade/size_vs_burstiness.pdf')
# NOTE(review): evolution.related is built but never used below; presumably
# a leftover from an earlier analysis (lifetime is also recomputed here).
evolution.related <- as.data.frame(evolution[,c(2,3,4,7)])
evolution.related$lifetime <- evolution$last_day - evolution$first_day + 1
return(evolution.df)
}
# Empirical CDF of inter-adoption times, binned on a mixed-resolution grid:
# per-second up to 1 minute, then coarser steps up to per-day bins.
#
# Input file: a single headerless column of elapsed seconds between
# consecutive adoptions.
# Side effect: writes iheart_cascade/inter_adoption_time.pdf.
# Returns the binned frame (bin, count, upper_bounds, cum_count, cdf).
analyze_inter_adoption_time <- function(file = 'iheart_cascade/inter_adoption_time_stat.txt'){
inter_adoption_time <- as.data.frame(read.csv(file, header=FALSE))
colnames(inter_adoption_time) <- c('seconds_taken')
# NOTE(review): computed but unused below; the hard-coded 35030687 in
# time_bin matches this observed maximum per the inline comment.
max_inter_adoption_time <- max(inter_adoption_time$seconds_taken) #35030687
# unique() removes the duplicate 86400 where the hourly and daily grids meet.
time_bin <- unique(c(0:60, 2*60, 4*60, 6*60, 8*60, 10*60, 20*60, 1800, seq(3600, 86400, by=3600),
seq(86400, 35030687, by=86400)))
inter_adoption_binned <- transform(inter_adoption_time, bin = cut(inter_adoption_time$seconds_taken, breaks=time_bin))
print(head(inter_adoption_binned))
# cbind(lower = as.numeric( sub("\\((.+),.*", "\\1", labs) ),
# upper = as.numeric( sub("[^,]*,([^]]*)\\]", "\\1", labs) ))
inter_adoption_binned <- as.data.frame(table(inter_adoption_binned$bin))
colnames(inter_adoption_binned) <- c('bin','count')
# Parse the numeric upper bound out of cut()'s "(lo,hi]" interval labels.
inter_adoption_binned$upper_bounds <- as.numeric(sub("[^,]*,([^]]*)\\]", "\\1", levels(inter_adoption_binned$bin)))
inter_adoption_binned <- inter_adoption_binned[order(inter_adoption_binned$upper_bounds), ]
inter_adoption_binned$cum_count <- cumsum(inter_adoption_binned$count)
inter_adoption_binned$cdf <- inter_adoption_binned$cum_count / max(inter_adoption_binned$cum_count)
plot <- ggplot(inter_adoption_binned, aes(x=upper_bounds, y=cdf))+ geom_line() +
scale_x_log10(breaks=c(10, 60, 600, 3600, 10*3600, 86400, 10*86400, 100*86400),
labels=c('10s', '1m', '10m', '1h', '10h', '1d', '10d', '100d'))+
xlab('Inter-adoption time') + ylab('Empirical CDF')+
scale_y_continuous(breaks=c(0,.2,.4,.6,.8,1.0))
save_ggplot(plot,'iheart_cascade/inter_adoption_time.pdf', 20)
return(inter_adoption_binned)
}
# Scatter-plot of cascade size against summary statistics of inter-adoption
# time (mean and quartiles), both axes log-scaled.
#
# Input file columns (no header): 1 = cascade size, 2 = mean, 3 = 25th
# percentile, 4 = median, 5 = 75th percentile of inter-adoption time (s).
# Side effect: writes iheart_cascade/size_vs_inter_adoption_time.pdf.
# Returns the long-format frame (size, elapsed_time, centrality).
analyze_size_vs_inter_adoption_time <- function(file = 'iheart_cascade/size_vs_inter_adoption_time.txt'){
	svt_wide <- as.data.frame(read.csv(file, header=FALSE))
	# Extract one statistic column from the wide table and tag it with its
	# centrality code (0 = 75th pct, 1 = mean, 2 = median, 3 = 25th pct).
	pick_stat <- function(col_idx, code){
		stat <- svt_wide[, c(1, col_idx)]
		stat[, 3] <- code
		colnames(stat) <- c('size', 'elapsed_time', 'centrality')
		stat
	}
	# Stack in the order: 25th percentile, mean, median, 75th percentile.
	size_vs_adoption_time.df <- rbind(pick_stat(3, 3),
		pick_stat(2, 1),
		pick_stat(4, 2),
		pick_stat(5, 0))
	size_vs_adoption_time.df$centrality <- factor(size_vs_adoption_time.df$centrality)
	plot <- ggplot(size_vs_adoption_time.df, aes(x = size, y = elapsed_time)) +
		geom_point(aes(group = centrality, colour = centrality, shape = centrality), size = 2) +
		scale_y_log10(breaks=(c(3600, 86400, 10*86400, 30*86400, 50*86400, 100*86400, 200*86400, 300*86400)),
			labels=c('1h', '1d', '10d', '30d', '50d', '100d', '200d', '300d')) +
		scale_x_log10()+
		scale_shape_manual(values=c(1,8,0,3), name='', breaks=c(0,1,2,3),
			labels=c('75th percentile', 'Average', 'Median', '25th percentile')) +
		scale_colour_manual(values=c("gray70", "black", "gray40", "gray20"), name='', breaks=c(0,1,2,3),
			labels=c('75th percentile', 'Average', 'Median', '25th percentile')) +
		xlab('Cascade size') + ylab('Inter-adoption time')
	save_ggplot(plot, 'iheart_cascade/size_vs_inter_adoption_time.pdf', 24, opts(legend.position=c(.3, .12)))
	return(size_vs_adoption_time.df)
}
# Summarise inter-adoption time by cascade generation (depth): first as a
# precomputed box plot, then as mean/quartile curves over depth.
#
# Input file columns (no header): depth, ia_time (seconds).
# Side effects: writes iheart_cascade/depth_vs_inter_adoption_time.pdf and
#   iheart_cascade/inter_adoption_time.pdf. NOTE(review): the second path
#   collides with the PDF written by analyze_inter_adoption_time -- whichever
#   function runs last overwrites it; confirm this is intentional.
# Returns the per-depth box-plot statistics frame.
analyze_depth_vs_inter_adoption_time <- function(file = 'iheart_gift/depth_vs_inter_adoption_time.txt'){
depth_vs_adoption_time <- as.data.frame(read.csv(file, header=FALSE))
colnames(depth_vs_adoption_time) <- c('depth', 'ia_time')
# Per depth: the five boxplot.stats values plus the mean of ia_time.
depth_vs_adoption_time.df <- ddply(depth_vs_adoption_time, c('depth'), .drop=TRUE,
.fun = function(one_partition){
stats = boxplot.stats(one_partition$ia_time)$stats
c(ymin=stats[1],
lower=stats[2],
middle=stats[3],
upper=stats[4],
ymax=stats[5],
mean = mean(one_partition$ia_time))
})
print(head(depth_vs_adoption_time.df))
depth_vs_adoption_time.df$depth <- factor(depth_vs_adoption_time.df$depth)
# Box plot from the precomputed statistics (stat="identity").
plot <- ggplot(depth_vs_adoption_time.df, aes(x=depth, lower=lower, upper=upper, middle=middle, ymin=ymin, ymax=ymax)) +
geom_boxplot(stat="identity") +
scale_y_discrete(breaks=(c(86400, 10*86400, 30*86400, 50*86400, 100*86400, 200*86400, 300*86400)),
labels=c('1d', '10d', '30d', '50d', '100d', '200d', '300d')) +
scale_x_discrete(breaks=seq(0,60,5), labels=seq(0,60,5)) +
xlab('Generation of adoption') + ylab('Inter-adoptime time')
save_ggplot(plot, 'iheart_cascade/depth_vs_inter_adoption_time.pdf', width = 10)
# Reshape the same statistics wide-to-long for the mean/quartile curves.
inter_generation_time <- depth_vs_adoption_time.df
# NOTE(review): depth was converted to a factor above, so as.numeric() here
# yields the level indices 1..n rather than the original depth values;
# they coincide only if depths are consecutive starting at 1 -- verify.
inter_generation_time$depth <- as.numeric(inter_generation_time$depth)
# centrality codes: 0 = 75th pct, 1 = mean, 2 = median, 3 = 25th pct.
inter_generation_time.mean <- inter_generation_time[,c('depth', 'mean')]
inter_generation_time.mean[,3] <- 1
colnames(inter_generation_time.mean) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.p_25 <- inter_generation_time[,c('depth', 'lower')]
inter_generation_time.p_25[,3] <- 3
colnames(inter_generation_time.p_25) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.median <- inter_generation_time[,c('depth', 'middle')]
inter_generation_time.median[,3] <- 2
colnames(inter_generation_time.median) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.p_75 <- inter_generation_time[,c('depth', 'upper')]
inter_generation_time.p_75[,3] <- 0
colnames(inter_generation_time.p_75) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.df = rbind(inter_generation_time.p_25, inter_generation_time.mean,
inter_generation_time.median, inter_generation_time.p_75)
inter_generation_time.df$centrality <- factor(inter_generation_time.df$centrality)
# inter_generation_time.df$depth <- factor(inter_generation_time.df$depth)
# NOTE(review): elapsed_time is continuous seconds, so scale_y_discrete here
# (rather than scale_y_continuous/log10 with these breaks) looks suspect.
plot <- ggplot(inter_generation_time.df, aes(x = depth, y = elapsed_time)) +
geom_point(aes(group = centrality, colour = centrality, shape = centrality), size = 3) +
geom_line(aes(group = centrality, colour = centrality, shape = centrality)) +
scale_y_discrete(breaks=(c(86400, 10*86400, 30*86400, 60*86400, 90*86400, 180*86400, 365*86400)),
labels=c('1d', '10d', '1m', '2m', '3m', '6m', '1y'))+
scale_x_continuous(breaks=seq(0,55,5), labels=seq(0,55,5))#, limits=c(1,55))
plot <- change_plot_attributes(plot, "", 0:3, c('75th percentile', 'Average', 'Median', '25th percentile'),
"From n-1 to n generation", "Inter-adoption time")
save_ggplot(plot, 'iheart_cascade/inter_adoption_time.pdf', 24, opts(legend.position=c(.7, .85)), 10)
return(depth_vs_adoption_time.df)
}
# Plot summary statistics (mean and quartiles) of inter-generation time as a
# function of cascade generation depth.
#
# Input file columns (no header): 1 = depth, 2 = mean, 3 = 25th percentile,
# 4 = median, 5 = 75th percentile of elapsed time in seconds.
# Side effect: writes iheart_cascade/inter_generation_time.pdf.
analyze_inter_generation_time <- function(file = 'iheart_cascade/inter_generation_time_stat.txt'){
	igt_wide <- as.data.frame(read.csv(file, header=FALSE))
	# Extract one statistic column from the wide table and tag it with its
	# centrality code (0 = 75th pct, 1 = mean, 2 = median, 3 = 25th pct).
	pick_stat <- function(col_idx, code){
		stat <- igt_wide[, c(1, col_idx)]
		stat[, 3] <- code
		colnames(stat) <- c('depth', 'elapsed_time', 'centrality')
		stat
	}
	# Stack in the order: 25th percentile, mean, median, 75th percentile.
	inter_generation_time.df <- rbind(pick_stat(3, 3),
		pick_stat(2, 1),
		pick_stat(4, 2),
		pick_stat(5, 0))
	inter_generation_time.df$centrality <- factor(inter_generation_time.df$centrality)
	plot <- ggplot(inter_generation_time.df, aes(x = depth, y = elapsed_time)) +
		geom_point(aes(group = centrality, colour = centrality, shape = centrality), size = 3) +
		geom_line(aes(group = centrality, colour = centrality, shape = centrality)) +
		scale_y_discrete(breaks=(c(86400, 10*86400, 30*86400, 60*86400, 90*86400, 180*86400, 365*86400)),
			labels=c('1d', '10d', '1m', '2m', '3m', '6m', '1y'))+
		scale_x_continuous(breaks=seq(0,55,5), labels=seq(0,55,5))
	plot <- change_plot_attributes(plot, "", 0:3, c('75th percentile', 'Average', 'Median', '25th percentile'),
		"From n-1 to n generation", "Inter-generation time")
	save_ggplot(plot, 'iheart_cascade/inter_generation_time.pdf', 24, opts(legend.position=c(.7, .85)), 10)
}
#act<-temporal_analysis('raw_stat_v2/daily_born.csv', 'raw_stat_v2/daily_activation.csv',
# 'raw_stat_v2/daily_last_act.csv', 'raw_stat_v2/daily_last_seen.csv')
#burstiness ('iheart_cascade/')
#act<-temporal_analysis('iheart_test/daily_born.csv', 'iheart_test/daily_activation.csv',
# 'iheart_test/daily_last_act.csv', 'iheart_test/daily_last_seen.csv')
#raw_outdeg_analysis('raw_stat_v2/raw_outdeg_stat.csv')
#influence_threshold_analysis('raw_stat_v2/parent_count_before_act.csv', 'raw_stat_v2/indeg_before_act.csv')
#active_proportion_analysis('raw_stat_v2/act_proportion_count.csv')
| /Cascade/temporal_dyn.r | no_license | rezaur86/Research-scripts | R | false | false | 21,426 | r | source('~/scripts/Cascade/tools.r')
library(plyr)
require(scales)
# Duplicate record of temporal_analysis_weekly (this dump repeats the file).
# Aggregate hourly adoption/activity counts into weekly totals and plot the
# weekly number of new adoptions; writes
# raw_stat_v2/overall_activities_weekly.pdf via save_ggplot (tools.r).
temporal_analysis_weekly <- function (
daily_activation = 'raw_stat_v2/daily_activation.csv',
daily_activities = 'iheart_cascade/activities_stat.txt'){
# Tag each source with an activity_type code: 1 = new adoption, 0 = activity.
daily_activation <- as.data.frame(read.csv(daily_activation, header=FALSE))
daily_activation[,6] <- 1
colnames(daily_activation) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
daily_act <- as.data.frame(read.csv(daily_activities, header=FALSE))
daily_act[,6] <- 0
colnames(daily_act) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
activities <- rbind(daily_activation, daily_act)
# Fold week 0 of 2010 (days >= 5) back into week 52 of 2009.
activities[activities$year==2010 & activities$week==00 & activities$day>=5, c('year','week')] <- data.frame(year=2009,week=52)
print(activities[activities$year==2009 & activities$week==52 & activities$day>=5,])
activities.weekly <- ddply(activities, c('activity_type', 'year', 'week'), summarise, weekly_count = sum (count))
print(tail(activities.weekly))
# Parse "year-week-1" with "%Y-%U-%w" so weeks sort chronologically.
activities.df <- ddply(activities.weekly, c('activity_type'), function(one_partition){
one_partition$week = do.call(paste, c(as.data.frame(cbind(one_partition$year, one_partition$week, 1)), sep="-"))
one_partition$rtime = as.POSIXct(strptime(one_partition$week, format = "%Y-%U-%w"))
one_partition = one_partition[order(one_partition$rtime),]
# one_partition$cum_count = cumsum(one_partition$daily_count)
# one_partition$cdf_val = one_partition$cum_count / max(one_partition$cum_count)
# one_partition$pdf_val = one_partition$daily_count / max(one_partition$cum_count)
one_partition
})
print(head(activities.df))
activities.df$activity_type <- factor(activities.df$activity_type)
# Adoptions only; x shifted one week forward so points sit at week end.
plot <- ggplot(activities.df[activities.df$activity_type==1,], aes(x = (rtime+7*86400), y = (weekly_count))) +
geom_line(size=1) +
geom_point() +
# scale_y_log10() +
scale_y_continuous(label=scientific_format())+
scale_x_datetime(breaks = date_breaks("3 week"), minor_breaks = date_breaks("1 week"),
labels = date_format("%Y-%U"))+
xlab('') + ylab('Number of new adoptions')
# plot <- ggplot(activities.df, aes(x = (rtime+7*86400), y = (weekly_count))) +
# geom_line(aes(group = activity_type, linetype = activity_type, colour = activity_type), size=1) +
# geom_point() +
## scale_y_log10() +
# scale_x_datetime(breaks = date_breaks("3 week"),
# labels = date_format("%Y-%U"))+
# scale_linetype_manual(values=c(1,6), name='', breaks=0:1,
# labels=c( 'Total number of gifting activities','Number of new activations/adoptions')) +
# scale_colour_manual(values=c("black", "black"), name='', breaks=0:1,
# labels=c( 'Total number of gifting activities','Number of new activations/adoptions'))+
# xlab('') + ylab('Count')
save_ggplot(plot, 'raw_stat_v2/overall_activities_weekly.pdf', 24,
opts(axis.text.x = element_text(angle = 90, hjust = 0), legend.position=c(.45, .2)), 10)
}
# Duplicate record of temporal_analysis (this dump repeats the file).
# Plots daily counts of births/adoptions/activities over 2009-2010 and an
# hour-of-week profile; writes raw_stat_v2/overall_activities.pdf and
# raw_stat_v2/daily_activities.pdf.
temporal_analysis <- function (daily_born = 'raw_stat_v2/daily_born.csv',
daily_activation = 'raw_stat_v2/daily_activation.csv',
daily_activities = 'iheart_cascade/activities_stat.txt'){
# activity_type codes: 1 = born (first-time invited), 2 = adoption, 0 = activity.
daily_born <- as.data.frame(read.csv(daily_born, header=FALSE))
daily_born[,6] <- 1
colnames(daily_born) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
daily_activation <- as.data.frame(read.csv(daily_activation, header=FALSE))
daily_activation[,6] <- 2
colnames(daily_activation) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
daily_act <- as.data.frame(read.csv(daily_activities, header=FALSE))
daily_act[,6] <- 0
colnames(daily_act) <- c('year', 'week', 'day', 'hour', 'count', 'activity_type')
activities <- rbind(daily_born, daily_activation, daily_act)
activities.daily <- ddply(activities, c('activity_type', 'year', 'week', 'day'), summarise, daily_count = sum (count))
print(tail(activities.daily))
# Parse "year-week-day" with "%Y-%U-%w", sort, and add cum/CDF/PDF columns.
activities.df <- ddply(activities.daily, c('activity_type'), function(one_partition){
one_partition$date = do.call(paste, c(as.data.frame(cbind(one_partition$year, one_partition$week, one_partition$day)), sep="-"))
one_partition$week = do.call(paste, c(as.data.frame(cbind(one_partition$year, one_partition$week)), sep="-"))
one_partition$rtime = as.POSIXct(strptime(one_partition$date, format = "%Y-%U-%w"))
one_partition = one_partition[order(one_partition$rtime),]
one_partition$cum_count = cumsum(one_partition$daily_count)
one_partition$cdf_val = one_partition$cum_count / max(one_partition$cum_count)
one_partition$pdf_val = one_partition$daily_count / max(one_partition$cum_count)
one_partition
})
print(head(activities.df))
# Candidate tick positions, printed for manual inspection only.
dates <- unique(activities.df$date)
weeks <- unique(activities.df[activities.df$date%in%dates[seq(4, length(dates), 28)], ]$week)
rtimes <- unique(activities.df[activities.df$date%in%dates[seq(4, length(dates), 28)], ]$rtime)
# print(activities.df[activities.df$week %in% weeks[seq(length(weeks),15, -1)],])
print(rtimes)
print(weeks)
# activities.df$rtime <- as.Date(activities.df$rtime, format="%Y-%m-%d")
activities.df$activity_type <- factor(activities.df$activity_type)
plot <- ggplot(activities.df, aes(x = (rtime), y = (daily_count))) +
geom_line(aes(group = activity_type, linetype = activity_type, colour = activity_type), size=1) +
scale_y_log10(limits = c(10^4, 10^8)) +
scale_x_datetime(breaks = date_breaks("1 months"),
labels = date_format("%b"))+
scale_linetype_manual(values=c(1,1,6), name='', breaks=0:2,
labels=c( 'Total number of gifting activities','Number of first-time-invited users','Number of new activations/adoptions')) +
scale_colour_manual(values=c("black", "gray55", "black"), name='', breaks=0:2,
labels=c( 'Total number of gifting activities','Number of first-time-invited users','Number of new activations/adoptions'))+
xlab('2009 - 2010') + ylab('Count')
# plot <- change_plot_attributes_fancy(plot, "", 0:2, c( 'Number of activities', 'Number of first time invited users','Number of new activations/adoptions'),
# "2009 - 2010", "Count")
save_ggplot(plot, 'raw_stat_v2/overall_activities.pdf', 24,
opts(axis.text.x = element_text(angle = 90, hjust = 0), legend.position=c(.45, .9)), 10)
# activities$am_pm <- activities$hour < 12
# activities$am_pm[activities$am_pm == TRUE] <- 'AM'
# activities$am_pm[activities$am_pm == FALSE] <- 'PM'
# Second figure: hour-of-week profile (day 0-6, hour 0-23) per activity type.
activities.hourly <- ddply(activities, c('activity_type', 'day', 'hour'), summarise, hourly_act = sum (count))
print(head(activities.hourly))
activities.df <- ddply(activities.hourly, c('activity_type'), function(one_partition){
one_partition = one_partition[order(one_partition$day, one_partition$hour),]
one_partition$day_hour = do.call(paste, c(as.data.frame(cbind(one_partition$day, one_partition$hour)), sep=":"))
one_partition$rtime = as.POSIXct(strptime(one_partition$day_hour, format = "%w:%H"))
one_partition$cum_count = cumsum(one_partition$hourly_act)
one_partition$cdf_val = one_partition$cum_count / max(one_partition$cum_count)
one_partition$pdf_val = one_partition$hourly_act / max(one_partition$cum_count)
one_partition
})
print(head(activities.df, 50))
activities.df$activity_type <- factor(activities.df$activity_type)
plot <- ggplot(activities.df, aes(x = (day_hour), y = (hourly_act))) +
geom_line(aes(group = activity_type, colour = activity_type, shape = activity_type), size=.5)+
# scale_y_log10()+
scale_x_discrete(breaks=c('0:0','0:11', '1:0','1:11', '2:0','2:11', '3:0','3:11', '4:0','4:11', '5:0','5:11', '6:0','6:11'),
labels=c('Sun:AM','Sun:PM', 'Mon:AM','Mon:PM', 'Tue:AM','Tue:PM', 'Wed:AM','Wed:PM', 'Thu:AM','Thu:PM', 'Fri:AM','Fri:PM', 'Sat:AM', 'Sat:PM'))
plot <- change_plot_attributes_fancy(plot, "Activity type", 0:2, c('Born', 'Adoption', 'Activity' ),
"Day:Hour", "User count")
save_ggplot(plot, 'raw_stat_v2/daily_activities.pdf', 24,
opts(axis.text.x = element_text(angle = 90, hjust = 0), legend.position=c(.9, .7)))
}
# Duplicate record of burstiness_analysis (this dump repeats the file).
# Explores cascade burstiness/lifetime vs size; writes six PDFs under
# iheart_cascade/ and returns the (size, burstiness) subset of multi-day
# cascades -- the LAST value assigned to the reused variable evolution.df.
burstiness_analysis <- function(file='iheart_cascade/top_size.csv_all_evolution.csv'){
evolution <- as.data.frame(read.csv(file, header=FALSE))
colnames(evolution) <- c('root', 'size', 'depth', 'width', 'first_day', 'last_day', 'burstiness')
# Lifetime in days, inclusive of both end days.
evolution$lifetime <- evolution$last_day-evolution$first_day+1
# Burstiness distribution over cascades lasting more than one day.
evolution.df <- as.data.frame(table(evolution[evolution$first_day != evolution$last_day, ]$burstiness))
colnames(evolution.df) <- c('burstiness','count')
# table() turned burstiness into a factor; recover the numeric values.
evolution.df$burstiness <- as.numeric(levels(evolution.df$burstiness))[evolution.df$burstiness]
evolution.df <- evolution.df[order(evolution.df$burstiness), ]
evolution.df$cum_sum <- cumsum(evolution.df$count)
evolution.df$cdf <- evolution.df$cum_sum/max(evolution.df$cum_sum)
plot <- ggplot(evolution.df, aes(x = burstiness, y = count)) + geom_point(size= 0.8) + xlab('Burstiness') + ylab('Count')+
geom_smooth(se=FALSE)
# scale_x_log10() + scale_y_log10()
save_ggplot(plot, 'iheart_cascade/burstiness_count.pdf')
plot <- ggplot(evolution.df,
aes(x = burstiness, y = cdf)) + geom_line() + xlab('Burstiness') + ylab('Empirical CDF') +
scale_y_continuous(breaks=c(0,.2,.4,.6,.8,1.0))
# scale_x_log10() + scale_y_log10()
save_ggplot(plot, 'iheart_cascade/burstiness_cdf.pdf')
# Lifetime distribution (all cascades) as an empirical PDF on log-log axes.
evolution.life <- as.data.frame(table(evolution$lifetime))
colnames(evolution.life) <- c('life_time','count')
evolution.life$life_time <- as.numeric(levels(evolution.life$life_time))[evolution.life$life_time]
evolution.life <- evolution.life[order(evolution.life$life_time), ]
evolution.life$cum_sum <- cumsum(evolution.life$count)
evolution.life$cdf <- evolution.life$cum_sum/max(evolution.life$cum_sum)
evolution.life$pdf <- evolution.life$count/max(evolution.life$cum_sum)
plot <- ggplot(evolution.life, aes(x = life_time, y = pdf)) +
geom_point() + xlab('Lifetime') + ylab('Empirical PDF') + scale_y_log10()+
scale_x_log10(breaks=c(1, 7, 2*7, 4*7, 3*4*7, 12*4*7),
labels=c('1d', '1w','2w', '4w', '3m', '1y'))
save_ggplot(plot, 'iheart_cascade/lifetime_pdf.pdf')
# Mean lifetime per cascade size.
evolution.df <- ddply(evolution, c('size'), summarise, avg_life = mean (lifetime))
plot <- ggplot(evolution.df, aes(x = size, y = avg_life)) + geom_point() +
xlab('Cascade size') + ylab('Average lifetime') +
scale_x_log10() + #breaks=c(1, 10, 100, 1000, 10000), labels=c('1', '10','100', '1000', '10000')
scale_y_log10(breaks=c(1, 7, 2*7, 4*7, 3*4*7, 12*4*7),
labels=c('1d', '1w','2w', '4w', '3m', '1y'))
save_ggplot(plot, 'iheart_cascade/size_vs_life.pdf')
# Mean size per burstiness (multi-day cascades; cols 2 and 7 = size, burstiness).
evolution.df <- evolution[evolution$first_day != evolution$last_day, c(2,7)]
evolution.df <- ddply(evolution.df, c('burstiness'), summarise, avg_size = mean (size))
plot <- ggplot(evolution.df, aes(x = burstiness, y = avg_size)) + geom_point() +
xlab('Burstiness') + ylab('Average cascade size') + scale_y_log10(breaks=c(10, 100, 1000, 10000, 100000))
save_ggplot(plot, 'iheart_cascade/burstiness_vs_size.pdf')
# Raw size-vs-burstiness scatter (this subset is also the return value).
evolution.df <- evolution[evolution$first_day != evolution$last_day, c(2,7)]
# evolution.df <- ddply(evolution.df, c('size'), summarise, avg_burst = mean (burstiness))
plot <- ggplot(evolution.df, aes(x = size, y = burstiness)) + geom_point(size = 0.5) +
xlab('Size') + ylab('Burstiness') + scale_x_log10()
save_ggplot(plot, 'iheart_cascade/size_vs_burstiness.pdf')
# NOTE(review): evolution.related is built but never used below.
evolution.related <- as.data.frame(evolution[,c(2,3,4,7)])
evolution.related$lifetime <- evolution$last_day - evolution$first_day + 1
return(evolution.df)
}
# Read per-adoption waiting times (in seconds) from `file`, bin them on a
# mixed-resolution grid (per-second up to 1 minute, then minute/hour/day
# steps), and save the empirical CDF of inter-adoption time.
#
# Args:
#   file: headerless CSV with one column of inter-adoption times in seconds.
# Returns: the binned data frame (bin, count, upper_bounds, cum_count, cdf).
# Depends on save_ggplot() and ggplot2 being available in the session.
analyze_inter_adoption_time <- function(file = 'iheart_cascade/inter_adoption_time_stat.txt'){
inter_adoption_time <- as.data.frame(read.csv(file, header=FALSE))
colnames(inter_adoption_time) <- c('seconds_taken')
# 35030687 is the observed maximum in the reference data set; the day-grained
# bins below are built up to that hard-coded bound, not to this variable.
max_inter_adoption_time <- max(inter_adoption_time$seconds_taken) #35030687
# unique() removes the overlap at 86400 between the hourly and daily sequences
time_bin <- unique(c(0:60, 2*60, 4*60, 6*60, 8*60, 10*60, 20*60, 1800, seq(3600, 86400, by=3600),
seq(86400, 35030687, by=86400)))
# cut() labels each observation with an interval label of the form "(lo,hi]"
inter_adoption_binned <- transform(inter_adoption_time, bin = cut(inter_adoption_time$seconds_taken, breaks=time_bin))
print(head(inter_adoption_binned))
# cbind(lower = as.numeric( sub("\\((.+),.*", "\\1", labs) ),
# upper = as.numeric( sub("[^,]*,([^]]*)\\]", "\\1", labs) ))
# Count observations per bin, then recover each bin's numeric upper bound by
# parsing the "(lo,hi]" label with a regex so the bins can be sorted numerically.
inter_adoption_binned <- as.data.frame(table(inter_adoption_binned$bin))
colnames(inter_adoption_binned) <- c('bin','count')
inter_adoption_binned$upper_bounds <- as.numeric(sub("[^,]*,([^]]*)\\]", "\\1", levels(inter_adoption_binned$bin)))
inter_adoption_binned <- inter_adoption_binned[order(inter_adoption_binned$upper_bounds), ]
# Empirical CDF: cumulative counts normalised by the total count.
inter_adoption_binned$cum_count <- cumsum(inter_adoption_binned$count)
inter_adoption_binned$cdf <- inter_adoption_binned$cum_count / max(inter_adoption_binned$cum_count)
plot <- ggplot(inter_adoption_binned, aes(x=upper_bounds, y=cdf))+ geom_line() +
scale_x_log10(breaks=c(10, 60, 600, 3600, 10*3600, 86400, 10*86400, 100*86400),
labels=c('10s', '1m', '10m', '1h', '10h', '1d', '10d', '100d'))+
xlab('Inter-adoption time') + ylab('Empirical CDF')+
scale_y_continuous(breaks=c(0,.2,.4,.6,.8,1.0))
save_ggplot(plot,'iheart_cascade/inter_adoption_time.pdf', 20)
return(inter_adoption_binned)
}
# Cascade size vs. inter-adoption-time summary statistics.
# Input file columns: size, mean, 25th percentile, median, 75th percentile
# (headerless CSV). The four statistics are reshaped into long format, coded
# by `centrality` (0 = 75th pct, 1 = mean, 2 = median, 3 = 25th pct), plotted
# against cascade size on log-log axes, and saved.
# Returns the long-format data frame. Depends on save_ggplot().
analyze_size_vs_inter_adoption_time <- function(file = 'iheart_cascade/size_vs_inter_adoption_time.txt'){
stats_wide <- as.data.frame(read.csv(file, header=FALSE))
# Pull one statistic column into long format, tagged with its centrality code.
take_stat <- function(col_idx, code) {
slice <- stats_wide[, c(1, col_idx)]
slice[, 3] <- code
colnames(slice) <- c('size', 'elapsed_time', 'centrality')
slice
}
# Stacking order: 25th percentile, mean, median, 75th percentile.
size_vs_adoption_time.df <- rbind(take_stat(3, 3), take_stat(2, 1),
take_stat(4, 2), take_stat(5, 0))
size_vs_adoption_time.df$centrality <- factor(size_vs_adoption_time.df$centrality)
plot <- ggplot(size_vs_adoption_time.df, aes(x = size, y = elapsed_time)) +
geom_point(aes(group = centrality, colour = centrality, shape = centrality), size = 2) +
scale_y_log10(breaks=(c(3600, 86400, 10*86400, 30*86400, 50*86400, 100*86400, 200*86400, 300*86400)),
labels=c('1h', '1d', '10d', '30d', '50d', '100d', '200d', '300d')) +
scale_x_log10()+
scale_shape_manual(values=c(1,8,0,3), name='', breaks=c(0,1,2,3),
labels=c('75th percentile', 'Average', 'Median', '25th percentile')) +
scale_colour_manual(values=c("gray70", "black", "gray40", "gray20"), name='', breaks=c(0,1,2,3),
labels=c('75th percentile', 'Average', 'Median', '25th percentile')) +
xlab('Cascade size') + ylab('Inter-adoption time')
save_ggplot(plot, 'iheart_cascade/size_vs_inter_adoption_time.pdf', 24, opts(legend.position=c(.3, .12)))
return(size_vs_adoption_time.df)
}
# Inter-adoption time per cascade depth (generation): reads (depth, ia_time)
# pairs, draws a per-generation boxplot, then re-plots the per-depth summary
# percentiles (25th/median/mean/75th) as points and lines.
# Returns the per-depth summary data frame.
# Depends on plyr::ddply, ggplot2, save_ggplot() and change_plot_attributes().
analyze_depth_vs_inter_adoption_time <- function(file = 'iheart_gift/depth_vs_inter_adoption_time.txt'){
depth_vs_adoption_time <- as.data.frame(read.csv(file, header=FALSE))
colnames(depth_vs_adoption_time) <- c('depth', 'ia_time')
# Per-depth five-number boxplot summary plus the mean.
depth_vs_adoption_time.df <- ddply(depth_vs_adoption_time, c('depth'), .drop=TRUE,
.fun = function(one_partition){
stats = boxplot.stats(one_partition$ia_time)$stats
c(ymin=stats[1],
lower=stats[2],
middle=stats[3],
upper=stats[4],
ymax=stats[5],
mean = mean(one_partition$ia_time))
})
print(head(depth_vs_adoption_time.df))
# depth becomes a factor so each generation gets its own box on the x axis
depth_vs_adoption_time.df$depth <- factor(depth_vs_adoption_time.df$depth)
# NOTE(review): scale_y_discrete() on a continuous y aesthetic leaves these
# breaks/labels unused — scale_y_log10()/scale_y_continuous() is presumably
# intended; confirm before changing the rendered figure.
plot <- ggplot(depth_vs_adoption_time.df, aes(x=depth, lower=lower, upper=upper, middle=middle, ymin=ymin, ymax=ymax)) +
geom_boxplot(stat="identity") +
scale_y_discrete(breaks=(c(86400, 10*86400, 30*86400, 50*86400, 100*86400, 200*86400, 300*86400)),
labels=c('1d', '10d', '30d', '50d', '100d', '200d', '300d')) +
scale_x_discrete(breaks=seq(0,60,5), labels=seq(0,60,5)) +
xlab('Generation of adoption') + ylab('Inter-adoptime time')
save_ggplot(plot, 'iheart_cascade/depth_vs_inter_adoption_time.pdf', width = 10)
inter_generation_time <- depth_vs_adoption_time.df
# Fix: depth is a factor at this point, and as.numeric() on a factor returns
# the internal level codes (1..k), not the original depth values, which
# shifted the x axis. Convert through the labels instead (same idiom used for
# evolution.life$life_time earlier in this file).
inter_generation_time$depth <- as.numeric(as.character(inter_generation_time$depth))
# Reshape the summary columns into long format coded by `centrality`
# (0 = 75th pct, 1 = mean, 2 = median, 3 = 25th pct).
inter_generation_time.mean <- inter_generation_time[,c('depth', 'mean')]
inter_generation_time.mean[,3] <- 1
colnames(inter_generation_time.mean) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.p_25 <- inter_generation_time[,c('depth', 'lower')]
inter_generation_time.p_25[,3] <- 3
colnames(inter_generation_time.p_25) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.median <- inter_generation_time[,c('depth', 'middle')]
inter_generation_time.median[,3] <- 2
colnames(inter_generation_time.median) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.p_75 <- inter_generation_time[,c('depth', 'upper')]
inter_generation_time.p_75[,3] <- 0
colnames(inter_generation_time.p_75) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.df <- rbind(inter_generation_time.p_25, inter_generation_time.mean,
inter_generation_time.median, inter_generation_time.p_75)
inter_generation_time.df$centrality <- factor(inter_generation_time.df$centrality)
plot <- ggplot(inter_generation_time.df, aes(x = depth, y = elapsed_time)) +
geom_point(aes(group = centrality, colour = centrality, shape = centrality), size = 3) +
geom_line(aes(group = centrality, colour = centrality, shape = centrality)) +
scale_y_discrete(breaks=(c(86400, 10*86400, 30*86400, 60*86400, 90*86400, 180*86400, 365*86400)),
labels=c('1d', '10d', '1m', '2m', '3m', '6m', '1y'))+
scale_x_continuous(breaks=seq(0,55,5), labels=seq(0,55,5))#, limits=c(1,55))
plot <- change_plot_attributes(plot, "", 0:3, c('75th percentile', 'Average', 'Median', '25th percentile'),
"From n-1 to n generation", "Inter-adoption time")
# NOTE(review): this path overwrites the figure written by
# analyze_inter_adoption_time(); a depth-specific filename was likely
# intended — confirm before renaming.
save_ggplot(plot, 'iheart_cascade/inter_adoption_time.pdf', 24, opts(legend.position=c(.7, .85)), 10)
return(depth_vs_adoption_time.df)
}
# Inter-generation time summary: reads per-depth statistics (columns: depth,
# mean, 25th percentile, median, 75th percentile), reshapes them into long
# format coded by `centrality` (0 = 75th pct, 1 = mean, 2 = median,
# 3 = 25th pct), and saves a point/line plot of elapsed time per generation.
# Called for its side effect (the saved figure); no meaningful return value.
# Depends on ggplot2, save_ggplot() and change_plot_attributes().
analyze_inter_generation_time <- function(file = 'iheart_cascade/inter_generation_time_stat.txt'){
inter_generation_time <- as.data.frame(read.csv(file, header=FALSE))
# One long-format slice per statistic, tagged with its centrality code.
inter_generation_time.mean <- inter_generation_time[,c(1,2)]
inter_generation_time.mean[,3] <- 1
colnames(inter_generation_time.mean) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.p_25 <- inter_generation_time[,c(1,3)]
inter_generation_time.p_25[,3] <- 3
colnames(inter_generation_time.p_25) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.median <- inter_generation_time[,c(1,4)]
inter_generation_time.median[,3] <- 2
colnames(inter_generation_time.median) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.p_75 <- inter_generation_time[,c(1,5)]
inter_generation_time.p_75[,3] <- 0
colnames(inter_generation_time.p_75) <- c('depth', 'elapsed_time', 'centrality')
inter_generation_time.df = rbind(inter_generation_time.p_25, inter_generation_time.mean,
inter_generation_time.median, inter_generation_time.p_75)
inter_generation_time.df$centrality <- factor(inter_generation_time.df$centrality)
# inter_generation_time.df$depth <- factor(inter_generation_time.df$depth)
plot <- ggplot(inter_generation_time.df, aes(x = depth, y = elapsed_time)) +
geom_point(aes(group = centrality, colour = centrality, shape = centrality), size = 3) +
geom_line(aes(group = centrality, colour = centrality, shape = centrality)) +
# NOTE(review): scale_y_discrete() on a continuous y aesthetic leaves these
# breaks/labels unused — scale_y_log10() (see the commented lines below) is
# presumably intended; confirm before changing the rendered figure.
scale_y_discrete(breaks=(c(86400, 10*86400, 30*86400, 60*86400, 90*86400, 180*86400, 365*86400)),
labels=c('1d', '10d', '1m', '2m', '3m', '6m', '1y'))+
# scale_y_log10(breaks=(c(3600, 86400, 10*86400, 30*86400, 50*86400, 100*86400, 200*86400, 300*86400)),
# labels=c('1h', '1d', '10d', '30d', '50d', '100d', '200d', '300d')) +
scale_x_continuous(breaks=seq(0,55,5), labels=seq(0,55,5))#, limits=c(0,55))
plot <- change_plot_attributes(plot, "", 0:3, c('75th percentile', 'Average', 'Median', '25th percentile'),
"From n-1 to n generation", "Inter-generation time")
save_ggplot(plot, 'iheart_cascade/inter_generation_time.pdf', 24, opts(legend.position=c(.7, .85)), 10)
}
#act<-temporal_analysis('raw_stat_v2/daily_born.csv', 'raw_stat_v2/daily_activation.csv',
# 'raw_stat_v2/daily_last_act.csv', 'raw_stat_v2/daily_last_seen.csv')
#burstiness ('iheart_cascade/')
#act<-temporal_analysis('iheart_test/daily_born.csv', 'iheart_test/daily_activation.csv',
# 'iheart_test/daily_last_act.csv', 'iheart_test/daily_last_seen.csv')
#raw_outdeg_analysis('raw_stat_v2/raw_outdeg_stat.csv')
#influence_threshold_analysis('raw_stat_v2/parent_count_before_act.csv', 'raw_stat_v2/indeg_before_act.csv')
#active_proportion_analysis('raw_stat_v2/act_proportion_count.csv')
|
#' @title ridge_regression
#' @description Fit a ridge (L2-penalized) linear regression through the
#'   singular value decomposition of the centered and scaled design matrix.
#'
#' @param form A formula with the format of "Y ~ .".
#' @param dat A dataframe.
#' @param lambda The ridge penalty term lambda, a single non-negative number,
#'   set default as 0 (which reduces to ordinary least squares).
#' @param contrasts A list of contrasts, passed to \code{model.matrix}.
#'
#' @return A named numeric vector of coefficients ("Intercept" first), with
#'   the model formula attached as the \code{"formula"} attribute so that
#'   \code{predict.ridge_regression} can rebuild the design matrix.
#' @importFrom stats model.frame model.matrix
#' @export
#'
ridge_regression <- function(form, dat, lambda=0, contrasts=NULL) {
#Eliminate all rows containing NAs within the dataset
dat_no_na <- model.frame(form, dat)
#Fix: reset the row names of the filtered frame (the previous code targeted
#`data`, the base R function, which was a silent no-op)
rownames(dat_no_na) <- NULL
#Define the model matrix (includes the intercept column)
X <- model.matrix(form, dat_no_na, contrasts.arg = contrasts)
#Get the name of the dependent variable from the formula's left-hand side
y_name <- as.character(form)[2]
#Define the dependent variable as a one-column matrix
Y <- as.matrix(subset(dat_no_na, select = y_name), ncol = 1)
#Center the dependent variable
mean.Y <- mean(Y)
Y <- Y - mean.Y
#Drop the intercept column; drop = FALSE keeps the matrix shape even when
#there is a single predictor
X <- X[, -1, drop = FALSE]
#Center the predictors (column-major replication matches the matrix layout)
mean.X <- colMeans(X)
X <- X - rep(mean.X, rep(nrow(X), ncol(X)))
#Scale the predictors to unit root-mean-square
scale.X <- drop(rep(1/nrow(X), nrow(X)) %*% X^2)^0.5
X <- X/rep(scale.X, rep(nrow(X), ncol(X)))
#Ridge solution via the SVD: beta = V diag(d / (d^2 + lambda)) U' Y.
#nrow = length(...) guards diag() against the one-predictor case, where
#diag(scalar) would otherwise build an identity matrix of that size.
decomp <- svd(X)
shrink <- decomp$d/(decomp$d^2 + lambda)
beta <- decomp$v %*% diag(shrink, nrow = length(shrink)) %*% t(decomp$u) %*% Y
#Undo the predictor scaling
beta <- t(as.matrix(beta/scale.X))
#Recover the intercept from the centering means
intercept <- mean.Y - beta %*% mean.X
#Assemble the full coefficient vector, intercept first
beta <- cbind(intercept, beta)
beta <- as.vector(beta)
#Name the intercept and the slope coefficients
names(beta) <- c("Intercept", colnames(X))
#Attach the formula so predict.ridge_regression() can rebuild the design
#matrix (it reads attributes(object)$formula)
attr(beta, "formula") <- form
#Generate output
beta
}
#' Prediction function for Ridge Regression
#'
#' @param object ridge_regression object
#' @param ... a dataframe of new observations to score
#'
#' @return An estimate of new Y given X.
#' @export
#'
predict.ridge_regression <- function(object, ...) {
# The new data travels as the first element of the dots.
extra_args <- list(...)
new_data <- extra_args[[1]]
# Refuse anything that is not a data.frame.
if (!is.data.frame(new_data)) {
stop("The first argument should be a data.frame of values",
"to predict")
}
# Rebuild the design matrix from the formula stored on the fitted object,
# then multiply by the coefficient vector to obtain fitted values.
design <- model.matrix(attributes(object)$formula, new_data)
design %*% object
}
| /R/ridge_regression.R | no_license | Z1chenZhao/bis557 | R | false | false | 2,204 | r | #' @title ridge_regression
#' @description Compute a linear regression with gradient descent method.
#'
#' @param form A formula with the format of "Y ~ .".
#' @param dat A dataframe.
#' @param lambda The ridge penalty term lambda set default as 0.
#' @param contrasts A list of contrasts.
#'
#' @return A list of coefficients.
#' @importFrom stats model.frame model.matrix
#' @export
#'
# Fit a ridge (L2-penalized) linear regression via the SVD of the centered and
# scaled design matrix. Returns a named coefficient vector ("Intercept"
# first) with the model formula attached as the "formula" attribute so that
# predict.ridge_regression() can rebuild the design matrix.
ridge_regression <- function(form, dat, lambda=0, contrasts=NULL) {
#Eliminate all rows containing NAs within the dataset
dat_no_na <- model.frame(form, dat)
#Fix: reset the row names of the filtered frame (the previous code targeted
#`data`, the base R function, which was a silent no-op)
rownames(dat_no_na) <- NULL
#Define the model matrix (includes the intercept column)
X <- model.matrix(form, dat_no_na, contrasts.arg = contrasts)
#Get the name of the dependent variable from the formula's left-hand side
y_name <- as.character(form)[2]
#Define the dependent variable as a one-column matrix
Y <- as.matrix(subset(dat_no_na, select = y_name), ncol = 1)
#Center the dependent variable
mean.Y <- mean(Y)
Y <- Y - mean.Y
#Drop the intercept column; drop = FALSE keeps the matrix shape even when
#there is a single predictor
X <- X[, -1, drop = FALSE]
#Center the predictors (column-major replication matches the matrix layout)
mean.X <- colMeans(X)
X <- X - rep(mean.X, rep(nrow(X), ncol(X)))
#Scale the predictors to unit root-mean-square
scale.X <- drop(rep(1/nrow(X), nrow(X)) %*% X^2)^0.5
X <- X/rep(scale.X, rep(nrow(X), ncol(X)))
#Ridge solution via the SVD: beta = V diag(d / (d^2 + lambda)) U' Y.
#nrow = length(...) guards diag() against the one-predictor case, where
#diag(scalar) would otherwise build an identity matrix of that size.
decomp <- svd(X)
shrink <- decomp$d/(decomp$d^2 + lambda)
beta <- decomp$v %*% diag(shrink, nrow = length(shrink)) %*% t(decomp$u) %*% Y
#Undo the predictor scaling
beta <- t(as.matrix(beta/scale.X))
#Recover the intercept from the centering means
intercept <- mean.Y - beta %*% mean.X
#Assemble the full coefficient vector, intercept first
beta <- cbind(intercept, beta)
beta <- as.vector(beta)
#Name the intercept and the slope coefficients
names(beta) <- c("Intercept", colnames(X))
#Attach the formula so predict.ridge_regression() can rebuild the design
#matrix (it reads attributes(object)$formula)
attr(beta, "formula") <- form
#Generate output
beta
}
#' Prediction function for Ridge Regression
#'
#' @param object ridge_regression object
#' @param ... a dataframe
#'
#' @return An estimate of new Y given X.
#' @export
#'
predict.ridge_regression <- function(object, ...) {
# The new data travels as the first element of the dots.
extra_args <- list(...)
new_data <- extra_args[[1]]
# Refuse anything that is not a data.frame.
if (!is.data.frame(new_data)) {
stop("The first argument should be a data.frame of values",
"to predict")
}
# Rebuild the design matrix from the formula stored on the fitted object,
# then multiply by the coefficient vector to obtain fitted values.
design <- model.matrix(attributes(object)$formula, new_data)
design %*% object
}
|
# Vector Add
#
###############################################################################
# Benchmark setup: build two length-n normal vectors with roughly 10% of the
# entries set to NA, to feed the iterative vector-add benchmark in run().
# Args: args - character vector whose first element is the vector length.
# Returns: list(A, B, n).
setup = function(args='10000000') {
n <- as.integer(args[1])
# fall back to the default size when the argument does not parse as a number
if(is.na(n)){ n <- 10000000 }
cat("Vector Add two", n, "size vectors(10% NA), iterative method\n");
A <- rnorm(n)
B <- rnorm(n)
# NOTE(review): runif() produces non-integer and possibly duplicated
# positions; R truncates them when indexing, so slightly fewer than 10% of
# entries may end up NA. sample(n, n * 0.1) would give exactly-10% distinct
# indices — confirm whether the approximation is intentional.
idx <- runif(n*0.1, 1, n) #10% are NA
A[idx] <- NA
idx <- runif(n*0.1, 1, n) #10% are NA
B[idx] <- NA
list(A, B, n)
}
# Benchmark body: element-wise addition of A and B with an explicit scalar
# loop. The loop is deliberate — this file benchmarks the iterative (non-
# vectorized) form; NA entries propagate through the sum.
# Args: data - list(A, B, n) as produced by setup().
# Returns: numeric vector C with C[i] = A[i] + B[i].
run <- function(data) {
#data holds two numeric vectors and their common length
A <- data[[1]]
B <- data[[2]]
n <- data[[3]]
# preallocate the result so the loop does not grow the vector
C <- vector('double', n)
# NOTE(review): 1:n evaluates to c(1, 0) when n == 0 — seq_len(n) is safer
for(i in 1:n) {
C[i] = A[i] + B[i]
}
C
} | /mathkernel/DoubleNAVecAdd-T1.R | permissive | rbenchmark/benchmarks | R | false | false | 683 | r | # Vector Add
#
###############################################################################
# Benchmark setup: build two length-n normal vectors with roughly 10% of the
# entries set to NA, to feed the iterative vector-add benchmark in run().
# Args: args - character vector whose first element is the vector length.
# Returns: list(A, B, n).
setup = function(args='10000000') {
n <- as.integer(args[1])
# fall back to the default size when the argument does not parse as a number
if(is.na(n)){ n <- 10000000 }
cat("Vector Add two", n, "size vectors(10% NA), iterative method\n");
A <- rnorm(n)
B <- rnorm(n)
# NOTE(review): runif() produces non-integer and possibly duplicated
# positions; R truncates them when indexing, so slightly fewer than 10% of
# entries may end up NA — confirm the approximation is intentional.
idx <- runif(n*0.1, 1, n) #10% are NA
A[idx] <- NA
idx <- runif(n*0.1, 1, n) #10% are NA
B[idx] <- NA
list(A, B, n)
}
# Benchmark body: element-wise addition of A and B with an explicit scalar
# loop (deliberately iterative — that is what this file measures; NA entries
# propagate through the sum).
# Args: data - list(A, B, n) as produced by setup().
# Returns: numeric vector C with C[i] = A[i] + B[i].
run <- function(data) {
#data holds two numeric vectors and their common length
A <- data[[1]]
B <- data[[2]]
n <- data[[3]]
# preallocate the result so the loop does not grow the vector
C <- vector('double', n)
# NOTE(review): 1:n evaluates to c(1, 0) when n == 0 — seq_len(n) is safer
for(i in 1:n) {
C[i] = A[i] + B[i]
}
C
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SVG.R
\name{SVG_}
\alias{SVG_}
\title{Create a compact \code{svg} object}
\usage{
SVG_(width = NULL, height = NULL, viewbox = TRUE)
}
\arguments{
\item{width}{The width and height attributes on the top-level \verb{<svg>}
element. Both of these attributes are optional but, if provided, take in a
variety of dimensions and keywords. If numerical values are solely used,
they are assumed to be 'px' length values. Dimensions can be percentage
values (i.e., \code{"75\%"}) or length values with the following units: \code{"em"},
\code{"ex"}, \code{"px"}, \code{"in"}, \code{"cm"}, \code{"mm"}, \code{"pt"}, and \code{"pc"}. Using \code{NULL},
the default, excludes the attribute.}
\item{height}{The width and height attributes on the top-level \verb{<svg>}
element. Both of these attributes are optional but, if provided, take in a
variety of dimensions and keywords. If numerical values are solely used,
they are assumed to be 'px' length values. Dimensions can be percentage
values (i.e., \code{"75\%"}) or length values with the following units: \code{"em"},
\code{"ex"}, \code{"px"}, \code{"in"}, \code{"cm"}, \code{"mm"}, \code{"pt"}, and \code{"pc"}. Using \code{NULL},
the default, excludes the attribute.}
\item{viewbox}{An optional set of dimensions that defines the SVG \code{viewBox}
attribute. The \code{viewBox} for an SVG element is the position and dimension,
in user space, of an SVG viewport. If supplied, this could either be in the
form of a four-element, numeric vector corresponding to the \code{"min-x"},
\code{"min-y"}, \code{"width"}, and \code{"height"} of the rectangle, or, as \code{TRUE} which
uses the vector \code{c(0, 0, width, height)}. Using \code{NULL}, the default,
excludes this attribute.}
}
\value{
An \code{svg} object.
}
\description{
The \code{SVG_()} function is a variation on \code{\link[=SVG]{SVG()}} (the entry point for building
an SVG) in that the output tags will be as compact as possible (fewer
linebreaks, less space characters). This is a reasonable option if the
eventual use for the generated SVG is as inline SVG within HTML documents.
}
\examples{
if (interactive()) {
# Create a simple SVG with a rectangle and a circle
svg <-
SVG_(width = 100, height = 50) \%>\%
svg_rect(x = 0, y = 0, width = 30, height = 20) \%>\%
svg_circle(x = 50, y = 10, diameter = 20)
}
}
| /man/SVG_.Rd | no_license | cran/omsvg | R | false | true | 2,407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SVG.R
\name{SVG_}
\alias{SVG_}
\title{Create a compact \code{svg} object}
\usage{
SVG_(width = NULL, height = NULL, viewbox = TRUE)
}
\arguments{
\item{width}{The width and height attributes on the top-level \verb{<svg>}
element. Both of these attributes are optional but, if provided, take in a
variety of dimensions and keywords. If numerical values are solely used,
they are assumed to be 'px' length values. Dimensions can be percentage
values (i.e., \code{"75\%"}) or length values with the following units: \code{"em"},
\code{"ex"}, \code{"px"}, \code{"in"}, \code{"cm"}, \code{"mm"}, \code{"pt"}, and \code{"pc"}. Using \code{NULL},
the default, excludes the attribute.}
\item{height}{The width and height attributes on the top-level \verb{<svg>}
element. Both of these attributes are optional but, if provided, take in a
variety of dimensions and keywords. If numerical values are solely used,
they are assumed to be 'px' length values. Dimensions can be percentage
values (i.e., \code{"75\%"}) or length values with the following units: \code{"em"},
\code{"ex"}, \code{"px"}, \code{"in"}, \code{"cm"}, \code{"mm"}, \code{"pt"}, and \code{"pc"}. Using \code{NULL},
the default, excludes the attribute.}
\item{viewbox}{An optional set of dimensions that defines the SVG \code{viewBox}
attribute. The \code{viewBox} for an SVG element is the position and dimension,
in user space, of an SVG viewport. If supplied, this could either be in the
form of a four-element, numeric vector corresponding to the \code{"min-x"},
\code{"min-y"}, \code{"width"}, and \code{"height"} of the rectangle, or, as \code{TRUE} which
uses the vector \code{c(0, 0, width, height)}. Using \code{NULL}, the default,
excludes this attribute.}
}
\value{
An \code{svg} object.
}
\description{
The \code{SVG_()} function is a variation on \code{\link[=SVG]{SVG()}} (the entry point for building
an SVG) in that the output tags will be as compact as possible (fewer
linebreaks, less space characters). This is a reasonable option if the
eventual use for the generated SVG is as inline SVG within HTML documents.
}
\examples{
if (interactive()) {
# Create a simple SVG with a rectangle and a circle
svg <-
SVG_(width = 100, height = 50) \%>\%
svg_rect(x = 0, y = 0, width = 30, height = 20) \%>\%
svg_circle(x = 50, y = 10, diameter = 20)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ss.cc.R
\name{ss.cc.constants}
\alias{ss.cc.constants}
\alias{ss.cc.getc4}
\alias{ss.cc.getd2}
\alias{ss.cc.getd3}
\title{Functions to find out constants of the relative range distribution.}
\usage{
ss.cc.getd2(n = NA)
ss.cc.getd3(n = NA)
ss.cc.getc4(n = NA)
}
\arguments{
\item{n}{Sample size}
}
\value{
A numeric value for the constant.
}
\description{
These functions compute the constants d2, d3 and c4 to get estimators of the
standard deviation to set control limits.
}
\examples{
ss.cc.getd2(20)
ss.cc.getd3(20)
ss.cc.getc4(20)
}
\references{
Cano, Emilio L., Moguerza, Javier M. and Redchuk, Andres. 2012.
\emph{Six Sigma with {R}. Statistical Engineering for Process
Improvement}, Use R!, vol. 36. Springer, New York.
\url{https://www.springer.com/gp/book/9781461436515}.
}
\seealso{
ss.cc
}
\author{
EL Cano
}
\keyword{charts}
\keyword{constants}
\keyword{control}
| /man/ss.cc.constants.Rd | no_license | jjang82/SixSigma | R | false | true | 959 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ss.cc.R
\name{ss.cc.constants}
\alias{ss.cc.constants}
\alias{ss.cc.getc4}
\alias{ss.cc.getd2}
\alias{ss.cc.getd3}
\title{Functions to find out constants of the relative range distribution.}
\usage{
ss.cc.getd2(n = NA)
ss.cc.getd3(n = NA)
ss.cc.getc4(n = NA)
}
\arguments{
\item{n}{Sample size}
}
\value{
A numeric value for the constant.
}
\description{
These functions compute the constants d2, d3 and c4 to get estimators of the
standard deviation to set control limits.
}
\examples{
ss.cc.getd2(20)
ss.cc.getd3(20)
ss.cc.getc4(20)
}
\references{
Cano, Emilio L., Moguerza, Javier M. and Redchuk, Andres. 2012.
\emph{Six Sigma with {R}. Statistical Engineering for Process
Improvement}, Use R!, vol. 36. Springer, New York.
\url{https://www.springer.com/gp/book/9781461436515}.
}
\seealso{
ss.cc
}
\author{
EL Cano
}
\keyword{charts}
\keyword{constants}
\keyword{control}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AzureResources.R
\name{azureListRG}
\alias{azureListRG}
\title{Get all resource groups in subscription ID.}
\usage{
azureListRG(azureActiveContext, subscriptionID, azToken, verbose = FALSE)
}
\arguments{
\item{azureActiveContext}{A container used for caching variables used by AzureSMR}
\item{subscriptionID}{Set the subscriptionID. This is obtained automatically by \code{\link{azureAuthenticate}} when only a single subscriptionID is available via Active Directory}
\item{azToken}{Azure authentication token, obtained by \code{\link{azureAuthenticate}}}
\item{verbose}{Print Tracing information (Default False)}
}
\value{
Returns a data frame of the resource groups in the subscription.
}
\description{
Get all resource groups in subscription ID.
}
\seealso{
Other Resource group functions: \code{\link{azureCreateResourceGroup}},
\code{\link{azureDeleteResourceGroup}},
\code{\link{azureListAllResources}},
\code{\link{azureListSubscriptions}}
}
| /man/azureListRG.Rd | no_license | gittu4/AzureSMR | R | false | true | 1,009 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AzureResources.R
\name{azureListRG}
\alias{azureListRG}
\title{Get all resource groups in subscription ID.}
\usage{
azureListRG(azureActiveContext, subscriptionID, azToken, verbose = FALSE)
}
\arguments{
\item{azureActiveContext}{A container used for caching variables used by AzureSMR}
\item{subscriptionID}{Set the subscriptionID. This is obtained automatically by \code{\link{azureAuthenticate}} when only a single subscriptionID is available via Active Directory}
\item{azToken}{Azure authentication token, obtained by \code{\link{azureAuthenticate}}}
\item{verbose}{Print Tracing information (Default False)}
}
\value{
Returns a data frame of the resource groups in the subscription.
}
\description{
Get all resource groups in subscription ID.
}
\seealso{
Other Resource group functions: \code{\link{azureCreateResourceGroup}},
\code{\link{azureDeleteResourceGroup}},
\code{\link{azureListAllResources}},
\code{\link{azureListSubscriptions}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discus.R
\name{create_df_discus}
\alias{create_df_discus}
\title{create_df_discus}
\usage{
create_df_discus()
}
\value{
empty dataframe
}
\description{
Template for storm discussions dataframe
}
\seealso{
\code{\link{get_discus}}
}
\keyword{internal}
| /man/create_df_discus.Rd | permissive | mraza007/rrricanes | R | false | true | 329 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discus.R
\name{create_df_discus}
\alias{create_df_discus}
\title{create_df_discus}
\usage{
create_df_discus()
}
\value{
empty dataframe
}
\description{
Template for storm discussions dataframe
}
\seealso{
\code{\link{get_discus}}
}
\keyword{internal}
|
context("ts studies_germplasm_details")
con <- ba_db()$testserver
# expect_equal()/expect_true() replace the deprecated
# expect_that(..., is_true()) idiom from testthat 0.x.
test_that("Study_germplasm_details results are present", {
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001")
  expect_equal(nrow(res), 2)
})
test_that("Study_germplasm_details out formats work", {
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001", rclass = "json")
  expect_true("json" %in% class(res))
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001", rclass = "list")
  expect_true("list" %in% class(res))
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001", rclass = "data.frame")
  expect_true("data.frame" %in% class(res))
})
| /tests/testserver/test_ts_studies_germplasm_details.R | no_license | khaled-alshamaa/brapi | R | false | false | 724 | r | context("ts studies_germplasm_details")
con <- ba_db()$testserver
# expect_equal()/expect_true() replace the deprecated
# expect_that(..., is_true()) idiom from testthat 0.x.
test_that("Study_germplasm_details results are present", {
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001")
  expect_equal(nrow(res), 2)
})
test_that("Study_germplasm_details out formats work", {
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001", rclass = "json")
  expect_true("json" %in% class(res))
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001", rclass = "list")
  expect_true("list" %in% class(res))
  res <- ba_studies_germplasm_details(con = con, studyDbId = "1001", rclass = "data.frame")
  expect_true("data.frame" %in% class(res))
})
|
# Teaching script: demonstrate missing-data patterns and imputation with mice,
# comparing naive mean imputation against multivariate imputation.
# NOTE(review): data('NHANES') loads the large NHANES survey data set, but all
# subsequent code uses mice's small built-in `nhanes` demo data (25 rows) —
# confirm whether the NHANES package/data attach is actually needed.
#install.packages(c('ggplot2','mice','NHANES')) #uncomment to install packages
library('mice')
library('NHANES')
data('NHANES')
?nhanes #will open the documentation that explains the data frame
#display the data frame or df
#[1]df are a tabular data format where each column has a specific data type
nhanes #display the data frame or df
#[2] notice that each of the data types are numeric or decimal/floating version of a real number
# we can convert the data type of a column by selecting it: nhanes[,'age'] gets only the age column
nhanes[,'age']<-as.factor(nhanes[,'age']) #will get factors 1,2,3 no missing. Factors are good for categories
nhanes[,'hyp']<-as.logical(nhanes[,'hyp']-1) #hyp is coded 1/2, so -1 maps to FALSE/TRUE; NA stays missing
?mice #open docs for mice package, scroll down and open the vignettes (tutorials) for more if needed!
md.pattern(nhanes) #output the number of missing values and plot
#[3] notice how there are 7 out of the 25 rows that have no data other than the age most of which are age group 1
#because there seems to be a pattern to the missingness of the data, we can't assume that the missing data is random
#in which case we will need to consider more carefully how to impute (fill in the missing values)
#mean--------------------------------------------------------------------------
imp<-mice(nhanes,method='mean',m=1) #naive baseline: fill every gap with the column mean
complete(imp) #show the resulting fake data
densityplot(nhanes[,'bmi']) #plot original
densityplot(complete(imp)[,'bmi']) #mean imputation piles mass at a single point — bad fake data
#mice algorithm----------------------------------------------------------------
imp<-mice(nhanes,m=30) #multivariate imputation with 30 imputed data sets — much better
complete(imp) #show the resulting fake data
densityplot(nhanes[,'bmi']) #plot original
densityplot(complete(imp)[,'bmi']) #distribution shape is preserved — better fake data
| /Imputation.R | no_license | timothyjamesbecker/QNT745 | R | false | false | 1,852 | r | #install.packages(c('ggplot2','mice','NHANES')) #uncomment to install packages
# Teaching script: demonstrate missing-data patterns and imputation with mice,
# comparing naive mean imputation against multivariate imputation.
# NOTE(review): data('NHANES') loads the large NHANES survey data set, but all
# subsequent code uses mice's small built-in `nhanes` demo data (25 rows) —
# confirm whether the NHANES package/data attach is actually needed.
library('mice')
library('NHANES')
data('NHANES')
?nhanes #will open the documentation that explains the data frame
#display the data frame or df
#[1]df are a tabular data format where each column has a specific data type
nhanes #display the data frame or df
#[2] notice that each of the data types are numeric or decimal/floating version of a real number
# we can convert the data type of a column by selecting it: nhanes[,'age'] gets only the age column
nhanes[,'age']<-as.factor(nhanes[,'age']) #will get factors 1,2,3 no missing. Factors are good for categories
nhanes[,'hyp']<-as.logical(nhanes[,'hyp']-1) #hyp is coded 1/2, so -1 maps to FALSE/TRUE; NA stays missing
?mice #open docs for mice package, scroll down and open the vignettes (tutorials) for more if needed!
md.pattern(nhanes) #output the number of missing values and plot
#[3] notice how there are 7 out of the 25 rows that have no data other than the age most of which are age group 1
#because there seems to be a pattern to the missingness of the data, we can't assume that the missing data is random
#in which case we will need to consider more carefully how to impute (fill in the missing values)
#mean--------------------------------------------------------------------------
imp<-mice(nhanes,method='mean',m=1) #naive baseline: fill every gap with the column mean
complete(imp) #show the resulting fake data
densityplot(nhanes[,'bmi']) #plot original
densityplot(complete(imp)[,'bmi']) #mean imputation piles mass at a single point — bad fake data
#mice algorithm----------------------------------------------------------------
imp<-mice(nhanes,m=30) #multivariate imputation with 30 imputed data sets — much better
complete(imp) #show the resulting fake data
densityplot(nhanes[,'bmi']) #plot original
densityplot(complete(imp)[,'bmi']) #distribution shape is preserved — better fake data
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/skin.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.8,family="gaussian",standardize=TRUE)
sink('./skin_083.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/skin/skin_083.R | no_license | esbgkannan/QSMART | R | false | false | 341 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/skin.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.8,family="gaussian",standardize=TRUE)
sink('./skin_083.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
intact_othero <- function(organism, organismID, file, pathracine) {
cat("\n\n>FORMATING INTACT DATABASE")
intactFileName <- file
cat("\n>Loading database ... ")
intact <- read.delim(file = intactFileName, header = T, sep = "\t")
if (dim(intact)[2] != 15) {
cat(paste("\nIncorrect data dimension, read userguide for more informations"))
stop()
}
intact <- intact[,-c(3,4,14)]
intact <- intact[grep("uniprotkb:", intact[,1]),]
intact <- intact[grep("uniprotkb:", intact[,2]),]
if (organism == "Saccharomyces cerevisiae") {
organismID <- "559292"
}
if (organism == "Homo-sapiens") {
organismID <- "9606"
}
if (organism == "Caenorhabditis elegans") {
organismID <- "6239"
}
if (organism == "Drosophila melanogaster") {
organismID <- "7227"
}
if (organism == "Escherichia coli") {
organismID <- "562"
}
if (organism == "Mus musculus") {
organismID <- "10090"
}
if (organism == "Rattus norvegicus") {
organismID <- "10116"
}
rtaxon <- paste("taxid:", organismID, "(", organism, ")", sep = '')
taxon <- organismID
intact <- intact[grep(taxon, intact[,8]),]
intact <- intact[grep(taxon, intact[,9]),]
intact[,8] <- rtaxon
intact[,9] <- rtaxon
if (length(grep("colocalization", intact[,10])) > 0) {
intact <- intact[-grep("colocalization", intact[,10]),]
}
irefindex <- intact
irefindex[,10] <- rep("physical", dim(irefindex)[1])
irefindex[,1] <- apply(as.matrix(apply(as.matrix(irefindex[,1]), 1, strsplit, split = ":")), 1, unlist)[2,]
irefindex[,2] <- apply(as.matrix(apply(as.matrix(irefindex[,2]), 1, strsplit, split = ":")), 1, unlist)[2,]
if (length(grep("-", irefindex[,1])) > 0) {
irefindex[grep("-", irefindex[,1]),1] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,1]),1]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
if (length(grep("-", irefindex[,2])) > 0) {
irefindex[grep("-", irefindex[,2]),2] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,2]),2]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
tmpA1 <- rep("NA", length(irefindex[,3]))
tmpB1 <- rep("NA", length(irefindex[,3]))
tmpA2 <- rep("NA", length(irefindex[,3]))
tmpB2 <- rep("NA", length(irefindex[,3]))
tmpA <- apply(as.matrix(apply(as.matrix(irefindex[,3]), 1, strsplit, split = "\\|")), 1, unlist)
tmpB <- apply(as.matrix(apply(as.matrix(irefindex[,4]), 1, strsplit, split = "\\|")), 1, unlist)
cat(paste("OK\n>Parsing interactions\n"))
prsbar <- txtProgressBar(min = 1, max = length(irefindex[,3]), style = 3)
for (i in 1:length(irefindex[,3])) {
tmpA1[i] <- tmpA[[i]][1]
if (length(grep("\\(gene name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(gene name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(locus name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(orf name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(display_long\\)", tmpA[[i]])]
}
}
}
}
tmpB1[i] <- tmpB[[i]][1]
if (length(grep("\\(gene name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(gene name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(locus name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(orf name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\((display_long)\\)", tmpB[[i]])]
}
}
}
}
setTxtProgressBar(prsbar, i)
}
GeneNameA <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpA2))
GeneNameA <- sub("\\(locus name\\)", '', GeneNameA)
GeneNameA <- sub("\\(orf name\\)", '', GeneNameA)
GeneNameA <- sub("\\(gene name synonym\\)", '', GeneNameA)
GeneNameA <- sub("\\(display_long\\)", '', GeneNameA)
GeneNameA[grep("CELE_", GeneNameA)] <- sub("CELE_", "", GeneNameA[grep("CELE_", GeneNameA)])
GeneNameA[grep("psi-mi:", GeneNameA)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameA[grep("psi-mi:", GeneNameA)]))
GeneNameB <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpB2))
GeneNameB <- sub("\\(locus name\\)", '', GeneNameB)
GeneNameB <- sub("\\(orf name\\)", '', GeneNameB)
GeneNameB <- sub("\\(gene name synonym\\)", '', GeneNameB)
GeneNameB <- sub("\\(display_long\\)", '', GeneNameB)
GeneNameB[grep("CELE_", GeneNameB)] <- sub("CELE_", "", GeneNameB[grep("CELE_", GeneNameB)])
GeneNameB[grep("psi-mi:", GeneNameB)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameB[grep("psi-mi:", GeneNameB)]))
irefindex[,3] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpA1))
irefindex[,4] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpB1))
numParticipants <- rep(2, dim(irefindex)[1])
sourcedb <- rep("Intact", dim(irefindex)[1])
irefindex[,11] <- sourcedb
cat("OK\n>Saving database(s) ... ")
setwd(pathracine)
organism <- gsub(" ", "-", organism)
organism.path <- paste(pathracine, organism, sep = '/')
dir.create(organism.path, showWarnings = FALSE)
database.path <- paste(organism.path, "Databases", sep = '/')
dir.create(database.path, showWarnings = FALSE)
setwd(database.path)
irefindex <- irefindex[,c(1:12)]
irefindex <- cbind(irefindex, numParticipants, GeneNameA, GeneNameB)
colnames(irefindex) <- c("uidA", "uidB", "aliasA", "aliasB", "method", "author", "pmids", "taxa", "taxb", "interactionType", "sourcedb", "confidence", "numParticipants", "GeneNameA", "GeneNameB")
outputfilename <- paste(organism, "_intact.txt", sep = "")
write.table(irefindex, file = outputfilename, row.names = F, col.names = T, quote = F, sep = "\t")
cat(paste("OK\n\n>Formating intact database is done.\n\nDatabase file is saved in", database.path, sep = " : "))
cat(paste("\n\n"))
setwd(pathracine)
visible(mainpanel) <- T
}
intact_window <- function(f_pos, mainpanel, pana, mainpath) {
file <- c()
return.parameter <- c()
panel_para <- gwindow("Intact database file formatting : ", parent = f_pos, visible = T)
pp <- gvbox(container = panel_para)
pp$set_borderwidth(10L)
flyt <- gformlayout(container = pp, expand = TRUE)
# Parametres selection
gcombobox(c('Caenorhabditis elegans', 'Drosophila melanogaster', 'Escherichia coli', 'Homo sapiens', 'Mus musculus', 'Rattus norvegicus', 'Saccharomyces cerevisiae', 'Other'), label = "Organism", selected = 7, container = flyt)
# File selection
chdb <- ggroup(container = pp, horizontale = T)
addSpring(chdb)
bouton1 <- gbutton("Intact file", container = chdb, handler = function(...) {
file <<- gfile(text = "Select a file", type = "open", multi = F, container = chdb)
if (is.null(file) == T) {
gmessage('Selected file is null', icon = 'error')
}
if (is.null(file) == F) {
bouton1$set_value(paste(length(file), 'intact file selected'))
cat(paste('\n>File selected : ', file, sep = ''))
}
})
ppb <- ggroup(container = pp)
addSpring(ppb)
gbutton("Formate file", handler = function(h,...) {
return.parameter <<- svalue(flyt)
visible(panel_para) <- F
# selected parametres storage
organism <- as.character(return.parameter[1])
# When all paramaters are Ok
if (is.null(file) == F) {
######################################################################################
########################### FORMATE AND PARSING #################################
pathracine <- getwd()
if (organism == "Other") {
panelorganism <- gwindow("Organism description", parent = f_pos, visible = F, expand = T)
pc <- ggroup(container = panelorganism, horizontal = F, use.scrollwindow = T)
pcsb <- ggroup(container = pc, horizontal = F, use.scrollwindow = F)
lg <- gvbox(container = pcsb)
pg <- gvbox(container = pcsb)
fllg <- gformlayout(container = lg)
flpg <- gformlayout(container = pg)
organismName <- gedit(initial.msg = 'Organism Name', label = "NAME", container = fllg)
organismId <- gedit(initial.msg = 'Organism ID', label = "NCBI ID", container = flpg)
visible(panelorganism) <- T
bpc <- ggroup(container = pc); addSpring(bpc)
# 1 : Other organism
bouton1 <- gbutton("OK", handler = function(h,...) {
# Rassemblement des modifications a apporter
org.name <- cbind(names(svalue(fllg)), svalue(fllg))
org.name <- data.frame(org.name, row.names = NULL)
org.id <- cbind(names(svalue(flpg)), svalue(flpg))
org.id <- data.frame(org.id, row.names = NULL)
org <- cbind(org.name, org.id)
colnames(org) <- c('Organism_name', 'Organism_id')
organism <- as.character(org[1,2])
organismID <- as.character(org[1,4])
intact_othero(organism, organismID, file, pathracine)
dispose(bpc)
}, container = bpc)
}
else {
cat("\n\n>FORMATING INTACT DATABASE")
intactFileName <- file
cat("\n>Loading database ... ")
intact <- read.delim(file = intactFileName, header = T, sep = "\t")
intact <- intact[,-c(3,4,14)]
intact <- intact[grep("uniprotkb:", intact[,1]),]
intact <- intact[grep("uniprotkb:", intact[,2]),]
if (organism == "Saccharomyces cerevisiae") {
organismID <- "559292"
}
if (organism == "Homo-sapiens") {
organismID <- "9606"
}
if (organism == "Caenorhabditis elegans") {
organismID <- "6239"
}
if (organism == "Drosophila melanogaster") {
organismID <- "7227"
}
if (organism == "Escherichia coli") {
organismID <- "562"
}
if (organism == "Mus musculus") {
organismID <- "10090"
}
if (organism == "Rattus norvegicus") {
organismID <- "10116"
}
rtaxon <- paste("taxid:", organismID, "(", organism, ")", sep = '')
taxon <- organismID
intact <- intact[grep(taxon, intact[,8]),]
intact <- intact[grep(taxon, intact[,9]),]
intact[,8] <- rtaxon
intact[,9] <- rtaxon
if (length(grep("colocalization", intact[,10])) > 0) {
intact <- intact[-grep("colocalization", intact[,10]),]
}
irefindex <- intact
irefindex[,10] <- rep("physical", dim(irefindex)[1])
irefindex[,1] <- apply(as.matrix(apply(as.matrix(irefindex[,1]), 1, strsplit, split = ":")), 1, unlist)[2,]
irefindex[,2] <- apply(as.matrix(apply(as.matrix(irefindex[,2]), 1, strsplit, split = ":")), 1, unlist)[2,]
if (length(grep("-", irefindex[,1])) > 0) {
irefindex[grep("-", irefindex[,1]),1] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,1]),1]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
if (length(grep("-", irefindex[,2])) > 0) {
irefindex[grep("-", irefindex[,2]),2] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,2]),2]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
tmpA1 <- rep("NA", length(irefindex[,3]))
tmpB1 <- rep("NA", length(irefindex[,3]))
tmpA2 <- rep("NA", length(irefindex[,3]))
tmpB2 <- rep("NA", length(irefindex[,3]))
tmpA <- apply(as.matrix(apply(as.matrix(irefindex[,3]), 1, strsplit, split = "\\|")), 1, unlist)
tmpB <- apply(as.matrix(apply(as.matrix(irefindex[,4]), 1, strsplit, split = "\\|")), 1, unlist)
cat(paste("OK\n>Parsing interactions\n"))
prsbar <- txtProgressBar(min = 1, max = length(irefindex[,3]), style = 3)
for (i in 1:length(irefindex[,3])) {
tmpA1[i] <- tmpA[[i]][1]
if (length(grep("\\(gene name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(gene name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(locus name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(orf name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(display_long\\)", tmpA[[i]])]
}
}
}
}
tmpB1[i] <- tmpB[[i]][1]
if (length(grep("\\(gene name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(gene name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(locus name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(orf name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\((display_long)\\)", tmpB[[i]])]
}
}
}
}
setTxtProgressBar(prsbar, i)
}
GeneNameA <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpA2))
GeneNameA <- sub("\\(locus name\\)", '', GeneNameA)
GeneNameA <- sub("\\(orf name\\)", '', GeneNameA)
GeneNameA <- sub("\\(gene name synonym\\)", '', GeneNameA)
GeneNameA <- sub("\\(display_long\\)", '', GeneNameA)
GeneNameA[grep("CELE_", GeneNameA)] <- sub("CELE_", "", GeneNameA[grep("CELE_", GeneNameA)])
GeneNameA[grep("psi-mi:", GeneNameA)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameA[grep("psi-mi:", GeneNameA)]))
GeneNameB <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpB2))
GeneNameB <- sub("\\(locus name\\)", '', GeneNameB)
GeneNameB <- sub("\\(orf name\\)", '', GeneNameB)
GeneNameB <- sub("\\(gene name synonym\\)", '', GeneNameB)
GeneNameB <- sub("\\(display_long\\)", '', GeneNameB)
GeneNameB[grep("CELE_", GeneNameB)] <- sub("CELE_", "", GeneNameB[grep("CELE_", GeneNameB)])
GeneNameB[grep("psi-mi:", GeneNameB)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameB[grep("psi-mi:", GeneNameB)]))
irefindex[,3] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpA1))
irefindex[,4] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpB1))
numParticipants <- rep(2, dim(irefindex)[1])
sourcedb <- rep("Intact", dim(irefindex)[1])
irefindex[,11] <- sourcedb
cat("Ok\n>Saving database(s) ... ")
setwd(pathracine)
organism <- gsub(" ", "-", organism)
organism.path <- paste(pathracine, organism, sep = '/')
dir.create(organism.path, showWarnings = FALSE)
database.path <- paste(organism.path, "Databases", sep = '/')
dir.create(database.path, showWarnings = FALSE)
setwd(database.path)
irefindex <- irefindex[,c(1:12)]
irefindex <- cbind(irefindex, numParticipants, GeneNameA, GeneNameB)
colnames(irefindex) <- c("uidA", "uidB", "aliasA", "aliasB", "method", "author", "pmids", "taxa", "taxb", "interactionType", "sourcedb", "confidence", "numParticipants", "GeneNameA", "GeneNameB")
outputfilename <- paste(organism, "_intact.txt", sep = "")
write.table(irefindex, file = outputfilename, row.names = F, col.names = T, quote = F, sep = "\t")
cat(paste("OK\n\n>Formating intact database is done.\n\nDatabase file is saved in", database.path, sep = " : "))
cat(paste("\n\n"))
setwd(pathracine)
visible(mainpanel) <<- T
}
dispose(panel_para)
dispose(ppb)
}
# Tests all elements necessary to the function execution
else if (is.null(file) == T) {
gmessage('File selected is null', icon = 'error')
dispose(ppb)
visible(panel_para) <- F
visible(mainpanel) <<- T
}
else {
gmessage('Error : Unable to start formatting', icon = 'error')
dispose(ppb)
visible(panel_para) <- F
visible(mainpanel) <<- T
}
}, container=ppb)
# Return
gbutton("Return", handler = function(h,...) {
dispose(panel_para)
visible(pana) <<- T
}, container = ppb)
visible(panel_para) <- T
}
| /R/intact_window.R | no_license | maggishaggy/appinetwork | R | false | false | 16,036 | r | intact_othero <- function(organism, organismID, file, pathracine) {
cat("\n\n>FORMATING INTACT DATABASE")
intactFileName <- file
cat("\n>Loading database ... ")
intact <- read.delim(file = intactFileName, header = T, sep = "\t")
if (dim(intact)[2] != 15) {
cat(paste("\nIncorrect data dimension, read userguide for more informations"))
stop()
}
intact <- intact[,-c(3,4,14)]
intact <- intact[grep("uniprotkb:", intact[,1]),]
intact <- intact[grep("uniprotkb:", intact[,2]),]
if (organism == "Saccharomyces cerevisiae") {
organismID <- "559292"
}
if (organism == "Homo-sapiens") {
organismID <- "9606"
}
if (organism == "Caenorhabditis elegans") {
organismID <- "6239"
}
if (organism == "Drosophila melanogaster") {
organismID <- "7227"
}
if (organism == "Escherichia coli") {
organismID <- "562"
}
if (organism == "Mus musculus") {
organismID <- "10090"
}
if (organism == "Rattus norvegicus") {
organismID <- "10116"
}
rtaxon <- paste("taxid:", organismID, "(", organism, ")", sep = '')
taxon <- organismID
intact <- intact[grep(taxon, intact[,8]),]
intact <- intact[grep(taxon, intact[,9]),]
intact[,8] <- rtaxon
intact[,9] <- rtaxon
if (length(grep("colocalization", intact[,10])) > 0) {
intact <- intact[-grep("colocalization", intact[,10]),]
}
irefindex <- intact
irefindex[,10] <- rep("physical", dim(irefindex)[1])
irefindex[,1] <- apply(as.matrix(apply(as.matrix(irefindex[,1]), 1, strsplit, split = ":")), 1, unlist)[2,]
irefindex[,2] <- apply(as.matrix(apply(as.matrix(irefindex[,2]), 1, strsplit, split = ":")), 1, unlist)[2,]
if (length(grep("-", irefindex[,1])) > 0) {
irefindex[grep("-", irefindex[,1]),1] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,1]),1]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
if (length(grep("-", irefindex[,2])) > 0) {
irefindex[grep("-", irefindex[,2]),2] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,2]),2]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
tmpA1 <- rep("NA", length(irefindex[,3]))
tmpB1 <- rep("NA", length(irefindex[,3]))
tmpA2 <- rep("NA", length(irefindex[,3]))
tmpB2 <- rep("NA", length(irefindex[,3]))
tmpA <- apply(as.matrix(apply(as.matrix(irefindex[,3]), 1, strsplit, split = "\\|")), 1, unlist)
tmpB <- apply(as.matrix(apply(as.matrix(irefindex[,4]), 1, strsplit, split = "\\|")), 1, unlist)
cat(paste("OK\n>Parsing interactions\n"))
prsbar <- txtProgressBar(min = 1, max = length(irefindex[,3]), style = 3)
for (i in 1:length(irefindex[,3])) {
tmpA1[i] <- tmpA[[i]][1]
if (length(grep("\\(gene name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(gene name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(locus name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(orf name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(display_long\\)", tmpA[[i]])]
}
}
}
}
tmpB1[i] <- tmpB[[i]][1]
if (length(grep("\\(gene name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(gene name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(locus name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(orf name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\((display_long)\\)", tmpB[[i]])]
}
}
}
}
setTxtProgressBar(prsbar, i)
}
GeneNameA <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpA2))
GeneNameA <- sub("\\(locus name\\)", '', GeneNameA)
GeneNameA <- sub("\\(orf name\\)", '', GeneNameA)
GeneNameA <- sub("\\(gene name synonym\\)", '', GeneNameA)
GeneNameA <- sub("\\(display_long\\)", '', GeneNameA)
GeneNameA[grep("CELE_", GeneNameA)] <- sub("CELE_", "", GeneNameA[grep("CELE_", GeneNameA)])
GeneNameA[grep("psi-mi:", GeneNameA)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameA[grep("psi-mi:", GeneNameA)]))
GeneNameB <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpB2))
GeneNameB <- sub("\\(locus name\\)", '', GeneNameB)
GeneNameB <- sub("\\(orf name\\)", '', GeneNameB)
GeneNameB <- sub("\\(gene name synonym\\)", '', GeneNameB)
GeneNameB <- sub("\\(display_long\\)", '', GeneNameB)
GeneNameB[grep("CELE_", GeneNameB)] <- sub("CELE_", "", GeneNameB[grep("CELE_", GeneNameB)])
GeneNameB[grep("psi-mi:", GeneNameB)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameB[grep("psi-mi:", GeneNameB)]))
irefindex[,3] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpA1))
irefindex[,4] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpB1))
numParticipants <- rep(2, dim(irefindex)[1])
sourcedb <- rep("Intact", dim(irefindex)[1])
irefindex[,11] <- sourcedb
cat("OK\n>Saving database(s) ... ")
setwd(pathracine)
organism <- gsub(" ", "-", organism)
organism.path <- paste(pathracine, organism, sep = '/')
dir.create(organism.path, showWarnings = FALSE)
database.path <- paste(organism.path, "Databases", sep = '/')
dir.create(database.path, showWarnings = FALSE)
setwd(database.path)
irefindex <- irefindex[,c(1:12)]
irefindex <- cbind(irefindex, numParticipants, GeneNameA, GeneNameB)
colnames(irefindex) <- c("uidA", "uidB", "aliasA", "aliasB", "method", "author", "pmids", "taxa", "taxb", "interactionType", "sourcedb", "confidence", "numParticipants", "GeneNameA", "GeneNameB")
outputfilename <- paste(organism, "_intact.txt", sep = "")
write.table(irefindex, file = outputfilename, row.names = F, col.names = T, quote = F, sep = "\t")
cat(paste("OK\n\n>Formating intact database is done.\n\nDatabase file is saved in", database.path, sep = " : "))
cat(paste("\n\n"))
setwd(pathracine)
visible(mainpanel) <- T
}
intact_window <- function(f_pos, mainpanel, pana, mainpath) {
file <- c()
return.parameter <- c()
panel_para <- gwindow("Intact database file formatting : ", parent = f_pos, visible = T)
pp <- gvbox(container = panel_para)
pp$set_borderwidth(10L)
flyt <- gformlayout(container = pp, expand = TRUE)
# Parametres selection
gcombobox(c('Caenorhabditis elegans', 'Drosophila melanogaster', 'Escherichia coli', 'Homo sapiens', 'Mus musculus', 'Rattus norvegicus', 'Saccharomyces cerevisiae', 'Other'), label = "Organism", selected = 7, container = flyt)
# File selection
chdb <- ggroup(container = pp, horizontale = T)
addSpring(chdb)
bouton1 <- gbutton("Intact file", container = chdb, handler = function(...) {
file <<- gfile(text = "Select a file", type = "open", multi = F, container = chdb)
if (is.null(file) == T) {
gmessage('Selected file is null', icon = 'error')
}
if (is.null(file) == F) {
bouton1$set_value(paste(length(file), 'intact file selected'))
cat(paste('\n>File selected : ', file, sep = ''))
}
})
ppb <- ggroup(container = pp)
addSpring(ppb)
gbutton("Formate file", handler = function(h,...) {
return.parameter <<- svalue(flyt)
visible(panel_para) <- F
# selected parametres storage
organism <- as.character(return.parameter[1])
# When all paramaters are Ok
if (is.null(file) == F) {
######################################################################################
########################### FORMATE AND PARSING #################################
pathracine <- getwd()
if (organism == "Other") {
panelorganism <- gwindow("Organism description", parent = f_pos, visible = F, expand = T)
pc <- ggroup(container = panelorganism, horizontal = F, use.scrollwindow = T)
pcsb <- ggroup(container = pc, horizontal = F, use.scrollwindow = F)
lg <- gvbox(container = pcsb)
pg <- gvbox(container = pcsb)
fllg <- gformlayout(container = lg)
flpg <- gformlayout(container = pg)
organismName <- gedit(initial.msg = 'Organism Name', label = "NAME", container = fllg)
organismId <- gedit(initial.msg = 'Organism ID', label = "NCBI ID", container = flpg)
visible(panelorganism) <- T
bpc <- ggroup(container = pc); addSpring(bpc)
# 1 : Other organism
bouton1 <- gbutton("OK", handler = function(h,...) {
# Rassemblement des modifications a apporter
org.name <- cbind(names(svalue(fllg)), svalue(fllg))
org.name <- data.frame(org.name, row.names = NULL)
org.id <- cbind(names(svalue(flpg)), svalue(flpg))
org.id <- data.frame(org.id, row.names = NULL)
org <- cbind(org.name, org.id)
colnames(org) <- c('Organism_name', 'Organism_id')
organism <- as.character(org[1,2])
organismID <- as.character(org[1,4])
intact_othero(organism, organismID, file, pathracine)
dispose(bpc)
}, container = bpc)
}
else {
cat("\n\n>FORMATING INTACT DATABASE")
intactFileName <- file
cat("\n>Loading database ... ")
intact <- read.delim(file = intactFileName, header = T, sep = "\t")
intact <- intact[,-c(3,4,14)]
intact <- intact[grep("uniprotkb:", intact[,1]),]
intact <- intact[grep("uniprotkb:", intact[,2]),]
if (organism == "Saccharomyces cerevisiae") {
organismID <- "559292"
}
if (organism == "Homo-sapiens") {
organismID <- "9606"
}
if (organism == "Caenorhabditis elegans") {
organismID <- "6239"
}
if (organism == "Drosophila melanogaster") {
organismID <- "7227"
}
if (organism == "Escherichia coli") {
organismID <- "562"
}
if (organism == "Mus musculus") {
organismID <- "10090"
}
if (organism == "Rattus norvegicus") {
organismID <- "10116"
}
rtaxon <- paste("taxid:", organismID, "(", organism, ")", sep = '')
taxon <- organismID
intact <- intact[grep(taxon, intact[,8]),]
intact <- intact[grep(taxon, intact[,9]),]
intact[,8] <- rtaxon
intact[,9] <- rtaxon
if (length(grep("colocalization", intact[,10])) > 0) {
intact <- intact[-grep("colocalization", intact[,10]),]
}
irefindex <- intact
irefindex[,10] <- rep("physical", dim(irefindex)[1])
irefindex[,1] <- apply(as.matrix(apply(as.matrix(irefindex[,1]), 1, strsplit, split = ":")), 1, unlist)[2,]
irefindex[,2] <- apply(as.matrix(apply(as.matrix(irefindex[,2]), 1, strsplit, split = ":")), 1, unlist)[2,]
if (length(grep("-", irefindex[,1])) > 0) {
irefindex[grep("-", irefindex[,1]),1] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,1]),1]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
if (length(grep("-", irefindex[,2])) > 0) {
irefindex[grep("-", irefindex[,2]),2] <- apply(as.matrix(apply(as.matrix(irefindex[grep("-", irefindex[,2]),2]), 1, strsplit, split = "-")), 1, unlist)[1,]
}
tmpA1 <- rep("NA", length(irefindex[,3]))
tmpB1 <- rep("NA", length(irefindex[,3]))
tmpA2 <- rep("NA", length(irefindex[,3]))
tmpB2 <- rep("NA", length(irefindex[,3]))
tmpA <- apply(as.matrix(apply(as.matrix(irefindex[,3]), 1, strsplit, split = "\\|")), 1, unlist)
tmpB <- apply(as.matrix(apply(as.matrix(irefindex[,4]), 1, strsplit, split = "\\|")), 1, unlist)
cat(paste("OK\n>Parsing interactions\n"))
prsbar <- txtProgressBar(min = 1, max = length(irefindex[,3]), style = 3)
for (i in 1:length(irefindex[,3])) {
tmpA1[i] <- tmpA[[i]][1]
if (length(grep("\\(gene name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(gene name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(locus name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(orf name\\)", tmpA[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpA[[i]])) > 0) {
tmpA2[i] <- tmpA[[i]][grep("\\(display_long\\)", tmpA[[i]])]
}
}
}
}
tmpB1[i] <- tmpB[[i]][1]
if (length(grep("\\(gene name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(gene name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(locus name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(locus name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(orf name\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\(orf name\\)", tmpB[[i]])]
}
else {
if (length(grep("\\(display_long\\)", tmpB[[i]])) > 0) {
tmpB2[i] <- tmpB[[i]][grep("\\((display_long)\\)", tmpB[[i]])]
}
}
}
}
setTxtProgressBar(prsbar, i)
}
GeneNameA <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpA2))
GeneNameA <- sub("\\(locus name\\)", '', GeneNameA)
GeneNameA <- sub("\\(orf name\\)", '', GeneNameA)
GeneNameA <- sub("\\(gene name synonym\\)", '', GeneNameA)
GeneNameA <- sub("\\(display_long\\)", '', GeneNameA)
GeneNameA[grep("CELE_", GeneNameA)] <- sub("CELE_", "", GeneNameA[grep("CELE_", GeneNameA)])
GeneNameA[grep("psi-mi:", GeneNameA)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameA[grep("psi-mi:", GeneNameA)]))
GeneNameB <- sub("\\(gene name\\)", '', sub("uniprotkb:", '', tmpB2))
GeneNameB <- sub("\\(locus name\\)", '', GeneNameB)
GeneNameB <- sub("\\(orf name\\)", '', GeneNameB)
GeneNameB <- sub("\\(gene name synonym\\)", '', GeneNameB)
GeneNameB <- sub("\\(display_long\\)", '', GeneNameB)
GeneNameB[grep("CELE_", GeneNameB)] <- sub("CELE_", "", GeneNameB[grep("CELE_", GeneNameB)])
GeneNameB[grep("psi-mi:", GeneNameB)] <- sub("_caeel", "", sub("psi-mi:", "", GeneNameB[grep("psi-mi:", GeneNameB)]))
irefindex[,3] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpA1))
irefindex[,4] <- sub("\\(display_long\\)", '', sub("psi-mi:", '', tmpB1))
numParticipants <- rep(2, dim(irefindex)[1])
sourcedb <- rep("Intact", dim(irefindex)[1])
irefindex[,11] <- sourcedb
cat("Ok\n>Saving database(s) ... ")
setwd(pathracine)
organism <- gsub(" ", "-", organism)
organism.path <- paste(pathracine, organism, sep = '/')
dir.create(organism.path, showWarnings = FALSE)
database.path <- paste(organism.path, "Databases", sep = '/')
dir.create(database.path, showWarnings = FALSE)
setwd(database.path)
irefindex <- irefindex[,c(1:12)]
irefindex <- cbind(irefindex, numParticipants, GeneNameA, GeneNameB)
colnames(irefindex) <- c("uidA", "uidB", "aliasA", "aliasB", "method", "author", "pmids", "taxa", "taxb", "interactionType", "sourcedb", "confidence", "numParticipants", "GeneNameA", "GeneNameB")
outputfilename <- paste(organism, "_intact.txt", sep = "")
write.table(irefindex, file = outputfilename, row.names = F, col.names = T, quote = F, sep = "\t")
cat(paste("OK\n\n>Formating intact database is done.\n\nDatabase file is saved in", database.path, sep = " : "))
cat(paste("\n\n"))
setwd(pathracine)
visible(mainpanel) <<- T
}
dispose(panel_para)
dispose(ppb)
}
# Tests all elements necessary to the function execution
else if (is.null(file) == T) {
gmessage('File selected is null', icon = 'error')
dispose(ppb)
visible(panel_para) <- F
visible(mainpanel) <<- T
}
else {
gmessage('Error : Unable to start formatting', icon = 'error')
dispose(ppb)
visible(panel_para) <- F
visible(mainpanel) <<- T
}
}, container=ppb)
# Return
gbutton("Return", handler = function(h,...) {
dispose(panel_para)
visible(pana) <<- T
}, container = ppb)
visible(panel_para) <- T
}
|
# Exercise data: monthly revenue (ingresos) and expenses (gastos) for 12 months.
ingresos <- c(14574.49, 7606.46, 8611.41, 9175.41, 8058.65, 8105.44, 11496.28, 9766.09, 10305.32, 14379.96, 10713.97, 15433.50)
gastos <- c(12051.82, 5695.07, 12319.20, 12089.72, 8658.57, 840.20, 3285.73, 5821.12, 6976.93, 16618.61, 10054.37, 3803.96)
# Solution
# Compute profit as the difference between revenue and expenses.
utilidad <- ingresos - gastos
utilidad
# Compute tax as 30% of profit, rounded to 2 decimal places.
impuesto <- round(0.30 * utilidad, 2)
impuesto
# Open the help page for round().
?round()
# Compute after-tax profit.
utilidad.despues.de.impuestos <- utilidad - impuesto
utilidad.despues.de.impuestos
# Compute the profit margin as after-tax profit over revenue,
# rounded to 2 decimal places, then multiplied by 100 to get a percentage.
margen.de.utilidad <- round(utilidad.despues.de.impuestos / ingresos, 2) * 100
margen.de.utilidad
# Compute the 12-month mean of after-tax profit.
promedio.utilidad.despues.de.impuestos <- mean(utilidad.despues.de.impuestos)
promedio.utilidad.despues.de.impuestos
# Find the months where after-tax profit is above the mean (logical vector).
meses.buenos <- utilidad.despues.de.impuestos > promedio.utilidad.despues.de.impuestos
meses.buenos
# Bad months are simply the opposite of the good months.
meses.malos <- !meses.buenos
meses.malos
# The best month is where after-tax profit equals the maximum.
mejor.mes <- utilidad.despues.de.impuestos == max(utilidad.despues.de.impuestos)
mejor.mes
# The worst month is where after-tax profit equals the minimum.
peor.mes <- utilidad.despues.de.impuestos == min(utilidad.despues.de.impuestos)
peor.mes
# Convert all monetary results to units of one thousand dollars.
ingresos.1000 <- round(ingresos / 1000, 0)
gastos.1000 <- round(gastos / 1000, 0)
utilidad.1000 <- round(utilidad / 1000, 0)
utilidad.despues.de.impuestos.1000 <- round(utilidad.despues.de.impuestos / 1000, 0)
# Print the results.
ingresos.1000
gastos.1000
utilidad.1000
utilidad.despues.de.impuestos.1000
margen.de.utilidad
meses.buenos
meses.malos
mejor.mes
peor.mes
# BONUS: preview of the next section — bind all results row-wise into a matrix.
M <- rbind(
  ingresos.1000,
  gastos.1000,
  utilidad.1000,
  utilidad.despues.de.impuestos.1000,
  margen.de.utilidad,
  meses.buenos,
  meses.malos,
  mejor.mes,
  peor.mes
)
# Print the matrix.
M
| /RPDSCER/Fundamentos de R/Curso R - Sección 3 - Práctica - Rellena los espacios en blanco/Sección 3 - Práctica - Rellena los espacios en blanco.R | no_license | TheHornyDaddy/Curso_R_Prov | R | false | false | 2,377 | r | #Datos
# Exercise data: monthly revenue (ingresos) and expenses (gastos) for 12 months.
ingresos <- c(14574.49, 7606.46, 8611.41, 9175.41, 8058.65, 8105.44, 11496.28, 9766.09, 10305.32, 14379.96, 10713.97, 15433.50)
gastos <- c(12051.82, 5695.07, 12319.20, 12089.72, 8658.57, 840.20, 3285.73, 5821.12, 6976.93, 16618.61, 10054.37, 3803.96)
# Solution
# Compute profit as the difference between revenue and expenses.
utilidad <- ingresos - gastos
utilidad
# Compute tax as 30% of profit, rounded to 2 decimal places.
impuesto <- round(0.30 * utilidad, 2)
impuesto
# Open the help page for round().
?round()
# Compute after-tax profit.
utilidad.despues.de.impuestos <- utilidad - impuesto
utilidad.despues.de.impuestos
# Compute the profit margin as after-tax profit over revenue,
# rounded to 2 decimal places, then multiplied by 100 to get a percentage.
margen.de.utilidad <- round(utilidad.despues.de.impuestos / ingresos, 2) * 100
margen.de.utilidad
# Compute the 12-month mean of after-tax profit.
promedio.utilidad.despues.de.impuestos <- mean(utilidad.despues.de.impuestos)
promedio.utilidad.despues.de.impuestos
# Find the months where after-tax profit is above the mean (logical vector).
meses.buenos <- utilidad.despues.de.impuestos > promedio.utilidad.despues.de.impuestos
meses.buenos
# Bad months are simply the opposite of the good months.
meses.malos <- !meses.buenos
meses.malos
# The best month is where after-tax profit equals the maximum.
mejor.mes <- utilidad.despues.de.impuestos == max(utilidad.despues.de.impuestos)
mejor.mes
# The worst month is where after-tax profit equals the minimum.
peor.mes <- utilidad.despues.de.impuestos == min(utilidad.despues.de.impuestos)
peor.mes
# Convert all monetary results to units of one thousand dollars.
ingresos.1000 <- round(ingresos / 1000, 0)
gastos.1000 <- round(gastos / 1000, 0)
utilidad.1000 <- round(utilidad / 1000, 0)
utilidad.despues.de.impuestos.1000 <- round(utilidad.despues.de.impuestos / 1000, 0)
# Print the results.
ingresos.1000
gastos.1000
utilidad.1000
utilidad.despues.de.impuestos.1000
margen.de.utilidad
meses.buenos
meses.malos
mejor.mes
peor.mes
# BONUS: preview of the next section — bind all results row-wise into a matrix.
M <- rbind(
  ingresos.1000,
  gastos.1000,
  utilidad.1000,
  utilidad.despues.de.impuestos.1000,
  margen.de.utilidad,
  meses.buenos,
  meses.malos,
  mejor.mes,
  peor.mes
)
# Print the matrix.
M
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of metforminanydrug
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Run CohortMethod package
#'
#' @details
#' Run the CohortMethod package, which implements the comparative cohort design.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder where the results were generated; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param maxCores How many parallel cores should be used? If more cores are made available
#' this can speed up the analyses.
#'
#' @export
runCohortMethod <- function(connectionDetails,
                            cdmDatabaseSchema,
                            cohortDatabaseSchema,
                            cohortTable,
                            oracleTempSchema,
                            outputFolder,
                            maxCores) {
  # All CohortMethod intermediate artifacts go into a dedicated sub-folder.
  cmOutputFolder <- file.path(outputFolder, "cmOutput")
  if (!file.exists(cmOutputFolder)) {
    dir.create(cmOutputFolder)
  }
  # Load the pre-specified analysis settings shipped with this study package.
  cmAnalysisListFile <- system.file("settings",
                                    "cmAnalysisList.json",
                                    package = "metforminanydrug")
  cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
  # Append subgroup covariate settings (days -99999 to -1 relative to index,
  # analysis ID 998) to every analysis' covariate settings.
  SubgroupCovSet <- createSubgroupCovariateSettings(windowStart = -99999, windowEnd = -1, analysisId = 998)
  for (i in seq_along(cmAnalysisList)){
    cmAnalysisList[[i]]$getDbCohortMethodDataArgs$covariateSettings <- list(cmAnalysisList[[i]]$getDbCohortMethodDataArgs$covariateSettings, SubgroupCovSet)
  }
  tcosList <- createTcos(outputFolder = outputFolder)
  outcomesOfInterest <- getOutcomesOfInterest()
  # Run all analyses. Thread counts are scaled from maxCores per step, since
  # the steps differ in memory and CPU demand.
  results <- CohortMethod::runCmAnalyses(connectionDetails = connectionDetails,
                                         cdmDatabaseSchema = cdmDatabaseSchema,
                                         exposureDatabaseSchema = cohortDatabaseSchema,
                                         exposureTable = cohortTable,
                                         outcomeDatabaseSchema = cohortDatabaseSchema,
                                         outcomeTable = cohortTable,
                                         outputFolder = cmOutputFolder,
                                         oracleTempSchema = oracleTempSchema,
                                         cmAnalysisList = cmAnalysisList,
                                         targetComparatorOutcomesList = tcosList,
                                         getDbCohortMethodDataThreads = min(3, maxCores),
                                         createStudyPopThreads = min(3, maxCores),
                                         createPsThreads = max(1, round(maxCores/10)),
                                         psCvThreads = min(10, maxCores),
                                         trimMatchStratifyThreads = min(10, maxCores),
                                         fitOutcomeModelThreads = max(1, round(maxCores/4)),
                                         outcomeCvThreads = min(4, maxCores),
                                         refitPsForEveryOutcome = FALSE,
                                         outcomeIdsOfInterest = outcomesOfInterest)
  ParallelLogger::logInfo("Summarizing results")
  analysisSummary <- CohortMethod::summarizeAnalyses(referenceTable = results,
                                                     outputFolder = cmOutputFolder)
  # Attach human-readable cohort and analysis names before exporting the CSV.
  analysisSummary <- addCohortNames(analysisSummary, "targetId", "targetName")
  analysisSummary <- addCohortNames(analysisSummary, "comparatorId", "comparatorName")
  analysisSummary <- addCohortNames(analysisSummary, "outcomeId", "outcomeName")
  analysisSummary <- addAnalysisDescription(analysisSummary, "analysisId", "analysisDescription")
  write.csv(analysisSummary, file.path(outputFolder, "analysisSummary.csv"), row.names = FALSE)
  ParallelLogger::logInfo("Computing covariate balance")
  balanceFolder <- file.path(outputFolder, "balance")
  if (!file.exists(balanceFolder)) {
    dir.create(balanceFolder)
  }
  # Balance is computed only for outcomes of interest that were actually
  # stratified/matched (non-empty strataFile), in parallel over rows.
  subset <- results[results$outcomeId %in% outcomesOfInterest,]
  subset <- subset[subset$strataFile != "", ]
  if (nrow(subset) > 0) {
    subset <- split(subset, seq(nrow(subset)))
    cluster <- ParallelLogger::makeCluster(min(3, maxCores))
    ParallelLogger::clusterApply(cluster, subset, computeCovariateBalance, cmOutputFolder = cmOutputFolder, balanceFolder = balanceFolder)
    ParallelLogger::stopCluster(cluster)
  }
}
# Compute covariate balance for one reference-table row and cache it as an
# .rds file in balanceFolder. Work is skipped when the cached file exists.
computeCovariateBalance <- function(row, cmOutputFolder, balanceFolder) {
  balanceFile <- file.path(
    balanceFolder,
    sprintf("bal_t%s_c%s_o%s_a%s.rds",
            row$targetId, row$comparatorId, row$outcomeId, row$analysisId))
  if (file.exists(balanceFile)) {
    # Already computed in a previous run; keep the cached result.
    return(invisible(NULL))
  }
  ParallelLogger::logTrace("Creating covariate balance file ", balanceFile)
  cmData <- CohortMethod::loadCohortMethodData(
    file.path(cmOutputFolder, row$cohortMethodDataFolder))
  strata <- readRDS(file.path(cmOutputFolder, row$strataFile))
  balance <- CohortMethod::computeCovariateBalance(population = strata,
                                                   cohortMethodData = cmData)
  saveRDS(balance, balanceFile)
}
# Merge a human-readable analysis description column into `data`, keyed by the
# analysis ID column, and move the new column right after the ID column.
#
# data           Data frame containing an analysis ID column.
# IdColumnName   Name of the analysis ID column in `data`.
# nameColumnName Name to give the added description column.
# Returns `data` with the description column added (NA where no match).
addAnalysisDescription <- function(data, IdColumnName = "analysisId", nameColumnName = "analysisDescription") {
  # Load the analysis list only to map analysis IDs to descriptions.
  cmAnalysisListFile <- system.file("settings",
                                    "cmAnalysisList.json",
                                    package = "metforminanydrug")
  cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
  # NOTE: the previous version also re-applied the subgroup covariate settings
  # here (copied from runCohortMethod); that mutation never affected the
  # ID -> description lookup below, so the dead code was removed.
  idToName <- lapply(cmAnalysisList, function(x) data.frame(analysisId = x$analysisId, description = as.character(x$description)))
  idToName <- do.call("rbind", idToName)
  names(idToName)[1] <- IdColumnName
  names(idToName)[2] <- nameColumnName
  data <- merge(data, idToName, all.x = TRUE)
  # merge() appends the description as the last column; move it so it sits
  # directly after the ID column.
  idCol <- which(colnames(data) == IdColumnName)
  if (idCol < ncol(data) - 1) {
    data <- data[, c(1:idCol, ncol(data), (idCol + 1):(ncol(data) - 1))]
  }
  return(data)
}
# Build the list of target-comparator-outcomes objects from the packaged
# TcosOfInterest.csv plus the control outcomes for this study.
#
# outputFolder  Folder that may contain synthesized controls (AllControls.csv).
# Returns a list of CohortMethod targetComparatorOutcomes objects.
createTcos <- function(outputFolder) {
  pathToCsv <- system.file("settings", "TcosOfInterest.csv", package = "metforminanydrug")
  tcosOfInterest <- read.csv(pathToCsv, stringsAsFactors = FALSE)
  allControls <- getAllControls(outputFolder)
  # Unique target-comparator pairs across outcomes of interest and controls.
  tcs <- unique(rbind(tcosOfInterest[, c("targetId", "comparatorId")],
                      allControls[, c("targetId", "comparatorId")]))
  createTco <- function(i) {
    targetId <- tcs$targetId[i]
    comparatorId <- tcs$comparatorId[i]
    # Outcomes of interest for this pair (semicolon-separated), plus controls.
    outcomeIds <- as.character(tcosOfInterest$outcomeIds[tcosOfInterest$targetId == targetId & tcosOfInterest$comparatorId == comparatorId])
    outcomeIds <- as.numeric(strsplit(outcomeIds, split = ";")[[1]])
    outcomeIds <- c(outcomeIds, allControls$outcomeId[allControls$targetId == targetId & allControls$comparatorId == comparatorId])
    excludeConceptIds <- as.character(tcosOfInterest$excludedCovariateConceptIds[tcosOfInterest$targetId == targetId & tcosOfInterest$comparatorId == comparatorId])
    if (length(excludeConceptIds) == 1 && is.na(excludeConceptIds)) {
      excludeConceptIds <- c()
    } else if (length(excludeConceptIds) > 0) {
      excludeConceptIds <- as.numeric(strsplit(excludeConceptIds, split = ";")[[1]])
    }
    includeConceptIds <- as.character(tcosOfInterest$includedCovariateConceptIds[tcosOfInterest$targetId == targetId & tcosOfInterest$comparatorId == comparatorId])
    if (length(includeConceptIds) == 1 && is.na(includeConceptIds)) {
      includeConceptIds <- c()
    } else if (length(includeConceptIds) > 0) {
      # BUG FIX: this branch previously split excludeConceptIds, so the
      # included covariate concept IDs were silently taken from the
      # excluded-covariates column.
      includeConceptIds <- as.numeric(strsplit(includeConceptIds, split = ";")[[1]])
    }
    tco <- CohortMethod::createTargetComparatorOutcomes(targetId = targetId,
                                                        comparatorId = comparatorId,
                                                        outcomeIds = outcomeIds,
                                                        excludedCovariateConceptIds = excludeConceptIds,
                                                        includedCovariateConceptIds = includeConceptIds)
    return(tco)
  }
  tcosList <- lapply(seq_len(nrow(tcs)), createTco)
  return(tcosList)
}
# Collect the unique outcome cohort IDs listed in the packaged
# TcosOfInterest.csv; the outcomeIds column is semicolon-separated.
getOutcomesOfInterest <- function() {
  tcoFile <- system.file("settings", "TcosOfInterest.csv", package = "metforminanydrug")
  tcosOfInterest <- read.csv(tcoFile, stringsAsFactors = FALSE)
  splitIds <- strsplit(as.character(tcosOfInterest$outcomeIds), split = ";")
  unique(as.numeric(unlist(splitIds)))
}
# Load the control outcomes for the study. Prefers the synthesized controls
# file in outputFolder (positive + negative); otherwise falls back to the
# packaged negative controls, adding the columns the synthesized file carries.
getAllControls <- function(outputFolder) {
  syntheticControlsFile <- file.path(outputFolder, "AllControls.csv")
  if (file.exists(syntheticControlsFile)) {
    # Positive controls have been synthesized: file holds both kinds.
    controls <- read.csv(syntheticControlsFile)
  } else {
    # Negative controls only, straight from the package settings.
    negControlsFile <- system.file("settings", "NegativeControls.csv", package = "metforminanydrug")
    controls <- read.csv(negControlsFile)
    controls$oldOutcomeId <- controls$outcomeId
    controls$targetEffectSize <- rep(1, nrow(controls))
  }
  return(controls)
}
| /metforminanydrug/R/CohortMethod.R | permissive | rlawodud3920/CDM-Rpackage-2020 | R | false | false | 11,724 | r | # Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of metforminanydrug
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Run CohortMethod package
#'
#' @details
#' Run the CohortMethod package, which implements the comparative cohort design.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder where the results were generated; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param maxCores How many parallel cores should be used? If more cores are made available
#' this can speed up the analyses.
#'
#' @export
runCohortMethod <- function(connectionDetails,
                            cdmDatabaseSchema,
                            cohortDatabaseSchema,
                            cohortTable,
                            oracleTempSchema,
                            outputFolder,
                            maxCores) {
  # All CohortMethod intermediate artifacts go into a dedicated sub-folder.
  cmOutputFolder <- file.path(outputFolder, "cmOutput")
  if (!file.exists(cmOutputFolder)) {
    dir.create(cmOutputFolder)
  }
  # Load the pre-specified analysis settings shipped with this study package.
  cmAnalysisListFile <- system.file("settings",
                                    "cmAnalysisList.json",
                                    package = "metforminanydrug")
  cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
  # Append subgroup covariate settings (days -99999 to -1 relative to index,
  # analysis ID 998) to every analysis' covariate settings.
  SubgroupCovSet <- createSubgroupCovariateSettings(windowStart = -99999, windowEnd = -1, analysisId = 998)
  for (i in seq_along(cmAnalysisList)){
    cmAnalysisList[[i]]$getDbCohortMethodDataArgs$covariateSettings <- list(cmAnalysisList[[i]]$getDbCohortMethodDataArgs$covariateSettings, SubgroupCovSet)
  }
  tcosList <- createTcos(outputFolder = outputFolder)
  outcomesOfInterest <- getOutcomesOfInterest()
  # Run all analyses. Thread counts are scaled from maxCores per step, since
  # the steps differ in memory and CPU demand.
  results <- CohortMethod::runCmAnalyses(connectionDetails = connectionDetails,
                                         cdmDatabaseSchema = cdmDatabaseSchema,
                                         exposureDatabaseSchema = cohortDatabaseSchema,
                                         exposureTable = cohortTable,
                                         outcomeDatabaseSchema = cohortDatabaseSchema,
                                         outcomeTable = cohortTable,
                                         outputFolder = cmOutputFolder,
                                         oracleTempSchema = oracleTempSchema,
                                         cmAnalysisList = cmAnalysisList,
                                         targetComparatorOutcomesList = tcosList,
                                         getDbCohortMethodDataThreads = min(3, maxCores),
                                         createStudyPopThreads = min(3, maxCores),
                                         createPsThreads = max(1, round(maxCores/10)),
                                         psCvThreads = min(10, maxCores),
                                         trimMatchStratifyThreads = min(10, maxCores),
                                         fitOutcomeModelThreads = max(1, round(maxCores/4)),
                                         outcomeCvThreads = min(4, maxCores),
                                         refitPsForEveryOutcome = FALSE,
                                         outcomeIdsOfInterest = outcomesOfInterest)
  ParallelLogger::logInfo("Summarizing results")
  analysisSummary <- CohortMethod::summarizeAnalyses(referenceTable = results,
                                                     outputFolder = cmOutputFolder)
  # Attach human-readable cohort and analysis names before exporting the CSV.
  analysisSummary <- addCohortNames(analysisSummary, "targetId", "targetName")
  analysisSummary <- addCohortNames(analysisSummary, "comparatorId", "comparatorName")
  analysisSummary <- addCohortNames(analysisSummary, "outcomeId", "outcomeName")
  analysisSummary <- addAnalysisDescription(analysisSummary, "analysisId", "analysisDescription")
  write.csv(analysisSummary, file.path(outputFolder, "analysisSummary.csv"), row.names = FALSE)
  ParallelLogger::logInfo("Computing covariate balance")
  balanceFolder <- file.path(outputFolder, "balance")
  if (!file.exists(balanceFolder)) {
    dir.create(balanceFolder)
  }
  # Balance is computed only for outcomes of interest that were actually
  # stratified/matched (non-empty strataFile), in parallel over rows.
  subset <- results[results$outcomeId %in% outcomesOfInterest,]
  subset <- subset[subset$strataFile != "", ]
  if (nrow(subset) > 0) {
    subset <- split(subset, seq(nrow(subset)))
    cluster <- ParallelLogger::makeCluster(min(3, maxCores))
    ParallelLogger::clusterApply(cluster, subset, computeCovariateBalance, cmOutputFolder = cmOutputFolder, balanceFolder = balanceFolder)
    ParallelLogger::stopCluster(cluster)
  }
}
# Compute covariate balance for one reference-table row and cache it as an
# .rds file in balanceFolder. Work is skipped when the cached file exists.
computeCovariateBalance <- function(row, cmOutputFolder, balanceFolder) {
  balanceFile <- file.path(
    balanceFolder,
    sprintf("bal_t%s_c%s_o%s_a%s.rds",
            row$targetId, row$comparatorId, row$outcomeId, row$analysisId))
  if (file.exists(balanceFile)) {
    # Already computed in a previous run; keep the cached result.
    return(invisible(NULL))
  }
  ParallelLogger::logTrace("Creating covariate balance file ", balanceFile)
  cmData <- CohortMethod::loadCohortMethodData(
    file.path(cmOutputFolder, row$cohortMethodDataFolder))
  strata <- readRDS(file.path(cmOutputFolder, row$strataFile))
  balance <- CohortMethod::computeCovariateBalance(population = strata,
                                                   cohortMethodData = cmData)
  saveRDS(balance, balanceFile)
}
# Merge a human-readable analysis description column into `data`, keyed by the
# analysis ID column, and move the new column right after the ID column.
#
# data           Data frame containing an analysis ID column.
# IdColumnName   Name of the analysis ID column in `data`.
# nameColumnName Name to give the added description column.
# Returns `data` with the description column added (NA where no match).
addAnalysisDescription <- function(data, IdColumnName = "analysisId", nameColumnName = "analysisDescription") {
  # Load the analysis list only to map analysis IDs to descriptions.
  cmAnalysisListFile <- system.file("settings",
                                    "cmAnalysisList.json",
                                    package = "metforminanydrug")
  cmAnalysisList <- CohortMethod::loadCmAnalysisList(cmAnalysisListFile)
  # NOTE: the previous version also re-applied the subgroup covariate settings
  # here (copied from runCohortMethod); that mutation never affected the
  # ID -> description lookup below, so the dead code was removed.
  idToName <- lapply(cmAnalysisList, function(x) data.frame(analysisId = x$analysisId, description = as.character(x$description)))
  idToName <- do.call("rbind", idToName)
  names(idToName)[1] <- IdColumnName
  names(idToName)[2] <- nameColumnName
  data <- merge(data, idToName, all.x = TRUE)
  # merge() appends the description as the last column; move it so it sits
  # directly after the ID column.
  idCol <- which(colnames(data) == IdColumnName)
  if (idCol < ncol(data) - 1) {
    data <- data[, c(1:idCol, ncol(data), (idCol + 1):(ncol(data) - 1))]
  }
  return(data)
}
# Build the list of target-comparator-outcomes objects from the packaged
# TcosOfInterest.csv plus the control outcomes for this study.
#
# outputFolder  Folder that may contain synthesized controls (AllControls.csv).
# Returns a list of CohortMethod targetComparatorOutcomes objects.
createTcos <- function(outputFolder) {
  pathToCsv <- system.file("settings", "TcosOfInterest.csv", package = "metforminanydrug")
  tcosOfInterest <- read.csv(pathToCsv, stringsAsFactors = FALSE)
  allControls <- getAllControls(outputFolder)
  # Unique target-comparator pairs across outcomes of interest and controls.
  tcs <- unique(rbind(tcosOfInterest[, c("targetId", "comparatorId")],
                      allControls[, c("targetId", "comparatorId")]))
  createTco <- function(i) {
    targetId <- tcs$targetId[i]
    comparatorId <- tcs$comparatorId[i]
    # Outcomes of interest for this pair (semicolon-separated), plus controls.
    outcomeIds <- as.character(tcosOfInterest$outcomeIds[tcosOfInterest$targetId == targetId & tcosOfInterest$comparatorId == comparatorId])
    outcomeIds <- as.numeric(strsplit(outcomeIds, split = ";")[[1]])
    outcomeIds <- c(outcomeIds, allControls$outcomeId[allControls$targetId == targetId & allControls$comparatorId == comparatorId])
    excludeConceptIds <- as.character(tcosOfInterest$excludedCovariateConceptIds[tcosOfInterest$targetId == targetId & tcosOfInterest$comparatorId == comparatorId])
    if (length(excludeConceptIds) == 1 && is.na(excludeConceptIds)) {
      excludeConceptIds <- c()
    } else if (length(excludeConceptIds) > 0) {
      excludeConceptIds <- as.numeric(strsplit(excludeConceptIds, split = ";")[[1]])
    }
    includeConceptIds <- as.character(tcosOfInterest$includedCovariateConceptIds[tcosOfInterest$targetId == targetId & tcosOfInterest$comparatorId == comparatorId])
    if (length(includeConceptIds) == 1 && is.na(includeConceptIds)) {
      includeConceptIds <- c()
    } else if (length(includeConceptIds) > 0) {
      # BUG FIX: this branch previously split excludeConceptIds, so the
      # included covariate concept IDs were silently taken from the
      # excluded-covariates column.
      includeConceptIds <- as.numeric(strsplit(includeConceptIds, split = ";")[[1]])
    }
    tco <- CohortMethod::createTargetComparatorOutcomes(targetId = targetId,
                                                        comparatorId = comparatorId,
                                                        outcomeIds = outcomeIds,
                                                        excludedCovariateConceptIds = excludeConceptIds,
                                                        includedCovariateConceptIds = includeConceptIds)
    return(tco)
  }
  tcosList <- lapply(seq_len(nrow(tcs)), createTco)
  return(tcosList)
}
# Collect the unique outcome cohort IDs listed in the packaged
# TcosOfInterest.csv; the outcomeIds column is semicolon-separated.
getOutcomesOfInterest <- function() {
  tcoFile <- system.file("settings", "TcosOfInterest.csv", package = "metforminanydrug")
  tcosOfInterest <- read.csv(tcoFile, stringsAsFactors = FALSE)
  splitIds <- strsplit(as.character(tcosOfInterest$outcomeIds), split = ";")
  unique(as.numeric(unlist(splitIds)))
}
# Load the control outcomes for the study. Prefers the synthesized controls
# file in outputFolder (positive + negative); otherwise falls back to the
# packaged negative controls, adding the columns the synthesized file carries.
getAllControls <- function(outputFolder) {
  syntheticControlsFile <- file.path(outputFolder, "AllControls.csv")
  if (file.exists(syntheticControlsFile)) {
    # Positive controls have been synthesized: file holds both kinds.
    controls <- read.csv(syntheticControlsFile)
  } else {
    # Negative controls only, straight from the package settings.
    negControlsFile <- system.file("settings", "NegativeControls.csv", package = "metforminanydrug")
    controls <- read.csv(negControlsFile)
    controls$oldOutcomeId <- controls$outcomeId
    controls$targetEffectSize <- rep(1, nrow(controls))
  }
  return(controls)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/celda_CG.R
\name{simulateCellscelda_CG}
\alias{simulateCellscelda_CG}
\title{Simulate cells from the celda_CG model}
\usage{
simulateCellscelda_CG(model, S = 5, CRange = c(50, 100),
NRange = c(500, 1000), G = 100, K = 5, L = 10, alpha = 1,
beta = 1, gamma = 5, delta = 1, ...)
}
\arguments{
\item{model}{Character. Options available in `celda::availableModels`.}
\item{S}{Integer. Number of samples to simulate. Default 5.}
\item{CRange}{Integer vector. A vector of length 2 that specifies the lower
and upper bounds of the number of cells to be generated in each sample.
Default c(50, 100).}
\item{NRange}{Integer vector. A vector of length 2 that specifies the lower
and upper bounds of the number of counts generated for each cell. Default
c(500, 1000).}
\item{G}{Integer. The total number of features to be simulated. Default 100.}
\item{K}{Integer. Number of cell populations. Default 5.}
\item{L}{Integer. Number of feature modules. Default 10.}
\item{alpha}{Numeric. Concentration parameter for Theta. Adds a pseudocount
to each cell population in each sample. Default 1.}
\item{beta}{Numeric. Concentration parameter for Phi. Adds a pseudocount to
each feature module in each cell population. Default 1.}
\item{gamma}{Numeric. Concentration parameter for Eta. Adds a pseudocount to
the number of features in each module. Default 5.}
\item{delta}{Numeric. Concentration parameter for Psi. Adds a pseudocount to
each feature in each module. Default 1.}
\item{...}{Additional parameters.}
}
\value{
List. Contains the simulated matrix `counts`, cell population
clusters `z`, feature module clusters `y`, sample assignments `sampleLabel`,
and input parameters.
}
\description{
Generates a simulated counts matrix, cell subpopulation
clusters, sample labels, and feature module clusters according to the
generative process of the celda_CG model.
}
\examples{
celdaCGSim <- simulateCells(model = "celda_CG")
}
\seealso{
`celda_C()` for simulating cell subpopulations and `celda_G()` for
simulating feature modules.
}
| /man/simulateCellscelda_CG.Rd | permissive | definitelysean/celda | R | false | true | 2,117 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/celda_CG.R
\name{simulateCellscelda_CG}
\alias{simulateCellscelda_CG}
\title{Simulate cells from the celda_CG model}
\usage{
simulateCellscelda_CG(model, S = 5, CRange = c(50, 100),
NRange = c(500, 1000), G = 100, K = 5, L = 10, alpha = 1,
beta = 1, gamma = 5, delta = 1, ...)
}
\arguments{
\item{model}{Character. Options available in `celda::availableModels`.}
\item{S}{Integer. Number of samples to simulate. Default 5.}
\item{CRange}{Integer vector. A vector of length 2 that specifies the lower
and upper bounds of the number of cells to be generated in each sample.
Default c(50, 100).}
\item{NRange}{Integer vector. A vector of length 2 that specifies the lower
and upper bounds of the number of counts generated for each cell. Default
c(500, 1000).}
\item{G}{Integer. The total number of features to be simulated. Default 100.}
\item{K}{Integer. Number of cell populations. Default 5.}
\item{L}{Integer. Number of feature modules. Default 10.}
\item{alpha}{Numeric. Concentration parameter for Theta. Adds a pseudocount
to each cell population in each sample. Default 1.}
\item{beta}{Numeric. Concentration parameter for Phi. Adds a pseudocount to
each feature module in each cell population. Default 1.}
\item{gamma}{Numeric. Concentration parameter for Eta. Adds a pseudocount to
the number of features in each module. Default 5.}
\item{delta}{Numeric. Concentration parameter for Psi. Adds a pseudocount to
each feature in each module. Default 1.}
\item{...}{Additional parameters.}
}
\value{
List. Contains the simulated matrix `counts`, cell population
clusters `z`, feature module clusters `y`, sample assignments `sampleLabel`,
and input parameters.
}
\description{
Generates a simulated counts matrix, cell subpopulation
clusters, sample labels, and feature module clusters according to the
generative process of the celda_CG model.
}
\examples{
celdaCGSim <- simulateCells(model = "celda_CG")
}
\seealso{
`celda_C()` for simulating cell subpopulations and `celda_G()` for
simulating feature modules.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\docType{methods}
\name{combinable}
\alias{combinable}
\title{Test Whether Objects can be Combined}
\usage{
combinable(a, b)
}
\arguments{
\item{a}{First object.}
\item{b}{Second object.}
}
\description{
Test whether two objects are combinable.
}
\details{
Combinable takes two objects and returns TRUE or FALSE for whether
they can be combined. The \link{combine} function makes use of this test
to implement conditional logic.
}
| /man/combinable-methods.Rd | no_license | gravesee/onx | R | false | true | 521 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\docType{methods}
\name{combinable}
\alias{combinable}
\title{Test Whether Objects can be Combined}
\usage{
combinable(a, b)
}
\arguments{
\item{a}{First object.}
\item{b}{Second object.}
}
\description{
Test whether two objects are combinable.
}
\details{
Combinable takes two objects and returns TRUE or FALSE for whether
they can be combined. The \link{combine} function makes use of this test
to implement conditional logic.
}
|
\name{Many univariate generalised linear models}
\alias{univglms}
\alias{univglms2}
\title{
Many univariate generalised linear regressions
}
\description{
It performs very many univariate generalised linear regressions.
}
\usage{
univglms(y, x, oiko = NULL, logged = FALSE)
univglms2(y, x, oiko = NULL, logged = FALSE)
}
\arguments{
\item{y}{
The dependent variable. It can be a factor or a numerical variable with two values only (binary logistic
regression), a discrete valued vector (count data) corresponding to a poisson regression or a numerical
vector with continuous values (normal regression).
}
\item{x}{
A matrix with the data, where the rows denote the samples (and the two groups) and the
columns are the variables. For the "univglms" only continuous variables are allowed. You are advised to
standardise the data beforehand to avoid numerical overflow or similar issues.
If you see NaN in the outcome, this might be the case. For the "univglms2" categorical variables are allowed
and hence this accepts data.frames. In this case, the categorical variables must be given as factor variables,
otherwise you might get wrong results.
}
\item{oiko}{
This can be either "normal", "poisson", "quasipoisson" or "binomial". If you are not sure leave it NULL and
the function will check internally. However, you might have discrete data (e.g. years of age)
and want to perform many simple linear regressions. In this case you should specify the family.
}
\item{logged}{
A boolean variable; it will return the logarithm of the pvalue if set to TRUE.
}
}
\details{
If you specify no family of distributions the function internally checks the type of your data and
decides on the type of regression to perform. The function is written in C++, which is why it is very
fast. It can accept thousands of predictor variables. It is useful for univariate screening. We provide
no p-value correction (such as fdr or q-values); this is up to the user.
}
\value{
A matrix with the test statistic and the p-value for each predictor variable.
}
\references{
Draper, N.R. and Smith H. (1988). Applied regression analysis. New York, Wiley, 3rd edition.
McCullagh, Peter, and John A. Nelder. Generalized linear models. CRC press, USA, 2nd edition, 1989.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr> and Manos Papadakis <papadakm95@gmail.com>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{logistic_only}, \link{poisson_only}, \link{allbetas}, \link{correls}, \link{regression}
}
}
\examples{
\dontrun{
x <- matrnorm(100, 50)
y <- rbinom(100, 1, 0.6) ## binary logistic regression
a1 <- univglms(y, x)
a2 <- glm(y ~ x[, 1], binomial)$deviance
a2 <- glm(y ~ 1, binomial)$null.dev - a2
x <- NULL
}
}
| /fuzzedpackages/Rfast/man/univglms.Rd | no_license | akhikolla/testpackages | R | false | false | 2,870 | rd | \name{Many univariate generalised linear models}
\alias{univglms}
\alias{univglms2}
\title{
Many univariate generalised linear regressions
}
\description{
It performs very many univariate generalised linear regressions.
}
\usage{
univglms(y, x, oiko = NULL, logged = FALSE)
univglms2(y, x, oiko = NULL, logged = FALSE)
}
\arguments{
\item{y}{
The dependent variable. It can be a factor or a numerical variable with two values only (binary logistic
regression), a discrete valued vector (count data) corresponding to a poisson regression or a numerical
vector with continuous values (normal regression).
}
\item{x}{
A matrix with the data, where the rows denote the samples (and the two groups) and the
columns are the variables. For the "univglms" only continuous variables are allowed. You are advised to
standardise the data beforehand to avoid numerical overflow or similar issues.
If you see NaN in the outcome, this might be the case. For the "univglms2" categorical variables are allowed
and hence this accepts data.frames. In this case, the categorical variables must be given as factor variables,
otherwise you might get wrong results.
}
\item{oiko}{
This can be either "normal", "poisson", "quasipoisson" or "binomial". If you are not sure leave it NULL and
the function will check internally. However, you might have discrete data (e.g. years of age)
and want to perform many simple linear regressions. In this case you should specify the family.
}
\item{logged}{
A boolean variable; it will return the logarithm of the pvalue if set to TRUE.
}
}
\details{
If you specify no family of distributions the function internally checks the type of your data and
decides on the type of regression to perform. The function is written in C++, which is why it is very
fast. It can accept thousands of predictor variables. It is useful for univariate screening. We provide
no p-value correction (such as fdr or q-values); this is up to the user.
}
\value{
A matrix with the test statistic and the p-value for each predictor variable.
}
\references{
Draper, N.R. and Smith H. (1988). Applied regression analysis. New York, Wiley, 3rd edition.
McCullagh, Peter, and John A. Nelder. Generalized linear models. CRC press, USA, 2nd edition, 1989.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@yahoo.gr> and Manos Papadakis <papadakm95@gmail.com>.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{logistic_only}, \link{poisson_only}, \link{allbetas}, \link{correls}, \link{regression}
}
}
\examples{
\dontrun{
x <- matrnorm(100, 50)
y <- rbinom(100, 1, 0.6) ## binary logistic regression
a1 <- univglms(y, x)
a2 <- glm(y ~ x[, 1], binomial)$deviance
a2 <- glm(y ~ 1, binomial)$null.dev - a2
x <- NULL
}
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{odors3}
\alias{odors3}
\title{odors3 data}
\format{\Sexpr[results=rd]{bps6data:::doc_bps6_data("odors3") }}
\source{
\url{ http://content.bfwpub.com/webroot_pubcontent/Content/BCS_5/BPS6e/Student/DataSets/PC_Text/PC_Text.zip }
}
\usage{
data("odors3")
}
\description{
Dataset odors3 from \emph{BPS} 6th edition (Chapter(s) 19).
}
\references{
Moore, David S., William I. Notz, and Michael A. Fligner. 2011. \emph{The Basic Practice of Statistics}: 6th edition. New York: W. H. Freeman.
}
| /man/odors3.Rd | no_license | jrnold/bps6data | R | false | false | 603 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{odors3}
\alias{odors3}
\title{odors3 data}
\format{\Sexpr[results=rd]{bps6data:::doc_bps6_data("odors3") }}
\source{
\url{ http://content.bfwpub.com/webroot_pubcontent/Content/BCS_5/BPS6e/Student/DataSets/PC_Text/PC_Text.zip }
}
\usage{
data("odors3")
}
\description{
Dataset odors3 from \emph{BPS} 6th edition (Chapter(s) 19).
}
\references{
Moore, David S., William I. Notz, and Michael A. Fligner. 2011. \emph{The Basic Practice of Statistics}: 6th edition. New York: W. H. Freeman.
}
|
#############################################################
##### Script by Felix J. Hartmann, University of Zurich #####
#############################################################

###############################
##### tiny R introduction #####
###############################

# get help on a function
?matrix #get help

# vectors: create, print, inspect the class, and index a single element
v <- 1:10
print(v)
v
class(v)
v[2]

# matrices: 2 rows x 3 columns, filled column-wise from 1:6
m <- matrix(1:6, nrow = 2)
m
m[,2]
m[1,]

# naming rows and columns, then indexing by name
rownames(m)
colnames(m)
rownames(m) <- c("cell1", "cell2")
colnames(m) <- c("CD3", "CD4", "CD8")
m
m[,"CD4"]
############################
##### install packages #####
############################

# NOTE(review): biocLite was retired with Bioconductor 3.8; current sessions
# should use install.packages("BiocManager") + BiocManager::install() instead.
# Also note the installer is sourced over plain http, not https.
source("http://bioconductor.org/biocLite.R")
#biocLite("BiocUpgrade") # upgrading Bioconductor
biocLite("flowCore") # for handling of fcs files in R
biocLite("ggplot2") # for advanced data plotting
biocLite("gplots") # for heatmaps
biocLite("Rtsne") # for calculating tSNE
biocLite("RColorBrewer") # additional color schemes
biocLite("reshape2") # reorganizing data
biocLite("FlowSOM") # FlowSOM clustering
biocLite("plyr") # for data handling

##########################
##### open libraries #####
##########################
library(flowCore)
library(ggplot2)
library(gplots)
library(Rtsne)
library(RColorBrewer)
library(reshape2)
library(FlowSOM)
library(plyr)
################################################
##### reading the fcs files into a flowset #####
################################################
data_location <- "//Users/hartmann/Dropbox/Annual School of Cytometry PARTICIPANTS/WS Data analysis/files/"
fs <- read.flowSet(path = data_location,
pattern = ".fcs")
#################################
##### exploring the dataset #####
#################################
# exploring flowSets
fs
sampleNames(fs)
colnames(fs)
fs[[2]]
ff2 <- fs[[2]]
str(ff2)
# renaming channels
desc <- as.vector(ff2@parameters$desc)
desc
colnames(fs) <- desc
colnames(fs)
ff2 <- fs[[2]]
fs
#############################
##### data manipulation #####
#############################
# accessing the data of an fcs file
data2 <- exprs(ff2)
dim(data2)
head(data2)
class(data2)
# combine data into a matrix
data <- fsApply(fs, exprs)
head(data)
dim(data)
# make a gate.source vector
fs_rows <- fsApply(fs, nrow)[,1]
fs_rows
# make a gate.source vector: for each pooled event row, the index of the fcs
# file it came from (fs_rows holds the event count per file).
# rep(..., times =) replaces the original grow-in-a-loop construction, which
# re-copied the whole vector on every iteration, and seq_along() is safe even
# if fs_rows were empty (1:length(fs_rows) would iterate over c(1, 0)).
gate.source <- rep(seq_along(fs_rows), times = fs_rows)
tail(gate.source)
table(gate.source)
# add the gate.source vector to the file
data <- cbind(data, gate.source)
tail(data)
# plotting data
ggplot(data.frame(data[1:5000,]), aes(x = CD19, y = MHCII)) +
geom_point() +
theme(aspect.ratio = 1)
# transform data
asinh_scale <- 5
data.trans <- asinh(data / asinh_scale)
data.trans[,"gate.source"] <- data[,"gate.source"]
# plotting the transformed data
ggplot(data.frame(data.trans[1:5000,]), aes(x = CD19, y = MHCII)) +
geom_point() +
theme(aspect.ratio = 1)
####################################
##### percentile normalization #####
####################################
# percentile normalization for a random sequence of numbers
v <- 1:1000
v
quantile_value <- 0.999
quantile(v, quantile_value)
# calculating percentile for 1 vector
percentile.vector <- apply(data.trans, 2, function(x) quantile(x, quantile_value, names = F))
percentile.vector
data.trans <- t(t(data.trans) / as.numeric(percentile.vector))
data.trans[,"gate.source"] <- data[,"gate.source"]
head(data.trans)
# plotting the transformed data & normalized data
ggplot(data.frame(data.trans[1:5000,]), aes(x = CD19, y = MHCII)) +
geom_point() +
theme(aspect.ratio = 1)
#########################
##### tSNE analysis #####
#########################
# select clustering channels
colnames(data.trans)
clustering.channels <- colnames(data)[c(4:18, 20:27)]
clustering.channels
# subsample data for t-SNE
set.seed(123)
ix <- sample(1:nrow(data), 20000)
data_rtsne <- data.matrix(data.trans[ix,clustering.channels])
gate.source <- gate.source[ix]
dim(data_rtsne)
head(data_rtsne)
# run bh SNE
set.seed(123)
out_rtsne <- Rtsne(data_rtsne, dims = 2, max_iter = 1000, verbose = T, pca = F)
# prepare the tSNE data
tsne <- as.data.frame(out_rtsne$Y)
colnames(tsne) <- c("tSNE1", "tSNE2")
# plot tSNE
ggplot(tsne, aes(x = tSNE1, y = tSNE2)) +
geom_point(size = 0.5) +
coord_fixed(ratio = 1) +
ggtitle("tSNE map")
# prepare the expression data
data_rtsne <- cbind(data_rtsne, tsne, gate.source)
head(data_rtsne)
data_melt <- melt(data_rtsne, id.vars = c("tSNE1", "tSNE2", "gate.source"))
# define a color scheme
jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F",
"yellow", "#FF7F00", "red", "#7F0000"))
# plot tSNEs with expression overlayed
ggplot(data_melt, aes(x = tSNE1, y = tSNE2, color = value)) +
geom_point(size = 0.05) +
coord_fixed(ratio = 1) +
scale_colour_gradientn(colours = jet.colors(100), limits = c(0,1)) +
facet_wrap(~ variable, ncol = 8, scales = "free") +
ggtitle("tSNE map with expression values")
##############################
##### FlowSOM Clustering #####
##############################
# make a flowframe for FlowSOM
ff_new <- flowFrame(exprs = data.matrix(data_rtsne), desc = list(FIL = 1))
ff_new
# run FlowSOM (with set.seed for reproducibility)
set.seed(123)
out_fSOM <- FlowSOM::ReadInput(ff_new, transform = FALSE, scale = FALSE, compensate = FALSE)
out_fSOM <- FlowSOM::BuildSOM(out_fSOM, colsToUse = clustering.channels)
out_fSOM <- FlowSOM::BuildMST(out_fSOM)
labels <- out_fSOM$map$mapping[,1]
# plot pie charts
out_fSOM <- UpdateNodeSize(out_fSOM, reset = TRUE)
FlowSOM::PlotStars(out_fSOM, view = "grid", markers = clustering.channels)
out_fSOM <- UpdateNodeSize(out_fSOM)
FlowSOM::PlotStars(out_fSOM, view = "MST", markers = clustering.channels)
# try the suggested automatic metaclustering method for a hint for k
auto_meta <- MetaClustering(out_fSOM$map$codes, method = "metaClustering_consensus", max = 20)
max(auto_meta)
# select a k.value
choosen_k <- 12
# do a manual metaclustering
set.seed(123)
out_meta <- FlowSOM::metaClustering_consensus(out_fSOM$map$codes, k = choosen_k)
meta_results <- out_meta[labels]
# prepare the metaclustering data
data_meta <- cbind(tsne, meta_results, gate.source)
data_melt <- melt(data_meta, id.vars = c("tSNE1", "tSNE2", "gate.source"))
# plot tSNEs with clustering overlayed
ggplot(data_melt, aes(x = tSNE1, y = tSNE2, color = as.factor(value))) +
geom_point(size = 0.05) +
coord_fixed(ratio = 1) +
scale_color_manual(values = c(brewer.pal(12, "Paired"), brewer.pal(8, "Dark2"))) +
guides(colour = guide_legend(override.aes = list(size = 5))) +
ggtitle("tSNE map with overlaid metaclustering")
#####################################
##### analyse different samples #####
#####################################
# plot tSNEs for different groups
ggplot(data_melt, aes(x = tSNE1, y = tSNE2, color = as.factor(value))) +
geom_point(size = 0.2) +
coord_fixed(ratio = 1) +
scale_color_manual(values = c(brewer.pal(12, "Paired"), brewer.pal(8, "Dark2"))) +
facet_wrap(~ gate.source, ncol = 3, scales = "free") +
guides(colour = guide_legend(override.aes = list(size = 5))) +
ggtitle("tSNE for different samples")
#########################
##### make heatmaps #####
#########################
# make combined expression matrix
ce <- cbind(data_rtsne, meta_results)
# go through all clusters and calculate mean for every channel
# Per-cluster mean expression matrix: row i holds, for every clustering
# channel, the mean over events assigned to metacluster i.
# colMeans() gives the same result as apply(..., 2, mean) in one pass;
# seq_len() avoids the 1:n footgun; NA_real_ makes the placeholder type explicit.
heat_mat <- matrix(NA_real_, nrow = choosen_k, ncol = length(clustering.channels))
for (i in seq_len(choosen_k)) {
  cluster_events <- ce[ce[, "meta_results"] == i, clustering.channels]
  heat_mat[i, ] <- colMeans(cluster_events, na.rm = TRUE)
}
# rename
rownames(heat_mat) <- paste("cluster", 1:choosen_k, sep = "")
colnames(heat_mat) <- clustering.channels
heat_mat
# make a cluster heatmap
heatmap.2(heat_mat,
scale = "none",
Colv = T, Rowv = T,
trace = "none",
col = colorRampPalette(c("black", "gold"))(n = 9),
breaks = seq(0, 1, by = 0.1111111))
###################################
##### show sample composition #####
###################################
# calculate the frequency of every cluster in each sample
fm <- ddply(data_meta, .(gate.source), function(x) {
df <- data.frame(table(x$meta_results))
prop <- (df$Freq/sum(df$Freq))*100})
colnames(fm) <- c("gate.source", paste("cluster", 1:choosen_k, sep = "."))
fm
# show as bargraph plot
df_plot <- data.frame(fm)
df_plot$samples <- c("csf2 KO", "RAG", "WT")
head(df_plot)
# melt for ggplot
df_melt <- melt(df_plot, measure.vars = colnames(df_plot)[2:(choosen_k+1)])
# make stacked bargraphs
qplot(samples, data = df_melt, geom = "bar", weight = value, fill = variable)
| /data-analysis-workshop.R | no_license | hartmannfj/data-analysis-workshop | R | false | false | 8,975 | r |
#############################################################
##### Script by Felix J. Hartmann, University of Zurich #####
#############################################################
###############################
##### tiny R introduction #####
###############################
# get help
?matrix #get help
# vectors
v <- 1:10
print(v)
v
class(v)
v[2]
# matrices
m <- matrix(1:6, nrow = 2)
m
m[,2]
m[1,]
# naming
rownames(m)
colnames(m)
rownames(m) <- c("cell1", "cell2")
colnames(m) <- c("CD3", "CD4", "CD8")
m
m[,"CD4"]
############################
##### install packages #####
############################
source("http://bioconductor.org/biocLite.R")
#biocLite("BiocUpgrade") # upgrading Bioconductor
biocLite("flowCore") # for handling of fcs files in R
biocLite("ggplot2") # for advanced data plotting
biocLite("gplots") # for heatmaps
biocLite("Rtsne") # for calculating tSNE
biocLite("RColorBrewer") # additional color schemes
biocLite("reshape2") # reorganizing data
biocLite("FlowSOM") # FlowSOM clustering
biocLite("plyr") # for data handling
##########################
##### open libraries #####
##########################
library(flowCore)
library(ggplot2)
library(gplots)
library(Rtsne)
library(RColorBrewer)
library(reshape2)
library(FlowSOM)
library(plyr)
################################################
##### reading the fcs files into a flowset #####
################################################
data_location <- "//Users/hartmann/Dropbox/Annual School of Cytometry PARTICIPANTS/WS Data analysis/files/"
fs <- read.flowSet(path = data_location,
pattern = ".fcs")
#################################
##### exploring the dataset #####
#################################
# exploring flowSets
fs
sampleNames(fs)
colnames(fs)
fs[[2]]
ff2 <- fs[[2]]
str(ff2)
# renaming channels
desc <- as.vector(ff2@parameters$desc)
desc
colnames(fs) <- desc
colnames(fs)
ff2 <- fs[[2]]
fs
#############################
##### data manipulation #####
#############################
# accessing the data of an fcs file
data2 <- exprs(ff2)
dim(data2)
head(data2)
class(data2)
# combine data into a matrix
data <- fsApply(fs, exprs)
head(data)
dim(data)
# make a gate.source vector
fs_rows <- fsApply(fs, nrow)[,1]
fs_rows
# make a gate.source vector: for each pooled event row, the index of the fcs
# file it came from (fs_rows holds the event count per file).
# rep(..., times =) replaces the original grow-in-a-loop construction, which
# re-copied the whole vector on every iteration, and seq_along() is safe even
# if fs_rows were empty (1:length(fs_rows) would iterate over c(1, 0)).
gate.source <- rep(seq_along(fs_rows), times = fs_rows)
tail(gate.source)
table(gate.source)
# add the gate.source vector to the file
data <- cbind(data, gate.source)
tail(data)
# plotting data
ggplot(data.frame(data[1:5000,]), aes(x = CD19, y = MHCII)) +
geom_point() +
theme(aspect.ratio = 1)
# transform data
asinh_scale <- 5
data.trans <- asinh(data / asinh_scale)
data.trans[,"gate.source"] <- data[,"gate.source"]
# plotting the transformed data
ggplot(data.frame(data.trans[1:5000,]), aes(x = CD19, y = MHCII)) +
geom_point() +
theme(aspect.ratio = 1)
####################################
##### percentile normalization #####
####################################
# percentile normalization for a random sequence of numbers
v <- 1:1000
v
quantile_value <- 0.999
quantile(v, quantile_value)
# calculating percentile for 1 vector
percentile.vector <- apply(data.trans, 2, function(x) quantile(x, quantile_value, names = F))
percentile.vector
data.trans <- t(t(data.trans) / as.numeric(percentile.vector))
data.trans[,"gate.source"] <- data[,"gate.source"]
head(data.trans)
# plotting the transformed data & normalized data
ggplot(data.frame(data.trans[1:5000,]), aes(x = CD19, y = MHCII)) +
geom_point() +
theme(aspect.ratio = 1)
#########################
##### tSNE analysis #####
#########################
# select clustering channels
colnames(data.trans)
clustering.channels <- colnames(data)[c(4:18, 20:27)]
clustering.channels
# subsample data for t-SNE
set.seed(123)
ix <- sample(1:nrow(data), 20000)
data_rtsne <- data.matrix(data.trans[ix,clustering.channels])
gate.source <- gate.source[ix]
dim(data_rtsne)
head(data_rtsne)
# run bh SNE
set.seed(123)
out_rtsne <- Rtsne(data_rtsne, dims = 2, max_iter = 1000, verbose = T, pca = F)
# prepare the tSNE data
tsne <- as.data.frame(out_rtsne$Y)
colnames(tsne) <- c("tSNE1", "tSNE2")
# plot tSNE
ggplot(tsne, aes(x = tSNE1, y = tSNE2)) +
geom_point(size = 0.5) +
coord_fixed(ratio = 1) +
ggtitle("tSNE map")
# prepare the expression data
data_rtsne <- cbind(data_rtsne, tsne, gate.source)
head(data_rtsne)
data_melt <- melt(data_rtsne, id.vars = c("tSNE1", "tSNE2", "gate.source"))
# define a color scheme
jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F",
"yellow", "#FF7F00", "red", "#7F0000"))
# plot tSNEs with expression overlayed
ggplot(data_melt, aes(x = tSNE1, y = tSNE2, color = value)) +
geom_point(size = 0.05) +
coord_fixed(ratio = 1) +
scale_colour_gradientn(colours = jet.colors(100), limits = c(0,1)) +
facet_wrap(~ variable, ncol = 8, scales = "free") +
ggtitle("tSNE map with expression values")
##############################
##### FlowSOM Clustering #####
##############################
# make a flowframe for FlowSOM
ff_new <- flowFrame(exprs = data.matrix(data_rtsne), desc = list(FIL = 1))
ff_new
# run FlowSOM (with set.seed for reproducibility)
set.seed(123)
out_fSOM <- FlowSOM::ReadInput(ff_new, transform = FALSE, scale = FALSE, compensate = FALSE)
out_fSOM <- FlowSOM::BuildSOM(out_fSOM, colsToUse = clustering.channels)
out_fSOM <- FlowSOM::BuildMST(out_fSOM)
labels <- out_fSOM$map$mapping[,1]
# plot pie charts
out_fSOM <- UpdateNodeSize(out_fSOM, reset = TRUE)
FlowSOM::PlotStars(out_fSOM, view = "grid", markers = clustering.channels)
out_fSOM <- UpdateNodeSize(out_fSOM)
FlowSOM::PlotStars(out_fSOM, view = "MST", markers = clustering.channels)
# try the suggested automatic metaclustering method for a hint for k
auto_meta <- MetaClustering(out_fSOM$map$codes, method = "metaClustering_consensus", max = 20)
max(auto_meta)
# select a k.value
choosen_k <- 12
# do a manual metaclustering
set.seed(123)
out_meta <- FlowSOM::metaClustering_consensus(out_fSOM$map$codes, k = choosen_k)
meta_results <- out_meta[labels]
# prepare the metaclustering data
data_meta <- cbind(tsne, meta_results, gate.source)
data_melt <- melt(data_meta, id.vars = c("tSNE1", "tSNE2", "gate.source"))
# plot tSNEs with clustering overlayed
ggplot(data_melt, aes(x = tSNE1, y = tSNE2, color = as.factor(value))) +
geom_point(size = 0.05) +
coord_fixed(ratio = 1) +
scale_color_manual(values = c(brewer.pal(12, "Paired"), brewer.pal(8, "Dark2"))) +
guides(colour = guide_legend(override.aes = list(size = 5))) +
ggtitle("tSNE map with overlaid metaclustering")
#####################################
##### analyse different samples #####
#####################################
# plot tSNEs for different groups
ggplot(data_melt, aes(x = tSNE1, y = tSNE2, color = as.factor(value))) +
geom_point(size = 0.2) +
coord_fixed(ratio = 1) +
scale_color_manual(values = c(brewer.pal(12, "Paired"), brewer.pal(8, "Dark2"))) +
facet_wrap(~ gate.source, ncol = 3, scales = "free") +
guides(colour = guide_legend(override.aes = list(size = 5))) +
ggtitle("tSNE for different samples")
#########################
##### make heatmaps #####
#########################
# make combined expression matrix
ce <- cbind(data_rtsne, meta_results)
# go through all clusters and calculate mean for every channel
# Per-cluster mean expression matrix: row i holds, for every clustering
# channel, the mean over events assigned to metacluster i.
# colMeans() gives the same result as apply(..., 2, mean) in one pass;
# seq_len() avoids the 1:n footgun; NA_real_ makes the placeholder type explicit.
heat_mat <- matrix(NA_real_, nrow = choosen_k, ncol = length(clustering.channels))
for (i in seq_len(choosen_k)) {
  cluster_events <- ce[ce[, "meta_results"] == i, clustering.channels]
  heat_mat[i, ] <- colMeans(cluster_events, na.rm = TRUE)
}
# rename
rownames(heat_mat) <- paste("cluster", 1:choosen_k, sep = "")
colnames(heat_mat) <- clustering.channels
heat_mat
# make a cluster heatmap
heatmap.2(heat_mat,
scale = "none",
Colv = T, Rowv = T,
trace = "none",
col = colorRampPalette(c("black", "gold"))(n = 9),
breaks = seq(0, 1, by = 0.1111111))
###################################
##### show sample composition #####
###################################
# calculate the frequency of every cluster in each sample
fm <- ddply(data_meta, .(gate.source), function(x) {
df <- data.frame(table(x$meta_results))
prop <- (df$Freq/sum(df$Freq))*100})
colnames(fm) <- c("gate.source", paste("cluster", 1:choosen_k, sep = "."))
fm
# show as bargraph plot
df_plot <- data.frame(fm)
df_plot$samples <- c("csf2 KO", "RAG", "WT")
head(df_plot)
# melt for ggplot
df_melt <- melt(df_plot, measure.vars = colnames(df_plot)[2:(choosen_k+1)])
# make stacked bargraphs
qplot(samples, data = df_melt, geom = "bar", weight = value, fill = variable)
|
# Integration tests for updating values in array variables (categorical array
# and multiple-response subvariables) on a live Crunch dataset.
# NOTE(review): with_test_authentication() presumably handles API login and
# dataset teardown -- confirm against the package's test helpers.
context("Update array variables")
with_test_authentication({
# Build a test dataset from the mrdf fixture, with array setup applied.
ds <- mrdf.setup(newDataset(mrdf))
# Baseline check of the fixture values, so the assertions after mutation
# below demonstrate exactly what changed.
test_that("Subvariable values before trying to update", {
expect_equivalent(as.vector(ds$CA$mr_1),
as.factor(c("1.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_2),
as.factor(c("0.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_3),
as.factor(c("0.0", "0.0", "1.0", NA)))
})
# Assigning to the whole array with a row filter should write "1.0" into
# every subvariable, but only for rows where v4 == "B" (here: row 1).
test_that("Can update array subvariables", {
ds$CA[ds$v4 == "B"] <- "1.0"
expect_equivalent(as.vector(ds$CA$mr_1),
as.factor(c("1.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_2),
as.factor(c("1.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_3),
as.factor(c("1.0", "0.0", "1.0", NA)))
})
# Switch to the richer "apidocs" fixture dataset for the remaining tests.
ds <- newDatasetFromFixture("apidocs")
# Updating a single subvariable at one integer row index (mode="id" reads
# category ids rather than labels).
test_that("Can update an individual subvariable conditionally", {
expect_equivalent(as.vector(ds$allpets$allpets_1, mode="id")[1:5],
c(1, 9, 1, 1, 9))
ds$allpets$allpets_1[2] <- 1
expect_equivalent(as.vector(ds$allpets$allpets_1, mode="id")[1:5],
c(1, 1, 1, 1, 9))
})
# Filtering rows by equality of two subvariables, then assigning into a third
# subvariable on that filter.
test_that("Can update where two subvariables are equal", {
expect_equal(diag(as.array(crtabs(~ allpets$allpets_1 + allpets$allpets_2,
data=ds, useNA="always"))),
c(`not selected`=1, selected=1, `not asked`=3, skipped=1))
expect_equal(as.array(crtabs(~ allpets$allpets_1,
data=ds[ds$allpets$allpets_1 == ds$allpets$allpets_2], useNA="always")),
array(c(1, 1, 3, 1),
dim=4L,
dimnames=list(
allpets_1=c("not selected", "selected", "not asked", "skipped")
)))
expect_equal(as.array(crtabs(~ allpets$allpets_3,
data=ds[ds$allpets$allpets_1 == ds$allpets$allpets_2], useNA="always")),
array(c(3, 0, 1, 2),
dim=4L,
dimnames=list(
allpets_3=c("not selected", "selected", "not asked", "skipped")
)))
expect_length(as.vector(ds$q1[ds$allpets$allpets_1 == ds$allpets$allpets_2]),
6)
# The actual update under test: all 6 filtered rows become "selected".
ds$allpets$allpets_3[ds$allpets$allpets_1 == ds$allpets$allpets_2] <- "selected"
expect_equal(as.array(crtabs(~ allpets$allpets_3,
data=ds[ds$allpets$allpets_1 == ds$allpets$allpets_2], useNA="always")),
array(c(0, 6, 0, 0),
dim=4L,
dimnames=list(
allpets_3=c("not selected", "selected", "not asked", "skipped")
)))
})
})
| /tests/testthat/test-update-array.R | no_license | persephonet/rcrunch | R | false | false | 2,702 | r | context("Update array variables")
with_test_authentication({
ds <- mrdf.setup(newDataset(mrdf))
test_that("Subvariable values before trying to update", {
expect_equivalent(as.vector(ds$CA$mr_1),
as.factor(c("1.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_2),
as.factor(c("0.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_3),
as.factor(c("0.0", "0.0", "1.0", NA)))
})
test_that("Can update array subvariables", {
ds$CA[ds$v4 == "B"] <- "1.0"
expect_equivalent(as.vector(ds$CA$mr_1),
as.factor(c("1.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_2),
as.factor(c("1.0", "0.0", "1.0", NA)))
expect_equivalent(as.vector(ds$CA$mr_3),
as.factor(c("1.0", "0.0", "1.0", NA)))
})
ds <- newDatasetFromFixture("apidocs")
test_that("Can update an individual subvariable conditionally", {
expect_equivalent(as.vector(ds$allpets$allpets_1, mode="id")[1:5],
c(1, 9, 1, 1, 9))
ds$allpets$allpets_1[2] <- 1
expect_equivalent(as.vector(ds$allpets$allpets_1, mode="id")[1:5],
c(1, 1, 1, 1, 9))
})
test_that("Can update where two subvariables are equal", {
expect_equal(diag(as.array(crtabs(~ allpets$allpets_1 + allpets$allpets_2,
data=ds, useNA="always"))),
c(`not selected`=1, selected=1, `not asked`=3, skipped=1))
expect_equal(as.array(crtabs(~ allpets$allpets_1,
data=ds[ds$allpets$allpets_1 == ds$allpets$allpets_2], useNA="always")),
array(c(1, 1, 3, 1),
dim=4L,
dimnames=list(
allpets_1=c("not selected", "selected", "not asked", "skipped")
)))
expect_equal(as.array(crtabs(~ allpets$allpets_3,
data=ds[ds$allpets$allpets_1 == ds$allpets$allpets_2], useNA="always")),
array(c(3, 0, 1, 2),
dim=4L,
dimnames=list(
allpets_3=c("not selected", "selected", "not asked", "skipped")
)))
expect_length(as.vector(ds$q1[ds$allpets$allpets_1 == ds$allpets$allpets_2]),
6)
ds$allpets$allpets_3[ds$allpets$allpets_1 == ds$allpets$allpets_2] <- "selected"
expect_equal(as.array(crtabs(~ allpets$allpets_3,
data=ds[ds$allpets$allpets_1 == ds$allpets$allpets_2], useNA="always")),
array(c(0, 6, 0, 0),
dim=4L,
dimnames=list(
allpets_3=c("not selected", "selected", "not asked", "skipped")
)))
})
})
|
# Some helper functions for the experiments contained in the paper
#' get_ci
#'
#' \code{get_ci} constructs a Wald-type confidence interval around each mean.
#'
#' @param muhat the n x 1 vector of point estimates (means).
#' @param sigmahat the n x 1 vector of scales; each interval is
#'   \code{muhat +/- c * sigmahat}, i.e. this plays the role of a standard
#'   error in the Wald formula.
#' @param c the multiplier (e.g. 1.96).
#'
#' @return a n x 2 matrix with columns \code{lo} and \code{hi}, the lower and
#'   upper endpoints of the confidence interval.
#' @export
get_ci <- function(muhat, sigmahat, c) {
  half_width <- c * sigmahat
  cbind(lo = muhat - half_width, hi = muhat + half_width)
}
#' root-n-rmse
#'
#' \code{rmse} computes \eqn{\sqrt{n}} times the RMSE, see Section 4 in paper.
#'
#' @param simmat a neps x nsim matrix, where neps is the number of epsilon
#' points used in evaluation and nsim is the number of simulations; a plain
#' vector is treated as nsim simulations of a scalar quantity.
#' @param truth a neps x 1 vector containing the true values for the curve
#' (a scalar when \code{simmat} is a vector).
#' @param n sample size.
#'
#' @return scalar equal to the \eqn{\sqrt(n) \times \text{RMSE}}.
#' @export
rmse <- function(simmat, truth, n) {
  if (is.matrix(simmat)) {
    # RMSE across simulations at each epsilon point (rows index epsilon)
    errors <- sweep(simmat, 1, truth, "-")
    point_rmse <- apply(errors, 1, function(e) sqrt(mean(e^2)))
  } else {
    point_rmse <- sqrt(mean((simmat - truth)^2))
  }
  # average over epsilon points, then scale by sqrt(n)
  sqrt(n) * mean(point_rmse)
}
#' coverage
#'
#' \code{coverage} computes the frequentist coverage (in %).
#'
#' @param lo a n x nsim matrix of lower bounds, where n is the number of
#' epsilon points used in evaluation and nsim is the number of simulations;
#' a vector is treated as nsim simulations of a scalar quantity.
#' @param hi a n x nsim matrix of upper bounds, shaped like \code{lo}.
#' @param truth_lo a n x 1 vector containing the true lower-bound curve
#' (a scalar when \code{lo} is a vector).
#' @param truth_hi a n x 1 vector containing the true upper-bound curve
#' (a scalar when \code{hi} is a vector).
#'
#' @return scalar equal to coverage (in %)
#' @export
coverage <- function(lo, hi, truth_lo, truth_hi) {
  if (is.matrix(lo)) {
    # a simulated band covers only if it contains the truth at EVERY point
    contains <- sweep(lo, 1, truth_lo, "<=") & sweep(hi, 1, truth_hi, ">=")
    covered <- apply(contains, 2, all)
  } else {
    covered <- lo <= truth_lo & truth_hi <= hi
  }
  100 * mean(covered)
}
#' bias
#'
#' \code{bias} computes the integrated bias (in %), see Section 4 of paper for
#' definition.
#'
#' @param simmat a n x nsim matrix, where n is the number of epsilon points
#' used in evaluation and nsim is the number of simulations; a plain vector is
#' treated as nsim simulations of a scalar quantity.
#' @param truth a n x 1 vector containing the true values for the curve
#' (a scalar when \code{simmat} is a vector).
#' @return scalar equal to bias (in %)
#' @export
#'
bias <- function(simmat, truth) {
  if (is.matrix(simmat)) {
    # mean deviation from the truth at each epsilon point, across simulations
    avg_dev <- rowMeans(sweep(simmat, 1, truth, "-"))
  } else {
    avg_dev <- mean(simmat - truth)
  }
  # integrate |bias| over epsilon points and express as a percentage
  100 * mean(abs(avg_dev))
}
#' truncate_prob
#'
#' \code{truncate_prob} truncates a vector of observations in [0, 1] so that
#' each observation is in [tol, 1-tol].
#'
#' @param x a numeric vector.
#' @param tol scalar used to make each observation in x be in [tol, 1-tol].
#'
#' @return a vector of length(x) with every element clamped to [tol, 1 - tol].
#'   NA elements are passed through unchanged.
#' @export
truncate_prob <- function(x, tol = 0.05) {
  # pmin/pmax clamp in a vectorized way and propagate NA, whereas the previous
  # logical-subscript assignment errored on NA input ("NAs are not allowed in
  # subscripted assignments").
  pmin(pmax(x, tol), 1 - tol)
}
| /R/utils.R | no_license | matteobonvini/sensitivitypuc | R | false | false | 3,546 | r | # Some helper functions for the experiments contained in the paper
#' get_ci
#'
#' \code{get_ci} constructs a Wald-type confidence interval around each mean.
#'
#' @param muhat the n x 1 vector of point estimates (means).
#' @param sigmahat the n x 1 vector of scales; each interval is
#'   \code{muhat +/- c * sigmahat}, i.e. this plays the role of a standard
#'   error in the Wald formula.
#' @param c the multiplier (e.g. 1.96).
#'
#' @return a n x 2 matrix with columns \code{lo} and \code{hi}, the lower and
#'   upper endpoints of the confidence interval.
#' @export
get_ci <- function(muhat, sigmahat, c) {
  half_width <- c * sigmahat
  cbind(lo = muhat - half_width, hi = muhat + half_width)
}
#' root-n-rmse
#'
#' \code{rmse} computes \eqn{\sqrt{n}} times the RMSE, see Section 4 in paper.
#'
#' @param simmat a neps x nsim matrix, where neps is the number of epsilon
#' points used in evaluation and nsim is the number of simulations; a plain
#' vector is treated as nsim simulations of a scalar quantity.
#' @param truth a neps x 1 vector containing the true values for the curve
#' (a scalar when \code{simmat} is a vector).
#' @param n sample size.
#'
#' @return scalar equal to the \eqn{\sqrt(n) \times \text{RMSE}}.
#' @export
rmse <- function(simmat, truth, n) {
  if (is.matrix(simmat)) {
    # RMSE across simulations at each epsilon point (rows index epsilon)
    errors <- sweep(simmat, 1, truth, "-")
    point_rmse <- apply(errors, 1, function(e) sqrt(mean(e^2)))
  } else {
    point_rmse <- sqrt(mean((simmat - truth)^2))
  }
  # average over epsilon points, then scale by sqrt(n)
  sqrt(n) * mean(point_rmse)
}
#' coverage
#'
#' \code{coverage} computes the frequentist coverage (in %).
#'
#' @param lo a n x nsim matrix of lower bounds, where n is the number of
#' epsilon points used in evaluation and nsim is the number of simulations;
#' a vector is treated as nsim simulations of a scalar quantity.
#' @param hi a n x nsim matrix of upper bounds, shaped like \code{lo}.
#' @param truth_lo a n x 1 vector containing the true lower-bound curve
#' (a scalar when \code{lo} is a vector).
#' @param truth_hi a n x 1 vector containing the true upper-bound curve
#' (a scalar when \code{hi} is a vector).
#'
#' @return scalar equal to coverage (in %)
#' @export
coverage <- function(lo, hi, truth_lo, truth_hi) {
  if (is.matrix(lo)) {
    # a simulated band covers only if it contains the truth at EVERY point
    contains <- sweep(lo, 1, truth_lo, "<=") & sweep(hi, 1, truth_hi, ">=")
    covered <- apply(contains, 2, all)
  } else {
    covered <- lo <= truth_lo & truth_hi <= hi
  }
  100 * mean(covered)
}
#' bias
#'
#' \code{bias} computes the integrated bias (in %), see Section 4 of paper for
#' definition.
#'
#' @param simmat a n x nsim matrix, where n is the number of epsilon points
#' used in evaluation and nsim is the number of simulations; a plain vector is
#' treated as nsim simulations of a scalar quantity.
#' @param truth a n x 1 vector containing the true values for the curve
#' (a scalar when \code{simmat} is a vector).
#' @return scalar equal to bias (in %)
#' @export
#'
bias <- function(simmat, truth) {
  if (is.matrix(simmat)) {
    # mean deviation from the truth at each epsilon point, across simulations
    avg_dev <- rowMeans(sweep(simmat, 1, truth, "-"))
  } else {
    avg_dev <- mean(simmat - truth)
  }
  # integrate |bias| over epsilon points and express as a percentage
  100 * mean(abs(avg_dev))
}
#' truncate_prob
#'
#' \code{truncate_prob} truncates a vector of observations in [0, 1] so that
#' each observation is in [tol, 1-tol].
#'
#' @param x a numeric vector.
#' @param tol scalar used to make each observation in x be in [tol, 1-tol].
#'
#' @return a vector of length(x) with every element clamped to [tol, 1 - tol].
#'   NA elements are passed through unchanged.
#' @export
truncate_prob <- function(x, tol = 0.05) {
  # pmin/pmax clamp in a vectorized way and propagate NA, whereas the previous
  # logical-subscript assignment errored on NA input ("NAs are not allowed in
  # subscripted assignments").
  pmin(pmax(x, tol), 1 - tol)
}
|
# Box office Star Wars: In Millions (!)
# Region labels and film titles (release order, A New Hope first)
col_titles <- c("US", "non-US")
movie_names <- c("A New Hope", "The Empire Strikes Back", "Return of the Jedi")
# US / non-US revenue per film, row-major
box_office_all <- c(461, 314.4, 290.5, 247.9, 309.3, 165.8)
# One row per film, one column per region
star_wars_matrix <- matrix(
  box_office_all,
  nrow = 3, byrow = TRUE,
  dimnames = list(movie_names, col_titles)
)
# The worldwide box office figures: US + non-US per film
worldwide_vector <- rowSums(star_wars_matrix)
# Append the totals as a third column named "worldwide_vector"
all_wars_matrix <- cbind(star_wars_matrix, worldwide_vector)
| /introduction-to-r/chapter-3-matrices/Adding a column for the Worldwide box office.R | no_license | oliverwreath/R_Practise | R | false | false | 549 | r | # Box office Star Wars: In Millions (!)
# Construct the matrix
# Region labels and film titles (release order, A New Hope first)
col_titles <- c("US", "non-US")
movie_names <- c("A New Hope", "The Empire Strikes Back", "Return of the Jedi")
# US / non-US revenue per film, row-major
box_office_all <- c(461, 314.4, 290.5, 247.9, 309.3, 165.8)
# One row per film, one column per region
star_wars_matrix <- matrix(
  box_office_all,
  nrow = 3, byrow = TRUE,
  dimnames = list(movie_names, col_titles)
)
# The worldwide box office figures: US + non-US per film
worldwide_vector <- rowSums(star_wars_matrix)
# Append the totals as a third column named "worldwide_vector"
all_wars_matrix <- cbind(star_wars_matrix, worldwide_vector)
|
library(testthat)
library(coral.growth)
test_check("coral.growth")
| /tests/testthat.R | permissive | EcoNum/coral.growth | R | false | false | 68 | r | library(testthat)
library(coral.growth)
test_check("coral.growth")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/athena_operations.R
\name{athena_get_work_group}
\alias{athena_get_work_group}
\title{Returns information about the workgroup with the specified name}
\usage{
athena_get_work_group(WorkGroup)
}
\arguments{
\item{WorkGroup}{[required] The name of the workgroup.}
}
\description{
Returns information about the workgroup with the specified name.
See \url{https://www.paws-r-sdk.com/docs/athena_get_work_group/} for full documentation.
}
\keyword{internal}
| /cran/paws.analytics/man/athena_get_work_group.Rd | permissive | paws-r/paws | R | false | true | 532 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/athena_operations.R
\name{athena_get_work_group}
\alias{athena_get_work_group}
\title{Returns information about the workgroup with the specified name}
\usage{
athena_get_work_group(WorkGroup)
}
\arguments{
\item{WorkGroup}{[required] The name of the workgroup.}
}
\description{
Returns information about the workgroup with the specified name.
See \url{https://www.paws-r-sdk.com/docs/athena_get_work_group/} for full documentation.
}
\keyword{internal}
|
#' Utility function to map each run of equal values in a vector to the index
#' range (rows) it occupies in the larger table.
#'
#' @param v a vector (e.g. a column of raw Actigraphy data) whose runs of
#'   equal values delimit sub-tables.
#' @return a list of integer index vectors, one per retained run, each
#'   spanning the positions of that run in \code{v}.
#' @export
make_df_index <- function(v){
# Run-length encode v; vec_rle() is a package helper defined elsewhere.
table_rle <- vec_rle(v) #use vec_rle function
# Cumulative run lengths give each run's end position; prepend 0 so that
# consecutive pairs (previous end, this end) can be formed below.
# NOTE(review): names() returns a character vector and cumsum() requires
# numerics -- this presumably relies on the names of vec_rle()'s result being
# numeric-coercible run lengths; confirm against vec_rle()'s definition.
divide_points <- c(0,cumsum(names(table_rle))) #Returns a vector whose elements are the cumulative sums of the elements of the argument.
# Pair each run's start (previous divide point + 1) with its end and expand
# into the full index sequence via purrr::map2.
# NOTE(review): `1:length(divide_points)-1` parses as (1:n) - 1, i.e.
# 0:(n-1); the 0 index is silently dropped so this happens to equal 1:(n-1),
# but seq_len(length(divide_points) - 1) would state the intent unambiguously.
table_index <- map2((divide_points + 1)[1:length(divide_points)-1],
divide_points[2:length(divide_points)],
~.x:.y) #list every value between x and y in each column
# Subset the per-run index list by the run values themselves.
return(table_index[table_rle])
}
| /R/make_df_index.R | no_license | jiqiaingwu/ActigraphyUtah | R | false | false | 706 | r | #' Utility function to map table with their columns/rows in a bigger table (Map Over Multiple Inputs Simultaneously)
#'
#' @param v import Actigraphy Raw data
#' @return The lengths and values of runs of equal values in a vector.
#' @export
make_df_index <- function(v){
# Run-length encode v; vec_rle() is a package helper defined elsewhere.
table_rle <- vec_rle(v) #use vec_rle function
# Cumulative run lengths give each run's end position; prepend 0 so that
# consecutive pairs (previous end, this end) can be formed below.
# NOTE(review): names() returns a character vector and cumsum() requires
# numerics -- this presumably relies on the names of vec_rle()'s result being
# numeric-coercible run lengths; confirm against vec_rle()'s definition.
divide_points <- c(0,cumsum(names(table_rle))) #Returns a vector whose elements are the cumulative sums of the elements of the argument.
# Pair each run's start (previous divide point + 1) with its end and expand
# into the full index sequence via purrr::map2.
# NOTE(review): `1:length(divide_points)-1` parses as (1:n) - 1, i.e.
# 0:(n-1); the 0 index is silently dropped so this happens to equal 1:(n-1),
# but seq_len(length(divide_points) - 1) would state the intent unambiguously.
table_index <- map2((divide_points + 1)[1:length(divide_points)-1],
divide_points[2:length(divide_points)],
~.x:.y) #list every value between x and y in each column
# Subset the per-run index list by the run values themselves.
return(table_index[table_rle])
}
|
## Author: Andres Camilo Mendez Alzate
##### script to extract data from a shapefile
pacman::p_load(raster,sp, exactextractr, sf, tidyverse,lattice, rasterVis,
maptools, latticeExtra, sp, RColorBrewer, future, furrr, data.tree,
collapsibleTree, stringi, writexl, biscale, cowplot, stars)
### ftp://ftp.ciat.cgiar.org/DAPA/Projects/shared_results/
### Function to compute per-crop gap metrics from a list of classified raster
### file paths `i`. Cell values are treated as classes: cells == 3 feed the
### "upper" gap layer, cells >= 2 the "lower" gap layer (NOTE(review): the
### exact class semantics are inferred from these thresholds -- confirm
### against the upstream classification step). Returns a one-row data.frame
### with lower/upper gap-coverage percentages and a Done/ERROR status flag;
### optionally writes a crop-richness raster to `file_path`.
gap_crop_coverage <- function(i, file_path = NULL, save.raster = FALSE ){
# Reclassification matrix for raster::reclassify (from, to, becomes rows):
# values in [0, 2) -> 0, values in [2, 3) -> 1; turns class rasters into
# presence/absence layers for the richness sum below.
mt <- matrix(c(0, 2, 0, 2, 3, 1), 2,3, byrow = T)
status <- tryCatch(expr = {
# load the rasters from the supplied file paths
rst <- i %>%
lapply(., raster)
# compute coverages
# "upper" layer: keep only cells equal to 3 in each raster, then sum the
# stack; cells that end up 0 carry no gap signal and are masked to NA
upper <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
upper[upper[] == 0] <- NA
# "lower" layer: same construction but keeping cells >= 2 (classes 2 and 3)
lower <- lapply(rst, function(k){
rst <- k
rst[rst[] < 2] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
lower[lower[] == 0] <- NA
# total analysed area: every non-NA cell of the stacked inputs; the +1 shift
# keeps zero-valued cells from being masked out by the == 0 test below
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
# coverage = 100 minus the percentage of total-area cells flagged as gap
covg_upp <- 100 - length(which(!is.na(upper[])))/length(which(!is.na(area_tot[])))*100
covg_low <- 100 - length(which(!is.na(lower[])))/length(which(!is.na(area_tot[])))*100
# richness: per-cell count of crops flagged after the 0/1 reclassification
rich <- rst %>%
raster::stack(.) %>%
reclassify(., mt) %>%
sum(., na.rm = T)
rich[rich[] == 0] <- NA
if(save.raster){
if(is.null(file_path)){
stop("If save raster == TRUE, file path must be provided.")
}else{
writeRaster(rich, file_path, overwrite = T, NAflag = 0)
}
}
# last expression of the tryCatch block: the success row returned to caller
status <- data.frame(lower = covg_low, upper = covg_upp, status = "Done" , stringsAsFactors = F)
}, error = function(e){
# any raster failure collapses to a sentinel row so callers can keep
# iterating over crops without aborting
status <- data.frame(lower= NA, upper = NA, status = "ERROR", stringsAsFactors = F)
})
return(status)
}
########## cargar shapefile del mundo con zonas en disputa para realizar el cargue de información
# Load the world shapefile (including disputed zones) and keep only the
# attribute columns used downstream.
shp <-shapefile("Z:/gap_analysis_landraces/runs/input_data/shapefiles/new_world_shapefile/ne_50m_admin_0_countries.shp")
shp@data <- shp@data %>%
dplyr::select(LEVEL, ADMIN, ADM0_A3, NAME, NAME_LONG, FORMAL_EN, POP_EST,POP_RANK, GDP_MD_EST, FIPS_10_, ISO_A2, ISO_A3, ADM0_A3_IS,
CONTINENT, SUBREGION, REGION_WB)
##############################################################
######## 1. calcular gap metrics para todos los cultivos #######
##############################################################
root_dir <- "Z:/gap_analysis_landraces/runs/results/"
#get crop folder paths
crops_paths <- data.frame(pth = paste0(list.dirs(path = root_dir, full.names = T, recursive = F), "/lvl_1") ,
name = list.dirs(path = root_dir, full.names = F, recursive = F),
stringsAsFactors = F)
# get race folder paths
baseDir <- "Z:/gap_analysis_landraces/runs"
crops <- list.dirs(paste0(baseDir, "/results"), full.names = F, recursive = F)
# One row per (crop, race) gap_class raster; "Not_found" marks crops with no maps.
gap_maps_pths <- lapply(crops, function(i){
pths <- list.files(paste0(baseDir, "/results/", i, "/lvl_1"),
pattern = "gap_class_final_new.tif$",
recursive = T,
full.names = T)
if(length(pths) == 0){
pths <- "Not_found"
race_name <- NA_character_
}else{
# race name = the directory component right after "lvl_1"
race_name <- stringr::str_extract(pths , pattern = "(lvl_1)(/)*[a-zA-Z0-9_-]+") %>%
gsub(pattern = "(lvl_1)(/)*|(/)", replacement = "", .)
}
pths <- as.character(pths)
return(data.frame(paths = pths, crop_name = i, race_name = race_name))
}) %>%
dplyr::bind_rows() %>%
tibble::as_tibble() %>%
# NOTE(review): `as.character()` is *called* here (yielding character(0));
# the intent was probably to pass the function `as.character` -- verify.
dplyr::mutate_all(., as.character())
# gap_maps_pths %>%
#   do.call(rbind, .) %>%
#   write.csv(., "D:/LGA_dashboard/www/crop_races_paths.csv", row.names = F)
#gap_maps_pths <- read.csv("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/crop_races_paths.csv", header = T, stringsAsFactors = F)
#metrics <- read.csv("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/coverage_metrics.csv", header = T, stringsAsFactors = F)
shp <-shapefile(paste0(baseDir, "/input_data/shapefiles/new_world_shapefile/ne_50m_admin_0_countries.shp"))
# NOTE(review): this patch hard-codes 7 replacement ISO codes and silently
# depends on the shapefile's row order -- fragile if the shapefile changes.
shp@data$ISO_A2[shp@data$ISO_A2 == "-99"] <- c("SX", "NO", "FR", "NC", "-99", "-99", "-99")
shp@data <- shp@data %>%
dplyr::select(LEVEL, ADMIN, ADM0_A3, NAME, NAME_LONG, FORMAL_EN, POP_EST,POP_RANK, GDP_MD_EST, FIPS_10_, ISO_A2, ISO_A3, ADM0_A3_IS,
CONTINENT, SUBREGION, REGION_WB)
# sf copy used by exactextractr::exact_extract() throughout
shp_sf <- sf::st_as_sf(shp)
### creación de objeto con clase S4 para guardar resultados
# S4 container holding every per-race / per-crop result table of this script.
setClass("results", slots = list(race = "list",
crop = "list",
combined_results = "data.frame",
diversity_tree = "data.frame",
country_area = "data.frame",
coverage_totals = "data.frame"))
consolidado <- new("results", race = list(), crop = list(), combined_results = data.frame(), diversity_tree = data.frame(), country_area = data.frame(), coverage_totals = data.frame())
# Parallel back-end for the furrr maps below (10 worker sessions).
future::plan(future::multisession, workers = 10)
# Per-race metrics: coverage bounds + mean validation AUC, and per-country
# gap areas extracted against the world shapefile.
consolidado@race <- gap_maps_pths %>%
filter(!grepl("old", crop_name) & !grepl("old", race_name) & !is.na(race_name)) %>%
dplyr::mutate(summary_metrics_race = furrr::future_map(.x = paths, .f = function(.x){
cat("Processing: ", .x, "\n")
if(.x != "Not_found"){
# coverage bounds from the gap_class raster (3 = gap, >=2 = possible gap)
covg <- tryCatch(expr = {
rs_gaps <- raster::raster(.x)
covg_ul <- 100 - length(which(rs_gaps[]==3))/length(which(!is.na(rs_gaps[])))*100
covg_ll <- 100 - length(which(rs_gaps[]>=2))/length(which(!is.na(rs_gaps[])))*100
data.frame(covg_ul, covg_ll)
}, error = function(e){
covg_ul <- NA
covg_ll <- NA
data.frame(covg_ul, covg_ll)
})
# mean AUC across the buffer-100km validation CSVs next to the raster
auc_avg <- list.files(paste0(.x, "/../../gap_validation/buffer_100km"), full.names = TRUE, pattern = ".csv$") %>%
purrr::map(.x = ., function(.x){
val <- tryCatch(expr = {
val <- read.csv(.x, header = T, stringsAsFactors = F) %>%
filter(pnt == "Mean") %>%
pull(auc.mean)
}, error = function(e){
val <- NA
})
return(val*100)
}) %>%
unlist() %>%
mean(., na.rm = T)
res <- data.frame(lower = covg$covg_ll, upper = covg$covg_ul, auc_avg = auc_avg)
}else{ res <- data.frame(lower = NA, upper = NA, auc_avg = NA)}
return(res)
}),
gap_area_country_race = furrr::future_pmap(.l = list(x = paths, y = crop_name, z = race_name), .f = function(x, y, z){
cat("Calculating area for: ", y, "-", z, "\n" )
rst <- raster::raster(x)
gp_area_min <- rst
gp_area_max <- rst
total_area <- rst
# max scenario: only class-3 cells; min scenario: classes 2 and 3
gp_area_max[gp_area_max[] != 3] <- NA
gp_area_min[ !(gp_area_min[] %in% c(2,3))] <- NA
total_area[!is.na(total_area)] <- 1
# per-country km2 sums via cell-area rasters masked to each scenario
gap_area_min <- raster::area(gp_area_min) %>%
raster::mask(., mask = gp_area_min) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
gap_area_max <- raster::area(gp_area_max) %>%
raster::mask(., mask = gp_area_max) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
total_area <- raster::area(total_area) %>%
raster::mask(., mask = total_area) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
name <- paste0(y, ".", z)
# NOTE(review): "gap_area_maxn" looks like a typo for "gap_area_max";
# downstream code consuming these names should be checked.
res <- tibble(country= shp@data$ADMIN,
ISO2 = shp@data$ISO_A2,
gap_area_min = unlist(gap_area_min),
gap_area_maxn = unlist(gap_area_max),
sdm_area = unlist(total_area)) %>%
dplyr::mutate(coverage_lower = round(1-gap_area_min/sdm_area, 3),
coverage_upper = round(1-gap_area_max/sdm_area, 3),
coverage = round((coverage_lower+coverage_upper)/2, 3) )
# suffix metric columns with crop/race so joined tables stay distinguishable
suppressWarnings( names(res)[3:ncol(res)] <- paste0(names(res[3:ncol(res)]), "_", y, "_", z ) )
return(res)
})
)# end consolidado race
## Consolidado por cultivo
# Per-crop consolidation: final map paths, coverage metrics, per-country
# coverage, accession counts, and per-country gap areas.
consolidado@crop <- crops_paths %>%
as_tibble %>%
dplyr::mutate(
# all non-"old" gap_class rasters for the crop; "Not_found" if none exist
final_map_paths = furrr::future_map(.x = pth, .f = function(.x){
pths <- list.files(path = as.character(.x), recursive = T, full.names = T, pattern = "gap_class_final_new.tif$")
pths <- pths[!grepl(pattern = "old", pths)]
if(length(pths) == 0){
pths <- "Not_found"
}
return(pths)
}),
# crop-level coverage bounds; also writes the per-crop gap-richness raster
crops_coverage = furrr::future_pmap(.l = list(.x = final_map_paths, .y = name, .z = pth), .f = function(.x, .y, .z){
cat("Caculating coverage for: ", .x, " \n")
pths <- unlist(.x)
if(("Not_found" %in% pths)){x <- NA}else{
file_path <- paste0(as.character(.z),"/gap_richness_", .y, ".tif")
x <- gap_crop_coverage(i = pths, file_path = file_path, save.raster = T)}
return(x)
}),
# per-country min/max gap areas and coverage for the crop (union of races)
country_coverage = furrr::future_map(.x = final_map_paths, .f = function(.x){
pths <- unlist(.x)
if(("Not_found" %in% pths)){
x <- tibble(country = NA_character_, ISO2 = NA_character_, gap_area = NA, sdm_area = NA, coverage = NA)
}else{
rst <- pths %>%
lapply(., raster)
# binary presence masks: gap_max (class 3 only), gap_min (classes 2-3)
gap_max <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_min <- lapply(rst, function(k){
rst <- k
rst[!(rst[] %in% c(2,3))] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_max[gap_max == 0] <- NA
gap_max[!is.na(gap_max)] <- 1
gap_min[gap_min == 0] <- NA
gap_min[!is.na(gap_min)] <- 1
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
area_tot[!is.na(area_tot)] <- 1
# NOTE(review): these sums are cell *counts*, not km2 (no area() weighting
# as in the race-level branch) -- confirm this asymmetry is intended.
gp_area_min <- exactextractr::exact_extract(gap_min, shp_sf, fun = 'sum', progress = FALSE)
gp_area_max <- exactextractr::exact_extract(gap_max, shp_sf, fun = 'sum', progress = FALSE)
sdm_tot <- exactextractr::exact_extract(area_tot, shp_sf, fun = 'sum', progress = FALSE)
x <- tibble(country = shp@data$NAME,
ISO2 = shp@data$ISO_A2,
gap_area_min = gp_area_min,
gap_area_max = gp_area_max,
sdm_area = sdm_tot) %>%
dplyr::mutate_if(is.numeric, round) %>%
dplyr::mutate(coverage_min = 1 - (gap_area_min/sdm_area),
coverage_max = 1 - (gap_area_max/sdm_area),
coverage = round((coverage_min+coverage_max)/2, 3) ,
coverage = ifelse(is.nan(coverage), 0, coverage))
}
return(x)
}),
# accession usage frequencies from the crop's identifier database
accessions_count = furrr::future_map(.x = name, .f = function(.x) {
cat("Calculating metrics for: ", .x, "\n")
crop_db_path <- paste0(baseDir, "/input_data/by_crop/", .x, "/lvl_1/classification/", .x, "_bd_identifiers.csv")
if(file.exists(crop_db_path)){
identifers <- suppressMessages( readr::read_csv(crop_db_path))
used_freq <- identifers %>%
group_by(ensemble, used) %>%
tally() %>%
dplyr::mutate(freq_in_gr = n/sum(n)) %>%
ungroup %>%
dplyr::mutate(total = sum(n),
total_used = identifers %>% filter(used) %>% nrow(),
freq_overall = n/sum(n)
)
source_db_summ <- identifers %>%
dplyr::filter(used) %>%
group_by(ensemble, source_db) %>%
tally() %>%
dplyr::mutate(freq_in_gr = n/sum(n) ) %>%
ungroup() %>%
dplyr::mutate(source_db = toupper(source_db), freq_overall = n/sum(n))
source_db_freq <- identifers %>%
dplyr::filter(used) %>%
group_by( source_db) %>%
tally() %>%
dplyr::mutate(source_db = toupper(source_db),freq_in_gr = n/sum(n) )
status_db_freq <- identifers %>%
dplyr::filter(used) %>%
group_by( status) %>%
tally() %>%
dplyr::mutate(status = toupper(status),freq_in_gr = n/sum(n) )
res <- list("used_freq" = used_freq, "source_db_summ" = source_db_summ, "source_db_freq" = source_db_freq, "status_db_freq" = status_db_freq)
}else{
res <- list(used_freq = NA, source_db_summ = NA, source_db_freq = NA, status_db_freq = NA)
}
return(res)
}),
# accession counts per country via point-in-polygon extraction
accessions_count_per_country = furrr::future_map(.x = name, .f = function(.x) {
crop_db_path <- paste0(baseDir, "/input_data/by_crop/", .x, "/lvl_1/classification/", .x, "_bd_identifiers.csv")
if(file.exists(crop_db_path)){
identifers <- suppressMessages( readr::read_csv(crop_db_path))
used_occ <- identifers %>%
dplyr::filter(used) %>%
dplyr::select(matches("latit"), matches("long"))
# assumes the columns are literally named Longitude/Latitude -- TODO confirm
coordinates(used_occ) <- ~Longitude+Latitude
crs(used_occ) <- crs(shp)
ext <- suppressWarnings(raster::extract(shp, used_occ)) %>%
dplyr::select(NAME, ISO_A2) %>%
dplyr:::group_by(NAME, ISO_A2) %>%
dplyr::tally() %>%
dplyr::ungroup() %>%
dplyr::select("country" = NAME, "ISO2" = ISO_A2, occ_count = n)
res <- tibble(country = shp@data$NAME, ISO2 = shp@data$ISO_A2 ) %>%
left_join(., ext, by = c("country", "ISO2"))
}else{
res <- tibble(country = NA, ISO2 = NA, occ_count = NA)
}
return(res)
}),
# per-country gap-richness area plus Julian's upper-quartile priority flag
gap_area_country_crop = furrr::future_map2(.x = pth, .y = name, .f = function(.x, .y){
pths <- list.files(path = as.character(.x), recursive = F, full.names = T, pattern = "^gap_richness_[a-zA-z0-9]*.tif$")
pths <- pths[!grepl(pattern = "old", pths)]
if(length(pths) == 0){
pths <- "Not_found"
res <- tibble(country= NA, ISO2 =NA, area = NA )
}else if(length(pths) >= 2){
warning("More than one gap richness map found, using the first one \n")
pths <- pths[1]
}
if(pths != "Not_found"){
cat("Calculating area for: ", .y, "\n" )
rst <- raster::raster(pths)
gap_area_rst <- raster::area(rst) %>%
raster::mask(., mask = rst) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
name <- paste0(.y)
ar <- unlist(gap_area_rst)
qq <- quantile(ar[ar != 0], probs = 0.75)
res <- tibble(country= shp@data$ADMIN,
ISO2 = shp@data$ISO_A2,
area = ar) %>%
dplyr::mutate(quantile = ifelse(ar >= qq, 1, 0)) #Julian's method for prioritizing
suppressWarnings( names(res)[3:4] <- c(name , paste0(name, ".quantile")) )
}
return(res)
})
) #end mutate
# Back to sequential execution after the parallel section.
future::plan(future::sequential)
#calculating quantiles for gap areas and combine with diversity tree
country_gp <- consolidado@crop$gap_area_country_crop
# drop crops with no data, then full-join all per-crop country tables
country_gp <- country_gp[!sapply(country_gp, function(x) any(is.na(x)))]
country_gp<- base::Reduce(dplyr::full_join, country_gp)
sgp_priorities <- country_gp %>%
dplyr::select(country, ISO2, matches("quantile"))
# spatial priority score = number of crops where the country is upper-quartile
sgp_priorities$suma_spatial <- sgp_priorities[, 3:ncol(sgp_priorities)] %>% rowSums()
sgp_priorities <- sgp_priorities %>%
dplyr::select(country, ISO2, suma_spatial)
dv_tree_gp <- read_csv(paste0(baseDir, "/LGA_documents/diversity_tree_results/diversity_tree_priorities.csv"))
consolidado@diversity_tree <- dv_tree_gp
# NOTE(review): columns 4:15 are hard-coded -- verify against the CSV schema.
dv_tree_gp$suma_dvTree <- dv_tree_gp[, 4:15] %>% rowSums()
dv_tree_priorities <- dv_tree_gp %>%
dplyr::select(Country, suma_dvTree) %>%
dplyr::mutate(Country_id = stringi::stri_trans_general(str = Country, id = "Latin-ASCII"),
Country_id = tolower(Country_id))
# combining spatial results with diversity tree priorities
# (join on an accent-stripped, lower-cased country key)
consolidado@combined_results <- sgp_priorities %>%
dplyr::mutate(country_id = stringi::stri_trans_general(str = country, id = "Latin-ASCII"),
country_id = tolower(country_id)) %>%
dplyr::left_join(., dv_tree_priorities, by = c("country_id" = "Country_id")) %>%
dplyr::select(country, ISO2, suma_spatial, suma_dvTree) %>%
dplyr::mutate(Total = rowSums(.[,3:4], na.rm = T))
#save countries area
consolidado@country_area <- shp@data %>%
dplyr::select("country" = NAME, "ISO2" = ISO_A2) %>%
dplyr::mutate(area_tot = round(raster::area(shp)/1000000)) %>%
as_tibble()
#Calculate country gap area for gap richnnes world map
# For every crop, build binary gap_min / gap_max / sdm_area rasters (or NAs
# when the crop has no maps); consumed by the mosaic section below.
tmp_maps <- consolidado@crop %>%
dplyr::mutate(temp_maps = purrr::map2(.x = final_map_paths, .y = name, .f = function(.x, .y){
pths <- unlist(.x)
if(("Not_found" %in% pths)){
# NOTE(review): this placeholder only has `gap` and `sdm_area` elements,
# while the success branch returns gap_min/gap_max/sdm_area -- the
# element names are inconsistent between branches.
ret <- list(gap = NA, sdm_area = NA)
}else{
rst <- pths %>%
lapply(., raster)
gap_max <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_min <- lapply(rst, function(k){
rst <- k
rst[!(rst[] %in% c(2, 3))] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_max[gap_max == 0] <- NA
gap_max[!is.na(gap_max)] <- 1
gap_min[gap_min == 0] <- NA
gap_min[!is.na(gap_min)] <- 1
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
area_tot[!is.na(area_tot)] <- 1
ret <- list(gap_min = gap_min, gap_max = gap_max, sdm_area = area_tot)
# free raster temp files accumulated during the stack/sum operations
removeTmpFiles(h=0)
}#end else
return(ret)
})) %>%
dplyr::pull(temp_maps)
# Collect the SDM-extent rasters from tmp_maps, cropping every Raster* object
# to the common world extent used for mosaicking; non-raster placeholders
# (NA entries from crops without maps) pass through untouched.
sum_sdm <- lapply(tmp_maps, function(entry) {
  sdm <- entry$sdm_area
  if (!("Raster" %in% is(sdm))) {
    return(sdm)
  }
  raster::crop(x = sdm, y = extent(c(-180, 180, -59.5, 83.625)))
})
# Crop each per-crop minimum-gap raster to the common mosaic extent.
# BUG FIX: the original tested is(k$gap), but tmp_maps elements are named
# gap_min / gap_max / sdm_area, so k$gap was always NULL, the condition was
# always FALSE, and no raster was ever cropped before mosaicking.
sum_gap_min <- lapply(tmp_maps, function(k){
  if ("Raster" %in% is(k$gap_min)) {
    ret <- k$gap_min %>%
      raster::crop(x = ., y = extent(c(-180, 180, -59.5, 83.625)))
  } else {
    ret <- k$gap_min  # NA/NULL placeholder for crops without maps
  }
  return(ret)
})
# Crop each per-crop maximum-gap raster to the common mosaic extent.
# BUG FIX: same as sum_gap_min -- the original tested is(k$gap), which is
# always NULL (elements are gap_min/gap_max/sdm_area), so the crop branch
# never ran.
sum_gap_max <- lapply(tmp_maps, function(k){
  if ("Raster" %in% is(k$gap_max)) {
    ret <- k$gap_max %>%
      raster::crop(x = ., y = extent(c(-180, 180, -59.5, 83.625)))
  } else {
    ret <- k$gap_max  # NA/NULL placeholder for crops without maps
  }
  return(ret)
})
#rm(tmp_maps)
# Mosaic the per-crop rasters into world totals (SDM extent, min/max gaps),
# then extract per-country km2 sums into consolidado@coverage_totals.
sum_sdm <- sum_sdm[!is.na(sum_sdm)]
# do.call(mosaic, ...) reads `fun`/`na.rm` from the list as named arguments
sum_sdm$fun <- sum
sum_sdm$na.rm <- TRUE
sdm_total <- do.call(raster::mosaic, sum_sdm)
sdm_total[sdm_total[] == 0] <- NA
writeRaster(sdm_total, paste0(baseDir, "/results/sdm_all_crops_sum.tif"), overwrite = T)
#rm(sum_sdm)
sum_gap_min <- sum_gap_min[!sapply(sum_gap_min, is.null, simplify = TRUE)]
sum_gap_min$fun <- sum
sum_gap_min$na.rm <- TRUE
gap_total_min <- do.call(raster::mosaic, sum_gap_min)
gap_total_min[gap_total_min[] == 0] <- NA
#rm(sum_gap_min)
sum_gap_max <- sum_gap_max[!sapply(sum_gap_max, is.null, simplify = TRUE)]
sum_gap_max$fun <- sum
sum_gap_max$na.rm <- TRUE
gap_total_max <- do.call(raster::mosaic, sum_gap_max)
gap_total_max[gap_total_max[] == 0] <- NA
#rm(sum_gap_max)
# per-country area sums (cell area masked to each total raster)
consolidado@coverage_totals <- tibble( country = shp@data$NAME,
ISO2 = shp@data$ISO_A2,
gap_total_min = raster::area(gap_total_min) %>%
raster::mask(., gap_total_min) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE) %>%
round() ,
gap_total_max = raster::area(gap_total_max) %>%
raster::mask(., gap_total_max) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE) %>%
round() ,
sdm_total = raster::area(sdm_total) %>%
raster::mask(., sdm_total) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE) %>%
round())
#########################################
### CALCULAR GAP RICHNESS MAP ###########
########################################
# Combine every per-crop gap_richness raster into a single world map that
# counts, per cell, how many crops have a gap there.
if(TRUE){
# NOTE(review): the class [a-zA-z] also matches the punctuation between
# 'Z' and 'a' in ASCII; [a-zA-Z] was probably intended.
maps_pths <- consolidado@crop$pth %>%
list.files(., pattern = "^gap_richness_[a-zA-z0-9]*.tif$", full.names = T)
#list.files("Z:/gap_analysis_landraces/runs/results", pattern = "^gap_richness_[a-zA-z0-9]*.tif$", recursive = T)
#list.files("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/scripts", pattern = ".tif$", full.names = T)
msk <- raster("Z:/gap_analysis_landraces/runs/input_data/mask/mask_world.tif")
# binarize each crop map (any richness > 0 counts as one crop with gaps)
rst <- lapply(maps_pths, function(i){
cat("Processing:", i, "\n")
r <- raster(i)
r[r[] > 0] <- 1
r[r[] <= 0] <- NA
r <- raster::crop(r, extent(msk))
return(r)
})
rst$fun <- sum
rst$na.rm <- TRUE
gp_rich <- do.call(mosaic, rst)
gp_rich[gp_rich[] == 0] <- NA
writeRaster(gp_rich, "Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif", overwrite =T)
}
# Persist the full S4 results container.
save(consolidado, file = paste0(baseDir, "/results/results_all_crops.RData"))
#####################################################
############## update table metrics in FTP server
####################################################
root_ftp <- "Z:/gap_analysis_landraces/shared_results"
## Race coverage metrics
# Export one xlsx per (crop, race): summary coverage + per-country coverage.
for(i in 1:nrow(consolidado@race)){
var <- consolidado@race %>%
slice(i) %>%
dplyr::select(crop_name ,race_name, summary_metrics_race, gap_area_country_race)
cat("Extracting metrics for", as.character(var$crop_name), "... in group:",as.character(var$race_name), "\n")
ftp_pth <- paste0(root_ftp, "/spatial_lga/", var$crop_name,"/results/", var$race_name)
# unlist()[1]/[2] rely on the (lower, upper, auc_avg) column order
mtrs <- tibble(coverage_lower = unlist(var$summary_metrics_race)[1],
coverage_upper = unlist(var$summary_metrics_race)[2]) %>%
dplyr::mutate(coverage = round((coverage_lower+coverage_upper)/2, 3))
covg_mtrs <- var$gap_area_country_race %>%
purrr::pluck(., 1) %>%
dplyr::mutate_if(is.numeric,
function(i){
x <- ifelse(is.nan(i), 0, i)
return(x)
})
lst <- list(coverage_metrics = mtrs , country_coverage = covg_mtrs)
writexl::write_xlsx(lst, path = paste0( ftp_pth,"/",as.character(var$crop_name), "_" ,as.character(var$race_name), "_coverage", ".xlsx"))
}#end for
## Race per Crop
# Export one xlsx per crop (only when the FTP directory already exists).
for(i in 1:nrow(consolidado@crop)){
var <- consolidado@crop %>%
dplyr::slice(i) %>%
dplyr::select(name, crops_coverage, country_coverage)
cat("Extracting metrics for", as.character(var$name), "... ", "\n")
ftp_pth <- paste0(root_ftp, "/spatial_lga/", var$name,"/results/")
if(dir.exists(ftp_pth)){
mtrs <- tibble(coverage_lower = as.numeric(purrr::pluck(var$crops_coverage, 1)[1]),
coverage_upper = as.numeric(purrr::pluck(var$crops_coverage, 1)[2])) %>%
dplyr::mutate(coverage = round((coverage_lower+coverage_upper)/2, 3))
covg_mtrs <- var$country_coverage %>%
purrr::pluck(., 1) %>%
dplyr::mutate_if(is.numeric,
function(i){
x <- ifelse(is.nan(i), 0, i)
return(x)
})
lst <- list(coverage_metrics = mtrs , country_coverage = covg_mtrs)
writexl::write_xlsx(lst, path = paste0( ftp_pth,as.character(var$name), "_coverage", ".xlsx"))
}
}#end for
######### Pruebas de con mapas
# Exploratory/scratch section: build a gaps-summary CSV template for the
# Highcharts world map (local, user-specific paths).
msk <- raster("C:/Users/acmendez/Google Drive/CIAT/hidden_project/Gap_analysis_UI_original/www/masks/mask_world.tif")
gap_rich <- raster("D:/OneDrive - CGIAR/Attachments/Desktop/gap_richness_map.tif")
shp <- raster::shapefile("C:/Users/acmendez/Google Drive/CIAT/hidden_project/Gap_analysis_UI_original/www/world_shape_simplified/all_countries_simplified.shp")
# per-country totals: mask area, gap-richness area, max richness count
total_area <- raster::area(msk) %>%
raster::mask(., mask = msk) %>%
exactextractr::exact_extract(., st_as_sf(shp), fun = 'sum')
gap_area_rst <- raster::area(gap_rich) %>%
raster::mask(., mask = gap_rich) %>%
exactextractr::exact_extract(., st_as_sf(shp), fun = 'sum')
gap_count <- exactextractr::exact_extract(gap_rich, st_as_sf(shp), fun = "max")
# Highcharts map metadata (assumes highcharter is attached -- TODO confirm)
vth <- download_map_data("https://code.highcharts.com/mapdata/custom/world-palestine-highres.js" )
vth <- get_data_from_map(vth)
hcmp_data<- vth %>%
dplyr::select(`hc-a2`, name, subregion, `iso-a3` , `iso-a2`)
gap_data <- shp@data %>%
dplyr::select(ISO2, ISO3, NAME) %>%
dplyr::mutate(area_total = round(total_area, 0), gap_area = round(gap_area_rst, 0),
gap_count = round(gap_count,0)) %>%
as_tibble
# join on ISO2 and write the template consumed by the dashboard
hcmp_data %>%
dplyr::left_join(., gap_data, by = c("iso-a2" = "ISO2")) %>%
dplyr::mutate(area_total = ifelse(is.na(area_total ), 0, area_total ),
gap_area = ifelse(is.na(gap_area),0, gap_area),
gap_count = ifelse(is.na(gap_count),0, gap_count)) %>%
dplyr::mutate(area_freq = round(gap_area/area_total, 1),
area_freq = ifelse(is.nan(area_freq), 0, area_freq)) %>%
write.csv(., "D:/OneDrive - CGIAR/Attachments/Desktop/gaps_summary_template.csv", row.names = F)
nrow(gap_data)
nrow(hcmp_data)
#### diversity tree Template
# Read the genepool tree JSON and convert it into a data.tree structure,
# then flatten it to one column per tree level (level_1, level_2, ...).
x <- readLines("C:/Users/acmendez/Downloads/Cowpea genepool.tree.json") %>% fromJSON(txt =., simplifyDataFrame = F)
repos <- as.Node(x)
# BUG FIX: the original line ended with a stray `/`, which made R evaluate
# `print(...) / plot(...)` as a division and fail at runtime.
print(repos, "id", "login")
plot(repos)
tree_df <- ToDataFrameTypeCol(repos)
# BUG FIX: grepl() must be applied to the column *names*; applying it to the
# data frame itself matches against the deparsed column contents instead.
tree_df <- tree_df[, !grepl("children", names(tree_df))]
names(tree_df) <- paste0("level_", seq_len(ncol(tree_df)))
tree_df <- as_tibble(tree_df)
# Walk adjacent level columns (level_i -> level_{i+1}) and collect the
# distinct parent->child pairs at every depth of the flattened tree.
edgelist <- list()
i <- 1
while(i <= ncol(tree_df)){
  cat(i, "\n")
  if(i + 1 <= ncol(tree_df)){
    lvl1 <- paste0("level_", i)
    lvl2 <- paste0("level_", i + 1)
    # IMPROVEMENT: replaces the eval(parse(text = ...)) anti-pattern with
    # tidy evaluation via the .data pronoun; group_by + tally + select yields
    # the same distinct (start, end) pairs in grouped (sorted) order.
    edgelist[[i]] <- tree_df %>%
      dplyr::group_by(start = .data[[lvl1]], end = .data[[lvl2]]) %>%
      dplyr::tally() %>%
      dplyr::select(start, end)
  }
  i <- i + 1
}
edgelist <- edgelist %>%
  bind_rows()
# Render the edge list as an interactive collapsible tree: drop incomplete
# edges, prepend the synthetic root row (parent NA -> first start value),
# and plot; also draw the raw tree with igraph for comparison.
edgelist %>%
dplyr::filter(!is.na(end)) %>%
dplyr::add_row(start = NA, end = unlist(edgelist[1,1]), .before = 1 ) %>%
dplyr::mutate(collected = "") %>%
collapsibleTree::collapsibleTreeNetwork(., attribute = "collected", fontSize = 7)
plot(as.igraph(repos, directed = TRUE, direction = "climb"))
##### calculate gap richnes for each crop
# Legacy row-wise version of the per-crop coverage computation (same algebra
# as gap_crop_coverage()); kept for regenerating coverage_metrics.csv.
root_dir <- "Z:/gap_analysis_landraces/runs/results/"
crops_paths <- data.frame(pth = paste0(list.dirs(path = root_dir, full.names = T, recursive = F), "/lvl_1") ,
name = list.dirs(path = root_dir, full.names = F, recursive = F),
stringsAsFactors = F)
# apply() over data.frame rows yields a character vector: i[1] = path, i[2] = name
cov_mtrs <- apply(crops_paths,1, function(i){
cat("Calculating Gap richness for crop: ", i[2], "\n")
mt <- matrix(c(0, 2, 0, 2, 3, 1), 2,3, byrow = T)
x<-tryCatch({
rst <- list.files(path = as.character(i[1]), recursive = T, full.names = T, pattern = "gap_class_final_new.tif$")
rst <- rst[!grepl(pattern = "old", rst)]
if(length(rst) == 0){
stop("No file found")
}
#cargar los rasters
rst <- rst %>%
lapply(., raster)
#calcular coverages (upper: class 3 only; lower: classes >= 2)
upper <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
upper[upper[] == 0] <- NA
lower <- lapply(rst, function(k){
rst <- k
rst[rst[] < 2] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
lower[lower[] == 0] <- NA
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
covg_upp <- 100 - length(which(!is.na(upper[])))/length(which(!is.na(area_tot[])))*100
covg_low <- 100 - length(which(!is.na(lower[])))/length(which(!is.na(area_tot[])))*100
rich <- rst %>%
raster::stack(.) %>%
reclassify(., mt) %>%
sum(., na.rm = T)
rich[rich[] == 0] <- NA
# writeRaster(rich, paste0("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/", i[2], ".tif"), overwrite = T, NAflag = 0)
status <- data.frame(lower = covg_low, upper = covg_upp, status = "Done", stringsAsFactors = F)
return(status)
}, error = function(e){
cat("An error ocurred :( \n")
status <- data.frame(lower= NA, upper = NA, status = "ERROR", stringsAsFactors = F)
return(status)
})
return(cbind( name = i[2], x))
})
# stack the per-crop rows and persist the metrics table
bind_rows(cov_mtrs) %>%
dplyr::mutate(name = crops_paths$name) %>%
write.csv(., "Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/coverage_metrics.csv", row.names = F)
# MAPAS EN FORMATO PNG
# Render the all-crops gap-richness raster as a categorical PNG world map
# (rasterVis::levelplot with country overlays and graticules).
options(warn = -1, scipen = 999)
suppressMessages(library(pacman))
suppressMessages(pacman::p_load(raster, lattice, rasterVis, maptools, tidyverse, latticeExtra, sp, RColorBrewer))
rsin <- raster("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif")
wrld <- raster::shapefile('Z:/gap_analysis_landraces/runs/input_data/shapefiles/shp/all_countries.shp')
ssdn <- raster::shapefile('Z:/gap_analysis_landraces/runs/input_data/shapefiles/Sudan_admin_WGS84/Sudan_admin_0.shp')
msk <- raster("Z:/gap_analysis_landraces/runs/input_data/mask/mask_world.tif")
grat <- sp::gridlines(wrld, easts = seq(-180, 180, by = 20), norths = seq(-90, 90, by = 20))
# bin richness into 3 classes: (1,3] -> 1, (3,6] -> 2, (6,15] -> 3
mt <- matrix(c(1, 3, 1,
3, 6, 2,
6, 15, 3), 3,3, byrow = T)
rsin <- raster::reclassify(x = rsin, rcl = mt, include.lowest=F)
rsin <- raster::ratify(rsin)
rat <- levels(rsin)[[1]]
# NOTE(review): labels "1-3"/"4-6" describe the class edges loosely; the
# reclass bins are (1,3], (3,6], (6,15] -- confirm the legend wording.
rat$level <- c("1-3", "4-6", "> 7")
if(nrow(rat) == 1){
cols <- '#2991c6'
}else{
cols <- grDevices::colorRampPalette(c('#2991c6', '#fafa65', '#e61614'))(nrow(rat))
}
levels(rsin) <- rat
raster::crs(rsin) <- '+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0'
# Figure details
ht <- 12
fct <- (rsin@extent@xmin-rsin@extent@xmax)/(rsin@extent@ymin-rsin@extent@ymax)
wt <- ht*(fct+.1)
# Theme
mThm <- rasterVis::rasterTheme(region = brewer.pal(9, "YlGn"), panel.background = list(col = "white"))
# Map
p <- rasterVis:::levelplot(rsin,
att = 'level',
xlab = 'Longitude',
ylab = 'Latitude',
scales = list(x = list(draw = T), y = list(draw = T)),
margin = F,
par.settings = mThm,
col.regions = cols,
maxpixels = ncell(rsin)) +
latticeExtra::layer(sp.lines(grat, lwd = 0.5, lty = 2, col = "black")) +
latticeExtra::layer(sp.polygons(ssdn, lwd = 0.8, col = "#e9e9e9", fill = "#cccccc"), under = T) +
latticeExtra::layer(sp.polygons(wrld, lwd = 0.8, col = "#e9e9e9", fill = "#cccccc"), under = T)
hght = 3.5
wdth = 10
png("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.png", height = hght, width = wdth, units = "in", res = 300)
print(p)
dev.off()
####### BIPLOT GAP COUNTS AND PERCENTAGE
# Bivariate choropleth: number of crops with gaps (gap richness) vs the
# percentage of overlapping crops that have gaps, drawn with ggplot + biscale.
shp <-shapefile("Z:/gap_analysis_landraces/runs/input_data/shapefiles/new_world_shapefile/ne_50m_admin_0_countries.shp")
shp_no_ant <- shp[shp@data$NAME != "Antarctica", ]
rsdm <- raster("Z:/gap_analysis_landraces/runs/results/sdm_all_crops_sum.tif")
rsin_r <- raster("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif")
rsin <- stars::read_stars("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif")
sf_rsin <- sf::st_as_sf(rsin)
wrld <- raster::shapefile('Z:/gap_analysis_landraces/runs/input_data/shapefiles/shp/all_countries.shp')
grat <- sp::gridlines(wrld, easts = seq(-180, 180, by = 20), norths = seq(-90, 90, by = 20))
# % of crops with gaps per cell = gap richness / number of overlapping SDMs
rsdm_msk <- raster::mask(rsdm, rsin_r)
perct <- (rsin_r/rsdm_msk)*100
# round-trip through a temp GeoTIFF so stars/sf can consume the ratio raster
tmp_file <- "D:/perct.tif"
writeRaster(perct, tmp_file, overwrite = T)
perct <- stars::read_stars(tmp_file)
sf_perct <- sf::st_as_sf(perct)
rsf <- cbind(sf_rsin, sf_perct)
names(rsf)[1:2]<- c("gap_rich", "perct")
# manual 3x3 bivariate classification (richness class x percentage class)
data <- rsf %>%
dplyr::mutate(gap_rich_cat = case_when(
gap_rich >= 0 & gap_rich <= 3 ~ 1,
gap_rich >= 4 & gap_rich <= 7 ~ 2,
gap_rich >= 8 ~ 3
),
perct_cat = case_when(
perct >= 0 & perct <= 33 ~ 1,
perct > 33 & perct <= 66 ~ 2,
perct > 66 ~ 3
),
bi_class = paste0(gap_rich_cat, "-", perct_cat))
#data <- biscale::bi_class(rsf, x = gap_rich , y = perct, style = "equal", dim = 3)
shapefile_df <- broom::tidy(shp_no_ant)
custom_pal <- bi_pal_manual(val_1_1 = "#FDF5D0",
val_1_2 = "#9bcfe4",
val_1_3 = "#2bafe5",
val_2_1 = "#fcb189",
val_2_2 = "#ae978c",
val_2_3 = "#467a8d",
val_3_1 = "#f57334",
val_3_2 = "#a75e3b",
val_3_3 = "#5b473e",
preview = FALSE)
# base country fill, bivariate cells, country outlines, graticule
map <- ggplot() +
geom_polygon(data=shapefile_df, aes(x=long, y=lat, group=group), fill= "#cccccc", color="#cccccc", size=0.2) +
geom_sf(data , mapping = aes(fill=bi_class), colour= NA, show.legend = FALSE) +
geom_polygon(data=shapefile_df, aes(x=long, y=lat, group=group), fill= NA, color="#e9e9e9", size=0.2)+
geom_sf(data = st_as_sf(grat), size = 0.2, linetype = "dashed")+
coord_sf(xlim = c(-170, 170), ylim = c(-58, 83), expand = FALSE) +
xlab("Longitude")+ ylab("Latitude")+
bi_scale_fill(pal = custom_pal, #"GrPink",
dim = 3) +
theme(panel.grid.major = element_blank(), panel.background = element_rect(fill = "white"),
panel.border = element_rect(fill = NA))
legend <- bi_legend(pal = custom_pal, #"GrPink"
dim = 3,
xlab = "# Crop with gaps",
ylab = "% Crop with gaps",
size = 18)
# compose map + inset legend and save to disk
allplot <- ggdraw() +
draw_plot(map, 0, 0, 1, 1) +
draw_plot(legend, 0.02, -0.00009, 0.3, 0.3)
save_plot(paste0("D:/", "bivariate_plot_gap_crichness.png"), base_width=10, base_height=10, allplot,
base_asp = 1.2)
#### grafico de coverage
# Horizontal bar chart of per-crop coverage midpoints with lower/upper
# error bars, ordered by midpoint.
metrics <- do.call(rbind,consolidado@crop$crops_coverage) %>%
dplyr::mutate(mid_point = (lower+ upper)/2,
label = consolidado@crop$name) %>%
tidyr::drop_na()
metrics %>%
ggplot(aes(x = reorder(label, mid_point), y = mid_point))+
geom_bar(stat = "identity", colour="black", fill = "gray70")+
geom_errorbar(aes(ymin=lower, ymax=upper), colour="black", width=.1, position= position_dodge())+
geom_point(position=position_dodge(width = 1), size=3, shape=21, fill="black")+
scale_y_continuous(limits = c(0, 101), breaks = seq(0,100, by= 10))+
ylab("Coverage (%)")+
xlab("")+
coord_flip()+
ggplot2::theme_bw() +
ggplot2::theme(axis.text = element_text(size = 10),
axis.title = element_text(size = 20),
legend.text = element_text(size = 17),
legend.title = element_text(size = 20))
# ggsave() writes the last plot displayed
ggplot2::ggsave(filename = "Z:/gap_analysis_landraces/runs/results/coverage_graph.png", device = "png", width = 8, height = 6, units = "in")
############ descargar datos para los mapas de higcharter
# Scratch section unrelated to the gap analysis: Highcharts geojson download
# plus an epidemiological case-control sampling / Poisson-model exercise.
# NOTE(review): relies on packages not loaded above (geojsonio, haven,
# Matching, and a poissonirr() helper) -- verify before running.
shp1 <- geojson_read("https://code.highcharts.com/mapdata/custom/world-palestine-highres.geo.json", what = "sp")
shp_hc <- shp1@data
require(haven)
data <- haven::read_dta("C:/Users/acmendez/Downloads/base de datos (1).dta")
data$id <- 1:nrow(data)
# cases: consenting case records with valid commune/sex/age, counted by
# age category and sex
casos <- data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo , Deseacontinuarconlaencuesta, id) %>%
filter(casocontrol == 1, Deseacontinuarconlaencuesta == "Sí") %>%
filter(Comuna != "", Sexo != "", !is.na(Edad)) %>%
group_by(catedad2, Sexo) %>%
tally()%>%
rename("casos" = n)
# controls: same filters on casocontrol == 0
controles <- data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo, Deseacontinuarconlaencuesta, id ) %>%
filter(casocontrol == 0, Deseacontinuarconlaencuesta == "Sí") %>%
filter(Comuna != "", Sexo != "", !is.na(Edad)) %>%
group_by(catedad2, Sexo) %>%
tally() %>%
rename("controles" =n)
# target sample sizes per (age category, sex) stratum -- hard-coded
muestra <- c( 26,52, 6, 6)
conteos <- data.frame(casos[-1 ,], controles = controles$controles[-c(1,2) ], muestra)
# draw the stratified random sample of controls
elegidos <-apply(conteos, 1, function(i){
cat(i[1], "\n")
edad <- as.numeric(i[1])
s <- as.character(i[2])
n <- as.numeric(i[5])
to_sample <- which(data$catedad2 == edad & data$Sexo == s & data$Deseacontinuarconlaencuesta == "Sí" & data$casocontrol == 0)
muestra <- sample(to_sample, size = n, replace = F)
return(muestra)
})
data$elegidos <- 0
data$elegidos[unlist(elegidos)] <- 1
# copy the selection flag to the Windows clipboard for pasting into Excel
write.table(data$elegidos, "clipboard", sep = "\t")
data %>%
filter(casocontrol == 1 | elegidos == 1) %>%
filter(catedad2 != 0) %>%
filter(Deseacontinuarconlaencuesta == "Sí") %>%
writexl::write_xlsx(., "C:/Users/acmendez/Google Drive/TRABAJOS UNIVALLE/semestre 3/epi aplicada 1/base_covid_filtrada.xlsx")
data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo, Deseacontinuarconlaencuesta, id ) %>%
filter(casocontrol == 0, Deseacontinuarconlaencuesta == "Sí")
# propensity-score style matching of controls to cases (Matching::Match)
bd <- data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo ) %>%
dplyr::mutate(Comuna = as.numeric(Comuna), id = 1:nrow(.) ) %>%
filter(!is.na(Comuna), Sexo != "", !is.na(Edad))
id <- bd$id
glm1 <- bd %>%
glm(casocontrol ~ catedad2 + Comuna + Sexo, data = ., family = binomial(link = "log"))
X <- glm1$fitted.values
rr <- Match(Tr= bd$casocontrol, X= X, M=1)
rr$index.control %>% length()
bd$casocontrol %>% table
which(bd$casocontrol == 1) %in% rr$index.control
bd$elegidos <- "No elegidos"
bd[rr$index.control, "elegidos"] <- "Control"
nrow(data )
casos %>%
dplyr::left_join(., controles, by = c("Comuna" = "Comuna"))
# NOTE(review): names() takes a single argument -- this call is invalid as
# written; `names(data)` was probably intended.
names(data, edad)
# Poisson incidence models by age (and heart-disease flag interaction)
data$edad_cat <- as.numeric(as.factor(data$edad))
data$edad <- as.factor(data$edad)
data$hriaca <- as.factor(data$hriaca)
l1 <- glm(casos~ edad , offset = log(poblacion),data = data, family= poisson(link = "log"))
summary(l1)
l2 <- poissonirr(casos~ edad + hriaca +edad*hriaca + offset(log(poblacion)) ,data = data)
pr <- predict(l1, data = data[, -1]) %>% exp()
data$inc_edad <- data$casos/ data$poblacion
sum <- data %>%
dplyr::mutate(pred = pr,
inc_pred = pred/poblacion)
# group_by(edad) %>%
# dplyr::summarise(n = sum(casos),
# pob = sum(poblacion),
# n_pred = sum(pred)) %>%
# ungroup() %>%
# mutate(edad_cat = 1:6,
# inc_pred = n_pred/pob,
# inc_edad = n/pob)
# observed vs predicted incidence by age, for hriaca == 0
sum %>%
dplyr::select(edad_cat, inc_edad, inc_pred, hriaca ) %>%
dplyr::filter(hriaca ==0 ) %>%
dplyr::select(-hriaca) %>%
pivot_longer(cols = - c(edad_cat)) %>%
ggplot()+
geom_line(aes(x = edad_cat , y = value, colour = name))
| /04_miscellanous/prepare_final_data.R | no_license | CIAT-DAPA/gap_analysis_landraces | R | false | false | 39,690 | r | ## Author: Andres Camilo Mendez Alzate
##### script to extract data from a shapefile
pacman::p_load(raster,sp, exactextractr, sf, tidyverse,lattice, rasterVis,
maptools, latticeExtra, sp, RColorBrewer, future, furrr, data.tree,
collapsibleTree, stringi, writexl, biscale, cowplot, stars)
### ftp://ftp.ciat.cgiar.org/DAPA/Projects/shared_results/
### function para calcular metricas por cada cultivo
# Compute gap-coverage metrics for one crop from its per-race gap-class rasters.
#
# Args:
#   i           character vector of paths to gap_class rasters
#               (cell classes: 3 = upper-bound gap, 2 = lower-bound gap).
#   file_path   output path for the gap-richness raster; required when
#               save.raster = TRUE.
#   save.raster if TRUE, write the per-cell gap-richness raster to file_path.
#
# Returns a one-row data.frame: lower/upper coverage percentages and a status
# flag ("Done", or "ERROR" with NA metrics when any raster step fails).
gap_crop_coverage <- function(i, file_path = NULL, save.raster = FALSE ){
  # Fail fast on an invalid argument combination. The original check ran only
  # after all raster work was done, and its stop() was swallowed by tryCatch()
  # into a generic ERROR row.
  if (save.raster && is.null(file_path)) {
    stop("If save.raster == TRUE, file path must be provided.", call. = FALSE)
  }
  # reclassification matrix: (0,2] -> 0 (no gap), (2,3] -> 1 (gap)
  mt <- matrix(c(0, 2, 0, 2, 3, 1), 2, 3, byrow = TRUE)
  status <- tryCatch(expr = {
    # load the per-race rasters
    rst <- lapply(i, raster)
    # upper-bound gaps: cells classified 3 in at least one race
    upper <- lapply(rst, function(k){
      k[k[] != 3] <- NA
      return(k)
    }) %>%
      raster::stack(.) %>%
      sum(., na.rm = TRUE)
    upper[upper[] == 0] <- NA
    # lower-bound gaps: cells classified >= 2 in at least one race
    lower <- lapply(rst, function(k){
      k[k[] < 2] <- NA
      return(k)
    }) %>%
      raster::stack(.) %>%
      sum(., na.rm = TRUE)
    lower[lower[] == 0] <- NA
    # total modelled area: union of all race extents (+1 keeps zero cells counted)
    area_tot <- raster::stack(rst) + 1
    area_tot <- sum(area_tot, na.rm = TRUE)
    area_tot[area_tot == 0] <- NA
    # coverage = 100 - (% of modelled cells flagged as gaps)
    covg_upp <- 100 - length(which(!is.na(upper[])))/length(which(!is.na(area_tot[])))*100
    covg_low <- 100 - length(which(!is.na(lower[])))/length(which(!is.na(area_tot[])))*100
    if (save.raster) {
      # gap richness: number of races with a gap in each cell (computed only
      # when it is actually written -- it fed nothing else)
      rich <- rst %>%
        raster::stack(.) %>%
        reclassify(., mt) %>%
        sum(., na.rm = TRUE)
      rich[rich[] == 0] <- NA
      writeRaster(rich, file_path, overwrite = TRUE, NAflag = 0)
    }
    data.frame(lower = covg_low, upper = covg_upp, status = "Done", stringsAsFactors = FALSE)
  }, error = function(e){
    data.frame(lower = NA, upper = NA, status = "ERROR", stringsAsFactors = FALSE)
  })
  return(status)
}
# Load the world shapefile (including disputed territories) and keep only the
# attribute columns used downstream.
shp <- shapefile("Z:/gap_analysis_landraces/runs/input_data/shapefiles/new_world_shapefile/ne_50m_admin_0_countries.shp")
keep_cols <- c("LEVEL", "ADMIN", "ADM0_A3", "NAME", "NAME_LONG", "FORMAL_EN",
               "POP_EST", "POP_RANK", "GDP_MD_EST", "FIPS_10_", "ISO_A2",
               "ISO_A3", "ADM0_A3_IS", "CONTINENT", "SUBREGION", "REGION_WB")
shp@data <- dplyr::select(shp@data, dplyr::all_of(keep_cols))
##############################################################
######## 1. compute gap metrics for all crops ################
##############################################################
root_dir <- "Z:/gap_analysis_landraces/runs/results/"
# get crop folder paths (one lvl_1 results folder per crop)
crops_paths <- data.frame(pth = paste0(list.dirs(path = root_dir, full.names = T, recursive = F), "/lvl_1") ,
name = list.dirs(path = root_dir, full.names = F, recursive = F),
stringsAsFactors = F)
# get race folder paths
baseDir <- "Z:/gap_analysis_landraces/runs"
crops <- list.dirs(paste0(baseDir, "/results"), full.names = F, recursive = F)
# One row per (crop, race): path to the final gap-class raster, or "Not_found"
gap_maps_pths <- lapply(crops, function(i){
pths <- list.files(paste0(baseDir, "/results/", i, "/lvl_1"),
pattern = "gap_class_final_new.tif$",
recursive = T,
full.names = T)
if(length(pths) == 0){
pths <- "Not_found"
race_name <- NA_character_
}else{
# the race name is the first folder below lvl_1 in each path
race_name <- stringr::str_extract(pths , pattern = "(lvl_1)(/)*[a-zA-Z0-9_-]+") %>%
gsub(pattern = "(lvl_1)(/)*|(/)", replacement = "", .)
}
pths <- as.character(pths)
return(data.frame(paths = pths, crop_name = i, race_name = race_name))
}) %>%
dplyr::bind_rows() %>%
tibble::as_tibble() %>%
# NOTE(review): as.character() here is a *call* (evaluates to character(0)),
# not the function `as.character` -- confirm this line behaves as intended
dplyr::mutate_all(., as.character())
# gap_maps_pths %>%
# do.call(rbind, .) %>%
# write.csv(., "D:/LGA_dashboard/www/crop_races_paths.csv", row.names = F)
#gap_maps_pths <- read.csv("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/crop_races_paths.csv", header = T, stringsAsFactors = F)
#metrics <- read.csv("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/coverage_metrics.csv", header = T, stringsAsFactors = F)
shp <-shapefile(paste0(baseDir, "/input_data/shapefiles/new_world_shapefile/ne_50m_admin_0_countries.shp"))
# NOTE(review): patches exactly seven "-99" ISO2 codes in shapefile row order;
# this breaks silently if the shapefile version/ordering changes
shp@data$ISO_A2[shp@data$ISO_A2 == "-99"] <- c("SX", "NO", "FR", "NC", "-99", "-99", "-99")
shp@data <- shp@data %>%
dplyr::select(LEVEL, ADMIN, ADM0_A3, NAME, NAME_LONG, FORMAL_EN, POP_EST,POP_RANK, GDP_MD_EST, FIPS_10_, ISO_A2, ISO_A3, ADM0_A3_IS,
CONTINENT, SUBREGION, REGION_WB)
shp_sf <- sf::st_as_sf(shp)
# S4 container holding every result produced below
setClass("results", slots = list(race = "list",
crop = "list",
combined_results = "data.frame",
diversity_tree = "data.frame",
country_area = "data.frame",
coverage_totals = "data.frame"))
consolidado <- new("results", race = list(), crop = list(), combined_results = data.frame(), diversity_tree = data.frame(), country_area = data.frame(), coverage_totals = data.frame())
# Per-race metrics, computed in parallel: coverage bounds from the gap-class
# raster, mean validation AUC, and per-country gap/SDM areas.
future::plan(future::multisession, workers = 10)
consolidado@race <- gap_maps_pths %>%
filter(!grepl("old", crop_name) & !grepl("old", race_name) & !is.na(race_name)) %>%
dplyr::mutate(summary_metrics_race = furrr::future_map(.x = paths, .f = function(.x){
cat("Processing: ", .x, "\n")
if(.x != "Not_found"){
covg <- tryCatch(expr = {
rs_gaps <- raster::raster(.x)
# coverage = 100 - % of modelled cells flagged as gap (3 = upper, >=2 = lower)
covg_ul <- 100 - length(which(rs_gaps[]==3))/length(which(!is.na(rs_gaps[])))*100
covg_ll <- 100 - length(which(rs_gaps[]>=2))/length(which(!is.na(rs_gaps[])))*100
data.frame(covg_ul, covg_ll)
}, error = function(e){
covg_ul <- NA
covg_ll <- NA
data.frame(covg_ul, covg_ll)
})
# average AUC (as %) across the gap-validation CSVs for this race
auc_avg <- list.files(paste0(.x, "/../../gap_validation/buffer_100km"), full.names = TRUE, pattern = ".csv$") %>%
purrr::map(.x = ., function(.x){
val <- tryCatch(expr = {
val <- read.csv(.x, header = T, stringsAsFactors = F) %>%
filter(pnt == "Mean") %>%
pull(auc.mean)
}, error = function(e){
val <- NA
})
return(val*100)
}) %>%
unlist() %>%
mean(., na.rm = T)
res <- data.frame(lower = covg$covg_ll, upper = covg$covg_ul, auc_avg = auc_avg)
}else{ res <- data.frame(lower = NA, upper = NA, auc_avg = NA)}
return(res)
}),
gap_area_country_race = furrr::future_pmap(.l = list(x = paths, y = crop_name, z = race_name), .f = function(x, y, z){
cat("Calculating area for: ", y, "-", z, "\n" )
rst <- raster::raster(x)
gp_area_min <- rst
gp_area_max <- rst
total_area <- rst
# max gaps: class 3 only; min gaps: classes 2 and 3; total: full SDM extent
gp_area_max[gp_area_max[] != 3] <- NA
gp_area_min[ !(gp_area_min[] %in% c(2,3))] <- NA
total_area[!is.na(total_area)] <- 1
# per-cell areas masked to each layer, then summed per country polygon
gap_area_min <- raster::area(gp_area_min) %>%
raster::mask(., mask = gp_area_min) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
gap_area_max <- raster::area(gp_area_max) %>%
raster::mask(., mask = gp_area_max) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
total_area <- raster::area(total_area) %>%
raster::mask(., mask = total_area) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
name <- paste0(y, ".", z)
# NOTE(review): "gap_area_maxn" looks like a typo for "gap_area_max"; the
# suffixed column names are consumed downstream, so confirm before renaming
res <- tibble(country= shp@data$ADMIN,
ISO2 = shp@data$ISO_A2,
gap_area_min = unlist(gap_area_min),
gap_area_maxn = unlist(gap_area_max),
sdm_area = unlist(total_area)) %>%
dplyr::mutate(coverage_lower = round(1-gap_area_min/sdm_area, 3),
coverage_upper = round(1-gap_area_max/sdm_area, 3),
coverage = round((coverage_lower+coverage_upper)/2, 3) )
suppressWarnings( names(res)[3:ncol(res)] <- paste0(names(res[3:ncol(res)]), "_", y, "_", z ) )
return(res)
})
)# end consolidado race
## Per-crop consolidation: gap maps, coverage, accession counts and areas
consolidado@crop <- crops_paths %>%
as_tibble %>%
dplyr::mutate(
# paths to all per-race final gap maps for this crop ("Not_found" if none)
final_map_paths = furrr::future_map(.x = pth, .f = function(.x){
pths <- list.files(path = as.character(.x), recursive = T, full.names = T, pattern = "gap_class_final_new.tif$")
pths <- pths[!grepl(pattern = "old", pths)]
if(length(pths) == 0){
pths <- "Not_found"
}
return(pths)
}),
# crop-level coverage metrics + gap-richness raster (written as side effect)
crops_coverage = furrr::future_pmap(.l = list(.x = final_map_paths, .y = name, .z = pth), .f = function(.x, .y, .z){
cat("Caculating coverage for: ", .x, " \n")
pths <- unlist(.x)
if(("Not_found" %in% pths)){x <- NA}else{
file_path <- paste0(as.character(.z),"/gap_richness_", .y, ".tif")
x <- gap_crop_coverage(i = pths, file_path = file_path, save.raster = T)}
return(x)
}),
# per-country gap/SDM areas and coverage, pooled over races of this crop
country_coverage = furrr::future_map(.x = final_map_paths, .f = function(.x){
pths <- unlist(.x)
if(("Not_found" %in% pths)){
x <- tibble(country = NA_character_, ISO2 = NA_character_, gap_area = NA, sdm_area = NA, coverage = NA)
}else{
rst <- pths %>%
lapply(., raster)
# binary masks: class-3 gaps (max), class 2-3 gaps (min), total extent
gap_max <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_min <- lapply(rst, function(k){
rst[!(rst[] %in% c(2,3))] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_max[gap_max == 0] <- NA
gap_max[!is.na(gap_max)] <- 1
gap_min[gap_min == 0] <- NA
gap_min[!is.na(gap_min)] <- 1
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
area_tot[!is.na(area_tot)] <- 1
# NOTE: these sums count *cells* per country, not km^2
gp_area_min <- exactextractr::exact_extract(gap_min, shp_sf, fun = 'sum', progress = FALSE)
gp_area_max <- exactextractr::exact_extract(gap_max, shp_sf, fun = 'sum', progress = FALSE)
sdm_tot <- exactextractr::exact_extract(area_tot, shp_sf, fun = 'sum', progress = FALSE)
x <- tibble(country = shp@data$NAME,
ISO2 = shp@data$ISO_A2,
gap_area_min = gp_area_min,
gap_area_max = gp_area_max,
sdm_area = sdm_tot) %>%
dplyr::mutate_if(is.numeric, round) %>%
dplyr::mutate(coverage_min = 1 - (gap_area_min/sdm_area),
coverage_max = 1 - (gap_area_max/sdm_area),
coverage = round((coverage_min+coverage_max)/2, 3) ,
coverage = ifelse(is.nan(coverage), 0, coverage))
}
return(x)
}),
# accession frequency tables from the crop's identifiers CSV
accessions_count = furrr::future_map(.x = name, .f = function(.x) {
cat("Calculating metrics for: ", .x, "\n")
crop_db_path <- paste0(baseDir, "/input_data/by_crop/", .x, "/lvl_1/classification/", .x, "_bd_identifiers.csv")
if(file.exists(crop_db_path)){
identifers <- suppressMessages( readr::read_csv(crop_db_path))
used_freq <- identifers %>%
group_by(ensemble, used) %>%
tally() %>%
dplyr::mutate(freq_in_gr = n/sum(n)) %>%
ungroup %>%
dplyr::mutate(total = sum(n),
total_used = identifers %>% filter(used) %>% nrow(),
freq_overall = n/sum(n)
)
source_db_summ <- identifers %>%
dplyr::filter(used) %>%
group_by(ensemble, source_db) %>%
tally() %>%
dplyr::mutate(freq_in_gr = n/sum(n) ) %>%
ungroup() %>%
dplyr::mutate(source_db = toupper(source_db), freq_overall = n/sum(n))
source_db_freq <- identifers %>%
dplyr::filter(used) %>%
group_by( source_db) %>%
tally() %>%
dplyr::mutate(source_db = toupper(source_db),freq_in_gr = n/sum(n) )
status_db_freq <- identifers %>%
dplyr::filter(used) %>%
group_by( status) %>%
tally() %>%
dplyr::mutate(status = toupper(status),freq_in_gr = n/sum(n) )
res <- list("used_freq" = used_freq, "source_db_summ" = source_db_summ, "source_db_freq" = source_db_freq, "status_db_freq" = status_db_freq)
}else{
res <- list(used_freq = NA, source_db_summ = NA, source_db_freq = NA, status_db_freq = NA)
}
return(res)
}),
# number of used accessions falling in each country polygon
accessions_count_per_country = furrr::future_map(.x = name, .f = function(.x) {
crop_db_path <- paste0(baseDir, "/input_data/by_crop/", .x, "/lvl_1/classification/", .x, "_bd_identifiers.csv")
if(file.exists(crop_db_path)){
identifers <- suppressMessages( readr::read_csv(crop_db_path))
used_occ <- identifers %>%
dplyr::filter(used) %>%
dplyr::select(matches("latit"), matches("long"))
# NOTE(review): assumes the columns are literally named Longitude/Latitude --
# confirm against the identifiers CSV schema
coordinates(used_occ) <- ~Longitude+Latitude
crs(used_occ) <- crs(shp)
ext <- suppressWarnings(raster::extract(shp, used_occ)) %>%
dplyr::select(NAME, ISO_A2) %>%
dplyr:::group_by(NAME, ISO_A2) %>%
dplyr::tally() %>%
dplyr::ungroup() %>%
dplyr::select("country" = NAME, "ISO2" = ISO_A2, occ_count = n)
res <- tibble(country = shp@data$NAME, ISO2 = shp@data$ISO_A2 ) %>%
left_join(., ext, by = c("country", "ISO2"))
}else{
res <- tibble(country = NA, ISO2 = NA, occ_count = NA)
}
return(res)
}),
# per-country area (km^2) of the crop's gap-richness raster + 75th-pct flag
gap_area_country_crop = furrr::future_map2(.x = pth, .y = name, .f = function(.x, .y){
pths <- list.files(path = as.character(.x), recursive = F, full.names = T, pattern = "^gap_richness_[a-zA-z0-9]*.tif$")
pths <- pths[!grepl(pattern = "old", pths)]
if(length(pths) == 0){
pths <- "Not_found"
res <- tibble(country= NA, ISO2 =NA, area = NA )
}else if(length(pths) >= 2){
warning("More than one gap richness map found, using the first one \n")
pths <- pths[1]
}
if(pths != "Not_found"){
cat("Calculating area for: ", .y, "\n" )
rst <- raster::raster(pths)
gap_area_rst <- raster::area(rst) %>%
raster::mask(., mask = rst) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE)
name <- paste0(.y)
ar <- unlist(gap_area_rst)
qq <- quantile(ar[ar != 0], probs = 0.75)
res <- tibble(country= shp@data$ADMIN,
ISO2 = shp@data$ISO_A2,
area = ar) %>%
dplyr::mutate(quantile = ifelse(ar >= qq, 1, 0)) #Julian's method for prioritizing
suppressWarnings( names(res)[3:4] <- c(name , paste0(name, ".quantile")) )
}
return(res)
})
) #end mutate
future::plan(future::sequential)
# Combine spatial gap priorities (75th-percentile flags per crop) with the
# diversity-tree priorities into one per-country score.
country_gp <- consolidado@crop$gap_area_country_crop
# drop crops whose area table contains NA (no gap-richness raster found)
country_gp <- country_gp[!sapply(country_gp, function(x) any(is.na(x)))]
country_gp<- base::Reduce(dplyr::full_join, country_gp)
sgp_priorities <- country_gp %>%
dplyr::select(country, ISO2, matches("quantile"))
# spatial score = number of crops for which this country is in the top quartile
sgp_priorities$suma_spatial <- sgp_priorities[, 3:ncol(sgp_priorities)] %>% rowSums()
sgp_priorities <- sgp_priorities %>%
dplyr::select(country, ISO2, suma_spatial)
dv_tree_gp <- read_csv(paste0(baseDir, "/LGA_documents/diversity_tree_results/diversity_tree_priorities.csv"))
consolidado@diversity_tree <- dv_tree_gp
# diversity-tree score = row sum over the 12 priority columns (cols 4:15)
dv_tree_gp$suma_dvTree <- dv_tree_gp[, 4:15] %>% rowSums()
dv_tree_priorities <- dv_tree_gp %>%
dplyr::select(Country, suma_dvTree) %>%
dplyr::mutate(Country_id = stringi::stri_trans_general(str = Country, id = "Latin-ASCII"),
Country_id = tolower(Country_id))
# combining spatial results with diversity tree priorities (accent-insensitive
# lowercase country name as the join key)
consolidado@combined_results <- sgp_priorities %>%
dplyr::mutate(country_id = stringi::stri_trans_general(str = country, id = "Latin-ASCII"),
country_id = tolower(country_id)) %>%
dplyr::left_join(., dv_tree_priorities, by = c("country_id" = "Country_id")) %>%
dplyr::select(country, ISO2, suma_spatial, suma_dvTree) %>%
dplyr::mutate(Total = rowSums(.[,3:4], na.rm = T))
#save countries area
consolidado@country_area <- shp@data %>%
dplyr::select("country" = NAME, "ISO2" = ISO_A2) %>%
dplyr::mutate(area_tot = round(raster::area(shp)/1000000)) %>%
as_tibble()
#Calculate country gap area for gap richnnes world map
# Build, per crop, binary rasters for min/max gaps and total SDM extent.
# NOTE(review): the "Not_found" branch names its field `gap`, while the success
# branch uses `gap_min`/`gap_max` -- downstream code testing k$gap therefore
# never matches a successful crop.
tmp_maps <- consolidado@crop %>%
dplyr::mutate(temp_maps = purrr::map2(.x = final_map_paths, .y = name, .f = function(.x, .y){
pths <- unlist(.x)
if(("Not_found" %in% pths)){
ret <- list(gap = NA, sdm_area = NA)
}else{
rst <- pths %>%
lapply(., raster)
# max gaps: class 3; min gaps: classes 2-3; then binarise to presence/absence
gap_max <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_min <- lapply(rst, function(k){
rst <- k
rst[!(rst[] %in% c(2, 3))] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
gap_max[gap_max == 0] <- NA
gap_max[!is.na(gap_max)] <- 1
gap_min[gap_min == 0] <- NA
gap_min[!is.na(gap_min)] <- 1
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
area_tot[!is.na(area_tot)] <- 1
ret <- list(gap_min = gap_min, gap_max = gap_max, sdm_area = area_tot)
# free raster's temporary files accumulated during the stack/sum steps
removeTmpFiles(h=0)
}#end else
return(ret)
})) %>%
dplyr::pull(temp_maps)
# Crop each per-crop raster to a common global extent before mosaicking, so
# raster::mosaic() receives aligned layers.
sum_sdm <- lapply(tmp_maps, function(k){
  if("Raster" %in% is(k$sdm_area)){
    ret <- k$sdm_area %>%
      raster::crop(x = ., y = extent(c(-180, 180, -59.5, 83.625)) )
  }else{ ret <- k$sdm_area}
  return(ret)
})
# BUG FIX: the next two blocks originally tested is(k$gap), a field that only
# exists for crops with no gap map (list(gap = NA, ...)); for successful crops
# the fields are gap_min/gap_max, so the crop step was silently skipped.
# Test the field that is actually extracted.
sum_gap_min <- lapply(tmp_maps, function(k){
  if("Raster" %in% is(k$gap_min)){
    ret <- k$gap_min %>%
      raster::crop(x = ., y = extent(c(-180, 180, -59.5, 83.625)) )
  }else{ ret <- k$gap_min}
  return(ret)
})
sum_gap_max <- lapply(tmp_maps, function(k){
  if("Raster" %in% is(k$gap_max)){
    ret <- k$gap_max %>%
      raster::crop(x = ., y = extent(c(-180, 180, -59.5, 83.625)) )
  }else{ ret <- k$gap_max}
  return(ret)
})
#rm(tmp_maps)
# Mosaic the per-crop layers into global totals; "fun"/"na.rm" list entries are
# consumed by do.call(raster::mosaic, ...) as arguments.
sum_sdm <- sum_sdm[!is.na(sum_sdm)]
sum_sdm$fun <- sum
sum_sdm$na.rm <- TRUE
sdm_total <- do.call(raster::mosaic, sum_sdm)
sdm_total[sdm_total[] == 0] <- NA
writeRaster(sdm_total, paste0(baseDir, "/results/sdm_all_crops_sum.tif"), overwrite = T)
#rm(sum_sdm)
sum_gap_min <- sum_gap_min[!sapply(sum_gap_min, is.null, simplify = TRUE)]
sum_gap_min$fun <- sum
sum_gap_min$na.rm <- TRUE
gap_total_min <- do.call(raster::mosaic, sum_gap_min)
gap_total_min[gap_total_min[] == 0] <- NA
#rm(sum_gap_min)
sum_gap_max <- sum_gap_max[!sapply(sum_gap_max, is.null, simplify = TRUE)]
sum_gap_max$fun <- sum
sum_gap_max$na.rm <- TRUE
gap_total_max <- do.call(raster::mosaic, sum_gap_max)
gap_total_max[gap_total_max[] == 0] <- NA
#rm(sum_gap_max)
# per-country totals (km^2) of min/max gap area and modelled SDM area
consolidado@coverage_totals <- tibble( country = shp@data$NAME,
ISO2 = shp@data$ISO_A2,
gap_total_min = raster::area(gap_total_min) %>%
raster::mask(., gap_total_min) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE) %>%
round() ,
gap_total_max = raster::area(gap_total_max) %>%
raster::mask(., gap_total_max) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE) %>%
round() ,
sdm_total = raster::area(sdm_total) %>%
raster::mask(., sdm_total) %>%
exactextractr::exact_extract(., shp_sf, fun = 'sum', progress = FALSE) %>%
round())
#########################################
### COMPUTE THE GAP RICHNESS MAP ########
########################################
# Mosaic all per-crop gap-richness rasters (binarised to gap presence) into a
# single "number of crops with a gap" world raster, then persist everything.
if(TRUE){
maps_pths <- consolidado@crop$pth %>%
list.files(., pattern = "^gap_richness_[a-zA-z0-9]*.tif$", full.names = T)
#list.files("Z:/gap_analysis_landraces/runs/results", pattern = "^gap_richness_[a-zA-z0-9]*.tif$", recursive = T)
#list.files("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/scripts", pattern = ".tif$", full.names = T)
msk <- raster("Z:/gap_analysis_landraces/runs/input_data/mask/mask_world.tif")
rst <- lapply(maps_pths, function(i){
cat("Processing:", i, "\n")
r <- raster(i)
# binarise: any positive richness -> 1 (crop has a gap in this cell)
r[r[] > 0] <- 1
r[r[] <= 0] <- NA
r <- raster::crop(r, extent(msk))
return(r)
})
rst$fun <- sum
rst$na.rm <- TRUE
gp_rich <- do.call(mosaic, rst)
gp_rich[gp_rich[] == 0] <- NA
writeRaster(gp_rich, "Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif", overwrite =T)
}
save(consolidado, file = paste0(baseDir, "/results/results_all_crops.RData"))
#####################################################
############## update table metrics in FTP server
####################################################
root_ftp <- "Z:/gap_analysis_landraces/shared_results"
## Race coverage metrics: one xlsx per (crop, race) in the shared FTP tree
for(i in 1:nrow(consolidado@race)){
var <- consolidado@race %>%
slice(i) %>%
dplyr::select(crop_name ,race_name, summary_metrics_race, gap_area_country_race)
cat("Extracting metrics for", as.character(var$crop_name), "... in group:",as.character(var$race_name), "\n")
ftp_pth <- paste0(root_ftp, "/spatial_lga/", var$crop_name,"/results/", var$race_name)
# positions 1/2 of the unlisted metrics row are lower/upper coverage
mtrs <- tibble(coverage_lower = unlist(var$summary_metrics_race)[1],
coverage_upper = unlist(var$summary_metrics_race)[2]) %>%
dplyr::mutate(coverage = round((coverage_lower+coverage_upper)/2, 3))
# replace NaN (0/0 coverage ratios) with 0 before export
covg_mtrs <- var$gap_area_country_race %>%
purrr::pluck(., 1) %>%
dplyr::mutate_if(is.numeric,
function(i){
x <- ifelse(is.nan(i), 0, i)
return(x)
})
lst <- list(coverage_metrics = mtrs , country_coverage = covg_mtrs)
writexl::write_xlsx(lst, path = paste0( ftp_pth,"/",as.character(var$crop_name), "_" ,as.character(var$race_name), "_coverage", ".xlsx"))
}#end for
## Crop-level coverage metrics: one xlsx per crop
for(i in 1:nrow(consolidado@crop)){
var <- consolidado@crop %>%
dplyr::slice(i) %>%
dplyr::select(name, crops_coverage, country_coverage)
cat("Extracting metrics for", as.character(var$name), "... ", "\n")
ftp_pth <- paste0(root_ftp, "/spatial_lga/", var$name,"/results/")
if(dir.exists(ftp_pth)){
mtrs <- tibble(coverage_lower = as.numeric(purrr::pluck(var$crops_coverage, 1)[1]),
coverage_upper = as.numeric(purrr::pluck(var$crops_coverage, 1)[2])) %>%
dplyr::mutate(coverage = round((coverage_lower+coverage_upper)/2, 3))
covg_mtrs <- var$country_coverage %>%
purrr::pluck(., 1) %>%
dplyr::mutate_if(is.numeric,
function(i){
x <- ifelse(is.nan(i), 0, i)
return(x)
})
lst <- list(coverage_metrics = mtrs , country_coverage = covg_mtrs)
writexl::write_xlsx(lst, path = paste0( ftp_pth,as.character(var$name), "_coverage", ".xlsx"))
}
}#end for
######### Scratch checks with maps (local paths; exploratory)
msk <- raster("C:/Users/acmendez/Google Drive/CIAT/hidden_project/Gap_analysis_UI_original/www/masks/mask_world.tif")
gap_rich <- raster("D:/OneDrive - CGIAR/Attachments/Desktop/gap_richness_map.tif")
shp <- raster::shapefile("C:/Users/acmendez/Google Drive/CIAT/hidden_project/Gap_analysis_UI_original/www/world_shape_simplified/all_countries_simplified.shp")
# per-country total and gap areas (km^2) plus max gap richness
total_area <- raster::area(msk) %>%
raster::mask(., mask = msk) %>%
exactextractr::exact_extract(., st_as_sf(shp), fun = 'sum')
gap_area_rst <- raster::area(gap_rich) %>%
raster::mask(., mask = gap_rich) %>%
exactextractr::exact_extract(., st_as_sf(shp), fun = 'sum')
gap_count <- exactextractr::exact_extract(gap_rich, st_as_sf(shp), fun = "max")
# NOTE(review): download_map_data()/get_data_from_map() come from highcharter,
# which is not in the p_load() call at the top -- confirm it is attached
vth <- download_map_data("https://code.highcharts.com/mapdata/custom/world-palestine-highres.js" )
vth <- get_data_from_map(vth)
hcmp_data<- vth %>%
dplyr::select(`hc-a2`, name, subregion, `iso-a3` , `iso-a2`)
gap_data <- shp@data %>%
dplyr::select(ISO2, ISO3, NAME) %>%
dplyr::mutate(area_total = round(total_area, 0), gap_area = round(gap_area_rst, 0),
gap_count = round(gap_count,0)) %>%
as_tibble
# join Highcharts map units with the gap summaries and export a CSV template
hcmp_data %>%
dplyr::left_join(., gap_data, by = c("iso-a2" = "ISO2")) %>%
dplyr::mutate(area_total = ifelse(is.na(area_total ), 0, area_total ),
gap_area = ifelse(is.na(gap_area),0, gap_area),
gap_count = ifelse(is.na(gap_count),0, gap_count)) %>%
dplyr::mutate(area_freq = round(gap_area/area_total, 1),
area_freq = ifelse(is.nan(area_freq), 0, area_freq)) %>%
write.csv(., "D:/OneDrive - CGIAR/Attachments/Desktop/gaps_summary_template.csv", row.names = F)
nrow(gap_data)
nrow(hcmp_data)
#### Diversity tree template: read a genepool tree (JSON), flatten it into a
#### level-per-column data frame and build an edge list for a collapsible tree.
x <- readLines("C:/Users/acmendez/Downloads/Cowpea genepool.tree.json") %>% fromJSON(txt =., simplifyDataFrame = F)
repos <- as.Node(x)
# BUG FIX: a stray trailing "/" turned print(...) / plot(...) into a division
print(repos, "id", "login")
plot(repos)
tree_df <- ToDataFrameTypeCol(repos)
# BUG FIX: grepl() was applied to the data frame itself (its deparsed column
# contents); the intent -- dropping "children" bookkeeping columns -- needs the
# column NAMES. Confirm against the JSON structure if any column was dropped.
tree_df <- tree_df[, !grepl("children", names(tree_df))]
names(tree_df) <- paste0("level_", seq_len(ncol(tree_df)))
tree_df <- as_tibble(tree_df)
# Parent -> child edge list, one data frame per adjacent pair of levels
# (the original while loop skipped its body on the last iteration anyway).
edgelist <- list()
for (i in seq_len(ncol(tree_df) - 1)) {
  lvl1 <- paste0("level_", i)
  lvl2 <- paste0("level_", i + 1)
  # .data pronoun instead of the original eval(parse(text = ...)) pattern
  edgelist[[i]] <- tree_df %>%
    dplyr::group_by(start = .data[[lvl1]], end = .data[[lvl2]]) %>%
    dplyr::tally() %>%
    dplyr::ungroup() %>%
    dplyr::select(start, end)
}
edgelist <- edgelist %>%
  bind_rows()
# Root row (start = NA) is required by collapsibleTreeNetwork()
edgelist %>%
  dplyr::filter(!is.na(end)) %>%
  dplyr::add_row(start = NA, end = unlist(edgelist[1,1]), .before = 1 ) %>%
  dplyr::mutate(collected = "") %>%
  collapsibleTree::collapsibleTreeNetwork(., attribute = "collected", fontSize = 7)
plot(as.igraph(repos, directed = TRUE, direction = "climb"))
##### calculate gap richness for each crop
# NOTE(review): this chunk duplicates the logic of gap_crop_coverage() defined
# above (older apply-based version kept for reference); prefer the function.
root_dir <- "Z:/gap_analysis_landraces/runs/results/"
crops_paths <- data.frame(pth = paste0(list.dirs(path = root_dir, full.names = T, recursive = F), "/lvl_1") ,
name = list.dirs(path = root_dir, full.names = F, recursive = F),
stringsAsFactors = F)
cov_mtrs <- apply(crops_paths,1, function(i){
cat("Calculating Gap richness for crop: ", i[2], "\n")
# reclass matrix: (0,2] -> 0 (no gap), (2,3] -> 1 (gap)
mt <- matrix(c(0, 2, 0, 2, 3, 1), 2,3, byrow = T)
x<-tryCatch({
rst <- list.files(path = as.character(i[1]), recursive = T, full.names = T, pattern = "gap_class_final_new.tif$")
rst <- rst[!grepl(pattern = "old", rst)]
if(length(rst) == 0){
stop("No file found")
}
# load the per-race rasters
rst <- rst %>%
lapply(., raster)
# upper-bound gaps: class 3 in any race
upper <- lapply(rst, function(k){
rst <- k
rst[rst[] != 3] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
upper[upper[] == 0] <- NA
# lower-bound gaps: class >= 2 in any race
lower <- lapply(rst, function(k){
rst <- k
rst[rst[] < 2] <- NA
return(rst)
}) %>%
raster::stack(.) %>%
sum(., na.rm = T)
lower[lower[] == 0] <- NA
area_tot <- rst %>%
raster::stack(.) +1
area_tot <- sum(area_tot, na.rm = T)
area_tot[area_tot == 0] <- NA
# coverage = 100 - (% of modelled cells flagged as gaps)
covg_upp <- 100 - length(which(!is.na(upper[])))/length(which(!is.na(area_tot[])))*100
covg_low <- 100 - length(which(!is.na(lower[])))/length(which(!is.na(area_tot[])))*100
rich <- rst %>%
raster::stack(.) %>%
reclassify(., mt) %>%
sum(., na.rm = T)
rich[rich[] == 0] <- NA
# writeRaster(rich, paste0("Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/", i[2], ".tif"), overwrite = T, NAflag = 0)
status <- data.frame(lower = covg_low, upper = covg_upp, status = "Done", stringsAsFactors = F)
return(status)
}, error = function(e){
cat("An error ocurred :( \n")
status <- data.frame(lower= NA, upper = NA, status = "ERROR", stringsAsFactors = F)
return(status)
})
return(cbind( name = i[2], x))
})
bind_rows(cov_mtrs) %>%
dplyr::mutate(name = crops_paths$name) %>%
write.csv(., "Z:/gap_analysis_landraces/runs/LGA_documents/gaps_richness_per_crop/coverage_metrics.csv", row.names = F)
# MAPS IN PNG FORMAT: render the all-crops gap-richness raster as a levelplot
options(warn = -1, scipen = 999)
suppressMessages(library(pacman))
suppressMessages(pacman::p_load(raster, lattice, rasterVis, maptools, tidyverse, latticeExtra, sp, RColorBrewer))
rsin <- raster("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif")
wrld <- raster::shapefile('Z:/gap_analysis_landraces/runs/input_data/shapefiles/shp/all_countries.shp')
ssdn <- raster::shapefile('Z:/gap_analysis_landraces/runs/input_data/shapefiles/Sudan_admin_WGS84/Sudan_admin_0.shp')
msk <- raster("Z:/gap_analysis_landraces/runs/input_data/mask/mask_world.tif")
# graticule lines every 20 degrees
grat <- sp::gridlines(wrld, easts = seq(-180, 180, by = 20), norths = seq(-90, 90, by = 20))
# bin richness into three classes
# NOTE(review): reclassify bins are (1,3], (3,6], (6,15] but the legend labels
# below read "1-3", "4-6", "> 7" -- confirm the intended boundaries
mt <- matrix(c(1, 3, 1,
3, 6, 2,
6, 15, 3), 3,3, byrow = T)
rsin <- raster::reclassify(x = rsin, rcl = mt, include.lowest=F)
rsin <- raster::ratify(rsin)
rat <- levels(rsin)[[1]]
rat$level <- c("1-3", "4-6", "> 7")
if(nrow(rat) == 1){
cols <- '#2991c6'
}else{
cols <- grDevices::colorRampPalette(c('#2991c6', '#fafa65', '#e61614'))(nrow(rat))
}
levels(rsin) <- rat
raster::crs(rsin) <- '+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0'
# Figure details
ht <- 12
fct <- (rsin@extent@xmin-rsin@extent@xmax)/(rsin@extent@ymin-rsin@extent@ymax)
wt <- ht*(fct+.1)
# Theme
mThm <- rasterVis::rasterTheme(region = brewer.pal(9, "YlGn"), panel.background = list(col = "white"))
# Map: categorical levelplot with country polygons underneath
p <- rasterVis:::levelplot(rsin,
att = 'level',
xlab = 'Longitude',
ylab = 'Latitude',
scales = list(x = list(draw = T), y = list(draw = T)),
margin = F,
par.settings = mThm,
col.regions = cols,
maxpixels = ncell(rsin)) +
latticeExtra::layer(sp.lines(grat, lwd = 0.5, lty = 2, col = "black")) +
latticeExtra::layer(sp.polygons(ssdn, lwd = 0.8, col = "#e9e9e9", fill = "#cccccc"), under = T) +
latticeExtra::layer(sp.polygons(wrld, lwd = 0.8, col = "#e9e9e9", fill = "#cccccc"), under = T)
hght = 3.5
wdth = 10
png("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.png", height = hght, width = wdth, units = "in", res = 300)
print(p)
dev.off()
####### BIPLOT GAP COUNTS AND PERCENTAGE
shp <-shapefile("Z:/gap_analysis_landraces/runs/input_data/shapefiles/new_world_shapefile/ne_50m_admin_0_countries.shp")
shp_no_ant <- shp[shp@data$NAME != "Antarctica", ]
rsdm <- raster("Z:/gap_analysis_landraces/runs/results/sdm_all_crops_sum.tif")
rsin_r <- raster("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif")
rsin <- stars::read_stars("Z:/gap_analysis_landraces/runs/results/all_crops_gap_richness.tif")
sf_rsin <- sf::st_as_sf(rsin)
wrld <- raster::shapefile('Z:/gap_analysis_landraces/runs/input_data/shapefiles/shp/all_countries.shp')
grat <- sp::gridlines(wrld, easts = seq(-180, 180, by = 20), norths = seq(-90, 90, by = 20))
rsdm_msk <- raster::mask(rsdm, rsin_r)
perct <- (rsin_r/rsdm_msk)*100
tmp_file <- "D:/perct.tif"
writeRaster(perct, tmp_file, overwrite = T)
perct <- stars::read_stars(tmp_file)
sf_perct <- sf::st_as_sf(perct)
rsf <- cbind(sf_rsin, sf_perct)
names(rsf)[1:2]<- c("gap_rich", "perct")
data <- rsf %>%
dplyr::mutate(gap_rich_cat = case_when(
gap_rich >= 0 & gap_rich <= 3 ~ 1,
gap_rich >= 4 & gap_rich <= 7 ~ 2,
gap_rich >= 8 ~ 3
),
perct_cat = case_when(
perct >= 0 & perct <= 33 ~ 1,
perct > 33 & perct <= 66 ~ 2,
perct > 66 ~ 3
),
bi_class = paste0(gap_rich_cat, "-", perct_cat))
#data <- biscale::bi_class(rsf, x = gap_rich , y = perct, style = "equal", dim = 3)
# Country polygons flattened to a data frame for geom_polygon().
shapefile_df <- broom::tidy(shp_no_ant)
# Custom 3x3 bivariate palette, named val_<richness category>_<% category>.
custom_pal <- bi_pal_manual(val_1_1 = "#FDF5D0",
val_1_2 = "#9bcfe4",
val_1_3 = "#2bafe5",
val_2_1 = "#fcb189",
val_2_2 = "#ae978c",
val_2_3 = "#467a8d",
val_3_1 = "#f57334",
val_3_2 = "#a75e3b",
val_3_3 = "#5b473e",
preview = FALSE)
# Layer order: grey country fill, classified raster cells, country outlines,
# then the dashed graticule on top; extent clipped to exclude the poles.
map <- ggplot() +
geom_polygon(data=shapefile_df, aes(x=long, y=lat, group=group), fill= "#cccccc", color="#cccccc", size=0.2) +
geom_sf(data , mapping = aes(fill=bi_class), colour= NA, show.legend = FALSE) +
geom_polygon(data=shapefile_df, aes(x=long, y=lat, group=group), fill= NA, color="#e9e9e9", size=0.2)+
geom_sf(data = st_as_sf(grat), size = 0.2, linetype = "dashed")+
coord_sf(xlim = c(-170, 170), ylim = c(-58, 83), expand = FALSE) +
xlab("Longitude")+ ylab("Latitude")+
bi_scale_fill(pal = custom_pal, #"GrPink",
dim = 3) +
theme(panel.grid.major = element_blank(), panel.background = element_rect(fill = "white"),
panel.border = element_rect(fill = NA))
# Bivariate legend inset in the lower-left corner of the composed figure.
legend <- bi_legend(pal = custom_pal, #"GrPink"
dim = 3,
xlab = "# Crop with gaps",
ylab = "% Crop with gaps",
size = 18)
allplot <- ggdraw() +
draw_plot(map, 0, 0, 1, 1) +
draw_plot(legend, 0.02, -0.00009, 0.3, 0.3)
save_plot(paste0("D:/", "bivariate_plot_gap_crichness.png"), base_width=10, base_height=10, allplot,
base_asp = 1.2)
#### grafico de coverage
# Per-crop coverage: bars at the midpoint of the [lower, upper] interval,
# error bars at the bounds. `consolidado` is an S4 object built elsewhere;
# its @crop slot holds one coverage table per crop plus the crop names.
metrics <- do.call(rbind,consolidado@crop$crops_coverage) %>%
dplyr::mutate(mid_point = (lower+ upper)/2,
label = consolidado@crop$name) %>%
tidyr::drop_na()
# Horizontal bars ordered by midpoint coverage.
metrics %>%
ggplot(aes(x = reorder(label, mid_point), y = mid_point))+
geom_bar(stat = "identity", colour="black", fill = "gray70")+
geom_errorbar(aes(ymin=lower, ymax=upper), colour="black", width=.1, position= position_dodge())+
geom_point(position=position_dodge(width = 1), size=3, shape=21, fill="black")+
scale_y_continuous(limits = c(0, 101), breaks = seq(0,100, by= 10))+
ylab("Coverage (%)")+
xlab("")+
coord_flip()+
ggplot2::theme_bw() +
ggplot2::theme(axis.text = element_text(size = 10),
axis.title = element_text(size = 20),
legend.text = element_text(size = 17),
legend.title = element_text(size = 20))
# ggsave() with no plot argument writes the last plot displayed.
ggplot2::ggsave(filename = "Z:/gap_analysis_landraces/runs/results/coverage_graph.png", device = "png", width = 8, height = 6, units = "in")
############ descargar datos para los mapas de higcharter
# Highcharts world geojson -- only its attribute table is kept afterwards.
shp1 <- geojson_read("https://code.highcharts.com/mapdata/custom/world-palestine-highres.geo.json", what = "sp")
shp_hc <- shp1@data
require(haven)
# Case-control survey (Stata file); add a row id used later when sampling.
data <- haven::read_dta("C:/Users/acmendez/Downloads/base de datos (1).dta")
data$id <- 1:nrow(data)
# Cases: willing participants with non-empty commune/sex and known age,
# counted per age-category x sex stratum.
casos <- data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo , Deseacontinuarconlaencuesta, id) %>%
filter(casocontrol == 1, Deseacontinuarconlaencuesta == "Sí") %>%
filter(Comuna != "", Sexo != "", !is.na(Edad)) %>%
group_by(catedad2, Sexo) %>%
tally()%>%
rename("casos" = n)
# Controls: same filters with casocontrol == 0.
controles <- data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo, Deseacontinuarconlaencuesta, id ) %>%
filter(casocontrol == 0, Deseacontinuarconlaencuesta == "Sí") %>%
filter(Comuna != "", Sexo != "", !is.na(Edad)) %>%
group_by(catedad2, Sexo) %>%
tally() %>%
rename("controles" =n)
# Number of controls to draw per remaining stratum.
# NOTE(review): the hard-coded [-1] / [-c(1,2)] row drops assume a fixed
# stratum ordering and row count in casos/controles -- fragile; verify that
# both tallies line up row-for-row before trusting `conteos`.
muestra <- c( 26,52, 6, 6)
conteos <- data.frame(casos[-1 ,], controles = controles$controles[-c(1,2) ], muestra)
# Draw, for every age-category x sex stratum in `conteos`, a random sample of
# eligible controls (casocontrol == 0, still willing to participate) of size
# `muestra`. Result: a list of row indices into `data`, one per stratum.
#
# FIX: the original used apply() on a data.frame, which coerces every row to
# a character matrix and forced fragile positional indexing (i[1], i[5]).
# Iterating over row indices keeps column types and refers to columns by name.
# NOTE(review): sample() is not seeded, so the draw is not reproducible --
# call set.seed() upstream if reproducibility matters.
elegidos <- lapply(seq_len(nrow(conteos)), function(r) {
edad <- as.numeric(conteos$catedad2[r])
cat(edad, "\n")  # progress: stratum being sampled
s <- as.character(conteos$Sexo[r])
n <- as.numeric(conteos$muestra[r])
# Candidate rows: controls in this stratum still willing to participate.
to_sample <- which(data$catedad2 == edad & data$Sexo == s & data$Deseacontinuarconlaencuesta == "Sí" & data$casocontrol == 0)
sample(to_sample, size = n, replace = FALSE)
})
# Flag the sampled controls in the master table (1 = selected).
data$elegidos <- 0
data$elegidos[unlist(elegidos)] <- 1
# NOTE(review): writing to "clipboard" only works on Windows.
write.table(data$elegidos, "clipboard", sep = "\t")
# Export cases plus selected controls (valid age category, still willing).
data %>%
filter(casocontrol == 1 | elegidos == 1) %>%
filter(catedad2 != 0) %>%
filter(Deseacontinuarconlaencuesta == "Sí") %>%
writexl::write_xlsx(., "C:/Users/acmendez/Google Drive/TRABAJOS UNIVALLE/semestre 3/epi aplicada 1/base_covid_filtrada.xlsx")
# Quick interactive look at the remaining eligible controls (not stored).
data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo, Deseacontinuarconlaencuesta, id ) %>%
filter(casocontrol == 0, Deseacontinuarconlaencuesta == "Sí")
# --- Alternative selection: 1:1 propensity-score matching -----------------
# Complete-case table for the propensity model.
bd <- data %>%
dplyr::select(casocontrol, Edad, Comuna, catedad2, Sexo ) %>%
dplyr::mutate(Comuna = as.numeric(Comuna), id = 1:nrow(.) ) %>%
filter(!is.na(Comuna), Sexo != "", !is.na(Edad))
id <- bd$id
# Probability of being a case given stratum covariates (log link).
glm1 <- bd %>%
glm(casocontrol ~ catedad2 + Comuna + Sexo, data = ., family = binomial(link = "log"))
X <- glm1$fitted.values
# Matching::Match, 1 control per case on the fitted propensity score.
rr <- Match(Tr= bd$casocontrol, X= X, M=1)
# Interactive sanity checks: number of matched controls, case/control split,
# and whether any case index leaked into the control index vector.
rr$index.control %>% length()
bd$casocontrol %>% table
which(bd$casocontrol == 1) %in% rr$index.control
# Label matched controls.
bd$elegidos <- "No elegidos"
bd[rr$index.control, "elegidos"] <- "Control"
# --- Poisson incidence model by age and hriaca status ---------------------
nrow(data )
# Join case and control counts per stratum.
# FIX: casos/controles were tallied by (catedad2, Sexo); the original joined
# by "Comuna", a column neither table has, which raised an error.
casos %>%
dplyr::left_join(., controles, by = c("catedad2", "Sexo"))
# FIX: names() takes a single argument -- `names(data, edad)` was an error.
# NOTE(review): from here on `data` is expected to carry edad/hriaca/casos/
# poblacion columns (an incidence table); confirm it was reloaded upstream.
names(data)
data$edad_cat <- as.numeric(as.factor(data$edad))  # numeric code per age class
data$edad <- as.factor(data$edad)
data$hriaca <- as.factor(data$hriaca)
# Poisson regression of case counts on age, population as exposure offset.
l1 <- glm(casos~ edad , offset = log(poblacion),data = data, family= poisson(link = "log"))
summary(l1)
# Incidence-rate ratios with the age x hriaca interaction (mfx::poissonirr).
l2 <- poissonirr(casos~ edad + hriaca +edad*hriaca + offset(log(poblacion)) ,data = data)
# FIX: predict.glm has no `data` argument -- it was silently swallowed by
# `...` and the model predicted on its training data anyway. Pass `newdata`
# explicitly (same result here, but the intent is now honoured); exp() maps
# the linear predictor back to expected counts.
pr <- predict(l1, newdata = data) %>% exp()
data$inc_edad <- data$casos/ data$poblacion  # observed incidence per stratum
# NOTE(review): `sum` shadows base::sum for the rest of the session; the
# name is kept because later code refers to it.
sum <- data %>%
dplyr::mutate(pred = pr,
inc_pred = pred/poblacion)
# Previously-used aggregation kept for reference:
# group_by(edad) %>%
# dplyr::summarise(n = sum(casos),
# pob = sum(poblacion),
# n_pred = sum(pred)) %>%
# ungroup() %>%
# mutate(edad_cat = 1:6,
# inc_pred = n_pred/pob,
# inc_edad = n/pob)
# Observed vs predicted incidence by age class, hriaca == 0 stratum only;
# pivot_longer stacks the two series so each gets its own coloured line.
sum %>%
dplyr::select(edad_cat, inc_edad, inc_pred, hriaca ) %>%
dplyr::filter(hriaca ==0 ) %>%
dplyr::select(-hriaca) %>%
pivot_longer(cols = - c(edad_cat)) %>%
ggplot()+
geom_line(aes(x = edad_cat , y = value, colour = name))
|
# testthat entry point: runs every test under tests/testthat for metaSDT.
library(testthat)
library(metaSDT)
test_check("metaSDT")
| /tests/testthat.R | permissive | craddm/metaSDT | R | false | false | 57 | r | library(testthat)
library(metaSDT)
test_check("metaSDT")
|
library(mrbsizeR)
### Name: HPWmap
### Title: Computation of pointwise and highest pointwise probabilities.
### Aliases: HPWmap
### ** Examples
# Artificial posterior sample: each column is one draw of a 5-by-2 object
# stored as a length-10 vector; 10 draws in total.
set.seed(987)
smooth_draws <- matrix(stats::rnorm(100), nrow = 10)
# Shift rows 4-6 upwards so part of the object clearly stands out.
bump_rows <- 4:6
smooth_draws[bump_rows, ] <- smooth_draws[bump_rows, ] + 5
# Pointwise / highest-pointwise probability maps at the 95% level.
HPWmap(smoothVec = smooth_draws, mm = 5, nn = 2, prob = 0.95)
| /data/genthat_extracted_code/mrbsizeR/examples/HPWmap.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 444 | r | library(mrbsizeR)
### Name: HPWmap
### Title: Computation of pointwise and highest pointwise probabilities.
### Aliases: HPWmap
### ** Examples
# Artificial sample data: 10 observations (5-by-2 object), 10 samples
set.seed(987)
sampleData <- matrix(stats::rnorm(100), nrow = 10)
sampleData[4:6, ] <- sampleData[4:6, ] + 5
# Calculation of the simultaneous credible intervals
HPWmap(smoothVec = sampleData, mm = 5, nn = 2, prob = 0.95)
|
56de505f47eb7ee770a3a7dbbebb5955 dungeon_i15-m7-u4-v0.pddl_planlen=29.qdimacs 9011 37165 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i15-m7-u4-v0.pddl_planlen=29/dungeon_i15-m7-u4-v0.pddl_planlen=29.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 88 | r | 56de505f47eb7ee770a3a7dbbebb5955 dungeon_i15-m7-u4-v0.pddl_planlen=29.qdimacs 9011 37165 |
`longpstart` <-
function(NPP=6, asta="",acomp="", theday=1, hr=0 )
{
## Open a postscript device (RPMG::jpostscript) for long-FFT panel plots and
## set up an NPP-row, 1-column layout with tight margins.
##
## Arguments:
##   NPP    number of panels (rows) per page
##   asta   station name; falls back to "TEST" when the argument is omitted
##   acomp  component code; falls back to "T" when the argument is omitted
##   theday day number, zero-padded to 3 digits in the file name
##   hr     hour, zero-padded to 2 digits in the file name
## Returns: 0, the initial panel counter (kplot).
##
## FIX: the original also guarded NPP, theday and hr with missing() even
## though their declared defaults already supply the same values -- those
## checks were dead code and are removed. The asta/acomp guards are kept
## because their missing() fallbacks ("TEST"/"T") differ from the declared
## defaults (""), so explicitly passing "" still behaves as before.
if(missing(asta)) { asta <- "TEST" }
if(missing(acomp)) { acomp <- "T" }
print("Doing postscript")
## File name pattern: longfft.<station>.<component>.<ddd>.<hh>
plname <- paste(sep=".", "longfft", asta, acomp, formatC(theday, format="d", width=3,flag="0") , formatC(hr, format="d", width=2,flag="0"))
RPMG::jpostscript(plname , P=c(14, 14) )
par(mfrow=c(NPP,1))
par(mai=c(0.3,0.35,0.3,0.1))
kplot <- 0
return(kplot)
}
| /R/longpstart.R | no_license | cran/RSEIS | R | false | false | 542 | r | `longpstart` <-
function(NPP=6, asta="",acomp="", theday=1, hr=0 )
{
if(missing(NPP)) { NPP=6 }
if(missing(asta)) { asta="TEST" }
if(missing(acomp)) { acomp="T" }
if(missing(theday)) { theday=1 }
if(missing(hr)) { hr=0 }
print("Doing postscript")
plname = paste(sep=".", "longfft", asta, acomp, formatC(theday, format="d", width=3,flag="0") , formatC(hr, format="d", width=2,flag="0"))
RPMG::jpostscript(plname , P=c(14, 14) )
par(mfrow=c(NPP,1))
par(mai=c(0.3,0.35,0.3,0.1))
kplot = 0
return(kplot)
}
|
# Copyright (C) 2014-15 Johannes Ranke
# Contact: jranke@uni-bremen.de
# This file is part of the R package mkin
# mkin is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>
# These tests were migrated from inst/unitTests/runit.mkinerrmin.R
# Regression tests: FOCUS chi2 error levels must reproduce mkin 0.9-33.
context("Calculation of FOCUS chi2 error levels")
# Parent -> metabolite (m1) model, both compartments SFO, using formation
# fractions ("max"); quiet = TRUE suppresses model-building messages.
SFO_SFO.ff <- mkinmod(parent = list(type = "SFO", to = "m1"),
m1 = list(type = "SFO"),
use_of_ff = "max", quiet = TRUE)
test_that("Chi2 error levels for FOCUS D are as in mkin 0.9-33", {
fit <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, quiet = TRUE)
# Reference values (4 decimals) taken from mkin 0.9-33 output.
errmin.FOCUS_2006_D_rounded = data.frame(
err.min = c(0.0640, 0.0646, 0.0469),
n.optim = c(4, 2, 2),
df = c(15, 7, 8),
row.names = c("All data", "parent", "m1"))
expect_equal(round(mkinerrmin(fit), 4),
errmin.FOCUS_2006_D_rounded)
})
test_that("Chi2 error levels for FOCUS E are as in mkin 0.9-33", {
fit <- mkinfit(SFO_SFO.ff, FOCUS_2006_E, quiet = TRUE)
# Reference values (4 decimals) taken from mkin 0.9-33 output.
errmin.FOCUS_2006_E_rounded = data.frame(
err.min = c(0.1544, 0.1659, 0.1095),
n.optim = c(4, 2, 2),
df = c(13, 7, 6),
row.names = c("All data", "parent", "m1"))
expect_equal(round(mkinerrmin(fit), 4),
errmin.FOCUS_2006_E_rounded)
})
| /mkin/tests/testthat/test_FOCUS_chi2_error_level.R | no_license | ingted/R-Examples | R | false | false | 1,817 | r | # Copyright (C) 2014-15 Johannes Ranke
# Contact: jranke@uni-bremen.de
# This file is part of the R package mkin
# mkin is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>
# These tests were migrated from inst/unitTests/runit.mkinerrmin.R
context("Calculation of FOCUS chi2 error levels")
SFO_SFO.ff <- mkinmod(parent = list(type = "SFO", to = "m1"),
m1 = list(type = "SFO"),
use_of_ff = "max", quiet = TRUE)
test_that("Chi2 error levels for FOCUS D are as in mkin 0.9-33", {
fit <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, quiet = TRUE)
errmin.FOCUS_2006_D_rounded = data.frame(
err.min = c(0.0640, 0.0646, 0.0469),
n.optim = c(4, 2, 2),
df = c(15, 7, 8),
row.names = c("All data", "parent", "m1"))
expect_equal(round(mkinerrmin(fit), 4),
errmin.FOCUS_2006_D_rounded)
})
test_that("Chi2 error levels for FOCUS E are as in mkin 0.9-33", {
fit <- mkinfit(SFO_SFO.ff, FOCUS_2006_E, quiet = TRUE)
errmin.FOCUS_2006_E_rounded = data.frame(
err.min = c(0.1544, 0.1659, 0.1095),
n.optim = c(4, 2, 2),
df = c(13, 7, 6),
row.names = c("All data", "parent", "m1"))
expect_equal(round(mkinerrmin(fit), 4),
errmin.FOCUS_2006_E_rounded)
})
|
library(ncdf4)
library(plot3D)
library(animation)
# NOTE(review): this 11-colour palette is never used -- `colors` is
# overwritten by the 40-colour palette below before any plot is drawn.
colors <- c("#FFDBEA","#FFD4C3","#FCCFA0","#DACB87","#B2C67C","#86C080",
"#55B88D","#15AC9B","#009EA6","#278DAD","#5578AC")
#read reflectivity and other data from radar data
setwd("/home/bhupendra/data/darwin_radar/test/CPOL/outNC/")
inFileName <- "cpol_3d_fields_classes_20050402.nc"
ncfile <- nc_open(inFileName)
#read x, y, z, and time for reference
x <- ncvar_get(ncfile, varid = "x")
y <- ncvar_get(ncfile, varid = "y")
z <- ncvar_get(ncfile, varid = "z")
time <- ncvar_get(ncfile, varid = "time")
# Seconds since epoch -> POSIXct timestamps (UTC).
time_posix <- as.POSIXct(time, origin = "1970-01-01", tz="UTC")
#read steiner and merhala cloassification and bringi reflectivity and hdrometeor classification
steiner_rainClass <- ncvar_get(ncfile, varid = "cs")
merhala_rainClass <- ncvar_get(ncfile, varid = "cm")
dbz_bringi <- ncvar_get(ncfile, varid = "zh")
# Negative reflectivity is treated as missing.
dbz_bringi <- replace(dbz_bringi, dbz_bringi<0, NA)
hydro_class <- ncvar_get(ncfile, varid = "hc")
# 40-colour ramp matching the 40 reflectivity bins (0-60 dBZ in 1.5 steps).
colors <- c("#FFE4F3","#FFDEF8","#FFD9FE","#FFD4FF","#FFD1FF","#FFCEFF",
"#FFCBFF","#FFC9FF","#F3C8FF","#E4C8FF","#D3C8FF","#C0C8FF",
"#ABC8FF","#95C9FF","#7CC9FB","#61C9F2","#3EC8E8","#00C7DC",
"#00C6D0","#00C4C2","#00C1B3","#00BEA4","#00BA94","#00B583",
"#00B171","#29AB5E","#45A649","#58A031","#679904","#749300",
"#7E8C00","#878400","#8F7D00","#967500","#9B6D00","#A06500",
"#A35D00","#A6540C","#A84A2B","#A9403E")
#make GIF
#plot Reflectivity
# Animated GIF: reflectivity at one height level (index 6) over 144 scans.
# NOTE(review): assumes dbz_bringi is indexed [x, y, level, scan] with at
# least 144 scans in the file -- confirm against the NetCDF dimensions.
saveGIF({
for(i in 1:144){
level <- 6
scan <- i
subtitle <- paste(z[level], "Km", "; ", strftime(time_posix[scan], tz = "UTC", usetz = TRUE), sep="")
image2D(z = dbz_bringi[, , level, scan], x = x, y = y, breaks = seq(0, 60, by=1.5), col = colors,
xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey")
title(main="Bringi Reflectivity")
text(labels = subtitle, x = min(x), y=(max(y) - 0.1*max(y)), pos = 4)
}
}, movie.name = "dbz_motion.gif", interval = 0.2)
# Multi-page PDF: for each height level 2..15 of the first scan, a 2x2 panel
# of reflectivity plus the three classification fields. `colors` is
# re-assigned per panel to the palette each classification needs.
pdf("plot2D.pdf")
for(i in seq(2, 15, by = 1)){
level <- i
scan <- 1
subtitle <- paste(z[level], "Km", " ", strftime(time_posix[scan], tz = "UTC", usetz = TRUE), sep="")
par(mfrow = c(2, 2))
#plot Reflectivity
image2D(z = dbz_bringi[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey")
title(main="Bringi Reflectivity")
text(labels = subtitle, x = min(x), y=(max(y)-0.1* max(y)), pos = 4, cex=0.8)
#plot Classification
colors <- c("white", "#CB7D8D","#67A160","#7494C8")
image2D(z = merhala_rainClass[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey", col = colors)
title(main="Merhala Classification")
colors <- c("white", "#CB7D8D","#67A160")
image2D(z = steiner_rainClass[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey", col = colors)
title(main="Steiner Classification")
colors<- c("#E16A86","#CD7E37","#A69100","#65A100","#00AB66","#00ACA5",
"#00A2D3","#7D89E6","#C86DD7")
image2D(z = hydro_class[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey", col = colors)
title(main="Hydrometeor Classification")
}
dev.off()
| /dbz_motion_gif_test.R | no_license | RBhupi/Darwin-Rscripts | R | false | false | 3,334 | r | library(ncdf4)
library(plot3D)
library(animation)
colors <- c("#FFDBEA","#FFD4C3","#FCCFA0","#DACB87","#B2C67C","#86C080",
"#55B88D","#15AC9B","#009EA6","#278DAD","#5578AC")
#read reflectivity and other data from radar data
setwd("/home/bhupendra/data/darwin_radar/test/CPOL/outNC/")
inFileName <- "cpol_3d_fields_classes_20050402.nc"
ncfile <- nc_open(inFileName)
#read x, y, z, and time for reference
x <- ncvar_get(ncfile, varid = "x")
y <- ncvar_get(ncfile, varid = "y")
z <- ncvar_get(ncfile, varid = "z")
time <- ncvar_get(ncfile, varid = "time")
time_posix <- as.POSIXct(time, origin = "1970-01-01", tz="UTC")
#read steiner and merhala cloassification and bringi reflectivity and hdrometeor classification
steiner_rainClass <- ncvar_get(ncfile, varid = "cs")
merhala_rainClass <- ncvar_get(ncfile, varid = "cm")
dbz_bringi <- ncvar_get(ncfile, varid = "zh")
dbz_bringi <- replace(dbz_bringi, dbz_bringi<0, NA)
hydro_class <- ncvar_get(ncfile, varid = "hc")
colors <- c("#FFE4F3","#FFDEF8","#FFD9FE","#FFD4FF","#FFD1FF","#FFCEFF",
"#FFCBFF","#FFC9FF","#F3C8FF","#E4C8FF","#D3C8FF","#C0C8FF",
"#ABC8FF","#95C9FF","#7CC9FB","#61C9F2","#3EC8E8","#00C7DC",
"#00C6D0","#00C4C2","#00C1B3","#00BEA4","#00BA94","#00B583",
"#00B171","#29AB5E","#45A649","#58A031","#679904","#749300",
"#7E8C00","#878400","#8F7D00","#967500","#9B6D00","#A06500",
"#A35D00","#A6540C","#A84A2B","#A9403E")
#make GIF
#plot Reflectivity
saveGIF({
for(i in 1:144){
level <- 6
scan <- i
subtitle <- paste(z[level], "Km", "; ", strftime(time_posix[scan], tz = "UTC", usetz = TRUE), sep="")
image2D(z = dbz_bringi[, , level, scan], x = x, y = y, breaks = seq(0, 60, by=1.5), col = colors,
xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey")
title(main="Bringi Reflectivity")
text(labels = subtitle, x = min(x), y=(max(y) - 0.1*max(y)), pos = 4)
}
}, movie.name = "dbz_motion.gif", interval = 0.2)
pdf("plot2D.pdf")
for(i in seq(2, 15, by = 1)){
level <- i
scan <- 1
subtitle <- paste(z[level], "Km", " ", strftime(time_posix[scan], tz = "UTC", usetz = TRUE), sep="")
par(mfrow = c(2, 2))
#plot Reflectivity
image2D(z = dbz_bringi[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey")
title(main="Bringi Reflectivity")
text(labels = subtitle, x = min(x), y=(max(y)-0.1* max(y)), pos = 4, cex=0.8)
#plot Classification
colors <- c("white", "#CB7D8D","#67A160","#7494C8")
image2D(z = merhala_rainClass[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey", col = colors)
title(main="Merhala Classification")
colors <- c("white", "#CB7D8D","#67A160")
image2D(z = steiner_rainClass[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey", col = colors)
title(main="Steiner Classification")
colors<- c("#E16A86","#CD7E37","#A69100","#65A100","#00AB66","#00ACA5",
"#00A2D3","#7D89E6","#C86DD7")
image2D(z = hydro_class[, , level, scan], x = x, y = y, xlab="Distance from Radar [Km]", ylab="Distance from Radar [Km]", NAcol = "grey", col = colors)
title(main="Hydrometeor Classification")
}
dev.off()
|
##################################################################################
################## Created November 2017 ########################
##################### by Bastien Sadoul ############################
##################################################################################
# Calls parameters from .mat for rainbow trout (out of Add my Pet)
# Calls f values for gw124 and gw150, estimated using run_funique_all.m
# Estimates growth using classic deb model and mean f values
# Estimates growth with varying PARAM and mean f values
# Calculates diff between CLASSIC and VARYING estimates
# Compares with actual diff
##################################################################################
# --- Session setup --------------------------------------------------------
# NOTE(review): rm(list=ls()) in a shared script is discouraged -- it wipes
# the user's workspace (but not loaded packages/options); kept for fidelity.
rm(list=ls())
cat("\014") # To clear the console
dir=dirname(rstudioapi::getActiveDocumentContext()$path) # gets the name of the directory of the active script (works only in R studio)
library(akima) # for interpolation in debODE_ABJ.R
library(R.matlab)
library(deSolve)
library(reshape2)
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
##################################################################################
####### --------- REAL VALUES
##################################################################################
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
#### ------------------------------------
# ---- ALL WEIGHT VALUES IN ONE DATAFRAME
#### ------------------------------------
# Read the tank-weight time series (tW.<study>.txt) of every study/BPA dose
# and stack them into one long data frame `totreal`.
# NOTE(review): eval(parse(...)) is only used to splice the study name into
# the file path -- read.table(file.path(...)) would do the same without
# parsing code from a string. rbind-in-loop also grows totreal repeatedly.
for (study in c("gw150", "gw124ini", "gw124fin",
"gw150_BPA3", "gw150_BPA30",
"gw124_BPA03", "gw124_BPA3", "gw124_BPA30",
"gw124_BPA100", "gw124_BPA100end", "gw124_BPA300")) {
eval(parse(text=paste0(
"temp = read.table(paste(dir, '/Data txt/tW.", study, ".txt', sep=''), sep='\t', header=T)"
)))
# Days post-fertilisation = days post-birth + 64.
temp$dpf = temp$dpb + 64
temp = temp[, -1]
temp = melt(temp, id = "dpf")
# Drop survival columns; keep only the weight series.
temp = temp[-grep(temp$variable,pattern = "surv"),]
names(temp)[names(temp)=="value"] = "gw"
temp$study = study
if (study == "gw150"){
totreal = temp
} else { totreal=rbind(totreal, temp)}
}
#### ------------------------------------
# ---- ADD A CONDITION VARIABLE
#### ------------------------------------
# Split "study_BPAx" into study and BPA condition; unexposed tanks -> "BPA0".
totreal$condition = colsplit(totreal$study, "_", names = c("1", "2"))[,2]
totreal$condition[totreal$condition==""]="BPA0"
totreal$study = colsplit(totreal$study, "_", names = c("1", "2"))[,1]
# gw124 is split into an early (<=350 dpf) and a late (>350 dpf) phase.
totreal$study[which(totreal$study=="gw124" & totreal$dpf<=350)]="gw124ini"
totreal$study[which(totreal$study=="gw124" & totreal$dpf>350)]="gw124fin"
# Propagate the ini/fin split into the tank identifiers, then undo the
# double substitution it produces for the bare gw124ini/gw124fin labels.
totreal$variable = as.character(totreal$variable)
totreal$variable[which(totreal$study=="gw124ini")]=
gsub("gw124", "gw124ini", totreal$variable[which(totreal$study=="gw124ini")])
totreal$variable[which(totreal$study=="gw124fin")]=
gsub("gw124", "gw124fin", totreal$variable[which(totreal$study=="gw124fin")])
totreal$variable[which(totreal$variable=="gw124finfin")]="gw124fin"
totreal$variable[which(totreal$variable=="gw124iniini")]="gw124ini"
totreal$condition[totreal$condition=="BPA100end"] = "BPA100"
# Align near-identical sampling dates so controls and treated tanks merge.
totreal$dpf[totreal$dpf==957]=958
totreal$dpf[totreal$dpf==957.5]=958.5
#### ------------------------------------
# ---- CALCULATES A DIFF FOR EACH TANK
#### ------------------------------------
# Mean control (BPA0) weight per study and date, then the relative weight
# difference (%) of every tank against that control mean.
meanBPA0 = aggregate(gw ~ study*dpf, data=totreal[totreal$condition=="BPA0",], FUN=mean)
names(meanBPA0)[3] = "mean_gwBPA0"
totreal = merge(totreal, meanBPA0, all.x=T)
totreal=totreal[which(!is.na(totreal$mean_gwBPA0)),] # remove those with no mean control
totreal$real_diffW = (totreal$gw - totreal$mean_gwBPA0)/totreal$mean_gwBPA0 * 100
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
##################################################################################
####### --------- DEB ESTIMATES
##################################################################################
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
#### ------------------------------------
# ---- CALL DEB PARAMETERS
#### ------------------------------------
#--- Automatically imports from .mat file
# Add-my-Pet parameter set for rainbow trout, read from the MATLAB results.
param_cont=readMat(gsub("R codes RBT", "Oncorhynchus_mykiss/results_Oncorhynchus_mykiss.mat", dir))$par[,,1]
#--- Add p_Am here to be sure it is the same even when pM changes over time
param_cont$p_Am = param_cont$z * param_cont$p.M / param_cont$kap # J/d.cm^2, max assimilation rate (eq coming from the fact that at Lmax, kap*pC=pM. And pC is provided by pA --> kap*pAm=pM)
#--- Missing parameters in .mat
param_cont$w_E = 23.9
param_cont$w_V = 23.9
param_cont$M_V = param_cont$d.V/ param_cont$w_V # mol/cm^3, volume-specific mass of structure
param_cont$kap_G = param_cont$mu.V * param_cont$M_V / param_cont$E.G # -, growth efficiency
#### ------------------------------------
# ---- CALL DEB ODEs
#### ------------------------------------
source(paste(dir,"debODE_ABJ.R", sep="/"))
##########################################################
############## ---- ESTIMATED WEIGHT WITH STANDARD DEB ABJ
##########################################################
# Baseline (control) prediction: run the standard DEB-ABJ model once per
# tank with that study's mean f and constant parameters, and keep the
# predicted wet weight over each tank's observation window.
dpf=seq(0,1000, by=1)
i=1
for (rep in unique(totreal$variable)) {
study = unique(totreal[totreal$variable == rep, "study"])
#### ------------------------------------
# ---- Forcing variables
#### ------------------------------------
# Mean scaled functional response f across all fits of this study.
f_studies = readMat(gsub("R codes RBT", "/f_prdData_funique_all.mat", dir))$f[,,1]
for_f = names(f_studies)[grep(study, names(f_studies))]
param_cont$f = mean(unlist(f_studies)[names(f_studies) %in% for_f])
param_cont$TempC = 8.5 # en degres C
#### ------------------------------------
# ---- Initial state
#### ------------------------------------
# State vector: L, E, H, E_R plus the Lb/Lj book-keeping slots.
LEH = numeric(6)
LEH[1] = 0.0001 # L
LEH[2] = 643.562 # E
LEH[3] = 0 # H
LEH[4] = 0 # E_R
LEH[5] = 0 # Lb, we don't know yet. Will be determined by the ode (when L reaches E_Hb)
LEH[6] = 0 # Lj, we don't know yet. Will be determined by the ode (when L reaches E_Hj)
#### ------------------------------------
# ---- GETTING ESTIMATED WEIGHTs
#### ------------------------------------
LEHovertime_cont = ode(y = LEH, func = debODE_ABJ, times = dpf,
parms = param_cont,
method="ode23")
colnames(LEHovertime_cont) = c("dpf", "L", "E", "H", "E_R", "Lb", "Lj")
LEHovertime_cont = as.data.frame(LEHovertime_cont)
# Wet weight = structural volume + reserve mass contribution.
LEHovertime_cont$estim_W_cont = LEHovertime_cont[,"L"]^3 +
LEHovertime_cont[,"E"] / param_cont$d.E * param_cont$w_E / param_cont$mu.E # g, wet weight
#### ------------------------------------
# ---- CALCULATE tj
#### ------------------------------------
# tj = day at which maturity H is closest to the metamorphosis threshold E_Hj.
tjcont = LEHovertime_cont[
which(abs(LEHovertime_cont[,"H"] - param_cont$E.Hj) == min(abs(LEHovertime_cont[,"H"] - param_cont$E.Hj))),"dpf"]
#### ------------------------------------
# ---- SAVE
#### ------------------------------------
# Keep predictions only inside this tank's observation window.
temp_res=data.frame(dpf)
temp_res$estim_W_cont = LEHovertime_cont$estim_W_cont
mintime = min(totreal[totreal$variable == rep, "dpf"])
maxtime = max(totreal[totreal$variable == rep, "dpf"])
temp_res$study=study
temp_res$variable=rep
temp_res$tjcont=tjcont
temp_res = temp_res[which(temp_res$dpf>=mintime & temp_res$dpf<=maxtime),]
if (i==1){
estim_res_cont = temp_res
} else {estim_res_cont = rbind(estim_res_cont, temp_res)}
i=i+1
}
# Latest metamorphosis day across tanks; used later to restrict comparisons.
maxtjcont = max(estim_res_cont$tjcont)
##########################################################
############## ---- ESTIMATED WEIGHT WITH VARYING PARAM
##########################################################
### Needs to be in a function to optimize
rm(list=setdiff(ls(), c("totreal", "debODE_ABJ", "param_cont", "estim_res_cont", "dir")))
source(paste(dir, "spring_and_damper_model.R", sep="/"))
# Objective function for optim(): sum of squared differences between the
# observed relative weight effect of BPA (real_diffW) and the effect
# predicted by a DEB model whose maintenance cost p.M is perturbed through
# a spring-and-damper response to a dose-specific force Fpert.
#
# param_spring_damper: numeric vector (ks, cs, Fpert_BPA03, Fpert_BPA3,
#   Fpert_BPA30, Fpert_BPA300, Fpert_BPA100). Returns the scalar SSE (LL).
#
# NOTE(review): the function communicates with spring_damper_model /
# debODE_ABJ through globals assigned with <<- (ks, cs, Fpert) and through
# eval(parse()) dispatch on the condition name, and it reads totreal,
# param_cont, estim_res_cont, dir and maxtjcont from the enclosing
# environment. Statement order is load-bearing; do not reorder.
LLode <- function(param_spring_damper){
# <-
dpf=seq(0,1000, by=1)
#### ------------------------------------
# ---- PARAMETERS OF THE SPRING AND DAMPER
#### ------------------------------------
time_for_var=seq(0,length(dpf)+1, by=1) # +1 because in debODE_ABJ "floor(t)+1"
yini = c(0, 0)
# param_spring_damper = data.frame(ks = 2, cs = 200, Fpert_BPA03 = 20, Fpert_BPA3 = 20,
# Fpert_BPA30 = 30, Fpert_BPA300 = 20, Fpert_BPA100 = 20)
# # ---- Make sure all spring and damper parameters are above zero
param_spring_damper = abs(param_spring_damper)+0.0001
# # ---- Extract parameters
ks <<- param_spring_damper[1]
# ks = 2 # force of the spring
cs <<- param_spring_damper[2]
# cs = 200 # resilience of the damper
Fpert_BPA03 = param_spring_damper[3]
Fpert_BPA3 = param_spring_damper[4]
Fpert_BPA30 = param_spring_damper[5]
Fpert_BPA300 = param_spring_damper[6]
Fpert_BPA100 = param_spring_damper[7]
#### ------------------------------------
# ---- LOOP ON ALL TANKS EXCEPT BPA0
#### ------------------------------------
i=1
for (rep in unique(totreal$variable[totreal$condition != "BPA0"])){
print(i)
param_deb = param_cont
study = unique(totreal[totreal$variable == rep, "study"])
condition = unique(totreal$condition[totreal$variable == rep])
# Pick the Fpert matching this tank's BPA dose (global used by the ODEs).
eval(parse(text=paste("Fpert <<- Fpert_", condition, sep="")))
# ---- Forcing variables
f_studies = readMat(gsub("R codes RBT", "/f_prdData_funique_all.mat", dir))$f[,,1]
for_f = names(f_studies)[grep(study, names(f_studies))]
param_cont$f = mean(unlist(f_studies)[names(f_studies) %in% for_f])
param_cont$TempC = 8.5 # en degres C
# ---- Initial state
LEH = numeric(6)
LEH[1] = 0.0001 # L
LEH[2] = 643.562 # E
LEH[3] = 0 # H
LEH[4] = 0 # E_R
LEH[5] = 0 # Lb, we don't know yet. Will be determined by the ode (when L reaches E_Hb)
LEH[6] = 0 # Lj, we don't know yet. Will be determined by the ode (when L reaches E_Hj)
# ---- Creates a p_M varying through time
# Spring-damper response, rescaled into a time-varying p.M table (t, p.M).
tp_M=ode(y=yini,func=spring_damper_model, times=time_for_var, parms=c(ks,cs), method="ode45")
tp_M = as.data.frame(tp_M)
tp_M[, 2] = (tp_M[, 2]+1) * param_deb$p.M
param_deb$p.M = tp_M[, c(1,2)]
tvar = tp_M
# ---- ESTIMATED WEIGHT
LEHovertime_var = ode(y = LEH, func = debODE_ABJ, times = dpf,
parms = param_deb,
method="ode45")
colnames(LEHovertime_var) = c("dpf", "L", "E", "H", "E_R", "Lb", "Lj")
LEHovertime_var = as.data.frame(LEHovertime_var)
LEHovertime_var$estim_W_var = LEHovertime_var[,"L"]^3 +
LEHovertime_var[,"E"] / param_deb$d.E * param_deb$w_E / param_deb$mu.E # g, wet weight
# ---- Calculate tj
tjvar = LEHovertime_var[
which(abs(LEHovertime_var[,"H"] - param_deb$E.Hj) == min(abs(LEHovertime_var[,"H"] - param_deb$E.Hj))),"dpf"]
# ---- SAVE
temp_res = data.frame(dpf)
temp_res$estim_W_var = LEHovertime_var$estim_W_var
mintime = min(totreal[totreal$variable == rep, "dpf"])
maxtime = max(totreal[totreal$variable == rep, "dpf"])
temp_res$study=study
temp_res$variable=rep
temp_res$tjvar = tjvar
temp_res = temp_res[which(temp_res$dpf>=mintime & temp_res$dpf<=maxtime),]
if (i==1){
estim_res_var = temp_res
} else {estim_res_var = rbind(estim_res_var, temp_res)}
i=i+1
}
#########################################################################################################
####### --------- CALCULATES DIFF ESTIMATES AND COMPARE WITH REAL DIFF
#########################################################################################################
# estim_res_cont = data.frame(estim_res_cont)
# estim_res_var = data.frame(estim_res_var)
#
# estim_res_cont$dpf=as.factor(estim_res_cont$dpf)
# estim_res_var$dpf=as.factor(estim_res_var$dpf)
# estim_res_cont$variable=as.factor(estim_res_cont$variable)
# estim_res_var$variable=as.factor(estim_res_var$variable)
# estim_res_cont$study=as.factor(estim_res_cont$study)
# estim_res_var$study=as.factor(estim_res_var$study)
# estim_res = merge(estim_res_cont, estim_res_var)
# Keyed data.table merges: baseline vs perturbed predictions per tank/date.
estim_res_cont = data.table(estim_res_cont, key = c("dpf", "study", "variable"))
estim_res_var = data.table(estim_res_var, key = c("dpf", "study", "variable"))
tot = data.table(totreal, key = c("dpf", "study", "variable"))
estim_res = merge(estim_res_cont, estim_res_var)
# Predicted relative effect (%) of the perturbation.
estim_res$diff_estimates = (estim_res$estim_W_var - estim_res$estim_W_cont) / estim_res$estim_W_cont *100
totfinal = merge(tot, estim_res, all.x=T)
# Diff before tj
totfinal = totfinal[totfinal$dpf < maxtjcont,]
# Sum of squared residuals between observed and predicted effects.
diff=totfinal$real_diffW-totfinal$diff_estimates
diff = diff[!is.na(diff)]
LL = as.numeric(t(diff) %*% diff)
return(LL)
}
#########################################################################################################
####### --------- OPTIMISATION
#########################################################################################################
# Starting parameters
param_spring_damper = c(ks = 2, cs = 200, Fpert_BPA03 = 1, Fpert_BPA3 = 10,
Fpert_BPA30 = 12, Fpert_BPA300 = 20, Fpert_BPA100 = 15)
# Run the optimization procedure
# Perturbation window globals read by the spring-damper ODE.
tmin=0 # start of the pert (when Fpert applies)
tmax=40 # stop of the pert
# Print the SSE and current parameters on every exit from LLode.
trace(LLode, exit= quote(print(c(returnValue(), param_spring_damper))))
# totreal = totreal[which(totreal$study == "gw150" & totreal$condition == "BPA30"),]
# param_spring_damper = data.frame(ks = 2, cs = 200,
# Fpert_BPA30 = 12)
# Nelder-Mead minimisation of the spring-damper SSE objective.
MLresults <- optim(param_spring_damper, LLode, method = c("Nelder-Mead"), hessian=F,
control=list(trace=10))
totreal = tot
untrace(LLode)
write.table(unlist(MLresults), paste(dir, "/results_optim/result_optim_", Sys.Date(), ".txt", sep=""), sep = "\t")
########## ALARM WHEN DONE
# NOTE(review): install.packages() inside a script re-installs on every run
# and may prompt for a mirror -- better handled once, outside the script.
install.packages("beepr")
library(beepr)
beep()
# Sends txt when script done (works only on mac)
system("osascript -e 'tell application \"Messages\"' -e 'send \"end of script\" to buddy \"moi\"' -e 'end tell'")
#########################################################################################################
####### --------- PLOT RESULTS
#########################################################################################################
# Use the freshly optimised parameters...
param_spring_damper = MLresults$par
# ...or reload a previously saved optimisation result from disk.
# The next two lines overwrite the assignment above; the two sources
# of parameters are alternatives.
temp = read.table(paste(dir, "/results_optim/result_optim_2017-11-05.txt", sep=""), sep="\t")
param_spring_damper = c(ks = temp[1,1], cs = temp[2,1], Fpert_BPA03 = temp[3,1], Fpert_BPA3 = temp[4,1],
Fpert_BPA30 = temp[5,1], Fpert_BPA300 = temp[6,1], Fpert_BPA100 = temp[7,1])
# Run from commented <-
######## ggplot
library(ggplot2)
# Recover the exposure condition from the variable name
# (e.g. "gw150A_BPA3" -> "BPA3"); colsplit() comes from reshape2
estim_res$condition = colsplit(estim_res$variable, "_", names=c("a", "condition"))$condition
# Append zero-difference rows for the controls so the BPA0 lines
# appear in the plots below
estim_res=rbind(data.frame(estim_res), data.frame(dpf=dpf, study="gw150", variable="gw150A_BPA0", estim_W_cont=NA, estim_W_var=NA, diff_estimates=0, condition="BPA0"))
estim_res=rbind(data.frame(estim_res), data.frame(dpf=dpf, study="gw124ini", variable="gw124iniA_BPA0", estim_W_cont=NA, estim_W_var=NA, diff_estimates=0, condition="BPA0"))
estim_res=rbind(data.frame(estim_res), data.frame(dpf=dpf, study="gw124fin", variable="gw124fin", estim_W_cont=NA, estim_W_var=NA, diff_estimates=0, condition="BPA0"))
# ---- Plot observed vs. model-estimated mass difference, gw150 study ----
temp=totfinal[totfinal$study=="gw150",]
# Points/thin lines: per-timepoint mean of the observed difference to
# control, with mean_cl_normal error bars; thick lines: DEB estimates.
p = ggplot(temp, aes(x=dpf, y=real_diffW, color=condition)) +
stat_summary(fun.y = mean, geom = "point", size=5, alpha=0.6) +
stat_summary(fun.y = mean, geom = "line", size=1, alpha=0.6) +
stat_summary(fun.data = mean_cl_normal, fun.args=list(mult=1), alpha=0.6)+
# Model-estimated differences for the three gw150 series
geom_line(data=estim_res[which(estim_res$variable %in% c("gw150A_BPA0","gw150A_BPA3","gw150A_BPA30")),], aes(x=dpf, y=diff_estimates, colour=condition), size=2, alpha=1)+
# NOTE(review): the legend relabels the BPA3/BPA30 conditions as
# "BPA4"/"BPA40" -- confirm this renaming is intended.
scale_fill_manual(labels=c("control", "BPA4", "BPA40"),
# values=c("#00FF00", "#CC3200", "#CB0000"))+
values=c("#00FF00", "#CB0000", "#000000"))+
scale_color_manual(labels=c("control", "BPA4", "BPA40"),
# values=c("#00FF00", "#CC3200", "#CB0000"))+
values=c("#00FF00", "#CB0000", "#000000"))+
# geom_point(alpha=0.6,size=5)+
# geom_line(alpha=0.6,size=1)+
expand_limits(x=c(0, 1100),y=c(-30,30))+
# expand_limits(x=c(0, 600),y=c(-30,30))+
labs(x="Days post fertilization", y="Mass difference to control (%)") +
# White background, no grid, enlarged fonts
theme(axis.text.x = element_text(size=16, colour = "black"), axis.text.y = element_text(size=16, colour = "black"),
axis.title.x = element_text(size=16, margin=margin(t=10)), axis.title.y = element_text(size=16, margin=margin(r=10)),
legend.text = element_text(size=16), legend.title = element_text(size=16),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background=element_rect("grey", fill="white", size=1)
)
p
temp=totfinal[totfinal$study=="gw124ini",]
p = ggplot(temp, aes(x=dpf, y=real_diffW, color=condition)) +
stat_summary(fun.y = mean, geom = "point", size=5, alpha=0.6) +
stat_summary(fun.y = mean, geom = "line", size=1, alpha=0.6) +
stat_summary(fun.data = mean_cl_normal, fun.args=list(mult=1), alpha=0.6)+
geom_line(data=estim_res[which(estim_res$variable %in% c("gw124iniA_BPA0","gw124iniA_BPA03", "gw124ini_BPA3", "gw124ini_BPA30", "gw124iniA_BPA100", "gw124ini_BPA300")),], aes(x=dpf, y=diff_estimates, colour=condition), size=2, alpha=1)+
scale_fill_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
scale_color_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
# geom_point(alpha=0.6,size=5)+
# geom_line(alpha=0.6,size=1)+
expand_limits(x=c(0, 1100),y=c(-30,30))+
# expand_limits(x=c(0, 600),y=c(-30,30))+
labs(x="Days post fertilization", y="Mass difference to control (%)") +
theme(axis.text.x = element_text(size=16, colour = "black"), axis.text.y = element_text(size=16, colour = "black"),
axis.title.x = element_text(size=16, margin=margin(t=10)), axis.title.y = element_text(size=16, margin=margin(r=10)),
legend.text = element_text(size=16), legend.title = element_text(size=16),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background=element_rect("grey", fill="white", size=1)
)
p
temp=totfinal[totfinal$study=="gw124fin",]
p = ggplot(temp, aes(x=dpf, y=real_diffW, color=condition)) +
stat_summary(fun.y = mean, geom = "point", size=5, alpha=0.6) +
stat_summary(fun.y = mean, geom = "line", size=1, alpha=0.6) +
stat_summary(fun.data = mean_cl_normal, fun.args=list(mult=1), alpha=0.6)+
geom_line(data=estim_res[which(estim_res$variable %in% c("gw124fin","gw124finA_BPA03", "gw124finA_BPAend", "gw124fin_BPA3", "gw124fin_BPA30", "gw124fin_BPA300")),], aes(x=dpf, y=diff_estimates, colour=condition), size=2, alpha=1)+
scale_fill_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
scale_color_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
# geom_point(alpha=0.6,size=5)+
# geom_line(alpha=0.6,size=1)+
expand_limits(x=c(0, 1100),y=c(-30,30))+
# expand_limits(x=c(0, 600),y=c(-30,30))+
labs(x="Days post fertilization", y="Mass difference to control (%)") +
theme(axis.text.x = element_text(size=16, colour = "black"), axis.text.y = element_text(size=16, colour = "black"),
axis.title.x = element_text(size=16, margin=margin(t=10)), axis.title.y = element_text(size=16, margin=margin(r=10)),
legend.text = element_text(size=16), legend.title = element_text(size=16),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background=element_rect("grey", fill="white", size=1)
)
p
| /R codes RBT/To delete/compare diffs estimates vs diffs real.R | no_license | bastiensadoul/DEB_RBT | R | false | false | 20,863 | r | ##################################################################################
################## Created November 2017 ########################
##################### by Bastien Sadoul ############################
##################################################################################
# Calls parameters from .mat for rainbow trout (out of Add my Pet)
# Calls f values for gw124 and gw150, estimated using run_funique_all.m
# Estimates growth using classic deb model and mean f values
# Estimates growth with varying PARAM and mean f values
# Calculates diff between CLASSIC and VARYING estimates
# Compares with actual diff
##################################################################################
rm(list=ls())
cat("\014") # To clear the console
dir=dirname(rstudioapi::getActiveDocumentContext()$path) # gets the name of the directory of the active script (works only in R studio)
library(akima) # for interpolation in debODE_ABJ.R
library(R.matlab)
library(deSolve)
library(reshape2)
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
##################################################################################
####### --------- REAL VALUES
##################################################################################
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
#### ------------------------------------
# ---- ALL WEIGHT VALUES IN ONE DATAFRAME
#### ------------------------------------
# Read each tank-weight file (tW.<study>.txt), reshape to long format,
# and stack everything into `totreal`.
for (study in c("gw150", "gw124ini", "gw124fin",
                "gw150_BPA3", "gw150_BPA30",
                "gw124_BPA03", "gw124_BPA3", "gw124_BPA30",
                "gw124_BPA100", "gw124_BPA100end", "gw124_BPA300")) {
  # Build the file path directly rather than eval(parse(...)) on generated code
  temp <- read.table(paste0(dir, "/Data txt/tW.", study, ".txt"),
                     sep = "\t", header = TRUE)
  temp$dpf <- temp$dpb + 64   # days post birth -> days post fertilization
  temp <- temp[, -1]          # drop the dpb column
  temp <- melt(temp, id = "dpf")
  # Drop survival columns. grepl() stays safe when nothing matches;
  # the previous -grep(...) form would have emptied the data frame
  # (negative indexing with integer(0)) in that case.
  temp <- temp[!grepl("surv", temp$variable), ]
  names(temp)[names(temp) == "value"] <- "gw"
  temp$study <- study
  if (study == "gw150") {
    totreal <- temp
  } else {
    totreal <- rbind(totreal, temp)
  }
}
#### ------------------------------------
# ---- ADD A CONDITION VARIABLE
#### ------------------------------------
totreal$condition = colsplit(totreal$study, "_", names = c("1", "2"))[,2]
totreal$condition[totreal$condition==""]="BPA0"
totreal$study = colsplit(totreal$study, "_", names = c("1", "2"))[,1]
totreal$study[which(totreal$study=="gw124" & totreal$dpf<=350)]="gw124ini"
totreal$study[which(totreal$study=="gw124" & totreal$dpf>350)]="gw124fin"
totreal$variable = as.character(totreal$variable)
totreal$variable[which(totreal$study=="gw124ini")]=
gsub("gw124", "gw124ini", totreal$variable[which(totreal$study=="gw124ini")])
totreal$variable[which(totreal$study=="gw124fin")]=
gsub("gw124", "gw124fin", totreal$variable[which(totreal$study=="gw124fin")])
totreal$variable[which(totreal$variable=="gw124finfin")]="gw124fin"
totreal$variable[which(totreal$variable=="gw124iniini")]="gw124ini"
totreal$condition[totreal$condition=="BPA100end"] = "BPA100"
totreal$dpf[totreal$dpf==957]=958
totreal$dpf[totreal$dpf==957.5]=958.5
#### ------------------------------------
# ---- CALCULATES A DIFF FOR EACH TANK
#### ------------------------------------
meanBPA0 = aggregate(gw ~ study*dpf, data=totreal[totreal$condition=="BPA0",], FUN=mean)
names(meanBPA0)[3] = "mean_gwBPA0"
totreal = merge(totreal, meanBPA0, all.x=T)
totreal=totreal[which(!is.na(totreal$mean_gwBPA0)),] # remove those with no mean control
totreal$real_diffW = (totreal$gw - totreal$mean_gwBPA0)/totreal$mean_gwBPA0 * 100
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
##################################################################################
####### --------- DEB ESTIMATES
##################################################################################
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
#### ------------------------------------
# ---- CALL DEB PARAMETERS
#### ------------------------------------
#--- Automatically imports from .mat file
param_cont=readMat(gsub("R codes RBT", "Oncorhynchus_mykiss/results_Oncorhynchus_mykiss.mat", dir))$par[,,1]
#--- Add p_Am here to be sure it is the same even when pM changes over time
param_cont$p_Am = param_cont$z * param_cont$p.M / param_cont$kap # J/d.cm^2, max assimilation rate (eq coming from the fact that at Lmax, kap*pC=pM. And pC is provided by pA --> kap*pAm=pM)
#--- Missing parameters in .mat
param_cont$w_E = 23.9
param_cont$w_V = 23.9
param_cont$M_V = param_cont$d.V/ param_cont$w_V # mol/cm^3, volume-specific mass of structure
param_cont$kap_G = param_cont$mu.V * param_cont$M_V / param_cont$E.G # -, growth efficiency
#### ------------------------------------
# ---- CALL DEB ODEs
#### ------------------------------------
source(paste(dir,"debODE_ABJ.R", sep="/"))
##########################################################
############## ---- ESTIMATED WEIGHT WITH STANDARD DEB ABJ
##########################################################
dpf=seq(0,1000, by=1)
i=1
for (rep in unique(totreal$variable)) {
study = unique(totreal[totreal$variable == rep, "study"])
#### ------------------------------------
# ---- Forcing variables
#### ------------------------------------
f_studies = readMat(gsub("R codes RBT", "/f_prdData_funique_all.mat", dir))$f[,,1]
for_f = names(f_studies)[grep(study, names(f_studies))]
param_cont$f = mean(unlist(f_studies)[names(f_studies) %in% for_f])
param_cont$TempC = 8.5 # en degres C
#### ------------------------------------
# ---- Initial state
#### ------------------------------------
LEH = numeric(6)
LEH[1] = 0.0001 # L
LEH[2] = 643.562 # E
LEH[3] = 0 # H
LEH[4] = 0 # E_R
LEH[5] = 0 # Lb, we don't know yet. Will be determined by the ode (when L reaches E_Hb)
LEH[6] = 0 # Lj, we don't know yet. Will be determined by the ode (when L reaches E_Hj)
#### ------------------------------------
# ---- GETTING ESTIMATED WEIGHTs
#### ------------------------------------
LEHovertime_cont = ode(y = LEH, func = debODE_ABJ, times = dpf,
parms = param_cont,
method="ode23")
colnames(LEHovertime_cont) = c("dpf", "L", "E", "H", "E_R", "Lb", "Lj")
LEHovertime_cont = as.data.frame(LEHovertime_cont)
LEHovertime_cont$estim_W_cont = LEHovertime_cont[,"L"]^3 +
LEHovertime_cont[,"E"] / param_cont$d.E * param_cont$w_E / param_cont$mu.E # g, wet weight
#### ------------------------------------
# ---- CALCULATE tj
#### ------------------------------------
tjcont = LEHovertime_cont[
which(abs(LEHovertime_cont[,"H"] - param_cont$E.Hj) == min(abs(LEHovertime_cont[,"H"] - param_cont$E.Hj))),"dpf"]
#### ------------------------------------
# ---- SAVE
#### ------------------------------------
temp_res=data.frame(dpf)
temp_res$estim_W_cont = LEHovertime_cont$estim_W_cont
mintime = min(totreal[totreal$variable == rep, "dpf"])
maxtime = max(totreal[totreal$variable == rep, "dpf"])
temp_res$study=study
temp_res$variable=rep
temp_res$tjcont=tjcont
temp_res = temp_res[which(temp_res$dpf>=mintime & temp_res$dpf<=maxtime),]
if (i==1){
estim_res_cont = temp_res
} else {estim_res_cont = rbind(estim_res_cont, temp_res)}
i=i+1
}
maxtjcont = max(estim_res_cont$tjcont)
##########################################################
############## ---- ESTIMATED WEIGHT WITH VARYING PARAM
##########################################################
### Needs to be in a function to optimize
rm(list=setdiff(ls(), c("totreal", "debODE_ABJ", "param_cont", "estim_res_cont", "dir")))
source(paste(dir, "spring_and_damper_model.R", sep="/"))
# Objective function minimised by optim(): the sum of squared differences
# between the observed %-difference in mass to control (real_diffW) and the
# DEB-model prediction (diff_estimates), over all exposed tanks, restricted
# to days before the latest control metamorphosis (maxtjcont).
#
# param_spring_damper: vector (ks, cs, Fpert_BPA03, Fpert_BPA3, Fpert_BPA30,
#   Fpert_BPA300, Fpert_BPA100) -- spring stiffness, damper resilience and
#   one perturbation force per BPA condition.
#
# NOTE(review): relies on globals prepared by the script (totreal,
# param_cont, estim_res_cont, maxtjcont, dir, debODE_ABJ,
# spring_damper_model) and itself exports ks, cs and Fpert with <<-.
LLode <- function(param_spring_damper){
# <-
# Time grid (days post fertilization) over which the DEB model is solved
dpf=seq(0,1000, by=1)
#### ------------------------------------
# ---- PARAMETERS OF THE SPRING AND DAMPER
#### ------------------------------------
time_for_var=seq(0,length(dpf)+1, by=1) # +1 because in debODE_ABJ "floor(t)+1"
yini = c(0, 0)
# param_spring_damper = data.frame(ks = 2, cs = 200, Fpert_BPA03 = 20, Fpert_BPA3 = 20,
# Fpert_BPA30 = 30, Fpert_BPA300 = 20, Fpert_BPA100 = 20)
# # ---- Make sure all spring and damper parameters are above zero
# abs() + epsilon keeps Nelder-Mead proposals in the positive domain
param_spring_damper = abs(param_spring_damper)+0.0001
# # ---- Extract parameters
# ks and cs are assigned globally (<<-) so spring_damper_model can read them
ks <<- param_spring_damper[1]
# ks = 2 # force of the spring
cs <<- param_spring_damper[2]
# cs = 200 # resilience of the damper
Fpert_BPA03 = param_spring_damper[3]
Fpert_BPA3 = param_spring_damper[4]
Fpert_BPA30 = param_spring_damper[5]
Fpert_BPA300 = param_spring_damper[6]
Fpert_BPA100 = param_spring_damper[7]
#### ------------------------------------
# ---- LOOP ON ALL TANKS EXCEPT BPA0
#### ------------------------------------
i=1
for (rep in unique(totreal$variable[totreal$condition != "BPA0"])){
print(i)
param_deb = param_cont
study = unique(totreal[totreal$variable == rep, "study"])
condition = unique(totreal$condition[totreal$variable == rep])
# Pick the perturbation force matching this tank's condition, e.g.
# condition "BPA30" assigns Fpert_BPA30 to the global Fpert
eval(parse(text=paste("Fpert <<- Fpert_", condition, sep="")))
# ---- Forcing variables
f_studies = readMat(gsub("R codes RBT", "/f_prdData_funique_all.mat", dir))$f[,,1]
for_f = names(f_studies)[grep(study, names(f_studies))]
# NOTE(review): the two lines below update param_cont, but param_deb was
# copied from param_cont a few lines earlier, so param_deb keeps stale
# f/TempC values -- confirm whether param_deb was intended here.
param_cont$f = mean(unlist(f_studies)[names(f_studies) %in% for_f])
param_cont$TempC = 8.5 # water temperature, in degrees C
# ---- Initial state
LEH = numeric(6)
LEH[1] = 0.0001 # L
LEH[2] = 643.562 # E
LEH[3] = 0 # H
LEH[4] = 0 # E_R
LEH[5] = 0 # Lb, we don't know yet. Will be determined by the ode (when L reaches E_Hb)
LEH[6] = 0 # Lj, we don't know yet. Will be determined by the ode (when L reaches E_Hj)
# ---- Creates a p_M varying through time
# Solve the spring-and-damper ODE, then rescale its output into a
# time-varying p.M passed to the DEB model (column 1 = time, column 2 = p.M)
tp_M=ode(y=yini,func=spring_damper_model, times=time_for_var, parms=c(ks,cs), method="ode45")
tp_M = as.data.frame(tp_M)
tp_M[, 2] = (tp_M[, 2]+1) * param_deb$p.M
param_deb$p.M = tp_M[, c(1,2)]
tvar = tp_M
# ---- ESTIMATED WEIGHT
LEHovertime_var = ode(y = LEH, func = debODE_ABJ, times = dpf,
parms = param_deb,
method="ode45")
colnames(LEHovertime_var) = c("dpf", "L", "E", "H", "E_R", "Lb", "Lj")
LEHovertime_var = as.data.frame(LEHovertime_var)
LEHovertime_var$estim_W_var = LEHovertime_var[,"L"]^3 +
LEHovertime_var[,"E"] / param_deb$d.E * param_deb$w_E / param_deb$mu.E # g, wet weight
# ---- Calculate tj
# tj = day at which maturity H is closest to the threshold E.Hj
tjvar = LEHovertime_var[
which(abs(LEHovertime_var[,"H"] - param_deb$E.Hj) == min(abs(LEHovertime_var[,"H"] - param_deb$E.Hj))),"dpf"]
# ---- SAVE
temp_res = data.frame(dpf)
temp_res$estim_W_var = LEHovertime_var$estim_W_var
mintime = min(totreal[totreal$variable == rep, "dpf"])
maxtime = max(totreal[totreal$variable == rep, "dpf"])
temp_res$study=study
temp_res$variable=rep
temp_res$tjvar = tjvar
# Keep only the days covered by the observations for this tank
temp_res = temp_res[which(temp_res$dpf>=mintime & temp_res$dpf<=maxtime),]
if (i==1){
estim_res_var = temp_res
} else {estim_res_var = rbind(estim_res_var, temp_res)}
i=i+1
}
#########################################################################################################
####### --------- CALCULATES DIFF ESTIMATES AND COMPARE WITH REAL DIFF
#########################################################################################################
# estim_res_cont = data.frame(estim_res_cont)
# estim_res_var = data.frame(estim_res_var)
#
# estim_res_cont$dpf=as.factor(estim_res_cont$dpf)
# estim_res_var$dpf=as.factor(estim_res_var$dpf)
# estim_res_cont$variable=as.factor(estim_res_cont$variable)
# estim_res_var$variable=as.factor(estim_res_var$variable)
# estim_res_cont$study=as.factor(estim_res_cont$study)
# estim_res_var$study=as.factor(estim_res_var$study)
# estim_res = merge(estim_res_cont, estim_res_var)
# Keyed data.tables so merge() joins on (dpf, study, variable).
# NOTE(review): data.table() is used but no library(data.table) call is
# visible in this file -- confirm it is attached elsewhere.
estim_res_cont = data.table(estim_res_cont, key = c("dpf", "study", "variable"))
estim_res_var = data.table(estim_res_var, key = c("dpf", "study", "variable"))
tot = data.table(totreal, key = c("dpf", "study", "variable"))
estim_res = merge(estim_res_cont, estim_res_var)
# Model-predicted %-difference to control, analogous to real_diffW
estim_res$diff_estimates = (estim_res$estim_W_var - estim_res$estim_W_cont) / estim_res$estim_W_cont *100
totfinal = merge(tot, estim_res, all.x=T)
# Diff before tj
totfinal = totfinal[totfinal$dpf < maxtjcont,]
diff=totfinal$real_diffW-totfinal$diff_estimates
diff = diff[!is.na(diff)]
# Sum of squared differences: the value optim() minimises
LL = as.numeric(t(diff) %*% diff)
return(LL)
}
#########################################################################################################
####### --------- OPTIMISATION
#########################################################################################################
# Starting parameters
param_spring_damper = c(ks = 2, cs = 200, Fpert_BPA03 = 1, Fpert_BPA3 = 10,
Fpert_BPA30 = 12, Fpert_BPA300 = 20, Fpert_BPA100 = 15)
# Run the optimization procedure
tmin=0 # start of the pert (when Fpert applies)
tmax=40 # stop of the pert
trace(LLode, exit= quote(print(c(returnValue(), param_spring_damper))))
# totreal = totreal[which(totreal$study == "gw150" & totreal$condition == "BPA30"),]
# param_spring_damper = data.frame(ks = 2, cs = 200,
# Fpert_BPA30 = 12)
MLresults <- optim(param_spring_damper, LLode, method = c("Nelder-Mead"), hessian=F,
control=list(trace=10))
totreal = tot
untrace(LLode)
write.table(unlist(MLresults), paste(dir, "/results_optim/result_optim_", Sys.Date(), ".txt", sep=""), sep = "\t")
########## ALARM WHEN DONE
# Install beepr only when it is missing: an unconditional install.packages()
# re-downloads the package on every run of the script.
if (!requireNamespace("beepr", quietly = TRUE)) install.packages("beepr")
library(beepr)
beep()
# Sends a text message when the script is done (works only on macOS)
system("osascript -e 'tell application \"Messages\"' -e 'send \"end of script\" to buddy \"moi\"' -e 'end tell'")
#########################################################################################################
####### --------- PLOT RESULTS
#########################################################################################################
param_spring_damper = MLresults$par
temp = read.table(paste(dir, "/results_optim/result_optim_2017-11-05.txt", sep=""), sep="\t")
param_spring_damper = c(ks = temp[1,1], cs = temp[2,1], Fpert_BPA03 = temp[3,1], Fpert_BPA3 = temp[4,1],
Fpert_BPA30 = temp[5,1], Fpert_BPA300 = temp[6,1], Fpert_BPA100 = temp[7,1])
# Run from commented <-
######## ggplot
library(ggplot2)
estim_res$condition = colsplit(estim_res$variable, "_", names=c("a", "condition"))$condition
estim_res=rbind(data.frame(estim_res), data.frame(dpf=dpf, study="gw150", variable="gw150A_BPA0", estim_W_cont=NA, estim_W_var=NA, diff_estimates=0, condition="BPA0"))
estim_res=rbind(data.frame(estim_res), data.frame(dpf=dpf, study="gw124ini", variable="gw124iniA_BPA0", estim_W_cont=NA, estim_W_var=NA, diff_estimates=0, condition="BPA0"))
estim_res=rbind(data.frame(estim_res), data.frame(dpf=dpf, study="gw124fin", variable="gw124fin", estim_W_cont=NA, estim_W_var=NA, diff_estimates=0, condition="BPA0"))
temp=totfinal[totfinal$study=="gw150",]
p = ggplot(temp, aes(x=dpf, y=real_diffW, color=condition)) +
stat_summary(fun.y = mean, geom = "point", size=5, alpha=0.6) +
stat_summary(fun.y = mean, geom = "line", size=1, alpha=0.6) +
stat_summary(fun.data = mean_cl_normal, fun.args=list(mult=1), alpha=0.6)+
geom_line(data=estim_res[which(estim_res$variable %in% c("gw150A_BPA0","gw150A_BPA3","gw150A_BPA30")),], aes(x=dpf, y=diff_estimates, colour=condition), size=2, alpha=1)+
scale_fill_manual(labels=c("control", "BPA4", "BPA40"),
# values=c("#00FF00", "#CC3200", "#CB0000"))+
values=c("#00FF00", "#CB0000", "#000000"))+
scale_color_manual(labels=c("control", "BPA4", "BPA40"),
# values=c("#00FF00", "#CC3200", "#CB0000"))+
values=c("#00FF00", "#CB0000", "#000000"))+
# geom_point(alpha=0.6,size=5)+
# geom_line(alpha=0.6,size=1)+
expand_limits(x=c(0, 1100),y=c(-30,30))+
# expand_limits(x=c(0, 600),y=c(-30,30))+
labs(x="Days post fertilization", y="Mass difference to control (%)") +
theme(axis.text.x = element_text(size=16, colour = "black"), axis.text.y = element_text(size=16, colour = "black"),
axis.title.x = element_text(size=16, margin=margin(t=10)), axis.title.y = element_text(size=16, margin=margin(r=10)),
legend.text = element_text(size=16), legend.title = element_text(size=16),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background=element_rect("grey", fill="white", size=1)
)
p
temp=totfinal[totfinal$study=="gw124ini",]
p = ggplot(temp, aes(x=dpf, y=real_diffW, color=condition)) +
stat_summary(fun.y = mean, geom = "point", size=5, alpha=0.6) +
stat_summary(fun.y = mean, geom = "line", size=1, alpha=0.6) +
stat_summary(fun.data = mean_cl_normal, fun.args=list(mult=1), alpha=0.6)+
geom_line(data=estim_res[which(estim_res$variable %in% c("gw124iniA_BPA0","gw124iniA_BPA03", "gw124ini_BPA3", "gw124ini_BPA30", "gw124iniA_BPA100", "gw124ini_BPA300")),], aes(x=dpf, y=diff_estimates, colour=condition), size=2, alpha=1)+
scale_fill_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
scale_color_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
# geom_point(alpha=0.6,size=5)+
# geom_line(alpha=0.6,size=1)+
expand_limits(x=c(0, 1100),y=c(-30,30))+
# expand_limits(x=c(0, 600),y=c(-30,30))+
labs(x="Days post fertilization", y="Mass difference to control (%)") +
theme(axis.text.x = element_text(size=16, colour = "black"), axis.text.y = element_text(size=16, colour = "black"),
axis.title.x = element_text(size=16, margin=margin(t=10)), axis.title.y = element_text(size=16, margin=margin(r=10)),
legend.text = element_text(size=16), legend.title = element_text(size=16),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background=element_rect("grey", fill="white", size=1)
)
p
temp=totfinal[totfinal$study=="gw124fin",]
p = ggplot(temp, aes(x=dpf, y=real_diffW, color=condition)) +
stat_summary(fun.y = mean, geom = "point", size=5, alpha=0.6) +
stat_summary(fun.y = mean, geom = "line", size=1, alpha=0.6) +
stat_summary(fun.data = mean_cl_normal, fun.args=list(mult=1), alpha=0.6)+
geom_line(data=estim_res[which(estim_res$variable %in% c("gw124fin","gw124finA_BPA03", "gw124finA_BPAend", "gw124fin_BPA3", "gw124fin_BPA30", "gw124fin_BPA300")),], aes(x=dpf, y=diff_estimates, colour=condition), size=2, alpha=1)+
scale_fill_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
scale_color_manual(labels=c("control", "BPA03", "BPA3", "BPA30", "BPA100", "BPA300"),
values=colorRampPalette(c("green", "red", "black"))(n = 6))+
# geom_point(alpha=0.6,size=5)+
# geom_line(alpha=0.6,size=1)+
expand_limits(x=c(0, 1100),y=c(-30,30))+
# expand_limits(x=c(0, 600),y=c(-30,30))+
labs(x="Days post fertilization", y="Mass difference to control (%)") +
theme(axis.text.x = element_text(size=16, colour = "black"), axis.text.y = element_text(size=16, colour = "black"),
axis.title.x = element_text(size=16, margin=margin(t=10)), axis.title.y = element_text(size=16, margin=margin(r=10)),
legend.text = element_text(size=16), legend.title = element_text(size=16),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background=element_rect("grey", fill="white", size=1)
)
p
|
# Load ape for phylogenetic tree I/O and manipulation
library(ape)
# Read the Newick-format tree for this alignment
testtree <- read.tree("4086_0.txt")
# Remove the root from the tree (unrooted topology written out below)
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4086_0_unrooted.txt") | /codeml_files/newick_trees_processed/4086_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Read the Newick-format tree for this alignment
testtree <- read.tree("4086_0.txt")
# Convert it to an unrooted topology
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out in Newick format
write.tree(unrooted_tr, file="4086_0_unrooted.txt")
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/RerunWorkflow.R
\name{RerunWorkflow}
\alias{RerunWorkflow}
\title{Rerun a workflow object.}
\usage{
RerunWorkflow(workflow, from = NULL)
}
\arguments{
\item{workflow}{A zoonWorkflow object from a previous zoon analysis}
\item{from}{Which modules should be run. If NULL (default), run from the
first NULL output (i.e. where the workflow broke). Otherwise takes an
integer and runs from that module.}
}
\value{
A list with the results of each module and a copy of the
call used to execute the workflow.
}
\description{
Takes a workflow object and reruns it.
}
\examples{
\dontrun{
w <- workflow(UKAnophelesPlumbeus, UKAir,
OneHundredBackground,
LogisticRegression,
SameTimePlaceMap)
RerunWorkflow(w)
}
}
| /man/RerunWorkflow.Rd | no_license | timcdlucas/zoon | R | false | false | 841 | rd | % Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/RerunWorkflow.R
\name{RerunWorkflow}
\alias{RerunWorkflow}
\title{Rerun a workflow object.}
\usage{
RerunWorkflow(workflow, from = NULL)
}
\arguments{
\item{workflow}{A zoonWorkflow object from a previous zoon analysis}
\item{from}{Which modules should be run. If NULL (default), run from the
first NULL output (i.e. where the workflow broke). Otherwise takes an
integer and runs from that module.}
}
\value{
A list with the results of each module and a copy of the
call used to execute the workflow.
}
\description{
Takes a workflow object and reruns it.
}
\examples{
\dontrun{
w <- workflow(UKAnophelesPlumbeus, UKAir,
OneHundredBackground,
LogisticRegression,
SameTimePlaceMap)
RerunWorkflow(w)
}
}
|
# @rdname summary.corpus
# @method summary character
# @examples
#
# # summarize texts
# summary(c("Testing this text. Second sentence.", "And this one."))
# summary(data_char_ukimmig2010)
# myTextSummaryDF <- summary(data_char_ukimmig2010, verbose = FALSE)
# head(myTextSummaryDF)
# summary() method for a character vector of texts: reports the number of
# types, tokens and sentences for (at most) the first n documents.
#
# object  - character vector of texts to summarize
# n       - maximum number of texts to summarize (default 100)
# verbose - if TRUE, print the resulting data.frame
# tolower - if TRUE, lower-case texts before tokenizing
# ...     - passed on to nsentence(), tokens() and print()
# Returns (invisibly) a data.frame with one row per text.
summary.character <- function(object, n = 100, verbose = TRUE, tolower = FALSE, ...) {
    # seq_len() is safe for empty input, unlike 1:min(...) which would
    # yield the index vector c(1, 0) when object has length zero
    object <- object[seq_len(min(n, length(object)))]
    if (is.null(names(object)))
        names(object) <- paste0(quanteda_options("base_docname"), seq_along(object))
    # count sentences before the optional lower-casing, as in the original
    # order of operations (case can matter for sentence detection)
    nsents <- nsentence(object, ...)
    if (tolower) object <- char_tolower(object)
    toks <- tokens(object, ...)
    ntokens <- ntoken(toks)
    ntypes <- ntype(toks)
    results <- data.frame(Text = names(object),
                          Types = ntypes,
                          Tokens = ntokens,
                          Sentences = nsents,
                          row.names = NULL)
    if (verbose) print(results, ...)
    invisible(results)
}
| /R/character-methods.R | no_license | t0tem/quanteda | R | false | false | 1,018 | r | # @rdname summary.corpus
# @method summary character
# @examples
#
# # summarize texts
# summary(c("Testing this text. Second sentence.", "And this one."))
# summary(data_char_ukimmig2010)
# myTextSummaryDF <- summary(data_char_ukimmig2010, verbose = FALSE)
# head(myTextSummaryDF)
# S3 summary() method for character vectors of texts. For up to the first
# n documents it computes, per text, the number of distinct word types,
# tokens, and sentences.
#
# object  - character vector of texts
# n       - summarize at most this many texts (default 100)
# verbose - print the summary data.frame when TRUE
# tolower - lower-case the texts before tokenizing when TRUE
# ...     - forwarded to nsentence(), tokens() and print()
# Returns the summary data.frame invisibly.
summary.character <- function(object, n = 100, verbose = TRUE, tolower = FALSE, ...) {
    # use seq_len() so that an empty input yields an empty selection;
    # 1:min(...) would produce c(1, 0) for zero-length input
    object <- object[seq_len(min(n, length(object)))]
    if (is.null(names(object)))
        names(object) <- paste0(quanteda_options("base_docname"), seq_along(object))
    # sentence counts are taken before lower-casing, preserving the
    # original order of operations
    nsents <- nsentence(object, ...)
    if (tolower) object <- char_tolower(object)
    toks <- tokens(object, ...)
    ntokens <- ntoken(toks)
    ntypes <- ntype(toks)
    results <- data.frame(Text = names(object),
                          Types = ntypes,
                          Tokens = ntokens,
                          Sentences = nsents,
                          row.names = NULL)
    if (verbose) print(results, ...)
    invisible(results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtifit.R
\name{dtifit}
\alias{dtifit}
\title{DTI Fitting Procedure from FSL}
\usage{
dtifit(infile, bvecs, bvals, mask = NULL, outprefix = NULL, opts = "",
bet.opts = "", verbose = TRUE, sse = FALSE, save_tensor = FALSE)
}
\arguments{
\item{infile}{Input filename}
\item{bvecs}{b-vectors: matrix of 3 columns or
filename of ASCII text file}
\item{bvals}{b-values: vector of same length as number of rows of b-vectors
or filename of ASCII text file}
\item{mask}{Mask filename}
\item{outprefix}{Output prefix}
\item{opts}{Additional options for \code{dtifit}}
\item{bet.opts}{Options for \code{\link{fslbet}} if mask is not supplied}
\item{verbose}{print diagnostic messages}
\item{sse}{Save sum of squared errors}
\item{save_tensor}{Save tensor file out}
}
\value{
Vector of character filenames of output. See Note
}
\description{
Calls \code{dtifit} from FSL
}
\note{
On successful completion of the command, the following files
will be output, which are:
\code{mask} - the mask used in the analysis
\code{outprefix}_V1 - 1st eigenvector
\code{outprefix}_V2 - 2nd eigenvector
\code{outprefix}_V3 - 3rd eigenvector
\code{outprefix}_L1 - 1st eigenvalue
\code{outprefix}_L2 - 2nd eigenvalue
\code{outprefix}_L3 - 3rd eigenvalue
\code{outprefix}_MD - mean diffusivity
\code{outprefix}_FA - fractional anisotropy
\code{outprefix}_MO - mode of the anisotropy (oblate ~ -1; isotropic ~ 0; prolate ~ 1)
\code{outprefix}_S0 - raw T2 signal with no diffusion weighting
Optional output:
If \code{sse = TRUE}, then the additional file will be present:
\code{outprefix}_sse - Sum of squared error
If \code{save_tensor = TRUE}, then the additional file will be present:
\code{outprefix}_tensor - tensor as a 4D file in this order: Dxx,Dxy,Dxz,Dyy,Dyz,Dzz
}
| /man/dtifit.Rd | no_license | kuonanhong/fslr | R | false | true | 1,834 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtifit.R
\name{dtifit}
\alias{dtifit}
\title{DTI Fitting Procedure from FSL}
\usage{
dtifit(infile, bvecs, bvals, mask = NULL, outprefix = NULL, opts = "",
bet.opts = "", verbose = TRUE, sse = FALSE, save_tensor = FALSE)
}
\arguments{
\item{infile}{Input filename}
\item{bvecs}{b-vectors: matrix of 3 columns or
filename of ASCII text file}
\item{bvals}{b-values: vector of same length as number of rows of b-vectors
or filename of ASCII text file}
\item{mask}{Mask filename}
\item{outprefix}{Output prefix}
\item{opts}{Additional options for \code{dtifit}}
\item{bet.opts}{Options for \code{\link{fslbet}} if mask is not supplied}
\item{verbose}{print diagnostic messages}
\item{sse}{Save sum of squared errors}
\item{save_tensor}{Save tensor file out}
}
\value{
Vector of character filenames of output. See Note
}
\description{
Calls \code{dtifit} from FSL
}
\note{
On successful completion of the command, the following files
will be output, which are:
\code{mask} - the mask used in the analysis
\code{outprefix}_V1 - 1st eigenvector
\code{outprefix}_V2 - 2nd eigenvector
\code{outprefix}_V3 - 3rd eigenvector
\code{outprefix}_L1 - 1st eigenvalue
\code{outprefix}_L2 - 2nd eigenvalue
\code{outprefix}_L3 - 3rd eigenvalue
\code{outprefix}_MD - mean diffusivity
\code{outprefix}_FA - fractional anisotropy
\code{outprefix}_MO - mode of the anisotropy (oblate ~ -1; isotropic ~ 0; prolate ~ 1)
\code{outprefix}_S0 - raw T2 signal with no diffusion weighting
optional output
If \code{sse = TRUE}, then the additional file will be present:
\code{outprefix}_sse - Sum of squared error
If \code{save_tensor = TRUE}, then the additional file will be present:
\code{outprefix}_tensor - tensor as a 4D file in this order: Dxx,Dxy,Dxz,Dyy,Dyz,Dzz
}
|
# Neural-network forecasting of Southwest Airlines arrival delays (nnfor::mlp).
# NOTE(review): `swa` is not defined in this file - presumably a data.frame of
# monthly arrival delays loaded elsewhere; confirm against the loading script.
library(tswge)
##create time series object
# Train on the first 141 months (starting Jan 2004), hold out the last 36.
swatrain = ts(swa$arr_delay[1:141], start = c(2004,1), frequency = 12)
swatest = ts(swa$arr_delay[142:177], start = c(2015, 10), frequency = 12)
set.seed(2)
install.packages('nnfor',repos='http://cran.us.r-project.org')
available.packages()
if (!require("devtools")){install.packages("devtools")}
library(nnfor)
# Fit an ensemble of 50 MLPs; combine member forecasts by their mean.
fit.mlp = mlp(swatrain, reps = 50, comb = "mean")
fit.mlp
plot(fit.mlp)
# 36-month forecast over the hold-out period; ASE = average squared error.
fore.mlp = forecast(fit.mlp, h=36)
ASE = mean((swatest - fore.mlp$mean)^2)
ASE
?mlp
# NOTE(review): this refit (with differencing orders 1, 6, 12 and no
# deterministic seasonality) overwrites fit.mlp but is never forecast or
# scored - the ASE above refers to the 50-rep fit only.
fit.mlp = mlp(swatrain, difforder = c(1,6,12), allow.det.season = FALSE, reps = 100)
install.packages("~/Downloads/r-cran-plotrix_3.7-1.orig.tar", type = "source", repos = NULL) | /Time Series/Unit 13 Neural Networks.R | no_license | sac3tf/Data-Science-Past-Projects | R | false | false | 692 | r | library(tswge)
##create time series object
swatrain = ts(swa$arr_delay[1:141], start = c(2004,1), frequency = 12)
swatest = ts(swa$arr_delay[142:177], start = c(2015, 10), frequency = 12)
set.seed(2)
install.packages('nnfor',repos='http://cran.us.r-project.org')
available.packages()
if (!require("devtools")){install.packages("devtools")}
library(nnfor)
fit.mlp = mlp(swatrain, reps = 50, comb = "mean")
fit.mlp
plot(fit.mlp)
fore.mlp = forecast(fit.mlp, h=36)
ASE = mean((swatest - fore.mlp$mean)^2)
ASE
?mlp
fit.mlp = mlp(swatrain, difforder = c(1,6,12), allow.det.season = FALSE, reps = 100)
install.packages("~/Downloads/r-cran-plotrix_3.7-1.orig.tar", type = "source", repos = NULL) |
# Configure the Hadoop environment for RHadoop (rmr2 / rhdfs).
# Bug fix: the first line previously read `dSys.setenv(...)` - an undefined
# function (stray leading "d") that aborted the script immediately.
Sys.setenv(HADOOP_HOME='/usr/local/hadoop')
Sys.setenv(HADOOP_CMD='/usr/local/hadoop/bin/hadoop')
Sys.setenv(HADOOP_STREAMING='/usr/local/hadoop/share/hadoop/tools/lib/hadoop-streaming-2.2.0.jar')
start.time <- Sys.time()
# Read the two pre-computed feature tables (whitespace-delimited TAE output).
v.1 <- read.table ("/home/hduser/Documents/R/TAE_output/2.txt", sep = "")
v.2 <- read.table ("/home/hduser/Documents/R/TAE_output/3.txt", sep = "")
library(rmr2)
library(rhdfs)
library("MASS")
hdfs.init()
# Class +1: columns 2 and 3 of the first file form a two-column feature matrix.
ff12.1 <- c (v.1[,2], v.1[,3])
m1 <- matrix(ff12.1,ncol=2,byrow=FALSE)
label.1 <- rep(1, nrow(m1))
m1 <- cbind(m1,label.1)
# Class -1: columns 1 and 2 of the second file.
ff12.2 <- c (v.2[,1], v.2[,2])
m2 <- matrix(ff12.2,ncol=2,byrow=FALSE)
label.2 <- rep(-1, nrow(m2))
m2 <- cbind(m2,label.2)
# Stack both classes ([x1, x2, label]) and push the matrix to HDFS for the
# map-reduce job below.
mb <- rbind(m1,m2)
data.content <- to.dfs(mb)
# Mapper: trains a linear proximal/Lagrangian-style SVM on the full labelled
# matrix `v` (rows = samples; columns 1-2 = features, column 3 = label +/-1),
# plots the separating line, and emits the line's intercept and slope under a
# single key so the reducer sees one record.
# NOTE(review): the function also reads the globals m1/m2 (defined in the
# driver script) via closure - it is not a self-contained mapper.
data.map.fn <- function(k,v){
key <- 1
###------------------start of svm--------------------------###
# mb <- matrix(c(5.5,5.7,6.1,4.5,4.3,1.5,0.5,1.1,-1.8,-1.5,1,1,1,-1,-1),nrow=5,ncol=3)
#----------extract features and label---------------------------------#
m_ff <- v[,c(1,2)]
m_label <- v[,3]
#----------normalization----------------------------------------------#
# Z-score each feature column (subtract mean, divide by sd).
m_sd <- apply(m_ff,2,sd,na.rm = TRUE)
m_mean <- colMeans(m_ff, na.rm = TRUE)
for (i in 1:ncol(m_ff)){
m_ff[,i] = (m_ff[,i]-m_mean[i])/m_sd[i]
}
#------------------------append indentity matrix----------------------------------#
# Augment the feature matrix with identity-style rows/columns so the
# regularization terms can be folded into one design matrix.
m.indx <- nrow(m_ff)
n.indx <- ncol(m_ff)
m_ff.right <- matrix(rep(0,m.indx*n.indx),nrow=m.indx,ncol=n.indx)
m_ff.app <- cbind(m_ff,m_ff.right)
m_ff.down <- diag(n.indx*2)
m_ff.app <- rbind(m_ff.app,m_ff.down)
m_ff.IM <- -diag(n.indx)
m_ff.app[c(m.indx+n.indx+1,m.indx+n.indx*2),c(1,2)] <- m_ff.IM
#-----------------------------sqrt---------------------------------#
# Regularization weight lamda; its square root scales the augmented matrix.
lamda <- 0.95
lamda.matrix <- 1/lamda*diag(2)
lamda.right <- matrix(rep(0,n.indx*n.indx),nrow=n.indx,ncol=n.indx)
lamda.down <- matrix(rep(0,n.indx*n.indx*2),nrow=n.indx,ncol=n.indx*2)
lamda.matrix <- cbind(lamda.matrix,lamda.right)
lamda.matrix <- rbind(lamda.matrix,lamda.down)
lamda.matrix <- sqrt(lamda.matrix)
#--------------------------------matrix A------------------------------#
A.matrix <- m_ff.app %*% lamda.matrix
#--------------------9by9 matrix------------------------------------------#
# D: diagonal matrix of the (padded) labels.
m.append <- matrix(rep(1,n.indx*2),nrow=1,ncol=n.indx*2)
m_label = t(m_label)
m_label.app <- cbind(m_label,m.append)
m.length <- length(m_label.app)
D.matrix <- matrix(rep(0,m.length*m.length),nrow=m.length,ncol=m.length)
for (j in 1:nrow(D.matrix)){
D.matrix[j,j] = m_label.app[j]
}
#--------------------------9by5------------------------------------#
E.matrix <- matrix(c(rep(1,m.indx),rep(0,n.indx*2)),nrow=m.indx+n.indx*2,ncol=1)
E.negative <- -E.matrix
C.matrix <- cbind(A.matrix,E.negative)
#--------------------------H.matrix, S.matrix------------------------------------#
H.matrix <- D.matrix %*% C.matrix
n.A <- ncol(A.matrix)
# Solver hyper-parameters: step scale nu, iteration cap, tolerance.
nu <- 1
it <- 0
itmax <- 1000
tol <- 0.001
alpha <- 1.9/nu
# ginv (MASS): Moore-Penrose pseudo-inverse.
S.matrix <- H.matrix %*% ginv(diag(n.A+1)/nu + t(H.matrix)%*%H.matrix)
#----------------------------------------------------------------#
u <- nu * (1-S.matrix %*% (t(H.matrix) %*% E.matrix))
oldu <- u + 1
# p1: elementwise plus-function max(x, 0).
p1.fn <- function(x){
p1 <- (abs(x)+x)/2
return(p1)
}
# BUG(review): rnorm() draws random normal deviates - rnorm(oldu-u) uses the
# vector only for its length, so this convergence test is random noise.
# Almost certainly a vector norm was intended, e.g. sqrt(sum((oldu-u)^2)).
# Same issue for `opt` below. Left as-is pending confirmation of intent.
while ((it<itmax) && (rnorm(oldu-u)>tol)){
z <- (1+p1.fn(((u/nu+H.matrix%*%(t(H.matrix)%*%u))-alpha*u)-1))
oldu <- u
u <- nu*(z-S.matrix%*%(t(H.matrix)%*%z))
it <- it + 1
}
opt = rnorm(n.A-oldu)
# Recover the primal solution: weights w and offset gamma.
w = t(A.matrix)%*%D.matrix%*%u
gamma = -t(E.matrix)%*%D.matrix%*%u
w1 <- w[1]+w[3]
w2 <- w[2]+w[4]
#----------------------------------------------------------------#
##gamma:-1.110223e-14
#
# w1 =0.02258797;
# w2 =1.0;
# gamma = -0.04
weight.w1 <- w1 ## w1:0.7268499
weight.w2 <- w2 ## w2:-0.3641662
# Split the normalized features back into the two classes for plotting
# (uses the global m1/m2 row counts - TODO confirm they match v's layout).
m1_row <- nrow(m1)
m2_row <- nrow(m2)
x1 <- m_ff[c(1:m1_row),1]
y1 <- m_ff[c(1:m1_row),2]
x2 <- m_ff[c((m1_row+1):(m1_row+m2_row)),1]
y2 <- m_ff[c((m1_row+1):(m1_row+m2_row)),2]
x <- m_ff[c(1:m.indx),1]
# Separating line: w1*x + w2*y = gamma  =>  y = (gamma - w1*x) / w2.
y = (gamma - x * weight.w1) / weight.w2
# NOTE(review): writing a PDF from inside a mapper only works on a local /
# single-node run; on a real cluster this path lives on the task node.
pdf("/home/hduser/Documents/R/TAE_output/6SVM_hadoop.pdf")
plot(x,y,type="b", pch =18,
xlab ='Course instructor categories',ylab ='course categories',
main = "Class medium(red) and high(blue)")
par(new=TRUE)
points(x1,y1,col="red", type="p",pch =1)
par(new=TRUE)
points(x2,y2,col="blue", type="p",pch =5)
dev.off()
#######---------------------end of svm---------------------------########
# Emit the fitted line parameters (intercept, slope) under the single key.
inercept <- gamma/weight.w2
slope <- -weight.w1/weight.w2
val <- c(inercept,slope)
keyval(key,val)
}
# Identity reducer: forwards each key's collected values unchanged.
data.reduce.fn <- function(k, v) keyval(k, v)
# Run the map-reduce job over the HDFS-resident matrix and pull the
# (intercept, slope) result back to the local session; report wall time.
classify <- mapreduce(input=data.content,
map=data.map.fn,
reduce=data.reduce.fn)
from.dfs(classify)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken #57.24182 secs
| /TAE_Rscript/6SVM_hadoop.R | no_license | junjunruan/Big-Data-Classification | R | false | false | 4,930 | r | dSys.setenv(HADOOP_HOME='/usr/local/hadoop')
Sys.setenv(HADOOP_CMD='/usr/local/hadoop/bin/hadoop')
Sys.setenv(HADOOP_STREAMING='/usr/local/hadoop/share/hadoop/tools/lib/hadoop-streaming-2.2.0.jar')
start.time <- Sys.time()
v.1 <- read.table ("/home/hduser/Documents/R/TAE_output/2.txt", sep = "")
v.2 <- read.table ("/home/hduser/Documents/R/TAE_output/3.txt", sep = "")
library(rmr2)
library(rhdfs)
library("MASS")
hdfs.init()
ff12.1 <- c (v.1[,2], v.1[,3])
m1 <- matrix(ff12.1,ncol=2,byrow=FALSE)
label.1 <- rep(1, nrow(m1))
m1 <- cbind(m1,label.1)
ff12.2 <- c (v.2[,1], v.2[,2])
m2 <- matrix(ff12.2,ncol=2,byrow=FALSE)
label.2 <- rep(-1, nrow(m2))
m2 <- cbind(m2,label.2)
mb <- rbind(m1,m2)
data.content <- to.dfs(mb)
data.map.fn <- function(k,v){
key <- 1
###------------------start of svm--------------------------###
# mb <- matrix(c(5.5,5.7,6.1,4.5,4.3,1.5,0.5,1.1,-1.8,-1.5,1,1,1,-1,-1),nrow=5,ncol=3)
#----------extract features and label---------------------------------#
m_ff <- v[,c(1,2)]
m_label <- v[,3]
#----------normalization----------------------------------------------#
m_sd <- apply(m_ff,2,sd,na.rm = TRUE)
m_mean <- colMeans(m_ff, na.rm = TRUE)
for (i in 1:ncol(m_ff)){
m_ff[,i] = (m_ff[,i]-m_mean[i])/m_sd[i]
}
#------------------------append indentity matrix----------------------------------#
m.indx <- nrow(m_ff)
n.indx <- ncol(m_ff)
m_ff.right <- matrix(rep(0,m.indx*n.indx),nrow=m.indx,ncol=n.indx)
m_ff.app <- cbind(m_ff,m_ff.right)
m_ff.down <- diag(n.indx*2)
m_ff.app <- rbind(m_ff.app,m_ff.down)
m_ff.IM <- -diag(n.indx)
m_ff.app[c(m.indx+n.indx+1,m.indx+n.indx*2),c(1,2)] <- m_ff.IM
#-----------------------------sqrt---------------------------------#
lamda <- 0.95
lamda.matrix <- 1/lamda*diag(2)
lamda.right <- matrix(rep(0,n.indx*n.indx),nrow=n.indx,ncol=n.indx)
lamda.down <- matrix(rep(0,n.indx*n.indx*2),nrow=n.indx,ncol=n.indx*2)
lamda.matrix <- cbind(lamda.matrix,lamda.right)
lamda.matrix <- rbind(lamda.matrix,lamda.down)
lamda.matrix <- sqrt(lamda.matrix)
#--------------------------------matrix A------------------------------#
A.matrix <- m_ff.app %*% lamda.matrix
#--------------------9by9 matrix------------------------------------------#
m.append <- matrix(rep(1,n.indx*2),nrow=1,ncol=n.indx*2)
m_label = t(m_label)
m_label.app <- cbind(m_label,m.append)
m.length <- length(m_label.app)
D.matrix <- matrix(rep(0,m.length*m.length),nrow=m.length,ncol=m.length)
for (j in 1:nrow(D.matrix)){
D.matrix[j,j] = m_label.app[j]
}
#--------------------------9by5------------------------------------#
E.matrix <- matrix(c(rep(1,m.indx),rep(0,n.indx*2)),nrow=m.indx+n.indx*2,ncol=1)
E.negative <- -E.matrix
C.matrix <- cbind(A.matrix,E.negative)
#--------------------------H.matrix, S.matrix------------------------------------#
H.matrix <- D.matrix %*% C.matrix
n.A <- ncol(A.matrix)
nu <- 1
it <- 0
itmax <- 1000
tol <- 0.001
alpha <- 1.9/nu
S.matrix <- H.matrix %*% ginv(diag(n.A+1)/nu + t(H.matrix)%*%H.matrix)
#----------------------------------------------------------------#
u <- nu * (1-S.matrix %*% (t(H.matrix) %*% E.matrix))
oldu <- u + 1
p1.fn <- function(x){
p1 <- (abs(x)+x)/2
return(p1)
}
while ((it<itmax) && (rnorm(oldu-u)>tol)){
z <- (1+p1.fn(((u/nu+H.matrix%*%(t(H.matrix)%*%u))-alpha*u)-1))
oldu <- u
u <- nu*(z-S.matrix%*%(t(H.matrix)%*%z))
it <- it + 1
}
opt = rnorm(n.A-oldu)
w = t(A.matrix)%*%D.matrix%*%u
gamma = -t(E.matrix)%*%D.matrix%*%u
w1 <- w[1]+w[3]
w2 <- w[2]+w[4]
#----------------------------------------------------------------#
##gamma:-1.110223e-14
#
# w1 =0.02258797;
# w2 =1.0;
# gamma = -0.04
weight.w1 <- w1 ## w1:0.7268499
weight.w2 <- w2 ## w2:-0.3641662
m1_row <- nrow(m1)
m2_row <- nrow(m2)
x1 <- m_ff[c(1:m1_row),1]
y1 <- m_ff[c(1:m1_row),2]
x2 <- m_ff[c((m1_row+1):(m1_row+m2_row)),1]
y2 <- m_ff[c((m1_row+1):(m1_row+m2_row)),2]
x <- m_ff[c(1:m.indx),1]
y = (gamma - x * weight.w1) / weight.w2
pdf("/home/hduser/Documents/R/TAE_output/6SVM_hadoop.pdf")
plot(x,y,type="b", pch =18,
xlab ='Course instructor categories',ylab ='course categories',
main = "Class medium(red) and high(blue)")
par(new=TRUE)
points(x1,y1,col="red", type="p",pch =1)
par(new=TRUE)
points(x2,y2,col="blue", type="p",pch =5)
dev.off()
#######---------------------end of svm---------------------------########
inercept <- gamma/weight.w2
slope <- -weight.w1/weight.w2
val <- c(inercept,slope)
keyval(key,val)
}
data.reduce.fn <- function(k,v){
keyval(k,v)
}
classify <- mapreduce(input=data.content,
map=data.map.fn,
reduce=data.reduce.fn)
from.dfs(classify)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken #57.24182 secs
|
library(R.oo);
# Constructor for a CGDS connection object (R.oo class).
#
# Args:
#   url:          base URL of the CGDS web service (e.g. "http://www.cbioportal.org/").
#   verbose:      if TRUE, each request URL is printed before it is fetched.
#   ploterrormsg: global message that overrides per-plot error text in plot().
setConstructorS3("CGDS", function(url='',verbose=FALSE,ploterrormsg='') {
  extend(Object(), "CGDS",
    .url=url,
    .verbose=verbose,
    # Bug fix: was hard-coded to '' so the ploterrormsg argument was
    # silently discarded; store the supplied value instead.
    .ploterrormsg=ploterrormsg)
})
# Private helper: fetch a CGDS web-service URL and parse the tab-delimited
# response into a data.frame. Prints the URL first when verbose is enabled.
# Previously the data.frame was returned only as the (invisible) value of the
# trailing assignment; return it explicitly.
setMethodS3("processURL","CGDS", private=TRUE, function(x, url, ...) {
  if (x$.verbose) cat(url,"\n")
  df = read.table(url, skip=0, header=TRUE, as.is=TRUE, sep="\t")
  return(df)
})
# Store a global plot-error message on the connection object; plot() will use
# it to override any per-plot error text. Returns the message.
setMethodS3("setPlotErrorMsg","CGDS", function(x, msg, ...) {
  x$.ploterrormsg <- msg
  msg
})
# Toggle verbose request logging for this connection; returns the new flag.
setMethodS3("setVerbose","CGDS", function(x, verbose, ...) {
  x$.verbose <- verbose
  verbose
})
# List all cancer studies available on the portal.
# Returns a data.frame with cancer_study_id, name and description columns.
setMethodS3("getCancerStudies","CGDS", function(x, ...) {
  request <- paste0(x$.url, "webservice.do?cmd=getCancerStudies&")
  processURL(x, request)
})
# List the case (sample) lists defined for one cancer study.
setMethodS3("getCaseLists","CGDS", function(x, cancerStudy, ...) {
  request <- paste0(x$.url,
                    "webservice.do?cmd=getCaseLists&cancer_study_id=",
                    cancerStudy)
  processURL(x, request)
})
# List the genetic profiles (mRNA, CNA, mutation, ...) for one cancer study.
setMethodS3("getGeneticProfiles","CGDS", function(x, cancerStudy, ...) {
  request <- paste0(x$.url,
                    "webservice.do?cmd=getGeneticProfiles&cancer_study_id=",
                    cancerStudy)
  processURL(x, request)
})
# Retrieve extended mutation data for the given genes in one genetic profile,
# restricted to the cases in caseList.
setMethodS3("getMutationData","CGDS", function(x, caseList, geneticProfile, genes, ...) {
  request <- paste0(x$.url,
                    "webservice.do?cmd=getMutationData",
                    "&case_set_id=", caseList,
                    "&genetic_profile_id=", geneticProfile,
                    "&gene_list=", paste(genes, collapse = ","))
  processURL(x, request)
})
# Retrieve genomic profile data for genes and genetic profiles.
# Valid combinations: many genes x one profile, or one gene x many profiles.
# Cases are selected by exactly one of: an explicit `cases` vector (wins),
# a `caseIdsKey`, or a named `caseList`. Returns a data.frame with cases as
# rows and genes (or profiles) as columns.
setMethodS3("getProfileData","CGDS", function(x, genes, geneticProfiles, caseList='', cases=c(), caseIdsKey = '', ...) {
url = paste(x$.url, "webservice.do?cmd=getProfileData",
"&gene_list=", paste(genes,collapse=","),
"&genetic_profile_id=", paste(geneticProfiles,collapse=","),
"&id_type=", 'gene_symbol',
sep="")
# Precedence: explicit case ids > case-ids key > named case list.
if (length(cases)>0) { url = paste(url,"&case_list=", paste(cases,collapse=","),sep='')
} else if (caseIdsKey != '') { url = paste(url,"&case_ids_key=", caseIdsKey,sep='')
} else { url = paste(url,"&case_set_id=", caseList,sep='') }
df = processURL(x,url)
# Empty response (e.g. invalid gene names): return the empty frame as-is.
if (nrow(df) == 0) { return(df) }
m = matrix()
# process data before returning
if (length(geneticProfiles) > 1) {
# Multi-profile response: column 1 holds the profile id; the first four
# columns are metadata, the rest are per-case values - transpose so cases
# become rows and profiles become columns.
cnames = df[,1]
m = t(df[,-c(1:4)])
colnames(m) = cnames
} else {
# Single-profile response: column 2 holds the gene symbol; first two
# columns are metadata.
cnames = df[,2]
m = t(df[,-c(1:2)])
colnames(m) = cnames
}
return(data.frame(m))
})
# Retrieve clinical attributes for a set of cases. Case selection precedence:
# an explicit `cases` vector, then `caseIdsKey`, then a named `caseList`.
# Row names are the syntactically-cleaned case ids; the case_id column itself
# is dropped from the returned data.frame.
setMethodS3("getClinicalData","CGDS", function(x, caseList='', cases=c(), caseIdsKey = '', ...) {
  request <- paste0(x$.url, "webservice.do?cmd=getClinicalData")
  if (length(cases) > 0) {
    request <- paste0(request, "&case_list=", paste(cases, collapse = ","))
  } else if (caseIdsKey != '') {
    request <- paste0(request, "&case_ids_key=", caseIdsKey)
  } else {
    request <- paste0(request, "&case_set_id=", caseList)
  }
  clinical <- processURL(x, request)
  rownames(clinical) <- make.names(clinical$case_id)
  clinical[, -1]
})
# Plot genomic data fetched from the portal. Supported combinations:
# one gene x one profile, two genes x one profile, or one gene x two profiles.
# `skin` selects the plot type: 'cont' (hist/scatter), 'disc' (barplot),
# 'disc_cont' (boxplot+strip), 'cna_mrna_mut' and 'meth_mrna_cna_mut'/'cna_mut'
# (annotated scatter/box plots colored by CNA/mutation status).
# On any problem an empty plot containing the error text is drawn and the
# message is returned; on success returns TRUE.
setMethodS3("plot","CGDS", function(x, cancerStudy, genes, geneticProfiles, caseList='', cases=c(), caseIdsKey = '', skin='cont', skin.normals='', skin.col.gp = c(), add.corr = '', legend.pos = 'topright', ...) {
# Local helper: draw an empty plot containing the (possibly overridden)
# error/info message and return that message.
errormsg <- function(msg,error=TRUE) {
# return empty plot with text
if (error) {msg = paste('Error:',msg)}
# override msg if global message provided in object
if (x$.ploterrormsg != '') {msg = x$.ploterrormsg}
plot.new()
# set message text here ...
#mtext(msg,cex=1.0,col='darkred')
text(0.5,0.5,msg,cex=1.0,col='darkred')
box()
return(msg)
}
# we only allow the following combinations
# a) gene1 (1 profile) # b) gene1 vs gene2 (1 profile)
# c) profile 1 vs profile 2 (1 gene)
if((length(genes) > 1 & length(geneticProfiles) > 1) | (length(genes) > 2 | length(geneticProfiles) > 2)) {
return(errormsg("use only 2 genetic profiles OR 2 genes"))
}
# make genenames conform to R variable names
genesR = make.names(genes)
# get data, check more than zero rows returned, otherwise return
df = getProfileData(x, genes, geneticProfiles, caseList, cases, caseIdsKey)
if (nrow(df) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df)[1]))) }
# check data returned with more than two genes or genetic profiles
if (length(genes) == 2 & ncol(df) != 2) { return(errormsg(paste("gene not found:", setdiff(genesR,colnames(df))))) }
if (length(geneticProfiles) == 2 & ncol(df) != 2) { return(errormsg("geneticProfile ID not found:", setdiff(geneticProfiles,colnames(df)))) }
# get geneticProfiles annotation for axis labels
gps = getGeneticProfiles(x, cancerStudy)
if (nrow(gps) == 0) { errormsg(colnames(gps[1])) }
rownames(gps) = gps[,1]
gpaxisnames = gps[geneticProfiles,'genetic_profile_name']
names(gpaxisnames) = geneticProfiles
# we can have a situation where there is no data for a given combination of gene and genetic profile
# in this case, we have a column of NaN, and we generate an error
nacols = sapply(df, function(x) all(is.nan(x)))
if (any(nacols)) {
if (length(geneticProfiles) > 1) {
# two genetic profiles in columns, one gene
return(errormsg(paste(genes, "has no data\n for genetic profile(s):", paste(gpaxisnames[nacols],collapse=", ")),FALSE))
} else {
# one genetic profile, one or two genes in columns
return(errormsg(paste(paste(genes[nacols],collapse=' and '), "has no data\n for genetic profile:", gpaxisnames),FALSE))
}
}
# set sub title with correlation if specified
plot.subtitle = ''
if ((add.corr == 'pearson' | add.corr == 'spearman') & ncol(df) == 2) {
ct = cor.test(df[,1],df[,2],method=add.corr)
plot.subtitle = paste(add.corr, ' r = ', sprintf("%.2f",ct$estimate), ', p = ',sprintf("%.1e",ct$p.value))
}
###
### Skins
###
if (skin == 'cont') {
# Continuous data: histogram (1 gene, 1 profile) or scatter plot otherwise.
if(length(genes) == 1 & length(geneticProfiles) == 1) {
hist(df[,1],xlab=paste(genes," , ",gpaxisnames,sep=""),main='')
} else if (length(genes) == 2) {
# two genes
plot(df[,genesR[1]],df[,genesR[2]] , main = '', xlab = paste(genes[1],", ",gpaxisnames,sep=""), ylab = paste(genes[2],", ",gpaxisnames,sep=""), pch = 1, col = 'black', sub = plot.subtitle)
} else {
# two genetic profiles
gpa = geneticProfiles[1]
gpb = geneticProfiles[2]
plot(df[,gpa],df[,gpb] , main = '', xlab = paste(genes[1],", ",gpaxisnames[gpa],sep=""), ylab = paste(genes[1],", ",gpaxisnames[gpb],sep=""), pch = 1, col = 'black', sub = plot.subtitle)
}
} else if (skin == 'disc') {
# Discrete data: frequency barplot; discrete-vs-discrete is unsupported.
if(length(genes) == 1 & length(geneticProfiles) == 1) {
barplot(table(df[,1]),xlab=paste(genes,", ",gpaxisnames,sep=""),main='',ylab='frequency')
} else {
#discrete vs discrete
return(errormsg('discrete vs. discrete data not implemented'))
}
} else if (skin =='disc_cont') {
# skin only valid for two genetic profiles
if(length(geneticProfiles) != 2) {
return(errormsg("two genetic profiles required for skin 'disc_cont'"))
} else {
# skin assumes that first genetic profile is discret
gp.disc = geneticProfiles[1] # b
gp.cont = geneticProfiles[2] # a
boxplot(df[,gp.cont] ~ df[,gp.disc], outpch = NA, main = '', xlab = paste(genes[1],", ",gpaxisnames[gp.disc],sep=""), ylab = paste(genes[1],", ",gpaxisnames[gp.cont],sep=""), border = 'gray', sub = plot.subtitle)
stripchart(df[,gp.cont] ~ df[,gp.disc],vertical = TRUE, add = TRUE, method = 'jitter', pch = 1, col = 'black')
}
} else if (skin == 'cna_mrna_mut') {
# skin uses parameters
# * skin.col.gp = mut
# * skin.normals = normal_case_set [optional]
# fetch optional normal mRNA data
if (skin.normals != '') {
df.norm = getProfileData(x, genes, geneticProfiles[2], skin.normals)
if (nrow(df.norm) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.norm)[1]))) }
# check if data is missing (NaN)
if ( !all(is.nan(df.norm[,1])) ) {
# add normal data to dataframe
# Normals get CNA code -3 so they appear as their own x-axis category.
df.norm2 = cbind(rep(-3,nrow(df.norm)),df.norm[,1])
colnames(df.norm2) = geneticProfiles
df = rbind(df,df.norm2)
}
}
# create boxplot
df.nona = df[apply(df, 1, function(x) {!any(is.na(x))}),]
ylim=range(df.nona[,geneticProfiles[2]],na.rm=TRUE)
# CNA codes -3..2 map to labelled categories (incl. the synthetic "Normal").
labels=seq(-3,2)
names(labels)=c("Normal","Homdel","Hetloss","Diploid","Gain","Amp")
labels.inuse = sort(unique(df.nona[,geneticProfiles[1]])) # sort removes any NA
boxplot(df.nona[,geneticProfiles[2]] ~ df.nona[,geneticProfiles[1]], main='', outline=FALSE,
xlab = paste(genes,", ",gpaxisnames[geneticProfiles[1]],sep=""),
ylab = paste(genes,", ",gpaxisnames[geneticProfiles[2]],sep=""),
border="gray",ylim=ylim, axes=FALSE, outpch = NA, sub = plot.subtitle)
axis(1,at=seq(1,length(labels.inuse)),labels=names(labels)[match(labels.inuse,labels)],cex.axis=0.8)
axis(2,cex.axis=0.8,las=2)
# box()
# manually jitter data
# order data by CNA status
df.nona=df.nona[order(df.nona[,geneticProfiles[1]]),]
xy=list()
xy$MRNA=df.nona[,geneticProfiles[2]]
cats=cbind(1:length(labels.inuse),as.data.frame(table(df.nona[,geneticProfiles[1]])))
colnames(cats)=c("X","Class","Count")
rownames(cats)=names(labels)[match(cats[,2],labels)]
# Jitter each point horizontally around its category's x position.
xy$JITTER = unlist( apply(cats,1, function(cc) {
y=rep.int(as.numeric(cc[1]),as.numeric(cc[3]))
y=y+stats::runif(length(y),-0.1,0.1)
}))
xy=as.data.frame(xy)
colnames(xy) = c('MRNA','JITTER')
# Initialize plotting features
nonmut.pch = 4
N=nrow(xy)
cex=rep(0.9,N)
pch=rep(nonmut.pch,N)
col=rep("royalblue",N)
bg=rep(NA,N)
# fetch mutation data for color coding
if (length(skin.col.gp == 1)) {
# NOTE(review): condition looks like a bug - length(skin.col.gp == 1)
# is the vector length, not a comparison; presumably
# length(skin.col.gp) == 1 was intended. Confirm before changing.
df.mut = getProfileData(x, genes, skin.col.gp, caseList, cases, caseIdsKey)
if (nrow(df.mut) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.mut)[1]))) }
# get matrix corresponding to df.nona
df.mut = df.mut[rownames(df.nona),1]
# check if data is missing (NaN)
mut=which(!is.na(df.mut))
if(length(mut)>0) {
# default mutation
col[mut]="red3"
bg[mut]="goldenrod"
pch[mut]=21
# Classify mutation strings by regex and assign point shape/fill per type.
mt=list()
mt$pch=c(21,23,24,25,22)
mt$type=c("missense","nonsense","splice","shift","in_frame")
mt$pattern=c("^[a-z][0-9]+[a-z]$","[*x]$","^(e.+[0-9]|.+_splice)$","fs$","del$")
mt$bg=c("goldenrod","darkblue","darkgray","black","goldenrod")
mt=as.data.frame(mt)
for(i in 1:nrow(mt)) {
idx=grep(mt$pattern[i],tolower(df.mut))
pch[idx]=mt$pch[i]
bg[idx]=as.character(mt$bg[i])
}
#col[mut]="red3" # ??????
legend("topleft",bty="n",
as.character(as.vector(mt[["type"]])),col="red3",
pt.bg=as.character(as.vector(mt[["bg"]])),
pch=mt[["pch"]],cex=0.85,pt.cex=1.0
)
}
}
## # Plot the jittered data, add mutated points last
xy.mut = (pch != nonmut.pch)
points(xy$JITTER[!xy.mut],xy$MRNA[!xy.mut],pch=pch[!xy.mut],cex=cex[!xy.mut],col=col[!xy.mut],bg=bg[!xy.mut])
points(xy$JITTER[xy.mut],xy$MRNA[xy.mut],pch=pch[xy.mut],cex=cex[xy.mut],col=col[xy.mut],bg=bg[xy.mut])
box()
} else if (skin == 'meth_mrna_cna_mut' | skin == 'cna_mut') {
# these skins use parameters
# * skin.col.gp = (cna,mut) [optional]
# * skin.normals = normal_case_set [optional]
# [meth_mrna_cna_mut] forces x axis range to [0,1.05]
# fetch cna and mut data for color coding
pch=rep(1,nrow(df))
col=rep("black",nrow(df))
if (length(skin.col.gp) == 2) { # color by both CNA and mutation
df.col = getProfileData(x, genes, skin.col.gp, caseList, cases, caseIdsKey)
if (nrow(df.col) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.col)[1]))) }
# because mut is text vector, we need to transform cna to integer vector instead of factor
cna = as.integer(as.vector(df.col[,skin.col.gp[1]]))
mut = as.vector(df.col[,skin.col.gp[2]])
col[cna==-2]="darkblue"
col[cna==-1]="deepskyblue"
col[cna==1]="hotpink"
col[cna==2]="red3"
col[mut!="NaN"]="orange"
pch[mut!="NaN"]=20
}
else if (length(skin.col.gp) == 1) { # color only by CNA
df.col = getProfileData(x, genes, skin.col.gp, caseList, cases, caseIdsKey)
if (nrow(df.col) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.col)[1]))) }
# because mut is text vector, we need to transform cna to integer vector instead of factor
cna = as.integer(as.vector(df.col[,1]))
col[cna==-2]="darkblue"
col[cna==-1]="deepskyblue"
col[cna==1]="hotpink"
col[cna==2]="red3"
}
# fetch optional normal methylation and mRNA data
if (skin.normals != '') {
df.norm = getProfileData(x, genes, geneticProfiles, skin.normals)
if (nrow(df.norm) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.norm)[1]))) }
# remove missing data
df.norm = df.norm[apply(df.norm, 1, function(x) {!any(is.na(x))}),]
if ( length(df.norm) > 0) {
df = rbind(df,df.norm)
col = append(col, rep("black",nrow(df.norm)))
pch = append(pch, rep(20,nrow(df.norm)))
}
}
xlim = range(df[,geneticProfiles[1]],na.rm = TRUE)
# add 15% to range, to make room for legend
if (legend.pos == 'topright') {
xlim = c(min(xlim),max(xlim) + (max(xlim)-min(xlim))*0.15)
}
else if (legend.pos == 'topleft') {
xlim = c(min(xlim) - (max(xlim)-min(xlim))*0.15,max(xlim))
}
if (skin == 'meth_mrna_cna_mut') {
# force x axis range to [0,1.05]
xlim = c(0,1.05)
}
# now plot
plot( df[,geneticProfiles[1]],df[,geneticProfiles[2]],main="",
xlab=paste(genes,", ",gpaxisnames[geneticProfiles[1]],sep=""),
ylab=paste(genes,", ",gpaxisnames[geneticProfiles[2]],sep=""),
xlim=xlim,pch=pch,col=col,cex=1.2, sub = plot.subtitle
)
#abline(lm(d$rna~d$methylation),col="red3",lty=2,lwd=1.5)
#lines(loess.smooth(d$methylation,d$rna),col="darkgray",lwd=2)
# Replace with dynamically created legend when time permits
legend(legend.pos,bty="n",
c("Homdel","Hetloss","Diploid","Gain","Amp","Mutated","Normal"),
col=c('darkblue','deepskyblue','black','hotpink','red','orange','black'),
pch=c(1,1,1,1,1,20,20),cex=0.85,pt.cex=1.0
)
} else {
# NOTE(review): "unkown" typo lives in a runtime string; left unchanged here.
return(errormsg(paste("unkown skin:",skin)))
}
return(TRUE)
})
# Smoke-test the CGDS API wrappers against a live server connection `x`.
# Each check prints OK/FAILED based on exact column-name (or value) matches;
# expectations are tied to the server's current responses.
setMethodS3("test","CGDS", function(x, ...) {
# Tiny assertion helpers: exact identity and strictly-greater comparisons.
checkEq = function(a,b) { if (identical(a,b)) "OK\n" else "FAILED!\n" }
checkGrt = function(a,b) { if (a > b) "OK\n" else "FAILED!\n" }
cancerstudies = getCancerStudies(x)
cat('getCancerStudies... ',
checkEq(colnames(cancerstudies),c("cancer_study_id","name","description")))
ct = cancerstudies[2,1] # should be row 1 instead ...
cat('getCaseLists (1/2) ... ',
checkEq(colnames(getCaseLists(x,ct)),
c("case_list_id","case_list_name",
"case_list_description","cancer_study_id","case_ids")))
# Invalid study ids should produce the server's error column name.
cat('getCaseLists (2/2) ... ',
checkEq(colnames(getCaseLists(x,'xxx')),
'Error..Problem.when.identifying.a.cancer.study.for.the.request.'))
cat('getGeneticProfiles (1/2) ... ',
checkEq(colnames(getGeneticProfiles(x,ct)),
c("genetic_profile_id","genetic_profile_name","genetic_profile_description",
"cancer_study_id","genetic_alteration_type","show_profile_in_analysis_tab")))
cat('getGeneticProfiles (2/2) ... ',
checkEq(colnames(getGeneticProfiles(x,'xxx')),
'Error..Problem.when.identifying.a.cancer.study.for.the.request.'))
# clinical data
# check colnames
cat('getClinicalData (1/1) ... ',
checkEq(colnames(getClinicalData(x,'gbm_tcga_all')),
c("overall_survival_months","overall_survival_status","disease_free_survival_months",
"disease_free_survival_status","age_at_diagnosis")))
# check value of overall_survival_months
#cat('getClinicalData (2/3) ... ',
# checkGrt(getClinicalData(x,'gbm_tcga_all')['TCGA.02.0080','overall_survival_months'], 89))
# check cases parameter
#cat('getClinicalData (3/3) ... ',
# checkGrt(getClinicalData(x,cases=c('TCGA-02-0080'))['TCGA.02.0080','overall_survival_months'], 89))
# check one gene, one profile
cat('getProfileData (1/7) ... ',
checkEq(colnames(getProfileData(x,'NF1','gbm_tcga_mrna','gbm_tcga_all')),
"NF1"))
# check many genes, one profile
cat('getProfileData (2/7) ... ',
checkEq(colnames(getProfileData(x,c('MDM2','MDM4'),'gbm_tcga_mrna','gbm_tcga_all')),
c("MDM2","MDM4")))
# check one gene, many profile
cat('getProfileData (3/7) ... ',
checkEq(colnames(getProfileData(x,'NF1',c('gbm_tcga_mrna','gbm_tcga_mutations'),'gbm_tcga_all')),
c('gbm_tcga_mrna','gbm_tcga_mutations')))
# check 3 cases returns matrix with 3 columns
cat('getProfileData (4/7) ... ',
checkEq(rownames(getProfileData(x,'BRCA1','gbm_tcga_mrna',cases=c('TCGA-02-0001','TCGA-02-0003'))),
make.names(c('TCGA-02-0001','TCGA-02-0003'))))
# invalid gene names return empty data.frame
cat('getProfileData (5/7) ... ',
checkEq(nrow(getProfileData(x,c('NF10','NF11'),'gbm_tcga_mrna','gbm_tcga_all')),as.integer(0)))
# invalid case_list_id returns error
cat('getProfileData (6/7) ... ',
checkEq(colnames(getProfileData(x,'NF1','gbm_tcga_mrna','xxx')),
'Error..Problem.when.identifying.a.cancer.study.for.the.request.'))
# invalid genetic_profile_id returns error
cat('getProfileData (7/7) ... ',
checkEq(colnames(getProfileData(x,'NF1','xxx','gbm_tcga_all')),
'No.genetic.profile.available.for.genetic_profile_id...xxx.'))
})
| /cgds-r/cgdsr/R/cgdsr.R | no_license | chenchonghz/cbio-cancer-genomics-portal | R | false | false | 18,043 | r | library(R.oo);
# Constructor for a CGDS connection object (R.oo class).
#
# Args:
#   url:          base URL of the CGDS web service (e.g. "http://www.cbioportal.org/").
#   verbose:      if TRUE, each request URL is printed before it is fetched.
#   ploterrormsg: global message that overrides per-plot error text in plot().
setConstructorS3("CGDS", function(url='',verbose=FALSE,ploterrormsg='') {
  extend(Object(), "CGDS",
    .url=url,
    .verbose=verbose,
    # Bug fix: was hard-coded to '' so the ploterrormsg argument was
    # silently discarded; store the supplied value instead.
    .ploterrormsg=ploterrormsg)
})
setMethodS3("processURL","CGDS", private=TRUE, function(x, url, ...) {
if (x$.verbose) cat(url,"\n")
df = read.table(url, skip=0, header=TRUE, as.is=TRUE, sep="\t")
})
setMethodS3("setPlotErrorMsg","CGDS", function(x, msg, ...) {
x$.ploterrormsg = msg
return(msg)
})
setMethodS3("setVerbose","CGDS", function(x, verbose, ...) {
x$.verbose = verbose
return(verbose)
})
setMethodS3("getCancerStudies","CGDS", function(x, ...) {
url = paste(x$.url, "webservice.do?cmd=getCancerStudies&",sep="")
df = processURL(x,url)
return(df)
})
setMethodS3("getCaseLists","CGDS", function(x, cancerStudy, ...) {
url = paste(x$.url, "webservice.do?cmd=getCaseLists&cancer_study_id=", cancerStudy, sep="")
df = processURL(x,url)
return(df)
})
setMethodS3("getGeneticProfiles","CGDS", function(x, cancerStudy, ...) {
url = paste(x$.url, "webservice.do?cmd=getGeneticProfiles&cancer_study_id=", cancerStudy, sep="")
df = processURL(x,url)
return(df)
})
setMethodS3("getMutationData","CGDS", function(x, caseList, geneticProfile, genes, ...) {
url = paste(x$.url, "webservice.do?cmd=getMutationData",
"&case_set_id=", caseList,
"&genetic_profile_id=", geneticProfile,
"&gene_list=", paste(genes,collapse=","), sep="")
df = processURL(x,url)
return(df)
})
setMethodS3("getProfileData","CGDS", function(x, genes, geneticProfiles, caseList='', cases=c(), caseIdsKey = '', ...) {
url = paste(x$.url, "webservice.do?cmd=getProfileData",
"&gene_list=", paste(genes,collapse=","),
"&genetic_profile_id=", paste(geneticProfiles,collapse=","),
"&id_type=", 'gene_symbol',
sep="")
if (length(cases)>0) { url = paste(url,"&case_list=", paste(cases,collapse=","),sep='')
} else if (caseIdsKey != '') { url = paste(url,"&case_ids_key=", caseIdsKey,sep='')
} else { url = paste(url,"&case_set_id=", caseList,sep='') }
df = processURL(x,url)
if (nrow(df) == 0) { return(df) }
m = matrix()
# process data before returning
if (length(geneticProfiles) > 1) {
cnames = df[,1]
m = t(df[,-c(1:4)])
colnames(m) = cnames
} else {
cnames = df[,2]
m = t(df[,-c(1:2)])
colnames(m) = cnames
}
return(data.frame(m))
})
setMethodS3("getClinicalData","CGDS", function(x, caseList='', cases=c(), caseIdsKey = '', ...) {
url = paste(x$.url, "webservice.do?cmd=getClinicalData",sep="")
if (length(cases)>0) { url = paste(url,"&case_list=", paste(cases,collapse=","),sep='')
} else if (caseIdsKey != '') { url = paste(url,"&case_ids_key=", caseIdsKey,sep='')
} else { url = paste(url,"&case_set_id=", caseList,sep='') }
df = processURL(x,url)
rownames(df) = make.names(df$case_id)
return(df[,-1])
})
setMethodS3("plot","CGDS", function(x, cancerStudy, genes, geneticProfiles, caseList='', cases=c(), caseIdsKey = '', skin='cont', skin.normals='', skin.col.gp = c(), add.corr = '', legend.pos = 'topright', ...) {
errormsg <- function(msg,error=TRUE) {
# return empty plot with text
if (error) {msg = paste('Error:',msg)}
# override msg if global message provided in object
if (x$.ploterrormsg != '') {msg = x$.ploterrormsg}
plot.new()
# set message text here ...
#mtext(msg,cex=1.0,col='darkred')
text(0.5,0.5,msg,cex=1.0,col='darkred')
box()
return(msg)
}
# we only allow the following combinations
# a) gene1 (1 profile) # b) gene1 vs gene2 (1 profile)
# c) profile 1 vs profile 2 (1 gene)
if((length(genes) > 1 & length(geneticProfiles) > 1) | (length(genes) > 2 | length(geneticProfiles) > 2)) {
return(errormsg("use only 2 genetic profiles OR 2 genes"))
}
# make genenames conform to R variable names
genesR = make.names(genes)
# get data, check more than zero rows returned, otherwise return
df = getProfileData(x, genes, geneticProfiles, caseList, cases, caseIdsKey)
if (nrow(df) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df)[1]))) }
# check data returned with more than two genes or genetic profiles
if (length(genes) == 2 & ncol(df) != 2) { return(errormsg(paste("gene not found:", setdiff(genesR,colnames(df))))) }
if (length(geneticProfiles) == 2 & ncol(df) != 2) { return(errormsg("geneticProfile ID not found:", setdiff(geneticProfiles,colnames(df)))) }
# get geneticProfiles annotation for axis labels
gps = getGeneticProfiles(x, cancerStudy)
if (nrow(gps) == 0) { errormsg(colnames(gps[1])) }
rownames(gps) = gps[,1]
gpaxisnames = gps[geneticProfiles,'genetic_profile_name']
names(gpaxisnames) = geneticProfiles
# we can have a situation where there is no data for a given combination of gene and genetic profile
# in this case, we have a column of NaN, and we generate an error
nacols = sapply(df, function(x) all(is.nan(x)))
if (any(nacols)) {
if (length(geneticProfiles) > 1) {
# two genetic profiles in columns, one gene
return(errormsg(paste(genes, "has no data\n for genetic profile(s):", paste(gpaxisnames[nacols],collapse=", ")),FALSE))
} else {
# one genetic profile, one or two genes in columns
return(errormsg(paste(paste(genes[nacols],collapse=' and '), "has no data\n for genetic profile:", gpaxisnames),FALSE))
}
}
# set sub title with correlation if specified
plot.subtitle = ''
if ((add.corr == 'pearson' | add.corr == 'spearman') & ncol(df) == 2) {
ct = cor.test(df[,1],df[,2],method=add.corr)
plot.subtitle = paste(add.corr, ' r = ', sprintf("%.2f",ct$estimate), ', p = ',sprintf("%.1e",ct$p.value))
}
###
### Skins
###
if (skin == 'cont') {
if(length(genes) == 1 & length(geneticProfiles) == 1) {
hist(df[,1],xlab=paste(genes," , ",gpaxisnames,sep=""),main='')
} else if (length(genes) == 2) {
# two genes
plot(df[,genesR[1]],df[,genesR[2]] , main = '', xlab = paste(genes[1],", ",gpaxisnames,sep=""), ylab = paste(genes[2],", ",gpaxisnames,sep=""), pch = 1, col = 'black', sub = plot.subtitle)
} else {
# two genetic profiles
gpa = geneticProfiles[1]
gpb = geneticProfiles[2]
plot(df[,gpa],df[,gpb] , main = '', xlab = paste(genes[1],", ",gpaxisnames[gpa],sep=""), ylab = paste(genes[1],", ",gpaxisnames[gpb],sep=""), pch = 1, col = 'black', sub = plot.subtitle)
}
} else if (skin == 'disc') {
if(length(genes) == 1 & length(geneticProfiles) == 1) {
barplot(table(df[,1]),xlab=paste(genes,", ",gpaxisnames,sep=""),main='',ylab='frequency')
} else {
#discrete vs discrete
return(errormsg('discrete vs. discrete data not implemented'))
}
} else if (skin =='disc_cont') {
# skin only valid for two genetic profiles
if(length(geneticProfiles) != 2) {
return(errormsg("two genetic profiles required for skin 'disc_cont'"))
} else {
# skin assumes that first genetic profile is discret
gp.disc = geneticProfiles[1] # b
gp.cont = geneticProfiles[2] # a
boxplot(df[,gp.cont] ~ df[,gp.disc], outpch = NA, main = '', xlab = paste(genes[1],", ",gpaxisnames[gp.disc],sep=""), ylab = paste(genes[1],", ",gpaxisnames[gp.cont],sep=""), border = 'gray', sub = plot.subtitle)
stripchart(df[,gp.cont] ~ df[,gp.disc],vertical = TRUE, add = TRUE, method = 'jitter', pch = 1, col = 'black')
}
} else if (skin == 'cna_mrna_mut') {
# skin uses parameters
# * skin.col.gp = mut
# * skin.normals = normal_case_set [optional]
# fetch optional normal mRNA data
if (skin.normals != '') {
df.norm = getProfileData(x, genes, geneticProfiles[2], skin.normals)
if (nrow(df.norm) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.norm)[1]))) }
# check if data is missing (NaN)
if ( !all(is.nan(df.norm[,1])) ) {
# add normal data to dataframe
df.norm2 = cbind(rep(-3,nrow(df.norm)),df.norm[,1])
colnames(df.norm2) = geneticProfiles
df = rbind(df,df.norm2)
}
}
# create boxplot
df.nona = df[apply(df, 1, function(x) {!any(is.na(x))}),]
ylim=range(df.nona[,geneticProfiles[2]],na.rm=TRUE)
labels=seq(-3,2)
names(labels)=c("Normal","Homdel","Hetloss","Diploid","Gain","Amp")
labels.inuse = sort(unique(df.nona[,geneticProfiles[1]])) # sort removes any NA
boxplot(df.nona[,geneticProfiles[2]] ~ df.nona[,geneticProfiles[1]], main='', outline=FALSE,
xlab = paste(genes,", ",gpaxisnames[geneticProfiles[1]],sep=""),
ylab = paste(genes,", ",gpaxisnames[geneticProfiles[2]],sep=""),
border="gray",ylim=ylim, axes=FALSE, outpch = NA, sub = plot.subtitle)
axis(1,at=seq(1,length(labels.inuse)),labels=names(labels)[match(labels.inuse,labels)],cex.axis=0.8)
axis(2,cex.axis=0.8,las=2)
# box()
# manually jitter data
# order data by CNA status
df.nona=df.nona[order(df.nona[,geneticProfiles[1]]),]
xy=list()
xy$MRNA=df.nona[,geneticProfiles[2]]
cats=cbind(1:length(labels.inuse),as.data.frame(table(df.nona[,geneticProfiles[1]])))
colnames(cats)=c("X","Class","Count")
rownames(cats)=names(labels)[match(cats[,2],labels)]
xy$JITTER = unlist( apply(cats,1, function(cc) {
y=rep.int(as.numeric(cc[1]),as.numeric(cc[3]))
y=y+stats::runif(length(y),-0.1,0.1)
}))
xy=as.data.frame(xy)
colnames(xy) = c('MRNA','JITTER')
# Initialize plotting features
nonmut.pch = 4
N=nrow(xy)
cex=rep(0.9,N)
pch=rep(nonmut.pch,N)
col=rep("royalblue",N)
bg=rep(NA,N)
# fetch mutation data for color coding
if (length(skin.col.gp == 1)) {
df.mut = getProfileData(x, genes, skin.col.gp, caseList, cases, caseIdsKey)
if (nrow(df.mut) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.mut)[1]))) }
# get matrix corresponding to df.nona
df.mut = df.mut[rownames(df.nona),1]
# check if data is missing (NaN)
mut=which(!is.na(df.mut))
if(length(mut)>0) {
# default mutation
col[mut]="red3"
bg[mut]="goldenrod"
pch[mut]=21
mt=list()
mt$pch=c(21,23,24,25,22)
mt$type=c("missense","nonsense","splice","shift","in_frame")
mt$pattern=c("^[a-z][0-9]+[a-z]$","[*x]$","^(e.+[0-9]|.+_splice)$","fs$","del$")
mt$bg=c("goldenrod","darkblue","darkgray","black","goldenrod")
mt=as.data.frame(mt)
for(i in 1:nrow(mt)) {
idx=grep(mt$pattern[i],tolower(df.mut))
pch[idx]=mt$pch[i]
bg[idx]=as.character(mt$bg[i])
}
#col[mut]="red3" # ??????
legend("topleft",bty="n",
as.character(as.vector(mt[["type"]])),col="red3",
pt.bg=as.character(as.vector(mt[["bg"]])),
pch=mt[["pch"]],cex=0.85,pt.cex=1.0
)
}
}
## # Plot the jittered data, add mutated points last
xy.mut = (pch != nonmut.pch)
points(xy$JITTER[!xy.mut],xy$MRNA[!xy.mut],pch=pch[!xy.mut],cex=cex[!xy.mut],col=col[!xy.mut],bg=bg[!xy.mut])
points(xy$JITTER[xy.mut],xy$MRNA[xy.mut],pch=pch[xy.mut],cex=cex[xy.mut],col=col[xy.mut],bg=bg[xy.mut])
box()
} else if (skin == 'meth_mrna_cna_mut' | skin == 'cna_mut') {
# these skins use parameters
# * skin.col.gp = (cna,mut) [optional]
# * skin.normals = normal_case_set [optional]
# [meth_mrna_cna_mut] forces x axis range to [0,1.05]
# fetch cna and mut data for color coding
pch=rep(1,nrow(df))
col=rep("black",nrow(df))
if (length(skin.col.gp) == 2) { # color by both CNA and mutation
df.col = getProfileData(x, genes, skin.col.gp, caseList, cases, caseIdsKey)
if (nrow(df.col) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.col)[1]))) }
# because mut is text vector, we need to transform cna to integer vector instead of factor
cna = as.integer(as.vector(df.col[,skin.col.gp[1]]))
mut = as.vector(df.col[,skin.col.gp[2]])
col[cna==-2]="darkblue"
col[cna==-1]="deepskyblue"
col[cna==1]="hotpink"
col[cna==2]="red3"
col[mut!="NaN"]="orange"
pch[mut!="NaN"]=20
}
else if (length(skin.col.gp) == 1) { # color only by CNA
df.col = getProfileData(x, genes, skin.col.gp, caseList, cases, caseIdsKey)
if (nrow(df.col) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.col)[1]))) }
# because mut is text vector, we need to transform cna to integer vector instead of factor
cna = as.integer(as.vector(df.col[,1]))
col[cna==-2]="darkblue"
col[cna==-1]="deepskyblue"
col[cna==1]="hotpink"
col[cna==2]="red3"
}
# fetch optional normal methylation and mRNA data
if (skin.normals != '') {
df.norm = getProfileData(x, genes, geneticProfiles, skin.normals)
if (nrow(df.norm) == 0) { return(errormsg(paste('empty data frame returned :\n',colnames(df.norm)[1]))) }
# remove missing data
df.norm = df.norm[apply(df.norm, 1, function(x) {!any(is.na(x))}),]
if ( length(df.norm) > 0) {
df = rbind(df,df.norm)
col = append(col, rep("black",nrow(df.norm)))
pch = append(pch, rep(20,nrow(df.norm)))
}
}
xlim = range(df[,geneticProfiles[1]],na.rm = TRUE)
# add 15% to range, to make room for legend
if (legend.pos == 'topright') {
xlim = c(min(xlim),max(xlim) + (max(xlim)-min(xlim))*0.15)
}
else if (legend.pos == 'topleft') {
xlim = c(min(xlim) - (max(xlim)-min(xlim))*0.15,max(xlim))
}
if (skin == 'meth_mrna_cna_mut') {
# force x axis range to [0,1.05]
xlim = c(0,1.05)
}
# now plot
plot( df[,geneticProfiles[1]],df[,geneticProfiles[2]],main="",
xlab=paste(genes,", ",gpaxisnames[geneticProfiles[1]],sep=""),
ylab=paste(genes,", ",gpaxisnames[geneticProfiles[2]],sep=""),
xlim=xlim,pch=pch,col=col,cex=1.2, sub = plot.subtitle
)
#abline(lm(d$rna~d$methylation),col="red3",lty=2,lwd=1.5)
#lines(loess.smooth(d$methylation,d$rna),col="darkgray",lwd=2)
# Replace with dynamically created legend when time permits
legend(legend.pos,bty="n",
c("Homdel","Hetloss","Diploid","Gain","Amp","Mutated","Normal"),
col=c('darkblue','deepskyblue','black','hotpink','red','orange','black'),
pch=c(1,1,1,1,1,20,20),cex=0.85,pt.cex=1.0
)
} else {
return(errormsg(paste("unkown skin:",skin)))
}
return(TRUE)
})
setMethodS3("test","CGDS", function(x, ...) {
checkEq = function(a,b) { if (identical(a,b)) "OK\n" else "FAILED!\n" }
checkGrt = function(a,b) { if (a > b) "OK\n" else "FAILED!\n" }
cancerstudies = getCancerStudies(x)
cat('getCancerStudies... ',
checkEq(colnames(cancerstudies),c("cancer_study_id","name","description")))
ct = cancerstudies[2,1] # should be row 1 instead ...
cat('getCaseLists (1/2) ... ',
checkEq(colnames(getCaseLists(x,ct)),
c("case_list_id","case_list_name",
"case_list_description","cancer_study_id","case_ids")))
cat('getCaseLists (2/2) ... ',
checkEq(colnames(getCaseLists(x,'xxx')),
'Error..Problem.when.identifying.a.cancer.study.for.the.request.'))
cat('getGeneticProfiles (1/2) ... ',
checkEq(colnames(getGeneticProfiles(x,ct)),
c("genetic_profile_id","genetic_profile_name","genetic_profile_description",
"cancer_study_id","genetic_alteration_type","show_profile_in_analysis_tab")))
cat('getGeneticProfiles (2/2) ... ',
checkEq(colnames(getGeneticProfiles(x,'xxx')),
'Error..Problem.when.identifying.a.cancer.study.for.the.request.'))
# clinical data
# check colnames
cat('getClinicalData (1/1) ... ',
checkEq(colnames(getClinicalData(x,'gbm_tcga_all')),
c("overall_survival_months","overall_survival_status","disease_free_survival_months",
"disease_free_survival_status","age_at_diagnosis")))
# check value of overall_survival_months
#cat('getClinicalData (2/3) ... ',
# checkGrt(getClinicalData(x,'gbm_tcga_all')['TCGA.02.0080','overall_survival_months'], 89))
# check cases parameter
#cat('getClinicalData (3/3) ... ',
# checkGrt(getClinicalData(x,cases=c('TCGA-02-0080'))['TCGA.02.0080','overall_survival_months'], 89))
# check one gene, one profile
cat('getProfileData (1/7) ... ',
checkEq(colnames(getProfileData(x,'NF1','gbm_tcga_mrna','gbm_tcga_all')),
"NF1"))
# check many genes, one profile
cat('getProfileData (2/7) ... ',
checkEq(colnames(getProfileData(x,c('MDM2','MDM4'),'gbm_tcga_mrna','gbm_tcga_all')),
c("MDM2","MDM4")))
# check one gene, many profile
cat('getProfileData (3/7) ... ',
checkEq(colnames(getProfileData(x,'NF1',c('gbm_tcga_mrna','gbm_tcga_mutations'),'gbm_tcga_all')),
c('gbm_tcga_mrna','gbm_tcga_mutations')))
# check 3 cases returns matrix with 3 columns
cat('getProfileData (4/7) ... ',
checkEq(rownames(getProfileData(x,'BRCA1','gbm_tcga_mrna',cases=c('TCGA-02-0001','TCGA-02-0003'))),
make.names(c('TCGA-02-0001','TCGA-02-0003'))))
# invalid gene names return empty data.frame
cat('getProfileData (5/7) ... ',
checkEq(nrow(getProfileData(x,c('NF10','NF11'),'gbm_tcga_mrna','gbm_tcga_all')),as.integer(0)))
# invalid case_list_id returns error
cat('getProfileData (6/7) ... ',
checkEq(colnames(getProfileData(x,'NF1','gbm_tcga_mrna','xxx')),
'Error..Problem.when.identifying.a.cancer.study.for.the.request.'))
# invalid genetic_profile_id returns error
cat('getProfileData (7/7) ... ',
checkEq(colnames(getProfileData(x,'NF1','xxx','gbm_tcga_all')),
'No.genetic.profile.available.for.genetic_profile_id...xxx.'))
})
|
### R code from vignette source 'modelObj.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: modelObj.Rnw:185-187
###################################################
library(modelObj)
# Simplest usage: a one-covariate formula fitted with stats::lm through the
# default solver interface.
object1 <- buildModelObj(model=~x1, solver.method='lm')
###################################################
### code chunk number 2: modelObj.Rnw:195-211
###################################################
# Toy user-defined regression method from the modelObj vignette: wraps
# stats::lm.fit() and tags the result with class "mylm" so the matching
# predict.mylm() method is dispatched.
mylm <- function(X, Y) {
  result <- list(lm = lm.fit(x = X, y = Y),
                 var = "does something neat")
  structure(result, class = "mylm")
}
# Prediction method paired with mylm(): the model is fit on the log scale,
# so predictions are exponentiated back to the response scale.  With no new
# data the in-sample fitted values are used; otherwise the linear predictor
# is formed from the supplied design matrix.
predict.mylm <- function(obj, data = NULL) {
  if (is.null(data)) {
    exp(obj$lm$fitted.values)
  } else {
    exp(data %*% obj$lm$coefficients)
  }
}
###################################################
### code chunk number 3: modelObj.Rnw:219-225
###################################################
# User-supplied solver/predictor pair: solver.args/predict.args map
# modelObj's standard argument names ("x", "y", "object", "newdata") onto
# the formal argument names of mylm() and predict.mylm().
object2 <- buildModelObj(model = ~x1,
solver.method = mylm,
solver.args = list('X' = "x", 'Y' = "y"),
predict.method = predict.mylm,
predict.args = list('obj' = "object",
'data' = "newdata"))
###################################################
### code chunk number 4: modelObj.Rnw:329-330
###################################################
# The vignette's running example data set (base R 'pressure').
summary(pressure)
###################################################
### code chunk number 5: modelObj.Rnw:339-354
###################################################
# Fit a modelObj specification and return the squared fitted values along
# with the coefficient vector.
#
# Args:
#   modelObj: a modeling object built by buildModelObj().
#   data:     data.frame of covariates passed to modelObj::fit().
#   Y:        response vector.
# Returns: list(fittedSq = predict(fit)^2,
#               coef = coef(fit), or NULL when the regression method
#               provides no coef() method).
exampleFun <- function(modelObj, data, Y){
  fitObj <- fit(modelObj, data, Y)
  ## Test that coef() is an available method
  cfs <- try(coef(fitObj), silent=TRUE)
  # FIX: use inherits() instead of comparing class(cfs) == 'try-error';
  # class() may return a vector of length > 1, which makes '==' unreliable
  # (and an error inside if() under R >= 4.2).
  if (inherits(cfs, 'try-error')) {
    warning("Provided regression method does not have a coef method.\n")
    cfs <- NULL
  }
  fitted <- predict(fitObj)^2
  list("fittedSq"=fitted, "coef"=cfs)
}
###################################################
### code chunk number 6: modelObj.Rnw:360-367
###################################################
# Fit log(pressure) ~ temperature with stats::lm via string-named methods.
ylog <- log(pressure$pressure)
objlm <- buildModelObj(model = ~temperature,
solver.method = "lm",
predict.method = "predict.lm",
predict.args = list("type"="response"))
fitObjlm <- exampleFun(objlm, pressure, ylog)
print(fitObjlm$coef)
###################################################
### code chunk number 7: modelObj.Rnw:371-378
###################################################
# Nonlinear least squares via nls(); starting values go through solver.args.
objnls <- buildModelObj(model = ~exp(a + b*temperature),
solver.method = "nls",
solver.args = list('start'=list(a=1, b=0.1)),
predict.method = "predict",
predict.args = list("type" = "response"))
fitObjnls <- exampleFun(objnls, pressure, pressure$pressure)
print(fitObjnls$coef)
###################################################
### code chunk number 8: modelObj.Rnw:382-390
###################################################
# Same pipeline with the user-defined mylm()/predict.mylm() pair; its fit
# object has no coef() method, so exampleFun() returns coef = NULL.
objectnew <- buildModelObj(model = ~temperature,
solver.method = mylm,
solver.args = list('X' = "x", 'Y' = "y"),
predict.method = predict.mylm,
predict.args = list('obj'="object",
'data'="newdata"))
fitObjnew <- exampleFun(objectnew, pressure, ylog)
print(fitObjnew$coef)
| /data/genthat_extracted_code/modelObj/vignettes/modelObj.R | no_license | surayaaramli/typeRrh | R | false | false | 3,553 | r | ### R code from vignette source 'modelObj.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: modelObj.Rnw:185-187
###################################################
library(modelObj)
# Simplest usage: a one-covariate formula fitted with stats::lm through the
# default solver interface.
object1 <- buildModelObj(model=~x1, solver.method='lm')
###################################################
### code chunk number 2: modelObj.Rnw:195-211
###################################################
# Vignette example of a user-defined regression routine: delegates to
# stats::lm.fit() and classes the result "mylm" for S3 dispatch of the
# companion predict.mylm() method.
mylm <- function(X, Y) {
  out <- list()
  out$lm <- lm.fit(x = X, y = Y)
  out$var <- "does something neat"
  class(out) <- "mylm"
  out
}
# Predict method for "mylm" objects.  The underlying fit is on the log
# scale; results are therefore exponentiated.  NULL data returns the
# (back-transformed) in-sample fitted values.
predict.mylm <- function(obj, data = NULL) {
  pred <- if (is.null(data)) obj$lm$fitted.values else data %*% obj$lm$coefficients
  exp(pred)
}
###################################################
### code chunk number 3: modelObj.Rnw:219-225
###################################################
# User-supplied solver/predictor pair: solver.args/predict.args map
# modelObj's standard argument names ("x", "y", "object", "newdata") onto
# the formal argument names of mylm() and predict.mylm().
object2 <- buildModelObj(model = ~x1,
solver.method = mylm,
solver.args = list('X' = "x", 'Y' = "y"),
predict.method = predict.mylm,
predict.args = list('obj' = "object",
'data' = "newdata"))
###################################################
### code chunk number 4: modelObj.Rnw:329-330
###################################################
# The vignette's running example data set (base R 'pressure').
summary(pressure)
###################################################
### code chunk number 5: modelObj.Rnw:339-354
###################################################
# Fit a modelObj specification and return the squared fitted values along
# with the coefficient vector.
#
# Args:
#   modelObj: a modeling object built by buildModelObj().
#   data:     data.frame of covariates passed to modelObj::fit().
#   Y:        response vector.
# Returns: list(fittedSq = predict(fit)^2,
#               coef = coef(fit), or NULL when the regression method
#               provides no coef() method).
exampleFun <- function(modelObj, data, Y){
  fitObj <- fit(modelObj, data, Y)
  ## Test that coef() is an available method
  cfs <- try(coef(fitObj), silent=TRUE)
  # FIX: use inherits() instead of comparing class(cfs) == 'try-error';
  # class() may return a vector of length > 1, which makes '==' unreliable
  # (and an error inside if() under R >= 4.2).
  if (inherits(cfs, 'try-error')) {
    warning("Provided regression method does not have a coef method.\n")
    cfs <- NULL
  }
  fitted <- predict(fitObj)^2
  list("fittedSq"=fitted, "coef"=cfs)
}
###################################################
### code chunk number 6: modelObj.Rnw:360-367
###################################################
# Fit log(pressure) ~ temperature with stats::lm via string-named methods.
ylog <- log(pressure$pressure)
objlm <- buildModelObj(model = ~temperature,
solver.method = "lm",
predict.method = "predict.lm",
predict.args = list("type"="response"))
fitObjlm <- exampleFun(objlm, pressure, ylog)
print(fitObjlm$coef)
###################################################
### code chunk number 7: modelObj.Rnw:371-378
###################################################
# Nonlinear least squares via nls(); starting values go through solver.args.
objnls <- buildModelObj(model = ~exp(a + b*temperature),
solver.method = "nls",
solver.args = list('start'=list(a=1, b=0.1)),
predict.method = "predict",
predict.args = list("type" = "response"))
fitObjnls <- exampleFun(objnls, pressure, pressure$pressure)
print(fitObjnls$coef)
###################################################
### code chunk number 8: modelObj.Rnw:382-390
###################################################
# Same pipeline with the user-defined mylm()/predict.mylm() pair; its fit
# object has no coef() method, so exampleFun() returns coef = NULL.
objectnew <- buildModelObj(model = ~temperature,
solver.method = mylm,
solver.args = list('X' = "x", 'Y' = "y"),
predict.method = predict.mylm,
predict.args = list('obj'="object",
'data'="newdata"))
fitObjnew <- exampleFun(objectnew, pressure, ylog)
print(fitObjnew$coef)
|
##functions for prediction of soil properties
##baseoffset -- requires matrixStats package to be uploaded
# Baseline-offset a matrix: subtract each row's minimum so that every row
# has a minimum of zero.  Row minima come from matrixStats::rowMins().
base_offset <- function(x) {
  x - rowMins(x)
}
#Process to screen out outliers from the model.
#This function finds the optimum sd multiplier that retains the threshold share of samples (in %);
#here the threshold level is set to 1 (vec <= 1).
# Find the smallest multiplier in temp.sd such that trimming observations
# farther than multiplier * sd(fitted) from the regression line y ~ x
# removes at most 1% of the samples.
#
# Args:
#   x, y:    numeric vectors; a linear model y ~ x defines the trend line.
#   temp.sd: candidate band-width multipliers, scanned in order.
#   ...:     unused; kept for backward compatibility.
#     (FIX: the parameter was literally named '.....' -- five dots -- which
#      is an ordinary symbol, not the varargs token; renamed to '...'.)
# Returns: c(selected multiplier, number of samples outside the band), or
#   c(NA, NA) when no candidate keeps the outlier share at or below 1%.
optimum_sd_outlier <- function(x, y, temp.sd, ...)
{
  mod <- lm(y ~ x)
  reg <- fitted(mod)
  stdev <- sd(reg)
  vec <- vector(mode = "double", length = length(temp.sd))
  len.outl <- vector(mode = "numeric", length = length(temp.sd))
  # seq_along() (rather than 1:length()) keeps the loop safe for empty input
  for (i in seq_along(temp.sd))
  {
    lwr <- reg - temp.sd[i] * stdev
    upr <- reg + temp.sd[i] * stdev
    tmp.index <- which(y < upr & y > lwr)
    len.outl[i] <- length(y) - length(tmp.index)
    vec[i] <- len.outl[i] / length(y) * 100
  }
  # first candidate whose outlier share is at most 1%
  sd.index <- which(vec <= 1)[1]
  sd.value <- temp.sd[sd.index]
  c(sd.value, len.outl[sd.index])
}
#data sets with fitted line +/- 1sd are only retained
# Indices of observations lying within +/- sd * sd(fitted) of the linear
# trend y ~ x; points outside the band are treated as outliers and dropped.
#
# Args:
#   x, y: numeric vectors defining the regression y ~ x.
#   sd:   band-width multiplier (e.g. the value chosen by
#         optimum_sd_outlier()).
#   ...:  unused; kept for backward compatibility.
#     (FIX: the parameter was literally named '.....' -- five dots -- which
#      is an ordinary symbol, not the varargs token; renamed to '...'.)
# Returns: integer vector of row indices to keep.
outlier <- function(x, y, sd, ...)
{
  mod <- lm(y ~ x)
  reg <- fitted(mod)
  # NB: the argument 'sd' shadows stats::sd, but the call below still finds
  # the function because R skips non-function bindings when resolving calls.
  stdev <- sd(reg)
  lwr <- reg - sd * stdev
  upr <- reg + sd * stdev
  which(y < upr & y > lwr)
}
# Scatter-plot a model-vs-measurement comparison and annotate it with fit
# statistics.  Note the orientation: 'x' is the measured value (vertical
# axis) and 'y' the modeled value (horizontal axis); the regression fitted
# is lm(measured ~ modeled).
#   name               plot title
#   axis.min/axis.max  shared limits applied to both axes
#   x.lim/y.lim        position at which the annotation text is drawn
# Side effects: prints the lm summary and a statistics line to the console
# and draws on the active graphics device.  No value is returned.
fit.local <- function(x,y, name, axis.min, axis.max,x.lim, y.lim){
lm.model <- lm(x~y)
print(summary(lm.model))
#axis.max <- max(x,y, na.rm=TRUE)
#axis.min <- min(x,y, na.rm=TRUE)
# mean bias: average of (modeled - measured)
bias <- round((sum(y, na.rm=TRUE)- sum(x, na.rm=TRUE))/length(x),2)
plot(x~y, ylab = "Measured", xlab="Modeled", col= "black", xlim = c(axis.min, axis.max), ylim =c(axis.min, axis.max),main = paste(name))
# root mean squared error of the regression residuals
rmse <- round(sqrt(mean(resid(lm.model)^2)), 2)
coefs <- coef(lm.model)
b0 <- round(coefs[1], 2)
b1 <- round(coefs[2],2)
r2 <- round(summary(lm.model)$r.squared, 2)
std <- round(sd(x, na.rm=TRUE),2)
# RPD: ratio of performance to deviation (sd of measured values / RMSE)
rpd <- round(std / rmse,2)
eqn <- bquote(italic(y) == .(b1)*italic(x) + .(b0) * "," ~~
R^2 == .(r2) * "," ~~ RMSE == .(rmse) * ","~~ RPD== .(rpd))
# regression line plus the 1:1 reference line
abline(lm.model)
abline(0,1)
text(x.lim, y.lim, eqn, pos = 4, col="blue")
cat(paste("y = ", b1, "x", "+", b0, ", R2 = ", r2, ", RMSE = ", rmse, ", sd =", std), "\n")
cat("N=", length(x), "bias =", bias)
}
| /Soil-Predictions-Ensemble-Example/functions_soilp.R | no_license | lusensn/Soil-Predictions-MIR | R | false | false | 2,470 | r |
##functions for prediction of soil properties
##baseoffset -- requires matrixStats package to be uploaded
# Shift every row of x so that its smallest value becomes zero (baseline
# offset correction).  Row minima are computed with matrixStats::rowMins().
base_offset <- function(x) {
  sweep(x, 1, rowMins(x), FUN = "-")
}
#Process to screen out outliers from the model.
#This function finds the optimum sd multiplier that retains the threshold share of samples (in %);
#here the threshold level is set to 1 (vec <= 1).
# Find the smallest multiplier in temp.sd such that trimming observations
# farther than multiplier * sd(fitted) from the regression line y ~ x
# removes at most 1% of the samples.
#
# Args:
#   x, y:    numeric vectors; a linear model y ~ x defines the trend line.
#   temp.sd: candidate band-width multipliers, scanned in order.
#   ...:     unused; kept for backward compatibility.
#     (FIX: the parameter was literally named '.....' -- five dots -- which
#      is an ordinary symbol, not the varargs token; renamed to '...'.)
# Returns: c(selected multiplier, number of samples outside the band), or
#   c(NA, NA) when no candidate keeps the outlier share at or below 1%.
optimum_sd_outlier <- function(x, y, temp.sd, ...)
{
  mod <- lm(y ~ x)
  reg <- fitted(mod)
  stdev <- sd(reg)
  vec <- vector(mode = "double", length = length(temp.sd))
  len.outl <- vector(mode = "numeric", length = length(temp.sd))
  # seq_along() (rather than 1:length()) keeps the loop safe for empty input
  for (i in seq_along(temp.sd))
  {
    lwr <- reg - temp.sd[i] * stdev
    upr <- reg + temp.sd[i] * stdev
    tmp.index <- which(y < upr & y > lwr)
    len.outl[i] <- length(y) - length(tmp.index)
    vec[i] <- len.outl[i] / length(y) * 100
  }
  # first candidate whose outlier share is at most 1%
  sd.index <- which(vec <= 1)[1]
  sd.value <- temp.sd[sd.index]
  c(sd.value, len.outl[sd.index])
}
#data sets with fitted line +/- 1sd are only retained
# Indices of observations lying within +/- sd * sd(fitted) of the linear
# trend y ~ x; points outside the band are treated as outliers and dropped.
#
# Args:
#   x, y: numeric vectors defining the regression y ~ x.
#   sd:   band-width multiplier (e.g. the value chosen by
#         optimum_sd_outlier()).
#   ...:  unused; kept for backward compatibility.
#     (FIX: the parameter was literally named '.....' -- five dots -- which
#      is an ordinary symbol, not the varargs token; renamed to '...'.)
# Returns: integer vector of row indices to keep.
outlier <- function(x, y, sd, ...)
{
  mod <- lm(y ~ x)
  reg <- fitted(mod)
  # NB: the argument 'sd' shadows stats::sd, but the call below still finds
  # the function because R skips non-function bindings when resolving calls.
  stdev <- sd(reg)
  lwr <- reg - sd * stdev
  upr <- reg + sd * stdev
  which(y < upr & y > lwr)
}
# Scatter-plot a model-vs-measurement comparison and annotate it with fit
# statistics.  Note the orientation: 'x' is the measured value (vertical
# axis) and 'y' the modeled value (horizontal axis); the regression fitted
# is lm(measured ~ modeled).
#   name               plot title
#   axis.min/axis.max  shared limits applied to both axes
#   x.lim/y.lim        position at which the annotation text is drawn
# Side effects: prints the lm summary and a statistics line to the console
# and draws on the active graphics device.  No value is returned.
fit.local <- function(x,y, name, axis.min, axis.max,x.lim, y.lim){
lm.model <- lm(x~y)
print(summary(lm.model))
#axis.max <- max(x,y, na.rm=TRUE)
#axis.min <- min(x,y, na.rm=TRUE)
# mean bias: average of (modeled - measured)
bias <- round((sum(y, na.rm=TRUE)- sum(x, na.rm=TRUE))/length(x),2)
plot(x~y, ylab = "Measured", xlab="Modeled", col= "black", xlim = c(axis.min, axis.max), ylim =c(axis.min, axis.max),main = paste(name))
# root mean squared error of the regression residuals
rmse <- round(sqrt(mean(resid(lm.model)^2)), 2)
coefs <- coef(lm.model)
b0 <- round(coefs[1], 2)
b1 <- round(coefs[2],2)
r2 <- round(summary(lm.model)$r.squared, 2)
std <- round(sd(x, na.rm=TRUE),2)
# RPD: ratio of performance to deviation (sd of measured values / RMSE)
rpd <- round(std / rmse,2)
eqn <- bquote(italic(y) == .(b1)*italic(x) + .(b0) * "," ~~
R^2 == .(r2) * "," ~~ RMSE == .(rmse) * ","~~ RPD== .(rpd))
# regression line plus the 1:1 reference line
abline(lm.model)
abline(0,1)
text(x.lim, y.lim, eqn, pos = 4, col="blue")
cat(paste("y = ", b1, "x", "+", b0, ", R2 = ", r2, ", RMSE = ", rmse, ", sd =", std), "\n")
cat("N=", length(x), "bias =", bias)
}
|
#' @encoding UTF-8
#' @title Detect Outliers
#' @description Performs an exploratory test to detect \emph{outliers}. The
#' quantity in \emph{min} is the smallest absolute deviation from the mean and
#' the integer in \emph{closest} is the position of that element; \emph{max}
#' is the largest absolute deviation from the mean and \emph{farthest} the
#' position of that element.
#'
#' @param x A numeric vector, matrix, data.frame, or list of such objects.
#' @param index An optional grouping index (vector, or list of vectors); when
#' supplied, deviations are computed within each group and mapped back to the
#' original order.
#'
#' @return A \code{data.frame} with columns \code{closest}, \code{min},
#' \code{farthest} and \code{max} (positions and deviations as described
#' above); for matrix, data.frame, and list input the computation is applied
#' column-wise/element-wise.
#'
#' @references Dixon, W.J. (1950) Analysis of extreme values. \emph{Ann. Math. Stat.} \bold{21(4),} 488--506.
#'
#' @author Daniel Marcelino, \email{dmarcelino@@live.com}
#'
#' @seealso \link{winsorize} for diminishing the impact of outliers.
#'
#' @examples
#' outliers(x <- rnorm(20))
#'
#' #data frame:
#' age <- sample(1:100, 1000, rep=TRUE);
#' outliers(age)
#'
#' @export
`outliers` <-
function(x, index=NULL) {
  if (is.data.frame(x)) {
    as.data.frame(sapply(x, outliers, index))
  } else if (is.matrix(x)) {
    apply(x, 2, outliers, index)
  } else if (is.list(x)) {
    lapply(x, outliers, index)
  } else if (is.vector(x)) {
    if (!is.null(index)) {
      if (!is.list(index)) {
        index <- list(index) # split()/unsplit() expect a list of factors
      }
      unsplit(outliers(split(x, index), index = NULL), index)
    } else {
      mu <- mean(x)
      dev <- abs(x - mu)
      closest <- which.min(dev)
      farthest <- which.max(dev)
      min <- dev[closest]
      max <- dev[farthest]
      output <- data.frame(closest, min, farthest, max)
      return(output)
    }
  } else {
    # FIX: the message referred to 'outlier' although the function is
    # outliers(), and sep="" glued the class name onto the message text.
    cat("non-numeric argument to 'outliers': ", class(x), "\n", sep = "")
  }
}
NULL
| /SciencesPo/R/outliers.R | no_license | ingted/R-Examples | R | false | false | 1,878 | r | #' @encoding UTF-8
#' @title Detect Outliers
#' @description Runs an exploratory outlier diagnostic: for a numeric vector
#' it reports the position (\code{closest}) and size (\code{min}) of the
#' smallest absolute deviation from the mean, and the position
#' (\code{farthest}) and size (\code{max}) of the largest one.
#'
#' @param x A numeric vector, matrix, data.frame, or list of such objects.
#' @param index An optional grouping index; when supplied, the diagnostic is
#' computed within each group.
#'
#' @return A \code{data.frame} with columns \code{closest}, \code{min},
#' \code{farthest}, and \code{max}; applied column-wise/element-wise for
#' matrix, data.frame, and list input.
#'
#' @references Dixon, W.J. (1950) Analysis of extreme values. \emph{Ann. Math. Stat.} \bold{21(4),} 488--506.
#'
#' @author Daniel Marcelino, \email{dmarcelino@@live.com}
#'
#' @seealso \link{winsorize} for diminishing the impact of outliers.
#'
#' @examples
#' outliers(x <- rnorm(20))
#'
#' #data frame:
#' age <- sample(1:100, 1000, rep=TRUE);
#' outliers(age)
#'
#' @export
`outliers` <-
function(x, index=NULL) {
  # Recurse over compound containers first; the vector branch does the work.
  if (is.data.frame(x)) {
    return(as.data.frame(sapply(x, outliers, index)))
  }
  if (is.matrix(x)) {
    return(apply(x, 2, outliers, index))
  }
  if (is.list(x)) {
    return(lapply(x, outliers, index))
  }
  if (is.vector(x)) {
    if (is.null(index)) {
      deviation <- abs(x - mean(x))
      nearest <- which.min(deviation)
      furthest <- which.max(deviation)
      return(data.frame(closest = nearest,
                        min = deviation[nearest],
                        farthest = furthest,
                        max = deviation[furthest]))
    }
    if (!is.list(index)) {
      index <- list(index) # unsplit() expects a list of grouping factors
    }
    return(unsplit(outliers(split(x, index), index = NULL), index))
  }
  cat("non-numeric argument to 'outlier'",class(x),"\n",sep="")
}
NULL
|
#' @title rowURLextractor function
#' @description Extracts the link text and url from the webpage row(s) matched by a specific expression.
#' @param x list of urls or MenuListExtractor object.
#' @param item link/expression that contains the url of interest.
#' @param select if more than one matched row is found, defines which one to work with.
#' @param messages display progress messages (TRUE).
#' @export
#' @import dplyr
#' @return Returns the data.frame object containing levels, links and urls.
#' @examples
#' library(CECscraper)
#'
#' murl <- "http://notelections.online/region/izbirkom?action=show&vrn=27720002327736&region=77&prver=0&pronetvd=nul"
#' oiks<-listURLextractor(murl)
#' rayons<-listURLextractor(listURLextractor(murl))
#' #rayons<-listURLextractor(murl) %>% listURLextractor()
#' uiks <- listURLextractor(
#'          rowURLextractor(
#'            listURLextractor(
#'               listURLextractor(murl))[1:5,], 'sayt izbiratel`noy komissii sub`yekta'))
#'
#' uiks_turnout<-rowURLextractor(uiks, "Dannyye ob otkrytii pomeshcheniy dlya golosovaniya")
#' uiks_voting<-rowURLextractor(uiks, "Rezul`taty vyborov|vyborov po odnomandatnomu \\(mnogomandatnomu\\) okrugu")
rowURLextractor<-function(x, item, select = 1, messages = TRUE){
if(isTRUE(messages)){
cat("\n\nStarting rowURLextractor()...\n\n")
}
# Drop rows previously flagged as non-scrapable (the 'webscrape' column is
# presumably added by other CECscraper helpers -- confirm in package source).
if("webscrape" %in% colnames(x)) {x <- x[x$webscrape,]}
# Remember the scheme+host prefix of the first url so that relative urls
# can be absolutized at the end.
if(is.data.frame(x)){
httpadd<-substring(x$url[1], 1,regexpr("(?<=[[:alpha:]])/", x$url[1], perl=TRUE)[1]-1)
}else{
httpadd<-substring(x[1], 1,regexpr("(?<=[[:alpha:]])/", x[1], perl=TRUE)[1]-1)
}
# Collapse accidental "/region/region/" repetitions introduced by prefixing.
regreg <- function(x){
gsub("/region/region/", "/region/", x)
}
# Case 1: a single url -- scrape one page and keep the row(s) whose
# (transliterated) link text matches 'item'.  scrapregion(), scrapmenupage()
# and transliterate() are package-internal helpers (assumed to return
# link/url tables -- verify against the package source).
if (is.character(x) & length(x)==1){
links<-data.frame(scrapregion(x), scrapmenupage(x))%>%filter(!is.na(url))
links$link<-transliterate(links$link)
nlink<-links[grepl(item, links$link),][select,]
}
# Case 2: a vector of urls -- scrape each page and select the matching row
# per page; scraping failures are downgraded to warnings plus NA rows.
if (is.character(x) & length(x)>1){
list.links<-lapply(1:length(x), function(iter) {
k<-tryCatch(data.frame(scrapregion(x[iter]), scrapmenupage(x[iter]))%>%filter(!is.na(url)), error = function(e) e)
cat("scraping page N", iter, "\n")
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA,NA)}
return(k)})
list.links.sel<-lapply(list.links, function(x){
k<-tryCatch(x[grepl(item, transliterate(x$link)),][select,], error = function(e) e)
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA,NA)}
return(k)})
#if(inherits(list.links.sel, "error")) {warning("Row URL non-scrapable or missing from the webpage")}
links <- data.frame(do.call(rbind,list.links.sel)); colnames(links)<-c("level1","link", "url")
links$link<-transliterate(links$link)
nlink=as.data.frame(links, stringsAsFactors = FALSE)
row.names(nlink) <- NULL
}
# Case 3: a data.frame from a previous extractor step -- scrape each row's
# url and append the matched link/url to the existing level columns.
if (is.data.frame(x)){
#x <- x[!is.na(x$url),]
list.links<-lapply(1:dim(x)[1], function(iter) {
k <- tryCatch(as.data.frame(scrapmenupage(x$url[iter]))%>%filter(!is.na(url)), error = function(e) e)
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA)}
cat("scraping page N", iter, "\n")
return(k)})
list.links.sel<-lapply(list.links, function(x){
k <- tryCatch(x[grepl(item, transliterate(x$link)),][select,], error = function(e) e)
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA)}
return(k)})
links <- do.call(rbind,list.links.sel)
if(dim(links)[1]==0){
stop('Item is not properly specified. The number of found matches is zero.')
}
# The previous link column is renamed to the next "levelN" slot so that the
# freshly matched 'link'/'url' pair becomes the deepest level.
level <- grepl("level|link",names(x))
max_level <- suppressWarnings(max(as.numeric(unlist(regmatches(names(x)[level], gregexpr("[[:digit:]]+", names(x)[level]))))))
colnames(x)[colnames(x) == 'link'] <- paste0("level", max_level+1)
nlink=data.frame(x[,level], links)%>%apply(2, function(x) as.character(x))%>%as.data.frame(stringsAsFactors = FALSE)
nlink$link<-transliterate(links$link)
# single-row input collapses to a vector under apply(); rebuild the frame
if(nrow(nlink)>ncol(nlink) & dim(x)[1]==1){nlink <- data.frame(t(nlink)); nlink$link[1] <- nlink$link[2]; nlink<-nlink[-2,]}
row.names(nlink) <- NULL
}
# Order columns by name, coerce everything to character, and absolutize
# relative urls with the saved host prefix.
nlink <- nlink[, order(names(nlink))]
nlink[]<-lapply(nlink, as.character)
# if(is.data.frame(x)){
# if(!grepl("http://", nlink$url[1])){
# nlink$url <- paste(substring(x$url[1], 1,regexpr("(?<=[[:alpha:]])/", x$url[1], perl=TRUE)[1]-1), nlink$url, sep="")}
# }else{
# if(!grepl("http://", nlink$url[1])){
# nlink$url <- paste(substring(x, 1,regexpr("(?<=[[:alpha:]])/", x[1], perl=TRUE)[1]-1), nlink$url, sep="")}
# }
nlink$url[!grepl("http://", nlink$url)] <- regreg(paste(httpadd, nlink$url[!grepl("http://", nlink$url)], sep=""))
on.exit(closeAllConnections())
invisible(gc())
return(nlink)}
| /R/rowURLextractor.R | no_license | joshuatee/CECscraper | R | false | false | 5,080 | r | #' @title rowURLextractor function
#' @description This function extracts link and url from the webpage's row defined by specific expression.
#' @param x list of urls or MenuListExtractor object.
#' @param item link/expression that contains the url of interest.
#' @param select if more than one matched rows are found, define which one to work with.
#' @param messages display progress messages (TRUE).
#' @export
#' @import dplyr
#' @return Returns the data.frame object containing levels, links and urls.
#' @examples
#' library(CECscraper)
#'
#' murl<-"http://notelections.online/region/izbirkom?action=show&vrn=27720002327736&region=77&prver=0&pronetvd=nul"
#' oiks<-listURLextractor(murl)
#' rayons<-listURLextractor(listURLextractor(murl))
#' #rayons<-listURLextractor(murl) %>% listURLextractor()
#' uiks <- listURLextractor(
#' rowURLextractor(
#' listURLextractor(
#' listURLextractor(murl))[1:5,], 'sayt izbiratel`noy komissii sub`yekta'))
#'
#' uiks_turnout<-rowURLextractor(uiks, "Dannyye ob otkrytii pomeshcheniy dlya golosovaniya")
#' uiks_voting<-rowURLextractor(uiks, "Rezul`taty vyborov|vyborov po odnomandatnomu \\(mnogomandatnomu\\) okrugu")
rowURLextractor<-function(x, item, select = 1, messages = TRUE){
# Optional progress banner.
if(isTRUE(messages)){
cat("\n\nStarting rowURLextractor()...\n\n")
}
# Honour an optional 'webscrape' flag column: keep only rows marked TRUE.
if("webscrape" %in% colnames(x)) {x <- x[x$webscrape,]}
# Base address (scheme + host) of the first url: everything before the first
# "/" that follows a letter. Used at the end to absolutise relative urls.
if(is.data.frame(x)){
httpadd<-substring(x$url[1], 1,regexpr("(?<=[[:alpha:]])/", x$url[1], perl=TRUE)[1]-1)
}else{
httpadd<-substring(x[1], 1,regexpr("(?<=[[:alpha:]])/", x[1], perl=TRUE)[1]-1)
}
# Collapse an accidentally doubled "/region/" path segment.
regreg <- function(x){
gsub("/region/region/", "/region/", x)
}
# Case 1: x is a single url string — scrape it and keep the matching row.
# NOTE(review): scrapregion(), scrapmenupage() and transliterate() are
# defined elsewhere in this package; their exact output shape is assumed.
if (is.character(x) & length(x)==1){
links<-data.frame(scrapregion(x), scrapmenupage(x))%>%filter(!is.na(url))
links$link<-transliterate(links$link)
nlink<-links[grepl(item, links$link),][select,]
}
# Case 2: x is a character vector of urls — scrape each page in turn,
# warn (and pad with NAs) on failures, then select the matching rows.
if (is.character(x) & length(x)>1){
list.links<-lapply(1:length(x), function(iter) {
k<-tryCatch(data.frame(scrapregion(x[iter]), scrapmenupage(x[iter]))%>%filter(!is.na(url)), error = function(e) e)
cat("scraping page N", iter, "\n")
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA,NA)}
return(k)})
list.links.sel<-lapply(list.links, function(x){
k<-tryCatch(x[grepl(item, transliterate(x$link)),][select,], error = function(e) e)
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA,NA)}
return(k)})
#if(inherits(list.links.sel, "error")) {warning("Row URL non-scrapable or missing from the webpage")}
links <- data.frame(do.call(rbind,list.links.sel)); colnames(links)<-c("level1","link", "url")
links$link<-transliterate(links$link)
nlink=as.data.frame(links, stringsAsFactors = FALSE)
row.names(nlink) <- NULL
}
# Case 3: x is a data.frame from a previous extractor call — scrape each
# x$url row and carry the existing level*/link columns along.
if (is.data.frame(x)){
#x <- x[!is.na(x$url),]
list.links<-lapply(1:dim(x)[1], function(iter) {
k <- tryCatch(as.data.frame(scrapmenupage(x$url[iter]))%>%filter(!is.na(url)), error = function(e) e)
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA)}
cat("scraping page N", iter, "\n")
return(k)})
list.links.sel<-lapply(list.links, function(x){
k <- tryCatch(x[grepl(item, transliterate(x$link)),][select,], error = function(e) e)
if(inherits(k, "error")) {warning("Row URL non-scrapable or missing from the webpage"); k=c(NA,NA)}
return(k)})
links <- do.call(rbind,list.links.sel)
if(dim(links)[1]==0){
stop('Item is not properly specified. The number of found matches is zero.')
}
# Rename the previous 'link' column to the next free levelN slot so the
# freshly scraped link/url pair can be appended without a name clash.
level <- grepl("level|link",names(x))
max_level <- suppressWarnings(max(as.numeric(unlist(regmatches(names(x)[level], gregexpr("[[:digit:]]+", names(x)[level]))))))
colnames(x)[colnames(x) == 'link'] <- paste0("level", max_level+1)
nlink=data.frame(x[,level], links)%>%apply(2, function(x) as.character(x))%>%as.data.frame(stringsAsFactors = FALSE)
nlink$link<-transliterate(links$link)
# apply() on a single input row yields a transposed shape; undo it here.
if(nrow(nlink)>ncol(nlink) & dim(x)[1]==1){nlink <- data.frame(t(nlink)); nlink$link[1] <- nlink$link[2]; nlink<-nlink[-2,]}
row.names(nlink) <- NULL
}
# Normalise: alphabetical column order, all columns as character.
nlink <- nlink[, order(names(nlink))]
nlink[]<-lapply(nlink, as.character)
# if(is.data.frame(x)){
#   if(!grepl("http://", nlink$url[1])){
#     nlink$url <- paste(substring(x$url[1], 1,regexpr("(?<=[[:alpha:]])/", x$url[1], perl=TRUE)[1]-1), nlink$url, sep="")}
# }else{
#   if(!grepl("http://", nlink$url[1])){
#     nlink$url <- paste(substring(x, 1,regexpr("(?<=[[:alpha:]])/", x[1], perl=TRUE)[1]-1), nlink$url, sep="")}
# }
# Prepend the base address to urls that lack a scheme.
# NOTE(review): the plain "http://" test treats https urls as relative —
# TODO confirm against the sites being scraped.
nlink$url[!grepl("http://", nlink$url)] <- regreg(paste(httpadd, nlink$url[!grepl("http://", nlink$url)], sep=""))
on.exit(closeAllConnections())
invisible(gc())
return(nlink)}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_coordinates.R
\name{clean_coordinates}
\alias{clean_coordinates}
\alias{CleanCoordinates}
\alias{summary.spatialvalid}
\alias{is.spatialvalid}
\title{Geographic Cleaning of Coordinates from Biologic Collections}
\usage{
clean_coordinates(
x,
lon = "decimallongitude",
lat = "decimallatitude",
species = "species",
countries = NULL,
tests = c("capitals", "centroids", "equal", "gbif", "institutions", "outliers",
"seas", "zeros"),
capitals_rad = 10000,
centroids_rad = 1000,
centroids_detail = "both",
inst_rad = 100,
outliers_method = "quantile",
outliers_mtp = 5,
outliers_td = 1000,
outliers_size = 7,
range_rad = 0,
zeros_rad = 0.5,
capitals_ref = NULL,
centroids_ref = NULL,
country_ref = NULL,
country_refcol = "iso_a3_eh",
inst_ref = NULL,
range_ref = NULL,
seas_ref = NULL,
seas_scale = 50,
urban_ref = NULL,
value = "spatialvalid",
verbose = TRUE,
report = FALSE
)
}
\arguments{
\item{x}{data.frame. Containing geographical coordinates and species
names.}
\item{lon}{character string. The column with the longitude coordinates.
Default = \dQuote{decimallongitude}.}
\item{lat}{character string. The column with the latitude coordinates.
Default = \dQuote{decimallatitude}.}
\item{species}{a character string. A vector of the same length as rows in x,
with the species identity for each record. If missing, the outliers test is
skipped.}
\item{countries}{a character string. The column with the country assignment of
each record in three letter ISO code. Default = \dQuote{countrycode}. If missing, the
countries test is skipped.}
\item{tests}{a vector of character strings, indicating which tests to run.
See details for all tests available. Default = c("capitals", "centroids",
"equal", "gbif", "institutions", "outliers",
"seas", "zeros")}
\item{capitals_rad}{numeric. The radius around capital coordinates in
meters. Default = 10000.}
\item{centroids_rad}{numeric. The radius around capital coordinates in
meters. Default = 1000.}
\item{centroids_detail}{a \code{character string}. If set to
\sQuote{country} only country (adm-0) centroids are tested, if set to
\sQuote{provinces} only province (adm-1) centroids are tested. Default =
\sQuote{both}.}
\item{inst_rad}{numeric. The radius around biodiversity institutions
coordinates in metres. Default = 100.}
\item{outliers_method}{The method used for outlier testing. See details.}
\item{outliers_mtp}{numeric. The multiplier for the interquartile range of
the outlier test. If NULL \code{outliers.td} is used. Default = 5.}
\item{outliers_td}{numeric. The minimum distance of a record to all other
records of a species to be identified as outlier, in km. Default = 1000.}
\item{outliers_size}{numerical. The minimum number of records in a dataset
to run the taxon-specific outlier test. Default = 7.}
\item{range_rad}{buffer around natural ranges. Default = 0.}
\item{zeros_rad}{numeric. The radius around 0/0 in degrees. Default = 0.5.}
\item{capitals_ref}{a \code{data.frame} with alternative reference data for
the country capitals test. If missing, the \code{countryref} dataset is used.
Alternatives must be identical in structure.}
\item{centroids_ref}{a \code{data.frame} with alternative reference data for
the centroid test. If NULL, the \code{countryref} dataset is used.
Alternatives must be identical in structure.}
\item{country_ref}{a \code{SpatialPolygonsDataFrame} as alternative
reference for the countries test. If NULL, the
\code{rnaturalearth::ne_countries('medium')} dataset is used.}
\item{country_refcol}{the column name in the reference dataset, containing the relevant
ISO codes for matching. Defaults to "iso_a3_eh", which refers to the ISO-3
codes in the reference dataset. See notes.}
\item{inst_ref}{a \code{data.frame} with alternative reference data for the
biodiversity institution test. If NULL, the \code{institutions} dataset
is used. Alternatives must be identical in structure.}
\item{range_ref}{a \code{SpatialPolygonsDataFrame} of species natural ranges.
Required to include the 'ranges' test. See \code{\link{cc_iucn}} for details.}
\item{seas_ref}{a \code{SpatialPolygonsDataFrame} as alternative reference
for the seas test. If NULL, the
rnaturalearth::ne_download(scale = 110, type = 'land', category = 'physical')
dataset is used.}
\item{seas_scale}{The scale of the default landmass reference. Must be one of 10, 50, 110.
Higher numbers equal higher detail. Default = 50.}
\item{urban_ref}{a \code{SpatialPolygonsDataFrame} as alternative reference
for the urban test. If NULL, the test is skipped. See details for a
reference gazetteers.}
\item{value}{a character string defining the output value. See the value
section for details. one of \sQuote{spatialvalid}, \sQuote{summary},
\sQuote{clean}. Default = \sQuote{\code{spatialvalid}}.}
\item{verbose}{logical. If TRUE reports the name of the test and the number
of records flagged.}
\item{report}{logical or character. If TRUE a report file is written to the
working directory, summarizing the cleaning results. If a character, the
path to which the file should be written. Default = FALSE.}
}
\value{
Depending on the output argument:
\describe{
\item{\dQuote{spatialvalid}}{an object of class \code{spatialvalid} similar to x
with one column added for each test. TRUE = clean coordinate entry, FALSE = potentially
problematic coordinate entries. The .summary column is FALSE if any test flagged
the respective coordinate.}
\item{\dQuote{flagged}}{a logical vector with the
same order as the input data summarizing the results of all test. TRUE =
clean coordinate, FALSE = potentially problematic (= at least one test
failed).}
\item{\dQuote{clean}}{a \code{data.frame} similar to x
with potentially problematic records removed}
}
}
\description{
Cleaning geographic coordinates by multiple empirical tests to flag
potentially erroneous coordinates, addressing issues common in biological
collection databases.
}
\details{
The function needs all coordinates to be formally valid according to WGS84.
If the data contains invalid coordinates, the function will stop and return
a vector flagging the invalid records. TRUE = non-problematic coordinate,
FALSE = potentially problematic coordinates.
\itemize{
\item capitals tests a radius around adm-0 capitals. The
radius is \code{capitals_rad}.
\item centroids tests a radius around country centroids.
The radius is \code{centroids_rad}.
\item countries tests if coordinates are from the
country indicated in the country column. \emph{Switched off by default.}
\item duplicates tests for duplicate records. This
checks for identical coordinates or if a species vector is provided for
identical coordinates within a species. All but the first records are
flagged as duplicates. \emph{Switched off by default.}
\item equal tests for equal absolute longitude and latitude.
\item gbif tests a one-degree radius around the GBIF
headquarters in Copenhagen, Denmark.
\item institutions tests a radius around known
biodiversity institutions from \code{institutions}. The radius is
\code{inst_rad}.
\item outliers tests each species for outlier records.
Depending on the \code{outliers_mtp} and \code{outliers.td} arguments either
flags records that are a minimum distance away from all other records of
this species (\code{outliers_td}) or records that are outside a multiple of
the interquartile range of minimum distances to the next neighbour of this
species (\code{outliers_mtp}). Three different methods are available
for the outlier test: "If
\dQuote{outlier} a boxplot method is used and records are flagged as
outliers if their \emph{mean} distance to all other records of the same
species is larger than mltpl * the interquartile range of the mean distance
of all records of this species. If \dQuote{mad} the median absolute
deviation is used. In this case a record is flagged as outlier, if the
\emph{mean} distance to all other records of the same species is larger than
the median of the mean distance of all points plus/minus the mad of the mean
distances of all records of the species * mltpl. If \dQuote{distance}
records are flagged as outliers, if the \emph{minimum} distance to the next
record of the species is > \code{tdi}.
\item ranges tests if records fall within provided natural range polygons on
a per species basis. See \code{\link{cc_iucn}} for details.
\item seas tests if coordinates fall into the ocean.
\item urban tests if coordinates are from urban areas.
\emph{Switched off by default}
\item validity checks if coordinates correspond to a lat/lon coordinate reference system.
This test is always on, since all records need to pass for any other test to run.
\item zeros tests for plain zeros, equal latitude and
longitude and a radius around the point 0/0. The radius is \code{zeros.rad}.
}
}
\note{
Always tests for coordinate validity: non-numeric or missing
coordinates and coordinates exceeding the global extent (lon/lat, WGS84).
See \url{https://ropensci.github.io/CoordinateCleaner/} for more details
and tutorials.
The country_refcol argument allows to adapt the function to the structure of
alternative reference datasets. For instance, for
\code{rnaturalearth::ne_countries(scale = "small")}, the default will fail,
but country_refcol = "iso_a3" will work.
}
\examples{
exmpl <- data.frame(species = sample(letters, size = 250, replace = TRUE),
decimallongitude = runif(250, min = 42, max = 51),
decimallatitude = runif(250, min = -26, max = -11))
test <- clean_coordinates(x = exmpl,
tests = c("equal"))
\dontrun{
#run more tests
test <- clean_coordinates(x = exmpl,
tests = c("capitals",
"centroids","equal",
"gbif", "institutions",
"outliers", "seas",
"zeros"))
}
summary(test)
}
\seealso{
Other Wrapper functions:
\code{\link{clean_dataset}()},
\code{\link{clean_fossils}()}
}
\concept{Wrapper functions}
\keyword{Coordinate}
\keyword{cleaning}
\keyword{wrapper}
| /man/clean_coordinates.Rd | no_license | gejielin/CoordinateCleaner | R | false | true | 10,312 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_coordinates.R
\name{clean_coordinates}
\alias{clean_coordinates}
\alias{CleanCoordinates}
\alias{summary.spatialvalid}
\alias{is.spatialvalid}
\title{Geographic Cleaning of Coordinates from Biologic Collections}
\usage{
clean_coordinates(
x,
lon = "decimallongitude",
lat = "decimallatitude",
species = "species",
countries = NULL,
tests = c("capitals", "centroids", "equal", "gbif", "institutions", "outliers",
"seas", "zeros"),
capitals_rad = 10000,
centroids_rad = 1000,
centroids_detail = "both",
inst_rad = 100,
outliers_method = "quantile",
outliers_mtp = 5,
outliers_td = 1000,
outliers_size = 7,
range_rad = 0,
zeros_rad = 0.5,
capitals_ref = NULL,
centroids_ref = NULL,
country_ref = NULL,
country_refcol = "iso_a3_eh",
inst_ref = NULL,
range_ref = NULL,
seas_ref = NULL,
seas_scale = 50,
urban_ref = NULL,
value = "spatialvalid",
verbose = TRUE,
report = FALSE
)
}
\arguments{
\item{x}{data.frame. Containing geographical coordinates and species
names.}
\item{lon}{character string. The column with the longitude coordinates.
Default = \dQuote{decimallongitude}.}
\item{lat}{character string. The column with the latitude coordinates.
Default = \dQuote{decimallatitude}.}
\item{species}{a character string. A vector of the same length as rows in x,
with the species identity for each record. If missing, the outliers test is
skipped.}
\item{countries}{a character string. The column with the country assignment of
each record in three letter ISO code. Default = \dQuote{countrycode}. If missing, the
countries test is skipped.}
\item{tests}{a vector of character strings, indicating which tests to run.
See details for all tests available. Default = c("capitals", "centroids",
"equal", "gbif", "institutions", "outliers",
"seas", "zeros")}
\item{capitals_rad}{numeric. The radius around capital coordinates in
meters. Default = 10000.}
\item{centroids_rad}{numeric. The radius around capital coordinates in
meters. Default = 1000.}
\item{centroids_detail}{a \code{character string}. If set to
\sQuote{country} only country (adm-0) centroids are tested, if set to
\sQuote{provinces} only province (adm-1) centroids are tested. Default =
\sQuote{both}.}
\item{inst_rad}{numeric. The radius around biodiversity institutions
coordinates in metres. Default = 100.}
\item{outliers_method}{The method used for outlier testing. See details.}
\item{outliers_mtp}{numeric. The multiplier for the interquartile range of
the outlier test. If NULL \code{outliers.td} is used. Default = 5.}
\item{outliers_td}{numeric. The minimum distance of a record to all other
records of a species to be identified as outlier, in km. Default = 1000.}
\item{outliers_size}{numerical. The minimum number of records in a dataset
to run the taxon-specific outlier test. Default = 7.}
\item{range_rad}{buffer around natural ranges. Default = 0.}
\item{zeros_rad}{numeric. The radius around 0/0 in degrees. Default = 0.5.}
\item{capitals_ref}{a \code{data.frame} with alternative reference data for
the country capitals test. If missing, the \code{countryref} dataset is used.
Alternatives must be identical in structure.}
\item{centroids_ref}{a \code{data.frame} with alternative reference data for
the centroid test. If NULL, the \code{countryref} dataset is used.
Alternatives must be identical in structure.}
\item{country_ref}{a \code{SpatialPolygonsDataFrame} as alternative
reference for the countries test. If NULL, the
\code{rnaturalearth::ne_countries('medium')} dataset is used.}
\item{country_refcol}{the column name in the reference dataset, containing the relevant
ISO codes for matching. Defaults to "iso_a3_eh", which refers to the ISO-3
codes in the reference dataset. See notes.}
\item{inst_ref}{a \code{data.frame} with alternative reference data for the
biodiversity institution test. If NULL, the \code{institutions} dataset
is used. Alternatives must be identical in structure.}
\item{range_ref}{a \code{SpatialPolygonsDataFrame} of species natural ranges.
Required to include the 'ranges' test. See \code{\link{cc_iucn}} for details.}
\item{seas_ref}{a \code{SpatialPolygonsDataFrame} as alternative reference
for the seas test. If NULL, the
rnaturalearth::ne_download(scale = 110, type = 'land', category = 'physical')
dataset is used.}
\item{seas_scale}{The scale of the default landmass reference. Must be one of 10, 50, 110.
Higher numbers equal higher detail. Default = 50.}
\item{urban_ref}{a \code{SpatialPolygonsDataFrame} as alternative reference
for the urban test. If NULL, the test is skipped. See details for a
reference gazetteers.}
\item{value}{a character string defining the output value. See the value
section for details. one of \sQuote{spatialvalid}, \sQuote{summary},
\sQuote{clean}. Default = \sQuote{\code{spatialvalid}}.}
\item{verbose}{logical. If TRUE reports the name of the test and the number
of records flagged.}
\item{report}{logical or character. If TRUE a report file is written to the
working directory, summarizing the cleaning results. If a character, the
path to which the file should be written. Default = FALSE.}
}
\value{
Depending on the output argument:
\describe{
\item{\dQuote{spatialvalid}}{an object of class \code{spatialvalid} similar to x
with one column added for each test. TRUE = clean coordinate entry, FALSE = potentially
problematic coordinate entries. The .summary column is FALSE if any test flagged
the respective coordinate.}
\item{\dQuote{flagged}}{a logical vector with the
same order as the input data summarizing the results of all test. TRUE =
clean coordinate, FALSE = potentially problematic (= at least one test
failed).}
\item{\dQuote{clean}}{a \code{data.frame} similar to x
with potentially problematic records removed}
}
}
\description{
Cleaning geographic coordinates by multiple empirical tests to flag
potentially erroneous coordinates, addressing issues common in biological
collection databases.
}
\details{
The function needs all coordinates to be formally valid according to WGS84.
If the data contains invalid coordinates, the function will stop and return
a vector flagging the invalid records. TRUE = non-problematic coordinate,
FALSE = potentially problematic coordinates.
\itemize{
\item capitals tests a radius around adm-0 capitals. The
radius is \code{capitals_rad}.
\item centroids tests a radius around country centroids.
The radius is \code{centroids_rad}.
\item countries tests if coordinates are from the
country indicated in the country column. \emph{Switched off by default.}
\item duplicates tests for duplicate records. This
checks for identical coordinates or if a species vector is provided for
identical coordinates within a species. All but the first records are
flagged as duplicates. \emph{Switched off by default.}
\item equal tests for equal absolute longitude and latitude.
\item gbif tests a one-degree radius around the GBIF
headquarters in Copenhagen, Denmark.
\item institutions tests a radius around known
biodiversity institutions from \code{institutions}. The radius is
\code{inst_rad}.
\item outliers tests each species for outlier records.
Depending on the \code{outliers_mtp} and \code{outliers.td} arguments either
flags records that are a minimum distance away from all other records of
this species (\code{outliers_td}) or records that are outside a multiple of
the interquartile range of minimum distances to the next neighbour of this
species (\code{outliers_mtp}). Three different methods are available
for the outlier test: "If
\dQuote{outlier} a boxplot method is used and records are flagged as
outliers if their \emph{mean} distance to all other records of the same
species is larger than mltpl * the interquartile range of the mean distance
of all records of this species. If \dQuote{mad} the median absolute
deviation is used. In this case a record is flagged as outlier, if the
\emph{mean} distance to all other records of the same species is larger than
the median of the mean distance of all points plus/minus the mad of the mean
distances of all records of the species * mltpl. If \dQuote{distance}
records are flagged as outliers, if the \emph{minimum} distance to the next
record of the species is > \code{tdi}.
\item ranges tests if records fall within provided natural range polygons on
a per species basis. See \code{\link{cc_iucn}} for details.
\item seas tests if coordinates fall into the ocean.
\item urban tests if coordinates are from urban areas.
\emph{Switched off by default}
\item validity checks if coordinates correspond to a lat/lon coordinate reference system.
This test is always on, since all records need to pass for any other test to run.
\item zeros tests for plain zeros, equal latitude and
longitude and a radius around the point 0/0. The radius is \code{zeros.rad}.
}
}
\note{
Always tests for coordinate validity: non-numeric or missing
coordinates and coordinates exceeding the global extent (lon/lat, WGS84).
See \url{https://ropensci.github.io/CoordinateCleaner/} for more details
and tutorials.
The country_refcol argument allows to adapt the function to the structure of
alternative reference datasets. For instance, for
\code{rnaturalearth::ne_countries(scale = "small")}, the default will fail,
but country_refcol = "iso_a3" will work.
}
\examples{
exmpl <- data.frame(species = sample(letters, size = 250, replace = TRUE),
decimallongitude = runif(250, min = 42, max = 51),
decimallatitude = runif(250, min = -26, max = -11))
test <- clean_coordinates(x = exmpl,
tests = c("equal"))
\dontrun{
#run more tests
test <- clean_coordinates(x = exmpl,
tests = c("capitals",
"centroids","equal",
"gbif", "institutions",
"outliers", "seas",
"zeros"))
}
summary(test)
}
\seealso{
Other Wrapper functions:
\code{\link{clean_dataset}()},
\code{\link{clean_fossils}()}
}
\concept{Wrapper functions}
\keyword{Coordinate}
\keyword{cleaning}
\keyword{wrapper}
|
## These functions calculate the inverse of a matrix and store it in / read
## it from a cache, so that even if the inverse is required multiple times,
## the time-consuming calculation has to be done only once.
## makeCacheMatrix checks, if a matrix (can but dont have to be the inverse)
## is already available in cache and if so returns it
## the function as well allows to store a matrix in cache (the global environment)
## Build a cache-aware matrix wrapper. The returned list exposes four
## closures that share state through this function's environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # stored matrix changed: cache is now stale
  }
  list(
    set = replace_matrix,
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve computes the inverse of the matrix object returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## cacheSolve retrieves the inverse from the cache.
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## On a cache hit the stored inverse is returned immediately (with a
## message); otherwise the inverse is computed with solve(), cached for
## subsequent calls, and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | steATgh/ProgrammingAssignment2 | R | false | false | 1,336 | r | ## Put comments here that give an overall description of what your
## functions allow to calculate the inverse of a matrix as well as store it in and read it from the cache
## even if required multiple time, the time consuming calculation has to be done only once
## makeCacheMatrix checks, if a matrix (can but dont have to be the inverse)
## is already available in cache and if so returns it
## the function as well allows to store a matrix in cache (the global environment)
# makeCacheMatrix: build a list of four closures that share state via the
# enclosing environment. 'x' holds the matrix, 'inv' holds the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
# inv is the cached inverse; NULL means "not computed yet"
inv <- NULL
set <- function(y) {
x <<- y
# storing a new matrix invalidates any previously cached inverse
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
# expose the four accessors under the fixed names expected by cacheSolve()
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve computes the inverse of the matrix object returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# cache hit: reuse the previously computed inverse and skip solve()
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
# cache miss: compute the inverse once (extra args forwarded to solve())
# and store it so later calls can return it from the cache
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
# Generated fuzz/regression test case (valgrind run): call
# meteor:::ET0_Makkink with a pathological argument list (empty Rs vector,
# extreme relh/temp magnitudes) and inspect the structure of the result.
# NOTE(review): requires the 'meteor' package; presumably the goal is only
# that the call does not crash -- confirm against the test harness.
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79637634659844e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615845853-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 736 | r | testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79637634659844e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
#@since 1.8.2
require tk
require tkextlib/iwidgets
= class Tk::Iwidgets::Buttonbox < Tk::Itk::Widget
include TkItemConfigMethod
== Instance Methods
--- add(tag = nil, keys = {})
#@todo
--- default(idx)
#@todo
--- delete(idx)
#@todo
--- hide(idx)
#@todo
--- index(idx)
#@todo
--- insert(idx, tag = nil, keys = {})
#@todo
--- invoke(idx = nil)
#@todo
--- show(idx)
#@todo
--- tagid(tagOrId)
#@todo
#@end
| /target/rubydoc/refm/api/src/tkextlib/iwidgets/buttonbox.rd | no_license | nacyot/omegat-rurima-ruby | R | false | false | 436 | rd | #@since 1.8.2
require tk
require tkextlib/iwidgets
= class Tk::Iwidgets::Buttonbox < Tk::Itk::Widget
include TkItemConfigMethod
== Instance Methods
--- add(tag = nil, keys = {})
#@todo
--- default(idx)
#@todo
--- delete(idx)
#@todo
--- hide(idx)
#@todo
--- index(idx)
#@todo
--- insert(idx, tag = nil, keys = {})
#@todo
--- invoke(idx = nil)
#@todo
--- show(idx)
#@todo
--- tagid(tagOrId)
#@todo
#@end
|
#' Sample occurrences in a virtual species distribution
#'
#' @description
#' This function samples occurrences/records (presence only or presence-absence)
#' within a species distribution, either randomly or with a sampling bias.
#' The sampling bias can be defined manually or with a set of predefined
#' biases.
#'
#' @param x a \code{rasterLayer} object or the output list from
#' \code{generateSpFromFun}, \code{generateSpFromPCA}, \code{generateRandomSp},
#' \code{convertToPA}
#' or \code{limitDistribution}
#' The raster must contain values of 0 or 1 (or NA).
#' @param n an integer. The number of occurrence points / records to sample.
#' @param type \code{"presence only"} or \code{"presence-absence"}. The type of
#' occurrence points to sample.
#' @param extract.probability \code{TRUE} or \code{FALSE}. If \code{TRUE}, then
#' true probability at sampled locations will also be extracted
#' @param sampling.area a character string, a \code{polygon} or an \code{extent}
#' object.
#' The area in which the sampling will take place. See details.
#' @param detection.probability a numeric value between 0 and 1, corresponding
#' to the probability of detection of the species. See details.
#' @param correct.by.suitability \code{TRUE} or \code{FALSE}. If \code{TRUE},
#' then the probability of detection will be weighted by the suitability, such
#' that cells with lower suitabilities will further decrease the chance that
#' the species is detected when sampled. NOTE: this will NOT increase
#' likelihood of samplings in areas of high suitability. In this case look for
#' argument weights.
#' @param error.probability a numeric value between 0 and 1. Probability to
#' attribute an erroneous presence (False Positive) in cells where the species
#' is actually absent.
#' @param bias \code{"no.bias"}, \code{"country"}, \code{"region"},
#' \code{"extent"}, \code{"polygon"} or \code{"manual"}. The method used to
#' generate a sampling bias: see details.
#' @param bias.strength a positive numeric value. The strength of the bias to be
#' applied in \code{area} (as a multiplier). Above 1, \code{area} will be
#' oversampled. Below 1, \code{area} will be undersampled.
#' @param bias.area \code{NULL}, a character string, a \code{polygon} or an
#' \code{extent} object. The area in which the sampling will be biased: see
#' details. If \code{NULL} and \code{bias = "extent"}, then you will be asked to
#' draw an extent on the map.
#' @param weights \code{NULL} or a raster layer. Only used if
#' \code{bias = "manual"}. The raster of bias weights to be applied to the
#' sampling of occurrences. Higher weights mean a higher probability of
#' sampling. For example, species suitability raster can be entered here to
#' increase likelihood of sampling occurrences in areas with high suitability.
#' @param sample.prevalence \code{NULL} or a numeric value between 0 and 1.
#' Only useful if \code{type = "presence-absence"}. Defines the sample
#' prevalence, i.e. the proportion of presences sampled. Note that the
#' probabilities of detection and error are applied AFTER this parameter,
#' so the final sample prevalence may differ if you apply probabilities
#' of detection and/or error
#' @param replacement \code{TRUE} or \code{FALSE}. If \code{TRUE}, multiple
#' samples can occur in the same cell. Can be useful to mimic real datasets
#' where samplings can be duplicated or repeated in time.
#' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the sampled
#' occurrence points will be plotted.
#' @details
#' \href{http://borisleroy.com/virtualspecies_tutorial/07-sampleoccurrences.html}{Online tutorial for this function}
#'
#'
#'
#'
#' \bold{How the function works:}
#'
#' The function randomly selects \code{n} cells in which samples occur. If a
#' \code{bias} is chosen, then the selection of these cells will be biased
#' according to the type and strength of bias chosen. If the sampling is of
#' \code{type "presence only"}, then only cells where the species is present
#' will be chosen. If the sampling is of \code{type "presence-absence"}, then
#' all non-NA cells can be chosen.
#'
#' The function then samples the species inside the chosen cells. In cells
#' where the species is present the species will always be sampled unless
#' the parameter \code{detection.probability} is lower than 1. In that case the
#' species will be sampled with the associated probability of detection.
#'
#' In cells where the species is absent (in case of a \code{"presence-absence"}
#' sampling), the function will always assign absence unless
#' \code{error.probability} is greater than 0. In that case, the species can be
#' found present with the associated probability of error. Note that this step
#' happens AFTER the detection step. Hence, in cells where the species is
#' present but not detected, it can still be sampled due to a sampling error.
#'
#' \bold{How to restrict the sampling area:}
#'
#' Use the argument \code{sampling.area}:
#' \itemize{
#' \item{Provide the name (s) (or a combination of names) of country(ies),
#' region(s) or continent(s).
#' Examples:
#' \itemize{
#' \item{\code{sampling.area = "Africa"}}
#' \item{\code{sampling.area = c("Africa", "North America", "France")}}
#' }}
#' \item{Provide a polygon (\code{SpatialPolygons} or
#' \code{SpatialPolygonsDataFrame} of package \code{sp})}
#' \item{Provide an \code{extent} object}
#' }
#'
#' \bold{How the sampling bias works:}
#'
#' The argument \code{bias.strength} indicates the strength of the bias.
#' For example, a value of 50 will result in 50 times more samples within the
#' \code{bias.area} than outside.
#' Conversely, a value of 0.5 will result in half less samples within the
#' \code{bias.area} than outside.
#'
#' \bold{How to choose where the sampling is biased:}
#'
#' You can choose to bias the sampling in:
#' \enumerate{
#' \item{a particular country, region or continent (assuming your raster has
#' the WGS84 projection):
#'
#' Set the argument
#' \code{bias} to \code{"country"}, \code{"region"} or
#' \code{"continent"}, and provide the name(s) of the associated countries,
#' regions or continents to \code{bias.area} (see examples).
#'
#' List of possible \code{bias.area} names:
#' \itemize{
#' \item{Countries: type \code{levels(getMap()@@data$SOVEREIGNT)} in the
#' console}
#' \item{Regions: "Africa", "Antarctica", "Asia", "Australia", "Europe",
#' "North America", "South America"}
#' \item{Continents: "Africa", "Antarctica", "Australia", "Eurasia",
#' "North America", "South America"}}
#' }
#' \item{a polygon:
#'
#' Set \code{bias} to \code{"polygon"}, and provide your
#' polygon to \code{area}.
#' }
#' \item{an extent object:
#'
#' Set \code{bias} to \code{"extent"}, and either provide your
#' extent object to \code{bias.area}, or leave it \code{NULL} to draw an extent
#' on the map.}
#' }
#'
#' Otherwise you can enter a raster of sampling probability. It can be useful
#' if you want to increase likelihood of samplings in areas of high
#' suitability (simply enter the suitability raster in weights; see examples
#' below),
#' or if you want to define sampling biases manually, \emph{e.g.} to to create
#' biases along roads. In that case you have to provide to \code{weights} a
#' raster layer in which each cell contains the probability to be sampled.
#'
#' The \code{\link{.Random.seed}} and \code{\link{RNGkind}} are stored as
#' \code{\link{attributes}} when the function is called, and can be used to
#' reproduce the results as shown in the examples (though
#' it is preferable to set the seed with \code{\link{set.seed}} before calling
#' \code{sampleOccurrences()} and to then use the same value in
#' \code{\link{set.seed}} to reproduce results later. Note that
#' reproducing the sampling will only work if the same original distribution map
#' is used.
#'
#' @return a \code{list} with 8 elements:
#' \itemize{
#' \item{\code{type}: type of occurrence sampled (presence-absences or
#' presence-only)}
#' \item{\code{sample.points}: data.frame containing the coordinates of
#' samples, true and sampled observations (i.e, 1, 0 or NA), and, if asked, the true
#' environmental suitability in sampled locations}
#' \item{\code{detection.probability}: the chosen probability of detection of
#' the virtual species}
#' \item{\code{error.probability}: the chosen probability to assign presence
#' in cells where the species is absent}
#' \item{\code{bias}: if a bias was chosen, then the type of bias and the
#' associated \code{area} will be included.}
#' \item{\code{replacement}: indicates whether multiple samples could occur
#' in the same cells}
#' \item{\code{original.distribution.raster}: the distribution raster from
#' which samples were drawn}
#' \item{\code{sample.plot}: a recorded plot showing the sampled points
#' overlaying the original distribution.}
#' }
#' @note
#' Setting \code{sample.prevalence} may at least partly
#' override \code{bias}, e.g. if \code{bias} is specified with \code{extent} to
#' an area that contains no presences, but sample prevalence is set to > 0,
#' then cells outside of the biased sampling extent will be sampled until
#' the number of presences required by \code{sample.prevalence} are obtained,
#' after which the sampling of absences will proceed according to the specified
#' bias.
#' @export
#' @import raster
#' @importFrom graphics points
#' @importFrom utils installed.packages
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#' Willson Gaul \email{wgaul@@hotmail.com}
#'
#' with help from C. N. Meynard, C. Bellard & F. Courchamp
#' @examples
#' # Create an example stack with six environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#' nrow = 100, ncol = 100, byrow = TRUE)
#' env <- stack(raster(a * dnorm(1:100, 50, sd = 25)),
#' raster(a * 1:100),
#' raster(a * logisticFun(1:100, alpha = 10, beta = 70)),
#' raster(t(a)),
#' raster(exp(a)),
#' raster(log(a)))
#' names(env) <- paste("Var", 1:6, sep = "")
#'
#' # More than 6 variables: by default a PCA approach will be used
#' sp <- generateRandomSp(env, niche.breadth = "wide")
#'
#' # Sampling of 25 presences
#' sampleOccurrences(sp, n = 25)
#'
#' # Sampling of 30 presences and absences
#' sampleOccurrences(sp, n = 30, type = "presence-absence")
#'
#'
#' # Reducing of the probability of detection
#' sampleOccurrences(sp, n = 30, type = "presence-absence",
#' detection.probability = 0.5)
#'
#' # Further reducing in relation to environmental suitability
#' sampleOccurrences(sp, n = 30, type = "presence-absence",
#' detection.probability = 0.5,
#' correct.by.suitability = TRUE)
#'
#'
#' # Creating sampling errors (far too much)
#' sampleOccurrences(sp, n = 30, type = "presence-absence",
#' error.probability = 0.5)
#'
#' # Introducing a sampling bias (oversampling)
#' biased.area <- extent(0.5, 0.7, 0.6, 0.8)
#' sampleOccurrences(sp, n = 50, type = "presence-absence",
#' bias = "extent",
#' bias.area = biased.area)
#' # Showing the area in which the sampling is biased
#' plot(biased.area, add = TRUE)
#'
#' # Introducing a sampling bias (no sampling at all in the chosen area)
#' biased.area <- extent(0.5, 0.7, 0.6, 0.8)
#' sampleOccurrences(sp, n = 50, type = "presence-absence",
#' bias = "extent",
#' bias.strength = 0,
#' bias.area = biased.area)
#' # Showing the area in which the sampling is biased
#' plot(biased.area, add = TRUE)
#' samps <- sampleOccurrences(sp, n = 50,
#' bias = "manual",
#' weights = sp$suitab.raster)
#' plot(sp$suitab.raster)
#' points(samps$sample.points[, c("x", "y")])
#'
#' # Create a sampling bias so that more presences are sampled in areas with
#' # higher suitability
#'
#'
#'
#'
#' # Reproduce sampling based on the saved .Random.seed from a previous result
#' samps <- sampleOccurrences(sp, n = 100,
#' type = "presence-absence",
#' detection.probability = 0.7,
#' bias = "extent",
#' bias.strength = 50,
#' bias.area = biased.area)
#' # Reset the random seed using the value saved in the attributes
#' .Random.seed <- attr(samps, "seed")
#' reproduced_samps <- sampleOccurrences(sp, n = 100,
#' type = "presence-absence",
#' detection.probability = 0.7,
#' bias = "extent",
#' bias.strength = 50,
#' bias.area = biased.area)
#' identical(samps$sample.points, reproduced_samps$sample.points)
sampleOccurrences <- function(x, n,
                              type = "presence only",
                              extract.probability = FALSE,
                              sampling.area = NULL,
                              detection.probability = 1,
                              correct.by.suitability = FALSE,
                              error.probability = 0,
                              bias = "no.bias",
                              bias.strength = 50,
                              bias.area = NULL,
                              weights = NULL,
                              sample.prevalence = NULL,
                              replacement = FALSE,
                              plot = TRUE)
{
  # Output skeleton; the remaining elements are filled in as sampling
  # proceeds (bias description, sampled points, prevalence, plot).
  results <- list(type = type,
                  detection.probability = list(
                    detection.probability = detection.probability,
                    correct.by.suitability = correct.by.suitability),
                  error.probability = error.probability,
                  bias = NULL,
                  replacement = replacement,
                  original.distribution.raster = NULL,
                  sample.plot = NULL)

  # Save the RNG state as attributes so that a sampling can be reproduced
  # later (see examples in the documentation above).
  if(is.null(.Random.seed)) {stats::runif(1)} # initialize random seed if there is none
  attr(results, "RNGkind") <- RNGkind()
  attr(results, "seed") <- .Random.seed

  # Retrieve the presence/absence raster from x ------------------------------
  if(inherits(x, "virtualspecies"))
  {
    # Prefer the occupied area (distribution limited by dispersal etc.);
    # fall back on the raw presence/absence raster.
    if(inherits(x$occupied.area, "RasterLayer"))
    {
      sp.raster <- x$occupied.area
    } else if(inherits(x$pa.raster, "RasterLayer"))
    {
      sp.raster <- x$pa.raster
    } else stop("x must be:\n- a raster layer object\nor\n- the output list from
   functions generateRandomSp(), convertToPA() or
   limitDistribution()")
  } else if (inherits(x, "RasterLayer"))
  {
    sp.raster <- x
    if(extract.probability)
    {
      # True probability of occurrence only exists on virtualspecies objects
      stop("Cannot extract probability when x is not a virtualspecies object. Set
  extract.probability = FALSE")
    }
  } else stop("x must be:\n- a raster layer object\nor\n- the output list from
   functions generateRandomSp(), convertToPA() or
   limitDistribution()")

  # The raster must be a genuine P/A raster: values in [0, 1] (or NA)
  if(sp.raster@data@max > 1 | sp.raster@data@min < 0)
  {
    stop("There are values above 1 or below 0 in your presence/absence raster.
         Please make sure that the provided raster is a correct P/A raster and not a suitability raster.")
  }

  results$original.distribution.raster <- original.raster <- sp.raster

  if(!is.null(sample.prevalence))
  {
    if(sample.prevalence < 0 | sample.prevalence > 1)
    {
      stop("Sample prevalence must be a numeric between 0 and 1")
    }
  }

  # Restrict the sampling area if requested ----------------------------------
  if(!is.null(sampling.area))
  {
    if(is.character(sampling.area))
    {
      worldmap <- rworldmap::getMap()
      if (any(!(sampling.area %in% c(levels(worldmap@data$SOVEREIGNT),
                                     levels(worldmap@data$REGION),
                                     levels(worldmap@data$continent)))))
      {
        stop("The choosen sampling.area is incorrectly spelled.\n Type 'levels(getMap()@data$SOVEREIGNT)', 'levels(worldmap@data$REGION)' and levels(worldmap@data$continent) to obtain valid names.")
      }
      sampling.area <- worldmap[which(
        worldmap@data$SOVEREIGNT %in% sampling.area |
          worldmap@data$REGION %in% sampling.area |
          worldmap@data$continent %in% sampling.area), ]
    } else if(!(inherits(sampling.area, c("SpatialPolygons",
                                          "SpatialPolygonsDataFrame",
                                          "Extent"))))
    {
      stop("Please provide to sampling.area either \n
- the names of countries, region and/or continents in which to sample\n
- a SpatialPolygons or SpatialPolygonsDataFrame\n
- an extent\n
 in which the sampling will take place")
    }
    # Cells outside the sampling area become NA and can never be sampled
    sample.area.raster1 <- rasterize(sampling.area,
                                     sp.raster,
                                     field = 1,
                                     background = NA,
                                     silent = TRUE)
    sp.raster <- sp.raster * sample.area.raster1
  }

  if(correct.by.suitability)
  {
    # Weighting detection by suitability requires access to the
    # suitability raster, which only virtualspecies objects carry.
    if(!(inherits(x, "virtualspecies")) | !("suitab.raster" %in% names(x)))
    {
      stop("If you choose to weight the probability of detection by the suitability of the species (i.e., correct.by.suitability = TRUE),
         then you need to provide an appropriate virtual species containing a suitability raster to x.")
    }
  }

  if(!is.numeric(detection.probability) | detection.probability > 1 |
     detection.probability < 0)
  {
    stop("detection.probability must be a numeric value between 0 and 1")
  }

  if(!is.numeric(error.probability) | error.probability > 1 |
     error.probability < 0)
  {
    stop("error.probability must be a numeric value between 0 and 1")
  }

  if(length(bias) > 1)
  {
    stop('Only one bias can be applied at a time')
  }

  if (!(bias %in% c("no.bias", "country", "region", "continent", "extent",
                    "polygon", "manual")))
  {
    stop('Argument bias must be one of : "no.bias", "country", "region",
       "continent", "extent", "polygon", "manual"')
  }

  if(!is.numeric(bias.strength) & bias != "no.bias")
  {
    stop("Please provide a numeric value for bias.strength")
  }

  # Validate geographic bias areas against the rworldmap reference -----------
  if (bias %in% c("country", "region", "continent"))
  {
    if(!("rworldmap" %in% rownames(installed.packages())))
    {
      stop('You need to install the package "rworldmap" in order to use bias =
         "region" or bias = "country"')
    }
    worldmap <- rworldmap::getMap()

    if(bias == "country")
    {
      if (any(!(bias.area %in% levels(worldmap@data$SOVEREIGNT))))
      {
        stop("country name(s) must be correctly spelled. Type
           'levels(getMap()@data$SOVEREIGNT)' to obtain valid names.")
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    } else if (bias == "region")
    {
      if (any(!(bias.area %in% levels(worldmap@data$REGION))))
      {
        stop(paste("region name(s) must be correctly spelled, according to one
                 of the following : ",
                   paste(levels(worldmap@data$REGION), collapse = ", "),
                   sep = "\n"))
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    } else if (bias == "continent")
    {
      if (any(!(bias.area %in% levels(worldmap@data$continent))))
      {
        stop(paste("region name(s) must be correctly spelled,
                 according to one of the following : ",
                   paste(levels(worldmap@data$continent), collapse = ", "),
                   sep = "\n"))
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    }
  }

  if (bias == "polygon") # Projections are not checked here. Perhaps we should
    # add projection check between raster & polygon in the future?
    # This is especially important given that randomPoints weights samplings by
    # the cell area (because cells closer to the equator are larger)
  {
    if(!(inherits(bias.area, c("SpatialPolygons",
                               "SpatialPolygonsDataFrame"))))
    {
      stop("If you choose bias = 'polygon', please provide a polygon of class
         SpatialPolygons or SpatialPolygonsDataFrame to argument bias.area")
    }
    warning("Polygon projection is not checked. Please make sure you have the
          same projections between your polygon and your presence-absence
          raster")
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  }

  if (bias == "extent")
  {
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  }

  # Define the raster of candidate cells for sampling ------------------------
  if(type == "presence-absence")
  {
    sample.raster <- sp.raster
    # Set all non-NA cells to 1 so that they are included in p/a sampling
    sample.raster[!is.na(sample.raster)] <- 1
  } else if (type == "presence only")
  {
    # Presence-only: only presence cells (value 1) carry sampling weight
    sample.raster <- sp.raster
  } else stop("type must either be 'presence only' or 'presence-absence'")

  # Build the bias raster: weights multiply each cell's sampling probability
  if (bias == "manual")
  {
    if(!(inherits(weights, "RasterLayer")))
    {
      stop("You must provide a raster layer of weights (to argument weights)
         if you choose bias == 'manual'")
    }
    bias.raster <- weights
    results$bias <- list(bias = bias,
                         bias.strength = "Defined by raster weights",
                         weights = weights)
  } else
  {
    bias.raster <- sample.raster
  }

  if(bias == "country")
  {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap@data$SOVEREIGNT %in% bias.area), ],
      bias.raster,
      field = bias.strength,
      background = 1,
      silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "region")
  {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap@data$REGION %in% bias.area), ],
      bias.raster,
      field = bias.strength,
      background = 1,
      silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "continent")
  {
    # Bug fix: select rows whose continent matches bias.area. The original
    # code tested levels(worldmap@data$continent) %in% bias.area, which
    # compares the factor levels (one element per level) rather than the
    # data column, so which() indexed the wrong rows of the world map.
    bias.raster1 <- rasterize(
      worldmap[which(worldmap@data$continent %in% bias.area), ],
      bias.raster,
      field = bias.strength,
      background = 1,
      silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "extent")
  {
    if(!(inherits(bias.area, "Extent")))
    {
      message("No object of class extent provided: click twice on the map to draw the extent in which presence points will be sampled")
      plot(sp.raster)
      bias.area <- drawExtent(show = TRUE)
    }
    bias.raster <- bias.raster * rasterize(bias.area, sp.raster,
                                           field = bias.strength,
                                           background = 1)
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  } else if(bias == "polygon")
  {
    bias.raster1 <- rasterize(bias.area,
                              bias.raster,
                              field = bias.strength,
                              background = 1,
                              silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  }

  # Draw the sample locations ------------------------------------------------
  if(bias != "no.bias")
  {
    if(type == "presence only")
    {
      # Bug fix: the number of erroneous samples must be drawn from
      # Binomial(n, error.probability). The original hard-coded size = 50,
      # ignoring the requested sample size (compare the unbiased branch
      # below, which correctly uses size = n).
      number.errors <- stats::rbinom(n = 1, size = n, prob = error.probability)
      sample.points <- .randomPoints(sample.raster * bias.raster,
                                     n = number.errors,
                                     prob = TRUE, tryf = 1,
                                     replaceCells = replacement)
      sample.points <- rbind(sample.points,
                             .randomPoints(sample.raster * bias.raster,
                                           n = n - number.errors,
                                           prob = TRUE, tryf = 1,
                                           replaceCells = replacement))
    } else
    {
      if(is.null(sample.prevalence))
      {
        sample.points <- .randomPoints(sample.raster * bias.raster, n = n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
      } else
      {
        if(replacement)
        {
          message("Argument replacement = TRUE implies that sample prevalence
  may not necessarily be respected in geographical space. Sample
  prevalence will be respected, but multiple samples can occur
  in the same cell so that spatial prevalence will be different.
")
        }
        # Sample presences and absences separately so that the requested
        # sample prevalence is honoured.
        tmp1 <- sample.raster
        tmp1[sp.raster != 1] <- NA
        sample.points <- .randomPoints(tmp1 * bias.raster,
                                       n = sample.prevalence * n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
        tmp1 <- sample.raster
        tmp1[sp.raster != 0] <- NA
        sample.points <- rbind(sample.points,
                               .randomPoints(tmp1 * bias.raster,
                                             n = (1 - sample.prevalence) * n,
                                             prob = TRUE, tryf = 1,
                                             replaceCells = replacement))
        rm(tmp1)
      }
    }
  } else
  {
    if(type == "presence only")
    {
      # Unbiased presence-only: a random share of samples are "errors"
      number.errors <- stats::rbinom(n = 1, size = n, prob = error.probability)
      sample.points <- .randomPoints(sample.raster, n = number.errors,
                                     prob = TRUE, tryf = 1,
                                     replaceCells = replacement)
      sample.points <- rbind(sample.points,
                             .randomPoints(sample.raster, n = n - number.errors,
                                           prob = TRUE, tryf = 1,
                                           replaceCells = replacement))
    } else
    {
      if(is.null(sample.prevalence))
      {
        sample.points <- .randomPoints(sample.raster, n = n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
      } else
      {
        tmp1 <- sample.raster
        tmp1[sp.raster != 1] <- NA
        sample.points <- .randomPoints(tmp1, n = sample.prevalence * n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
        tmp1 <- sample.raster
        tmp1[sp.raster != 0] <- NA
        sample.points <- rbind(sample.points,
                               .randomPoints(tmp1,
                                             n = (1 - sample.prevalence) * n,
                                             prob = TRUE, tryf = 1,
                                             replaceCells = replacement))
        rm(tmp1)
      }
    }
  }

  # Apply detection probability and error probability to sampled cells -------
  if(type == "presence only")
  {
    sample.points <- data.frame(sample.points,
                                Real = extract(sp.raster, sample.points),
                                Observed = sample(
                                  c(NA, 1),
                                  size = nrow(sample.points),
                                  prob = c(1 - detection.probability,
                                           detection.probability),
                                  replace = TRUE))
  } else if(type == "presence-absence")
  {
    sample.points <- data.frame(sample.points,
                                Real = extract(sp.raster, sample.points))

    if(correct.by.suitability)
    {
      suitabs <- extract(x$suitab.raster, sample.points[, c("x", "y")])
    } else { suitabs <- rep(1, nrow(sample.points)) }

    sample.points$Observed <- NA
    if(correct.by.suitability)
    {
      # Per-point detection probability weighted by local suitability
      sample.points$Observed[which(sample.points$Real == 1)] <-
        sapply(detection.probability * suitabs[
          which(sample.points$Real == 1)
        ],
        function(y)
        {
          sample(c(0, 1),
                 size = 1,
                 prob = c(1 - y, y))
        })
    } else
    {
      sample.points$Observed[which(sample.points$Real == 1)] <-
        sample(c(0, 1), size = length(which(sample.points$Real == 1)),
               prob = c(1 - detection.probability, detection.probability),
               replace = TRUE)
    }
    # NOTE: error is applied AFTER detection, so an undetected presence can
    # still end up "observed" through a false positive.
    sample.points$Observed[which(sample.points$Real == 0 |
                                   sample.points$Observed == 0)] <-
      sample(c(0, 1), size = length(which(sample.points$Real == 0 |
                                            sample.points$Observed == 0)),
             prob = c(1 - error.probability, error.probability),
             replace = TRUE)
  }

  if(plot)
  {
    plot(original.raster)
    # par(new = TRUE)
    if(type == "presence only")
    {
      points(sample.points[, c("x", "y")], pch = 16, cex = .5)
    } else
    {
      points(sample.points[sample.points$Observed == 1, c("x", "y")],
             pch = 16, cex = .8)
      # par(new = TRUE)
      points(sample.points[sample.points$Observed == 0, c("x", "y")],
             pch = 1, cex = .8)
    }
    results$sample.plot <- grDevices::recordPlot()
  }

  if(extract.probability)
  {
    sample.points <- data.frame(sample.points,
                                true.probability = extract(x$probability.of.occurrence,
                                                           sample.points[, c("x", "y")]))
  }

  results$sample.points <- sample.points

  if(type == "presence-absence")
  {
    true.prev <- length(sample.points$Real[which(
      sample.points$Real == 1)]) / nrow(sample.points)
    obs.prev <- length(sample.points$Real[which(
      sample.points$Observed == 1)]) / nrow(sample.points)

    results$sample.prevalence <- c(true.sample.prevalence = true.prev,
                                   observed.sample.prevalence = obs.prev)
  }

  class(results) <- append("VSSampledPoints", class(results))
  return(results)
}
#'
#' @description
#' This function samples occurrences/records (presence only or presence-absence)
#' within a species distribution, either randomly or with a sampling bias.
#' The sampling bias can be defined manually or with a set of predefined
#' biases.
#'
#' @param x a \code{rasterLayer} object or the output list from
#' \code{generateSpFromFun}, \code{generateSpFromPCA}, \code{generateRandomSp},
#' \code{convertToPA}
#' or \code{limitDistribution}
#' The raster must contain values of 0 or 1 (or NA).
#' @param n an integer. The number of occurrence points / records to sample.
#' @param type \code{"presence only"} or \code{"presence-absence"}. The type of
#' occurrence points to sample.
#' @param extract.probability \code{TRUE} or \code{FALSE}. If \code{TRUE}, then
#' true probability at sampled locations will also be extracted
#' @param sampling.area a character string, a \code{polygon} or an \code{extent}
#' object.
#' The area in which the sampling will take place. See details.
#' @param detection.probability a numeric value between 0 and 1, corresponding
#' to the probability of detection of the species. See details.
#' @param correct.by.suitability \code{TRUE} or \code{FALSE}. If \code{TRUE},
#' then the probability of detection will be weighted by the suitability, such
#' that cells with lower suitabilities will further decrease the chance that
#' the species is detected when sampled. NOTE: this will NOT increase
#' likelihood of samplings in areas of high suitability. In this case look for
#' argument weights.
#' @param error.probability \code{TRUE} or \code{FALSE}. Probability to
#' attribute an erroneous presence (False Positive) in cells where the species
#' is actually absent.
#' @param bias \code{"no.bias"}, \code{"country"}, \code{"region"},
#' \code{"extent"}, \code{"polygon"} or \code{"manual"}. The method used to
#' generate a sampling bias: see details.
#' @param bias.strength a positive numeric value. The strength of the bias to be
#' applied in \code{area} (as a multiplier). Above 1, \code{area} will be
#' oversampled. Below 1, \code{area} will be undersampled.
#' @param bias.area \code{NULL}, a character string, a \code{polygon} or an
#' \code{extent} object. The area in which the sampling will be biased: see
#' details. If \code{NULL} and \code{bias = "extent"}, then you will be asked to
#' draw an extent on the map.
#' @param weights \code{NULL} or a raster layer. Only used if
#' \code{bias = "manual"}. The raster of bias weights to be applied to the
#' sampling of occurrences. Higher weights mean a higher probability of
#' sampling. For example, species suitability raster can be entered here to
#' increase likelihood of sampling occurrences in areas with high suitability.
#' @param sample.prevalence \code{NULL} or a numeric value between 0 and 1.
#' Only useful if \code{type = "presence-absence"}. Defines the sample
#' prevalence, i.e. the proportion of presences sampled. Note that the
#' probabilities of detection and error are applied AFTER this parameter,
#' so the final sample prevalence may differ if you apply probabilities
#' of detection and/or error
#' @param replacement \code{TRUE} or \code{FALSE}. If \code{TRUE}, multiple
#' samples can occur in the same cell. Can be useful to mimic real datasets
#' where samplings can be duplicated or repeated in time.
#' @param plot \code{TRUE} or \code{FALSE}. If \code{TRUE}, the sampled
#' occurrence points will be plotted.
#' @details
#' \href{http://borisleroy.com/virtualspecies_tutorial/07-sampleoccurrences.html}{Online tutorial for this function}
#'
#'
#'
#'
#' \bold{How the function works:}
#'
#' The function randomly selects \code{n} cells in which samples occur. If a
#' \code{bias} is chosen, then the selection of these cells will be biased
#' according to the type and strength of bias chosen. If the sampling is of
#' \code{type "presence only"}, then only cells where the species is present
#' will be chosen. If the sampling is of \code{type "presence-absence"}, then
#' all non-NA cells can be chosen.
#'
#' The function then samples the species inside the chosen cells. In cells
#' where the species is present the species will always be sampled unless
#' the parameter \code{detection.probability} is lower than 1. In that case the
#' species will be sampled with the associated probability of detection.
#'
#' In cells where the species is absent (in case of a \code{"presence-absence"}
#' sampling), the function will always assign absence unless
#' \code{error.probability} is greater than 0. In that case, the species can be
#' found present with the associated probability of error. Note that this step
#' happens AFTER the detection step. Hence, in cells where the species is
#' present but not detected, it can still be sampled due to a sampling error.
#'
#' \bold{How to restrict the sampling area:}
#'
#' Use the argument \code{sampling.area}:
#' \itemize{
#' \item{Provide the name(s) (or a combination of names) of country(ies),
#' region(s) or continent(s).
#' Examples:
#' \itemize{
#' \item{\code{sampling.area = "Africa"}}
#' \item{\code{sampling.area = c("Africa", "North America", "France")}}
#' }}
#' \item{Provide a polygon (\code{SpatialPolygons} or
#' \code{SpatialPolygonsDataFrame} of package \code{sp})}
#' \item{Provide an \code{extent} object}
#' }
#'
#' \bold{How the sampling bias works:}
#'
#' The argument \code{bias.strength} indicates the strength of the bias.
#' For example, a value of 50 will result in 50 times more samples within the
#' \code{bias.area} than outside.
#' Conversely, a value of 0.5 will result in half less samples within the
#' \code{bias.area} than outside.
#'
#' \bold{How to choose where the sampling is biased:}
#'
#' You can choose to bias the sampling in:
#' \enumerate{
#' \item{a particular country, region or continent (assuming your raster has
#' the WGS84 projection):
#'
#' Set the argument
#' \code{bias} to \code{"country"}, \code{"region"} or
#' \code{"continent"}, and provide the name(s) of the associated countries,
#' regions or continents to \code{bias.area} (see examples).
#'
#' List of possible \code{bias.area} names:
#' \itemize{
#' \item{Countries: type \code{levels(getMap()@@data$SOVEREIGNT)} in the
#' console}
#' \item{Regions: "Africa", "Antarctica", "Asia", "Australia", "Europe",
#' "North America", "South America"}
#' \item{Continents: "Africa", "Antarctica", "Australia", "Eurasia",
#' "North America", "South America"}}
#' }
#' \item{a polygon:
#'
#' Set \code{bias} to \code{"polygon"}, and provide your
#' polygon to \code{area}.
#' }
#' \item{an extent object:
#'
#' Set \code{bias} to \code{"extent"}, and either provide your
#' extent object to \code{bias.area}, or leave it \code{NULL} to draw an extent
#' on the map.}
#' }
#'
#' Otherwise you can enter a raster of sampling probability. It can be useful
#' if you want to increase likelihood of samplings in areas of high
#' suitability (simply enter the suitability raster in weights; see examples
#' below),
#' or if you want to define sampling biases manually, \emph{e.g.} to to create
#' biases along roads. In that case you have to provide to \code{weights} a
#' raster layer in which each cell contains the probability to be sampled.
#'
#' The \code{\link{.Random.seed}} and \code{\link{RNGkind}} are stored as
#' \code{\link{attributes}} when the function is called, and can be used to
#' reproduce the results as shown in the examples (though
#' it is preferable to set the seed with \code{\link{set.seed}} before calling
#' \code{sampleOccurrences()} and to then use the same value in
#' \code{\link{set.seed}} to reproduce results later). Note that
#' reproducing the sampling will only work if the same original distribution map
#' is used.
#'
#' @return a \code{list} with 8 elements:
#' \itemize{
#' \item{\code{type}: type of occurrence sampled (presence-absences or
#' presence-only)}
#' \item{\code{sample.points}: data.frame containing the coordinates of
#' samples, true and sampled observations (i.e, 1, 0 or NA), and, if asked, the true
#' environmental suitability in sampled locations}
#' \item{\code{detection.probability}: the chosen probability of detection of
#' the virtual species}
#' \item{\code{error.probability}: the chosen probability to assign presence
#' in cells where the species is absent}
#' \item{\code{bias}: if a bias was chosen, then the type of bias and the
#' associated \code{area} will be included.}
#' \item{\code{replacement}: indicates whether multiple samples could occur
#' in the same cells}
#' \item{\code{original.distribution.raster}: the distribution raster from
#' which samples were drawn}
#' \item{\code{sample.plot}: a recorded plot showing the sampled points
#' overlaying the original distribution.}
#' }
#' @note
#' Setting \code{sample.prevalence} may at least partly
#' override \code{bias}, e.g. if \code{bias} is specified with \code{extent} to
#' an area that contains no presences, but sample prevalence is set to > 0,
#' then cells outside of the biased sampling extent will be sampled until
#' the number of presences required by \code{sample.prevalence} are obtained,
#' after which the sampling of absences will proceed according to the specified
#' bias.
#' @export
#' @import raster
#' @importFrom graphics points
#' @importFrom utils installed.packages
#' @author
#' Boris Leroy \email{leroy.boris@@gmail.com}
#' Willson Gaul \email{wgaul@@hotmail.com}
#'
#' with help from C. N. Meynard, C. Bellard & F. Courchamp
#' @examples
#' # Create an example stack with six environmental variables
#' a <- matrix(rep(dnorm(1:100, 50, sd = 25)),
#' nrow = 100, ncol = 100, byrow = TRUE)
#' env <- stack(raster(a * dnorm(1:100, 50, sd = 25)),
#' raster(a * 1:100),
#' raster(a * logisticFun(1:100, alpha = 10, beta = 70)),
#' raster(t(a)),
#' raster(exp(a)),
#' raster(log(a)))
#' names(env) <- paste("Var", 1:6, sep = "")
#'
#' # More than 6 variables: by default a PCA approach will be used
#' sp <- generateRandomSp(env, niche.breadth = "wide")
#'
#' # Sampling of 25 presences
#' sampleOccurrences(sp, n = 25)
#'
#' # Sampling of 30 presences and absences
#' sampleOccurrences(sp, n = 30, type = "presence-absence")
#'
#'
#' # Reducing of the probability of detection
#' sampleOccurrences(sp, n = 30, type = "presence-absence",
#' detection.probability = 0.5)
#'
#' # Further reducing in relation to environmental suitability
#' sampleOccurrences(sp, n = 30, type = "presence-absence",
#' detection.probability = 0.5,
#' correct.by.suitability = TRUE)
#'
#'
#' # Creating sampling errors (far too much)
#' sampleOccurrences(sp, n = 30, type = "presence-absence",
#' error.probability = 0.5)
#'
#' # Introducing a sampling bias (oversampling)
#' biased.area <- extent(0.5, 0.7, 0.6, 0.8)
#' sampleOccurrences(sp, n = 50, type = "presence-absence",
#' bias = "extent",
#' bias.area = biased.area)
#' # Showing the area in which the sampling is biased
#' plot(biased.area, add = TRUE)
#'
#' # Introducing a sampling bias (no sampling at all in the chosen area)
#' biased.area <- extent(0.5, 0.7, 0.6, 0.8)
#' sampleOccurrences(sp, n = 50, type = "presence-absence",
#' bias = "extent",
#' bias.strength = 0,
#' bias.area = biased.area)
#' # Showing the area in which the sampling is biased
#' plot(biased.area, add = TRUE)
#' samps <- sampleOccurrences(sp, n = 50,
#' bias = "manual",
#' weights = sp$suitab.raster)
#' plot(sp$suitab.raster)
#' points(samps$sample.points[, c("x", "y")])
#'
#' # Create a sampling bias so that more presences are sampled in areas with
#' # higher suitability
#'
#'
#'
#'
#' # Reproduce sampling based on the saved .Random.seed from a previous result
#' samps <- sampleOccurrences(sp, n = 100,
#' type = "presence-absence",
#' detection.probability = 0.7,
#' bias = "extent",
#' bias.strength = 50,
#' bias.area = biased.area)
#' # Reset the random seed using the value saved in the attributes
#' .Random.seed <- attr(samps, "seed")
#' reproduced_samps <- sampleOccurrences(sp, n = 100,
#' type = "presence-absence",
#' detection.probability = 0.7,
#' bias = "extent",
#' bias.strength = 50,
#' bias.area = biased.area)
#' identical(samps$sample.points, reproduced_samps$sample.points)
sampleOccurrences <- function(x, n,
                              type = "presence only",
                              extract.probability = FALSE,
                              sampling.area = NULL,
                              detection.probability = 1,
                              correct.by.suitability = FALSE,
                              error.probability = 0,
                              bias = "no.bias",
                              bias.strength = 50,
                              bias.area = NULL,
                              weights = NULL,
                              sample.prevalence = NULL,
                              replacement = FALSE,
                              plot = TRUE)
{
  # Output container; elements are filled in progressively below.
  results <- list(type = type,
                  detection.probability = list(
                    detection.probability = detection.probability,
                    correct.by.suitability = correct.by.suitability),
                  error.probability = error.probability,
                  bias = NULL,
                  replacement = replacement,
                  original.distribution.raster = NULL,
                  sample.plot = NULL)
  # Store the RNG state as attributes so that the exact same sampling can be
  # reproduced later (see examples in the documentation).
  if(is.null(.Random.seed)) {stats::runif(1)} # initialize random seed if there is none
  attr(results, "RNGkind") <- RNGkind()
  attr(results, "seed") <- .Random.seed

  # Retrieve the species presence/absence raster from x. For virtualspecies
  # objects, prefer the dispersal-limited distribution (occupied.area) over
  # the raw presence/absence raster (pa.raster).
  if(inherits(x, "virtualspecies"))
  {
    if(inherits(x$occupied.area, "RasterLayer"))
    {
      sp.raster <- x$occupied.area
    } else if(inherits(x$pa.raster, "RasterLayer"))
    {
      sp.raster <- x$pa.raster
    } else stop("x must be:\n- a raster layer object\nor\n- the output list from
   functions generateRandomSp(), convertToPA() or
   limitDistribution()")
  } else if (inherits(x, "RasterLayer"))
  {
    sp.raster <- x
    # True probability only exists inside a virtualspecies object
    if(extract.probability)
    {
      stop("Cannot extract probability when x is not a virtualspecies object. Set
 extract.probability = FALSE")
    }
  } else stop("x must be:\n- a raster layer object\nor\n- the output list from
   functions generateRandomSp(), convertToPA() or
   limitDistribution()")

  # The raster must be a genuine P/A raster (only 0, 1 or NA values)
  if(sp.raster@data@max > 1 | sp.raster@data@min < 0)
  {
    stop("There are values above 1 or below 0 in your presence/absence raster.
Please make sure that the provided raster is a correct P/A raster and not a suitability raster.")
  }

  # Keep an untouched copy for plotting and for the returned object
  results$original.distribution.raster <- original.raster <- sp.raster

  if(!is.null(sample.prevalence))
  {
    if(sample.prevalence < 0 | sample.prevalence > 1)
    {
      stop("Sample prevalence must be a numeric between 0 and 1")
    }
  }

  # Optionally restrict the sampling to a user-defined area: cells outside
  # the area are set to NA and can never be sampled.
  if(!is.null(sampling.area))
  {
    if(is.character(sampling.area))
    {
      worldmap <- rworldmap::getMap()
      if (any(!(sampling.area %in% c(levels(worldmap@data$SOVEREIGNT),
                                     levels(worldmap@data$REGION),
                                     levels(worldmap@data$continent)))))
      {
        stop("The choosen sampling.area is incorrectly spelled.\n Type 'levels(getMap()@data$SOVEREIGNT)', 'levels(worldmap@data$REGION)' and levels(worldmap@data$continent) to obtain valid names.")
      }
      sampling.area <- worldmap[which(
        worldmap@data$SOVEREIGNT %in% sampling.area |
          worldmap@data$REGION %in% sampling.area |
          worldmap@data$continent %in% sampling.area), ]
    } else if(!(inherits(sampling.area, c("SpatialPolygons",
                                          "SpatialPolygonsDataFrame",
                                          "Extent"))))
    {
      stop("Please provide to sampling.area either \n
- the names of countries, region and/or continents in which to sample\n
- a SpatialPolygons or SpatialPolygonsDataFrame\n
- an extent\n
 in which the sampling will take place")
    }
    sample.area.raster1 <- rasterize(sampling.area,
                                     sp.raster,
                                     field = 1,
                                     background = NA,
                                     silent = TRUE)
    sp.raster <- sp.raster * sample.area.raster1
  }

  # --- Argument validation -------------------------------------------------
  if(correct.by.suitability)
  {
    # Weighting detection by suitability requires a suitability raster
    if(!(inherits(x, "virtualspecies")) | !("suitab.raster" %in% names(x)))
    {
      stop("If you choose to weight the probability of detection by the suitability of the species (i.e., correct.by.suitability = TRUE),
then you need to provide an appropriate virtual species containing a suitability raster to x.")
    }
  }
  if(!is.numeric(detection.probability) | detection.probability > 1 |
     detection.probability < 0)
  {
    stop("detection.probability must be a numeric value between 0 and 1")
  }
  if(!is.numeric(error.probability) | error.probability > 1 |
     error.probability < 0)
  {
    stop("error.probability must be a numeric value between 0 and 1")
  }
  if(length(bias) > 1)
  {
    stop('Only one bias can be applied at a time')
  }
  if (!(bias %in% c("no.bias", "country", "region", "continent", "extent",
                    "polygon", "manual")))
  {
    stop('Argument bias must be one of : "no.bias", "country", "region",
       "continent", "extent", "polygon", "manual"')
  }
  if(!is.numeric(bias.strength) & bias != "no.bias")
  {
    stop("Please provide a numeric value for bias.strength")
  }

  # --- Bias definition -----------------------------------------------------
  if (bias %in% c("country", "region", "continent"))
  {
    # rworldmap provides the polygons of countries/regions/continents
    if(!("rworldmap" %in% rownames(installed.packages())))
    {
      stop('You need to install the package "rworldmap" in order to use bias =
         "region" or bias = "country"')
    }
    worldmap <- rworldmap::getMap()
    if(bias == "country")
    {
      if (any(!(bias.area %in% levels(worldmap@data$SOVEREIGNT))))
      {
        stop("country name(s) must be correctly spelled. Type
           'levels(getMap()@data$SOVEREIGNT)' to obtain valid names.")
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    } else if (bias == "region")
    {
      if (any(!(bias.area %in% levels(worldmap@data$REGION))))
      {
        stop(paste("region name(s) must be correctly spelled, according to one
                 of the following : ",
                   paste(levels(worldmap@data$REGION), collapse = ", "),
                   sep = "\n"))
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    } else if (bias == "continent")
    {
      # Fixed: error message previously said "region name(s)"
      if (any(!(bias.area %in% levels(worldmap@data$continent))))
      {
        stop(paste("continent name(s) must be correctly spelled,
                 according to one of the following : ",
                   paste(levels(worldmap@data$continent), collapse = ", "),
                   sep = "\n"))
      }
      results$bias <- list(bias = bias,
                           bias.strength = bias.strength,
                           bias.area = bias.area)
    }
  }
  if (bias == "polygon") # Projections are not checked here. Perhaps we should
    # add projection check between raster & polygon in the future?
    # This is especially important given that randomPoints weights samplings by
    # the cell area (because cells closer to the equator are larger)
  {
    if(!(inherits(bias.area, c("SpatialPolygons",
                               "SpatialPolygonsDataFrame"))))
    {
      stop("If you choose bias = 'polygon', please provide a polygon of class
         SpatialPolygons or SpatialPolygonsDataFrame to argument bias.area")
    }
    warning("Polygon projection is not checked. Please make sure you have the
          same projections between your polygon and your presence-absence
          raster")
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  }
  if (bias == "extent")
  {
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  }

  # --- Raster from which cells will be drawn -------------------------------
  if(type == "presence-absence")
  {
    sample.raster <- sp.raster
    # Set all non-NA cells to 1 so that they are included in p/a sampling
    sample.raster[!is.na(sample.raster)] <- 1
  } else if (type == "presence only")
  {
    # Presence-only sampling draws from presence cells only (0 cells have a
    # null weight in .randomPoints)
    sample.raster <- sp.raster
  } else stop("type must either be 'presence only' or 'presence-absence'")

  # --- Bias raster ---------------------------------------------------------
  if (bias == "manual")
  {
    if(!(inherits(weights, "RasterLayer")))
    {
      stop("You must provide a raster layer of weights (to argument weights)
         if you choose bias == 'manual'")
    }
    bias.raster <- weights
    results$bias <- list(bias = bias,
                         bias.strength = "Defined by raster weights",
                         weights = weights)
  } else
  {
    bias.raster <- sample.raster
  }
  # For the predefined biases, multiply weights by bias.strength inside the
  # biased area and by 1 outside.
  if(bias == "country")
  {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap@data$SOVEREIGNT %in% bias.area), ],
      bias.raster,
      field = bias.strength,
      background = 1,
      silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "region")
  {
    bias.raster1 <- rasterize(
      worldmap[which(worldmap@data$REGION %in% bias.area), ],
      bias.raster,
      field = bias.strength,
      background = 1,
      silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "continent")
  {
    # Fixed: previously subset with which(levels(worldmap@data$continent) %in%
    # bias.area), which indexes rows by level positions rather than selecting
    # the rows whose continent matches bias.area
    bias.raster1 <- rasterize(
      worldmap[which(worldmap@data$continent %in% bias.area), ],
      bias.raster,
      field = bias.strength,
      background = 1,
      silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  } else if(bias == "extent")
  {
    if(!(inherits(bias.area, "Extent")))
    {
      # Interactive fallback: let the user draw the biased extent on the map
      message("No object of class extent provided: click twice on the map to draw the extent in which presence points will be sampled")
      plot(sp.raster)
      bias.area <- drawExtent(show = TRUE)
    }
    bias.raster <- bias.raster * rasterize(bias.area, sp.raster,
                                           field = bias.strength,
                                           background = 1)
    results$bias <- list(bias = bias,
                         bias.strength = bias.strength,
                         bias.area = bias.area)
  } else if(bias == "polygon")
  {
    bias.raster1 <- rasterize(bias.area,
                              bias.raster,
                              field = bias.strength,
                              background = 1,
                              silent = TRUE)
    bias.raster <- bias.raster * bias.raster1
  }

  # --- Draw the sample cells -----------------------------------------------
  if(bias != "no.bias")
  {
    if(type == "presence only")
    {
      # Number of erroneous records follows a binomial draw over the n samples
      # Fixed: 'size' was hard-coded to 50; it must be n, as in the
      # unbiased branch below
      number.errors <- stats::rbinom(n = 1, size = n, prob = error.probability)
      sample.points <- .randomPoints(sample.raster * bias.raster,
                                     n = number.errors,
                                     prob = TRUE, tryf = 1,
                                     replaceCells = replacement)
      sample.points <- rbind(sample.points,
                             .randomPoints(sample.raster * bias.raster,
                                           n = n - number.errors,
                                           prob = TRUE, tryf = 1,
                                           replaceCells = replacement))
    } else
    {
      if(is.null(sample.prevalence))
      {
        sample.points <- .randomPoints(sample.raster * bias.raster, n = n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
      } else
      {
        if(replacement)
        {
          message("Argument replacement = TRUE implies that sample prevalence
              may not necessarily be respected in geographical space. Sample
              prevalence will be respected, but multiple samples can occur
              in the same cell so that spatial prevalence will be different.
              ")
        }
        # Sample presences and absences separately to enforce the requested
        # sample prevalence
        tmp1 <- sample.raster
        tmp1[sp.raster != 1] <- NA
        sample.points <- .randomPoints(tmp1 * bias.raster,
                                       n = sample.prevalence * n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
        tmp1 <- sample.raster
        tmp1[sp.raster != 0] <- NA
        sample.points <- rbind(sample.points,
                               .randomPoints(tmp1 * bias.raster,
                                             n = (1 - sample.prevalence) * n,
                                             prob = TRUE, tryf = 1,
                                             replaceCells = replacement))
        rm(tmp1)
      }
    }
  } else
  {
    if(type == "presence only")
    {
      number.errors <- stats::rbinom(n = 1, size = n, prob = error.probability)
      sample.points <- .randomPoints(sample.raster, n = number.errors,
                                     prob = TRUE, tryf = 1,
                                     replaceCells = replacement)
      sample.points <- rbind(sample.points,
                             .randomPoints(sample.raster, n = n - number.errors,
                                           prob = TRUE, tryf = 1,
                                           replaceCells = replacement))
    } else
    {
      if(is.null(sample.prevalence))
      {
        sample.points <- .randomPoints(sample.raster, n = n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
      } else
      {
        tmp1 <- sample.raster
        tmp1[sp.raster != 1] <- NA
        sample.points <- .randomPoints(tmp1, n = sample.prevalence * n,
                                       prob = TRUE, tryf = 1,
                                       replaceCells = replacement)
        tmp1 <- sample.raster
        tmp1[sp.raster != 0] <- NA
        sample.points <- rbind(sample.points,
                               .randomPoints(tmp1,
                                             n = (1 - sample.prevalence) * n,
                                             prob = TRUE, tryf = 1,
                                             replaceCells = replacement))
        rm(tmp1)
      }
    }
  }

  # --- Apply detection probability and sampling errors ---------------------
  if(type == "presence only")
  {
    # Non-detected presences are recorded as NA (not sampled)
    sample.points <- data.frame(sample.points,
                                Real = extract(sp.raster, sample.points),
                                Observed = sample(
                                  c(NA, 1),
                                  size = nrow(sample.points),
                                  prob = c(1 - detection.probability,
                                           detection.probability),
                                  replace = TRUE))
  } else if(type == "presence-absence")
  {
    sample.points <- data.frame(sample.points,
                                Real = extract(sp.raster, sample.points))
    if(correct.by.suitability)
    {
      suitabs <- extract(x$suitab.raster, sample.points[, c("x", "y")])
    } else { suitabs <- rep(1, nrow(sample.points)) }
    sample.points$Observed <- NA
    if(correct.by.suitability)
    {
      # Per-point detection probability = detection.probability * suitability
      sample.points$Observed[which(sample.points$Real == 1)] <-
        sapply(detection.probability * suitabs[
          which(sample.points$Real == 1)
        ],
        function(y)
        {
          sample(c(0, 1),
                 size = 1,
                 prob = c(1 - y, y))
        })
    } else
    {
      sample.points$Observed[which(sample.points$Real == 1)] <-
        sample(c(0, 1), size = length(which(sample.points$Real == 1)),
               prob = c(1 - detection.probability, detection.probability),
               replace = TRUE)
    }
    # Sampling errors (false presences) are applied AFTER the detection step,
    # so undetected presences can still end up recorded as present
    sample.points$Observed[which(sample.points$Real == 0 |
                                   sample.points$Observed == 0)] <-
      sample(c(0, 1), size = length(which(sample.points$Real == 0 |
                                            sample.points$Observed == 0)),
             prob = c(1 - error.probability, error.probability),
             replace = TRUE)
  }

  # --- Plot ----------------------------------------------------------------
  if(plot)
  {
    plot(original.raster)
    if(type == "presence only")
    {
      points(sample.points[, c("x", "y")], pch = 16, cex = .5)
    } else
    {
      # Filled circles = observed presences, open circles = observed absences
      points(sample.points[sample.points$Observed == 1, c("x", "y")],
             pch = 16, cex = .8)
      points(sample.points[sample.points$Observed == 0, c("x", "y")],
             pch = 1, cex = .8)
    }
    results$sample.plot <- grDevices::recordPlot()
  }

  # Optionally extract the true probability of occurrence at sample locations
  if(extract.probability)
  {
    sample.points <- data.frame(sample.points,
                                true.probability = extract(x$probability.of.occurrence,
                                                           sample.points[, c("x", "y")]))
  }
  results$sample.points <- sample.points

  # Report the realised (true and observed) sample prevalences
  if(type == "presence-absence")
  {
    true.prev <- length(sample.points$Real[which(
      sample.points$Real == 1)]) / nrow(sample.points)
    obs.prev <- length(sample.points$Real[which(
      sample.points$Observed == 1)]) / nrow(sample.points)
    results$sample.prevalence <- c(true.sample.prevalence = true.prev,
                                   observed.sample.prevalence = obs.prev)
  }
  class(results) <- append("VSSampledPoints", class(results))
  return(results)
}
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.23517807264145e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615782753-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.23517807264145e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Invoke the package-internal sampling routine on the fuzzed argument list
# built above (this is an auto-generated fuzzing test case)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
# Print the structure of whatever the call returned
str(result)
#' Data frame containing "Metar" initials and descriptions
#'
#' @usage estacoes_metar()
#' @return The function returns a single data frame.
#' @author Renato Prado Siqueira \email{<rpradosiqueira@gmail.com>}
#' @seealso \code{\link{condicoes_tempo}}
#' @examples
#' estacoes_metar()
#'
#' @keywords weather forecast brazil
#' @importFrom magrittr %>%
#' @export
estacoes_metar <- function() {
  # Download the CPTEC/INPE page listing the METAR weather stations
  pagina <- xml2::read_html("http://servicos.cptec.inpe.br/XML/#req-estacoes-meteorologicas")
  # Pull the text of every cell of the "estacoes-metar" table
  celulas <- rvest::html_text(rvest::html_nodes(pagina, "#estacoes-metar td"))
  # The cells come in groups of three (initials, airport, state):
  # reshape them row-wise into a three-column data frame
  estacoes <- data.frame(matrix(celulas, byrow = TRUE, ncol = 3))
  names(estacoes) <- c("Sigla", "Aeroporto", "Estado")
  estacoes
}
| /R/estacoes_metar.R | no_license | mtodero/cptec | R | false | false | 704 | r | #' Data frame containing "Metar" initials and descriptions
#'
#' @usage estacoes_metar()
#' @return The function returns a sigle data frame.
#' @author Renato Prado Siqueira \email{<rpradosiqueira@gmail.com>}
#' @seealso \code{\link{condicoes_tempo}}
#' @examples
#' estacoes_metar()
#'
#' @keywords weather forecast brazil
#' @importFrom magrittr %>%
#' @export
estacoes_metar <- function() {
url1 <- xml2::read_html("http://servicos.cptec.inpe.br/XML/#req-estacoes-meteorologicas")
tabela <- url1 %>% rvest::html_nodes("#estacoes-metar td") %>% rvest::html_text()
tabela <- data.frame(matrix(tabela, byrow = TRUE, ncol = 3))
names(tabela) <- c("Sigla", "Aeroporto", "Estado")
tabela
}
|
library(caret)
library(caTools)
library(dplyr)
library(randomForest)
library(rattle)
library(readr)
library(ROCR)      # prediction()/performance() used below for AUC
library(rpart)
library(rpart.plot)
library(stringr)
# load gerber data (voter-turnout experiment)
setwd("C:/Users/Stephen/Desktop/R/mit_edx/unit4_cart")
list.files()
gerber <- read_csv("gerber.csv")
head(gerber)
glimpse(gerber)

# what proportion voted, overall and within each treatment group
gerber %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(hawthorne == 1) %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(civicduty == 1) %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(neighbors == 1) %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(self == 1) %>% summarize(pct_voting = sum(voting) / n())

# build logistic regression on the four treatment indicators
gerber_m1_logistic <- glm(voting ~ hawthorne + civicduty + neighbors + self, data = gerber, family = "binomial")
summary(gerber_m1_logistic)
names(gerber_m1_logistic)

# note that $fitted.values gives you probabilities, not log odds
head(gerber_m1_logistic$fitted.values)

# check accuracy using .3 threshold
gerber_m1_logistic_pred_prob <- predict(gerber_m1_logistic, newdata = gerber, type = "response")
# fixed: inspect the probabilities just computed (the original referenced
# gerber_m1_logistic_pred before that object was created)
head(gerber_m1_logistic_pred_prob)
summary(gerber_m1_logistic_pred_prob)
gerber_m1_logistic_pred <- ifelse(gerber_m1_logistic_pred_prob >= .3, 1, 0)
# confusionMatrix() requires factor inputs with matching levels
confusionMatrix(factor(gerber_m1_logistic_pred, levels = c(0, 1)),
                reference = factor(gerber$voting, levels = c(0, 1)))

# check accuracy using .5 threshold
gerber_m1_logistic_pred <- ifelse(gerber_m1_logistic_pred_prob >= .5, 1, 0)
confusionMatrix(factor(gerber_m1_logistic_pred, levels = c(0, 1)),
                reference = factor(gerber$voting, levels = c(0, 1)))

# baseline model: always predict the majority class
gerber %>% group_by(voting) %>% tally()
# not voting is the majority group
gerber %>% filter(voting == 0) %>% summarize(baseline_accuracy = n() / nrow(gerber))

# compute AUC of the thresholded predictions (prediction()/performance()
# come from the ROCR package)
gerber_m1_logistic_roc_pred <- prediction(gerber_m1_logistic_pred, gerber$voting)
gerber_m1_logistic_auc <- performance(gerber_m1_logistic_roc_pred, measure = "auc")
gerber_m1_logistic_auc@y.values

##############################
# build cart tree with default complexity parameter
gerber_m2_cart = rpart(voting ~ civicduty + hawthorne + self + neighbors, data = gerber)
gerber_m2_cart
summary(gerber_m2_cart)
prp(gerber_m2_cart)

# second cart tree with cp = 0 (fully grown, no pruning)
gerber_m3_cart <- rpart(voting ~ civicduty + hawthorne + self + neighbors, data = gerber, cp = 0.0)
gerber_m3_cart
summary(gerber_m3_cart)
prp(gerber_m3_cart)
fancyRpartPlot(gerber_m3_cart)

# third cart tree adding the sex variable
gerber_m4_cart <- rpart(voting ~ civicduty + hawthorne + self + neighbors + sex, data = gerber, cp = 0.0)
gerber_m4_cart
# fixed: summarise m4 (the original summarised m3 again by copy-paste)
summary(gerber_m4_cart)
prp(gerber_m4_cart)

##########################
# m5 cart with just the control indicator
gerber_m5_cart <- rpart(voting ~ control, data = gerber, cp = 0.0)
gerber_m5_cart
summary(gerber_m5_cart)
prp(gerber_m5_cart, digits = 6)
# absolute difference in voting pct between treatment and control
abs(.296638 - .34)

# m6 cart with control and sex
gerber_m6_cart <- rpart(voting ~ control + sex, data = gerber, cp = 0.0)
gerber_m6_cart
summary(gerber_m6_cart)
prp(gerber_m6_cart, digits = 6)

# which sex is affected more by treatment (treatment minus control rates
# read off the m6 tree leaves)
women_effect <- 0.3341757 - 0.2904558
men_effect <- 0.3458183 - 0.3027947
women_effect - men_effect

# logistic regression using sex and control
gerber_m7_logistic <- glm(voting ~ sex + control, data = gerber, family = "binomial")
gerber_m7_logistic
summary(gerber_m7_logistic)
# the four sex x control combinations
Possibilities = data.frame(sex=c(0,0,1,1),control=c(0,1,0,1))
predict(gerber_m7_logistic, newdata = Possibilities, type = "response")
# abs diff for women in control between m6 (tree) and m7 (logistic): m7 - m6
0.2908065 - 0.2904558

#################################
# m8 logistic model with a sex:control interaction term
gerber_m8_logistic <- glm(voting ~ sex + control + sex:control, data = gerber, family = "binomial")
gerber_m8_logistic
summary(gerber_m8_logistic)
# average probabilities for the sex/control categories
Possibilities
predict(gerber_m8_logistic, newdata = Possibilities, type = "response")
# with the interaction, the logistic model matches the tree exactly: m7 - m6
0.2904558 - 0.2904558
| /unit4_cart/unit4_cart_gerber.R | no_license | sdevine188/mit_edx | R | false | false | 3,977 | r | library(stringr)
library(readr)
library(dplyr)
library(caret)
library(caTools)
library(rpart)
library(rpart.plot)
library(rattle)
library(randomForest)
# load gerber data
setwd("C:/Users/Stephen/Desktop/R/mit_edx/unit4_cart")
list.files()
gerber <- read_csv("gerber.csv")
head(gerber)
glimpse(gerber)
# what proportion voted
gerber %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(hawthorne == 1) %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(civicduty == 1) %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(neighbors == 1) %>% summarize(pct_voting = sum(voting) / n())
gerber %>% filter(self == 1) %>% summarize(pct_voting = sum(voting) / n())
# build logistic regression
gerber_m1_logistic <- glm(voting ~ hawthorne + civicduty + neighbors + self, data = gerber, family = "binomial")
summary(gerber_m1_logistic)
names(gerber_m1_logistic)
# note that $fitted values gives you probabilities, not log odds
head(gerber_m1_logistic$fitted.values)
# check accuracry using .3 threshold
gerber_m1_logistic_pred_prob <- predict(gerber_m1_logistic, newdata = gerber, type = "response")
head(gerber_m1_logistic_pred)
summary(gerber_m1_logistic_pred_prob)
gerber_m1_logistic_pred <- ifelse(gerber_m1_logistic_pred_prob >= .3, 1, 0)
confusionMatrix(gerber_m1_logistic_pred, reference = gerber$voting)
# check accuracy using .5 threshold
gerber_m1_logistic_pred <- ifelse(gerber_m1_logistic_pred_prob >= .5, 1, 0)
confusionMatrix(gerber_m1_logistic_pred, reference = gerber$voting)
# baseline model
gerber %>% group_by(voting) %>% tally()
# not voting is majority group
gerber %>% filter(voting == 0) %>% summarize(baseline_accuracy = n() / nrow(gerber))
# compute auc
gerber_m1_logistic_roc_pred <- prediction(gerber_m1_logistic_pred, gerber$voting)
gerber_m1_logistic_auc <- performance(gerber_m1_logistic_roc_pred, measure = "auc")
gerber_m1_logistic_auc@y.values
##############################
# build cart tree
gerber_m2_cart = rpart(voting ~ civicduty + hawthorne + self + neighbors, data = gerber)
gerber_m2_cart
summary(gerber_m2_cart)
prp(gerber_m2_cart)
# try second cart tree
gerber_m3_cart <- rpart(voting ~ civicduty + hawthorne + self + neighbors, data = gerber, cp = 0.0)
gerber_m3_cart
summary(gerber_m3_cart)
prp(gerber_m3_cart)
fancyRpartPlot(gerber_m3_cart)
# third cart tree with sex variable
gerber_m4_cart <- rpart(voting ~ civicduty + hawthorne + self + neighbors + sex, data = gerber, cp = 0.0)
gerber_m4_cart
summary(gerber_m3_cart)
prp(gerber_m4_cart)
##########################
# create m5 cart with just control
gerber_m5_cart <- rpart(voting ~ control, data = gerber, cp = 0.0)
gerber_m5_cart
summary(gerber_m5_cart)
prp(gerber_m5_cart, digits = 6)
# absolute difference in pct btw treatment and control
# NOTE(review): .296638 and .34 are node probabilities hard-coded from the
# tree plot above -- re-check them if the data changes
abs(.296638 - .34)
# create m6 cart with just control and sex
gerber_m6_cart <- rpart(voting ~ control + sex, data = gerber, cp = 0.0)
gerber_m6_cart
summary(gerber_m6_cart)
prp(gerber_m6_cart, digits = 6)
# which sex is affected more by treatment
# treatment-vs-control differences per sex, constants read off the m6 tree
# women
women_effect <- 0.3341757 - 0.2904558
# men
men_effect <- 0.3458183 - 0.3027947
women_effect - men_effect
# build a logistic regression model using sex and control
gerber_m7_logistic <- glm(voting ~ sex + control, data = gerber, family = "binomial")
gerber_m7_logistic
summary(gerber_m7_logistic)
# all four sex/control combinations, used to get predicted probabilities
possibilities <- data.frame(sex = c(0, 0, 1, 1), control = c(0, 1, 0, 1))
predict(gerber_m7_logistic, newdata = possibilities, type = "response")
# abs diff for women control using m6 and m7
# m7 - m6
0.2908065 - 0.2904558
#################################
# build m8 logistic model with interaction term
gerber_m8_logistic <- glm(voting ~ sex + control + sex:control, data = gerber, family = "binomial")
gerber_m8_logistic
summary(gerber_m8_logistic)
# get average probabilities for sex/control categories
possibilities
predict(gerber_m8_logistic, newdata = possibilities, type = "response")
# m7 - m6
# NOTE(review): both constants are identical (result is 0) -- confirm these
# are the intended m8 vs m6 values
0.2904558 - 0.2904558
|
/select_stock.R | no_license | leeyaowen/stockTW | R | false | false | 3,196 | r | ||
#############################
# Sergio I Garcia-Rios
# Lab: Binary models
# sub: data wrangling
############################
library(tidyverse)
library(haven)
library(margins)
library(magrittr)
# Read the ANES 2016 time-series Stata file and strip haven value labels
# so the V-coded columns behave as plain numerics
df <- read_dta("anes_timeseries_2016.dta")
df_tidy <- zap_labels(df)
# First recode pass: outcome (Trump vote), gender, income, age, ideology.
# Negative V-codes are treated as missing -- assumed to be ANES
# missing-data codes; TODO confirm against the 2016 codebook
df_tidy <-
  df_tidy %>%
  mutate(
    # V161031 == 2 is coded as a Trump vote; everything else (incl. NA) -> 0
    vote_trump = case_when(
      V161031 == 2 ~ 1,
      TRUE ~ 0),
    male = case_when(
      V161342 == 2 ~ 0,
      V161342 == 1 ~ 1,
      TRUE ~ NA_real_),
    income = recode(V161361x,
                    `-9` = NA_real_,
                    `-5` = NA_real_),
    # V161267c is used as birth year (age = 2016 - year below)
    year = recode(V161267c,
                  `-9` = NA_real_,
                  `-8` = NA_real_),
    age =
      2016 - year,
    # 99 is parked at the scale midpoint (4) and then refined using the
    # follow-up item V161127
    conservative = recode(V161126,
                          `-9` = NA_real_,
                          `-8` = NA_real_,
                          `99` = 4),
    conservative = case_when(
      conservative == 4 & V161127 == 1 ~ 3,
      conservative == 4 & V161127 == 2 ~ 5,
      TRUE ~ as.double(conservative)))
# Sanity-check the recodes against the raw columns
df_tidy %>%
  count(age)
df %>%
  count(V161342)
# Second recode pass: repeats the first pass and adds `white` and
# `education`. The earlier variables are recomputed from the raw
# V-columns, so re-running this block is harmless.
df_tidy <-
  df_tidy %>%
  mutate(
    vote_trump = case_when(
      V161031 == 2 ~ 1,
      TRUE ~ 0),
    male = case_when(
      V161342 == 2 ~ 0,
      V161342 == 1 ~ 1,
      TRUE ~ NA_real_),
    income = recode(V161361x,
                    `-9` = NA_real_,
                    `-5` = NA_real_),
    year = recode(V161267c,
                  `-9` = NA_real_,
                  `-8` = NA_real_),
    age =
      2016 - year,
    conservative = recode(V161126,
                          `-9` = NA_real_,
                          `-8` = NA_real_,
                          `99` = 4),
    conservative = case_when(
      conservative == 4 & V161127 == 1 ~ 3,
      conservative == 4 & V161127 == 2 ~ 5,
      TRUE ~ as.double(conservative)),
    white = recode( V161310a,
                    `-9` = NA_real_,
                    `-8` = NA_real_),
    # 95 is also dropped for education -- assumed "other"; TODO confirm
    education = recode(V161270,
                       `-9` = NA_real_,
                       `-8` = NA_real_,
                       `95` = NA_real_))
df_tidy %>%
  count(age)
df %>%
  count(V161361x)
# Baseline logistic model: Trump vote on demographics and ideology
model <- glm(vote_trump ~ male + income + age + conservative +
               education + white,
             family = "binomial", data = df_tidy)
summary(model)
# Third recode pass: repeats the demographics and adds the attitudinal
# scales (sexism, opposition to affirmative action, economic and
# immigration anxiety)
df_tidy <-
  df_tidy %>%
  mutate(
    vote_trump = case_when(
      V161031 == 2 ~ 1,
      TRUE ~ 0),
    male = case_when(
      V161342 == 2 ~ 0,
      V161342 == 1 ~ 1,
      TRUE ~ NA_real_),
    income = recode(V161361x,
                    `-9` = NA_real_,
                    `-5` = NA_real_),
    year = recode(V161267c,
                  `-9` = NA_real_,
                  `-8` = NA_real_),
    age =
      2016 - year,
    education = recode(V161270,
                       `-9` = NA_real_,
                       `-8` = NA_real_,
                       `95` = NA_real_),
    white = recode( V161310a,
                    `-9` = NA_real_,
                    `-8` = NA_real_),
    conservative = recode(V161126,
                          `-9` = NA_real_,
                          `-8` = NA_real_,
                          `99` = 4),
    conservative = case_when(
      conservative == 4 & V161127 == 1 ~ 3,
      conservative == 4 & V161127 == 2 ~ 5,
      TRUE ~ as.double(conservative)),
    sexism = recode(V161508,
                    `-9` = NA_real_,
                    `-5` = NA_real_),
    # reverse-code so higher = more sexist -- assumes V161508 runs 1-5;
    # TODO confirm codebook
    sexism = abs(6-sexism),
    # reorder response categories so the scale is monotone in opposition
    aff_action_oppose = recode(V161204,
                               `-9` = NA_real_,
                               `-8` = NA_real_,
                               `1` = 1,
                               `2` = 3,
                               `3` = 2),
    econ_anx = recode(V162134,
                      `-9` = NA_real_,
                      `-8` = NA_real_,
                      `-7` = NA_real_,
                      `-6` = NA_real_),
    immig_anx = recode(V162157,
                       `-9` = NA_real_,
                       `-8` = NA_real_,
                       `-7` = NA_real_,
                       `-6` = NA_real_))
# Inspect the new variables, then refit the baseline model
df_tidy
df_tidy %>%
  count(immig_anx)
df %>%
  count(V162157)
model <- glm(vote_trump ~ male + income + age + conservative + education + white,
             family = "binomial", data = df_tidy)
summary(model)
# Full additive model including the attitudinal scales
summary(model <-
          glm(vote_trump ~
                male +
                income +
                age +
                conservative +
                education +
                sexism +
                aff_action_oppose +
                immig_anx +
                econ_anx +
                white,
              family = "binomial", data = df_tidy))
# Average marginal effects (margins pkg); change = "minmax" gives the
# effect of moving each covariate from its minimum to its maximum
ame <- summary(margins(model))
ame_minmax <- summary(margins(model, change = "minmax"))
# Coefficient-style plot of the min-max AMEs with their CIs
ggplot(ame_minmax, aes(x = factor, y = AME,
                       ymin = lower,
                       ymax = upper)) +
  geom_pointrange() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  coord_flip() +
  theme_bw()
# (recomputed; identical to the call above)
ame_minmax <- summary(margins(model, change = "minmax"))
# Interaction model: ideology x immigration anxiety
summary(model <-
          glm(vote_trump ~
                male +
                income +
                age +
                conservative*immig_anx +
                education +
                white +
                sexism +
                aff_action_oppose +
                econ_anx,
              family = "binomial", data = df_tidy))
# Hypothetical profiles: sweep conservatism at low/high immigration
# anxiety with all other covariates held at their means; %$% exposes
# df_tidy's columns to expand.grid
xhyp <- df_tidy %$%
  expand.grid(
    conservative = seq(1,7, .01),
    immig_anx = c(1,5),
    male = mean(male, na.rm = T),
    income = mean(income, na.rm = T),
    age = mean(age, na.rm = T),
    education = mean(education, na.rm = T),
    white = mean(white, na.rm = T),
    sexism = mean(sexism, na.rm = T),
    aff_action_oppose = mean(aff_action_oppose, na.rm = T),
    econ_anx = mean(econ_anx, na.rm = T)
  )
# Predicted probabilities with ~95% ribbons for the two anxiety levels
preds <- predict(model, xhyp, type = "response", se.fit = T)
preds_df <- cbind(xhyp, preds)
ggplot(preds_df, aes(x = conservative, y = fit,
                     fill = factor(immig_anx),
                     ymin = fit - (1.96*se.fit),
                     ymax = fit + (1.96*se.fit))) +
  geom_ribbon() +
  theme_bw()
# NOTE(review): the bare variable references below will error if the
# script is run top-to-bottom (these are columns of df, not objects in
# the global environment) -- they look like codebook reminders; consider
# commenting them out
V161508 #women appreciate
V161204 # aff action
V162134 # get ahead
V162157# num of immig
| /Labs/lab6/lab8.R | no_license | GarciaRios/govt_6029 | R | false | false | 6,271 | r | #############################
# Sergio I Garcia-Rios
# Lab: Bianary models
# sub: data wrangling
############################
library(tidyverse)
library(haven)
library(margins)
library(magrittr)
df <- read_dta("anes_timeseries_2016.dta")
df_tidy <- zap_labels(df)
df_tidy <-
df_tidy %>%
mutate(
vote_trump = case_when(
V161031 == 2 ~ 1,
TRUE ~ 0),
male = case_when(
V161342 == 2 ~ 0,
V161342 == 1 ~ 1,
TRUE ~ NA_real_),
income = recode(V161361x,
`-9` = NA_real_,
`-5` = NA_real_),
year = recode(V161267c,
`-9` = NA_real_,
`-8` = NA_real_),
age =
2016 - year,
conservative = recode(V161126,
`-9` = NA_real_,
`-8` = NA_real_,
`99` = 4),
conservative = case_when(
conservative == 4 & V161127 == 1 ~ 3,
conservative == 4 & V161127 == 2 ~ 5,
TRUE ~ as.double(conservative)))
df_tidy %>%
count(age)
df %>%
count(V161342)
df_tidy <-
df_tidy %>%
mutate(
vote_trump = case_when(
V161031 == 2 ~ 1,
TRUE ~ 0),
male = case_when(
V161342 == 2 ~ 0,
V161342 == 1 ~ 1,
TRUE ~ NA_real_),
income = recode(V161361x,
`-9` = NA_real_,
`-5` = NA_real_),
year = recode(V161267c,
`-9` = NA_real_,
`-8` = NA_real_),
age =
2016 - year,
conservative = recode(V161126,
`-9` = NA_real_,
`-8` = NA_real_,
`99` = 4),
conservative = case_when(
conservative == 4 & V161127 == 1 ~ 3,
conservative == 4 & V161127 == 2 ~ 5,
TRUE ~ as.double(conservative)),
white = recode( V161310a,
`-9` = NA_real_,
`-8` = NA_real_),
education = recode(V161270,
`-9` = NA_real_,
`-8` = NA_real_,
`95` = NA_real_))
df_tidy %>%
count(age)
df %>%
count(V161361x)
model <- glm(vote_trump ~ male + income + age + conservative +
education + white,
family = "binomial", data = df_tidy)
summary(model)
df_tidy <-
df_tidy %>%
mutate(
vote_trump = case_when(
V161031 == 2 ~ 1,
TRUE ~ 0),
male = case_when(
V161342 == 2 ~ 0,
V161342 == 1 ~ 1,
TRUE ~ NA_real_),
income = recode(V161361x,
`-9` = NA_real_,
`-5` = NA_real_),
year = recode(V161267c,
`-9` = NA_real_,
`-8` = NA_real_),
age =
2016 - year,
education = recode(V161270,
`-9` = NA_real_,
`-8` = NA_real_,
`95` = NA_real_),
white = recode( V161310a,
`-9` = NA_real_,
`-8` = NA_real_),
conservative = recode(V161126,
`-9` = NA_real_,
`-8` = NA_real_,
`99` = 4),
conservative = case_when(
conservative == 4 & V161127 == 1 ~ 3,
conservative == 4 & V161127 == 2 ~ 5,
TRUE ~ as.double(conservative)),
sexism = recode(V161508,
`-9` = NA_real_,
`-5` = NA_real_),
sexism = abs(6-sexism),
aff_action_oppose = recode(V161204,
`-9` = NA_real_,
`-8` = NA_real_,
`1` = 1,
`2` = 3,
`3` = 2),
econ_anx = recode(V162134,
`-9` = NA_real_,
`-8` = NA_real_,
`-7` = NA_real_,
`-6` = NA_real_),
immig_anx = recode(V162157,
`-9` = NA_real_,
`-8` = NA_real_,
`-7` = NA_real_,
`-6` = NA_real_))
df_tidy
df_tidy %>%
count(immig_anx)
df %>%
count(V162157)
model <- glm(vote_trump ~ male + income + age + conservative + education + white,
family = "binomial", data = df_tidy)
summary(model)
summary(model <-
glm(vote_trump ~
male +
income +
age +
conservative +
education +
sexism +
aff_action_oppose +
immig_anx +
econ_anx +
white,
family = "binomial", data = df_tidy))
ame <- summary(margins(model))
ame_minmax <- summary(margins(model, change = "minmax"))
ggplot(ame_minmax, aes(x = factor, y = AME,
ymin = lower,
ymax = upper)) +
geom_pointrange() +
geom_hline(yintercept = 0, linetype = "dashed") +
coord_flip() +
theme_bw()
ame_minmax <- summary(margins(model, change = "minmax"))
summary(model <-
glm(vote_trump ~
male +
income +
age +
conservative*immig_anx +
education +
white +
sexism +
aff_action_oppose +
econ_anx,
family = "binomial", data = df_tidy))
xhyp <- df_tidy %$%
expand.grid(
conservative = seq(1,7, .01),
immig_anx = c(1,5),
male = mean(male, na.rm = T),
income = mean(income, na.rm = T),
age = mean(age, na.rm = T),
education = mean(education, na.rm = T),
white = mean(white, na.rm = T),
sexism = mean(sexism, na.rm = T),
aff_action_oppose = mean(aff_action_oppose, na.rm = T),
econ_anx = mean(econ_anx, na.rm = T)
)
preds <- predict(model, xhyp, type = "response", se.fit = T)
preds_df <- cbind(xhyp, preds)
ggplot(preds_df, aes(x = conservative, y = fit,
fill = factor(immig_anx),
ymin = fit - (1.96*se.fit),
ymax = fit + (1.96*se.fit))) +
geom_ribbon() +
theme_bw()
V161508 #women appreciate
V161204 # aff action
V162134 # get ahead
V162157# num of immig
|
library(data.table)
library(ggplot2)
library(cowplot)
library(ggsci)
library(viridisLite)
library(gghalves)
library(ggridges)
library(pbapply)
library(extrafont)
library(Seurat)
library(ComplexUpset)
library(stringr)
library(RhpcBLASctl)
# Record the available BLAS/OpenMP processor counts, then pin both to a
# single thread so downstream linear algebra does not oversubscribe the CPU
blas_p = blas_get_num_procs()
blas_set_num_threads(1)
omp_p = omp_get_num_procs()
omp_set_num_threads(1)
# Darken one or more colors by dividing each RGB channel by `factor`
# (factor > 1 darkens; factor == 1 is the identity). Names on the input
# vector are carried over to the result.
darken <- function(color, factor=1.2){
  scaled_channels <- t(col2rgb(color) / factor)
  result <- rgb(scaled_channels, maxColorValue = 255)
  names(result) <- names(color)
  result
}
# Named hex palette keyed by cell line, used throughout the figure
cellline_colors = c("HepG2"="#e6194B",
                    "HT29"="#ffe119",
                    "A549"="#4363d8",
                    "Jurkat"="#42d4f4",
                    "REH"="#469990",
                    "KG1"="#dcbeff",
                    "BJ"="#800000",
                    "THP-1"="#000075")
# Darkened variants (used e.g. for point/outline colors)
cellline_dark_colors = darken(cellline_colors)
# Palettes keyed by library-prep method and by 3'/5' adapter type
method_colors = c("Sandberg"="#1b9e77",
                  "CleanTag"="#d95f02",
                  "Giraldez"="#7570b3",
                  "Jensen"="#e7298a",
                  "CATS"="#e6ab02",
                  "Faridani"="#66a61e",
                  "Shore"="#4daf4a")
adapter_3p_colors = c("CleanTag"="#fdc086",
                      "Sandberg"="#7fc97f",
                      "4N"="#beaed4",
                      "Rand"="#d6604d",
                      "CATS"="#ffff99")
adapter_5p_colors = c("C3"="#d9d9d9",
                      "Sandberg"="#7fc97f",
                      "4N"="#beaed4",
                      "CleanTag"="#fdc086",
                      "CleanTag UMI6"="#b2182b",
                      "Block"="#fb8072",
                      "CATS"="#ffff99")
# NOTE(review): despite the name, these keys are the cell lines above
top8_protocols = names(cellline_colors)
# Sample sheet; `Sample name` becomes a factor ordered by protocol group
metadata = fread("data/figure3_input/samples.csv")
metadata[, `Sample name`:=factor(`Sample name`, levels=metadata[order(Group, `Sample name`)]$`Sample name`)]
# Per-sample QC, mapping, and read-composition tables
cutadapt = fread("data/figure3_input/cutadapt_stats.csv")
star = fread("data/figure3_input/mapping_overview.csv")
read_compo = fread("data/figure3_input/overall_composition.detailed.raw.csv")
cutadapt_len_stats = fread("data/figure3_input/overall.too_short.len_stats.csv")
# Reads <= 3 nt after trimming are counted as adapter dimers, per sample
adapter_dimers = colSums(cutadapt_len_stats[Length <= 3][,2:ncol(cutadapt_len_stats)])
# One row per sample combining QC + mapping + composition columns
sample_stats = merge(
  merge(
    merge(cutadapt, metadata, by.y = "Sample name", by.x="Sample"),
    star, by="Sample"),
  read_compo, by="Sample")
# Fractions of the total sequenced reads (r_processed); the five
# categories are disjoint and sum to 1 per sample
sample_stats[, adapter_dimers_ratio:=(adapter_dimers[match(Sample, names(adapter_dimers))])/r_processed]
sample_stats[, read_lost_in_qc:=(r_processed-r_written)/r_processed-adapter_dimers_ratio]
sample_stats[, unmapped_reads:=(r_written-mapped_total)/r_processed]
sample_stats[, mapped_ratio:=(mapped_total - miRNA)/r_processed]
sample_stats[, mirna_ratio:=miRNA/r_processed]
######### sample stats table #############
# Supplementary data 5: per-sample read counts plus percent-of-total
# columns for each annotation category
supp_table8 = sample_stats[, c("Sample", "Group",
                               "r_processed", "r_written",
                               "uniquely_mapped", "multimapped", "mapped_total",
                               "miRNA", "protein_coding_gene",
                               "intergenic", "lncRNA",
                               "snoRNA_ensembl",
                               "coding_exons", "rRNA_ensembl", "GtRNAdb", "piRBase"
)]
supp_table8[, `miRNA (% of total)`:=miRNA/r_processed * 100]
supp_table8[, `protein coding gene (% of total)`:=protein_coding_gene/r_processed * 100]
supp_table8[, `intergenic (% of total)`:=intergenic/r_processed * 100]
supp_table8[, `lncRNA (% of total)`:=lncRNA/r_processed * 100]
supp_table8[, `snoRNA (% of total)`:=snoRNA_ensembl/r_processed * 100]
# NOTE(review): reads sample_stats$mapped_total (full column) inside the
# supp_table8 scope -- correct only while both tables keep identical row
# order, which holds here because supp_table8 was just subset from it
supp_table8[, `other mapped reads (% of total)`:=(sample_stats$mapped_total - miRNA - protein_coding_gene - intergenic - lncRNA - snoRNA_ensembl - GtRNAdb - rRNA_ensembl - coding_exons - piRBase) / r_processed * 100]
supp_table8[, `Uniquely mapped to hg38 (% of total)`:=uniquely_mapped / r_processed * 100]
supp_table8[, `Multimapped to hg38 (% of total)`:=multimapped / r_processed * 100]
supp_table8[, `Unmapped to hg38 (% of total)`:=(r_written - mapped_total) / r_processed * 100]
supp_table8[, `coding exons (% of total)`:=coding_exons/r_processed * 100]
supp_table8[, `rRNA (% of total)`:=rRNA_ensembl/r_processed * 100]
supp_table8[, `GtRNAdb (% of total)`:=GtRNAdb/r_processed * 100]
supp_table8[, `piRBase (% of total)`:=piRBase/r_processed * 100]
# Rename the raw-count columns and fix the final column order for export
setnames(supp_table8,
         c("Group", "r_processed", "r_written"),
         c("Protocol", "Total reads", "Cleaned reads"))
supp_table8 = supp_table8[, c("Sample", "Protocol",
                              "Total reads", "Cleaned reads",
                              "Uniquely mapped to hg38 (% of total)", "Multimapped to hg38 (% of total)", "Unmapped to hg38 (% of total)",
                              "miRNA (% of total)", "protein coding gene (% of total)", "intergenic (% of total)",
                              "lncRNA (% of total)", "snoRNA (% of total)", "GtRNAdb (% of total)", "rRNA (% of total)", "piRBase (% of total)", "coding exons (% of total)", "other mapped reads (% of total)")]
fwrite(supp_table8, "results/tables/supp_data5.csv", sep='\t')
# Long-format per-sample read-fate ratios for the stacked bar chart
sample_stats_plot_df = melt(sample_stats[, c("Sample", "adapter_dimers_ratio", "read_lost_in_qc", "unmapped_reads", "mapped_ratio", "mirna_ratio")], id.vars="Sample")
sample_stats_plot_df[, Group:=metadata$Group[match(Sample, metadata$`Sample name`)]]
# Mean +/- sd per protocol; ypos is the cumulative top of each stacked
# segment (segments stacked in reverse factor order) and anchors the
# one-sided sd whiskers
sample_stats_mean_plot_df = sample_stats_plot_df[, list(value=mean(value), sd=sd(value)), by=c("variable", "Group")]
sample_stats_mean_plot_df = sample_stats_mean_plot_df[order(variable, decreasing = TRUE)]
sample_stats_mean_plot_df[, ypos:=cumsum(value), by="Group"]
# Horizontal stacked bars (groups ordered by mean miRNA yield)
sample_stats_per_group_with_errorbars = ggplot(sample_stats_mean_plot_df, aes(x=factor(Group, levels=sample_stats_mean_plot_df[variable == "mirna_ratio"][order(value)]$Group),
                                                                              y=value, fill=variable)) +
  geom_col() +
  geom_errorbar(aes(ymax=ypos-sd, ymin=ypos-sd, color=variable), position="identity", width=0.5, show.legend = F) +
  geom_linerange(aes(ymin = ypos-sd, ymax = ypos, color=variable), show.legend=F) +
  xlab("") +
  scale_y_continuous(labels=scales::percent_format(), name="Total sequenced reads", expand = expansion(mult = c(0,0))) +
  coord_flip() +
  scale_fill_manual(values=c(read_lost_in_qc="#d3d3d3", adapter_dimers_ratio="#adadad", unmapped_reads="#3B4992FF", mapped_ratio="#008B45FF", mirna_ratio="#c51b8a"),
                    labels=c(read_lost_in_qc="Lost in QC", adapter_dimers_ratio="Adapter dimers", unmapped_reads="Unmapped", mapped_ratio="Mapped", mirna_ratio="miRNAs"),
                    name="",
                    guide=guide_legend(nrow=2)) +
  scale_color_manual(values=c(read_lost_in_qc=darken("#d3d3d3", 1.5), adapter_dimers_ratio=darken("#adadad", 1.5), unmapped_reads=darken("#3B4992FF"), mapped_ratio=darken("#008B45FF"), mirna_ratio=darken("#c51b8a")),
                     labels=c(read_lost_in_qc="Lost in QC", adapter_dimers_ratio="Adapter dimers", unmapped_reads="Unmapped", mapped_ratio="Mapped", mirna_ratio="miRNAs"),
                     name="",
                     guide=guide_legend(nrow=2)) +
  theme_cowplot(11) + theme(legend.position="bottom")
# RPM-normalised miRNA matrix; row key becomes "precursor|miRNA"
mir_expr = fread("data/figure3_input/mirna_quantification_rpmmm_norm.csv")
mir_expr[, miRNA:=paste(precursor, miRNA, sep='|')]
mir_expr[, precursor:=NULL]
# Number of miRNAs with non-zero expression in each sample
detected_mirs = data.frame(num_mirnas=colSums(mir_expr[, 2:ncol(mir_expr)] > 0))
detected_mirs$Group = metadata$Group[match(rownames(detected_mirs), metadata$`Sample name`)]
detected_mirs = setDT(detected_mirs)
# Half-point / half-boxplot of detected miRNAs, groups ordered by mean
detected_mirs_p = ggplot(detected_mirs, aes(x=factor(Group, levels=detected_mirs[, mean(num_mirnas), by="Group"][order(V1)]$Group), y=num_mirnas, color=Group, fill=Group)) +
  geom_half_point() +
  geom_half_boxplot(outlier.colour = NA) +
  theme_cowplot(11) +
  scale_y_continuous(breaks=scales::pretty_breaks(10)) +
  scale_fill_manual(values=cellline_colors) + scale_color_manual(values=cellline_dark_colors) +
  coord_flip() +
  xlab("") + ylab("Detected miRNAs") + theme(legend.position = "none")
# Raw counts -> standard Seurat normalise / scale / PCA / UMAP embedding
expr = fread("data/figure3_input/read_counts.csv")
expr_mat = as.matrix(expr[, 2:ncol(expr)])
rownames(expr_mat) = expr$Geneid
seu = CreateSeuratObject(counts=expr_mat,
                         meta.data=data.frame(metadata[match(colnames(expr_mat), `Sample name`)], row.names=colnames(expr_mat)))
seu = NormalizeData(seu, normalization.method = "LogNormalize", scale.factor = 10000)
seu = FindVariableFeatures(seu, selection.method = "vst", nfeatures = 2000)
all.genes = rownames(seu)
seu = ScaleData(seu, features = all.genes)
seu = RunPCA(seu, features = VariableFeatures(object = seu), npcs = 47)
seu = RunUMAP(seu, dims = 1:20, n.neighbors = 12, min.dist = 0.1, seed.use=42)
seu_umap = data.table(seu@reductions$umap@cell.embeddings, keep.rownames = TRUE)
# NOTE(review): metadata has a `Sample name` column, not `Sample` -- this
# appears to rely on `$` partial name matching; confirm, or use
# metadata$`Sample name` explicitly
seu_umap[, Protocol:=metadata$Group[match(rn, metadata$Sample)]]
# UMAP scatter coloured by cell line, legend inside the panel
seu_umap_plot = ggplot(seu_umap, aes(x=UMAP_1, y=UMAP_2, color=Protocol, label=rn)) +
  geom_point(size=1) +
  scale_color_manual(values=cellline_colors, guide=guide_legend(ncol=2), name="Cell line") +
  theme_cowplot(11) +
  theme(legend.position = c(0.01,0.75), axis.text = element_blank(), axis.ticks = element_blank()) +
  xlab("UMAP 1") + ylab("UMAP 2")
# Per-group sets of miRNAs detected in at least one of that group's samples
expressed_mirs_per_protocol = lapply(unique(metadata$Group), function(x) {
  samples = as.character(metadata[Group == x]$`Sample name`)
  mir_expr$miRNA[rowSums(mir_expr[, ..samples] > 0) > 0]
})
names(expressed_mirs_per_protocol) = unique(metadata$Group)
# Highlight each group-exclusive intersection in orange in the bar and
# matrix components of the upset plot
qlist_intersect = lapply(names(expressed_mirs_per_protocol), function(n){
  upset_query(intersect = n, fill="#fd8d3c", color="#fd8d3c", only_components=c('Common\nmiRNAs', 'intersections_matrix'))
})
# ComplexUpset plot of the 20 largest intersections
upset_p = upset(UpSetR::fromList(expressed_mirs_per_protocol), intersect=names(expressed_mirs_per_protocol),
                base_annotations = list(`Common\nmiRNAs`=intersection_size(counts=TRUE, text=list(angle=90, vjust=0.5, hjust=1, size=2), bar_number_threshold = 1)),
                queries=c(qlist_intersect),
                matrix=intersection_matrix(
                  geom=geom_point(
                    size=2
                  )
                ),
                themes=upset_default_themes(text=element_text(size=9)),
                name="",
                sort_sets="ascending",
                n_intersections=20,
                height_ratio = 1.8,
                width_ratio = 0.3
)
save_plot("results/figures/fig3f.pdf", upset_p, base_width = 100, base_height = 50, unit="mm")
### gsea according to decreasing avg. expression
# Convert deduplicated raw counts to per-sample percentages, average per
# group, and write each group's expressed miRNAs in decreasing order of
# mean expression as input lists for miEAA GSEA
expr.dedup = fread("data/figure3_input/mirna_quantification_dedup_raw_mirna.csv")
expr_perc = apply(expr.dedup[, 2:ncol(expr.dedup)], 2, function(e) e / sum(e) * 100)
rownames(expr_perc) = expr.dedup$miRNA
expr_perc_df = as.data.table(reshape2::melt(expr_perc))
expr_perc_df[, Group:=metadata$Group[match(Var2, metadata$`Sample name`)]]
mean_expr = expr_perc_df[, mean(value), by=c("Var1", "Group")][order(V1, decreasing=TRUE)]
dir.create("results/mieaa_input", showWarnings = F, recursive = T)
# FIX: loop variable renamed from `c`, which shadowed base::c() inside the loop
for(grp in unique(mean_expr$Group)){
  print(grp)
  writeLines(as.character(mean_expr[Group == grp & V1 > 0]$Var1), sprintf("results/mieaa_input/%s_gsea.txt", grp))
}
# Assemble figure 3: panels a+b on top, c plus an empty placeholder for d
# (panel d is added downstream) below, then export as A4-width PDF
fig3ab = plot_grid(sample_stats_per_group_with_errorbars, detected_mirs_p, labels = c("a", "b"))
fig3ctof = plot_grid(
  seu_umap_plot,
  NULL,
  ncol=2,
  labels=c("c", "d")
)
fig3atof = plot_grid(fig3ab, fig3ctof, ncol=1, rel_heights = c(1,1))
save_plot("results/figures/fig3.pdf", fig3atof, base_height = 100, base_width = 210, unit="mm")
# Write the per-panel source-data tables (renames mutate the data.tables
# in place, so this must run after the plots above are built)
setnames(sample_stats_mean_plot_df, c("Group", "value"), c("Protocol", "mean"))
fwrite(sample_stats_mean_plot_df[, c("variable", "Protocol", "mean", "sd"), with=F], "results/figures/fig3a.txt", sep='\t')
setnames(detected_mirs, "Group", "Protocol")
fwrite(detected_mirs, "results/figures/fig3b.txt", sep='\t')
setnames(seu_umap, c("rn", "UMAP_1", "UMAP_2"), c("Sample", "UMAP 1", "UMAP 2"))
fwrite(seu_umap, "results/figures/fig3c.txt", sep='\t')
fwrite(UpSetR::fromList(expressed_mirs_per_protocol), "results/figures/fig3d.txt", sep='\t')
| /src/create_figure3.R | permissive | CCB-SB/sc_mirna_seq_manuscript | R | false | false | 12,016 | r | library(data.table)
library(ggplot2)
library(cowplot)
library(ggsci)
library(viridisLite)
library(gghalves)
library(ggridges)
library(pbapply)
library(extrafont)
library(Seurat)
library(ComplexUpset)
library(stringr)
library(RhpcBLASctl)
blas_p = blas_get_num_procs()
blas_set_num_threads(1)
omp_p = omp_get_num_procs()
omp_set_num_threads(1)
darken <- function(color, factor=1.2){
col <- col2rgb(color)
col <- col/factor
col <- rgb(t(col), maxColorValue=255)
names(col) = names(color)
col
}
cellline_colors = c("HepG2"="#e6194B",
"HT29"="#ffe119",
"A549"="#4363d8",
"Jurkat"="#42d4f4",
"REH"="#469990",
"KG1"="#dcbeff",
"BJ"="#800000",
"THP-1"="#000075")
cellline_dark_colors = darken(cellline_colors)
method_colors = c("Sandberg"="#1b9e77",
"CleanTag"="#d95f02",
"Giraldez"="#7570b3",
"Jensen"="#e7298a",
"CATS"="#e6ab02",
"Faridani"="#66a61e",
"Shore"="#4daf4a")
adapter_3p_colors = c("CleanTag"="#fdc086",
"Sandberg"="#7fc97f",
"4N"="#beaed4",
"Rand"="#d6604d",
"CATS"="#ffff99")
adapter_5p_colors = c("C3"="#d9d9d9",
"Sandberg"="#7fc97f",
"4N"="#beaed4",
"CleanTag"="#fdc086",
"CleanTag UMI6"="#b2182b",
"Block"="#fb8072",
"CATS"="#ffff99")
top8_protocols = names(cellline_colors)
metadata = fread("data/figure3_input/samples.csv")
metadata[, `Sample name`:=factor(`Sample name`, levels=metadata[order(Group, `Sample name`)]$`Sample name`)]
cutadapt = fread("data/figure3_input/cutadapt_stats.csv")
star = fread("data/figure3_input/mapping_overview.csv")
read_compo = fread("data/figure3_input/overall_composition.detailed.raw.csv")
cutadapt_len_stats = fread("data/figure3_input/overall.too_short.len_stats.csv")
adapter_dimers = colSums(cutadapt_len_stats[Length <= 3][,2:ncol(cutadapt_len_stats)])
sample_stats = merge(
merge(
merge(cutadapt, metadata, by.y = "Sample name", by.x="Sample"),
star, by="Sample"),
read_compo, by="Sample")
sample_stats[, adapter_dimers_ratio:=(adapter_dimers[match(Sample, names(adapter_dimers))])/r_processed]
sample_stats[, read_lost_in_qc:=(r_processed-r_written)/r_processed-adapter_dimers_ratio]
sample_stats[, unmapped_reads:=(r_written-mapped_total)/r_processed]
sample_stats[, mapped_ratio:=(mapped_total - miRNA)/r_processed]
sample_stats[, mirna_ratio:=miRNA/r_processed]
######### sample stats table #############
supp_table8 = sample_stats[, c("Sample", "Group",
"r_processed", "r_written",
"uniquely_mapped", "multimapped", "mapped_total",
"miRNA", "protein_coding_gene",
"intergenic", "lncRNA",
"snoRNA_ensembl",
"coding_exons", "rRNA_ensembl", "GtRNAdb", "piRBase"
)]
supp_table8[, `miRNA (% of total)`:=miRNA/r_processed * 100]
supp_table8[, `protein coding gene (% of total)`:=protein_coding_gene/r_processed * 100]
supp_table8[, `intergenic (% of total)`:=intergenic/r_processed * 100]
supp_table8[, `lncRNA (% of total)`:=lncRNA/r_processed * 100]
supp_table8[, `snoRNA (% of total)`:=snoRNA_ensembl/r_processed * 100]
supp_table8[, `other mapped reads (% of total)`:=(sample_stats$mapped_total - miRNA - protein_coding_gene - intergenic - lncRNA - snoRNA_ensembl - GtRNAdb - rRNA_ensembl - coding_exons - piRBase) / r_processed * 100]
supp_table8[, `Uniquely mapped to hg38 (% of total)`:=uniquely_mapped / r_processed * 100]
supp_table8[, `Multimapped to hg38 (% of total)`:=multimapped / r_processed * 100]
supp_table8[, `Unmapped to hg38 (% of total)`:=(r_written - mapped_total) / r_processed * 100]
supp_table8[, `coding exons (% of total)`:=coding_exons/r_processed * 100]
supp_table8[, `rRNA (% of total)`:=rRNA_ensembl/r_processed * 100]
supp_table8[, `GtRNAdb (% of total)`:=GtRNAdb/r_processed * 100]
supp_table8[, `piRBase (% of total)`:=piRBase/r_processed * 100]
setnames(supp_table8,
c("Group", "r_processed", "r_written"),
c("Protocol", "Total reads", "Cleaned reads"))
supp_table8 = supp_table8[, c("Sample", "Protocol",
"Total reads", "Cleaned reads",
"Uniquely mapped to hg38 (% of total)", "Multimapped to hg38 (% of total)", "Unmapped to hg38 (% of total)",
"miRNA (% of total)", "protein coding gene (% of total)", "intergenic (% of total)",
"lncRNA (% of total)", "snoRNA (% of total)", "GtRNAdb (% of total)", "rRNA (% of total)", "piRBase (% of total)", "coding exons (% of total)", "other mapped reads (% of total)")]
fwrite(supp_table8, "results/tables/supp_data5.csv", sep='\t')
sample_stats_plot_df = melt(sample_stats[, c("Sample", "adapter_dimers_ratio", "read_lost_in_qc", "unmapped_reads", "mapped_ratio", "mirna_ratio")], id.vars="Sample")
sample_stats_plot_df[, Group:=metadata$Group[match(Sample, metadata$`Sample name`)]]
sample_stats_mean_plot_df = sample_stats_plot_df[, list(value=mean(value), sd=sd(value)), by=c("variable", "Group")]
sample_stats_mean_plot_df = sample_stats_mean_plot_df[order(variable, decreasing = TRUE)]
sample_stats_mean_plot_df[, ypos:=cumsum(value), by="Group"]
sample_stats_per_group_with_errorbars = ggplot(sample_stats_mean_plot_df, aes(x=factor(Group, levels=sample_stats_mean_plot_df[variable == "mirna_ratio"][order(value)]$Group),
y=value, fill=variable)) +
geom_col() +
geom_errorbar(aes(ymax=ypos-sd, ymin=ypos-sd, color=variable), position="identity", width=0.5, show.legend = F) +
geom_linerange(aes(ymin = ypos-sd, ymax = ypos, color=variable), show.legend=F) +
xlab("") +
scale_y_continuous(labels=scales::percent_format(), name="Total sequenced reads", expand = expansion(mult = c(0,0))) +
coord_flip() +
scale_fill_manual(values=c(read_lost_in_qc="#d3d3d3", adapter_dimers_ratio="#adadad", unmapped_reads="#3B4992FF", mapped_ratio="#008B45FF", mirna_ratio="#c51b8a"),
labels=c(read_lost_in_qc="Lost in QC", adapter_dimers_ratio="Adapter dimers", unmapped_reads="Unmapped", mapped_ratio="Mapped", mirna_ratio="miRNAs"),
name="",
guide=guide_legend(nrow=2)) +
scale_color_manual(values=c(read_lost_in_qc=darken("#d3d3d3", 1.5), adapter_dimers_ratio=darken("#adadad", 1.5), unmapped_reads=darken("#3B4992FF"), mapped_ratio=darken("#008B45FF"), mirna_ratio=darken("#c51b8a")),
labels=c(read_lost_in_qc="Lost in QC", adapter_dimers_ratio="Adapter dimers", unmapped_reads="Unmapped", mapped_ratio="Mapped", mirna_ratio="miRNAs"),
name="",
guide=guide_legend(nrow=2)) +
theme_cowplot(11) + theme(legend.position="bottom")
mir_expr = fread("data/figure3_input/mirna_quantification_rpmmm_norm.csv")
mir_expr[, miRNA:=paste(precursor, miRNA, sep='|')]
mir_expr[, precursor:=NULL]
detected_mirs = data.frame(num_mirnas=colSums(mir_expr[, 2:ncol(mir_expr)] > 0))
detected_mirs$Group = metadata$Group[match(rownames(detected_mirs), metadata$`Sample name`)]
detected_mirs = setDT(detected_mirs)
detected_mirs_p = ggplot(detected_mirs, aes(x=factor(Group, levels=detected_mirs[, mean(num_mirnas), by="Group"][order(V1)]$Group), y=num_mirnas, color=Group, fill=Group)) +
geom_half_point() +
geom_half_boxplot(outlier.colour = NA) +
theme_cowplot(11) +
scale_y_continuous(breaks=scales::pretty_breaks(10)) +
scale_fill_manual(values=cellline_colors) + scale_color_manual(values=cellline_dark_colors) +
coord_flip() +
xlab("") + ylab("Detected miRNAs") + theme(legend.position = "none")
expr = fread("data/figure3_input/read_counts.csv")
expr_mat = as.matrix(expr[, 2:ncol(expr)])
rownames(expr_mat) = expr$Geneid
seu = CreateSeuratObject(counts=expr_mat,
meta.data=data.frame(metadata[match(colnames(expr_mat), `Sample name`)], row.names=colnames(expr_mat)))
seu = NormalizeData(seu, normalization.method = "LogNormalize", scale.factor = 10000)
seu = FindVariableFeatures(seu, selection.method = "vst", nfeatures = 2000)
all.genes = rownames(seu)
seu = ScaleData(seu, features = all.genes)
seu = RunPCA(seu, features = VariableFeatures(object = seu), npcs = 47)
seu = RunUMAP(seu, dims = 1:20, n.neighbors = 12, min.dist = 0.1, seed.use=42)
seu_umap = data.table(seu@reductions$umap@cell.embeddings, keep.rownames = TRUE)
seu_umap[, Protocol:=metadata$Group[match(rn, metadata$Sample)]]
seu_umap_plot = ggplot(seu_umap, aes(x=UMAP_1, y=UMAP_2, color=Protocol, label=rn)) +
geom_point(size=1) +
scale_color_manual(values=cellline_colors, guide=guide_legend(ncol=2), name="Cell line") +
theme_cowplot(11) +
theme(legend.position = c(0.01,0.75), axis.text = element_blank(), axis.ticks = element_blank()) +
xlab("UMAP 1") + ylab("UMAP 2")
expressed_mirs_per_protocol = lapply(unique(metadata$Group), function(x) {
samples = as.character(metadata[Group == x]$`Sample name`)
mir_expr$miRNA[rowSums(mir_expr[, ..samples] > 0) > 0]
})
names(expressed_mirs_per_protocol) = unique(metadata$Group)
qlist_intersect = lapply(names(expressed_mirs_per_protocol), function(n){
upset_query(intersect = n, fill="#fd8d3c", color="#fd8d3c", only_components=c('Common\nmiRNAs', 'intersections_matrix'))
})
upset_p = upset(UpSetR::fromList(expressed_mirs_per_protocol), intersect=names(expressed_mirs_per_protocol),
base_annotations = list(`Common\nmiRNAs`=intersection_size(counts=TRUE, text=list(angle=90, vjust=0.5, hjust=1, size=2), bar_number_threshold = 1)),
queries=c(qlist_intersect),
matrix=intersection_matrix(
geom=geom_point(
size=2
)
),
themes=upset_default_themes(text=element_text(size=9)),
name="",
sort_sets="ascending",
n_intersections=20,
height_ratio = 1.8,
width_ratio = 0.3
)
save_plot("results/figures/fig3f.pdf", upset_p, base_width = 100, base_height = 50, unit="mm")
### gsea according to decreasing avg. expression
expr.dedup = fread("data/figure3_input/mirna_quantification_dedup_raw_mirna.csv")
expr_perc = apply(expr.dedup[, 2:ncol(expr.dedup)], 2, function(e) e / sum(e) * 100)
rownames(expr_perc) = expr.dedup$miRNA
expr_perc_df = as.data.table(reshape2::melt(expr_perc))
expr_perc_df[, Group:=metadata$Group[match(Var2, metadata$`Sample name`)]]
mean_expr = expr_perc_df[, mean(value), by=c("Var1", "Group")][order(V1, decreasing=TRUE)]
dir.create("results/mieaa_input", showWarnings = F, recursive = T)
for(c in unique(mean_expr$Group)){
print(c)
writeLines(as.character(mean_expr[Group == c & V1 > 0]$Var1), sprintf("results/mieaa_input/%s_gsea.txt", c))
}
fig3ab = plot_grid(sample_stats_per_group_with_errorbars, detected_mirs_p, labels = c("a", "b"))
fig3ctof = plot_grid(
seu_umap_plot,
NULL,
ncol=2,
labels=c("c", "d")
)
fig3atof = plot_grid(fig3ab, fig3ctof, ncol=1, rel_heights = c(1,1))
save_plot("results/figures/fig3.pdf", fig3atof, base_height = 100, base_width = 210, unit="mm")
setnames(sample_stats_mean_plot_df, c("Group", "value"), c("Protocol", "mean"))
fwrite(sample_stats_mean_plot_df[, c("variable", "Protocol", "mean", "sd"), with=F], "results/figures/fig3a.txt", sep='\t')
setnames(detected_mirs, "Group", "Protocol")
fwrite(detected_mirs, "results/figures/fig3b.txt", sep='\t')
setnames(seu_umap, c("rn", "UMAP_1", "UMAP_2"), c("Sample", "UMAP 1", "UMAP 2"))
fwrite(seu_umap, "results/figures/fig3c.txt", sep='\t')
fwrite(UpSetR::fromList(expressed_mirs_per_protocol), "results/figures/fig3d.txt", sep='\t')
|
\name{tee}
\alias{tee}
\title{Tee: report arguments and results whenever a function is called.}
\usage{
tee(f, args.do = str, result.do = str)
}
\arguments{
\item{f}{function to annotate}
\item{args.do}{function passed list of input arguments.
If \code{NULL}, do nothing.}
\item{result.do}{function passed results. If
\code{NULL}, do nothing.}
}
\description{
Inspired by the tee shell command:
\url{http://en.wikipedia.org/wiki/Tee_(command)}
}
\examples{
f <- function(x) sin(x ^ 2)
integrate(f, 0, pi)
integrate(tee(f, NULL, print), 0, pi)
uniroot(f, c(pi/2, pi))
uniroot(tee(f, NULL, print), c(pi/2, pi))
uniroot(tee(f, print, NULL), c(pi/2, pi))
locs <- remember()
vals <- remember()
uniroot(tee(f, locs, vals), c(pi/2, pi))
plot(sapply(locs, pluck(1)))
plot(sapply(locs, pluck(1)), sapply(vals, pluck(1)))
}
| /man/tee.Rd | no_license | cran/hof | R | false | false | 871 | rd | \name{tee}
\alias{tee}
\title{Tee: report arguments and results whenever a function is called.}
\usage{
tee(f, args.do = str, result.do = str)
}
\arguments{
\item{f}{function to annotate}
\item{args.do}{function passed list of input arguments.
If \code{NULL}, do nothing.}
\item{result.do}{function passed results. If
\code{NULL}, do nothing.}
}
\description{
Inspired by the tee shell command:
\url{http://en.wikipedia.org/wiki/Tee_(command)}
}
\examples{
f <- function(x) sin(x ^ 2)
integrate(f, 0, pi)
integrate(tee(f, NULL, print), 0, pi)
uniroot(f, c(pi/2, pi))
uniroot(tee(f, NULL, print), c(pi/2, pi))
uniroot(tee(f, print, NULL), c(pi/2, pi))
locs <- remember()
vals <- remember()
uniroot(tee(f, locs, vals), c(pi/2, pi))
plot(sapply(locs, pluck(1)))
plot(sapply(locs, pluck(1)), sapply(vals, pluck(1)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_put_model_package_group_policy}
\alias{sagemaker_put_model_package_group_policy}
\title{Adds a resource policy to control access to a model group}
\usage{
sagemaker_put_model_package_group_policy(ModelPackageGroupName,
ResourcePolicy)
}
\arguments{
\item{ModelPackageGroupName}{[required] The name of the model group to add a resource policy to.}
\item{ResourcePolicy}{[required] The resource policy for the model group.}
}
\description{
Adds a resource policy to control access to a model group. For
information about resource policies, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html}{Identity-based policies and resource-based policies}
in the \emph{AWS Identity and Access Management User Guide}.
}
\section{Request syntax}{
\preformatted{svc$put_model_package_group_policy(
ModelPackageGroupName = "string",
ResourcePolicy = "string"
)
}
}
\keyword{internal}
| /cran/paws.machine.learning/man/sagemaker_put_model_package_group_policy.Rd | permissive | sanchezvivi/paws | R | false | true | 1,040 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_put_model_package_group_policy}
\alias{sagemaker_put_model_package_group_policy}
\title{Adds a resource policy to control access to a model group}
\usage{
sagemaker_put_model_package_group_policy(ModelPackageGroupName,
ResourcePolicy)
}
\arguments{
\item{ModelPackageGroupName}{[required] The name of the model group to add a resource policy to.}
\item{ResourcePolicy}{[required] The resource policy for the model group.}
}
\description{
Adds a resource policy to control access to a model group. For
information about resource policies, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_identity-vs-resource.html}{Identity-based policies and resource-based policies}
in the \emph{AWS Identity and Access Management User Guide}.
}
\section{Request syntax}{
\preformatted{svc$put_model_package_group_policy(
ModelPackageGroupName = "string",
ResourcePolicy = "string"
)
}
}
\keyword{internal}
|
## makeCacheMatrix: build a "cache-aware" wrapper around matrix `x`.
## Returns a list of accessor closures (set/get/setsolve/getsolve) that all
## share `x` and a cached inverse via their enclosing environment, so the
## inverse can be stored once and reused instead of being recomputed.
## Access pattern: obj <- makeCacheMatrix(m); obj$getsolve()
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inv <<- NULL
    },
    get = function() x,
    setsolve = function(value) cached_inv <<- value,
    getsolve = function() cached_inv
  )
}
## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## object `x`, computing it with solve() only on the first call and serving
## the cached copy (with a message) on subsequent calls.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getsolve()
  if (is.null(inverse)) {
    # Cache miss: compute, store, and fall through to return the result.
    inverse <- solve(x$get(), ...)
    x$setsolve(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
| /cachematrix.R | no_license | paolaitu/ProgrammingAssignment2 | R | false | false | 1,072 | r | ## This function creates a special matrix
## function caches Matrix's inverse
## The fact of returning a list() allows access to any other objects defined
## in the env. of the original function. The subsequent code can access the values of x
## or m, through the use of getters and setters. We can also access like:
## myMatrix_object$getsolve()
## makeCacheMatrix wraps matrix `x` in a list of closures that share the
## matrix and its cached inverse `m` (NULL until setsolve() stores one).
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
## Replacing the matrix invalidates any previously cached inverse.
m <<- NULL
}
get <- function() x
setsolve <- function(solve) m <<- solve
getsolve <- function() m
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## This function will execute the solve to get
## the matrix's inverse, it will validate if it's different to null to
## retrieve, if null, it will calculate the inverse, set the inverse and
## return it.
## cacheSolve returns the inverse of the matrix wrapped by `x`
## (a makeCacheMatrix list), reusing the cached value when available;
## extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
m <- x$getsolve()
if(!is.null(m)) {
## Cache hit: report and return the stored inverse.
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
|
# simulate a probit model
# Draws correlated Gaussian utility shocks for nbY alternatives, simulates
# choice probabilities s_y, then recovers the systematic utilities U_y from
# the duals of a linear assignment problem solved with Gurobi.
library(Matrix)
library(gurobi)
#seed = 777
#set.seed(seed)
nbDraws = 1E4
nbY = length(U_y)
rho = 0.5
#
# Equicorrelated covariance (off-diagonal rho, unit diagonal) and its
# symmetric square root via the eigendecomposition Q diag(sqrt(V)) Q'.
Covar = rho * matrix(1,nbY,nbY) + (1-rho) * diag(1,nbY)
E = eigen(Covar)
V = E$values
Q = E$vectors
SqrtCovar = Q%*%diag(sqrt(V))%*%t(Q)
# Correlated shocks: one row per draw, one column per alternative.
epsilon_iy = matrix(rnorm(nbDraws*nbY),ncol=nbY) %*% SqrtCovar
#
# Total utilities; ui is each draw's row maximum. Subtracting ui makes the
# chosen alternative exactly 0, so the v==0 float comparison below is safe.
u_iy = t(t(epsilon_iy)+U_y)
ui = apply(X = u_iy, MARGIN = 1, FUN = max)
# s_y: simulated choice probability of each alternative.
s_y = apply(X = u_iy - ui, MARGIN = 2,FUN = function(v) (length(which(v==0)))) / nbDraws
# Constraint matrix: each draw carries mass 1/nbDraws (A1 rows) and each
# alternative must receive total mass s_y (A2 rows).
A1 = kronecker(matrix(1,1,nbY),sparseMatrix(1:nbDraws,1:nbDraws))
A2 = kronecker(sparseMatrix(1:nbY,1:nbY),matrix(1,1,nbDraws))
A = rbind2(A1,A2)
result = gurobi ( list(A=A,obj=c(epsilon_iy),modelsense="max",rhs=c(rep(1/nbDraws,nbDraws),s_y) ,sense="="), params=list(OutputFlag=0) )
# Recover utilities from the duals (result$pi) of the nbY alternative
# constraints; adding pi[nbY+nbDraws] (the last one in that range)
# normalizes Uhat_y so its last entry is 0, matching U_y[nbY] = 0.
Uhat_y = - result$pi[(1+nbDraws):(nbY+nbDraws)] + result$pi[(nbY+nbDraws)]
print("U_y (true and recovered)")
print(U_y)
print(Uhat_y)
| /11-appli-simulation/simulation.R | no_license | MaximilianJHuber/math-econ-code_masterclass | R | false | false | 955 | r | # simulate a probit model
# Probit simulation: draw correlated Gaussian utilities, simulate choice
# probabilities, and recover the true utilities U_y from LP duals (Gurobi).
library(Matrix)
library(gurobi)
#seed = 777
#set.seed(seed)
nbDraws = 1E4
U_y = c(1.6, 3.2, 1.1,0)
nbY = length(U_y)
rho = 0.5
#
# Equicorrelated covariance and its symmetric square root.
Covar = rho * matrix(1,nbY,nbY) + (1-rho) * diag(1,nbY)
E = eigen(Covar)
V = E$values
Q = E$vectors
SqrtCovar = Q%*%diag(sqrt(V))%*%t(Q)
epsilon_iy = matrix(rnorm(nbDraws*nbY),ncol=nbY) %*% SqrtCovar
#
# Subtracting the row maximum ui makes the chosen alternative exactly 0,
# so the v==0 comparison counting choices is safe despite being a float test.
u_iy = t(t(epsilon_iy)+U_y)
ui = apply(X = u_iy, MARGIN = 1, FUN = max)
s_y = apply(X = u_iy - ui, MARGIN = 2,FUN = function(v) (length(which(v==0)))) / nbDraws
A1 = kronecker(matrix(1,1,nbY),sparseMatrix(1:nbDraws,1:nbDraws))
A2 = kronecker(sparseMatrix(1:nbY,1:nbY),matrix(1,1,nbDraws))
A = rbind2(A1,A2)
result = gurobi ( list(A=A,obj=c(epsilon_iy),modelsense="max",rhs=c(rep(1/nbDraws,nbDraws),s_y) ,sense="="), params=list(OutputFlag=0) )
# Duals of the alternative constraints, normalized so the last entry is 0.
Uhat_y = - result$pi[(1+nbDraws):(nbY+nbDraws)] + result$pi[(nbY+nbDraws)]
print("U_y (true and recovered)")
print(U_y)
print(Uhat_y)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDBeye.R
\docType{package}
\name{FDBeye}
\alias{FDBeye}
\alias{FDBeye-package}
\title{Tools to Ease Work with Eyetracker Data}
\description{
Tools to ease working with eyetracker data. Currently focussed on my own work flow
involving data from SRR Eyelink systems, although some stuff may be more generally
useful. Functionality is a bit thin. Pitch in and help if you like. (It would be nice to
have some code in README.Rmd that pulls the description from this file and inserts it
there.)
}
| /man/FDBeye.Rd | permissive | gtojty/FDBeye | R | false | true | 587 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDBeye.R
\docType{package}
\name{FDBeye}
\alias{FDBeye}
\alias{FDBeye-package}
\title{Tools to Ease Work with Eyetracker Data}
\description{
Tools to ease working with eyetracker data. Currently focussed on my own work flow
involving data from SRR Eyelink systems, although some stuff may be more generally
useful. Functionality is a bit thin. Pitch in and help if you like. (It would be nice to
have some code in README.Rmd that pulls the description from this file and inserts it
there.)
}
|
# Auto-generated fuzz input: calls BayesMRA::rmvn_arma_scalar with a
# subnormal `a` and zero `b` -- presumably probing numeric edge cases;
# confirm against the harness that produced this file.
testlist <- list(a = 1.01866381243767e-315, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) | /BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615925949-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 117 | r | testlist <- list(a = 1.01866381243767e-315, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotConcPred.R
\name{plotConcPred}
\alias{plotConcPred}
\title{Plot of Observed Concentration versus Estimated Concentration}
\usage{
plotConcPred(eList, concMax = NA, logScale = FALSE, printTitle = TRUE,
tinyPlot = FALSE, cex = 0.8, cex.axis = 1.1, cex.main = 1.1,
customPar = FALSE, col = "black", lwd = 1, randomCensored = FALSE,
concLab = 1, usgsStyle = FALSE, ...)
}
\arguments{
\item{eList}{named list with at least the Sample and INFO dataframes}
\item{concMax}{number specifying the maximum value to be used on the vertical axis, default is NA (which allows it to be set automatically by the data)}
\item{logScale}{logical, default FALSE, TRUE indicates x and y axes are on a log scale. FALSE indicates both x and y are on an arithmetic scale.}
\item{printTitle}{logical variable if TRUE title is printed, if FALSE not printed (this is best for a multi-plot figure)}
\item{tinyPlot}{logical variable, if TRUE plot is designed to be plotted small, as a part of a multipart figure, default is FALSE}
\item{cex}{numerical value giving the amount by which plotting symbols should be magnified}
\item{cex.axis}{magnification to be used for axis annotation relative to the current setting of cex}
\item{cex.main}{magnification to be used for main titles relative to the current setting of cex}
\item{customPar}{logical defaults to FALSE. If TRUE, par() should be set by user before calling this function
(for example, adjusting margins with par(mar=c(5,5,5,5))). If customPar FALSE, EGRET chooses the best margins depending on tinyPlot.}
\item{col}{color of points on plot, see ?par 'Color Specification'}
\item{lwd}{number line width}
\item{randomCensored}{logical. Show censored values as randomized.}
\item{concLab}{object of concUnit class, or numeric represented the short code,
or character representing the descriptive name. By default, this argument sets
concentration labels to use either Concentration or Conc (for tiny plots). Units
are taken from the eList$INFO$param.units. To use any other words than
"Concentration" see \code{vignette(topic = "units", package = "EGRET")}.}
\item{usgsStyle}{logical option to use USGS style guidelines. Setting this option
to TRUE does NOT guarantee USGS compliance. It will only change automatically
generated labels}
\item{\dots}{arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)}
}
\description{
Data come from named list, which contains a Sample dataframe with the sample data,
and an INFO dataframe with metadata.
Although there are a lot of optional arguments to this function, most are set to a logical default.
}
\examples{
eList <- Choptank_eList
# Water year:
plotConcPred(eList)
# Graphs consisting of Jun-Aug
eList <- setPA(eList, paStart=6,paLong=3)
plotConcPred(eList)
}
\seealso{
\code{\link{selectDays}}, \code{\link{genericEGRETDotPlot}}
}
\keyword{graphics}
\keyword{statistics}
\keyword{water-quality}
| /man/plotConcPred.Rd | permissive | ldecicco-USGS/EGRET | R | false | true | 3,030 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotConcPred.R
\name{plotConcPred}
\alias{plotConcPred}
\title{Plot of Observed Concentration versus Estimated Concentration}
\usage{
plotConcPred(eList, concMax = NA, logScale = FALSE, printTitle = TRUE,
tinyPlot = FALSE, cex = 0.8, cex.axis = 1.1, cex.main = 1.1,
customPar = FALSE, col = "black", lwd = 1, randomCensored = FALSE,
concLab = 1, usgsStyle = FALSE, ...)
}
\arguments{
\item{eList}{named list with at least the Sample and INFO dataframes}
\item{concMax}{number specifying the maximum value to be used on the vertical axis, default is NA (which allows it to be set automatically by the data)}
\item{logScale}{logical, default FALSE, TRUE indicates x and y axes are on a log scale. FALSE indicates both x and y are on an arithmetic scale.}
\item{printTitle}{logical variable if TRUE title is printed, if FALSE not printed (this is best for a multi-plot figure)}
\item{tinyPlot}{logical variable, if TRUE plot is designed to be plotted small, as a part of a multipart figure, default is FALSE}
\item{cex}{numerical value giving the amount by which plotting symbols should be magnified}
\item{cex.axis}{magnification to be used for axis annotation relative to the current setting of cex}
\item{cex.main}{magnification to be used for main titles relative to the current setting of cex}
\item{customPar}{logical defaults to FALSE. If TRUE, par() should be set by user before calling this function
(for example, adjusting margins with par(mar=c(5,5,5,5))). If customPar FALSE, EGRET chooses the best margins depending on tinyPlot.}
\item{col}{color of points on plot, see ?par 'Color Specification'}
\item{lwd}{number line width}
\item{randomCensored}{logical. Show censored values as randomized.}
\item{concLab}{object of concUnit class, or numeric represented the short code,
or character representing the descriptive name. By default, this argument sets
concentration labels to use either Concentration or Conc (for tiny plots). Units
are taken from the eList$INFO$param.units. To use any other words than
"Concentration" see \code{vignette(topic = "units", package = "EGRET")}.}
\item{usgsStyle}{logical option to use USGS style guidelines. Setting this option
to TRUE does NOT guarantee USGS compliance. It will only change automatically
generated labels}
\item{\dots}{arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)}
}
\description{
Data come from named list, which contains a Sample dataframe with the sample data,
and an INFO dataframe with metadata.
Although there are a lot of optional arguments to this function, most are set to a logical default.
}
\examples{
eList <- Choptank_eList
# Water year:
plotConcPred(eList)
# Graphs consisting of Jun-Aug
eList <- setPA(eList, paStart=6,paLong=3)
plotConcPred(eList)
}
\seealso{
\code{\link{selectDays}}, \code{\link{genericEGRETDotPlot}}
}
\keyword{graphics}
\keyword{statistics}
\keyword{water-quality}
|
#' ---
#' title: "Regression and Other Stories: Pearson and Lee Heights"
#' author: "Andrew Gelman, Jennifer Hill, Aki Vehtari"
#' date: "`r format(Sys.Date())`"
#' output:
#' html_document:
#' theme: readable
#' toc: true
#' toc_depth: 2
#' toc_float: true
#' code_download: true
#' ---
#'
#' The heredity of height. Published in 1903 by Karl Pearson and Alice
#' Lee. See Chapter 6 in Regression and Other Stories.
#'
#' -------------
#'
#+ setup, include=FALSE
knitr::opts_chunk$set(message=FALSE, error=FALSE, warning=FALSE, comment=NA)
# switch this to TRUE to save figures in separate files
savefigs <- FALSE
#' #### Load packages
library("rprojroot")
# root() builds paths relative to the repository root marker file.
root<-has_file(".ROS-Examples-root")$make_fix_file()
library("rstanarm")
library("HistData")
#' #### Load data
heights <- read.table(root("PearsonLee/data","Heights.txt"), header=TRUE)
n <- nrow(heights)
# n is not referenced again below.
head(heights)
#' #### Linear regression
fit_1 <- stan_glm(daughter_height ~ mother_height, data = heights, refresh = 0)
#+
print(fit_1, digits=2)
a_hat <- coef(fit_1)[1]
b_hat <- coef(fit_1)[2]
#' #### Plot mothers' and daughters' heights
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee1.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, 1), mgp=c(1.7, .5, 0), tck=-.01)
par(pty="s")
# Shared axis range so both axes cover the same heights (square plot, pty="s").
rng <- range(heights$mother_height, heights$daughter_height)
plot(heights$mother_height, heights$daughter_height, xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=rng, ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.5)
x <- seq(48, 84, 6)
axis(1, x)
axis(2, x)
for (i in x){
abline(h=i, col="gray70", lty=2)
abline(v=i, col="gray70", lty=2)
}
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot mothers' and daughters' heights with jitter
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee2.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, 1), mgp=c(1.7, .5, 0), tck=-.01, pty="s")
# jitter() spreads the discrete height values to reduce overplotting.
plot(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=rng, ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.2)
x <- seq(48, 84, 6)
axis(1, x)
axis(2, x)
for (i in x){
abline(h=i, col="gray70", lty=2)
abline(v=i, col="gray70", lty=2)
}
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot mothers' and daughters' heights and fitted regression line
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee3a.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01, pty="s")
plot(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=c(rng[1], rng[2]), ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.2)
x <- seq(48, 84, 6)
axis(1, x)
axis(2, x)
for (i in x){
abline(h=i, col="gray70", lty=2)
abline(v=i, col="gray70", lty=2)
}
# Thick white underlay keeps the fitted line visible over the points.
abline(a_hat, b_hat, lwd=3, col="white")
abline(a_hat, b_hat, lwd=1.5)
points(mean(heights$mother_height), mean(heights$daughter_height), pch=20, cex=2, col="white")
mtext("Mothers' and daughters' heights,\naverage of data, and fitted regression line", side=3, line=0)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot fitted regression line and the average of the data
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee3b.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01, pty="s")
plot(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=c(rng[1], rng[2]), ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.2, type="n")
x <- seq(54, 72, 6)
axis(1, x)
axis(2, x)
abline(a_hat, b_hat, lwd=3, col="white")
abline(a_hat, b_hat, lwd=1.5)
lines(rep(mean(heights$mother_height), 2), c(0, mean(heights$daughter_height)), lwd=.5)
lines(c(0, mean(heights$mother_height)), rep(mean(heights$daughter_height), 2), lwd=.5)
axis(1, mean(heights$mother_height), round(mean(heights$mother_height), 1))
axis(2, mean(heights$daughter_height), round(mean(heights$daughter_height), 1))
text(68, 64, paste("y =", round(a_hat), "+", round(b_hat, 2), "x"))
text(63, 62, paste("Equivalently, y = ", round(mean(heights$daughter_height), 1), " + ", round(b_hat, 2), " * (x - ", round(mean(heights$mother_height), 1), ")", sep=""))
points(mean(heights$mother_height), mean(heights$daughter_height), pch=20, cex=2)
mtext("The fitted regression line and the average of the data ", side=3, line=1)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot fitted regression line
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee4a.pdf"), height=4, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01)
# Schematic plot of the fitted line on bare axes (type="n": no data drawn).
plot(c(0, 100), c(0, 100), xlab="", ylab="", xaxt="n", yaxt="n", bty="n", type="n")
abline(h=0)
abline(v=0)
axis(2, round(a_hat), tck=0, las=1)
axis(1, 0, tck=0, las=1, line=-.4)
axis(2, 0, tck=0, las=1)
abline(a_hat, b_hat, lwd=2)
text(40, 40, paste("slope", round(b_hat, 2)))
mtext(paste("The line, y =", round(a_hat), "+", round(b_hat, 2), "x"), side=3, line=0)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot data and fitted regression line in the context of the data
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee4b.pdf"), height=4, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01)
plot(c(0, 100), c(0, 100), xlab="", ylab="", xaxt="n", yaxt="n", bty="n", type="n")
abline(h=0)
abline(v=0)
axis(2, round(a_hat), tck=0, las=1)
points(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), pch=20, cex=.2)
abline(a_hat, b_hat, lwd=3, col="white")
abline(a_hat, b_hat, lwd=1.5)
axis(1, 0, tck=0, las=1, line=-.4)
axis(2, 0, tck=0, las=1)
axis(1, mean(heights$mother_height), round(mean(heights$mother_height), 1), tck=0, las=1, line=-.4)
axis(2, mean(heights$daughter_height), round(mean(heights$daughter_height), 1), tck=0, las=1, line=-.7)
lines(rep(mean(heights$mother_height), 2), c(0, mean(heights$daughter_height)), lwd=.5)
lines(c(0, mean(heights$mother_height)), rep(mean(heights$daughter_height), 2), lwd=.5)
text(40, 43, paste("slope", round(b_hat, 2)), cex=.9)
mtext(paste("The line, y =", round(a_hat), "+", round(b_hat, 2), "x, in the context of the data"), side=3, line=0)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
| /ROS-Examples-master/PearsonLee/heights.R | permissive | cleeway/Working-through-Regression-and-other-stories | R | false | false | 6,516 | r | #' ---
#' title: "Regression and Other Stories: Pearson and Lee Heights"
#' author: "Andrew Gelman, Jennifer Hill, Aki Vehtari"
#' date: "`r format(Sys.Date())`"
#' output:
#' html_document:
#' theme: readable
#' toc: true
#' toc_depth: 2
#' toc_float: true
#' code_download: true
#' ---
#'
#' The heredity of height. Published in 1903 by Karl Pearson and Alice
#' Lee. See Chapter 6 in Regression and Other Stories.
#'
#' -------------
#'
#+ setup, include=FALSE
knitr::opts_chunk$set(message=FALSE, error=FALSE, warning=FALSE, comment=NA)
# switch this to TRUE to save figures in separate files
savefigs <- FALSE
#' #### Load packages
library("rprojroot")
# root() builds paths relative to the repository root marker file.
root<-has_file(".ROS-Examples-root")$make_fix_file()
library("rstanarm")
library("HistData")
#' #### Load data
heights <- read.table(root("PearsonLee/data","Heights.txt"), header=TRUE)
n <- nrow(heights)
# n is not referenced again below.
head(heights)
#' #### Linear regression
fit_1 <- stan_glm(daughter_height ~ mother_height, data = heights, refresh = 0)
#+
print(fit_1, digits=2)
a_hat <- coef(fit_1)[1]
b_hat <- coef(fit_1)[2]
#' #### Plot mothers' and daughters' heights
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee1.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, 1), mgp=c(1.7, .5, 0), tck=-.01)
par(pty="s")
# Shared axis range so both axes cover the same heights (square plot, pty="s").
rng <- range(heights$mother_height, heights$daughter_height)
plot(heights$mother_height, heights$daughter_height, xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=rng, ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.5)
x <- seq(48, 84, 6)
axis(1, x)
axis(2, x)
for (i in x){
abline(h=i, col="gray70", lty=2)
abline(v=i, col="gray70", lty=2)
}
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot mothers' and daughters' heights with jitter
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee2.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, 1), mgp=c(1.7, .5, 0), tck=-.01, pty="s")
# jitter() spreads the discrete height values to reduce overplotting.
plot(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=rng, ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.2)
x <- seq(48, 84, 6)
axis(1, x)
axis(2, x)
for (i in x){
abline(h=i, col="gray70", lty=2)
abline(v=i, col="gray70", lty=2)
}
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot mothers' and daughters' heights and fitted regression line
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee3a.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01, pty="s")
plot(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=c(rng[1], rng[2]), ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.2)
x <- seq(48, 84, 6)
axis(1, x)
axis(2, x)
for (i in x){
abline(h=i, col="gray70", lty=2)
abline(v=i, col="gray70", lty=2)
}
# Thick white underlay keeps the fitted line visible over the points.
abline(a_hat, b_hat, lwd=3, col="white")
abline(a_hat, b_hat, lwd=1.5)
points(mean(heights$mother_height), mean(heights$daughter_height), pch=20, cex=2, col="white")
mtext("Mothers' and daughters' heights,\naverage of data, and fitted regression line", side=3, line=0)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot fitted regression line and the average of the data
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee3b.pdf"), height=4.5, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01, pty="s")
plot(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), xlab="Mother's height (inches)", ylab="Adult daughter's height (inches)", bty="l", xlim=c(rng[1], rng[2]), ylim=rng, xaxt="n", yaxt="n", pch=20, cex=.2, type="n")
x <- seq(54, 72, 6)
axis(1, x)
axis(2, x)
abline(a_hat, b_hat, lwd=3, col="white")
abline(a_hat, b_hat, lwd=1.5)
lines(rep(mean(heights$mother_height), 2), c(0, mean(heights$daughter_height)), lwd=.5)
lines(c(0, mean(heights$mother_height)), rep(mean(heights$daughter_height), 2), lwd=.5)
axis(1, mean(heights$mother_height), round(mean(heights$mother_height), 1))
axis(2, mean(heights$daughter_height), round(mean(heights$daughter_height), 1))
text(68, 64, paste("y =", round(a_hat), "+", round(b_hat, 2), "x"))
text(63, 62, paste("Equivalently, y = ", round(mean(heights$daughter_height), 1), " + ", round(b_hat, 2), " * (x - ", round(mean(heights$mother_height), 1), ")", sep=""))
points(mean(heights$mother_height), mean(heights$daughter_height), pch=20, cex=2)
mtext("The fitted regression line and the average of the data ", side=3, line=1)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot fitted regression line
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee4a.pdf"), height=4, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01)
# Schematic plot of the fitted line on bare axes (type="n": no data drawn).
plot(c(0, 100), c(0, 100), xlab="", ylab="", xaxt="n", yaxt="n", bty="n", type="n")
abline(h=0)
abline(v=0)
axis(2, round(a_hat), tck=0, las=1)
axis(1, 0, tck=0, las=1, line=-.4)
axis(2, 0, tck=0, las=1)
abline(a_hat, b_hat, lwd=2)
text(40, 40, paste("slope", round(b_hat, 2)))
mtext(paste("The line, y =", round(a_hat), "+", round(b_hat, 2), "x"), side=3, line=0)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot data and fitted regression line in the context of the data
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("PearsonLee/figs","PearsonLee4b.pdf"), height=4, width=4.5)
#+
par(mar=c(3, 3, 2, .1), mgp=c(2, .5, 0), tck=-.01)
plot(c(0, 100), c(0, 100), xlab="", ylab="", xaxt="n", yaxt="n", bty="n", type="n")
abline(h=0)
abline(v=0)
axis(2, round(a_hat), tck=0, las=1)
points(jitter(heights$mother_height, 0.5), jitter(heights$daughter_height), pch=20, cex=.2)
abline(a_hat, b_hat, lwd=3, col="white")
abline(a_hat, b_hat, lwd=1.5)
axis(1, 0, tck=0, las=1, line=-.4)
axis(2, 0, tck=0, las=1)
axis(1, mean(heights$mother_height), round(mean(heights$mother_height), 1), tck=0, las=1, line=-.4)
axis(2, mean(heights$daughter_height), round(mean(heights$daughter_height), 1), tck=0, las=1, line=-.7)
lines(rep(mean(heights$mother_height), 2), c(0, mean(heights$daughter_height)), lwd=.5)
lines(c(0, mean(heights$mother_height)), rep(mean(heights$daughter_height), 2), lwd=.5)
text(40, 43, paste("slope", round(b_hat, 2)), cex=.9)
mtext(paste("The line, y =", round(a_hat), "+", round(b_hat, 2), "x, in the context of the data"), side=3, line=0)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
|
# adjDadosVecinos: build a k-nearest-neighbour adjacency matrix over the
# samples (columns) of `matrixExprShuffled`, plot the resulting graph to a
# PNG file, and return the adjacency matrix without self-loops.
#
# matrixExprShuffled: expression matrix, one column per sample.
# t: sample vector; only its length (number of samples) is used.
# k: number of neighbours per sample.
# stdd, perc: labels used in the output file name and plot annotation.
adjDadosVecinos = function(matrixExprShuffled, t, k, stdd, perc){
  n_samples = length(t)
  # Euclidean distance between two expression profiles.
  distEuclideana = function(vectA, vectB){
    sqrt(sum((vectA - vectB)^2))
  }
  # Pairwise distance matrix between sample columns, converted to per-row
  # ranks (rank 1 is each sample's zero distance to itself).
  matrizRangos = function(matrizExpr, n){
    samples = matrix(nrow = n, ncol = n)
    for (i in seq_len(n)){
      for (j in seq_len(n)){
        # Distance is symmetric, so fill both entries at once.
        samples[i, j] = samples[j, i] = distEuclideana(matrizExpr[, i], matrizExpr[, j])
      }
    }
    t(apply(samples, 1, rank))
  }
  # Threshold the rank matrix: 1 for the k smallest distances, 0 otherwise.
  adyacencia = function(rangos, k){
    rangos[rangos <= k] = 1
    rangos[rangos > k] = 0
    rangos
  }
  # Identity matrix used to strip self-loops from the adjacency matrix.
  # (Bug fix: the original sized its construction loops by a global
  # `adjMatrix` that is undefined inside this function; the intended
  # dimension is the number of samples.)
  identidad = diag(1, n_samples)
  rangos = matrizRangos(matrixExprShuffled, n_samples)
  # k+1 so the self-match (rank 1) does not consume one neighbour slot.
  k = k + 1
  adjFinal = adyacencia(rangos, k)
  sinSelf = adjFinal - identidad
  # NOTE(review): mode "undirected" expects a symmetric matrix, but a kNN
  # adjacency need not be symmetric -- confirm against igraph's behaviour.
  grafo = graph_from_adjacency_matrix(sinSelf, mode = c("undirected"), weighted = NULL, diag = FALSE,
                                      add.colnames = NULL, add.rownames = NA)
  # kc: realised neighbour count of the first sample, used in the file name.
  kc = sum(sinSelf[1,])
  infile = paste("Grafo", perc, stdd, "vecinos", kc, ".png", sep="")
  png(filename=infile,
      units="in",
      width=10,
      height=8,
      pointsize=12,
      res=72)
  plot(grafo, layout = layout_on_sphere(grafo), edge.color = "black", edge.arrow.size=2, edge.width = 2, edge.arrow.width = 2, edge.lty = 1, vertex.size = 10)
  text(1, 1, paste0("std=", stdd," k=", kc),font=2, cex=1.5)
  dev.off()
  sinSelf
}
| /Trabajo/VarianceNeighbourhood/ahoraSi/EstudioDelFiltradoCorrecto/adjDadosVecinos.R | no_license | rominay/tesis | R | false | false | 1,709 | r | adjDadosVecinos = function(matrixExprShuffled,t,k,stdd,perc){
# Euclidean distance between two expression profiles.
distEuclideana = function(vectA, vectB){
d = sqrt(sum( (vectA - vectB)^2 ))
d
}
# Pairwise Euclidean distances between sample columns, converted to
# per-row ranks (each sample's zero self-distance gets rank 1).
matrizRangos = function(matrizExpr, t){
samples = matrix(nrow = length(t), ncol = length(t)) #exploits that the distance is symmetric
for (i in seq(1,length(t),1)){
for (j in seq(1,length(t),1)){
samples[i,j] = samples[j,i] = distEuclideana(matrizExpr[,i],matrizExpr[,j])
}
}
matrizRangos = t(apply(samples,1,rank))
matrizRangos
}
# Threshold ranks to a 0/1 k-nearest-neighbour adjacency matrix.
adyacencia = function(matrizRangos, k){
matrizRangos[matrizRangos <= k] = 1
matrizRangos[matrizRangos > k] = 0
matrizRangos
}
# Remove the 1s on the diagonal of the adjacency matrix (build an identity).
# NOTE(review): these loops are sized by a global `adjMatrix` that is not
# defined in this function, while `diag` is sized by length(t); if the two
# differ this leaves NAs or fails -- both should presumably be length(t).
diag = matrix(nrow = length(t), ncol = length(t))
for (i in seq(1,length(adjMatrix[1,]),1)){
for (j in seq(1,length(adjMatrix[1,]),1)){
if (i==j){
diag[i,i] = 1
}
if (i != j) {
diag[i,j] = 0
}
}
}
rangos = matrizRangos(matrixExprShuffled,t)
# k+1 so the self-match (rank 1) does not use up a neighbour slot.
k = k+1
adjFinal = adyacencia(rangos,k)
sinSelf = adjFinal - diag
grafo = graph_from_adjacency_matrix(sinSelf, mode = c("undirected"), weighted = NULL, diag = FALSE,
add.colnames = NULL, add.rownames = NA)
# kc: realised neighbour count of the first sample, used in file name/label.
kc = sum(sinSelf[1,])
infile = paste("Grafo", perc,stdd, "vecinos", kc, ".png",sep="")
png(filename=infile,
units="in",
width=10,
height=8,
pointsize=12,
res=72)
plot(grafo, layout = layout_on_sphere(grafo), edge.color = "black", edge.arrow.size=2, edge.width = 2, edge.arrow.width = 2, edge.lty = 1, vertex.size = 10)
text(1, 1, paste0("std=", stdd," k=", kc),font=2, cex=1.5)
dev.off()
sinSelf
}
|
readASCIIData <- function(filename, DebugLevel = "Normal"){
  #########################################################
  #
  # Reads signal data from an ASCII file (optionally gzip
  # compressed, recognized by a ".gz" extension).
  #
  # The header of the file has to be of the following form:
  #
  # Description: char (optional)
  # SignalDim: int int
  # XYmin: double double
  # DeltaXY: double double
  #
  # After the header follow the raw data.
  #
  # Returns a list with components:
  #   Signal - matrix/array holding the raw data
  #   Header - list with the parsed header fields
  #
  #########################################################
  #
  # B <- readASCIIData("./test/NEUTEST.dat")
  #
  # logDebug() translates the DebugLevel string into verbosity flags:
  # DL1/DL2 gate the progress messages below.
  DL1 <- logDebug(DebugLevel)
  DebugLevel <- DL1[[3]]
  DL2 <- DL1[[2]]
  DL1 <- DL1[[1]]
  sep <- ""
  if (DL1)
    cat("Open file: ", filename, "\n")
  # extraction of file-extension (text after the last dot)
  fiEx <- unlist(strsplit(filename, ".", fixed = TRUE))
  fiEx <- fiEx[length(fiEx)]
  # Open connection, transparently decompressing gzip files
  if (fiEx == "gz")
    con <- gzfile(filename, "rb")
  else
    con <- file(description = filename, "rb")
  on.exit(close(con))  # guarantee the connection is closed on any exit path
  #######################################################
  # Reading header
  # reading 'Description' and 'SignalDim'
  if (DL2)
    cat("Reading the header of the file.\n")
  temp <- try(scan(con, what = "", n = 1, sep = sep, quiet = TRUE))
  # inherits() instead of class() == "try-error": class() may return a
  # vector of length > 1, which breaks (and warns on) the "==" comparison
  if (inherits(temp, "try-error"))
    stop("File read error. Perhaps the file doesn't come up to the necessary form.")
  if (substr(temp, 1, 10) == "SignalDim"){
    ans <- list(SignalDim = NULL, XYmin = NULL, DeltaXY = NULL)
    ans$SignalDim <- scan(con, nlines = 1, sep = sep, quiet = TRUE)
  } else if (substr(temp, 1, 12) == "Description"){
    ans <- list(Description = NULL, SignalDim = NULL, XYmin = NULL,
                DeltaXY = NULL)
    ans$Description <- readLines(con, n = 1)
    tmp <- scan(con, what = "", n = 1, sep = sep, quiet = TRUE)
    if (tmp != "SignalDim")
      stop("Unexpected file header. Parameter ", tmp, " in second line not allowed.")
    ans$SignalDim <- scan(con, nlines = 1, sep = sep, quiet = TRUE)
  } else
    stop("Unexpected file header. First line doesn't correspond with the file format.")
  # reading 'XYmin'
  tmp <- scan(con, what = "", n = 1, sep = sep, quiet = TRUE)
  if (tmp != "XYmin")
    stop("Unexpected file header. Parameter ", tmp, " doesn't correspond with the file format.")
  ans$XYmin <- scan(con, nlines = 1, sep = sep, quiet = TRUE)
  # reading 'DeltaXY'
  tmp <- scan(con, what = "", n = 1, sep = sep, quiet = TRUE)
  if (tmp != "DeltaXY")
    stop("Unexpected file header. Parameter ", tmp, " doesn't correspond with the file format.")
  ans$DeltaXY <- scan(con, nlines = 1, sep = sep, quiet = TRUE)
  #######################################################
  # Reading the raw data
  if (DL2)
    cat("Reading the raw data.\n")
  index <- ans$SignalDim
  dimIndex <- length(index)
  if (dimIndex > 2){
    # data are stored row-major: read with the first two dimensions
    # swapped, then permute back with aperm()
    perm.vec <- c(2, 1, 3:dimIndex)
    Signal <- aperm(array(scan(con, sep = sep, quiet = TRUE),
                          dim = index[perm.vec]), perm.vec)
  } else if (dimIndex == 2){
    Signal <- matrix(scan(con, sep = sep, quiet = TRUE), nrow = index[1],
                     ncol = index[2], byrow = TRUE)
  } else if (dimIndex == 1){
    Signal <- matrix(scan(con, sep = sep, quiet = TRUE), nrow = 1, ncol = index,
                     byrow = TRUE)
  } else
    stop("The dimension of data is not supported.")
  if (DL1)
    cat("Close file. \n")
  return(list(Signal = Signal, Header = ans))
}
| /R/readASCIIData.r | no_license | cran/PET | R | false | false | 3,372 | r | readASCIIData <- function(filename, DebugLevel = "Normal"){
#########################################################
#
# The header of the file have to be the following form:
#
# Description: char (optional)
# SignalDim: int int
# XYmin: double double
# DeltaXY: double double
#
# After the header follow the raw data.
#
#########################################################
#
# B <- readASCIIData("./test/NEUTEST.dat")
#
DL1 <- logDebug(DebugLevel)
DebugLevel <- DL1[[3]]
DL2 <- DL1[[2]]
DL1 <- DL1[[1]]
sep <- ""
if (DL1)
cat("Open file: ",filename, "\n")
# extraction of file-extension
fiEx <- unlist(strsplit(filename, ".", fixed=TRUE))
fiEx <- fiEx[length(fiEx)]
# Open connection
if (fiEx=="gz")
con <- gzfile(filename, "rb")
else
con <- file(description=filename, "rb")
on.exit(close(con)) # be careful ...
#######################################################
# Reading header
# reading 'Description' and 'SignalDim'
if (DL2)
cat("Reading the header of the file.\n")
temp <- try(scan(con, what = "", n=1, sep=sep, quiet=TRUE))
if (class(temp) == "try-error")
stop("File read error. Perhaps the file doesn't come up to the necessary form.")
if (substr(temp,1,10) == "SignalDim"){
ans <- list(SignalDim = NULL, XYmin = NULL, DeltaXY = NULL)
ans$SignalDim <- scan(con, nlines=1, sep=sep, quiet=TRUE)
} else if (substr(temp,1,12) == "Description"){
ans <- list(Description = NULL, SignalDim = NULL, XYmin = NULL,
DeltaXY = NULL)
ans$Description <- readLines(con, n=1)
tmp <- scan(con, what = "", n=1, sep=sep, quiet=TRUE)
if (tmp != "SignalDim")
stop("Unexcept file header. Parameter ",tmp," in second line not allowed.")
ans$SignalDim <- scan(con, nlines=1, sep=sep, quiet=TRUE)
} else
stop("Unexcept file header. First line doesn't correspond with the file
format.")
# reading 'XYmin'
tmp <- scan(con, what = "", n=1, sep=sep, quiet=TRUE)
if (tmp != "XYmin")
stop("Unexcept file header. Parameter ",tmp," doesn't correspond with
the file format.")
ans$XYmin <- scan(con, nlines=1, sep=sep, quiet=TRUE)
# reading 'DeltaXY'
tmp <- scan(con, what = "", n=1, sep=sep, quiet=TRUE)
if (tmp != "DeltaXY")
stop("Unexcept file header. Parameter ",tmp," doesn't correspond with
the file format..")
ans$DeltaXY <- scan(con, nlines=1, sep=sep, quiet=TRUE)
#######################################################
# Reading the raw data
if (DL2)
cat("Reading the raw data.\n")
index <- ans$SignalDim
dimIndex <- length(index)
if (dimIndex > 2){
perm.vec <- c(2,1, 3:dimIndex)
Signal <- aperm(array(scan(con, sep=sep, quiet=TRUE),
dim=index[perm.vec]),perm.vec)
} else if (dimIndex == 2){
Signal <- matrix(scan(con, sep=sep, quiet=TRUE), nrow=index[1],
ncol=index[2], byrow=TRUE)
} else if (dimIndex == 1){
Signal <- matrix(scan(con, sep=sep, quiet=TRUE), nrow=1, ncol=index,
byrow=TRUE)
} else
stop("The dimension of data is not supported.")
if (DL1)
cat("Close file. \n")
return(list(Signal=Signal, Header=ans ))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/relevantSp.R
\name{relevantSp}
\alias{relevantSp}
\title{relevantSp}
\usage{
relevantSp(data, colnums = 26:128)
}
\arguments{
\item{data}{A data frame with presence/absence of different species.}
\item{colnums}{Column numbers that will be assessed.}
}
\value{
A vector of columns names.
}
\description{
\code{relevantSp} Which species are being used in this analysis.
}
\details{
Which columns have values that are greater than zero.
}
\author{
Jarrett Byrnes.
}
| /man/relevantSp.Rd | no_license | rededsky/multifunc | R | false | false | 552 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/relevantSp.R
\name{relevantSp}
\alias{relevantSp}
\title{relevantSp}
\usage{
relevantSp(data, colnums = 26:128)
}
\arguments{
\item{data}{A data frame with presence/abscence of different species.}
\item{colnums}{Column numbers that will be assessed.}
}
\value{
A vector of columns names.
}
\description{
\code{relevantSp} Which species are being used in this analysis.
}
\details{
Which columns have values that are greater than zero.
}
\author{
Jarrett Byrnes.
}
|
# ...............................
# fft metrics helper functions
# ...............................
#' Check \code{for.year} argument
#'
#' Ensure that the \code{for.year} argument is either \code{NULL} or a
#' single numeric value.
#'
#' @param for.year Input argument to be checked.
#' @return Stops execution in case of failed check.
assert.for.year = function(for.year) {
  # for.year, if not NULL, must be a numeric scalar value;
  # assert_numeric/assert_scalar stop with a descriptive error otherwise
  # (presumably checkmate -- confirm against the package imports)
  assert_numeric(for.year, null.ok = TRUE)
  assert_scalar(for.year, null.ok = TRUE)
}
#' Check for numeric vector
#'
#' Ensure that the given argument is a numeric vector.
#'
#' @param arg Input argument to be checked.
#' @return Stops execution in case of failed check.
assert.numeric.vector = function(arg) {
  # must be numeric AND a vector; assert_numeric/assert_vector stop with a
  # descriptive error on violation (presumably checkmate -- confirm against
  # the package imports)
  assert_numeric(arg)
  assert_vector(arg)
}
#' Check for equal length inputs
#'
#' Ensure that all given arguments are vectors of the same length as
#' \code{arg0}.
#'
#' @param arg0 At least one input argument to be checked needs to be given
#' @param ... Other inputs whose length equality needs to be checked
#' @return Stops execution in case of failed check
assert.equal.length = function(arg0, ...) {
  args = list(...)
  l = length(arg0)
  for (i in args) {
    if (length(i) != l) {
      # report the mismatching lengths inside the error itself instead of
      # print()-debugging to stdout before stopping
      stop("Inputs must be of same length! Expected ", l,
           ", got ", length(i), ".")
    }
  }
}
| /R/input_check.R | no_license | cran/discharge | R | false | false | 1,340 | r | # ...............................
# fft metrics helper functions
# ...............................
#' Check \code{for.year} argument
#'
#' Ensure that the \code{for.year} argument is either \code{NULL} or a
#' single numeric value.
#'
#' @param for.year Input argument to be checked.
#' @return Stops execution in case of failed check.
assert.for.year = function(for.year) {
  # for.year, if not NULL, must be a numeric scalar value;
  # assert_numeric/assert_scalar stop with a descriptive error otherwise
  # (presumably checkmate -- confirm against the package imports)
  assert_numeric(for.year, null.ok = TRUE)
  assert_scalar(for.year, null.ok = TRUE)
}
#' Check for numeric vector
#'
#' Ensure that the given argument is a numeric vector.
#'
#' @param arg Input argument to be checked.
#' @return Stops execution in case of failed check.
assert.numeric.vector = function(arg) {
  # must be numeric AND a vector; assert_numeric/assert_vector stop with a
  # descriptive error on violation (presumably checkmate -- confirm against
  # the package imports)
  assert_numeric(arg)
  assert_vector(arg)
}
#' Check for equal length inputs
#'
#' Ensure that all given arguments are vectors of the same length as
#' \code{arg0}.
#'
#' @param arg0 At least one input argument to be checked needs to be given
#' @param ... Other inputs whose length equality needs to be checked
#' @return Stops execution in case of failed check
assert.equal.length = function(arg0, ...) {
  args = list(...)
  l = length(arg0)
  for (i in args) {
    if (length(i) != l) {
      # report the mismatching lengths inside the error itself instead of
      # print()-debugging to stdout before stopping
      stop("Inputs must be of same length! Expected ", l,
           ", got ", length(i), ".")
    }
  }
}
|
# March 2019
# Joy Buongiorno
# JBuongior21@gmail.com
#
# Checking normality of each measurement group with the Shapiro-Wilk test.
# Each vector C.<a>.<b> holds one group of measurements (the naming scheme
# is presumably sample/core and depth -- TODO confirm with data provenance).
# For every group:
#   * shapiro.test() runs the Shapiro-Wilk normality test (p < 0.05
#     suggests a departure from normality),
#   * qqnorm() draws a normal Q-Q plot for visual inspection.
# NOTE(review): the test results rely on top-level auto-printing, so they
# only show when run interactively or with echo enabled.
C.1.8<-c(0.99, 1.5, 1.78, 3.58, 1.34, 1.57, 1.66, 2.09, 2.39)
shapiro.test(C.1.8)
qqnorm(C.1.8)
C.1.6<-c(2.51, 1.69, 2.08, 1.34, 1.45, 1.07, 0.82)
shapiro.test(C.1.6)
qqnorm(C.1.6)
C.5.6<-c(0.51, 0.96, 0.20, 0.76)
shapiro.test(C.5.6)
qqnorm(C.5.6)
C.5.3<-c(-0.27, -0.45, -0.18, 0.46, 0.20, 0.18, 0.85)
shapiro.test(C.5.3)
qqnorm(C.5.3)
C.10.3<-c(0.29, 0.27, 1.69, 1.79, 2.17, 0.66, 0.87)
shapiro.test(C.10.3)
qqnorm(C.10.3)
C.10.2<-c(1.85, 0.58, 0.99, 1.08, 1.19, 0.36, 0.45, 0.53, -.10, 1.14,-0.03, 1.24, 0.86, 0.50, -0.16, 0.19)
shapiro.test(C.10.2)
qqnorm(C.10.2)
C.13.2<-c(1.23, 1.48, 0.76, 0.06, 1.15, 2.16, 1.78, 0.63)
shapiro.test(C.13.2)
qqnorm(C.13.2)
C.13.6<-c(1.69, -0.04, 0.48, 0.95, 1.64, 1.13, 0.50, 0.75, 1.62, 0.39, 2.14, 1.04, 0.97, 1.38)
shapiro.test(C.13.6)
qqnorm(C.13.6)
C.20.5<-c(0.65, 1.46, 1.46, 1.99, 1.44, 0.71, 1.01, 1.04, 1.08, -0.1, 1.34, 1.18)
shapiro.test(C.20.5)
qqnorm(C.20.5)
C.20.4<-c(0.62, 1.37, 0.07, 1.54, 1.63, 1.37, 1.56, 1.75, 1.45, 1.08, 0.65, 1.53, 0.92, 0.04, 1.90, 1.00)
shapiro.test(C.20.4)
qqnorm(C.20.4)
C.28.8<-c(5.16, 5.71, 4.99, 5.13, 5.43)
shapiro.test(C.28.8)
qqnorm(C.28.8)
C.28.7<-c(4.44, 6.47, 6.03, 6.02, 4.87)
shapiro.test(C.28.7)
qqnorm(C.28.7)
| /Shapiro-Wilk Tests_oncolites.R | no_license | JBuongio/LagunaNegraOncolites | R | false | false | 1,295 | r | #March 2019
# Joy Buongiorno
# JBuongior21@gmail.com
#
# Checking normality of each measurement group with the Shapiro-Wilk test.
# Each vector C.<a>.<b> holds one group of measurements (the naming scheme
# is presumably sample/core and depth -- TODO confirm with data provenance).
# For every group:
#   * shapiro.test() runs the Shapiro-Wilk normality test (p < 0.05
#     suggests a departure from normality),
#   * qqnorm() draws a normal Q-Q plot for visual inspection.
# NOTE(review): the test results rely on top-level auto-printing, so they
# only show when run interactively or with echo enabled.
C.1.8<-c(0.99, 1.5, 1.78, 3.58, 1.34, 1.57, 1.66, 2.09, 2.39)
shapiro.test(C.1.8)
qqnorm(C.1.8)
C.1.6<-c(2.51, 1.69, 2.08, 1.34, 1.45, 1.07, 0.82)
shapiro.test(C.1.6)
qqnorm(C.1.6)
C.5.6<-c(0.51, 0.96, 0.20, 0.76)
shapiro.test(C.5.6)
qqnorm(C.5.6)
C.5.3<-c(-0.27, -0.45, -0.18, 0.46, 0.20, 0.18, 0.85)
shapiro.test(C.5.3)
qqnorm(C.5.3)
C.10.3<-c(0.29, 0.27, 1.69, 1.79, 2.17, 0.66, 0.87)
shapiro.test(C.10.3)
qqnorm(C.10.3)
C.10.2<-c(1.85, 0.58, 0.99, 1.08, 1.19, 0.36, 0.45, 0.53, -.10, 1.14,-0.03, 1.24, 0.86, 0.50, -0.16, 0.19)
shapiro.test(C.10.2)
qqnorm(C.10.2)
C.13.2<-c(1.23, 1.48, 0.76, 0.06, 1.15, 2.16, 1.78, 0.63)
shapiro.test(C.13.2)
qqnorm(C.13.2)
C.13.6<-c(1.69, -0.04, 0.48, 0.95, 1.64, 1.13, 0.50, 0.75, 1.62, 0.39, 2.14, 1.04, 0.97, 1.38)
shapiro.test(C.13.6)
qqnorm(C.13.6)
C.20.5<-c(0.65, 1.46, 1.46, 1.99, 1.44, 0.71, 1.01, 1.04, 1.08, -0.1, 1.34, 1.18)
shapiro.test(C.20.5)
qqnorm(C.20.5)
C.20.4<-c(0.62, 1.37, 0.07, 1.54, 1.63, 1.37, 1.56, 1.75, 1.45, 1.08, 0.65, 1.53, 0.92, 0.04, 1.90, 1.00)
shapiro.test(C.20.4)
qqnorm(C.20.4)
C.28.8<-c(5.16, 5.71, 4.99, 5.13, 5.43)
shapiro.test(C.28.8)
qqnorm(C.28.8)
C.28.7<-c(4.44, 6.47, 6.03, 6.02, 4.87)
shapiro.test(C.28.7)
qqnorm(C.28.7)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.