content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' @title Rename item attached files
#'
#' @description Renames files attached to an SB item.
#'
#'
#' @template manipulate_item
#' @param names List of names of files to rename
#' @param new_names List of new file names to use
#'
#' @examples \dontrun{
#'
#' names = c('file1.txt', 'file2.txt')
#' new_names = c('newname1.txt', 'newname2.txt')
#'
#' item_rename_files('sbid', names, new_names)
#'
#' }
#'
#' @export
item_rename_files = function(sb_id, names, new_names, ..., session=current_session()){
  # Rename files attached to a ScienceBase item: rewrite the `name` field of
  # the matching entries in the item's file list, then push the updated file
  # metadata back with item_update().
  if(length(names) != length(new_names)){
    stop('`names` and `new_names` must be identical length character vectors')
  }
  # Force a fresh download of the item so we operate on its current file list.
  it = item_get(sb_id, ..., session=session)
  # vapply guarantees a character vector even when the item has no attached
  # files (sapply would silently return an empty list in that case).
  name_list = vapply(it$files, function(x){x$name}, character(1))
  if(!all(names %in% name_list)){
    stop('`names` supplied does not match names attached to item')
  }
  # seq_along() is safe for zero-length input, unlike 1:length(names).
  for(i in seq_along(names)){
    indx = which(name_list == names[i])
    it$files[[indx]]$name = new_names[i]
  }
  item_update(it, info = list(files=it$files), ..., session=session)
} | /R/item_rename_files.R | permissive | dblodgett-usgs/sbtools | R | false | false | 1,059 | r | #' @title Rename item attached files
#'
#' @description Renames files attached to an SB item.
#'
#'
#' @template manipulate_item
#' @param names List of names of files to rename
#' @param new_names List of new file names to use
#'
#' @examples \dontrun{
#'
#' names = c('file1.txt', 'file2.txt')
#' new_names = c('newname1.txt', 'newname2.txt')
#'
#' item_rename_files('sbid', names, new_names)
#'
#' }
#'
#' @export
item_rename_files = function(sb_id, names, new_names, ..., session=current_session()){
if(length(names) != length(new_names)){
stop('`names` and `new_names` must be identical length character vectors')
}
#force download refresh of item
it = item_get(sb_id, ..., session=session)
name_list = sapply(it$files, FUN=function(x){x$name})
if(!all(names %in% name_list)){
stop('`names` supplied has does not match names attached to item')
}
for(i in 1:length(names)){
indx = which(name_list == names[i])
it$files[[indx]]$name = new_names[i]
}
item_update(it, info = list(files=it$files), ..., session=session)
} |
# This script begins the process of calculating
# power per window.
# power.R
# power_gather.R
# plot_unconditional_power.R
# The result is the probability of detecting
# a window at a specific alpha
library(dplyr)
library(tibble)
library(readr)
library(stringr)
# Open the SQLite database holding the null distributions of each statistic
# (create = F: the file must already exist) and get a lazy dplyr reference
# to its 'data' table.
# NOTE(review): `F` can be reassigned; `FALSE` would be safer -- confirm.
ndist_db = src_sqlite("nulldist.db",create=F)
ndist_data_table = tbl(ndist_db, 'data')
# Lower-tail quantile helpers: return the value of `x` below which the given
# fraction of observations falls, stripped of the names quantile() attaches.
lower05 <- function(x) {
  unname(quantile(x, probs = 0.05))
}
lower01 <- function(x) {
  unname(quantile(x, probs = 0.01))
}
lower001 <- function(x) {
  unname(quantile(x, probs = 0.001))
}
# Empirical critical values: the 5%, 1%, and 0.1% lower-tail quantiles of
# every statistic in the null-distribution table.
raw_dist <- collect(ndist_data_table)
# funs() is deprecated in dplyr; a named list of functions yields the same
# <col>_<fn> output names (e.g. tajd_lower05).
critical_vals <- raw_dist %>%
  summarise_all(list(lower05 = lower05, lower01 = lower01, lower001 = lower001))
files <- dir(path="../../mlocus_pickle/", pattern="*.genome_scan.db")
# Collect per-file results in a preallocated list and bind once at the end,
# instead of growing a data frame inside the loop (quadratic copying).
results <- vector("list", length(files))
for (i in seq_along(files))
{
  infile <- files[i]
  print(infile)
  dbname <- paste("../../mlocus_pickle/", infile, sep="")
  indb <- src_sqlite(dbname)
  indb_table <- tbl(indb, 'data')
  # File names encode parameters as dot-separated pieces:
  # <stem>.mu<int>.<frac>.opt<int>.<frac>... ; recover the numeric
  # mutation rate and optimum shift from them.
  fs <- str_split(infile, regex('\\.'))[[1]]
  fs[2] <- str_replace(fs[2], 'mu', '')
  fs[4] <- str_replace(fs[4], 'opt', '')
  mu <- as.numeric(paste(fs[2], fs[3], sep='.'))
  opt <- as.numeric(paste(fs[4], fs[5], sep='.'))
  raw_data <- collect(indb_table)
  # Per (generation, locus, window): count replicates whose statistic falls
  # below each empirical critical value, i.e. significant detections.
  # NOTE(review): `max_abx_nSL` looks like a typo for `max_abs_nSL`, but it
  # is used consistently, so it presumably matches the column name in the
  # genome-scan databases -- confirm before renaming.
  power_query <- raw_data %>% group_by(generation, locus, window) %>%
    summarise(tajd05 = sum(tajd <= critical_vals$tajd_lower05),
              tajd01 = sum(tajd <= critical_vals$tajd_lower01),
              tajd001 = sum(tajd <= critical_vals$tajd_lower001),
              hprime05 = sum(hprime <= critical_vals$hprime_lower05),
              hprime01 = sum(hprime <= critical_vals$hprime_lower01),
              hprime001 = sum(hprime <= critical_vals$hprime_lower001),
              max_abs_nSL05 = sum(max_abx_nSL <= critical_vals$max_abs_nSL_lower05),
              max_abs_nSL01 = sum(max_abx_nSL <= critical_vals$max_abs_nSL_lower01),
              max_abs_nSL001 = sum(max_abx_nSL <= critical_vals$max_abs_nSL_lower001)) %>%
    mutate(mu=mu, opt=opt)
  results[[i]] <- collect(power_query)
}
data <- bind_rows(results)
# Write the combined table, gzip-compressed.
of <- gzfile("nsig_per_window.txt.gz", "w")
write_delim(data, of)
close(of)
| /analysis/10_locus_genome_scan/power.R | no_license | molpopgen/qtrait_paper | R | false | false | 2,120 | r | # This script begins the process of calculating
# power per window.
# power.R
# power_gather.R
# plot_unconditional_power.R
# The result is the probability of detecting
# a window at a specific alpha
library(dplyr)
library(tibble)
library(readr)
library(stringr)
ndist_db = src_sqlite("nulldist.db",create=F)
ndist_data_table = tbl(ndist_db, 'data')
lower05 <- function(x)
{
return(as.numeric(quantile(x,0.05)))
}
lower01 <- function(x)
{
return(as.numeric(quantile(x,0.01)))
}
lower001 <- function(x)
{
return(as.numeric(quantile(x,0.001)))
}
raw_dist = collect(ndist_data_table)
critical_vals = raw_dist %>% summarise_all(funs(lower05,lower01,lower001))
files <- dir(path="../../mlocus_pickle/",pattern="*.genome_scan.db")
data = data.frame()
for (infile in files)
{
print(infile)
dbname = paste("../../mlocus_pickle/",infile,sep="")
indb = src_sqlite(dbname)
indb_table = tbl(indb,'data')
fs <- str_split(infile,regex('\\.'))[[1]]
fs[2]<-str_replace(fs[2],'mu','')
fs[4]<-str_replace(fs[4],'opt','')
mu <- as.numeric(paste(fs[2],fs[3],sep='.'))
opt <- as.numeric(paste(fs[4],fs[5],sep='.'))
raw_data = collect(indb_table)
power_query = raw_data %>% group_by(generation,locus,window) %>%
summarise(tajd05 = sum(tajd <= critical_vals$tajd_lower05),
tajd01 = sum(tajd <= critical_vals$tajd_lower01),
tajd001 = sum(tajd <= critical_vals$tajd_lower001),
hprime05 = sum(hprime <= critical_vals$hprime_lower05),
hprime01 = sum(hprime <= critical_vals$hprime_lower01),
hprime001 = sum(hprime <= critical_vals$hprime_lower001),
max_abs_nSL05 = sum(max_abx_nSL <= critical_vals$max_abs_nSL_lower05),
max_abs_nSL01 = sum(max_abx_nSL <= critical_vals$max_abs_nSL_lower01),
max_abs_nSL001 = sum(max_abx_nSL <= critical_vals$max_abs_nSL_lower001)) %>%
mutate(mu=mu,opt=opt)
power_results = collect(power_query)
data = bind_rows(data,power_results)
}
of = gzfile("nsig_per_window.txt.gz","w")
write_delim(data,of)
close(of)
|
# NOTE(review): an absolute setwd() makes the script machine-specific;
# prefer running from the project root with relative paths -- confirm.
setwd("/Users/mgonzalez/Research/01_PhD/01_ChIPseq/05_LoessNormalization")
library(IDPmisc)  # NaRV.omit(): drop rows containing NA/NaN/Inf
library(affy)     # normalize.loess()
library(MASS)
library(ggplot2)
library(grid)
library(viridis)  # viridis()/plasma() colour scales
# Load data
# H3K27me3 ChIP levels per 2 kb bin across chr19, one BED file per cell
# type in differentiation order: mESC, MES, CP, CM, NPC, CN (c01-c06).
c01<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_mESC_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_mESC.bed")
c02<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_MES_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_MES.bed")
c03<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_CP_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_CP.bed")
c04<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_CM_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_CM.bed")
c05<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_NPC_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_NPC.bed")
c06<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_CN_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_CN.bed")
# Same mark quantified over basal promoter (BP) regions, same six cell
# types (c07-c12).
c07<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_mESC_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_mESC.bed")
c08<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_MES_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_MES.bed")
c09<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_CP_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_CP.bed")
c10<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_CM_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_CM.bed")
c11<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_NPC_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_NPC.bed")
c12<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_CN_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_CN.bed")
# And over putative enhancer (PE) regions linked by Hi-C (FDR0, P100),
# same six cell types (c13-c18).
c13<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_mESC_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_mESC.bed")
c14<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_MES_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_MES.bed")
c15<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CP_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CP.bed")
c16<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CM_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CM.bed")
c17<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_NPC_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_NPC.bed")
c18<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CN_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CN.bed")
# Obtain data frame for whole genome
# Column 5 of each BED file is the ChIP signal; the rows are the same
# regions in every file, so columns are combined positionally.
H3K27me3_ESC<-c01[,5]
H3K27me3_MES<-c02[,5]
H3K27me3_CP<-c03[,5]
H3K27me3_CM<-c04[,5]
H3K27me3_NPC<-c05[,5]
H3K27me3_CN<-c06[,5]
# "chrom start end" strings act as row identifiers.
regions<-paste(c01[,1],c01[,2],c01[,3])
H3K27me3<-data.frame(H3K27me3_ESC,H3K27me3_MES,H3K27me3_CP,H3K27me3_CM,H3K27me3_NPC,H3K27me3_CN)
row.names(H3K27me3)<-regions
# Keep only regions with non-zero signal in all six cell types. The filter
# uses the free-standing vectors, which hold the same values as the columns.
H3K27me3<-H3K27me3[H3K27me3_ESC!=0 & H3K27me3_MES!=0 & H3K27me3_CP!=0 & H3K27me3_CM!=0 & H3K27me3_NPC!=0 & H3K27me3_CN!=0,]
# Drop rows containing NA/NaN/Inf (IDPmisc::NaRV.omit); applied again on the
# matrix below, which is redundant but harmless.
H3K27me3<-NaRV.omit(H3K27me3)
m1<-as.matrix(H3K27me3)
m1<-NaRV.omit(m1)
row.m1<-nrow(m1)
# Obtain data frame for promoters
# Same construction for the basal-promoter (BP) files c07-c12.
H3K27me3_ESC<-c07[,5]
H3K27me3_MES<-c08[,5]
H3K27me3_CP<-c09[,5]
H3K27me3_CM<-c10[,5]
H3K27me3_NPC<-c11[,5]
H3K27me3_CN<-c12[,5]
# Coordinates taken from c12; all BP files share the same region order.
regions<-paste(c12[,1],c12[,2],c12[,3])
H3K27me3<-data.frame(H3K27me3_ESC,H3K27me3_MES,H3K27me3_CP,H3K27me3_CM,H3K27me3_NPC,H3K27me3_CN)
row.names(H3K27me3)<-regions
H3K27me3<-H3K27me3[H3K27me3_ESC!=0 & H3K27me3_MES!=0 & H3K27me3_CP!=0 & H3K27me3_CM!=0 & H3K27me3_NPC!=0 & H3K27me3_CN!=0,]
H3K27me3<-NaRV.omit(H3K27me3)
m2<-as.matrix(H3K27me3)
m2<-NaRV.omit(m2)
row.m2<-nrow(m2)
# Obtain data frame for enhancers
# Same construction for the putative-enhancer (PE) files c13-c18.
H3K27me3_ESC<-c13[,5]
H3K27me3_MES<-c14[,5]
H3K27me3_CP<-c15[,5]
H3K27me3_CM<-c16[,5]
H3K27me3_NPC<-c17[,5]
H3K27me3_CN<-c18[,5]
regions<-paste(c13[,1],c13[,2],c13[,3])
H3K27me3<-data.frame(H3K27me3_ESC,H3K27me3_MES,H3K27me3_CP,H3K27me3_CM,H3K27me3_NPC,H3K27me3_CN)
row.names(H3K27me3)<-regions
H3K27me3<-H3K27me3[H3K27me3_ESC!=0 & H3K27me3_MES!=0 & H3K27me3_CP!=0 & H3K27me3_CM!=0 & H3K27me3_NPC!=0 & H3K27me3_CN!=0,]
H3K27me3<-NaRV.omit(H3K27me3)
m3<-as.matrix(H3K27me3)
m3<-NaRV.omit(m3)
row.m3<-nrow(m3)
# Normalization
# Stack bins (m1), promoters (m2), and enhancers (m3) into one matrix so a
# single loess fit normalizes all three region sets consistently.
m<-rbind(m1,m2,m3)
row.m<-nrow(m)
# Fit the loess curves on the genome-wide bins only (rows 1..row.m1) and
# apply the resulting normalization to every row.
s <- seq(1,row.m1)
#s <- row.names(m1)
# Seed for reproducibility (normalize.loess subsamples internally).
set.seed(123)
mn <- normalize.loess(m,subset=s)
# Split the normalized matrix back into its three region sets.
mn1<-mn[s,]
p<-seq(row.m1+1,row.m1+row.m2)
mn2<-mn[p,]
e<-seq(row.m1+row.m2+1,row.m)
mn3<-mn[e,]
write.table(mn1,"NormFiles/normH3K27me3_chr19_bins.txt")
write.table(mn2,"NormFiles/normH3K27me3_chr19_BP.txt")
write.table(mn3,"NormFiles/normH3K27me3_chr19_PE.txt")
# MA plot before norm ggplot
# M = log2 ratio of each later cell type vs ESC (0.1 pseudocount avoids
# log of zero); one faceted panel per cell type.
pdf("plots/MAplot_H3K27me3_chr19_HiC_FDR0_P100_beforeNorm-ggplot.pdf",width = 15, height = 10)
M<-log2(c((m1[,2]+0.1)/(m1[,1]+0.1),(m1[,3]+0.1)/(m1[,1]+0.1),(m1[,4]+0.1)/(m1[,1]+0.1),(m1[,5]+0.1)/(m1[,1]+0.1),(m1[,6]+0.1)/(m1[,1]+0.1)))
# NOTE(review): A is computed as log2(x*y/2), not the conventional
# 0.5*log2(x*y); this shifts/scales the x-axis but not the trend -- confirm
# this is intentional.
A<-log2(c((m1[,2]+0.1)*(m1[,1]+0.1)/2,(m1[,3]+0.1)*(m1[,1]+0.1)/2,(m1[,4]+0.1)*(m1[,1]+0.1)/2,(m1[,5]+0.1)*(m1[,1]+0.1)/2,(m1[,6]+0.1)*(m1[,1]+0.1)/2))
Cell<-c(rep("MES",nrow(m1)),rep("CP",nrow(m1)),rep("CM",nrow(m1)),rep("NPC",nrow(m1)),rep("CN",nrow(m1)))
# NOTE(review): `c` shadows base::c() from here on; a different name would
# be safer.
c<-data.frame(M,A,Cell)
c$Cell = factor(c$Cell, levels = c("MES","CP","CM","NPC","CN"))
ggplot(c) +
  geom_hex(aes(A, M), bins = 30) +
  scale_fill_gradientn("", colours = rev(viridis(300)))+
  geom_smooth(aes(A, M),method = "loess", level=0.5)+
  geom_hline(yintercept = 0,linetype="dashed")+
  labs(title="H3K27me3 MA plot before normalization",x="A", y = "M") +
  theme_bw() +
  theme(legend.position="right",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
        legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
        strip.text = element_text(size=20,face="bold")) +
  facet_wrap(~ Cell, scales="free")
dev.off()
# MA plot after norm ggplot
# Same plot on the loess-normalized matrix mn1; values rounded to 2 dp.
pdf("plots/MAplot_H3K27me3_chr19_HiC_FDR0_P100_afterNorm-ggplot.pdf",width = 15, height = 10)
M<-round(log2(c((mn1[,2]+0.1)/(mn1[,1]+0.1),(mn1[,3]+0.1)/(mn1[,1]+0.1),(mn1[,4]+0.1)/(mn1[,1]+0.1),(mn1[,5]+0.1)/(mn1[,1]+0.1),(mn1[,6]+0.1)/(mn1[,1]+0.1))),2)
A<-round(log2(c((mn1[,2]+0.1)*(mn1[,1]+0.1)/2,(mn1[,3]+0.1)*(mn1[,1]+0.1)/2,(mn1[,4]+0.1)*(mn1[,1]+0.1)/2,(mn1[,5]+0.1)*(mn1[,1]+0.1)/2,(mn1[,6]+0.1)*(mn1[,1]+0.1)/2)),2)
Cell<-c(rep("MES",nrow(mn1)),rep("CP",nrow(mn1)),rep("CM",nrow(mn1)),rep("NPC",nrow(mn1)),rep("CN",nrow(mn1)))
c<-data.frame(M,A,Cell)
c$Cell = factor(c$Cell, levels = c("MES","CP","CM","NPC","CN"))
ggplot(c) +
  geom_hex(aes(A, M), bins = 30) +
  scale_fill_gradientn("", colours = rev(viridis(300)))+
  geom_smooth(aes(A, M),method = "loess", level=0.5)+
  geom_hline(yintercept = 0,linetype="dashed")+
  labs(title="H3K27me3 MA plot after normalization",x="A", y = "M") +
  theme_bw() +
  theme(legend.position="right",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
        legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
        strip.text = element_text(size=20,face="bold")) +
  facet_wrap(~ Cell, scales="free")
dev.off()
# Boxplots
# Bins
# Side-by-side boxplots of log2 signal per cell type, faceted
# before/after normalization, for the genome-wide bins (m1/mn1).
Levels<-c(log2(m1[,1]),log2(m1[,2]),log2(m1[,3]),log2(m1[,4]),log2(m1[,5]),log2(m1[,6]),
          log2(mn1[,1]),log2(mn1[,2]),log2(mn1[,3]),log2(mn1[,4]),log2(mn1[,5]),log2(mn1[,6]))
Cell<-c(rep("ESC",nrow(m1)),rep("MES",nrow(m1)),rep("CP",nrow(m1)),rep("CM",nrow(m1)),rep("NPC",nrow(m1)),rep("CN",nrow(m1)),
        rep("ESC",nrow(mn1)),rep("MES",nrow(mn1)),rep("CP",nrow(mn1)),rep("CM",nrow(mn1)),rep("NPC",nrow(mn1)),rep("CN",nrow(mn1)))
Norm<-c(rep("Before normalization",nrow(m1)*6),rep("After normalization",nrow(mn1)*6))
# NOTE(review): `c` shadows base::c() within this section.
c<-data.frame(Levels,Cell,Norm)
c$Cell = factor(c$Cell, levels = c("ESC","MES","CP","CM","NPC","CN"))
c$Norm = factor(c$Norm, levels = c("Before normalization","After normalization"))
pdf("plots/Histones_normH3K27me3_chr19_bins.pdf",width = 12, height = 10)
ggplot(c, aes(x=Cell, y=Levels,fill=Cell)) +
  #geom_violin(position=position_dodge(1),lwd=1) +
  scale_fill_manual(values=plasma(n=6)) +
  geom_boxplot(width=0.4,position=position_dodge(1),outlier.size=-1,show.legend = FALSE,lwd=1,colour="black") +
  labs(title="Histone mark levels before and after normalization (bins)",x="", y = "log2(Histone mark + 0.1)") +
  theme(legend.position="bottom",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
        legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
        strip.text = element_text(size=20,face="bold")) +
  facet_wrap(~ Norm, scales="free")
dev.off()
# Promoters
# Same plot for the basal-promoter regions (m2/mn2).
Levels<-c(log2(m2[,1]),log2(m2[,2]),log2(m2[,3]),log2(m2[,4]),log2(m2[,5]),log2(m2[,6]),
          log2(mn2[,1]),log2(mn2[,2]),log2(mn2[,3]),log2(mn2[,4]),log2(mn2[,5]),log2(mn2[,6]))
Cell<-c(rep("ESC",nrow(m2)),rep("MES",nrow(m2)),rep("CP",nrow(m2)),rep("CM",nrow(m2)),rep("NPC",nrow(m2)),rep("CN",nrow(m2)),
        rep("ESC",nrow(mn2)),rep("MES",nrow(mn2)),rep("CP",nrow(mn2)),rep("CM",nrow(mn2)),rep("NPC",nrow(mn2)),rep("CN",nrow(mn2)))
Norm<-c(rep("Before normalization",nrow(m2)*6),rep("After normalization",nrow(mn2)*6))
c<-data.frame(Levels,Cell,Norm)
c$Cell = factor(c$Cell, levels = c("ESC","MES","CP","CM","NPC","CN"))
c$Norm = factor(c$Norm, levels = c("Before normalization","After normalization"))
pdf("plots/Histones_normH3K27me3_chr19_BP.pdf",width = 12, height = 10)
ggplot(c, aes(x=Cell, y=Levels,fill=Cell)) +
  #geom_violin(position=position_dodge(1),lwd=1) +
  scale_fill_manual(values=plasma(n=6)) +
  geom_boxplot(width=0.4,position=position_dodge(1),outlier.size=-1,show.legend = FALSE,lwd=1,colour="black") +
  labs(title="Histone mark levels before and after normalization (BP)",x="", y = "log2(Histone mark + 0.1)") +
  theme(legend.position="bottom",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
        legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
        strip.text = element_text(size=20,face="bold")) +
  facet_wrap(~ Norm, scales="free")
dev.off()
# Enhancers
# Same plot for the putative-enhancer regions (m3/mn3).
Levels<-c(log2(m3[,1]),log2(m3[,2]),log2(m3[,3]),log2(m3[,4]),log2(m3[,5]),log2(m3[,6]),
          log2(mn3[,1]),log2(mn3[,2]),log2(mn3[,3]),log2(mn3[,4]),log2(mn3[,5]),log2(mn3[,6]))
Cell<-c(rep("ESC",nrow(m3)),rep("MES",nrow(m3)),rep("CP",nrow(m3)),rep("CM",nrow(m3)),rep("NPC",nrow(m3)),rep("CN",nrow(m3)),
        rep("ESC",nrow(mn3)),rep("MES",nrow(mn3)),rep("CP",nrow(mn3)),rep("CM",nrow(mn3)),rep("NPC",nrow(mn3)),rep("CN",nrow(mn3)))
Norm<-c(rep("Before normalization",nrow(m3)*6),rep("After normalization",nrow(mn3)*6))
c<-data.frame(Levels,Cell,Norm)
c$Cell = factor(c$Cell, levels = c("ESC","MES","CP","CM","NPC","CN"))
c$Norm = factor(c$Norm, levels = c("Before normalization","After normalization"))
pdf("plots/Histones_normH3K27me3_chr19_PE.pdf",width = 12, height = 10)
ggplot(c, aes(x=Cell, y=Levels,fill=Cell)) +
  #geom_violin(position=position_dodge(1),lwd=1) +
  scale_fill_manual(values=plasma(n=6)) +
  geom_boxplot(width=0.4,position=position_dodge(1),outlier.size=-1,show.legend = FALSE,lwd=1,colour="black") +
  labs(title="Histone mark levels before and after normalization (PE)",x="", y = "log2(Histone mark + 0.1)") +
  theme(legend.position="bottom",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
        legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
        strip.text = element_text(size=20,face="bold")) +
  facet_wrap(~ Norm, scales="free")
dev.off()
| /H3K27me3_LoessNormalization_example.R | no_license | margonram/Scripts | R | false | false | 12,066 | r | setwd("/Users/mgonzalez/Research/01_PhD/01_ChIPseq/05_LoessNormalization")
library(IDPmisc)
library(affy)
library(MASS)
library(ggplot2)
library(grid)
library(viridis)
# Load data
c01<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_mESC_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_mESC.bed")
c02<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_MES_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_MES.bed")
c03<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_CP_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_CP.bed")
c04<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_CM_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_CM.bed")
c05<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_NPC_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_NPC.bed")
c06<-read.table("03_ChIPlevelsWholeGenome/mm10_2000_bin_H3K27me3_chr19_CN_recoverChIPlevels/PEAKsignal_mm10_2000_bin_H3K27me3_chr19_CN.bed")
c07<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_mESC_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_mESC.bed")
c08<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_MES_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_MES.bed")
c09<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_CP_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_CP.bed")
c10<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_CM_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_CM.bed")
c11<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_NPC_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_NPC.bed")
c12<-read.table("04_ChIPlevelsRegions/mESC_9_BP_clean_H3K27me3_chr19_CN_recoverChIPlevels/PEAKsignal_mESC_9_BP_clean_H3K27me3_chr19_CN.bed")
c13<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_mESC_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_mESC.bed")
c14<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_MES_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_MES.bed")
c15<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CP_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CP.bed")
c16<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CM_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CM.bed")
c17<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_NPC_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_NPC.bed")
c18<-read.table("04_ChIPlevelsRegions/mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CN_recoverChIPlevels/PEAKsignal_mESC_9_PE_clean_HiC_FDR0_P100_H3K27me3_chr19_CN.bed")
# Obtain data frame for whole genome
H3K27me3_ESC<-c01[,5]
H3K27me3_MES<-c02[,5]
H3K27me3_CP<-c03[,5]
H3K27me3_CM<-c04[,5]
H3K27me3_NPC<-c05[,5]
H3K27me3_CN<-c06[,5]
regions<-paste(c01[,1],c01[,2],c01[,3])
H3K27me3<-data.frame(H3K27me3_ESC,H3K27me3_MES,H3K27me3_CP,H3K27me3_CM,H3K27me3_NPC,H3K27me3_CN)
row.names(H3K27me3)<-regions
H3K27me3<-H3K27me3[H3K27me3_ESC!=0 & H3K27me3_MES!=0 & H3K27me3_CP!=0 & H3K27me3_CM!=0 & H3K27me3_NPC!=0 & H3K27me3_CN!=0,]
H3K27me3<-NaRV.omit(H3K27me3)
m1<-as.matrix(H3K27me3)
m1<-NaRV.omit(m1)
row.m1<-nrow(m1)
# Obtain data frame for promoters
H3K27me3_ESC<-c07[,5]
H3K27me3_MES<-c08[,5]
H3K27me3_CP<-c09[,5]
H3K27me3_CM<-c10[,5]
H3K27me3_NPC<-c11[,5]
H3K27me3_CN<-c12[,5]
regions<-paste(c12[,1],c12[,2],c12[,3])
H3K27me3<-data.frame(H3K27me3_ESC,H3K27me3_MES,H3K27me3_CP,H3K27me3_CM,H3K27me3_NPC,H3K27me3_CN)
row.names(H3K27me3)<-regions
H3K27me3<-H3K27me3[H3K27me3_ESC!=0 & H3K27me3_MES!=0 & H3K27me3_CP!=0 & H3K27me3_CM!=0 & H3K27me3_NPC!=0 & H3K27me3_CN!=0,]
H3K27me3<-NaRV.omit(H3K27me3)
m2<-as.matrix(H3K27me3)
m2<-NaRV.omit(m2)
row.m2<-nrow(m2)
# Obtain data frame for enhancers
H3K27me3_ESC<-c13[,5]
H3K27me3_MES<-c14[,5]
H3K27me3_CP<-c15[,5]
H3K27me3_CM<-c16[,5]
H3K27me3_NPC<-c17[,5]
H3K27me3_CN<-c18[,5]
regions<-paste(c13[,1],c13[,2],c13[,3])
H3K27me3<-data.frame(H3K27me3_ESC,H3K27me3_MES,H3K27me3_CP,H3K27me3_CM,H3K27me3_NPC,H3K27me3_CN)
row.names(H3K27me3)<-regions
H3K27me3<-H3K27me3[H3K27me3_ESC!=0 & H3K27me3_MES!=0 & H3K27me3_CP!=0 & H3K27me3_CM!=0 & H3K27me3_NPC!=0 & H3K27me3_CN!=0,]
H3K27me3<-NaRV.omit(H3K27me3)
m3<-as.matrix(H3K27me3)
m3<-NaRV.omit(m3)
row.m3<-nrow(m3)
# Normalization
m<-rbind(m1,m2,m3)
row.m<-nrow(m)
s <- seq(1,row.m1)
#s <- row.names(m1)
set.seed(123)
mn <- normalize.loess(m,subset=s)
mn1<-mn[s,]
p<-seq(row.m1+1,row.m1+row.m2)
mn2<-mn[p,]
e<-seq(row.m1+row.m2+1,row.m)
mn3<-mn[e,]
write.table(mn1,"NormFiles/normH3K27me3_chr19_bins.txt")
write.table(mn2,"NormFiles/normH3K27me3_chr19_BP.txt")
write.table(mn3,"NormFiles/normH3K27me3_chr19_PE.txt")
# MA plot before norm ggplot
pdf("plots/MAplot_H3K27me3_chr19_HiC_FDR0_P100_beforeNorm-ggplot.pdf",width = 15, height = 10)
M<-log2(c((m1[,2]+0.1)/(m1[,1]+0.1),(m1[,3]+0.1)/(m1[,1]+0.1),(m1[,4]+0.1)/(m1[,1]+0.1),(m1[,5]+0.1)/(m1[,1]+0.1),(m1[,6]+0.1)/(m1[,1]+0.1)))
A<-log2(c((m1[,2]+0.1)*(m1[,1]+0.1)/2,(m1[,3]+0.1)*(m1[,1]+0.1)/2,(m1[,4]+0.1)*(m1[,1]+0.1)/2,(m1[,5]+0.1)*(m1[,1]+0.1)/2,(m1[,6]+0.1)*(m1[,1]+0.1)/2))
Cell<-c(rep("MES",nrow(m1)),rep("CP",nrow(m1)),rep("CM",nrow(m1)),rep("NPC",nrow(m1)),rep("CN",nrow(m1)))
c<-data.frame(M,A,Cell)
c$Cell = factor(c$Cell, levels = c("MES","CP","CM","NPC","CN"))
ggplot(c) +
geom_hex(aes(A, M), bins = 30) +
scale_fill_gradientn("", colours = rev(viridis(300)))+
geom_smooth(aes(A, M),method = "loess", level=0.5)+
geom_hline(yintercept = 0,linetype="dashed")+
labs(title="H3K27me3 MA plot before normalization",x="A", y = "M") +
theme_bw() +
theme(legend.position="right",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
strip.text = element_text(size=20,face="bold")) +
facet_wrap(~ Cell, scales="free")
dev.off()
# MA plot after norm ggplot
pdf("plots/MAplot_H3K27me3_chr19_HiC_FDR0_P100_afterNorm-ggplot.pdf",width = 15, height = 10)
M<-round(log2(c((mn1[,2]+0.1)/(mn1[,1]+0.1),(mn1[,3]+0.1)/(mn1[,1]+0.1),(mn1[,4]+0.1)/(mn1[,1]+0.1),(mn1[,5]+0.1)/(mn1[,1]+0.1),(mn1[,6]+0.1)/(mn1[,1]+0.1))),2)
A<-round(log2(c((mn1[,2]+0.1)*(mn1[,1]+0.1)/2,(mn1[,3]+0.1)*(mn1[,1]+0.1)/2,(mn1[,4]+0.1)*(mn1[,1]+0.1)/2,(mn1[,5]+0.1)*(mn1[,1]+0.1)/2,(mn1[,6]+0.1)*(mn1[,1]+0.1)/2)),2)
Cell<-c(rep("MES",nrow(mn1)),rep("CP",nrow(mn1)),rep("CM",nrow(mn1)),rep("NPC",nrow(mn1)),rep("CN",nrow(mn1)))
c<-data.frame(M,A,Cell)
c$Cell = factor(c$Cell, levels = c("MES","CP","CM","NPC","CN"))
ggplot(c) +
geom_hex(aes(A, M), bins = 30) +
scale_fill_gradientn("", colours = rev(viridis(300)))+
geom_smooth(aes(A, M),method = "loess", level=0.5)+
geom_hline(yintercept = 0,linetype="dashed")+
labs(title="H3K27me3 MA plot after normalization",x="A", y = "M") +
theme_bw() +
theme(legend.position="right",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
strip.text = element_text(size=20,face="bold")) +
facet_wrap(~ Cell, scales="free")
dev.off()
# Boxplots
# Bins
Levels<-c(log2(m1[,1]),log2(m1[,2]),log2(m1[,3]),log2(m1[,4]),log2(m1[,5]),log2(m1[,6]),
log2(mn1[,1]),log2(mn1[,2]),log2(mn1[,3]),log2(mn1[,4]),log2(mn1[,5]),log2(mn1[,6]))
Cell<-c(rep("ESC",nrow(m1)),rep("MES",nrow(m1)),rep("CP",nrow(m1)),rep("CM",nrow(m1)),rep("NPC",nrow(m1)),rep("CN",nrow(m1)),
rep("ESC",nrow(mn1)),rep("MES",nrow(mn1)),rep("CP",nrow(mn1)),rep("CM",nrow(mn1)),rep("NPC",nrow(mn1)),rep("CN",nrow(mn1)))
Norm<-c(rep("Before normalization",nrow(m1)*6),rep("After normalization",nrow(mn1)*6))
c<-data.frame(Levels,Cell,Norm)
c$Cell = factor(c$Cell, levels = c("ESC","MES","CP","CM","NPC","CN"))
c$Norm = factor(c$Norm, levels = c("Before normalization","After normalization"))
pdf("plots/Histones_normH3K27me3_chr19_bins.pdf",width = 12, height = 10)
ggplot(c, aes(x=Cell, y=Levels,fill=Cell)) +
#geom_violin(position=position_dodge(1),lwd=1) +
scale_fill_manual(values=plasma(n=6)) +
geom_boxplot(width=0.4,position=position_dodge(1),outlier.size=-1,show.legend = FALSE,lwd=1,colour="black") +
labs(title="Histone mark levels before and after normalization (bins)",x="", y = "log2(Histone mark + 0.1)") +
theme(legend.position="bottom",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
strip.text = element_text(size=20,face="bold")) +
facet_wrap(~ Norm, scales="free")
dev.off()
# Promoters
Levels<-c(log2(m2[,1]),log2(m2[,2]),log2(m2[,3]),log2(m2[,4]),log2(m2[,5]),log2(m2[,6]),
log2(mn2[,1]),log2(mn2[,2]),log2(mn2[,3]),log2(mn2[,4]),log2(mn2[,5]),log2(mn2[,6]))
Cell<-c(rep("ESC",nrow(m2)),rep("MES",nrow(m2)),rep("CP",nrow(m2)),rep("CM",nrow(m2)),rep("NPC",nrow(m2)),rep("CN",nrow(m2)),
rep("ESC",nrow(mn2)),rep("MES",nrow(mn2)),rep("CP",nrow(mn2)),rep("CM",nrow(mn2)),rep("NPC",nrow(mn2)),rep("CN",nrow(mn2)))
Norm<-c(rep("Before normalization",nrow(m2)*6),rep("After normalization",nrow(mn2)*6))
c<-data.frame(Levels,Cell,Norm)
c$Cell = factor(c$Cell, levels = c("ESC","MES","CP","CM","NPC","CN"))
c$Norm = factor(c$Norm, levels = c("Before normalization","After normalization"))
pdf("plots/Histones_normH3K27me3_chr19_BP.pdf",width = 12, height = 10)
ggplot(c, aes(x=Cell, y=Levels,fill=Cell)) +
#geom_violin(position=position_dodge(1),lwd=1) +
scale_fill_manual(values=plasma(n=6)) +
geom_boxplot(width=0.4,position=position_dodge(1),outlier.size=-1,show.legend = FALSE,lwd=1,colour="black") +
labs(title="Histone mark levels before and after normalization (BP)",x="", y = "log2(Histone mark + 0.1)") +
theme(legend.position="bottom",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
strip.text = element_text(size=20,face="bold")) +
facet_wrap(~ Norm, scales="free")
dev.off()
# Enhancers
Levels<-c(log2(m3[,1]),log2(m3[,2]),log2(m3[,3]),log2(m3[,4]),log2(m3[,5]),log2(m3[,6]),
log2(mn3[,1]),log2(mn3[,2]),log2(mn3[,3]),log2(mn3[,4]),log2(mn3[,5]),log2(mn3[,6]))
Cell<-c(rep("ESC",nrow(m3)),rep("MES",nrow(m3)),rep("CP",nrow(m3)),rep("CM",nrow(m3)),rep("NPC",nrow(m3)),rep("CN",nrow(m3)),
rep("ESC",nrow(mn3)),rep("MES",nrow(mn3)),rep("CP",nrow(mn3)),rep("CM",nrow(mn3)),rep("NPC",nrow(mn3)),rep("CN",nrow(mn3)))
Norm<-c(rep("Before normalization",nrow(m3)*6),rep("After normalization",nrow(mn3)*6))
c<-data.frame(Levels,Cell,Norm)
c$Cell = factor(c$Cell, levels = c("ESC","MES","CP","CM","NPC","CN"))
c$Norm = factor(c$Norm, levels = c("Before normalization","After normalization"))
pdf("plots/Histones_normH3K27me3_chr19_PE.pdf",width = 12, height = 10)
ggplot(c, aes(x=Cell, y=Levels,fill=Cell)) +
#geom_violin(position=position_dodge(1),lwd=1) +
scale_fill_manual(values=plasma(n=6)) +
geom_boxplot(width=0.4,position=position_dodge(1),outlier.size=-1,show.legend = FALSE,lwd=1,colour="black") +
labs(title="Histone mark levels before and after normalization (PE)",x="", y = "log2(Histone mark + 0.1)") +
theme(legend.position="bottom",axis.text=element_text(size=20,face="bold"),axis.title=element_text(size=20,face="bold"),
legend.text=element_text(size=20),legend.title=element_text(size=20),plot.title = element_text(size=24,face="bold"),
strip.text = element_text(size=20,face="bold")) +
facet_wrap(~ Norm, scales="free")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/append_units.R
\name{append_units.default}
\alias{append_units.default}
\title{Append Units By Default}
\usage{
\method{append_units}{default}(
x,
...,
open = getOption("append_units_open", " ("),
close = getOption("append_units_close", ")"),
style = getOption("append_units_style", "plain")
)
}
\arguments{
\item{x}{object}
\item{...}{passed to \code{\link{as_latex}}, \code{\link{as_plotmath}}}
\item{open}{character to precede units}
\item{close}{character to follow units}
\item{style}{one of 'plain', 'latex', or 'plotmath'}
}
\value{
same class as x, with sub-class 'latex' or 'plotmath' depending on \code{style}
}
\description{
Units attribute is wrapped in \code{open} and
\code{close}, and appended to label.
If style is 'latex' or 'plotmath',
all elements are treated as spork
(\code{\link{as_spork}}) and coerced
to canonical form before concatenation.
}
\examples{
library(units)
library(magrittr)
x <- 1:10
attr(x, 'label') <- 'acceleration'
units(x) <- 'm/s^2'
y <- as_units('kg')
x \%>\% attr('label')
x \%>\% append_units \%>\% attr('label')
y \%>\% attr('label')
y \%>\% append_units \%>\% attr('label')
x \%>\% append_units(style = 'plain')
x \%>\% append_units(style = 'plotmath')
x \%>\% append_units(style = 'latex')
}
\seealso{
Other labels:
\code{\link{alias.data.frame}()},
\code{\link{append_units.data.frame}()},
\code{\link{append_units}()},
\code{\link{sub_units}()}
}
\concept{labels}
\keyword{internal}
| /man/append_units.default.Rd | no_license | jimsforks/yamlet | R | false | true | 1,529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/append_units.R
\name{append_units.default}
\alias{append_units.default}
\title{Append Units By Default}
\usage{
\method{append_units}{default}(
x,
...,
open = getOption("append_units_open", " ("),
close = getOption("append_units_close", ")"),
style = getOption("append_units_style", "plain")
)
}
\arguments{
\item{x}{object}
\item{...}{passed to \code{\link{as_latex}}, \code{\link{as_plotmath}}}
\item{open}{character to precede units}
\item{close}{character to follow units}
\item{style}{one of 'plain', 'latex', or 'plotmath'}
}
\value{
same class as x, with sub-class 'latex' or 'plotmath' depending on \code{style}
}
\description{
Units attribute is wrapped in \code{open} and
\code{close}, and appended to label.
If style is 'latex' or 'plotmath',
all elements are treated as spork
(\code{\link{as_spork}}) and coerced
to canonical form before concatenation.
}
\examples{
library(units)
library(magrittr)
x <- 1:10
attr(x, 'label') <- 'acceleration'
units(x) <- 'm/s^2'
y <- as_units('kg')
x \%>\% attr('label')
x \%>\% append_units \%>\% attr('label')
y \%>\% attr('label')
y \%>\% append_units \%>\% attr('label')
x \%>\% append_units(style = 'plain')
x \%>\% append_units(style = 'plotmath')
x \%>\% append_units(style = 'latex')
}
\seealso{
Other labels:
\code{\link{alias.data.frame}()},
\code{\link{append_units.data.frame}()},
\code{\link{append_units}()},
\code{\link{sub_units}()}
}
\concept{labels}
\keyword{internal}
|
## dplyr methods:
#group_map.sf <- function(.tbl, .f, ...) {
# st_as_sf(NextMethod()) # nocov
#}
# This is currently only used in `bind_rows()` and `bind_cols()`
# because sf overrides all default implementations
dplyr_reconstruct.sf = function(data, template) {
  geom_col = attr(template, "sf_column")
  if (inherits(template, "tbl_df"))
    data = dplyr::as_tibble(data)
  # Fall back to a bare data frame when the geometry column was dropped
  if (!geom_col %in% names(data))
    return(data)
  # Rebuild the sf object, carrying over CRS and precision from the template
  st_as_sf(data,
    sf_column_name = geom_col,
    crs = st_crs(template),
    precision = st_precision(template))
}
group_split.sf <- function(.tbl, ..., .keep = TRUE) {
  # Drop the "sf" class so dplyr's data-frame method runs, then restore
  # sf on every group that comes back.
  class(.tbl) = setdiff(class(.tbl), "sf")
  pieces = dplyr::group_split(.tbl, ..., .keep = .keep)
  lapply(pieces, st_as_sf)
}
#' Tidyverse methods for sf objects (remove .sf suffix!)
#'
#' Tidyverse methods for sf objects. Geometries are sticky, use \link{as.data.frame} to let \code{dplyr}'s own methods drop them. Use these methods without the .sf suffix and after loading the tidyverse package with the generic (or after loading package tidyverse).
#' @param .data data object of class \link{sf}
#' @param .dots see corresponding function in package \code{dplyr}
#' @param ... other arguments
#' @name tidyverse
#' @examples
#' library(dplyr)
#' nc = st_read(system.file("shape/nc.shp", package="sf"))
#' nc %>% filter(AREA > .1) %>% plot()
filter.sf <- function(.data, ..., .dots) {
  # save attribute-geometry relationships; dplyr's method would drop them
  agr = st_agr(.data)
  # strip "sf" so NextMethod() dispatches to dplyr's data.frame/tbl method
  class(.data) <- setdiff(class(.data), "sf")
  # .re_sf restores class "sf", the sf_column attribute and agr
  .re_sf(NextMethod(), sf_column_name = attr(.data, "sf_column"), agr)
}
#' @name tidyverse
#' @examples
#' # plot 10 smallest counties in grey:
#' st_geometry(nc) %>% plot()
#' nc %>% select(AREA) %>% arrange(AREA) %>% slice(1:10) %>% plot(add = TRUE, col = 'grey')
#' title("the ten counties with smallest area")
arrange.sf <- function(.data, ..., .dots) {
  # remember the active geometry column before handing off to dplyr
  sf_column_name = attr(.data, "sf_column")
  # strip "sf" so NextMethod() dispatches to dplyr's method, then restore
  class(.data) = setdiff(class(.data), "sf")
  st_as_sf(NextMethod(), sf_column_name = sf_column_name)
}
#' @name tidyverse
#' @param add see corresponding function in dplyr
#' @examples
#' nc$area_cl = cut(nc$AREA, c(0, .1, .12, .15, .25))
#' nc %>% group_by(area_cl) %>% class()
group_by.sf <- function(.data, ..., add = FALSE) {
  # remember the active geometry column; grouping args (incl. `add`) are
  # handled by dplyr's method via NextMethod()
  sf_column_name = attr(.data, "sf_column")
  class(.data) <- setdiff(class(.data), "sf")
  st_as_sf(NextMethod(), sf_column_name = sf_column_name)
}
#' @name tidyverse
ungroup.sf <- function(x, ...) {
  # drop grouping via dplyr's method, then restore the sf class and
  # active geometry column
  sf_column_name = attr(x, "sf_column")
  class(x) <- setdiff(class(x), "sf")
  st_as_sf(NextMethod(), sf_column_name = sf_column_name)
}
# Re-attach the "sf" class and its metadata to a bare data.frame `x`.
#
# @param x data.frame/tbl_df that is NOT of class "sf"
# @param sf_column_name name of the active geometry column
# @param agr named vector with attribute-geometry relationships; entries
#   for columns new to `x` become NA via the subsetting below
# @param geom optional sfc geometry to (re)assign; one element per row of x
# @return `x` as an object of class "sf"
.re_sf = function(x, sf_column_name, agr, geom = NULL) {
  stopifnot(!inherits(x, "sf"), !missing(sf_column_name), !missing(agr))
  # non-geometry attribute names; vapply is type-stable, unlike sapply,
  # which returns list() (and then errors on `!`) for zero-column input
  att = names(x)[!vapply(x, inherits, logical(1), what = "sfc")]
  agr = setNames(agr[att], att) # NA's new columns
  if (!is.null(geom)) {
    stopifnot(length(geom) == nrow(x))
    x[[ sf_column_name ]] = geom
  }
  structure(x,
    sf_column = sf_column_name,
    agr = agr,
    class = c("sf", class(x)))
}
#' @name tidyverse
#' @examples
#' nc2 <- nc %>% mutate(area10 = AREA/10)
mutate.sf <- function(.data, ..., .dots) {
  #st_as_sf(NextMethod(), sf_column_name = attr(.data, "sf_column"))
  # keep agr so attribute-geometry relationships survive the round trip
  agr = st_agr(.data)
  sf_column_name = attr(.data, "sf_column")
  # strip "sf" so NextMethod() dispatches to dplyr's method
  class(.data) <- setdiff(class(.data), "sf")
  .re_sf(NextMethod(), sf_column_name = sf_column_name, agr)
}
#' @name tidyverse
#' @examples
#' nc %>% transmute(AREA = AREA/10, geometry = geometry) %>% class()
#' nc %>% transmute(AREA = AREA/10) %>% class()
transmute.sf <- function(.data, ..., .dots) {
  sf_column_name = attr(.data, "sf_column")
  agr = st_agr(.data)
  # transmute drops all unmentioned columns, so capture the geometry now
  # and have .re_sf put it back afterwards
  geom = st_geometry(.data)
  class(.data) = setdiff(class(.data), "sf")
  .re_sf(NextMethod(), sf_column_name = sf_column_name, agr, geom)
}
#' @name tidyverse
#' @examples
#' nc %>% select(SID74, SID79) %>% names()
#' nc %>% select(SID74, SID79, geometry) %>% names()
#' nc %>% select(SID74, SID79) %>% class()
#' nc %>% select(SID74, SID79, geometry) %>% class()
#' @details \code{select} keeps the geometry regardless whether it is selected or not; to deselect it, first pipe through \code{as.data.frame} to let dplyr's own \code{select} drop it.
select.sf <- function(.data, ...) {
  if (!requireNamespace("tidyselect", quietly = TRUE))
    stop("tidyselect required: install that first") # nocov
  # resolve the selection to a named integer vector: values are column
  # positions in .data, names are the (possibly renamed) output names
  loc = tidyselect::eval_select(quote(c(...)), .data)
  sf_column = attr(.data, "sf_column")
  sf_column_loc = match(sf_column, names(.data))
  if (length(sf_column_loc) != 1 || is.na(sf_column_loc))
    stop("internal error: can't find sf column") # nocov
  agr = st_agr(.data)
  # keep agr entries only for the selected non-geometry columns
  vars = names(.data)[setdiff(loc, sf_column_loc)]
  new_agr = agr[vars]
  sf_column_loc_loc = match(sf_column_loc, loc)
  if (is.na(sf_column_loc_loc)) {
    # The sf column was subsetted out, select it back in
    loc = c(loc, sf_column_loc)
    names(loc)[[length(loc)]] = sf_column
  } else {
    # The sf column was not subsetted out but it might have been renamed
    sf_column = names(loc[sf_column_loc_loc])
  }
  # subset/rename on a bare data.frame, then restore class "sf" and agr
  ret = .data
  class(ret) = setdiff(class(ret), "sf")
  ret = ret[loc]
  names(ret) = names(loc)
  st_set_agr(st_as_sf(ret, sf_column_name = sf_column), new_agr)
}
#' @name tidyverse
#' @examples
#' nc2 <- nc %>% rename(area = AREA)
rename.sf <- function(.data, ...) {
  if (!requireNamespace("tidyselect", quietly = TRUE))
    stop("tidyselect required: install that first") # nocov
  # named integer vector: values are positions of the renamed columns in
  # .data, names are the new column names
  loc = tidyselect::eval_rename(quote(c(...)), .data)
  sf_column = attr(.data, "sf_column")
  sf_column_loc = match(sf_column, names(.data))
  if (length(sf_column_loc) != 1 || is.na(sf_column_loc))
    stop("internal error: can't find sf column") # nocov
  agr = st_agr(.data)
  # NOTE(review): agr_loc positions are relative to names(.data) minus the
  # geometry column, but are compared against `loc` (positions in the full
  # .data); these only line up when the geometry column comes last — confirm
  agr_loc = match(names(agr), setdiff(names(.data), sf_column))
  if (anyNA(agr_loc))
    stop("internal error: can't find `agr` columns") # nocov
  # carry the renames over to the agr names
  vars_loc = loc[loc %in% agr_loc]
  names(agr)[vars_loc] = names(vars_loc)
  # if the geometry column itself was renamed, track its new name
  sf_column_loc_loc = match(sf_column_loc, loc)
  if (!is.na(sf_column_loc_loc))
    sf_column = names(loc[sf_column_loc_loc])
  ret = .data
  class(ret) = setdiff(class(ret), "sf")
  names(ret)[loc] = names(loc)
  st_set_agr(st_as_sf(ret, sf_column_name = sf_column), agr)
}
#' @name tidyverse
#' @examples
#' nc %>% slice(1:2)
slice.sf <- function(.data, ..., .dots) {
  # strip "sf" so NextMethod() dispatches to dplyr's method, then restore
  class(.data) <- setdiff(class(.data), "sf")
  sf_column <- attr(.data, "sf_column")
  st_as_sf(NextMethod(), sf_column_name = sf_column)
}
#' @name tidyverse
#' @aliases summarise
#' @param do_union logical; in case \code{summary} does not create a geometry column, should geometries be created by unioning using \link{st_union}, or simply by combining using \link{st_combine}? Using \link{st_union} resolves internal boundaries, but in case of unioning points, this will likely change the order of the points; see Details.
#' @return an object of class \link{sf}
#' @details
#' In case one or more of the arguments (expressions) in the \code{summarise} call creates a geometry list-column, the first of these will be the (active) geometry of the returned object. If this is not the case, a geometry column is created, depending on the value of \code{do_union}.
#'
#' In case \code{do_union} is \code{FALSE}, \code{summarise} will simply combine geometries using \link{c.sfg}. When polygons sharing a boundary are combined, this leads to geometries that are invalid; see for instance \url{https://github.com/r-spatial/sf/issues/681}.
#' @examples
#' nc$area_cl = cut(nc$AREA, c(0, .1, .12, .15, .25))
#' nc.g <- nc %>% group_by(area_cl)
#' nc.g %>% summarise(mean(AREA))
#' nc.g %>% summarise(mean(AREA)) %>% plot(col = grey(3:6 / 7))
#' nc %>% as.data.frame %>% summarise(mean(AREA))
summarise.sf <- function(.data, ..., .dots, do_union = TRUE) {
  # capture sf metadata before dplyr's method strips/ignores it
  sf_column = attr(.data, "sf_column")
  precision = st_precision(.data)
  crs = st_crs(.data)
  geom = st_geometry(.data)
  class(.data) = setdiff(class(.data), "sf")
  ret = NextMethod()
  # a user-supplied do_union ends up as a column in ret; remove it again
  if (!missing(do_union))
    ret$do_union = NULL
  # if none of the summarise expressions produced a geometry list-column,
  # aggregate the original geometries per group ourselves:
  if (! any(sapply(ret, inherits, what = "sfc"))) {
    geom = if (inherits(.data, "grouped_df") || inherits(.data, "grouped_dt")) {
      if (!requireNamespace("dplyr", quietly = TRUE))
        stop("dplyr required: install that first") # nocov
      # group index per input row; aggregate geometries group-wise
      i = dplyr::group_indices(.data)
      # geom = st_geometry(.data)
      # union resolves internal boundaries; combine merely collects parts
      geom = if (do_union)
        lapply(sort(unique(i)), function(x) st_union(geom[i == x]))
      else
        lapply(sort(unique(i)), function(x) st_combine(geom[i == x]))
      geom = unlist(geom, recursive = FALSE)
      if (is.null(geom))
        geom = list() #676 #nocov
      # rebuild an sfc with the original crs and precision
      do.call(st_sfc, c(geom, crs = list(crs), precision = precision))
    } else { # single group:
      if (do_union)
        st_union(geom)
      else
        st_combine(geom)
    }
    ret[[ sf_column ]] = geom
  }
  # need to re-sort out the geometry column class now:
  st_as_sf(structure(ret, sf_column = NULL))
}
#' @name tidyverse
#' @param .keep_all see corresponding function in dplyr
#' @examples
#' nc[c(1:100, 1:10), ] %>% distinct() %>% nrow()
#' @details \code{distinct} gives distinct records for which all attributes and geometries are distinct; \link{st_equals} is used to find out which geometries are distinct.
distinct.sf <- function(.data, ..., .keep_all = FALSE) {
  sf_column = attr(.data, "sf_column")
  geom = st_geometry(.data)
  # for each geometry, the index of the first geometry it is spatially
  # equal to; rows with the same index have duplicate geometries
  eq = sapply(st_equals(.data), head, n = 1)
  # records with no st_equals match (empty result) are all mapped to the
  # first such record, so they are treated as mutually equal
  empties = which(lengths(eq) == 0)
  eq[ empties ] = empties[1] # first empty record
  # temporarily replace the geometry column by this integer surrogate so
  # dplyr::distinct can compare it like an ordinary column
  .data[[ sf_column ]] = unlist(eq)
  class(.data) = setdiff(class(.data), "sf")
  if (!requireNamespace("dplyr", quietly = TRUE))
    stop("dplyr required: install that first") # nocov
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  .data = dplyr::distinct(.data, ..., !! rlang::sym(sf_column), .keep_all = .keep_all)
  # restore the actual geometries for the surviving rows
  .data[[ sf_column ]] = geom[ .data[[ sf_column ]] ]
  st_as_sf(.data)
}
## tidyr methods:
#' @name tidyverse
#' @param data see original function docs
#' @param key see original function docs
#' @param value see original function docs
#' @param na.rm see original function docs
#' @param factor_key see original function docs
#' @examples
#' library(tidyr)
#' nc %>% select(SID74, SID79) %>% gather("VAR", "SID", -geometry) %>% summary()
gather.sf <- function(data, key, value, ..., na.rm = FALSE, convert = FALSE, factor_key = FALSE) {
  if (! requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  # capture key/value unevaluated so they can be forwarded to tidyr
  key = rlang::enquo(key)
  value = rlang::enquo(value)
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  # run tidyr's method on the bare data.frame, then restore class "sf"
  class(data) <- setdiff(class(data), "sf")
  st_as_sf(tidyr::gather(data, !!key, !!value, ...,
    na.rm = na.rm, convert = convert, factor_key = factor_key),
    sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param fill see original function docs
#' @param drop see original function docs
#' @examples
#' library(tidyr)
#' nc$row = 1:100 # needed for spread to work
#' nc %>% select(SID74, SID79, geometry, row) %>%
#'   gather("VAR", "SID", -geometry, -row) %>%
#'   spread(VAR, SID) %>% head()
spread.sf <- function(data, key, value, fill = NA, convert = FALSE, drop = TRUE,
    sep = NULL) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  # fix: check tidyr before calling tidyr::spread, consistent with gather.sf
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  # capture key/value unevaluated so they can be forwarded to tidyr
  key = rlang::enquo(key)
  value = rlang::enquo(value)
  # run tidyr's method on the bare data.frame, then restore class "sf"
  class(data) <- setdiff(class(data), "sf")
  st_as_sf(tidyr::spread(data, !!key, !!value, fill = fill, convert = convert,
    drop = drop, sep = sep), sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param tbl see original function docs
#' @param size see original function docs
#' @param replace see original function docs
#' @param weight see original function docs
#' @param .env see original function docs
sample_n.sf <- function(tbl, size, replace = FALSE, weight = NULL, .env = parent.frame()) {
  # delegate sampling to dplyr via NextMethod(), then restore the sf class
  # and active geometry column
  st_sf(NextMethod(), sf_column_name = attr(tbl, "sf_column"))
}
#' @name tidyverse
sample_frac.sf <- function(tbl, size = 1, replace = FALSE, weight = NULL, .env = parent.frame()) {
  # delegate sampling to dplyr via NextMethod(), then restore the sf class
  st_sf(NextMethod(), sf_column_name = attr(tbl, "sf_column"))
}
#' @name tidyverse
#' @examples
#' storms.sf = st_as_sf(storms, coords = c("long", "lat"), crs = 4326)
#' x <- storms.sf %>% group_by(name, year) %>% nest
#' trs = lapply(x$data, function(tr) st_cast(st_combine(tr), "LINESTRING")[[1]]) %>%
#'   st_sfc(crs = 4326)
#' trs.sf = st_sf(x[,1:2], trs)
#' plot(trs.sf["year"], axes = TRUE)
#' @details \code{nest} assumes that a simple feature geometry list-column was among the columns that were nested.
nest.sf = function (.data, ...) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  sf_column_name = attr(.data, "sf_column")
  class(.data) <- setdiff(class(.data), "sf")
  ret = tidyr::nest(.data, ...)
  # first list-column of the result: this is where the nested rows ended up
  # (vapply is type-stable, unlike sapply)
  lst = which(vapply(ret, inherits, logical(1), "list"))[1]
  if (is.na(lst))
    stop("internal error: no list-column found after nesting") # defensive
  # re-sf: each nested data.frame regains its geometry list-column
  ret[[lst]] = lapply(ret[[lst]], st_as_sf, sf_column_name = sf_column_name)
  ret
}
#' @name tidyverse
#' @param col see \link[tidyr]{separate}
#' @param into see \link[tidyr]{separate}
#' @param remove see \link[tidyr]{separate}
#' @param extra see \link[tidyr]{separate}
separate.sf = function(data, col, into, sep = "[^[:alnum:]]+", remove = TRUE,
    convert = FALSE, extra = "warn", fill = "warn", ...) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  # capture col unevaluated so it can be forwarded to tidyr
  col = rlang::enquo(col)
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  # run tidyr's method on the bare data.frame, then restore class "sf"
  class(data) <- setdiff(class(data), "sf")
  st_as_sf(tidyr::separate(data, !!col, into = into,
    sep = sep, remove = remove, convert = convert, extra = extra, fill = fill, ...),
    sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param sep see \link[tidyr]{separate_rows}
#' @param convert see \link[tidyr]{separate_rows}
separate_rows.sf <- function(data, ..., sep = "[^[:alnum:]]+", convert = FALSE) {
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  geom_col = attr(data, "sf_column")
  # run tidyr's method on the bare data.frame, then restore the sf class
  class(data) <- setdiff(class(data), "sf")
  expanded = tidyr::separate_rows(data, ..., sep = sep, convert = convert)
  st_as_sf(expanded, sf_column_name = geom_col)
}
#' @name tidyverse
unite.sf <- function(data, col, ..., sep = "_", remove = TRUE) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  # fix: check tidyr before calling tidyr::unite, consistent with the
  # other tidyr wrappers in this file
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  # capture col unevaluated so it can be forwarded to tidyr
  col = rlang::enquo(col)
  # run tidyr's method on the bare data.frame, then restore class "sf"
  class(data) <- setdiff(class(data), "sf")
  st_as_sf(tidyr::unite(data, !!col, ..., sep = sep, remove = remove),
    sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param .preserve see \link[tidyr:nest]{unnest}
unnest.sf = function(data, ..., .preserve = NULL) {
  # nocov start
  sf_column_name = attr(data, "sf_column", exact = TRUE)
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("unnest requires tidyr; install that first")
  # strip "sf" so NextMethod() dispatches to tidyr's method, then restore
  class(data) = setdiff(class(data), "sf")
  st_sf(NextMethod(), sf_column_name = sf_column_name)
  # nocov end
}
## tibble methods:
#' Summarize simple feature type for tibble
#'
#' Summarize simple feature type for tibble
#' @param x object of class sfc
#' @param ... ignored
#' @name tibble
#' @details see \link[pillar]{type_sum}
type_sum.sfc <- function(x, ...) {
  # drop the "sfc_" prefix: e.g. "sfc_POINT" -> "POINT"
  geom_type = substring(class(x)[1], 5)
  if (is.na(st_is_longlat(x)))
    return(geom_type)
  # geographic coordinates: append the CRS unit, e.g. "POINT [°]"
  unit_label = enc2utf8(as.character(units(st_crs(x, parameters = TRUE)$ud_unit)))
  paste0(geom_type, " [", unit_label, "]")
}
#' Summarize simple feature item for tibble
#'
#' Summarize simple feature item for tibble
#' @name tibble
obj_sum.sfc <- function(x) {
  # one abbreviated (width-15) textual summary per geometry
  vapply(x, format, character(1), width = 15L)
}
#' @name tibble
pillar_shaft.sfc <- function(x, ...) {
  # significant digits: pillar's option if set, else R's global default;
  # getOption() with `default` replaces the options()$... / NULL dance
  digits = getOption("pillar.sigfig", default = getOption("digits"))
  out <- format(x, width = 100, digits = digits, ...)
  # for single-type geometry columns the type is already shown in the
  # column header, so strip the leading "POINT ", "POLYGON ", ... tag
  if (!inherits(x, "sfc_GEOMETRY") && !inherits(x, "sfc_GEOMETRYCOLLECTION"))
    out <- sub("[A-Z]+ ", "", out)
  pillar::new_pillar_shaft_simple(out, align = "right", min_width = 25)
}
#nocov start
# Register all S3 methods defined in this file for packages that are only
# soft dependencies (dplyr, tidyr, pillar, spatstat.geom): methods are
# registered immediately when the target package is already loaded, and
# via an onLoad hook otherwise (see register_s3_method below).
register_all_s3_methods = function() {
  # dplyr_reconstruct() only exists as of dplyr 1.0 (dev version bound)
  has_dplyr_1.0 =
    requireNamespace("dplyr", quietly = TRUE) &&
    utils::packageVersion("dplyr") >= "0.8.99.9000"
  if (has_dplyr_1.0)
    register_s3_method("dplyr", "dplyr_reconstruct", "sf")
  register_s3_method("dplyr", "anti_join", "sf")
  register_s3_method("dplyr", "arrange", "sf")
  register_s3_method("dplyr", "distinct", "sf")
  register_s3_method("dplyr", "filter", "sf")
  register_s3_method("dplyr", "full_join", "sf")
  register_s3_method("dplyr", "group_by", "sf")
  # register_s3_method("dplyr", "group_map", "sf")
  register_s3_method("dplyr", "group_split", "sf")
  register_s3_method("dplyr", "inner_join", "sf")
  register_s3_method("dplyr", "left_join", "sf")
  register_s3_method("dplyr", "mutate", "sf")
  register_s3_method("dplyr", "rename", "sf")
  register_s3_method("dplyr", "right_join", "sf")
  register_s3_method("dplyr", "sample_frac", "sf")
  register_s3_method("dplyr", "sample_n", "sf")
  register_s3_method("dplyr", "select", "sf")
  register_s3_method("dplyr", "semi_join", "sf")
  register_s3_method("dplyr", "slice", "sf")
  register_s3_method("dplyr", "summarise", "sf")
  register_s3_method("dplyr", "transmute", "sf")
  register_s3_method("dplyr", "ungroup", "sf")
  register_s3_method("tidyr", "gather", "sf")
  register_s3_method("tidyr", "spread", "sf")
  register_s3_method("tidyr", "nest", "sf")
  register_s3_method("tidyr", "separate", "sf")
  register_s3_method("tidyr", "separate_rows", "sf")
  register_s3_method("tidyr", "unite", "sf")
  register_s3_method("tidyr", "unnest", "sf")
  # tibble/pillar printing support
  register_s3_method("pillar", "obj_sum", "sfc")
  register_s3_method("pillar", "type_sum", "sfc")
  register_s3_method("pillar", "pillar_shaft", "sfc")
  # spatstat conversions (methods defined elsewhere in the package)
  register_s3_method("spatstat.geom", "as.ppp", "sfc")
  register_s3_method("spatstat.geom", "as.ppp", "sf")
  register_s3_method("spatstat.geom", "as.owin", "POLYGON")
  register_s3_method("spatstat.geom", "as.owin", "MULTIPOLYGON")
  register_s3_method("spatstat.geom", "as.owin", "sfc_POLYGON")
  register_s3_method("spatstat.geom", "as.owin", "sfc_MULTIPOLYGON")
  register_s3_method("spatstat.geom", "as.owin", "sfc")
  register_s3_method("spatstat.geom", "as.owin", "sf")
  register_s3_method("spatstat.geom", "as.psp", "LINESTRING")
  register_s3_method("spatstat.geom", "as.psp", "MULTILINESTRING")
  register_s3_method("spatstat.geom", "as.psp", "sfc_MULTILINESTRING")
  register_s3_method("spatstat.geom", "as.psp", "sfc")
  register_s3_method("spatstat.geom", "as.psp", "sf")
  register_vctrs_methods()
}
# from: https://github.com/tidyverse/hms/blob/master/R/zzz.R
# Thu Apr 19 10:53:24 CEST 2018
register_s3_method <- function(pkg, generic, class, fun = NULL) {
  # validate: pkg, generic and class must each be a single string
  stopifnot(is.character(pkg), length(pkg) == 1)
  stopifnot(is.character(generic), length(generic) == 1)
  stopifnot(is.character(class), length(class) == 1)
  if (is.null(fun)) {
    # default: look up "generic.class" in the caller's environment
    fun <- get(paste0(generic, ".", class), envir = parent.frame())
  } else {
    stopifnot(is.function(fun))
  }
  do_register <- function(...) {
    registerS3method(generic, class, fun, envir = asNamespace(pkg))
  }
  if (pkg %in% loadedNamespaces()) {
    do_register()
  }
  # Always register hook in case package is later unloaded & reloaded
  setHook(packageEvent(pkg, "onLoad"), do_register)
}
# nocov end
| /sf/R/tidyverse.R | no_license | albrizre/spatstat.revdep | R | false | false | 19,266 | r | ## dplyr methods:
#group_map.sf <- function(.tbl, .f, ...) {
# st_as_sf(NextMethod()) # nocov
#}
# This is currently only used in `bind_rows()` and `bind_cols()`
# because sf overrides all default implementations
dplyr_reconstruct.sf = function(data, template) {
sfc_name = attr(template, "sf_column")
if (inherits(template, "tbl_df"))
data = dplyr::as_tibble(data)
# Return a bare data frame is the geometry column is no longer there
if (!sfc_name %in% names(data))
return(data)
prec = st_precision(template)
crs = st_crs(template)
st_as_sf(
data,
sf_column_name = sfc_name,
crs = crs,
precision = prec
)
}
group_split.sf <- function(.tbl, ..., .keep = TRUE) {
class(.tbl) = setdiff(class(.tbl), "sf")
lapply(dplyr::group_split(.tbl, ..., .keep = .keep), st_as_sf)
}
#' Tidyverse methods for sf objects (remove .sf suffix!)
#'
#' Tidyverse methods for sf objects. Geometries are sticky, use \link{as.data.frame} to let \code{dplyr}'s own methods drop them. Use these methods without the .sf suffix and after loading the tidyverse package with the generic (or after loading package tidyverse).
#' @param .data data object of class \link{sf}
#' @param .dots see corresponding function in package \code{dplyr}
#' @param ... other arguments
#' @name tidyverse
#' @examples
#' library(dplyr)
#' nc = st_read(system.file("shape/nc.shp", package="sf"))
#' nc %>% filter(AREA > .1) %>% plot()
filter.sf <- function(.data, ..., .dots) {
agr = st_agr(.data)
class(.data) <- setdiff(class(.data), "sf")
.re_sf(NextMethod(), sf_column_name = attr(.data, "sf_column"), agr)
}
#' @name tidyverse
#' @examples
#' # plot 10 smallest counties in grey:
#' st_geometry(nc) %>% plot()
#' nc %>% select(AREA) %>% arrange(AREA) %>% slice(1:10) %>% plot(add = TRUE, col = 'grey')
#' title("the ten counties with smallest area")
arrange.sf <- function(.data, ..., .dots) {
sf_column_name = attr(.data, "sf_column")
class(.data) = setdiff(class(.data), "sf")
st_as_sf(NextMethod(), sf_column_name = sf_column_name)
}
#' @name tidyverse
#' @param add see corresponding function in dplyr
#' @examples
#' nc$area_cl = cut(nc$AREA, c(0, .1, .12, .15, .25))
#' nc %>% group_by(area_cl) %>% class()
group_by.sf <- function(.data, ..., add = FALSE) {
sf_column_name = attr(.data, "sf_column")
class(.data) <- setdiff(class(.data), "sf")
st_as_sf(NextMethod(), sf_column_name = sf_column_name)
}
#' @name tidyverse
ungroup.sf <- function(x, ...) {
sf_column_name = attr(x, "sf_column")
class(x) <- setdiff(class(x), "sf")
st_as_sf(NextMethod(), sf_column_name = sf_column_name)
}
# Re-attach the "sf" class and its metadata to a bare data.frame `x`.
#
# @param x data.frame/tbl_df that is NOT of class "sf"
# @param sf_column_name name of the active geometry column
# @param agr named vector with attribute-geometry relationships; entries
#   for columns new to `x` become NA via the subsetting below
# @param geom optional sfc geometry to (re)assign; one element per row of x
# @return `x` as an object of class "sf"
.re_sf = function(x, sf_column_name, agr, geom = NULL) {
  stopifnot(!inherits(x, "sf"), !missing(sf_column_name), !missing(agr))
  # non-geometry attribute names; vapply is type-stable, unlike sapply,
  # which returns list() (and then errors on `!`) for zero-column input
  att = names(x)[!vapply(x, inherits, logical(1), what = "sfc")]
  agr = setNames(agr[att], att) # NA's new columns
  if (!is.null(geom)) {
    stopifnot(length(geom) == nrow(x))
    x[[ sf_column_name ]] = geom
  }
  structure(x,
    sf_column = sf_column_name,
    agr = agr,
    class = c("sf", class(x)))
}
#' @name tidyverse
#' @examples
#' nc2 <- nc %>% mutate(area10 = AREA/10)
mutate.sf <- function(.data, ..., .dots) {
#st_as_sf(NextMethod(), sf_column_name = attr(.data, "sf_column"))
agr = st_agr(.data)
sf_column_name = attr(.data, "sf_column")
class(.data) <- setdiff(class(.data), "sf")
.re_sf(NextMethod(), sf_column_name = sf_column_name, agr)
}
#' @name tidyverse
#' @examples
#' nc %>% transmute(AREA = AREA/10, geometry = geometry) %>% class()
#' nc %>% transmute(AREA = AREA/10) %>% class()
transmute.sf <- function(.data, ..., .dots) {
sf_column_name = attr(.data, "sf_column")
agr = st_agr(.data)
geom = st_geometry(.data)
class(.data) = setdiff(class(.data), "sf")
.re_sf(NextMethod(), sf_column_name = sf_column_name, agr, geom)
}
#' @name tidyverse
#' @examples
#' nc %>% select(SID74, SID79) %>% names()
#' nc %>% select(SID74, SID79, geometry) %>% names()
#' nc %>% select(SID74, SID79) %>% class()
#' nc %>% select(SID74, SID79, geometry) %>% class()
#' @details \code{select} keeps the geometry regardless whether it is selected or not; to deselect it, first pipe through \code{as.data.frame} to let dplyr's own \code{select} drop it.
select.sf <- function(.data, ...) {
if (!requireNamespace("tidyselect", quietly = TRUE))
stop("tidyselect required: install that first") # nocov
loc = tidyselect::eval_select(quote(c(...)), .data)
sf_column = attr(.data, "sf_column")
sf_column_loc = match(sf_column, names(.data))
if (length(sf_column_loc) != 1 || is.na(sf_column_loc))
stop("internal error: can't find sf column") # nocov
agr = st_agr(.data)
vars = names(.data)[setdiff(loc, sf_column_loc)]
new_agr = agr[vars]
sf_column_loc_loc = match(sf_column_loc, loc)
if (is.na(sf_column_loc_loc)) {
# The sf column was subsetted out, select it back in
loc = c(loc, sf_column_loc)
names(loc)[[length(loc)]] = sf_column
} else {
# The sf column was not subsetted out but it might have been renamed
sf_column = names(loc[sf_column_loc_loc])
}
ret = .data
class(ret) = setdiff(class(ret), "sf")
ret = ret[loc]
names(ret) = names(loc)
st_set_agr(st_as_sf(ret, sf_column_name = sf_column), new_agr)
}
#' @name tidyverse
#' @examples
#' nc2 <- nc %>% rename(area = AREA)
rename.sf <- function(.data, ...) {
if (!requireNamespace("tidyselect", quietly = TRUE))
stop("tidyselect required: install that first") # nocov
loc = tidyselect::eval_rename(quote(c(...)), .data)
sf_column = attr(.data, "sf_column")
sf_column_loc = match(sf_column, names(.data))
if (length(sf_column_loc) != 1 || is.na(sf_column_loc))
stop("internal error: can't find sf column") # nocov
agr = st_agr(.data)
agr_loc = match(names(agr), setdiff(names(.data), sf_column))
if (anyNA(agr_loc))
stop("internal error: can't find `agr` columns") # nocov
vars_loc = loc[loc %in% agr_loc]
names(agr)[vars_loc] = names(vars_loc)
sf_column_loc_loc = match(sf_column_loc, loc)
if (!is.na(sf_column_loc_loc))
sf_column = names(loc[sf_column_loc_loc])
ret = .data
class(ret) = setdiff(class(ret), "sf")
names(ret)[loc] = names(loc)
st_set_agr(st_as_sf(ret, sf_column_name = sf_column), agr)
}
#' @name tidyverse
#' @examples
#' nc %>% slice(1:2)
slice.sf <- function(.data, ..., .dots) {
class(.data) <- setdiff(class(.data), "sf")
sf_column <- attr(.data, "sf_column")
st_as_sf(NextMethod(), sf_column_name = sf_column)
}
#' @name tidyverse
#' @aliases summarise
#' @param do_union logical; in case \code{summary} does not create a geometry column, should geometries be created by unioning using \link{st_union}, or simply by combining using \link{st_combine}? Using \link{st_union} resolves internal boundaries, but in case of unioning points, this will likely change the order of the points; see Details.
#' @return an object of class \link{sf}
#' @details
#' In case one or more of the arguments (expressions) in the \code{summarise} call creates a geometry list-column, the first of these will be the (active) geometry of the returned object. If this is not the case, a geometry column is created, depending on the value of \code{do_union}.
#'
#' In case \code{do_union} is \code{FALSE}, \code{summarise} will simply combine geometries using \link{c.sfg}. When polygons sharing a boundary are combined, this leads to geometries that are invalid; see for instance \url{https://github.com/r-spatial/sf/issues/681}.
#' @examples
#' nc$area_cl = cut(nc$AREA, c(0, .1, .12, .15, .25))
#' nc.g <- nc %>% group_by(area_cl)
#' nc.g %>% summarise(mean(AREA))
#' nc.g %>% summarise(mean(AREA)) %>% plot(col = grey(3:6 / 7))
#' nc %>% as.data.frame %>% summarise(mean(AREA))
summarise.sf <- function(.data, ..., .dots, do_union = TRUE) {
sf_column = attr(.data, "sf_column")
precision = st_precision(.data)
crs = st_crs(.data)
geom = st_geometry(.data)
class(.data) = setdiff(class(.data), "sf")
ret = NextMethod()
if (!missing(do_union))
ret$do_union = NULL
if (! any(sapply(ret, inherits, what = "sfc"))) {
geom = if (inherits(.data, "grouped_df") || inherits(.data, "grouped_dt")) {
if (!requireNamespace("dplyr", quietly = TRUE))
stop("dplyr required: install that first") # nocov
i = dplyr::group_indices(.data)
# geom = st_geometry(.data)
geom = if (do_union)
lapply(sort(unique(i)), function(x) st_union(geom[i == x]))
else
lapply(sort(unique(i)), function(x) st_combine(geom[i == x]))
geom = unlist(geom, recursive = FALSE)
if (is.null(geom))
geom = list() #676 #nocov
do.call(st_sfc, c(geom, crs = list(crs), precision = precision))
} else { # single group:
if (do_union)
st_union(geom)
else
st_combine(geom)
}
ret[[ sf_column ]] = geom
}
# need to re-sort out the geometry column class now:
st_as_sf(structure(ret, sf_column = NULL))
}
#' @name tidyverse
#' @param .keep_all see corresponding function in dplyr
#' @examples
#' nc[c(1:100, 1:10), ] %>% distinct() %>% nrow()
#' @details \code{distinct} gives distinct records for which all attributes and geometries are distinct; \link{st_equals} is used to find out which geometries are distinct.
distinct.sf <- function(.data, ..., .keep_all = FALSE) {
sf_column = attr(.data, "sf_column")
geom = st_geometry(.data)
eq = sapply(st_equals(.data), head, n = 1)
empties = which(lengths(eq) == 0)
eq[ empties ] = empties[1] # first empty record
.data[[ sf_column ]] = unlist(eq)
class(.data) = setdiff(class(.data), "sf")
if (!requireNamespace("dplyr", quietly = TRUE))
stop("dplyr required: install that first") # nocov
if (!requireNamespace("rlang", quietly = TRUE))
stop("rlang required: install first?")
.data = dplyr::distinct(.data, ..., !! rlang::sym(sf_column), .keep_all = .keep_all)
.data[[ sf_column ]] = geom[ .data[[ sf_column ]] ]
st_as_sf(.data)
}
## tidyr methods:
#' @name tidyverse
#' @param data see original function docs
#' @param key see original function docs
#' @param value see original function docs
#' @param na.rm see original function docs
#' @param factor_key see original function docs
#' @examples
#' library(tidyr)
#' nc %>% select(SID74, SID79) %>% gather("VAR", "SID", -geometry) %>% summary()
gather.sf <- function(data, key, value, ..., na.rm = FALSE, convert = FALSE, factor_key = FALSE) {
if (! requireNamespace("rlang", quietly = TRUE))
stop("rlang required: install first?")
key = rlang::enquo(key)
value = rlang::enquo(value)
if (!requireNamespace("tidyr", quietly = TRUE))
stop("tidyr required: install first?")
class(data) <- setdiff(class(data), "sf")
st_as_sf(tidyr::gather(data, !!key, !!value, ...,
na.rm = na.rm, convert = convert, factor_key = factor_key),
sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param fill see original function docs
#' @param drop see original function docs
#' @examples
#' library(tidyr)
#' nc$row = 1:100 # needed for spread to work
#' nc %>% select(SID74, SID79, geometry, row) %>%
#'   gather("VAR", "SID", -geometry, -row) %>%
#'   spread(VAR, SID) %>% head()
spread.sf <- function(data, key, value, fill = NA, convert = FALSE, drop = TRUE,
    sep = NULL) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  # fix: check tidyr before calling tidyr::spread, consistent with gather.sf
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  # capture key/value unevaluated so they can be forwarded to tidyr
  key = rlang::enquo(key)
  value = rlang::enquo(value)
  # run tidyr's method on the bare data.frame, then restore class "sf"
  class(data) <- setdiff(class(data), "sf")
  st_as_sf(tidyr::spread(data, !!key, !!value, fill = fill, convert = convert,
    drop = drop, sep = sep), sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param tbl see original function docs
#' @param size see original function docs
#' @param replace see original function docs
#' @param weight see original function docs
#' @param .env see original function docs
sample_n.sf <- function(tbl, size, replace = FALSE, weight = NULL, .env = parent.frame()) {
st_sf(NextMethod(), sf_column_name = attr(tbl, "sf_column"))
}
#' @name tidyverse
sample_frac.sf <- function(tbl, size = 1, replace = FALSE, weight = NULL, .env = parent.frame()) {
st_sf(NextMethod(), sf_column_name = attr(tbl, "sf_column"))
}
#' @name tidyverse
#' @examples
#' storms.sf = st_as_sf(storms, coords = c("long", "lat"), crs = 4326)
#' x <- storms.sf %>% group_by(name, year) %>% nest
#' trs = lapply(x$data, function(tr) st_cast(st_combine(tr), "LINESTRING")[[1]]) %>%
#'   st_sfc(crs = 4326)
#' trs.sf = st_sf(x[,1:2], trs)
#' plot(trs.sf["year"], axes = TRUE)
#' @details \code{nest} assumes that a simple feature geometry list-column was among the columns that were nested.
nest.sf = function (.data, ...) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  sf_column_name = attr(.data, "sf_column")
  class(.data) <- setdiff(class(.data), "sf")
  ret = tidyr::nest(.data, ...)
  # first list-column of the result: this is where the nested rows ended up
  # (vapply is type-stable, unlike sapply)
  lst = which(vapply(ret, inherits, logical(1), "list"))[1]
  if (is.na(lst))
    stop("internal error: no list-column found after nesting") # defensive
  # re-sf: each nested data.frame regains its geometry list-column
  ret[[lst]] = lapply(ret[[lst]], st_as_sf, sf_column_name = sf_column_name)
  ret
}
#' @name tidyverse
#' @param col see \link[tidyr]{separate}
#' @param into see \link[tidyr]{separate}
#' @param remove see \link[tidyr]{separate}
#' @param extra see \link[tidyr]{separate}
separate.sf = function(data, col, into, sep = "[^[:alnum:]]+", remove = TRUE,
convert = FALSE, extra = "warn", fill = "warn", ...) {
if (!requireNamespace("rlang", quietly = TRUE))
stop("rlang required: install first?")
col = rlang::enquo(col)
if (!requireNamespace("tidyr", quietly = TRUE))
stop("tidyr required: install first?")
class(data) <- setdiff(class(data), "sf")
st_as_sf(tidyr::separate(data, !!col, into = into,
sep = sep, remove = remove, convert = convert, extra = extra, fill = fill, ...),
sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param sep see \link[tidyr]{separate_rows}
#' @param convert see \link[tidyr]{separate_rows}
separate_rows.sf <- function(data, ..., sep = "[^[:alnum:]]+", convert = FALSE) {
if (!requireNamespace("tidyr", quietly = TRUE))
stop("tidyr required: install first?")
class(data) <- setdiff(class(data), "sf")
ret = tidyr::separate_rows(data, ..., sep = sep, convert = convert)
st_as_sf(ret, sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
unite.sf <- function(data, col, ..., sep = "_", remove = TRUE) {
  if (!requireNamespace("rlang", quietly = TRUE))
    stop("rlang required: install first?")
  # fix: check tidyr before calling tidyr::unite, consistent with the
  # other tidyr wrappers in this file
  if (!requireNamespace("tidyr", quietly = TRUE))
    stop("tidyr required: install first?")
  # capture col unevaluated so it can be forwarded to tidyr
  col = rlang::enquo(col)
  # run tidyr's method on the bare data.frame, then restore class "sf"
  class(data) <- setdiff(class(data), "sf")
  st_as_sf(tidyr::unite(data, !!col, ..., sep = sep, remove = remove),
    sf_column_name = attr(data, "sf_column"))
}
#' @name tidyverse
#' @param .preserve see \link[tidyr:nest]{unnest}
unnest.sf = function(data, ..., .preserve = NULL) {
# nocov start
sf_column_name = attr(data, "sf_column", exact = TRUE)
if (!requireNamespace("tidyr", quietly = TRUE))
stop("unnest requires tidyr; install that first")
class(data) = setdiff(class(data), "sf")
st_sf(NextMethod(), sf_column_name = sf_column_name)
# nocov end
}
## tibble methods:
#' Summarize simple feature type for tibble
#'
#' Summarize simple feature type for tibble
#' @param x object of class sfc
#' @param ... ignored
#' @name tibble
#' @details see \link[pillar]{type_sum}
type_sum.sfc <- function(x, ...) {
cls = substring(class(x)[1], 5)
if (is.na(st_is_longlat(x)))
cls
else
paste0(cls, " [", enc2utf8(as.character(units(st_crs(x, parameters = TRUE)$ud_unit))), "]")
}
#' Summarize simple feature item for tibble
#'
#' Summarize simple feature item for tibble
#' @name tibble
obj_sum.sfc <- function(x) {
vapply(x, function(sfg) format(sfg, width = 15L), "")
}
#' @name tibble
pillar_shaft.sfc <- function(x, ...) {
  # significant digits: pillar's option if set, else R's global default;
  # getOption() with `default` replaces the options()$... / NULL dance
  digits = getOption("pillar.sigfig", default = getOption("digits"))
  out <- format(x, width = 100, digits = digits, ...)
  # for single-type geometry columns the type is already shown in the
  # column header, so strip the leading "POINT ", "POLYGON ", ... tag
  if (!inherits(x, "sfc_GEOMETRY") && !inherits(x, "sfc_GEOMETRYCOLLECTION"))
    out <- sub("[A-Z]+ ", "", out)
  pillar::new_pillar_shaft_simple(out, align = "right", min_width = 25)
}
#nocov start
# Register all dplyr/tidyr/pillar/spatstat S3 methods for sf classes at load
# time.  Registration happens dynamically (rather than via NAMESPACE),
# presumably because these packages are optional dependencies -- the methods
# are attached only when/if the corresponding package is loaded.
register_all_s3_methods = function() {
	# dplyr >= 1.0 (dev versions >= 0.8.99.9000) rebuilds objects through
	# dplyr_reconstruct(); register it so sf objects survive reconstruction.
	has_dplyr_1.0 =
		requireNamespace("dplyr", quietly = TRUE) &&
		utils::packageVersion("dplyr") >= "0.8.99.9000"
	if (has_dplyr_1.0)
		register_s3_method("dplyr", "dplyr_reconstruct", "sf")
	# dplyr verbs on sf data frames:
	register_s3_method("dplyr", "anti_join", "sf")
	register_s3_method("dplyr", "arrange", "sf")
	register_s3_method("dplyr", "distinct", "sf")
	register_s3_method("dplyr", "filter", "sf")
	register_s3_method("dplyr", "full_join", "sf")
	register_s3_method("dplyr", "group_by", "sf")
	# register_s3_method("dplyr", "group_map", "sf")
	register_s3_method("dplyr", "group_split", "sf")
	register_s3_method("dplyr", "inner_join", "sf")
	register_s3_method("dplyr", "left_join", "sf")
	register_s3_method("dplyr", "mutate", "sf")
	register_s3_method("dplyr", "rename", "sf")
	register_s3_method("dplyr", "right_join", "sf")
	register_s3_method("dplyr", "sample_frac", "sf")
	register_s3_method("dplyr", "sample_n", "sf")
	register_s3_method("dplyr", "select", "sf")
	register_s3_method("dplyr", "semi_join", "sf")
	register_s3_method("dplyr", "slice", "sf")
	register_s3_method("dplyr", "summarise", "sf")
	register_s3_method("dplyr", "transmute", "sf")
	register_s3_method("dplyr", "ungroup", "sf")
	# tidyr reshaping verbs:
	register_s3_method("tidyr", "gather", "sf")
	register_s3_method("tidyr", "spread", "sf")
	register_s3_method("tidyr", "nest", "sf")
	register_s3_method("tidyr", "separate", "sf")
	register_s3_method("tidyr", "separate_rows", "sf")
	register_s3_method("tidyr", "unite", "sf")
	register_s3_method("tidyr", "unnest", "sf")
	# pillar/tibble printing hooks for geometry columns:
	register_s3_method("pillar", "obj_sum", "sfc")
	register_s3_method("pillar", "type_sum", "sfc")
	register_s3_method("pillar", "pillar_shaft", "sfc")
	# spatstat.geom conversions from sf/sfc geometries:
	register_s3_method("spatstat.geom", "as.ppp", "sfc")
	register_s3_method("spatstat.geom", "as.ppp", "sf")
	register_s3_method("spatstat.geom", "as.owin", "POLYGON")
	register_s3_method("spatstat.geom", "as.owin", "MULTIPOLYGON")
	register_s3_method("spatstat.geom", "as.owin", "sfc_POLYGON")
	register_s3_method("spatstat.geom", "as.owin", "sfc_MULTIPOLYGON")
	register_s3_method("spatstat.geom", "as.owin", "sfc")
	register_s3_method("spatstat.geom", "as.owin", "sf")
	register_s3_method("spatstat.geom", "as.psp", "LINESTRING")
	register_s3_method("spatstat.geom", "as.psp", "MULTILINESTRING")
	register_s3_method("spatstat.geom", "as.psp", "sfc_MULTILINESTRING")
	register_s3_method("spatstat.geom", "as.psp", "sfc")
	register_s3_method("spatstat.geom", "as.psp", "sf")
	register_vctrs_methods()
}
# from: https://github.com/tidyverse/hms/blob/master/R/zzz.R
# Thu Apr 19 10:53:24 CEST 2018
# Register `generic.class` from the caller's environment (or an explicit
# `fun`) as an S3 method in `pkg`'s namespace, both immediately (if the
# namespace is already loaded) and via an onLoad hook for later (re)loads.
register_s3_method <- function(pkg, generic, class, fun = NULL) {
  stopifnot(
    is.character(pkg), length(pkg) == 1,
    is.character(generic), length(generic) == 1,
    is.character(class), length(class) == 1
  )
  if (is.null(fun)) {
    # Look the method up by its conventional name in the caller's frame.
    fun <- get(paste0(generic, ".", class), envir = parent.frame())
  } else {
    stopifnot(is.function(fun))
  }
  do_register <- function(...) {
    registerS3method(generic, class, fun, envir = asNamespace(pkg))
  }
  if (pkg %in% loadedNamespaces()) {
    do_register()
  }
  # Always register the hook in case the package is later unloaded & reloaded.
  setHook(packageEvent(pkg, "onLoad"), do_register)
}
# nocov end
|
\name{get.multimir.cutoffs}
\alias{get.multimir.cutoffs}
\title{
Load Pre-calculated Prediction Score Cutoffs in the multiMiR Package
}
\description{
This is an internal multiMiR function that is not intended to be
called directly by users.
Please set the prediction score cutoff via the corresponding argument of
\code{get.multimir}.
}
| /man/get.multimir.cutoffs.Rd | no_license | Yixf-Self/multiMiR | R | false | false | 303 | rd | \name{get.multimir.cutoffs}
\alias{get.multimir.cutoffs}
\title{
Load Pre-calculated Prediction Score Cutoffs in the multiMiR Package
}
\description{
This is an internal multiMiR function that is not intended to be
used directly.
Please set prediction score cutoff in \code{get.multimir}.
}
|
## cachematrix.R: cache the inverse of a matrix so that repeated requests
## for the inverse do not recompute it.
## makeCacheMatrix() builds a special "matrix" object that caches its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Creates a special "matrix" wrapper that can cache its inverse.
  # Returns a list of four accessors: set/get the matrix, set/get the inverse.
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(inv) {
    cached_inverse <<- inv
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve() computes the inverse of the special "matrix" returned by
## makeCacheMatrix().  If the inverse has already been calculated (and the
## matrix has not changed), the cached inverse is returned instead of
## recomputing it.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  r <- x$getInverse()
  if(!is.null(r)) {
    message("getting cached inverse")  # fixed typo: was "cached invers"
    return(r)
  }
  data <- x$get()
  r <- solve(data, ...)  # solve() of a square matrix yields its inverse
  x$setInverse(r)
  r
}
| /cachematrix.R | no_license | timokoola/ProgrammingAssignment2 | R | false | false | 710 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setInverse <- function(inv) i <<- inv
getInverse <- function() i
list(set = set, get = get, setInverse=setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
r <- x$getInverse()
if(!is.null(r)) {
message("cached invers")
return(r)
}
data <- x$get()
r <- solve(data, ...)
x$setInverse(r)
r
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds-doc.R
\docType{package}
\name{RDS}
\alias{RDS}
\title{This package provides functionality for carrying out estimation
with data collected using Respondent-Driven Sampling. This includes
Heckathorn's RDS-I and RDS-II estimators as well as Gile's Sequential
Sampler estimator.}
| /man/RDS.Rd | no_license | Edouard-Legoupil/RDS | R | false | true | 370 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds-doc.R
\docType{package}
\name{RDS}
\alias{RDS}
\title{This package provides functionality for carrying out estimation
with data collected using Respondent-Driven Sampling. This includes
Heckathorn's RDS-I and RDS-II estimators as well as Gile's Sequential
Sampler estimator.}
|
plot.StatisBiplot <- function(x, A1 = 1, A2 = 2, PlotType="Biplot", PlotRowTraj=FALSE, PlotVarTraj=FALSE, LabelTraj='Begining',
                              VarColorType="Biplot", VarColors=NULL, VarLabels=NULL, RowColors=NULL,
                              TableColors=NULL, RowRandomColors=FALSE, ...) {
  # Plot method for StatisBiplot objects.  PlotType, VarColorType and
  # LabelTraj accept either a name or a numeric index into the choice
  # vectors below.
  PlotTypes = c("Biplot", "Correlations", "Contributions", "InterStructure")
  if (is.numeric(PlotType)) PlotType = PlotTypes[PlotType]
  VarColorTypes = c("Biplot", "ByTable", "ByVar")
  if (is.numeric(VarColorType)) VarColorType = VarColorTypes[VarColorType]
  ColorVar = VarColors
  LabelTrajectories = c("Begining", "End", "None")
  if (is.numeric(LabelTraj)) LabelTraj = LabelTrajectories[LabelTraj]
  nind = dim(x$Biplot$RowCoordinates)[1]
  if (RowRandomColors) RowColors = colors()[sample.int(657, nind)]
  if (x$SameVar){
    nvars = x$NTables * x$NVars
    if (VarColorType == "ByTable"){
      # One colour per table; variable labels repeated for every table.
      if (is.null(TableColors)) TableColors = 1:x$NTables
      ColorVar = rep(TableColors[1], x$NVars)
      for (i in 2:x$NTables) ColorVar = c(ColorVar, rep(TableColors[i], x$NVars))
      VarLabels = rep(x$VarLabels, x$NTables)
    }
    if (VarColorType == "ByVar"){
      # One colour per variable; labels identify the table instead.
      if (is.null(VarColors)) VarColors = 1:x$NVars
      ColorVar = rep(VarColors, x$NTables)
      VarLabels = rep(x$TableLabels[1], x$NVars)
      for (i in 2:x$NTables) VarLabels = c(VarLabels, rep(x$TableLabels[i], x$NVars))
    }
  }
  if (PlotType == "Biplot")
    plot(x$Biplot, A1 = A1, A2 = A2, ColorVar = ColorVar, VarLabels = VarLabels, ColorInd = RowColors, ...)
  if (PlotType == "Correlations")
    CorrelationCircle(x$Biplot, Colors = ColorVar, Labs = VarLabels, cex = 0.7)
  if (PlotType == "Contributions")
    ColContributionPlot(x$Biplot, Colors = ColorVar, Labs = VarLabels, cex = 0.7, MinQuality = 0.7)
  if (x$SameVar){
    if (VarColorType == "ByTable")
      legend("topright", legend = x$TableLabels, fill = TableColors, cex = 0.7)
    if (VarColorType == "ByVar")
      legend("topright", legend = x$VarLabels, fill = VarColors, cex = 0.7)
  }
  # Variable trajectories only make sense when all tables share variables.
  if (!x$SameVar) PlotVarTraj = FALSE
  # (removed stray debug statement: print(PlotRowTraj))
  if (PlotRowTraj){
    print("The projection of trajectories will be available soon")
    TrajNames = names(x$TrajInd)
    # Braced loop so every trajectory is labelled, not just the last one
    # (the original unbraced `for` left the labelling `if` outside the loop).
    for (i in seq_along(x$TrajInd)) {
      points(x$TrajInd[[i]][, A1], x$TrajInd[[i]][, A2], type = "l", col = RowColors[i])
      if (LabelTraj == 'Begining')
        text(x$TrajInd[[i]][1, A1], x$TrajInd[[i]][1, A2], label = TrajNames[i], col = RowColors[i])
    }
  }
  if (PlotVarTraj){
    print("The projection of trajectories will be available soon")
  }
}
| /R/plot.StatisBiplot.R | no_license | JonatanFernandez/MultBiplotR | R | false | false | 2,502 | r | plot.StatisBiplot <- function(x, A1 = 1, A2 = 2, PlotType="Biplot", PlotRowTraj=FALSE, PlotVarTraj=FALSE, LabelTraj='Begining',
VarColorType="Biplot", VarColors=NULL, VarLabels=NULL, RowColors=NULL,
TableColors=NULL, RowRandomColors=FALSE, ...) {
PlotTypes=c("Biplot", "Correlations", "Contributions", "InterStructure")
if (is.numeric(PlotType)) PlotType=PlotTypes[PlotType]
VarColorTypes=c("Biplot", "ByTable", "ByVar")
if (is.numeric(VarColorType)) VarColorType=VarColorTypes[VarColorType]
ColorVar=VarColors
LabelTrajectories=c("Begining", "End", "None")
if (is.numeric(LabelTraj)) LabelTraj=LabelTrajectories[LabelTraj]
nind=dim(x$Biplot$RowCoordinates)[1]
if (RowRandomColors) RowColors=colors()[sample.int(657,nind)]
if (x$SameVar){
nvars =x$NTables * x$NVars
if (VarColorType=="ByTable"){
if (is.null(TableColors)) TableColors=1:x$NTables
ColorVar=rep(TableColors[1], x$NVars)
for (i in 2:x$NTables) ColorVar=c(ColorVar, rep(TableColors[i], x$NVars))
VarLabels=rep(x$VarLabels, x$NTables)
}
if (VarColorType=="ByVar"){
if (is.null(VarColors)) VarColors=1:x$NVars
ColorVar=rep(VarColors, x$NTables)
VarLabels=rep(x$TableLabels[1], x$NVars)
for (i in 2:x$NTables) VarLabels = c(VarLabels, rep(x$TableLabels[i], x$NVars))
}
}
if (PlotType=="Biplot")
plot(x$Biplot, A1=A1, A2=A2, ColorVar=ColorVar, VarLabels=VarLabels, ColorInd=RowColors, ...)
if (PlotType=="Correlations")
CorrelationCircle(x$Biplot, Colors=ColorVar, Labs=VarLabels, cex=0.7)
if (PlotType=="Contributions")
ColContributionPlot(x$Biplot, Colors=ColorVar, Labs=VarLabels, cex=0.7, MinQuality = 0.7)
if (x$SameVar){
if (VarColorType=="ByTable")
legend("topright",legend=x$TableLabels, fill=TableColors, cex=0.7)
if (VarColorType=="ByVar")
legend("topright",legend=x$VarLabels, fill=VarColors, cex=0.7)
}
if (!x$SameVar) PlotVarTraj=FALSE
print(PlotRowTraj)
if (PlotRowTraj){
print("The projection of trajectories will be available soon")
TrajNames=names(x$TrajInd)
for (i in 1:length(x$TrajInd))
points(x$TrajInd[[i]][,A1], x$TrajInd[[i]][,A2], type="l", col=RowColors[i])
if (LabelTraj=='Begining') text(x$TrajInd[[i]][1,A1], x$TrajInd[[i]][1,A2], label=TrajNames[i], col=RowColors[i])
}
if (PlotVarTraj){
print("The projection of trajectories will be available soon")
}
}
|
context("formula")
# Creation ----------------------------------------------------------------
# f_new() builds a formula from quoted expressions; it must reject
# non-language expressions and non-environment `env` arguments.
test_that("expr must be valid type", {
  expect_error(f_new(list()), "must be a language object")
  expect_error(f_new(quote(a), list()), "must be a language object")
  expect_error(f_new(quote(a), env = list()), "must be an environment")
})
# A formula constructed with f_new() should be indistinguishable from one
# written with the ~ operator (same structure and environment).
test_that("equivalent to ~", {
  f1 <- ~abc
  f2 <- f_new(quote(abc))
  expect_identical(f1, f2)
})
test_that("is_formula works", {
  expect_true(is_formula(~10))
  expect_false(is_formula(10))
})
# Getters -----------------------------------------------------------------
# f_rhs()/f_lhs()/f_env() validate their input: non-formulas and malformed
# `~` calls (zero or three arguments) must raise informative errors.
test_that("throws errors for bad inputs", {
  expect_error(f_rhs(1), "not a formula")
  expect_error(f_rhs(`~`()), "Invalid formula")
  expect_error(f_rhs(`~`(1, 2, 3)), "Invalid formula")
  expect_error(f_lhs(1), "not a formula")
  expect_error(f_lhs(`~`()), "Invalid formula")
  expect_error(f_lhs(`~`(1, 2, 3)), "Invalid formula")
  expect_error(f_env(1), "not a formula")
})
# The RHS may be a symbol, a call, or a scalar literal.
test_that("extracts call, name, or scalar", {
  expect_identical(f_rhs(~ x), quote(x))
  expect_identical(f_rhs(~ f()), quote(f()))
  expect_identical(f_rhs(~ 1L), 1L)
})
# Setters -----------------------------------------------------------------
test_that("can replace RHS of one-sided formula", {
  f <- ~ x1
  f_rhs(f) <- quote(x2)
  expect_equal(f, ~ x2)
})
test_that("can replace both sides of two-sided formula", {
  f <- x1 ~ y1
  f_lhs(f) <- quote(x2)
  f_rhs(f) <- quote(y2)
  expect_equal(f, x2 ~ y2)
})
# Assigning NULL to the LHS turns a two-sided formula into a one-sided one.
test_that("can remove lhs of two-sided formula", {
  f <- x ~ y
  f_lhs(f) <- NULL
  expect_equal(f, ~ y)
})
test_that("can modify environment", {
  f <- x ~ y
  env <- new.env()
  f_env(f) <- env
  expect_equal(f_env(f), env)
})
| /packrat/lib/x86_64-pc-linux-gnu/3.2.5/lazyeval/tests/testthat/test-formula.R | no_license | harryprince/seamonster | R | false | false | 1,752 | r | context("formula")
# Creation ----------------------------------------------------------------
test_that("expr must be valid type", {
expect_error(f_new(list()), "must be a language object")
expect_error(f_new(quote(a), list()), "must be a language object")
expect_error(f_new(quote(a), env = list()), "must be an environment")
})
test_that("equivalent to ~", {
f1 <- ~abc
f2 <- f_new(quote(abc))
expect_identical(f1, f2)
})
test_that("is_formula works", {
expect_true(is_formula(~10))
expect_false(is_formula(10))
})
# Getters -----------------------------------------------------------------
test_that("throws errors for bad inputs", {
expect_error(f_rhs(1), "not a formula")
expect_error(f_rhs(`~`()), "Invalid formula")
expect_error(f_rhs(`~`(1, 2, 3)), "Invalid formula")
expect_error(f_lhs(1), "not a formula")
expect_error(f_lhs(`~`()), "Invalid formula")
expect_error(f_lhs(`~`(1, 2, 3)), "Invalid formula")
expect_error(f_env(1), "not a formula")
})
test_that("extracts call, name, or scalar", {
expect_identical(f_rhs(~ x), quote(x))
expect_identical(f_rhs(~ f()), quote(f()))
expect_identical(f_rhs(~ 1L), 1L)
})
# Setters -----------------------------------------------------------------
test_that("can replace RHS of one-sided formula", {
f <- ~ x1
f_rhs(f) <- quote(x2)
expect_equal(f, ~ x2)
})
test_that("can replace both sides of two-sided formula", {
f <- x1 ~ y1
f_lhs(f) <- quote(x2)
f_rhs(f) <- quote(y2)
expect_equal(f, x2 ~ y2)
})
test_that("can remove lhs of two-sided formula", {
f <- x ~ y
f_lhs(f) <- NULL
expect_equal(f, ~ y)
})
test_that("can modify environment", {
f <- x ~ y
env <- new.env()
f_env(f) <- env
expect_equal(f_env(f), env)
})
|
# plot_roc.R -- draw the mean ROC curve (averaged across the columns of the
# saved TPR matrix, i.e. across cross-validation repeats) for each
# dominant/non-dominant cutoff value, and save each curve as a PDF.
# (removed `rm(list = ls())`: a script should never clear the user's workspace)
library(matrixStats)
library(tidyverse)
library(here)  # here() is used below but was never loaded
dom_nondom_crits <- c(0.25)
for (dom_crit in dom_nondom_crits){
  df_tpr <- read.table(file = here("model_train_test/acute", paste("res_allmodels/tprs_full", dom_crit, ".txt", sep = "")), stringsAsFactors = FALSE)
  df_fpr <- read.table(file = here("model_train_test/acute", paste("res_allmodels/fpr_full", dom_crit, ".txt", sep = "")), stringsAsFactors = FALSE)
  tprs_full <- as.matrix(df_tpr)
  fpr <- df_fpr$V1
  # Average the TPR across repeats and anchor the curve at (0,0) and (1,1).
  tpr_mean <- rowMeans(tprs_full)
  tpr_mean <- c(0, tpr_mean, 1)
  tpr_std <- rowSds(tprs_full)
  tpr_std <- c(0, tpr_std, 1)
  fpr_ticks <- c(0, fpr, 1)
  p <- ggplot(data = data.frame(x = fpr_ticks, y = tpr_mean))
  p <- p + geom_line(aes(x = x, y = y), size = 2, color = "red")
  p <- p + coord_equal()
  # Diagonal reference line = performance of a random classifier.
  p <- p + geom_abline(intercept = 0, slope = 1, color="black", linetype="dashed", size=0.5)
  p <- p + theme(text = element_text(size = 22))
  p <- p + scale_x_continuous(name = "False Positive Rate", limits = c(0,1), expand = c(0,0), breaks = c(0, 0.25, 0.5, 0.75, 1), labels = c("0", "0.25", "0.5", "0.75", "1"))
  p <- p + scale_y_continuous(name = "True Positive Rate", limits = c(0,1), expand = c(0,0), breaks = c(0, 0.25, 0.5, 0.75, 1), labels = c("0", "0.25", "0.5", "0.75", "1"))
  # A bare `p` is NOT auto-printed inside a for loop; print explicitly.
  print(p)
  ggsave(filename = here("model_train_test/acute", paste("res_allmodels/roc_full", dom_crit, ".pdf", sep = "")), width = 6.1, height = 6)
} | /model_train_test/acute/plot_roc.R | permissive | andy90/immunogenicity_predictor | R | false | false | 1,419 | r | rm(list = ls())
library(matrixStats)
library(tidyverse)
dom_nondom_crits <- c(0.25)
for (dom_crit in dom_nondom_crits){
df_tpr <- read.table(file = here("model_train_test/acute", paste("res_allmodels/tprs_full", dom_crit, ".txt", sep = "")), stringsAsFactors = FALSE)
df_fpr <- read.table(file = here("model_train_test/acute", paste("res_allmodels/fpr_full", dom_crit, ".txt", sep = "")), stringsAsFactors = FALSE)
tprs_full <- as.matrix(df_tpr)
fpr <- df_fpr$V1
tpr_mean <- rowMeans(tprs_full)
tpr_mean <- c(0, tpr_mean,1)
tpr_std <- rowSds(tprs_full)
tpr_std <- c(0, tpr_std,1)
fpr_ticks <- c(0, fpr,1)
p <- ggplot(data = data.frame(x = fpr_ticks, y = tpr_mean))
p <- p + geom_line(aes(x = x, y = y),size = 2,color= "red")
p <- p + coord_equal()
p <- p + geom_abline(intercept = 0, slope = 1, color="black", linetype="dashed", size=0.5)
p <- p + theme(text = element_text(size = 22))
p <- p + scale_x_continuous(name = "False Positive Rate", limits = c(0,1), expand = c(0,0), breaks = c(0, 0.25, 0.5, 0.75, 1), labels = c("0", "0.25", "0.5", "0.75", "1"))
p <- p + scale_y_continuous(name = "True Positive Rate", limits = c(0,1), expand = c(0,0), breaks = c(0, 0.25, 0.5, 0.75, 1), labels = c("0", "0.25", "0.5", "0.75", "1"))
p
ggsave(filename = here("model_train_test/acute", paste("res_allmodels/roc_full", dom_crit, ".pdf", sep = "")), width = 6.1, height = 6)
} |
### CHALLENGE 15: DELIVERABLE 1
# 3. Use the library() function to load the dplyr package
library(dplyr)
#4. Import and read in the MechaCar_mpg.csv file as a dataframe.
library(tidyverse)
mecha_mpg <- read.csv(file='./MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
#5. Perform linear regression using the lm() function
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data=mecha_mpg)
#6. Using the summary() function, determine the p-value and the r-squared value for the linear regression model.
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data=mecha_mpg))
#Additional Step: eliminate the independent variables that have little impact on predicting mpg to see impact:
lm(mpg ~ vehicle_length + ground_clearance, data=mecha_mpg)
summary(lm(mpg ~ vehicle_length + ground_clearance, data=mecha_mpg)) | /lot summery.R | no_license | Dawitdaniel91/R_Analysis | R | false | false | 894 | r | ### CHALLENGE 15: DELIVERABLE 1
# 3. Use the library() function to load the dplyr package
library(dplyr)
#4. Import and read in the MechaCar_mpg.csv file as a dataframe.
library(tidyverse)
mecha_mpg <- read.csv(file='./MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
#5. Perform linear regression using the lm() function
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data=mecha_mpg)
#6. Using the summary() function, determine the p-value and the r-squared value for the linear regression model.
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data=mecha_mpg))
#Additional Step: eliminate the independent variables that have little impact on predicting mpg to see impact:
lm(mpg ~ vehicle_length + ground_clearance, data=mecha_mpg)
summary(lm(mpg ~ vehicle_length + ground_clearance, data=mecha_mpg)) |
#' Plot Change in 14-Day Incidence
#'
#' Returns a ggplot showing, for each selected country, the change in the
#' 14-day cumulative incidence (per 100,000 population) between today and
#' one week ago, drawn as arrows coloured by direction of change.
#'
#' @param Dataset Path to a CSV of daily country reports; must contain the
#'   columns ADM0NAME, DateReport1 and TotalCases.
#' @param PopulationDataset Path to a CSV with columns ADM0NAME and UNPOP2019
#'   (the population used as the incidence denominator).
#' @param ListCountries Character vector of country names (ADM0NAME) to plot.
#' @return A ggplot object.
#' @keywords incidence
#' @export
#' @examples
#' Plot_Change14DaysIncidence()
Plot_Change14DaysIncidence<-function(Dataset,PopulationDataset,ListCountries){
# Population lookup table (denominator for the per-100k incidence).
PopulationData<-read.csv(PopulationDataset) %>% select(ADM0NAME,UNPOP2019)
Data<-read.csv(Dataset)
MaxDate<-as.Date(max(Data$DateReport1))
# Cumulative case counts at four reference dates: today, and 7/14/21 days ago.
TotalCasesToday<-Data %>% filter(DateReport1==MaxDate) %>% select(ADM0NAME,TotalCasesToday=TotalCases)
TotalCases7DaysAgo<-Data %>% filter(DateReport1==MaxDate-7) %>% select(ADM0NAME,TotalCases7DaysAgo=TotalCases)
TotalCases14DaysAgo<-Data %>% filter(DateReport1==MaxDate-14) %>% select(ADM0NAME,TotalCases14DaysAgo=TotalCases)
TotalCases21DaysAgo<-Data %>% filter(DateReport1==MaxDate-21) %>% select(ADM0NAME,TotalCases21DaysAgo=TotalCases)
# 14-day incidence today = (cases today - cases 14 days ago) per 100k;
# the same quantity shifted one week back gives the comparison value.
Data<-TotalCasesToday %>% left_join(TotalCases7DaysAgo,by='ADM0NAME') %>%
  left_join(TotalCases14DaysAgo,by='ADM0NAME') %>%
  left_join(TotalCases21DaysAgo,by='ADM0NAME') %>%
  left_join(PopulationData,by='ADM0NAME') %>%
  mutate(FrtDaysIncidenceToday=(TotalCasesToday-TotalCases14DaysAgo)/UNPOP2019*100000,
         FrtDaysIncidenceOneWeekAgo=(TotalCases7DaysAgo-TotalCases21DaysAgo)/UNPOP2019*100000,
         Change=if_else(FrtDaysIncidenceToday>FrtDaysIncidenceOneWeekAgo,'Increase','Decrease')) %>%
  arrange(FrtDaysIncidenceToday) %>%
  filter(ADM0NAME %in% ListCountries)
# Freeze the sorted order so countries are plotted by increasing incidence.
Data$ADM0NAME<-factor(Data$ADM0NAME,levels=unique(Data$ADM0NAME))
# Arrow from last week's value to today's value, green down / red up.
plot<-ggplot(Data,aes(color=Change))+
  scale_color_manual(breaks=c('Decrease','Increase'),values=c('#0DBF6F','#9E1C1A'))+
  geom_segment(aes(x=FrtDaysIncidenceOneWeekAgo, y=ADM0NAME, xend=FrtDaysIncidenceToday, yend=ADM0NAME),arrow=arrow(length = unit(0.02, "npc")), size=1,show.legend=FALSE)+
  xlab("Change in 14-days cumulative incidence compared to one week ago")+
  ylab('')
return(plot)
}
| /R/ChartChange14DaysIncidence.R | no_license | romanceline/ChartChange14DaysIncidence | R | false | false | 1,993 | r | #' Plot Change 14Days Incidence
#'
#' This returns a plot displaying the change in 14days incidence for selected countries
#' @param Dataset,PopulationDataset,ListCountries
#' @keywords
#' @export
#' @examples
#' Plot_Change14DaysIncidence()
Plot_Change14DaysIncidence<-function(Dataset,PopulationDataset,ListCountries){
PopulationData<-read.csv(PopulationDataset) %>% select(ADM0NAME,UNPOP2019)
Data<-read.csv(Dataset)
MaxDate<-as.Date(max(Data$DateReport1))
TotalCasesToday<-Data %>% filter(DateReport1==MaxDate) %>% select(ADM0NAME,TotalCasesToday=TotalCases)
TotalCases7DaysAgo<-Data %>% filter(DateReport1==MaxDate-7) %>% select(ADM0NAME,TotalCases7DaysAgo=TotalCases)
TotalCases14DaysAgo<-Data %>% filter(DateReport1==MaxDate-14) %>% select(ADM0NAME,TotalCases14DaysAgo=TotalCases)
TotalCases21DaysAgo<-Data %>% filter(DateReport1==MaxDate-21) %>% select(ADM0NAME,TotalCases21DaysAgo=TotalCases)
Data<-TotalCasesToday %>% left_join(TotalCases7DaysAgo,by='ADM0NAME') %>%
left_join(TotalCases14DaysAgo,by='ADM0NAME') %>%
left_join(TotalCases21DaysAgo,by='ADM0NAME') %>%
left_join(PopulationData,by='ADM0NAME') %>%
mutate(FrtDaysIncidenceToday=(TotalCasesToday-TotalCases14DaysAgo)/UNPOP2019*100000,
FrtDaysIncidenceOneWeekAgo=(TotalCases7DaysAgo-TotalCases21DaysAgo)/UNPOP2019*100000,
Change=if_else(FrtDaysIncidenceToday>FrtDaysIncidenceOneWeekAgo,'Increase','Decrease')) %>%
arrange(FrtDaysIncidenceToday) %>%
filter(ADM0NAME %in% ListCountries)
Data$ADM0NAME<-factor(Data$ADM0NAME,levels=unique(Data$ADM0NAME))
plot<-ggplot(Data,aes(color=Change))+
scale_color_manual(breaks=c('Decrease','Increase'),values=c('#0DBF6F','#9E1C1A'))+
geom_segment(aes(x=FrtDaysIncidenceOneWeekAgo, y=ADM0NAME, xend=FrtDaysIncidenceToday, yend=ADM0NAME),arrow=arrow(length = unit(0.02, "npc")), size=1,show.legend=FALSE)+
xlab("Change in 14-days cumulative incidence compared to one week ago")+
ylab('')
return(plot)
}
|
## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# load the library data.table and reshape2
library("data.table")
library("reshape2")
# Read in the Test Data Sets: subj_test, X_test & y_test data.
subj_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
# Read in the Train Data Sets: subject_train, X_train & y_train data.
# (comment fixed: this section reads the TRAIN split, not the test split)
subj_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# read the label files - feature names (column 2) and activity labels (column 2)
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
act_lb <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Logical mask of the features that are mean or standard deviation measurements.
mean_std_features <- grepl("mean|std", features)
# map the feature names onto the measurement columns
names(x_test) = features
names(x_train) = features
# Keep only the mean and standard deviation columns.
x_test = x_test[,mean_std_features]
x_train = x_train[,mean_std_features]
# Map each activity id (column 1) to its descriptive label (new column 2).
y_test[,2] = act_lb[y_test[,1]]
y_train[,2] = act_lb[y_train[,1]]
# assign labels for y_test and y_train
names(y_test) = c("act_ID", "act_lb")
names(y_train) = c("act_ID", "act_lb")
# assign labels for subj_test and subject_train
names(subj_test) = "Subject"
names(subj_train) = "Subject"
# Column bind subject + activity + measurements into one table per split.
test_data <- cbind(as.data.table(subj_test), y_test, x_test)
train_data <- cbind(as.data.table(subj_train), y_train, x_train)
# Row Bind the test and train data
all_data = rbind(test_data, train_data)
var_labels = c("Subject", "act_ID", "act_lb")
measure_labels = setdiff(colnames(all_data), var_labels)
# melt data into long format so it can be pivoted
flat_data = melt(all_data, id = var_labels, measure.vars = measure_labels)
# Pivot: average of each variable for each Subject x activity combination.
final_data = dcast(flat_data, Subject + act_lb ~ variable, mean)
write.table(final_data, file = "./final_data.txt", row.names=FALSE)
| /run_analysis.R | no_license | datadogs/Getting-Cleaning-Data | R | false | false | 2,606 | r | ## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# load the library data.table and reshape2
library("data.table")
library("reshape2")
# Read in the Test Data Sets: subj_test, X_test & y_test data.
subj_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
# Read in the Test Data Sets X_train & y_train data.
subj_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# read the label files - features and activity labels
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
act_lb <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Extract only the measurements on the mean and standard deviation for each measurement.
mean_std_features <- grepl("mean|std", features)
# map the column names
names(x_test) = features
names(x_train) = features
# Extract mean and standard deviation for each measurement.
x_test = x_test[,mean_std_features]
x_train = x_train[,mean_std_features]
# Load activity labels and map the id of the y_test and y_train
y_test[,2] = act_lb[y_test[,1]]
y_train[,2] = act_lb[y_train[,1]]
# assign labels for y_test and y_train
names(y_test) = c("act_ID", "act_lb")
names(y_train) = c("act_ID", "act_lb")
# assign labels for subj_test and subject_train
names(subj_test) = "Subject"
names(subj_train) = "Subject"
# Column bind the data into 1 table
test_data <- cbind(as.data.table(subj_test), y_test, x_test)
train_data <- cbind(as.data.table(subj_train), y_train, x_train)
# Row Bind the test and train data
all_data = rbind(test_data, train_data)
var_labels = c("Subject", "act_ID", "act_lb")
measure_labels = setdiff(colnames(all_data), var_labels)
# melt data in a flat file for pivoting
flat_data = melt(all_data, id = var_labels, measure.vars = measure_labels)
# Using dcast function to pivot table
final_data = dcast(flat_data, Subject + act_lb ~ variable, mean)
write.table(final_data, file = "./final_data.txt", row.names=FALSE)
|
# sudo gpsbabel -t -i gpx -f 1.gpx -o unicsv -F 1.csv
# sudo chmod 666 file.csv
# sudo gpsbabel -t -i garmin -f usb: -o unicsv -F temp.csv
# sudo gpsbabel -t -i garmin -f usb: -x track,start=2013061900 -o gtrnctr -F temp.tcx
# Load the GPS track points exported by gpsbabel (see commands above) and
# keep only the columns needed for mapping.
new <- read.csv("atl.csv",
                header = TRUE) # 2012/06/04 to 2013/08/22
new <- new[c("Latitude", "Longitude", "Seg", "Date")]
old <- read.csv("old.csv",
                header = TRUE) # 2011/05/26 to 2012/06/05
old <- old[c("Latitude", "Longitude", "Seg", "Date")]
# Spreadsheet formula originally used to build the Seg column:
# =if(G2<>G3,"",if(H3-H2>0.0694444,"",if(abs(B3-B2)>0.01,"",(if(abs(C3-C2)>0.01,"",1)))))
all <- rbind(new, old)
# Blank out coordinates where Seg is NA so geom_path breaks between tracks
# instead of drawing straight connector lines.
all$Latitude <- ifelse(is.na(all$Seg), NA, all$Latitude)
all$Longitude <- ifelse(is.na(all$Seg), NA, all$Longitude)
maxyear <- "2014"
# NOTE(review): all four subsets below filter on the same `maxyear`, so they
# are identical; presumably each was meant to use its own year -- confirm.
through2011 <- all[substring(all$Date,1,4) <= maxyear,]
through2012 <- all[substring(all$Date,1,4) <= maxyear,]
through2013 <- all[substring(all$Date,1,4) <= maxyear,]
through2014 <- all[substring(all$Date,1,4) <= maxyear,]
library(ggmap)
# # standard:
# mapImageData <- get_map(location = c(lon = -84.33,
# lat = 33.81),
# zoom = 12,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/bikemap.png")
# dev.off()
# # extra wide:
# mapImageData <- get_map(location = c(lon = -84.50,
# lat = 33.83),
# zoom = 10,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/extrawide.png")
# dev.off()
# # downtown:
# mapImageData <- get_map(location = c(lon = -84.36,
# lat = 33.76),
# zoom = 13,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/downtown.png")
# dev.off()
# downtownwide:
mapImageData <- get_map(location = c(lon = -84.36,
lat = 33.78),
zoom = 12,
# size = c(500, 500),
maptype = c("toner"), #toner, watercolor
source = c("stamen"))
ggmap(mapImageData,
extent = "device",
darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
geom_path(aes(x = Longitude,
y = Latitude),
data = through2014,
alpha = 0.5,
colour = "#f07300", #F8971F F4640D
size = 1.2,
pch = 20) +
annotate("text",
x = -84.274,
y = 33.695,
label = paste("2011 to", maxyear),
size = 6,
color = "#f07300") +
annotate("text",
x = -84.274,
y = 33.695,
label = paste("2011 to", maxyear),
size = 6,
color = "#000000")
# dev.copy(png, "Maps/downtownwide-heat.png")
# dev.off()
# # brookhaven:
# mapImageData <- get_map(location = c(lon = -84.35,
# lat = 33.88),
# zoom = 13,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/brookhaven.png")
# dev.off()
# # northside:
# mapImageData <- get_map(location = c(lon = -84.29,
# lat = 33.92),
# zoom = 12,
# maptype = c("toner"),
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/northside.png")
# dev.off()
# # downtownzoom:
# mapImageData <- get_map(location = c(lon = -84.4,
# lat = 33.76),
# zoom = 14,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# Parse "YYYY/MM/DD" strings into Date objects, then split the tracks at
# 2013-10-25. which() drops NA comparisons, so undated rows only land in
# naTracks below.
all$Date <- as.Date(all$Date, format = "%Y/%m/%d")
newTracks <- all[which(all$Date >= "2013-10-25"), ]
oldTracks <- all[which(all$Date < "2013-10-25"), ]
# is.na() already returns a logical mask; the old `== TRUE` comparison (and the
# which() wrapper around it) were redundant.
naTracks <- all[is.na(all$Date), ]
# # downtown compare:
# mapImageData <- get_map(location = c(lon = -84.36,
# lat = 33.76),
# zoom = 13,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = newTracks,
# colour = "#BD0026", #F8971F F4640D
# size = 0.8,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = naTracks,
# colour = "#FEB24C", #F8971F F4640D
# size = 0.8,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = oldTracks,
# colour = "#FEB24C", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/compare.png")
# dev.off()
# # allmetro.png:
# mapImageData <- get_googlemap(center = c(lon = -84.27, lat = 33.83),
# zoom = 11,
# maptype = c("terrain"))
# # allmyhood.png:
# mapImageData <- get_googlemap(center = c(lon = -84.32, lat = 33.89),
# zoom = 13,
# maptype = c("terrain"))
# # alldowntown.png:
# mapImageData <- get_googlemap(center = c(lon = -84.34, lat = 33.77),
# zoom = 12,
# size = c(520, 520),
# maptype = c("terrain")) | /version1/bikemap.R | permissive | mapsandapps/location-quantified-self | R | false | false | 7,091 | r | # sudo gpsbabel -t -i gpx -f 1.gpx -o unicsv -F 1.csv
# sudo chmod 666 file.csv
# sudo gpsbabel -t -i garmin -f usb: -o unicsv -F temp.csv
# sudo gpsbabel -t -i garmin -f usb: -x track,start=2013061900 -o gtrnctr -F temp.tcx
# GPS track exports from two devices; keep only the columns used for plotting.
new <- read.csv("atl.csv",
                header = TRUE) # 2012/06/04 to 2013/08/22
new <- new[c("Latitude", "Longitude", "Seg", "Date")]
old <- read.csv("old.csv",
                header = TRUE) # 2011/05/26 to 2012/06/05
old <- old[c("Latitude", "Longitude", "Seg", "Date")]
# =if(G2<>G3,"",if(H3-H2>0.0694444,"",if(abs(B3-B2)>0.01,"",(if(abs(C3-C2)>0.01,"",1)))))
all <- rbind(new, old)
# Blank the coordinates wherever Seg is NA so geom_path() breaks between
# separate track segments instead of connecting them.
all$Latitude <- ifelse(is.na(all$Seg), NA, all$Latitude)
all$Longitude <- ifelse(is.na(all$Seg), NA, all$Longitude)
maxyear <- "2014"
# Cumulative subsets by ride year (dates are "YYYY/MM/DD" strings, so comparing
# the first four characters works). Every cutoff previously used `maxyear`,
# which made all four subsets identical; fixed to match each variable's name.
through2011 <- all[substring(all$Date, 1, 4) <= "2011", ]
through2012 <- all[substring(all$Date, 1, 4) <= "2012", ]
through2013 <- all[substring(all$Date, 1, 4) <= "2013", ]
through2014 <- all[substring(all$Date, 1, 4) <= maxyear, ]
library(ggmap)
# # standard:
# mapImageData <- get_map(location = c(lon = -84.33,
# lat = 33.81),
# zoom = 12,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/bikemap.png")
# dev.off()
# # extra wide:
# mapImageData <- get_map(location = c(lon = -84.50,
# lat = 33.83),
# zoom = 10,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/extrawide.png")
# dev.off()
# # downtown:
# mapImageData <- get_map(location = c(lon = -84.36,
# lat = 33.76),
# zoom = 13,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/downtown.png")
# dev.off()
# downtownwide:
# Downtown-wide basemap (Stamen toner, washed out below so the tracks pop).
mapImageData <- get_map(location = c(lon = -84.36,
                                     lat = 33.78),
                        zoom = 12,
                        # size = c(500, 500),
                        maptype = c("toner"), #toner, watercolor
                        source = c("stamen"))
# Heat-map style overlay of all rides through `maxyear`.
ggmap(mapImageData,
      extent = "device",
      darken = c(0.6, "white")) + # takes out axis, etc.
  # geom_path(aes(x = Longitude,
  #               y = Latitude),
  #           data = all,
  #           colour = "black", #F8971F F4640D
  #           size = 1.2,
  #           pch = 20) +
  geom_path(aes(x = Longitude,
                y = Latitude),
            data = through2014,
            alpha = 0.5,
            colour = "#f07300", #F8971F F4640D
            size = 1.2,
            pch = 20) +
  # Date-range caption. An identical orange annotate() layer used to be drawn
  # first at the same coordinates/label/size; it was completely covered by this
  # black layer, so the redundant (invisible) layer was removed.
  annotate("text",
           x = -84.274,
           y = 33.695,
           label = paste("2011 to", maxyear),
           size = 6,
           color = "#000000")
# dev.copy(png, "Maps/downtownwide-heat.png")
# dev.off()
# # brookhaven:
# mapImageData <- get_map(location = c(lon = -84.35,
# lat = 33.88),
# zoom = 13,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/brookhaven.png")
# dev.off()
# # northside:
# mapImageData <- get_map(location = c(lon = -84.29,
# lat = 33.92),
# zoom = 12,
# maptype = c("toner"),
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/northside.png")
# dev.off()
# # downtownzoom:
# mapImageData <- get_map(location = c(lon = -84.4,
# lat = 33.76),
# zoom = 14,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "#F8971F", #F8971F F4640D
# size = 0.8,
# pch = 20)
# Parse "YYYY/MM/DD" strings into Date objects, then split the tracks at
# 2013-10-25. which() drops NA comparisons, so undated rows only land in
# naTracks below.
all$Date <- as.Date(all$Date, format = "%Y/%m/%d")
newTracks <- all[which(all$Date >= "2013-10-25"), ]
oldTracks <- all[which(all$Date < "2013-10-25"), ]
# is.na() already returns a logical mask; the old `== TRUE` comparison (and the
# which() wrapper around it) were redundant.
naTracks <- all[is.na(all$Date), ]
# # downtown compare:
# mapImageData <- get_map(location = c(lon = -84.36,
# lat = 33.76),
# zoom = 13,
# # size = c(500, 500),
# maptype = c("toner"), #toner, watercolor
# source = c("stamen"))
# ggmap(mapImageData,
# extent = "device",
# darken = c(0.6, "white")) + # takes out axis, etc.
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = all,
# colour = "black", #F8971F F4640D
# size = 1.2,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = newTracks,
# colour = "#BD0026", #F8971F F4640D
# size = 0.8,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = naTracks,
# colour = "#FEB24C", #F8971F F4640D
# size = 0.8,
# pch = 20) +
# geom_path(aes(x = Longitude,
# y = Latitude),
# data = oldTracks,
# colour = "#FEB24C", #F8971F F4640D
# size = 0.8,
# pch = 20)
# dev.copy(png, "Maps/compare.png")
# dev.off()
# # allmetro.png:
# mapImageData <- get_googlemap(center = c(lon = -84.27, lat = 33.83),
# zoom = 11,
# maptype = c("terrain"))
# # allmyhood.png:
# mapImageData <- get_googlemap(center = c(lon = -84.32, lat = 33.89),
# zoom = 13,
# maptype = c("terrain"))
# # alldowntown.png:
# mapImageData <- get_googlemap(center = c(lon = -84.34, lat = 33.77),
# zoom = 12,
# size = c(520, 520),
# maptype = c("terrain")) |
context("undesirable_operator_linter")
test_that("linter returns correct linting", {
  linter <- undesirable_operator_linter(op = c("$" = "use the `[[` accessor", "<<-" = NA))
  msg_assign <- rex::escape("Operator `<<-` is undesirable.")
  msg_dollar <- rex::escape("Operator `$` is undesirable. As an alternative, use the `[[` accessor.")

  # Operators appearing inside strings or as part of `:::` must not be flagged.
  expect_lint("x <- foo:::getObj()", NULL, linter)
  expect_lint("cat(\"10$\")", NULL, linter)

  # Each undesirable operator is reported with its exact source position.
  expect_lint("a <<- log(10)", c(message = msg_assign, line_number = 1L, column_number = 3L), linter)
  expect_lint("data$parsed == c(1, 2)", c(message = msg_dollar, line_number = 1L, column_number = 5L), linter)
})
| /tests/testthat/test-undesirable_operator_linter.R | permissive | edlee123/lintr | R | false | false | 611 | r | context("undesirable_operator_linter")
test_that("linter returns correct linting", {
  linter <- undesirable_operator_linter(op = c("$" = "use the `[[` accessor", "<<-" = NA))
  msg_assign <- rex::escape("Operator `<<-` is undesirable.")
  msg_dollar <- rex::escape("Operator `$` is undesirable. As an alternative, use the `[[` accessor.")

  # Operators appearing inside strings or as part of `:::` must not be flagged.
  expect_lint("x <- foo:::getObj()", NULL, linter)
  expect_lint("cat(\"10$\")", NULL, linter)

  # Each undesirable operator is reported with its exact source position.
  expect_lint("a <<- log(10)", c(message = msg_assign, line_number = 1L, column_number = 3L), linter)
  expect_lint("data$parsed == c(1, 2)", c(message = msg_dollar, line_number = 1L, column_number = 5L), linter)
})
|
library("gplots")
library("RColorBrewer")
library(beeswarm)
library(reshape2)
library(vegan)
library(ggplot2)
# Load the level-4 OTU table and the per-sample metadata, then restrict both to
# the samples they have in common, in a stable (sorted) order.
# TRUE/FALSE spelled out: T/F are ordinary variables in R and can be reassigned.
otu.L3 <- read.delim('../../data/prok/otu_table_L4.txt', header = TRUE, check.names = FALSE, row.names = 1, skip = 1)
map <- read.delim('../../data/metadata_vlavage.txt', header = TRUE, check.names = FALSE)
mouse.ids <- as.character(unique(map$mouse))
rownames(map) <- map$sampleid
samp.ids <- intersect(colnames(otu.L3), rownames(map))
samp.ids <- sort(samp.ids)
otu.L3 <- otu.L3[, samp.ids]
map <- map[samp.ids, ]
# One stacked-area taxa plot per mouse: subset that mouse's samples, rank taxa
# by abundance, lump everything past the top `lim` into "Other", reshape to
# long format, and save the plot.
# seq_along() is safe if mouse.ids is ever empty (1:length() would give c(1, 0)).
for (m in seq_along(mouse.ids)) {
  m.samples <- as.character(map[map$mouse==mouse.ids[m],]$sampleid)
  otu.t <- otu.L3[,m.samples]
  map.t <- map[map$mouse==mouse.ids[m],]
  dates.ax <- sort(map.t$exp_day)
  otu.n <- sweep(otu.t,2,colSums(otu.t),'/'); # Normalize to relative abundance
  # Square-root transform, renormalized so each sample's column still sums to 1.
  otu.m <- sweep(sqrt(otu.n), 2, colSums(sqrt(otu.n)), '/')
  meanAb <- apply(otu.m, 1, FUN=function(xx) tapply(xx, map.t$treatment, mean)) # group mean
  ranked = order(apply(meanAb, 2, max),decreasing=T)
  otu.m = otu.m[ranked, ]
  Taxa = rownames(otu.m)
  lim = 20
  # Collapse all but the most abundant taxa into a single "Other" row.
  if (nrow(otu.m) > lim) Taxa[lim:nrow(otu.m)] = "Other"
  otu.m = rowsum(otu.m, Taxa)
  byAbundance = rownames(otu.m)[order(rowMeans(otu.m), decreasing=T)]
  #byAbundance = gsub(';','.',byAbundance)
  # Keep only the 4th level of the semicolon-delimited taxonomy string.
  rownames(otu.m) <- lapply(X = rownames(otu.m),
                            FUN = function(xx) strsplit(as.character(xx), ';', fixed = T)[[1]][4])
  rownames(otu.m) <- gsub("NA","other",rownames(otu.m))
  otu.m <- data.frame(t(otu.m), check.names=F) # flip table
  otu.m$sampleid <- rownames(otu.m) # add a column for the sample IDs
  rownames(map.t) <- map.t$sampleid # add a column for the sample IDs
  # The following separates taxa abundances per sample, then splices in the column of interest
  otu.m <- melt(otu.m, id.vars = "sampleid", variable.name = "Taxa", value.name = "RelativeAbundance")
  otu.m <- merge(otu.m, map.t[,c("sampleid","exp_day")], by="sampleid")
  otu.m$Taxa <- factor(otu.m$Taxa, levels=byAbundance, ordered=T)
  otu.m.bar <- otu.m
  otu.m.bar$exp_day <- factor(as.character(otu.m.bar$exp_day), levels = as.character(dates.ax), ordered = T)
  # otu.m.bar$Taxa <- lapply(X = otu.m.bar$Taxa, FUN = function(xx) strsplit(xx, ';', fixed = T)[[1]][length(strsplit(xx, ';', fixed = T))])
  ## Plot according to Lifestyle, sorted by abundance
  # png(paste0("TaxaSummary_L",bT[L],".png"),width = 12,height=8, units = "in", res = '300') # Make room for legend
  # ggplot(otu.m.bar, aes(x = samp_rel_hct, y = RelativeAbundance, fill = Taxa)) +
  #   geom_bar(stat ="identity", position="fill") + labs(x="date relative to transplant",y="root relative abundance") +
  #   guides(fill=guide_legend(ncol=1)) +
  #   scale_fill_manual(values=c("dodgerblue2","#E31A1C", # red # Kevin Wright
  #                              "green4",
  #                              "#6A3D9A", # purple
  #                              "#FF7F00", # orange
  #                              "black","gold1",
  #                              "skyblue2","#FB9A99", # lt pink
  #                              "palegreen2",
  #                              "#CAB2D6", # lt purple
  #                              "#FDBF6F", # lt orange
  #                              "gray70", "khaki2",
  #                              "maroon","orchid1","deeppink1","blue1","steelblue4",
  #                              "darkturquoise","green1","yellow4","yellow3",
  #                              "darkorange4","brown")) +
  #   theme_classic() + theme(axis.text = element_text(color="black"))
  # ggsave(filename = paste0("../../results/individualplots/patient97/Taxa_barplot_summary_L",bT[L],".png"), height = 7, width = 5.5, dpi = 300)
  otu.m.area <- otu.m
  otu.m.area$exp_day_n <- as.numeric(otu.m$exp_day)
  # Build the plot into a named object and hand it to ggsave() explicitly:
  # inside a for loop the plot is never auto-printed, so ggsave()'s default
  # plot = last_plot() would not refer to this iteration's plot.
  p <- ggplot(otu.m.area, aes(x = exp_day_n, y = RelativeAbundance, fill = Taxa)) +
    geom_area(stat ="identity") + labs(x="experiment day",y="root relative abundance") +
    guides(fill=guide_legend(ncol=1)) + scale_x_continuous(limits = c(0,21)) +
    scale_fill_manual(values=c("dodgerblue2","#E31A1C", # red # Kevin Wright
                               "green4",
                               "#6A3D9A", # purple
                               "#FF7F00", # orange
                               "black","gold1",
                               "skyblue2","#FB9A99", # lt pink
                               "palegreen2",
                               "#CAB2D6", # lt purple
                               "#FDBF6F", # lt orange
                               "gray70", "khaki2",
                               "maroon","orchid1","deeppink1","blue1","steelblue4",
                               "darkturquoise","green1","yellow4","yellow3",
                               "darkorange4","brown")) +
    theme_classic() + theme(axis.text = element_text(color="black"))
  ggsave(filename = paste0("../../results/mouse_plots_by_animal_",mouse.ids[m],"_L4.png"), plot = p, height = 6, width = 7, dpi = 300)
}
| /mouse_area_plots.R | no_license | RRShieldsCutler/mi_challenge | R | false | false | 5,006 | r | library("gplots")
library("RColorBrewer")
library(beeswarm)
library(reshape2)
library(vegan)
library(ggplot2)
otu.L3 <- read.delim('../../data/prok/otu_table_L4.txt', header=T, check.names = F, row.names = 1, skip = 1)
map <- read.delim('../../data/metadata_vlavage.txt', header=T, check.names = F)
mouse.ids <- as.character(unique(map$mouse))
rownames(map) <- map$sampleid
samp.ids <- intersect(colnames(otu.L3), rownames(map))
samp.ids <- sort(samp.ids)
otu.L3 <-otu.L3[,samp.ids]
map <- map[samp.ids,]
for (m in 1:length(mouse.ids)) {
m.samples <- as.character(map[map$mouse==mouse.ids[m],]$sampleid)
otu.t <- otu.L3[,m.samples]
map.t <- map[map$mouse==mouse.ids[m],]
dates.ax <- sort(map.t$exp_day)
otu.n <- sweep(otu.t,2,colSums(otu.t),'/'); # Normalize to relative abundance
otu.m <- sweep(sqrt(otu.n), 2, colSums(sqrt(otu.n)), '/')
meanAb <- apply(otu.m, 1, FUN=function(xx) tapply(xx, map.t$treatment, mean)) # group mean
ranked = order(apply(meanAb, 2, max),decreasing=T)
otu.m = otu.m[ranked, ]
Taxa = rownames(otu.m)
lim = 20
if (nrow(otu.m) > lim) Taxa[lim:nrow(otu.m)] = "Other"
otu.m = rowsum(otu.m, Taxa)
byAbundance = rownames(otu.m)[order(rowMeans(otu.m), decreasing=T)]
#byAbundance = gsub(';','.',byAbundance)
rownames(otu.m) <- lapply(X = rownames(otu.m),
FUN = function(xx) strsplit(as.character(xx), ';', fixed = T)[[1]][4])
rownames(otu.m) <- gsub("NA","other",rownames(otu.m))
otu.m <- data.frame(t(otu.m), check.names=F) # flip table
otu.m$sampleid <- rownames(otu.m) # add a column for the sample IDs
rownames(map.t) <- map.t$sampleid # add a column for the sample IDs
# The following separates taxa abundances per sample, then splices in the column of interest
otu.m <- melt(otu.m, id.vars = "sampleid", variable.name = "Taxa", value.name = "RelativeAbundance")
otu.m <- merge(otu.m, map.t[,c("sampleid","exp_day")], by="sampleid")
otu.m$Taxa <- factor(otu.m$Taxa, levels=byAbundance, ordered=T)
otu.m.bar <- otu.m
otu.m.bar$exp_day <- factor(as.character(otu.m.bar$exp_day), levels = as.character(dates.ax), ordered = T)
# otu.m.bar$Taxa <- lapply(X = otu.m.bar$Taxa, FUN = function(xx) strsplit(xx, ';', fixed = T)[[1]][length(strsplit(xx, ';', fixed = T))])
## Plot according to Lifestyle, sorted by abundance
# png(paste0("TaxaSummary_L",bT[L],".png"),width = 12,height=8, units = "in", res = '300') # Make room for legend
# ggplot(otu.m.bar, aes(x = samp_rel_hct, y = RelativeAbundance, fill = Taxa)) +
# geom_bar(stat ="identity", position="fill") + labs(x="date relative to transplant",y="root relative abundance") +
# guides(fill=guide_legend(ncol=1)) +
# scale_fill_manual(values=c("dodgerblue2","#E31A1C", # red # Kevin Wright
# "green4",
# "#6A3D9A", # purple
# "#FF7F00", # orange
# "black","gold1",
# "skyblue2","#FB9A99", # lt pink
# "palegreen2",
# "#CAB2D6", # lt purple
# "#FDBF6F", # lt orange
# "gray70", "khaki2",
# "maroon","orchid1","deeppink1","blue1","steelblue4",
# "darkturquoise","green1","yellow4","yellow3",
# "darkorange4","brown")) +
# theme_classic() + theme(axis.text = element_text(color="black"))
# ggsave(filename = paste0("../../results/individualplots/patient97/Taxa_barplot_summary_L",bT[L],".png"), height = 7, width = 5.5, dpi = 300)
otu.m.area <- otu.m
otu.m.area$exp_day_n <- as.numeric(otu.m$exp_day)
ggplot(otu.m.area, aes(x = exp_day_n, y = RelativeAbundance, fill = Taxa)) +
geom_area(stat ="identity") + labs(x="experiment day",y="root relative abundance") +
guides(fill=guide_legend(ncol=1)) + scale_x_continuous(limits = c(0,21)) +
scale_fill_manual(values=c("dodgerblue2","#E31A1C", # red # Kevin Wright
"green4",
"#6A3D9A", # purple
"#FF7F00", # orange
"black","gold1",
"skyblue2","#FB9A99", # lt pink
"palegreen2",
"#CAB2D6", # lt purple
"#FDBF6F", # lt orange
"gray70", "khaki2",
"maroon","orchid1","deeppink1","blue1","steelblue4",
"darkturquoise","green1","yellow4","yellow3",
"darkorange4","brown")) +
theme_classic() + theme(axis.text = element_text(color="black"))
ggsave(filename = paste0("../../results/mouse_plots_by_animal_",mouse.ids[m],"_L4.png"), height = 6, width = 7, dpi = 300)
}
|
# Flexmix M-step driver for GLM mixture components (gaussian, binomial,
# poisson, Gamma). Returns an "FLXMRglm" model object whose fit/refit closures
# run weighted glm.fit() calls during EM.
#
#   formula: component model formula (default keeps the full model formula)
#   family:  a family name, family function, or family object
#   offset:  optional model offset, forwarded to every glm.fit() call
FLXMRglm <- function(formula=.~., family=gaussian, offset=NULL)
{
  # Normalize `family` to a family object (accepts "gaussian", gaussian, gaussian()).
  if (is.character(family))
    family <- get(family, mode = "function", envir = parent.frame())
  if (is.function(family))
    family <- family()
  if (is.null(family$family)) {
    print(family)
    stop("'family' not recognized")
  }
  # Weighted refit used for post-hoc summaries; patches the df fields so they
  # reflect the sum of posterior weights rather than the raw row count.
  glmrefit <- function(x, y, w) {
    fit <- c(glm.fit(x, y, weights=w, offset=offset, family=family),
             list(call = sys.call(), offset = offset,
                  control = eval(formals(glm.fit)$control),
                  method = "weighted.glm.fit"))
    fit$df.null <- sum(w) + fit$df.null - fit$df.residual - fit$rank
    fit$df.residual <- sum(w) - fit$rank
    fit$x <- x
    fit
  }
  z <- new("FLXMRglm", weighted=TRUE, formula=formula,
           name=paste("FLXMRglm", family$family, sep=":"), offset = offset,
           family=family$family, refit=glmrefit)
  # The response must be a single column for every family handled here.
  z@preproc.y <- function(x) {
    if (ncol(x) > 1)
      stop(paste("for the", family$family, "family y must be univariate"))
    x
  }
  if (family$family=="gaussian") {
    z@defineComponent <- function(para) {
      predict <- function(x, ...) {
        dotarg = list(...)
        if("offset" %in% names(dotarg)) offset <- dotarg$offset
        p <- x %*% para$coef
        if (!is.null(offset)) p <- p + offset
        family$linkinv(p)
      }
      logLik <- function(x, y, ...)
        dnorm(y, mean=predict(x, ...), sd=para$sigma, log=TRUE)
      new("FLXcomponent",
          parameters=list(coef=para$coef, sigma=para$sigma),
          logLik=logLik, predict=predict,
          df=para$df)
    }
    # `component` start values are not needed here: the gaussian fit is a
    # single weighted least-squares solve. `weights = w` is spelled out — the
    # previous `w=w` relied on partial argument matching against glm.fit().
    z@fit <- function(x, y, w, component){
      fit <- glm.fit(x, y, weights = w, offset=offset, family = family)
      z@defineComponent(para = list(coef = coef(fit), df = ncol(x)+1,
                                    sigma = sqrt(sum(fit$weights * fit$residuals^2 /
                                                       mean(fit$weights))/ (nrow(x)-fit$rank))))
    }
  }
  else if (family$family=="binomial") {
    # Binomial responses arrive as a (successes, failures) two-column matrix.
    z@preproc.y <- function(x) {
      if (ncol(x) != 2)
      {
        stop("for the binomial family, y must be a 2 column matrix\n",
             "where col 1 is no. successes and col 2 is no. failures")
      }
      if (any(x < 0))
        stop("negative values are not allowed for the binomial family")
      x
    }
    z@defineComponent <- function(para) {
      predict <- function(x, ...) {
        dotarg = list(...)
        if("offset" %in% names(dotarg))
          offset <- dotarg$offset
        p <- x %*% para$coef
        if (!is.null(offset))
          p <- p + offset
        family$linkinv(p)
      }
      logLik <- function(x, y, ...)
        dbinom(y[,1], size=rowSums(y), prob=predict(x, ...), log=TRUE)
      new("FLXcomponent",
          parameters=list(coef=para$coef),
          logLik=logLik, predict=predict,
          df=para$df)
    }
    # Warm-start the IRLS iterations from the component's current coefficients.
    z@fit <- function(x, y, w, component) {
      fit <- glm.fit(x, y, weights=w, family=family, offset=offset, start=component$coef)
      z@defineComponent(para = list(coef = coef(fit), df = ncol(x)))
    }
  }
  else if (family$family=="poisson") {
    z@defineComponent <- function(para) {
      predict <- function(x, ...) {
        dotarg = list(...)
        if("offset" %in% names(dotarg)) offset <- dotarg$offset
        p <- x %*% para$coef
        if (!is.null(offset)) p <- p + offset
        family$linkinv(p)
      }
      logLik <- function(x, y, ...)
        dpois(y, lambda=predict(x, ...), log=TRUE)
      new("FLXcomponent",
          parameters=list(coef=para$coef),
          logLik=logLik, predict=predict,
          df=para$df)
    }
    z@fit <- function(x, y, w, component) {
      fit <- glm.fit(x, y, weights=w, family=family, offset=offset, start=component$coef)
      z@defineComponent(para = list(coef = coef(fit), df = ncol(x)))
    }
  }
  else if (family$family=="Gamma") {
    z@defineComponent <- function(para) {
      predict <- function(x, ...) {
        dotarg = list(...)
        if("offset" %in% names(dotarg)) offset <- dotarg$offset
        p <- x %*% para$coef
        if (!is.null(offset)) p <- p + offset
        family$linkinv(p)
      }
      logLik <- function(x, y, ...)
        dgamma(y, shape = para$shape, scale=predict(x, ...)/para$shape, log=TRUE)
      new("FLXcomponent",
          parameters = list(coef = para$coef, shape = para$shape),
          predict = predict, logLik = logLik,
          df = para$df)
    }
    # Moment-style shape estimate: sum of prior weights over the deviance.
    z@fit <- function(x, y, w, component) {
      fit <- glm.fit(x, y, weights=w, family=family, offset=offset, start=component$coef)
      z@defineComponent(para = list(coef = coef(fit), df = ncol(x)+1,
                                    shape = sum(fit$prior.weights)/fit$deviance))
    }
  }
  else stop(paste("Unknown family", family))
  z
}
| /patch_Bettina/FLXMRglm.R | no_license | yagu0/morpheus | R | false | false | 4,633 | r | FLXMRglm <- function(formula=.~., family=gaussian, offset=NULL)
{
if (is.character(family))
family <- get(family, mode = "function", envir = parent.frame())
if (is.function(family))
family <- family()
if (is.null(family$family)) {
print(family)
stop("'family' not recognized")
}
glmrefit <- function(x, y, w) {
fit <- c(glm.fit(x, y, weights=w, offset=offset, family=family),
list(call = sys.call(), offset = offset,
control = eval(formals(glm.fit)$control),
method = "weighted.glm.fit"))
fit$df.null <- sum(w) + fit$df.null - fit$df.residual - fit$rank
fit$df.residual <- sum(w) - fit$rank
fit$x <- x
fit
}
z <- new("FLXMRglm", weighted=TRUE, formula=formula,
name=paste("FLXMRglm", family$family, sep=":"), offset = offset,
family=family$family, refit=glmrefit)
z@preproc.y <- function(x) {
if (ncol(x) > 1)
stop(paste("for the", family$family, "family y must be univariate"))
x
}
if (family$family=="gaussian") {
z@defineComponent <- function(para) {
predict <- function(x, ...) {
dotarg = list(...)
if("offset" %in% names(dotarg)) offset <- dotarg$offset
p <- x %*% para$coef
if (!is.null(offset)) p <- p + offset
family$linkinv(p)
}
logLik <- function(x, y, ...)
dnorm(y, mean=predict(x, ...), sd=para$sigma, log=TRUE)
new("FLXcomponent",
parameters=list(coef=para$coef, sigma=para$sigma),
logLik=logLik, predict=predict,
df=para$df)
}
z@fit <- function(x, y, w, component){
fit <- glm.fit(x, y, w=w, offset=offset, family = family)
z@defineComponent(para = list(coef = coef(fit), df = ncol(x)+1,
sigma = sqrt(sum(fit$weights * fit$residuals^2 /
mean(fit$weights))/ (nrow(x)-fit$rank))))
}
}
else if (family$family=="binomial") {
z@preproc.y <- function(x) {
if (ncol(x) != 2)
{
stop("for the binomial family, y must be a 2 column matrix\n",
"where col 1 is no. successes and col 2 is no. failures")
}
if (any(x < 0))
stop("negative values are not allowed for the binomial family")
x
}
z@defineComponent <- function(para) {
predict <- function(x, ...) {
dotarg = list(...)
if("offset" %in% names(dotarg))
offset <- dotarg$offset
p <- x %*% para$coef
if (!is.null(offset))
p <- p + offset
family$linkinv(p)
}
logLik <- function(x, y, ...)
dbinom(y[,1], size=rowSums(y), prob=predict(x, ...), log=TRUE)
new("FLXcomponent",
parameters=list(coef=para$coef),
logLik=logLik, predict=predict,
df=para$df)
}
z@fit <- function(x, y, w, component) {
fit <- glm.fit(x, y, weights=w, family=family, offset=offset, start=component$coef)
z@defineComponent(para = list(coef = coef(fit), df = ncol(x)))
}
}
else if (family$family=="poisson") {
z@defineComponent <- function(para) {
predict <- function(x, ...) {
dotarg = list(...)
if("offset" %in% names(dotarg)) offset <- dotarg$offset
p <- x %*% para$coef
if (!is.null(offset)) p <- p + offset
family$linkinv(p)
}
logLik <- function(x, y, ...)
dpois(y, lambda=predict(x, ...), log=TRUE)
new("FLXcomponent",
parameters=list(coef=para$coef),
logLik=logLik, predict=predict,
df=para$df)
}
z@fit <- function(x, y, w, component) {
fit <- glm.fit(x, y, weights=w, family=family, offset=offset, start=component$coef)
z@defineComponent(para = list(coef = coef(fit), df = ncol(x)))
}
}
else if (family$family=="Gamma") {
z@defineComponent <- function(para) {
predict <- function(x, ...) {
dotarg = list(...)
if("offset" %in% names(dotarg)) offset <- dotarg$offset
p <- x %*% para$coef
if (!is.null(offset)) p <- p + offset
family$linkinv(p)
}
logLik <- function(x, y, ...)
dgamma(y, shape = para$shape, scale=predict(x, ...)/para$shape, log=TRUE)
new("FLXcomponent",
parameters = list(coef = para$coef, shape = para$shape),
predict = predict, logLik = logLik,
df = para$df)
}
z@fit <- function(x, y, w, component) {
fit <- glm.fit(x, y, weights=w, family=family, offset=offset, start=component$coef)
z@defineComponent(para = list(coef = coef(fit), df = ncol(x)+1,
shape = sum(fit$prior.weights)/fit$deviance))
}
}
else stop(paste("Unknown family", family))
z
}
|
# Exploratory analysis of the anonymity poll data.
# `<-` used throughout for assignment (the script previously used `=`).
getwd()
poll <- read.csv("AnonymityPoll.csv")
nrow(poll)
table(poll$Smartphone)
table(poll$State, poll$Region)
table(poll$Internet.Use, poll$Smartphone)
summary(poll)
# Limit to respondents who use the internet and/or a smartphone.
limited <- subset(poll, Internet.Use == "1" | Smartphone == "1")
nrow(limited)
table(limited$Info.On.Internet)
table(limited$Anonymity.Possible)
table(limited$Tried.Masking.Identity)
table(limited$Privacy.Laws.Effective)
plot(limited$Age, limited$Info.On.Internet)
table(limited$Age, limited$Info.On.Internet)
max(table(limited$Age, limited$Info.On.Internet))
# jitter() adds small random noise so overlapping points become visible.
jitter(c(1, 2, 3))
plot(jitter(limited$Age), jitter(limited$Info.On.Internet))
tapply(limited$Info.On.Internet, limited$Smartphone, summary, na.rm = TRUE)
tapply(limited$Tried.Masking.Identity, limited$Smartphone, summary, na.rm = TRUE)
tapply(limited$Tried.Masking.Identity,limited$Smartphone,table) | /Week1/Solutions_poll.Rd | no_license | goelshivani321/Analytics_Edge | R | false | false | 806 | rd | getwd()
# Exploratory analysis of the anonymity poll data.
# `<-` used throughout for assignment (the script previously used `=`).
poll <- read.csv("AnonymityPoll.csv")
nrow(poll)
table(poll$Smartphone)
table(poll$State, poll$Region)
table(poll$Internet.Use, poll$Smartphone)
summary(poll)
# Limit to respondents who use the internet and/or a smartphone.
limited <- subset(poll, Internet.Use == "1" | Smartphone == "1")
nrow(limited)
table(limited$Info.On.Internet)
table(limited$Anonymity.Possible)
table(limited$Tried.Masking.Identity)
table(limited$Privacy.Laws.Effective)
plot(limited$Age, limited$Info.On.Internet)
table(limited$Age, limited$Info.On.Internet)
max(table(limited$Age, limited$Info.On.Internet))
# jitter() adds small random noise so overlapping points become visible.
jitter(c(1, 2, 3))
plot(jitter(limited$Age), jitter(limited$Info.On.Internet))
tapply(limited$Info.On.Internet, limited$Smartphone, summary, na.rm = TRUE)
tapply(limited$Tried.Masking.Identity, limited$Smartphone, summary, na.rm = TRUE)
tapply(limited$Tried.Masking.Identity, limited$Smartphone, table)
performanceAnalysisUIInput <- function(){
  # Input widgets for the performance analysis: current total score and the
  # number of rounds already played.
  shinydashboard::box(
    shiny::sliderInput(
      inputId = "totalPoints",
      label = "Was ist der Punktestand?",
      min = -1000,
      max = 1000,
      value = 0
    ),
    shiny::sliderInput(
      inputId = "nbrOfRounds",
      label = "Wie viele Runden hast du schon gezockt?",
      min = 20,
      max = 1000,
      value = 20
    )
  )
}
performanceAnalysisOutput <- function(totalPoints, nbrOfRounds){
  # Two-sided p-value for the hypothesis that the player's true edge is zero,
  # given the running score after `nbrOfRounds` rounds.
  # https://www.pokernews.com/strategy/nikolai-yakovenko-on-the-state-of-open-face-ii-19859.htm
  # Per-round standard deviation of the score (see link above).
  sigma <- 18
  # Central limit theorem: the standardized total score S is ~ N(0, 1).
  S <- totalPoints / sqrt(nbrOfRounds) / sigma
  # Two-sided tail probability, returned as the (visible) function value.
  # The original assigned this to a local on the last line, which made the
  # return value invisible at top level.
  2 * (1 - pnorm(abs(S)))
}
shinydashboard::box(
shiny::sliderInput(
inputId = "totalPoints",
label = "Was ist der Punktestand?",
min = -1000, max = 1000, value = 0
)
, shiny::sliderInput(
inputId = "nbrOfRounds",
label = "Wie viele Runden hast du schon gezockt?",
min = 20, max = 1000, value = 20
)
) #box
}
performanceAnalysisOutput <- function(totalPoints, nbrOfRounds){
# https://www.pokernews.com/strategy/nikolai-yakovenko-on-the-state-of-open-face-ii-19859.htm
sigma <- 18
# central limit theorem: S is standard normally distributed
S <- totalPoints/sqrt(nbrOfRounds)/sigma
# two-sided probability
result <- 2*(1 - pnorm(abs(S)))
} |
#right now we only look for and care about reverse dependencies
# in GRAN packages so we only look in repo@TempLibLoc
## Flag manifest packages that must be rebuilt because they are reverse
## dependencies of packages already marked for building ("vbump" = version
## bump). Reverse dependencies are looked up only among packages installed
## in the repo's temporary library (temp_lib(repo)); see the note at the
## top of this file.
##
## repo_results(), logfun(), temp_lib(), getBuilding() and suspended_pkgs()
## are project accessors defined elsewhere; behavior is documented here only
## as far as this file shows it.
##
## Returns the repo object with repo_results(repo) updated in the columns
## revDepOf, revdepends, buildReason and building.
addRevDeps = function(repo)
{
    ##reset and recalculate reverse dependencies every time. Expensive
    ##but protects us against packages being added or removed from
    ##manifest
    repo_results(repo)$revDepOf = ""
    repo_results(repo)$revdepends = ""
    ## Normalize buildReason: create the column if it is missing, blank out
    ## NAs otherwise, and tag every package currently marked for building as
    ## a version bump.
    if(is.null(repo_results(repo)$buildReason))
        repo_results(repo)$buildReason = ifelse(repo_results(repo)$building, "vbump", "")
    else {
        repo_results(repo)$buildReason[is.na(repo_results(repo)$buildReason)] = ""
        repo_results(repo)$buildReason[repo_results(repo)$building] = "vbump"
    }
    logfun(repo)("NA", paste("Checking for reverse dependencies to packages",
                             "with version bumps."))
    manifest = repo_results(repo)
    ##if this is the first time we are building the repository all the packages
    ##will be being built, so no need to check rev deps
    ## ignore suspended packages when checking if everything is being built
    if(!file.exists(temp_lib(repo)) || all(getBuilding(repo) |
                    manifest$name %in% suspended_pkgs(repo))) {
        logfun(repo)("NA", paste("All packages are being built, skipping",
                                 "reverse dependency check."))
        return(repo)
    }
    ## Packages slated for building; their reverse dependencies are what we
    ## search for below.
    pkgs = manifest$name[getBuilding(repo)]
    ## One character vector of reverse dependencies per building package,
    ## computed against the temporary library.
    ## NOTE(review): dependsOnPkgs is presumably tools::dependsOnPkgs --
    ## confirm it is imported elsewhere in this package.
    revdeps = sapply(pkgs, dependsOnPkgs, dependencies = "all",
                     lib.loc = temp_lib(repo), simplify=FALSE)
    ## NOTE(review): revdeps is a list here, so `revdeps %in% pkgs` matches
    ## whole list elements after coercion; `unlist(revdeps) %in% pkgs` may be
    ## the intended test -- verify.
    if(!length(unlist(revdeps)) || all(revdeps %in% pkgs)) {
        logfun(repo)("NA", "No reverse dependencies detected")
        return(repo)
    }
    ## Record, for each building package, which manifest rows depend on it
    ## (revDepOf accumulates a comma-separated list of trigger packages).
    if(length(revdeps) > 0)
    {
        for(pkgname in names(revdeps)) {
            rdeps = revdeps[[pkgname]]
            if(length(rdeps) && !all(is.na(rdeps)))
            {
                ## Map each reverse-dependency name to its manifest row;
                ## names not in the manifest yield numeric() and drop out in
                ## the unlist() below.
                rows = sapply(rdeps, function(pkg) {
                    x = which(pkg == manifest$name)
                    if(!length(x))
                        x = numeric()
                    x
                })
                rows = unlist(rows)
                ## Prepend a comma only when revDepOf already has content.
                prefix = ifelse(nchar(manifest$revDepOf[rows]) > 0, ",", "")
                manifest$revDepOf[rows] = paste0(manifest$revDepOf[rows],
                           prefix, pkgname)
            }
            manifest$revdepends[manifest$name == pkgname] = paste(rdeps,
                           collapse = " , ")
        }
    }
    ## Rows to rebuild: not already a version bump, not suspended, and
    ## listing at least one building package in revDepOf.
    ## NOTE(review): the regex alternation is not anchored, so substring
    ## matches between package names are possible -- confirm acceptable.
    rdepBuildPkgs = manifest$buildReason != "vbump" & !(manifest$name %in% suspended_pkgs(repo)) & grepl(paste0("(", paste(pkgs, collapse="|"), ")"), manifest$revDepOf)
    manifest[rdepBuildPkgs, "buildReason"] = "is rdep"
    manifest[rdepBuildPkgs, "building"] = TRUE
    ## NOTE(review): the message below lacks a space between "of" and
    ## "packages" (string left unchanged here).
    logfun(repo)("NA", paste0("Detected ", sum(rdepBuildPkgs),
                              " packages that are reverse dependencies of",
                              "packages with version bumps:\n\t",
                              paste(manifest$name[rdepBuildPkgs],
                                    collapse=" , ")))
    ## I don't think we need this because this is happening before the main
    ## buildBranchesInRepo call....
    ## if(sum(rdepBuildPkgs) >0) {
    ##     logfun(repo)("NA",
    ##     "Building reverse dependencies in temporary repository.")
    ##     tmprepo = repo
    ##     repo_results(tmprepo) = manifest[rdepBuildPkgs,]
    ##     manifest_df(tmprepo) = manifest_df(tmprepo)[rdepBuildPkgs,]
    ##     versions_df(tmprepo) = versions_df(tmprepo)[rdepBuildPkgs,]
    ##     repo_results(tmprepo)$building = TRUE
    ##     repo_results(tmprepo)$status="ok"
    ##     tmprepo = buildBranchesInRepo(repo = tmprepo, temp = TRUE,
    #                                incremental = FALSE)
    ##                                  incremental = TRUE)
    ##     manifest[rdepBuildPkgs, ] = repo_results(tmprepo)
    ## }
    repo_results(repo) = manifest
    repo
}
| /R/addRevDeps.R | no_license | mkearney/gRAN | R | false | false | 4,002 | r | #right now we only look for and care about reverse dependencies
# in GRAN packages so we only look in repo@TempLibLoc
addRevDeps = function(repo)
{
##reset and recalculate reverse dependencies every time. Expensive
##but protects us against packages being added or removed from
##manfiest
repo_results(repo)$revDepOf = ""
repo_results(repo)$revdepends = ""
if(is.null(repo_results(repo)$buildReason))
repo_results(repo)$buildReason = ifelse(repo_results(repo)$building, "vbump", "")
else {
repo_results(repo)$buildReason[is.na(repo_results(repo)$buildReason)] = ""
repo_results(repo)$buildReason[repo_results(repo)$building] = "vbump"
}
logfun(repo)("NA", paste("Checking for reverse dependencies to packages",
"with version bumps."))
manifest = repo_results(repo)
##if this is the first time we are building the repository all the packages
##will be being built, so no need to check rev deps
## ignore suspended packages when checking if everything is being built
if(!file.exists(temp_lib(repo)) || all(getBuilding(repo) |
manifest$name %in% suspended_pkgs(repo))) {
logfun(repo)("NA", paste("All packages are being built, skipping",
"reverse dependency check."))
return(repo)
}
pkgs = manifest$name[getBuilding(repo)]
revdeps = sapply(pkgs, dependsOnPkgs, dependencies = "all",
lib.loc = temp_lib(repo), simplify=FALSE)
if(!length(unlist(revdeps)) || all(revdeps %in% pkgs)) {
logfun(repo)("NA", "No reverse dependencies detected")
return(repo)
}
if(length(revdeps) > 0)
{
for(pkgname in names(revdeps)) {
rdeps = revdeps[[pkgname]]
if(length(rdeps) && !all(is.na(rdeps)))
{
rows = sapply(rdeps, function(pkg) {
x = which(pkg == manifest$name)
if(!length(x))
x = numeric()
x
})
rows = unlist(rows)
prefix = ifelse(nchar(manifest$revDepOf[rows]) > 0, ",", "")
manifest$revDepOf[rows] = paste0(manifest$revDepOf[rows],
prefix, pkgname)
}
manifest$revdepends[manifest$name == pkgname] = paste(rdeps,
collapse = " , ")
}
}
rdepBuildPkgs = manifest$buildReason != "vbump" & !(manifest$name %in% suspended_pkgs(repo)) & grepl(paste0("(", paste(pkgs, collapse="|"), ")"), manifest$revDepOf)
manifest[rdepBuildPkgs, "buildReason"] = "is rdep"
manifest[rdepBuildPkgs, "building"] = TRUE
logfun(repo)("NA", paste0("Detected ", sum(rdepBuildPkgs),
" packages that are reverse dependencies of",
"packages with version bumps:\n\t",
paste(manifest$name[rdepBuildPkgs],
collapse=" , ")))
## I don't think we need this because this is happening before the main
## buildBranchesInRepo call....
## if(sum(rdepBuildPkgs) >0) {
## logfun(repo)("NA",
## "Building reverse dependencies in temporary repository.")
## tmprepo = repo
## repo_results(tmprepo) = manifest[rdepBuildPkgs,]
## manifest_df(tmprepo) = manifest_df(tmprepo)[rdepBuildPkgs,]
## versions_df(tmprepo) = versions_df(tmprepo)[rdepBuildPkgs,]
## repo_results(tmprepo)$building = TRUE
## repo_results(tmprepo)$status="ok"
## tmprepo = buildBranchesInRepo(repo = tmprepo, temp = TRUE,
# incremental = FALSE)
## incremental = TRUE)
## manifest[rdepBuildPkgs, ] = repo_results(tmprepo)
## }
repo_results(repo) = manifest
repo
}
|
# Decision-tree classification of the Carseats-style "company" data:
# discretize Sales into low/medium/high, explore, then compare C5.0,
# boosted C5.0, bagged C5.0 and tree::tree models.
# NOTE(review): file.choose() makes this script interactive; reported
# accuracies in comments are from one past run and depend on the chosen file.
company<-read.csv(file.choose())
View(company)
summary(company)
# converting sales to low, medium and high
company$Sales<-cut(company$Sales,c(-1,6,12,18),labels = c('low','medium','high'))
View(company)
# checking proportion of output class
prop.table(table(company$Sales))
# checking for duplicate rows
dim(unique(company))
# NOTE(review): attach() is discouraged; the bare column names in the
# boxplot calls below rely on it.
attach(company)
# checking for outliers
boxplot(CompPrice)
boxplot(CompPrice,plot = F)$out
boxplot(Income)
boxplot(Income,plot = F)$out
boxplot(Advertising)
boxplot(Advertising,plot = F)$out
boxplot(Population)
boxplot(Population,plot = F)$out
boxplot(Price)
boxplot(Price,plot = F)$out
# NOTE(review): Population is inspected twice; one of these was perhaps
# meant to be a different column.
boxplot(Population)
boxplot(Population,plot = F)$out
boxplot(Age)
boxplot(Age,plot = F)$out
boxplot(Education)
boxplot(Education,plot = F)$out
# splitting data into train and test
library(caTools)
set.seed(123)
split<-sample.split(company$Sales,SplitRatio = 0.8)
train_set<-subset(company,split==TRUE)
test_set<-subset(company,split==FALSE)
prop.table(table(train_set$Sales))
prop.table(table(test_set$Sales))
# Applying C5.0 (column 1 is the Sales outcome, hence the [,-1] predictors)
library(C50)
company_tree<-C5.0(train_set[,-1],train_set$Sales)
plot(company_tree)
# Training accuracy
pred_train<-predict(company_tree,train_set)
mean(pred_train==train_set$Sales) # 0.91 accuracy
# Predicting on test data
library(caret)
pred_test<-predict(company_tree,newdata = test_set)
mean(pred_test==test_set$Sales) # 0.66 accuracy. Problem of overfitting
# Adaboost: trials=15 turns on C5.0's boosting.
# NOTE(review): nothing inside this loop varies with i, so the 101 fits are
# identical; averaging acc over them adds no information.
acc<-c()
# building 101 models
for(i in 1:101){
  fittree<-C5.0(train_set$Sales~.,data = train_set[,-1],trials=15)
  pred_ada<-predict.C5.0(fittree,test_set[,-1])
  a<-table(pred_ada,test_set$Sales)
  acc<-c(acc,sum(diag(a))/sum(a))
}
mean(acc) # 0.72 is the accuracy. We will try now bagging
# Applying bagging
model_bag<-C5.0(train_set$Sales~.,data = train_set[,-1])
pred_bag<-predict.C5.0(model_bag,test_set[,-1])
confusionMatrix(pred_bag,test_set$Sales)
fittree_bag<-vector(mode = 'list',length = 40)
test_1<-vector(mode = 'list',length = 40)
set.seed(2)
# Bagging: 40 bootstrap-style resplits, one C5.0 tree each, test sets kept
# alongside for per-model evaluation below.
for(i in 1:40){
  split_bag<-sample.split(company$Sales,SplitRatio = 0.8)
  train_bag<-subset(company,split_bag==TRUE)
  test_1[[i]]<-subset(company,split_bag==FALSE)
  fittree_bag[[i]]<-C5.0(train_bag$Sales~.,data = train_bag[,-1])
}
fittree_bag
test_1
# predictions, confusion matrix and accuracy for the 40 models
# NOTE(review): length = '40' passes a string; vector() coerces it, but the
# unquoted 40 used above is the intended form.
pred_bag<-vector(mode='list',length = '40')
confusion_bag<-vector(mode = 'list',length = '40')
accuracy_bag<-vector(mode = 'list',length = '40')
for(j in 1:length(fittree_bag)){
  pred_bag[[j]]<-predict.C5.0(fittree_bag[[j]],test_1[[j]][,-1],type = 'class')
  confusion_bag[[j]]<-table(pred_bag[[j]],test_1[[j]]$Sales)
  accuracy_bag[[j]]<-sum(diag(confusion_bag[[j]]))/sum(confusion_bag[[j]])
}
pred_bag
confusion_bag
accuracy_bag
class(accuracy_bag)
mean(unlist(accuracy_bag)) # Accuracy of 0.72
# using tree function
library(tree)
company_tree_new<-tree(train_set$Sales~.,data = train_set)
summary(company_tree_new)
# predicting test data using the model: tree returns class probabilities,
# so pick the highest-probability column per row as the final label.
pred_tree_new <- as.data.frame(predict(company_tree_new,newdata=test_set))
pred_tree_new["final"] <- NULL
pred_test_df <- predict(company_tree_new,newdata=test_set)
pred_tree_new$final <- colnames(pred_test_df)[apply(pred_test_df,1,which.max)]
mean(pred_tree_new$final==test_set$Sales) # accuracy 0.61
# Best model formed was using bagging technique.
| /company_data_DT.R | no_license | Bharatmankotia/pro_ds | R | false | false | 3,497 | r | company<-read.csv(file.choose())
View(company)
summary(company)
# converting sales to low, medium and high
company$Sales<-cut(company$Sales,c(-1,6,12,18),labels = c('low','medium','high'))
View(company)
# checking proportion of output class
prop.table(table(company$Sales))
# checking for duplicate rows
dim(unique(company))
attach(company)
# checking for outliers
boxplot(CompPrice)
boxplot(CompPrice,plot = F)$out
boxplot(Income)
boxplot(Income,plot = F)$out
boxplot(Advertising)
boxplot(Advertising,plot = F)$out
boxplot(Population)
boxplot(Population,plot = F)$out
boxplot(Price)
boxplot(Price,plot = F)$out
boxplot(Population)
boxplot(Population,plot = F)$out
boxplot(Age)
boxplot(Age,plot = F)$out
boxplot(Education)
boxplot(Education,plot = F)$out
# splitting data into train and test
library(caTools)
set.seed(123)
split<-sample.split(company$Sales,SplitRatio = 0.8)
train_set<-subset(company,split==TRUE)
test_set<-subset(company,split==FALSE)
prop.table(table(train_set$Sales))
prop.table(table(test_set$Sales))
# Applying C5.0
library(C50)
company_tree<-C5.0(train_set[,-1],train_set$Sales)
plot(company_tree)
# Training accuracy
pred_train<-predict(company_tree,train_set)
mean(pred_train==train_set$Sales) # 0.91 accuracy
# Predicting on test data
library(caret)
pred_test<-predict(company_tree,newdata = test_set)
mean(pred_test==test_set$Sales) # 0.66 accuracy. Problem of overfitting
# Adaboost
acc<-c()
# building 101 models
for(i in 1:101){
fittree<-C5.0(train_set$Sales~.,data = train_set[,-1],trials=15)
pred_ada<-predict.C5.0(fittree,test_set[,-1])
a<-table(pred_ada,test_set$Sales)
acc<-c(acc,sum(diag(a))/sum(a))
}
mean(acc) # 0.72 is the accuracy. We will try now bagging
# Applying bagging
model_bag<-C5.0(train_set$Sales~.,data = train_set[,-1])
pred_bag<-predict.C5.0(model_bag,test_set[,-1])
confusionMatrix(pred_bag,test_set$Sales)
fittree_bag<-vector(mode = 'list',length = 40)
test_1<-vector(mode = 'list',length = 40)
set.seed(2)
for(i in 1:40){
split_bag<-sample.split(company$Sales,SplitRatio = 0.8)
train_bag<-subset(company,split_bag==TRUE)
test_1[[i]]<-subset(company,split_bag==FALSE)
fittree_bag[[i]]<-C5.0(train_bag$Sales~.,data = train_bag[,-1])
}
fittree_bag
test_1
# predictions, confusion matrixand accuracy for the 40 models
pred_bag<-vector(mode='list',length = '40')
confusion_bag<-vector(mode = 'list',length = '40')
accuracy_bag<-vector(mode = 'list',length = '40')
for(j in 1:length(fittree_bag)){
pred_bag[[j]]<-predict.C5.0(fittree_bag[[j]],test_1[[j]][,-1],type = 'class')
confusion_bag[[j]]<-table(pred_bag[[j]],test_1[[j]]$Sales)
accuracy_bag[[j]]<-sum(diag(confusion_bag[[j]]))/sum(confusion_bag[[j]])
}
pred_bag
confusion_bag
accuracy_bag
class(accuracy_bag)
mean(unlist(accuracy_bag)) # Accuracy of 0.72
# using tree function
library(tree)
company_tree_new<-tree(train_set$Sales~.,data = train_set)
summary(company_tree_new)
# predicting test data using the model
pred_tree_new <- as.data.frame(predict(company_tree_new,newdata=test_set))
pred_tree_new["final"] <- NULL
pred_test_df <- predict(company_tree_new,newdata=test_set)
pred_tree_new$final <- colnames(pred_test_df)[apply(pred_test_df,1,which.max)]
mean(pred_tree_new$final==test_set$Sales) # accuracy 0.61
# Best model formed was using bagging technique.
|
#' @title SequenceAnalysis.GetProteinSequence
#' @description Get Protein Sequence From UniProt
#' @details Downloads the sequence page for the given UniProt ID (URL built
#' from the package-level \code{Protein_Address} base address), extracts the
#' first \code{<pre>} block and strips digits and spaces from it. Always
#' returns a named character vector: the sequence on success, or "N/A" when
#' the page cannot be retrieved or contains no \code{<pre>} block.
#' @author Babak Khorsand
#' @import XML
#' @export SequenceAnalysis.GetProteinSequence
#' @param UniprotKB UniProt ID of a sequence
#' @return A length-one character vector named "Protein" holding the
#' sequence, or "N/A" if it could not be obtained.
#' @examples
#' SequenceAnalysis.GetProteinSequence("O15131")
SequenceAnalysis.GetProteinSequence = function(UniprotKB)
{
  # Fallback result. The original initialized doc.text="N/A" but then fell
  # off the end and returned NULL invisibly on any failure; we now return
  # the named "N/A" consistently so callers always get a character vector.
  Protein = "N/A"
  url = paste(Protein_Address, UniprotKB, sep = "")
  # Network/parse failures are treated as "sequence unavailable".
  doc.html = tryCatch(htmlTreeParse(url, useInternalNodes = TRUE),
                      error = function(err) NULL)
  if (!is.null(doc.html))
  {
    doc.text = unlist(xpathApply(doc.html, '//pre', xmlValue))
    if (!is.null(doc.text))
    {
      # Drop position numbers and spacing inserted by the HTML layout.
      # NOTE(review): newlines are left in place, as in the original output.
      doc.text = gsub("[0-9]", "", doc.text)
      doc.text = gsub(" ", "", doc.text)
      # doc.text[1] is NA (never NULL) when the vector is empty; the
      # original tested is.null() here, which could never trigger.
      if (!is.na(doc.text[1]))
        Protein = doc.text[1]
    }
  }
  names(Protein) = "Protein"
  return(Protein)
}
| /R/GetProteinSequence.R | no_license | cran/SequenceAnalysis | R | false | false | 990 | r | #' @title SequenceAnalysis.GetProteinSequence
#' @description Get Protein Sequence From UniProt
#' @details Get Protein Sequence From UniProt by UniProt ID
#' @author Babak Khorsand
#' @import XML
#' @export SequenceAnalysis.GetProteinSequence
#' @param UniprotKB UniProt ID of a sequence
#' @return Protein Sequence
#' @examples
#' SequenceAnalysis.GetProteinSequence("O15131")
SequenceAnalysis.GetProteinSequence = function(UniprotKB)
{
doc.text="N/A"
url = paste(Protein_Address,UniprotKB,sep = "")
doc.html = tryCatch({htmlTreeParse(url, useInternalNodes = TRUE)},error=function(err){return (NULL)})
if (!is.null(doc.html))
{
doc.text = unlist(xpathApply(doc.html, '//pre', xmlValue))
if (!is.null(doc.text))
{
doc.text = gsub("[0-9]","",doc.text)
doc.text = gsub(" ","",doc.text)
Protein=doc.text[1]
if (is.null(Protein))
Protein="N/A"
names(Protein)="Protein"
return(Protein)
}
}
}
|
#DATA CLEANING AND TRANSFORMATION IN R USING SLEEP DATASET WHICH IS IN-BUILT IN PACKAGE CALLED VIM
# data() COMMAND USED TO SEE IN-BUILT DATASET IN R
# Workflow: visualize missingness, median-impute the numeric columns, derive
# a flag and binned versions of two columns, and recode Danger to labels.
library(VIM)
library(naniar)
library(ggplot2)
library(DataExplorer)
data(sleep, package = "VIM")
sp=sleep
# Missing-data and distribution overview (DataExplorer / naniar / VIM).
plot_missing(sp)
qplot(sp$BodyWgt,sp$Sleep)
plot_histogram(sp)
boxplot(sp$Sleep)
gg_miss_var(sp)
res<-summary(aggr(sp, sortVar=TRUE))$combinations
head(sp)
tail(sp)
mean(sp$NonD,na.rm=TRUE)
mean(sp$Sleep,na.rm=TRUE)
max(sp$Span,na.rm=TRUE)
summary(sp)
# Median imputation of every column that had NAs above.
sp$NonD=ifelse(is.na(sp$NonD),median(sp$NonD,na.rm = TRUE),sp$NonD)
sp$Dream=ifelse(is.na(sp$Dream),median(sp$Dream,na.rm = TRUE),sp$Dream)
sp$Sleep=ifelse(is.na(sp$Sleep),median(sp$Sleep,na.rm = TRUE),sp$Sleep)
sp$Span=ifelse(is.na(sp$Span),median(sp$Span,na.rm = TRUE),sp$Span)
sp$Gest=ifelse(is.na(sp$Gest),median(sp$Gest,na.rm = TRUE),sp$Gest)
summary(sp)
head(sp)
# Derived logical flag: body weight above 100.
sp$Harmful.WgtGain=sp$BodyWgt>100
head(sp)
# Bin Dream and Span into fixed-width intervals (factor levels from cut()).
intervals=c(0,5,10,15,20,25,30,35)
intervals
sp$Dream=cut(sp$Dream,breaks= intervals,include.lowest = TRUE)
head(sp)
span_set=c(0,20,40,60,80,100,120,140)
span_set
sp$Span=cut(sp$Span,breaks= span_set,include.lowest = TRUE)
head(sp)
# Recode the 1-5 Danger codes to labels on a copy (gsub coerces Danger to
# character; numeric patterns are coerced to the strings "1".."5").
s1=sp
s1$Danger=gsub(1,'Very low',s1$Danger)
s1$Danger=gsub(2,'Low',s1$Danger)
s1$Danger=gsub(3,'Medium',s1$Danger)
s1$Danger=gsub(4,'High',s1$Danger)
s1$Danger=gsub(5,'Very High',s1$Danger)
tail(s1)
head(s1)
# Re-check missingness after imputation.
gg_miss_var(sp)
res<-summary(aggr(sp, sortVar=TRUE))$combinations
# Using transform function
sp_ex1 <- transform(sp, Pred = Pred + 1)
head(sp_ex1)
| /cleaning&Transformation.R | no_license | kruti-jain-stack/Data-Cleaning-and-Transformation-R | R | false | false | 1,567 | r | #DATA CLEANING AND TRANSFORMATION IN R USING SLEEP DATASET WHICH IS IN-BUILT IN PACKAGE CALLED VIM
# data() COMMAND USED TO SEE IN-BUILT DATASET IN R
library(VIM)
library(naniar)
library(ggplot2)
library(DataExplorer)
data(sleep, package = "VIM")
sp=sleep
plot_missing(sp)
qplot(sp$BodyWgt,sp$Sleep)
plot_histogram(sp)
boxplot(sp$Sleep)
gg_miss_var(sp)
res<-summary(aggr(sp, sortVar=TRUE))$combinations
head(sp)
tail(sp)
mean(sp$NonD,na.rm=TRUE)
mean(sp$Sleep,na.rm=TRUE)
max(sp$Span,na.rm=TRUE)
summary(sp)
sp$NonD=ifelse(is.na(sp$NonD),median(sp$NonD,na.rm = TRUE),sp$NonD)
sp$Dream=ifelse(is.na(sp$Dream),median(sp$Dream,na.rm = TRUE),sp$Dream)
sp$Sleep=ifelse(is.na(sp$Sleep),median(sp$Sleep,na.rm = TRUE),sp$Sleep)
sp$Span=ifelse(is.na(sp$Span),median(sp$Span,na.rm = TRUE),sp$Span)
sp$Gest=ifelse(is.na(sp$Gest),median(sp$Gest,na.rm = TRUE),sp$Gest)
summary(sp)
head(sp)
sp$Harmful.WgtGain=sp$BodyWgt>100
head(sp)
intervals=c(0,5,10,15,20,25,30,35)
intervals
sp$Dream=cut(sp$Dream,breaks= intervals,include.lowest = TRUE)
head(sp)
span_set=c(0,20,40,60,80,100,120,140)
span_set
sp$Span=cut(sp$Span,breaks= span_set,include.lowest = TRUE)
head(sp)
s1=sp
s1$Danger=gsub(1,'Very low',s1$Danger)
s1$Danger=gsub(2,'Low',s1$Danger)
s1$Danger=gsub(3,'Medium',s1$Danger)
s1$Danger=gsub(4,'High',s1$Danger)
s1$Danger=gsub(5,'Very High',s1$Danger)
tail(s1)
head(s1)
gg_miss_var(sp)
res<-summary(aggr(sp, sortVar=TRUE))$combinations
# Using transform function
sp_ex1 <- transform(sp, Pred = Pred + 1)
head(sp_ex1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_series.R
\name{read.subtitles.season}
\alias{read.subtitles.season}
\alias{read.subtitles.serie}
\alias{read.subtitles.multiseries}
\title{Read series subtitles}
\usage{
read.subtitles.season(dir, quietly = FALSE, format = "auto", ...)
read.subtitles.serie(dir, quietly = FALSE, format = "auto", ...)
read.subtitles.multiseries(dir, quietly = FALSE, format = "auto", ...)
}
\arguments{
\item{dir}{the name of the directory which the subtitles are to be read from (see Details).}
\item{quietly}{a logical. If \code{FALSE} (default), a message indicating the number of imported files is printed.}
\item{format}{a character string specifying the format of the subtitles
(default is "\code{auto}", see \code{\link{read.subtitles}} for details).}
\item{...}{further arguments to be passed to \code{\link{read.subtitles}}.}
}
\value{
An object of class \code{MultiSubtitles};
i.e. a list of \code{\link{Subtitles}} objects.
}
\description{
These functions read one or several subtitles files organized in directories.
They are especially designed to import subtitles of series with multiple episodes.
}
\details{
These functions read subtitles files at different levels from a 3-levels directory (see the tree below).
The function \code{read.subtitles.multiseries} reads everything recursively from "Series_Collection".
The function \code{read.subtitles.serie} reads everything recursively from a serie folder (e.g. "Serie_A").
The function \code{read.subtitles.season} reads everything from a season folder (e.g. "Season_1").
To read a specific episode file (e.g. "Episode_1.srt"), use \code{\link{read.subtitles}}.
\preformatted{
Series_Collection
|-- Serie_A
| |-- Season_1
| | |-- Episode_1.srt
|-- Serie_B
| |-- Season_1
| | |-- Episode_1.srt
| |-- Season_2
| | |-- Episode_1.srt
| | |-- Episode_2.srt}
}
| /man/read_series.Rd | no_license | hrbrmstr/subtools | R | false | true | 1,913 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_series.R
\name{read.subtitles.season}
\alias{read.subtitles.season}
\alias{read.subtitles.serie}
\alias{read.subtitles.multiseries}
\title{Read series subtitles}
\usage{
read.subtitles.season(dir, quietly = FALSE, format = "auto", ...)
read.subtitles.serie(dir, quietly = FALSE, format = "auto", ...)
read.subtitles.multiseries(dir, quietly = FALSE, format = "auto", ...)
}
\arguments{
\item{dir}{the name of the directory which the subtitles are to be read from (see Details).}
\item{quietly}{a logical. If \code{FALSE} (default), a message indicating the number of imported files is printed.}
\item{format}{a character string specifying the format of the subtitles
(default is "\code{auto}", see \code{\link{read.subtitles}} for details).}
\item{...}{further arguments to be passed to \code{\link{read.subtitles}}.}
}
\value{
An object of class \code{MultiSubtitles};
i.e. a list of \code{\link{Subtitles}} objects.
}
\description{
These functions read one or several subtitles files organized in directories.
They are especially designed to import subtitles of series with multiple episodes.
}
\details{
These functions read subtitles files at different levels from a 3-levels directory (see the tree below).
The function \code{read.subtitles.multiseries} reads everything recursively from "Series_Collection".
The function \code{read.subtitles.serie} reads everything recursively from a serie folder (e.g. "Serie_A").
The function \code{read.subtitles.season} reads everything from a season folder (e.g. "Season_1").
To read a specific episode file (e.g. "Episode_1.srt"), use \code{\link{read.subtitles}}.
\preformatted{
Series_Collection
|-- Serie_A
| |-- Season_1
| | |-- Episode_1.srt
|-- Serie_B
| |-- Season_1
| | |-- Episode_1.srt
| |-- Season_2
| | |-- Episode_1.srt
| | |-- Episode_2.srt}
}
|
# Purchase utility function (negated, for use with minimizers such as
# stats::constrOptim).
uP_fn <- function(e, psi, gamma,price, S){
  # Negative purchase utility.
  # e: expenditure allocation; first S elements are the inside goods, the
  #    remaining elements the outside goods (an "S+2 vector").
  # psi, gamma, price: preference weights, satiation parameters and prices
  #    aligned with e ((S+2)-dimension vectors).
  # NOTE(review): `Sa` (total number of goods) and `qz_cons` are globals
  # defined elsewhere in this project -- confirm they are in scope at call
  # time.
  e0 <- e[1:S]
  e_S1 <- e[(S+1):length(e)]
  # Per-good subutilities: log(e/(gamma*price)+1) for the S inside goods,
  # log forms for the outside goods (the last shifted by qz_cons).
  u0 <- c(log(e0/(gamma[1:S]*price) + 1), log(e_S1[1]/gamma[(S+1)]), log(e_S1[2]/gamma[Sa]+qz_cons) )
  out <- sum(psi*gamma*u0 )
  # Negated so that minimizing uP_fn maximizes purchase utility.
  return(-out)
}
# Gradient of uP_fn with respect to e (also negated, matching uP_fn's sign
# convention so the pair can be handed to constrOptim together).
uPGrad_fn <- function(e, psi, gamma,price, S){
  e0 <- e[1:S]
  e_S1 <- e[(S+1):length(e)]
  # Elementwise derivatives of the per-good subutilities in uP_fn.
  # NOTE(review): relies on the same globals `Sa` and `qz_cons` as uP_fn.
  u0 <- c(1/(e0+price*gamma[1:S]), 1/e_S1[1], 1/(e_S1[2]+qz_cons*gamma[Sa]) )
  out <- psi*gamma*u0
  return(-out)
}
# Flow (per-period) utility: log utility of consumption, minus a linear
# inventory holding cost, minus a fixed cost whenever a purchase is made,
# plus a basket-specific utility shifter omega(sel_k, y).
# param = c(lambda, tau1, tau2); nu is an optional consumption shock.
uflow_fn <- function(c, I, Q, y, sel_k, omega, param, nu=0){
  lambda <- param[1]  # weight on log consumption utility
  tau1   <- param[2]  # per-unit inventory holding cost
  tau2   <- param[3]  # fixed cost of making a purchase
  consumption_util <- lambda * log(c + .01 + nu)
  inventory_cost   <- tau1 * (I + Q - c)
  purchase_cost    <- tau2 * as.numeric(Q > 0)
  consumption_util - inventory_cost - purchase_cost + omega(sel_k, y)
}
# Value function: RHS of the Bellman equation for consumption c at state
# (I, y) with basket choice sel_k -- flow utility plus the discounted
# expected continuation value. The expectation over next-period income is
# taken by Gauss-Hermite quadrature (hence the sqrt(2)*nodes scaling and
# the 1/sqrt(pi) normalization of the weights).
# NOTE(review): `y_kernal` (quadrature nodes/weights plus the log-AR(1)
# income parameters mu, rho, sigma) is a global defined elsewhere --
# confirm it is in scope at call time.
value_fn <- function(c,I,Q,y,sel_k, y_grid, v1_interp, omega, param, beta, nu=0){
  # Inventory transition: next inventory = current + purchase - consumption.
  I_next <- I+Q-c
  # Next-period income at each quadrature node under the log-AR(1) process.
  y_next <- exp(sqrt(2)*y_kernal$sigma*y_kernal$nodes + y_kernal$mu + y_kernal$rho*log(y))
  # First interpolate the value function at I_next along y grid;
  v2 <- sapply(1:length(y_grid), function(i) v1_interp[[i]](I_next) )
  # Second interpolate the value function at y_next
  v2_interp <- splinefun(x=y_grid, y=v2, method="natural")
  v <- v2_interp(y_next)
  # Calculate the RHS of Bellman equation
  out <- uflow_fn(c, I, Q, y, sel_k, omega, param) + beta*y_kernal$weight %*% v /sqrt(pi)
  return(out)
}
# Allocation function - solve a constrained optimization: allocate budget y
# across the Sa goods to maximize purchase utility uP_fn, subject to
# non-negativity, the budget constraint, and (when Q is given) a total
# purchased-quantity constraint. Returns list(e, max) and, in the
# quantity-constrained branch, a convergence code from constrOptim.
Allocation_fn <- function(y, psi, gamma, Q=NULL, price,S,Sa, inits=NULL, silent=FALSE){
  # We have Sa non-negativity constraints, one budget constraint, and one quantity constraint
  # ui <- rbind(diag(S+2), c(rep(-1, S+1), 0) )
  # inits is a list of initial values
  ui <- rbind(diag(Sa), c(rep(-1, S+1),rep(0, Sa-S-1 )) )
  ci <- c(rep(0,Sa), -y)
  # Default starting points: three interior candidates spreading the budget
  # differently across goods, each nudged off the constraint boundaries.
  if(is.null(inits)){
    tmp <- min(y/price, Q) * .99
    sel <- which.min(price)
    inits <- list(c(rep(tmp/(Sa-1), Sa-1), Q - tmp),
                  c(rep(1e-8, S), y-(S+2)*1e-8, Q - sum(price)*1e-8 - 1e-8),
                  c(rep(tmp/S, S), y- sum(tmp/S*price) -1e-8, 1e-8) )
  }
  if(is.null(Q)){
    # Unconstrained-quantity case.
    # NOTE(review): `inits` is a *list* here but constrOptim expects a
    # numeric theta -- verify this branch is ever reached with default inits.
    sol <- constrOptim(theta = inits, f=uP_fn, grad=NULL, ui=ui, ci=ci,psi=psi, gamma=gamma, price=price, S=S)
    return(list(e = sol$par, max = -sol$value))
  }else{
    if(Q<=0){
      # Nothing can be purchased: spend the whole budget on the numeraire.
      e <- c(rep(0,S),y)
      if(Sa>S+1){
        e <- c(e, 0)
      }
      return(list(e = e, max = log(y) ) )
    }else{
      # Add the total-quantity constraint, then try each starting point and
      # keep the best successful solution.
      ui <- rbind(ui, c(-1/price, 0, -1))
      ci <- c(ci, -Q)
      sol.list <- vector("list", length(inits))
      for(j in 1:length(inits)){
        sol.list[[j]] <- try(constrOptim(theta = inits[[j]], f=uP_fn, grad=uPGrad_fn, ui=ui, ci=ci,psi=psi,
                             gamma=gamma, price=price, S=S), silent=silent)
      }
      # Keep runs that did not error, then pick the smallest objective
      # (uP_fn is negated utility, so smaller value = higher utility).
      sol.list1 <- sol.list[sapply(sol.list, function(x) !inherits(x, "try-error"))]
      sel <- which.min(sapply(sol.list1, function(x) x$value))
      if(length(sel)==0){
        # Every start failed: return NAs so the caller can detect it.
        sol <- list(par = rep(NA, Sa), value=NA, convergence = NA)
      }else{
        sol <- sol.list1[[sel]]
        if(sol$convergence != 0){
          cat("Constrained optimization does not converge at value y=",y,", Q=",Q,"\n")
        }
      }
      return(list(e = sol$par, max = -sol$value, convergence = sol$convergence))
    }
  }
}
# Bellman operator: use brent's method to solve for optimal consumption
# at every state (inventory I, income y) and basket choice k, given the
# previous value function V_old. Returns the choice-specific values vc, the
# log-sum-exp integrated value V, conditional choice probabilities ccp, and
# the consumption policy c_star.
# NOTE(review): `y_idx`, `I_idx` (state-column indices), `price` and the
# basket grid interpretation come from globals defined elsewhere -- confirm
# scope.
Solve_DP_brent_fn <- function(V_old, state, K, Q_grid, omega, param, beta, control_list,DataState=NULL){
  ns <- nrow(state)
  nQ <- length(Q_grid)
  y_grid <- unique(state[,y_idx])
  # Create spline functions along the first dimension (one inventory spline
  # per income grid point, used by value_fn for interpolation).
  v1_interp <- vector("list", length=length(y_grid))
  for(i in 1:length(y_grid)){
    sel <- state[,y_idx]==y_grid[i]
    v1_interp[[i]] <- splinefun(x=state[sel,I_idx], y=V_old[sel], method="natural")
  }
  # Change to data state if it is given (evaluate at observed states while
  # still interpolating off the solution grid).
  if(!is.null(DataState)){
    state <- DataState
    ns <- nrow(state)
  }
  # Assign the objects that store results from loops
  c_star <- array(NA, c(ns, K))
  vc <- array(NA, c(ns, K))
  V <- matrix(NA, ns)
  ccp <- matrix(NA, ns, K)
  # Maximization loop over each state
  for(i in 1:ns){
    for(k in 1:K){
      if(state[i,1]+Q_grid[k]<=0){
        # Nothing available to consume: consumption is forced to zero.
        c_star[i,k] <- 0
        vc[i,k] <- value_fn(c=0,I=state[i,1],Q=Q_grid[k],y=state[i,2],sel_k=k, y_grid, v1_interp, omega, param, beta)
      }else if(Q_grid[k]>state[i,2]/min(price)){
        # Basket unaffordable even at the cheapest price: large penalty.
        vc[i,k] <- (-500)
        c_star[i,k] <- NA
      }else{
        # Brent's method (stats::optimize) over feasible consumption.
        sol <- optimize(value_fn, interval=c(0,state[i,1]+Q_grid[k]),
                        maximum=T, I=state[i,1], Q=Q_grid[k], y=state[i,2],sel_k = k,
                        y_grid = y_grid, v1_interp = v1_interp, omega = omega,
                        param = param, beta = beta,tol = control_list$brent_tol)
        c_star[i,k] <- sol$maximum
        vc[i,k] <- sol$objective
      }
    }
    # Log-sum-exp over basket choices, shifted by the row max for numerical
    # stability; ccp are the implied logit choice probabilities.
    tmp_vmax <- max(vc[i,])
    V[i] <- log(sum(exp(vc[i,] - tmp_vmax))) + tmp_vmax
    ev <- exp(vc[i,] - tmp_vmax)
    ccp[i,] <- (ev)/sum(ev)
  }
  # NOTE(review): V is an ns x 1 matrix, so apply(V, 1, which.max) always
  # yields 1 -- apply(vc, 1, which.max) was likely intended for policy_k.
  policy_k <- apply(V, 1, which.max)
  return(list(policy = list(k = policy_k, c = c_star), value = V, ccp = ccp, vc=vc))
}
# Bellman operator wrapper: unpacks the components of a DP_list and forwards
# them to Solve_DP_brent_fn, optionally evaluated at the states in DataState
# instead of the solution grid. Returns Solve_DP_brent_fn's result list
# (policy, value, ccp, vc) unchanged.
Bellman_operator <- function(DP_list, control_list, DataState=NULL){
  Solve_DP_brent_fn(V_old     = DP_list$value_fn,
                    state     = DP_list$state,
                    K         = DP_list$K,
                    Q_grid    = DP_list$Q_grid,
                    omega     = DP_list$omega,
                    param     = DP_list$param,
                    beta      = DP_list$beta,
                    control_list = control_list,
                    DataState = DataState)
}
simulate_seq_fn <- function(init_state, TT, draw_all = FALSE, y_seq, Q_seq, k_seq, DP_list, control_list,
alpha, alpha_a, gamma, nu=NULL){
# Extract the solution to dynamic programming
state <- DP_list$state
Q_grid <- DP_list$Q_grid
y_kernal <- DP_list$y_kernal
V <- DP_list$value_fn
y_grid <- unique(state[,y_idx])
psi_a <- exp(alpha_a)
if(is.null(nu)){ nu <- rep(0, TT) }
# Create spline functions along the first dimension
v1_interp <- vector("list", length=length(y_grid))
for(i in 1:length(y_grid)){
sel <- state[,y_idx]==y_grid[i]
v1_interp[[i]] <- splinefun(x=state[sel,I_idx], y=V[sel], method="natural")
}
# The objects that store output
DataState <- matrix(NA, TT, ncol(DP_list$state))
names(DataState) <- names(DP_list$state)
if(draw_all){
k_seq <- rep(NA, TT)
e_seq <- matrix(NA, TT, S+2)
}else{
DataState[,y_idx] <- y_seq
}
c_seq <- rep(NA, TT)
ccp <- matrix(NA, TT, K)
# Loop over to find three chocies: basket k, expenditure allocation e, and consumption c
DataState[1,] <- init_state
for(i in 1:TT){
tmp_sol <- Bellman_operator(DP_list, control_list, DataState=matrix(DataState[i,], nrow=1))
ccp[i,] <- tmp_sol$ccp
if(draw_all){
k_seq[i] <- which.max(rmultinom(1, 1, ccp[i,]))
eps <- rnorm(S) # Draw allocation error
psi <- c(exp(alpha + eps), psi_a)
w <- Allocation_fn(y=DataState[i,2], psi=psi, gamma=c(gamma,rep(1,Sa-S)), Q=Q_grid[k_seq[i]], price=price,S=S, Sa=Sa)
e_seq[i,] <- w$e
}
tmp_sol <- optimize(value_fn, interval=c(0,DataState[i,1]+Q_grid[k_seq[i]]),
maximum=T, I=DataState[i,1], Q=Q_grid[k_seq[i]], y=DataState[i,2],sel_k = k_seq[i],
y_grid = y_grid, v1_interp = v1_interp, omega = DP_list$omega,
param = DP_list$param, beta = DP_list$beta,tol = control_list$brent_tol)
c_seq[i] <- tmp_sol$maximum
# Detemine the state of next period
if(i<TT){
DataState[(i+1),I_idx] <- DataState[i,I_idx] + Q_grid[k_seq[i]] - c_seq[i]
if(draw_all){
DataState[(i+1),y_idx] <- exp(rnorm(1, y_kernal$mu + y_kernal$rho*log(DataState[i,y_idx]), y_kernal$sigma) )
}
}
}
return(list(data.frame( t=1:TT, I=DataState[,I_idx], y=DataState[,y_idx], k = k_seq, e = e_seq,
c = c_seq),
ccp = ccp) )
}
| /Basket DP/Dynamic allocation simulation/DAM_2_functions.R | no_license | Superet/Expenditure | R | false | false | 7,544 | r | # Purchae utility function
uP_fn <- function(e, psi, gamma,price, S){
# Negative purchase utility
# e: S+2 vecotr
# psi, gamma, price is (S+2)-dimension vector
e0 <- e[1:S]
e_S1 <- e[(S+1):length(e)]
u0 <- c(log(e0/(gamma[1:S]*price) + 1), log(e_S1[1]/gamma[(S+1)]), log(e_S1[2]/gamma[Sa]+qz_cons) )
out <- sum(psi*gamma*u0 )
return(-out)
}
uPGrad_fn <- function(e, psi, gamma,price, S){
e0 <- e[1:S]
e_S1 <- e[(S+1):length(e)]
u0 <- c(1/(e0+price*gamma[1:S]), 1/e_S1[1], 1/(e_S1[2]+qz_cons*gamma[Sa]) )
out <- psi*gamma*u0
return(-out)
}
# Flow utility function
uflow_fn <- function(c, I, Q, y, sel_k, omega, param, nu=0){
lambda <- param[1]
tau1 <- param[2]
tau2 <- param[3]
u <- lambda*log(c+.01+nu) - tau1*(I+Q-c) -tau2*1*(Q>0)+ omega(sel_k,y)
return(u)
}
# Value function
value_fn <- function(c,I,Q,y,sel_k, y_grid, v1_interp, omega, param, beta, nu=0){
I_next <- I+Q-c
y_next <- exp(sqrt(2)*y_kernal$sigma*y_kernal$nodes + y_kernal$mu + y_kernal$rho*log(y))
# First interpolate the value function at I_next along y grid;
v2 <- sapply(1:length(y_grid), function(i) v1_interp[[i]](I_next) )
# Second interpolate the value function at y_next
v2_interp <- splinefun(x=y_grid, y=v2, method="natural")
v <- v2_interp(y_next)
# Calculate the RHS of Bellman equation
out <- uflow_fn(c, I, Q, y, sel_k, omega, param) + beta*y_kernal$weight %*% v /sqrt(pi)
return(out)
}
# Allocation function - solve a constrained optimization
Allocation_fn <- function(y, psi, gamma, Q=NULL, price,S,Sa, inits=NULL, silent=FALSE){
# We have Sa non-negativity constraints, one budget constraint, and one quantity constraint
# ui <- rbind(diag(S+2), c(rep(-1, S+1), 0) )
# inits is a list of initial values
ui <- rbind(diag(Sa), c(rep(-1, S+1),rep(0, Sa-S-1 )) )
ci <- c(rep(0,Sa), -y)
if(is.null(inits)){
tmp <- min(y/price, Q) * .99
sel <- which.min(price)
inits <- list(c(rep(tmp/(Sa-1), Sa-1), Q - tmp),
c(rep(1e-8, S), y-(S+2)*1e-8, Q - sum(price)*1e-8 - 1e-8),
c(rep(tmp/S, S), y- sum(tmp/S*price) -1e-8, 1e-8) )
}
if(is.null(Q)){
sol <- constrOptim(theta = inits, f=uP_fn, grad=NULL, ui=ui, ci=ci,psi=psi, gamma=gamma, price=price, S=S)
return(list(e = sol$par, max = -sol$value))
}else{
if(Q<=0){
e <- c(rep(0,S),y)
if(Sa>S+1){
e <- c(e, 0)
}
return(list(e = e, max = log(y) ) )
}else{
ui <- rbind(ui, c(-1/price, 0, -1))
ci <- c(ci, -Q)
sol.list <- vector("list", length(inits))
for(j in 1:length(inits)){
sol.list[[j]] <- try(constrOptim(theta = inits[[j]], f=uP_fn, grad=uPGrad_fn, ui=ui, ci=ci,psi=psi,
gamma=gamma, price=price, S=S), silent=silent)
}
sol.list1 <- sol.list[sapply(sol.list, function(x) !inherits(x, "try-error"))]
sel <- which.min(sapply(sol.list1, function(x) x$value))
if(length(sel)==0){
sol <- list(par = rep(NA, Sa), value=NA, convergence = NA)
}else{
sol <- sol.list1[[sel]]
if(sol$convergence != 0){
cat("Constrained optimization does not converge at value y=",y,", Q=",Q,"\n")
}
}
return(list(e = sol$par, max = -sol$value, convergence = sol$convergence))
}
}
}
# Bellman operator: use brent's method to solve for optimal consumption
Solve_DP_brent_fn <- function(V_old, state, K, Q_grid, omega, param, beta, control_list, DataState=NULL){
  # One application of the Bellman operator: for every state and basket choice
  # k, find the consumption maximizing value_fn via Brent's method, then
  # aggregate the K choice-specific values with a log-sum-exp (type-1 EV
  # taste-shock) formula into the inclusive value V and choice probabilities.
  # Args:
  #   V_old        - value function on the state grid (used to build splines).
  #   state        - state grid (columns indexed by globals I_idx / y_idx).
  #   K, Q_grid    - number of basket choices and their quantities.
  #   omega, param, beta - structural parameters forwarded to value_fn.
  #   control_list - holds brent_tol for optimize().
  #   DataState    - optional observed states to evaluate at (splines are
  #                  still built from the solution grid `state`).
  # Returns list(policy = list(k, c), value, ccp, vc).
  # Relies on globals y_idx, I_idx, price and on value_fn defined elsewhere
  # in this file — TODO confirm they are in scope at call time.
  ns <- nrow(state)
  y_grid <- unique(state[, y_idx])
  # Natural-cubic splines of V_old along inventory I, one per income level y.
  v1_interp <- vector("list", length = length(y_grid))
  for (i in seq_along(y_grid)) {
    sel <- state[, y_idx] == y_grid[i]
    v1_interp[[i]] <- splinefun(x = state[sel, I_idx], y = V_old[sel], method = "natural")
  }
  # Evaluate at the observed data states when provided.
  if (!is.null(DataState)) {
    state <- DataState
    ns <- nrow(state)
  }
  # Result containers.
  c_star <- array(NA, c(ns, K))   # optimal consumption per state/choice
  vc     <- array(NA, c(ns, K))   # choice-specific values
  V      <- matrix(NA, ns)        # inclusive (log-sum-exp) value
  ccp    <- matrix(NA, ns, K)     # conditional choice probabilities
  # Maximization loop over each state.
  for (i in seq_len(ns)) {
    for (k in seq_len(K)) {
      if (state[i, 1] + Q_grid[k] <= 0) {
        # Nothing available to consume: consumption is forced to zero.
        c_star[i, k] <- 0
        vc[i, k] <- value_fn(c=0, I=state[i,1], Q=Q_grid[k], y=state[i,2], sel_k=k,
                             y_grid, v1_interp, omega, param, beta)
      } else if (Q_grid[k] > state[i, 2] / min(price)) {
        # Basket k unaffordable even at the cheapest price: large penalty value.
        vc[i, k] <- (-500)
        c_star[i, k] <- NA
      } else {
        sol <- optimize(value_fn, interval = c(0, state[i,1] + Q_grid[k]),
                        maximum = TRUE, I = state[i,1], Q = Q_grid[k], y = state[i,2], sel_k = k,
                        y_grid = y_grid, v1_interp = v1_interp, omega = omega,
                        param = param, beta = beta, tol = control_list$brent_tol)
        c_star[i, k] <- sol$maximum
        vc[i, k] <- sol$objective
      }
    }
    # Log-sum-exp with max-shift for numerical stability.
    tmp_vmax <- max(vc[i, ])
    V[i] <- log(sum(exp(vc[i, ] - tmp_vmax))) + tmp_vmax
    ev <- exp(vc[i, ] - tmp_vmax)
    ccp[i, ] <- ev / sum(ev)
  }
  # BUG FIX: the original computed apply(V, 1, which.max); V is an ns x 1
  # matrix, so which.max over each row was identically 1. The policy must
  # pick the basket maximizing the choice-specific value vc.
  policy_k <- apply(vc, 1, which.max)
  return(list(policy = list(k = policy_k, c = c_star), value = V, ccp = ccp, vc = vc))
}
# Bellman operator wrapper
Bellman_operator <- function(DP_list, control_list, DataState=NULL){
  # Thin wrapper around the Brent-based Bellman solver: unpack the dynamic
  # program description stored in DP_list and delegate. DataState is passed
  # through so the operator can be evaluated at observed states.
  Solve_DP_brent_fn(
    V_old        = DP_list$value_fn,
    state        = DP_list$state,
    K            = DP_list$K,
    Q_grid       = DP_list$Q_grid,
    omega        = DP_list$omega,
    param        = DP_list$param,
    beta         = DP_list$beta,
    control_list = control_list,
    DataState    = DataState
  )
}
simulate_seq_fn <- function(init_state, TT, draw_all = FALSE, y_seq, Q_seq, k_seq, DP_list, control_list,
                            alpha, alpha_a, gamma, nu=NULL){
  # Simulate a length-TT sequence of basket choices k, expenditure allocations
  # e, and consumption c, given the solved dynamic program in DP_list.
  # When draw_all = TRUE, basket choices, allocation shocks, and next-period
  # income are drawn; otherwise the supplied y_seq / k_seq are used.
  # Relies on globals y_idx, I_idx, S, Sa, K, price and on value_fn /
  # Allocation_fn / Bellman_operator defined elsewhere in this file.
  # Returns list(data.frame(t, I, y, k, e.*, c), ccp).
  # Extract the solution to the dynamic program.
  state <- DP_list$state
  Q_grid <- DP_list$Q_grid
  y_kernal <- DP_list$y_kernal
  V <- DP_list$value_fn
  y_grid <- unique(state[, y_idx])
  psi_a <- exp(alpha_a)
  # NOTE(review): nu is initialized but never used below — confirm intent.
  if (is.null(nu)) { nu <- rep(0, TT) }
  # Natural-cubic splines of the converged value function along I, one per y.
  v1_interp <- vector("list", length = length(y_grid))
  for (i in seq_along(y_grid)) {
    sel <- state[, y_idx] == y_grid[i]
    v1_interp[[i]] <- splinefun(x = state[sel, I_idx], y = V[sel], method = "natural")
  }
  # Containers for the simulated path.
  DataState <- matrix(NA, TT, ncol(DP_list$state))
  # BUG FIX: the original used names(DataState) <- names(DP_list$state);
  # names() does not label the columns of a matrix — colnames() does.
  colnames(DataState) <- colnames(DP_list$state)
  # BUG FIX: e_seq is always allocated so the return statement below does not
  # fail with "object 'e_seq' not found" when draw_all = FALSE (its columns
  # stay NA in that case).
  e_seq <- matrix(NA, TT, S + 2)
  if (draw_all) {
    k_seq <- rep(NA, TT)
  } else {
    DataState[, y_idx] <- y_seq
  }
  c_seq <- rep(NA, TT)
  ccp <- matrix(NA, TT, K)
  # Loop over periods to find the three choices: basket k, allocation e,
  # and consumption c.
  DataState[1, ] <- init_state
  for (i in seq_len(TT)) {
    bell <- Bellman_operator(DP_list, control_list, DataState = matrix(DataState[i, ], nrow = 1))
    ccp[i, ] <- bell$ccp
    if (draw_all) {
      # Draw the basket from the conditional choice probabilities.
      k_seq[i] <- which.max(rmultinom(1, 1, ccp[i, ]))
      eps <- rnorm(S)   # Draw allocation error
      psi <- c(exp(alpha + eps), psi_a)
      w <- Allocation_fn(y = DataState[i, 2], psi = psi, gamma = c(gamma, rep(1, Sa - S)),
                         Q = Q_grid[k_seq[i]], price = price, S = S, Sa = Sa)
      e_seq[i, ] <- w$e
    }
    # Optimal consumption given the (drawn or supplied) basket choice.
    c_sol <- optimize(value_fn, interval = c(0, DataState[i, 1] + Q_grid[k_seq[i]]),
                      maximum = TRUE, I = DataState[i, 1], Q = Q_grid[k_seq[i]], y = DataState[i, 2],
                      sel_k = k_seq[i], y_grid = y_grid, v1_interp = v1_interp, omega = DP_list$omega,
                      param = DP_list$param, beta = DP_list$beta, tol = control_list$brent_tol)
    c_seq[i] <- c_sol$maximum
    # Determine the state of the next period.
    if (i < TT) {
      DataState[(i + 1), I_idx] <- DataState[i, I_idx] + Q_grid[k_seq[i]] - c_seq[i]
      if (draw_all) {
        # Draw next-period income from the log-AR(1) kernel.
        DataState[(i + 1), y_idx] <- exp(rnorm(1, y_kernal$mu + y_kernal$rho * log(DataState[i, y_idx]),
                                               y_kernal$sigma))
      }
    }
  }
  return(list(data.frame(t = seq_len(TT), I = DataState[, I_idx], y = DataState[, y_idx], k = k_seq,
                         e = e_seq, c = c_seq),
              ccp = ccp))
}
|
#' Scatterplot for column categories contribution to dimensions
#'
#' This function plots a scatterplot of the contribution of column
#' categories to two selected dimensions. Two reference lines (in RED) indicate
#' the threshold above which the contribution can be considered important for
#' the determination of the dimensions. A diagonal line is a visual aid to
#' eyeball whether a category is actually contributing more (in relative terms)
#' to either of the two dimensions. The column categories' labels are coupled
#' with + or - symbols within round brackets indicating to which side of the two
#' selected dimensions the contribution values that can be read off from the
#' chart actually refer. The first symbol (i.e., the one to the left),
#' either + or -, refers to the first of the selected dimensions (i.e., the one
#' reported on the x-axis). The second symbol (i.e., the one to the right)
#' refers to the second of the selected dimensions (i.e., the one reported on
#' the y-axis).
#'
#' @param data Name of the dataset (must be in dataframe format).
#' @param x First dimension for which the contributions are reported (x=1 by
#' default).
#' @param y Second dimension for which the contributions are reported (y=2 by
#' default).
#' @param filter Filter the categories in order to only display those who have a
#' major contribution to the definition of the selected dimensions.
#' @param cex.labls Adjust the size of the categories' labels
#'
#' @keywords cols.cntr.scatter
#'
#' @export
#'
#' @examples
#' data(greenacre_data)
#'
#' #Plots the scatterplot of the column categories contribution to dimensions 1&2.
#'
#' cols.cntr.scatter(greenacre_data,1,2)
#'
#' @seealso \code{\link{cols.cntr}} , \code{\link{rows.cntr}} , \code{\link{rows.cntr.scatter}}
#'
cols.cntr.scatter <- function (data, x = 1, y = 2, filter=FALSE, cex.labls=3) {
  # Scatterplot of column-category contributions (in permills) to two CA
  # dimensions; see the roxygen block above for the full description.
  # Args:
  #   data:      data frame (contingency table) fed to FactoMineR::CA().
  #   x, y:      indices of the two dimensions whose contributions are plotted.
  #   filter:    if TRUE, keep only categories whose contribution to either
  #              dimension exceeds the average-contribution threshold.
  #   cex.labls: label size passed to ggrepel::geom_text_repel().
  # Returns: a ggplot object.
  # Dummy bindings to silence R CMD check notes about NSE variables in aes().
  cntr1 = cntr2 = labels.final = NULL
  ncols <- ncol(data)
  # Maximum number of CA dimensions is min(ncol, nrow) - 1.
  numb.dim.cols <- ncol(data) - 1
  numb.dim.rows <- nrow(data) - 1
  a <- min(numb.dim.cols, numb.dim.rows)
  pnt_labls <- colnames(data)
  res <- CA(data, ncp = a, graph = FALSE)
  # Contributions are rescaled from percent to permills (* 10).
  dfr <- data.frame(lab = pnt_labls,
                    cntr1 = res$col$contrib[, x] * 10,
                    cntr2 = res$col$contrib[, y] * 10,
                    coord1 = res$col$coord[, x],
                    coord2 = res$col$coord[, y])
  # Sign of each category's coordinate on the two dimensions -> "(+,-)" labels.
  dfr$labels1 <- ifelse(dfr$coord1 < 0, "-", "+")
  dfr$labels2 <- ifelse(dfr$coord2 < 0, "-", "+")
  dfr$labels.final <- paste0(dfr$lab, " (", dfr$labels1, ",", dfr$labels2, ")")
  xmax <- max(dfr[, 2]) + 10
  ymax <- max(dfr[, 3]) + 10
  limit.value <- max(xmax, ymax)
  # Average-contribution threshold in permills: (100 / ncols) * 10.
  # (The original used ifelse() purely for its assignment side effects,
  # an anti-pattern; a plain if is equivalent and clearer.)
  if (filter) {
    dfr <- subset(dfr, cntr1 > (100/ncols) * 10 | cntr2 > (100/ncols) * 10)
  }
  p <- ggplot(dfr, aes(x = cntr1, y = cntr2)) + geom_point(alpha = 0.8) +
    geom_hline(yintercept = round((100/ncols) * 10, digits = 0), colour = "red", linetype = "dashed") +
    geom_vline(xintercept = round((100/ncols) * 10, digits = 0), colour = "red", linetype = "dashed") +
    scale_y_continuous(limits = c(0, limit.value)) + scale_x_continuous(limits = c(0, limit.value)) +
    geom_abline(intercept = 0, slope = 1, colour = "#00000088") +
    theme(panel.background = element_rect(fill = "white", colour = "black")) +
    geom_text_repel(data = dfr, aes(label = labels.final), size = cex.labls) +
    labs(x = paste("Column categories' contribution (permills) to Dim.", x),
         y = paste("Column categories' contribution (permills) to Dim.", y)) +
    coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE)
  return(p)
} | /R/cols_cntr_scatter.R | no_license | cran/CAinterprTools | R | false | false | 3,521 | r | #' Scatterplot for column categories contribution to dimensions
#'
#' This function plots a scatterplot of the contribution of column
#' categories to two selected dimensions. Two reference lines (in RED) indicate
#' the threshold above which the contribution can be considered important for
#' the determination of the dimensions. A diagonal line is a visual aid to
#' eyeball whether a category is actually contributing more (in relative terms)
#' to either of the two dimensions. The column categories' labels are coupled
#' with + or - symbols within round brackets indicating to which side of the two
#' selected dimensions the contribution values that can be read off from the
#' chart actually refer. The first symbol (i.e., the one to the left),
#' either + or -, refers to the first of the selected dimensions (i.e., the one
#' reported on the x-axis). The second symbol (i.e., the one to the right)
#' refers to the second of the selected dimensions (i.e., the one reported on
#' the y-axis).
#'
#' @param data Name of the dataset (must be in dataframe format).
#' @param x First dimension for which the contributions are reported (x=1 by
#' default).
#' @param y Second dimension for which the contributions are reported (y=2 by
#' default).
#' @param filter Filter the categories in order to only display those who have a
#' major contribution to the definition of the selected dimensions.
#' @param cex.labls Adjust the size of the categories' labels
#'
#' @keywords cols.cntr.scatter
#'
#' @export
#'
#' @examples
#' data(greenacre_data)
#'
#' #Plots the scatterplot of the column categories contribution to dimensions 1&2.
#'
#' cols.cntr.scatter(greenacre_data,1,2)
#'
#' @seealso \code{\link{cols.cntr}} , \code{\link{rows.cntr}} , \code{\link{rows.cntr.scatter}}
#'
cols.cntr.scatter <- function (data, x = 1, y = 2, filter=FALSE, cex.labls=3) {
# Scatterplot of column-category contributions (in permills) to CA dimensions
# x and y, with average-contribution reference lines and per-dimension sign
# labels; see the roxygen block above.
# Dummy bindings to silence R CMD check notes about NSE variables in aes().
cntr1=cntr2=labels.final=NULL
ncols <- ncol(data)
# NOTE(review): nrows is assigned but never used.
nrows <- nrow(data)
# Maximum number of CA dimensions is min(ncol, nrow) - 1.
numb.dim.cols <- ncol(data) - 1
numb.dim.rows <- nrow(data) - 1
a <- min(numb.dim.cols, numb.dim.rows)
pnt_labls <- colnames(data)
res <- CA(data, ncp = a, graph = FALSE)
# Contributions are rescaled from percent to permills (* 10).
dfr <- data.frame(lab = pnt_labls, cntr1 = res$col$contrib[,x] * 10, cntr2 = res$col$contrib[, y] * 10, coord1=res$col$coord[,x], coord2=res$col$coord[,y])
# Sign of each category's coordinate on the two dimensions -> "(+,-)" labels.
dfr$labels1 <- ifelse(dfr$coord1 < 0, "-", "+")
dfr$labels2 <- ifelse(dfr$coord2 < 0, "-", "+")
dfr$labels.final <- paste0(dfr$lab, " (",dfr$labels1,",",dfr$labels2, ")")
xmax <- max(dfr[, 2]) + 10
ymax <- max(dfr[, 3]) + 10
limit.value <- max(xmax, ymax)
# Average-contribution threshold in permills is (100/ncols)*10.
# NOTE(review): ifelse() is used here for its assignment side effects;
# a plain if (filter) dfr <- subset(...) would be clearer.
ifelse(filter==FALSE, dfr <- dfr, dfr <- subset(dfr, cntr1>(100/ncols)*10 | cntr2>(100/ncols)*10))
p <- ggplot(dfr, aes(x = cntr1, y = cntr2)) + geom_point(alpha = 0.8) +
geom_hline(yintercept = round((100/ncols) * 10, digits = 0), colour = "red", linetype = "dashed") +
geom_vline(xintercept = round((100/ncols) * 10, digits = 0), colour = "red", linetype = "dashed") +
scale_y_continuous(limits = c(0, limit.value)) + scale_x_continuous(limits = c(0,limit.value)) +
geom_abline(intercept = 0, slope = 1, colour="#00000088") +
theme(panel.background = element_rect(fill="white", colour="black")) +
geom_text_repel(data = dfr, aes(label = labels.final), size = cex.labls) +
labs(x = paste("Column categories' contribution (permills) to Dim.",x), y = paste("Column categories' contribution (permills) to Dim.", y)) +
coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE)
return(p)
} |
.onAttach <- function(libname, pkgname) {
  # Package attach hook: show a startup banner pointing users to the
  # development version on GitHub and to the function index.
  banner <- '\n\n\n\n\n*********************\n\n\n\nLatest version of regtools at GitHub.com/matloff\n\n\nType "?regtools" for function list.'
  packageStartupMessage(banner)
} | /R/onAttach.R | no_license | MATA62N/regtools | R | false | false | 208 | r |
.onAttach <- function(libname, pkgname) {
# Package attach hook. Uses packageStartupMessage() (rather than message())
# so suppressPackageStartupMessages() can silence the banner.
packageStartupMessage(
'\n\n\n\n\n*********************\n\n\n\nLatest version of regtools at GitHub.com/matloff\n\n\nType "?regtools" for function list.')
} |
# Figure-generation script for the leverage-buffer (LevBuf) experiment:
# reads simulation result CSVs from the working directory and produces
# the thesis figures.
library(igraph)
library(alabama)
library(ggplot2)
setwd("/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Code Used - Final/Results - LevBuf/")
# Parameters----------------------------------------------------------------------------------------
# Generating Graph
n_Banks = 20
m_Assets = 10
# Diversification
linkProbability = 0.4
minLinkProbability = 0.1
maxLinkProbability = 0.8
linkProbabilityStep = 0.1
# Generating the balance Sheet
asset_0 = 80
cash_0 = 20
intial_Price = matrix(data = 1, nrow = m_Assets, ncol = 1)
# Leverage testing factors
leverage = 5
leverage_low_step = 3
leverage_high_step = 8
leverage_test_step = 0.5
# System rules parameters
gamma_factor = 1
gamma = matrix(data = gamma_factor, nrow = n_Banks, ncol = 1)
# Liquidity heterogenity
meanLiquidity = 0.4
liquidity_step = 0.1
max_liquidityStep = 0.4
min_liquidityStep = 0
liquidity_step_sequence_test = 0.1
method_selection = 3
method_selection_list = c(1,2,3)
external_Trade_Dummy = 0
# Shock & Simulation
assetReduction = 0.05
numberIterations = 25
numberSimulations = 100
leverage_warning_factor = 0.95
testing = 0
convergence_index = matrix(data = NA, nrow = numberIterations, ncol =1)
# Set f to 0 if using single simulation
f = 0
# NOTE(review): the numeric index below is immediately shadowed by the
# character version on the next line (only the string labels are used on
# the x-axes); confirm the first assignment can be removed.
index = c(0.99, 0.95, 0.9, 0.8)
index = c("0.01", "0.05", "0.1", "0.2")
# CONTAGION PROBABILITY ----------------------------------------------------------------------------------------
# Read the results - Contagion Probability
# The first CSV line holds the array dimensions; the remaining lines hold
# the flattened results, reshaped into a 3-way array (one slice per method).
dimB<-scan(file="contagion_probability_simulation.csv",sep=",",nlines=1)
dataB<-matrix(scan(file="contagion_probability_simulation.csv",sep=",",skip=1), nrow = 3, byrow = TRUE)
contagion_probability_simulation_read<-array(t(dataB),dimB)
# Present in a plot:
# Per-buffer-level averages over the simulations, one series per method
# (array slices 1..3).
a1 = rowSums(contagion_probability_simulation_read[,,1])/ numberSimulations
a2 = rowSums(contagion_probability_simulation_read[,,2])/ numberSimulations
a3 = rowSums(contagion_probability_simulation_read[,,3])/ numberSimulations
x = index
# Note: do box graphs for each of the methods !!
df1 = data.frame(x = index, y = a1, Method = as.factor("Pro-rata"))
df2 = data.frame(x = index, y = a2, Method = as.factor("Bank holding"))
df3 = data.frame(x = index, y = a3, Method = as.factor("System optim"))
info = rbind(df1, df2, df3)
Graph_title = "Bankruptcy percentage on leverage buffer"
x_lab_title = "Leverage buffer"
y_lab_title = "Bankruptcy percentage"
save_title = "Cont_Probability"
# Greyscale line plot, one line/point series per allocation method.
Contagion_Probability_Leverage = ggplot(info, aes(x,y , group = Method)) +
geom_line(aes( linetype = Method, color = Method), size = 0.7) +
geom_point(aes(shape = Method, color = Method), size =2.25, stroke = 0.75) +
ggtitle(Graph_title) +
xlab(x_lab_title) +
ylab(y_lab_title) +
theme_light() +
scale_fill_grey() +
scale_color_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(Contagion_Probability_Leverage)
ggsave(paste(save_title, ".pdf", sep =""), plot = Contagion_Probability_Leverage, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 12, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
# System Loss ----------------------------------------------------------------------------------------
# Read the results - System Loss: first CSV line holds the array dimensions,
# the rest the flattened (method x buffer x simulation) results.
dimB <- scan(file="system_loss_simulation.csv", sep=",", nlines=1)
dataB <- matrix(scan(file="system_loss_simulation.csv", sep=",", skip=1), nrow = 3, byrow = TRUE)
system_loss_simulation_read <- array(t(dataB), dimB)
# Per-buffer-level averages over the simulations; the sign is flipped below
# so losses plot as positive values.
a1 <- rowSums(system_loss_simulation_read[,,1]) / numberSimulations
a2 <- rowSums(system_loss_simulation_read[,,2]) / numberSimulations
a3 <- rowSums(system_loss_simulation_read[,,3]) / numberSimulations
df1 <- data.frame(x = index, y = -a1, Method = as.factor("Pro-rata"))
df2 <- data.frame(x = index, y = -a2, Method = as.factor("Bank holding"))
df3 <- data.frame(x = index, y = -a3, Method = as.factor("System optim"))
info <- rbind(df1, df2, df3)
Graph_title <- "Asset loss on leverage buffer"
y_lab_title <- "Asset loss"
save_title <- "Cont_Loss"
# Greyscale line plot, one line/point series per allocation method
# (x_lab_title is still "Leverage buffer" from the previous section).
Contagion_Loss_Leverage <- ggplot(info, aes(x, y, group = Method)) +
  geom_line(aes(linetype = Method, color = Method), size = 0.7) +
  geom_point(aes(shape = Method, color = Method), size = 2.25, stroke = 0.75) +
  ggtitle(Graph_title) +
  xlab(x_lab_title) +
  ylab(y_lab_title) +
  theme_light() +
  scale_fill_grey() +
  scale_color_grey() +
  theme(
    plot.title = element_text(hjust = 0.5, size=12),
    axis.title.x = element_text(size=11),
    axis.title.y = element_text(size=11),
    axis.text.x = element_text(size=11),
    axis.text.y = element_text(size=11),
    legend.title = element_text(size=11),
    legend.text = element_text(size=11))
# BUG FIX: the original printed Contagion_Probability_Leverage here (copy-paste
# from the previous section); this section's figure is Contagion_Loss_Leverage
# (the ggsave call below was already correct).
print(Contagion_Loss_Leverage)
ggsave(paste(save_title, ".pdf", sep =""), plot = Contagion_Loss_Leverage, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
       scale = 1, width = 12, height = 7, units = "cm",
       dpi = 300, limitsize = TRUE)
# System Loss Data Points ------------------
# Dot plots of the full outcome distribution per leverage buffer, one figure
# per allocation method; the crossbar marks mean +/- mult*SD (mean_sdl).
y_lab_title = "Asset loss"
Graph_title = "Asset loss outcome distribution for pro-rata"
# Plotting per method type:
# Method 1
b1 = c(system_loss_simulation_read[,,1])
index_values = as.factor(rep(index,numberSimulations))
method1 = rep("pro-Rata", length(c(b1)))
method1_data = data.frame(b1, index_values, method1)
M1 = ggplot(method1_data, aes(x=index_values, y=b1, )) +
geom_dotplot(binaxis='y', stackdir='center', binwidth = 0.4, stackratio = 1, dotsize =10) +
stat_summary(fun.data="mean_sdl", fun.args = list(mult=0.4), geom="crossbar", width=0.3) +
ggtitle(Graph_title) +
theme_light() +
xlab(x_lab_title) +
ylab(y_lab_title) +
scale_fill_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(M1)
# NOTE(review): save_title is still "Cont_Loss" from the previous section, so
# these figures are saved as Cont_Loss_M1/2/3.pdf — confirm that is intended.
ggsave(paste(save_title, "_M1", ".pdf", sep =""), plot = M1, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 20, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
# Method 2
Graph_title = "Asset loss outcome distribution for bank holding"
b2 = c(system_loss_simulation_read[,,2])
index_values = as.factor(rep(index,numberSimulations))
method2 = rep("Bank Holding", length(c(b2)))
method2_data = data.frame(b2, index_values, method2)
M2 = ggplot(method2_data, aes(x=index_values, y=b2, )) +
geom_dotplot(binaxis='y', stackdir='center', binwidth = 0.4, stackratio = 1, dotsize =10) +
stat_summary(fun.data="mean_sdl", fun.args = list(mult=0.4), geom="crossbar", width=0.3) +
ggtitle(Graph_title) +
theme_light() +
xlab(x_lab_title) +
ylab(y_lab_title) +
scale_fill_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(M2)
ggsave(paste(save_title, "_M2", ".pdf", sep =""), plot = M2, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 20, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
# Method 3
Graph_title = "Asset loss outcome distribution for system optim"
b3 = c(system_loss_simulation_read[,,3])
index_values = as.factor(rep(index,numberSimulations))
method3 = rep("System Optim", length(c(b3)))
method3_data = data.frame(b3, index_values, method3)
M3 = ggplot(method3_data, aes(x=index_values, y=b3 )) +
geom_dotplot(binaxis='y', stackdir='center', binwidth = 0.4, stackratio = 1, dotsize =10) +
stat_summary(fun.data="mean_sdl", fun.args = list(mult=0.4), geom="crossbar", width=0.3) +
ggtitle(Graph_title) +
theme_light() +
xlab(x_lab_title) +
ylab(y_lab_title) +
scale_fill_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(M3)
ggsave(paste(save_title, "_M3", ".pdf", sep =""), plot = M3, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 20, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
| /Code Used - Final/Results - LevBuf/Read Results_LevBuf.R | no_license | MStachnio/MasterThesis_SystemicStability | R | false | false | 8,898 | r |
# Figure-generation script for the leverage-buffer (LevBuf) experiment:
# reads simulation result CSVs from the working directory and produces
# the thesis figures.
library(igraph)
library(alabama)
library(ggplot2)
setwd("/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Code Used - Final/Results - LevBuf/")
# Parameters----------------------------------------------------------------------------------------
# Generating Graph
n_Banks = 20
m_Assets = 10
# Diversification
linkProbability = 0.4
minLinkProbability = 0.1
maxLinkProbability = 0.8
linkProbabilityStep = 0.1
# Generating the balance Sheet
asset_0 = 80
cash_0 = 20
intial_Price = matrix(data = 1, nrow = m_Assets, ncol = 1)
# Leverage testing factors
leverage = 5
leverage_low_step = 3
leverage_high_step = 8
leverage_test_step = 0.5
# System rules parameters
gamma_factor = 1
gamma = matrix(data = gamma_factor, nrow = n_Banks, ncol = 1)
# Liquidity heterogenity
meanLiquidity = 0.4
liquidity_step = 0.1
max_liquidityStep = 0.4
min_liquidityStep = 0
liquidity_step_sequence_test = 0.1
method_selection = 3
method_selection_list = c(1,2,3)
external_Trade_Dummy = 0
# Shock & Simulation
assetReduction = 0.05
numberIterations = 25
numberSimulations = 100
leverage_warning_factor = 0.95
testing = 0
convergence_index = matrix(data = NA, nrow = numberIterations, ncol =1)
# Set f to 0 if using single simulation
f = 0
# NOTE(review): the numeric index below is immediately shadowed by the
# character version on the next line (only the string labels are used on
# the x-axes); confirm the first assignment can be removed.
index = c(0.99, 0.95, 0.9, 0.8)
index = c("0.01", "0.05", "0.1", "0.2")
# CONTAGION PROBABILITY ----------------------------------------------------------------------------------------
# Read the results - Contagion Probability
# The first CSV line holds the array dimensions; the remaining lines hold
# the flattened results, reshaped into a 3-way array (one slice per method).
dimB<-scan(file="contagion_probability_simulation.csv",sep=",",nlines=1)
dataB<-matrix(scan(file="contagion_probability_simulation.csv",sep=",",skip=1), nrow = 3, byrow = TRUE)
contagion_probability_simulation_read<-array(t(dataB),dimB)
# Present in a plot:
# Per-buffer-level averages over the simulations, one series per method
# (array slices 1..3).
a1 = rowSums(contagion_probability_simulation_read[,,1])/ numberSimulations
a2 = rowSums(contagion_probability_simulation_read[,,2])/ numberSimulations
a3 = rowSums(contagion_probability_simulation_read[,,3])/ numberSimulations
x = index
# Note: do box graphs for each of the methods !!
df1 = data.frame(x = index, y = a1, Method = as.factor("Pro-rata"))
df2 = data.frame(x = index, y = a2, Method = as.factor("Bank holding"))
df3 = data.frame(x = index, y = a3, Method = as.factor("System optim"))
info = rbind(df1, df2, df3)
Graph_title = "Bankruptcy percentage on leverage buffer"
x_lab_title = "Leverage buffer"
y_lab_title = "Bankruptcy percentage"
save_title = "Cont_Probability"
# Greyscale line plot, one line/point series per allocation method.
Contagion_Probability_Leverage = ggplot(info, aes(x,y , group = Method)) +
geom_line(aes( linetype = Method, color = Method), size = 0.7) +
geom_point(aes(shape = Method, color = Method), size =2.25, stroke = 0.75) +
ggtitle(Graph_title) +
xlab(x_lab_title) +
ylab(y_lab_title) +
theme_light() +
scale_fill_grey() +
scale_color_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(Contagion_Probability_Leverage)
ggsave(paste(save_title, ".pdf", sep =""), plot = Contagion_Probability_Leverage, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 12, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
# System Loss ----------------------------------------------------------------------------------------
# Read the results - System Loss: first CSV line holds the array dimensions,
# the rest the flattened (method x buffer x simulation) results.
dimB <- scan(file="system_loss_simulation.csv", sep=",", nlines=1)
dataB <- matrix(scan(file="system_loss_simulation.csv", sep=",", skip=1), nrow = 3, byrow = TRUE)
system_loss_simulation_read <- array(t(dataB), dimB)
# Per-buffer-level averages over the simulations; the sign is flipped below
# so losses plot as positive values.
a1 <- rowSums(system_loss_simulation_read[,,1]) / numberSimulations
a2 <- rowSums(system_loss_simulation_read[,,2]) / numberSimulations
a3 <- rowSums(system_loss_simulation_read[,,3]) / numberSimulations
df1 <- data.frame(x = index, y = -a1, Method = as.factor("Pro-rata"))
df2 <- data.frame(x = index, y = -a2, Method = as.factor("Bank holding"))
df3 <- data.frame(x = index, y = -a3, Method = as.factor("System optim"))
info <- rbind(df1, df2, df3)
Graph_title <- "Asset loss on leverage buffer"
y_lab_title <- "Asset loss"
save_title <- "Cont_Loss"
# Greyscale line plot, one line/point series per allocation method
# (x_lab_title is still "Leverage buffer" from the previous section).
Contagion_Loss_Leverage <- ggplot(info, aes(x, y, group = Method)) +
  geom_line(aes(linetype = Method, color = Method), size = 0.7) +
  geom_point(aes(shape = Method, color = Method), size = 2.25, stroke = 0.75) +
  ggtitle(Graph_title) +
  xlab(x_lab_title) +
  ylab(y_lab_title) +
  theme_light() +
  scale_fill_grey() +
  scale_color_grey() +
  theme(
    plot.title = element_text(hjust = 0.5, size=12),
    axis.title.x = element_text(size=11),
    axis.title.y = element_text(size=11),
    axis.text.x = element_text(size=11),
    axis.text.y = element_text(size=11),
    legend.title = element_text(size=11),
    legend.text = element_text(size=11))
# BUG FIX: the original printed Contagion_Probability_Leverage here (copy-paste
# from the previous section); this section's figure is Contagion_Loss_Leverage
# (the ggsave call below was already correct).
print(Contagion_Loss_Leverage)
ggsave(paste(save_title, ".pdf", sep =""), plot = Contagion_Loss_Leverage, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
       scale = 1, width = 12, height = 7, units = "cm",
       dpi = 300, limitsize = TRUE)
# System Loss Data Points ------------------
# Dot plots of the full outcome distribution per leverage buffer, one figure
# per allocation method; the crossbar marks mean +/- mult*SD (mean_sdl).
y_lab_title = "Asset loss"
Graph_title = "Asset loss outcome distribution for pro-rata"
# Plotting per method type:
# Method 1
b1 = c(system_loss_simulation_read[,,1])
index_values = as.factor(rep(index,numberSimulations))
method1 = rep("pro-Rata", length(c(b1)))
method1_data = data.frame(b1, index_values, method1)
M1 = ggplot(method1_data, aes(x=index_values, y=b1, )) +
geom_dotplot(binaxis='y', stackdir='center', binwidth = 0.4, stackratio = 1, dotsize =10) +
stat_summary(fun.data="mean_sdl", fun.args = list(mult=0.4), geom="crossbar", width=0.3) +
ggtitle(Graph_title) +
theme_light() +
xlab(x_lab_title) +
ylab(y_lab_title) +
scale_fill_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(M1)
# NOTE(review): save_title is still "Cont_Loss" from the previous section, so
# these figures are saved as Cont_Loss_M1/2/3.pdf — confirm that is intended.
ggsave(paste(save_title, "_M1", ".pdf", sep =""), plot = M1, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 20, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
# Method 2
Graph_title = "Asset loss outcome distribution for bank holding"
b2 = c(system_loss_simulation_read[,,2])
index_values = as.factor(rep(index,numberSimulations))
method2 = rep("Bank Holding", length(c(b2)))
method2_data = data.frame(b2, index_values, method2)
M2 = ggplot(method2_data, aes(x=index_values, y=b2, )) +
geom_dotplot(binaxis='y', stackdir='center', binwidth = 0.4, stackratio = 1, dotsize =10) +
stat_summary(fun.data="mean_sdl", fun.args = list(mult=0.4), geom="crossbar", width=0.3) +
ggtitle(Graph_title) +
theme_light() +
xlab(x_lab_title) +
ylab(y_lab_title) +
scale_fill_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(M2)
ggsave(paste(save_title, "_M2", ".pdf", sep =""), plot = M2, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 20, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
# Method 3
Graph_title = "Asset loss outcome distribution for system optim"
b3 = c(system_loss_simulation_read[,,3])
index_values = as.factor(rep(index,numberSimulations))
method3 = rep("System Optim", length(c(b3)))
method3_data = data.frame(b3, index_values, method3)
M3 = ggplot(method3_data, aes(x=index_values, y=b3 )) +
geom_dotplot(binaxis='y', stackdir='center', binwidth = 0.4, stackratio = 1, dotsize =10) +
stat_summary(fun.data="mean_sdl", fun.args = list(mult=0.4), geom="crossbar", width=0.3) +
ggtitle(Graph_title) +
theme_light() +
xlab(x_lab_title) +
ylab(y_lab_title) +
scale_fill_grey() +
theme(
plot.title = element_text(hjust = 0.5, size=12),
axis.title.x = element_text(size=11),
axis.title.y = element_text(size=11),
axis.text.x = element_text(size=11),
axis.text.y = element_text(size=11),
legend.title =element_text(size=11),
legend.text = element_text(size=11))
print(M3)
ggsave(paste(save_title, "_M3", ".pdf", sep =""), plot = M3, device = "pdf", path = "/Users/Michal/Dropbox/UNISG/20. Thesis/4. Code/Figures/",
scale = 1, width = 20, height = 7, units = "cm",
dpi = 300, limitsize = TRUE)
|
#
# NMPostAmputRefinement1
#
# Basic refinement, but taking into account amput data to
# save time and not stimulate the amputated zone(s).
#
# Neural Network Refinement Version 1
#
#
# Given a simple two layer network, stimulate the input layer
# using random-sized, random-amplitude, random-duration and
# semi-randomly positioned probes.
#
# The semi-random part arises from treating the input layer
# for example as the representation of the hand. That is,
# restrict
#
# The stimulus protocol is to have a few second long
# 'trial.' For the first second or so just iterate the
# system so that a background activity level can be
# estimatd. Then apply (as a ON step function) a x-second long
# pulse to the input layer "path" and convert that to a firing
# rate.
# --- Stimulus set construction and presentation order ---
plasticityFlag = TRUE;
# Build the amputation-aware stimulus set; Dig3MapAmput0 is a project
# function defined elsewhere — presumably it avoids probes overlapping the
# amputated digit/zone (see the header above). TODO confirm.
inStim = Dig3MapAmput0( N, trialDurRFProbeInIters, oneSecondNumIter,
refinementAmputPatchSize , refinementAmputStimDuration, kAmputDigit, kAmputZoneMax );
numStim = length ( inStim );
indexStim = sample(1:numStim); # Want to present the stimulii in random sequence.
# Record linear cell indices for the first few (randomly ordered) stimuli so
# their time series can be displayed during the run (see viewList checks below).
viewFirstFew = 5;
viewList = rep ( 0, viewFirstFew );
for ( i in 1:viewFirstFew ) {
whichStim = indexStim[i];
# GetLin: project helper mapping a (row, column) patch position to a linear
# index on an N x N grid — TODO confirm.
viewList[i] = GetLin( inStim[[whichStim]]$rowPatch, inStim[[whichStim]]$columnPatch, N );
} # for ( i in 1:viewFirstFew ) {
viewList = sort ( viewList );
iStim = 1;
while ( iStim <= numStim ) { # Placeholder for an eventual automatic stopping criterion.
whichStim = indexStim[iStim];
iter = 2;
while ( iter <= trialLengthInIters ) {
# Load the stimulus pattern.
k = iter - 1;
v0[k,] = runif(N2, min = -noiseLevel, max = noiseLevel) + inStim[[whichStim]]$inputPatch[k,];
#v0[k,] = VectorNormalize ( runif(N2, min = -noiseLevel, max = noiseLevel) + inStim[[whichStim]]$inputPatch[k,] );
r0[k,] = sigmoid ( v0[k,], beta );
# Do one iteration of the system.
source("NMDispatch.R");
# Check whether to normalize. Recall that initial condition weights are normalized.
if ( wghtNormalizeFlag == 1 ) {
w1.e.0[,,iter] = NormalizeOutputWeights1 ( w1.e.0[,,iter], n0.numPreSyn, e.wResource );
w1.e.i[,,iter] = NormalizeOutputWeights1 ( w1.e.i[,,iter], n1.e.i.numPreSyn, i.wResource );
w1.i.e[,,iter] = NormalizeOutputWeights1 ( w1.i.e[,,iter], n1.i.e.numPreSyn, e.wResource );
w1.e.e[,,iter] = NormalizeOutputWeights1 ( w1.e.e[,,iter], n1.e.e.numPreSyn, e.wResource );
} else if ( wghtNormalizeFlag == 2.1 ) {
w1.e.0[,,iter] = NormalizeInputWeights1 ( w1.e.0[,,iter], e.wResource );
w1.e.i[,,iter] = NormalizeInputWeights1 ( w1.e.i[,,iter], e.wResource );
w1.i.e[,,iter] = NormalizeInputWeights1 ( w1.i.e[,,iter], i.wResource );
w1.e.e[,,iter] = NormalizeInputWeights1 ( w1.e.e[,,iter], e.wResource );
} else if ( wghtNormalizeFlag == 2.2 ) {
w1.e.0[,,iter] = NormalizeInputWeights2 ( w1.e.0[,,iter], e.wResource );
w1.e.i[,,iter] = NormalizeInputWeights2 ( w1.e.i[,,iter], e.wResource );
w1.i.e[,,iter] = NormalizeInputWeights2 ( w1.i.e[,,iter], i.wResource );
w1.e.e[,,iter] = NormalizeInputWeights2 ( w1.e.e[,,iter], e.wResource );
} # if ( wghtNormalizeFlag ) {
# Increment iteration.
iter = iter + 1;
} # while ( iter <= trialLengthInIters )
# View select time series.
# Show an epoch of system evolution for a few sample cells on each layer.
if ( verbose ) {
currentCellID = GetLin( inStim[[whichStim]]$rowPatch, inStim[[whichStim]]$columnPatch, N );
if ( sum ( currentCellID == viewList ) ) {
nTrials = 1; startOffsetInIter = numIter - nTrials * trialDurRFProbeInIters + 1;
ShowTimeSeries( v0, r0, v1.e, v1.i, nTrials, trialDurRFProbeInIters, startOffsetInIter, currentCellID, knmSelect, deltaT );
} # if ( sum ( currentCellID == viewList ) ) {
} # if ( verbose )
# Pop the last iteration into the first, including weights.
v0[1,] = v0[trialLengthInIters,]; r0[1,] = r0[trialLengthInIters,];
v1.e[1,] = v1.e[trialLengthInIters,]; r1.e[1,] = r1.e[trialLengthInIters,];
v1.i[1,] = v1.i[trialLengthInIters,]; r1.i[1,] = r1.i[trialLengthInIters,];
if ( plasticityFlag ) {
w1.e.0[,,1] = w1.e.0[,,numIter]; w1.e.e[,,1] = w1.e.e[,,numIter];
w1.e.i[,,1] = w1.e.i[,,numIter]; w1.i.e[,,1] = w1.i.e[,,numIter];
} # if ( plasticityFlag ) {
iStim = iStim + 1;
} # while ( iStim <= numStim ) {
#
# Before exiting be sure to turn off plasticity.
#
plasticityFlag = FALSE;
#
# Before exiting be sure to turn off plasticity.
#
| /NMPostAmputRefinement1.R | no_license | kgrajski/KamilGrajski-Somatotopic-Discontinuity-Plasticity | R | false | false | 4,550 | r |
#
# NMPostAmputRefinement1
#
# Basic refinement, but taking into account amput data to
# save time and not stimulate the amputated zone(s).
#
# Neural Network Refinement Version 1
#
#
# Given a simple two layer network, stimulate the input layer
# using random-sized, random-amplitude, random-duration and
# semi-randomly positioned probes.
#
# The semi-random part arises from treating the input layer
# for example as the representation of the hand. That is,
# restrict probing to the non-amputated portion of the map
# (the original comment was truncated here -- TODO confirm intent).
#
# The stimulus protocol is to have a few second long
# 'trial.' For the first second or so just iterate the
# system so that a background activity level can be
# estimated. Then apply (as an ON step function) an x-second long
# pulse to the input layer "path" and convert that to a firing
# rate.
#
# NOTE(review): this script runs in the global environment and relies on
# state set up by the driver (N, N2, v0, r0, v1.*, r1.*, w1.*,
# trialLengthInIters, numIter, oneSecondNumIter, noiseLevel, beta,
# wghtNormalizeFlag, verbose, deltaT, knmSelect, e.wResource,
# i.wResource, n*.numPreSyn, refinementAmput*, kAmput*) and on helpers
# defined elsewhere (Dig3MapAmput0, GetLin, sigmoid, Normalize*Weights*,
# ShowTimeSeries) -- confirm against the driver before refactoring.
plasticityFlag = TRUE;
# Build the stimulus set; Dig3MapAmput0 supplies probes that avoid the
# amputated digit zone(s).
inStim = Dig3MapAmput0( N, trialDurRFProbeInIters, oneSecondNumIter,
refinementAmputPatchSize , refinementAmputStimDuration, kAmputDigit, kAmputZoneMax );
numStim = length ( inStim );
indexStim = sample(1:numStim); # Want to present the stimuli in random sequence.
# Remember the linear cell IDs of the first few (shuffled) stimuli so
# their time series can be displayed below when verbose is on.
viewFirstFew = 5;
viewList = rep ( 0, viewFirstFew );
for ( i in 1:viewFirstFew ) {
whichStim = indexStim[i];
viewList[i] = GetLin( inStim[[whichStim]]$rowPatch, inStim[[whichStim]]$columnPatch, N );
} # for ( i in 1:viewFirstFew ) {
viewList = sort ( viewList );
iStim = 1;
while ( iStim <= numStim ) { # Placeholder for an eventual automatic stopping criterion.
whichStim = indexStim[iStim];
iter = 2;
while ( iter <= trialLengthInIters ) {
# Load the stimulus pattern: uniform background noise plus the probe
# patch, then convert membrane potential to firing rate.
k = iter - 1;
v0[k,] = runif(N2, min = -noiseLevel, max = noiseLevel) + inStim[[whichStim]]$inputPatch[k,];
#v0[k,] = VectorNormalize ( runif(N2, min = -noiseLevel, max = noiseLevel) + inStim[[whichStim]]$inputPatch[k,] );
r0[k,] = sigmoid ( v0[k,], beta );
# Do one iteration of the system. NMDispatch.R updates the v1/r1/w1
# state arrays in the global environment.
source("NMDispatch.R");
# Check whether to normalize. Recall that initial condition weights are normalized.
# 1 = normalize outgoing weights; 2.1 / 2.2 = two incoming-weight variants.
if ( wghtNormalizeFlag == 1 ) {
w1.e.0[,,iter] = NormalizeOutputWeights1 ( w1.e.0[,,iter], n0.numPreSyn, e.wResource );
w1.e.i[,,iter] = NormalizeOutputWeights1 ( w1.e.i[,,iter], n1.e.i.numPreSyn, i.wResource );
w1.i.e[,,iter] = NormalizeOutputWeights1 ( w1.i.e[,,iter], n1.i.e.numPreSyn, e.wResource );
w1.e.e[,,iter] = NormalizeOutputWeights1 ( w1.e.e[,,iter], n1.e.e.numPreSyn, e.wResource );
} else if ( wghtNormalizeFlag == 2.1 ) {
w1.e.0[,,iter] = NormalizeInputWeights1 ( w1.e.0[,,iter], e.wResource );
w1.e.i[,,iter] = NormalizeInputWeights1 ( w1.e.i[,,iter], e.wResource );
w1.i.e[,,iter] = NormalizeInputWeights1 ( w1.i.e[,,iter], i.wResource );
w1.e.e[,,iter] = NormalizeInputWeights1 ( w1.e.e[,,iter], e.wResource );
} else if ( wghtNormalizeFlag == 2.2 ) {
w1.e.0[,,iter] = NormalizeInputWeights2 ( w1.e.0[,,iter], e.wResource );
w1.e.i[,,iter] = NormalizeInputWeights2 ( w1.e.i[,,iter], e.wResource );
w1.i.e[,,iter] = NormalizeInputWeights2 ( w1.i.e[,,iter], i.wResource );
w1.e.e[,,iter] = NormalizeInputWeights2 ( w1.e.e[,,iter], e.wResource );
} # if ( wghtNormalizeFlag ) {
# Increment iteration.
iter = iter + 1;
} # while ( iter <= trialLengthInIters )
# View select time series.
# Show an epoch of system evolution for a few sample cells on each layer.
if ( verbose ) {
currentCellID = GetLin( inStim[[whichStim]]$rowPatch, inStim[[whichStim]]$columnPatch, N );
if ( sum ( currentCellID == viewList ) ) {
nTrials = 1; startOffsetInIter = numIter - nTrials * trialDurRFProbeInIters + 1;
ShowTimeSeries( v0, r0, v1.e, v1.i, nTrials, trialDurRFProbeInIters, startOffsetInIter, currentCellID, knmSelect, deltaT );
} # if ( sum ( currentCellID == viewList ) ) {
} # if ( verbose )
# Pop the last iteration into the first, including weights.
# (Carries state across trials so the next trial starts where this
# one ended.)
v0[1,] = v0[trialLengthInIters,]; r0[1,] = r0[trialLengthInIters,];
v1.e[1,] = v1.e[trialLengthInIters,]; r1.e[1,] = r1.e[trialLengthInIters,];
v1.i[1,] = v1.i[trialLengthInIters,]; r1.i[1,] = r1.i[trialLengthInIters,];
if ( plasticityFlag ) {
w1.e.0[,,1] = w1.e.0[,,numIter]; w1.e.e[,,1] = w1.e.e[,,numIter];
w1.e.i[,,1] = w1.e.i[,,numIter]; w1.i.e[,,1] = w1.i.e[,,numIter];
} # if ( plasticityFlag ) {
iStim = iStim + 1;
} # while ( iStim <= numStim ) {
#
# Before exiting be sure to turn off plasticity.
#
plasticityFlag = FALSE;
#
# Before exiting be sure to turn off plasticity.
#
|
# map load
# Load the shapefile of Korean administrative districts (si/gun/gu) and
# prepare two fortified data frames for ggplot2: one at full resolution
# (sig_shp_fortify_join) and one simplified to ~1% of the vertices
# (simple_join).
theme_set(theme_gray(base_family="NanumGothic"))  # Korean-capable font
pacman::p_load("rgdal","ggmap","ggplot2","maps","plyr","dplyr")
# NOTE(review): the second readOGR() overwrites the first; the two calls
# differ only in the machine-specific user path ("youngjunna" vs
# "youngjun"). Presumably one errors harmlessly depending on which
# machine runs the script -- confirm, and consider guarding with
# dir.exists().
sig_shp <- readOGR("/Users/youngjunna/Github/diffusion_model/R","전국_법정구역정보(시군구)_201510")
sig_shp <- readOGR("/Users/youngjun/Github/diffusion_model/R","전국_법정구역정보(시군구)_201510")
sig_shp@data$id <- rownames(sig_shp@data) # assign row names as the id key
sig_shp_fortify <- fortify(sig_shp,by="id") # flatten polygons to a plain data frame
sig_shp_fortify_join <- join(sig_shp_fortify,sig_shp@data, by="id") # re-attach attributes by id
simple <- rmapshaper::ms_simplify(sig_shp,keep=0.01) # keep 1% of vertices
simple@data$id <- rownames(simple@data) # assign row names as the id key
simple_fortify <- fortify(simple,by="id") # fortify the simplified shapes by id
simple_join <- join(simple_fortify,simple@data, by="id") # re-attach attributes by id
# write.csv(simple_join,"simple.txt",row.names=FALSE)
| /R/diffusion_model.R | no_license | YoungjunNa/GIS-livestock-manure | R | false | false | 866 | r | # map load
# Load the shapefile of Korean administrative districts (si/gun/gu) and
# prepare two fortified data frames for ggplot2: one at full resolution
# (sig_shp_fortify_join) and one simplified to ~1% of the vertices
# (simple_join).
theme_set(theme_gray(base_family="NanumGothic"))  # Korean-capable font
pacman::p_load("rgdal","ggmap","ggplot2","maps","plyr","dplyr")
# NOTE(review): the second readOGR() overwrites the first; the two calls
# differ only in the machine-specific user path ("youngjunna" vs
# "youngjun"). Presumably one errors harmlessly depending on which
# machine runs the script -- confirm, and consider guarding with
# dir.exists().
sig_shp <- readOGR("/Users/youngjunna/Github/diffusion_model/R","전국_법정구역정보(시군구)_201510")
sig_shp <- readOGR("/Users/youngjun/Github/diffusion_model/R","전국_법정구역정보(시군구)_201510")
sig_shp@data$id <- rownames(sig_shp@data) # assign row names as the id key
sig_shp_fortify <- fortify(sig_shp,by="id") # flatten polygons to a plain data frame
sig_shp_fortify_join <- join(sig_shp_fortify,sig_shp@data, by="id") # re-attach attributes by id
simple <- rmapshaper::ms_simplify(sig_shp,keep=0.01) # keep 1% of vertices
simple@data$id <- rownames(simple@data) # assign row names as the id key
simple_fortify <- fortify(simple,by="id") # fortify the simplified shapes by id
simple_join <- join(simple_fortify,simple@data, by="id") # re-attach attributes by id
# write.csv(simple_join,"simple.txt",row.names=FALSE)
|
# Tidy 2018 Ohio statewide precinct-level election returns.
#
# Reads the precinct workbook, reshapes the governor results (sheet 3)
# and the state-house results (sheet 5) into long format, and writes
# gov_tidy.csv, ga_tidy.csv, and the precinct-level join join_tidy.csv.
library(readxl)
library(dplyr)
library(tidyr)
library(stringr)
library(zoo)
file <- "2018-11-06_statewideprecinct_miami.xlsx"
excel_sheets(file)
## governor results
df <- read_xlsx(file, sheet=3, skip=1, col_names=TRUE)
gov_tidy <- df %>%
  select(c(1,2,9:15)) %>%                # county, precinct, 7 gov columns
  slice(-c(1:2)) %>%                     # drop residual header rows
  mutate(precinct_total = rowSums(.[3:9])) %>%
  select(-c(3,6:9)) %>%                  # keep the two major-party candidates
  mutate(office = "governor") %>%
  rename(county = "County Name",
         precinct = "Precinct Name",
         Cordray = "Richard Cordray and Betty Sutton (D)",
         DeWine = "Mike DeWine and Jon Husted (R)") %>%
  gather(key = gov_candidate, value = gov_votes, Cordray:precinct_total)
write.csv(gov_tidy, file=("gov_tidy.csv"), row.names = F)
## state legislative results
df <- read_xlsx(file, sheet=5, col_names = FALSE)
# District labels only appear above the first candidate of each district;
# carry them forward and fuse "district-candidate" into the header row.
df[1,] <- paste(na.locf(as.character(df[1,])), df[2,], sep="-")
df[1,1:2] <- c("county","precinct")
ga_tidy <- df %>%
  select(-c(3:44)) %>%
  slice(-c(2:4)) %>%
  # FIX: funs() is defunct in current dplyr, so mutate_all(funs(...))
  # errors; across() is the supported equivalent. Behavior unchanged.
  mutate(across(everything(), ~ str_replace(.x, "State Representative - District ", "")))
colnames(ga_tidy) <- ga_tidy[1,]
house_candidate <- tail(names(ga_tidy),-2)
ga_tidy <- ga_tidy %>%
  slice(-1) %>%
  # house_candidate (a character vector) selects the columns to gather.
  gather(house_candidate, key = house_candidate, value = house_votes) %>%
  separate(house_candidate, sep = 3, into = c("house_district","house_candidate")) %>%
  mutate(house_district = str_replace(house_district, "-","")) %>%
  mutate(house_votes = na_if(house_votes, "0")) %>%  # treat "0" as missing
  mutate_at(vars(5), as.numeric) %>%
  drop_na()
write.csv(ga_tidy, file=("ga_tidy.csv"), row.names = F)
## join the two sheets
join_tidy <- full_join(ga_tidy,gov_tidy,by=c("county","precinct"))
write.csv(join_tidy, file=("join_tidy.csv"), row.names = F)
| /dailykos/ohio_tidy.R | no_license | acolter/elections | R | false | false | 1,688 | r | library(readxl)
# Tidy 2018 Ohio statewide precinct-level election returns.
#
# Reads the precinct workbook, reshapes the governor results (sheet 3)
# and the state-house results (sheet 5) into long format, and writes
# gov_tidy.csv, ga_tidy.csv, and the precinct-level join join_tidy.csv.
library(dplyr)
library(tidyr)
library(stringr)
library(zoo)
file <- "2018-11-06_statewideprecinct_miami.xlsx"
excel_sheets(file)
## governor results
df <- read_xlsx(file, sheet=3, skip=1, col_names=TRUE)
gov_tidy <- df %>%
  select(c(1,2,9:15)) %>%                # county, precinct, 7 gov columns
  slice(-c(1:2)) %>%                     # drop residual header rows
  mutate(precinct_total = rowSums(.[3:9])) %>%
  select(-c(3,6:9)) %>%                  # keep the two major-party candidates
  mutate(office = "governor") %>%
  rename(county = "County Name",
         precinct = "Precinct Name",
         Cordray = "Richard Cordray and Betty Sutton (D)",
         DeWine = "Mike DeWine and Jon Husted (R)") %>%
  gather(key = gov_candidate, value = gov_votes, Cordray:precinct_total)
write.csv(gov_tidy, file=("gov_tidy.csv"), row.names = F)
## state legislative results
df <- read_xlsx(file, sheet=5, col_names = FALSE)
# District labels only appear above the first candidate of each district;
# carry them forward and fuse "district-candidate" into the header row.
df[1,] <- paste(na.locf(as.character(df[1,])), df[2,], sep="-")
df[1,1:2] <- c("county","precinct")
ga_tidy <- df %>%
  select(-c(3:44)) %>%
  slice(-c(2:4)) %>%
  # FIX: funs() is defunct in current dplyr, so mutate_all(funs(...))
  # errors; across() is the supported equivalent. Behavior unchanged.
  mutate(across(everything(), ~ str_replace(.x, "State Representative - District ", "")))
colnames(ga_tidy) <- ga_tidy[1,]
house_candidate <- tail(names(ga_tidy),-2)
ga_tidy <- ga_tidy %>%
  slice(-1) %>%
  # house_candidate (a character vector) selects the columns to gather.
  gather(house_candidate, key = house_candidate, value = house_votes) %>%
  separate(house_candidate, sep = 3, into = c("house_district","house_candidate")) %>%
  mutate(house_district = str_replace(house_district, "-","")) %>%
  mutate(house_votes = na_if(house_votes, "0")) %>%  # treat "0" as missing
  mutate_at(vars(5), as.numeric) %>%
  drop_na()
write.csv(ga_tidy, file=("ga_tidy.csv"), row.names = F)
## join the two sheets
join_tidy <- full_join(ga_tidy,gov_tidy,by=c("county","precinct"))
write.csv(join_tidy, file=("join_tidy.csv"), row.names = F)
|
# Data Visualizations
# Jung Mee Park
# jmpark@email.arizona.edu
#
# NOTE(review): this script assumes a survey data frame `dm2` (and later
# `data`) is already in scope, including a `Students` factor that
# distinguishes graduate from undergraduate respondents -- confirm
# against the driver script.
# create one chart with simple breakdown
# ggplot(dm2, aes(x = Graduate)) +
# geom_bar() +
# coord_flip()
# create another image about number of emails
# cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# Q9graph2 <- dm2 %>%
# # mutate(Q9 = fct_infreq(Q9)) %>%
# # dplyr::filter(Students == "Undergraduate") %>%
# drop_na(Q9) %>%
# ggplot(aes(x = Q9, fill = Students, colour = Students)) +
# geom_bar() +
# coord_flip() +
# labs(x = "redundant emails", y = "responses")
# Q7: emails received per week, stacked by student type.
# NOTE(review): `response = Students` inside aes() is not a ggplot2
# aesthetic and is silently ignored -- likely a leftover.
Q7graph <- dm2 %>%
# mutate(Q7 = fct_infreq(Q7)) %>%
drop_na(Q7) %>%
ggplot(aes(x = Q7, fill = Students, response = Students)) +
geom_bar() +
coord_flip() +
labs(x = "emails per week", y = "responses")
Q7graph <- Q7graph + labs(title = "How many emails do Students Get?",
subtitle = "undergraduate and graduate students")
print(Q7graph) # 16 or more is 907 and that is about 40%
# pie chart for Q3
# slices <- c(458, 422, 430, 645, 482)
# lbls <- c("Freshmen", "Sophomore", "Junior", "Senior","Grad&Prof")
# pct <- round(slices/sum(slices)*100)
# lbls <- paste(lbls, pct) # add percents to labels
# lbls <- paste(lbls,"%",sep="") # add % to labels
# pie(slices,labels = lbls, col=terrain.colors(length(lbls)))
# pie(slices,labels = lbls, col=terrain.colors(length(lbls)), main="Distribution of Survey")
# Q7pie <- dm2 %>%
# # mutate(Q7 = fct_infreq(Q7)) %>%
# drop_na(Q7) %>%
# ggplot(aes(x = Q7), y=response, fill = Q7, response = Q7) +
# geom_bar(fill = "lightblue", color = "grey") +
# coord_polar("y", start=0) +
# theme_void()
#
# print(Q7pie)
# # just for FB
# dm2 %>%
# # %>% mutate(Q11_1 = fct_infreq(Q11_1)) %>%
# ggplot(aes(x = Q11_1)) +
# geom_bar() +
# coord_flip()
# redudant emails Q9
# Q9: perceived email redundancy, all students combined.
Q9graph <- dm2 %>%
# mutate(Q9 = fct_infreq(Q9)) %>%
drop_na(Q9) %>%
ggplot(aes(x = Q9)) +
geom_bar(fill = "lightblue", color = "grey") +
coord_flip() +
labs(x = "redundancy", y = "responses")
Q9graph <- Q9graph + labs(title = "How redundant are these emails?",
subtitle = "undergraduate and graduate")
print(Q9graph)
# Q9 redundancy, undergraduates only, bars ordered by frequency.
Q9graph1 <- dm2 %>%
mutate(Q9 = fct_infreq(Q9)) %>%
drop_na(Q9) %>%
dplyr::filter(Career =="UGRD") %>%
ggplot(aes(x = Q9)) +
geom_bar(fill = "lightblue", color = "grey") +
coord_flip() +
labs(x = "redundant emails", y = "responses")
Q9graph1 <- Q9graph1 + labs(title = "How redundant are these emails?",
subtitle = "undergraduate only")
print(Q9graph1)
########33
# redudant emails Q9 undergraduates only
# try to find a
# bargraph0c <- dm2 %>%
# pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
# values_drop_na = TRUE) %>%
# ggplot(aes(x = response, fill = Students, colour = Students)) +
# geom_bar() +
# # geom_text(stat='count', aes(label=..count..), vjust=0) +
# geom_boxplot() +
# # scale_color_brewer(palette = "Blues") +
# labs(x = "Type of Social Media Apps", y = "Number of Uses")
# Q9 redundancy, stacked by student type (graduate vs undergraduate).
Q9graph2 <- dm2 %>%
# mutate(Q9 = fct_infreq(Q9)) %>%
# dplyr::filter(Students == "Undergraduate") %>%
drop_na(Q9) %>%
ggplot(aes(x = Q9, fill = Students, colour = Students)) +
geom_bar() +
coord_flip() +
labs(x = "redundant emails", y = "responses")
Q9graph2 <- Q9graph2 + labs(title = "How redundant are these emails?",
subtitle = "undergraduate and graduate students")
print(Q9graph2)
# Q15: how easy is it to get involved on campus?
# reorder the response scale from hardest to easiest
# levels(dm2$Q15) <- c("Very difficult", "Somewhat difficult",
# "Somewhat easy", "Very easy")
dm2$Q15 = factor(dm2$Q15,
ordered = TRUE,
levels = c("Very difficult", "Somewhat difficult",
"Somewhat easy", "Very easy"))
# NOTE(review): fct_infreq() below re-orders the bars by count, which
# discards the ordered level sequence assigned just above; also the
# stat="identity" argument is passed to ggplot(), not a geom, so it has
# no effect. Confirm which ordering was intended.
Q15graph <- dm2 %>%
mutate(Q15 = fct_infreq(Q15)) %>%
drop_na(Q15) %>%
ggplot(aes(x = Q15), stat="identity") +
geom_bar(fill = "lightblue", color = "grey") +
coord_flip() +
labs(x = "", y = "responses")
Q15graph <- Q15graph + labs(title = "How easy is it for students to get involved?",
subtitle = "undergraduate and graduate")
print(Q15graph)
### emails that pertain to me
# NOTE(review): the object is named Q9graph3 but it actually plots Q8
# (email relevance), stacked by student type.
Q9graph3 <- dm2 %>%
# mutate(Q9 = fct_infreq(Q9)) %>%
# dplyr::filter(Students == "Undergraduate") %>%
drop_na(Q8) %>%
ggplot(aes(x = Q8, fill = Students, colour = Students)) +
geom_bar() +
coord_flip() +
labs(x = "Emails Relevant to Me", y = "Responses")
Q9graph3 <- Q9graph3 + labs(title = "Do these emails pertain to me?",
subtitle = "undergraduate and graduate students")
print(Q9graph3)
### data visualization
# categorical or qualitative
# ordinal
dim(dm2)
summary(dm2)
# pie chart of respondents by career level
table(data$Career)
# NOTE(review): the hard-coded counts below were presumably transcribed
# from the table(data$Career) output above -- they will go stale if the
# data change; confirm before reuse.
slices <- c(35, 403, 30, 1955, 14)
lbls <- c("law", "grad", "med", "undergraduate","phrm")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%",sep="") # add % to labels
pie(slices,labels = lbls, col=terrain.colors(length(lbls)))
pie(slices,labels = lbls, col=terrain.colors(length(lbls)), main="Distribution of Survey")
# better pie chart: class standing instead of career code
slices <- c(458, 422, 430, 645, 482)
lbls <- c("Freshmen", "Sophomore", "Junior", "Senior","Graduate & Professional")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%",sep="") # add % to labels
pie(slices,labels = lbls, col=terrain.colors(length(lbls)))
pie(slices,labels = lbls, col=terrain.colors(length(lbls)), main="Distribution of Survey")
# correlation plots
# Activate the dplyr package
library("dplyr")
# Correlation matrix of items
# install.packages("ggcorrplot")
# install.packages("lsr")
library(lsr)
# Correlation matrix of the numeric columns whose names start with
# "Age" or "CumGPA2191"; pairwise-complete handles missing values.
cormat <- data %>%
dplyr::select(starts_with(c("Age", "CumGPA2191"))) %>%
cor(., use = "pairwise.complete.obs")
# Activate the corrplot package
library("corrplot")
# Correlation matrix plot
corrplot(cormat, # correlation matrix
order = "hclust", # hierarchical clustering of correlations
addrect = 2) # number of rectangles to draw around clusters
# function to get chi square p value and Cramers V
# https://stackoverflow.com/questions/52554336/plot-the-equivalent-of-correlation-matrix-for-factors-categorical-data-and-mi
#
# f = function(x,y) {
# tbl = dm2 %>% select(x,y) %>% table()
# chisq_pval = round(chisq.test(tbl)$p.value, 4)
# cramV = round(cramersV(tbl), 4)
# data.frame(x, y, chisq_pval, cramV) }
#
# # create unique combinations of column names
# # sorting will help getting a better plot (upper triangular)
# df_comb = data.frame(t(combn(sort(names(dm2)), 2)), stringsAsFactors = F)
#
# # apply function to each variable combination
# df_res = map2_df(df_comb$Age, df_comb$Q7, f)
#
# # plot results
# df_res %>%
# ggplot(aes(x,y,fill=chisq_pval))+
# geom_tile()+
# geom_text(aes(x,y,label=cramV))+
# scale_fill_gradient(low="red", high="yellow")+
# theme_classic()
#
# require(rcompanion)
# # Calculate a pairwise association between all variables in a data-frame. In particular nominal vs nominal with Chi-square, numeric vs numeric with Pearson correlation, and nominal vs numeric with ANOVA.
# # Adopted from https://stackoverflow.com/a/52557631/590437
# mixed_assoc = function(dm2, cor_method="spearman", adjust_cramersv_bias=TRUE){
# df_comb = expand.grid(names(dm2), names(dm2), stringsAsFactors = F) %>% set_names("X1", "X2")
#
# is_nominal = function(x) class(x) %in% c("factor", "character")
# # https://community.rstudio.com/t/why-is-purr-is-numeric-deprecated/3559
# # https://github.com/r-lib/rlang/issues/781
# is_numeric <- function(x) { is.integer(x) || is_double(x)}
#
# f = function(xName,yName) {
# x = pull(df, xName)
# y = pull(df, yName)
#
# result = if(is_nominal(x) && is_nominal(y)){
# # use bias corrected cramersV as described in https://rdrr.io/cran/rcompanion/man/cramerV.html
# cv = cramerV(as.character(x), as.character(y), bias.correct = adjust_cramersv_bias)
# data.frame(xName, yName, assoc=cv, type="cramersV")
#
# }else if(is_numeric(x) && is_numeric(y)){
# correlation = cor(x, y, method=cor_method, use="complete.obs")
# data.frame(xName, yName, assoc=correlation, type="correlation")
#
# }else if(is_numeric(x) && is_nominal(y)){
# # from https://stats.stackexchange.com/questions/119835/correlation-between-a-nominal-iv-and-a-continuous-dv-variable/124618#124618
# r_squared = summary(lm(x ~ y))$r.squared
# data.frame(xName, yName, assoc=sqrt(r_squared), type="anova")
#
# }else if(is_nominal(x) && is_numeric(y)){
# r_squared = summary(lm(y ~x))$r.squared
# data.frame(xName, yName, assoc=sqrt(r_squared), type="anova")
#
# }else { #does not work from here
# warning(paste("unmatched column type combination: ", class(x), class(y)))
# }
#
# # finally add complete obs number and ratio to table
# result %>% mutate(complete_obs_pairs=sum(!is.na(x) & !is.na(y)), complete_obs_ratio=complete_obs_pairs/length(x)) %>% rename(x=xName, y=yName)
# }
#
# # apply function to each variable combination
# map2_df(df_comb$X1, df_comb$X2, f)
# }
#
# this bar graph includes counts from Q11 (social media app usage;
# multiple selections allowed, NA responses dropped by pivot_longer).
# NOTE(review): the title below says "undergraduates only" but the
# Career filter is commented out, so this plot covers ALL students --
# confirm which was intended.
bargraph0 <- dm2 %>%
# dplyr::filter(Career !="UGRD") %>%
# dplyr::select(Q11_1 != "NA") %>%
# dplyr::select(Q11_4 != "NA") %>%
# dplyr::select(Q11_5 != "NA") %>%
# dplyr::select(Q11_6 != "NA") %>%
# dplyr::select(Q11_7 != "NA") %>%
# dplyr::select(Q11_8 != "NA") %>%
# dplyr::select(Q11_9 != "NA") %>%
# dplyr::select(Q11_10 != "NA") %>%
# dplyr::select(Q11_1 == "Facebook / Messenger")
# dplyr::filter(Q11_1 == 'Facebook / Messenger') %>%
# dplyr::filter(Q11_4 == 'Instagram') #1779 %>%
# dplyr::filter(Q11_5 == 'Snapchat') #1612 %>%
# dplyr::filter(Q11_6 == 'Text / SMS') #1991 %>%
# dplyr::filter(Q11_7 == 'Twitter') # 995 %>%
# dplyr::filter(Q11_8 == 'WeChat') # 100 %>%
# dplyr::filter(Q11_9 == 'WhatsApp') # 623 %>%
# dplyr::filter(Q11_10 == 'Other') #106 %>%
pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = response, colour = response)) +
geom_bar() +
geom_text(stat='count', aes(label=..count..), vjust=-1) +
geom_boxplot() +
labs(x = "Type of Social Media Apps", y = "Number of Uses")
bargraph0 <- bargraph0 + labs(title = "Which Apps do Students Use",
subtitle = "multiple selection allowed,
undergraduates only")
print(bargraph0)
# subset with Graduates only (everything except undergraduates)
bargraph0a <- dm2 %>%
dplyr::filter(Career !="UGRD") %>%
pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = response, colour = response)) +
geom_bar() +
geom_text(stat='count', aes(label=..count..), vjust=-1) +
geom_boxplot() +
labs(x = "Type of Social Media Apps", y = "Number of Uses")
bargraph0a <- bargraph0a + labs(title = "Which Apps do Students Use",
subtitle = "multiple selection allowed, graduates only")
print(bargraph0a)
# subset with undergraduates only
bargraph0b <- dm2 %>%
dplyr::filter(Career =="UGRD") %>%
pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = response, colour = response)) +
geom_bar() +
geom_text(stat='count', aes(label=..count..), vjust=-1) +
geom_boxplot() +
labs(x = "Type of Social Media Apps", y = "Number of Uses")
bargraph0b <- bargraph0b + labs(title = "Which Apps do Students Use",
subtitle = "multiple selection allowed, undergraduates only")
print(bargraph0b)
# more attempts
# stacked bar graph in R
# https://www.statology.org/stacked-barplot-in-r/
# ggplot(df, aes(fill=position, y=points, x=team)) +
# geom_bar(position='stack', stat='identity') +
# theme_minimal() +
# labs(x='Team', y='Points', title='Avg. Points Scored by Position & Team') +
# theme(plot.title = element_text(hjust=0.5, size=20, face='bold')) +
# scale_fill_manual('Position', values=c('coral2', 'steelblue', 'pink'))
# bargraph0c <- dm2 %>%
# pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
# values_drop_na = TRUE) %>%
# ggplot(aes(x = response, fill = Career, colour = Career)) +
# geom_bar() +
# geom_text(stat='count', aes(label=..count..), vjust=-.5) +
# geom_boxplot() +
# scale_color_brewer(palette = "Blues") +
# labs(x = "Type of Social Media Apps", y = "Number of Uses")
#
# bargraph0c <- bargraph0c + labs(title = "Which Apps do Students Use",
# subtitle = "multiple selection allowed")
# print(bargraph0c)
# stacked bar with other subset
# recode Graduate variable into a readable Students factor
# x <- factor(c("alpha","beta","gamma","alpha","beta"))
# levels(x) <- list(A="alpha", B="beta", C="gamma")
# levels(data$Graduate)
# dm2$Graduate <- recode_factor(dm2$Graduate, "2" = "Graduate", "1" = "Undergraduate")
# dm2$Graduate = factor(dm2$Graduate,
# ordered = FALSE,
# levels = c("1", "2")) # more warnings with this one
# Duplicate data
# NOTE(review): levels(x) <- list(new = old) RENAMES levels, so this
# maps level "Graduate" -> "TRUE" and "Undergraduate" -> "FALSE"; the
# recode_factor() below then maps them back. Convoluted, but the author
# marked the end result as working -- verify against the actual
# encoding of dm2$Graduate before simplifying.
levels(dm2$Graduate) <- list("TRUE" = "Graduate", "FALSE" = "Undergraduate")
library(forcats)
dm2$Students <- as.factor(dm2$Graduate)
is.factor(dm2$Students)
dm2$Students <- recode_factor(dm2$Students, "TRUE" = "Graduate", "FALSE" = "Undergraduate") # this worked
# dm2$Students <- recode(dm2$Students, "2" == "Graduate", "1" == "Undergraduate")
# fct_recode(dm2$Graduate, "TRUE" = "Graduate", "FALSE" = "Undergraduate")
# dm2$Graduate[dm2$Graduate== "TRUE"] <- "Graduate"
# dm2$Graduate[dm2$Graduate == "1"] <- "Undergraduate"
# Convert to a factor
# dm2$Graduate <- as.factor(dm2$Graduate)
# dplyr::mutate(Career=recode(Career,'UGRD'='Undergrad',
# 'GRAD' ='Graduate', 'LAW'='Graduate', 'MEDS'='Graduate', 'PHRM'='Graduate')) %>%
# Q11 app usage, stacked by the Students factor built above.
bargraph0c <- dm2 %>%
pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Students, colour = Students)) +
geom_bar() +
# geom_text(stat='count', aes(label=..count..), vjust=0) +
geom_boxplot() +
# scale_color_brewer(palette = "Blues") +
labs(x = "Type of Social Media Apps", y = "Number of Uses")
bargraph0c <- bargraph0c + labs(title = "Which Apps do Students Use",
subtitle = "multiple selection allowed")
print(bargraph0c)
# Same Q11 plot, but stacked by academic class standing instead of the
# graduate/undergraduate split.
bargraph0d <- dm2 %>%
# dplyr::mutate(Graduate = c("Yes", "No")) %>%
pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Acad.Class.Standing, colour = Acad.Class.Standing)) +
geom_bar() +
geom_text(stat='count', aes(label=..count..), vjust=-.5) +
geom_boxplot() +
# scale_color_brewer(palette = "Blues") +
labs(x = "Type of Social Media Apps", y = "Number of Uses")
bargraph0d <- bargraph0d + labs(title = "Which Apps do Students Use",
subtitle = "multiple selection allowed")
print(bargraph0d)
#####
# bargraph0c <- dm2 %>%
# pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
# values_drop_na = TRUE) %>%
# ggplot(aes(x = response, fill = Students, colour = Students)) +
# geom_bar() +
# # geom_text(stat='count', aes(label=..count..), vjust=0) +
# geom_boxplot() +
# # scale_color_brewer(palette = "Blues") +
# labs(x = "Type of Social Media Apps", y = "Number of Uses")
#
# bargraph0c <- bargraph0c + labs(title = "Which Apps do Students Use",
# subtitle = "multiple selection allowed")
# print(bargraph0c)
# for campus tools (Q5): satisfaction, stacked by student type.
bargraph2 <- dm2 %>%
pivot_longer(Q5_1:Q5_10, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Students, colour = Students)) +
geom_bar() +
geom_boxplot() +
labs(x = " ", y = "Total Responses")
bargraph2 <- bargraph2 + labs(title = "Overall Satisfaction with Campus Tools",
subtitle = "undergraduate and graduate students")
print(bargraph2)
# for campus involvement (Q14): how students get involved, stacked by
# student type.
bargraph3 <- dm2 %>%
pivot_longer(Q14_1:Q14_11, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Students, colour = Students)) +
geom_bar() +
geom_boxplot() +
labs(x = " ", y = "Total Responses")
bargraph3 <- bargraph3 + labs(title = "Student Involvement",
subtitle = "undergraduate and graduate students")
print(bargraph3)
# another attempt for campus involvement
# NOTE(review): comparing as.integer(Q14_*) to a label string can never
# be TRUE (integer codes vs text), so every filter below should drop all
# rows and the plot is most likely empty. Kept as-is because the author
# labeled this an "attempt" -- confirm before relying on it.
bargraph3a <- dm2 %>%
# dplyr::filter(dm2, Q14_1 == 'ASUA') %>%
# filter(dm2, Q14_1:Q14_13 != NA) %>%
dplyr::filter(as.integer(Q14_1) == 'ASUA') %>%
dplyr::filter(as.integer(Q14_4) == 'Classmates') %>%
dplyr::filter(as.integer(Q14_13) == 'Cultural and resource centers') %>%
dplyr::filter(as.integer(Q14_5) == 'Residence hall community') %>%
dplyr::filter(as.integer(Q14_6) == 'Fraternity and sorority programs') %>%
dplyr::filter(as.integer(Q14_7) == 'My college department') %>%
dplyr::filter(as.integer(Q14_8) == 'Roommates') %>%
dplyr::filter(as.integer(Q14_9) == 'Social media') %>%
dplyr::filter(as.integer(Q14_10) == 'Through friends and acquaintances') %>%
dplyr::filter(as.integer(Q14_12) == 'Workplace') %>%
dplyr::filter(as.integer(Q14_11) == 'Other') %>%
pivot_longer(Q14_1:Q14_13, names_to = "question", values_to = "response") %>%
ggplot(aes(x = response, colour = response)) +
geom_bar() +
geom_boxplot() +
labs(x = " ", y = "Total Responses")
bargraph3a <- bargraph3a + labs(title = "Student Involvement",
subtitle = " ")
print(bargraph3a)
# stacked bar graph for Q30 (would you recommend UArizona?), stacked by
# student type.
bargraph0e <- dm2 %>%
pivot_longer(Q30, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Students, colour = Students)) +
geom_bar() +
# geom_text(stat='count', aes(label=..count..), vjust=0) +
geom_boxplot() +
# scale_color_brewer(palette = "Blues") +
labs(x = "", y = "Number of Students")
bargraph0e <- bargraph0e + labs(title = "Will you be recommending UArizona to others?",
subtitle = "")
print(bargraph0e)
# for Q18: which issues matter most to students, stacked by student
# type.
bargraph4 <- dm2 %>%
pivot_longer(Q18_1:Q18_8, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Students, colour = Students)) +
geom_bar() +
geom_boxplot() +
labs(x = " ", y = "Total Responses")
bargraph4 <- bargraph4 + labs(title = "Issues Important to Students",
subtitle = "undergraduate and graduate students")
print(bargraph4)
# for Q29: rate ten administrative processes (ease-of-use items).
# Collect the ten Q29 columns into their own frame, then rename them
# with human-readable service labels.
bargraph5 <- data.frame(dm2$Q29_1, dm2$Q29_2, dm2$Q29_3, dm2$Q29_4, dm2$Q29_5,
dm2$Q29_6, dm2$Q29_7, dm2$Q29_8, dm2$Q29_9, dm2$Q29_10)
# helpful example
# dt1 <- data.frame(A=1, B=2, C=3, D=4)
#
# dt2 <- data.frame(Col1=c("A","B","C","D"),Col2=c("E","Q","R","Z"))
dt2 <- data.frame(Col1=c("dm2$Q29_1","dm2$Q29_2","dm2$Q29_3","dm2$Q29_4",
"dm2$Q29_5","dm2$Q29_6","dm2$Q29_7","dm2$Q29_8",
"dm2$Q29_9","dm2$Q29_10"),
Col2=c("Fin Aid","Assignments","Change Majors","Check Grades",
"Class Search","Deg Req","Register","Advisor",
"Progress","Bill Pay"))
names(bargraph5) <- dt2$Col2
summary(bargraph5)
bargraph_long <- bargraph5 %>%
pivot_longer(cols = everything(), names_to = "question", values_to = "response")
bargraph_counts <- bargraph_long %>%
drop_na() %>%
group_by(response) %>%
summarise(count = n())
# Proportion-stacked bars of response per service.
# NOTE(review): gather()'s first (key) argument here is the bare name
# `bargraph5`, so the key column is literally named "bargraph5" -- that
# is what aes(x = bargraph5) maps. Also note the second
# mutate(factor(response)) re-factors the column and discards the
# ordered level sequence set two steps earlier.
bargraph5a <- bargraph5 %>%
gather(bargraph5, value = response, na.rm = TRUE) %>%
# gather(bargraph5, value = response, na.rm = TRUE) %>%
# mutate(response = factor(response)) %>%
mutate(response = factor(response,
levels = c("Very difficult", "Somewhat difficult",
"Somewhat easy", "Very easy"))) %>%
# ggplot(aes(x = bargraph5, fill = response, colour = response)) +
# geom_bar(aes(fill = response), position = "fill") +
# labs(x = " ", y = "Porportion of Responses")
mutate(response = factor(response)) %>%
ggplot(aes(x = bargraph5, fill = response, colour = response)) +
geom_bar(aes(fill = response), position = "fill") +
labs(x = " ", y = "Porportion of Responses")
bargraph5a <- bargraph5a + labs(title = "Rate these Administrative Processes",
subtitle = "undergraduate and graduate students")
print(bargraph5a) # this is the same as 5b
###### reorder the stack and legend
# o <- d %>% filter(Response == "A1") %>% arrange(Percent) %>% extract2("ValueName")
#
# d %>%
# mutate(ValueName = factor(ValueName, o)) %>%
# ggplot() +
# aes(x = ValueName, y = Percent, fill = reorder(Response, plotOrder)) +
# geom_bar(position = "fill", stat = "identity") +
# coord_flip()
# https://stackoverflow.com/questions/32345923/how-to-control-ordering-of-stacked-bar-chart-using-identity-on-ggplot2
# #random order
# p2 <- ggplot(df[sample(1:10),],aes(x=x,y=y,fill=fill_var))+
# geom_bar(stat="identity") + labs(title="Random order")
# #legend checks out, sequence wird
#
# #reverse order
# p3 <- ggplot(df[order(df$fill_var,decreasing=T),],
# aes(x=x,y=y,fill=fill_var))+
# geom_bar(stat="identity") + labs(title="Reverse sort by fill")
#
# plots <- list(p1,p2,p3)
#
# do.call(grid.arrange,plots)
### try to produce graph with better legend
# but this does not look as good
# Put every Q29 item on the shared ordered difficulty scale.
#
# BUG FIX: the first of the original ten statements assigned
#   bargraph5$dm2.Q29_1 <- factor(bargraph5$dm2$Q29_1, ...)
# but bargraph5 has no `dm2` column (its columns were renamed earlier),
# so the factor was built from NULL; like its nine siblings it should
# target dm2$Q29_1. The ten near-identical statements are collapsed
# into one loop.
#
# NOTE(review): bargraph5 was copied from dm2 *before* this point, so it
# does not inherit these levels; rebuild bargraph5 if the ordered scale
# should apply there as well.
q29_levels <- c("Very difficult", "Somewhat difficult",
                "Somewhat easy", "Very easy")
for (q29_col in paste0("Q29_", 1:10)) {
  dm2[[q29_col]] <- factor(dm2[[q29_col]], ordered = TRUE, levels = q29_levels)
}
summary(bargraph5)
# Proportion-stacked view of the Q29 items (legend in default level order).
bargraph5a <- bargraph5 %>%
  gather(key = bargraph5, value = response, na.rm = TRUE) %>%
  mutate(response = factor(response)) %>%
  ggplot(aes(x = bargraph5, colour = response)) +
  geom_bar(aes(fill = response), position = "fill") +
  labs(x = " ", y = "Total Responses",
       title = "Rate these Administrative Processes",
       subtitle = "undergraduate and graduate students")
print(bargraph5a)
# plotting likert scale in R
# This works better
summary(bargraph5)
# Proportion-stacked bars for the Q29 items with the difficulty scale in
# its natural order. FIX: the y-axis label previously misspelled
# "Proportion".
bargraph5b <- bargraph5 %>%
  gather(bargraph5, value = response, na.rm = TRUE) %>%
  mutate(response = factor(response, levels = c("Very difficult",
                                                "Somewhat difficult", "Somewhat easy", "Very easy"))) %>%
  ggplot(aes(x = bargraph5, fill = response, colour = response)) +
  geom_bar(aes(fill = response), position = "fill") +
  labs(x = " ", y = "Proportion of Responses")
bargraph5b <- bargraph5b + labs(title = "How Easy are these Administrative Processes?",
                                subtitle = "undergraduate and graduate students")
print(bargraph5b)
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(dplyr)
library(ggthemes)
library(stringr)
#
# Attempt at a diverging stacked bar for Q30.
# NOTE(review): this block appears broken as written — both geom_bar()
# calls override the piped (pivoted) data with the raw `dm2` via
# data=dm2, and dm2 has no `response`, `question`, or `col` columns, so
# print() should error. Left as-is pending a decision on the intended
# chart; the commented styling below suggests a 538-style diverging plot.
bargraph0f <- dm2 %>%
  pivot_longer(Q30, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar(data=dm2, aes(x = response, y=question, fill=col), position="stack", stat="identity") +
  geom_bar(data=dm2, aes(x = response, y=-question, fill=col), position="stack", stat="identity")
  # geom_hline(yintercept = 0, color =c("white")) +
  # scale_fill_identity("Percent", labels = mylevels, guide="legend") +
  # theme_fivethirtyeight() +
  # coord_flip() +
  # labs(title=mytitle, y="",x="") +
  # theme(plot.title = element_text(size=14, hjust=0.5)) +
  # theme(axis.text.y = element_text(hjust=0)) +
  # theme(legend.position = "bottom") +
  # scale_y_continuous(breaks=seq(mymin,mymax,25), limits=c(mymin,mymax))
print(bargraph0f)
# plotting likert scale in R
# http://rcompanion.org/handbook/E_02.html
# Frequency and proportion tables for Q30. With `data = dm2` supplied,
# the formula should reference the bare column, not dm2$Q30.
XT <- xtabs(~ Q30, data = dm2)
prop.table(XT) #proportion tables
library(likert)
# Result <- likert(dm2)
#
# plot(Result,
# type="heat",
# low.color = "white",
# high.color = "blue",
# text.color = "black",
# text.size = 4,
# wrap = 50)
# install.packages("sjPlot")
# install.packages("sjmisc")
library(sjPlot)
library(sjmisc)
library(dplyr)
# filtering
# Q14 "where do you find community" items plus demographics, reshaped to
# long format (one row per student x community source) and saved to CSV.
q14_items <- c("Q14_1", "Q14_4", "Q14_5", "Q14_6",
               "Q14_7", "Q14_8", "Q14_9", "Q14_10",
               "Q14_11", "Q14_12", "Q14_13")
df14 <- dm2 %>%
  dplyr::select(all_of(q14_items), Age, Graduate)
df14_longer <- df14 %>%
  pivot_longer(cols = all_of(q14_items), names_to = "community")
write.csv(df14_longer, "df14_longer.csv", row.names = FALSE)
####
# filter by different groups
# df14 %>% group_by(Age, Q14_1) %>% summarize(N - n()) %>%
# mutate(pct = round(N/sum(N) *100, 0))
#Error in eval(cols[[col]], .data, parent.frame()) : object 'N' not found
#In addition: Warning message:
# Factor `Q14_1` contains implicit NA, consider using `forcats::fct_explicit_na`
# NOTE(review): summarize(Age) does not aggregate; under dplyr >= 1.0 it
# returns one row per Age value within each Graduate group (with a
# message). Presumably a mean or count was intended — confirm.
df14_longer %>%
  group_by(Graduate) %>%
  summarize(Age) # what is this
# Drop rows with missing Graduate. Comparing against the literal string
# "<NA>" is never TRUE for a real NA, but filter() also drops rows whose
# condition evaluates to NA, so genuine NAs are removed as a side effect
# of this comparison.
rem <- df14_longer %>%
  filter(Graduate != "<NA>") ## retain all not with Graduate
# Undergraduate subset (Graduate == FALSE), then a complete-cases copy.
undergraduates = subset(rem, Graduate==FALSE) #for undergrads about community
#remove NA
na_ugrd <- na.omit(undergraduates)
glimpse(undergraduates)
# Exploratory scatter of community source vs response value; not very
# informative for categorical responses.
ggplot(na_ugrd, aes(x=community, y=value)) +
  geom_point(shape=1) + # Use hollow circles
  geom_smooth() # Add a loess smoothed fit curve with confidence region
#> `geom_smooth()` using method = 'loess'
# # rbind with multiple columns
# # df14_cols <- cols(c("Q14_1", "Q14_4", "Q14_5", "Q14_6",
# "Q14_7", "Q14_8", "Q14_9", "Q14_10",
# "Q14_11", "Q14_12", "Q14_13"))
# # df14$Q14 <- df14(cbind(c("Q14_1", "Q14_4", "Q14_5", "Q14_6",
# "Q14_7", "Q14_8", "Q14_9", "Q14_10",
# "Q14_11", "Q14_12", "Q14_13"))
# https://statisticsglobe.com/cbind-and-rbind-vectors-with-different-length-in-r
# install.packages("qpcR") # Install qpcR package
library("qpcR")
# # data_cbind <- qpcR:::cbind.na("Q14_1", "Q14_4", "Q14_5", "Q14_6",
# "Q14_7", "Q14_8", "Q14_9", "Q14_10",
# "Q14_11", "Q14_12", "Q14_13") # Bind as columns
# # data_cbind
# # Make a stacked bar graph
# # Activate likert and plyr
# library("likert")
# library("plyr")
#
# # A custom function to recode numerical responses into ordered factors
#
# y <- factor(y, levels = c("Strongly disagree", "Disagree", "Agree", "Strongly agree"))
#
# return(y)
# }
# stacked Bar graph for Q3
# Q3: perceived effectiveness of UArizona systems, by student level.
bargraph0g <- dm2 %>%
  pivot_longer(Q3, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = "", y = "Number of Students",
       title = "How effective are UArizona's Systems?",
       subtitle = "")
print(bargraph0g)
# Kernel density estimate of respondent age (base graphics).
density_age <- density(dm2$Age)
plot(density_age)
| /data_visualizations.R | no_license | jmp243/ordinal_logistic | R | false | false | 30,324 | r | # Data Visutalizations
# Jung Mee Park
# jmpark@email.arizona.edu
# create one chart with simple breakdown
# ggplot(dm2, aes(x = Graduate)) +
# geom_bar() +
# coord_flip()
# create another image about number of emails
# cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# Q9graph2 <- dm2 %>%
# # mutate(Q9 = fct_infreq(Q9)) %>%
# # dplyr::filter(Students == "Undergraduate") %>%
# drop_na(Q9) %>%
# ggplot(aes(x = Q9, fill = Students, colour = Students)) +
# geom_bar() +
# coord_flip() +
# labs(x = "redundant emails", y = "responses")
# Q7: number of university emails received per week, split by student
# level. FIX: the original also mapped `response = Students`, which is
# not a ggplot aesthetic — it only produced an "ignoring unknown
# aesthetics" warning — so it is removed.
Q7graph <- dm2 %>%
  drop_na(Q7) %>%
  ggplot(aes(x = Q7, fill = Students)) +
  geom_bar() +
  coord_flip() +
  labs(x = "emails per week", y = "responses")
Q7graph <- Q7graph + labs(title = "How many emails do Students Get?",
                          subtitle = "undergraduate and graduate students")
print(Q7graph) # 16 or more is 907 and that is about 40%
# pie chart for Q3
# slices <- c(458, 422, 430, 645, 482)
# lbls <- c("Freshmen", "Sophomore", "Junior", "Senior","Grad&Prof")
# pct <- round(slices/sum(slices)*100)
# lbls <- paste(lbls, pct) # add percents to labels
# lbls <- paste(lbls,"%",sep="") # add % to labels
# pie(slices,labels = lbls, col=terrain.colors(length(lbls)))
# pie(slices,labels = lbls, col=terrain.colors(length(lbls)), main="Distribution of Survey")
# Q7pie <- dm2 %>%
# # mutate(Q7 = fct_infreq(Q7)) %>%
# drop_na(Q7) %>%
# ggplot(aes(x = Q7), y=response, fill = Q7, response = Q7) +
# geom_bar(fill = "lightblue", color = "grey") +
# coord_polar("y", start=0) +
# theme_void()
#
# print(Q7pie)
# # just for FB
# dm2 %>%
# # %>% mutate(Q11_1 = fct_infreq(Q11_1)) %>%
# ggplot(aes(x = Q11_1)) +
# geom_bar() +
# coord_flip()
# redudant emails Q9
# Q9: perceived redundancy of university emails, all respondents.
Q9graph <- dm2 %>%
  drop_na(Q9) %>%
  ggplot(aes(x = Q9)) +
  geom_bar(fill = "lightblue", color = "grey") +
  coord_flip() +
  labs(x = "redundancy", y = "responses",
       title = "How redundant are these emails?",
       subtitle = "undergraduate and graduate")
print(Q9graph)
# redudant emails Q9 undergraduates only
# Q9 restricted to undergraduates, with bars ordered by frequency.
Q9graph1 <- dm2 %>%
  mutate(Q9 = fct_infreq(Q9)) %>%
  drop_na(Q9) %>%
  dplyr::filter(Career == "UGRD") %>%
  ggplot(aes(x = Q9)) +
  geom_bar(fill = "lightblue", color = "grey") +
  coord_flip() +
  labs(x = "redundant emails", y = "responses",
       title = "How redundant are these emails?",
       subtitle = "undergraduate only")
print(Q9graph1)
########33
# redudant emails Q9 undergraduates only
# try to find a
# bargraph0c <- dm2 %>%
# pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
# values_drop_na = TRUE) %>%
# ggplot(aes(x = response, fill = Students, colour = Students)) +
# geom_bar() +
# # geom_text(stat='count', aes(label=..count..), vjust=0) +
# geom_boxplot() +
# # scale_color_brewer(palette = "Blues") +
# labs(x = "Type of Social Media Apps", y = "Number of Uses")
# Q9 split by student level (stacked counts).
Q9graph2 <- dm2 %>%
  drop_na(Q9) %>%
  ggplot(aes(x = Q9, fill = Students, colour = Students)) +
  geom_bar() +
  coord_flip() +
  labs(x = "redundant emails", y = "responses",
       title = "How redundant are these emails?",
       subtitle = "undergraduate and graduate students")
print(Q9graph2)
# Q15
# reorder
# levels(dm2$Q15) <- c("Very difficult", "Somewhat difficult",
# "Somewhat easy", "Very easy")
# Q15: ease of getting involved on campus, as an ordered factor so the
# difficulty scale plots in its natural order.
q15_levels <- c("Very difficult", "Somewhat difficult",
                "Somewhat easy", "Very easy")
dm2$Q15 <- factor(dm2$Q15, ordered = TRUE, levels = q15_levels)
# BUG FIX: the original piped through fct_infreq(), which re-ordered the
# bars by frequency and discarded the ordered scale set just above; it
# also passed stat = "identity" to ggplot(), which is not a ggplot()
# argument. Both are removed.
Q15graph <- dm2 %>%
  drop_na(Q15) %>%
  ggplot(aes(x = Q15)) +
  geom_bar(fill = "lightblue", color = "grey") +
  coord_flip() +
  labs(x = "", y = "responses")
Q15graph <- Q15graph + labs(title = "How easy is it for students to get involved?",
                            subtitle = "undergraduate and graduate")
print(Q15graph)
### emails that pertain to me
# Q8: do mass emails feel relevant? Split by student level.
Q9graph3 <- dm2 %>%
  drop_na(Q8) %>%
  ggplot(aes(x = Q8, fill = Students, colour = Students)) +
  geom_bar() +
  coord_flip() +
  labs(x = "Emails Relevant to Me", y = "Responses",
       title = "Do these emails pertain to me?",
       subtitle = "undergraduate and graduate students")
print(Q9graph3)
### data visualization
# catagorical or qualitative
# ordinal
dim(dm2)
summary(dm2)
# pie chart
# Survey composition pies. Counts are hard-coded from the Career table
# (first pie) and from class standing (second pie); each slice label
# carries its percentage share. Each chart is drawn twice, the second
# time with a title, matching the original behavior.
table(data$Career)
pie_with_pct <- function(counts, labels_in, title) {
  shares <- round(counts / sum(counts) * 100)
  lab <- paste0(labels_in, " ", shares, "%")
  pie(counts, labels = lab, col = terrain.colors(length(lab)))
  pie(counts, labels = lab, col = terrain.colors(length(lab)), main = title)
}
pie_with_pct(c(35, 403, 30, 1955, 14),
             c("law", "grad", "med", "undergraduate", "phrm"),
             "Distribution of Survey")
pie_with_pct(c(458, 422, 430, 645, 482),
             c("Freshmen", "Sophomore", "Junior", "Senior", "Graduate & Professional"),
             "Distribution of Survey")
# correlation plots
# Activate the dplyr package
library("dplyr")
# Correlation matrix of items
# install.packages("ggcorrplot")
# install.packages("lsr")
library(lsr)
# Correlation matrix of items
# Pairwise correlations among the Age and CumGPA2191 columns.
numeric_cols <- data %>%
  dplyr::select(starts_with(c("Age", "CumGPA2191")))
cormat <- cor(numeric_cols, use = "pairwise.complete.obs")
library("corrplot")
# Cluster correlated variables together and box the two main clusters.
corrplot(cormat,
         order = "hclust",
         addrect = 2)
# function to get chi square p value and Cramers V
# https://stackoverflow.com/questions/52554336/plot-the-equivalent-of-correlation-matrix-for-factors-categorical-data-and-mi
#
# f = function(x,y) {
# tbl = dm2 %>% select(x,y) %>% table()
# chisq_pval = round(chisq.test(tbl)$p.value, 4)
# cramV = round(cramersV(tbl), 4)
# data.frame(x, y, chisq_pval, cramV) }
#
# # create unique combinations of column names
# # sorting will help getting a better plot (upper triangular)
# df_comb = data.frame(t(combn(sort(names(dm2)), 2)), stringsAsFactors = F)
#
# # apply function to each variable combination
# df_res = map2_df(df_comb$Age, df_comb$Q7, f)
#
# # plot results
# df_res %>%
# ggplot(aes(x,y,fill=chisq_pval))+
# geom_tile()+
# geom_text(aes(x,y,label=cramV))+
# scale_fill_gradient(low="red", high="yellow")+
# theme_classic()
#
# require(rcompanion)
# # Calculate a pairwise association between all variables in a data-frame. In particular nominal vs nominal with Chi-square, numeric vs numeric with Pearson correlation, and nominal vs numeric with ANOVA.
# # Adopted from https://stackoverflow.com/a/52557631/590437
# mixed_assoc = function(dm2, cor_method="spearman", adjust_cramersv_bias=TRUE){
# df_comb = expand.grid(names(dm2), names(dm2), stringsAsFactors = F) %>% set_names("X1", "X2")
#
# is_nominal = function(x) class(x) %in% c("factor", "character")
# # https://community.rstudio.com/t/why-is-purr-is-numeric-deprecated/3559
# # https://github.com/r-lib/rlang/issues/781
# is_numeric <- function(x) { is.integer(x) || is_double(x)}
#
# f = function(xName,yName) {
# x = pull(df, xName)
# y = pull(df, yName)
#
# result = if(is_nominal(x) && is_nominal(y)){
# # use bias corrected cramersV as described in https://rdrr.io/cran/rcompanion/man/cramerV.html
# cv = cramerV(as.character(x), as.character(y), bias.correct = adjust_cramersv_bias)
# data.frame(xName, yName, assoc=cv, type="cramersV")
#
# }else if(is_numeric(x) && is_numeric(y)){
# correlation = cor(x, y, method=cor_method, use="complete.obs")
# data.frame(xName, yName, assoc=correlation, type="correlation")
#
# }else if(is_numeric(x) && is_nominal(y)){
# # from https://stats.stackexchange.com/questions/119835/correlation-between-a-nominal-iv-and-a-continuous-dv-variable/124618#124618
# r_squared = summary(lm(x ~ y))$r.squared
# data.frame(xName, yName, assoc=sqrt(r_squared), type="anova")
#
# }else if(is_nominal(x) && is_numeric(y)){
# r_squared = summary(lm(y ~x))$r.squared
# data.frame(xName, yName, assoc=sqrt(r_squared), type="anova")
#
# }else { #does not work from here
# warning(paste("unmatched column type combination: ", class(x), class(y)))
# }
#
# # finally add complete obs number and ratio to table
# result %>% mutate(complete_obs_pairs=sum(!is.na(x) & !is.na(y)), complete_obs_ratio=complete_obs_pairs/length(x)) %>% rename(x=xName, y=yName)
# }
#
# # apply function to each variable combination
# map2_df(df_comb$X1, df_comb$X2, f)
# }
#
# this bar graph includes counts from Q11
# use this with no NA
# Which messaging/social apps do students use (Q11, multi-select)?
# NOTE(review): no Career filter is applied here, so despite the
# subtitle this chart covers all respondents — confirm intent.
bargraph0 <- dm2 %>%
  pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = response, colour = response)) +
  geom_bar() +
  geom_text(stat = 'count', aes(label = ..count..), vjust = -1) +
  geom_boxplot() +
  labs(x = "Type of Social Media Apps", y = "Number of Uses")
bargraph0 <- bargraph0 + labs(title = "Which Apps do Students Use",
                              subtitle = "multiple selection allowed,
                              undergraduates only")
print(bargraph0)
# subset with Graduates only
# App usage (Q11) for graduate/professional students only.
bargraph0a <- dm2 %>%
  dplyr::filter(Career != "UGRD") %>%
  pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = response, colour = response)) +
  geom_bar() +
  geom_text(stat = 'count', aes(label = ..count..), vjust = -1) +
  geom_boxplot() +
  labs(x = "Type of Social Media Apps", y = "Number of Uses",
       title = "Which Apps do Students Use",
       subtitle = "multiple selection allowed, graduates only")
print(bargraph0a)
# subset with ugrd only
# App usage (Q11) for undergraduates only.
bargraph0b <- dm2 %>%
  dplyr::filter(Career == "UGRD") %>%
  pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = response, colour = response)) +
  geom_bar() +
  geom_text(stat = 'count', aes(label = ..count..), vjust = -1) +
  geom_boxplot() +
  labs(x = "Type of Social Media Apps", y = "Number of Uses",
       title = "Which Apps do Students Use",
       subtitle = "multiple selection allowed, undergraduates only")
print(bargraph0b)
# more attempts
# stacked bar graph in R
# https://www.statology.org/stacked-barplot-in-r/
# ggplot(df, aes(fill=position, y=points, x=team)) +
# geom_bar(position='stack', stat='identity') +
# theme_minimal() +
# labs(x='Team', y='Points', title='Avg. Points Scored by Position & Team') +
# theme(plot.title = element_text(hjust=0.5, size=20, face='bold')) +
# scale_fill_manual('Position', values=c('coral2', 'steelblue', 'pink'))
# bargraph0c <- dm2 %>%
# pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
# values_drop_na = TRUE) %>%
# ggplot(aes(x = response, fill = Career, colour = Career)) +
# geom_bar() +
# geom_text(stat='count', aes(label=..count..), vjust=-.5) +
# geom_boxplot() +
# scale_color_brewer(palette = "Blues") +
# labs(x = "Type of Social Media Apps", y = "Number of Uses")
#
# bargraph0c <- bargraph0c + labs(title = "Which Apps do Students Use",
# subtitle = "multiple selection allowed")
# print(bargraph0c)
# stacked bar with other subset
# recode Graduate variable
# x <- factor(c("alpha","beta","gamma","alpha","beta"))
# levels(x) <- list(A="alpha", B="beta", C="gamma")
# levels(data$Graduate)
# dm2$Graduate <- recode_factor(dm2$Graduate, "2" = "Graduate", "1" = "Undergraduate")
# dm2$Graduate = factor(dm2$Graduate,
# ordered = FALSE,
# levels = c("1", "2")) # more warnings with this one
# Duplicate data
# Rename the levels of Graduate. With `levels<-` and a named list the
# *names* become the new levels and the values are the old ones, so this
# maps "Graduate" -> "TRUE" and "Undergraduate" -> "FALSE".
levels(dm2$Graduate) <- list("TRUE" = "Graduate", "FALSE" = "Undergraduate")
library(forcats)
# Copy Graduate into Students and map the TRUE/FALSE levels back to
# readable labels for use in plot legends.
dm2$Students <- as.factor(dm2$Graduate)
is.factor(dm2$Students)  # sanity check; prints TRUE
dm2$Students <- recode_factor(dm2$Students, "TRUE" = "Graduate", "FALSE" = "Undergraduate") # this worked
# dm2$Students <- recode(dm2$Students, "2" == "Graduate", "1" == "Undergraduate")
# fct_recode(dm2$Graduate, "TRUE" = "Graduate", "FALSE" = "Undergraduate")
# dm2$Graduate[dm2$Graduate== "TRUE"] <- "Graduate"
# dm2$Graduate[dm2$Graduate == "1"] <- "Undergraduate"
# Convert to a factor
# dm2$Graduate <- as.factor(dm2$Graduate)
# dplyr::mutate(Career=recode(Career,'UGRD'='Undergrad',
# 'GRAD' ='Graduate', 'LAW'='Graduate', 'MEDS'='Graduate', 'PHRM'='Graduate')) %>%
# App usage (Q11) stacked by Students (graduate vs undergraduate).
bargraph0c <- dm2 %>%
  pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = "Type of Social Media Apps", y = "Number of Uses",
       title = "Which Apps do Students Use",
       subtitle = "multiple selection allowed")
print(bargraph0c)
# different variable class standing
# App usage (Q11) stacked by academic class standing.
bargraph0d <- dm2 %>%
  pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Acad.Class.Standing, colour = Acad.Class.Standing)) +
  geom_bar() +
  geom_text(stat = 'count', aes(label = ..count..), vjust = -.5) +
  geom_boxplot() +
  labs(x = "Type of Social Media Apps", y = "Number of Uses",
       title = "Which Apps do Students Use",
       subtitle = "multiple selection allowed")
print(bargraph0d)
#####
# bargraph0c <- dm2 %>%
# pivot_longer(Q11_1:Q11_10, names_to = "question", values_to = "response",
# values_drop_na = TRUE) %>%
# ggplot(aes(x = response, fill = Students, colour = Students)) +
# geom_bar() +
# # geom_text(stat='count', aes(label=..count..), vjust=0) +
# geom_boxplot() +
# # scale_color_brewer(palette = "Blues") +
# labs(x = "Type of Social Media Apps", y = "Number of Uses")
#
# bargraph0c <- bargraph0c + labs(title = "Which Apps do Students Use",
# subtitle = "multiple selection allowed")
# print(bargraph0c)
# for campus tools
# Q5: satisfaction with campus tools, stacked by student level.
bargraph2 <- dm2 %>%
  pivot_longer(Q5_1:Q5_10, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = " ", y = "Total Responses",
       title = "Overall Satisfaction with Campus Tools",
       subtitle = "undergraduate and graduate students")
print(bargraph2)
# for campus involvement
# Q14: where students find community, stacked by student level.
bargraph3 <- dm2 %>%
  pivot_longer(Q14_1:Q14_11, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = " ", y = "Total Responses",
       title = "Student Involvement",
       subtitle = "undergraduate and graduate students")
print(bargraph3)
# another attempt for campus involvement
# Where do students find community (Q14_*), all respondents.
# BUG FIX: the original chained filters such as
#   dplyr::filter(as.integer(Q14_1) == 'ASUA')
# comparing an integer code against a string; every comparison is
# NA/FALSE, so the chain always returned zero rows and the plot was
# empty. The selection columns are one-choice-per-column, so pivoting
# with values_drop_na = TRUE is sufficient.
bargraph3a <- dm2 %>%
  pivot_longer(Q14_1:Q14_13, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, colour = response)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = " ", y = "Total Responses")
bargraph3a <- bargraph3a + labs(title = "Student Involvement",
                                subtitle = " ")
print(bargraph3a)
# stacked bar graph for Q30
# Q30: likelihood of recommending UArizona, by student level.
bargraph0e <- dm2 %>%
  pivot_longer(Q30, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = "", y = "Number of Students",
       title = "Will you be recommending UArizona to others?",
       subtitle = "")
print(bargraph0e)
# for Q 18
# What is important to you?
# Q18: issues important to students, stacked by student level.
bargraph4 <- dm2 %>%
  pivot_longer(Q18_1:Q18_8, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar() +
  geom_boxplot() +
  labs(x = " ", y = "Total Responses",
       title = "Issues Important to Students",
       subtitle = "undergraduate and graduate students")
print(bargraph4)
# for Q29
# rate these services
# Rate these administrative services
# Assemble the ten Q29 items into one frame, then relabel the columns
# with readable process names (dt2 maps raw column -> label).
bargraph5 <- data.frame(dm2$Q29_1, dm2$Q29_2, dm2$Q29_3, dm2$Q29_4, dm2$Q29_5,
                        dm2$Q29_6, dm2$Q29_7, dm2$Q29_8, dm2$Q29_9, dm2$Q29_10)
dt2 <- data.frame(
  Col1 = c("dm2$Q29_1", "dm2$Q29_2", "dm2$Q29_3", "dm2$Q29_4",
           "dm2$Q29_5", "dm2$Q29_6", "dm2$Q29_7", "dm2$Q29_8",
           "dm2$Q29_9", "dm2$Q29_10"),
  Col2 = c("Fin Aid", "Assignments", "Change Majors", "Check Grades",
           "Class Search", "Deg Req", "Register", "Advisor",
           "Progress", "Bill Pay")
)
names(bargraph5) <- dt2$Col2
summary(bargraph5)
# Long form of the Q29 frame plus per-response counts (NAs excluded).
bargraph_long <- bargraph5 %>%
  pivot_longer(cols = everything(), names_to = "question", values_to = "response")
bargraph_counts <- bargraph_long %>%
  drop_na() %>%
  count(response, name = "count")
# Proportion-stacked bars for the ten Q29 administrative-process items.
# BUG FIX: the original applied mutate(response = factor(response)) a
# second time after the explicit difficulty-scale ordering (redundant),
# and the y-axis label misspelled "Proportion".
bargraph5a <- bargraph5 %>%
  gather(bargraph5, value = response, na.rm = TRUE) %>%
  mutate(response = factor(response,
                           levels = c("Very difficult", "Somewhat difficult",
                                      "Somewhat easy", "Very easy"))) %>%
  ggplot(aes(x = bargraph5, fill = response, colour = response)) +
  geom_bar(aes(fill = response), position = "fill") +
  labs(x = " ", y = "Proportion of Responses")
bargraph5a <- bargraph5a + labs(title = "Rate these Administrative Processes",
                                subtitle = "undergraduate and graduate students")
print(bargraph5a) # this is the same as 5b
###### reorder the stack and legend
# o <- d %>% filter(Response == "A1") %>% arrange(Percent) %>% extract2("ValueName")
#
# d %>%
# mutate(ValueName = factor(ValueName, o)) %>%
# ggplot() +
# aes(x = ValueName, y = Percent, fill = reorder(Response, plotOrder)) +
# geom_bar(position = "fill", stat = "identity") +
# coord_flip()
# https://stackoverflow.com/questions/32345923/how-to-control-ordering-of-stacked-bar-chart-using-identity-on-ggplot2
# #random order
# p2 <- ggplot(df[sample(1:10),],aes(x=x,y=y,fill=fill_var))+
# geom_bar(stat="identity") + labs(title="Random order")
# #legend checks out, sequence wird
#
# #reverse order
# p3 <- ggplot(df[order(df$fill_var,decreasing=T),],
# aes(x=x,y=y,fill=fill_var))+
# geom_bar(stat="identity") + labs(title="Reverse sort by fill")
#
# plots <- list(p1,p2,p3)
#
# do.call(grid.arrange,plots)
### try to produce graph with better legend
# but this does not look as good
# Put every Q29 item on the shared ordered difficulty scale.
#
# BUG FIX: the first of the original ten statements assigned
#   bargraph5$dm2.Q29_1 <- factor(bargraph5$dm2$Q29_1, ...)
# but bargraph5 has no `dm2` column (its columns were renamed earlier),
# so the factor was built from NULL; like its nine siblings it should
# target dm2$Q29_1. The ten near-identical statements are collapsed
# into one loop.
#
# NOTE(review): bargraph5 was copied from dm2 *before* this point, so it
# does not inherit these levels; rebuild bargraph5 if the ordered scale
# should apply there as well.
q29_levels <- c("Very difficult", "Somewhat difficult",
                "Somewhat easy", "Very easy")
for (q29_col in paste0("Q29_", 1:10)) {
  dm2[[q29_col]] <- factor(dm2[[q29_col]], ordered = TRUE, levels = q29_levels)
}
summary(bargraph5)
# Proportion-stacked view of the Q29 items (legend in default level order).
bargraph5a <- bargraph5 %>%
  gather(key = bargraph5, value = response, na.rm = TRUE) %>%
  mutate(response = factor(response)) %>%
  ggplot(aes(x = bargraph5, colour = response)) +
  geom_bar(aes(fill = response), position = "fill") +
  labs(x = " ", y = "Total Responses",
       title = "Rate these Administrative Processes",
       subtitle = "undergraduate and graduate students")
print(bargraph5a)
# plotting likert scale in R
# This works better
summary(bargraph5)
# Proportion-stacked bars for the Q29 items with the difficulty scale in
# its natural order. FIX: the y-axis label previously misspelled
# "Proportion".
bargraph5b <- bargraph5 %>%
  gather(bargraph5, value = response, na.rm = TRUE) %>%
  mutate(response = factor(response, levels = c("Very difficult",
                                                "Somewhat difficult", "Somewhat easy", "Very easy"))) %>%
  ggplot(aes(x = bargraph5, fill = response, colour = response)) +
  geom_bar(aes(fill = response), position = "fill") +
  labs(x = " ", y = "Proportion of Responses")
bargraph5b <- bargraph5b + labs(title = "How Easy are these Administrative Processes?",
                                subtitle = "undergraduate and graduate students")
print(bargraph5b)
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(dplyr)
library(ggthemes)
library(stringr)
#
# Attempt at a diverging stacked bar for Q30.
# NOTE(review): this block appears broken as written — both geom_bar()
# calls override the piped (pivoted) data with the raw `dm2` via
# data=dm2, and dm2 has no `response`, `question`, or `col` columns, so
# print() should error. Left as-is pending a decision on the intended
# chart; the commented styling below suggests a 538-style diverging plot.
bargraph0f <- dm2 %>%
  pivot_longer(Q30, names_to = "question", values_to = "response",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = response, fill = Students, colour = Students)) +
  geom_bar(data=dm2, aes(x = response, y=question, fill=col), position="stack", stat="identity") +
  geom_bar(data=dm2, aes(x = response, y=-question, fill=col), position="stack", stat="identity")
  # geom_hline(yintercept = 0, color =c("white")) +
  # scale_fill_identity("Percent", labels = mylevels, guide="legend") +
  # theme_fivethirtyeight() +
  # coord_flip() +
  # labs(title=mytitle, y="",x="") +
  # theme(plot.title = element_text(size=14, hjust=0.5)) +
  # theme(axis.text.y = element_text(hjust=0)) +
  # theme(legend.position = "bottom") +
  # scale_y_continuous(breaks=seq(mymin,mymax,25), limits=c(mymin,mymax))
print(bargraph0f)
# plotting likert scale in R
# http://rcompanion.org/handbook/E_02.html
# Frequency and proportion tables for Q30. With `data = dm2` supplied,
# the formula should reference the bare column, not dm2$Q30.
XT <- xtabs(~ Q30, data = dm2)
prop.table(XT) #proportion tables
library(likert)
# Result <- likert(dm2)
#
# plot(Result,
# type="heat",
# low.color = "white",
# high.color = "blue",
# text.color = "black",
# text.size = 4,
# wrap = 50)
# install.packages("sjPlot")
# install.packages("sjmisc")
library(sjPlot)
library(sjmisc)
library(dplyr)
# filtering
# Q14 "where do you find community" items plus demographics, reshaped to
# long format (one row per student x community source) and saved to CSV.
q14_items <- c("Q14_1", "Q14_4", "Q14_5", "Q14_6",
               "Q14_7", "Q14_8", "Q14_9", "Q14_10",
               "Q14_11", "Q14_12", "Q14_13")
df14 <- dm2 %>%
  dplyr::select(all_of(q14_items), Age, Graduate)
df14_longer <- df14 %>%
  pivot_longer(cols = all_of(q14_items), names_to = "community")
write.csv(df14_longer, "df14_longer.csv", row.names = FALSE)
####
# filter by different groups
# df14 %>% group_by(Age, Q14_1) %>% summarize(N - n()) %>%
# mutate(pct = round(N/sum(N) *100, 0))
#Error in eval(cols[[col]], .data, parent.frame()) : object 'N' not found
#In addition: Warning message:
# Factor `Q14_1` contains implicit NA, consider using `forcats::fct_explicit_na`
# NOTE(review): summarize(Age) below returns one row per Graduate x Age
# combination rather than a summary statistic -- likely exploratory leftovers.
df14_longer %>%
group_by(Graduate) %>%
summarize(Age) # what is this
# Drop rows whose Graduate value is the literal "<NA>" label; rows with a
# real NA are also dropped because filter() discards NA conditions.
rem <- df14_longer %>%
filter(Graduate != "<NA>") ## retain all not with Graduate
undergraduates = subset(rem, Graduate==FALSE) #for undergrads about community
#remove NA
na_ugrd <- na.omit(undergraduates)
# NOTE(review): glimpse() inspects the pre-na.omit data while the plot
# below uses na_ugrd -- presumably intentional, but worth confirming.
glimpse(undergraduates)
# this graph tells me very little
ggplot(na_ugrd, aes(x=community, y=value)) +
geom_point(shape=1) + # Use hollow circles
geom_smooth() # Add a loess smoothed fit curve with confidence region
#> `geom_smooth()` using method = 'loess'
# # rbind with multiple columns
# # df14_cols <- cols(c("Q14_1", "Q14_4", "Q14_5", "Q14_6",
# "Q14_7", "Q14_8", "Q14_9", "Q14_10",
# "Q14_11", "Q14_12", "Q14_13"))
# # df14$Q14 <- df14(cbind(c("Q14_1", "Q14_4", "Q14_5", "Q14_6",
# "Q14_7", "Q14_8", "Q14_9", "Q14_10",
# "Q14_11", "Q14_12", "Q14_13"))
# https://statisticsglobe.com/cbind-and-rbind-vectors-with-different-length-in-r
# install.packages("qpcR") # Install qpcR package
# NOTE(review): qpcR is loaded for its cbind.na() helper, but the calls
# using it below are all commented out -- this attach may be removable.
library("qpcR")
# # data_cbind <- qpcR:::cbind.na("Q14_1", "Q14_4", "Q14_5", "Q14_6",
# "Q14_7", "Q14_8", "Q14_9", "Q14_10",
# "Q14_11", "Q14_12", "Q14_13") # Bind as columns
# # data_cbind
# # Make a stacked bar graph
# # Activate likert and plyr
# library("likert")
# library("plyr")
#
# # A custom function to recode numerical responses into ordered factors
#
# y <- factor(y, levels = c("Strongly disagree", "Disagree", "Agree", "Strongly agree"))
#
# return(y)
# }
# stacked Bar graph for Q3
# NOTE(review): this layers geom_bar() (counts) with geom_boxplot() on a
# categorical response; the boxplot layer is likely an experiment left in --
# TODO confirm it is intended.
bargraph0g <- dm2 %>%
pivot_longer(Q3, names_to = "question", values_to = "response",
values_drop_na = TRUE) %>%
ggplot(aes(x = response, fill = Students, colour = Students)) +
geom_bar() +
# geom_text(stat='count', aes(label=..count..), vjust=0) +
geom_boxplot() +
# scale_color_brewer(palette = "Blues") +
labs(x = "", y = "Number of Students")
# Add the title in a second step so the base plot can be reused.
bargraph0g <- bargraph0g + labs(title = "How effective are UArizona's Systems?",
subtitle = "")
print(bargraph0g)
# density plot for age
density_age <- density(dm2$Age)
plot(density_age)
# ---- Separate script: ACS (psam_p11.csv) health-insurance analysis ----
library(tidyverse)
library(naivebayes)
library(rpart)
library(randomForest)
library(pROC)
library(ggplot2)
library(ggrepel)
library(formattable)
# Interactive shorthands for common inspection functions.
# NOTE(review): `t <- tail` masks base::t (matrix transpose) and
# `s <- summary` masks nothing but invites confusion -- consider removing.
g <- glimpse
h <- head
s <- summary
t <- tail
# dc: raw ACS PUMS person-level records for area 11 (District of Columbia).
dc <- read_csv("psam_p11.csv")
# variables = colnames of interest
# ACS PUMS column codes considered for this analysis.  variable_comments
# below gives the human-readable description for each code; the two
# vectors are paired position-by-position, so their lengths must match.
variables <- c('ST', 'ADJINC', "PWGTP", 'CIT', 'CITWP', 'COW', 'ENG', 'HINS1',
'HINS2', 'HINS3', 'HINS4', 'INTP', 'JWTR', 'LANX', 'MAR', 'MARHT',
'RETP', "SCH", 'SCHL', 'SEMP', 'SEX', 'SSP', "WAGP", 'YOEP', 'ANC',
'ANC1P', 'DECADE', 'DRIVESP', 'ESR', 'FOD1P', 'HICOV', 'INDP',
'JWAP', 'JWDP', 'LANP', 'MIGPUMA', 'MIGSP', 'NAICSP', 'NATIVITY',
'NOP', 'OC', 'OCCP', 'PERNP', 'PINCP', 'POBP', 'POVPIP',
'POWPUMA', 'POWSP', 'PRIVCOV', 'PUBCOV', 'RAC1P', 'RAC2P',
'RAC3P', 'RACAIAN', 'RACASN', 'RACBLK', 'RACNH', 'RACWHT',
'SCIENGP', 'SCIENGRLP', 'SFN', 'SFR', 'SOCP', 'WAOB')
# Descriptions aligned one-to-one with `variables` above.
variable_comments <- c('State code',
'Adjustment factor for income and earnings dollar amounts',
"Person's weight",
'Citizenship status',
'Year of naturalization write-in',
'Class of worker',
'Ability to speak English',
'Insurance through a current or former employer or union',
'Insurance purchased directly from an insurance company',
'Medicare, for people 65 and older, or people with certain disabilities',
'Medicaid, Medical Assistance, or any kind of government-assistance plan for those with low incomes or a disability',
'Interest, dividends, and net rental income past 12 months',
'Means of transportation to work',
'Language other than English spoken at home',
'Marital status',
'Number of times married',
'Retirement income past 12 months',
'School enrollment',
'Educational level',
'Self-employment income past 12 months',
'Sex', 'Social Security income past 12 months',
'Wages or salary income past 12 months',
'Year of entry to the US',
'Ancestry recode', 'Recoded Detailed Ancestry - first entry',
'Decade of entry',
'Number of vehicles calculated from JWRI',
'Employment status recode',
'Recoded field of degree - first entry',
'Health insurance coverage recode',
'Industry recode for 2013 and later based on 2012 IND codes',
'Time of arrival at work - hour and minute',
'Time of departure for work - hour and minute',
'Language spoken at home',
'Migration PUMA based on 2010 Census definition',
'Migration recode - State or foreign country code',
'NAICS Industry recode for 2013 and later based on 2012 NAICS codes',
'Nativity', 'Nativity of parent', 'Own child',
'Occupation recode for 2012 and later based on 2010 OCC codes',
'Total person\'s earnings', 'Total person\'s income',
'Place of birth (Recode)', 'Income-to-poverty ratio recode',
'Place of work PUMA based on 2010 Census definition',
'Place of work - State or foreign country recode',
'Private health insurance coverage recode',
'Public health coverage recode',
'Recoded detailed race code',
'Recoded detailed race code',
'Recoded detailed race code',
'American Indian and Alaska Native recode (American Indian and Alaska Native alone or in combination with one or more other races)',
'Asian recode (Asian alone or in combination with one or more other races)',
'Black or African American recode (Black alone or in combination with one or more other races)',
'Native Hawaiian recode (Native Hawaiian alone or in combination with one or more other races)',
'White recode (White alone or in combination with one or more other races)',
'Field of Degree Science and Engineering Flag - NSF Definition',
'Field of Degree Science and Engineering Related Flag - NSF Definition',
'Subfamily number', 'Subfamily relationship',
'SOC Occupation code for 2012 and later based on 2010 SOC codes',
'World area of birth')
# var: lookup table pairing each ACS variable code with its description.
var <- data.frame(variables = variables, description = variable_comments)

# Descriptions of the five variables this analysis focuses on.
var_of_interest <- c(
  "Ability to speak English",
  "Insurance through a current or former employer or union",
  "Insurance purchased directly from an insurance company",
  "Educational level",
  "Health insurance coverage recode"
)

# var1: the variable codes whose descriptions match the list above.
var1 <- var %>% filter(description %in% var_of_interest)

# dc1: keep only the columns of interest and drop rows with no recorded
# English-ability level (ENG).
dc1 <- dc %>%
  select(var1$variables) %>%
  filter(!is.na(ENG))
# ENG_d: lookup table mapping the numeric ENG code (1-4) onto an ordered
# English-ability factor.  Code 1 = "Very Well" ... code 4 = "Not at all",
# while the factor levels run from worst to best for plotting order.
eng_levels <- c("Not at all", "Not Well", "Well", "Very Well")
ENG_d <- data.frame(
  ENG = 1:4,
  English = factor(rev(eng_levels), levels = eng_levels)
)
# Educational-attainment labels for SCHL codes 1-24, in ascending order.
# Listing them once (the original duplicated the whole 24-item vector a
# second time just to set the factor levels) keeps labels and levels in
# sync by construction and drops the redundant mutate() step.
edu_levels <- c(
  "No Schooling Completed",
  "Nursery School or Pre-school",
  "Kindergarten",
  "Grade 1",
  "Grade 2",
  "Grade 3",
  "Grade 4",
  "Grade 5",
  "Grade 6",
  "Grade 7",
  "Grade 8",
  "Grade 9",
  "Grade 10",
  "Grade 11",
  "12th Grade - no diploma",
  "Regular high school diploma",
  "GED or alternative credential",
  "Some college, but less than 1 year",
  "1 or more years of college credit, no degree",
  "Associate's degree",
  "Bachelor's degree",
  "Master's degree",
  "Professional degree beyond a bachelor's degree",
  "Doctorate degree"
)
# SCHL_d: lookup table mapping the numeric SCHL code onto an ordered
# Education factor (levels in ascending attainment order).
SCHL_d <- data.frame(
  SCHL = 1:24,
  Education = factor(edu_levels, levels = edu_levels)
)
# dc3: dc1 with numeric codes converted to human-readable categoricals.
# FIX: the original pipeline was severed after mutate() -- `dc3 <- dc1 %>%
# mutate(...)` ended the statement, leaving the subsequent left_join() and
# mutate() calls as free-standing (erroring) expressions.  Re-joined into
# one continuous chain.
dc3 <- dc1 %>%
  mutate(SCHL = as.integer(SCHL)) %>%
  # converting ENG (English level) to a categorical variable
  left_join(ENG_d, by = "ENG") %>%
  # converting SCHL (educational attainment) to a categorical variable
  left_join(SCHL_d, by = "SCHL") %>%
  # converting HINS1 (work health insurance), HINS2 (private health
  # insurance), HICOV (any health insurance) to Yes/No labels ...
  mutate(Work_Insurance = ifelse(HINS1 == 1, "Yes", "No"),
         Private_Insurance = ifelse(HINS2 == 1, "Yes", "No"),
         Health_Insurance_Covered = ifelse(HICOV == 1, "Yes", "No")) %>%
  # ... and recode the raw flags to 1/0 indicators for modeling
  mutate(HINS1 = ifelse(HINS1 == 1, 1, 0),
         HINS2 = ifelse(HINS2 == 1, 1, 0),
         HICOV = ifelse(HICOV == 1, 1, 0))
# dc4: drop rows whose Education could not be matched in the join.
dc4 <- dc3[!is.na(dc3$Education), ]
# data cleaning for observation number
# dc5: character versions of the factors, used for building count tables.
dc5 <- dc4 %>%
mutate(English = as.character(English),
Education = as.character(Education))
# Build a long-format count table: cross-tabulate two categorical vectors
# and tag every resulting row with the insurance category label `insr`.
#
# Args:
#   vec1, vec2: categorical vectors of equal length (e.g. English level
#               and "Yes"/"No" coverage).
#   insr:       single string identifying the insurance category.
# Returns: data.frame with columns vec1, vec2, Freq, Insurance.
make_table <- function(vec1, vec2, insr) {
  tab <- as.data.frame(table(vec1, vec2))
  # Plain column assignment instead of %>% mutate(): identical result,
  # but the helper no longer silently depends on dplyr being attached.
  tab$Insurance <- insr
  tab
}
# Counts of covered/uncovered respondents by English level, one table per
# insurance category, then stacked into a single long table for plotting.
dc6_1 <- make_table(dc5$English,
dc5$Work_Insurance,
"Work")
dc6_2 <- make_table(dc5$English,
dc5$Private_Insurance,
"Private")
dc6_3 <- make_table(dc5$English,
dc5$Health_Insurance_Covered,
"Work, Private, or Other")
dc6_4 <- rbind(dc6_1,
dc6_2,
dc6_3)
names(dc6_4) <- c("English", "Covered", "Number", "Insurance_Category")
# Restore ordered factors (make_table worked on character vectors).
dc6_4 <- dc6_4 %>%
mutate(English = factor(English,
levels = c("Not at all",
"Not Well",
"Well",
"Very Well")),
Insurance_Category = factor(Insurance_Category,
levels = c("Work",
"Private",
"Work, Private, or Other")))
# Same counts as dc6_*, but broken down by education level instead of
# English level.
dc7_1 <- make_table(dc5$Education,
dc5$Work_Insurance,
"Work")
dc7_2 <- make_table(dc5$Education,
dc5$Private_Insurance,
"Private")
dc7_3 <- make_table(dc5$Education,
dc5$Health_Insurance_Covered,
"Work, Private, or Other")
dc7_4 <- rbind(dc7_1,
dc7_2,
dc7_3)
names(dc7_4) <- c("Education", "Covered", "Number", "Insurance_Category")
# Restore ordered factors for plotting.  The Education levels are taken
# from SCHL_d rather than re-typing the 24-item list (the original
# duplicated it a third time here, risking the copies drifting apart).
dc7_4 <- dc7_4 %>%
  mutate(Education = factor(Education,
                            levels = levels(SCHL_d$Education)),
         Insurance_Category = factor(Insurance_Category,
                                     levels = c("Work",
                                                "Private",
                                                "Work, Private, or Other")))
# data cleaning for plotting
# Row-wise proportions of coverage within each English level.
english_table1 <- as.data.frame(prop.table(table(dc4$English,
dc4$Work_Insurance),
margin = 1))
english_table2 <- as.data.frame(prop.table(table(dc4$English,
dc4$Health_Insurance_Covered),
margin = 1))
# NOTE(review): `times = 8` hard-codes 4 English levels x 2 coverage
# values; it breaks silently if the level set changes -- TODO confirm.
english_table3 <- rbind(english_table1, english_table2) %>%
mutate(Ins = c(rep("Work Insurance", times = 8),
rep("Work, Private, or Other Insurance", times = 8))) %>%
mutate(Ins = factor(Ins, levels = c("Work Insurance",
"Work, Private, or Other Insurance")))
names(english_table3) <- c("Level",
"Covered",
"Proportion",
"Category")
# Row-wise proportions of coverage within each education level.
edu_table1 <- as.data.frame(prop.table(table(dc4$Education,
dc4$Work_Insurance),
margin = 1))
edu_table2 <- as.data.frame(prop.table(table(dc4$Education,
dc4$Health_Insurance_Covered),
margin = 1) )
# NOTE(review): `times = 30` likewise assumes 15 populated education
# levels x 2 coverage values after the NaN filter -- TODO confirm.
edu_table3 <- rbind(edu_table1, edu_table2) %>%
filter(!is.nan(Freq)) %>%
mutate(Ins = c(rep("Work Insurance", times = 30),
rep("Work, Private, or Other Insurance", times = 30))) %>%
mutate(Ins = factor(Ins, levels = c("Work Insurance",
"Work, Private, or Other Insurance")))
names(edu_table3) = c("Education_Attainment",
"Covered",
"Proportion",
"Category")
# edu_table4: only the "covered" proportions, used in the dot plot below.
edu_table4 <- edu_table3 %>%
filter(Covered == "Yes")
# Predict Yes/No insurance coverage from English level and education via
# logistic regression.
#
# Args:
#   Input_numeric: numeric 0/1 response vector (e.g. dc4$HINS1), aligned
#                  row-for-row with `data`.
#   data:          data frame supplying the English and Education columns.
#                  Defaults to dc4, preserving the original behavior while
#                  removing the hidden hard-coded global dependency.
#   threshold:     probability cutoff above which a row is labeled "Yes".
# Returns: character vector of "Yes"/"No" fitted labels, one per row.
# NOTE: predictions are in-sample (fitted on the same rows they score).
logistic_prediction <- function(Input_numeric, data = dc4, threshold = 0.5) {
  logistic_model <- glm(Input_numeric ~ English + Education,
                        data = data,
                        family = "binomial")
  # type = "response" gives fitted probabilities on the 0-1 scale
  logistic_prob <- predict(logistic_model, type = "response")
  ifelse(logistic_prob > threshold, "Yes", "No")
}
# Predict insurance coverage with a naive Bayes classifier (naivebayes pkg).
#
# Args:
#   Input_categorical: "Yes"/"No" response vector aligned with `data`.
#   data:              data frame supplying English and Education columns
#                      (defaults to dc4, matching the original behavior
#                      while removing the hard-coded global dependency).
# Returns: factor of predicted classes, one per row of `data` (in-sample).
bayes_prediction <- function(Input_categorical, data = dc4) {
  bayes_model <- naive_bayes(Input_categorical ~ English + Education,
                             data = data,
                             # Laplace smoothing avoids zero cell probabilities
                             laplace = 1)
  predict(bayes_model, newdata = data)
}
# Predict insurance coverage with a classification tree (rpart, no pruning).
#
# Args:
#   Input_categorical: "Yes"/"No" response vector aligned with `data`.
#   data:              data frame with English and Education columns
#                      (defaults to dc4, preserving the original behavior).
# Returns: factor of predicted classes, one per row of `data` (in-sample).
tree_prediction <- function(Input_categorical, data = dc4) {
  tree_model <- rpart(Input_categorical ~ English + Education,
                      data = data, method = "class")
  predict(tree_model,
          newdata = data,
          type = "class")
}
# Predict insurance coverage with a random forest.
#
# Args:
#   Input_numeric: numeric 0/1 response vector aligned with `data`.
#                  NOTE: a numeric response makes randomForest fit a
#                  regression forest; predictions are then thresholded --
#                  presumably intentional, but worth confirming.
#   data:          data frame with English and Education columns
#                  (defaults to dc4, preserving the original behavior).
#   threshold:     cutoff above which a prediction is labeled "Yes".
#   ntree:         number of trees to grow (original hard-coded 200).
# Returns: character vector of "Yes"/"No" labels, one per training row.
# NOTE(review): results are stochastic; no set.seed() is used upstream.
randomforest_prediction <- function(Input_numeric, data = dc4,
                                    threshold = 0.5, ntree = 200) {
  forest_model <- randomForest(Input_numeric ~ English + Education,
                               data = data,
                               ntree = ntree)
  # predict() without newdata returns out-of-bag predictions per row
  forest_prob <- predict(forest_model)
  ifelse(forest_prob > threshold, "Yes", "No")
}
# In-sample accuracy of every model/outcome pair: the share of rows whose
# predicted "Yes"/"No" label matches the observed label.
# Predict coverage of work insurance via logistic regression
work_logistic_accuracy <- mean(logistic_prediction(dc4$HINS1) == dc4$Work_Insurance)
# Predict coverage of private insurance via logistic regression
private_logistic_accuracy <- mean(logistic_prediction(dc4$HINS2) == dc4$Private_Insurance)
# Predict coverage of any insurance via logistic regression
# FIX: HICOV predictions were scored against dc4$Private_Insurance; they
# must be compared with dc4$Health_Insurance_Covered.
insurance_logistic_accuracy <- mean(logistic_prediction(dc4$HICOV) == dc4$Health_Insurance_Covered)
# Predict coverage of work insurance via Bayesian
work_bayes_accuracy <- mean(bayes_prediction(dc4$Work_Insurance) == dc4$Work_Insurance)
# Predict coverage of private insurance via Bayesian
private_bayes_accuracy <- mean(bayes_prediction(dc4$Private_Insurance) == dc4$Private_Insurance)
# Predict coverage of insurance via Bayesian
insurance_bayes_accuracy <- mean(bayes_prediction(dc4$Health_Insurance_Covered) == dc4$Health_Insurance_Covered)
# Predict coverage of work insurance via classification tree
work_tree_accuracy <- mean(tree_prediction(dc4$Work_Insurance) == dc4$Work_Insurance)
# Predict coverage of private insurance via classification tree
private_tree_accuracy <- mean(tree_prediction(dc4$Private_Insurance) == dc4$Private_Insurance)
# Predict coverage of insurance via classification tree
insurance_tree_accuracy <- mean(tree_prediction(dc4$Health_Insurance_Covered) == dc4$Health_Insurance_Covered)
# Predict coverage of work insurance via random forest
work_rforest_accuracy <- mean(randomforest_prediction(dc4$HINS1) == dc4$Work_Insurance)
# Predict coverage of private insurance via random forest
private_rforest_accuracy <- mean(randomforest_prediction(dc4$HINS2) == dc4$Private_Insurance)
# Predict coverage of any insurance via random forest
# FIX: same mismatch as above -- compare with Health_Insurance_Covered.
insurance_rforest_accuracy <- mean(randomforest_prediction(dc4$HICOV) == dc4$Health_Insurance_Covered)
# cleaning for comparing accuracy of each prediction
# Model and Insurance label vectors are ordered so that, zipped together,
# they align with the Accuracy vector below (model-major order).
Model <- c(rep("Logistic Regression", times = 3),
rep("Naive Bayes", times = 3),
rep("Classification Tree", times = 3),
rep("Random Forest", times = 3))
Insurance <- rep(c("Work Insurance",
"Private Insurance",
"Work, Private, or Other Insurance"), times = 4)
Accuracy <- c(work_logistic_accuracy,
private_logistic_accuracy,
insurance_logistic_accuracy,
work_bayes_accuracy,
private_bayes_accuracy,
insurance_bayes_accuracy,
work_tree_accuracy,
private_tree_accuracy,
insurance_tree_accuracy,
work_rforest_accuracy,
private_rforest_accuracy,
insurance_rforest_accuracy)
# pred_table: wide table (one row per insurance category, one column per
# model) of accuracies expressed as percentages.
# NOTE(review): spread() is superseded in tidyr; pivot_wider() is the
# modern equivalent.
pred_table <- data.frame(Model = Model,
Insurance_Category = Insurance,
Accuracy = as.numeric(Accuracy)) %>%
mutate(Accuracy = round(Accuracy * 100, digits = 2),
Insurance_Category = factor(Insurance_Category, levels =
c("Work Insurance",
"Private Insurance",
"Work, Private, or Other Insurance")),
Model = factor(Model, levels = c("Logistic Regression",
"Naive Bayes",
"Classification Tree",
"Random Forest"))) %>%
spread(Model, Accuracy)
# function converting categorical to numeric (yes to 1, no to 0)
#
# Vectorized rewrite: the original grew num_vec with c() inside a loop
# (quadratic), returned NULL for empty input, and errored on NA elements.
# This version returns numeric(0) for empty input and propagates NA.
#
# Args:
#   vec: categorical vector whose "Yes" entries map to 1, all others to 0.
# Returns: numeric vector of the same length as `vec`.
cat_to_num <- function(vec) {
  as.numeric(vec == "Yes")
}
# Area under the ROC curve (AUC) for an actual vs. predicted "Yes"/"No"
# pair.  Both arguments are categorical; they are converted to 0/1 via
# cat_to_num() before being handed to pROC::roc().
get_auc <- function(actual_vec1, prob_vec2) {
  roc_curve <- roc(cat_to_num(actual_vec1), cat_to_num(prob_vec2))
  auc(roc_curve)
}
# AUC
# Area-under-ROC for every model/outcome pair, computed in-sample from
# the actual labels and the thresholded predictions.
work_logistic_auc <- get_auc(dc4$Work_Insurance,
logistic_prediction(dc4$HINS1))
private_logistic_auc <- get_auc(dc4$Private_Insurance,
logistic_prediction(dc4$HINS2))
insurance_logistic_auc <- get_auc(dc4$Health_Insurance_Covered,
logistic_prediction(dc4$HICOV))
work_bayes_auc <- get_auc(dc4$Work_Insurance,
bayes_prediction(dc4$Work_Insurance))
private_bayes_auc <- get_auc(dc4$Private_Insurance,
bayes_prediction(dc4$Private_Insurance))
insurance_bayes_auc <- get_auc(dc4$Health_Insurance_Covered,
bayes_prediction(dc4$Health_Insurance_Covered))
work_tree_auc <- get_auc(dc4$Work_Insurance,
tree_prediction(dc4$Work_Insurance))
private_tree_auc <- get_auc(dc4$Private_Insurance,
tree_prediction(dc4$Private_Insurance))
insurance_tree_auc <- get_auc(dc4$Health_Insurance_Covered,
tree_prediction(dc4$Health_Insurance_Covered))
work_rforest_auc <- get_auc(dc4$Work_Insurance,
randomforest_prediction(dc4$HINS1))
private_rforest_auc <- get_auc(dc4$Private_Insurance,
randomforest_prediction(dc4$HINS2))
insurance_rforest_auc <- get_auc(dc4$Health_Insurance_Covered,
randomforest_prediction(dc4$HICOV))
# data cleaning for AUC table
# AUC values in the same model-major order as the Model/Insurance vectors.
AUC <- c(work_logistic_auc,
private_logistic_auc,
insurance_logistic_auc,
work_bayes_auc,
private_bayes_auc,
insurance_bayes_auc,
work_tree_auc,
private_tree_auc,
insurance_tree_auc,
work_rforest_auc,
private_rforest_auc,
insurance_rforest_auc)
# AUC_table: wide table (insurance category x model) of rounded AUCs.
AUC_table <- data.frame(Model = Model,
Insurance_Category = Insurance,
AUC = round(as.numeric(AUC), digits = 2)) %>%
mutate(Insurance_Category = factor(Insurance_Category, levels =
c("Work Insurance",
"Private Insurance",
"Work, Private, or Other Insurance")),
Model = factor(Model, levels = c("Logistic Regression",
"Naive Bayes",
"Classification Tree",
"Random Forest"))) %>%
spread(Model, AUC)
# plotting
# Stacked bars: sample size per English level, split by coverage and
# faceted by insurance category.
english_population <-
ggplot(dc6_4, aes(x = English, y = Number)) +
geom_bar(aes(fill = Covered), stat = "identity", width = 0.8) +
facet_grid(.~ Insurance_Category) +
theme_bw() +
theme(axis.text = element_text(size = 10),
strip.text = element_text(size = 12)) +
ggtitle("Sample Size: English Level") +
ylab("Population") +
xlab("English Level")
# Same stacked-bar layout per education level; the 24 long x-axis labels
# are hidden to keep the plot readable.
edu_population <-
ggplot(dc7_4, aes(x = Education, y = Number)) +
geom_bar(aes(fill = Covered), stat = "identity") +
facet_grid(.~ Insurance_Category) +
theme_bw() +
theme(axis.text = element_text(size = 10),
strip.text = element_text(size = 12),
axis.text.x = element_blank()) +
ggtitle("Sample Size: Education Level") +
ylab("Population") +
xlab("Education Attainment")
# Proportion of covered vs. uncovered within each English level
# (position = "fill" normalizes each bar to 1).
english_vs_insurance <-
ggplot(english_table3, aes(x = Level,
y = Proportion,
fill = Covered)) +
geom_bar(stat = "identity",
position = "fill",
width = 0.8) +
facet_grid(.~Category) +
theme_bw() +
theme(axis.text = element_text(size = 10),
strip.text = element_text(size = 12)) +
ggtitle("Health Insurance Coverage by English Level") +
ylab("Proportion of Health Insurance Coverage") +
xlab("English Level")
# Dot plot: proportion covered at each education level, faceted by
# insurance category.  X-axis tick labels are hidden (24 long labels);
# the color legend identifies the education levels instead.
education_vs_insurance <-
  ggplot(edu_table4, aes(x = Education_Attainment,
                         y = Proportion)) +
  geom_point(size = 4,
             aes(color = Education_Attainment)) +
  facet_grid(.~ Category) +
  theme_bw() +
  theme(legend.title = element_blank(),
        axis.text = element_text(size = 10),
        strip.text = element_text(size = 12),
        axis.text.x = element_blank()) +
  ggtitle("Health Insurance Coverage by Education Level") +
  # FIX: corrected the user-visible typo "Peoeple" -> "People"
  ylab("Health Insurance Coverage \n(Proportion of Total People in the Same Education Attainment)") +
  xlab("Education Attainment")
# final accuracy table presentation
# formattable renders the wide tables with a color gradient over the four
# model columns (columns 2:5).
pred_table_vis <- formattable(pred_table,
list(area(col = 2:5) ~ color_tile("white", "lightpink")))
AUC_table_vis <- formattable(AUC_table,
list(area(col = 2:5) ~ color_tile("white", "#FFCC66")))
# ---- Duplicate copy of the same script (code_updated.R) ----
library(tidyverse)
library(naivebayes)
library(rpart)
library(randomForest)
library(pROC)
library(ggplot2)
library(ggrepel)
library(formattable)
# Interactive shorthands; NOTE(review): `t <- tail` masks base::t
# (matrix transpose) -- consider removing these aliases.
g <- glimpse
h <- head
s <- summary
t <- tail
# dc: raw ACS PUMS person-level records for area 11 (District of Columbia).
dc <- read_csv("psam_p11.csv")
# variables = colnames of interest
# ACS PUMS column codes; variable_comments below pairs a description with
# each code position-by-position, so the two vectors must stay the same
# length.
variables <- c('ST', 'ADJINC', "PWGTP", 'CIT', 'CITWP', 'COW', 'ENG', 'HINS1',
'HINS2', 'HINS3', 'HINS4', 'INTP', 'JWTR', 'LANX', 'MAR', 'MARHT',
'RETP', "SCH", 'SCHL', 'SEMP', 'SEX', 'SSP', "WAGP", 'YOEP', 'ANC',
'ANC1P', 'DECADE', 'DRIVESP', 'ESR', 'FOD1P', 'HICOV', 'INDP',
'JWAP', 'JWDP', 'LANP', 'MIGPUMA', 'MIGSP', 'NAICSP', 'NATIVITY',
'NOP', 'OC', 'OCCP', 'PERNP', 'PINCP', 'POBP', 'POVPIP',
'POWPUMA', 'POWSP', 'PRIVCOV', 'PUBCOV', 'RAC1P', 'RAC2P',
'RAC3P', 'RACAIAN', 'RACASN', 'RACBLK', 'RACNH', 'RACWHT',
'SCIENGP', 'SCIENGRLP', 'SFN', 'SFR', 'SOCP', 'WAOB')
# Descriptions aligned one-to-one with `variables` above.
variable_comments <- c('State code',
'Adjustment factor for income and earnings dollar amounts',
"Person's weight",
'Citizenship status',
'Year of naturalization write-in',
'Class of worker',
'Ability to speak English',
'Insurance through a current or former employer or union',
'Insurance purchased directly from an insurance company',
'Medicare, for people 65 and older, or people with certain disabilities',
'Medicaid, Medical Assistance, or any kind of government-assistance plan for those with low incomes or a disability',
'Interest, dividends, and net rental income past 12 months',
'Means of transportation to work',
'Language other than English spoken at home',
'Marital status',
'Number of times married',
'Retirement income past 12 months',
'School enrollment',
'Educational level',
'Self-employment income past 12 months',
'Sex', 'Social Security income past 12 months',
'Wages or salary income past 12 months',
'Year of entry to the US',
'Ancestry recode', 'Recoded Detailed Ancestry - first entry',
'Decade of entry',
'Number of vehicles calculated from JWRI',
'Employment status recode',
'Recoded field of degree - first entry',
'Health insurance coverage recode',
'Industry recode for 2013 and later based on 2012 IND codes',
'Time of arrival at work - hour and minute',
'Time of departure for work - hour and minute',
'Language spoken at home',
'Migration PUMA based on 2010 Census definition',
'Migration recode - State or foreign country code',
'NAICS Industry recode for 2013 and later based on 2012 NAICS codes',
'Nativity', 'Nativity of parent', 'Own child',
'Occupation recode for 2012 and later based on 2010 OCC codes',
'Total person\'s earnings', 'Total person\'s income',
'Place of birth (Recode)', 'Income-to-poverty ratio recode',
'Place of work PUMA based on 2010 Census definition',
'Place of work - State or foreign country recode',
'Private health insurance coverage recode',
'Public health coverage recode',
'Recoded detailed race code',
'Recoded detailed race code',
'Recoded detailed race code',
'American Indian and Alaska Native recode (American Indian and Alaska Native alone or in combination with one or more other races)',
'Asian recode (Asian alone or in combination with one or more other races)',
'Black or African American recode (Black alone or in combination with one or more other races)',
'Native Hawaiian recode (Native Hawaiian alone or in combination with one or more other races)',
'White recode (White alone or in combination with one or more other races)',
'Field of Degree Science and Engineering Flag - NSF Definition',
'Field of Degree Science and Engineering Related Flag - NSF Definition',
'Subfamily number', 'Subfamily relationship',
'SOC Occupation code for 2012 and later based on 2010 SOC codes',
'World area of birth')
# var: variables of primary interest
# Lookup table pairing each ACS variable code with its description.
var <- data.frame(variables = variables, description = variable_comments)
# Descriptions of the five variables this analysis focuses on.
# NOTE(review): "var_of_interst" is a typo for "var_of_interest" but is
# used consistently below.
var_of_interst <- c('Ability to speak English',
'Insurance through a current or former employer or union',
'Insurance purchased directly from an insurance company',
'Educational level',
'Health insurance coverage recode')
# var1: variables narrowed down
var1 <- var %>%
filter(description %in% var_of_interst)
# removing missing values in ENG (English level)
dc1 <- dc %>%
select(var1$variables) %>%
filter(!is.na(ENG))
# ENG_d: lookup table mapping the numeric ENG code (1-4) onto an ordered
# English-ability factor.  Code 1 = "Very Well" ... code 4 = "Not at all",
# while the factor levels run from worst to best for plotting order.
eng_levels <- c("Not at all", "Not Well", "Well", "Very Well")
ENG_d <- data.frame(
  ENG = 1:4,
  English = factor(rev(eng_levels), levels = eng_levels)
)
# Educational-attainment labels for SCHL codes 1-24, in ascending order.
# Listing them once (the original duplicated the whole 24-item vector a
# second time just to set the factor levels) keeps labels and levels in
# sync by construction and drops the redundant mutate() step.
edu_levels <- c(
  "No Schooling Completed",
  "Nursery School or Pre-school",
  "Kindergarten",
  "Grade 1",
  "Grade 2",
  "Grade 3",
  "Grade 4",
  "Grade 5",
  "Grade 6",
  "Grade 7",
  "Grade 8",
  "Grade 9",
  "Grade 10",
  "Grade 11",
  "12th Grade - no diploma",
  "Regular high school diploma",
  "GED or alternative credential",
  "Some college, but less than 1 year",
  "1 or more years of college credit, no degree",
  "Associate's degree",
  "Bachelor's degree",
  "Master's degree",
  "Professional degree beyond a bachelor's degree",
  "Doctorate degree"
)
# SCHL_d: lookup table mapping the numeric SCHL code onto an ordered
# Education factor (levels in ascending attainment order).
SCHL_d <- data.frame(
  SCHL = 1:24,
  Education = factor(edu_levels, levels = edu_levels)
)
# dc3: dc1 with numeric codes converted to human-readable categoricals.
# FIX: the original pipeline was severed after mutate(), leaving the
# left_join()/mutate() calls below as dangling (erroring) statements.
dc3 <- dc1 %>%
  mutate(SCHL = as.integer(SCHL)) %>%
  # converting ENG (English level) to a categorical variable
  left_join(ENG_d, by = "ENG") %>%
  # converting SCHL (educational attainment) to a categorical variable
  left_join(SCHL_d, by = "SCHL") %>%
  # converting HINS1 (work), HINS2 (private), HICOV (any) to Yes/No ...
  mutate(Work_Insurance = ifelse(HINS1 == 1, "Yes", "No"),
         Private_Insurance = ifelse(HINS2 == 1, "Yes", "No"),
         Health_Insurance_Covered = ifelse(HICOV == 1, "Yes", "No")) %>%
  # ... and recode the raw flags to 1/0 indicators for modeling
  mutate(HINS1 = ifelse(HINS1 == 1, 1, 0),
         HINS2 = ifelse(HINS2 == 1, 1, 0),
         HICOV = ifelse(HICOV == 1, 1, 0))
# dc4: drop rows whose Education could not be matched in the join.
dc4 <- dc3[!is.na(dc3$Education), ]
# data cleaning for observation number
# dc5: character versions of the factors, used for building count tables.
dc5 <- dc4 %>%
mutate(English = as.character(English),
Education = as.character(Education))
# Build a long-format count table: cross-tabulate two categorical vectors
# and tag every resulting row with the insurance category label `insr`.
# Returns a data.frame with columns vec1, vec2, Freq, Insurance.
make_table <- function(vec1, vec2, insr) {
  tab <- as.data.frame(table(vec1, vec2))
  # Plain column assignment instead of %>% mutate(): identical result,
  # but the helper no longer silently depends on dplyr being attached.
  tab$Insurance <- insr
  tab
}
# Counts of covered/uncovered respondents by English level, one table per
# insurance category, stacked into a single long table for plotting.
dc6_1 <- make_table(dc5$English,
dc5$Work_Insurance,
"Work")
dc6_2 <- make_table(dc5$English,
dc5$Private_Insurance,
"Private")
dc6_3 <- make_table(dc5$English,
dc5$Health_Insurance_Covered,
"Work, Private, or Other")
dc6_4 <- rbind(dc6_1,
dc6_2,
dc6_3)
names(dc6_4) <- c("English", "Covered", "Number", "Insurance_Category")
# Restore ordered factors (make_table worked on character vectors).
dc6_4 <- dc6_4 %>%
mutate(English = factor(English,
levels = c("Not at all",
"Not Well",
"Well",
"Very Well")),
Insurance_Category = factor(Insurance_Category,
levels = c("Work",
"Private",
"Work, Private, or Other")))
# Same counts as dc6_*, but by education level instead of English level.
dc7_1 <- make_table(dc5$Education,
dc5$Work_Insurance,
"Work")
dc7_2 <- make_table(dc5$Education,
dc5$Private_Insurance,
"Private")
dc7_3 <- make_table(dc5$Education,
dc5$Health_Insurance_Covered,
"Work, Private, or Other")
dc7_4 <- rbind(dc7_1,
dc7_2,
dc7_3)
names(dc7_4) <- c("Education", "Covered", "Number", "Insurance_Category")
# Restore ordered factors for plotting.  The Education levels are taken
# from SCHL_d rather than re-typing the 24-item list (the original
# duplicated it a third time here, risking the copies drifting apart).
dc7_4 <- dc7_4 %>%
  mutate(Education = factor(Education,
                            levels = levels(SCHL_d$Education)),
         Insurance_Category = factor(Insurance_Category,
                                     levels = c("Work",
                                                "Private",
                                                "Work, Private, or Other")))
# data cleaning for plotting
# Row-wise proportions of coverage within each English level.
english_table1 <- as.data.frame(prop.table(table(dc4$English,
dc4$Work_Insurance),
margin = 1))
english_table2 <- as.data.frame(prop.table(table(dc4$English,
dc4$Health_Insurance_Covered),
margin = 1))
# NOTE(review): `times = 8` hard-codes 4 English levels x 2 coverage
# values; it breaks silently if the level set changes -- TODO confirm.
english_table3 <- rbind(english_table1, english_table2) %>%
mutate(Ins = c(rep("Work Insurance", times = 8),
rep("Work, Private, or Other Insurance", times = 8))) %>%
mutate(Ins = factor(Ins, levels = c("Work Insurance",
"Work, Private, or Other Insurance")))
names(english_table3) <- c("Level",
"Covered",
"Proportion",
"Category")
# Row-wise proportions of coverage within each education level.
edu_table1 <- as.data.frame(prop.table(table(dc4$Education,
dc4$Work_Insurance),
margin = 1))
edu_table2 <- as.data.frame(prop.table(table(dc4$Education,
dc4$Health_Insurance_Covered),
margin = 1) )
# NOTE(review): `times = 30` likewise assumes 15 populated education
# levels x 2 coverage values after the NaN filter -- TODO confirm.
edu_table3 <- rbind(edu_table1, edu_table2) %>%
filter(!is.nan(Freq)) %>%
mutate(Ins = c(rep("Work Insurance", times = 30),
rep("Work, Private, or Other Insurance", times = 30))) %>%
mutate(Ins = factor(Ins, levels = c("Work Insurance",
"Work, Private, or Other Insurance")))
names(edu_table3) = c("Education_Attainment",
"Covered",
"Proportion",
"Category")
# edu_table4: only the "covered" proportions, used for the dot plot.
edu_table4 <- edu_table3 %>%
filter(Covered == "Yes")
# Predict Yes/No insurance coverage from English level and education via
# logistic regression.
#
# Args:
#   Input_numeric: numeric 0/1 response vector (e.g. dc4$HINS1), aligned
#                  row-for-row with `data`.
#   data:          data frame supplying English and Education columns
#                  (defaults to dc4, removing the hard-coded global).
#   threshold:     probability cutoff above which a row is labeled "Yes".
# Returns: character vector of "Yes"/"No" fitted labels (in-sample).
logistic_prediction <- function(Input_numeric, data = dc4, threshold = 0.5) {
  logistic_model <- glm(Input_numeric ~ English + Education,
                        data = data,
                        family = "binomial")
  # type = "response" gives fitted probabilities on the 0-1 scale
  logistic_prob <- predict(logistic_model, type = "response")
  ifelse(logistic_prob > threshold, "Yes", "No")
}
# Predict insurance coverage with a naive Bayes classifier (naivebayes pkg).
#
# Args:
#   Input_categorical: "Yes"/"No" response vector aligned with `data`.
#   data:              data frame with English and Education columns
#                      (defaults to dc4, matching the original behavior).
# Returns: factor of predicted classes, one per row of `data` (in-sample).
bayes_prediction <- function(Input_categorical, data = dc4) {
  bayes_model <- naive_bayes(Input_categorical ~ English + Education,
                             data = data,
                             # Laplace smoothing avoids zero cell probabilities
                             laplace = 1)
  predict(bayes_model, newdata = data)
}
# Predict insurance coverage with a classification tree (rpart, no pruning).
#
# Args:
#   Input_categorical: "Yes"/"No" response vector aligned with `data`.
#   data:              data frame with English and Education columns
#                      (defaults to dc4, preserving the original behavior).
# Returns: factor of predicted classes, one per row of `data` (in-sample).
tree_prediction <- function(Input_categorical, data = dc4) {
  tree_model <- rpart(Input_categorical ~ English + Education,
                      data = data, method = "class")
  predict(tree_model,
          newdata = data,
          type = "class")
}
# Prediction model: random forest with 200 trees.
# The numeric response makes randomForest fit a regression forest;
# predict() with no newdata then returns out-of-bag predictions, which
# are thresholded at 0.5 into "Yes"/"No" labels.
randomforest_prediction <- function(Input_numeric) {
  fit <- randomForest(Input_numeric ~ English + Education,
                      data = dc4,
                      ntree = 200)
  prob <- predict(fit)
  ifelse(prob > 0.5, "Yes", "No")
}
# ---- In-sample accuracy for every model / outcome combination ------------
# Each value is the share of rows where the model's "Yes"/"No" prediction
# matches the observed coverage column.

# Logistic regression
work_logistic_accuracy <- mean(logistic_prediction(dc4$HINS1) == dc4$Work_Insurance)
private_logistic_accuracy <- mean(logistic_prediction(dc4$HINS2) == dc4$Private_Insurance)
# BUGFIX: the overall-coverage prediction was compared against
# Private_Insurance; it must be compared against Health_Insurance_Covered,
# matching the Bayes/tree accuracies and the AUC section below.
insurance_logistic_accuracy <- mean(logistic_prediction(dc4$HICOV) == dc4$Health_Insurance_Covered)

# Naive Bayes
work_bayes_accuracy <- mean(bayes_prediction(dc4$Work_Insurance) == dc4$Work_Insurance)
private_bayes_accuracy <- mean(bayes_prediction(dc4$Private_Insurance) == dc4$Private_Insurance)
insurance_bayes_accuracy <- mean(bayes_prediction(dc4$Health_Insurance_Covered) == dc4$Health_Insurance_Covered)

# Classification tree
work_tree_accuracy <- mean(tree_prediction(dc4$Work_Insurance) == dc4$Work_Insurance)
private_tree_accuracy <- mean(tree_prediction(dc4$Private_Insurance) == dc4$Private_Insurance)
insurance_tree_accuracy <- mean(tree_prediction(dc4$Health_Insurance_Covered) == dc4$Health_Insurance_Covered)

# Random forest
work_rforest_accuracy <- mean(randomforest_prediction(dc4$HINS1) == dc4$Work_Insurance)
private_rforest_accuracy <- mean(randomforest_prediction(dc4$HINS2) == dc4$Private_Insurance)
# BUGFIX: same copy-paste error as the logistic case above.
insurance_rforest_accuracy <- mean(randomforest_prediction(dc4$HICOV) == dc4$Health_Insurance_Covered)
# ---- Accuracy comparison table -------------------------------------------
# One row per (model, insurance category) pair, then models spread into
# columns for side-by-side comparison. Model/Insurance are reused by the
# AUC table further down.
Model <- c(rep("Logistic Regression", times = 3),
           rep("Naive Bayes", times = 3),
           rep("Classification Tree", times = 3),
           rep("Random Forest", times = 3))
Insurance <- rep(c("Work Insurance",
                   "Private Insurance",
                   "Work, Private, or Other Insurance"), times = 4)
Accuracy <- c(work_logistic_accuracy, private_logistic_accuracy, insurance_logistic_accuracy,
              work_bayes_accuracy, private_bayes_accuracy, insurance_bayes_accuracy,
              work_tree_accuracy, private_tree_accuracy, insurance_tree_accuracy,
              work_rforest_accuracy, private_rforest_accuracy, insurance_rforest_accuracy)
pred_table <- data.frame(Model = Model,
                         Insurance_Category = Insurance,
                         Accuracy = as.numeric(Accuracy)) %>%
  mutate(Accuracy = round(Accuracy * 100, digits = 2),  # percent, 2 dp
         Insurance_Category = factor(Insurance_Category,
                                     levels = c("Work Insurance",
                                                "Private Insurance",
                                                "Work, Private, or Other Insurance")),
         Model = factor(Model, levels = c("Logistic Regression",
                                          "Naive Bayes",
                                          "Classification Tree",
                                          "Random Forest"))) %>%
  spread(Model, Accuracy)  # spread() is superseded by pivot_wider(); kept as-is
# Convert a "Yes"/"No" character (or factor) vector to numeric:
# "Yes" -> 1, any other value -> 0.
#
# Vectorized replacement for the original element-by-element loop, which
# grew the result with c() (O(n^2)) and raised an error on NA input.
# Differences from the loop version, both more robust: NA elements now
# yield NA instead of an error, and an empty input yields numeric(0)
# instead of NULL.
cat_to_num <- function(vec) {
  as.numeric(vec == "Yes")
}
# Area under the ROC curve for a pair of "Yes"/"No" vectors.
# actual_vec1 holds the observed labels, prob_vec2 the predicted labels;
# both are recoded to 0/1 with cat_to_num() before computing the curve.
# (roc()/auc() come from an attached package, presumably pROC.)
get_auc <- function(actual_vec1, prob_vec2) {
  actual <- cat_to_num(actual_vec1)
  predicted <- cat_to_num(prob_vec2)
  auc(roc(actual, predicted))
}
# ---- Area under the ROC curve for every model / outcome pair -------------
work_logistic_auc <- get_auc(dc4$Work_Insurance, logistic_prediction(dc4$HINS1))
private_logistic_auc <- get_auc(dc4$Private_Insurance, logistic_prediction(dc4$HINS2))
insurance_logistic_auc <- get_auc(dc4$Health_Insurance_Covered, logistic_prediction(dc4$HICOV))
work_bayes_auc <- get_auc(dc4$Work_Insurance, bayes_prediction(dc4$Work_Insurance))
private_bayes_auc <- get_auc(dc4$Private_Insurance, bayes_prediction(dc4$Private_Insurance))
insurance_bayes_auc <- get_auc(dc4$Health_Insurance_Covered, bayes_prediction(dc4$Health_Insurance_Covered))
work_tree_auc <- get_auc(dc4$Work_Insurance, tree_prediction(dc4$Work_Insurance))
private_tree_auc <- get_auc(dc4$Private_Insurance, tree_prediction(dc4$Private_Insurance))
insurance_tree_auc <- get_auc(dc4$Health_Insurance_Covered, tree_prediction(dc4$Health_Insurance_Covered))
work_rforest_auc <- get_auc(dc4$Work_Insurance, randomforest_prediction(dc4$HINS1))
private_rforest_auc <- get_auc(dc4$Private_Insurance, randomforest_prediction(dc4$HINS2))
insurance_rforest_auc <- get_auc(dc4$Health_Insurance_Covered, randomforest_prediction(dc4$HICOV))

# Assemble the AUC comparison table, mirroring the accuracy table above
# (reuses the Model / Insurance label vectors defined there).
AUC <- c(work_logistic_auc, private_logistic_auc, insurance_logistic_auc,
         work_bayes_auc, private_bayes_auc, insurance_bayes_auc,
         work_tree_auc, private_tree_auc, insurance_tree_auc,
         work_rforest_auc, private_rforest_auc, insurance_rforest_auc)
AUC_table <- data.frame(Model = Model,
                        Insurance_Category = Insurance,
                        AUC = round(as.numeric(AUC), digits = 2)) %>%
  mutate(Insurance_Category = factor(Insurance_Category,
                                     levels = c("Work Insurance",
                                                "Private Insurance",
                                                "Work, Private, or Other Insurance")),
         Model = factor(Model, levels = c("Logistic Regression",
                                          "Naive Bayes",
                                          "Classification Tree",
                                          "Random Forest"))) %>%
  spread(Model, AUC)
# ---- Figures -------------------------------------------------------------
# Sample-size bar charts (counts come from dc6_4 / dc7_4) and the
# coverage-proportion chart by English level.

# Stacked counts of covered / not covered per English level.
english_population <-
  ggplot(dc6_4, aes(x = English, y = Number)) +
  geom_bar(aes(fill = Covered), stat = "identity", width = 0.8) +
  facet_grid(. ~ Insurance_Category) +
  theme_bw() +
  theme(axis.text = element_text(size = 10),
        strip.text = element_text(size = 12)) +
  ggtitle("Sample Size: English Level") +
  ylab("Population") +
  xlab("English Level")

# Stacked counts per education level (x labels hidden: too many levels).
edu_population <-
  ggplot(dc7_4, aes(x = Education, y = Number)) +
  geom_bar(aes(fill = Covered), stat = "identity") +
  facet_grid(. ~ Insurance_Category) +
  theme_bw() +
  theme(axis.text = element_text(size = 10),
        strip.text = element_text(size = 12),
        axis.text.x = element_blank()) +
  ggtitle("Sample Size: Education Level") +
  ylab("Population") +
  xlab("Education Attainment")

# Proportional (position = "fill") coverage bars by English level.
english_vs_insurance <-
  ggplot(english_table3, aes(x = Level,
                             y = Proportion,
                             fill = Covered)) +
  geom_bar(stat = "identity",
           position = "fill",
           width = 0.8) +
  facet_grid(. ~ Category) +
  theme_bw() +
  theme(axis.text = element_text(size = 10),
        strip.text = element_text(size = 12)) +
  ggtitle("Health Insurance Coverage by English Level") +
  ylab("Proportion of Health Insurance Coverage") +
  xlab("English Level")
# Dot plot: coverage proportion per education level, faceted by insurance
# category; edu_table4 holds only the Covered == "Yes" rows.
# FIX: corrected the "Peoeple" typo in the user-facing y-axis label.
education_vs_insurance <-
  ggplot(edu_table4, aes(x = Education_Attainment,
                         y = Proportion)) +
  geom_point(size = 4,
             aes(color = Education_Attainment)) +
  facet_grid(.~ Category) +
  theme_bw() +
  theme(legend.title = element_blank(),
        axis.text = element_text(size = 10),
        strip.text = element_text(size = 12),
        axis.text.x = element_blank()) +
  ggtitle("Health Insurance Coverage by Education Level") +
  ylab("Health Insurance Coverage \n(Proportion of Total People in the Same Education Attainment)") +
  xlab("Education Attainment")
# ---- Final table rendering -----------------------------------------------
# Colour-graded cells across the four model columns (columns 2:5).
pred_table_vis <- formattable(
  pred_table,
  list(area(col = 2:5) ~ color_tile("white", "lightpink")))
AUC_table_vis <- formattable(
  AUC_table,
  list(area(col = 2:5) ~ color_tile("white", "#FFCC66")))
|
#############################################################################
# Project: Exploratory Data Analysis
# Description: This is the solution for course Project 2 (plot 1) for
# the exploritory data analysis module within the Data Science
# Specialisation delivered by John Hopkins University, via
# coursera.org.
# Developer: Dr Paul Fergus
# Email: p.fergus@ljmu.ac.uk
#############################################################################
#Set the working directory
# NOTE(review): the hard-coded setwd() ties this script to one machine;
# consider running from the project root or using a relative path.
setwd("~/Google Drive/LJMU6/Research/Git/datascience/4_exploratorydataanalysis/course_project_2")
#clean the environment
# NOTE(review): rm(list=ls()) wipes the interactive workspace -- acceptable
# in a standalone analysis script, but avoid in shared/library code.
rm(list=ls())
#Set global variables to null
# NULL acts as a "not loaded yet" sentinel: run_analysis() below fills
# these via <<- on first use so the large RDS files are read only once.
NEI <- NULL
SCC <- NULL
total_emissions_by_year <- NULL
#This function runs the analysis
# Produces plot 1: total US PM2.5 emissions for each measurement year
# (1999, 2002, 2005, 2008), written to plot1.png. The NEI/SCC data sets
# are loaded into the global environment on the first call and reused
# afterwards (the NULL sentinels above act as the cache flag).
run_analysis <- function(){
  # Emissions records (about 6 million rows); read once per session.
  if (is.null(NEI)) {
    NEI <<- readRDS("data/summarySCC_PM25.rds")
  }
  # Source classification codes, cached the same way.
  if (is.null(SCC)) {
    SCC <<- readRDS("data/Source_Classification_Code.rds")
  }
  # Question 1: have total PM2.5 emissions decreased from 1999 to 2008?
  # (They have.) Sum emissions over all sources within each year.
  total_emissions_by_year <<- aggregate(Emissions ~ year, NEI, sum)
  # Render the base-graphics bar chart straight to a PNG device.
  png("plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
  with(total_emissions_by_year, {
    barplot(
      Emissions / 10^6,  # scale to millions of tons so the axis is readable
      names.arg = year,
      xlab = "Year",
      ylab = "PM2.5 Emissions (10^6 Tons)",
      main = "Total PM2.5 Emissions From All US Sources"
    )
  })
  # Close the device so the file is flushed to disk.
  dev.off()
}
# Project: Exploratory Data Analysis
# Description: This is the solution for course Project 2 (plot 1) for
# the exploritory data analysis module within the Data Science
# Specialisation delivered by John Hopkins University, via
# coursera.org.
# Developer: Dr Paul Fergus
# Email: p.fergus@ljmu.ac.uk
#############################################################################
#Set the working directory
# NOTE(review): the hard-coded setwd() ties this script to one machine;
# consider running from the project root or using a relative path.
setwd("~/Google Drive/LJMU6/Research/Git/datascience/4_exploratorydataanalysis/course_project_2")
#clean the environment
# NOTE(review): rm(list=ls()) wipes the interactive workspace -- acceptable
# in a standalone analysis script, but avoid in shared/library code.
rm(list=ls())
#Set global variables to null
# NULL acts as a "not loaded yet" sentinel: run_analysis() below fills
# these via <<- on first use so the large RDS files are read only once.
NEI <- NULL
SCC <- NULL
total_emissions_by_year <- NULL
#This function runs the analysis
# Produces plot 1: total US PM2.5 emissions for each measurement year
# (1999, 2002, 2005, 2008), written to plot1.png. The NEI/SCC data sets
# are loaded into the global environment on the first call and reused
# afterwards (the NULL sentinels above act as the cache flag).
run_analysis <- function(){
  # Emissions records (about 6 million rows); read once per session.
  if (is.null(NEI)) {
    NEI <<- readRDS("data/summarySCC_PM25.rds")
  }
  # Source classification codes, cached the same way.
  if (is.null(SCC)) {
    SCC <<- readRDS("data/Source_Classification_Code.rds")
  }
  # Question 1: have total PM2.5 emissions decreased from 1999 to 2008?
  # (They have.) Sum emissions over all sources within each year.
  total_emissions_by_year <<- aggregate(Emissions ~ year, NEI, sum)
  # Render the base-graphics bar chart straight to a PNG device.
  png("plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
  with(total_emissions_by_year, {
    barplot(
      Emissions / 10^6,  # scale to millions of tons so the axis is readable
      names.arg = year,
      xlab = "Year",
      ylab = "PM2.5 Emissions (10^6 Tons)",
      main = "Total PM2.5 Emissions From All US Sources"
    )
  })
  # Close the device so the file is flushed to disk.
  dev.off()
}
# Plot 3: PM2.5 emissions in Baltimore City (fips == "24510"), totalled
# by year within each source type. Assumes the working directory is "C:/".

# Download and unpack the raw NEI data.
con <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(con, "C:/dataset.zip")
unzip("C:/dataset.zip", exdir = "C:/dataset")
list.files("C:/dataset")

# Load the emissions records and the source classification codes.
NEI <- readRDS("C:/dataset/summarySCC_PM25.rds")
SCC <- readRDS("C:/dataset/Source_Classification_Code.rds")

# year and type are discrete, so treat them as factors.
NEI <- transform(NEI, year = factor(year), type = factor(type))

# Keep only Baltimore City, Maryland.
NEI2 <- subset(NEI, fips == "24510")

# Total emissions per year within each source type.
library(dplyr)
NEI2 <- group_by(NEI2, year, type)
NEI2 <- summarize(NEI2, Emissions = sum(Emissions, na.rm = TRUE))

# One faceted bar panel per source type (point, nonpoint, onroad,
# nonroad) across 1999, 2002, 2005 and 2008.
library(ggplot2)
g <- ggplot(NEI2, aes(year, Emissions))
p <- g +
  facet_grid(. ~ type) +
  geom_bar(aes(fill = type), stat = "identity") +
  labs(title = "PM2.5 emission by source type in Baltimore City")
print(p)

# Copy the on-screen plot to a PNG file, then close the device.
dev.copy(png, file = "C:/plot3.png")
dev.off()
| /Exploratory Data Analysis/Fine particulate matter emissions/plot3.r | no_license | s-mnt/repo-beg | R | false | false | 1,126 | r | # Working directory set as "C:/"
# Plot 3: PM2.5 emissions in Baltimore City (fips == "24510"), totalled
# by year within each source type. Assumes the working directory is "C:/".

# Download and unpack the raw NEI data.
con <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(con, "C:/dataset.zip")
unzip("C:/dataset.zip", exdir = "C:/dataset")
list.files("C:/dataset")

# Load the emissions records and the source classification codes.
NEI <- readRDS("C:/dataset/summarySCC_PM25.rds")
SCC <- readRDS("C:/dataset/Source_Classification_Code.rds")

# year and type are discrete, so treat them as factors.
NEI <- transform(NEI, year = factor(year), type = factor(type))

# Keep only Baltimore City, Maryland.
NEI2 <- subset(NEI, fips == "24510")

# Total emissions per year within each source type.
library(dplyr)
NEI2 <- group_by(NEI2, year, type)
NEI2 <- summarize(NEI2, Emissions = sum(Emissions, na.rm = TRUE))

# One faceted bar panel per source type (point, nonpoint, onroad,
# nonroad) across 1999, 2002, 2005 and 2008.
library(ggplot2)
g <- ggplot(NEI2, aes(year, Emissions))
p <- g +
  facet_grid(. ~ type) +
  geom_bar(aes(fill = type), stat = "identity") +
  labs(title = "PM2.5 emission by source type in Baltimore City")
print(p)

# Copy the on-screen plot to a PNG file, then close the device.
dev.copy(png, file = "C:/plot3.png")
dev.off()
|
# Post-process ALRA-imputed matrices: transpose each imputed matrix and
# restore the cell (column) names from the raw gene-by-cell matrix, then
# save the result into the procimpute directory under the same file name.
allf <- sub('.rds','',list.files('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/impute/10xcellline/alra'))
res <- local({
  # The raw matrix is only needed for its column names; read it once here
  # instead of once per imputed file (the readRDS was loop-invariant).
  # local() keeps `d` out of the global environment, as before.
  d <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/data/processed/10x/cellline/hg19/genebycell.rds')
  lapply(allf, function(f) {
    print(f)  # progress marker
    sexpr <- t(readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/impute/10xcellline/alra/', f, '.rds')))
    colnames(sexpr) <- colnames(d)
    saveRDS(sexpr, file = paste0("/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/10xcellline/alra/", f, '.rds'))
  })
})
| /code/procimpute/10xcellline/alra.R | no_license | wangdi2016/imputationBenchmark | R | false | false | 577 | r | allf <- sub('.rds','',list.files('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/impute/10xcellline/alra'))
res <- lapply(allf,function(f) {
print(f)
sexpr <- t(readRDS(paste0('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/impute/10xcellline/alra/',f,'.rds')))
d <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/data/processed/10x/cellline/hg19/genebycell.rds')
colnames(sexpr) = colnames(d)
saveRDS(sexpr,file=paste0("/home-4/whou10@jhu.edu/scratch/Wenpin/rna_imputation/result/procimpute/10xcellline/alra/",f,'.rds'))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solar_calculations.R
\name{solar_sunset_angle}
\alias{solar_sunset_angle}
\title{Calculate solar sunset angles for a given latitude}
\usage{
solar_sunset_angle(dec, lat = 0)
}
\arguments{
\item{dec}{numeric, solar declination values in radians. One or multiple values allowed}
\item{lat}{numeric, latitude in radians. Only a single value allowed}
}
\value{
A numeric vector of solar sunset angles, one element per value of \code{dec}.
}
\description{
Calculates solar sunset angles for a given latitude. The input values have to be in radians! Convert from degree to radians by multiplication with pi/180
As default a latitude value of 0 is used.
}
\examples{
declination <- solar_declination(1:365)
angle <- solar_sunset_angle(dec = declination, lat = 0.8138967)
}
\references{
\url{https://www.e-education.psu.edu/eme810/}
}
| /man/solar_sunset_angle.Rd | no_license | sitscholl/Rgadgets | R | false | true | 829 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solar_calculations.R
\name{solar_sunset_angle}
\alias{solar_sunset_angle}
\title{Calculate solar sunset angles for a given latitude}
\usage{
solar_sunset_angle(dec, lat = 0)
}
\arguments{
\item{dec}{numeric, solar declination values in radians. One or multiple values allowed}
\item{lat}{numeric, latitude in radians. Only a single value allowed}
}
\value{
A numeric vector of solar sunset angles, one element per value of \code{dec}.
}
\description{
Calculates solar sunset angles for a given latitude. The input values have to be in radians! Convert from degree to radians by multiplication with pi/180
As default a latitude value of 0 is used.
}
\examples{
declination <- solar_declination(1:365)
angle <- solar_sunset_angle(dec = declination, lat = 0.8138967)
}
\references{
\url{https://www.e-education.psu.edu/eme810/}
}
|
\name{multinominal2logical}
\alias{multinominal2logical}
\title{Convert multinominal to logical matrix}
\description{Convert a single multinominal, site-level attribute from a SoilProfileCollection into a matrix of corresponding logical values. The result contains IDs from the SoilProfileCollection and can easily be joined to the original site-level data.}
\usage{
multinominal2logical(x, v)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a \code{SoilProfileCollection} object}
\item{v}{the name of a site-level attribute that is a factor with more than 2 levels}
}
\value{A \code{data.frame} with IDs in the first column, and as many columns of logical vectors as there were levels in \code{v}. See examples.}
\author{D.E. Beaudette}
\seealso{\code{\link{diagnosticPropertyPlot}}}
\examples{
\donttest{
if(require(soilDB) &
require(aqp) &
require(latticeExtra)) {
# sample data, an SPC
data(loafercreek, package='soilDB')
# convert to logical matrix
hp <- multinominal2logical(loafercreek, 'hillslopeprof')
# join-in to site data
site(loafercreek) <- hp
# variable names
v <- c('lithic.contact', 'paralithic.contact',
'argillic.horizon', 'Toeslope', 'Footslope',
'Backslope', 'Shoulder', 'Summit')
# visualize with some other diagnostic features
x <- diagnosticPropertyPlot(loafercreek, v, k=5,
grid.label='bedrckkind', dend.label='pedon_id')
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}
| /man/multinominal2logical.Rd | no_license | trilnick/sharpshootR | R | false | false | 1,582 | rd | \name{multinominal2logical}
\alias{multinominal2logical}
\title{Convert multinominal to logical matrix}
\description{Convert a single multinominal, site-level attribute from a SoilProfileCollection into a matrix of corresponding logical values. The result contains IDs from the SoilProfileCollection and can easily be joined to the original site-level data.}
\usage{
multinominal2logical(x, v)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a \code{SoilProfileCollection} object}
\item{v}{the name of a site-level attribute that is a factor with more than 2 levels}
}
\value{A \code{data.frame} with IDs in the first column, and as many columns of logical vectors as there were levels in \code{v}. See examples.}
\author{D.E. Beaudette}
\seealso{\code{\link{diagnosticPropertyPlot}}}
\examples{
\donttest{
if(require(soilDB) &
require(aqp) &
require(latticeExtra)) {
# sample data, an SPC
data(loafercreek, package='soilDB')
# convert to logical matrix
hp <- multinominal2logical(loafercreek, 'hillslopeprof')
# join-in to site data
site(loafercreek) <- hp
# variable names
v <- c('lithic.contact', 'paralithic.contact',
'argillic.horizon', 'Toeslope', 'Footslope',
'Backslope', 'Shoulder', 'Summit')
# visualize with some other diagnostic features
x <- diagnosticPropertyPlot(loafercreek, v, k=5,
grid.label='bedrckkind', dend.label='pedon_id')
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}
|
# Yield-curve usage examples: for each curve, find the most recent
# observation date, then print the full curve for that date.
library(DBI)
library(plutoDbR)
library(plutoR)
library(tidyverse)
options("scipen"=999)
source("../R/config.R")

yc <- YieldCurve()

# Latest India zero-coupon bond yields, ordered by maturity.
endDt <- (yc$IndiaZeroCoupon() %>%
            summarize(MAX_TS = max(TIME_STAMP)) %>%
            collect())$MAX_TS[1]
yc$IndiaZeroCoupon() %>%
  filter(TIME_STAMP == endDt) %>%
  arrange(MATURITY) %>%
  print(n = Inf)

# Latest US Treasury yield curve.
endDt <- (yc$UsTreasury() %>%
            summarize(MAX_TS = max(TIME_STAMP)) %>%
            collect())$MAX_TS[1]
yc$UsTreasury() %>%
  filter(TIME_STAMP == endDt) %>%
  print()

# Latest euro-area yield curve: self-join so the G_N_A and G_N_C curves
# appear side by side for each tenor.
endDt <- (yc$EuroArea() %>%
            summarize(MAX_TS = max(TIME_STAMP)) %>%
            collect())$MAX_TS[1]
yc$EuroArea() %>%
  inner_join(yc$EuroArea(), by=c('TENOR_Y', 'TENOR_M', 'TIME_STAMP')) %>%
  filter(TIME_STAMP == endDt & CURVE_ID.x == 'G_N_A' & CURVE_ID.y == 'G_N_C') %>%
  select(TENOR_Y, TENOR_M, GNA = VALUE.x, GNC = VALUE.y) %>%
  arrange(TENOR_Y, TENOR_M) %>%
  print(n = Inf)
| /examples/YieldCurve.R | no_license | shyams80/plutoR | R | false | false | 1,024 | r | library(DBI)
library(plutoDbR)
library(plutoR)
library(tidyverse)
options("scipen"=999)
source("../R/config.R")
yc <- YieldCurve()
#fetch the latest India Zero Coupon Bond yields
endDt <- (yc$IndiaZeroCoupon() %>%
summarize(MAX_TS = max(TIME_STAMP)) %>%
collect())$MAX_TS[1]
yc$IndiaZeroCoupon() %>%
filter(TIME_STAMP == endDt) %>%
arrange(MATURITY) %>%
print(n=Inf)
#fetch the latest US Treasury Yield Curve
endDt <- (yc$UsTreasury() %>%
summarize(MAX_TS = max(TIME_STAMP)) %>%
collect())$MAX_TS[1]
yc$UsTreasury() %>%
filter(TIME_STAMP == endDt) %>%
print()
#fetch the latest Euro area yield curve
endDt <- (yc$EuroArea() %>%
summarize(MAX_TS = max(TIME_STAMP)) %>%
collect())$MAX_TS[1]
yc$EuroArea() %>%
inner_join(yc$EuroArea(), by=c('TENOR_Y', 'TENOR_M', 'TIME_STAMP')) %>%
filter(TIME_STAMP == endDt & CURVE_ID.x == 'G_N_A' & CURVE_ID.y == 'G_N_C') %>%
select(TENOR_Y, TENOR_M, GNA = VALUE.x, GNC = VALUE.y) %>%
arrange(TENOR_Y, TENOR_M) %>%
print(n=Inf)
|
#' Method creating a report from an `character` object representing a file name of a metadata spreadsheet
#'
#' @details
#' **`report.character`** creates a report of the object returnes from a `validate()`.
#'
#' @export
#' @md
#' @examples
#' ## Report of `dmdScheme_validation`
#' report( scheme_path_xlsx() )
#'
#' @describeIn report report of a `dmdScheme_validation` object.
#'
#'
report.character <- function(
x,
file = tempfile(),
open = TRUE,
report = "html",
report_author = "Tester",
report_title = "Validation of data against dmdScheme",
...
) {
if (!file.exists(x)) {
stop("If x is a character, it has to be the file name of a spreadsheet containing the scheme data!")
}
# Read and convert x ------------------------------------------------------
result <- report(
validate(x, ...),
file = file,
open = open,
report = report,
report_author = report_author,
report_title = report_title
)
# Return xml --------------------------------------------------------------
return(result)
}
| /R/report.character.R | permissive | krlmlr/dmdScheme | R | false | false | 1,051 | r | #' Method creating a report from an `character` object representing a file name of a metadata spreadsheet
#'
#' @details
#' **`report.character`** creates a report of the object returnes from a `validate()`.
#'
#' @export
#' @md
#' @examples
#' ## Report of `dmdScheme_validation`
#' report( scheme_path_xlsx() )
#'
#' @describeIn report report of a `dmdScheme_validation` object.
#'
#'
report.character <- function(
x,
file = tempfile(),
open = TRUE,
report = "html",
report_author = "Tester",
report_title = "Validation of data against dmdScheme",
...
) {
if (!file.exists(x)) {
stop("If x is a character, it has to be the file name of a spreadsheet containing the scheme data!")
}
# Read and convert x ------------------------------------------------------
result <- report(
validate(x, ...),
file = file,
open = open,
report = report,
report_author = report_author,
report_title = report_title
)
# Return xml --------------------------------------------------------------
return(result)
}
|
# Tell Syberia that not having model tests is okay.
# Resources under these paths are exempt from the otherwise-required
# test files when running in the test environment.
optional_tests <- c("models", "lib/mungebits/factorize_single_valued_vars")
| /config/environments/test.R | permissive | syberia/examples | R | false | false | 128 | r | # Tell Syberia that not having model tests is okay.
optional_tests <- c("models", "lib/mungebits/factorize_single_valued_vars")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/damage.R
\name{damage}
\alias{damage}
\title{Current HP damage modifier}
\usage{
damage(which.character, damage)
}
\arguments{
\item{which.character}{The DnD character who is being hurt}
\item{damage}{The amount the character's health is to decrease by}
}
\value{
Will return the character with the new health status
}
\description{
Will allow for the DnD character's current HP to be automatically updated. No assignment is neccessary. This is a generic method with two functions, one for a DnD class object and one for a non-DnD class object.
}
\seealso{
\code{\link{damage.DnD}}
\code{\link{damage.default}}
}
| /man/damage.Rd | no_license | vinny-paris/DnD | R | false | true | 690 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/damage.R
\name{damage}
\alias{damage}
\title{Current HP damage modifier}
\usage{
damage(which.character, damage)
}
\arguments{
\item{which.character}{The DnD character who is being hurt}
\item{damage}{The amount the character's health is to decrease by}
}
\value{
Will return the character with the new health status
}
\description{
Will allow for the DnD character's current HP to be automatically updated. No assignment is neccessary. This is a generic method with two functions, one for a DnD class object and one for a non-DnD class object.
}
\seealso{
\code{\link{damage.DnD}}
\code{\link{damage.default}}
}
|
### Calculating cumulative sum of change metrics
### Author: Keivn Wilcox (wilcoxkr@gmail.com)
###
### Last updated March 21, 2018

### Set up workspace
library(tidyverse)
library(ggthemes)
library(RColorBrewer)
setwd("C:\\Users\\wilco\\Dropbox\\C2E\\Products\\CommunityChange\\March2018 WG\\")

### Read in data
change_metrics_bayes <- read.csv("CORRE_RACS_Subset_Perm.csv")

### Running totals of each change metric within every plot
### (site_project_comm x treatment x plot_id); richness and evenness
### change also get absolute-value versions before accumulating, and a
### control/treatment flag is derived from plot_mani.
### NOTE(review): cumsum() assumes rows are already ordered by year
### within each plot -- confirm the input file is sorted that way.
change_cumsum <- change_metrics_bayes %>%
  group_by(site_project_comm, treatment, plot_id) %>%
  mutate(richness_change_abs = abs(richness_change),
         evenness_change_abs = abs(evenness_change)) %>%
  # mutate_at()/funs() are superseded in newer dplyr; kept for behaviour.
  mutate_at(vars(richness_change, richness_change_abs, evenness_change,
                 evenness_change_abs, rank_change, gains, losses),
            funs(cumsum)) %>%
  mutate(control = ifelse(plot_mani == 0, "control", "treatment"))
### Plots: cumulative change metrics vs. treatment year, points plus a
### smoother, with control vs. treatment distinguished by colour.
## cumulative |richness change| (no site facet on this one)
absrich_plot <- ggplot(change_cumsum,
                       aes(x = treatment_year2, y = richness_change_abs,
                           group = treatment)) +
  geom_point(aes(col = control), pch = 1) +
  geom_smooth(aes(col = control), se = F) +
  theme_few() +
  theme(legend.position = "none")
## cumulative richness change, one panel per site/project/community
rich_plot <- ggplot(change_cumsum,
                    aes(x = treatment_year2, y = richness_change,
                        group = treatment)) +
  geom_point(aes(col = control), pch = 1) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative evenness change
## NOTE(review): this panel uses the signed change, while the
## trendline-only version further down uses the absolute value --
## confirm which is intended.
even_plot <- ggplot(change_cumsum,
                    aes(x = treatment_year2, y = evenness_change,
                        group = treatment)) +
  geom_point(aes(col = control), pch = 1) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative rank change
rank_plot <- ggplot(change_cumsum,
                    aes(x = treatment_year2, y = rank_change,
                        group = treatment)) +
  geom_point(aes(col = control), pch = 1) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative gains
gains_plot <- ggplot(change_cumsum,
                     aes(x = treatment_year2, y = gains, group = treatment)) +
  geom_point(aes(col = control), pch = 1) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative losses
losses_plot <- ggplot(change_cumsum,
                      aes(x = treatment_year2, y = losses, group = treatment)) +
  geom_point(aes(col = control), pch = 1) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## Save each figure as a date-stamped 11x8 in, 600 dpi PNG.
png(paste0("figures\\absrich cumsum plot_", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(absrich_plot)
dev.off()
png(paste0("figures\\rich cumsum plot_", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(rich_plot)
dev.off()
png(paste0("figures\\evenness cumsum plot_", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(even_plot)
dev.off()
png(paste0("figures\\rank cumsum plot_", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(rank_plot)
dev.off()
png(paste0("figures\\gains cumsum plot_", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(gains_plot)
dev.off()
png(paste0("figures\\losses cumsum plot_", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(losses_plot)
dev.off()
### Same six figures, but trendlines only (no points).
## cumulative |richness change| (faceted here, unlike absrich_plot above)
absrich_plot2 <- ggplot(change_cumsum,
                        aes(x = treatment_year2, y = richness_change_abs,
                            group = treatment)) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative richness change
rich_plot2 <- ggplot(change_cumsum,
                     aes(x = treatment_year2, y = richness_change,
                         group = treatment)) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative |evenness change| (absolute value, unlike even_plot above)
even_plot2 <- ggplot(change_cumsum,
                     aes(x = treatment_year2, y = evenness_change_abs,
                         group = treatment)) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative rank change
rank_plot2 <- ggplot(change_cumsum,
                     aes(x = treatment_year2, y = rank_change,
                         group = treatment)) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative gains
gains_plot2 <- ggplot(change_cumsum,
                      aes(x = treatment_year2, y = gains, group = treatment)) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## cumulative losses
losses_plot2 <- ggplot(change_cumsum,
                       aes(x = treatment_year2, y = losses, group = treatment)) +
  geom_smooth(aes(col = control)) +
  facet_wrap(~site_project_comm, scales = "free") +
  theme_few() +
  theme(legend.position = "none")
## Save the trendline-only versions (note: no separator between the
## "perm" tag and the date stamp, matching the original file names).
png(paste0("figures\\absrich cumsum plot_trendlines only_perm", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(absrich_plot2)
dev.off()
png(paste0("figures\\rich cumsum plot_trendlines only_perm", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(rich_plot2)
dev.off()
png(paste0("figures\\evenness cumsum plot_trendlines only_perm", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(even_plot2)
dev.off()
png(paste0("figures\\rank cumsum plot_trendlines only_perm", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(rank_plot2)
dev.off()
png(paste0("figures\\gains cumsum plot_trendlines only_perm", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(gains_plot2)
dev.off()
png(paste0("figures\\losses cumsum plot_trendlines only_perm", Sys.Date(), ".png"),
    width = 11, height = 8, units = "in", res = 600)
print(losses_plot2)
dev.off()
| /Community Paper/Calculating cumsum differences.R | no_license | klapierre/C2E | R | false | false | 5,883 | r | ### Calculating cumulative sum of change metrics
### Author: Keivn Wilcox (wilcoxkr@gmail.com)
###
### Last updated March 21, 2018
### Set up workspace
library(tidyverse)
library(ggthemes)
library(RColorBrewer)
setwd("C:\\Users\\wilco\\Dropbox\\C2E\\Products\\CommunityChange\\March2018 WG\\")
### Read in data
change_metrics_bayes <- read.csv("CORRE_RACS_Subset_Perm.csv")
### Calculate cumulative sums of each metric
change_cumsum <- change_metrics_bayes %>%
group_by(site_project_comm, treatment, plot_id) %>%
mutate(richness_change_abs = abs(richness_change)) %>%
mutate(evenness_change_abs = abs(evenness_change)) %>%
mutate_at(vars(richness_change, richness_change_abs, evenness_change,evenness_change_abs, rank_change, gains, losses), funs(cumsum) ) %>%
mutate(control = ifelse(plot_mani==0,"control","treatment"))
### plot - faceted by site_project_comm and color by treatment
## absolute value of richness change
absrich_plot <- ggplot(change_cumsum, aes(x=treatment_year2, y=richness_change_abs, group=treatment)) +
geom_point(aes(col=control),pch=1) +
geom_smooth(aes(col=control),se=F) +
# facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## richness change
rich_plot <- ggplot(change_cumsum, aes(x=treatment_year2, y=richness_change, group=treatment)) +
geom_point(aes(col=control),pch=1) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## evenness change
even_plot <- ggplot(change_cumsum, aes(x=treatment_year2, y=evenness_change, group=treatment)) +
geom_point(aes(col=control),pch=1) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## rank change
rank_plot <- ggplot(change_cumsum, aes(x=treatment_year2, y=rank_change, group=treatment)) +
geom_point(aes(col=control),pch=1) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## gains change
gains_plot <- ggplot(change_cumsum, aes(x=treatment_year2, y=gains, group=treatment)) +
geom_point(aes(col=control),pch=1) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## losses change
losses_plot <- ggplot(change_cumsum, aes(x=treatment_year2, y=losses, group=treatment)) +
geom_point(aes(col=control),pch=1) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
png(paste0("figures\\absrich cumsum plot_", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(absrich_plot)
dev.off()
png(paste0("figures\\rich cumsum plot_", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(rich_plot)
dev.off()
png(paste0("figures\\evenness cumsum plot_", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(even_plot)
dev.off()
png(paste0("figures\\rank cumsum plot_", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(rank_plot)
dev.off()
png(paste0("figures\\gains cumsum plot_", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(gains_plot)
dev.off()
png(paste0("figures\\losses cumsum plot_", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(losses_plot)
dev.off()
### plotting just trendlines
## absolute value of richness change
absrich_plot2 <- ggplot(change_cumsum, aes(x=treatment_year2, y=richness_change_abs, group=treatment)) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## richness change
rich_plot2 <- ggplot(change_cumsum, aes(x=treatment_year2, y=richness_change, group=treatment)) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## evenness change
even_plot2 <- ggplot(change_cumsum, aes(x=treatment_year2, y=evenness_change_abs, group=treatment)) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## rank change
rank_plot2 <- ggplot(change_cumsum, aes(x=treatment_year2, y=rank_change, group=treatment)) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## gains change
gains_plot2 <- ggplot(change_cumsum, aes(x=treatment_year2, y=gains, group=treatment)) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
## losses change
losses_plot2 <- ggplot(change_cumsum, aes(x=treatment_year2, y=losses, group=treatment)) +
geom_smooth(aes(col=control)) +
facet_wrap(~site_project_comm, scales="free") +
theme_few() +
theme(legend.position="none")
png(paste0("figures\\absrich cumsum plot_trendlines only_perm", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(absrich_plot2)
dev.off()
png(paste0("figures\\rich cumsum plot_trendlines only_perm", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(rich_plot2)
dev.off()
png(paste0("figures\\evenness cumsum plot_trendlines only_perm", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(even_plot2)
dev.off()
png(paste0("figures\\rank cumsum plot_trendlines only_perm", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(rank_plot2)
dev.off()
png(paste0("figures\\gains cumsum plot_trendlines only_perm", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(gains_plot2)
dev.off()
png(paste0("figures\\losses cumsum plot_trendlines only_perm", Sys.Date(),".png"), width=11, height=8, units="in", res=600)
print(losses_plot2)
dev.off()
|
or_table <- corona_mental %>% filter(Indicator == "Symptoms of Anxiety Disorder or Depressive Disorder") %>% select(-Indicator) %>% filter(Group != "By State")
# Depression or Anxiety
or_nation_table <- or_table %>% filter(Group == "National Estimate") %>% select(-Group, -Subgroup, -Low.CI, -High.CI)# Dividing the or table by all of the relevant indicators
or_age_table <- or_table %>% filter(Group == "By Age") %>% select(-Group, -Low.CI, -High.CI)
or_sex_table <- or_table %>% filter(Group == "By Gender") %>% select(-Group, -Low.CI, -High.CI)
or_race_table <- or_table %>% filter(Group == "By Race/Hispanic ethnicity") %>% select(-Group, -Low.CI, -High.CI)
or_ed_table <- or_table %>% filter(Group == "By Education") %>% select(-Group, -Low.CI, -High.CI)
shapiro.test(or_age_table$Value)
shapiro.test(or_sex_table$Value)
shapiro.test(or_race_table$Value)
shapiro.test(or_ed_table$Value)
or_age_table2 <- or_age_table %>% spread(key = Subgroup, value = Value) %>% select(-Week) #Mutated dataset for multivariate analysis
or_sex_table2 <- or_sex_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_race_table2 <- or_race_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_ed_table2 <- or_ed_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
#or_sta_table2 <- or_sta_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
shapiro.test(or_age_table2$`18 - 29 years`)
shapiro.test(or_age_table2$`30 - 39 years`)
shapiro.test(or_age_table2$`40 - 49 years`)
shapiro.test(or_age_table2$`50 - 59 years`)
shapiro.test(or_age_table2$`60 - 69 years`)
shapiro.test(or_age_table2$`70 - 79 years`)
shapiro.test(or_age_table2$`80 years and above`)
shapiro.test(or_sex_table2$Female)
shapiro.test(or_sex_table2$Male)
shapiro.test(or_race_table2$`Hispanic or Latino`)
shapiro.test(or_race_table2$`Non-Hispanic Asian, single race`)
shapiro.test(or_race_table2$`Non-Hispanic black, single race`)
shapiro.test(or_race_table2$`Non-Hispanic white, single race`)
shapiro.test(or_race_table2$`Non-Hispanic, other races and multiple races`)
shapiro.test(or_ed_table2$`Bachelor's degree or higher`)
shapiro.test(or_ed_table2$`High school diploma or GED`)
shapiro.test(or_ed_table2$`Less than a high school diploma`)
shapiro.test(or_ed_table2$`Some college/Associate's degree`)
describe(or_nation_table$Value)#describe the combined dataset
describe(or_age_table$Value)
describe(or_sex_table$Value)
describe(or_race_table$Value)
describe(or_ed_table$Value)
or_age_box <- ggplot(or_age_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Age", x = "Age Groups", y = "Population Percents")#both boxplots
or_sex_box <- ggplot(or_sex_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Sex", x = "Sex Groups", y = "Population Percents")
or_race_box <- ggplot(or_race_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Race", x = "Race Groups", y = "Population Percents")
or_ed_box <- ggplot(or_ed_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Education", x = "Education Groups", y = "Population Percents")
ggarrange(or_age_box, or_sex_box, or_race_box, or_ed_box, ncol = 2, nrow = 2)
ggplot(or_age_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Age", x = "Percentages", y = "Percentage Count")#Both histograms
ggplot(or_sex_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Sex", x = "Percentages", y = "Percentage Count")
ggplot(or_race_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Race", x = "Percentages", y = "Percentage Count")
ggplot(or_ed_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Education", x = "Percentages", y = "Percentage Count")
or_age_scatter <- ggplot(or_age_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Age", x = "Week", y = "Percentage") #Both Scatter
or_sex_scatter <- ggplot(or_sex_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Sex", x = "Week", y = "Percentage")
or_race_scatter <- ggplot(or_race_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Race", x = "Week", y = "Percentage")
or_ed_scatter <- ggplot(or_ed_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Education", x = "Week", y = "Percentage")
#ggplot(or_nation_table, aes(Week, Value)) + geom_point() + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time Nationally", x = "Week", y = "Percentage")
ggarrange(or_age_scatter, or_sex_scatter, or_race_scatter, or_ed_scatter, labels = c("Age", "Sex", "Race", "Education"), ncol = 2, nrow = 2)
or_nat_model <- glm(Value ~ Week, data = or_nation_table)
summary(or_nat_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_nat_model)
or_nat_model <- lm(Value ~ Week, data = or_nation_table)
summary(or_nat_model)
confint(or_nat_model, level = 0.95)
or_age_model <- glm(Value ~ Week + Subgroup, data = or_age_table)
summary(or_age_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_age_model)
or_age_model <- lm(Value ~ Week + Subgroup, data = or_age_table)
summary(or_age_model)
confint(or_age_model, level = 0.95)
or_sex_model <- lm(Value ~ Week + Subgroup, data = or_sex_table)
summary(or_sex_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_sex_model)
or_sex_model <- lm(Value ~ Week + Subgroup, data = or_sex_table)
summary(or_sex_model)
confint(or_sex_model, level = 0.95)
or_race_model <- glm(Value ~ Week + Subgroup, data = or_race_table)
summary(or_race_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_race_model)
or_race_model <- lm(Value ~ Week + Subgroup, data = or_race_table)
summary(or_race_model)
confint(or_race_model, level = 0.95)
or_ed_model <- glm(Value ~ Week + Subgroup, data = or_ed_table)
summary(or_ed_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_ed_model)
or_ed_model <- lm(Value ~ Week + Subgroup, data = or_ed_table)
summary(or_ed_model)
confint(or_ed_model, level = 0.95)
lrtest(or_nat_model, or_age_model)
lrtest(or_nat_model, or_sex_model)
lrtest(or_nat_model, or_race_model)
lrtest(or_nat_model, or_ed_model)
or_age_table2 <- or_age_table %>% spread(key = Subgroup, value = Value) %>% select(-Week) #Mutated dataset for multivariate analysis
or_sex_table2 <- or_sex_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_race_table2 <- or_race_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_ed_table2 <- or_ed_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
describe(or_age_table2)#describe the combined dataset
describe(or_sex_table2)
describe(or_race_table2)
describe(or_ed_table2)
ols_test_bartlett(or_age_table2, '18 - 29 years', '30 - 39 years', '40 - 49 years', '50 - 59 years', '60 - 69 years', '70 - 79 years', '80 years and above')#heteroskedaciticity across combined variables
ols_test_bartlett(or_sex_table2, 'Male', 'Female')
ols_test_bartlett(or_race_table2, 'Hispanic or Latino', 'Non-Hispanic white, single race', 'Non-Hispanic black, single race', 'Non-Hispanic Asian, single race', 'Non-Hispanic, other races and multiple races')
ols_test_bartlett(or_ed_table2, 'Less than a high school diploma', 'High school diploma or GED', 'Some college/Associate\'s degree', 'Bachelor\'s degree or higher')
T2.test(or_age_table2)# Hotelling's T2 test
T2.test(or_sex_table2)
T2.test(or_race_table2)
T2.test(or_ed_table2)
cor(or_age_table2)
cor(or_sex_table2)
cor(or_race_table2)
cor(or_ed_table2)
cov(or_age_table2)
cov(or_sex_table2)
cov(or_race_table2)
cov(or_ed_table2)
#eigen(or_age_table2)
chisq.test(or_age_table2)
chisq.test(or_sex_table2)
chisq.test(or_race_table2)
chisq.test(or_ed_table2)
#confint(or_age_table2)
anova(or_age_model)
aov(Value ~ Week + Subgroup, data = or_age_table)
summary(aov(Value ~ Week + Subgroup, data = or_age_table))
kruskal.test(anova(or_age_model))
anova(or_sex_model)
aov(Value ~ Week + Subgroup, data = or_sex_table)
summary(aov(Value ~ Week + Subgroup, data = or_sex_table))
kruskal.test(anova(or_sex_model))
anova(or_race_model)
aov(Value ~ Week + Subgroup, data = or_race_table)
summary(aov(Value ~ Week + Subgroup, data = or_race_table))
kruskal.test(anova(or_race_model))
anova(or_ed_model)
aov(Value ~ Week + Subgroup, data = or_ed_table)
summary(aov(Value ~ Week + Subgroup, data = or_ed_table))
kruskal.test(anova(or_ed_model))
#testCov(or_age_table2$`18 - 29 years`, or_age_table2$`30 - 39 years`, method = "ALL", J = 100)
#or_age_table2a <- or_age_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week) #Mutated dataset for multivariate analysis
#or_sex_table2a <- or_sex_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week)
#or_race_table2a <- or_race_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week)
#or_ed_table2a <- or_ed_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week)
#dfa <- lda(Subgroup ~ Week + Value, data = or_age_table)
or_age_cor <- cor(or_age_table2) #correlation matricies
or_sex_cor <- cor(or_sex_table2)
or_race_cor <- cor(or_race_table2)
or_ed_cor <- cor(or_ed_table2)
pca <- princomp(or_age_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
pca2 <- pca$scores[,2]
or_age_cor2 <- cbind(pca1, pca2, or_age_cor)
or_age_cor2 <- round(cor(or_age_cor2), digits=3)
or_age_cor2
pca <- princomp(or_sex_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
#pca2 <- pca$scores[,2]
or_sex_cor2 <- cbind(pca1, or_sex_cor)
or_sex_cor2 <- round(cor(or_sex_cor2), digits=3)
or_sex_cor2
pca <- princomp(or_race_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
pca2 <- pca$scores[,2]
or_race_cor2 <- cbind(pca1, pca2, or_race_cor)
or_race_cor2 <- round(cor(or_race_cor2), digits=3)
or_race_cor2
pca <- princomp(or_ed_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
pca2 <- pca$scores[,2]
or_ed_cor2 <- cbind(pca1, pca2, or_ed_cor)
or_ed_cor2 <- round(cor(or_ed_cor2), digits=3)
or_ed_cor2
#plot(pca1, pca2, type="n", main="Figure 5. Principal components plot, Bumpus data.", cex.axis=1.5, cex.lab=1.5)
#text(pca1, pca2, labels=survive)
#Cluster Analysis
or_age_table3 <- or_age_table2
#or_age_table3$Value <- or_age_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_age_table3) - 1) * sum(apply(or_age_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_age_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_age_table3, 3) # 4 cluster solution
# get cluster means
aggregate(or_age_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_age_table3, 5)
aggregate(or_age_table3, by = list(fit2$cluster), FUN = mean)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
clusplot(or_age_table3, fit1$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
clusplot(or_age_table3, fit2$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
#Cluster Analysis
or_sex_table3 <- or_sex_table2
#or_sex_table3$Value <- or_sex_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_sex_table3) - 1) * sum(apply(or_sex_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_sex_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_sex_table3, 3) # 4 cluster solution
# get cluster means
aggregate(or_sex_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_sex_table3, 5)
aggregate(or_sex_table3, by = list(fit2$cluster), FUN = mean)
#Cluster Analysis
or_race_table3 <- or_race_table2
#or_race_table3$Value <- or_race_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_race_table3) - 1) * sum(apply(or_race_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_race_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_race_table3, 3) # 4 cluster solution
# get cluster means
aggregate(or_race_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_race_table3, 5)
aggregate(or_race_table3, by = list(fit2$cluster), FUN = mean)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
clusplot(or_race_table3, fit1$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
clusplot(or_race_table3, fit2$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
#Cluster Analysis
or_ed_table3 <- or_ed_table2
#or_ed_table3$Value <- or_ed_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_ed_table3) - 1) * sum(apply(or_ed_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_ed_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_ed_table3, 3) # 4 cluster solution
# get cluster means
aggregate(or_ed_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_ed_table3, 5)
aggregate(or_ed_table3, by = list(fit2$cluster), FUN = mean)
ggarrange(age_clus1, age_clus2, ncol = 2, nrow = 1)
# Centroid Plot against 1st 2 discriminant functions
plotcluster(or_age_table3, fit1$cluster)
text(x=food$Red, y=food$White, labels=food$Country,col=grpMeat$cluster+1)
plotcluster(or_age_table3, fit2$cluster)
text(x=food$Red, y=food$White, labels=food$Country,col=grpMeat$cluster+1)
| /project_code_or.R | no_license | ericcartaya/CovidDepressionMultivariateStatEC2020 | R | false | false | 14,763 | r | or_table <- corona_mental %>% filter(Indicator == "Symptoms of Anxiety Disorder or Depressive Disorder") %>% select(-Indicator) %>% filter(Group != "By State")
# Depression or Anxiety
or_nation_table <- or_table %>% filter(Group == "National Estimate") %>% select(-Group, -Subgroup, -Low.CI, -High.CI)# Dividing the or table by all of the relevant indicators
or_age_table <- or_table %>% filter(Group == "By Age") %>% select(-Group, -Low.CI, -High.CI)
or_sex_table <- or_table %>% filter(Group == "By Gender") %>% select(-Group, -Low.CI, -High.CI)
or_race_table <- or_table %>% filter(Group == "By Race/Hispanic ethnicity") %>% select(-Group, -Low.CI, -High.CI)
or_ed_table <- or_table %>% filter(Group == "By Education") %>% select(-Group, -Low.CI, -High.CI)
shapiro.test(or_age_table$Value)
shapiro.test(or_sex_table$Value)
shapiro.test(or_race_table$Value)
shapiro.test(or_ed_table$Value)
or_age_table2 <- or_age_table %>% spread(key = Subgroup, value = Value) %>% select(-Week) #Mutated dataset for multivariate analysis
or_sex_table2 <- or_sex_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_race_table2 <- or_race_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_ed_table2 <- or_ed_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
#or_sta_table2 <- or_sta_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
shapiro.test(or_age_table2$`18 - 29 years`)
shapiro.test(or_age_table2$`30 - 39 years`)
shapiro.test(or_age_table2$`40 - 49 years`)
shapiro.test(or_age_table2$`50 - 59 years`)
shapiro.test(or_age_table2$`60 - 69 years`)
shapiro.test(or_age_table2$`70 - 79 years`)
shapiro.test(or_age_table2$`80 years and above`)
shapiro.test(or_sex_table2$Female)
shapiro.test(or_sex_table2$Male)
shapiro.test(or_race_table2$`Hispanic or Latino`)
shapiro.test(or_race_table2$`Non-Hispanic Asian, single race`)
shapiro.test(or_race_table2$`Non-Hispanic black, single race`)
shapiro.test(or_race_table2$`Non-Hispanic white, single race`)
shapiro.test(or_race_table2$`Non-Hispanic, other races and multiple races`)
shapiro.test(or_ed_table2$`Bachelor's degree or higher`)
shapiro.test(or_ed_table2$`High school diploma or GED`)
shapiro.test(or_ed_table2$`Less than a high school diploma`)
shapiro.test(or_ed_table2$`Some college/Associate's degree`)
describe(or_nation_table$Value)#describe the combined dataset
describe(or_age_table$Value)
describe(or_sex_table$Value)
describe(or_race_table$Value)
describe(or_ed_table$Value)
or_age_box <- ggplot(or_age_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Age", x = "Age Groups", y = "Population Percents")#both boxplots
or_sex_box <- ggplot(or_sex_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Sex", x = "Sex Groups", y = "Population Percents")
or_race_box <- ggplot(or_race_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Race", x = "Race Groups", y = "Population Percents")
or_ed_box <- ggplot(or_ed_table, aes(Subgroup, Value)) + geom_boxplot() + labs(title = "Both Depression and Anxiety By Education", x = "Education Groups", y = "Population Percents")
ggarrange(or_age_box, or_sex_box, or_race_box, or_ed_box, ncol = 2, nrow = 2)
ggplot(or_age_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Age", x = "Percentages", y = "Percentage Count")#Both histograms
ggplot(or_sex_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Sex", x = "Percentages", y = "Percentage Count")
ggplot(or_race_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Race", x = "Percentages", y = "Percentage Count")
ggplot(or_ed_table, aes(Value)) + geom_histogram(aes(color=Subgroup)) + labs(title = "Distributions of Percentages in Education", x = "Percentages", y = "Percentage Count")
or_age_scatter <- ggplot(or_age_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Age", x = "Week", y = "Percentage") #Both Scatter
or_sex_scatter <- ggplot(or_sex_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Sex", x = "Week", y = "Percentage")
or_race_scatter <- ggplot(or_race_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Race", x = "Week", y = "Percentage")
or_ed_scatter <- ggplot(or_ed_table, aes(Week, Value)) + geom_point(aes(color = Subgroup)) + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time By Education", x = "Week", y = "Percentage")
#ggplot(or_nation_table, aes(Week, Value)) + geom_point() + labs(title = "Progression of Both Depression and Anxiety Percentage Over Time Nationally", x = "Week", y = "Percentage")
ggarrange(or_age_scatter, or_sex_scatter, or_race_scatter, or_ed_scatter, labels = c("Age", "Sex", "Race", "Education"), ncol = 2, nrow = 2)
or_nat_model <- glm(Value ~ Week, data = or_nation_table)
summary(or_nat_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_nat_model)
or_nat_model <- lm(Value ~ Week, data = or_nation_table)
summary(or_nat_model)
confint(or_nat_model, level = 0.95)
or_age_model <- glm(Value ~ Week + Subgroup, data = or_age_table)
summary(or_age_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_age_model)
or_age_model <- lm(Value ~ Week + Subgroup, data = or_age_table)
summary(or_age_model)
confint(or_age_model, level = 0.95)
or_sex_model <- lm(Value ~ Week + Subgroup, data = or_sex_table)
summary(or_sex_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_sex_model)
or_sex_model <- lm(Value ~ Week + Subgroup, data = or_sex_table)
summary(or_sex_model)
confint(or_sex_model, level = 0.95)
or_race_model <- glm(Value ~ Week + Subgroup, data = or_race_table)
summary(or_race_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_race_model)
or_race_model <- lm(Value ~ Week + Subgroup, data = or_race_table)
summary(or_race_model)
confint(or_race_model, level = 0.95)
or_ed_model <- glm(Value ~ Week + Subgroup, data = or_ed_table)
summary(or_ed_model)
layout(matrix(c(1,2,3,4),2,2))
plot(or_ed_model)
or_ed_model <- lm(Value ~ Week + Subgroup, data = or_ed_table)
summary(or_ed_model)
confint(or_ed_model, level = 0.95)
lrtest(or_nat_model, or_age_model)
lrtest(or_nat_model, or_sex_model)
lrtest(or_nat_model, or_race_model)
lrtest(or_nat_model, or_ed_model)
or_age_table2 <- or_age_table %>% spread(key = Subgroup, value = Value) %>% select(-Week) #Mutated dataset for multivariate analysis
or_sex_table2 <- or_sex_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_race_table2 <- or_race_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
or_ed_table2 <- or_ed_table %>% spread(key = Subgroup, value = Value) %>% select(-Week)
describe(or_age_table2)#describe the combined dataset
describe(or_sex_table2)
describe(or_race_table2)
describe(or_ed_table2)
ols_test_bartlett(or_age_table2, '18 - 29 years', '30 - 39 years', '40 - 49 years', '50 - 59 years', '60 - 69 years', '70 - 79 years', '80 years and above')#heteroskedaciticity across combined variables
ols_test_bartlett(or_sex_table2, 'Male', 'Female')
ols_test_bartlett(or_race_table2, 'Hispanic or Latino', 'Non-Hispanic white, single race', 'Non-Hispanic black, single race', 'Non-Hispanic Asian, single race', 'Non-Hispanic, other races and multiple races')
ols_test_bartlett(or_ed_table2, 'Less than a high school diploma', 'High school diploma or GED', 'Some college/Associate\'s degree', 'Bachelor\'s degree or higher')
T2.test(or_age_table2)# Hotelling's T2 test
T2.test(or_sex_table2)
T2.test(or_race_table2)
T2.test(or_ed_table2)
cor(or_age_table2)
cor(or_sex_table2)
cor(or_race_table2)
cor(or_ed_table2)
cov(or_age_table2)
cov(or_sex_table2)
cov(or_race_table2)
cov(or_ed_table2)
#eigen(or_age_table2)
chisq.test(or_age_table2)
chisq.test(or_sex_table2)
chisq.test(or_race_table2)
chisq.test(or_ed_table2)
#confint(or_age_table2)
anova(or_age_model)
aov(Value ~ Week + Subgroup, data = or_age_table)
summary(aov(Value ~ Week + Subgroup, data = or_age_table))
kruskal.test(anova(or_age_model))
anova(or_sex_model)
aov(Value ~ Week + Subgroup, data = or_sex_table)
summary(aov(Value ~ Week + Subgroup, data = or_sex_table))
kruskal.test(anova(or_sex_model))
anova(or_race_model)
aov(Value ~ Week + Subgroup, data = or_race_table)
summary(aov(Value ~ Week + Subgroup, data = or_race_table))
kruskal.test(anova(or_race_model))
anova(or_ed_model)
aov(Value ~ Week + Subgroup, data = or_ed_table)
summary(aov(Value ~ Week + Subgroup, data = or_ed_table))
kruskal.test(anova(or_ed_model))
#testCov(or_age_table2$`18 - 29 years`, or_age_table2$`30 - 39 years`, method = "ALL", J = 100)
#or_age_table2a <- or_age_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week) #Mutated dataset for multivariate analysis
#or_sex_table2a <- or_sex_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week)
#or_race_table2a <- or_race_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week)
#or_ed_table2a <- or_ed_table %>% spread(key = Subgroup, value = Value)# %>% select(-Week)
#dfa <- lda(Subgroup ~ Week + Value, data = or_age_table)
or_age_cor <- cor(or_age_table2) #correlation matricies
or_sex_cor <- cor(or_sex_table2)
or_race_cor <- cor(or_race_table2)
or_ed_cor <- cor(or_ed_table2)
pca <- princomp(or_age_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
pca2 <- pca$scores[,2]
or_age_cor2 <- cbind(pca1, pca2, or_age_cor)
or_age_cor2 <- round(cor(or_age_cor2), digits=3)
or_age_cor2
pca <- princomp(or_sex_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
#pca2 <- pca$scores[,2]
or_sex_cor2 <- cbind(pca1, or_sex_cor)
or_sex_cor2 <- round(cor(or_sex_cor2), digits=3)
or_sex_cor2
pca <- princomp(or_race_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
pca2 <- pca$scores[,2]
or_race_cor2 <- cbind(pca1, pca2, or_race_cor)
or_race_cor2 <- round(cor(or_race_cor2), digits=3)
or_race_cor2
pca <- princomp(or_ed_table2, cor = TRUE)# Principal Components Analysis
summary(pca)
pca1 <- pca$scores[,1]
pca2 <- pca$scores[,2]
or_ed_cor2 <- cbind(pca1, pca2, or_ed_cor)
or_ed_cor2 <- round(cor(or_ed_cor2), digits=3)
or_ed_cor2
#plot(pca1, pca2, type="n", main="Figure 5. Principal components plot, Bumpus data.", cex.axis=1.5, cex.lab=1.5)
#text(pca1, pca2, labels=survive)
#Cluster Analysis
or_age_table3 <- or_age_table2
#or_age_table3$Value <- or_age_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_age_table3) - 1) * sum(apply(or_age_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_age_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_age_table3, 3) # 4 cluster solution
# get cluster means
aggregate(or_age_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_age_table3, 5)
aggregate(or_age_table3, by = list(fit2$cluster), FUN = mean)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
clusplot(or_age_table3, fit1$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
clusplot(or_age_table3, fit2$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
#Cluster Analysis
# Same elbow-plot / k-means workflow as the age table, applied to the sex table.
or_sex_table3 <- or_sex_table2
#or_sex_table3$Value <- or_sex_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_sex_table3) - 1) * sum(apply(or_sex_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_sex_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_sex_table3, 3) # 3 cluster solution
# get cluster means
aggregate(or_sex_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_sex_table3, 5)
aggregate(or_sex_table3, by = list(fit2$cluster), FUN = mean)
#Cluster Analysis
# Same workflow again for the race table, plus clusplot() visualisations.
or_race_table3 <- or_race_table2
#or_race_table3$Value <- or_race_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_race_table3) - 1) * sum(apply(or_race_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_race_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_race_table3, 3) # 3 cluster solution
# get cluster means
aggregate(or_race_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_race_table3, 5)
aggregate(or_race_table3, by = list(fit2$cluster), FUN = mean)
# Cluster Plot against 1st 2 principal components
# vary parameters for most readable graph
clusplot(or_race_table3, fit1$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
clusplot(or_race_table3, fit2$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
#Cluster Analysis
# Same elbow-plot / k-means workflow for the education table.
or_ed_table3 <- or_ed_table2
#or_ed_table3$Value <- or_ed_table$Value %>% scale()
# Determine number of clusters
clun <- (nrow(or_ed_table3) - 1) * sum(apply(or_ed_table3, 2, var))
for (i in 2:8)
{
clun[i] <- sum(kmeans(or_ed_table3, centers = i)$withinss)
}
plot(1:8, clun, type = "b", xlab = "Number of Clusters", ylab = "Within groups sum of squares")
# K-Means Cluster Analysis
fit1 <- kmeans(or_ed_table3, 3) # 3 cluster solution
# get cluster means
aggregate(or_ed_table3, by = list(fit1$cluster), FUN = mean)
# append cluster assignment
# K-Means Clustering with 5 clusters
fit2 <- kmeans(or_ed_table3, 5)
aggregate(or_ed_table3, by = list(fit2$cluster), FUN = mean)
# Side-by-side comparison of the two age clusterings.
# NOTE(review): age_clus1 / age_clus2 are not created in this section and
# ggarrange() comes from ggpubr -- confirm both exist earlier in the script,
# otherwise this call errors.
ggarrange(age_clus1, age_clus2, ncol = 2, nrow = 1)
# Centroid Plot against 1st 2 discriminant functions (fpc::plotcluster)
plotcluster(or_age_table3, fit1$cluster)
# Removed copy-paste leftovers from the European protein-consumption k-means
# tutorial: `food` and `grpMeat` are never defined in this analysis, so the
# text() calls below always failed with "object 'food' not found".
# text(x=food$Red, y=food$White, labels=food$Country,col=grpMeat$cluster+1)
plotcluster(or_age_table3, fit2$cluster)
# text(x=food$Red, y=food$White, labels=food$Country,col=grpMeat$cluster+1)
|
# Fuzzer-generated regression input: a deliberately malformed argument list
# (duplicate `id` entries, zero-length columns, a two-row person_id) for the
# unexported dexterMST:::mutate_booklet_score(), used to exercise its
# handling of degenerate input.
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = integer(0), item_score = integer(0), person_id = c(0L, 0L))
# Invoke the internal function with the fuzzed arguments via do.call().
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result) | /dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612727503-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 203 | r | testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = integer(0), item_score = integer(0), person_id = c(0L, 0L))
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{bin_kurtosis}
\alias{bin_kurtosis}
\title{bin_kurtosis}
\usage{
bin_kurtosis(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of success}
}
\value{
The kurtosis of the binomial distribution defined by \code{trials} and \code{prob}, assuming valid parameter values.
}
\description{
Computes the kurtosis of a binomial distribution with the given number of trials and probability of success.
}
\examples{
bin_kurtosis(trials = 5, prob = 0.5)
}
| /man/bin_kurtosis.Rd | no_license | Jae1654/Custom_Binomial_package | R | false | true | 416 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{bin_kurtosis}
\alias{bin_kurtosis}
\title{bin_kurtosis}
\usage{
bin_kurtosis(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of success}
}
\value{
The kurtosis of the binomial distribution defined by \code{trials} and \code{prob}, assuming valid parameter values.
}
\description{
Computes the kurtosis of a binomial distribution with the given number of trials and probability of success.
}
\examples{
bin_kurtosis(trials = 5, prob = 0.5)
}
|
# Fit an elastic-net model (alpha = 0.04, i.e. close to ridge) to the
# ReliefF-selected haematopoietic feature set and append the fitted
# coefficient path to the corresponding model log file.
library(glmnet)
# Column 1 = response, columns 4..ncol = predictors.
# NOTE(review): column roles inferred from the indexing below -- confirm
# against the CSV layout. `head=T` relied on partial argument matching;
# spelled out as header = TRUE.
mydata <- read.table("./TrainingSet/ReliefF/haematopoietic.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# Renamed from `glm` to avoid masking stats::glm().
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.04,
                    family = "gaussian", standardize = TRUE)
# Pair the sink() calls so console output is always restored.
sink('./Model/EN/ReliefF/haematopoietic/haematopoietic_019.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/ReliefF/haematopoietic/haematopoietic_019.R | no_license | leon1003/QSMART | R | false | false | 376 | r | library(glmnet)
# Elastic-net fit (alpha = 0.04) on the ReliefF haematopoietic training set;
# the fitted glmnet coefficient path is appended to the model log file.
dataset <- read.table(
  "./TrainingSet/ReliefF/haematopoietic.csv",
  header = TRUE,
  sep = ","
)
# Response lives in column 1; predictors start at column 4.
y <- as.matrix(dataset[, 1])
x <- as.matrix(dataset[, 4:ncol(dataset)])
# Reproducible 10-fold cross-validation.
set.seed(123)
cv_model <- cv.glmnet(
  x, y,
  nfolds = 10,
  type.measure = "mae",
  alpha = 0.04,
  family = "gaussian",
  standardize = TRUE
)
# Log the coefficient path, restoring console output afterwards.
sink('./Model/EN/ReliefF/haematopoietic/haematopoietic_019.txt', append = TRUE)
print(cv_model$glmnet.fit)
sink()
|
# --- Session setup -----------------------------------------------------------
# NOTE(review): a hard-coded setwd() and rm(list = ls()) in a script are
# discouraged (they break portability and wipe the caller's workspace);
# kept as-is to preserve the original workflow.
setwd("C:/Users/martisl/OneDrive - NIWA/NIWA Research Project/R_files")
rm(list = ls()) #will clear all objects includes hidden objects.
gc() #free up memory and report the memory usage.
# Loads the yearly chlorophyll raster list used by get_xy_vals() below.
# NOTE(review): the loaded object is presumably `ls_chl_yr` (see call site) -- confirm.
load("results\\chl.stack.list.yearly.Rda")
# library(ncdf4) # package for netcdf manipulation
library(raster) # package for raster manipulation
library(rgdal) # package for geospatial analysis
library(ggplot2) # package for plotting
library(dplyr)
library(stringr)
library(spData)
library(spData) # NOTE(review): duplicate of the line above; harmless but redundant
library(tidyr)
# detach("package:tidyverse", unload = TRUE)
# Extract chlorophyll values from a list of yearly rasters at a grid of
# coordinates, returning one long data frame with columns x, y, year, chl.
#
# data_list: list of Raster* objects, one per element of `years` (same order).
# x, y:      single coordinates, or ranges expanded into a grid spaced every
#            `step` map units.
# years:     year labels attached to each raster (default 2003:2019, matching
#            the original hard-coded 17-year span).
# step:      grid spacing used when x/y are ranges (default 500 m).
get_xy_vals <- function(data_list, x, y, years = 2003:2019, step = 500) {
  if (length(x) != 1) {
    x <- seq(from = range(x)[1], to = range(x)[2], by = step)
  }
  if (length(y) != 1) {
    y <- seq(from = range(y)[1], to = range(y)[2], by = step)
  }
  if (length(data_list) != length(years)) {
    stop("`data_list` must contain exactly one raster per element of `years`")
  }
  xy <- data.frame(expand.grid(x = x, y = y))
  # One copy of the grid per year, stacked vertically; chl is filled in below.
  expanded <- xy[rep(row.names(xy), length(years)), ]
  mydf <- data.frame(expanded,
                     year = sort(rep(years, nrow(xy))),
                     chl = NA)
  location <- xy
  coordinates(location) <- ~ x + y # convert to an sp SpatialPoints object
  for (i in seq_along(data_list)) {
    vals <- raster::extract(data_list[[i]], location) # raster values at grid points
    mydf[((i - 1) * nrow(xy) + 1):(i * nrow(xy)), "chl"] <- vals
  }
  # Explicit return value (the original ended with a no-op `mydf <- mydf`).
  mydf
}
# Extract yearly chlorophyll for a 10 x 10 cell window (500 m cells) offset
# 143 columns east and 207 rows south of the reference origin.
# NOTE(review): the origin coordinates (1088228, 6260250) are assumed to be
# projected map metres -- confirm against the raster CRS.
dat <- get_xy_vals(data_list=ls_chl_yr,
                   x=(1088228+(500*143)):(1088228+(500*(143+9))),
                   y=(6260250-(500*207)):(6260250-(500*(207+9))))
#################OLD CODE#############
# getting x,y values - Inefficient code
# NOTE(review): this superseded version re-defines get_xy_vals(); because it
# appears AFTER the call above, sourcing the whole file leaves THIS version
# bound to the name. Consider renaming or deleting it.
get_xy_vals <- function(data_list,x,y){ #x=c(start=coord,end=coord)
if(length(x)!=1){
x <- seq(from=range(x)[1],to=range(x)[2], by=500)}
if(length(y)!=1){
y <- seq(from=range(y)[1],to=range(y)[2], by=500)}
xy <- as.data.frame(expand.grid(x=x,y=y))
points <- seq(1,nrow(xy))
location <- data.frame(points=points,x=xy[1],y=xy[2]) #DF
coordinates(location) <- ~ x+ y #Convert to spatial points df format
vals <- list() #empty list
# Growing a matrix with cbind() in a loop copies it every iteration; the
# newer version above preallocates instead.
cbind.vals <- matrix(0, nrow = nrow(xy), ncol = 0) #empty matrix
for(i in 1 :length(data_list)){
vals[[i]] <- raster::extract(data_list[[i]], location) #Extract values
cbind.vals <- cbind(cbind.vals,vals[[i]])} #combine data into matrix
valuesSP <- cbind(location,cbind.vals)
valuesDF <- as.data.frame(valuesSP) #convert to dataframe
# CONVERT df from wide format to long format
# NOTE(review): tidyr::gather() is superseded by pivot_longer(); the range
# CHL200301:CHL201912 assumes monthly layer names in exactly that format.
data <- tidyr::gather(valuesDF, key=file, value=CHL, CHL200301:CHL201912)
data$time <- substring(data$file,4,9)
data$year <- substring(data$file,4,7)
data$month <- substring(data$file,8,9)
# Southern-hemisphere seasons derived from the two-digit month code.
data$season <- if_else(data$month %in% c("09","10","11"), "Spring",
                       if_else(data$month %in% c("12","01","02"), "Summer",
                       if_else(data$month %in% c("03","04","05"), "Autumn",
                       if_else(data$month %in% c("06","07","08"), "Winter", "NA"))))
data$season <- as.factor(data$season)
# Last expression is an assignment, so the data frame is returned invisibly.
data <- data[, c("points", "file", "time", "season", "year", "month","x", "y", "CHL")]
}
| /1.3 get_xy.R | no_license | lindamartis/Temp1 | R | false | false | 3,035 | r | setwd("C:/Users/martisl/OneDrive - NIWA/NIWA Research Project/R_files")
# --- Session setup -----------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the whole workspace; discouraged inside
# scripts but kept to preserve the original workflow.
rm(list = ls()) #will clear all objects includes hidden objects.
gc() #free up memory and report the memory usage.
# Loads the yearly chlorophyll raster list used by get_xy_vals() below.
load("results\\chl.stack.list.yearly.Rda")
# library(ncdf4) # package for netcdf manipulation
library(raster) # package for raster manipulation
library(rgdal) # package for geospatial analysis
library(ggplot2) # package for plotting
library(dplyr)
library(stringr)
library(spData)
library(spData) # NOTE(review): duplicated library() call; harmless but redundant
library(tidyr)
# detach("package:tidyverse", unload = TRUE)
# Sample each yearly chlorophyll raster (2003-2019) at a 500 m grid built
# from the supplied x/y coordinates (single values or ranges) and return a
# long data frame with columns x, y, year, chl.
get_xy_vals <- function(data_list, x, y) {
  # Expand coordinate ranges into 500 m-spaced axes; scalars pass through.
  if (length(x) != 1) {
    x <- seq(from = range(x)[1], to = range(x)[2], by = 500)
  }
  if (length(y) != 1) {
    y <- seq(from = range(y)[1], to = range(y)[2], by = 500)
  }
  grid <- data.frame(expand.grid(x = x, y = y))
  n_cells <- nrow(grid)
  yrs <- 2003:2019
  # One copy of the grid per year, stacked vertically; chl filled in below.
  stacked <- grid[rep(row.names(grid), 17), ]
  out <- data.frame(stacked, year = sort(rep(yrs, n_cells)), chl = NA)
  pts <- grid
  coordinates(pts) <- ~ x + y # sp SpatialPoints for raster::extract()
  for (k in 1:length(data_list)) {
    rows <- ((k - 1) * n_cells + 1):(k * n_cells)
    out[rows, "chl"] <- raster::extract(data_list[[k]], pts)
  }
  # Mirror the original's trailing assignment: result returned invisibly.
  invisible(out)
}
# Extract yearly chlorophyll for a 10 x 10 cell window (500 m cells) offset
# 143 columns east and 207 rows south of the reference origin.
dat <- get_xy_vals(data_list=ls_chl_yr,
                   x=(1088228+(500*143)):(1088228+(500*(143+9))),
                   y=(6260250-(500*207)):(6260250-(500*(207+9))))
#################OLD CODE#############
# getting x,y values - Inefficient code
# NOTE(review): superseded re-definition of get_xy_vals(); since it appears
# AFTER the call above, sourcing the file leaves THIS version bound to the
# name. Consider renaming or deleting it.
get_xy_vals <- function(data_list,x,y){ #x=c(start=coord,end=coord)
if(length(x)!=1){
x <- seq(from=range(x)[1],to=range(x)[2], by=500)}
if(length(y)!=1){
y <- seq(from=range(y)[1],to=range(y)[2], by=500)}
xy <- as.data.frame(expand.grid(x=x,y=y))
points <- seq(1,nrow(xy))
location <- data.frame(points=points,x=xy[1],y=xy[2]) #DF
coordinates(location) <- ~ x+ y #Convert to spatial points df format
vals <- list() #empty list
# Growing a matrix with cbind() in a loop copies it every iteration.
cbind.vals <- matrix(0, nrow = nrow(xy), ncol = 0) #empty matrix
for(i in 1 :length(data_list)){
vals[[i]] <- raster::extract(data_list[[i]], location) #Extract values
cbind.vals <- cbind(cbind.vals,vals[[i]])} #combine data into matrix
valuesSP <- cbind(location,cbind.vals)
valuesDF <- as.data.frame(valuesSP) #convert to dataframe
# CONVERT df from wide format to long format
# NOTE(review): tidyr::gather() is superseded by pivot_longer().
data <- tidyr::gather(valuesDF, key=file, value=CHL, CHL200301:CHL201912)
data$time <- substring(data$file,4,9)
data$year <- substring(data$file,4,7)
data$month <- substring(data$file,8,9)
# Southern-hemisphere seasons derived from the two-digit month code.
data$season <- if_else(data$month %in% c("09","10","11"), "Spring",
                       if_else(data$month %in% c("12","01","02"), "Summer",
                       if_else(data$month %in% c("03","04","05"), "Autumn",
                       if_else(data$month %in% c("06","07","08"), "Winter", "NA"))))
data$season <- as.factor(data$season)
# Last expression is an assignment, so the data frame is returned invisibly.
data <- data[, c("points", "file", "time", "season", "year", "month","x", "y", "CHL")]
}
|
# A brief look at self harm data, downloaded from http://ihmeuw.org/3zn8
# Set up
library(dplyr)
# NOTE(review): hard-coded setwd() ties the script to one machine layout.
setwd('~/Documents/info-498c/demos/gbd/ylls/')
# Set margin spacing for graphics
par(mar=c(5,4,4,2))
# Load data
self.harm.data <- read.csv('data/prepped/self-harm.csv')
# Deal with string age-groups: keep positive values and take the first two
# characters of the Age label (presumably e.g. "15 to 19" -> "15" --
# confirm against the CSV).
self.harm.data <- self.harm.data %>%
  filter(Value > 0) %>%
  mutate(age.group = substr(Age, 1, 2))
###########################################################################
### Create a plot of the death rate (deaths per 100K) in each age-group ###
###########################################################################
# Subset the data
death.data <- self.harm.data %>% filter(Measure == 'Deaths per 100,000')
# Create chart
png('charts/self-harm-deaths.png', width = 4, height = 4, units = 'in', res=300)
# NOTE(review): age.group is a character column; plot() expects a factor or
# numeric x -- confirm this renders as intended. Passing title() as a plot()
# argument is unusual but it is evaluated for its side effect while the
# device is open.
plot(death.data$age.group,
     death.data$Value,
     xlab = 'Age-Group',
     ylab = "Deaths",
     title(main ="Self Harm Deaths / 100K in Colombia"))
dev.off()
#######################################################################
### Create a plot of the yll rate (ylls per 100K) in each age-group ###
#######################################################################
# Everything that is not the death-rate measure is treated as YLLs here.
yll.data <- self.harm.data %>% filter(Measure != 'Deaths per 100,000')
# Plot YLL data
png('charts/self-harm-ylls.png', width = 4, height = 4, units = 'in', res=300)
plot(yll.data$age.group,
     yll.data$Value,
     xlab = 'Age-Group',
     ylab = "YLLs",
     title(main ="Self Harm YLLs / 100K in Colombia"))
dev.off() | /gbd/ylls/analysis-complete.R | permissive | WarrenW159/demos | R | false | false | 1,571 | r | # A brief look at self harm data, downloaded from http://ihmeuw.org/3zn8
# Set up
# (This section repeats the preceding self-harm analysis verbatim.)
library(dplyr)
setwd('~/Documents/info-498c/demos/gbd/ylls/')
# Set margin spacing for graphics
par(mar=c(5,4,4,2))
# Load data
self.harm.data <- read.csv('data/prepped/self-harm.csv')
# Deal with string age-groups (first two characters of the Age label).
self.harm.data <- self.harm.data %>%
  filter(Value > 0) %>%
  mutate(age.group = substr(Age, 1, 2))
###########################################################################
### Create a plot of the death rate (deaths per 100K) in each age-group ###
###########################################################################
# Subset the data
death.data <- self.harm.data %>% filter(Measure == 'Deaths per 100,000')
# Create chart
png('charts/self-harm-deaths.png', width = 4, height = 4, units = 'in', res=300)
plot(death.data$age.group,
     death.data$Value,
     xlab = 'Age-Group',
     ylab = "Deaths",
     title(main ="Self Harm Deaths / 100K in Colombia"))
dev.off()
#######################################################################
### Create a plot of the yll rate (ylls per 100K) in each age-group ###
#######################################################################
# Everything that is not the death-rate measure is treated as YLLs here.
yll.data <- self.harm.data %>% filter(Measure != 'Deaths per 100,000')
# Plot YLL data
png('charts/self-harm-ylls.png', width = 4, height = 4, units = 'in', res=300)
plot(yll.data$age.group,
     yll.data$Value,
     xlab = 'Age-Group',
     ylab = "YLLs",
     title(main ="Self Harm YLLs / 100K in Colombia"))
dev.off() |
#--------------------------Shannon Index
# Shannon diversity index H = -sum(p_i * log(p_i)) of a count vector.
#
# vec: numeric vector of non-negative counts/abundances; zero entries are
#      dropped before computing the index (their contribution is 0).
# log: base of the logarithm -- 'log2' (bits, default), 'log10', or 'ln'.
# Returns H rounded to 5 digits. Stops on an empty vector, an all-zero
# vector, or an unrecognised `log` value.
shannon <-
  function(vec, log = 'log2'){
    if(length(vec)==0) stop("empty vector") #exception: no value in vector
    if(sum(vec)==0) stop("no values")
    vec <- vec[ vec != 0 ]
    # The relative frequencies are identical in every branch, so compute
    # them once (the original duplicated pi/lpi/result per branch).
    pi <- vec/sum(vec)
    if(log == 'log2'){
      lpi <- log2(pi)
    } else if(log == 'log10'){
      lpi <- log10(pi)
    } else if(log == 'ln') {
      lpi <- base::log(pi) # base:: because the argument `log` shadows log()
    } else stop("wrong log parameter")
    return(round(-(sum(pi*lpi)), digits = 5))
  }
# Per-category breakdown of the Shannon index computation.
#
# vec: numeric vector of counts, one per category. Categories are labelled
#      A, B, C, ... in order; generalised from the original hard-coded six
#      letters, so any length up to 26 now works (length 6 is unchanged).
# log: 'log 2', 'log 10' or 'log nat' (default).
#      NOTE(review): these labels differ from shannon()'s
#      'log2'/'log10'/'ln' -- kept as-is for interface compatibility.
# Returns a data.table with one row per non-zero category: label, absolute
# frequency, relative frequency p, log(p), and the term -p*log(p).
shannondat <-
  function(vec, log = 'log nat'){
    if(length(vec) == 0) stop("empty vector")
    if(sum(vec) == 0) stop("no values")
    # Zeros stay in `p`: their log is -Inf and their term NaN, but those
    # rows are filtered out below via number != 0 (as in the original).
    p <- vec/sum(vec)
    if(log == 'log 2'){
      lp <- log2(p)
    } else if(log == 'log 10'){
      lp <- log10(p)
    } else if(log == "log nat") {
      lp <- base::log(p) # base:: because the argument `log` shadows log()
    } else stop("wrong log parameter")
    result <- -(p*lp)
    # Label categories A, B, C, ... instead of assuming exactly six entries
    # (the original's c('A',...,'F') broke for any other vector length).
    letcol <- LETTERS[seq_along(vec)]
    back <- data.table(letcol,
                       number = as.integer(vec),
                       round(p, 3),
                       round(lp, 3),
                       round(result, 3))
    back <- back[number != 0] # drop empty categories (zero contribution)
    colnames(back) <- c("Letter", "Absolute frequency", "Relative frequency",
                        "Logarithm of relative frequency", "Minus relative frequency times its logarithm")
    return(back)
  }
| /Shannon.R | no_license | guitaric/Visualizing-Inequality | R | false | false | 1,683 | r | #--------------------------Shannon Index
# Computes the Shannon diversity index H = -sum(p * log(p)) for a vector of
# counts. Zero counts are discarded first; `log` selects the base:
# 'log2' (default), 'log10', or 'ln'. The result is rounded to 5 digits.
shannon <- function(vec, log = 'log2') {
  if (length(vec) == 0) stop("empty vector")
  if (sum(vec) == 0) stop("no values")
  nonzero <- vec[vec != 0]
  shares <- nonzero / sum(nonzero)
  if (log == 'log2') {
    logged <- log2(shares)
  } else if (log == 'log10') {
    logged <- log10(shares)
  } else if (log == 'ln') {
    logged <- base::log(shares)
  } else {
    stop("wrong log parameter")
  }
  round(-sum(shares * logged), digits = 5)
}
# Detailed Shannon-index table: one row per non-zero category with its
# absolute frequency, relative frequency p, log(p), and the per-category
# contribution -p*log(p). Returns a data.table.
# NOTE(review): `letcol` hard-codes six labels A-F, so `vec` is effectively
# assumed to have length 6 -- confirm callers never pass other lengths.
# NOTE(review): the `log` labels ('log 2', 'log 10', 'log nat') differ from
# shannon()'s ('log2', 'log10', 'ln').
shannondat <-
  function(vec, log = 'log nat'){
    if(length(vec)==0) stop("empty vector") #exception no value in vector
    if(sum(vec)==0) stop("no values")
    # Zeros stay in pi; their terms become NaN and are dropped below via
    # the number != 0 filter.
    pi <- vec/sum(vec)
    if(log == 'log 2'){
      lpi <- log2(pi)
    } else if(log == 'log 10'){
      lpi <- log10(pi)
    } else if(log == "log nat") {
      lpi <- log(pi)
    } else stop("wrong log parameter")
    result <- -(pi*lpi)
    letcol <- c('A','B','C','D','E','F')
    back <- data.table(letcol,
                       number = as.integer(vec),
                       round(pi,3),
                       round(lpi, 3),
                       round(result, 3))
    back <- back[number != 0]
    colnames(back) <- c("Letter", "Absolute frequency", "Relative frequency",
                        "Logarithm of relative frequency", "Minus relative frequency times its logarithm")
    return(back)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_objects.R
\name{UserIm}
\alias{UserIm}
\title{UserIm Object}
\usage{
UserIm(customProtocol = NULL, customType = NULL, im = NULL,
primary = NULL, protocol = NULL, type = NULL)
}
\arguments{
\item{customProtocol}{Custom protocol}
\item{customType}{Custom type}
\item{im}{Instant messenger id}
\item{primary}{If this is user's primary im}
\item{protocol}{Protocol used in the instant messenger}
\item{type}{Each entry can have a type which indicates standard types of that entry}
}
\value{
UserIm object
}
\description{
UserIm Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
JSON template for instant messenger of an user.
}
| /googleadmindirectoryv1.auto/man/UserIm.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 752 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/admin_objects.R
\name{UserIm}
\alias{UserIm}
\title{UserIm Object}
\usage{
UserIm(customProtocol = NULL, customType = NULL, im = NULL,
primary = NULL, protocol = NULL, type = NULL)
}
\arguments{
\item{customProtocol}{Custom protocol}
\item{customType}{Custom type}
\item{im}{Instant messenger id}
\item{primary}{If this is user's primary im}
\item{protocol}{Protocol used in the instant messenger}
\item{type}{Each entry can have a type which indicates standard types of that entry}
}
\value{
UserIm object
}
\description{
UserIm Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
JSON template for instant messenger of an user.
}
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# R codes for : #
#"The vulnerability of whitefish (Coregonus lavaretus) to the invasive European catfish (Silurus glanis) in a large peri-Alpine lake"#
# #
# VAGNON Chloe #
# September 2021 #
# #
#                                      This script is structured into 2 parts:                                                          #
# I) Size-based vulnerability #
# a) Infer body size range for consumers #
# b) Infer the trophic links to obtain the binary matrix and the list of all possible links #
# c) Calculate interaction probabilities for all links #
# d) Plot the boxplot of trophic links probabilities between S. glanis and C. lavaretus #
# #
# II) Predation risk #
# a) Depth-matching #
# b) Metabolism #
# c) Predation risk for 0+ #
# d) Predation risk for Other life stages #
# #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
####################################### I) Size-based Vulnerability ######################################################
# Load packages
library(stringr)
library(ggplot2)
# Load data and functions for using the aNM
# (cv_Functions_aNM.R presumably defines get_niche_attributes(), L_fn2() and
# Weighting() used below -- confirm.)
load("Param_reginvert.Rdata")
load("Param_regvert.Rdata")
source("cv_Functions_aNM.R")
# Load data referencing body size of each S. glanis and each C. lavaretus
# /!\ Body size in the table must be ordered by decreasing body size
load("DATA_BodySize.Rdata")
#### a) Infer body size range for consumers ######
Niche<-get_niche_attributes(DATA_BodySize$Name,DATA_BodySize$LogT, DATA_BodySize$SubPhylum)
#### b) Infer the trophic links to obtain the binary matrix and the list of all possible links ####
MatTab1<-L_fn2(Niche$name,Niche$n,Niche$c,Niche$low,Niche$high,"YES")
Mat1<-MatTab1[[1]] #Binary matrix
Tab1<-MatTab1[[2]] # Corresponding table of links
#### c) Calculate interaction probabilities for all links ####
MatW2aNM<-Weighting(Niche,Mat1)
Tab1$aNM<-c(MatW2aNM[MatW2aNM!=0])
#Delete S. glanis as prey and C. lavaretus as predator in the list
Tab1<-Tab1[!str_detect(Tab1$Prey,"SIL")&!str_detect(Tab1$Pred,"WHI"),]
# Back-transform log10 body sizes; the /10000 factor converts the units to
# centimetres (units inferred from this factor -- confirm).
Tab1$Tprey<-(10^Tab1$Log_Size_Prey)/10000 # in centimeters
Tab1$Tpred<-(10^Tab1$Log_Size_Pred)/10000
#### d)Plot the boxplot of trophic links probabilities between S. glanis and C. lavaretus ####
ggplot(data=Tab1,aes(x=as.factor(Tab1$Tprey),y=aNM))+
geom_boxplot(size=0.7)+
scale_x_discrete("Whitefish body size (cm)",labels=c(1,seq(5,50,5)))+
scale_y_continuous("Trophic link probability",limits=c(0,1))+
theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
panel.grid.major.y = element_line(color = "darkgrey"),
axis.text = element_text(size=16,color="black"),
axis.title = element_text(size=18,color="black"),
panel.border = element_rect(colour = "black", fill=NA))
####################################### II) Predation risk ################################################################
#### a) Depth-matching ####
# DATA to load for computing the depth-matching
load("Depth_SIL.Rdata") # DATA on SIL depth
load("Depth_C_0.Rdata") # DATA on 0+ C. lavaretus depth
load("Depth_C_Other.Rdata") # DATA on other C. lavaretus life stages depth
# Function to load
source("cv_Functions_Vul_Salmo.R")
# Plot of the modal depth for S. glanis and range depth for 0+ and other whitefish life stages
ggplot()+
geom_errorbar(data=Depth_SIL,aes(x=MonthNum-0.2,y=D_mode*-1,ymin=D_min*-1,ymax=D_max*-1),size=1.9,width=0.25,col="darkblue")+
geom_point(data=Depth_SIL,aes(x=MonthNum-0.2,y=D_mode*-1),size=5,col="darkblue")+
geom_errorbar(data=Depth_C_Other,aes(x=MonthNum+0.2,y=D_mean*-1,ymin=D_min*-1,ymax=D_max*-1),size=1.9,width=0.25,col="#CC9900")+
geom_errorbar(data=Depth_C_0,aes(x=MonthNum,y=D_mean*-1,ymin=D_min*-1,ymax=D_max*-1),size=1.9,width=0.25,col="cyan4")+
scale_y_continuous("Depth (m)",limits = c(-55,0.1),breaks = seq(-55,0,5))+
scale_x_continuous("",limits = c(2.6,10.4),breaks = seq(3,10,1),labels =Depth_SIL$Month)+
theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
panel.grid.major.y = element_line(color = "darkgrey"),
axis.text = element_text(size=18,color="black"),axis.title = element_text(size=20,color="black"),
panel.border = element_rect(colour = "black", fill=NA))
# Compute best parameters of fitting the LogNormal probability density function for each month.
# (Changed `=` top-level assignments to `<-` for consistency with the rest of
# the file; no behaviour change.)
MEAN <- seq(0.5, log(ceiling(max(Depth_SIL$D_max))), 0.01) # candidate means for the grid search
SD <- seq(0.05, 1, 0.01) # candidate standard deviations
DepthAll <- seq(0.5, max(Depth_SIL$D_max), 0.01) # full depth range to be covered
Param <- Param_Depth_Matching(DepthAll, Depth_SIL$D_mode, MEAN, SD, Month = Depth_SIL$MonthNum) # best (mean, sd) per month
#OR use the ones already fitted for these data
load("Param.Rdata")
# Plot density curves for each studied month for 0+ whitefish
op <- par(mfrow=c(3,3),mar=c(5,5,2.5,2))
k=0
for(i in as.character(Depth_C_0$MonthNum)){
k=k+1
plot(-Param[["Density"]][[i]]$depth~Param[["Density"]][[i]]$Prob,main=unique(Depth_SIL$Month[k]),
ylab="Depth (m)",xlab="Depth-matching",
ylim=c(-60,0),xlim=c(0,1),type="l",col="darkblue",lwd=2,cex.lab=2.5,cex.main=2.5,cex.axis=2)
abline(h=-Depth_SIL$D_mode[k],col="darkblue",lty=2,lwd=2)
points(y=-seq(Depth_C_0$D_min[k],Depth_C_0$D_max[k],1),
x=Depth_Matching(Param,
DepthPrey=seq(Depth_C_0$D_min[k],Depth_C_0$D_max[k],1),
Month = Depth_C_0$MonthNum[k],
DepthAll=DepthAll)[,3],col="cyan4",pch=19,cex=2)
}
# Plot density curves for each studied month for other whitefish life stages
op <- par(mfrow=c(3,3),mar=c(5,5,2.5,2))
k=0
for(i in as.character(Depth_C_0$MonthNum)){
k=k+1
plot(-Param[["Density"]][[i]]$depth~Param[["Density"]][[i]]$Prob,main=unique(Depth_SIL$Month[k]),
ylab="Depth (m)",xlab="Depth-matching",
ylim=c(-60,0),xlim=c(0,1),type="l",col="darkblue",lwd=2,cex.lab=2.5,cex.main=2.5,cex.axis=2)
abline(h=-Depth_SIL$D_mode[k],col="darkblue",lty=2,lwd=2)
points(y=-seq(Depth_C_Other$D_min[k],Depth_C_Other$D_max[k],1),
x=Depth_Matching(Param,
DepthPrey=seq(Depth_C_Other$D_min[k],Depth_C_Other$D_max[k],1),
Month = Depth_C_0$MonthNum[k],
DepthAll=DepthAll)[,3],col="#CC9900",pch=19,cex=2)
}
#### b) Metabolism ####
# Load temperature data for different months and years
load("TempMonth.Rdata")
# Compute the maximum metabolism
# Reference value: a 4500 g catfish at the warmest observed monthly
# temperature; used later to normalise monthly metabolic rates (B/Bmax).
# NOTE(review): the 4500 g body mass is repeated in the loops below -- keep
# them in sync.
Bmax<-basal_metabolism(bodymass=4500,temperature=max(TempMonth$Temp))
#### c) Compute the predation risk for 0+ ####
D_WHI_All_0<-seq(min(Depth_C_0$D_min), max(Depth_C_0$D_max),1)# For all the monthes in the year
vul_all_depth_0<-Depth_Matching(Param=Param,
DepthPrey=D_WHI_All_0,
Month=3:10,
DepthAll = DepthAll)[,3]
vul_unimod_max_0<-max(vul_all_depth_0)
# Compute Vulnerability
Vulnerability_0<-data.frame(Year=NA,Month=NA,MonthNum=NA,Temp=NA,Depth=NA,vul_unimod=NA)
for (i in 1:nrow(TempMonth)){
# Find metabolism (test with sil= 4.5 Kg)
B<-basal_metabolism(bodymass=4500,temperature=TempMonth$Temp[i])
D_WHI<-seq(Depth_C_0$D_min[Depth_C_0$MonthNum==TempMonth$MonthNum[i]],Depth_C_0$D_max[Depth_C_0$MonthNum==TempMonth$MonthNum[i]],1)
vul_unimod<-(Depth_Matching(Param,D_WHI,TempMonth$MonthNum[i])[,3]/vul_unimod_max_0)*(B/Bmax)
Vul<-data.frame(Year=TempMonth$Year[i],Month=TempMonth$Month[i],MonthNum=TempMonth$MonthNum[i],
Temp=TempMonth$Temp[i],Depth=D_WHI,vul_unimod=vul_unimod)
Vulnerability_0<-rbind(Vulnerability_0,Vul)
}
Vulnerability_0<-na.omit(Vulnerability_0)
# Plot the results
pos <-unique(TempMonth$Month)
ggplot(data=Vulnerability_0,aes(x=Month,y=vul_unimod,fill=Year))+
geom_boxplot(position = "dodge")+
scale_y_continuous("Predation risk",limits=c(0,1),breaks=seq(0,1,0.1))+
scale_x_discrete("",limits=pos)+
theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
panel.grid.major.y = element_line(color = "darkgrey"),
axis.text = element_text(size=18,color="black"),axis.title = element_text(size=20,color="black"),
panel.border = element_rect(colour = "black", fill=NA))
#### d) Compute the predation risk for Other whitefish life stages ####
D_WHI_AllOther<-seq(min(Depth_C_Other$D_min), max(Depth_C_Other$D_max),1)# For all the monthes in the year
vul_all_depthOther<-Depth_Matching(Param=Param,
DepthPrey=D_WHI_AllOther,
Month=3:10,
DepthAll = DepthAll)[,3]
vul_unimod_maxOther<-max(vul_all_depthOther)
# Compute Vulnerability
VulnerabilityOther<-data.frame(Year=NA,Month=NA,MonthNum=NA,Temp=NA,Depth=NA,vul_unimod=NA)
for (i in 1:nrow(TempMonth)){
# Find metabolism (test with sil= 4.5Kg)
B<-basal_metabolism(bodymass=4500,temperature=TempMonth$Temp[i])
D_WHI<-seq(Depth_C_Other$D_min[Depth_C_Other$MonthNum==TempMonth$MonthNum[i]],Depth_C_Other$D_max[Depth_C_Other$MonthNum==TempMonth$MonthNum[i]],1)
vul_unimod<-(Depth_Matching(Param,D_WHI,TempMonth$MonthNum[i])[,3]/vul_unimod_maxOther)*(B/Bmax)
Vul<-data.frame(Year=TempMonth$Year[i],Month=TempMonth$Month[i],MonthNum=TempMonth$MonthNum[i],
Temp=TempMonth$Temp[i],Depth=D_WHI,vul_unimod=vul_unimod)
VulnerabilityOther<-rbind(VulnerabilityOther,Vul)
}
VulnerabilityOther<-na.omit(VulnerabilityOther)
# Plot the results
ggplot(data=VulnerabilityOther,aes(x=Month,y=vul_unimod,fill=Year))+
geom_boxplot(position = "dodge")+
scale_y_continuous("Predation risk",limits=c(0,1),breaks=seq(0,1,0.1))+
scale_x_discrete("",limits=pos)+
theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
panel.grid.major.y = element_line(color = "darkgrey"),
axis.text = element_text(size=18,color="black"),axis.title = element_text(size=20,color="black"),
panel.border = element_rect(colour = "black", fill=NA))
| /cv_Rcodes_Vulnerability_Salmonidae.R | no_license | chloevagnon/Salmonids-Predation_Risk | R | false | false | 12,404 | r | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# R codes for : #
#"The vulnerability of whitefish (Coregonus lavaretus) to the invasive European catfish (Silurus glanis) in a large peri-Alpine lake"#
# #
# VAGNON Chloe #
# September 2021 #
# #
# This script is factionned into 2 parts: #
# I) Size-based vulnerability #
# a) Infer body size range for consumers #
# b) Infer the trophic links to obtain the binary matrix and the list of all possible links #
# c) Calculate interaction probabilities for all links #
# d) Plot the boxplot of trophic links probabilities between S. glanis and C. lavaretus #
# #
# II) Predation risk #
# a) Depth-matching #
# b) Metabolism #
# c) Predation risk for 0+ #
# d) Predation risk for Other life stages #
# #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
####################################### I) Size-based Vulnerability ######################################################
# Load packages
library(stringr)
library(ggplot2)
# Load data and functions for using the aNM
load("Param_reginvert.Rdata")
load("Param_regvert.Rdata")
source("cv_Functions_aNM.R")
# Load data referencing body size of each S. glanis and each C. lavaretus
# /!\ Body size in the table must be ordered by decreasing body size
load("DATA_BodySize.Rdata")
#### a) Infer body size range for consumers ######
Niche<-get_niche_attributes(DATA_BodySize$Name,DATA_BodySize$LogT, DATA_BodySize$SubPhylum)
#### b) Infer the trophic links to obtain the binary matrix and the list of all possible links ####
MatTab1<-L_fn2(Niche$name,Niche$n,Niche$c,Niche$low,Niche$high,"YES")
Mat1<-MatTab1[[1]] #Binary matrix
Tab1<-MatTab1[[2]] # Corresponding table of links
#### c) Calculate interaction probabilities for all links ####
MatW2aNM<-Weighting(Niche,Mat1)
Tab1$aNM<-c(MatW2aNM[MatW2aNM!=0])
#Delete S. glanis as prey and C. lavaretus as predator i the list
Tab1<-Tab1[!str_detect(Tab1$Prey,"SIL")&!str_detect(Tab1$Pred,"WHI"),]
Tab1$Tprey<-(10^Tab1$Log_Size_Prey)/10000 # in centimeters
Tab1$Tpred<-(10^Tab1$Log_Size_Pred)/10000
#### d)Plot the boxplot of trophic links probabilities between S. glanis and C. lavaretus ####
ggplot(data=Tab1,aes(x=as.factor(Tab1$Tprey),y=aNM))+
geom_boxplot(size=0.7)+
scale_x_discrete("Whitefish body size (cm)",labels=c(1,seq(5,50,5)))+
scale_y_continuous("Trophic link probability",limits=c(0,1))+
theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
panel.grid.major.y = element_line(color = "darkgrey"),
axis.text = element_text(size=16,color="black"),
axis.title = element_text(size=18,color="black"),
panel.border = element_rect(colour = "black", fill=NA))
####################################### II) Predation risk ################################################################
#### a) Depth-matching ####
# DATA to load for computing the depth-matching
load("Depth_SIL.Rdata") # DATA on SIL (S. glanis) depth: D_mode plus D_min/D_max per month
load("Depth_C_0.Rdata") # DATA on 0+ C. lavaretus depth (D_min/D_max/D_mean per month)
load("Depth_C_Other.Rdata") # DATA on other C. lavaretus life stages depth (same columns)
# Functions to load (Param_Depth_Matching, Depth_Matching, basal_metabolism, ...)
source("cv_Functions_Vul_Salmo.R")
# Plot of the modal depth for S. glanis and range depth for 0+ and other whitefish life stages
# Depths are multiplied by -1 so the y axis reads downwards; the month positions
# are offset by +/-0.2 on x so the three series do not overlap.
ggplot()+
  geom_errorbar(data=Depth_SIL,aes(x=MonthNum-0.2,y=D_mode*-1,ymin=D_min*-1,ymax=D_max*-1),size=1.9,width=0.25,col="darkblue")+
  geom_point(data=Depth_SIL,aes(x=MonthNum-0.2,y=D_mode*-1),size=5,col="darkblue")+
  geom_errorbar(data=Depth_C_Other,aes(x=MonthNum+0.2,y=D_mean*-1,ymin=D_min*-1,ymax=D_max*-1),size=1.9,width=0.25,col="#CC9900")+
  geom_errorbar(data=Depth_C_0,aes(x=MonthNum,y=D_mean*-1,ymin=D_min*-1,ymax=D_max*-1),size=1.9,width=0.25,col="cyan4")+
  scale_y_continuous("Depth (m)",limits = c(-55,0.1),breaks = seq(-55,0,5))+
  scale_x_continuous("",limits = c(2.6,10.4),breaks = seq(3,10,1),labels =Depth_SIL$Month)+
  theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
        panel.grid.major.y = element_line(color = "darkgrey"),
        axis.text = element_text(size=18,color="black"),axis.title = element_text(size=20,color="black"),
        panel.border = element_rect(colour = "black", fill=NA))
# Compute best parameters of fitting the LogNormal probability density function for each month
MEAN = seq(0.5,log(ceiling(max(Depth_SIL$D_max))),0.01) # INITIATE the candidate mean values (log scale)
SD = seq(0.05,1,0.01)# INITIATE the candidate sd values
DepthAll<-seq(0.5,max(Depth_SIL$D_max),0.01) # INITIATE the total depth range to be covered
Param<-Param_Depth_Matching(DepthAll,Depth_SIL$D_mode,MEAN,SD, Month=Depth_SIL$MonthNum)# Compute the best combination of parameters (grid search; can be slow)
#OR use the ones already fitted for these data
load("Param.Rdata")
# Plot density curves for each studied month for 0+ whitefish:
# the fitted predator depth-use density (dark blue curve), the predator modal
# depth (dashed line) and the 0+ whitefish depth range projected on the curve
# (cyan points).
op <- par(mfrow=c(3,3),mar=c(5,5,2.5,2)) # 3x3 panel grid, one panel per month
k=0 # panel/month counter; indexes the rows of the Depth_* data frames
for(i in as.character(Depth_C_0$MonthNum)){
  k=k+1
  # Param[["Density"]][[i]] holds the fitted (depth, Prob) curve for month i
  plot(-Param[["Density"]][[i]]$depth~Param[["Density"]][[i]]$Prob,main=unique(Depth_SIL$Month[k]),
       ylab="Depth (m)",xlab="Depth-matching",
       ylim=c(-60,0),xlim=c(0,1),type="l",col="darkblue",lwd=2,cex.lab=2.5,cex.main=2.5,cex.axis=2)
  # Predator modal depth for this month
  abline(h=-Depth_SIL$D_mode[k],col="darkblue",lty=2,lwd=2)
  # Depth-matching values (3rd column of Depth_Matching output) over the
  # 0+ whitefish depth range, sampled every 1 m
  points(y=-seq(Depth_C_0$D_min[k],Depth_C_0$D_max[k],1),
         x=Depth_Matching(Param,
                          DepthPrey=seq(Depth_C_0$D_min[k],Depth_C_0$D_max[k],1),
                          Month = Depth_C_0$MonthNum[k],
                          DepthAll=DepthAll)[,3],col="cyan4",pch=19,cex=2)
}
# Plot density curves for each studied month for other whitefish life stages
# (same layout as the 0+ panels above, with the other-life-stage depth ranges
# shown as dark-yellow points).
# NOTE(review): this loop iterates over Depth_C_0$MonthNum and passes
# Month = Depth_C_0$MonthNum[k] to Depth_Matching() even though it plots the
# depth ranges of Depth_C_Other.  That is only correct if both data frames
# cover exactly the same months in the same row order -- confirm.
op <- par(mfrow=c(3,3),mar=c(5,5,2.5,2)) # 3x3 panel grid, one panel per month
k=0 # panel/month counter
for(i in as.character(Depth_C_0$MonthNum)){
  k=k+1
  # Fitted predator depth-use density for month i
  plot(-Param[["Density"]][[i]]$depth~Param[["Density"]][[i]]$Prob,main=unique(Depth_SIL$Month[k]),
       ylab="Depth (m)",xlab="Depth-matching",
       ylim=c(-60,0),xlim=c(0,1),type="l",col="darkblue",lwd=2,cex.lab=2.5,cex.main=2.5,cex.axis=2)
  # Predator modal depth for this month
  abline(h=-Depth_SIL$D_mode[k],col="darkblue",lty=2,lwd=2)
  # Depth-matching values over the other-life-stages depth range (1 m steps)
  points(y=-seq(Depth_C_Other$D_min[k],Depth_C_Other$D_max[k],1),
         x=Depth_Matching(Param,
                          DepthPrey=seq(Depth_C_Other$D_min[k],Depth_C_Other$D_max[k],1),
                          Month = Depth_C_0$MonthNum[k],
                          DepthAll=DepthAll)[,3],col="#CC9900",pch=19,cex=2)
}
#### b) Metabolism ####
# Load temperature data for different months and years (Year, Month, MonthNum, Temp)
load("TempMonth.Rdata")
# Compute the maximum metabolism: basal metabolism of a 4.5 kg S. glanis at the
# warmest monthly temperature observed.  Used below to normalize B/Bmax to [0,1].
Bmax<-basal_metabolism(bodymass=4500,temperature=max(TempMonth$Temp))
#### c) Compute the predation risk for 0+ ####
# Depth range occupied by 0+ whitefish across all the months of the year (1 m steps)
D_WHI_All_0<-seq(min(Depth_C_0$D_min), max(Depth_C_0$D_max),1)
# Depth-matching over the whole range and period; column 3 is the matching probability
vul_all_depth_0<-Depth_Matching(Param=Param,
                                DepthPrey=D_WHI_All_0,
                                Month=3:10,
                                DepthAll = DepthAll)[,3]
vul_unimod_max_0<-max(vul_all_depth_0) # used to rescale the depth-matching to [0,1]
# Compute Vulnerability (predation risk) for every month/year in TempMonth.
# Rows are collected in a preallocated list and bound once at the end: this
# fixes the O(n^2) rbind()-in-a-loop growth and the dummy all-NA seed row of
# the original version (na.omit() is kept so the result content is identical).
VulRows_0 <- vector("list", nrow(TempMonth))
for (i in seq_len(nrow(TempMonth))){
  # Metabolic demand of the predator at this month's temperature (sil = 4.5 kg)
  B<-basal_metabolism(bodymass=4500,temperature=TempMonth$Temp[i])
  # Depth range of 0+ whitefish for this calendar month
  D_WHI<-seq(Depth_C_0$D_min[Depth_C_0$MonthNum==TempMonth$MonthNum[i]],Depth_C_0$D_max[Depth_C_0$MonthNum==TempMonth$MonthNum[i]],1)
  # Risk = rescaled depth-matching x relative metabolic demand (B/Bmax)
  vul_unimod<-(Depth_Matching(Param,D_WHI,TempMonth$MonthNum[i])[,3]/vul_unimod_max_0)*(B/Bmax)
  VulRows_0[[i]]<-data.frame(Year=TempMonth$Year[i],Month=TempMonth$Month[i],MonthNum=TempMonth$MonthNum[i],
                             Temp=TempMonth$Temp[i],Depth=D_WHI,vul_unimod=vul_unimod)
}
Vulnerability_0<-na.omit(do.call(rbind, VulRows_0))
# Plot the results: one boxplot per month, dodged by year
pos <-unique(TempMonth$Month) # month labels in data order, reused for the x axis
ggplot(data=Vulnerability_0,aes(x=Month,y=vul_unimod,fill=Year))+
  geom_boxplot(position = "dodge")+
  scale_y_continuous("Predation risk",limits=c(0,1),breaks=seq(0,1,0.1))+
  scale_x_discrete("",limits=pos)+
  theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
        panel.grid.major.y = element_line(color = "darkgrey"),
        axis.text = element_text(size=18,color="black"),axis.title = element_text(size=20,color="black"),
        panel.border = element_rect(colour = "black", fill=NA))
#### d) Compute the predation risk for Other whitefish life stages ####
# Depth range occupied by the other life stages across all months (1 m steps)
D_WHI_AllOther<-seq(min(Depth_C_Other$D_min), max(Depth_C_Other$D_max),1)
# Depth-matching over the whole range and period; column 3 is the matching probability
vul_all_depthOther<-Depth_Matching(Param=Param,
                                   DepthPrey=D_WHI_AllOther,
                                   Month=3:10,
                                   DepthAll = DepthAll)[,3]
vul_unimod_maxOther<-max(vul_all_depthOther) # rescaling constant, as for the 0+
# Compute Vulnerability for every month/year in TempMonth.  As in section c),
# rows are collected in a preallocated list and bound once at the end instead
# of growing a data.frame with rbind() from an all-NA seed row.
VulRowsOther <- vector("list", nrow(TempMonth))
for (i in seq_len(nrow(TempMonth))){
  # Metabolic demand of the predator at this month's temperature (sil = 4.5 kg)
  B<-basal_metabolism(bodymass=4500,temperature=TempMonth$Temp[i])
  # Depth range of the other whitefish life stages for this calendar month
  D_WHI<-seq(Depth_C_Other$D_min[Depth_C_Other$MonthNum==TempMonth$MonthNum[i]],Depth_C_Other$D_max[Depth_C_Other$MonthNum==TempMonth$MonthNum[i]],1)
  # Risk = rescaled depth-matching x relative metabolic demand (B/Bmax)
  vul_unimod<-(Depth_Matching(Param,D_WHI,TempMonth$MonthNum[i])[,3]/vul_unimod_maxOther)*(B/Bmax)
  VulRowsOther[[i]]<-data.frame(Year=TempMonth$Year[i],Month=TempMonth$Month[i],MonthNum=TempMonth$MonthNum[i],
                                Temp=TempMonth$Temp[i],Depth=D_WHI,vul_unimod=vul_unimod)
}
VulnerabilityOther<-na.omit(do.call(rbind, VulRowsOther))
# Plot the results: one boxplot per month, dodged by year ('pos' defined in section c)
ggplot(data=VulnerabilityOther,aes(x=Month,y=vul_unimod,fill=Year))+
  geom_boxplot(position = "dodge")+
  scale_y_continuous("Predation risk",limits=c(0,1),breaks=seq(0,1,0.1))+
  scale_x_discrete("",limits=pos)+
  theme(panel.background = element_blank(),panel.grid.major.x = element_line(color = "darkgrey"),
        panel.grid.major.y = element_line(color = "darkgrey"),
        axis.text = element_text(size=18,color="black"),axis.title = element_text(size=20,color="black"),
        panel.border = element_rect(colour = "black", fill=NA))
|
# PCA on per-word star-rating counts; exports the words with extreme PC2
# scores for downstream regression, and plots the PC2 distribution.
dat = read.csv('word.csv',stringsAsFactors = FALSE)
colnames(dat) <- c('id','words','1star','2star','3star','4star','5star')
prin_dat = dat[,c('1star','2star','3star','4star','5star')]
prin_comp = princomp(scale(prin_dat))
summary(prin_comp)
prin_comp$loadings
# Cumulative proportion of variance, computed from the fit itself instead of
# hard-coding numbers transcribed by hand from summary() (the original did).
pc_var = prin_comp$sdev^2
cum_prop = cumsum(pc_var)/sum(pc_var)
x = seq_along(cum_prop)
plot(x,cum_prop,type = 'b',lwd = 2,cex.axis = 1.5, cex.lab = 1.5,xlab = "Number of components",ylab = "Cumulative Proportion")
PC2 = prin_comp$scores[,2]
PC1 = prin_comp$scores[,1]
# Rows with extreme PC2 scores; the same index set must be used for every
# column of a group.
pos_idx = which(PC2 < (-0.5))
neg_idx = which(PC2 > 0.5)
pos_csv = cbind(dat$words[pos_idx],PC2[pos_idx],PC1[pos_idx])
# BUG FIX: the original indexed PC1 with the *positive* filter here
# (PC1[which(PC2 < (-0.5))]), pairing the negative-group words and PC2 values
# with PC1 values taken from the wrong rows.
neg_csv = cbind(dat$words[neg_idx],PC2[neg_idx],PC1[neg_idx])
# Save them to do regression
write.csv(pos_csv,'pos.csv')
write.csv(neg_csv,'neg.csv')
# PLOT PC2 distribution
hist(prin_comp$scores[,2],col = c(2,2,2,2,0,0,1,1,1,1,1,1),cex.axis = 1.5, cex.lab = 1.5,cex.main = 1.5,xlab = "Component 2",main = 'Distribution of PC2')
| /code/pca.r | no_license | zgao92/STAT628_Module2_Group5 | R | false | false | 1,004 | r | dat = read.csv('word.csv',stringsAsFactors = FALSE)
colnames(dat) <- c('id','words','1star','2star','3star','4star','5star')
prin_dat = dat[,c('1star','2star','3star','4star','5star')]
prin_comp = princomp(scale(prin_dat))
summary(prin_comp)
prin_comp$loadings
# Cumulative plot
cum_prop = c(0.9194589,0.96753132,0.98308960,0.99312478,1.000000000)
x = c(1,2,3,4,5)
plot(x,cum_prop,type = 'b',lwd = 2,cex.axis = 1.5, cex.lab = 1.5,xlab = "Number of components",ylab = "Cumulative Propotion")
PC2 = prin_comp$scores[,2]
PC1 = prin_comp$scores[,1]
pos_csv = cbind(dat$words[which(PC2 < (-0.5))],PC2[which(PC2 < (-0.5))],PC1[which(PC2 < (-0.5))])
neg_csv = cbind(dat$words[which(PC2 > 0.5)],PC2[which(PC2 > 0.5)],PC1[which(PC2 < (-0.5))])
# Save them to do regression
write.csv(pos_csv,'pos.csv')
write.csv(neg_csv,'neg.csv')
# PLOT PC2 distribution
hist(prin_comp$scores[,2],col = c(2,2,2,2,0,0,1,1,1,1,1,1),cex.axis = 1.5, cex.lab = 1.5,cex.main = 1.5,xlab = "Component 2",main = 'Distribution of PC2')
|
#############################################################################
#############################################################################
###
### AnnDbPkg-maker.R file
###
#############################################################################
#############################################################################
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### The "AnnDbPkgSeed" class.
###
## S4 seed class describing everything needed to forge one annotation
## package: DESCRIPTION fields (Package, Title, Version, License, ...), the
## template to expand (PkgTemplate) and the chip/organism metadata.  Slots
## prototyped as NA are filled in later, either from the DB "metadata" table
## (initWithDbMetada) or computed from other slots (initComputedSlots).
setClass(
    "AnnDbPkgSeed",
    representation(
        Package="character",          # e.g. "hgu133a2.db"
        Title="character",
        Version="character",          # e.g. "0.0.99"
        License="character",
        Author="character",
        Maintainer="character",
        PkgTemplate="character",      # e.g. "HUMANCHIP.DB"
        DBschema="character",         # e.g. "HUMANCHIP_DB"
        AnnObjPrefix="character",     # e.g. "hgu133a2"
        AnnObjTarget="character",     # e.g. "chip hgu133a2"
        organism="character",         # e.g. "Homo sapiens"
        species="character",          # e.g. "Human"
        manufacturer="character",     # e.g. "Affymetrix"
        chipName="character",         # e.g. "Human Genome U133A 2.0 Array"
        manufacturerUrl="character",  # e.g. "http://www.affymetrix.com/support/technical/byproduct.affx?product=hgu133-20"
        biocViews="character"
    ),
    prototype(
        Title=as.character(NA),
        License="Artistic-2.0",
        Author="Marc Carlson",
        Maintainer="Bioconductor Package Maintainer <maintainer@bioconductor.org>",
        DBschema=as.character(NA),
        AnnObjPrefix=as.character(NA),
        AnnObjTarget=as.character(NA),
        organism=as.character(NA),
        species=as.character(NA),
        manufacturer=as.character(NA),
        chipName=as.character(NA),
        manufacturerUrl=as.character(NA),
        biocViews=as.character(NA)
    )
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some helper functions.
###
## Fill the seed 'x' from the "metadata" table of the SQLite DB 'dbfile'.
## Each known metadata name is copied into the matching seed slot when that
## slot is still NA; when the slot is already set, the two values must agree
## or an error is raised.  Returns the updated seed.
initWithDbMetada <- function(x, dbfile)
{
    ## metadata-table name -> AnnDbPkgSeed slot name
    metadata2slot <- c(
        DBSCHEMA="DBschema",
        ORGANISM="organism",
        SPECIES="species",
        MANUFACTURER="manufacturer",
        CHIPNAME="chipName",
        MANUFACTURERURL="manufacturerUrl"
    )
    dbconn <- dbFileConnect(dbfile)
    on.exit(dbFileDisconnect(dbconn))  # always release the connection
    metadata <- dbGetTable(dbconn, "metadata")
    ## Sanity-check the table shape before using names as row keys
    if (!identical(colnames(metadata), c("name", "value")))
        stop("\"metadata\" table has unexpected col names")
    if (any(duplicated(metadata$name))) {
        stop("col \"name\" in \"metadata\" table has duplicated values\n",
             "  (this would never happen if \"name\" was defined as a PRIMARY KEY!)")
    }
    ## Key the table by metadata name so values can be looked up by row name
    row.names(metadata) <- metadata$name
    for (i in seq_len(length(metadata2slot))) {
        metadata_name <- names(metadata2slot)[i]
        if (!(metadata_name %in% row.names(metadata))) {
            ## DBSCHEMA is the only mandatory entry; the others are optional
            if (metadata_name == "DBSCHEMA")
                stop("'DBSCHEMA' not found in \"metadata\" table")
            next
        }
        slot_name <- metadata2slot[i]
        val <- metadata[metadata_name, "value"]
        if (is.na(slot(x, slot_name))) {
            ## Slot not set yet: take the DB value
            slot(x, slot_name) <- val
            next
        }
        ## Slot already set: it must be consistent with the DB
        if (slot(x, slot_name) != val)
            stop(metadata_name, " specified in '", dbfile, "' (\"", val, "\") ",
                 "doesn't match 'x@", slot_name, "' (\"", slot(x, slot_name), "\")")
    }
    ## A missing manufacturerUrl is tolerated (empty string) with a warning
    if (is.na(x@manufacturerUrl)) {
        x@manufacturerUrl <- ""
        warning("no manufacturerUrl for package ", x@Package)
    }
    x
}
## Fill in the derived slots of an AnnDbPkgSeed: AnnObjTarget, Title and
## biocViews get sensible defaults computed from the other slots when they
## are still NA.  AnnObjPrefix is mandatory.  Returns the updated seed.
initComputedSlots <- function(x)
{
    if (is.na(x@AnnObjPrefix))
        stop("'AnnObjPrefix' slot must be set for package ", x@Package)
    ## Default target: "chip <prefix>"
    if (is.na(x@AnnObjTarget))
        x@AnnObjTarget <- paste("chip", x@AnnObjPrefix)
    ## Default title, e.g. "Affymetrix ... annotation data (chip hgu133a2)"
    if (is.na(x@Title)) {
        have_parts <- !is.na(x@manufacturer) && !is.na(x@chipName) &&
                      !is.na(x@AnnObjTarget)
        if (have_parts) {
            x@Title <- paste0(x@manufacturer, " ", x@chipName,
                              " annotation data (", x@AnnObjTarget, ")")
        } else {
            warning("not enough information to set the 'Title' slot for package ", x@Package)
        }
    }
    ## Default biocViews: "AnnotationData, <Manuf>Chip, <Org>, <prefix>"
    if (is.na(x@biocViews)
        && !is.na(x@organism)
        && !is.na(x@manufacturer)) {
        manuf_view <- paste0(x@manufacturer, "Chip")
        org_view <- chartr(" ", "_", x@organism)
        x@biocViews <- paste("AnnotationData", manuf_view, org_view,
                             x@AnnObjPrefix, sep=", ")
    }
    x
}
## Return the "map_metadata" table of the DB at 'dbfile', or NULL when the DB
## does not carry one.  The connection is always released on exit.
initWithDbDoc <- function(dbfile)
{
    dbconn <- dbFileConnect(dbfile)
    on.exit(dbFileDisconnect(dbconn))
    if (!dbExistsTable(dbconn, "map_metadata"))
        return(NULL)
    dbGetTable(dbconn, "map_metadata")
}
## For each map name, build a human-readable provenance string (source name,
## URL and date stamp) used for the <map>SOURCE symbols substituted into the
## Rd templates.  Returns NULL when the DB has no "map_metadata" table; maps
## without metadata get NA.  Names of the result are "<map>SOURCE".
getSymbolValuesForManPages <- function(map_names, dbfile)
{
    map_metadata <- initWithDbDoc(dbfile)
    if(is.null(map_metadata)) return(NULL)
    ## vapply() instead of sapply(): guarantees a character vector even for
    ## empty input or when every map lacks metadata (sapply would return a
    ## list or a logical vector in those cases)
    map_source <- vapply(map_names,
        function(this_map)
        {
            map_index <- which(map_metadata$map_name == this_map)
            if (length(map_index) > 0) {
                ## several sources for one map are joined with " and "
                paste(
                    map_metadata[map_index, "source_name"],
                    " \n ",
                    map_metadata[map_index, "source_url"],
                    " \n With a date stamp from the source of:",
                    map_metadata[map_index, "source_date"],
                    sep=" ", collapse=" and ")
            } else {
                NA_character_
            }
        }, character(1))
    ## escape underscores for the Rd (LaTeX-like) markup
    map_source <- gsub("_", "\\_", map_source, fixed=TRUE)
    names(map_source) <- paste(map_names, "SOURCE", sep="")
    map_source
}
## Copy 'infile' to 'outfile', dropping every line that starts with '#'.
## 'outfile' must not already exist.
## Rewritten with readLines()/writeLines() on paths: the original opened two
## connections and closed them manually with on.exit() commented out, so both
## handles leaked whenever an error occurred mid-loop; it also read one line
## per call, which is needlessly slow.
removeCommentsFromFile <- function(infile, outfile)
{
    if (!is.character(infile) || length(infile) != 1 || is.na(infile))
        stop("'infile' must be a character string naming a file")
    if (!is.character(outfile) || length(outfile) != 1 || is.na(outfile))
        stop("'outfile' must be a character string naming a file")
    if (file.exists(outfile))
        stop("file '", outfile, "' already exists")
    all_lines <- readLines(infile)
    ## substr() on an empty string gives "" != "#", so blank lines are kept,
    ## exactly as in the original line-by-line loop
    writeLines(all_lines[substr(all_lines, 1, 1) != "#"], outfile)
}
## Read the ANNDBPKG-INDEX DCF file (default: the one shipped with
## AnnotationForge) after stripping its '#' comment lines, and return the
## parsed matrix from read.dcf().
## Fixes: the temp file now has a unique name (the old fixed name made
## removeCommentsFromFile() fail on concurrent/repeated failed runs) and is
## removed via on.exit() even when read.dcf() errors (it used to leak).
loadAnnDbPkgIndex <- function(file)
{
    if (missing(file)) {
        file <- system.file("extdata", "GentlemanLab", "ANNDBPKG-INDEX.TXT",
                            package="AnnotationForge")
    } else {
        if (!is.character(file) || length(file) != 1 || is.na(file))
            stop("'file' must be a character string naming a file")
    }
    tmp_file <- tempfile(pattern=basename(file), fileext=".tmp")
    on.exit(if (file.exists(tmp_file)) file.remove(tmp_file), add=TRUE)
    removeCommentsFromFile(file, tmp_file)
    read.dcf(tmp_file)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Helpers for filtering out innapropriate manual pages from a template.
###
## This function takes the seed and lists the Mappings
listMappings <- function(x, type){
## get seeds
schema <- x@DBschema ## schema will be like "HUMANCHIP_DB" or "HUMAN_DB"
if(type=="ChipDb"){
orgDbName <- getOrgPkgForSchema(schema)
allSeeds <- NCBICHIP_DB_SeedGenerator(orgDbName)
}else if(type=="OrgDb"){
allSeeds <- AnnotationDbi:::NCBIORG_DB_SeedGenerator()
}
seeds <- filterSeeds(allSeeds, schema, type)
## Then get the names
unlist(lapply(seeds, function(x){return(x$objName)}))
}
## This function will translate the from the actual mappings to the requisite
## man pages. The whole point is to get rid of things like flybase from
## humans etc.
## ALSO problematic: CHRLENGTHS.Rd, GO2ALLEGS.Rd UCSCGENES.Rd
## Keep only the Rd templates whose base name appears in 'maps', then append
## the pages that are always shipped even though they are not bimaps
## (_dbconn, BASE, ORGANISM, MAPCOUNTS), plus CHRLENGTHS for every schema
## that has chromosome-length data.  Returns file names ending in ".Rd".
filterManPages <- function(doc_template_names, maps, x){
  ## strip the .Rd extension so template names can be matched to map names
  base_names <- sub("\\.Rd$", "", doc_template_names)
  base_names <- base_names[base_names %in% maps]
  ## statics that every package documents regardless of its maps
  base_names <- c(base_names, "_dbconn", "BASE", "ORGANISM", "MAPCOUNTS")
  ## schemas with no chromosome-length information
  no_chrlengths <- c("ECOLI_DB", "XENOPUS_DB", "ECOLICHIP_DB",
                     "XENOPUSCHIP_DB", "PIG_DB", "PIGCHIP_DB")
  if (!(x@DBschema %in% no_chrlengths)) {
    base_names <- c(base_names, "CHRLENGTHS")
  }
  paste0(base_names, ".Rd")
}
## And I need a wrapper function to help me filter out things that are not in
## the manList when I call createPackage.
## Stage the template in a scratch directory, remove the man pages that are
## not in 'manList', then hand the filtered tree to Biobase's createPackage().
## NOTE(review): a fixed scratch-dir name is used because tempdir() reportedly
## caused "strange errors" here; the fixed name means only one package can be
## built at a time from a given working directory.
## Fixes vs the original: dir.create() is now checked (it returns FALSE
## instead of erroring when the dir already exists, which used to let a stale
## tree leak into the build), and the scratch dir is removed via on.exit()
## even when createPackage() fails (it used to be left behind).
.createAnnotPackage <-function(pkgname,destinationDir,originDir,symbolValues,
                               manList, unlink=FALSE, quiet=FALSE){
    tdir <- file.path("TEMPANNOTPACKAGEDIRFORFILTERING")
    if (!dir.create(tdir))
        stop("unable to create temp dir '", tdir,
             "' (does it already exist?)")
    on.exit(unlink(tdir, recursive=TRUE), add=TRUE)
    file.copy(from = dir(originDir, full.names = TRUE),
              to = tdir,
              recursive = TRUE)
    ## Then unlink unwanted man pages from tdir.  (Calling unlink() here finds
    ## base::unlink even though a logical arg of the same name is in scope,
    ## because R looks for a *function* binding in call position.)
    manDir <- file.path(tdir, "man")
    manFiles <- dir(manDir)
    rmFiles <- manFiles[!(manFiles %in% manList)]
    rmFiles <- file.path(manDir, rmFiles)
    unlink(rmFiles)
    ## Then call createPackage on the filtered copy
    createPackage(pkgname=pkgname,
                  destinationDir=destinationDir,
                  originDir=tdir,
                  symbolValues=symbolValues,
                  unlink=unlink,
                  quiet=quiet)
}
## TESTING:
## library(AnnotationForge)
## debug(AnnotationForge:::.createAnnotPackage)
## debug(AnnotationForge:::.makeAnnDbPkg) ## this one is always called.
## debug(AnnotationForge:::.makeAnnDbPkgs) ## This one is called 1st for mine
## debug(AnnotationForge:::.makeAnnDbPkgList) ## called for others
## source("~/proj/Rpacks/AnnotationForge/inst/extdata/GentlemanLab/org-batch-script.R")
## source("~/proj/Rpacks/AnnotationForge/inst/extdata/GentlemanLab/chip-batch-script.R")
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### The "makeAnnDbPkg" new generic.
###
## Generic entry point for forging an annotation package.  Dispatches on 'x':
## an AnnDbPkgSeed, a plain list of seed slot values, or a character vector of
## package names looked up in the ANNDBPKG-INDEX file.
setGeneric("makeAnnDbPkg", signature="x",
    function(x, dbfile, dest_dir=".", no.man=FALSE, ...)
        standardGeneric("makeAnnDbPkg")
)
## helper to extract metadata
## Look up the org package a (NOCHIPSCHEMA) chip DB declares as its dependency
## in its "metadata" table.  Returns a 1x1 data.frame (the 'value' column).
## Fix: the SQLite connection was never disconnected; close it on exit.
.getOrgDepFromMetadata <- function(dbfile){
    con <- dbConnect(SQLite(), dbfile)
    on.exit(dbDisconnect(con), add=TRUE)
    dbGetQuery(con, "SELECT value FROM metadata WHERE name = 'ORGPKGDEP'")
}
## Workhorse behind makeAnnDbPkg(): builds one source annotation package from
## the seed 'x' and the SQLite DB 'dbfile' under 'dest_dir'.
## Steps: complete the seed from the DB metadata, resolve the template,
## collect the @SYMBOL@ substitution values, filter/rename the Rd templates
## (skipped when no.man=TRUE), expand the template, and copy the DB into
## inst/extdata (made read-only off Windows).  Returns TRUE invisibly.
## Fix vs the original: the SQLite connection 'con1' was opened and never
## disconnected; it is now closed via on.exit().
.makeAnnDbPkg <- function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
    x <- initWithDbMetada(x, dbfile)
    x <- initComputedSlots(x)
    dbfile_basename <- basename(dbfile)
    ## The DB file name is the single source of truth for the prefix
    if (dbfile_basename != paste(x@AnnObjPrefix, ".sqlite", sep=""))
        stop("'", dbfile, "': File name doesn't match 'x@AnnObjPrefix' (", x@AnnObjPrefix, ")")
    ## A bare template name is resolved inside AnnotationForge; a path
    ## starting with "/" is used as-is.
    ## NOTE(review): "^/" does not recognize Windows-style absolute paths.
    if (!grepl("^/", x@PkgTemplate)[1]) { ##TODO: this regex seems hacky?
        template_path <- system.file("AnnDbPkg-templates",
                                     x@PkgTemplate,
                                     package="AnnotationForge")
    } else {
        template_path <- x@PkgTemplate
    }
    ann_dbi_version <- installed.packages()['AnnotationDbi','Version']
    ## only define 'org_version' if we are making a chipDb package.
    ## Otherwise it will only cause trouble.
    con1 <- dbConnect(dbDriver("SQLite"), dbfile)
    ## BUG FIX: the connection used to be leaked; release it when we return
    on.exit(dbDisconnect(con1), add=TRUE)
    ## 'type' is a 1x1 data.frame; "==" below compares its single value
    type <- dbGetQuery(con1,
                       "SELECT value FROM metadata WHERE name='Db type'")
    if(type=="ChipDb"){
        org_version <- installed.packages()['org.Hs.eg.db','Version']
        ## NOCHIPSCHEMA DBs know who they depend on
        if(x@DBschema=="NOCHIPSCHEMA_DB"){
            org_pkg <- as.character(.getOrgDepFromMetadata(dbfile))
        }else{
            org_pkg <- paste0(getOrgPkgForSchema(x@DBschema),".db")
        }
    }else{
        org_version <- "no org version date"
        org_pkg <- "no org pkg required"
    }
    ## @SYMBOL@ -> value substitutions applied to the whole template tree
    symvals <- list(
        DBSCHEMA=x@DBschema,
        PKGTITLE=x@Title,
        ANNOBJPREFIX=x@AnnObjPrefix,
        ANNOBJTARGET=x@AnnObjTarget,
        ORGANISM=x@organism,
        SPECIES=x@species,
        MANUF=x@manufacturer,
        CHIPNAME=x@chipName,
        MANUFURL=x@manufacturerUrl,
        AUTHOR=x@Author,
        MAINTAINER=x@Maintainer,
        PKGVERSION=x@Version,
        LIC=x@License,
        BIOCVIEWS=x@biocViews,
        DBFILE=dbfile_basename,
        ANNDBIVERSION=ann_dbi_version,
        ORGVERSION=org_version,
        ORGPKGDEP=org_pkg
    )
    man_dir <- file.path(template_path, "man")
    doc_template_names <- list.files(man_dir, "\\.Rd$")
    if (file.exists(man_dir)) {
        if (!no.man) {
            #is_static <- doc_template_names %in% c("_dbconn.Rd", "_dbfile.Rd")
            #doc_template_names <- doc_template_names[!is_static]
            ## Filter man pages down to the maps the schema actually exposes,
            ## but only for the NCBI* templates
            if(grepl("NCBI",x@PkgTemplate)){
                ## extract the map_names from the bimap definitions
                map_names <- listMappings(x, type)
                ## now use this info to filter to relevant mappings
                doc_template_names <- filterManPages(doc_template_names,
                                                     maps=map_names,x)
            }else{## if old school, just use the man pages in template
                map_names <- sub("\\.Rd$", "", doc_template_names)
            }
            if (length(map_names) != 0)
                symvals <- c(symvals, getSymbolValuesForManPages(map_names, dbfile))
        } else {
            doc_template_names <- list()
            ## NOTE(review): this deletes man/ from the *template* tree, not
            ## from a copy -- confirm that is intended for no.man=TRUE
            unlink(man_dir, recursive=TRUE) # delete template
        }
    }
    if (any(duplicated(names(symvals)))) {
        str(symvals)
        stop("'symvals' contains duplicated symbols (see above)")
    }
    ## Remove NA values (missing optional metadata must not reach the template)
    symvals <- symvals[!sapply(symvals, is.na)]
    .createAnnotPackage(x@Package,
                        destinationDir=dest_dir,
                        originDir=template_path,
                        symbolValues=symvals,
                        manList=doc_template_names)
    ## rename Rd files (prepend the pkg name) inside the generated package
    if (file.exists(man_dir) && !no.man && length(doc_template_names) != 0) {
        doc_path <- file.path(dest_dir, x@Package, "man")
        from_doc_names <- paste(doc_path, doc_template_names, sep=.Platform$file.sep)
        to_doc_names <- paste(x@AnnObjPrefix, doc_template_names, sep="")
        to_doc_names <- paste(doc_path, to_doc_names, sep=.Platform$file.sep)
        mapply(file.rename, from_doc_names, to_doc_names)
    }
    ## Ship the DB inside the package, read-only where chmod exists
    dest_db_dir <- file.path(dest_dir, x@Package, "inst", "extdata")
    if (!file.exists(dest_db_dir)
        && !dir.create(dest_db_dir, recursive=TRUE))
        stop("unable to create dest db dir ", dest_db_dir)
    dest_dbfile <- file.path(dest_db_dir, dbfile_basename)
    if (!file.copy(dbfile, dest_dbfile))
        stop("cannot copy file '", dbfile, "' to '", dest_dbfile, "'")
    if(.Platform$OS.type != 'windows'){
        command <- paste("chmod 444", dest_dbfile)
        if (system(command) != 0)
            warning(command, " failed")
    }
    return(invisible(TRUE))
}
## Seed dispatch: forge one package directly from an AnnDbPkgSeed.
setMethod("makeAnnDbPkg", "AnnDbPkgSeed",
    function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
        .makeAnnDbPkg(x, dbfile, dest_dir=dest_dir, no.man=no.man, ...)
    }
)
## List dispatch helper: 'x' is a plain list of AnnDbPkgSeed slot values.
## Tag it with the class name, let do.call(new, ...) build the seed, then
## re-dispatch on the seed.  (Note: any extra '...' args are not forwarded.)
.makeAnnDbPkgList <- function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
    x[["Class"]] <- "AnnDbPkgSeed"
    seed <- do.call(new, x)
    makeAnnDbPkg(seed, dbfile, dest_dir, no.man)
}
## List dispatch: 'x' holds the seed slot values as a named list.
setMethod("makeAnnDbPkg", "list",
    function(x, dbfile, dest_dir=".", no.man=FALSE, ...) {
        .makeAnnDbPkgList(x, dbfile, dest_dir=dest_dir, no.man=no.man, ...)
    }
)
## Batch builder: 'x' names one or more packages listed in the DCF index file
## 'dbfile' (default: the ANNDBPKG-INDEX.TXT shipped with AnnotationForge).
## A single empty/NA 'x' selects every entry; extra named character args in
## '...' further filter the index by column value.  Each selected row becomes
## one makeAnnDbPkg() call, with the row's DBfile field as the DB path.
.makeAnnDbPkgs <- function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
    if (missing(dbfile)) {
        dbfile <- system.file("extdata", "GentlemanLab", "ANNDBPKG-INDEX.TXT",
                              package="AnnotationForge")
    }
    index <- loadAnnDbPkgIndex(dbfile)
    if (length(x) != 1) {
        ## several names: each must match an index entry exactly
        ii <- match(x, index[ , "Package"])
        if (any(is.na(ii)))
            stop("packages ", paste(x[is.na(ii)], collapse=", "),
                 " not in ", dbfile)
        index <- index[ii, , drop=FALSE]
    } else if (!is.na(x) && x != "") {
        ## a single non-empty name is matched as an anchored regex
        pkgname <- paste("^", x, "$", sep="")
        ii <- grep(pkgname, index[ , "Package"])
        index <- index[ii, , drop=FALSE]
    }
    ## apply the extra column filters passed through '...'
    filter <- list(...)
    for (j in seq_len(length(filter))) {
        colname <- names(filter)[j]
        if (!(colname %in% colnames(index)))
            stop("unknown field '", colname, "'")
        colvals <- filter[[j]]
        if (!is.character(colvals))
            stop("extra arg values must be of type character")
        index <- index[index[ , colname] %in% colvals, , drop=FALSE]
    }
    pkgnames_in1string <- paste(index[, "Package"], collapse=", ")
    cat(nrow(index), " package(s) to make: ",
        pkgnames_in1string, "\n", sep="")
    for (i in seq_len(nrow(index))) {
        ## one index row -> one seed list; NA fields are dropped so the seed
        ## prototype defaults apply
        y <- index[i, ]
        y <- as.list(y[!is.na(y)])
        cat("[", i, "/", nrow(index), "] making package ",
            y[["Package"]], ": ", sep="")
        ## DBfile names the SQLite DB; it is not an AnnDbPkgSeed slot
        dbfile <- y[["DBfile"]]
        y <- y[names(y) != "DBfile"]
        makeAnnDbPkg(y, dbfile, dest_dir, no.man)
    }
    cat("DONE (", nrow(index), " package(s) made under the ",
        dest_dir, " directory)\n", sep="")
}
## Character dispatch: 'x' names packages listed in the ANNDBPKG-INDEX file.
setMethod("makeAnnDbPkg", "character",
    function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
        .makeAnnDbPkgs(x, dbfile, dest_dir=dest_dir, no.man=no.man, ...)
    }
)
| /R/makeAnnDbPkg.R | no_license | Bioconductor/AnnotationForge | R | false | false | 18,150 | r | #############################################################################
#############################################################################
###
### AnnDbPkg-maker.R file
###
#############################################################################
#############################################################################
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### The "AnnDbPkgSeed" class.
###
setClass(
"AnnDbPkgSeed",
representation(
Package="character", # e.g. "hgu133a2.db"
Title="character",
Version="character", # e.g. "0.0.99"
License="character",
Author="character",
Maintainer="character",
PkgTemplate="character", # e.g. "HUMANCHIP.DB"
DBschema="character", # e.g. "HUMANCHIP_DB"
AnnObjPrefix="character", # e.g. "hgu133a2"
AnnObjTarget="character", # e.g. "chip hgu133a2"
organism="character", # e.g. "Homo sapiens"
species="character", # e.g. "Human"
manufacturer="character", # e.g. "Affymetrix"
chipName="character", # e.g. "Human Genome U133A 2.0 Array"
manufacturerUrl="character", # e.g. "http://www.affymetrix.com/support/technical/byproduct.affx?product=hgu133-20"
biocViews="character"
),
prototype(
Title=as.character(NA),
License="Artistic-2.0",
Author="Marc Carlson",
Maintainer="Bioconductor Package Maintainer <maintainer@bioconductor.org>",
DBschema=as.character(NA),
AnnObjPrefix=as.character(NA),
AnnObjTarget=as.character(NA),
organism=as.character(NA),
species=as.character(NA),
manufacturer=as.character(NA),
chipName=as.character(NA),
manufacturerUrl=as.character(NA),
biocViews=as.character(NA)
)
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some helper functions.
###
initWithDbMetada <- function(x, dbfile)
{
metadata2slot <- c(
DBSCHEMA="DBschema",
ORGANISM="organism",
SPECIES="species",
MANUFACTURER="manufacturer",
CHIPNAME="chipName",
MANUFACTURERURL="manufacturerUrl"
)
dbconn <- dbFileConnect(dbfile)
on.exit(dbFileDisconnect(dbconn))
metadata <- dbGetTable(dbconn, "metadata")
if (!identical(colnames(metadata), c("name", "value")))
stop("\"metadata\" table has unexpected col names")
if (any(duplicated(metadata$name))) {
stop("col \"name\" in \"metadata\" table has duplicated values\n",
" (this would never happen if \"name\" was defined as a PRIMARY KEY!)")
}
row.names(metadata) <- metadata$name
for (i in seq_len(length(metadata2slot))) {
metadata_name <- names(metadata2slot)[i]
if (!(metadata_name %in% row.names(metadata))) {
if (metadata_name == "DBSCHEMA")
stop("'DBSCHEMA' not found in \"metadata\" table")
next
}
slot_name <- metadata2slot[i]
val <- metadata[metadata_name, "value"]
if (is.na(slot(x, slot_name))) {
slot(x, slot_name) <- val
next
}
if (slot(x, slot_name) != val)
stop(metadata_name, " specified in '", dbfile, "' (\"", val, "\") ",
"doesn't match 'x@", slot_name, "' (\"", slot(x, slot_name), "\")")
}
if (is.na(x@manufacturerUrl)) {
x@manufacturerUrl <- ""
warning("no manufacturerUrl for package ", x@Package)
}
x
}
initComputedSlots <- function(x)
{
if (is.na(x@AnnObjPrefix))
stop("'AnnObjPrefix' slot must be set for package ", x@Package)
## Automatic default for "AnnObjTarget" slot
if (is.na(x@AnnObjTarget))
x@AnnObjTarget <- paste("chip", x@AnnObjPrefix)
## Automatic default for "Title" slot
if (is.na(x@Title)) {
if (is.na(x@manufacturer) || is.na(x@chipName) || is.na(x@AnnObjTarget)) {
warning("not enough information to set the 'Title' slot for package ", x@Package)
} else {
x@Title <- paste(x@manufacturer,
" ",
x@chipName,
" annotation data (",
x@AnnObjTarget,
")", sep="")
}
}
## Automatic default for "biocViews" slot
if (is.na(x@biocViews)
&& !is.na(x@organism)
&& !is.na(x@manufacturer)) {
chip_view <- paste(x@manufacturer, "Chip", sep="")
org_view <- chartr(" ", "_", x@organism)
x@biocViews <- paste("AnnotationData", chip_view, org_view,
x@AnnObjPrefix, sep=", ")
}
x
}
initWithDbDoc <- function(dbfile)
{
dbconn <- dbFileConnect(dbfile)
on.exit(dbFileDisconnect(dbconn))
if(dbExistsTable(dbconn, "map_metadata")){
map_metadata <- dbGetTable(dbconn, "map_metadata")
return(map_metadata)
} else {
return(NULL)
}
}
getSymbolValuesForManPages <- function(map_names, dbfile)
{
map_metadata <- initWithDbDoc(dbfile)
if(is.null(map_metadata)) return(NULL)
map_source <- sapply(map_names,
function(this_map)
{
map_index <- which(map_metadata$map_name == this_map)
if (length(map_index) > 0) {
this_source <- paste(
map_metadata[map_index, "source_name"],
" \n ",
map_metadata[map_index, "source_url"],
" \n With a date stamp from the source of:",
map_metadata[map_index, "source_date"],
sep=" ", collapse=" and ")
} else {
this_source <- NA
}
this_source
})
map_source <- gsub("_", "\\_", map_source, fixed=TRUE)
names(map_source) <- paste(map_names, "SOURCE", sep="")
map_source
}
removeCommentsFromFile <- function(infile, outfile)
{
if (!is.character(infile) || length(infile) != 1 || is.na(infile))
stop("'infile' must be a character string naming a file")
if (!is.character(outfile) || length(outfile) != 1 || is.na(outfile))
stop("'outfile' must be a character string naming a file")
if (file.exists(outfile))
stop("file '", outfile, "' already exists")
infile <- file(infile, "r")
#on.exit(close(infile))
outfile <- file(outfile, "w")
#on.exit(close(outfile)) # doesn't seem to work
while (TRUE) {
text <- readLines(infile, n=1)
if (length(text) == 0)
break
if (substr(text, 1, 1) != "#")
writeLines(text, outfile)
}
close(outfile)
close(infile)
}
loadAnnDbPkgIndex <- function(file)
{
if (missing(file)) {
file <- system.file("extdata", "GentlemanLab", "ANNDBPKG-INDEX.TXT",
package="AnnotationForge")
} else {
if (!is.character(file) || length(file) != 1 || is.na(file))
stop("'file' must be a character string naming a file")
}
tmp_file <- file.path(tempdir(), paste(basename(file), "tmp", sep="."))
removeCommentsFromFile(file, tmp_file)
index <- read.dcf(tmp_file)
file.remove(tmp_file)
index
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Helpers for filtering out innapropriate manual pages from a template.
###
## This function takes the seed and lists the Mappings
listMappings <- function(x, type){
## get seeds
schema <- x@DBschema ## schema will be like "HUMANCHIP_DB" or "HUMAN_DB"
if(type=="ChipDb"){
orgDbName <- getOrgPkgForSchema(schema)
allSeeds <- NCBICHIP_DB_SeedGenerator(orgDbName)
}else if(type=="OrgDb"){
allSeeds <- AnnotationDbi:::NCBIORG_DB_SeedGenerator()
}
seeds <- filterSeeds(allSeeds, schema, type)
## Then get the names
unlist(lapply(seeds, function(x){return(x$objName)}))
}
## This function will translate the from the actual mappings to the requisite
## man pages. The whole point is to get rid of things like flybase from
## humans etc.
## ALSO problematic: CHRLENGTHS.Rd, GO2ALLEGS.Rd UCSCGENES.Rd
filterManPages <- function(doc_template_names, maps, x){
docs <- sub("\\.Rd$", "", doc_template_names)
docs <- docs[docs %in% maps]
## Add things that will always be needed but are not themselves really bimaps
docs <- c(docs, "_dbconn" ,"BASE","ORGANISM","MAPCOUNTS")
if(!any(c("ECOLI_DB","XENOPUS_DB","ECOLICHIP_DB","XENOPUSCHIP_DB","PIG_DB"
,"PIGCHIP_DB") %in% x@DBschema)){
docs <- c(docs, "CHRLENGTHS")
}
paste(docs, ".Rd", sep="")
}
## And I need a wrapper function to help me filter out things that are not in
## the manList when I call createPackage.
.createAnnotPackage <-function(pkgname,destinationDir,originDir,symbolValues,
manList, unlink=FALSE, quiet=FALSE){
tdir <- file.path("TEMPANNOTPACKAGEDIRFORFILTERING")
dir.create(tdir)
# tdir <- file.path(tempdir()) ## tempdir() causes strange errors... :(
file.copy(from = dir(originDir, full.names = TRUE),
to = tdir,
recursive = TRUE)
## Then unlink unwanted man pages from tdir
manDir <- file.path(tdir, "man")
manFiles <- dir(manDir)
rmFiles <- manFiles[!(manFiles %in% manList)]
rmFiles <- file.path(manDir, rmFiles)
unlink(rmFiles)
## Then call createPackage
createPackage(pkgname=pkgname,
destinationDir=destinationDir,
originDir=tdir,
symbolValues=symbolValues,
unlink=unlink,
quiet=quiet)
## Then remove our terrible temp dir
unlink(tdir, recursive=TRUE)
## Will need to return to tempdir() if we ever want to be able to do more
## than one at a time... :(
}
## TESTING:
## library(AnnotationForge)
## debug(AnnotationForge:::.createAnnotPackage)
## debug(AnnotationForge:::.makeAnnDbPkg) ## this one is always called.
## debug(AnnotationForge:::.makeAnnDbPkgs) ## This one is called 1st for mine
## debug(AnnotationForge:::.makeAnnDbPkgList) ## called for others
## source("~/proj/Rpacks/AnnotationForge/inst/extdata/GentlemanLab/org-batch-script.R")
## source("~/proj/Rpacks/AnnotationForge/inst/extdata/GentlemanLab/chip-batch-script.R")
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### The "makeAnnDbPkg" new generic.
###
setGeneric("makeAnnDbPkg", signature="x",
function(x, dbfile, dest_dir=".", no.man=FALSE, ...)
standardGeneric("makeAnnDbPkg")
)
## helper to extract metadata
.getOrgDepFromMetadata <- function(dbfile){
con <- dbConnect(SQLite(), dbfile)
dbGetQuery(con, "SELECT value FROM metadata WHERE name = 'ORGPKGDEP'")
}
## Workhorse behind all makeAnnDbPkg() methods.
##
## 'x' is an AnnDbPkgSeed whose slots drive package creation; 'dbfile' is
## the .sqlite DB to ship inside the package; 'dest_dir' is where the
## package directory is created; 'no.man' suppresses man page generation.
## Steps: (1) finish initializing the seed from the DB metadata, (2) locate
## the package template, (3) assemble the symbol/value substitutions,
## (4) select/filter the man page templates, (5) render the package via
## .createAnnotPackage(), (6) rename the Rd files and copy the DB into
## inst/extdata (made read-only on non-Windows).  Returns TRUE invisibly.
.makeAnnDbPkg <- function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
    x <- initWithDbMetada(x, dbfile)
    x <- initComputedSlots(x)
    dbfile_basename <- basename(dbfile)
    ## The DB file name must agree with the seed's AnnObjPrefix slot.
    if (dbfile_basename != paste(x@AnnObjPrefix, ".sqlite", sep=""))
        stop("'", dbfile, "': File name doesn't match 'x@AnnObjPrefix' (", x@AnnObjPrefix, ")")
    ## A relative PkgTemplate names one of the templates shipped with
    ## AnnotationForge; an absolute path is used as-is.
    if (!grepl("^/", x@PkgTemplate)[1]) { ##TODO: this regex seems hacky?
        template_path <- system.file("AnnDbPkg-templates",
                                     x@PkgTemplate,
                                     package="AnnotationForge")
    } else {
        template_path <- x@PkgTemplate
    }
    ann_dbi_version <- installed.packages()['AnnotationDbi','Version']
    ## only define 'org_version' if we are making a chipDb package.
    ## Otherwise it will only cause trouble.
    con1 <- dbConnect(dbDriver("SQLite"), dbfile)
    type <- dbGetQuery(con1,
                       "SELECT value FROM metadata WHERE name='Db type'")
    dbDisconnect(con1)  # close right away -- this connection used to leak
    if(type=="ChipDb"){
        org_version <- installed.packages()['org.Hs.eg.db','Version']
        ## NOCHIPSCHEMA DBs know who they depend on
        if(x@DBschema=="NOCHIPSCHEMA_DB"){
            org_pkg <- as.character(.getOrgDepFromMetadata(dbfile))
        }else{
            org_pkg <- paste0(getOrgPkgForSchema(x@DBschema),".db")
        }
    }else{
        org_version <- "no org version date"
        org_pkg <- "no org pkg required"
    }
    ## Symbol/value pairs substituted into the template files.
    symvals <- list(
        DBSCHEMA=x@DBschema,
        PKGTITLE=x@Title,
        ANNOBJPREFIX=x@AnnObjPrefix,
        ANNOBJTARGET=x@AnnObjTarget,
        ORGANISM=x@organism,
        SPECIES=x@species,
        MANUF=x@manufacturer,
        CHIPNAME=x@chipName,
        MANUFURL=x@manufacturerUrl,
        AUTHOR=x@Author,
        MAINTAINER=x@Maintainer,
        PKGVERSION=x@Version,
        LIC=x@License,
        BIOCVIEWS=x@biocViews,
        DBFILE=dbfile_basename,
        ANNDBIVERSION=ann_dbi_version,
        ORGVERSION=org_version,
        ORGPKGDEP=org_pkg
    )
    man_dir <- file.path(template_path, "man")
    doc_template_names <- list.files(man_dir, "\\.Rd$")
    if (file.exists(man_dir)) {
        if (!no.man) {
            #is_static <- doc_template_names %in% c("_dbconn.Rd", "_dbfile.Rd")
            #doc_template_names <- doc_template_names[!is_static]
            ## Do this only if your schema is an NCBI* one.
            if(grepl("NCBI",x@PkgTemplate)){
                ## extract the map_names from the bimap definitions
                map_names <- listMappings(x, type)
                ## now use this info to filter to relevant mappings
                doc_template_names <- filterManPages(doc_template_names,
                                                     maps=map_names,x)
            }else{## if old school, just use the man pages in template
                map_names <- sub("\\.Rd$", "", doc_template_names)
            }
            if (length(map_names) != 0)
                symvals <- c(symvals, getSymbolValuesForManPages(map_names, dbfile))
        } else {
            doc_template_names <- list()
            unlink(man_dir, recursive=TRUE) # delete template
        }
    }
    if (any(duplicated(names(symvals)))) {
        str(symvals)
        stop("'symvals' contains duplicated symbols (see above)")
    }
    ## Remove NA values
    symvals <- symvals[!sapply(symvals, is.na)]
    .createAnnotPackage(x@Package,
                        destinationDir=dest_dir,
                        originDir=template_path,
                        symbolValues=symvals,
                        manList=doc_template_names)
    ## rename Rd files (prepend the pkg name)
    ## Here is also where we put the man files into the package (after renaming them)
    if (file.exists(man_dir) && !no.man && length(doc_template_names) != 0) {
        doc_path <- file.path(dest_dir, x@Package, "man")
        from_doc_names <- paste(doc_path, doc_template_names, sep=.Platform$file.sep)
        to_doc_names <- paste(x@AnnObjPrefix, doc_template_names, sep="")
        to_doc_names <- paste(doc_path, to_doc_names, sep=.Platform$file.sep)
        mapply(file.rename, from_doc_names, to_doc_names)
    }
    ## Ship the DB inside inst/extdata of the new package.
    dest_db_dir <- file.path(dest_dir, x@Package, "inst", "extdata")
    if (!file.exists(dest_db_dir)
        && !dir.create(dest_db_dir, recursive=TRUE))
        stop("unable to create dest db dir ", dest_db_dir)
    dest_dbfile <- file.path(dest_db_dir, dbfile_basename)
    if (!file.copy(dbfile, dest_dbfile))
        stop("cannot copy file '", dbfile, "' to '", dest_dbfile, "'")
    ## Make the shipped DB read-only where chmod exists.
    if(.Platform$OS.type != 'windows'){
        command <- paste("chmod 444", dest_dbfile)
        if (system(command) != 0)
            warning(command, " failed")
    }
    return(invisible(TRUE))
}
## makeAnnDbPkg() on an AnnDbPkgSeed: thin dispatcher onto the workhorse.
setMethod("makeAnnDbPkg", "AnnDbPkgSeed",
    function(x, dbfile, dest_dir = ".", no.man = FALSE, ...)
        .makeAnnDbPkg(x, dbfile, dest_dir = dest_dir, no.man = no.man, ...))
## makeAnnDbPkg() helper for a plain list: the list holds the slot values
## for an AnnDbPkgSeed; build the seed object and delegate to the seed
## method.
.makeAnnDbPkgList <- function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
    x$Class <- "AnnDbPkgSeed"
    y <- do.call(new, x)
    ## Forward '...' too (it used to be silently dropped) and pass the
    ## optional arguments by name for safety.
    makeAnnDbPkg(y, dbfile, dest_dir=dest_dir, no.man=no.man, ...)
}
## makeAnnDbPkg() on a list of seed slot values.
setMethod("makeAnnDbPkg", "list",
    function(x, dbfile, dest_dir = ".", no.man = FALSE, ...)
        .makeAnnDbPkgList(x, dbfile, dest_dir = dest_dir, no.man = no.man, ...))
## Build one or more annotation packages listed in an index file.
## 'x' is a character vector of package names; 'dbfile' here is the path to
## the index file (NOT a .sqlite DB) and defaults to the one shipped with
## AnnotationForge; extra named character arguments in '...' act as
## column-value filters on the index rows.
.makeAnnDbPkgs <- function(x, dbfile, dest_dir=".", no.man=FALSE, ...){
    if (missing(dbfile)) {
        dbfile <- system.file("extdata", "GentlemanLab", "ANNDBPKG-INDEX.TXT",
                              package="AnnotationForge")
    }
    index <- loadAnnDbPkgIndex(dbfile)
    if (length(x) != 1) {
        ## Several package names: all of them must be present in the index.
        ii <- match(x, index[ , "Package"])
        if (any(is.na(ii)))
            stop("packages ", paste(x[is.na(ii)], collapse=", "),
                 " not in ", dbfile)
        index <- index[ii, , drop=FALSE]
    } else if (!is.na(x) && x != "") {
        ## Single non-empty name: match it as an anchored regex.
        pkgname <- paste("^", x, "$", sep="")
        ii <- grep(pkgname, index[ , "Package"])
        index <- index[ii, , drop=FALSE]
    }
    ## Extra named args filter the index rows by column value.
    filter <- list(...)
    for (j in seq_len(length(filter))) {
        colname <- names(filter)[j]
        if (!(colname %in% colnames(index)))
            stop("unknown field '", colname, "'")
        colvals <- filter[[j]]
        if (!is.character(colvals))
            stop("extra arg values must be of type character")
        index <- index[index[ , colname] %in% colvals, , drop=FALSE]
    }
    pkgnames_in1string <- paste(index[, "Package"], collapse=", ")
    cat(nrow(index), " package(s) to make: ",
        pkgnames_in1string, "\n", sep="")
    for (i in seq_len(nrow(index))) {
        ## Each index row, minus its DBfile column, becomes the seed list
        ## handed to the "list" method of makeAnnDbPkg().
        y <- index[i, ]
        y <- as.list(y[!is.na(y)])
        cat("[", i, "/", nrow(index), "] making package ",
            y[["Package"]], ": ", sep="")
        dbfile <- y[["DBfile"]]
        y <- y[names(y) != "DBfile"]
        makeAnnDbPkg(y, dbfile, dest_dir, no.man)
    }
    cat("DONE (", nrow(index), " package(s) made under the ",
        dest_dir, " directory)\n", sep="")
}
## makeAnnDbPkg() on package names: drive batch creation from an index file.
setMethod("makeAnnDbPkg", "character",
    function(x, dbfile, dest_dir = ".", no.man = FALSE, ...)
        .makeAnnDbPkgs(x, dbfile, dest_dir = dest_dir, no.man = no.man, ...))
|
################################################################################
# second_scoring.R -- pull BANDA clinical data from REDCap and export the
# entries where the initial scoring and the second scoring disagree.
#source('redcap_data_cleaning.R')

# STEP 1 - Set working directory (the place you want to save your file)
# Use setwd() or manually set it in the 'Files' panel
setwd("~/Desktop/")

# Load dependencies -- library() errors loudly if a package is missing,
# unlike require(), which only returns FALSE
library(dplyr)
library(reshape2)
library(REDCapR)
library(stringr)

################################################################################
# STEP 2 - Import REDCap data from API
api_url <- "https://redcap.partners.org/redcap/api/"

# Nicole's API keys; substitute yours
# SECURITY: these tokens are hard-coded in source control; rotate them and
# read them from an untracked config file or environment variables instead.
api_keys <- list(clin = "98A8017C2C92DA7E7A53C50E1670C399",
                 banda_dat = "EDD2EE5E5FAA347F65D24D575B9B1125",
                 clin_ss = 'B1EDFAD0DA74AF59597DCB7C18AC7578')

# Pull data from REDCap
clin <- redcap_read_oneshot(api_url, api_keys$clin)$data
clin_ss <- redcap_read_oneshot(api_url, api_keys$clin_ss)$data
banda_dat <- redcap_read_oneshot(api_url, api_keys$banda_dat)$data

################################################################################
# STEP 3 - Load all code up to step 4

## Read latest BANDA participant labels (banda_id and subject_id)
labels_cur <- banda_dat %>%
  select(banda_id, subject_id, age, sex) %>%
  arrange(banda_id) %>%
  mutate(subject_id = as.numeric(str_extract(subject_id, '\\d{3}'))) %>%
  filter(banda_id != '', str_detect(banda_id, 'BANDA'))

## Clean data from 'BANDA Clinical' and 'BANDA Clinical Second Scoring'
# Pulls both initial and 12m clinical data
clin_vars <- intersect(names(clin), names(clin_ss))

clin_clean <- clin %>% as_tibble %>%
  mutate(subject_id = as.numeric(str_extract(subject_id, '[0-9]+'))) %>%
  select(subject_id, all_of(clin_vars)) %>%  # all_of(): explicit external vector
  melt(id.vars='subject_id')

ss_clean <- clin_ss %>% as_tibble %>%
  mutate(subject_id = as.numeric(str_extract(redcap_id_ss, '[0-9]+'))) %>%
  select(subject_id, all_of(clin_vars)) %>%
  melt(id.vars='subject_id')

## Find entries that are different between clin_clean and ss_clean
# NOTE(review): the 'clin_all'/'ss_all' defaults reference globals that are
# never defined in this script -- always pass these arguments explicitly.
ss_diff <- function(clin_dat = clin_all, ss_dat = ss_all, labels = labels_cur,
                    merge_vars = c('subject_id', 'variable')){
  df <- merge(clin_dat, ss_dat, by = merge_vars) %>%
    rename(clin = value.x, ss = value.y) %>%
    filter(clin != ss) %>%
    merge(labels, ., by='subject_id') %>%  # was hard-coded to the labels_cur global
    arrange(banda_id)
  return(df)
}

clin_diff <- ss_diff(clin_clean, ss_clean, labels_cur) %>%
  filter(clin != "[document]")

################################################################################
# STEP 4 - Save file

# Set file name.  (The string literal used to be split across two physical
# lines, which embedded a newline inside the output file name.)
cur_date <- format(Sys.Date(), '%m%d%y')
file_name <- paste0('banda_clin_ss_diffs_', cur_date, '.csv')

# Save file
write.csv(clin_diff, file_name, row.names = FALSE)
################################################################################
#source('redcap_data_cleaning.R')
# STEP 1 - Set working directory (the place you want to save your file)
# Use setwd() or manually set it in the 'Files' panel
setwd("~/Desktop/")
# Load dependencies
require(dplyr)
require(reshape2)
require(REDCapR)
require(stringr)
################################################################################
# STEP 2 - Import REDCap data from API
api_url <- "https://redcap.partners.org/redcap/api/"
# Nicole's API keys; substitute yours
api_keys <- list(clin = "98A8017C2C92DA7E7A53C50E1670C399",
banda_dat = "EDD2EE5E5FAA347F65D24D575B9B1125",
clin_ss = 'B1EDFAD0DA74AF59597DCB7C18AC7578')
# Pull data from REDCap
clin <- redcap_read_oneshot(api_url, api_keys$clin)$data
clin_ss <- redcap_read_oneshot(api_url, api_keys$clin_ss)$data
banda_dat <- redcap_read_oneshot(api_url, api_keys$banda_dat)$data
################################################################################
# STEP 3 - Load all code up to step 4
## Read latest BANDA participant labels (banda_id and subject_id)
labels_cur <- banda_dat %>%
select(banda_id, subject_id, age, sex) %>%
arrange(banda_id) %>%
mutate(subject_id = as.numeric(str_extract(subject_id, '\\d{3}'))) %>%
filter(banda_id != '', str_detect(banda_id, 'BANDA'))
## Clean data from 'BANDA Clinical' and 'BANDA Clinical Second Scoring'
# Pulls both initial and 12m clinical data
clin_vars <- intersect(names(clin), names(clin_ss))
clin_clean <- clin %>% as_tibble %>%
mutate(subject_id = as.numeric(str_extract(subject_id, '[0-9]+'))) %>%
select(subject_id, clin_vars) %>%
melt(id.vars='subject_id')
ss_clean <- clin_ss %>% as_tibble %>%
mutate(subject_id = as.numeric(str_extract(redcap_id_ss, '[0-9]+'))) %>%
select(subject_id, clin_vars) %>%
melt(id.vars='subject_id')
## Find entries that are different between clin_clean and ss_clean
ss_diff <- function(clin_dat = clin_all, ss_dat = ss_all, labels = labels_cur,
merge_vars = c('subject_id', 'variable')){
df <- merge(clin_dat, ss_dat, by= merge_vars) %>%
rename(clin = value.x, ss = value.y) %>%
filter(clin != ss) %>%
merge(labels_cur, ., by='subject_id') %>%
arrange(banda_id)
return(df)
}
clin_diff<- ss_diff(clin_clean, ss_clean, labels_cur) %>%
filter(clin!="[document]")
################################################################################
# STEP 4 - Save file
# Set file name
cur_date <- format(Sys.Date(),'%m%d%y')
file_name <- paste0('ban
da_clin_ss_diffs_', cur_date, '.csv')
# Save file
write.csv(clin_diff, file_name, row.names=F) |
## R literacy: Part 5
##########################################################################################################################
## Manipulating data
## Modes, classes, attributes, length, and coercion
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- 1:10
mode(x)
length(x)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- 1:10
as.character(x)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
X<- matrix(1:30,nrow=3)
as.data.frame(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
X<- matrix(1:30, nrow=3)
X
nrow(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
ncol(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
dim(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- 1:10
NROW(x)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
length(X)
length(x)
##########################################################################################################################
##########################################################################################################################
## Indexing, sub-setting, sorting and locating data
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1<- c(5,1,3,8)
v1
v1[3]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1[1:3]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1[-4]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1[v1<5]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1 < 5
v1[c(FALSE,TRUE,TRUE,FALSE)]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
length(v1)
v1[8]<- 10
length(v1)
v1
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
library(ithir)
data(USYD_soil1)
soil.data<- USYD_soil1
dim(soil.data)
str(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:5,1:2]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:2,]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:5, "Total_Carbon"]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:5, c("Total_Carbon", "CEC")]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
na.omit(soil.data[soil.data$ESP>10,])
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
subset(soil.data, ESP>10)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
subset(soil.data, ESP>10 & Lower.Depth > 0.3 )
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
## subset(soil.data, Landclass=="Forest" | Landclass=="native pasture" )
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
head(subset(soil.data, Landclass %in% c("Forest", "native pasture")))
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
X<- matrix(1:30, nrow=3)
X
X[3,8]
X[,3]
Y<- array(1:90, dim=c(3,10,3))
Y[3,1,1]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
list.1<- list(1:10, X, Y)
list.1[[1]]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
list.1[[2]][3,2]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
soil.data.split<- split(soil.data, soil.data$PROFILE)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- rnorm(5)
x
y<- sort(x)
y
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
head(soil.data[order(soil.data$clay),])
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
order(soil.data$clay)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
match(c(25.85,11.45,9.23), soil.data$CEC)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[c(41,59,18),]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
match(max(soil.data$CEC, na.rm=TRUE), soil.data$CEC)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data$CEC[95]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
which(soil.data$ESP>5)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
which(soil.data$ESP>5 & soil.data$clay > 30)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
which(is.na(soil.data$ESP))
soil.data$ESP[c(which(is.na(soil.data$ESP)))]
##########################################################################################################################
##########################################################################################################################
## Factors
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
a<- c(rep(0,4),rep(1,4))
a
a<- factor(a)
a
## ----echo=TRUE, tidy= TRUE,cache=FALSE,eval=TRUE, background='white'-----
soil.drainage<- c("well drained", "imperfectly drained",
"poorly drained", "poorly drained",
"well drained", "poorly drained")
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.drainage1<- factor(soil.drainage)
soil.drainage1
as.numeric(soil.drainage1)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.drainage2<- factor(soil.drainage, levels= c("well drained",
"imperfectly drained", "poorly drained"))
as.numeric(soil.drainage2)
##########################################################################################################################
##########################################################################################################################
## Combining data
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.info1<- data.frame(soil=c("Vertosol", "Hydrosol", "Sodosol"), response=1:3)
soil.info1
soil.info2<- data.frame(soil=c("Chromosol", "Dermosol", "Tenosol"), response=4:6)
soil.info2
soil.info<- rbind(soil.info1, soil.info2)
soil.info
a.column<- c(2.5,3.2,1.2,2.1,2,0.5)
soil.info3<- cbind(soil.info, SOC=a.column)
soil.info3
| /DSM_book/rcode/intro2R/P3_Intro_R_2017_Part5.R | no_license | brendo1001/brendo1001.github.io | R | false | false | 6,433 | r | ## R literacy: Part 5
##########################################################################################################################
## Manipulating data
## Modes, classes, attributes, length, and coercion
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- 1:10
mode(x)
length(x)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- 1:10
as.character(x)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
X<- matrix(1:30,nrow=3)
as.data.frame(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
X<- matrix(1:30, nrow=3)
X
nrow(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
ncol(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
dim(X)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- 1:10
NROW(x)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
length(X)
length(x)
##########################################################################################################################
##########################################################################################################################
## Indexing, sub-setting, sorting and locating data
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1<- c(5,1,3,8)
v1
v1[3]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1[1:3]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1[-4]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1[v1<5]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
v1 < 5
v1[c(FALSE,TRUE,TRUE,FALSE)]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
length(v1)
v1[8]<- 10
length(v1)
v1
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
library(ithir)
data(USYD_soil1)
soil.data<- USYD_soil1
dim(soil.data)
str(soil.data)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:5,1:2]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:2,]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:5, "Total_Carbon"]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[1:5, c("Total_Carbon", "CEC")]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
na.omit(soil.data[soil.data$ESP>10,])
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
subset(soil.data, ESP>10)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
subset(soil.data, ESP>10 & Lower.Depth > 0.3 )
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
## subset(soil.data, Landclass=="Forest" | Landclass=="native pasture" )
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
head(subset(soil.data, Landclass %in% c("Forest", "native pasture")))
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
X<- matrix(1:30, nrow=3)
X
X[3,8]
X[,3]
Y<- array(1:90, dim=c(3,10,3))
Y[3,1,1]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
list.1<- list(1:10, X, Y)
list.1[[1]]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
list.1[[2]][3,2]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=FALSE, background='white'-----
soil.data.split<- split(soil.data, soil.data$PROFILE)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
x<- rnorm(5)
x
y<- sort(x)
y
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
head(soil.data[order(soil.data$clay),])
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
order(soil.data$clay)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
match(c(25.85,11.45,9.23), soil.data$CEC)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data[c(41,59,18),]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
match(max(soil.data$CEC, na.rm=TRUE), soil.data$CEC)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.data$CEC[95]
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
which(soil.data$ESP>5)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
which(soil.data$ESP>5 & soil.data$clay > 30)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
which(is.na(soil.data$ESP))
soil.data$ESP[c(which(is.na(soil.data$ESP)))]
##########################################################################################################################
##########################################################################################################################
## Factors
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
a<- c(rep(0,4),rep(1,4))
a
a<- factor(a)
a
## ----echo=TRUE, tidy= TRUE,cache=FALSE,eval=TRUE, background='white'-----
soil.drainage<- c("well drained", "imperfectly drained",
"poorly drained", "poorly drained",
"well drained", "poorly drained")
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.drainage1<- factor(soil.drainage)
soil.drainage1
as.numeric(soil.drainage1)
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.drainage2<- factor(soil.drainage, levels= c("well drained",
"imperfectly drained", "poorly drained"))
as.numeric(soil.drainage2)
##########################################################################################################################
##########################################################################################################################
## Combining data
## ----echo=TRUE, tidy=TRUE,cache=FALSE,eval=TRUE, background='white'------
soil.info1<- data.frame(soil=c("Vertosol", "Hydrosol", "Sodosol"), response=1:3)
soil.info1
soil.info2<- data.frame(soil=c("Chromosol", "Dermosol", "Tenosol"), response=4:6)
soil.info2
soil.info<- rbind(soil.info1, soil.info2)
soil.info
a.column<- c(2.5,3.2,1.2,2.1,2,0.5)
soil.info3<- cbind(soil.info, SOC=a.column)
soil.info3
|
## makeCacheMatrix() builds a special "matrix" object: a closure
## environment holding a matrix together with a cache for its inverse.
## The returned list exposes four accessor functions; cacheSolve() uses
## them to avoid recomputing the inverse of an unchanged matrix.
##
## x: the (invertible, square) matrix to wrap; defaults to an empty matrix.
makeCacheMatrix <- function(x = matrix()) {
    cached_inv <- NULL  # cached inverse; NULL means "not computed yet"

    ## Replace the stored matrix and invalidate the now-stale cache, so the
    ## inverse of the new matrix will be recomputed on demand.
    initMatrix <- function(value) {
        x <<- value
        cached_inv <<- NULL
    }

    ## Return the stored matrix.
    getMatrix <- function() x

    ## Store a freshly computed inverse in the cache.
    cacheInverse <- function(inv) {
        cached_inv <<- inv
    }

    ## Return the cached inverse (NULL if none has been stored).
    getInverse <- function() cached_inv

    ## Expose the accessors by name.
    list(initMatrix = initMatrix,
         getMatrix = getMatrix,
         cacheInverse = cacheInverse,
         getInverse = getInverse)
}
## cacheSolve() returns the inverse of the special "matrix" created with
## makeCacheMatrix().  If an inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.
cacheSolve <- function(x, ...) {
    ## Fast path: a previously cached inverse.
    cached <- x$getInverse()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    ## Slow path: compute, cache, and return the inverse.
    inv <- solve(x$getMatrix())
    x$cacheInverse(inv)
    inv
}
| /cachematrix.R | no_license | Minyeah/datasciencecoursera | R | false | false | 1,670 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix is a function which creates a special "matrix" object that can
## cache its inverse for the input (which is an invertible square matrix).
##I am not sure what the argument (x=matrix()) does in the function statement.
makeCacheMatrix <- function(x = matrix()) {
cache <- NULL ##holds the cached value, or takes value NULL if there is no cached value. Initalize as NULL.
initMatrix <- function(newvalue) { #store a matrix
x <<- newvalue
cache <<- NULL ##since the matrix is assigned a new value, flush the cache.
##this will ensure the inverse of the new matrix will be cached.
}
getMatrix <- function() { ##returns the stored matrix
x
}
cacheInverse <- function(solve) {
cache <<- solve ##cache the given argument
}
getInverse <- function() {
cache ##retrieve the cahced value
}
##return the list of functions
list(initMatrix = initMatrix, getMatrix = getMatrix, cacheInverse = cacheInverse, getInverse = getInverse)
}
## the following function calculates the inverse of the special matrix created with makeCacheMatrix
cacheSolve <- function(x, ...) {
##get the cached value
inverse <- x$getInverse()
##if a cached value already exists, return it
if (!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
#otherwise, get the matrix, calculate the inverse, and cache the value
data <- x$getMatrix()
inverse <- solve(data)
x$cacheInverse(inverse)
##return the inverse
inverse
}
|
#Plots for WITCH runs with climate damages or impacts
## SCC_plot(): diagnostic plots for WITCH impact runs -- GHG emissions,
## temperature, relative GDP loss, and the Social Cost of Carbon (SCC),
## the latter both from model marginals and (when *_emission_pulse runs
## exist) from the emission-pulse method.
##
## scenplot              scenarios (file names) to plot
## regions               regions passed to witch_regional_line_plot()
## normalization_region  region whose marginal utility normalizes the SCC
##                       ("World" = sum of per-region contributions)
## verbose               if TRUE, print the SCC tables to the console
##
## NOTE(review): get_witch_simple() appears to load each named variable
## into the calling environment as a data frame with columns
## (pathdir, file, n, t, value) -- confirm against its definition.
SCC_plot <- function(scenplot=scenlist, regions = "World", normalization_region = "World", verbose = FALSE){
    gdp_measure <- "y" #"cc" #for consumption or "y" for GDP
    emi_sum <- "ghg" #or "co2" for only CO2 or ghg for all gases
    #Impacts and Damages computation
    get_witch_simple("OMEGA", check_calibration = T)
    get_witch_simple("Q", check_calibration = T)
    Q <- Q %>% filter(iq == gdp_measure) %>% select(-iq)
    get_witch_simple("l", check_calibration = T)
    get_witch_simple("Q_EMI", check_calibration = T)
    get_witch_simple("ghg") # to get GHGs for non-co2 sets
    ## Which emission species enter the aggregate.
    if(emi_sum=="ghg") ghg_used <- unique(ghg$e) else if(emi_sum=="co2") ghg_used = c("co2")
    Q_EMI <- Q_EMI %>% filter(e %in% ghg_used) %>% group_by(pathdir, file, n, t) %>% summarize(emiall = sum(value))
    #get also BAU values
    get_witch_simple("BAU_Q", check_calibration = T)
    BAU_Q <- BAU_Q %>% filter(iq == gdp_measure) %>% select(-iq)
    get_witch_simple("BAU_Q_EMI", check_calibration = T)
    BAU_Q_EMI <- BAU_Q_EMI %>% filter(e %in% ghg_used) %>% group_by(pathdir, file, n, t) %>% summarize(emiall = sum(value))
    ## Assemble one table with GDP, BAU GDP, emissions, population and
    ## temperature per (pathdir, file, n, t).
    impact <- Q %>% rename(gdp=value)
    impact <- merge(impact, BAU_Q, by = c("pathdir", "file", "n", "t")); setnames(impact, "value", "gdp_bau")
    impact <- merge(impact, Q_EMI, by = c("pathdir", "file", "n", "t")); setnames(impact, "emiall", "emi")
    impact <- merge(impact, BAU_Q_EMI, by = c("pathdir", "file", "n", "t")); setnames(impact, "emiall", "emi_bau")
    impact <- merge(impact, l, by = c("pathdir", "file", "n", "t")); setnames(impact, "value", "pop")
    get_witch_simple("TEMP", check_calibration = T)
    impact <- merge(impact, TEMP %>% filter(m=="atm") %>% select(-m), by = c("pathdir", "file", "n", "t")); setnames(impact, "value", "temp")
    #add external climate modules in case
    get_witch_simple("MAGICCTEMP", check_calibration = T)
    if(exists("MAGICCTEMP")) {impact <- merge(impact, MAGICCTEMP %>% filter(m=="atm") %>% select(-m), by = c("pathdir", "file", "n", "t"), all.x = T); setnames(impact, "value", "temp_magicc6")}
    ## Drop the dedicated emission-pulse scenarios from the line plots.
    ## NOTE(review): this subtracts pulse runs found in the *global*
    ## 'scenlist', not in 'scenplot' -- confirm that is intended.
    scenplot_nopulse <- setdiff(scenplot, str_subset(scenlist, "emission_pulse"))
    #PLOTS:
    #COs Emissions
    emi_plot <- witch_regional_line_plot(impact, varname = "emi", regions = regions, scenplot = scenplot_nopulse, ylab = "GHG Emissions [GtCO2eq]", conv_factor=44/12)
    #Plot of relative GDP loss
    gdp_loss_plot <- witch_regional_line_plot(impact, varname = "-(gdp/gdp_bau-1)", regions = regions, scenplot = scenplot_nopulse, ylab = "% GDP loss", conv_factor=100, rm.NA = F)
    #Temperature
    #temp_plot <- witch_regional_line_plot(impact, varname = "temp", scenplot = scenplot_nopulse, regions = "World", ylab = "Temperature increase [deg C]", conv_factor=1, nagg="mean")
    ## Solid lines: WITCH internal temperature; dashed: MAGICC6 when present.
    temp_plot <- ggplot() + geom_line(data = impact %>% filter(file %in% scenplot_nopulse & ttoyear(t) <= yearmax & ttoyear(t) >= yearmin) %>% group_by(pathdir, file, t) %>% summarise_at(., .vars=vars(str_subset(names(impact), "temp")), funs(mean)), aes(ttoyear(t),temp,colour=file), stat="identity", size=1.5, linetype = "solid") + xlab("") + ylab("Temperature [deg C]")
    if(exists("MAGICCTEMP")){temp_plot <- temp_plot + geom_line(data = impact %>% filter(file %in% scenplot_nopulse & ttoyear(t) <= yearmax & ttoyear(t) >= yearmin & !is.na(temp_magicc6)) %>% group_by(pathdir, file, t) %>% summarise_at(., .vars=vars(str_subset(names(impact), "temp_magicc6")), funs(mean)), aes(ttoyear(t),temp_magicc6,colour=file), stat="identity", size=1.5, linetype = "dashed") + ylab("Temp., MAGICC dashed")}
    ########### COMPUTE THE SOCIAL COST OF CARBON ####################
    t0 = 1 #from which year to compute the SCC from
    tmax = 30
    #SCC from marginals
    get_witch_simple("m_eqq_emi_tree")
    get_witch_simple("m_eqq_y")
    scc <- m_eqq_emi_tree %>% filter(e=="co2") %>% rename(m_emi=value)
    scc <- merge(scc, m_eqq_y, by = c("pathdir", "file", "n", "t")); setnames(scc, "value", "m_eqq_y")
    #SCC from T$/GtC to $/tCO2
    ## SCC = -(shadow price of emissions)/(shadow price of output);
    ## the 1e3/(44/12) factor converts trillion USD per GtC to USD per tCO2.
    scc <- scc %>% filter(t>= t0 & t <= tmax) %>% mutate(SCC_tn = 1e3 * (-m_emi / m_eqq_y) / (44/12)) %>% group_by(pathdir, file, n) %>% mutate(SCC_t0 = 1e3 * (-m_emi / m_eqq_y[t==t0]) / (44/12))
    if(normalization_region=="World"){
        #scc <- scc %>% group_by(pathdir, file, t) %>% mutate(m_eqq_normalization = mean(m_eqq_y))
        #scc_value_marginals <- scc %>% group_by(pathdir, file, t) %>% mutate(SCC_contrib_norm=1e3 * (-m_emi / m_eqq_normalization) / (44/12)) %>% summarize(SCC=sum(SCC_contrib_norm)) %>% mutate(n="World") %>% as.data.frame()
        scc_value_marginals <- scc %>% group_by(pathdir, file, t) %>% mutate(SCC_contrib_norm=1e3 * (-m_emi / m_eqq_y) / (44/12)) %>% summarize(SCC=sum(SCC_contrib_norm)) %>% mutate(n="World") %>% as.data.frame()
    }else{
        ## Normalize every region's contribution by the marginal utility of
        ## output in the chosen region, then sum across regions.
        scc <- scc %>% group_by(pathdir, file, t) %>% mutate(m_eqq_normalization = m_eqq_y[n==normalization_region])
        scc_value_marginals <- scc %>% group_by(pathdir, file, t) %>% mutate(SCC_contrib_norm=1e3 * (-m_emi / m_eqq_normalization) / (44/12)) %>% summarize(SCC=sum(SCC_contrib_norm)) %>% mutate(n="World") %>% as.data.frame()
    }
    scc_value_marginals <- subset(scc_value_marginals, file %in% scenplot_nopulse)
    scc_value_marginals_t0 <- subset(scc_value_marginals, file %in% scenplot_nopulse & t==t0)
    if(verbose) print(scc_value_marginals_t0)
    SCC_bar_chart <- ggplot(scc_value_marginals_t0) + geom_bar(aes(file, SCC, fill=file), position = "dodge", stat="identity") + ylab("SCC [$/tCO2eq]") + xlab("") + geom_text(aes(file, SCC*0.9, label=paste0(round(SCC,1), "$"))) + guides(fill=FALSE)
    SCC_over_time_plot <- witch_regional_line_plot(scc_value_marginals, varname = "SCC", regions = "World", scenplot = scenplot_nopulse, ylab = "SCC [$/tCO2eq]", conv_factor=1, rm.NA = F, ylim0 = T)
    #compute SCC based on emission pulse method
    SCC_bar_chart_pulse <- NULL
    if(any(str_detect(scenplot, "_emission_pulse"))){
        get_witch_simple("scc_regional")
        ## Strip the pulse suffix so pulse runs pair with their base runs.
        scc_regional$file = gsub("_emission_pulse" , "", scc_regional$file)
        #scc_regional$pathdir <- "Emission_pulse"
        if(normalization_region=="World"){
            scc_regional <- scc_regional %>% group_by(pathdir, file, t) %>% summarize(value=mean(value)) %>% mutate(n="World")#for now simple mean
        }else{
            scc_regional <- scc_regional %>% filter(n==normalization_region)
        }
        scc_regional_t0 <- scc_regional %>% filter(t==t0)
        #add NAs for runs without pulse run to keep same structure and colors
        scc_regional_t0 <- rbind(as.data.frame(scc_regional_t0), scc_value_marginals_t0 %>% filter(!(file %in% unique(scc_regional$file))) %>% rename(value=SCC) %>% mutate(value=NA))
        if(verbose) print(scc_regional_t0)
        ## Use a common y-limit so the two SCC bar charts are comparable.
        SCC_bar_chart_pulse <- ggplot(scc_regional_t0) + geom_bar(aes(file, value, fill=file), position = "dodge", stat="identity") + ylab("SCC [$/tCO2eq] (PULSE)") + xlab("") + geom_text(aes(file, value*0.9, label=paste0(round(value,1), "$"))) + guides(fill=FALSE) + ylim(NA, max(c(scc_value_marginals$SCC, scc_regional$value)))
        SCC_bar_chart <- SCC_bar_chart + ylim(NA, max(c(scc_value_marginals_t0$SCC, scc_regional$value)))
    }
    ## Arrange the panels in a 2x3 grid with a shared legend.
    print(suppressWarnings(ggarrange(emi_plot, temp_plot, gdp_loss_plot, NULL, SCC_bar_chart, SCC_bar_chart_pulse, ncol = 2, nrow=3, common.legend = T, legend = "bottom")))
}
| /R/impact_plots.R | permissive | Mareasunami/witch-plot | R | false | false | 7,309 | r | #Plots for WITCH runs with climate damages or impacts
# Plots for WITCH runs with climate impacts: GHG emissions, GDP loss,
# temperature, and the Social Cost of Carbon (SCC).
#
# The SCC is computed in two ways:
#   1) from the model marginals (m_eqq_emi_tree / m_eqq_y), normalized by
#      `normalization_region` ("World" sums the per-region contributions);
#   2) when "*_emission_pulse" scenarios are present, from the pre-computed
#      `scc_regional` variable of the pulse runs.
#
# Args:
#   scenplot             - scenarios (result files) to plot; default global `scenlist`
#   regions              - regions passed to witch_regional_line_plot()
#   normalization_region - region used to normalize the SCC ("World" = aggregate)
#   verbose              - if TRUE, print the year-t0 SCC tables
#
# NOTE(review): depends on globals -- get_witch_simple() evidently loads the
# requested variable (Q, Q_EMI, TEMP, ...) into the calling scope, and
# `scenlist`, `yearmin`/`yearmax`, ttoyear() and witch_regional_line_plot()
# must already be defined.
SCC_plot <- function(scenplot=scenlist, regions = "World", normalization_region = "World", verbose = FALSE){
  gdp_measure <- "y" #"cc" #for consumption or "y" for GDP
  emi_sum <- "ghg" #or "co2" for only CO2 or ghg for all gases
  #Impacts and Damages computation
  get_witch_simple("OMEGA", check_calibration = T)
  get_witch_simple("Q", check_calibration = T)
  Q <- Q %>% filter(iq == gdp_measure) %>% select(-iq)
  get_witch_simple("l", check_calibration = T)
  get_witch_simple("Q_EMI", check_calibration = T)
  get_witch_simple("ghg") # to get GHGs for non-co2 sets
  if(emi_sum=="ghg") ghg_used <- unique(ghg$e) else if(emi_sum=="co2") ghg_used = c("co2")
  # total emissions over the selected gases, per pathdir/file/region/time
  Q_EMI <- Q_EMI %>% filter(e %in% ghg_used) %>% group_by(pathdir, file, n, t) %>% summarize(emiall = sum(value))
  #get also BAU values
  get_witch_simple("BAU_Q", check_calibration = T)
  BAU_Q <- BAU_Q %>% filter(iq == gdp_measure) %>% select(-iq)
  get_witch_simple("BAU_Q_EMI", check_calibration = T)
  BAU_Q_EMI <- BAU_Q_EMI %>% filter(e %in% ghg_used) %>% group_by(pathdir, file, n, t) %>% summarize(emiall = sum(value))
  # assemble one wide table: gdp, BAU gdp, emissions, BAU emissions, population
  impact <- Q %>% rename(gdp=value)
  impact <- merge(impact, BAU_Q, by = c("pathdir", "file", "n", "t")); setnames(impact, "value", "gdp_bau")
  impact <- merge(impact, Q_EMI, by = c("pathdir", "file", "n", "t")); setnames(impact, "emiall", "emi")
  impact <- merge(impact, BAU_Q_EMI, by = c("pathdir", "file", "n", "t")); setnames(impact, "emiall", "emi_bau")
  impact <- merge(impact, l, by = c("pathdir", "file", "n", "t")); setnames(impact, "value", "pop")
  get_witch_simple("TEMP", check_calibration = T)
  impact <- merge(impact, TEMP %>% filter(m=="atm") %>% select(-m), by = c("pathdir", "file", "n", "t")); setnames(impact, "value", "temp")
  #add external climate modules in case
  get_witch_simple("MAGICCTEMP", check_calibration = T)
  if(exists("MAGICCTEMP")) {impact <- merge(impact, MAGICCTEMP %>% filter(m=="atm") %>% select(-m), by = c("pathdir", "file", "n", "t"), all.x = T); setnames(impact, "value", "temp_magicc6")}
  # keep the dedicated emission-pulse runs out of the scenario plots
  scenplot_nopulse <- setdiff(scenplot, str_subset(scenlist, "emission_pulse"))
  #PLOTS:
  #CO2 Emissions (GtC converted to GtCO2eq via 44/12)
  emi_plot <- witch_regional_line_plot(impact, varname = "emi", regions = regions, scenplot = scenplot_nopulse, ylab = "GHG Emissions [GtCO2eq]", conv_factor=44/12)
  #Plot of relative GDP loss
  gdp_loss_plot <- witch_regional_line_plot(impact, varname = "-(gdp/gdp_bau-1)", regions = regions, scenplot = scenplot_nopulse, ylab = "% GDP loss", conv_factor=100, rm.NA = F)
  #Temperature
  #temp_plot <- witch_regional_line_plot(impact, varname = "temp", scenplot = scenplot_nopulse, regions = "World", ylab = "Temperature increase [deg C]", conv_factor=1, nagg="mean")
  temp_plot <- ggplot() + geom_line(data = impact %>% filter(file %in% scenplot_nopulse & ttoyear(t) <= yearmax & ttoyear(t) >= yearmin) %>% group_by(pathdir, file, t) %>% summarise_at(., .vars=vars(str_subset(names(impact), "temp")), funs(mean)), aes(ttoyear(t),temp,colour=file), stat="identity", size=1.5, linetype = "solid") + xlab("") + ylab("Temperature [deg C]")
  if(exists("MAGICCTEMP")){temp_plot <- temp_plot + geom_line(data = impact %>% filter(file %in% scenplot_nopulse & ttoyear(t) <= yearmax & ttoyear(t) >= yearmin & !is.na(temp_magicc6)) %>% group_by(pathdir, file, t) %>% summarise_at(., .vars=vars(str_subset(names(impact), "temp_magicc6")), funs(mean)), aes(ttoyear(t),temp_magicc6,colour=file), stat="identity", size=1.5, linetype = "dashed") + ylab("Temp., MAGICC dashed")}
  ########### COMPUTE THE SOCIAL COST OF CARBON ####################
  # time-period index window over which SCC contributions are computed
  t0 = 1 #from which year to compute the SCC from
  tmax = 30
  #SCC from marginals
  get_witch_simple("m_eqq_emi_tree")
  get_witch_simple("m_eqq_y")
  scc <- m_eqq_emi_tree %>% filter(e=="co2") %>% rename(m_emi=value)
  scc <- merge(scc, m_eqq_y, by = c("pathdir", "file", "n", "t")); setnames(scc, "value", "m_eqq_y")
  #SCC from T$/GtC to $/tCO2
  scc <- scc %>% filter(t>= t0 & t <= tmax) %>% mutate(SCC_tn = 1e3 * (-m_emi / m_eqq_y) / (44/12)) %>% group_by(pathdir, file, n) %>% mutate(SCC_t0 = 1e3 * (-m_emi / m_eqq_y[t==t0]) / (44/12))
  if(normalization_region=="World"){
    #scc <- scc %>% group_by(pathdir, file, t) %>% mutate(m_eqq_normalization = mean(m_eqq_y))
    #scc_value_marginals <- scc %>% group_by(pathdir, file, t) %>% mutate(SCC_contrib_norm=1e3 * (-m_emi / m_eqq_normalization) / (44/12)) %>% summarize(SCC=sum(SCC_contrib_norm)) %>% mutate(n="World") %>% as.data.frame()
    scc_value_marginals <- scc %>% group_by(pathdir, file, t) %>% mutate(SCC_contrib_norm=1e3 * (-m_emi / m_eqq_y) / (44/12)) %>% summarize(SCC=sum(SCC_contrib_norm)) %>% mutate(n="World") %>% as.data.frame()
  }else{
    # normalize every region's contribution by the chosen region's marginal utility
    scc <- scc %>% group_by(pathdir, file, t) %>% mutate(m_eqq_normalization = m_eqq_y[n==normalization_region])
    scc_value_marginals <- scc %>% group_by(pathdir, file, t) %>% mutate(SCC_contrib_norm=1e3 * (-m_emi / m_eqq_normalization) / (44/12)) %>% summarize(SCC=sum(SCC_contrib_norm)) %>% mutate(n="World") %>% as.data.frame()
  }
  scc_value_marginals <- subset(scc_value_marginals, file %in% scenplot_nopulse)
  scc_value_marginals_t0 <- subset(scc_value_marginals, file %in% scenplot_nopulse & t==t0)
  if(verbose) print(scc_value_marginals_t0)
  SCC_bar_chart <- ggplot(scc_value_marginals_t0) + geom_bar(aes(file, SCC, fill=file), position = "dodge", stat="identity") + ylab("SCC [$/tCO2eq]") + xlab("") + geom_text(aes(file, SCC*0.9, label=paste0(round(SCC,1), "$"))) + guides(fill=FALSE)
  # NOTE(review): SCC_over_time_plot is built but never printed or returned.
  SCC_over_time_plot <- witch_regional_line_plot(scc_value_marginals, varname = "SCC", regions = "World", scenplot = scenplot_nopulse, ylab = "SCC [$/tCO2eq]", conv_factor=1, rm.NA = F, ylim0 = T)
  #compute SCC based on emission pulse method
  SCC_bar_chart_pulse <- NULL
  if(any(str_detect(scenplot, "_emission_pulse"))){
    get_witch_simple("scc_regional")
    # map each pulse run back onto the scenario it perturbs
    scc_regional$file = gsub("_emission_pulse" , "", scc_regional$file)
    #scc_regional$pathdir <- "Emission_pulse"
    if(normalization_region=="World"){
      scc_regional <- scc_regional %>% group_by(pathdir, file, t) %>% summarize(value=mean(value)) %>% mutate(n="World")#for now simple mean
    }else{
      scc_regional <- scc_regional %>% filter(n==normalization_region)
    }
    scc_regional_t0 <- scc_regional %>% filter(t==t0)
    #add NAs for runs without pulse run to keep same structure and colors
    scc_regional_t0 <- rbind(as.data.frame(scc_regional_t0), scc_value_marginals_t0 %>% filter(!(file %in% unique(scc_regional$file))) %>% rename(value=SCC) %>% mutate(value=NA))
    if(verbose) print(scc_regional_t0)
    # shared y-limits so the two SCC bar charts are directly comparable
    SCC_bar_chart_pulse <- ggplot(scc_regional_t0) + geom_bar(aes(file, value, fill=file), position = "dodge", stat="identity") + ylab("SCC [$/tCO2eq] (PULSE)") + xlab("") + geom_text(aes(file, value*0.9, label=paste0(round(value,1), "$"))) + guides(fill=FALSE) + ylim(NA, max(c(scc_value_marginals$SCC, scc_regional$value)))
    SCC_bar_chart <- SCC_bar_chart + ylim(NA, max(c(scc_value_marginals_t0$SCC, scc_regional$value)))
  }
  print(suppressWarnings(ggarrange(emi_plot, temp_plot, gdp_loss_plot, NULL, SCC_bar_chart, SCC_bar_chart_pulse, ncol = 2, nrow=3, common.legend = T, legend = "bottom")))
}
|
## Cache-aware matrix inverse helpers: makeCacheMatrix() wraps a matrix in an
## object that can store its inverse, and cacheSolve() computes or reuses it.
## The function expects a matrix as an argument
## 1. It initializes the inverse to NULL
## 2. The set function sets the value of the input matrix and resets the inverse to NULL
## 3. The get function return the input matrix
## 4. setinv stores a value in the inv variable
## 5. getinv returns the value stored in the inv variable
## The function returns a list with 4 internal methods as named members.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix in a list of closures that can also cache its inverse.
  ## The cache is invalidated whenever a new matrix is stored via set().
  cached_inverse <- NULL
  list(
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## The function receives one argument, which is the output of the makeCacheMatrix, or the "special" matrix object
## 1. Reads the inverse value stored in the "special" matrix and stores it locally
## 2. Makes sure that the value is not empty
## 3. Returns the value stored locally that contains the inverse of the input "special" matrix object
## 4. If the inverse matrix gotten from the function parameter is null, then it calculates the inverse using solve
## 5. Function stores the inverse in the original matrix object, using the setinv method
## It returns the inverse calculated as well
cacheSolve <- function(x) {
  ## Return the inverse of the special "matrix" object `x` built by
  ## makeCacheMatrix, computing it with solve() only on a cache miss.
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get())
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | ea-datascience/ProgrammingAssignment2 | R | false | false | 1,664 | r | ## Put comments here that give an overall description of what your
## functions do
## The function expects a matrix as an argument
## 1. It initializes the inverse to NULL
## 2. The set function sets the value of the input matrix and resets the inverse to NULL
## 3. The get function return the input matrix
## 4. setinv stores a value in the inv variable
## 5. getinv returns the value stored in the inv variable
## The function returns a list with 4 internal methods as named members.
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix in a list of closures that can also cache its inverse.
  ## The cache is invalidated whenever a new matrix is stored via set().
  cached_inverse <- NULL
  list(
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## The function receives one argument, which is the output of the makeCacheMatrix, or the "special" matrix object
## 1. Reads the inverse value stored in the "special" matrix and stores it locally
## 2. Makes sure that the value is not empty
## 3. Returns the value stored locally that contains the inverse of the input "special" matrix object
## 4. If the inverse matrix gotten from the function parameter is null, then it calculates the inverse using solve
## 5. Function stores the inverse in the original matrix object, using the setinv method
## It returns the inverse calculated as well
cacheSolve <- function(x) {
  ## Return the inverse of the special "matrix" object `x` built by
  ## makeCacheMatrix, computing it with solve() only on a cache miss.
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get())
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
## File: cachematrix.R
## Author: Scott Tenorman -
## Date: October 24, 2014
##
## Set of functions to provide a matrix with a cached inverse
## makeCacheMatrix - Generates a matrix object capable of storing its inverse
makeCacheMatrix <- function(x = matrix())
{
  # Wrap a matrix in a closure-based object that can cache its inverse.
  # Storing a new matrix via set() clears any previously cached inverse.
  inv_cache <- NULL
  list(
    set = function(value) {
      x <<- value
      inv_cache <<- NULL
    },
    get = function() x,
    setinverse = function(i) inv_cache <<- i,
    getinverse = function() inv_cache
  )
}
## cacheSolve - Computes the inverse of a matrix if the value is not already
##              in the matrix's cache.
cacheSolve <- function(x, ...)
{
  ## Return the inverse of the special "matrix" object `x` created by
  ## makeCacheMatrix. A previously cached inverse is reused; otherwise the
  ## inverse is computed with solve(), stored in the cache, and returned.
  ##
  ## Args:
  ##   x:   object returned by makeCacheMatrix()
  ##   ...: further arguments forwarded to solve() (e.g. `tol`)
  # First check for a cached inverse in matrix x
  inv <- x$getinverse()
  if (!is.null(inv))
    return(inv)
  # Must compute the inverse.
  # Fix: forward `...` to solve() -- the original signature accepted `...`
  # but silently dropped it.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | ScottTenorman/ProgrammingAssignment2 | R | false | false | 940 | r | ## File: cachematrix.R
## Author: Scott Tenorman -
## Date: October 24, 2014
##
## Set of functions to provide a matrix with a cached inverse
## makeCacheMatrix - Generates a matrix object capable of storing its inverse
makeCacheMatrix <- function(x = matrix())
{
  # Wrap a matrix in a closure-based object that can cache its inverse.
  # Storing a new matrix via set() clears any previously cached inverse.
  inv_cache <- NULL
  list(
    set = function(value) {
      x <<- value
      inv_cache <<- NULL
    },
    get = function() x,
    setinverse = function(i) inv_cache <<- i,
    getinverse = function() inv_cache
  )
}
## cacheSolve - Computes the inverse of a matrix if the value is not already
##              in the matrix's cache.
cacheSolve <- function(x, ...)
{
  ## Return the inverse of the special "matrix" object `x` created by
  ## makeCacheMatrix. A previously cached inverse is reused; otherwise the
  ## inverse is computed with solve(), stored in the cache, and returned.
  ##
  ## Args:
  ##   x:   object returned by makeCacheMatrix()
  ##   ...: further arguments forwarded to solve() (e.g. `tol`)
  # First check for a cached inverse in matrix x
  inv <- x$getinverse()
  if (!is.null(inv))
    return(inv)
  # Must compute the inverse.
  # Fix: forward `...` to solve() -- the original signature accepted `...`
  # but silently dropped it.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
zeros <- function(n) {
  # A list containing `n` copies of the integer scalar 0L; rbind()/cbind()
  # in pad() recycle each scalar into a full zero row or column.
  lapply(seq_len(n), function(unused) 0L)
}
pad <- function(x, top, right, bottom, left) {
  # Surround matrix `x` with zero padding: first stack `top`/`bottom` zero
  # rows around it, then attach `left`/`right` zero columns. The scalar 0L
  # entries produced by zeros() are recycled by rbind()/cbind() into rows
  # and columns of the matching length.
  with_rows <- do.call(rbind, c(zeros(top), list(x), zeros(bottom)))
  do.call(cbind, c(zeros(left), list(with_rows), zeros(right)))
}
#' Toy datasets for lifegame
#'
#' Example board states for the game of life (0 = dead cell, 1 = live cell).
#'
#' @name datasets
#' @rdname datasets
#' @export
# "Blinker": a vertical bar of three live cells centred in a 5x5 board
# (the classic period-2 oscillator).
blinker <- matrix(c(
  0, 0, 0, 0, 0,
  0, 0, 1, 0, 0,
  0, 0, 1, 0, 0,
  0, 0, 1, 0, 0,
  0, 0, 0, 0, 0
), nrow = 5, byrow = TRUE)
#' @rdname datasets
#' @export
# "Frog": a 6x6 board with two offset pairs of live cells plus two single
# live cells, laid out by row (byrow = TRUE).
frog <- matrix(c(
  0, 0, 0, 0, 0, 0,
  0, 0, 1, 1, 0, 0,
  0, 1, 0, 0, 0, 0,
  0, 0, 0, 0, 1, 0,
  0, 0, 1, 1, 0, 0,
  0, 0, 0, 0, 0, 0
), nrow = 6, byrow = TRUE)
#' @rdname datasets
#' @export
# "Diehard": a 3x2 left fragment and a 3x3 right fragment separated by a
# three-column gap (the scalar 0s in cbind() recycle to full columns),
# then padded via pad()/zeros() with 5 rows above, 5 columns right,
# 17 rows below and 7 columns left.
diehard <- pad(
  cbind(
    matrix(c(
      0, 0,
      1, 1,
      0, 1
    ), ncol = 2, byrow = TRUE),
    0, 0, 0,
    matrix(c(
      0, 1, 0,
      0, 0, 0,
      1, 1, 1
    ), ncol = 3, byrow = TRUE)
  ),
  5L, 5L, 17L, 7L
)
| /R/matrices.R | no_license | atusy/lifegamer | R | false | false | 908 | r | zeros <- function(n) rep(list(0L), n)
pad <- function(x, top, right, bottom, left) {
  # Surround matrix `x` with zero padding: first stack `top`/`bottom` zero
  # rows around it, then attach `left`/`right` zero columns. The scalar 0L
  # entries produced by zeros() are recycled by rbind()/cbind() into rows
  # and columns of the matching length.
  with_rows <- do.call(rbind, c(zeros(top), list(x), zeros(bottom)))
  do.call(cbind, c(zeros(left), list(with_rows), zeros(right)))
}
#' Toy datasets for lifegame
#'
#' Example board states for the game of life (0 = dead cell, 1 = live cell).
#'
#' @name datasets
#' @rdname datasets
#' @export
# "Blinker": a vertical bar of three live cells centred in a 5x5 board
# (the classic period-2 oscillator).
blinker <- matrix(c(
  0, 0, 0, 0, 0,
  0, 0, 1, 0, 0,
  0, 0, 1, 0, 0,
  0, 0, 1, 0, 0,
  0, 0, 0, 0, 0
), nrow = 5, byrow = TRUE)
#' @rdname datasets
#' @export
# "Frog": a 6x6 board with two offset pairs of live cells plus two single
# live cells, laid out by row (byrow = TRUE).
frog <- matrix(c(
  0, 0, 0, 0, 0, 0,
  0, 0, 1, 1, 0, 0,
  0, 1, 0, 0, 0, 0,
  0, 0, 0, 0, 1, 0,
  0, 0, 1, 1, 0, 0,
  0, 0, 0, 0, 0, 0
), nrow = 6, byrow = TRUE)
#' @rdname datasets
#' @export
# "Diehard": a 3x2 left fragment and a 3x3 right fragment separated by a
# three-column gap (the scalar 0s in cbind() recycle to full columns),
# then padded via pad()/zeros() with 5 rows above, 5 columns right,
# 17 rows below and 7 columns left.
diehard <- pad(
  cbind(
    matrix(c(
      0, 0,
      1, 1,
      0, 1
    ), ncol = 2, byrow = TRUE),
    0, 0, 0,
    matrix(c(
      0, 1, 0,
      0, 0, 0,
      1, 1, 1
    ), ncol = 3, byrow = TRUE)
  ),
  5L, 5L, 17L, 7L
)
|
# Toy repeated-measures example: 6 subjects with 3 scores each, analysed
# three ways (fixed-effect lm, classical aov with an Error() stratum, and
# a lme4 mixed model with a random intercept per subject).
subject <- factor(rep(1:6, each = 3))
score <- factor(rep(1:3, times = 6))
y <- c(10, 11, 12, 10, 12, 14, 12, 13, 14, 12, 14, 16, 14, 15,
       16, 14, 16, 18)
Data <- data.frame(subject, score, y)
# Per-subject means.
with(Data, tapply(y, list(subject = subject), mean))
# treat subject as fixed factor
fit.lm <- lm(y ~ 1 + subject, data = Data)
anova(fit.lm)
# treat subject as random factor
fit.aov <- aov(y ~ 1 + Error(subject), data = Data)
summary(fit.aov)  # fix: removed stray "N" that made this line a syntax error
# modern approach treating subject as random factor
library(lme4)
fit.lmer <- lmer(y ~ 1 + (1 | subject), data = Data)
summary(fit.lmer)
| /Applied_Logitudinal_Analysis/R/lem.r | no_license | anhualin/MyLearning | R | false | false | 584 | r | subject <- factor(rep(1:6, each = 3))
# (continuation of the repeated-measures example; `subject` is defined above)
score <- factor(rep(1:3, times = 6))
y <- c(10, 11, 12, 10, 12, 14, 12, 13, 14, 12, 14, 16, 14, 15,
       16, 14, 16, 18)
Data <- data.frame(subject, score, y)
# Per-subject means.
with(Data, tapply(y, list(subject = subject), mean))
# treat subject as fixed factor
fit.lm <- lm(y ~ 1 + subject, data = Data)
anova(fit.lm)
# treat subject as random factor
fit.aov <- aov(y ~ 1 + Error(subject), data = Data)
summary(fit.aov)  # fix: removed stray "N" that made this line a syntax error
# modern approach treating subject as random factor
library(lme4)
fit.lmer <- lmer(y ~ 1 + (1 | subject), data = Data)
summary(fit.lmer)
|
\name{CoinMarketCap_All}
\alias{CoinMarketCap_All}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{'Coinmarketcap' All Currencies}
\description{Get all currencies from the 'coinmarketcap' api}
\usage{
CoinMarketCap_All(currency)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{currency}{
defaults "USD"
}
}
\details{
currencies available ("AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD")
Dataframe Columns (id, name, symbol, rank, price_usd, price_btc, 24h_volume_usd, market_cap_usd, available_supply, total_supply, percent_change_1h, percent_change_24h, percent_change_7d, last_updated, {price_(currency),X24h_volume_(currency),market_cap_(currency)})
}
\value{
returns a dataframe of all the currencies on the coinmarketcap api
}
\references{
https://coinmarketcap.com/api/
}
\author{
GitHub = Time-Gnome
}
\examples{
MyData <- CoinMarketCap_All(currency = "CAD")
MyData <- CoinMarketCap_All("AUD")
MyData <- CoinMarketCap_All()
}
| /man/CoinMarketCap_All.Rd | no_license | Time-Gnome/RCrypto | R | false | false | 1,198 | rd | \name{CoinMarketCap_All}
\alias{CoinMarketCap_All}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{'Coinmarketcap' All Currencies}
\description{Get all currencies from the 'coinmarketcap' api}
\usage{
CoinMarketCap_All(currency)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{currency}{
defaults "USD"
}
}
\details{
currencies available ("AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD")
Dataframe Columns (id, name, symbol, rank, price_usd, price_btc, 24h_volume_usd, market_cap_usd, available_supply, total_supply, percent_change_1h, percent_change_24h, percent_change_7d, last_updated, {price_(currency),X24h_volume_(currency),market_cap_(currency)})
}
\value{
returns a dataframe of all the currencies on the coinmarketcap api
}
\references{
https://coinmarketcap.com/api/
}
\author{
GitHub = Time-Gnome
}
\examples{
MyData <- CoinMarketCap_All(currency = "CAD")
MyData <- CoinMarketCap_All("AUD")
MyData <- CoinMarketCap_All()
}
|
library(ggplot2)
# Plot choice probabilities over an ideology dimension for the parties in a
# consideration set (multinomial-logit style: softmax of linear scores).
#
# Args (column positions matter -- do not reorder data_sim!):
#   data_sim      - simulated data; col 1 = id, col 3 = ideology value,
#                   cols 4:10 = the regressors multiplied by params[1:7]
#   ideology_num  - NOTE(review): never used inside this function
#   ideology_name - x-axis label
#   params        - coefficient vector; only the first 7 entries are used
#   p_names_filt  - party labels, recycled over rows (assumes parties repeat
#                   in this exact order within each id)
# Returns: a ggplot object (one probability curve per party).
plot_cons_set_id <- function(data_sim,ideology_num,ideology_name,params,p_names_filt){
  # exp(X %*% beta): unnormalized choice score for each row
  data_sim$pr_scores <- exp(as.matrix(data_sim[,4:10])%*%params[1:7])
  # softmax denominator: sum of scores within each respondent id
  aggr_sum <- aggregate(data_sim$pr_scores, by=list(id=data_sim[,1]), FUN = sum)
  data_plot <- merge(data_sim, aggr_sum, by = "id")
  data_plot$party <- rep(p_names_filt,nrow(data_plot)/length(p_names_filt))
  colnames(data_plot)[3] <- "ideology"
  # aggregate() names its value column "V1"
  data_plot$Pr <- data_plot$pr_scores/data_plot$V1
  # NOTE(review): renaming column 2 to "party" can leave two columns with the
  # same name (or be redundant if col 2 was already "party") -- confirm intent.
  colnames(data_plot)[2] <- "party"
  data_plot$party <- as.factor(data_plot$party)
  plot <- ggplot(data=data_plot, aes_string(x="ideology", y="Pr",color="party",group="party"))+
    geom_line()+
    geom_point()+
    theme(legend.position="right")+
    xlab(ideology_name)+
    theme_bw()
  return(plot)
}
# Plot the choice probability of one party under several alternative
# consideration sets; each element of `cons_sets` is a vector of party codes.
#
# Args (column positions matter -- do not reorder data_sim!):
#   data_sim      - simulated data with a `party` code column; col 1 = id,
#                   col 3 = ideology value, cols 4:10 = regressors
#   ideology_num  - NOTE(review): never used inside this function
#   ideology_name - x-axis label
#   party_code    - code (1..6) of the party whose probability is plotted
#   cons_sets     - list of consideration sets (vectors of party codes)
#   params        - coefficients; only params[1:7] are used
# Returns: a ggplot object with one curve per consideration set.
plot_cons_set_by_party <- function(data_sim,ideology_num,ideology_name,party_code,cons_sets,params){
  data_plot_all <- data.frame()
  # hard-coded party labels; numeric codes index into this vector
  p_names <- c("Just Russia","LDPR","CPRF","Yabloko","United Russia","Right Cause")
  # NOTE(review): p_name is assigned but never used
  p_name <- p_names[party_code]
  for (cons_set in cons_sets){
    cons_set <- unlist(cons_set)
    # restrict the choice set to the parties in this consideration set
    df_cons <- data_sim[data_sim$party %in% cons_set,]
    p_names_cons <- p_names[cons_set]
    # softmax scores and within-id denominator over the restricted set
    df_cons$pr_scores <- exp(as.matrix(df_cons[,4:10])%*%params[1:7])
    aggr_sum <- aggregate(df_cons$pr_scores, by=list(id=df_cons[,1]), FUN = sum)
    data_plot <- merge(df_cons, aggr_sum, by = "id")
    colnames(data_plot)[2] <- "party_cons_set"
    # keep only the rows of the focal party
    data_plot <- data_plot[data_plot$party_cons_set == party_code,]
    # label this curve with the comma-separated names of the set
    c_n <- paste(p_names_cons,collapse=",")
    data_plot$party_cons_set <- rep(c_n,nrow(data_plot))
    colnames(data_plot)[3] <- "ideology"
    data_plot$Pr <- data_plot$pr_scores/data_plot$V1
    data_plot_all <- rbind(data_plot_all,data_plot)
  }
  data_plot_all$party_cons_set <- as.factor(data_plot_all$party_cons_set)
  plot <- ggplot(data=data_plot_all, aes_string(x="ideology", y="Pr",color="party_cons_set",group="party_cons_set"))+
    geom_line()+
    geom_point()+
    theme(legend.position="left")+
    xlab(ideology_name)+
    theme_bw()
  return(plot)
}
| /helpers.R | no_license | ana-alekseeva/disser_app | R | false | false | 2,131 | r | library(ggplot2)
# Plot choice probabilities over an ideology dimension for the parties in a
# consideration set (multinomial-logit style: softmax of linear scores).
#
# Args (column positions matter -- do not reorder data_sim!):
#   data_sim      - simulated data; col 1 = id, col 3 = ideology value,
#                   cols 4:10 = the regressors multiplied by params[1:7]
#   ideology_num  - NOTE(review): never used inside this function
#   ideology_name - x-axis label
#   params        - coefficient vector; only the first 7 entries are used
#   p_names_filt  - party labels, recycled over rows (assumes parties repeat
#                   in this exact order within each id)
# Returns: a ggplot object (one probability curve per party).
plot_cons_set_id <- function(data_sim,ideology_num,ideology_name,params,p_names_filt){
  # exp(X %*% beta): unnormalized choice score for each row
  data_sim$pr_scores <- exp(as.matrix(data_sim[,4:10])%*%params[1:7])
  # softmax denominator: sum of scores within each respondent id
  aggr_sum <- aggregate(data_sim$pr_scores, by=list(id=data_sim[,1]), FUN = sum)
  data_plot <- merge(data_sim, aggr_sum, by = "id")
  data_plot$party <- rep(p_names_filt,nrow(data_plot)/length(p_names_filt))
  colnames(data_plot)[3] <- "ideology"
  # aggregate() names its value column "V1"
  data_plot$Pr <- data_plot$pr_scores/data_plot$V1
  # NOTE(review): renaming column 2 to "party" can leave two columns with the
  # same name (or be redundant if col 2 was already "party") -- confirm intent.
  colnames(data_plot)[2] <- "party"
  data_plot$party <- as.factor(data_plot$party)
  plot <- ggplot(data=data_plot, aes_string(x="ideology", y="Pr",color="party",group="party"))+
    geom_line()+
    geom_point()+
    theme(legend.position="right")+
    xlab(ideology_name)+
    theme_bw()
  return(plot)
}
# Plot the choice probability of one party under several alternative
# consideration sets; each element of `cons_sets` is a vector of party codes.
#
# Args (column positions matter -- do not reorder data_sim!):
#   data_sim      - simulated data with a `party` code column; col 1 = id,
#                   col 3 = ideology value, cols 4:10 = regressors
#   ideology_num  - NOTE(review): never used inside this function
#   ideology_name - x-axis label
#   party_code    - code (1..6) of the party whose probability is plotted
#   cons_sets     - list of consideration sets (vectors of party codes)
#   params        - coefficients; only params[1:7] are used
# Returns: a ggplot object with one curve per consideration set.
plot_cons_set_by_party <- function(data_sim,ideology_num,ideology_name,party_code,cons_sets,params){
  data_plot_all <- data.frame()
  # hard-coded party labels; numeric codes index into this vector
  p_names <- c("Just Russia","LDPR","CPRF","Yabloko","United Russia","Right Cause")
  # NOTE(review): p_name is assigned but never used
  p_name <- p_names[party_code]
  for (cons_set in cons_sets){
    cons_set <- unlist(cons_set)
    # restrict the choice set to the parties in this consideration set
    df_cons <- data_sim[data_sim$party %in% cons_set,]
    p_names_cons <- p_names[cons_set]
    # softmax scores and within-id denominator over the restricted set
    df_cons$pr_scores <- exp(as.matrix(df_cons[,4:10])%*%params[1:7])
    aggr_sum <- aggregate(df_cons$pr_scores, by=list(id=df_cons[,1]), FUN = sum)
    data_plot <- merge(df_cons, aggr_sum, by = "id")
    colnames(data_plot)[2] <- "party_cons_set"
    # keep only the rows of the focal party
    data_plot <- data_plot[data_plot$party_cons_set == party_code,]
    # label this curve with the comma-separated names of the set
    c_n <- paste(p_names_cons,collapse=",")
    data_plot$party_cons_set <- rep(c_n,nrow(data_plot))
    colnames(data_plot)[3] <- "ideology"
    data_plot$Pr <- data_plot$pr_scores/data_plot$V1
    data_plot_all <- rbind(data_plot_all,data_plot)
  }
  data_plot_all$party_cons_set <- as.factor(data_plot_all$party_cons_set)
  plot <- ggplot(data=data_plot_all, aes_string(x="ideology", y="Pr",color="party_cons_set",group="party_cons_set"))+
    geom_line()+
    geom_point()+
    theme(legend.position="left")+
    xlab(ideology_name)+
    theme_bw()
  return(plot)
}
|
#'@title Make predictions from seahorse data by sampling and minimizing total flux
#'@author Alfred Ramirez, Jonathan Dreyfuss
#'@description This function integrates the sampled seahorse measurements as constraints into the specified model,
#'optionally maximizes an objective reaction, minimizes total flux for each sample, and returns a matrix
#'of reactions-by-samples where the entries are predicted fluxes.
#'@param model An object of class \code{\link[sybil]{modelorg}}
#'@param seahorse_data A data.frame returned by \code{\link{map_seahorse}}
#'@param biomass_est Estimated biomass flux. Default = 0.
#'@param alg Either "fba" (default) to optimize a reaction or "mtf" to only minimize total flux (and not do fba).
#'@param obj_rxn Reaction in \code{model} to optimize if \code{alg="fba"}. Default \code{NULL} optimizes ATP demand reaction.
#'@param model.nm The metabolic model name. One of "2.1A", "2.1x", or "2.2" (default).
#'@param solver The solver to use; "gurobi" (default) or "glpk".
#'@details For parallel computing, a parallel backend must be registered. See \code{\link[foreach]{foreach}} for details.
#'@export
fluxPredict <-function(model, seahorse_data, biomass_est=0, alg=c("mtf", "fba"), low.wt.rxns=NULL,
                       obj_rxn=NULL, model.nm="2.1A", solver=c("gurobi", "glpk")){
  # ---- argument validation ----
  alg <- match.arg(alg)
  solver <- match.arg(solver)
  stopifnot(model.nm %in% c("2.1A", "2.1x", "2.2"))
  stopifnot(is.null(low.wt.rxns)|low.wt.rxns %in% model@react_id)
  if (alg=="fba" && !is.null(obj_rxn) && !(obj_rxn %in% sybil::react_id(model))){
    stop("obj_rxn ", obj_rxn, " not in react_id(model).")
  }
  if (alg=="mtf" && !is.null(obj_rxn)) warning("alg=mtf, so obj_rxn will be ignored.")
  if (!is.numeric(biomass_est)||biomass_est<0) stop("biomass must be numeric and non-negative.")
  #biomass reaction has same name in 2.1A & 2.2
  biomass <- "biomass_reaction"
  #exp_coefs <- c("EX_o2(e)in","EX_o2(e)ex","ATPS4m", "DM_atp_m_", "O2tm", "EX_lac_L(e)in", "EX_lac_L(e)ex", "biomass_reaction")
  # reactions constrained by the Seahorse data: the *_lb / *_ub row names
  exp_coefs <- c(gsub("_lb$", "", grep("_lb$", rownames(seahorse_data), value=TRUE)), biomass=biomass)
  #ub and lb should match
  exp_coefs_ub <- c(gsub("_ub$", "", grep("_ub$", rownames(seahorse_data), value=TRUE)), biomass=biomass)
  if (any(exp_coefs!=exp_coefs_ub)) stop("seahorse_data should have matching upper bounds and lower bounds, but it does not.")
  #set obj
  if (alg=="fba" && is.null(obj_rxn)){
    #atp demand reaction has this form in 2.1A & 2.2
    dm_atp <- grep("DM_atp", exp_coefs, value=TRUE)
    if (length(dm_atp)>1){
      stop("Expected one ATP demand reaction with 'DM_atp' in name from rownames(seahorse_data), but found several.")
    }
    obj_rxn <- dm_atp
  }#end if fba & obj_rxn
  #create wts to nudge to co2 + lactate efflux
  # low weights (0.01) make lactate/CO2 routes cheap in the weighted-flux objective
  if (alg=="mtf"){
    wts <- rep(1, times=length(model@react_id))
    names(wts) <- model@react_id
    #found rxns initially in vmh
    #then found rxns in 2.1A by using grep w/ fixed=TRUE
    #L_LACt2r transports lac c <-> e
    if (model.nm=="2.2"){
      lac.rxns <- c("L_LACt2r", "EX_lac_L(e)", "LDH_L")
      co2.rxns <- c("CO2t", "CO2tm", "EX_hco3(e)", "H2CO3D", "H2CO3Dm", "r0941", "r1418")
      model <- sybil::changeBounds(model, react=c("H2CO3D", "H2CO3Dm", "EX_lac_L(e)", "EX_hco3(e)"), lb=0)
      model <- sybil::changeBounds(model, react=c("L_LACt2r", "LDH_L"), ub=0)
      #no co2 ex in Seahorse well
      #LACDcm -> 2 H+ & no evidence it exists in humans
      model <- sybil::changeBounds(model, react=c("EX_co2(e)", "L_LACDcm"), lb=0, ub=0)
    } else if (model.nm %in% c("2.1A", "2.1x")){
      lac.rxns <- c("L_LACt2r", "EX_lac_L(e)ex", "LDH_L")
      co2.rxns <- c("CO2t", "CO2tm", "EX_hco3(e)ex", "H2CO3D", "r0941", "r1418")
      model <- sybil::changeBounds(model, react=c("H2CO3D"), lb=0)
      model <- sybil::changeBounds(model, react=c("L_LACt2r", "LDH_L", "EX_lac_L(e)in", "EX_hco3(e)in"), ub=0)
      model <- sybil::changeBounds(model, react=c("EX_co2(e)in", "EX_co2(e)ex"), lb=0, ub=0)
    }
    low.wt.rxns <- c(lac.rxns, co2.rxns, low.wt.rxns)
    stopifnot(low.wt.rxns %in% model@react_id)
    wts[low.wt.rxns] <- 0.01
  }#end create wts
  # One LP per Seahorse sample (column); %dopar% requires a registered backend.
  final_output <- foreach(i=1:ncol(seahorse_data), .combine=cbind) %dopar% {
    model_lb <- c(seahorse_data[grep("_lb$", rownames(seahorse_data)),i], biomass_est)
    model_ub <- c(seahorse_data[grep("_ub$", rownames(seahorse_data)),i], biomass_est)
    stopifnot(exp_coefs %in% model@react_id)
    model <- sybil::changeBounds(model, react=exp_coefs, lb=model_lb, ub=model_ub)
    if (alg=="fba"){
      # maximize obj_rxn first, then minimize total flux at that optimum
      model <- sybil::changeObjFunc(model, obj_rxn, obj_coef=1)
      model_test_flux <- sybil::optimizeProb(model, algorithm="fba", lpdir="max")
      lpsolution <- model_test_flux@lp_stat
      #If the constraints are not feasible, we return a column of NAs for those constraints
      if (lpsolution == 0){
        output <- NA
      } else {
        output_fluxes <- sybil::optimizeProb(model, alg="mtf", mtfobj=mod_obj(model_test_flux))
        output <- sybil::getFluxDist(output_fluxes)
      }
      output
    } else {
      #if not fba, then min weighted flux to nudge to hco3 + lactate efflux
      # output_fluxes <- sybil::optimizeProb(model, alg="mtf")
      #output_fluxes <- sybil::optimizeProb(model, alg="fba", react=1:length(wts), obj_coef=wts)
      #output <- sybil::getFluxDist(output_fluxes)
      # Build the weighted min-total-flux LP by hand with variables (v, u):
      #   minimize  wts %*% u
      #   s.t.      S v == 0          (mat1: steady state)
      #             v - u <= 0        (mat2)
      #            -v - u <= 0        (mat3)   => u >= |v|
      nrxns <- model@react_num
      nmets <- model@met_num
      eye <- simple_triplet_diag_matrix(v=rep(1, times=nrxns))
      neg.eye <- simple_triplet_diag_matrix(v=rep(-1, times=nrxns))
      mat1 <- abind_simple_sparse_array(as.simple_triplet_matrix(model@S), simple_triplet_zero_matrix(nrow = nmets, ncol=nrxns), MARGIN = 2)
      mat2 <- abind_simple_sparse_array(eye, neg.eye, MARGIN = 2)
      mat3 <- abind_simple_sparse_array(neg.eye, neg.eye, MARGIN = 2)
      mat <- abind_simple_sparse_array(mat1, mat2, mat3, MARGIN=1)
      mat <- as.simple_triplet_matrix(mat)
      obj <- c(numeric(nrxns), wts)
      rhs <- numeric(nmets+2*nrxns)
      dirs <- rep(c("==", "<="), times=c(nmets, 2*nrxns))
      if (solver=="glpk"){
        lb <- list(ind = 1:nrxns, val = model@lowbnd)
        ub <- list(ind = 1:nrxns, val = model@uppbnd)
        opt <- Rglpk::Rglpk_solve_LP(obj=obj, mat=mat, dir=dirs, rhs=rhs, bounds=list(lower=lb, upper=ub))
        if (opt$status==0) cat("OPTIMAL SOLUTION FOUND\n")
        # NOTE(review): unlike the gurobi branch, a non-optimal glpk status
        # still returns opt$solution rather than NAs -- confirm intended.
        output <- opt$solution[1:nrxns]
      } else if (solver=="gurobi"){
        sense <- sub("==", "=", dirs)
        lb <- c(model@lowbnd, numeric(nrxns))
        ub <- c(model@uppbnd, rep(1000, nrxns))
        mod <- list(obj=obj, A=mat, rhs=rhs, sense=sense, lb=lb, ub=ub)
        #params: http://www.gurobi.com/documentation/7.5/refman/parameters.html
        opt <- gurobi::gurobi(model=mod, params = list(OutputFlag=0))
        if (opt$status=="OPTIMAL"){
          cat("OPTIMAL SOLUTION FOUND\n")
          output <- opt$x[1:nrxns]
        } else {
          output <- rep(NA, times=nrxns)
        }
      }
    }
  }#end foreach
  rownames(final_output) <- sybil::react_id(model)
  final_output
} | /R/fluxPredict.R | permissive | anu-bioinfo/sybilxf | R | false | false | 7,153 | r | #'@title Make predictions from seahorse data by sampling and minimizing total flux
#'@author Alfred Ramirez, Jonathan Dreyfuss
#'@description This function integrates the sampled seahorse measurements as constraints into the specified model,
#'optionally maximizes an objective reaction, minimizes total flux for each sample, and returns a matrix
#'of reactions-by-samples where the entries are predicted fluxes.
#'@param model An object of class \code{\link[sybil]{modelorg}}
#'@param seahorse_data A data.frame returned by \code{\link{map_seahorse}}
#'@param biomass_est Estimated biomass flux. Default = 0.
#'@param alg Either "fba" (default) to optimize a reaction or "mtf" to only minimize total flux (and not do fba).
#'@param obj_rxn Reaction in \code{model} to optimize if \code{alg="fba"}. Default \code{NULL} optimizes ATP demand reaction.
#'@param model.nm The metabolic model name. One of "2.1A", "2.1x", or "2.2" (default).
#'@param solver The solver to use; "gurobi" (default) or "glpk".
#'@details For parallel computing, a parallel backend must be registered. See \code{\link[foreach]{foreach}} for details.
#'@export
fluxPredict <- function(model, seahorse_data, biomass_est=0, alg=c("mtf", "fba"), low.wt.rxns=NULL,
                        obj_rxn=NULL, model.nm="2.1A", solver=c("gurobi", "glpk")){
  #Predict one flux distribution per column of seahorse_data.  Rows named
  #"<rxn>_lb"/"<rxn>_ub" supply experimental bounds for reaction <rxn>; the
  #biomass reaction is additionally pinned to biomass_est.  Returns a matrix
  #with one row per model reaction and one column per seahorse_data column.
  alg <- match.arg(alg)
  solver <- match.arg(solver)
  stopifnot(model.nm %in% c("2.1A", "2.1x", "2.2"))
  stopifnot(is.null(low.wt.rxns)|low.wt.rxns %in% model@react_id)
  if (alg=="fba" && !is.null(obj_rxn) && !(obj_rxn %in% sybil::react_id(model))){
    stop("obj_rxn ", obj_rxn, " not in react_id(model).")
  }
  if (alg=="mtf" && !is.null(obj_rxn)) warning("alg=mtf, so obj_rxn will be ignored.")
  if (!is.numeric(biomass_est)||biomass_est<0) stop("biomass must be numeric and non-negative.")
  #biomass reaction has same name in 2.1A & 2.2
  biomass <- "biomass_reaction"
  #reaction ids are the "_lb"/"_ub" rownames with the suffix stripped
  exp_coefs <- c(gsub("_lb$", "", grep("_lb$", rownames(seahorse_data), value=TRUE)), biomass=biomass)
  #lower- and upper-bound rows must describe the same reactions, in order
  exp_coefs_ub <- c(gsub("_ub$", "", grep("_ub$", rownames(seahorse_data), value=TRUE)), biomass=biomass)
  if (any(exp_coefs!=exp_coefs_ub)) stop("seahorse_data should have matching upper bounds and lower bounds, but it does not.")
  #default fba objective: the single ATP demand reaction named in the data
  if (alg=="fba" && is.null(obj_rxn)){
    #atp demand reaction has this form in 2.1A & 2.2
    dm_atp <- grep("DM_atp", exp_coefs, value=TRUE)
    if (length(dm_atp)>1){
      stop("Expected one ATP demand reaction with 'DM_atp' in name from rownames(seahorse_data), but found several.")
    }
    obj_rxn <- dm_atp
  }#end if fba & obj_rxn
  #create wts to nudge mtf solutions toward co2 + lactate efflux: those
  #reactions get a small weight so flux through them stays cheap
  if (alg=="mtf"){
    wts <- rep(1, times=length(model@react_id))
    names(wts) <- model@react_id
    #rxn ids found initially in vmh, then matched into 2.1A via grep(fixed=TRUE)
    #L_LACt2r transports lac c <-> e
    if (model.nm=="2.2"){
      lac.rxns <- c("L_LACt2r", "EX_lac_L(e)", "LDH_L")
      co2.rxns <- c("CO2t", "CO2tm", "EX_hco3(e)", "H2CO3D", "H2CO3Dm", "r0941", "r1418")
      model <- sybil::changeBounds(model, react=c("H2CO3D", "H2CO3Dm", "EX_lac_L(e)", "EX_hco3(e)"), lb=0)
      model <- sybil::changeBounds(model, react=c("L_LACt2r", "LDH_L"), ub=0)
      #no co2 exchange in a Seahorse well
      #LACDcm -> 2 H+ & no evidence it exists in humans
      model <- sybil::changeBounds(model, react=c("EX_co2(e)", "L_LACDcm"), lb=0, ub=0)
    } else if (model.nm %in% c("2.1A", "2.1x")){
      lac.rxns <- c("L_LACt2r", "EX_lac_L(e)ex", "LDH_L")
      co2.rxns <- c("CO2t", "CO2tm", "EX_hco3(e)ex", "H2CO3D", "r0941", "r1418")
      model <- sybil::changeBounds(model, react=c("H2CO3D"), lb=0)
      model <- sybil::changeBounds(model, react=c("L_LACt2r", "LDH_L", "EX_lac_L(e)in", "EX_hco3(e)in"), ub=0)
      model <- sybil::changeBounds(model, react=c("EX_co2(e)in", "EX_co2(e)ex"), lb=0, ub=0)
    }
    low.wt.rxns <- c(lac.rxns, co2.rxns, low.wt.rxns)
    stopifnot(low.wt.rxns %in% model@react_id)
    wts[low.wt.rxns] <- 0.01
  }#end create wts
  #one optimisation per seahorse_data column; %dopar% requires a registered
  #parallel backend (see @details in the roxygen header above)
  final_output <- foreach(i=1:ncol(seahorse_data), .combine=cbind) %dopar% {
    model_lb <- c(seahorse_data[grep("_lb$", rownames(seahorse_data)),i], biomass_est)
    model_ub <- c(seahorse_data[grep("_ub$", rownames(seahorse_data)),i], biomass_est)
    stopifnot(exp_coefs %in% model@react_id)
    model <- sybil::changeBounds(model, react=exp_coefs, lb=model_lb, ub=model_ub)
    if (alg=="fba"){
      model <- sybil::changeObjFunc(model, obj_rxn, obj_coef=1)
      model_test_flux <- sybil::optimizeProb(model, algorithm="fba", lpdir="max")
      lpsolution <- model_test_flux@lp_stat
      #If the constraints are not feasible, we return a column of NAs.
      #BUGFIX: this used to be a bare scalar NA, so a run in which every
      #column was infeasible produced a 1-row matrix and the rownames<-
      #assignment after the foreach failed.
      if (lpsolution == 0){
        output <- rep(NA_real_, times=length(sybil::react_id(model)))
      } else {
        output_fluxes <- sybil::optimizeProb(model, alg="mtf", mtfobj=mod_obj(model_test_flux))
        output <- sybil::getFluxDist(output_fluxes)
      }
      output
    } else {
      #mtf: minimise weighted total flux to nudge toward hco3 + lactate
      #efflux.  Variables are [v (net flux), t (flux magnitude)]; the rows
      #built below encode S v == 0, v - t <= 0 and -v - t <= 0 (i.e.
      #|v| <= t), and the objective minimises sum(wts * t).
      nrxns <- model@react_num
      nmets <- model@met_num
      eye <- simple_triplet_diag_matrix(v=rep(1, times=nrxns))
      neg.eye <- simple_triplet_diag_matrix(v=rep(-1, times=nrxns))
      mat1 <- abind_simple_sparse_array(as.simple_triplet_matrix(model@S), simple_triplet_zero_matrix(nrow = nmets, ncol=nrxns), MARGIN = 2)
      mat2 <- abind_simple_sparse_array(eye, neg.eye, MARGIN = 2)
      mat3 <- abind_simple_sparse_array(neg.eye, neg.eye, MARGIN = 2)
      mat <- abind_simple_sparse_array(mat1, mat2, mat3, MARGIN=1)
      mat <- as.simple_triplet_matrix(mat)
      obj <- c(numeric(nrxns), wts)
      rhs <- numeric(nmets+2*nrxns)
      dirs <- rep(c("==", "<="), times=c(nmets, 2*nrxns))
      if (solver=="glpk"){
        lb <- list(ind = seq_len(nrxns), val = model@lowbnd)
        ub <- list(ind = seq_len(nrxns), val = model@uppbnd)
        opt <- Rglpk::Rglpk_solve_LP(obj=obj, mat=mat, dir=dirs, rhs=rhs, bounds=list(lower=lb, upper=ub))
        #BUGFIX: only use the solution when glpk reports an optimum
        #(status 0); previously a non-optimal status still returned
        #whatever vector the solver left behind.  Mirrors the gurobi branch.
        if (opt$status==0){
          cat("OPTIMAL SOLUTION FOUND\n")
          output <- opt$solution[seq_len(nrxns)]
        } else {
          output <- rep(NA_real_, times=nrxns)
        }
      } else if (solver=="gurobi"){
        sense <- sub("==", "=", dirs)
        lb <- c(model@lowbnd, numeric(nrxns))
        ub <- c(model@uppbnd, rep(1000, nrxns))
        mod <- list(obj=obj, A=mat, rhs=rhs, sense=sense, lb=lb, ub=ub)
        #params: http://www.gurobi.com/documentation/7.5/refman/parameters.html
        opt <- gurobi::gurobi(model=mod, params = list(OutputFlag=0))
        if (opt$status=="OPTIMAL"){
          cat("OPTIMAL SOLUTION FOUND\n")
          output <- opt$x[1:nrxns]
        } else {
          output <- rep(NA, times=nrxns)
        }
      }
      output
    }
  }#end foreach
  rownames(final_output) <- sybil::react_id(model)
  final_output
}
# Question 5
#######################################################################################################################
#
# Create a numerical summary for gender. cdc is available in the workspace.
# Can you tell how many males are in the sample?
#
#######################################################################################################################
# Multiple-choice options:
#   1) 4,657
#   2) 6,972
#   3) 9,569
#   4) 10,431
#   5) 20,000
summary(cdc$gender)
# Answer: option 3 -- summary(cdc$gender) shows 9,569 males in the sample.
| /dataCamp/openCourses/dataAnalysisAndStatisticalInference/2_introductionToData/12_question5.R | permissive | sagarnikam123/learnNPractice | R | false | false | 465 | r | # Question 5
#######################################################################################################################
#
# Create a numerical summary for gender. cdc is available in the workspace.
# Can you tell how many males are in the sample?
#
#######################################################################################################################
# Multiple-choice options:
#   1) 4,657
#   2) 6,972
#   3) 9,569
#   4) 10,431
#   5) 20,000
summary(cdc$gender)
# Answer: option 3 -- summary(cdc$gender) shows 9,569 males in the sample.
|
#load required libraries. Set some variable.
#USAGE: 1) FIX THE DIRECTORY PATH BELOW IN the setwd CALL. 2) Cut and paste the content of this file in an R window.
library(dplyr)
library(caret)
require(dplyr)
require(caret)
#Correlation threshold used by getCorrelatedCols(); -1 is a placeholder
#that pmlInit() overwrites with .8 before the screen runs.
CORRTHRESH = -1
#Number of training rows sampled when screening for correlated columns.
CORRSAMPLESIZE = 1500
#Placeholder used to preallocate name vectors; assumed never to collide
#with a real column name.
UNLIKELYSTR = "impossiblevaxdfd"
#Define some needed functions
#determines if a column has all numeric values
#Report whether every value in x is numeric.  is.numeric() yields a single
#flag for the whole vector, so the original subset()-based length test
#reduces to: x is a numeric vector, or x is empty.
fullyNumeric = function(x) {
  is.numeric(x) || length(x) == 0
}
#determines if a column has no missing value
#Report whether x is free of NA values (the original counted the elements
#selected by is.na() and compared the count against zero).
noMissingValue = function(x) {
  !anyNA(x)
}
#find very correlated columns
#Return the names of columns of corrsampl that take part in at least one
#strongly correlated pair (|cor| > CORRTHRESH, a global).  Only fully
#numeric, NA-free columns are screened; UNLIKELYSTR (global) is the
#preallocation placeholder and must not collide with a real column name.
getCorrelatedCols = function(corrsampl) {
  rawCols = colnames(corrsampl)
  #seq_along() is safe for a zero-column frame, where 1:length() would
  #yield c(1, 0); vapply() pins the result type that sapply() only guesses
  numCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {fullyNumeric(unlist(corrsampl[,x]))}, logical(1))]
  nonaCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {noMissingValue(unlist(corrsampl[,x]))}, logical(1))]
  hccds = corrsampl[,intersect(numCols, nonaCols)]
  allcor = cor(hccds, hccds)
  N <- dim(allcor)[1]
  #preallocate; R extends the vector automatically if more pairs are found
  correlatedCols = rep(UNLIKELYSTR, N)
  k = 1
  vars <- colnames(hccds)
  #scan the upper triangle for pairs above the threshold, recording the
  #first column of each pair
  for (i in seq_len(N)) {
    for (j in i:N) {
      if ((abs(allcor[i,j]) > CORRTHRESH) && (i != j)) {
        correlatedCols[k] = vars[i]
        k=k+1
      }
    }
  }
  setdiff(unique(correlatedCols), c(UNLIKELYSTR))
}
#use the training set to learn a random forest. Return predictions
#and some other important info as list.
#Fit a random forest ("classe ~ .") on trainingSet with 5-fold CV and
#evaluate it on testingSet.  Returns a list: tc (trainControl), goodCols
#(column names used), numCols, fit (the caret model), cm (confusion matrix
#on testingSet) and approxTime (elapsed whole seconds).
learn <- function(trainingSet, testingSet) {
t1 = Sys.time()
tmp = list()
tmp$tc <- trainControl(method="cv", number=5)
tmp$goodCols <- names(trainingSet);
tmp$numCols <- dim(trainingSet)[2]
tmp$fit <- train(classe ~ ., method="rf", data=trainingSet, prox=TRUE, trControl=tmp$tc)
tmp$cm <- confusionMatrix(predict(tmp$fit, newdata = testingSet), testingSet$classe);
#BUGFIX: "Sys.time() - t1" is a difftime whose units auto-select (secs vs
#mins vs hours) with run length, so the ApproxTime column mixed units
#across runs; request seconds explicitly.
tmp$approxTime = floor(as.numeric(difftime(Sys.time(), t1, units = "secs")))
tmp
}
#solve a large number of smaller learning problems (given by
#iterations). From each returned random forest, pick the most
#important columns. Pick the columns that pay biggest role in all
#iteratons combined.
#Rank the columns of trainingSet by cumulative, accuracy-weighted random
#forest importance over several small training runs.
#trainingSet: data frame with a "classe" outcome column.
#iterations: number of independent runs to accumulate.
#SSSize: rows subsampled per run (the whole set is used if it is smaller).
#Returns the column names of trainingSet, most important first.
#NOTE(review): each run trains and evaluates on the same rows
#(learn(trn, trn)), so the weighting accuracy is an in-sample estimate.
mostImportantFeatures <- function(trainingSet, iterations=10, SSSize = 300) {
cols = names(trainingSet)
importance <- rep(0.0, length(cols));
for (i in 1:iterations) {
trn = trainingSet
#work on a random subsample when the training set is large
if (SSSize < dim(trainingSet)[1]) {
smpl = sample(dim(trainingSet)[1], SSSize);
trn <- trainingSet[smpl, ]
}
learning = learn(trn, trn)
#per-column importance from the underlying randomForest model
imp <- learning$fit$finalModel["importance"];
accuracy = as.data.frame(learning$cm["overall"])["Accuracy",];
#accumulate importance scaled by this run's accuracy; columns absent from
#the forest (e.g. the outcome itself) contribute 0
importance <- importance +
sapply(cols, FUN = function(x){if (is.na(imp[1]$importance[,][x])) {0} else {imp[1]$importance[,][x]*accuracy}});
}
names(trainingSet)[order(importance, decreasing=TRUE)]
}
#Step 1: Setup initial environment. WD, SEED, DATA, constants
#Globals populated by pmlInit() below.
trn = NULL
tinyTST = NULL
#Initialise the environment: working directory, data, RNG seed, constants.
#NOTE(review): rm(list=ls()) inside a function lists the function's own
#(empty) environment, so it clears nothing -- it does not reset the global
#workspace as presumably intended.
pmlInit = function () {
rm(list=ls())
#hard-coded project directory; adjust per the USAGE note at the top
setwd(
paste("C:/Users/neeraj/b/Biz/wrk_hm/Learning/",
"Coursera/PracticalMachineLearningCode/Programs",
sep="", collapse="")
)
#assign into the global environment so the script steps below can use them
trn <<- read.csv("../Data/pml-training.csv")
tinyTST <<- read.csv("../Data/pml-testing.csv")
#fixed seed for reproducible sampling
set.seed(71679174)
TESTDATAPROB <<- .3
CORRTHRESH <<- .8
}
pmlInit()
#Step 2: convert to tidy, remove stat-summary cols
trn <- tbl_df(trn)
tinyTST <- tbl_df(tinyTST)
#keep columns 2..160 (drops the first column, presumably a row id -- confirm)
trn<-trn[,2:160]
tinyTST<-tinyTST[,2:160]
allcols = colnames(trn)
print(dim(trn)[2]);print("XXXXX")
#drop the summary-statistic columns (max/min/amplitude/var/avg/...)
rawCols <- allcols[-grep("^max|^min|^amplitude|^var|^avg|^stddev|^skewness|^kurtosis", allcols)]
trn<- trn[rawCols]
tinyTST<- tinyTST[rawCols]
print(dim(trn)[2]);print("XXXXX")
#Step 3: Create a test set that will not be touched in any analysis
#and will be used only for testing
testingRows <- sample(dim(trn)[1], size = dim(trn)[1]*TESTDATAPROB)
tstset <- trn[testingRows,] #do not touch it except to deselect columns.
trn <- trn[-testingRows,] #not intersected with testing at all.
print(dim(trn)[2]);print("XXXXX")
#Step 4: Get rid of highly correlated columns.  The screen runs on a random
#subsample of training rows; the resulting columns are dropped everywhere.
corrCols = getCorrelatedCols(trn[sample(dim(trn)[1], size = CORRSAMPLESIZE), ]);
keep = setdiff(names(trn), corrCols)
print(length(corrCols))
#BUGFIX: subset the set-aside test set itself, not trn -- the original
#`tstset <- trn[, keep]` replaced the held-out test rows with training
#rows, so "AccuracyOnSetAsideTest" below was measured on training data.
tstset <- tstset[, keep] #do not touch it except to deselect columns.
trn <- trn[, keep] #not intersected with testing at all.
tinyTST<- tinyTST[, keep]
print(dim(trn)[2]);print("XXXXX")
#Step 5: Now do the main experiment. we will create training sets of
#diverse sizes (increasing), learn a model of them, check the accuracy
#of the model. We will try to hit 99%+ accuracy. Each model will be
#tested on two sets 1) The main test set (tstset) created above and 2)
#all other rows not used in training. If total rows are 20000 and
#5000 are in the tstset, potentially we can use 15000 roes for
#training. But we used only NN for training. The remaining 15000-N
#can also be used as a separate test set. We will vary NN from 100 to
#10000 and increase its value by 25% each time.
#starting training-set size; grown by 25% per pass below
NN = 3508
answer = NULL
df = NULL;
#keep the top 75% most important columns (plus the outcome "classe")
bestN = floor(0.75 * dim(trn)[2]);
features = union(mostImportantFeatures(trn)[1:bestN], "classe")
trn <- trn[,features]
tstset <- tstset[,features]
tinyTST <- tinyTST[,features]
print(dim(trn)[2]);print("XXXXX")
#150 is just a safe upper bound on passes; the break below stops the loop
#once NN exceeds 7000
for (i in 1:150) {
#snapshot the data sets; restored at the end of each pass
savedTrn <- trn
savedTst <- tstset
savedTiny <- tinyTST
NN = floor(NN * 1.25)
if (NN > 7000) {
break;
}
rows = sample(dim(trn)[1], NN);
thisTrain <- trn[rows,]
thisTest <- trn[-rows,]
L <- learn(thisTrain, thisTest)
#predicted classes for the small quiz set, collapsed to one string
answer <- paste(predict(L$fit, newdata = tinyTST),sep="", collapse="")
cm <- confusionMatrix(predict(L$fit, newdata = tstset), tstset$classe)
#accumulate one result row per pass
if (is.null(df)) {
df = c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer)
names(df) = c("TrainingSize", "Features", "ApproxTime", "AccuracyOnUnusedTraining", "AccuracyOnSetAsideTest", "CatsOfTiny")
} else {
df = rbind(df, c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer))
}
print(df)
#checkpoint results after every pass (filename pattern: CorrFile_<NN>_.csv)
write.csv(df, paste("CorrFile", NN, ".csv", sep="_"));
trn <- savedTrn
tstset <- savedTst
tinyTST <- savedTiny
}
#load required libraries. Set some variable.
library(dplyr)
library(caret)
require(dplyr)
require(caret)
#Correlation threshold used by getCorrelatedCols(); -1 is a placeholder
#that pmlInit() overwrites with .8 before the screen runs.
CORRTHRESH = -1
#Number of training rows sampled when screening for correlated columns.
CORRSAMPLESIZE = 1500
#Placeholder used to preallocate name vectors; assumed never to collide
#with a real column name.
UNLIKELYSTR = "impossiblevaxdfd"
#Define some needed functions
#determines if a column has all numeric values
#Report whether every value in x is numeric.  is.numeric() yields a single
#flag for the whole vector, so the original subset()-based length test
#reduces to: x is a numeric vector, or x is empty.
fullyNumeric = function(x) {
  is.numeric(x) || length(x) == 0
}
#determines if a column has no missing value
#Report whether x is free of NA values (the original counted the elements
#selected by is.na() and compared the count against zero).
noMissingValue = function(x) {
  !anyNA(x)
}
#find very correlated columns
#Return the names of columns of corrsampl that take part in at least one
#strongly correlated pair (|cor| > CORRTHRESH, a global).  Only fully
#numeric, NA-free columns are screened; UNLIKELYSTR (global) is the
#preallocation placeholder and must not collide with a real column name.
getCorrelatedCols = function(corrsampl) {
  rawCols = colnames(corrsampl)
  #seq_along() is safe for a zero-column frame, where 1:length() would
  #yield c(1, 0); vapply() pins the result type that sapply() only guesses
  numCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {fullyNumeric(unlist(corrsampl[,x]))}, logical(1))]
  nonaCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {noMissingValue(unlist(corrsampl[,x]))}, logical(1))]
  hccds = corrsampl[,intersect(numCols, nonaCols)]
  allcor = cor(hccds, hccds)
  N <- dim(allcor)[1]
  #preallocate; R extends the vector automatically if more pairs are found
  correlatedCols = rep(UNLIKELYSTR, N)
  k = 1
  vars <- colnames(hccds)
  #scan the upper triangle for pairs above the threshold, recording the
  #first column of each pair
  for (i in seq_len(N)) {
    for (j in i:N) {
      if ((abs(allcor[i,j]) > CORRTHRESH) && (i != j)) {
        correlatedCols[k] = vars[i]
        k=k+1
      }
    }
  }
  setdiff(unique(correlatedCols), c(UNLIKELYSTR))
}
#use the training set to learn a random forest. Return predictions
#and some other important info as list.
#Fit a random forest ("classe ~ .") on trainingSet with 5-fold CV and
#evaluate it on testingSet.  Returns a list: tc (trainControl), goodCols
#(column names used), numCols, fit (the caret model), cm (confusion matrix
#on testingSet) and approxTime (elapsed whole seconds).
learn <- function(trainingSet, testingSet) {
t1 = Sys.time()
tmp = list()
tmp$tc <- trainControl(method="cv", number=5)
tmp$goodCols <- names(trainingSet);
tmp$numCols <- dim(trainingSet)[2]
tmp$fit <- train(classe ~ ., method="rf", data=trainingSet, prox=TRUE, trControl=tmp$tc)
tmp$cm <- confusionMatrix(predict(tmp$fit, newdata = testingSet), testingSet$classe);
#BUGFIX: "Sys.time() - t1" is a difftime whose units auto-select (secs vs
#mins vs hours) with run length, so the ApproxTime column mixed units
#across runs; request seconds explicitly.
tmp$approxTime = floor(as.numeric(difftime(Sys.time(), t1, units = "secs")))
tmp
}
#solve a large number of smaller learning problems (given by
#iterations). From each returned random forest, pick the most
#important columns. Pick the columns that pay biggest role in all
#iteratons combined.
#Rank the columns of trainingSet by cumulative, accuracy-weighted random
#forest importance over several small training runs.
#trainingSet: data frame with a "classe" outcome column.
#iterations: number of independent runs to accumulate.
#SSSize: rows subsampled per run (the whole set is used if it is smaller).
#Returns the column names of trainingSet, most important first.
#NOTE(review): each run trains and evaluates on the same rows
#(learn(trn, trn)), so the weighting accuracy is an in-sample estimate.
mostImportantFeatures <- function(trainingSet, iterations=10, SSSize = 300) {
cols = names(trainingSet)
importance <- rep(0.0, length(cols));
for (i in 1:iterations) {
trn = trainingSet
#work on a random subsample when the training set is large
if (SSSize < dim(trainingSet)[1]) {
smpl = sample(dim(trainingSet)[1], SSSize);
trn <- trainingSet[smpl, ]
}
learning = learn(trn, trn)
#per-column importance from the underlying randomForest model
imp <- learning$fit$finalModel["importance"];
accuracy = as.data.frame(learning$cm["overall"])["Accuracy",];
#accumulate importance scaled by this run's accuracy; columns absent from
#the forest (e.g. the outcome itself) contribute 0
importance <- importance +
sapply(cols, FUN = function(x){if (is.na(imp[1]$importance[,][x])) {0} else {imp[1]$importance[,][x]*accuracy}});
}
names(trainingSet)[order(importance, decreasing=TRUE)]
}
#Step 1: Setup initial environment. WD, SEED, DATA, constants
#Globals populated by pmlInit() below.
trn = NULL
tinyTST = NULL
#Initialise the environment: working directory, data, RNG seed, constants.
#NOTE(review): rm(list=ls()) inside a function lists the function's own
#(empty) environment, so it clears nothing -- it does not reset the global
#workspace as presumably intended.
pmlInit = function () {
rm(list=ls())
#placeholder path; point this at the parent of the Data directory
setwd(
paste("SET_TO_THE-PARENT_OF_DATA_DIRECTORY_WHERE",
"THE_FOLLOWING_TWO_FILES_ARE",
sep="", collapse="")
)
#assign into the global environment so the script steps below can use them
trn <<- read.csv("../Data/pml-training.csv")
tinyTST <<- read.csv("../Data/pml-testing.csv")
#fixed seed for reproducible sampling
set.seed(71679174)
TESTDATAPROB <<- .3
CORRTHRESH <<- .8
}
pmlInit()
#Step 2: convert to tidy, remove stat-summary cols
trn <- tbl_df(trn)
tinyTST <- tbl_df(tinyTST)
#keep columns 2..160 (drops the first column, presumably a row id -- confirm)
trn<-trn[,2:160]
tinyTST<-tinyTST[,2:160]
allcols = colnames(trn)
print(dim(trn)[2]);print("XXXXX")
#drop the summary-statistic columns (max/min/amplitude/var/avg/...)
rawCols <- allcols[-grep("^max|^min|^amplitude|^var|^avg|^stddev|^skewness|^kurtosis", allcols)]
trn<- trn[rawCols]
tinyTST<- tinyTST[rawCols]
print(dim(trn)[2]);print("XXXXX")
#Step 3: Create a test set that will not be touched in any analysis
#and will be used only for testing
testingRows <- sample(dim(trn)[1], size = dim(trn)[1]*TESTDATAPROB)
tstset <- trn[testingRows,] #do not touch it except to deselect columns.
trn <- trn[-testingRows,] #not intersected with testing at all.
print(dim(trn)[2]);print("XXXXX")
#Step 4: Get rid of highly correlated columns.  The screen runs on a random
#subsample of training rows; the resulting columns are dropped everywhere.
corrCols = getCorrelatedCols(trn[sample(dim(trn)[1], size = CORRSAMPLESIZE), ]);
keep = setdiff(names(trn), corrCols)
print(length(corrCols))
#BUGFIX: subset the set-aside test set itself, not trn -- the original
#`tstset <- trn[, keep]` replaced the held-out test rows with training
#rows, so "AccuracyOnSetAsideTest" below was measured on training data.
tstset <- tstset[, keep] #do not touch it except to deselect columns.
trn <- trn[, keep] #not intersected with testing at all.
tinyTST<- tinyTST[, keep]
print(dim(trn)[2]);print("XXXXX")
#Step 5: Now do the main experiment. we will create training sets of
#diverse sizes (increasing), learn a model of them, check the accuracy
#of the model. We will try to hit 99%+ accuracy. Each model will be
#tested on two sets 1) The main test set (tstset) created above and 2)
#all other rows not used in training. If total rows are 20000 and
#5000 are in the tstset, potentially we can use 15000 roes for
#training. But we used only NN for training. The remaining 15000-N
#can also be used as a separate test set. We will vary NN from 100 to
#10000 and increase its value by 25% each time.
#starting training-set size; grown by 25% per pass below
NN = 3508
answer = NULL
df = NULL;
#keep the top 75% most important columns (plus the outcome "classe")
bestN = floor(0.75 * dim(trn)[2]);
features = union(mostImportantFeatures(trn)[1:bestN], "classe")
trn <- trn[,features]
tstset <- tstset[,features]
tinyTST <- tinyTST[,features]
print(dim(trn)[2]);print("XXXXX")
#150 is just a safe upper bound on passes; the break below stops the loop
#once NN exceeds 7000
for (i in 1:150) {
#snapshot the data sets; restored at the end of each pass
savedTrn <- trn
savedTst <- tstset
savedTiny <- tinyTST
NN = floor(NN * 1.25)
if (NN > 7000) {
break;
}
rows = sample(dim(trn)[1], NN);
thisTrain <- trn[rows,]
thisTest <- trn[-rows,]
L <- learn(thisTrain, thisTest)
#predicted classes for the small quiz set, collapsed to one string
answer <- paste(predict(L$fit, newdata = tinyTST),sep="", collapse="")
cm <- confusionMatrix(predict(L$fit, newdata = tstset), tstset$classe)
#accumulate one result row per pass
if (is.null(df)) {
df = c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer)
names(df) = c("TrainingSize", "Features", "ApproxTime", "AccuracyOnUnusedTraining", "AccuracyOnSetAsideTest", "CatsOfTiny")
} else {
df = rbind(df, c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer))
}
print(df)
#checkpoint results after every pass (filename pattern: CorrFile_<NN>_.csv)
write.csv(df, paste("CorrFile", NN, ".csv", sep="_"));
trn <- savedTrn
tstset <- savedTst
tinyTST <- savedTiny
}
| /pml-project.R | no_license | NB1/JHPML | R | false | false | 12,507 | r | #load required libraries. Set some variable.
#USAGE: 1) FIX THE DIRECTORY PATH BELOW IN the setwd CALL. 2) Cut and paste the content of this file in an R window.
library(dplyr)
library(caret)
require(dplyr)
require(caret)
#Correlation threshold used by getCorrelatedCols(); -1 is a placeholder
#that pmlInit() overwrites with .8 before the screen runs.
CORRTHRESH = -1
#Number of training rows sampled when screening for correlated columns.
CORRSAMPLESIZE = 1500
#Placeholder used to preallocate name vectors; assumed never to collide
#with a real column name.
UNLIKELYSTR = "impossiblevaxdfd"
#Define some needed functions
#determines if a column has all numeric values
#Report whether every value in x is numeric.  is.numeric() yields a single
#flag for the whole vector, so the original subset()-based length test
#reduces to: x is a numeric vector, or x is empty.
fullyNumeric = function(x) {
  is.numeric(x) || length(x) == 0
}
#determines if a column has no missing value
#Report whether x is free of NA values (the original counted the elements
#selected by is.na() and compared the count against zero).
noMissingValue = function(x) {
  !anyNA(x)
}
#find very correlated columns
#Return the names of columns of corrsampl that take part in at least one
#strongly correlated pair (|cor| > CORRTHRESH, a global).  Only fully
#numeric, NA-free columns are screened; UNLIKELYSTR (global) is the
#preallocation placeholder and must not collide with a real column name.
getCorrelatedCols = function(corrsampl) {
  rawCols = colnames(corrsampl)
  #seq_along() is safe for a zero-column frame, where 1:length() would
  #yield c(1, 0); vapply() pins the result type that sapply() only guesses
  numCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {fullyNumeric(unlist(corrsampl[,x]))}, logical(1))]
  nonaCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {noMissingValue(unlist(corrsampl[,x]))}, logical(1))]
  hccds = corrsampl[,intersect(numCols, nonaCols)]
  allcor = cor(hccds, hccds)
  N <- dim(allcor)[1]
  #preallocate; R extends the vector automatically if more pairs are found
  correlatedCols = rep(UNLIKELYSTR, N)
  k = 1
  vars <- colnames(hccds)
  #scan the upper triangle for pairs above the threshold, recording the
  #first column of each pair
  for (i in seq_len(N)) {
    for (j in i:N) {
      if ((abs(allcor[i,j]) > CORRTHRESH) && (i != j)) {
        correlatedCols[k] = vars[i]
        k=k+1
      }
    }
  }
  setdiff(unique(correlatedCols), c(UNLIKELYSTR))
}
#use the training set to learn a random forest. Return predictions
#and some other important info as list.
#Fit a random forest ("classe ~ .") on trainingSet with 5-fold CV and
#evaluate it on testingSet.  Returns a list: tc (trainControl), goodCols
#(column names used), numCols, fit (the caret model), cm (confusion matrix
#on testingSet) and approxTime (elapsed whole seconds).
learn <- function(trainingSet, testingSet) {
t1 = Sys.time()
tmp = list()
tmp$tc <- trainControl(method="cv", number=5)
tmp$goodCols <- names(trainingSet);
tmp$numCols <- dim(trainingSet)[2]
tmp$fit <- train(classe ~ ., method="rf", data=trainingSet, prox=TRUE, trControl=tmp$tc)
tmp$cm <- confusionMatrix(predict(tmp$fit, newdata = testingSet), testingSet$classe);
#BUGFIX: "Sys.time() - t1" is a difftime whose units auto-select (secs vs
#mins vs hours) with run length, so the ApproxTime column mixed units
#across runs; request seconds explicitly.
tmp$approxTime = floor(as.numeric(difftime(Sys.time(), t1, units = "secs")))
tmp
}
#solve a large number of smaller learning problems (given by
#iterations). From each returned random forest, pick the most
#important columns. Pick the columns that pay biggest role in all
#iteratons combined.
#Rank the columns of trainingSet by cumulative, accuracy-weighted random
#forest importance over several small training runs.
#trainingSet: data frame with a "classe" outcome column.
#iterations: number of independent runs to accumulate.
#SSSize: rows subsampled per run (the whole set is used if it is smaller).
#Returns the column names of trainingSet, most important first.
#NOTE(review): each run trains and evaluates on the same rows
#(learn(trn, trn)), so the weighting accuracy is an in-sample estimate.
mostImportantFeatures <- function(trainingSet, iterations=10, SSSize = 300) {
cols = names(trainingSet)
importance <- rep(0.0, length(cols));
for (i in 1:iterations) {
trn = trainingSet
#work on a random subsample when the training set is large
if (SSSize < dim(trainingSet)[1]) {
smpl = sample(dim(trainingSet)[1], SSSize);
trn <- trainingSet[smpl, ]
}
learning = learn(trn, trn)
#per-column importance from the underlying randomForest model
imp <- learning$fit$finalModel["importance"];
accuracy = as.data.frame(learning$cm["overall"])["Accuracy",];
#accumulate importance scaled by this run's accuracy; columns absent from
#the forest (e.g. the outcome itself) contribute 0
importance <- importance +
sapply(cols, FUN = function(x){if (is.na(imp[1]$importance[,][x])) {0} else {imp[1]$importance[,][x]*accuracy}});
}
names(trainingSet)[order(importance, decreasing=TRUE)]
}
#Step 1: Setup initial environment. WD, SEED, DATA, constants
#Globals populated by pmlInit() below.
trn = NULL
tinyTST = NULL
#Initialise the environment: working directory, data, RNG seed, constants.
#NOTE(review): rm(list=ls()) inside a function lists the function's own
#(empty) environment, so it clears nothing -- it does not reset the global
#workspace as presumably intended.
pmlInit = function () {
rm(list=ls())
#hard-coded project directory; adjust per the USAGE note at the top
setwd(
paste("C:/Users/neeraj/b/Biz/wrk_hm/Learning/",
"Coursera/PracticalMachineLearningCode/Programs",
sep="", collapse="")
)
#assign into the global environment so the script steps below can use them
trn <<- read.csv("../Data/pml-training.csv")
tinyTST <<- read.csv("../Data/pml-testing.csv")
#fixed seed for reproducible sampling
set.seed(71679174)
TESTDATAPROB <<- .3
CORRTHRESH <<- .8
}
pmlInit()
#Step 2: convert to tidy, remove stat-summary cols
trn <- tbl_df(trn)
tinyTST <- tbl_df(tinyTST)
#keep columns 2..160 (drops the first column, presumably a row id -- confirm)
trn<-trn[,2:160]
tinyTST<-tinyTST[,2:160]
allcols = colnames(trn)
print(dim(trn)[2]);print("XXXXX")
#drop the summary-statistic columns (max/min/amplitude/var/avg/...)
rawCols <- allcols[-grep("^max|^min|^amplitude|^var|^avg|^stddev|^skewness|^kurtosis", allcols)]
trn<- trn[rawCols]
tinyTST<- tinyTST[rawCols]
print(dim(trn)[2]);print("XXXXX")
#Step 3: Create a test set that will not be touched in any analysis
#and will be used only for testing
testingRows <- sample(dim(trn)[1], size = dim(trn)[1]*TESTDATAPROB)
tstset <- trn[testingRows,] #do not touch it except to deselect columns.
trn <- trn[-testingRows,] #not intersected with testing at all.
print(dim(trn)[2]);print("XXXXX")
#Step 4: Get rid of highly correlated columns.  The screen runs on a random
#subsample of training rows; the resulting columns are dropped everywhere.
corrCols = getCorrelatedCols(trn[sample(dim(trn)[1], size = CORRSAMPLESIZE), ]);
keep = setdiff(names(trn), corrCols)
print(length(corrCols))
#BUGFIX: subset the set-aside test set itself, not trn -- the original
#`tstset <- trn[, keep]` replaced the held-out test rows with training
#rows, so "AccuracyOnSetAsideTest" below was measured on training data.
tstset <- tstset[, keep] #do not touch it except to deselect columns.
trn <- trn[, keep] #not intersected with testing at all.
tinyTST<- tinyTST[, keep]
print(dim(trn)[2]);print("XXXXX")
#Step 5: Now do the main experiment. we will create training sets of
#diverse sizes (increasing), learn a model of them, check the accuracy
#of the model. We will try to hit 99%+ accuracy. Each model will be
#tested on two sets 1) The main test set (tstset) created above and 2)
#all other rows not used in training. If total rows are 20000 and
#5000 are in the tstset, potentially we can use 15000 roes for
#training. But we used only NN for training. The remaining 15000-N
#can also be used as a separate test set. We will vary NN from 100 to
#10000 and increase its value by 25% each time.
#starting training-set size; grown by 25% per pass below
NN = 3508
answer = NULL
df = NULL;
#keep the top 75% most important columns (plus the outcome "classe")
bestN = floor(0.75 * dim(trn)[2]);
features = union(mostImportantFeatures(trn)[1:bestN], "classe")
trn <- trn[,features]
tstset <- tstset[,features]
tinyTST <- tinyTST[,features]
print(dim(trn)[2]);print("XXXXX")
#150 is just a safe upper bound on passes; the break below stops the loop
#once NN exceeds 7000
for (i in 1:150) {
#snapshot the data sets; restored at the end of each pass
savedTrn <- trn
savedTst <- tstset
savedTiny <- tinyTST
NN = floor(NN * 1.25)
if (NN > 7000) {
break;
}
rows = sample(dim(trn)[1], NN);
thisTrain <- trn[rows,]
thisTest <- trn[-rows,]
L <- learn(thisTrain, thisTest)
#predicted classes for the small quiz set, collapsed to one string
answer <- paste(predict(L$fit, newdata = tinyTST),sep="", collapse="")
cm <- confusionMatrix(predict(L$fit, newdata = tstset), tstset$classe)
#accumulate one result row per pass
if (is.null(df)) {
df = c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer)
names(df) = c("TrainingSize", "Features", "ApproxTime", "AccuracyOnUnusedTraining", "AccuracyOnSetAsideTest", "CatsOfTiny")
} else {
df = rbind(df, c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer))
}
print(df)
#checkpoint results after every pass (filename pattern: CorrFile_<NN>_.csv)
write.csv(df, paste("CorrFile", NN, ".csv", sep="_"));
trn <- savedTrn
tstset <- savedTst
tinyTST <- savedTiny
}
#load required libraries. Set some variable.
library(dplyr)
library(caret)
require(dplyr)
require(caret)
#Correlation threshold used by getCorrelatedCols(); -1 is a placeholder
#that pmlInit() overwrites with .8 before the screen runs.
CORRTHRESH = -1
#Number of training rows sampled when screening for correlated columns.
CORRSAMPLESIZE = 1500
#Placeholder used to preallocate name vectors; assumed never to collide
#with a real column name.
UNLIKELYSTR = "impossiblevaxdfd"
#Define some needed functions
#determines if a column has all numeric values
#Report whether every value in x is numeric.  is.numeric() yields a single
#flag for the whole vector, so the original subset()-based length test
#reduces to: x is a numeric vector, or x is empty.
fullyNumeric = function(x) {
  is.numeric(x) || length(x) == 0
}
#determines if a column has no missing value
#Report whether x is free of NA values (the original counted the elements
#selected by is.na() and compared the count against zero).
noMissingValue = function(x) {
  !anyNA(x)
}
#find very correlated columns
#Return the names of columns of corrsampl that take part in at least one
#strongly correlated pair (|cor| > CORRTHRESH, a global).  Only fully
#numeric, NA-free columns are screened; UNLIKELYSTR (global) is the
#preallocation placeholder and must not collide with a real column name.
getCorrelatedCols = function(corrsampl) {
  rawCols = colnames(corrsampl)
  #seq_along() is safe for a zero-column frame, where 1:length() would
  #yield c(1, 0); vapply() pins the result type that sapply() only guesses
  numCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {fullyNumeric(unlist(corrsampl[,x]))}, logical(1))]
  nonaCols = rawCols[vapply(seq_along(rawCols), FUN=function(x) {noMissingValue(unlist(corrsampl[,x]))}, logical(1))]
  hccds = corrsampl[,intersect(numCols, nonaCols)]
  allcor = cor(hccds, hccds)
  N <- dim(allcor)[1]
  #preallocate; R extends the vector automatically if more pairs are found
  correlatedCols = rep(UNLIKELYSTR, N)
  k = 1
  vars <- colnames(hccds)
  #scan the upper triangle for pairs above the threshold, recording the
  #first column of each pair
  for (i in seq_len(N)) {
    for (j in i:N) {
      if ((abs(allcor[i,j]) > CORRTHRESH) && (i != j)) {
        correlatedCols[k] = vars[i]
        k=k+1
      }
    }
  }
  setdiff(unique(correlatedCols), c(UNLIKELYSTR))
}
#use the training set to learn a random forest. Return predictions
#and some other important info as list.
#Fit a random forest ("classe ~ .") on trainingSet with 5-fold CV and
#evaluate it on testingSet.  Returns a list: tc (trainControl), goodCols
#(column names used), numCols, fit (the caret model), cm (confusion matrix
#on testingSet) and approxTime (elapsed whole seconds).
learn <- function(trainingSet, testingSet) {
t1 = Sys.time()
tmp = list()
tmp$tc <- trainControl(method="cv", number=5)
tmp$goodCols <- names(trainingSet);
tmp$numCols <- dim(trainingSet)[2]
tmp$fit <- train(classe ~ ., method="rf", data=trainingSet, prox=TRUE, trControl=tmp$tc)
tmp$cm <- confusionMatrix(predict(tmp$fit, newdata = testingSet), testingSet$classe);
#BUGFIX: "Sys.time() - t1" is a difftime whose units auto-select (secs vs
#mins vs hours) with run length, so the ApproxTime column mixed units
#across runs; request seconds explicitly.
tmp$approxTime = floor(as.numeric(difftime(Sys.time(), t1, units = "secs")))
tmp
}
#solve a large number of smaller learning problems (given by
#iterations). From each returned random forest, pick the most
#important columns. Pick the columns that pay biggest role in all
#iteratons combined.
#Rank the columns of trainingSet by cumulative, accuracy-weighted random
#forest importance over several small training runs.
#trainingSet: data frame with a "classe" outcome column.
#iterations: number of independent runs to accumulate.
#SSSize: rows subsampled per run (the whole set is used if it is smaller).
#Returns the column names of trainingSet, most important first.
#NOTE(review): each run trains and evaluates on the same rows
#(learn(trn, trn)), so the weighting accuracy is an in-sample estimate.
mostImportantFeatures <- function(trainingSet, iterations=10, SSSize = 300) {
cols = names(trainingSet)
importance <- rep(0.0, length(cols));
for (i in 1:iterations) {
trn = trainingSet
#work on a random subsample when the training set is large
if (SSSize < dim(trainingSet)[1]) {
smpl = sample(dim(trainingSet)[1], SSSize);
trn <- trainingSet[smpl, ]
}
learning = learn(trn, trn)
#per-column importance from the underlying randomForest model
imp <- learning$fit$finalModel["importance"];
accuracy = as.data.frame(learning$cm["overall"])["Accuracy",];
#accumulate importance scaled by this run's accuracy; columns absent from
#the forest (e.g. the outcome itself) contribute 0
importance <- importance +
sapply(cols, FUN = function(x){if (is.na(imp[1]$importance[,][x])) {0} else {imp[1]$importance[,][x]*accuracy}});
}
names(trainingSet)[order(importance, decreasing=TRUE)]
}
#Step 1: Setup initial environment. WD, SEED, DATA, constants
#Globals populated by pmlInit() below.
trn = NULL
tinyTST = NULL
#Initialise the environment: working directory, data, RNG seed, constants.
#NOTE(review): rm(list=ls()) inside a function lists the function's own
#(empty) environment, so it clears nothing -- it does not reset the global
#workspace as presumably intended.
pmlInit = function () {
rm(list=ls())
#placeholder path; point this at the parent of the Data directory
setwd(
paste("SET_TO_THE-PARENT_OF_DATA_DIRECTORY_WHERE",
"THE_FOLLOWING_TWO_FILES_ARE",
sep="", collapse="")
)
#assign into the global environment so the script steps below can use them
trn <<- read.csv("../Data/pml-training.csv")
tinyTST <<- read.csv("../Data/pml-testing.csv")
#fixed seed for reproducible sampling
set.seed(71679174)
TESTDATAPROB <<- .3
CORRTHRESH <<- .8
}
pmlInit()
#Step 2: convert to tidy, remove stat-summary cols
trn <- tbl_df(trn)
tinyTST <- tbl_df(tinyTST)
#keep columns 2..160 (drops the first column, presumably a row id -- confirm)
trn<-trn[,2:160]
tinyTST<-tinyTST[,2:160]
allcols = colnames(trn)
print(dim(trn)[2]);print("XXXXX")
#drop the summary-statistic columns (max/min/amplitude/var/avg/...)
rawCols <- allcols[-grep("^max|^min|^amplitude|^var|^avg|^stddev|^skewness|^kurtosis", allcols)]
trn<- trn[rawCols]
tinyTST<- tinyTST[rawCols]
print(dim(trn)[2]);print("XXXXX")
#Step 3: Create a test set that will not be touched in any analysis
#and will be used only for testing
testingRows <- sample(dim(trn)[1], size = dim(trn)[1]*TESTDATAPROB)
tstset <- trn[testingRows,] #do not touch it except to deselect columns.
trn <- trn[-testingRows,] #not intersected with testing at all.
print(dim(trn)[2]);print("XXXXX")
#Step 4: Get rid of highly correlated columns.  The screen runs on a random
#subsample of training rows; the resulting columns are dropped everywhere.
corrCols = getCorrelatedCols(trn[sample(dim(trn)[1], size = CORRSAMPLESIZE), ]);
keep = setdiff(names(trn), corrCols)
print(length(corrCols))
#BUGFIX: subset the set-aside test set itself, not trn -- the original
#`tstset <- trn[, keep]` replaced the held-out test rows with training
#rows, so "AccuracyOnSetAsideTest" below was measured on training data.
tstset <- tstset[, keep] #do not touch it except to deselect columns.
trn <- trn[, keep] #not intersected with testing at all.
tinyTST<- tinyTST[, keep]
print(dim(trn)[2]);print("XXXXX")
#Step 5: Main experiment. Training sets of increasing size (NN grows by
#25% each iteration, starting from 3508) are used to fit models, aiming
#for 99%+ accuracy. Each model is evaluated against two sets: (1) the
#set-aside test set (tstset) created above and (2) the remainder of the
#training pool not sampled for that iteration.
NN <- 3508
answer <- NULL
df <- NULL
# Keep only the top 75% most important features, plus the outcome column.
bestN <- floor(0.75 * dim(trn)[2])
features <- union(mostImportantFeatures(trn)[1:bestN], "classe")
trn <- trn[, features]
tstset <- tstset[, features]
tinyTST <- tinyTST[, features]
print(dim(trn)[2]); print("XXXXX")
# Main experiment loop: grow NN by 25% each pass until it exceeds 7000.
# The iteration counter `i` is unused; the loop exits via `break`.
for (i in 1:150) {
# Snapshot the data sets so each iteration starts from the same state
# (the loop body does not actually modify them, but this is defensive).
savedTrn <- trn
savedTst <- tstset
savedTiny <- tinyTST
NN = floor(NN * 1.25)
if (NN > 7000) {
break;
}
# Random training sample of size NN; the complement serves as an
# additional "unused training" evaluation set.
rows = sample(dim(trn)[1], NN);
thisTrain <- trn[rows,]
thisTest <- trn[-rows,]
# `learn()` is defined elsewhere in this file; presumably it returns a
# fitted model ($fit) plus metadata ($numCols, $approxTime, $cm) -- TODO confirm.
L <- learn(thisTrain, thisTest)
# Concatenate the per-row predictions on the tiny test set into one string.
answer <- paste(predict(L$fit, newdata = tinyTST),sep="", collapse="")
cm <- confusionMatrix(predict(L$fit, newdata = tstset), tstset$classe)
# Accumulate one result row per iteration. NOTE(review): mixing numbers
# and the `answer` string in c() coerces everything to character, so
# `df` ends up as a character matrix in the written CSVs.
if (is.null(df)) {
df = c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer)
names(df) = c("TrainingSize", "Features", "ApproxTime", "AccuracyOnUnusedTraining", "AccuracyOnSetAsideTest", "CatsOfTiny")
} else {
df = rbind(df, c(NN, L$numCols, L$approxTime, as.data.frame(L$cm["overall"])["Accuracy",], as.data.frame(cm["overall"])["Accuracy",], answer))
}
print(df)
# Checkpoint the cumulative results after each iteration
# (produces file names like "CorrFile_4385_.csv").
write.csv(df, paste("CorrFile", NN, ".csv", sep="_"));
trn <- savedTrn
tstset <- savedTst
tinyTST <- savedTiny
}
|
library(tidyverse)
library(Seurat)
library(matrixStats)
library(ggpubr)
library(data.table)
##### Set Up Directories #####
# Project root plus derived input (datadir) and output (outdir) paths;
# the output directory is created if it does not already exist.
dir <- "/directflow/SCCGGroupShare/projects/DrewNeavin/iPSC_Village/"
datadir <- paste0(dir,"output/All_data_integrated_remove_bad/")
outdir <- paste0(dir,"output/Expression_Correlations/")
dir.create(outdir)
##### Set Colors for Figures #####
# Fixed per-cell-line colors and site display-name mapping used by figures.
line_colors <- c(FSA0006 = "#F79E29", MBE1006 = "#9B2C99", TOB0421 = "#35369C")
site_updates <- c("Brisbane" = "Site 1", "Sydney" = "Site 3" ,"Melbourne" = "Site 2")
##### Figure Function #####
# Save `plot` under `basename` in PNG, PDF and EPS formats at the given size.
save_figs <- function(plot, basename, width = 17, height = 17, units = "cm"){
  for (ext in c(".png", ".pdf", ".eps")) {
    ggsave(plot, filename = paste0(basename, ext), height = height, width = width, units = units)
  }
}
##### Read in Data #####
seurat <- readRDS(paste0(datadir,"seurat_integrated_all_times_clustered.rds"))
# Interactive inspection of the metadata columns used for splitting.
colnames(seurat@meta.data)
unique(seurat@meta.data$Location)
unique(seurat@meta.data$Time)
unique(seurat@meta.data$Final_Assignment)
unique(seurat@meta.data$MULTI_ID)
# Strip any suffix after the first underscore from the Location labels.
seurat@meta.data$Location <- gsub("_.+", "", seurat@meta.data$Location)
# Level 1 split: one Seurat object per Location.
seurat_list <- lapply(unique(seurat@meta.data$Location), function(x){
subset(seurat, subset = Location == x)
})
names(seurat_list) <- unique(seurat@meta.data$Location)
# Level 2 split: per Location, one object per Time point.
# The temp[[x]] indexing just nests the per-day list under the location key
# before returning it.
seurat_list_list <- lapply(names(seurat_list), function(x){
temp <- list()
for (day in unique(seurat_list[[x]]@meta.data$Time)){
temp[[x]][[day]] <- subset(seurat_list[[x]], subset = Time == day)
}
return(temp[[x]])
})
names(seurat_list_list) <- unique(seurat@meta.data$Location)
# Level 3 split: per Location/Time, one object per cell line (Final_Assignment).
seurat_list_list_list <- lapply(names(seurat_list_list), function(x){
temp2 <- lapply(names(seurat_list_list[[x]]), function(y){
temp <- list()
for (assign in unique(seurat_list_list[[x]][[y]]$Final_Assignment)){
temp[[x]][[y]][[assign]] <- subset(seurat_list_list[[x]][[y]], subset = Final_Assignment == assign)
}
return(temp[[x]][[y]])
})
# NOTE(review): assumes the Time values appear in the same order for every
# location as in seurat_list[[x]] -- TODO confirm.
names(temp2) <- unique(seurat_list[[x]]@meta.data$Time)
return(temp2)
})
names(seurat_list_list_list) <- unique(seurat@meta.data$Location)
# Level 4 split: per Location/Time/line, one object per replicate (MULTI_ID).
seurat_list_list_list_list <- lapply(names(seurat_list_list), function(x){
temp2 <- lapply(names(seurat_list_list_list[[x]]), function(y){
temp3 <- lapply(names(seurat_list_list_list[[x]][[y]]), function(z){
temp <- list()
for (rep in unique(seurat_list_list_list[[x]][[y]][[z]]$MULTI_ID)){
temp[[x]][[y]][[z]][[rep]] <- subset(seurat_list_list_list[[x]][[y]][[z]], subset = MULTI_ID == rep)
}
return(temp[[x]][[y]][[z]])
})
print(temp3)
# NOTE(review): relies on lapply order matching the order of unique
# Final_Assignment values -- TODO confirm.
names(temp3) <- unique(seurat_list_list[[x]][[y]]$Final_Assignment)
return(temp3)
})
names(temp2) <- unique(seurat_list[[x]]@meta.data$Time)
return(temp2)
})
names(seurat_list_list_list_list) <- unique(seurat@meta.data$Location)
### 10% to start
# Restrict every Location/Time/line/replicate subset to genes detected
# (nonzero SCT counts) in more than 10% of cells in the full object.
seurat_list_list_list_list_sub <- lapply(seurat_list_list_list_list, function(x){
  lapply(x, function(y){
    lapply(y, function(z){
      lapply(z, function(rep){
        subset(rep, features = rownames(seurat)[which((rowSums(seurat[["SCT"]]@counts > 0)/ncol(seurat[["SCT"]]@counts)) > 0.1)])
      })
    })
  })
})
# BUG FIX: this previously referenced the nonexistent object `seurat_sub`,
# which would error at runtime; the location names come from `seurat`.
names(seurat_list_list_list_list_sub) <- unique(seurat@meta.data$Location)
saveRDS(seurat_list_list_list_list_sub, paste0(outdir, "subset_seurat_list.rds"))
seurat_list_list_list_list_sub <- readRDS(paste0(outdir, "subset_seurat_list.rds"))
# Per-replicate mean expression (SCT scale.data) for every gene, flattened
# across the Location/Time/line/replicate hierarchy into one data.frame.
summary_list <- lapply(names(seurat_list_list_list_list), function(x){
temp3 <- lapply(names(seurat_list_list_list_list[[x]]), function(y){
temp2 <- lapply(names(seurat_list_list_list_list[[x]][[y]]), function(z){
temp <- lapply(names(seurat_list_list_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(seurat_list_list_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(seurat_list_list_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@scale.data), Replicate = rep, Line = z, Time = y, Location = x)
})
# print(head(temp))
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
# NOTE(review): `summary` shadows base::summary for the rest of the script.
summary <- do.call(rbind, summary_list)
# Normalize replicate labels: site-specific MULTI_IDs all become "ReplicateN".
summary$Replicate <- gsub("Brisbane", "Replicate", summary$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
# One column per replicate, then cross-replicate SD and mean per gene.
summary_wide <- pivot_wider(summary, names_from = Replicate, values_from = Mean)
summary_wide$SD <- rowSds(as.matrix(summary_wide[,c("Replicate1", "Replicate2", "Replicate3")]))
summary_wide$Mean <- rowMeans(as.matrix(summary_wide[,c("Replicate1", "Replicate2", "Replicate3")]))
# Self-join on Gene/Line/Location to pair Baseline (x) against the other
# time points (y) for the same gene.
summary_wide_wide <- inner_join(summary_wide, summary_wide, by = c("Gene", "Line", "Location"))
summary_wide_wide <- summary_wide_wide[which(summary_wide_wide$Time.x != summary_wide_wide$Time.y),]
summary_wide_wide <- summary_wide_wide[which(summary_wide_wide$Time.x == "Baseline"),]
# One Baseline-vs-other scatter plot per location.
pExpression_Correlation <- list()
for (location in unique(summary_wide_wide$Location)){
pExpression_Correlation[[location]] <- ggplot(summary_wide_wide[which(summary_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
geom_point() +
theme_classic()
ggsave(pExpression_Correlation[[location]], filename = paste0(outdir,location,"_Expression_Correlation.png"))
}
##### Redo with separately assessed by SCT #####
# Re-run using objects that were SCT-normalized separately per site,
# plus the Sydney cryopreserved comparison data.
location_SCT <- readRDS(paste0(dir,"output/Variance_contributions_lm_scale_data/seurat_SCT_wRB_sydney_regress_all_covs.rds"))
cryo_SCT <- readRDS(paste0(dir,"output/Variance_contributions_lm_scale_data_Sydney/seurat_SCT_wRB_sydney_regress_all_covs.rds"))
### Update names to combine objects ###
names(location_SCT) <- c("Baseline", "Village")
location_SCT <- lapply(location_SCT, function(x){
names(x) <- c("Brisbane", "Melbourne", "Sydney_Fresh")
return(x)
})
# Map the cryo time-point labels onto the Baseline/Village scheme.
cryo_SCT_updated <- list()
cryo_SCT_updated[["Baseline"]][["Sydney_Fresh"]] <- cryo_SCT[["Baseline"]]
cryo_SCT_updated[["Baseline"]][["Sydney_Cryopreserved"]] <- cryo_SCT[["Thawed Village Day 0"]]
cryo_SCT_updated[["Village"]][["Sydney_Fresh"]] <- cryo_SCT[["Village Day 4"]]
cryo_SCT_updated[["Village"]][["Sydney_Cryopreserved"]] <- cryo_SCT[["Thawed Village Day 7"]]
# Merge: location objects plus the Sydney cryopreserved entries.
SCT_combined <- location_SCT
SCT_combined[["Baseline"]][["Sydney_Cryopreserved"]] <- cryo_SCT_updated[["Baseline"]][["Sydney_Cryopreserved"]]
SCT_combined[["Village"]][["Sydney_Cryopreserved"]] <- cryo_SCT_updated[["Village"]][["Sydney_Cryopreserved"]]
# Split each Time/Location object by cell line (Final_Assignment).
SCT_combined_list <- lapply(names(SCT_combined), function(x){
temp2 <- lapply(names(SCT_combined[[x]]), function(y){
temp <- list()
for (assign in unique(SCT_combined[[x]][[y]]$Final_Assignment)){
temp[[x]][[y]][[assign]] <- subset(SCT_combined[[x]][[y]], subset = Final_Assignment == assign)
}
return(temp[[x]][[y]])
})
names(temp2) <- unique(names(SCT_combined[[x]]))
return(temp2)
})
names(SCT_combined_list) <- names(SCT_combined)
# Further split by replicate (MULTI_ID).
SCT_combined_list_list <- lapply(names(SCT_combined_list), function(x){
temp2 <- lapply(names(SCT_combined_list[[x]]), function(y){
temp3 <- lapply(names(SCT_combined_list[[x]][[y]]), function(z){
temp <- list()
for (rep in unique(SCT_combined_list[[x]][[y]][[z]]$MULTI_ID)){
temp[[x]][[y]][[z]][[rep]] <- subset(SCT_combined_list[[x]][[y]][[z]], subset = MULTI_ID == rep)
}
return(temp[[x]][[y]][[z]])
})
print(temp3)
# NOTE(review): relies on lapply order matching the order of unique
# Final_Assignment values -- TODO confirm.
names(temp3) <- unique(SCT_combined[[x]][[y]]$Final_Assignment)
return(temp3)
})
names(temp2) <- names(SCT_combined[[x]])
return(temp2)
})
names(SCT_combined_list_list) <- names(SCT_combined)
saveRDS(SCT_combined_list_list, paste0(outdir, "subset_seurat_list_sep.rds"))
SCT_combined_list_list <- readRDS(paste0(outdir, "subset_seurat_list_sep.rds"))
# Per-replicate mean expression (scale.data) and cell counts (N) from the
# separately SCT-normalized objects. Hierarchy here is Time/Location/Line.
summary_SCT_list <- lapply(names(SCT_combined_list_list), function(x){
temp3 <- lapply(names(SCT_combined_list_list[[x]]), function(y){
temp2 <- lapply(names(SCT_combined_list_list[[x]][[y]]), function(z){
temp <- lapply(names(SCT_combined_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(SCT_combined_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@scale.data), N = ncol(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@scale.data), Replicate = rep, Line = z, Location = y, Time = x)
})
# print(head(temp))
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
summary_SCT <- data.table(do.call(rbind, summary_SCT_list))
summary_SCT$Replicate <- gsub("Brisbane", "Replicate", summary_SCT$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
##### Get unique id for each #####
summary_SCT$ID <- paste(summary_SCT$Location, summary_SCT$Time, summary_SCT$Line, summary_SCT$Replicate, sep = "-")
# Spearman correlation of mean expression between every pair of groups,
# over their shared genes. NOTE(review): this computes the FULL matrix
# (self-pairs and both orders), unlike the upper-triangle versions below.
correlations <- lapply(unique(summary_SCT$ID), function(group1){
print(group1)
tmp <- lapply(unique(summary_SCT$ID), function(group2){
print(group2)
genes <- summary_SCT[ID == group1][summary_SCT[ID == group1]$Gene %in% summary_SCT[ID == group2]$Gene]$Gene
print(all(summary_SCT[ID == group1][Gene %in% genes]$Gene == summary_SCT[ID == group2][Gene %in% genes]$Gene))
return(data.table(Group1 = group1, Group2 = group2, Spearman = cor(summary_SCT[ID == group1][Gene %in% genes]$Mean, summary_SCT[ID == group2][Gene %in% genes]$Mean, method = "spearman")))
})
return(do.call(rbind, tmp))
})
saveRDS(correlations, paste0(outdir, "correlations.rds"))
correlations <- readRDS(paste0(outdir, "correlations.rds"))
# Same full pairwise matrix as above but with Pearson correlation.
correlations_pearson <- lapply(unique(summary_SCT$ID), function(group1){
print(group1)
tmp <- lapply(unique(summary_SCT$ID), function(group2){
print(group2)
genes <- summary_SCT[ID == group1][summary_SCT[ID == group1]$Gene %in% summary_SCT[ID == group2]$Gene]$Gene
print(all(summary_SCT[ID == group1][Gene %in% genes]$Gene == summary_SCT[ID == group2][Gene %in% genes]$Gene))
return(data.table(Group1 = group1, Group2 = group2, Pearson = cor(summary_SCT[ID == group1][Gene %in% genes]$Mean, summary_SCT[ID == group2][Gene %in% genes]$Mean)))
})
return(do.call(rbind, tmp))
})
correlations_pearson_dt <- do.call(rbind, correlations_pearson)
# Sanity-check scatter for one cross-line pair (data.table join on Gene;
# i.Mean is the second group's mean).
tmp <- summary_SCT[ID == "Brisbane-Baseline-FSA0006-Replicate1"][summary_SCT[ID == "Brisbane-Baseline-TOB0421-Replicate1"], on = "Gene"]
scatter_test <- ggplot(tmp, aes(Mean,i.Mean)) +
geom_point() +
theme_classic()
ggsave(scatter_test, filename = paste0(outdir, "test_correlation.png"))
# Split the composite group IDs back into their four components.
correlations_dt <- do.call(rbind, correlations)
correlations_dt[, c("Location1", "Village1", "Line1", "Replicate1") := tstrsplit(Group1, "-", fixed=TRUE)]
correlations_dt[, c("Location2", "Village2", "Line2", "Replicate2") := tstrsplit(Group2, "-", fixed=TRUE)]
##### Try with data slot instead #####
# Same per-replicate summary but using the (log-normalized) SCT @data
# slot rather than @scale.data.
summary_SCT_data_list <- lapply(names(SCT_combined_list_list), function(x){
temp3 <- lapply(names(SCT_combined_list_list[[x]]), function(y){
temp2 <- lapply(names(SCT_combined_list_list[[x]][[y]]), function(z){
temp <- lapply(names(SCT_combined_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(SCT_combined_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@data), N = ncol(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@data), Replicate = rep, Line = z, Location = y, Time = x)
})
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
summary_SCT_data <- data.table(do.call(rbind, summary_SCT_data_list))
summary_SCT_data$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_data$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
##### Get unique id for each #####
summary_SCT_data$ID <- paste(summary_SCT_data$Location, summary_SCT_data$Time, summary_SCT_data$Line, summary_SCT_data$Replicate, sep = "-")
# Pairwise Spearman correlations between group means (data slot), computed
# over the upper triangle only (each unordered pair once).
correlations_data <- lapply(unique(summary_SCT_data$ID), function(group1){
  print(group1)
  ids <- unique(summary_SCT_data$ID)
  pos <- which(ids == group1)
  # BUG FIX: for the final ID, (pos + 1):length(ids) counted DOWN
  # (e.g. (N+1):N == c(N+1, N)), yielding an out-of-range NA partner
  # plus a spurious self-comparison. Skip the last ID instead.
  if (pos >= length(ids)) return(NULL)
  tmp <- lapply(ids[(pos + 1):length(ids)], function(group2){
    print(group2)
    genes <- summary_SCT_data[ID == group1][summary_SCT_data[ID == group1]$Gene %in% summary_SCT_data[ID == group2]$Gene]$Gene
    print(all(summary_SCT_data[ID == group1][Gene %in% genes]$Gene == summary_SCT_data[ID == group2][Gene %in% genes]$Gene))
    return(data.table(Group1 = group1, Group2 = group2, Spearman = cor(summary_SCT_data[ID == group1][Gene %in% genes]$Mean, summary_SCT_data[ID == group2][Gene %in% genes]$Mean, method = "spearman")))
  })
  return(do.call(rbind, tmp))
})
saveRDS(correlations_data, paste0(outdir, "correlations_data.rds"))
correlations_data <- readRDS(paste0(outdir, "correlations_data.rds"))
correlations_data_dt <- do.call(rbind, correlations_data)
# Split composite IDs into components for group-wise filtering below.
correlations_data_dt[, c("Location1", "Village1", "Line1", "Replicate1") := tstrsplit(Group1, "-", fixed=TRUE)]
correlations_data_dt[, c("Location2", "Village2", "Line2", "Replicate2") := tstrsplit(Group2, "-", fixed=TRUE)]
# Keep only fresh-sample comparisons (drop cryo-vs-cryo pairs).
correlations_data_fresh_dt <- correlations_data_dt[Location1 != "Sydney_Cryopreserved" | Location2 != "Sydney_Cryopreserved"]
##### #####
# Classify each pair by the single factor that differs between its groups:
# Village (time point), Replicate, Location, or Line.
correlations_data_fresh_village_dt <- correlations_data_fresh_dt[Replicate1 == Replicate2 & Line1 == Line2 & Location1 == Location2 & Village1 != Village2 & Group1 != Group2]
correlations_data_fresh_village_dt$Group <- "Village"
correlations_data_fresh_replicate_dt <- correlations_data_fresh_dt[Village1 == Village2 & Line1 == Line2 & Location1 == Location2 & Replicate1 != Replicate2 & Group1 != Group2]
correlations_data_fresh_replicate_dt$Group <- "Replicate"
correlations_data_fresh_location_dt <- correlations_data_fresh_dt[Replicate1 == Replicate2 & Line1 == Line2 & Village1 == Village2 & Location1 != Location2 & Group1 != Group2]
correlations_data_fresh_location_dt$Group <- "Location"
# NOTE(review): unlike the other categories, the Line category does not
# require matching replicates -- presumably intentional; verify.
correlations_data_fresh_line_dt <- correlations_data_fresh_dt[Village1 == Village2 & Location1 == Location2 & Line1 != Line2 & Group1 != Group2]
correlations_data_fresh_line_dt$Group <- "Line"
correlations_data_fresh_combined_dt <- rbind(correlations_data_fresh_village_dt, correlations_data_fresh_replicate_dt, correlations_data_fresh_location_dt, correlations_data_fresh_line_dt)
correlations_data_fresh_combined_dt$Group <- factor(correlations_data_fresh_combined_dt$Group, levels = c("Replicate", "Village", "Line", "Location"))
# Pairwise Wilcoxon tests between correlation categories (upper triangle
# only: each unordered category pair once).
# BUG FIX: the original iterated over the factor values directly, so for
# the last category the inner sequence (k+1):N counted DOWN, indexing an
# out-of-range NA level; list indexing by factor was also fragile. Work
# with character labels and explicit index pairs instead.
group_levels <- as.character(unique(correlations_data_fresh_combined_dt$Group))
res <- list()
for (i in seq_len(length(group_levels) - 1)) {
  for (j in seq(i + 1, length(group_levels))) {
    group1 <- group_levels[i]
    group2 <- group_levels[j]
    res[[group1]][[group2]] <- wilcox.test(
      correlations_data_fresh_combined_dt[Group == group1]$Spearman,
      correlations_data_fresh_combined_dt[Group == group2]$Spearman,
      exact = FALSE)
  }
}
# Per-category median Spearman (mutate keeps all rows; the median column
# is used only for the dashed vline below).
correlations_data_fresh_combined_dt_med <- correlations_data_fresh_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
# Histogram of Spearman correlations per category, with median markers.
correlation_fresh_dist <- ggplot(correlations_data_fresh_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Village", "Line", "Location"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_data_fresh_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_fresh_dist, filename = paste0(outdir, "correlation_distributions_fresh.png"), width = 3)
ggsave(correlation_fresh_dist, filename = paste0(outdir, "correlation_distributions_fresh.pdf"), width = 3)
fwrite(correlations_data_fresh_combined_dt, paste0(outdir,"correlation_distribution_table.tsv"), sep = "\t")
##### For cryopreserved #####
# Sydney-only comparisons: fresh vs cryopreserved samples.
correlations_data_combined_cryo_dt <- correlations_data_dt[grepl("Sydney", Location1) & grepl("Sydney", Location2)]
correlations_data_combined_cryo_dt$Cryopreserved1 <- gsub("Sydney_", "",correlations_data_combined_cryo_dt$Location1)
correlations_data_combined_cryo_dt$Cryopreserved2 <- gsub("Sydney_", "",correlations_data_combined_cryo_dt$Location2)
##### #####
# Classify each Sydney pair by the single differing factor.
correlations_data_cryo_village_dt <- correlations_data_combined_cryo_dt[Replicate1 == Replicate2 & Line1 == Line2 & Cryopreserved1 == Cryopreserved2 & Village1 != Village2 & Group1 != Group2]
correlations_data_cryo_village_dt$Group <- "Village"
correlations_data_cryo_replicate_dt <- correlations_data_combined_cryo_dt[Village1 == Village2 & Line1 == Line2 & Cryopreserved1 == Cryopreserved2 & Replicate1 != Replicate2 & Group1 != Group2]
correlations_data_cryo_replicate_dt$Group <- "Replicate"
correlations_data_cryo_cryo_dt <- correlations_data_combined_cryo_dt[Replicate1 == Replicate2 & Line1 == Line2 & Village1 == Village2 & Cryopreserved1 != Cryopreserved2 & Group1 != Group2]
correlations_data_cryo_cryo_dt$Group <- "Cryopreserved"
correlations_data_cryo_line_dt <- correlations_data_combined_cryo_dt[Village1 == Village2 & Cryopreserved1 == Cryopreserved2 & Line1 != Line2 & Group1 != Group2]
correlations_data_cryo_line_dt$Group <- "Line"
correlations_data_cryo_combined_dt <- rbind(correlations_data_cryo_village_dt, correlations_data_cryo_replicate_dt, correlations_data_cryo_cryo_dt, correlations_data_cryo_line_dt)
correlations_data_cryo_combined_dt$Group <- factor(correlations_data_cryo_combined_dt$Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))
# Per-category medians and the faceted histogram, as in the fresh section.
correlations_data_cryo_combined_dt_med <- correlations_data_cryo_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlation_cryo_dist <- ggplot(correlations_data_cryo_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_data_cryo_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.png"), width = 3)
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.pdf"), width = 3)
##### For cryopreserved with fresh #####
# Pairs where at least one side is a Sydney sample; non-Sydney sides are
# labeled "Fresh".
correlations_data_combined_cryo_fresh_dt <- correlations_data_dt[grepl("Sydney", Location1) | grepl("Sydney", Location2)]
correlations_data_combined_cryo_fresh_dt$Cryopreserved1 <- ifelse(grepl("Sydney",correlations_data_combined_cryo_fresh_dt$Location1), gsub("Sydney_", "",correlations_data_combined_cryo_fresh_dt$Location1), "Fresh")
correlations_data_combined_cryo_fresh_dt$Cryopreserved2 <- ifelse(grepl("Sydney",correlations_data_combined_cryo_fresh_dt$Location2), gsub("Sydney_", "",correlations_data_combined_cryo_fresh_dt$Location2), "Fresh")
# Quick mean comparisons of Sydney fresh vs cryopreserved at each time point.
mean(correlations_data_dt[grepl("Sydney_Fresh-Baseline", Group1) | grepl("Sydney_Fresh-Baseline", Group2)]$Spearman)
mean(correlations_data_dt[grepl("Sydney_Cryopreserved-Baseline", Group1) | grepl("Sydney_Cryopreserved-Baseline", Group2)]$Spearman)
mean(correlations_data_dt[grepl("Sydney_Fresh-Village", Group1) | grepl("Sydney_Fresh-Village", Group2)]$Spearman)
mean(correlations_data_dt[grepl("Sydney_Cryopreserved-Village", Group1) | grepl("Sydney_Cryopreserved-Village", Group2)]$Spearman, na.rm = TRUE)
##### #####
# Village-differing pairs among the cryo+fresh set; Site_color records the
# fresh side's location (empty when neither side is fresh).
correlations_data_cryo_fresh_village_dt <- correlations_data_combined_cryo_fresh_dt[Line1 == Line2 & Village1 != Village2 & Group1 != Group2]
correlations_data_cryo_fresh_village_dt$Group <- "Village"
# BUG FIX: the outer ifelse() was missing its closing parenthesis, which
# made the file unparseable from this point on.
correlations_data_cryo_fresh_village_dt$Site_color <- ifelse(correlations_data_cryo_fresh_village_dt$Cryopreserved1 == "Fresh", correlations_data_cryo_fresh_village_dt$Location1,
									ifelse(correlations_data_cryo_fresh_village_dt$Cryopreserved2 == "Fresh", correlations_data_cryo_fresh_village_dt$Location2, ""))
# NOTE(review): this looks like an unfinished copy-paste of the cryo
# section. `correlations_data_cryo_fresh_combined_dt` is never created
# anywhere above, so the next two assignments would fail; the plot and
# ggsave calls below also just recreate the cryo outputs (same variable
# names and file names). Verify intent before relying on this section.
correlations_data_cryo_fresh_combined_dt$Group <- factor(correlations_data_cryo_combined_dt$Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))
correlations_data_cryo_fresh_combined_dt_med <- correlations_data_cryo_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlation_cryo_dist <- ggplot(correlations_data_cryo_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_data_cryo_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.png"), width = 3)
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.pdf"), width = 3)
##### Try with counts slot instead #####
# Same per-replicate summary but using the corrected SCT @counts slot.
summary_SCT_counts_list <- lapply(names(SCT_combined_list_list), function(x){
temp3 <- lapply(names(SCT_combined_list_list[[x]]), function(y){
temp2 <- lapply(names(SCT_combined_list_list[[x]][[y]]), function(z){
temp <- lapply(names(SCT_combined_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(SCT_combined_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@counts), N = ncol(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@counts), Replicate = rep, Line = z, Location = y, Time = x)
})
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
summary_SCT_counts <- data.table(do.call(rbind, summary_SCT_counts_list))
summary_SCT_counts$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_counts$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
##### Get unique id for each #####
summary_SCT_counts$ID <- paste(summary_SCT_counts$Location, summary_SCT_counts$Time, summary_SCT_counts$Line, summary_SCT_counts$Replicate, sep = "-")
# Pairwise Spearman correlations between group means (counts slot), upper
# triangle only.
correlations_counts <- lapply(unique(summary_SCT_counts$ID), function(group1){
  print(group1)
  ids <- unique(summary_SCT_counts$ID)
  pos <- which(ids == group1)
  # BUG FIX: for the final ID, (pos + 1):length(ids) counted DOWN,
  # producing an out-of-range NA partner plus a self-comparison.
  if (pos >= length(ids)) return(NULL)
  tmp <- lapply(ids[(pos + 1):length(ids)], function(group2){
    print(group2)
    genes <- summary_SCT_counts[ID == group1][summary_SCT_counts[ID == group1]$Gene %in% summary_SCT_counts[ID == group2]$Gene]$Gene
    print(all(summary_SCT_counts[ID == group1][Gene %in% genes]$Gene == summary_SCT_counts[ID == group2][Gene %in% genes]$Gene))
    return(data.table(Group1 = group1, Group2 = group2, Spearman = cor(summary_SCT_counts[ID == group1][Gene %in% genes]$Mean, summary_SCT_counts[ID == group2][Gene %in% genes]$Mean, method = "spearman")))
  })
  return(do.call(rbind, tmp))
})
correlations_counts_dt <- do.call(rbind, correlations_counts)
# Split composite IDs into components for group-wise filtering.
correlations_counts_dt[, c("Location1", "Village1", "Line1", "Replicate1") := tstrsplit(Group1, "-", fixed=TRUE)]
correlations_counts_dt[, c("Location2", "Village2", "Line2", "Replicate2") := tstrsplit(Group2, "-", fixed=TRUE)]
# Classify each pair by its single differing factor (as in the data-slot
# section; here the Line category also requires matching replicates).
correlations_counts_village_dt <- correlations_counts_dt[Replicate1 == Replicate2 & Line1 == Line2 & Location1 == Location2 & Village1 != Village2 & Group1 != Group2]
correlations_counts_village_dt$Group <- "Village"
correlations_counts_replicate_dt <- correlations_counts_dt[Village1 == Village2 & Line1 == Line2 & Location1 == Location2 & Replicate1 != Replicate2 & Group1 != Group2]
correlations_counts_replicate_dt$Group <- "Replicate"
correlations_counts_location_dt <- correlations_counts_dt[Replicate1 == Replicate2 & Line1 == Line2 & Village1 == Village2 & Location1 != Location2 & Group1 != Group2]
correlations_counts_location_dt$Group <- "Location"
correlations_counts_line_dt <- correlations_counts_dt[Replicate1 == Replicate2 & Village1 == Village2 & Location1 == Location2 & Line1 != Line2 & Group1 != Group2]
correlations_counts_line_dt$Group <- "Line"
correlations_counts_combined_dt <- rbind(correlations_counts_village_dt, correlations_counts_replicate_dt, correlations_counts_location_dt, correlations_counts_line_dt)
# Per-category medians computed before Group is converted to a factor.
correlations_counts_combined_dt_med <- correlations_counts_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlations_counts_combined_dt$Group <- factor(correlations_counts_combined_dt$Group, levels = c("Replicate", "Village", "Line", "Location"))
correlation_dist <- ggplot(correlations_counts_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Village", "Line", "Location"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_counts_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_dist, filename = paste0(outdir, "correlation_distributions_counts.png"), width = 3)
# Widen to one column per replicate for both Mean and N.
summary_SCT_wide <- pivot_wider(summary_SCT, names_from = Replicate, values_from = c(Mean, N))
summary_SCT_wide$SD <- rowSds(as.matrix(summary_SCT_wide[,c("Mean_Replicate1", "Mean_Replicate2", "Mean_Replicate3")]))
# Cell-number-weighted mean across the three replicates.
summary_SCT_wide$Mean <- (summary_SCT_wide$Mean_Replicate1 * summary_SCT_wide$N_Replicate1 + summary_SCT_wide$Mean_Replicate2 * summary_SCT_wide$N_Replicate2 + summary_SCT_wide$Mean_Replicate3 * summary_SCT_wide$N_Replicate3)/(summary_SCT_wide$N_Replicate1 + summary_SCT_wide$N_Replicate2 + summary_SCT_wide$N_Replicate3)
# Self-join to pair Baseline (x) against other time points (y).
summary_SCT_wide_wide <- inner_join(summary_SCT_wide, summary_SCT_wide, by = c("Gene", "Line", "Location"))
summary_SCT_wide_wide <- summary_SCT_wide_wide[which(summary_SCT_wide_wide$Time.x != summary_SCT_wide_wide$Time.y),]
summary_SCT_wide_wide <- summary_SCT_wide_wide[which(summary_SCT_wide_wide$Time.x == "Baseline"),]
# Interactive outlier lookups (gene IDs noted in the trailing comments).
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == "TOB0421" & summary_SCT_wide_wide$Mean.y < -2.5), ]) ### ENSG00000129824
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Gene == "ENSG00000129824"),])
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == "TOB0421" & summary_SCT_wide_wide$Mean.y >4), ]) ### ENSG00000106153 CHCHD2
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Gene == "ENSG00000106153"),])
# Per-location Baseline-vs-other scatter plots, faceted by cell line.
pExpression_Correlation_SCT <- list()
for (location in unique(summary_SCT_wide_wide$Location)){
pExpression_Correlation_SCT[[location]] <- ggplot(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
geom_point() +
theme_classic() +
facet_wrap(vars(Line), nrow = 1, scales = "free")
ggsave(pExpression_Correlation_SCT[[location]], filename = paste0(outdir,location,"_Expression_Correlation_norm_sep.png"), width = 15)
}
# R-squared (lm), Pearson and Spearman correlation between Baseline (x)
# and the other time point (y), per Line/Location/time-pair combination.
Rsquared <- unique(summary_SCT_wide_wide[,c("Line","Location","Time.x","Time.y")])
Rsquared$Rsquared <- NA
# BUG FIX: the originals were bare `Rsquared$pearson` / `Rsquared$spearman`
# expressions, which do not create the columns and would make the per-row
# assignments below fail; initialize them to NA explicitly.
Rsquared$pearson <- NA
Rsquared$spearman <- NA
for (row in seq_len(nrow(Rsquared))){
  # One subset per row instead of repeating the same which() six times.
  sub <- summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == Rsquared$Line[row] &
                                     summary_SCT_wide_wide$Location == Rsquared$Location[row] &
                                     summary_SCT_wide_wide$Time.y == Rsquared$Time.y[row] &
                                     summary_SCT_wide_wide$Time.x == Rsquared$Time.x[row]), ]
  Rsquared$Rsquared[row] <- summary(lm(Mean.y ~ Mean.x, sub))$r.squared
  Rsquared$pearson[row] <- cor(sub$Mean.y, sub$Mean.x)
  Rsquared$spearman[row] <- cor(sub$Mean.y, sub$Mean.x, method = "spearman")
}
Rsquared
# ##### Filter out the genes not expressed in 1% of cells #####
# SCT_combined_1pct <- lapply(SCT_combined, function(x){
# lapply(x, function(y){
# subset(y, features = rownames(y)[which((rowSums(y[["SCT"]]@counts > 0)/ncol(y[["SCT"]]@counts)) > 0.01)])
# })
# })
# summary_SCT_list_1pct <- lapply(names(SCT_combined_1pct), function(village){
# temp <- lapply(names(SCT_combined_1pct[[village]]), function(location){
# summary_SCT[which(summary_SCT$Location == location & summary_SCT$Time == village & summary_SCT$Gene %in% rownames(SCT_combined_1pct[[village]][[location]])), ]
# })
# do.call(rbind, temp)
# })
# summary_SCT_1pct <- do.call(rbind, summary_SCT_list_1pct)
# summary_SCT_1pct$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_1pct$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
# summary_SCT_1pct_wide <- pivot_wider(summary_SCT_1pct, names_from = Replicate, values_from = c(Mean, N))
# summary_SCT_1pct_wide$SD <- rowSds(as.matrix(summary_SCT_1pct_wide[,c("Mean_Replicate1", "Mean_Replicate2", "Mean_Replicate3")]))
# summary_SCT_1pct_wide$Mean <- (summary_SCT_1pct_wide$Mean_Replicate1 * summary_SCT_1pct_wide$N_Replicate1 + summary_SCT_1pct_wide$Mean_Replicate2 * summary_SCT_1pct_wide$N_Replicate2 + summary_SCT_1pct_wide$Mean_Replicate3 * summary_SCT_1pct_wide$N_Replicate3)/(summary_SCT_1pct_wide$N_Replicate1 + summary_SCT_1pct_wide$N_Replicate2 + summary_SCT_1pct_wide$N_Replicate3)
# summary_SCT_1pct_wide_wide <- inner_join(summary_SCT_1pct_wide, summary_SCT_1pct_wide, by = c("Gene", "Line", "Location"))
# summary_SCT_1pct_wide_wide <- summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Time.x != summary_SCT_1pct_wide_wide$Time.y),]
# summary_SCT_1pct_wide_wide <- summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Time.x == "Baseline"),]
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == "TOB0421" & summary_SCT_1pct_wide_wide$Mean.y < -2.5), ]) ### ENSG00000129824
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Gene == "ENSG00000129824"),])
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == "TOB0421" & summary_SCT_1pct_wide_wide$Mean.y >4), ]) ### ENSG00000106153 CHCHD2
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Gene == "ENSG00000106153"),])
# pExpression_Correlation_SCT <- list()
# for (location in unique(summary_SCT_1pct_wide_wide$Location)){
# pExpression_Correlation_SCT[[location]] <- ggplot(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
# geom_point() +
# theme_classic() +
# facet_wrap(vars(Line), nrow = 1, scales = "free")
# ggsave(pExpression_Correlation_SCT[[location]], filename = paste0(outdir,location,"_Expression_Correlation_norm_sep.png"), width = 15)
# }
# Rsquared_1pct <- unique(summary_SCT_1pct_wide_wide[,c("Line","Location","Time.x","Time.y")])
# Rsquared_1pct$Rsquared <- NA
# Rsquared_1pct$pearson
# Rsquared_1pct$spearman
# for (row in 1:nrow(Rsquared_1pct)){
# Rsquared_1pct$Rsquared[row] <- summary(lm(Mean.y ~ Mean.x, summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]))$r.squared
# Rsquared_1pct$pearson[row] <- cor(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.y,
# summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.x)
# Rsquared_1pct$spearman[row] <- cor(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.y,
# summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.x, method = "spearman")
# }
# Rsquared_1pct
##### Filter out the genes not expressed in 10% of cells #####
# For each village time-point and location, keep only genes detected
# (count > 0) in more than 10% of that object's cells.
SCT_combined_10pct <- lapply(SCT_combined, function(village) {
	lapply(village, function(seurat_obj) {
		sct_counts <- seurat_obj[["SCT"]]@counts
		detected_fraction <- rowSums(sct_counts > 0) / ncol(sct_counts)
		subset(seurat_obj, features = rownames(seurat_obj)[detected_fraction > 0.1])
	})
})
# Restrict the per-replicate summary table to genes that passed the 10% filter
# for the matching village time-point x location combination.
summary_SCT_list_10pct <- lapply(names(SCT_combined_10pct), function(village){
temp <- lapply(names(SCT_combined_10pct[[village]]), function(location){
summary_SCT[which(summary_SCT$Location == location & summary_SCT$Time == village & summary_SCT$Gene %in% rownames(SCT_combined_10pct[[village]][[location]])), ]
})
do.call(rbind, temp)
})
summary_SCT_10pct <- do.call(rbind, summary_SCT_list_10pct)
# Collapse site-specific replicate labels (e.g. "Brisbane1") to generic
# "Replicate1..3" so replicates align across sites when pivoting wide.
summary_SCT_10pct$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_10pct$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
# One row per Gene x Line x Location x Time with Mean_Replicate1..3 / N_Replicate1..3 columns.
summary_SCT_10pct_wide <- pivot_wider(summary_SCT_10pct, names_from = Replicate, values_from = c(Mean, N))
summary_SCT_10pct_wide$SD <- rowSds(as.matrix(summary_SCT_10pct_wide[,c("Mean_Replicate1", "Mean_Replicate2", "Mean_Replicate3")]))
# Cell-number-weighted mean expression across the three replicates.
summary_SCT_10pct_wide$Mean <- (summary_SCT_10pct_wide$Mean_Replicate1 * summary_SCT_10pct_wide$N_Replicate1 + summary_SCT_10pct_wide$Mean_Replicate2 * summary_SCT_10pct_wide$N_Replicate2 + summary_SCT_10pct_wide$Mean_Replicate3 * summary_SCT_10pct_wide$N_Replicate3)/(summary_SCT_10pct_wide$N_Replicate1 + summary_SCT_10pct_wide$N_Replicate2 + summary_SCT_10pct_wide$N_Replicate3)
# Self-join so each row pairs Baseline (.x) with Village (.y) expression for the
# same Gene x Line x Location.
summary_SCT_10pct_wide_wide <- inner_join(summary_SCT_10pct_wide, summary_SCT_10pct_wide, by = c("Gene", "Line", "Location"))
summary_SCT_10pct_wide_wide <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Time.x != summary_SCT_10pct_wide_wide$Time.y),]
summary_SCT_10pct_wide_wide <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Time.x == "Baseline"),]
# Interactive inspection of outlier genes (printed, not assigned).
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == "TOB0421" & summary_SCT_10pct_wide_wide$Mean.y < -2.5), ]) ### ENSG00000129824
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Gene == "ENSG00000129824"),])
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == "TOB0421" & summary_SCT_10pct_wide_wide$Mean.y >4), ]) ### ENSG00000106153 CHCHD2
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Gene == "ENSG00000106153"),])
# One baseline-vs-village scatter per collection site, faceted by cell line,
# written to <outdir>/<site>_Expression_Correlation_norm_sep_10pct.png.
pExpression_Correlation_SCT <- list()
for (loc in unique(summary_SCT_10pct_wide_wide$Location)) {
	loc_data <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Location == loc), ]
	loc_plot <- ggplot(loc_data, aes(Mean.x, Mean.y)) +
		geom_point(size = 0.5, alpha = 0.5) +
		theme_classic() +
		facet_wrap(vars(Line), nrow = 1, scales = "free")
	pExpression_Correlation_SCT[[loc]] <- loc_plot
	ggsave(loc_plot, filename = paste0(outdir, loc, "_Expression_Correlation_norm_sep_10pct.png"), width = 15)
}
# NOTE: the original first built a plain ggplot version of this figure and then
# immediately overwrote it with the ggscatter version; the dead assignment is
# kept below as a comment for reference.
# pExpression_Correlation_Combined <- ggplot(summary_SCT_10pct_wide_wide, aes(Mean.x, Mean.y)) +
# geom_point(size = 0.5, alpha = 0.3) +
# theme_classic() +
# facet_grid(Location ~ Line, scales = "free")
# Baseline vs village mean expression, one panel per Location x Line, with a
# per-line regression line and Pearson r annotation.
pExpression_Correlation_Combined <- ggscatter(data = summary_SCT_10pct_wide_wide, x = "Mean.x", y = "Mean.y",
color = "Line",
facet.by = c("Location", "Line"),
size = 0.5,
alpha = 0.25,
ylab = "Average Baseline Normalized Expression",
xlab = "Average Village Normalized Expression",
add = "reg.line",
conf.int = TRUE,
scales = "free") + # BUG FIX: 'scales = "free"' was passed twice in the original call
stat_cor(aes(color = Line), method = "pearson")
ggsave(pExpression_Correlation_Combined, filename = paste0(outdir,"Expression_Correlation_norm_sep_10pct_all_locations.png"))
# Fresh-samples-only version of the correlation figure: drop the cryopreserved
# Sydney samples, rename sites to publication labels, and facet by site x line.
summary_SCT_10pct_wide_wide_fresh <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Location != "Sydney_Cryopreserved"),]
summary_SCT_10pct_wide_wide_fresh$Location <- gsub("_Fresh", "", summary_SCT_10pct_wide_wide_fresh$Location)
# Map raw city names to "Site N" labels (site_updates defined at top of file).
for (location in names(site_updates)){
summary_SCT_10pct_wide_wide_fresh$Location <- gsub(location, site_updates[location], summary_SCT_10pct_wide_wide_fresh$Location)
}
summary_SCT_10pct_wide_wide_fresh$Location_Line <- paste0(summary_SCT_10pct_wide_wide_fresh$Location, "_", summary_SCT_10pct_wide_wide_fresh$Line)
pExpression_Correlation_Combined_fresh <- ggscatter(data = summary_SCT_10pct_wide_wide_fresh, x = "Mean.x", y = "Mean.y",
color = "Line",
facet.by = c("Location_Line"),
palette = line_colors,
size = 0.5,
alpha = 0.25,
ylab = "Baseline Average Normalized Expression",
xlab = "Village Average Normalized Expression",
add = "reg.line",
add.params = list(color = "black", fill = "lightgray", size = 0.5),
conf.int = TRUE,
scales = "free") +
stat_cor(aes(label = ..r.label..), method = "pearson")
save_figs(pExpression_Correlation_Combined_fresh, paste0(outdir,"Expression_Correlation_norm_sep_10pct_fresh"), height = 15, width = 15)
### Make accompanying table ###
# Pearson correlation (r and p) of baseline vs village mean expression for every
# Line x Location x time-point pair.
# NOTE(review): this uses summary_SCT_wide_wide (built elsewhere in the script),
# not the 10pct-filtered table — confirm that is intentional.
expression_pearson_df <- unique(summary_SCT_wide_wide[,c("Line","Location","Time.x","Time.y")])
expression_pearson_df$`Pearson R` <- NA
expression_pearson_df$`Pearson P` <- NA
for (row in seq_len(nrow(expression_pearson_df))){
# Subset once per row; the original recomputed this which() for every field.
sub_df <- summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == expression_pearson_df$Line[row] &
summary_SCT_wide_wide$Location == expression_pearson_df$Location[row] &
summary_SCT_wide_wide$Time.y == expression_pearson_df$Time.y[row] &
summary_SCT_wide_wide$Time.x == expression_pearson_df$Time.x[row]), ]
# One cor.test instead of two identical calls; the original's 'exact = TRUE'
# is ignored for the pearson method and has been dropped.
ct <- cor.test(sub_df$Mean.y, sub_df$Mean.x, method = "pearson")
expression_pearson_df$`Pearson R`[row] <- ct$estimate
expression_pearson_df$`Pearson P`[row] <- ct$p.value
}
# Rename sites to publication labels, then write fresh and cryopreserved
# correlation tables separately.
for (location in names(site_updates)){
expression_pearson_df$Location <- gsub(location, site_updates[location], expression_pearson_df$Location)
}
expression_pearson_df <- data.table(expression_pearson_df)
expression_pearson_df_fresh <- expression_pearson_df[Location != "Site 3_Cryopreserved"]
expression_pearson_df_fresh$Location <- gsub("_Fresh", "", expression_pearson_df_fresh$Location)
fwrite(expression_pearson_df_fresh, paste0(outdir, "Fresh_expression_pearson.tsv"), sep = "\t")
# Site 3 (Sydney) contributes both fresh and cryopreserved samples.
expression_pearson_df_cryo <- expression_pearson_df[grepl("Site 3", expression_pearson_df$Location)]
expression_pearson_df_cryo$Location <- gsub("_", " - ", expression_pearson_df_cryo$Location)
fwrite(expression_pearson_df_cryo, paste0(outdir, "Cryopreserved_expression_pearson.tsv"), sep = "\t")
# Sydney fresh vs cryopreserved subset for the cryo comparison figure.
summary_SCT_10pct_wide_wide_cryo <- summary_SCT_10pct_wide_wide[grepl("Sydney", summary_SCT_10pct_wide_wide$Location),]
summary_SCT_10pct_wide_wide_cryo$Location <- gsub("Sydney_", "", summary_SCT_10pct_wide_wide_cryo$Location)
# Rename sites first (no-op for "Fresh"/"Cryopreserved" values), THEN set the
# factor. BUG FIX: the original called factor() before this gsub() loop, and
# gsub() coerces back to character, silently discarding the intended
# Fresh-before-Cryopreserved level ordering.
for (location in names(site_updates)){
summary_SCT_10pct_wide_wide_cryo$Location <- gsub(location, site_updates[location], summary_SCT_10pct_wide_wide_cryo$Location)
}
summary_SCT_10pct_wide_wide_cryo$Location <- factor(summary_SCT_10pct_wide_wide_cryo$Location, levels = c("Fresh", "Cryopreserved"))
summary_SCT_10pct_wide_wide_cryo$Location_Line <- paste0(summary_SCT_10pct_wide_wide_cryo$Location, "_", summary_SCT_10pct_wide_wide_cryo$Line)
# Baseline vs village expression for Sydney fresh vs cryopreserved samples,
# faceted by preparation x line, with regression line and Pearson r.
pExpression_Correlation_Combined_cryo <- ggscatter(data = summary_SCT_10pct_wide_wide_cryo, x = "Mean.x", y = "Mean.y",
color = "Line",
facet.by = c("Location_Line"),
palette = line_colors,
size = 0.5,
alpha = 0.25,
ylab = "Baseline Average Normalized Expression",
xlab = "Village Average Normalized Expression",
add = "reg.line",
add.params = list(color = "black", fill = "lightgray", size = 0.5),
conf.int = TRUE,
scales = "free") +
stat_cor(aes(label = ..r.label..), method = "pearson")
save_figs(pExpression_Correlation_Combined_cryo, paste0(outdir,"Expression_Correlation_norm_sep_10pct_cryo"), height = 10, width = 12)
# Per Line x Location x time-pair R^2, Pearson and Spearman between baseline and
# village mean expression (10pct-filtered genes).
Rsquared_10pct <- unique(summary_SCT_10pct_wide_wide[,c("Line","Location","Time.x","Time.y")])
Rsquared_10pct$Rsquared <- NA
# BUG FIX: the original two lines merely printed the non-existent columns
# ('Rsquared_10pct$pearson') instead of initialising them with '<- NA'.
Rsquared_10pct$pearson <- NA
Rsquared_10pct$spearman <- NA
for (row in seq_len(nrow(Rsquared_10pct))){
# Subset once per row; the original recomputed this which() six times.
sub_df <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == Rsquared_10pct$Line[row] &
summary_SCT_10pct_wide_wide$Location == Rsquared_10pct$Location[row] &
summary_SCT_10pct_wide_wide$Time.y == Rsquared_10pct$Time.y[row] &
summary_SCT_10pct_wide_wide$Time.x == Rsquared_10pct$Time.x[row]),]
Rsquared_10pct$Rsquared[row] <- summary(lm(Mean.y ~ Mean.x, sub_df))$r.squared
Rsquared_10pct$pearson[row] <- cor(sub_df$Mean.y, sub_df$Mean.x)
Rsquared_10pct$spearman[row] <- cor(sub_df$Mean.y, sub_df$Mean.x, method = "spearman")
}
Rsquared_10pct
# NOTE(review): Rsquared_1pct and Rsquared come from code that is commented out
# above / elsewhere in the script — these prints will error if those objects
# were never created.
Rsquared_1pct
Rsquared
| /Expression_Correlation/Expression_Correlation.R | no_license | powellgenomicslab/iPSC_Village_scripts | R | false | false | 45,303 | r | library(tidyverse)
library(Seurat)
library(matrixStats)
library(ggpubr)
library(data.table)
##### Set Up Directories #####
# Project root; all inputs and outputs live under this path.
dir <- "/directflow/SCCGGroupShare/projects/DrewNeavin/iPSC_Village/"
datadir <- paste0(dir,"output/All_data_integrated_remove_bad/")
outdir <- paste0(dir,"output/Expression_Correlations/")
# dir.create warns (harmlessly) if the directory already exists.
dir.create(outdir)
##### Set Colors for Figures #####
# Per-cell-line colors used consistently across all figures.
line_colors <- c(FSA0006 = "#F79E29", MBE1006 = "#9B2C99", TOB0421 = "#35369C")
# City -> anonymised site label used in publication figures/tables.
site_updates <- c("Brisbane" = "Site 1", "Sydney" = "Site 3" ,"Melbourne" = "Site 2")
##### Figure Function #####
# Save a ggplot to PNG, PDF and EPS with shared dimensions.
#   plot:     ggplot object to save
#   basename: output path WITHOUT extension
#   width/height/units: passed straight through to ggsave
save_figs <- function(plot, basename, width = 17, height = 17, units = "cm"){
	for (ext in c(".png", ".pdf", ".eps")) {
		ggsave(plot, filename = paste0(basename, ext), height = height, width = width, units = units)
	}
}
##### Read in Data #####
# Integrated Seurat object covering all sites, time-points, lines and replicates.
seurat <- readRDS(paste0(datadir,"seurat_integrated_all_times_clustered.rds"))
# Interactive sanity checks of the metadata columns used below (printed only).
colnames(seurat@meta.data)
unique(seurat@meta.data$Location)
unique(seurat@meta.data$Time)
unique(seurat@meta.data$Final_Assignment)
unique(seurat@meta.data$MULTI_ID)
# Strip any suffix after the city name (e.g. "Sydney_...") -> plain site name.
seurat@meta.data$Location <- gsub("_.+", "", seurat@meta.data$Location)
# Split the combined object into one Seurat object per site.
seurat_list <- lapply(unique(seurat@meta.data$Location), function(x){
subset(seurat, subset = Location == x)
})
names(seurat_list) <- unique(seurat@meta.data$Location)
# Progressively split each site's object into nested lists:
#   site -> time -> line -> replicate.
# The temp[[x]][[day]] indexing is only a scratch structure so the inner for
# loop can accumulate named entries; temp[[x]] is what gets returned.
seurat_list_list <- lapply(names(seurat_list), function(x){
temp <- list()
for (day in unique(seurat_list[[x]]@meta.data$Time)){
temp[[x]][[day]] <- subset(seurat_list[[x]], subset = Time == day)
}
return(temp[[x]])
})
names(seurat_list_list) <- unique(seurat@meta.data$Location)
# Second level: split each site x time object by assigned cell line.
seurat_list_list_list <- lapply(names(seurat_list_list), function(x){
temp2 <- lapply(names(seurat_list_list[[x]]), function(y){
temp <- list()
for (assign in unique(seurat_list_list[[x]][[y]]$Final_Assignment)){
temp[[x]][[y]][[assign]] <- subset(seurat_list_list[[x]][[y]], subset = Final_Assignment == assign)
}
return(temp[[x]][[y]])
})
# NOTE(review): names come from the SITE-level metadata; assumes every site
# has the same set/order of Time values — appears to hold for this data.
names(temp2) <- unique(seurat_list[[x]]@meta.data$Time)
return(temp2)
})
names(seurat_list_list_list) <- unique(seurat@meta.data$Location)
# Third level: split each site x time x line object by replicate (MULTI_ID).
seurat_list_list_list_list <- lapply(names(seurat_list_list), function(x){
temp2 <- lapply(names(seurat_list_list_list[[x]]), function(y){
temp3 <- lapply(names(seurat_list_list_list[[x]][[y]]), function(z){
temp <- list()
for (rep in unique(seurat_list_list_list[[x]][[y]][[z]]$MULTI_ID)){
temp[[x]][[y]][[z]][[rep]] <- subset(seurat_list_list_list[[x]][[y]][[z]], subset = MULTI_ID == rep)
}
return(temp[[x]][[y]][[z]])
})
# Debug print of the replicate-level list.
print(temp3)
names(temp3) <- unique(seurat_list_list[[x]][[y]]$Final_Assignment)
return(temp3)
})
names(temp2) <- unique(seurat_list[[x]]@meta.data$Time)
return(temp2)
})
names(seurat_list_list_list_list) <- unique(seurat@meta.data$Location)
### 10% to start
# Subset every site x time x line x replicate object to genes detected in >10%
# of cells.
# NOTE(review): the 10% fraction is computed on the FULL combined 'seurat'
# object, not per replicate — confirm this global filter is intentional.
# Hoisted out of the nested lapply: the original recomputed this identical
# rowSums over the whole object once per replicate.
keep_genes <- rownames(seurat)[which((rowSums(seurat[["SCT"]]@counts > 0)/ncol(seurat[["SCT"]]@counts)) > 0.1)]
seurat_list_list_list_list_sub <- lapply(seurat_list_list_list_list, function(x){
lapply(x, function(y){
lapply(y, function(z){
lapply(z, function(rep){
subset(rep, features = keep_genes)
})
})
})
})
# BUG FIX: the original referenced the undefined object 'seurat_sub' here
# (runtime error). lapply() over a named list already preserves names, so
# re-use the input list's names.
names(seurat_list_list_list_list_sub) <- names(seurat_list_list_list_list)
saveRDS(seurat_list_list_list_list_sub, paste0(outdir, "subset_seurat_list.rds"))
seurat_list_list_list_list_sub <- readRDS(paste0(outdir, "subset_seurat_list.rds"))
# Per-replicate mean scaled expression for every gene, flattened to one
# data.frame with Gene/Mean/Replicate/Line/Time/Location columns.
# NOTE(review): this iterates the UNfiltered seurat_list_list_list_list, not
# the _sub version created just above — confirm which was intended.
summary_list <- lapply(names(seurat_list_list_list_list), function(x){
temp3 <- lapply(names(seurat_list_list_list_list[[x]]), function(y){
temp2 <- lapply(names(seurat_list_list_list_list[[x]][[y]]), function(z){
temp <- lapply(names(seurat_list_list_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(seurat_list_list_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(seurat_list_list_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@scale.data), Replicate = rep, Line = z, Time = y, Location = x)
})
# print(head(temp))
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
# NOTE: this object masks base::summary's name; calls like summary(lm(...))
# still resolve to the function because R skips non-functions in call lookup.
summary <- do.call(rbind, summary_list)
# Generic replicate labels so replicates align across sites.
summary$Replicate <- gsub("Brisbane", "Replicate", summary$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
summary_wide <- pivot_wider(summary, names_from = Replicate, values_from = Mean)
summary_wide$SD <- rowSds(as.matrix(summary_wide[,c("Replicate1", "Replicate2", "Replicate3")]))
# Unweighted mean across replicates (contrast with the N-weighted 10pct table).
summary_wide$Mean <- rowMeans(as.matrix(summary_wide[,c("Replicate1", "Replicate2", "Replicate3")]))
# Pair Baseline (.x) with Village (.y) per Gene x Line x Location.
summary_wide_wide <- inner_join(summary_wide, summary_wide, by = c("Gene", "Line", "Location"))
summary_wide_wide <- summary_wide_wide[which(summary_wide_wide$Time.x != summary_wide_wide$Time.y),]
summary_wide_wide <- summary_wide_wide[which(summary_wide_wide$Time.x == "Baseline"),]
# One scatter per site (no line facet here, unlike the 10pct version).
pExpression_Correlation <- list()
for (location in unique(summary_wide_wide$Location)){
pExpression_Correlation[[location]] <- ggplot(summary_wide_wide[which(summary_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
geom_point() +
theme_classic()
ggsave(pExpression_Correlation[[location]], filename = paste0(outdir,location,"_Expression_Correlation.png"))
}
##### Redo with separately assessed by SCT #####
# Re-run the analysis on objects that were SCTransform-normalised separately per
# site/condition (rather than on the integrated object).
location_SCT <- readRDS(paste0(dir,"output/Variance_contributions_lm_scale_data/seurat_SCT_wRB_sydney_regress_all_covs.rds"))
cryo_SCT <- readRDS(paste0(dir,"output/Variance_contributions_lm_scale_data_Sydney/seurat_SCT_wRB_sydney_regress_all_covs.rds"))
### Update names to combine objects ###
# Harmonise list names so the location and cryo objects can be merged:
# time-point level = Baseline/Village, second level = site (+ fresh/cryo).
names(location_SCT) <- c("Baseline", "Village")
location_SCT <- lapply(location_SCT, function(x){
names(x) <- c("Brisbane", "Melbourne", "Sydney_Fresh")
return(x)
})
cryo_SCT_updated <- list()
cryo_SCT_updated[["Baseline"]][["Sydney_Fresh"]] <- cryo_SCT[["Baseline"]]
cryo_SCT_updated[["Baseline"]][["Sydney_Cryopreserved"]] <- cryo_SCT[["Thawed Village Day 0"]]
cryo_SCT_updated[["Village"]][["Sydney_Fresh"]] <- cryo_SCT[["Village Day 4"]]
cryo_SCT_updated[["Village"]][["Sydney_Cryopreserved"]] <- cryo_SCT[["Thawed Village Day 7"]]
# Combined structure: time-point -> {Brisbane, Melbourne, Sydney_Fresh, Sydney_Cryopreserved}.
SCT_combined <- location_SCT
SCT_combined[["Baseline"]][["Sydney_Cryopreserved"]] <- cryo_SCT_updated[["Baseline"]][["Sydney_Cryopreserved"]]
SCT_combined[["Village"]][["Sydney_Cryopreserved"]] <- cryo_SCT_updated[["Village"]][["Sydney_Cryopreserved"]]
# Split each time-point x location object by assigned cell line.
SCT_combined_list <- lapply(names(SCT_combined), function(x){
temp2 <- lapply(names(SCT_combined[[x]]), function(y){
temp <- list()
for (assign in unique(SCT_combined[[x]][[y]]$Final_Assignment)){
temp[[x]][[y]][[assign]] <- subset(SCT_combined[[x]][[y]], subset = Final_Assignment == assign)
}
return(temp[[x]][[y]])
})
names(temp2) <- unique(names(SCT_combined[[x]]))
return(temp2)
})
names(SCT_combined_list) <- names(SCT_combined)
# Further split by replicate (MULTI_ID): time -> location -> line -> replicate.
SCT_combined_list_list <- lapply(names(SCT_combined_list), function(x){
temp2 <- lapply(names(SCT_combined_list[[x]]), function(y){
temp3 <- lapply(names(SCT_combined_list[[x]][[y]]), function(z){
temp <- list()
for (rep in unique(SCT_combined_list[[x]][[y]][[z]]$MULTI_ID)){
temp[[x]][[y]][[z]][[rep]] <- subset(SCT_combined_list[[x]][[y]][[z]], subset = MULTI_ID == rep)
}
return(temp[[x]][[y]][[z]])
})
# Debug print of the replicate-level list.
print(temp3)
names(temp3) <- unique(SCT_combined[[x]][[y]]$Final_Assignment)
return(temp3)
})
names(temp2) <- names(SCT_combined[[x]])
return(temp2)
})
names(SCT_combined_list_list) <- names(SCT_combined)
saveRDS(SCT_combined_list_list, paste0(outdir, "subset_seurat_list_sep.rds"))
SCT_combined_list_list <- readRDS(paste0(outdir, "subset_seurat_list_sep.rds"))
# Per-replicate mean SCALED expression (scale.data slot) and cell count for the
# separately-normalised objects; flattened to a single data.table.
summary_SCT_list <- lapply(names(SCT_combined_list_list), function(x){
temp3 <- lapply(names(SCT_combined_list_list[[x]]), function(y){
temp2 <- lapply(names(SCT_combined_list_list[[x]][[y]]), function(z){
temp <- lapply(names(SCT_combined_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(SCT_combined_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@scale.data), N = ncol(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@scale.data), Replicate = rep, Line = z, Location = y, Time = x)
})
# print(head(temp))
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
summary_SCT <- data.table(do.call(rbind, summary_SCT_list))
# Generic replicate labels so replicates align across sites.
summary_SCT$Replicate <- gsub("Brisbane", "Replicate", summary_SCT$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
##### Get unique id for each #####
# ID uniquely identifies one Location x Time x Line x Replicate group.
summary_SCT$ID <- paste(summary_SCT$Location, summary_SCT$Time, summary_SCT$Line, summary_SCT$Replicate, sep = "-")
# Spearman correlation of mean scaled expression between EVERY pair of ID
# groups (genes restricted to those present in both groups).
# NOTE(review): this computes the full n x n matrix including self-pairs and
# both orderings; the later 'correlations_data' loop restricts to the upper
# triangle instead.
correlations <- lapply(unique(summary_SCT$ID), function(group1){
print(group1)
tmp <- lapply(unique(summary_SCT$ID), function(group2){
print(group2)
# Genes present in both groups, in group1's row order.
genes <- summary_SCT[ID == group1][summary_SCT[ID == group1]$Gene %in% summary_SCT[ID == group2]$Gene]$Gene
# Sanity check that the two gene vectors line up before correlating.
print(all(summary_SCT[ID == group1][Gene %in% genes]$Gene == summary_SCT[ID == group2][Gene %in% genes]$Gene))
return(data.table(Group1 = group1, Group2 = group2, Spearman = cor(summary_SCT[ID == group1][Gene %in% genes]$Mean, summary_SCT[ID == group2][Gene %in% genes]$Mean, method = "spearman")))
})
return(do.call(rbind, tmp))
})
saveRDS(correlations, paste0(outdir, "correlations.rds"))
correlations <- readRDS(paste0(outdir, "correlations.rds"))
# Same all-pairs loop as above but with Pearson correlation (cor() default).
correlations_pearson <- lapply(unique(summary_SCT$ID), function(group1){
print(group1)
tmp <- lapply(unique(summary_SCT$ID), function(group2){
print(group2)
genes <- summary_SCT[ID == group1][summary_SCT[ID == group1]$Gene %in% summary_SCT[ID == group2]$Gene]$Gene
print(all(summary_SCT[ID == group1][Gene %in% genes]$Gene == summary_SCT[ID == group2][Gene %in% genes]$Gene))
return(data.table(Group1 = group1, Group2 = group2, Pearson = cor(summary_SCT[ID == group1][Gene %in% genes]$Mean, summary_SCT[ID == group2][Gene %in% genes]$Mean)))
})
return(do.call(rbind, tmp))
})
correlations_pearson_dt <- do.call(rbind, correlations_pearson)
# Spot-check scatter for one cross-line pair (data.table join on Gene puts the
# second group's mean in i.Mean).
tmp <- summary_SCT[ID == "Brisbane-Baseline-FSA0006-Replicate1"][summary_SCT[ID == "Brisbane-Baseline-TOB0421-Replicate1"], on = "Gene"]
scatter_test <- ggplot(tmp, aes(Mean,i.Mean)) +
geom_point() +
theme_classic()
ggsave(scatter_test, filename = paste0(outdir, "test_correlation.png"))
# Flatten and split the "Location-Time-Line-Replicate" IDs back into columns.
correlations_dt <- do.call(rbind, correlations)
correlations_dt[, c("Location1", "Village1", "Line1", "Replicate1") := tstrsplit(Group1, "-", fixed=TRUE)]
correlations_dt[, c("Location2", "Village2", "Line2", "Replicate2") := tstrsplit(Group2, "-", fixed=TRUE)]
##### Try with data slot instead #####
# As for summary_SCT, but using the log-normalised 'data' slot rather than the
# scaled 'scale.data' slot.
summary_SCT_data_list <- lapply(names(SCT_combined_list_list), function(x){
temp3 <- lapply(names(SCT_combined_list_list[[x]]), function(y){
temp2 <- lapply(names(SCT_combined_list_list[[x]][[y]]), function(z){
temp <- lapply(names(SCT_combined_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(SCT_combined_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@data), N = ncol(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@data), Replicate = rep, Line = z, Location = y, Time = x)
})
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
summary_SCT_data <- data.table(do.call(rbind, summary_SCT_data_list))
summary_SCT_data$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_data$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
##### Get unique id for each #####
summary_SCT_data$ID <- paste(summary_SCT_data$Location, summary_SCT_data$Time, summary_SCT_data$Line, summary_SCT_data$Replicate, sep = "-")
# Pairwise Spearman correlations on the 'data' slot, upper triangle only
# (each unordered pair computed once).
ids <- unique(summary_SCT_data$ID)
correlations_data <- lapply(seq_along(ids), function(i){
group1 <- ids[i]
print(group1)
# BUG FIX: for the last ID the original '(which(...) + 1):length(...)' ran
# BACKWARDS (c(n+1, n)), producing an NA group and a self-comparison.
if (i == length(ids)) return(NULL)
tmp <- lapply(ids[(i + 1):length(ids)], function(group2){
print(group2)
genes <- summary_SCT_data[ID == group1][summary_SCT_data[ID == group1]$Gene %in% summary_SCT_data[ID == group2]$Gene]$Gene
# Sanity check that the gene vectors line up before correlating.
print(all(summary_SCT_data[ID == group1][Gene %in% genes]$Gene == summary_SCT_data[ID == group2][Gene %in% genes]$Gene))
return(data.table(Group1 = group1, Group2 = group2, Spearman = cor(summary_SCT_data[ID == group1][Gene %in% genes]$Mean, summary_SCT_data[ID == group2][Gene %in% genes]$Mean, method = "spearman")))
})
return(do.call(rbind, tmp))
})
saveRDS(correlations_data, paste0(outdir, "correlations_data.rds"))
correlations_data <- readRDS(paste0(outdir, "correlations_data.rds"))
correlations_data_dt <- do.call(rbind, correlations_data)
# Split the "Location-Time-Line-Replicate" IDs back into columns for filtering.
correlations_data_dt[, c("Location1", "Village1", "Line1", "Replicate1") := tstrsplit(Group1, "-", fixed=TRUE)]
correlations_data_dt[, c("Location2", "Village2", "Line2", "Replicate2") := tstrsplit(Group2, "-", fixed=TRUE)]
# NOTE(review): '|' only drops cryo-vs-cryo pairs, keeping fresh-vs-cryo rows;
# the group filters below (which require matching Location) exclude those
# anyway, but '&' may have been intended — confirm.
correlations_data_fresh_dt <- correlations_data_dt[Location1 != "Sydney_Cryopreserved" | Location2 != "Sydney_Cryopreserved"]
##### #####
# Classify each pair by the single factor it varies: Village (time-point),
# Replicate, Location, or Line — everything else held equal.
correlations_data_fresh_village_dt <- correlations_data_fresh_dt[Replicate1 == Replicate2 & Line1 == Line2 & Location1 == Location2 & Village1 != Village2 & Group1 != Group2]
correlations_data_fresh_village_dt$Group <- "Village"
correlations_data_fresh_replicate_dt <- correlations_data_fresh_dt[Village1 == Village2 & Line1 == Line2 & Location1 == Location2 & Replicate1 != Replicate2 & Group1 != Group2]
correlations_data_fresh_replicate_dt$Group <- "Replicate"
correlations_data_fresh_location_dt <- correlations_data_fresh_dt[Replicate1 == Replicate2 & Line1 == Line2 & Village1 == Village2 & Location1 != Location2 & Group1 != Group2]
correlations_data_fresh_location_dt$Group <- "Location"
correlations_data_fresh_line_dt <- correlations_data_fresh_dt[Village1 == Village2 & Location1 == Location2 & Line1 != Line2 & Group1 != Group2]
correlations_data_fresh_line_dt$Group <- "Line"
correlations_data_fresh_combined_dt <- rbind(correlations_data_fresh_village_dt, correlations_data_fresh_replicate_dt, correlations_data_fresh_location_dt, correlations_data_fresh_line_dt)
correlations_data_fresh_combined_dt$Group <- factor(correlations_data_fresh_combined_dt$Group, levels = c("Replicate", "Village", "Line", "Location"))
# Pairwise Wilcoxon tests of the Spearman-correlation distributions between
# group categories (upper triangle of pairs only).
# BUG FIX: the original '(which(...) + 1):length(...)' ran backwards for the
# last group, yielding an NA group (empty wilcox input) and a self-comparison.
groups <- as.character(unique(correlations_data_fresh_combined_dt$Group))
res <- list()
for (i in seq_len(length(groups) - 1)){
for (j in seq(i + 1, length(groups))){
res[[groups[i]]][[groups[j]]] <- wilcox.test(correlations_data_fresh_combined_dt[Group == groups[i]]$Spearman, correlations_data_fresh_combined_dt[Group == groups[j]]$Spearman, exact = FALSE)
}
}
# Per-group median correlation (group_by + mutate keeps one value per row; the
# resulting vline is drawn repeatedly per facet, which is harmless).
correlations_data_fresh_combined_dt_med <- correlations_data_fresh_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlation_fresh_dist <- ggplot(correlations_data_fresh_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Village", "Line", "Location"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_data_fresh_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_fresh_dist, filename = paste0(outdir, "correlation_distributions_fresh.png"), width = 3)
ggsave(correlation_fresh_dist, filename = paste0(outdir, "correlation_distributions_fresh.pdf"), width = 3)
fwrite(correlations_data_fresh_combined_dt, paste0(outdir,"correlation_distribution_table.tsv"), sep = "\t")
##### For cryopreserdd #####
# Sydney-only pairs: classify by the single varying factor, now including a
# "Cryopreserved" category (fresh vs cryopreserved preparation).
correlations_data_combined_cryo_dt <- correlations_data_dt[grepl("Sydney", Location1) & grepl("Sydney", Location2)]
correlations_data_combined_cryo_dt$Cryopreserved1 <- gsub("Sydney_", "",correlations_data_combined_cryo_dt$Location1)
correlations_data_combined_cryo_dt$Cryopreserved2 <- gsub("Sydney_", "",correlations_data_combined_cryo_dt$Location2)
##### #####
correlations_data_cryo_village_dt <- correlations_data_combined_cryo_dt[Replicate1 == Replicate2 & Line1 == Line2 & Cryopreserved1 == Cryopreserved2 & Village1 != Village2 & Group1 != Group2]
correlations_data_cryo_village_dt$Group <- "Village"
correlations_data_cryo_replicate_dt <- correlations_data_combined_cryo_dt[Village1 == Village2 & Line1 == Line2 & Cryopreserved1 == Cryopreserved2 & Replicate1 != Replicate2 & Group1 != Group2]
correlations_data_cryo_replicate_dt$Group <- "Replicate"
correlations_data_cryo_cryo_dt <- correlations_data_combined_cryo_dt[Replicate1 == Replicate2 & Line1 == Line2 & Village1 == Village2 & Cryopreserved1 != Cryopreserved2 & Group1 != Group2]
correlations_data_cryo_cryo_dt$Group <- "Cryopreserved"
correlations_data_cryo_line_dt <- correlations_data_combined_cryo_dt[Village1 == Village2 & Cryopreserved1 == Cryopreserved2 & Line1 != Line2 & Group1 != Group2]
correlations_data_cryo_line_dt$Group <- "Line"
correlations_data_cryo_combined_dt <- rbind(correlations_data_cryo_village_dt, correlations_data_cryo_replicate_dt, correlations_data_cryo_cryo_dt, correlations_data_cryo_line_dt)
correlations_data_cryo_combined_dt$Group <- factor(correlations_data_cryo_combined_dt$Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))
# Per-group medians for the dashed vline, as in the fresh version above.
correlations_data_cryo_combined_dt_med <- correlations_data_cryo_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlation_cryo_dist <- ggplot(correlations_data_cryo_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_data_cryo_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.png"), width = 3)
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.pdf"), width = 3)
##### For cryopreserd with fresh #####
# Pairs where at least one side is Sydney; non-Sydney sites count as "Fresh".
correlations_data_combined_cryo_fresh_dt <- correlations_data_dt[grepl("Sydney", Location1) | grepl("Sydney", Location2)]
correlations_data_combined_cryo_fresh_dt$Cryopreserved1 <- ifelse(grepl("Sydney",correlations_data_combined_cryo_fresh_dt$Location1), gsub("Sydney_", "",correlations_data_combined_cryo_fresh_dt$Location1), "Fresh")
correlations_data_combined_cryo_fresh_dt$Cryopreserved2 <- ifelse(grepl("Sydney",correlations_data_combined_cryo_fresh_dt$Location2), gsub("Sydney_", "",correlations_data_combined_cryo_fresh_dt$Location2), "Fresh")
# Interactive comparison of mean correlations involving each Sydney condition.
mean(correlations_data_dt[grepl("Sydney_Fresh-Baseline", Group1) | grepl("Sydney_Fresh-Baseline", Group2)]$Spearman)
mean(correlations_data_dt[grepl("Sydney_Cryopreserved-Baseline", Group1) | grepl("Sydney_Cryopreserved-Baseline", Group2)]$Spearman)
mean(correlations_data_dt[grepl("Sydney_Fresh-Village", Group1) | grepl("Sydney_Fresh-Village", Group2)]$Spearman)
mean(correlations_data_dt[grepl("Sydney_Cryopreserved-Village", Group1) | grepl("Sydney_Cryopreserved-Village", Group2)]$Spearman, na.rm = TRUE)
##### #####
# Village-differing pairs among the cryo+fresh set; colour each pair by
# whichever side is a fresh site ("" when neither side is fresh).
correlations_data_cryo_fresh_village_dt <- correlations_data_combined_cryo_fresh_dt[Line1 == Line2 & Village1 != Village2 & Group1 != Group2]
correlations_data_cryo_fresh_village_dt$Group <- "Village"
# BUG FIX: the outer ifelse() was missing its closing parenthesis in the
# original, which made this section of the script unparseable.
correlations_data_cryo_fresh_village_dt$Site_color <- ifelse(correlations_data_cryo_fresh_village_dt$Cryopreserved1 == "Fresh", correlations_data_cryo_fresh_village_dt$Location1,
ifelse(correlations_data_cryo_fresh_village_dt$Cryopreserved2 == "Fresh", correlations_data_cryo_fresh_village_dt$Location2, ""))
# NOTE(review): this section looks like unfinished copy-paste from the cryo
# section above — 'correlations_data_cryo_fresh_combined_dt' is never created
# before being assigned into (runtime error), the plot/median inputs still
# reference 'correlations_data_cryo_combined_dt', and the output filenames
# duplicate the cryo figure saved above. Needs completing or removing.
correlations_data_cryo_fresh_combined_dt$Group <- factor(correlations_data_cryo_combined_dt$Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))
correlations_data_cryo_fresh_combined_dt_med <- correlations_data_cryo_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlation_cryo_dist <- ggplot(correlations_data_cryo_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Cryopreserved", "Village", "Line"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_data_cryo_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.png"), width = 3)
ggsave(correlation_cryo_dist, filename = paste0(outdir, "correlation_distributions_cryo.pdf"), width = 3)
##### Try with counts slot instead #####
# Build a per-replicate summary of raw (counts-slot) expression: for every gene
# in every Time -> Location -> Line -> Replicate object, record the mean count
# and the number of cells, then flatten the nested lists into one data.table.
summary_SCT_counts_list <- lapply(names(SCT_combined_list_list), function(x){
temp3 <- lapply(names(SCT_combined_list_list[[x]]), function(y){
temp2 <- lapply(names(SCT_combined_list_list[[x]][[y]]), function(z){
temp <- lapply(names(SCT_combined_list_list[[x]][[y]][[z]]), function(rep){
data.frame(Gene = rownames(SCT_combined_list_list[[x]][[y]][[z]][[rep]]), Mean = rowMeans(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@counts), N = ncol(SCT_combined_list_list[[x]][[y]][[z]][[rep]][["SCT"]]@counts), Replicate = rep, Line = z, Location = y, Time = x)
})
do.call(rbind, temp)
})
do.call(rbind, temp2)
})
do.call(rbind, temp3)
})
summary_SCT_counts <- data.table(do.call(rbind, summary_SCT_counts_list))
# Harmonise the replicate labels across sites (site name -> "Replicate").
summary_SCT_counts$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_counts$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
##### Get unique id for each #####
summary_SCT_counts$ID <- paste(summary_SCT_counts$Location, summary_SCT_counts$Time, summary_SCT_counts$Line, summary_SCT_counts$Replicate, sep = "-")
# Pairwise Spearman correlation of mean counts between every ordered pair of
# IDs, restricted to genes present in both members of the pair.
# NOTE(review): for the LAST ID, `(which(...) + 1):length(...)` is
# `(n+1):n`, which counts backwards and yields the ID itself plus an
# out-of-range NA entry; the downstream Group1 != Group2 filters appear to
# drop these rows, but confirm before reusing this pattern.
correlations_counts <- lapply(unique(summary_SCT_counts$ID), function(group1){
print(group1)
tmp <- lapply(unique(summary_SCT_counts$ID)[(which(unique(summary_SCT_counts$ID) == group1) + 1):length(unique(summary_SCT_counts$ID))], function(group2){
print(group2)
genes <- summary_SCT_counts[ID == group1][summary_SCT_counts[ID == group1]$Gene %in% summary_SCT_counts[ID == group2]$Gene]$Gene
# Verify the two gene vectors are aligned before correlating.
print(all(summary_SCT_counts[ID == group1][Gene %in% genes]$Gene == summary_SCT_counts[ID == group2][Gene %in% genes]$Gene))
return(data.table(Group1 = group1, Group2 = group2, Spearman = cor(summary_SCT_counts[ID == group1][Gene %in% genes]$Mean, summary_SCT_counts[ID == group2][Gene %in% genes]$Mean, method = "spearman")))
})
return(do.call(rbind, tmp))
})
correlations_counts_dt <- do.call(rbind, correlations_counts)
# Split the "Location-Time-Line-Replicate" IDs back into their components.
correlations_counts_dt[, c("Location1", "Village1", "Line1", "Replicate1") := tstrsplit(Group1, "-", fixed=TRUE)]
correlations_counts_dt[, c("Location2", "Village2", "Line2", "Replicate2") := tstrsplit(Group2, "-", fixed=TRUE)]
# Classify each pair by the single factor that differs between its members.
correlations_counts_village_dt <- correlations_counts_dt[Replicate1 == Replicate2 & Line1 == Line2 & Location1 == Location2 & Village1 != Village2 & Group1 != Group2]
correlations_counts_village_dt$Group <- "Village"
correlations_counts_replicate_dt <- correlations_counts_dt[Village1 == Village2 & Line1 == Line2 & Location1 == Location2 & Replicate1 != Replicate2 & Group1 != Group2]
correlations_counts_replicate_dt$Group <- "Replicate"
correlations_counts_location_dt <- correlations_counts_dt[Replicate1 == Replicate2 & Line1 == Line2 & Village1 == Village2 & Location1 != Location2 & Group1 != Group2]
correlations_counts_location_dt$Group <- "Location"
correlations_counts_line_dt <- correlations_counts_dt[Replicate1 == Replicate2 & Village1 == Village2 & Location1 == Location2 & Line1 != Line2 & Group1 != Group2]
correlations_counts_line_dt$Group <- "Line"
correlations_counts_combined_dt <- rbind(correlations_counts_village_dt, correlations_counts_replicate_dt, correlations_counts_location_dt, correlations_counts_line_dt)
# Per-group median Spearman, used for the dashed reference lines below.
correlations_counts_combined_dt_med <- correlations_counts_combined_dt %>%
group_by(Group) %>%
mutate(median = median(as.numeric(Spearman)))
correlations_counts_combined_dt$Group <- factor(correlations_counts_combined_dt$Group, levels = c("Replicate", "Village", "Line", "Location"))
# Faceted histogram of correlation distributions per comparison group.
correlation_dist <- ggplot(correlations_counts_combined_dt, aes(Spearman)) +
geom_histogram(bins = 50) +
facet_wrap(vars(factor(Group, levels = c("Replicate", "Village", "Line", "Location"))), ncol = 1, scales = "free_y") +
theme_classic() +
geom_vline(data = correlations_counts_combined_dt_med, aes(xintercept = median), linetype = "dashed")
ggsave(correlation_dist, filename = paste0(outdir, "correlation_distributions_counts.png"), width = 3)
##### Replicate-level summary: wide table and weighted mean across replicates #####
summary_SCT_wide <- pivot_wider(summary_SCT, names_from = Replicate, values_from = c(Mean, N))
# Per-gene SD across the three replicate means.
summary_SCT_wide$SD <- rowSds(as.matrix(summary_SCT_wide[,c("Mean_Replicate1", "Mean_Replicate2", "Mean_Replicate3")]))
# Cell-number-weighted mean expression across the three replicates.
summary_SCT_wide$Mean <- (summary_SCT_wide$Mean_Replicate1 * summary_SCT_wide$N_Replicate1 + summary_SCT_wide$Mean_Replicate2 * summary_SCT_wide$N_Replicate2 + summary_SCT_wide$Mean_Replicate3 * summary_SCT_wide$N_Replicate3)/(summary_SCT_wide$N_Replicate1 + summary_SCT_wide$N_Replicate2 + summary_SCT_wide$N_Replicate3)
# Self-join on Gene/Line/Location to pair timepoints, then keep only rows where
# .x is Baseline and .y is the other timepoint.
summary_SCT_wide_wide <- inner_join(summary_SCT_wide, summary_SCT_wide, by = c("Gene", "Line", "Location"))
summary_SCT_wide_wide <- summary_SCT_wide_wide[which(summary_SCT_wide_wide$Time.x != summary_SCT_wide_wide$Time.y),]
summary_SCT_wide_wide <- summary_SCT_wide_wide[which(summary_SCT_wide_wide$Time.x == "Baseline"),]
# Interactive spot checks of outlier genes (results printed, not stored).
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == "TOB0421" & summary_SCT_wide_wide$Mean.y < -2.5), ]) ### ENSG00000129824
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Gene == "ENSG00000129824"),])
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == "TOB0421" & summary_SCT_wide_wide$Mean.y >4), ]) ### ENSG00000106153 CHCHD2
as.data.frame(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Gene == "ENSG00000106153"),])
# One Baseline-vs-other-timepoint scatter per location, faceted by line.
pExpression_Correlation_SCT <- list()
for (location in unique(summary_SCT_wide_wide$Location)){
  pExpression_Correlation_SCT[[location]] <- ggplot(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
    geom_point() +
    theme_classic() +
    facet_wrap(vars(Line), nrow = 1, scales = "free")
  ggsave(pExpression_Correlation_SCT[[location]], filename = paste0(outdir,location,"_Expression_Correlation_norm_sep.png"), width = 15)
}
##### Fit statistics per Line x Location x timepoint pair #####
Rsquared <- unique(summary_SCT_wide_wide[,c("Line","Location","Time.x","Time.y")])
Rsquared$Rsquared <- NA
# BUG FIX: these two lines were bare `Rsquared$pearson` / `Rsquared$spearman`
# accesses (which just print NULL); initialise the columns with NA as was
# clearly intended, matching `Rsquared$Rsquared <- NA` above.
Rsquared$pearson <- NA
Rsquared$spearman <- NA
for (row in 1:nrow(Rsquared)){
  # R^2 of the linear fit of Mean.y on Mean.x within this Line/Location/time combination.
  Rsquared$Rsquared[row] <- summary(lm(Mean.y ~ Mean.x, summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == Rsquared$Line[row] &
    summary_SCT_wide_wide$Location == Rsquared$Location[row] &
    summary_SCT_wide_wide$Time.y == Rsquared$Time.y[row] &
    summary_SCT_wide_wide$Time.x == Rsquared$Time.x[row]),]))$r.squared
  Rsquared$pearson[row] <- cor(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == Rsquared$Line[row] &
    summary_SCT_wide_wide$Location == Rsquared$Location[row] &
    summary_SCT_wide_wide$Time.y == Rsquared$Time.y[row] &
    summary_SCT_wide_wide$Time.x == Rsquared$Time.x[row]),]$Mean.y,
    summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == Rsquared$Line[row] &
    summary_SCT_wide_wide$Location == Rsquared$Location[row] &
    summary_SCT_wide_wide$Time.y == Rsquared$Time.y[row] &
    summary_SCT_wide_wide$Time.x == Rsquared$Time.x[row]),]$Mean.x)
  Rsquared$spearman[row] <- cor(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == Rsquared$Line[row] &
    summary_SCT_wide_wide$Location == Rsquared$Location[row] &
    summary_SCT_wide_wide$Time.y == Rsquared$Time.y[row] &
    summary_SCT_wide_wide$Time.x == Rsquared$Time.x[row]),]$Mean.y,
    summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == Rsquared$Line[row] &
    summary_SCT_wide_wide$Location == Rsquared$Location[row] &
    summary_SCT_wide_wide$Time.y == Rsquared$Time.y[row] &
    summary_SCT_wide_wide$Time.x == Rsquared$Time.x[row]),]$Mean.x, method = "spearman")
}
Rsquared
# ##### Filter out the genes not expressed in 1% of cells #####
# SCT_combined_1pct <- lapply(SCT_combined, function(x){
# lapply(x, function(y){
# subset(y, features = rownames(y)[which((rowSums(y[["SCT"]]@counts > 0)/ncol(y[["SCT"]]@counts)) > 0.01)])
# })
# })
# summary_SCT_list_1pct <- lapply(names(SCT_combined_1pct), function(village){
# temp <- lapply(names(SCT_combined_1pct[[village]]), function(location){
# summary_SCT[which(summary_SCT$Location == location & summary_SCT$Time == village & summary_SCT$Gene %in% rownames(SCT_combined_1pct[[village]][[location]])), ]
# })
# do.call(rbind, temp)
# })
# summary_SCT_1pct <- do.call(rbind, summary_SCT_list_1pct)
# summary_SCT_1pct$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_1pct$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
# summary_SCT_1pct_wide <- pivot_wider(summary_SCT_1pct, names_from = Replicate, values_from = c(Mean, N))
# summary_SCT_1pct_wide$SD <- rowSds(as.matrix(summary_SCT_1pct_wide[,c("Mean_Replicate1", "Mean_Replicate2", "Mean_Replicate3")]))
# summary_SCT_1pct_wide$Mean <- (summary_SCT_1pct_wide$Mean_Replicate1 * summary_SCT_1pct_wide$N_Replicate1 + summary_SCT_1pct_wide$Mean_Replicate2 * summary_SCT_1pct_wide$N_Replicate2 + summary_SCT_1pct_wide$Mean_Replicate3 * summary_SCT_1pct_wide$N_Replicate3)/(summary_SCT_1pct_wide$N_Replicate1 + summary_SCT_1pct_wide$N_Replicate2 + summary_SCT_1pct_wide$N_Replicate3)
# summary_SCT_1pct_wide_wide <- inner_join(summary_SCT_1pct_wide, summary_SCT_1pct_wide, by = c("Gene", "Line", "Location"))
# summary_SCT_1pct_wide_wide <- summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Time.x != summary_SCT_1pct_wide_wide$Time.y),]
# summary_SCT_1pct_wide_wide <- summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Time.x == "Baseline"),]
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == "TOB0421" & summary_SCT_1pct_wide_wide$Mean.y < -2.5), ]) ### ENSG00000129824
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Gene == "ENSG00000129824"),])
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == "TOB0421" & summary_SCT_1pct_wide_wide$Mean.y >4), ]) ### ENSG00000106153 CHCHD2
# as.data.frame(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Gene == "ENSG00000106153"),])
# pExpression_Correlation_SCT <- list()
# for (location in unique(summary_SCT_1pct_wide_wide$Location)){
# pExpression_Correlation_SCT[[location]] <- ggplot(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
# geom_point() +
# theme_classic() +
# facet_wrap(vars(Line), nrow = 1, scales = "free")
# ggsave(pExpression_Correlation_SCT[[location]], filename = paste0(outdir,location,"_Expression_Correlation_norm_sep.png"), width = 15)
# }
# Rsquared_1pct <- unique(summary_SCT_1pct_wide_wide[,c("Line","Location","Time.x","Time.y")])
# Rsquared_1pct$Rsquared <- NA
# Rsquared_1pct$pearson
# Rsquared_1pct$spearman
# for (row in 1:nrow(Rsquared_1pct)){
# Rsquared_1pct$Rsquared[row] <- summary(lm(Mean.y ~ Mean.x, summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]))$r.squared
# Rsquared_1pct$pearson[row] <- cor(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.y,
# summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.x)
# Rsquared_1pct$spearman[row] <- cor(summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.y,
# summary_SCT_1pct_wide_wide[which(summary_SCT_1pct_wide_wide$Line == Rsquared_1pct$Line[row] &
# summary_SCT_1pct_wide_wide$Location == Rsquared_1pct$Location[row] &
# summary_SCT_1pct_wide_wide$Time.y == Rsquared_1pct$Time.y[row] &
# summary_SCT_1pct_wide_wide$Time.x == Rsquared_1pct$Time.x[row]),]$Mean.x, method = "spearman")
# }
# Rsquared_1pct
##### Filter out the genes not expressed in 10% of cells #####
# Keep only genes detected (count > 0) in more than 10% of cells within each
# Time x Location object.
SCT_combined_10pct <- lapply(SCT_combined, function(x){
  lapply(x, function(y){
    subset(y, features = rownames(y)[which((rowSums(y[["SCT"]]@counts > 0)/ncol(y[["SCT"]]@counts)) > 0.1)])
  })
})
# Restrict the replicate-level summary to the 10%-expressed genes of each object.
summary_SCT_list_10pct <- lapply(names(SCT_combined_10pct), function(village){
  temp <- lapply(names(SCT_combined_10pct[[village]]), function(location){
    summary_SCT[which(summary_SCT$Location == location & summary_SCT$Time == village & summary_SCT$Gene %in% rownames(SCT_combined_10pct[[village]][[location]])), ]
  })
  do.call(rbind, temp)
})
summary_SCT_10pct <- do.call(rbind, summary_SCT_list_10pct)
# Harmonise replicate labels across sites (site name -> "Replicate").
summary_SCT_10pct$Replicate <- gsub("Brisbane", "Replicate", summary_SCT_10pct$Replicate) %>% gsub("Melbourne", "Replicate", .) %>% gsub("Sydney", "Replicate", .)
summary_SCT_10pct_wide <- pivot_wider(summary_SCT_10pct, names_from = Replicate, values_from = c(Mean, N))
summary_SCT_10pct_wide$SD <- rowSds(as.matrix(summary_SCT_10pct_wide[,c("Mean_Replicate1", "Mean_Replicate2", "Mean_Replicate3")]))
# Cell-number-weighted mean across the three replicates.
summary_SCT_10pct_wide$Mean <- (summary_SCT_10pct_wide$Mean_Replicate1 * summary_SCT_10pct_wide$N_Replicate1 + summary_SCT_10pct_wide$Mean_Replicate2 * summary_SCT_10pct_wide$N_Replicate2 + summary_SCT_10pct_wide$Mean_Replicate3 * summary_SCT_10pct_wide$N_Replicate3)/(summary_SCT_10pct_wide$N_Replicate1 + summary_SCT_10pct_wide$N_Replicate2 + summary_SCT_10pct_wide$N_Replicate3)
# Self-join to pair Baseline (.x) against the other timepoint (.y) per Gene/Line/Location.
summary_SCT_10pct_wide_wide <- inner_join(summary_SCT_10pct_wide, summary_SCT_10pct_wide, by = c("Gene", "Line", "Location"))
summary_SCT_10pct_wide_wide <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Time.x != summary_SCT_10pct_wide_wide$Time.y),]
summary_SCT_10pct_wide_wide <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Time.x == "Baseline"),]
# Interactive outlier spot checks (printed only).
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == "TOB0421" & summary_SCT_10pct_wide_wide$Mean.y < -2.5), ]) ### ENSG00000129824
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Gene == "ENSG00000129824"),])
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == "TOB0421" & summary_SCT_10pct_wide_wide$Mean.y >4), ]) ### ENSG00000106153 CHCHD2
as.data.frame(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Gene == "ENSG00000106153"),])
# Per-location scatter of Mean.x vs Mean.y, faceted by line.
pExpression_Correlation_SCT <- list()
for (location in unique(summary_SCT_10pct_wide_wide$Location)){
  pExpression_Correlation_SCT[[location]] <- ggplot(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Location == location),], aes(Mean.x, Mean.y)) +
    geom_point(size = 0.5, alpha = 0.5) +
    theme_classic() +
    facet_wrap(vars(Line), nrow = 1, scales = "free")
  ggsave(pExpression_Correlation_SCT[[location]], filename = paste0(outdir,location,"_Expression_Correlation_norm_sep_10pct.png"), width = 15)
}
# NOTE(review): a plain-ggplot version of this combined figure was built here and
# immediately overwritten by the ggscatter() call; the dead assignment has been
# removed.
# BUG FIX: `scales = "free"` was passed to ggscatter() twice; the duplicate has
# been dropped.
# NOTE(review): Time.x == "Baseline" above, so Mean.x holds the Baseline values;
# the xlab/ylab strings ("Village" on x, "Baseline" on y) may be swapped —
# confirm against the intended figure before publication.
pExpression_Correlation_Combined <- ggscatter(data = summary_SCT_10pct_wide_wide, x = "Mean.x", y = "Mean.y",
  color = "Line",
  facet.by = c("Location", "Line"),
  size = 0.5,
  alpha = 0.25,
  ylab = "Average Baseline Normalized Expression",
  xlab = "Average Village Normalized Expression",
  add = "reg.line",
  conf.int = TRUE,
  scales = "free") +
  stat_cor(aes(color = Line), method = "pearson")
ggsave(pExpression_Correlation_Combined, filename = paste0(outdir,"Expression_Correlation_norm_sep_10pct_all_locations.png"))
##### Fresh-sample scatter plots (exclude the Sydney cryopreserved arm) #####
summary_SCT_10pct_wide_wide_fresh <- summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Location != "Sydney_Cryopreserved"),]
summary_SCT_10pct_wide_wide_fresh$Location <- gsub("_Fresh", "", summary_SCT_10pct_wide_wide_fresh$Location)
# Rename sites to their anonymised labels (site_updates: name -> "Site N").
for (location in names(site_updates)){
summary_SCT_10pct_wide_wide_fresh$Location <- gsub(location, site_updates[location], summary_SCT_10pct_wide_wide_fresh$Location)
}
summary_SCT_10pct_wide_wide_fresh$Location_Line <- paste0(summary_SCT_10pct_wide_wide_fresh$Location, "_", summary_SCT_10pct_wide_wide_fresh$Line)
# Scatter with per-facet regression line and Pearson R annotation.
# NOTE(review): Time.x == "Baseline" upstream, so Mean.x is the Baseline value;
# the xlab ("Village") / ylab ("Baseline") strings may be swapped — confirm.
pExpression_Correlation_Combined_fresh <- ggscatter(data = summary_SCT_10pct_wide_wide_fresh, x = "Mean.x", y = "Mean.y",
color = "Line",
facet.by = c("Location_Line"),
palette = line_colors,
size = 0.5,
alpha = 0.25,
ylab = "Baseline Average Normalized Expression",
xlab = "Village Average Normalized Expression",
add = "reg.line",
add.params = list(color = "black", fill = "lightgray", size = 0.5),
conf.int = TRUE,
scales = "free") +
stat_cor(aes(label = ..r.label..), method = "pearson")
save_figs(pExpression_Correlation_Combined_fresh, paste0(outdir,"Expression_Correlation_norm_sep_10pct_fresh"), height = 15, width = 15)
### Make accompanying table ###
# Pearson R and p-value for Mean.y vs Mean.x per Line x Location x time pair.
expression_pearson_df <- unique(summary_SCT_wide_wide[,c("Line","Location","Time.x","Time.y")])
expression_pearson_df$`Pearson R` <- NA
expression_pearson_df$`Pearson P` <- NA
for (row in 1:nrow(expression_pearson_df)){
# NOTE(review): per ?cor.test, `exact` only applies to Kendall/Spearman; it is
# ignored for method = "pearson".
expression_pearson_df$`Pearson R`[row] <- cor.test(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == expression_pearson_df$Line[row] &
summary_SCT_wide_wide$Location == expression_pearson_df$Location[row] &
summary_SCT_wide_wide$Time.y == expression_pearson_df$Time.y[row] &
summary_SCT_wide_wide$Time.x == expression_pearson_df$Time.x[row]),]$Mean.y,
summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == expression_pearson_df$Line[row] &
summary_SCT_wide_wide$Location == expression_pearson_df$Location[row] &
summary_SCT_wide_wide$Time.y == expression_pearson_df$Time.y[row] &
summary_SCT_wide_wide$Time.x == expression_pearson_df$Time.x[row]),]$Mean.x, exact = TRUE, method = "pearson")$estimate
expression_pearson_df$`Pearson P`[row] <- cor.test(summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == expression_pearson_df$Line[row] &
summary_SCT_wide_wide$Location == expression_pearson_df$Location[row] &
summary_SCT_wide_wide$Time.y == expression_pearson_df$Time.y[row] &
summary_SCT_wide_wide$Time.x == expression_pearson_df$Time.x[row]),]$Mean.y,
summary_SCT_wide_wide[which(summary_SCT_wide_wide$Line == expression_pearson_df$Line[row] &
summary_SCT_wide_wide$Location == expression_pearson_df$Location[row] &
summary_SCT_wide_wide$Time.y == expression_pearson_df$Time.y[row] &
summary_SCT_wide_wide$Time.x == expression_pearson_df$Time.x[row]),]$Mean.x, exact = TRUE, method = "pearson")$p.value
}
# Anonymise site names, then write separate tables for fresh and cryopreserved arms.
for (location in names(site_updates)){
expression_pearson_df$Location <- gsub(location, site_updates[location], expression_pearson_df$Location)
}
expression_pearson_df <- data.table(expression_pearson_df)
expression_pearson_df_fresh <- expression_pearson_df[Location != "Site 3_Cryopreserved"]
expression_pearson_df_fresh$Location <- gsub("_Fresh", "", expression_pearson_df_fresh$Location)
fwrite(expression_pearson_df_fresh, paste0(outdir, "Fresh_expression_pearson.tsv"), sep = "\t")
expression_pearson_df_cryo <- expression_pearson_df[grepl("Site 3", expression_pearson_df$Location)]
expression_pearson_df_cryo$Location <- gsub("_", " - ", expression_pearson_df_cryo$Location)
fwrite(expression_pearson_df_cryo, paste0(outdir, "Cryopreserved_expression_pearson.tsv"), sep = "\t")
##### Cryopreserved vs fresh comparison at the Sydney site #####
summary_SCT_10pct_wide_wide_cryo <- summary_SCT_10pct_wide_wide[grepl("Sydney", summary_SCT_10pct_wide_wide$Location),]
summary_SCT_10pct_wide_wide_cryo$Location <- gsub("Sydney_", "", summary_SCT_10pct_wide_wide_cryo$Location)
# Apply any site renaming first (gsub() returns character, coercing factors).
for (location in names(site_updates)){
  summary_SCT_10pct_wide_wide_cryo$Location <- gsub(location, site_updates[location], summary_SCT_10pct_wide_wide_cryo$Location)
}
# BUG FIX: this factor() call previously ran BEFORE the gsub() loop above; the
# loop coerced Location straight back to character, silently discarding the
# intended Fresh-before-Cryopreserved ordering. Factor conversion now comes last.
summary_SCT_10pct_wide_wide_cryo$Location <- factor(summary_SCT_10pct_wide_wide_cryo$Location, levels = c("Fresh", "Cryopreserved"))
summary_SCT_10pct_wide_wide_cryo$Location_Line <- paste0(summary_SCT_10pct_wide_wide_cryo$Location, "_", summary_SCT_10pct_wide_wide_cryo$Line)
# Scatter with per-facet regression line and Pearson R annotation.
pExpression_Correlation_Combined_cryo <- ggscatter(data = summary_SCT_10pct_wide_wide_cryo, x = "Mean.x", y = "Mean.y",
  color = "Line",
  facet.by = c("Location_Line"),
  palette = line_colors,
  size = 0.5,
  alpha = 0.25,
  ylab = "Baseline Average Normalized Expression",
  xlab = "Village Average Normalized Expression",
  add = "reg.line",
  add.params = list(color = "black", fill = "lightgray", size = 0.5),
  conf.int = TRUE,
  scales = "free") +
  stat_cor(aes(label = ..r.label..), method = "pearson")
save_figs(pExpression_Correlation_Combined_cryo, paste0(outdir,"Expression_Correlation_norm_sep_10pct_cryo"), height = 10, width = 12)
##### Fit statistics on the 10%-expressed genes per Line x Location x time pair #####
Rsquared_10pct <- unique(summary_SCT_10pct_wide_wide[,c("Line","Location","Time.x","Time.y")])
Rsquared_10pct$Rsquared <- NA
# BUG FIX: these were bare `Rsquared_10pct$pearson` / `$spearman` accesses
# (printing NULL); initialise the columns with NA as intended.
Rsquared_10pct$pearson <- NA
Rsquared_10pct$spearman <- NA
for (row in 1:nrow(Rsquared_10pct)){
  Rsquared_10pct$Rsquared[row] <- summary(lm(Mean.y ~ Mean.x, summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == Rsquared_10pct$Line[row] &
    summary_SCT_10pct_wide_wide$Location == Rsquared_10pct$Location[row] &
    summary_SCT_10pct_wide_wide$Time.y == Rsquared_10pct$Time.y[row] &
    summary_SCT_10pct_wide_wide$Time.x == Rsquared_10pct$Time.x[row]),]))$r.squared
  Rsquared_10pct$pearson[row] <- cor(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == Rsquared_10pct$Line[row] &
    summary_SCT_10pct_wide_wide$Location == Rsquared_10pct$Location[row] &
    summary_SCT_10pct_wide_wide$Time.y == Rsquared_10pct$Time.y[row] &
    summary_SCT_10pct_wide_wide$Time.x == Rsquared_10pct$Time.x[row]),]$Mean.y,
    summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == Rsquared_10pct$Line[row] &
    summary_SCT_10pct_wide_wide$Location == Rsquared_10pct$Location[row] &
    summary_SCT_10pct_wide_wide$Time.y == Rsquared_10pct$Time.y[row] &
    summary_SCT_10pct_wide_wide$Time.x == Rsquared_10pct$Time.x[row]),]$Mean.x)
  Rsquared_10pct$spearman[row] <- cor(summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == Rsquared_10pct$Line[row] &
    summary_SCT_10pct_wide_wide$Location == Rsquared_10pct$Location[row] &
    summary_SCT_10pct_wide_wide$Time.y == Rsquared_10pct$Time.y[row] &
    summary_SCT_10pct_wide_wide$Time.x == Rsquared_10pct$Time.x[row]),]$Mean.y,
    summary_SCT_10pct_wide_wide[which(summary_SCT_10pct_wide_wide$Line == Rsquared_10pct$Line[row] &
    summary_SCT_10pct_wide_wide$Location == Rsquared_10pct$Location[row] &
    summary_SCT_10pct_wide_wide$Time.y == Rsquared_10pct$Time.y[row] &
    summary_SCT_10pct_wide_wide$Time.x == Rsquared_10pct$Time.x[row]),]$Mean.x, method = "spearman")
}
# Print the result tables for interactive inspection.
Rsquared_10pct
# NOTE(review): Rsquared_1pct is only defined in the commented-out 1% section
# above; this line errors if the script is run end-to-end as-is.
Rsquared_1pct
Rsquared
|
# https://hbctraining.github.io/DGE_workshop/lessons/02_DGE_count_normalization.html
# make normalized count for human data for GSEA analysis
# make expression dataset and phenotype labels files
# create own gene sets from microglia subtype markers
library(tibble)
library(dplyr)
library(DESeq2)
#### MATCH THE METADATA AND COUNTS DATA ####
# NOTE(review): setwd() with an absolute user-specific path makes this script
# machine-dependent; consider relative paths or the here package.
setwd('/Users/januaryyiyue/Desktop/SchoolWorkLife/year3.5/camh/project/mouse_patch_seq/data')
data <- read.csv('20200513_Mouse_PatchSeq_Release_count.v2.csv')
# Label the top/bottom-20 metadata sets as high/low "microglianess" groups.
top20_meta <- read.csv('patchseq_metadata_mouse_ttype_top20.csv')
top20_meta$microglianess <- 'high'
bottom20_meta <- read.csv('patchseq_metadata_mouse_ttype_bottom20.csv')
bottom20_meta$microglianess <- 'low'
meta <- bind_rows(top20_meta, bottom20_meta)
# Drop the first (index) column and align the sample-ID column name with the
# dotted form produced from the counts matrix below.
meta <- as.data.frame(meta[-1])
meta <- dplyr::rename(meta, 'transcriptomics.sample.id'= 'transcriptomics_sample_id')
# no need but just in case
# meta$transcriptomics_sample_id = sub("-",".", meta$transcriptomics_sample_id)
# meta$transcriptomics_sample_id = sub("-",".", meta$transcriptomics_sample_id)
# meta$transcriptomics_sample_id = sub("-",".", meta$transcriptomics_sample_id)
# the transcriptomics_sample_id don't match, so rotate the count matrix and inner_join with meta
data <- data %>%
as.data.frame() %>%
tibble::rownames_to_column(var = 'transcript')
# De-duplicate transcripts before transposing (keeps the first occurrence).
data <- data %>% distinct(transcript, .keep_all = T)
transcript = data$transcript
rownames(data) = rownames(data) %>% make.names(unique=T)
# Transposing through a data.frame coerces counts to character ...
data_trans = data[-1] %>% t() %>% as.data.frame() # integers became string :(
colnames(data_trans) = transcript
data_trans = data_trans %>% janitor::row_to_names(row_number = 1)
# ... so coerce every column back to numeric here.
data_trans[] <- lapply(data_trans, function(x) as.numeric(as.character(x)))
View(data_trans) # now we have the count matrix rotated 90 degrees
data_trans <- tibble::rownames_to_column(data_trans, var = 'transcriptomics.sample.id')
# Keep only samples present in both the counts and metadata tables, and give
# the metadata row names matching the sample IDs (needed by DESeq2 below).
data_id <- as.data.frame(data_trans$transcriptomics.sample.id)
data_id <- dplyr::rename(data_id, 'transcriptomics.sample.id'= 'data_trans$transcriptomics.sample.id')
meta_joined <- inner_join(data_id, meta, by = 'transcriptomics.sample.id')
meta_joined$rowname <- meta_joined$transcriptomics.sample.id
meta_joined <- column_to_rownames(meta_joined, var = 'rowname')
data_trans <- inner_join(data_trans, meta_joined, by = 'transcriptomics.sample.id')
data_trans <- select(data_trans, -c(subclass, microglianess))
# reshape data_trans
# Transpose back so genes are rows and samples are columns, repeating the same
# character->numeric round trip as above.
data_trans <- data_trans %>%
as.data.frame() %>%
tibble::rownames_to_column(var = 'transcript')
data_trans <- data_trans %>% distinct(transcript, .keep_all = T)
transcript = data_trans$transcript
rownames(data_trans) = rownames(data_trans) %>% make.names(unique=T)
data2 = data_trans[-1] %>% t() %>% as.data.frame()
colnames(data2) = transcript
data2 = data2 %>% janitor::row_to_names(row_number = 1)
data2[] <- lapply(data2, function(x) as.numeric(as.character(x)))
View(data2)
# Sanity checks: DESeq2 requires colnames(counts) to match rownames(colData),
# in the same order.
all(colnames(data2) %in% rownames(meta_joined))
all(colnames(data2) == rownames(meta_joined))
#### CREATE DESEq2 OBJECT ####
## Create DESeq2Dataset object
dds <- DESeqDataSetFromMatrix(countData = data2, colData = meta_joined, design = ~ microglianess)
View(counts(dds))
#### GENERATE THE MOV10 NORMALIZED COUNTS ####
# Median-of-ratios size factors, then size-factor-normalized counts.
dds <- estimateSizeFactors(dds)
sizeFactors(dds)
normalized_counts <- counts(dds, normalized=TRUE)
setwd('/Users/januaryyiyue/Desktop/SchoolWorkLife/year3.5/camh/project/olah_suppl_data/gsea_mouse')
write.table(normalized_counts, file="mouse_normalized_counts.txt", sep="\t", quote=F, col.names=NA)
#### MAKE PHENOTYPE LABELS FILES ####
# skipped expression dataset because did it on Excel
# also skipped gene set because did it on Excel
# for the phenotype labels files, take microglianess row in meta_joined and reshape (already checked to make sure that the order is the same as expression dataset)
group <- as.data.frame(meta_joined$microglianess)
group_trans <- group %>% t() %>% as.data.frame()
# do that
# NOTE(review): write.xlsx() comes from the xlsx (or openxlsx) package, neither
# of which is loaded above — this call fails without e.g. library(xlsx).
write.xlsx(group_trans, file = "phenotype_labels.xlsx", sheetName = "phenotype_labels", append = FALSE)
# when on Excel, save Excel file to txt and then to cls
| /GSEA_PREP_MOUSE.R | no_license | derekhoward/patch_seq_microglia | R | false | false | 4,107 | r | # https://hbctraining.github.io/DGE_workshop/lessons/02_DGE_count_normalization.html
# make normalized count for human data for GSEA analysis
# make expression dataset and phenotype labels files
# create own gene sets from microglia subtype markers
library(tibble)
library(dplyr)
library(DESeq2)
#### MATCH THE METADATA AND COUNTS DATA ####
setwd('/Users/januaryyiyue/Desktop/SchoolWorkLife/year3.5/camh/project/mouse_patch_seq/data')
data <- read.csv('20200513_Mouse_PatchSeq_Release_count.v2.csv')
top20_meta <- read.csv('patchseq_metadata_mouse_ttype_top20.csv')
top20_meta$microglianess <- 'high'
bottom20_meta <- read.csv('patchseq_metadata_mouse_ttype_bottom20.csv')
bottom20_meta$microglianess <- 'low'
meta <- bind_rows(top20_meta, bottom20_meta)
meta <- as.data.frame(meta[-1])
meta <- dplyr::rename(meta, 'transcriptomics.sample.id'= 'transcriptomics_sample_id')
# no need but just in case
# meta$transcriptomics_sample_id = sub("-",".", meta$transcriptomics_sample_id)
# meta$transcriptomics_sample_id = sub("-",".", meta$transcriptomics_sample_id)
# meta$transcriptomics_sample_id = sub("-",".", meta$transcriptomics_sample_id)
# the transcriptomics_sample_id don't match, so rotate the count matrix and inner_join with meta
data <- data %>%
as.data.frame() %>%
tibble::rownames_to_column(var = 'transcript')
data <- data %>% distinct(transcript, .keep_all = T)
transcript = data$transcript
rownames(data) = rownames(data) %>% make.names(unique=T)
data_trans = data[-1] %>% t() %>% as.data.frame() # integers became string :(
colnames(data_trans) = transcript
data_trans = data_trans %>% janitor::row_to_names(row_number = 1)
data_trans[] <- lapply(data_trans, function(x) as.numeric(as.character(x)))
View(data_trans) # now we have the count matrix rotated 90 degrees
data_trans <- tibble::rownames_to_column(data_trans, var = 'transcriptomics.sample.id')
data_id <- as.data.frame(data_trans$transcriptomics.sample.id)
data_id <- dplyr::rename(data_id, 'transcriptomics.sample.id'= 'data_trans$transcriptomics.sample.id')
meta_joined <- inner_join(data_id, meta, by = 'transcriptomics.sample.id')
meta_joined$rowname <- meta_joined$transcriptomics.sample.id
meta_joined <- column_to_rownames(meta_joined, var = 'rowname')
data_trans <- inner_join(data_trans, meta_joined, by = 'transcriptomics.sample.id')
data_trans <- select(data_trans, -c(subclass, microglianess))
# reshape data_trans
data_trans <- data_trans %>%
as.data.frame() %>%
tibble::rownames_to_column(var = 'transcript')
data_trans <- data_trans %>% distinct(transcript, .keep_all = T)
transcript = data_trans$transcript
rownames(data_trans) = rownames(data_trans) %>% make.names(unique=T)
data2 = data_trans[-1] %>% t() %>% as.data.frame()
colnames(data2) = transcript
data2 = data2 %>% janitor::row_to_names(row_number = 1)
data2[] <- lapply(data2, function(x) as.numeric(as.character(x)))
View(data2)
all(colnames(data2) %in% rownames(meta_joined))
all(colnames(data2) == rownames(meta_joined))
#### CREATE DESEq2 OBJECT ####
## Create DESeq2Dataset object
dds <- DESeqDataSetFromMatrix(countData = data2, colData = meta_joined, design = ~ microglianess)
View(counts(dds))
#### GENERATE THE MOV10 NORMALIZED COUNTS ####
dds <- estimateSizeFactors(dds)
sizeFactors(dds)
normalized_counts <- counts(dds, normalized=TRUE)
setwd('/Users/januaryyiyue/Desktop/SchoolWorkLife/year3.5/camh/project/olah_suppl_data/gsea_mouse')
write.table(normalized_counts, file="mouse_normalized_counts.txt", sep="\t", quote=F, col.names=NA)
#### MAKE PHENOTYPE LABELS FILES ####
# skipped expression dataset because did it on Excel
# also skipped gene set because did it on Excel
# for the phenotype labels files, take microglianess row in meta_joined and reshape (already checked to make sure that the order is the same as expression dataset)
group <- as.data.frame(meta_joined$microglianess)
group_trans <- group %>% t() %>% as.data.frame()
# do that
write.xlsx(group_trans, file = "phenotype_labels.xlsx", sheetName = "phenotype_labels", append = FALSE)
# when on Excel, save Excel file to txt and then to cls
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_rw_csv.R
\name{read_rw_csv}
\alias{read_rw_csv}
\title{Read RiverWare/RiverSMART produced csv files}
\usage{
read_rw_csv(file)
}
\arguments{
\item{file}{The name of the file which the data are to be read from. Either
an absolute or relative path.}
}
\value{
A tibble (data frame) containing the data in the csv.
}
\description{
\code{read_rw_csv()} reads in a CSV file created from RiverWare. If the CSV
file does not contain column names that RiverWare always uses (see Details),
then it assumes that the CSV file was not created from RiverWare and throws
an error. It also removes spaces from the column names, and adjusts the
\code{Object.Slot} and \verb{Slot Value} columns to be \code{ObjectSlot} and \code{Value},
respectively.
}
\details{
The required column names are: \verb{Run Number}, \verb{Trace Number}, \code{Object.Slot},
\code{Timestep}, \verb{Slot Value}. See the CSV output section of the
\href{http://www.riverware.org/HelpSystem/index.html#page/SolutionApproaches/Solutions_MRM.4.5.html#ww477402}{RiverWare documentation}
for more information on the other optional column names.
This function uses \code{\link[data.table:fread]{data.table::fread()}} to read in
the CSV file, and forces it to expect a CSV file, expect headers, and return
a \code{data.frame}.
}
\examples{
zz <- read_rw_csv(system.file(
"extdata/Scenario/ISM1988_2014,2007Dems,IG,Most",
"KeySlots.csv",
package = "RWDataPlyr"
))
}
\seealso{
\code{\link[=read.rdf]{read.rdf()}}
}
| /man/read_rw_csv.Rd | permissive | BoulderCodeHub/RWDataPlyr | R | false | true | 1,554 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_rw_csv.R
\name{read_rw_csv}
\alias{read_rw_csv}
\title{Read RiverWare/RiverSMART produced csv files}
\usage{
read_rw_csv(file)
}
\arguments{
\item{file}{The name of the file which the data are to be read from. Either
an absolute or relative path.}
}
\value{
A tibble (data frame) containing the data in the csv.
}
\description{
\code{read_rw_csv()} reads in a CSV file created from RiverWare. If the CSV
file does not contain column names that RiverWare always uses (see Details),
then it assumes that the CSV file was not created from RiverWare and throws
an error. It also removes spaces from the column names, and adjusts the
\code{Object.Slot} and \verb{Slot Value} columns to be \code{ObjectSlot} and \code{Value},
respectively.
}
\details{
The required column names are: \verb{Run Number}, \verb{Trace Number}, \code{Object.Slot},
\code{Timestep}, \verb{Slot Value}. See the CSV output section of the
\href{http://www.riverware.org/HelpSystem/index.html#page/SolutionApproaches/Solutions_MRM.4.5.html#ww477402}{RiverWare documentation}
for more information on the other optional column names.
This function uses \code{\link[data.table:fread]{data.table::fread()}} to read in
the CSV file, forcing it to treat the input as a CSV with headers and to
return a \code{data.frame}.
}
\examples{
zz <- read_rw_csv(system.file(
"extdata/Scenario/ISM1988_2014,2007Dems,IG,Most",
"KeySlots.csv",
package = "RWDataPlyr"
))
}
\seealso{
\code{\link[=read.rdf]{read.rdf()}}
}
|
library(ggplot2)

## Data files have been downloaded.
## Load the NEI & SCC data frames.
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")

## Identify the SCC codes for motor-vehicle (on-road) emission sources.
plot5sources <- SCC[grepl("On-Road", SCC$EI.Sector), ]
plot5sources <- plot5sources$SCC

## Keep only on-road records, then restrict to Baltimore City (fips 24510).
## Bug fix: the original second subset re-filtered NEI, silently discarding
## the on-road restriction; it must filter the already-subset data frame.
plot5 <- NEI[NEI$SCC %in% plot5sources, ]
plot5 <- plot5[plot5$fips == "24510", ]

## Aggregate total emissions by year.
plot5 <- aggregate(Emissions ~ year, data = plot5, sum)

ggplot(data = plot5, aes(x = year, y = Emissions)) +
  geom_line() +
  geom_point(size = 4, shape = 21, fill = "white") +
  xlab("Year") +
  ylab("Emissions (tons)") +
  ggtitle("Motor Vehicle PM2.5 Emissions in Baltimore")
ggsave(file = "plot5.png")
| /plot5.R | no_license | fleclee/ExData_Plotting2 | R | false | false | 733 | r | library(ggplot2)
## Data files have been downloaded.
## Load the NEI & SCC data frames.
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")

## Identify the SCC codes for motor-vehicle (on-road) emission sources.
plot5sources <- SCC[grepl("On-Road", SCC$EI.Sector), ]
plot5sources <- plot5sources$SCC

## Keep only on-road records, then restrict to Baltimore City (fips 24510).
## Bug fix: the original second subset re-filtered NEI, silently discarding
## the on-road restriction; it must filter the already-subset data frame.
plot5 <- NEI[NEI$SCC %in% plot5sources, ]
plot5 <- plot5[plot5$fips == "24510", ]

## Aggregate total emissions by year.
plot5 <- aggregate(Emissions ~ year, data = plot5, sum)

ggplot(data = plot5, aes(x = year, y = Emissions)) +
  geom_line() +
  geom_point(size = 4, shape = 21, fill = "white") +
  xlab("Year") +
  ylab("Emissions (tons)") +
  ggtitle("Motor Vehicle PM2.5 Emissions in Baltimore")
ggsave(file = "plot5.png")
library(WGCNA)
library(RColorBrewer)
library(preprocessCore)

# Load the batch-corrected (limma) variance-stabilised expression matrix and
# the matching sample metadata.
bm_10 <- readRDS('Data/02_vst_10_filtered_limma_corrected.rds')
bm_10_coldata <- readRDS('Data/02_bm_10_data.rds')

# Split samples into the two conditions compared by DiffCoEx (NP.1 == 1 vs 4).
bm_10_NP.1_coldata <- bm_10_coldata[bm_10_coldata$NP.1 == 1, ]
bm_10_NP.4_coldata <- bm_10_coldata[bm_10_coldata$NP.1 == 4, ]
bm_10_NP.1 <- t(bm_10[, bm_10_NP.1_coldata$index_id]) # rows -> samples, cols -> genes
bm_10_NP.4 <- t(bm_10[, bm_10_NP.4_coldata$index_id]) # rows -> samples, cols -> genes

# Bug fix: the DiffCoEx steps below reference datC1/datC2, which were never
# defined in the original script; alias them to the two condition matrices.
datC1 <- bm_10_NP.1
datC2 <- bm_10_NP.4

# Soft-thresholding power applied to the adjacency-difference matrix.
beta1 <- 3

# Signed, squared Spearman correlation adjacency for each condition.
AdjMatC1 <- sign(cor(datC1, method = "spearman")) * (cor(datC1, method = "spearman"))^2
AdjMatC2 <- sign(cor(datC2, method = "spearman")) * (cor(datC2, method = "spearman"))^2
diag(AdjMatC1) <- 0
diag(AdjMatC2) <- 0
collectGarbage()

# Dissimilarity = TOM distance of the scaled adjacency difference.
dissTOMC1C2 <- TOMdist((abs(AdjMatC1 - AdjMatC2) / 2)^(beta1 / 2))
collectGarbage()

# Hierarchical clustering on the topological-overlap dissimilarity.
geneTreeC1C2 <- flashClust(as.dist(dissTOMC1C2), method = "average")

# Plot the resulting clustering tree (dendrogram).
png(file = "hierarchicalTree.png", height = 1000, width = 1000)
plot(geneTreeC1C2, xlab = "", sub = "", main = "Gene clustering on TOM-based dissimilarity",
     labels = FALSE, hang = 0.04)
dev.off()

# Extract modules from the hierarchical tree (see cutreeDynamic in the
# dynamicTreeCut/WGCNA documentation).
dynamicModsHybridC1C2 <- cutreeDynamic(dendro = geneTreeC1C2, distM = dissTOMC1C2,
                                       method = "hybrid", cutHeight = .996,
                                       deepSplit = TRUE, pamRespectsDendro = FALSE,
                                       minClusterSize = 20)
# GREY is reserved for genes not assigned to any differentially coexpressed module.
dynamicColorsHybridC1C2 <- labels2colors(dynamicModsHybridC1C2)

# Merge modules whose eigengenes are close (see WGCNA::mergeCloseModules).
mergedColorC1C2 <- mergeCloseModules(rbind(datC1, datC2), dynamicColorsHybridC1C2,
                                     cutHeight = .2)$color
colorh1C1C2 <- mergedColorC1C2

# Reassign more distinguishable colors to the merged modules.
colorh1C1C2[which(colorh1C1C2 == "midnightblue")] <- "red"
colorh1C1C2[which(colorh1C1C2 == "lightgreen")] <- "yellow"
colorh1C1C2[which(colorh1C1C2 == "cyan")] <- "orange"
colorh1C1C2[which(colorh1C1C2 == "lightcyan")] <- "green"

# Plot the dendrogram with module colors underneath.
png(file = "module_assignment.png", width = 1000, height = 1000)
plotDendroAndColors(geneTreeC1C2, colorh1C1C2, "Hybrid Tree Cut",
                    dendroLabels = FALSE, hang = 0.03, addGuide = TRUE,
                    guideHang = 0.05, main = "Gene dendrogram and module colors cells")
dev.off()

# Write each module to an individual file. NOTE(review): extractModules,
# plotC1C2Heatmap, plotExprChange and `anno` come from the DiffCoEx
# "Supporting Functions" script, presumably sourced beforehand -- TODO confirm.
modulesC1C2Merged <- extractModules(colorh1C1C2, datC1, anno, dir = "modules",
                                    file_prefix = paste("Output", "Specific_module", sep = ''),
                                    write = T)
write.table(colorh1C1C2, file = "module_assignment.txt", row.names = F, col.names = F, quote = F)

# Comparative heatmap showing correlation changes within the modules.
plotC1C2Heatmap(colorh1C1C2, AdjMatC1, AdjMatC2, datC1, datC2)
png(file = "exprChange.png", height = 500, width = 500)
plotExprChange(datC1, datC2, colorh1C1C2)
dev.off() | /Scripts/04_bm_10_diffcoex.R | no_license | ngyz96/URECA_2020 | R | false | false | 3,197 | r | library(WGCNA)
library(RColorBrewer)
library(preprocessCore)

# Load the batch-corrected (limma) variance-stabilised expression matrix and
# the matching sample metadata.
bm_10 <- readRDS('Data/02_vst_10_filtered_limma_corrected.rds')
bm_10_coldata <- readRDS('Data/02_bm_10_data.rds')

# Split samples into the two conditions compared by DiffCoEx (NP.1 == 1 vs 4).
bm_10_NP.1_coldata <- bm_10_coldata[bm_10_coldata$NP.1 == 1, ]
bm_10_NP.4_coldata <- bm_10_coldata[bm_10_coldata$NP.1 == 4, ]
bm_10_NP.1 <- t(bm_10[, bm_10_NP.1_coldata$index_id]) # rows -> samples, cols -> genes
bm_10_NP.4 <- t(bm_10[, bm_10_NP.4_coldata$index_id]) # rows -> samples, cols -> genes

# Bug fix: the DiffCoEx steps below reference datC1/datC2, which were never
# defined in the original script; alias them to the two condition matrices.
datC1 <- bm_10_NP.1
datC2 <- bm_10_NP.4

# Soft-thresholding power applied to the adjacency-difference matrix.
beta1 <- 3

# Signed, squared Spearman correlation adjacency for each condition.
AdjMatC1 <- sign(cor(datC1, method = "spearman")) * (cor(datC1, method = "spearman"))^2
AdjMatC2 <- sign(cor(datC2, method = "spearman")) * (cor(datC2, method = "spearman"))^2
diag(AdjMatC1) <- 0
diag(AdjMatC2) <- 0
collectGarbage()

# Dissimilarity = TOM distance of the scaled adjacency difference.
dissTOMC1C2 <- TOMdist((abs(AdjMatC1 - AdjMatC2) / 2)^(beta1 / 2))
collectGarbage()

# Hierarchical clustering on the topological-overlap dissimilarity.
geneTreeC1C2 <- flashClust(as.dist(dissTOMC1C2), method = "average")

# Plot the resulting clustering tree (dendrogram).
png(file = "hierarchicalTree.png", height = 1000, width = 1000)
plot(geneTreeC1C2, xlab = "", sub = "", main = "Gene clustering on TOM-based dissimilarity",
     labels = FALSE, hang = 0.04)
dev.off()

# Extract modules from the hierarchical tree (see cutreeDynamic in the
# dynamicTreeCut/WGCNA documentation).
dynamicModsHybridC1C2 <- cutreeDynamic(dendro = geneTreeC1C2, distM = dissTOMC1C2,
                                       method = "hybrid", cutHeight = .996,
                                       deepSplit = TRUE, pamRespectsDendro = FALSE,
                                       minClusterSize = 20)
# GREY is reserved for genes not assigned to any differentially coexpressed module.
dynamicColorsHybridC1C2 <- labels2colors(dynamicModsHybridC1C2)

# Merge modules whose eigengenes are close (see WGCNA::mergeCloseModules).
mergedColorC1C2 <- mergeCloseModules(rbind(datC1, datC2), dynamicColorsHybridC1C2,
                                     cutHeight = .2)$color
colorh1C1C2 <- mergedColorC1C2

# Reassign more distinguishable colors to the merged modules.
colorh1C1C2[which(colorh1C1C2 == "midnightblue")] <- "red"
colorh1C1C2[which(colorh1C1C2 == "lightgreen")] <- "yellow"
colorh1C1C2[which(colorh1C1C2 == "cyan")] <- "orange"
colorh1C1C2[which(colorh1C1C2 == "lightcyan")] <- "green"

# Plot the dendrogram with module colors underneath.
png(file = "module_assignment.png", width = 1000, height = 1000)
plotDendroAndColors(geneTreeC1C2, colorh1C1C2, "Hybrid Tree Cut",
                    dendroLabels = FALSE, hang = 0.03, addGuide = TRUE,
                    guideHang = 0.05, main = "Gene dendrogram and module colors cells")
dev.off()

# Write each module to an individual file. NOTE(review): extractModules,
# plotC1C2Heatmap, plotExprChange and `anno` come from the DiffCoEx
# "Supporting Functions" script, presumably sourced beforehand -- TODO confirm.
modulesC1C2Merged <- extractModules(colorh1C1C2, datC1, anno, dir = "modules",
                                    file_prefix = paste("Output", "Specific_module", sep = ''),
                                    write = T)
write.table(colorh1C1C2, file = "module_assignment.txt", row.names = F, col.names = F, quote = F)

# Comparative heatmap showing correlation changes within the modules.
plotC1C2Heatmap(colorh1C1C2, AdjMatC1, AdjMatC2, datC1, datC2)
png(file = "exprChange.png", height = 500, width = 500)
plotExprChange(datC1, datC2, colorh1C1C2)
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quanteda.dictionaries-package.r
\docType{package}
\name{quanteda.dictionaries}
\alias{quanteda.dictionaries}
\alias{quanteda.dictionaries-package}
\title{An R package consisting of dictionaries for text analysis and associated utilities}
\description{
Provides text analysis dictionaries and additional functionality for their
use in text analysis frameworks, especially \pkg{quanteda}.
}
\details{
The package contains several dictionaries that capture positive and negative sentiment, and
other types of emotions. Moreover, the package contains word lists of words spelled differently in
British and American English. These lists can be used to adjust text corpora and avoid
double-counting the same word with different spellings in the same corpus.
The second main purpose of \pkg{quanteda.dictionaries} is the function \link{liwcalike}. It allows
analyzing text corpora in a LIWC-alike fashion. LIWC (Linguistic Inquiry and Word Count) is a
standalone software distributed at http://liwc.wpengine.com. \link{liwcalike} takes a \pkg{quanteda}
\link[quanteda]{corpus} as input and makes it easy to apply dictionaries to the text corpus.
The output returns a data.frame consisting of percentages and other quantities, as well as the count
of all dictionary categories in each document.
}
\section{Source code and additional information}{
\url{http://github.com/kbenoit/quanteda.dictionaries}
}
| /man/quanteda.dictionaries.Rd | no_license | mrweiler/quanteda.dictionaries | R | false | true | 1,488 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quanteda.dictionaries-package.r
\docType{package}
\name{quanteda.dictionaries}
\alias{quanteda.dictionaries}
\alias{quanteda.dictionaries-package}
\title{An R package consisting of dictionaries for text analysis and associated utilities}
\description{
Provides text analysis dictionaries and additional functionality for their
use in text analysis frameworks, especially \pkg{quanteda}.
}
\details{
The package contains several dictionaries that capture positive and negative sentiment, and
other types of emotions. Moreover, the package contains word lists of words spelled differently in
British and American English. These lists can be used to adjust text corpora and avoid
double-counting the same word with different spellings in the same corpus.
The second main purpose of \pkg{quanteda.dictionaries} is the function \link{liwcalike}. It allows
analyzing text corpora in a LIWC-alike fashion. LIWC (Linguistic Inquiry and Word Count) is a
standalone software distributed at http://liwc.wpengine.com. \link{liwcalike} takes a \pkg{quanteda}
\link[quanteda]{corpus} as input and makes it easy to apply dictionaries to the text corpus.
The output returns a data.frame consisting of percentages and other quantities, as well as the count
of all dictionary categories in each document.
}
\section{Source code and additional information}{
\url{http://github.com/kbenoit/quanteda.dictionaries}
}
|
library(shinydashboard)
library(shiny)

# !!! use fluidrows -> columns or boxes for each dashboard tab (CH 3 shinydashboards:datacamp) --> and color them (also CH 3)
# !!! Add fontawesome Icons to tab names
# Prolly not -> ideas: read in data(csv) real time?
# !!! use DT:Table to display data (head) with some sort function after describing the pre-processing procedures.

# Header with a notifications dropdown linking to external pages.
# Bug fix: shiny::icon() expects the Font Awesome name WITHOUT the "fa-"
# prefix (it builds the class "fa fa-<name>" itself), so the original
# icon(name = "fa-github", class = "fa-github") produced an invalid class
# "fa fa-fa-github" and no icon was rendered.
header <- dashboardHeader(
  dropdownMenu(
    type = "notifications",
    notificationItem(
      text = "Check out my Website by clicking here.",
      href = "http://www.datacamp.com",
      icon = icon("globe-americas")
    ),
    notificationItem(
      text = "Check out my GitHub by clicking here.",
      href = "https://github.com/fuchsfranklin",
      icon = icon("github")
    )
  )
)

# Sidebar: one menu entry per dashboard section.
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem("Recommender Settings", tabName = "rec_set"),
    menuItem("Product Recommendations", tabName = "prod_rec"),
    menuItem("Algorithm Information", tabName = "algo_info"),
    menuItem("Performance Metrics and Plots", tabName = "perf_mets"),
    menuItem("Data Information", tabName = "data_info"),
    menuItem("Dashboard Information", tabName = "dash_info")
  )
)

# Body: placeholder text for each tab until real content is added.
# (Also fixes the user-facing "Settigns" typo.)
body <- dashboardBody(
  tabItems(
    tabItem(tabName = "rec_set", "No Settings yet"),
    tabItem(tabName = "prod_rec", "No Recommendations yet"),
    tabItem(tabName = "algo_info", "No Information About Algorithms yet"),
    tabItem(tabName = "perf_mets", "No Metrics or Plots yet."),
    tabItem(tabName = "data_info", "No Data Information yet."),
    tabItem(tabName = "dash_info", "No References yet.")
  )
)

ui <- dashboardPage(header = header, sidebar = sidebar, body = body)
server <- function(input, output) {}
shiny::shinyApp(ui, server) | /Recommendation_System_Dashboard.R | no_license | fuchsfranklin/Recommendation-System-Project | R | false | false | 1,968 | r | library(shinydashboard)
library(shiny)

# !!! use fluidrows -> columns or boxes for each dashboard tab (CH 3 shinydashboards:datacamp) --> and color them (also CH 3)
# !!! Add fontawesome Icons to tab names
# Prolly not -> ideas: read in data(csv) real time?
# !!! use DT:Table to display data (head) with some sort function after describing the pre-processing procedures.

# Header with a notifications dropdown linking to external pages.
# Bug fix: shiny::icon() expects the Font Awesome name WITHOUT the "fa-"
# prefix (it builds the class "fa fa-<name>" itself), so the original
# icon(name = "fa-github", class = "fa-github") produced an invalid class
# "fa fa-fa-github" and no icon was rendered.
header <- dashboardHeader(
  dropdownMenu(
    type = "notifications",
    notificationItem(
      text = "Check out my Website by clicking here.",
      href = "http://www.datacamp.com",
      icon = icon("globe-americas")
    ),
    notificationItem(
      text = "Check out my GitHub by clicking here.",
      href = "https://github.com/fuchsfranklin",
      icon = icon("github")
    )
  )
)

# Sidebar: one menu entry per dashboard section.
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem("Recommender Settings", tabName = "rec_set"),
    menuItem("Product Recommendations", tabName = "prod_rec"),
    menuItem("Algorithm Information", tabName = "algo_info"),
    menuItem("Performance Metrics and Plots", tabName = "perf_mets"),
    menuItem("Data Information", tabName = "data_info"),
    menuItem("Dashboard Information", tabName = "dash_info")
  )
)

# Body: placeholder text for each tab until real content is added.
# (Also fixes the user-facing "Settigns" typo.)
body <- dashboardBody(
  tabItems(
    tabItem(tabName = "rec_set", "No Settings yet"),
    tabItem(tabName = "prod_rec", "No Recommendations yet"),
    tabItem(tabName = "algo_info", "No Information About Algorithms yet"),
    tabItem(tabName = "perf_mets", "No Metrics or Plots yet."),
    tabItem(tabName = "data_info", "No Data Information yet."),
    tabItem(tabName = "dash_info", "No References yet.")
  )
)

ui <- dashboardPage(header = header, sidebar = sidebar, body = body)
server <- function(input, output) {}
shiny::shinyApp(ui, server)
\name{tc}
\alias{tc}
\docType{data}
\title{
Tencent Data Frame
}
\description{
Data frame containing data of Date, Monthly Active Users (MAUs) and Revenue of Tencent.
}
\usage{data(tc)}
\format{
A data frame with 46 observations on the following 3 variables.
\describe{
\item{\code{date}}{the date}
\item{\code{maus}}{monthly active users average for the quarter.}
\item{\code{revenue}}{GAAP revenue for the quarter}
}
}
\details{
It contains data from the second quarter 2004 (2004'Q2) to the third quarter 2015 (2015'Q3) obtained from the financial releases reported to investors.
The Date (date) variable is formatted as "\%Y-\%m-\%d".
Monthly Active Users (maus) variable is in million of users.
Revenue (revenue) is in million USD.
}
\source{
\url{http://www.tencent.com/en-us/ir/news/2015.shtml}
}
\examples{
data(tc)
}
\keyword{datasets}
| /man/tc.Rd | no_license | kmyokoyama/netvl | R | false | false | 863 | rd | \name{tc}
\alias{tc}
\docType{data}
\title{
Tencent Data Frame
}
\description{
Data frame containing data of Date, Monthly Active Users (MAUs) and Revenue of Tencent.
}
\usage{data(tc)}
\format{
A data frame with 46 observations on the following 3 variables.
\describe{
\item{\code{date}}{the date}
\item{\code{maus}}{monthly active users average for the quarter.}
\item{\code{revenue}}{GAAP revenue for the quarter}
}
}
\details{
It contains data from the second quarter 2004 (2004'Q2) to the third quarter 2015 (2015'Q3) obtained from the financial releases reported to investors.
The Date (date) variable is formatted as "\%Y-\%m-\%d".
Monthly Active Users (maus) variable is in million of users.
Revenue (revenue) is in million USD.
}
\source{
\url{http://www.tencent.com/en-us/ir/news/2015.shtml}
}
\examples{
data(tc)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HistPlot.R
\name{HistPlot}
\alias{HistPlot}
\title{Density estimates of the histograms for a list of Markov chains.}
\usage{
HistPlot(list_of_vectors, method = NULL, burn_in = 0.1, size_line = 1,
...)
}
\arguments{
\item{list_of_vectors}{A list of Markov chains to be plotted. The number of element in the list can be greater or equal than one.}
\item{method}{A vector giving the different algorithms used to produce the Markov chains. The default value is NULL.}
\item{burn_in}{A proportion of the generated sample that need to be discarded in the plot. Need to be given as a percentage in decimal number of the algorithm iterations. The default value is 0.1.}
\item{size_line}{The size of the line in the plot. The default value is 1.}
\item{...}{Additional graphical parameters to be passed to ggplot.}
}
\value{
The function returns the density estimates for the histograms of the specified Markov chains.
}
\description{
\code{} The HistPlot function returns the density estimates for the histograms of a given list of Markov chains.
}
\examples{
# Generate two sets of values
chain1 = rnorm(100)
chain2 = rnorm(100)
# Produce the density plots
HistPlot(list(chain1, chain2),
method = c('Single machine', 'Multiple machines'),
burn_in = 0.2,
size_line = 2)
}
| /ConsensusMCMC/man/HistPlot.Rd | no_license | EmiliaPompe/GirlsProject | R | false | true | 1,381 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HistPlot.R
\name{HistPlot}
\alias{HistPlot}
\title{Density estimates of the histograms for a list of Markov chains.}
\usage{
HistPlot(list_of_vectors, method = NULL, burn_in = 0.1, size_line = 1,
...)
}
\arguments{
\item{list_of_vectors}{A list of Markov chains to be plotted. The number of element in the list can be greater or equal than one.}
\item{method}{A vector giving the different algorithms used to produce the Markov chains. The default value is NULL.}
\item{burn_in}{A proportion of the generated sample that need to be discarded in the plot. Need to be given as a percentage in decimal number of the algorithm iterations. The default value is 0.1.}
\item{size_line}{The size of the line in the plot. The default value is 1.}
\item{...}{Additional graphical parameters to be passed to ggplot.}
}
\value{
The function returns the density estimates for the histograms of the specified Markov chains.
}
\description{
\code{} The HistPlot function returns the density estimates for the histograms of a given list of Markov chains.
}
\examples{
# Generate two sets of values
chain1 = rnorm(100)
chain2 = rnorm(100)
# Produce the density plots
HistPlot(list(chain1, chain2),
method = c('Single machine', 'Multiple machines'),
burn_in = 0.2,
size_line = 2)
}
|
\name{format_author}
\alias{format_author}
\title{from utils:::toBibtex, good for matching by given name initials only}
\usage{
format_author(author)
}
\description{
from utils:::toBibtex, good for matching by given name
initials only
}
\keyword{internal}
| /man/format_author.Rd | no_license | aurora-mareviv/RefManageR | R | false | false | 269 | rd | \name{format_author}
\alias{format_author}
\title{from utils:::toBibtex, good for matching by given name initials only}
\usage{
format_author(author)
}
\description{
from utils:::toBibtex, good for matching by given name
initials only
}
\keyword{internal}
|
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 858935928078237, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) | /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615927180-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,101 | r | testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 858935928078237, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) |
#' FiveThirtyEight probabilities from the 2018 election cycle.
#'
#' A dataset containing FiveThirtyEight's probability estimates for the 2018 US elections. The races covered seats
#' in the House, Senate, and for Governor. They also used three different methods to compute these estimates. Classic, Deluxe,
#' and Lite. All of these have been included. The probabilities can be viewed from the perspective of a Democrat winning
#' or from the perspective Republican winning any given race. The final results of each race are also included.
#'
#' @format A data frame with 1518 rows and 11 variables:
#' \describe{
#' \item{cycle}{the year the race took place}
#' \item{branch}{what branch of the government the race was for}
#' \item{race}{the specific position the election was for}
#' \item{forecastdate}{date of forecast}
#' \item{version}{type of prediction method FiveThirtyEight used}
#' \item{Democrat_WinProbability}{probability of a democrat winning the race}
#' \item{Republican_WinProbability}{probability of a republican winning the race}
#' \item{category}{category}
#' \item{Democrat_Won}{binary variable indicating if a democrat won or not}
#' \item{Republican_Won}{binary variable indicating if a republican won or not}
#' \item{uncalled}{binary variable indicating if the race was uncalled or not}
#' }
#' @source \url{https://github.com/fivethirtyeight/data/tree/master/forecast-review}
"elections_2018"
| /R/elections_2018.R | no_license | bradleyrava/ecap | R | false | false | 1,422 | r | #' FiveThirtyEight probabilities from the 2018 election cycle.
#'
#' A dataset containing FiveThirtyEight's probability estimates for the 2018 US elections. The races covered seats
#' in the House, Senate, and for Governor. They also used three different methods to compute these estimates. Classic, Deluxe,
#' and Lite. All of these have been included. The probabilities can be viewed from the perspective of a Democrat winning
#' or from the perspective Republican winning any given race. The final results of each race are also included.
#'
#' @format A data frame with 1518 rows and 11 variables:
#' \describe{
#' \item{cycle}{the year the race took place}
#' \item{branch}{what branch of the government the race was for}
#' \item{race}{the specific position the election was for}
#' \item{forecastdate}{date of forecast}
#' \item{version}{type of prediction method FiveThirtyEight used}
#' \item{Democrat_WinProbability}{probability of a democrat winning the race}
#' \item{Republican_WinProbability}{probability of a republican winning the race}
#' \item{category}{category}
#' \item{Democrat_Won}{binary variable indicating if a democrat won or not}
#' \item{Republican_Won}{binary variable indicating if a republican won or not}
#' \item{uncalled}{binary variable indicating if the race was uncalled or not}
#' }
#' @source \url{https://github.com/fivethirtyeight/data/tree/master/forecast-review}
"elections_2018"
|
setwd("C:/coursera/getcleandata/week4")

# Bug fix: group_by() and summarize_all() below are dplyr functions, but the
# package was never attached; load it explicitly so the script runs
# top-to-bottom.
library(dplyr)

# Read the activity-labels and features files, naming the columns for the
# activity/feature numbers and their descriptions. The features file lists
# features in the same order as the measurement columns of the test and
# training files.
act_tbl <- read.table("Activity_Labels.txt",col.names=c("ActNum","ActDesc"))
features <- read.table("features.txt",col.names=c("feature_num","feature_desc"))

# Logical vectors flagging columns that are mean ("mean") or standard
# deviation ("std") measures. NOTE(review): "mean()" as a regex contains an
# empty group, so it matches any feature containing "mean" (incl. meanFreq);
# kept as-is to preserve the original column selection.
fetch_mean <- grepl("mean()",features$feature_desc)
fetch_std <- grepl("std()",features$feature_desc)
fetch_all <- fetch_mean | fetch_std

# Keep only the mean/std features.
kept_features <- features[fetch_all,]

# Read in the main test and training measurement files.
testdata <- read.table("X_test.txt")
traindata <- read.table("X_train.txt")

# Subject and activity numbers for each row of the test and training files.
testsubj <- read.table("subject_test.txt",col.names=c("SubjectNum"))
trainsubj <- read.table("subject_train.txt",col.names=c("SubjectNum"))
testactvty <- read.table("y_test.txt",col.names=c("ActvtyNum"))
trainactvty <- read.table("y_train.txt",col.names=c("ActvtyNum"))

# Select only the mean/std columns by using fetch_all as the column index,
# then label them with the feature descriptions.
test_mnstd <- testdata[,fetch_all]
train_mnstd <- traindata[,fetch_all]
colnames(test_mnstd) <- kept_features$feature_desc
colnames(train_mnstd) <- kept_features$feature_desc

# Attach the activity number and subject to each data frame.
test_mnstd$ActvtyNum <- testactvty[[1]]
train_mnstd$ActvtyNum <- trainactvty[[1]]
test_mnstd$SubjectNum <- testsubj[[1]]
train_mnstd$SubjectNum <- trainsubj[[1]]

# Combine the test and training files into a single data frame.
cmbnd <- rbind(test_mnstd,train_mnstd)

# Merge with the activity-name table (by activity number) to add the
# activity description.
cmbnd2 <- merge(cmbnd,act_tbl,by.x="ActvtyNum",by.y="ActNum",all=TRUE)

# Second tidy dataset: the mean of every measure by subject and activity.
SbjctActGrp <- group_by(cmbnd2,SubjectNum,ActDesc)
SbjctActGrp_Mean <- summarize_all(SbjctActGrp,mean)
| /assgmnt.R | no_license | chasman55/Getting-and-Cleaning-Data-Week4-Assignment | R | false | false | 2,709 | r | setwd("C:/coursera/getcleandata/week4")
# Read the activity-labels and features files, naming the columns for the
# activity/feature numbers and their descriptions. The features file lists
# features in the same order as the measurement columns of the test and
# training files.
act_tbl <- read.table("Activity_Labels.txt",col.names=c("ActNum","ActDesc"))
features <- read.table("features.txt",col.names=c("feature_num","feature_desc"))

# Bug fix: group_by() and summarize_all() below are dplyr functions, but the
# package was never attached; load it explicitly so the script runs
# top-to-bottom.
library(dplyr)

# Logical vectors flagging columns that are mean ("mean") or standard
# deviation ("std") measures. NOTE(review): "mean()" as a regex contains an
# empty group, so it matches any feature containing "mean" (incl. meanFreq);
# kept as-is to preserve the original column selection.
fetch_mean <- grepl("mean()",features$feature_desc)
fetch_std <- grepl("std()",features$feature_desc)
fetch_all <- fetch_mean | fetch_std

# Keep only the mean/std features.
kept_features <- features[fetch_all,]

# Read in the main test and training measurement files.
testdata <- read.table("X_test.txt")
traindata <- read.table("X_train.txt")

# Subject and activity numbers for each row of the test and training files.
testsubj <- read.table("subject_test.txt",col.names=c("SubjectNum"))
trainsubj <- read.table("subject_train.txt",col.names=c("SubjectNum"))
testactvty <- read.table("y_test.txt",col.names=c("ActvtyNum"))
trainactvty <- read.table("y_train.txt",col.names=c("ActvtyNum"))

# Select only the mean/std columns by using fetch_all as the column index,
# then label them with the feature descriptions.
test_mnstd <- testdata[,fetch_all]
train_mnstd <- traindata[,fetch_all]
colnames(test_mnstd) <- kept_features$feature_desc
colnames(train_mnstd) <- kept_features$feature_desc

# Attach the activity number and subject to each data frame.
test_mnstd$ActvtyNum <- testactvty[[1]]
train_mnstd$ActvtyNum <- trainactvty[[1]]
test_mnstd$SubjectNum <- testsubj[[1]]
train_mnstd$SubjectNum <- trainsubj[[1]]

# Combine the test and training files into a single data frame.
cmbnd <- rbind(test_mnstd,train_mnstd)

# Merge with the activity-name table (by activity number) to add the
# activity description.
cmbnd2 <- merge(cmbnd,act_tbl,by.x="ActvtyNum",by.y="ActNum",all=TRUE)

# Second tidy dataset: the mean of every measure by subject and activity.
SbjctActGrp <- group_by(cmbnd2,SubjectNum,ActDesc)
SbjctActGrp_Mean <- summarize_all(SbjctActGrp,mean)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formality.R
\name{Network.formality}
\alias{Network.formality}
\title{Network Formality}
\usage{
\method{Network}{formality}(x, contextual = "yellow", formal = "red",
edge.constant, title = NULL, digits = 3, plus.300.color = "grey40",
under.300.color = "grey88", missing.color = "purple", ...)
}
\arguments{
\item{x}{A \code{\link[qdap]{formality}} object.}
\item{contextual}{The color to use for 0\% formality (purely contextual).}
\item{formal}{The color to use for 100\% formality (purely formal).}
\item{edge.constant}{A constant to multiply edge width by.}
\item{title}{The title to apply to the \code{Network}ed image(s).}
\item{digits}{The number of digits to use in the current turn of talk
formality.}
\item{plus.300.color}{The bar color to use for grouping variables exceeding
299 words per Heylighen & Dewaele's (2002) minimum word recommendations.}
\item{under.300.color}{The bar color to use for grouping variables less
than 300 words per Heylighen & Dewaele's (2002) minimum word recommendations.}
\item{missing.color}{The color to use in a network plot for edges
corresponding to missing text data. Use \code{\link[stats]{na.omit}}
beforehand to remove the missing values altogether.}
\item{\ldots}{Other arguments passed to \code{\link[qdap]{discourse_map}}.}
}
\description{
\code{Network.formality} - Network a \code{\link[qdap]{formality}} object.
}
\details{
formality Method for Network
}
| /man/Network.formality.Rd | no_license | hoodaly/qdap | R | false | true | 1,511 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formality.R
\name{Network.formality}
\alias{Network.formality}
\title{Network Formality}
\usage{
\method{Network}{formality}(x, contextual = "yellow", formal = "red",
edge.constant, title = NULL, digits = 3, plus.300.color = "grey40",
under.300.color = "grey88", missing.color = "purple", ...)
}
\arguments{
\item{x}{A \code{\link[qdap]{formality}} object.}
\item{contextual}{The color to use for 0\% formality (purely contextual).}
\item{formal}{The color to use for 100\% formality (purely formal).}
\item{edge.constant}{A constant to multiply edge width by.}
\item{title}{The title to apply to the \code{Network}ed image(s).}
\item{digits}{The number of digits to use in the current turn of talk
formality.}
\item{plus.300.color}{The bar color to use for grouping variables exceeding
299 words per Heylighen & Dewaele's (2002) minimum word recommendations.}
\item{under.300.color}{The bar color to use for grouping variables less
than 300 words per Heylighen & Dewaele's (2002) minimum word recommendations.}
\item{missing.color}{The color to use in a network plot for edges
corresponding to missing text data. Use \code{\link[stats]{na.omit}}
beforehand to remove the missing values altogether.}
\item{\ldots}{Other arguments passed to \code{\link[qdap]{discourse_map}}.}
}
\description{
\code{Network.formality} - Network a \code{\link[qdap]{formality}} object.
}
\details{
formality Method for Network
}
|
#' @include eutil.R
#' @include parse-params.R
NULL
#' @export
# Reference class for an ESummary request/response. Inherits the shared
# eutil machinery (perform_query, get_content, no_errors, database, ...).
.esummary <- setRefClass(
Class = "esummary",
contains = "eutil",
methods = list(
# Perform the HTTP query on construction, then surface any NCBI-reported
# errors attached to the response.
initialize = function(method, ...) {
callSuper()
perform_query(method = method, ...)
if (no_errors()) {
errors$check_errors(.self)
}
},
# Print the raw XML payload followed by a one-line summary footer.
show_xml = function() {
methods::show(get_content("xml"))
tail <- sprintf("ESummary query using the database %s.", sQuote(database()))
cat(tail, sep="\n")
},
# Print the raw JSON payload followed by the same summary footer.
show_json = function() {
methods::show(get_content("json"))
tail <- sprintf("ESummary query using the database %s.", sQuote(database()))
cat(tail, sep="\n")
},
# Default display: dispatch on the retrieval mode when the query succeeded,
# otherwise show the recorded error object.
show = function() {
cat("Object of class", sQuote(eutil()), "\n")
if (no_errors()) {
switch(retmode(), xml = show_xml(), json = show_json())
} else {
methods::show(get_error())
}
}
)
)
#' \code{esummary} performs calls to the NCBI ESummary utility to retrieve document
#' summaries (DocSums) for a list of primary UIDs or for a set of UIDs stored in the
#' user's web environment (using the Entrez History server).
#'
#' @details
#' See the official online documentation for NCBI's
#' \href{http://www.ncbi.nlm.nih.gov/books/NBK25499/\#chapter4.ESummary}{EUtilities}
#' for additional information.
#'
#' @title esummary - downloading Document Summaries
#' @param uid (Required)
#' List of UIDs provided either as a character vector, as an
#' \code{esearch} or \code{elink} object, or by reference to a Web
#' Environment and a query key obtained directly from objects returned
#' by previous calls to \code{\link{esearch}}, \code{\link{epost}} or
#' \code{\link{elink}}.
#' If UIDs are provided as a plain character vector, \code{db} must be
#' specified explicitly, and all of the UIDs must be from the database
#' specified by \code{db}.
#' @param db (Required only when \code{id} is a character vector of UIDs)
#' Database from which to retrieve DocSums.
#' @param retstart Numeric index of the first DocSum to be retrieved
#' (default: 1).
#' @param retmax Total number of DocSums from the input set to be retrieved
#' (maximum: 10,000).
#' @param querykey An integer specifying which of the UID lists attached
#' to a user's Web Environment will be used as input to \code{efetch}.
#' (Usually obtained directly from objects returned by previous
#' \code{\link{esearch}}, \code{\link{epost}} or \code{\link{elink}} calls.)
#' @param webenv A character string specifying the Web Environment that
#' contains the UID list. (Usually obtained directly from objects returned
#' by previous \code{\link{esearch}}, \code{\link{epost}} or
#' \code{\link{elink}} calls.)
#' @param retmode Retrieval mode. (default: 'xml', alternative: 'json')
#' @param version If "2.0" \code{esummary} will retrieve version 2.0
#' ESummary XML output.
#' @return An \code{\linkS4class{esummary}} object.
#' @seealso
#' \code{\link{content}}, \code{\link{getUrl}}, \code{\link{getError}},
#' \code{\link{database}}.
#' @export
#' @examples
#' ## Retrieve the Document Summary information for a set of
#' ## UIDs frome the Gene datanase.
#' ds <- esummary(c("828392", "790", "470338"), "gene")
#' ds
#'
#' \dontrun{
#' ## parse the XML into a data frame
#' df <- content(ds, "parsed")
#' df
#'
#' ## use XPath expressions to extract nodes of interest
#' ds['//TaxID']
#' }
esummary <- function(uid, db = NULL, retstart = 1, retmax = 10000,
                     querykey = NULL, webenv = NULL, retmode = 'xml',
                     version = "2.0") {
  ## Resolve UIDs / database / history-server references into one parameter set.
  params <- parse_params(uid, db, querykey, webenv)
  retmode <- match.arg(retmode, c('xml', 'json'))
  ## History-server queries count DocSums from zero, so shift the 1-based
  ## user-facing retstart down by one.
  uses_history <- !is.null(params$querykey) && !is.null(params$webenv)
  if (uses_history) {
    retstart <- retstart - 1L
  }
  if (retmax > 10000) {
    stop("Number of DocSums to be downloaded should not exceed 10,000.", call.=FALSE)
  }
  ## Long UID lists go over POST so the request URL stays within length limits.
  http_method <- if (length(params$uid) < 100) "GET" else "POST"
  xml_version <- if (version == "2.0") "2.0" else NULL
  .esummary(method = http_method, db = params$db, id = .collapse(params$uid),
            query_key = params$querykey, WebEnv = params$webenv,
            retstart = retstart, retmax = retmax, retmode = retmode,
            version = xml_version)
}
#' @describeIn content Access the data content from an \code{esummary} request.
setMethod("content", "esummary", function(x, as = NULL) {
# Plain pass-through: all handling of `as` lives in the parent class's
# content() method (see eutil.R); esummary adds no content-specific logic.
callNextMethod(x = x, as = as)
})
#' ESummary accessors
#'
#' Extract XML nodes from an \code{\linkS4class{esummary}} object.
#'
#' @param x An \code{\linkS4class{esummary}} object.
#' @param i An XPath expression.
#' @return An XML node set.
#' @rdname sub-esummary
#' @export
#' @examples
#' \dontrun{
#' ds <- esummary("470338", "protein")
#' ds["//Slen/node()"]
#'
#' library("XML")
#' as.numeric(xmlValue(ds[["//Slen"]]))
#' }
setMethod("[", c("esummary", "character"), function(x, i) {
# `i` is an XPath expression; xmlSet() evaluates it against the result XML
# and returns the matching node set (see the roxygen block above).
x$xmlSet(i)
})
#' @rdname sub-esummary
#' @export
setMethod("[[", c("esummary", "character"), function(x, i) {
  ## Run the XPath lookup through `[`, then unwrap the first matching node,
  ## warning when the query matched more than one element.
  nodes <- x[i]
  n_hits <- length(nodes)
  if (n_hits > 1) {
    warning(n_hits, " elements in node set. Returning just the first!")
  }
  nodes[[1]]
})
| /R/esummary.R | no_license | guru1982/reutils | R | false | false | 5,139 | r | #' @include eutil.R
#' @include parse-params.R
NULL
#' @export
.esummary <- setRefClass(
Class = "esummary",
contains = "eutil",
methods = list(
initialize = function(method, ...) {
callSuper()
perform_query(method = method, ...)
if (no_errors()) {
errors$check_errors(.self)
}
},
show_xml = function() {
methods::show(get_content("xml"))
tail <- sprintf("ESummary query using the database %s.", sQuote(database()))
cat(tail, sep="\n")
},
show_json = function() {
methods::show(get_content("json"))
tail <- sprintf("ESummary query using the database %s.", sQuote(database()))
cat(tail, sep="\n")
},
show = function() {
cat("Object of class", sQuote(eutil()), "\n")
if (no_errors()) {
switch(retmode(), xml = show_xml(), json = show_json())
} else {
methods::show(get_error())
}
}
)
)
#' \code{esummary} performs calls to the NCBI ESummary utility to retrieve document
#' summaries (DocSums) for a list of primary UIDs or for a set of UIDs stored in the
#' user's web environment (using the Entrez History server).
#'
#' @details
#' See the official online documentation for NCBI's
#' \href{http://www.ncbi.nlm.nih.gov/books/NBK25499/\#chapter4.ESummary}{EUtilities}
#' for additional information.
#'
#' @title esummary - downloading Document Summaries
#' @param uid (Required)
#' List of UIDs provided either as a character vector, as an
#' \code{esearch} or \code{elink} object, or by reference to a Web
#' Environment and a query key obtained directly from objects returned
#' by previous calls to \code{\link{esearch}}, \code{\link{epost}} or
#' \code{\link{elink}}.
#' If UIDs are provided as a plain character vector, \code{db} must be
#' specified explicitly, and all of the UIDs must be from the database
#' specified by \code{db}.
#' @param db (Required only when \code{id} is a character vector of UIDs)
#' Database from which to retrieve DocSums.
#' @param retstart Numeric index of the first DocSum to be retrieved
#' (default: 1).
#' @param retmax Total number of DocSums from the input set to be retrieved
#' (maximum: 10,000).
#' @param querykey An integer specifying which of the UID lists attached
#' to a user's Web Environment will be used as input to \code{efetch}.
#' (Usually obtained directly from objects returned by previous
#' \code{\link{esearch}}, \code{\link{epost}} or \code{\link{elink}} calls.)
#' @param webenv A character string specifying the Web Environment that
#' contains the UID list. (Usually obtained directly from objects returned
#' by previous \code{\link{esearch}}, \code{\link{epost}} or
#' \code{\link{elink}} calls.)
#' @param retmode Retrieval mode. (default: 'xml', alternative: 'json')
#' @param version If "2.0" \code{esummary} will retrieve version 2.0
#' ESummary XML output.
#' @return An \code{\linkS4class{esummary}} object.
#' @seealso
#' \code{\link{content}}, \code{\link{getUrl}}, \code{\link{getError}},
#' \code{\link{database}}.
#' @export
#' @examples
#' ## Retrieve the Document Summary information for a set of
#' ## UIDs frome the Gene datanase.
#' ds <- esummary(c("828392", "790", "470338"), "gene")
#' ds
#'
#' \dontrun{
#' ## parse the XML into a data frame
#' df <- content(ds, "parsed")
#' df
#'
#' ## use XPath expressions to extract nodes of interest
#' ds['//TaxID']
#' }
esummary <- function(uid, db = NULL, retstart = 1, retmax = 10000,
querykey = NULL, webenv = NULL, retmode = 'xml',
version = "2.0") {
## extract query parameters
params <- parse_params(uid, db, querykey, webenv)
retmode <- match.arg(retmode, c('xml', 'json'))
if (!is.null(params$querykey) && !is.null(params$webenv)) {
retstart <- retstart - 1L
}
if (retmax > 10000) {
stop("Number of DocSums to be downloaded should not exceed 10,000.", call.=FALSE)
}
.esummary(method = if (length(params$uid) < 100) "GET" else "POST",
db = params$db, id = .collapse(params$uid),
query_key = params$querykey, WebEnv = params$webenv,
retstart = retstart, retmax = retmax, retmode = retmode,
version = if (version == "2.0") "2.0" else NULL)
}
#' @describeIn content Access the data content from an \code{esummary} request.
setMethod("content", "esummary", function(x, as = NULL) {
callNextMethod(x = x, as = as)
})
#' ESummary accessors
#'
#' Extract XML nodes from an \code{\linkS4class{esummary}} object.
#'
#' @param x An \code{\linkS4class{esummary}} object.
#' @param i An XPath expression.
#' @return An XML node set.
#' @rdname sub-esummary
#' @export
#' @examples
#' \dontrun{
#' ds <- esummary("470338", "protein")
#' ds["//Slen/node()"]
#'
#' library("XML")
#' as.numeric(xmlValue(ds[["//Slen"]]))
#' }
setMethod("[", c("esummary", "character"), function(x, i) {
x$xmlSet(i)
})
#' @rdname sub-esummary
#' @export
setMethod("[[", c("esummary", "character"), function(x, i) {
ans <- x[i]
if (length(ans) > 1) {
warning(length(ans), " elements in node set. Returning just the first!")
}
ans[[1]]
})
|
\name{alnumx}
\docType{data}
\alias{alnumx}
\title{Regular expression for removal of non-alphanumeric characters (saving special characters)}
\description{
This character string contains a regular expression for use in \code{gsub} deployed in \code{textvector} that identifies all alphanumeric characters (including language-specific special characters not included in \code{[:alnum:]}; currently only the ones found in German and Polish).
You can use this expression by loading it with \code{data(alnumx)}.
}
\usage{
data(alnumx)
}
\author{ Fridolin Wild \email{f.wild@open.ac.uk}}
\format{Vector of type character.}
\keyword{datasets}
| /man/alnumx.Rd | no_license | cran/lsa | R | false | false | 655 | rd | \name{alnumx}
\docType{data}
\alias{alnumx}
\title{Regular expression for removal of non-alphanumeric characters (saving special characters)}
\description{
This character string contains a regular expression for use in \code{gsub} deployed in \code{textvector} that identifies all alphanumeric characters (including language-specific special characters not included in \code{[:alnum:]}; currently only the ones found in German and Polish).
You can use this expression by loading it with \code{data(alnumx)}.
}
\usage{
data(alnumx)
}
\author{ Fridolin Wild \email{f.wild@open.ac.uk}}
\format{Vector of type character.}
\keyword{datasets}
|
#-function to read one model run's output CSV into a data frame for post-processing---------------------------------
# r    : run name ("baseline" or "smc"); also names the output sub-directory
# prev : baseline PfPR level
# seas : seasonality setting
# cov  : coverage value
# dr   : model parameter draw
combine_outputs <- function(r, prev, seas, cov, dr){
  # File layout: <project root>/<run>/output/<run>_<seasonality>_<pfpr>_<coverage>_<draw>.csv
  csv_path <- paste0(here::here(), "/", r, "/output/", r, "_", seas, "_", prev, "_", cov, "_", dr, ".csv")
  read_csv(csv_path)
}
| /Part_1/output_functions.R | no_license | htopazian/seasonal_use_case | R | false | false | 682 | r |
#-function to combine all individual outputs to single df for post-processing----------------------------------------
# Reads one model run's output CSV, identified by run name, PfPR level,
# seasonality, coverage and parameter draw. Returns the file as a data frame.
combine_outputs <- function(r, prev, seas, cov, dr){
run <- r #baseline or smc
pfpr <- prev #baseline pfpr levels
seasonality <- seas #seasonality
coverage <- cov #coverage value
draw <- dr #model parameter draw
#-upload output in and select relevant variables----------------------------------------------------------------------
# Path layout: <project root>/<run>/output/<run>_<seasonality>_<pfpr>_<coverage>_<draw>.csv
raw <- read_csv(paste0(here::here(), "/", run, "/output/", run,"_", seasonality, "_", pfpr, "_", coverage, "_", draw,".csv"))
out <- raw
return(out)
}
|
#' Find cocaine based on ICD-10-CM.
#'
#' Find cocaine based on ICD-10-CM.
#'
#' @param data input data
#' @param diag_ecode_col column indices
#'
#' @return cocaine_icd10cm
#' @export
#'
#' @examples to be added
od_cocaine_icd10cm <- function(data, diag_ecode_col) {
# 7-character ICD-10-CM pattern: "T405" + any 5th character + a digit 1-4,
# followed by "A" or end-of-string.
# NOTE(review): the "." matches ANY character, not a literal dot — assumed
# intentional for undotted code storage (e.g. "T40.5X1A" stored as
# "T405X1A"); confirm against the source coding scheme.
cdc_cocaine_icd10cm_regex7_ <- "T405.(1|2|3|4)(A|$)"
# Add a cocaine_icd10cm flag column by scanning the diagnosis/e-code columns
# (indices in diag_ecode_col) for the pattern via the package helper.
data %>% mutate(cocaine_icd10cm = od_create_diag(., expr = cdc_cocaine_icd10cm_regex7_, colvec = diag_ecode_col))
}
| /R/od_cocaine_icd10cm.R | no_license | doh-FXX0303/overdoser | R | false | false | 456 | r | #' Find cocaine based on ICD-10-CM.
#'
#' Find cocaine based on ICD-10-CM.
#'
#' @param data input data
#' @param diag_ecode_col column indices
#'
#' @return cocaine_icd10cm
#' @export
#'
#' @examples to be added
od_cocaine_icd10cm <- function(data, diag_ecode_col) {
# 7-character ICD-10-CM pattern: "T405" + any 5th character + a digit 1-4,
# followed by "A" or end-of-string.
# NOTE(review): the "." matches ANY character, not a literal dot — assumed
# intentional for undotted code storage; confirm against the coding scheme.
cdc_cocaine_icd10cm_regex7_ <- "T405.(1|2|3|4)(A|$)"
# Add a cocaine_icd10cm flag column by scanning the diagnosis/e-code columns
# (indices in diag_ecode_col) for the pattern via the package helper.
data %>% mutate(cocaine_icd10cm = od_create_diag(., expr = cdc_cocaine_icd10cm_regex7_, colvec = diag_ecode_col))
}
|
library(tidyverse)
library(plotly)
library(wordcloud2)
library(hash)
# Pre-computed data for the Star Wars dialogue dashboard.
scripts = read.csv('CSV Files/scripts.csv')
avg_embeds = read.csv('CSV Files/avg_embeds.csv')
embeds = read.csv('CSV Files/embeds.csv')
tsne = read.csv('CSV Files/tsne.csv')
word_counts = read.csv('CSV Files/word_counts_by_movie.csv')
# Characters ordered by total line count, most talkative first.
all_characters = unique(scripts %>% group_by(character) %>% summarize(n = n()) %>% arrange(desc(n)) %>% select(character))$character
all_films = c("EpisodeIV", "EpisodeV", "EpisodeVI")
# Number each line of dialogue within its film (used as an x-axis below).
scripts = scripts %>% group_by(film) %>% mutate(line_num = row_number())
### FUNCTIONS
#Wordcloud generator
# Build a masked word cloud for the words spoken in the selected films.
# films       : character vector of film names to include
# img         : basename of the mask image under Images/ (without ".jpg")
# word_counts : per-film word counts with columns film, word, count
create_wordcloud = function(films, img, word_counts){
  # Keep only the rows for the requested films. (Vectorised filter replaces
  # the original rbind-in-a-loop, which grew a data frame one film at a time;
  # the aggregate below is order-independent, so results are unchanged.)
  temp = word_counts %>% filter(film %in% films) %>% select(word, count)
  # Total frequency per word across the selected films, most frequent first.
  wc_df = temp %>% group_by(word) %>% summarize(freq = sum(count)) %>% arrange(desc(freq))
  # Render the cloud shaped by the mask image Images/<img>.jpg.
  wc = wordcloud2(wc_df, color="white", backgroundColor = "black", figPath = paste('Images/', img, '.jpg', sep=""))
  return(wc)
}
###Sentiment Time Series
# Scatter + LOESS plot of compound sentiment over line number, one pair of
# traces per (character, film) combination present in `scripts`.
# NOTE(review): `films == "All Films"` compares a possibly length>1 vector
# with a scalar — errors in `if` on R >= 4.2 when films has several entries;
# presumably callers pass either the single string or a film vector. Confirm.
sentiment_time = function(characters, films, scripts, color_palette = color_palette){
if (films == "All Films"){films = all_films}
# Re-derive per-film line numbers (also done at file load; kept for safety
# when a caller passes a differently-ordered scripts frame).
scripts = scripts %>% group_by(film) %>% mutate(line_num = row_number())
#get unique set of characters
chars = unique(scripts[c("character", "film")])
#filter out only the characters and films desired
df = data.frame()
for (char in characters){
for (movie in films){
df = rbind(df, chars %>% filter(character == char & film == movie))
}
}
#plot the sentiment over time for each character desired
palette = color_palette(length(df$character))
fig = plot_ly()
for (i in 1:length(df$character)){
plot_df = scripts %>% filter(character == as.character(df[i, 1]) & film == as.character(df[i, 2]) & compound != 0) #use script data for characters in question, remove instances with 0 compound score
fig = fig %>% add_trace(data=plot_df, x=~line_num, y=~compound, mode="markers", name=paste(df[i, 1], "-", df[i, 2]), marker = list(color = palette[i], opacity = 0.5))
fig = fig %>% add_lines(data = plot_df, x = ~line_num, y = ~fitted(loess(compound ~ line_num)), name = paste(df[i, 1], "-", df[i, 2], "LOESS"), line = list(color = palette[i], width = 2.4), showlegend = F)
}
fig = fig %>% layout(hovermode = 'x unified', title="Sentiment Over Time", xaxis=list(title="Line Number"), yaxis=list(title="Sentiment Score"))
return(fig)
}
###sentiment Bar Chart
# Horizontal bar chart comparing one sentiment metric across the selected
# characters, with one bar series per selected film.
# characters : character names to plot
# films      : film names, or the single string "All Films"
# met        : "Sentiment", "Positive", "Neutral" or "Negative"
sentiment_bar = function(characters, films, scripts, met, color_palette = color_palette){
  if (films == "All Films"){films = all_films}
  # Map the user-facing metric label onto the matching scripts column name.
  h = hash()
  h[["Sentiment"]] = "compound"
  h[["Positive"]] = "pos"
  h[["Neutral"]] = "neu"
  h[["Negative"]] = "neg"
  # Wide table: one row per character, one column per film holding the mean score.
  avg_sentiments = spread(scripts %>% group_by(character, film) %>% summarize(sentiment = mean(get(h[[met]]))), key=film, value=sentiment)
  df = data.frame()
  for (char in characters){
    # NOTE(review): select(films) drops the `character` column that
    # y=~character references below — verify; select(c("character", films))
    # may be intended.
    df = rbind(df, avg_sentiments %>% filter(character == char) %>% select(films))
  }
  palette = color_palette(3)
  # orientation = "h": x carries the score, y lists the characters.
  fig = plot_ly(df, x=~get(films[1]), y=~character, type="bar", name=films[1], text = films[1], marker = list(color = palette[1]), orientation = "h", hoverinfo = "x+text")
  if (length(films) >= 2){fig = fig %>% add_trace(x=~get(films[2]), name=films[2], text=films[2], marker = list(color = palette[2]))}
  if (length(films) == 3){fig = fig %>% add_trace(x=~get(films[3]), name=films[3], text=films[3], marker = list(color = palette[3]))}
  # Fix: the axis titles were swapped for this horizontal layout — the x axis
  # is the numeric score, the y axis the character names.
  fig = fig %>% layout(title=paste(met, "Score of Selected Characters and Films"), xaxis = list(title=paste(met, "Sentiment Score")), yaxis=list(title="Character"))
  return(fig)
}
###Sentiment over number of lines
# Scatter plot of a mean sentiment metric against total line count, one point
# per (character, film) pair. `met` names one of the summarised columns
# ("Sentiment", "Positive", "Neutral", "Negative").
sentiment_over_lines = function(characters, films, scripts, met, color_palette = color_palette){
if (films == "All Films"){films = all_films}
# Per character/film: number of lines plus mean of each sentiment component.
data = scripts %>% group_by(character, film) %>% summarize(lines = n(), Sentiment = mean(compound), Positive = mean(pos), Neutral = mean(neu), Negative = mean(neg))
df = data.frame()
for (char in characters){
for (movie in films){
df = rbind(df, data %>% filter(character == char & film == movie))
}
}
# Custom hover text: name, film and the selected metric rounded to 3 digits.
t = ~paste("Name:", character, "<br>Film:", film, paste("<br>", met, ":", sep = ""), round(get(met), 3))
fig = plot_ly(data = df, x = ~lines, y = ~get(met), name = ~paste(character, "-", film), marker = list(color = color_palette(length(df$character)), line = list(color = "black", width = 1), size = 20), text = t, hoverinfo = "text")
fig = fig %>% layout(title = paste(met, "over Total Lines"), showlegend = T, xaxis = list(title = "Line Count"), yaxis = list(title = met))
return(fig)
}
###TSNE Plot
# TSNE scatter of character/film embeddings: marker size = line count,
# marker colour = average compound sentiment clamped to [color_min, color_max]
# so outliers do not wash out the colour scale.
tsne_plot = function(characters, films, tsne, color_palette = color_palette, color_max = 0.25, color_min = -0.25){
temp = tsne
# Clamp sentiment into [color_min, color_max] for the colour mapping.
temp$Sentiment = replace(temp$compound, temp$compound > color_max, color_max)
temp$Sentiment = replace(temp$Sentiment, temp$Sentiment < color_min, color_min)
# NOTE(review): vector == "All ..." scalar comparisons error in `if` on
# R >= 4.2 when several names are passed; presumably callers send either the
# sentinel string or a vector. Confirm against the calling UI.
if (characters == "All Characters"){characters = all_characters}
if (films == "All Films"){films = all_films}
df = data.frame()
for (char in characters){
for (movie in films){
df = rbind(df, temp %>% filter(character == char & film == movie))
}
}
palette = color_palette(100)
m = list(sizeref = 0.25, line=list(color='black', width=1), text=~character) #marker characteristics
t = ~paste("Character:", character, "<br>Film:", film, "<br>Lines:", line_count, "<br>Sentiment:", round(compound, 2)) #hover text
ax = list(title="", showline=F, zeroline=F, showticklabels=T, tickfont=list(size=10)) #axis
fig = plot_ly(data=df, x=~x, y=~y, type="scatter", mode='markers', colors = palette, name=~paste(character, "-", film), marker = m, color = ~Sentiment, size=~(line_count), text = t, hoverinfo = "text", height = 1150)
fig = fig %>% layout(showlegend = F, xaxis = ax, yaxis = ax, title = "TSNE Plot, Sized by Line Count, Colored by Avg. Sentiment")
return(fig)
} | /R Code/helpers.R | no_license | hoganj15/StarWars | R | false | false | 6,132 | r | library(tidyverse)
library(plotly)
library(wordcloud2)
library(hash)
scripts = read.csv('CSV Files/scripts.csv')
avg_embeds = read.csv('CSV Files/avg_embeds.csv')
embeds = read.csv('CSV Files/embeds.csv')
tsne = read.csv('CSV Files/tsne.csv')
word_counts = read.csv('CSV Files/word_counts_by_movie.csv')
all_characters = unique(scripts %>% group_by(character) %>% summarize(n = n()) %>% arrange(desc(n)) %>% select(character))$character
all_films = c("EpisodeIV", "EpisodeV", "EpisodeVI")
scripts = scripts %>% group_by(film) %>% mutate(line_num = row_number())
### FUNCTIONS
#Wordcloud generator
create_wordcloud = function(films, img, word_counts){
#get all rows containing desired movies and add them to a temp table
temp = data.frame()
for (movie in films){
temp = rbind(temp, word_counts %>% filter(film == movie) %>% select(word, count))
}
#group by word and return word name and freq
wc_df = temp %>% group_by(word) %>% summarize(freq = sum(count)) %>% arrange(desc(freq))
wc = wordcloud2(wc_df, color="white", backgroundColor = "black", figPath = paste('Images/', img, '.jpg', sep=""))
return(wc)
}
###Sentiment Time Series
sentiment_time = function(characters, films, scripts, color_palette = color_palette){
if (films == "All Films"){films = all_films}
scripts = scripts %>% group_by(film) %>% mutate(line_num = row_number())
#get unique set of characters
chars = unique(scripts[c("character", "film")])
#filter out only the characters and films desired
df = data.frame()
for (char in characters){
for (movie in films){
df = rbind(df, chars %>% filter(character == char & film == movie))
}
}
#plot the sentiment over time for each character desired
palette = color_palette(length(df$character))
fig = plot_ly()
for (i in 1:length(df$character)){
plot_df = scripts %>% filter(character == as.character(df[i, 1]) & film == as.character(df[i, 2]) & compound != 0) #use script data for characters in question, remove instances with 0 compound score
fig = fig %>% add_trace(data=plot_df, x=~line_num, y=~compound, mode="markers", name=paste(df[i, 1], "-", df[i, 2]), marker = list(color = palette[i], opacity = 0.5))
fig = fig %>% add_lines(data = plot_df, x = ~line_num, y = ~fitted(loess(compound ~ line_num)), name = paste(df[i, 1], "-", df[i, 2], "LOESS"), line = list(color = palette[i], width = 2.4), showlegend = F)
}
fig = fig %>% layout(hovermode = 'x unified', title="Sentiment Over Time", xaxis=list(title="Line Number"), yaxis=list(title="Sentiment Score"))
return(fig)
}
###sentiment Bar Chart
sentiment_bar = function(characters, films, scripts, met, color_palette = color_palette){
if (films == "All Films"){films = all_films}
h = hash()
h[["Sentiment"]] = "compound"
h[["Positive"]] = "pos"
h[["Neutral"]] = "neu"
h[["Negative"]] = "neg"
avg_sentiments = spread(scripts %>% group_by(character, film) %>% summarize(sentiment = mean(get(h[[met]]))), key=film, value=sentiment)
df = data.frame()
for (char in characters){
df = rbind(df, avg_sentiments %>% filter(character == char) %>% select(films))
}
palette = color_palette(3)
fig = plot_ly(df, x=~get(films[1]), y=~character, type="bar", name=films[1], text = films[1], marker = list(color = palette[1]), orientation = "h", hoverinfo = "x+text")
if (length(films) >= 2){fig = fig %>% add_trace(x=~get(films[2]), name=films[2], text=films[2], marker = list(color = palette[2]))}
if (length(films) == 3){fig = fig %>% add_trace(x=~get(films[3]), name=films[3], text=films[3], marker = list(color = palette[3]))}
fig = fig %>% layout(title=paste(met, "Score of Selected Characters and Films"), xaxis = list(title="Character"), yaxis=list(title=paste(met, "Sentiment Score")))
return(fig)
}
###Sentiment over number of lines
sentiment_over_lines = function(characters, films, scripts, met, color_palette = color_palette){
if (films == "All Films"){films = all_films}
data = scripts %>% group_by(character, film) %>% summarize(lines = n(), Sentiment = mean(compound), Positive = mean(pos), Neutral = mean(neu), Negative = mean(neg))
df = data.frame()
for (char in characters){
for (movie in films){
df = rbind(df, data %>% filter(character == char & film == movie))
}
}
t = ~paste("Name:", character, "<br>Film:", film, paste("<br>", met, ":", sep = ""), round(get(met), 3))
fig = plot_ly(data = df, x = ~lines, y = ~get(met), name = ~paste(character, "-", film), marker = list(color = color_palette(length(df$character)), line = list(color = "black", width = 1), size = 20), text = t, hoverinfo = "text")
fig = fig %>% layout(title = paste(met, "over Total Lines"), showlegend = T, xaxis = list(title = "Line Count"), yaxis = list(title = met))
return(fig)
}
###TSNE Plot
tsne_plot = function(characters, films, tsne, color_palette = color_palette, color_max = 0.25, color_min = -0.25){
temp = tsne
temp$Sentiment = replace(temp$compound, temp$compound > color_max, color_max)
temp$Sentiment = replace(temp$Sentiment, temp$Sentiment < color_min, color_min)
if (characters == "All Characters"){characters = all_characters}
if (films == "All Films"){films = all_films}
df = data.frame()
for (char in characters){
for (movie in films){
df = rbind(df, temp %>% filter(character == char & film == movie))
}
}
palette = color_palette(100)
m = list(sizeref = 0.25, line=list(color='black', width=1), text=~character) #marker characteristics
t = ~paste("Character:", character, "<br>Film:", film, "<br>Lines:", line_count, "<br>Sentiment:", round(compound, 2)) #hover text
ax = list(title="", showline=F, zeroline=F, showticklabels=T, tickfont=list(size=10)) #axis
fig = plot_ly(data=df, x=~x, y=~y, type="scatter", mode='markers', colors = palette, name=~paste(character, "-", film), marker = m, color = ~Sentiment, size=~(line_count), text = t, hoverinfo = "text", height = 1150)
fig = fig %>% layout(showlegend = F, xaxis = ax, yaxis = ax, title = "TSNE Plot, Sized by Line Count, Colored by Avg. Sentiment")
return(fig)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createGraphic.R
\name{createGraphic}
\alias{createGraphic}
\title{createGraphic}
\usage{
createGraphic(handNo, handN, handE, handS, handW, dealer, vuln, points)
}
\arguments{
\item{handNo}{The id of the hand}
\item{handN}{The North hand generated by bridgeHand}
\item{handE}{The East hand generated by bridgeHand}
\item{handS}{The South hand generated by bridgeHand}
\item{handW}{The West hand generated by bridgeHand}
\item{dealer}{The hand to become South, the designated dealer}
\item{vuln}{The hand's vulnerability}
\item{points}{The hand's points}
}
\value{
ggplot graphic object
}
\description{
Create the graphic of the hand
}
| /man/createGraphic.Rd | no_license | cran/bridger | R | false | true | 750 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createGraphic.R
\name{createGraphic}
\alias{createGraphic}
\title{createGraphic}
\usage{
createGraphic(handNo, handN, handE, handS, handW, dealer, vuln, points)
}
\arguments{
\item{handNo}{The id of the hand}
\item{handN}{The North hand generated by bridgeHand}
\item{handE}{The East hand generated by bridgeHand}
\item{handS}{The South hand generated by bridgeHand}
\item{handW}{The West hand generated by bridgeHand}
\item{dealer}{The hand to become South, the designated dealer}
\item{vuln}{The hand's vulnerability}
\item{points}{The hand's points}
}
\value{
ggplot graphic object
}
\description{
Create the graphic of the hand
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_delete_product}
\alias{servicecatalog_delete_product}
\title{Deletes the specified product}
\usage{
servicecatalog_delete_product(AcceptLanguage, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The product identifier.}
}
\description{
Deletes the specified product.
}
\details{
You cannot delete a product if it was shared with you or is associated
with a portfolio.
A delegated admin is authorized to invoke this command.
}
\section{Request syntax}{
\preformatted{svc$delete_product(
AcceptLanguage = "string",
Id = "string"
)
}
}
\keyword{internal}
| /cran/paws.management/man/servicecatalog_delete_product.Rd | permissive | jcheng5/paws | R | false | true | 819 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicecatalog_operations.R
\name{servicecatalog_delete_product}
\alias{servicecatalog_delete_product}
\title{Deletes the specified product}
\usage{
servicecatalog_delete_product(AcceptLanguage, Id)
}
\arguments{
\item{AcceptLanguage}{The language code.
\itemize{
\item \code{en} - English (default)
\item \code{jp} - Japanese
\item \code{zh} - Chinese
}}
\item{Id}{[required] The product identifier.}
}
\description{
Deletes the specified product.
}
\details{
You cannot delete a product if it was shared with you or is associated
with a portfolio.
A delegated admin is authorized to invoke this command.
}
\section{Request syntax}{
\preformatted{svc$delete_product(
AcceptLanguage = "string",
Id = "string"
)
}
}
\keyword{internal}
|
#
# Course: IST687
# Name: Joyce Woznica
# Homework 6 - Viz HW: air quality Analysis
# Due Date: 2/19/2019
# Date Submitted:
#
# Step 1: Load the data
# we will use the airquality data set, which you should already have as part of your R installation
# (airquality ships with base R in the datasets package, so no extra install is needed)
myairQ<-airquality
# Step 2: Clean the data
# There will be NA's - figure out what you are going to do with that
# JLJW NOTE: I elected to use the mean for each column for the NAs as opposed to
# removing all the observations that had an NA for one of the values
# create a function to replace each column NA with mean for that column
# Replace every NA in each numeric column of a data frame with that
# column's mean (computed over the non-missing values).
#
# Args:
#   vec: a data frame (e.g. airquality); non-numeric columns are left as-is,
#        since a mean is not meaningful for them
# Returns:
#   the data frame with NAs imputed by per-column means
replaceNAwMeans<-function(vec)
{
  # seq_along() iterates the columns safely (works even for 0-column frames
  # and frames without column names, unlike 1:length(colnames(vec)))
  for (colIdx in seq_along(vec))
  {
    theColV <- vec[[colIdx]]
    if (is.numeric(theColV))
    {
      theColV[is.na(theColV)] <- mean(theColV, na.rm=TRUE)
      vec[[colIdx]] <- theColV
    }
  }
  return(vec)
}
# Update the dataframe with the modified frame with means applied to NA values
myairQ<-replaceNAwMeans(myairQ)
# Step 3: Understand the data distribution
# Create the following visualizations using ggplot:
# 1) Histograms for each of the variables
# Get the columns of myairQ
colnames(myairQ)
# install and attach ggplot2 (install.packages() contacts a CRAN mirror;
# comment it out once the package is installed)
install.packages ("ggplot2")
library(ggplot2)
# histogram for Ozone with ggplot
g <- ggplot(myairQ, aes(x=Ozone))
g <- g + geom_histogram(bins=20, color="blue", fill="green")
g <- g+ ggtitle("Air Quality:Ozone")
g
# inspect the binned counts ggplot computed for the Ozone histogram
gdata <- ggplot_build(g)
gdata[1]
gdata$data[[1]]$y
# histogram for Solar.R with ggplot
g <- ggplot(myairQ, aes(x=Solar.R))
g <- g + geom_histogram(bins=20, color="red", fill="orange")
g <- g+ ggtitle("Air Quality:Solar.R")
g
# histogram for Wind with ggplot
g <- ggplot(myairQ, aes(x=Wind))
g <- g + geom_histogram(bins=20, color="black", fill="blue")
g <- g+ ggtitle("Air Quality:Wind")
g
# histogram for Temperature with ggplot
g <- ggplot(myairQ, aes(x=Temp))
g <- g + geom_histogram(bins=20, color="blue", fill="pink")
g <- g+ ggtitle("Air Quality:Temperature")
g
# - do we even need these two? They are not 'variables'
# histogram for Month with ggplot
g <- ggplot(myairQ, aes(x=Month))
g <- g + geom_histogram(binwidth=1, bins=5, color="green", fill="blue")
g <- g+ ggtitle("Air Quality:Month")
g
# histogram for Day with ggplot
g <- ggplot(myairQ, aes(x=Day))
g <- g + geom_histogram(bins=15, color="orange", fill="yellow")
g <- g+ ggtitle("Air Quality:Day")
g
# 2) Boxplot for Ozone with ggplot
# Need to find "buckets" for the information
# DO NOT MAP AGAINST MONTH!
g<- ggplot(myairQ, aes(group=Month,x=Month,y=Ozone))
g<- g + geom_boxplot(aes(fill=factor(Month)))
g<- g + ggtitle("Ozone by Month")
g
# 3) Boxplot for wind values (round the wind to get a good number of "buckets")
# DO NOT MAP AGAINST MONTH!
g<- ggplot(myairQ, aes(group=Month,x=Month,y=round(Wind, digits=0)))
g<- g + geom_boxplot(aes(fill=factor(Month)))
g<- g + ggtitle("Wind by Month")+theme(plot.title=element_text(hjust=0.5))
g
# maybe don't use month - maybe do not have "wind" on the Y axis (use buckets?)
# Step 3 (again): Explore how the data changes over time
# First make sure to create appropriate dates (this data was from 1973).
# Then create line charts for ozone, temp, wind and solar.R
# (one line chart for each, then one chart with
# four lines, each having a different color).
# create these visualizations with ggplot
# **Note that for the chart with 4 lines, you need to think about how to
# effectively use the y-axis
# create a function to create dates from 1973
# read month, day and create 1973-<month>-<day>
# Assemble a Date (vectorized over its arguments) from numeric
# year/month/day parts, e.g. createDate(1973, 5, 1) -> 1973-05-01.
createDate <- function (yyyy,mm,dd)
{
  iso_text <- sprintf("%s-%s-%s", yyyy, mm, dd)
  as.Date(iso_text)
}
# function to create a new column from the created date
# Build the 1973 Date column for the airquality data frame.
#
# createDate() is vectorized over month/day, so one call produces the whole
# column. The original looped over every row, recomputing the full vector on
# each iteration (accidentally O(n^2)) and keeping only the last result; it
# also used 1:nrow(df), which misbehaves on an empty frame.
#
# Args:
#   df: data frame with numeric Month and Day columns
# Returns:
#   a Date vector with one entry per row of df
makeDateCol <- function(df)
{
  return(createDate(1973, df$Month, df$Day))
}
# now add the new column of real 1973 dates
myairQ$Date<-makeDateCol(myairQ)
g <- ggplot(myairQ, aes(x=Month, y=Ozone, group=Day, color=factor(Day)))
g <- g + geom_line(size=1)
g <- g + ylab("Ozone")
g <- g + ggtitle("Day of Month")
g
# Step 4: Look at all the data via a Heatmap
# Create a heatmap, with each day along the x-axis and
# Ozone, Temp, Wind and Solar.R along the y-axis
# Create using geom_tile (tiles instead of lines)
# You will need to figure out how to show the relative change
# equally across all the variables
# Reshape to long form with base R: one row per (Day, Variable) pair.
# scale() standardizes each variable (z-scores) so the fill gradient shows
# relative change on an equal footing across all four measurements.
# (The original line had a 'y=??' placeholder, which is a parse error, and
# used the scatter aesthetics color/size instead of geom_tile's fill.)
aqLong <- data.frame(Day=rep(myairQ$Day, times=4),
                     Variable=rep(c("Ozone","Solar.R","Wind","Temp"), each=nrow(myairQ)),
                     Scaled=c(scale(myairQ$Ozone), scale(myairQ$Solar.R),
                              scale(myairQ$Wind), scale(myairQ$Temp)))
g<- ggplot(aqLong, aes(x=Day, y=Variable))
g<- g + geom_tile(aes(fill=Scaled))
g
# Step 5: Look at all the data via a scatter plot
# Create a scatter chart (geom_point),
# x-axis is wind
# y-axis is temperature
# dot size represents Ozone
# color represents Solar.R
g<- ggplot(myairQ, aes(x=Wind, y=Temp))
g<- g + geom_point(aes(color=Solar.R, size=Ozone))
g
# Step 6: Final Analysis
# Do you see any patterns after exploring the data?
# ANSWER: Wind decreases in late summer, but is fairly high in the late Spring
# Lower the wind, lower the temperature, higher the Ozone?
# ** MORE **
#
# What was the most useful visualization?
# ANSWER: **ANSWER**
# Course: IST687
# Name: Joyce Woznica
# Homework 6 - Viz HW: air quality Analysis
# Due Date: 2/19/2019
# Date Submitted:
#
# Step 1: Load the data
# we will use the airquality data set, which you should already have as part of your R installation
# (airquality ships with base R in the datasets package, so no extra install is needed)
myairQ<-airquality
# Step 2: Clean the data
# There will be NA's - figure out what you are going to do with that
# JLJW NOTE: I elected to use the mean for each column for the NAs as opposed to
# removing all the observations that had an NA for one of the values
# create a function to replace each column NA with mean for that column
# Replace every NA in each numeric column of a data frame with that
# column's mean (computed over the non-missing values).
#
# Args:
#   vec: a data frame (e.g. airquality); non-numeric columns are left as-is,
#        since a mean is not meaningful for them
# Returns:
#   the data frame with NAs imputed by per-column means
replaceNAwMeans<-function(vec)
{
  # seq_along() iterates the columns safely (works even for 0-column frames
  # and frames without column names, unlike 1:length(colnames(vec)))
  for (colIdx in seq_along(vec))
  {
    theColV <- vec[[colIdx]]
    if (is.numeric(theColV))
    {
      theColV[is.na(theColV)] <- mean(theColV, na.rm=TRUE)
      vec[[colIdx]] <- theColV
    }
  }
  return(vec)
}
# Update the dataframe with the modified frame with means applied to NA values
myairQ<-replaceNAwMeans(myairQ)
# Step 3: Understand the data distribution
# Create the following visualizations using ggplot:
# 1) Histograms for each of the variables
# Get the columns of myairQ
colnames(myairQ)
# install and attach ggplot2 (install.packages() contacts a CRAN mirror;
# comment it out once the package is installed)
install.packages ("ggplot2")
library(ggplot2)
# histogram for Ozone with ggplot
g <- ggplot(myairQ, aes(x=Ozone))
g <- g + geom_histogram(bins=20, color="blue", fill="green")
g <- g+ ggtitle("Air Quality:Ozone")
g
# inspect the binned counts ggplot computed for the Ozone histogram
gdata <- ggplot_build(g)
gdata[1]
gdata$data[[1]]$y
# histogram for Solar.R with ggplot
g <- ggplot(myairQ, aes(x=Solar.R))
g <- g + geom_histogram(bins=20, color="red", fill="orange")
g <- g+ ggtitle("Air Quality:Solar.R")
g
# histogram for Wind with ggplot
g <- ggplot(myairQ, aes(x=Wind))
g <- g + geom_histogram(bins=20, color="black", fill="blue")
g <- g+ ggtitle("Air Quality:Wind")
g
# histogram for Temperature with ggplot
g <- ggplot(myairQ, aes(x=Temp))
g <- g + geom_histogram(bins=20, color="blue", fill="pink")
g <- g+ ggtitle("Air Quality:Temperature")
g
# - do we even need these two? They are not 'variables'
# histogram for Month with ggplot
g <- ggplot(myairQ, aes(x=Month))
g <- g + geom_histogram(binwidth=1, bins=5, color="green", fill="blue")
g <- g+ ggtitle("Air Quality:Month")
g
# histogram for Day with ggplot
g <- ggplot(myairQ, aes(x=Day))
g <- g + geom_histogram(bins=15, color="orange", fill="yellow")
g <- g+ ggtitle("Air Quality:Day")
g
# 2) Boxplot for Ozone with ggplot
# Need to find "buckets" for the information
# DO NOT MAP AGAINST MONTH!
g<- ggplot(myairQ, aes(group=Month,x=Month,y=Ozone))
g<- g + geom_boxplot(aes(fill=factor(Month)))
g<- g + ggtitle("Ozone by Month")
g
# 3) Boxplot for wind values (round the wind to get a good number of "buckets")
# DO NOT MAP AGAINST MONTH!
g<- ggplot(myairQ, aes(group=Month,x=Month,y=round(Wind, digits=0)))
g<- g + geom_boxplot(aes(fill=factor(Month)))
g<- g + ggtitle("Wind by Month")+theme(plot.title=element_text(hjust=0.5))
g
# maybe don't use month - maybe do not have "wind" on the Y axis (use buckets?)
# Step 3 (again): Explore how the data changes over time
# First make sure to create appropriate dates (this data was from 1973).
# Then create line charts for ozone, temp, wind and solar.R
# (one line chart for each, then one chart with
# four lines, each having a different color).
# create these visualizations with ggplot
# **Note that for the chart with 4 lines, you need to think about how to
# effectively use the y-axis
# create a function to create dates from 1973
# read month, day and create 1973-<month>-<day>
# Assemble a Date (vectorized over its arguments) from numeric
# year/month/day parts, e.g. createDate(1973, 5, 1) -> 1973-05-01.
createDate <- function (yyyy,mm,dd)
{
  iso_text <- sprintf("%s-%s-%s", yyyy, mm, dd)
  as.Date(iso_text)
}
# function to create a new column from the created date
# Build the 1973 Date column for the airquality data frame.
#
# createDate() is vectorized over month/day, so one call produces the whole
# column. The original looped over every row, recomputing the full vector on
# each iteration (accidentally O(n^2)) and keeping only the last result; it
# also used 1:nrow(df), which misbehaves on an empty frame.
#
# Args:
#   df: data frame with numeric Month and Day columns
# Returns:
#   a Date vector with one entry per row of df
makeDateCol <- function(df)
{
  return(createDate(1973, df$Month, df$Day))
}
# now add the new column of real 1973 dates
myairQ$Date<-makeDateCol(myairQ)
g <- ggplot(myairQ, aes(x=Month, y=Ozone, group=Day, color=factor(Day)))
g <- g + geom_line(size=1)
g <- g + ylab("Ozone")
g <- g + ggtitle("Day of Month")
g
# Step 4: Look at all the data via a Heatmap
# Create a heatmap, with each day along the x-axis and
# Ozone, Temp, Wind and Solar.R along the y-axis
# Create using geom_tile (tiles instead of lines)
# You will need to figure out how to show the relative change
# equally across all the variables
# Reshape to long form with base R: one row per (Day, Variable) pair.
# scale() standardizes each variable (z-scores) so the fill gradient shows
# relative change on an equal footing across all four measurements.
# (The original line had a 'y=??' placeholder, which is a parse error, and
# used the scatter aesthetics color/size instead of geom_tile's fill.)
aqLong <- data.frame(Day=rep(myairQ$Day, times=4),
                     Variable=rep(c("Ozone","Solar.R","Wind","Temp"), each=nrow(myairQ)),
                     Scaled=c(scale(myairQ$Ozone), scale(myairQ$Solar.R),
                              scale(myairQ$Wind), scale(myairQ$Temp)))
g<- ggplot(aqLong, aes(x=Day, y=Variable))
g<- g + geom_tile(aes(fill=Scaled))
g
# Step 5: Look at all the data via a scatter plot
# Create a scatter chart (geom_point),
# x-axis is wind
# y-axis is temperature
# dot size represents Ozone
# color represents Solar.R
g<- ggplot(myairQ, aes(x=Wind, y=Temp))
g<- g + geom_point(aes(color=Solar.R, size=Ozone))
g
# Step 6: Final Analysis
# Do you see any patterns after exploring the data?
# ANSWER: Wind decreases in late summer, but is fairly high in the late Spring
# Lower the wind, lower the temperature, higher the Ozone?
# ** MORE **
#
# What was the most useful visualization?
# ANSWER: **ANSWER**
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TextReuseTextDocument.R
\name{TextReuseTextDocument}
\alias{TextReuseTextDocument}
\alias{has_content}
\alias{has_hashes}
\alias{has_minhashes}
\alias{has_tokens}
\alias{is.TextReuseTextDocument}
\title{TextReuseTextDocument}
\usage{
TextReuseTextDocument(text, file = NULL, meta = list(),
tokenizer = tokenize_ngrams, ..., hash_func = hash_string,
minhash_func = NULL, keep_tokens = FALSE, keep_text = TRUE,
skip_short = TRUE)
is.TextReuseTextDocument(x)
has_content(x)
has_tokens(x)
has_hashes(x)
has_minhashes(x)
}
\arguments{
\item{text}{A character vector containing the text of the document. This
argument can be skipped if supplying \code{file}.}
\item{file}{The path to a text file, if \code{text} is not provided.}
\item{meta}{A list with named elements for the metadata associated with this
document. If a document is created using the \code{text} parameter, then
you must provide an \code{id} field, e.g., \code{meta = list(id =
"my_id")}. If the document is created using \code{file}, then the ID will
be created from the file name.}
\item{tokenizer}{A function to split the text into tokens. See
\code{\link{tokenizers}}. If value is \code{NULL}, then tokenizing and
hashing will be skipped.}
\item{...}{Arguments passed on to the \code{tokenizer}.}
\item{hash_func}{A function to hash the tokens. See
\code{\link{hash_string}}.}
\item{minhash_func}{A function to create minhash signatures of the document.
See \code{\link{minhash_generator}}.}
\item{keep_tokens}{Should the tokens be saved in the document that is
returned or discarded?}
\item{keep_text}{Should the text be saved in the document that is returned or
discarded?}
\item{skip_short}{Should short documents be skipped? (See details.)}
\item{x}{An R object to check.}
}
\value{
An object of class \code{TextReuseTextDocument}. This object inherits
from the virtual S3 class \code{\link[NLP]{TextDocument}} in the NLP
package. It contains the following elements: \describe{ \item{content}{The
text of the document.} \item{tokens}{The tokens created from the text.}
\item{hashes}{Hashes created from the tokens.} \item{minhashes}{The minhash
signature of the document.} \item{metadata}{The document metadata,
including the filename (if any) in \code{file}.} }
}
\description{
This is the constructor function for \code{TextReuseTextDocument} objects.
This class is used for comparing documents.
}
\details{
This constructor function follows a three-step process. It reads in
the text, either from a file or from memory. It then tokenizes that text.
Then it hashes the tokens. Most of the comparison functions in this package
rely only on the hashes to make the comparison. By passing \code{FALSE} to
\code{keep_tokens} and \code{keep_text}, you can avoid saving those
objects, which can result in significant memory savings for large corpora.
If \code{skip_short = TRUE}, this function will return \code{NULL} for very
short or empty documents. A very short document is one where there are too
few words to create at least two n-grams. For example, if five-grams are
desired, then a document must be at least six words long. If no value of
\code{n} is provided, then the function assumes a value of \code{n = 3}. A
warning will be printed with the document ID of a skipped document.
}
\examples{
file <- system.file("extdata/legal/ny1850-match.txt", package = "textreuse")
doc <- TextReuseTextDocument(file = file, meta = list(id = "ny1850"))
print(doc)
meta(doc)
head(tokens(doc))
head(hashes(doc))
\dontrun{
content(doc)
}
}
\seealso{
\link[=TextReuseTextDocument-accessors]{Accessors for TextReuse
objects}.
}
| /man/TextReuseTextDocument.Rd | permissive | felipegonzalez/textreuse | R | false | true | 3,724 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TextReuseTextDocument.R
\name{TextReuseTextDocument}
\alias{TextReuseTextDocument}
\alias{has_content}
\alias{has_hashes}
\alias{has_minhashes}
\alias{has_tokens}
\alias{is.TextReuseTextDocument}
\title{TextReuseTextDocument}
\usage{
TextReuseTextDocument(text, file = NULL, meta = list(),
tokenizer = tokenize_ngrams, ..., hash_func = hash_string,
minhash_func = NULL, keep_tokens = FALSE, keep_text = TRUE,
skip_short = TRUE)
is.TextReuseTextDocument(x)
has_content(x)
has_tokens(x)
has_hashes(x)
has_minhashes(x)
}
\arguments{
\item{text}{A character vector containing the text of the document. This
argument can be skipped if supplying \code{file}.}
\item{file}{The path to a text file, if \code{text} is not provided.}
\item{meta}{A list with named elements for the metadata associated with this
document. If a document is created using the \code{text} parameter, then
you must provide an \code{id} field, e.g., \code{meta = list(id =
"my_id")}. If the document is created using \code{file}, then the ID will
be created from the file name.}
\item{tokenizer}{A function to split the text into tokens. See
\code{\link{tokenizers}}. If value is \code{NULL}, then tokenizing and
hashing will be skipped.}
\item{...}{Arguments passed on to the \code{tokenizer}.}
\item{hash_func}{A function to hash the tokens. See
\code{\link{hash_string}}.}
\item{minhash_func}{A function to create minhash signatures of the document.
See \code{\link{minhash_generator}}.}
\item{keep_tokens}{Should the tokens be saved in the document that is
returned or discarded?}
\item{keep_text}{Should the text be saved in the document that is returned or
discarded?}
\item{skip_short}{Should short documents be skipped? (See details.)}
\item{x}{An R object to check.}
}
\value{
An object of class \code{TextReuseTextDocument}. This object inherits
from the virtual S3 class \code{\link[NLP]{TextDocument}} in the NLP
package. It contains the following elements: \describe{ \item{content}{The
text of the document.} \item{tokens}{The tokens created from the text.}
\item{hashes}{Hashes created from the tokens.} \item{minhashes}{The minhash
signature of the document.} \item{metadata}{The document metadata,
including the filename (if any) in \code{file}.} }
}
\description{
This is the constructor function for \code{TextReuseTextDocument} objects.
This class is used for comparing documents.
}
\details{
This constructor function follows a three-step process. It reads in
the text, either from a file or from memory. It then tokenizes that text.
Then it hashes the tokens. Most of the comparison functions in this package
rely only on the hashes to make the comparison. By passing \code{FALSE} to
\code{keep_tokens} and \code{keep_text}, you can avoid saving those
objects, which can result in significant memory savings for large corpora.
If \code{skip_short = TRUE}, this function will return \code{NULL} for very
short or empty documents. A very short document is one where there are too
few words to create at least two n-grams. For example, if five-grams are
desired, then a document must be at least six words long. If no value of
\code{n} is provided, then the function assumes a value of \code{n = 3}. A
warning will be printed with the document ID of a skipped document.
}
\examples{
file <- system.file("extdata/legal/ny1850-match.txt", package = "textreuse")
doc <- TextReuseTextDocument(file = file, meta = list(id = "ny1850"))
print(doc)
meta(doc)
head(tokens(doc))
head(hashes(doc))
\dontrun{
content(doc)
}
}
\seealso{
\link[=TextReuseTextDocument-accessors]{Accessors for TextReuse
objects}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hd.birge.R
\name{hd.lc.sm}
\alias{hd.lc.sm}
\title{Hellinger distance between log-concave densities}
\usage{
hd.lc.sm(x, y)
}
\arguments{
\item{x}{Vector of m independent and identically distributed random variables;
corresponds to the first sample.}
\item{y}{Vector of n independent and identically distributed random variables;
corresponds to the second sample.}
}
\value{
A point estimator of the squared Hellinger distance.
}
\description{
Provides an estimate of
the squared Hellinger distance between two log-concave densities.
This function uses the smoothed log-concave density estimator of Chen and Samworth (2013),
given by \link[logcondens]{logConDens} of logcondens package.
}
\examples{
x <- sort(rnorm(100)); y <- sort(rgamma(50, shape=1));
hd.lc.sm(x,y)
}
\references{
Laha, N., Moodie, Z., Huang, Y., and Luedtke (2021), A.
\emph{ Improved inference for vaccine-induced immune responses
via shape-constrained methods}. Submitted.
Chen, Y. and Samworth, R. J. (2013). \emph{Smoothed
log-concave maximum likelihood estimation with applications},
Statistica Sinica, 23, 1303-1398.
}
\seealso{
\code{\link{hd.lc}}, \code{\link{hd.uni}}, \code{\link{hell.ci}}
}
\author{
\href{https://connects.catalyst.harvard.edu/Profiles/display/Person/184207}{Nilanjana Laha}
(maintainer), \email{nlaha@hsph.harvard.edu},
Alex Luedtke, \email{aluedtke@uw.edu}.
}
| /man/hd.lc.sm.Rd | permissive | nilanjanalaha/SDNNtests | R | false | true | 1,461 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hd.birge.R
\name{hd.lc.sm}
\alias{hd.lc.sm}
\title{Hellinger distance between log-concave densities}
\usage{
hd.lc.sm(x, y)
}
\arguments{
\item{x}{Vector of m independent and identically distributed random variables;
corresponds to the first sample.}
\item{y}{Vector of n independent and identically distributed random variables;
corresponds to the second sample.}
}
\value{
A point estimator of the squared Hellinger distance.
}
\description{
Provides an estimate of
the squared Hellinger distance between two log-concave densities.
This function uses the smoothed log-concave density estimator of Chen and Samworth (2013),
given by \link[logcondens]{logConDens} of logcondens package.
}
\examples{
x <- sort(rnorm(100)); y <- sort(rgamma(50, shape=1));
hd.lc.sm(x,y)
}
\references{
Laha, N., Moodie, Z., Huang, Y., and Luedtke (2021), A.
\emph{ Improved inference for vaccine-induced immune responses
via shape-constrained methods}. Submitted.
Chen, Y. and Samworth, R. J. (2013). \emph{Smoothed
log-concave maximum likelihood estimation with applications},
Statistica Sinica, 23, 1303-1398.
}
\seealso{
\code{\link{hd.lc}}, \code{\link{hd.uni}}, \code{\link{hell.ci}}
}
\author{
\href{https://connects.catalyst.harvard.edu/Profiles/display/Person/184207}{Nilanjana Laha}
(maintainer), \email{nlaha@hsph.harvard.edu},
Alex Luedtke, \email{aluedtke@uw.edu}.
}
|
##
## Copyright (c) 2006-2019 of Toni Giorgino
##
## This file is part of the DTW package.
##
## DTW is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## DTW is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with DTW. If not, see <http://www.gnu.org/licenses/>.
##
#' Plotting of dynamic time warp results
#'
#' Methods for plotting dynamic time warp alignment objects returned by
#' [dtw()].
#'
#' `dtwPlot` displays alignment contained in `dtw` objects.
#'
#' Various plotting styles are available, passing strings to the `type`
#' argument (may be abbreviated):
#'
#' * `alignment` plots the warping curve in `d`;
#' * `twoway` plots a point-by-point comparison, with matching lines; see [dtwPlotTwoWay()];
#' * `threeway` vis-a-vis inspection of the timeseries and their warping curve; see [dtwPlotThreeWay()];
#' * `density` displays the cumulative cost landscape with the warping path overimposed; see [dtwPlotDensity()]
#'
#' Additional parameters are passed to the plotting functions: use with
#' care.
#'
#' @aliases dtwPlot dtwPlotAlignment plot.dtw
#' @param x,d `dtw` object, usually result of call to [dtw()]
#' @param xlab label for the query axis
#' @param ylab label for the reference axis
#' @param type general style for the plot, see below
#' @param plot.type type of line to be drawn, used as the `type` argument
#' in the underlying `plot` call
#' @param ... additional arguments, passed to plotting functions
#' @section Warning: These functions are incompatible with mechanisms for
#' arranging plots on a device: `par(mfrow)`, `layout` and
#' `split.screen`.
#' @author Toni Giorgino
#' @family plot
#' @seealso [dtwPlotTwoWay()], [dtwPlotThreeWay()] and [dtwPlotDensity()] for details
#' on the respective plotting styles.
#' @keywords ts hplot
#' @examples
#'
#' ## Same example as in dtw
#'
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#'
#' alignment<-dtw(query,reference,keep=TRUE);
#'
#' # A sample of the plot styles. See individual plotting functions for details
#'
#' plot(alignment, type="alignment",
#' main="DTW sine/cosine: simple alignment plot")
#'
#' plot(alignment, type="twoway",
#' main="DTW sine/cosine: dtwPlotTwoWay")
#'
#' plot(alignment, type="threeway",
#' main="DTW sine/cosine: dtwPlotThreeWay")
#'
#' plot(alignment, type="density",
#' main="DTW sine/cosine: dtwPlotDensity")
#'
#' @name dtwPlot
#' @import graphics
#' @export
plot.dtw <- function(x, type="alignment", ...) {
  ## Validate and expand the (possibly abbreviated) plot type up front.
  ## match.arg() keeps the pmatch()-style partial matching of the original
  ## but raises an informative error on an unknown or ambiguous type,
  ## instead of the former cryptic switch(NA, ...) failure.
  type <- match.arg(type, c("alignment", "twoway", "threeway", "density"))

  ## Dispatch to the dedicated plotting style; extra arguments are
  ## forwarded untouched.
  switch(type,
         alignment = dtwPlotAlignment(x, ...),
         twoway    = dtwPlotTwoWay(x, ...),
         threeway  = dtwPlotThreeWay(x, ...),
         density   = dtwPlotDensity(x, ...))
}

## an alias
#' @export
dtwPlot <- plot.dtw
#' @rdname dtwPlot
#' @export
## Draw the warping curve of a dtw alignment: query index on the horizontal
## axis, reference index on the vertical axis, with the axes spanning the
## full lengths of the two timeseries (d$N and d$M).
dtwPlotAlignment <- function(d, xlab="Query index", ylab="Reference index", plot.type="l", ...) {
  query_extent <- c(1, d$N)
  reference_extent <- c(1, d$M)
  plot(x = d$index1, y = d$index2,
       xlim = query_extent, ylim = reference_extent,
       xlab = xlab, ylab = ylab, type = plot.type,
       ...)
}
## Well-known and much-copied pairwise matching
#' Plotting of dynamic time warp results: pointwise comparison
#'
#' Display the query and reference time series and their alignment, arranged
#' for visual inspection.
#'
#' The two vectors are displayed via the [matplot()] functions; their
#' appearance can be customized via the `type` and `pch` arguments
#' (constants or vectors of two elements). If `offset` is set, the
#' reference is shifted vertically by the given amount; this will be reflected
#' by the *right-hand* axis.
#'
#' Argument `match.indices` is used to draw a visual guide to matches; if
#' a vector is given, guides are drawn for the corresponding indices in the
#' warping curve (match lines). If integer, it is used as the number of guides
#' to be plotted. The corresponding style is customized via the
#' `match.col` and `match.lty` arguments.
#'
#' If `xts` and `yts` are not supplied, they will be recovered from
#' `d`, as long as it was created with the two-argument call of
#' [dtw()] with `keep.internals=TRUE`. Only single-variate time
#' series can be plotted this way.
#'
#' @param d an alignment result, object of class `dtw`
#' @param xts query vector
#' @param yts reference vector
#' @param xlab,ylab axis labels
#' @param offset displacement between the timeseries, summed to reference
#' @param match.col,match.lty color and line type of the match guide lines
#' @param match.indices indices for which to draw a visual guide
#' @param ts.type,pch graphical parameters for timeseries plotting, passed to
#' `matplot`
#' @param ... additional arguments, passed to `matplot`
#' @note When `offset` is set values on the left axis only apply to the
#' query.
#' @section Warning: The function is incompatible with mechanisms for arranging
#' plots on a device: `par(mfrow)`, `layout` and `split.screen`.
#' @author Toni Giorgino
#' @family plot
#' @seealso [dtwPlot()] for other dtw plotting functions,
#' [matplot()] for graphical parameters.
#' @keywords hplot
#' @examples
#'
#'
#' ## A noisy sine wave as query
#' ## A cosine is for reference; sin and cos are offset by 25 samples
#'
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#' dtw(query,reference,step=asymmetricP1,keep=TRUE)->alignment;
#'
#'
#' ## Equivalent to plot(alignment,type="two");
#' dtwPlotTwoWay(alignment);
#'
#'
#' ## Highlight matches of chosen QUERY indices. We will do some index
#' ## arithmetics to recover the corresponding indices along the warping
#' ## curve
#'
#' hq <- (0:8)/8
#' hq <- round(hq*100) # indices in query for pi/4 .. 7/4 pi
#'
#' hw <- (alignment$index1 %in% hq) # where are they on the w. curve?
#' hi <- (1:length(alignment$index1))[hw]; # get the indices of TRUE elems
#'
#'
#' ## Beware of the reference's y axis, may be confusing
#' plot(alignment,offset=-2,type="two", lwd=3, match.col="grey50",
#' match.indices=hi,main="Match lines shown every pi/4 on query");
#'
#' legend("topright",c("Query","Reference (rt. axis)"), pch=21, col=1:6)
#'
#'
#'
#' @export
dtwPlotTwoWay <- function(d,xts=NULL,yts=NULL, offset=0,
                          ts.type="l",pch=21,
                          match.indices=NULL,
                          match.col="gray70", match.lty=3,
                          xlab="Index", ylab="Query value",
                          ... ) {

  ## Recover the original timeseries from the alignment object when the
  ## caller does not pass them; they are only stored if the alignment was
  ## computed with keep.internals=TRUE.
  if(is.null(xts) || is.null(yts))  {
    xts <- d$query;
    yts <- d$reference;
  }

  if(is.null(xts) || is.null(yts))
    stop("Original timeseries are required");

  ## the reference is drawn shifted vertically by `offset` (0 = overlaid)
  ytso<-yts+offset;

  ## pad to longest: extending via length<- fills the tail with NA, so
  ## cbind() below aligns the two series without recycling
  maxlen<-max(length(xts),length(ytso));
  length(xts)<-maxlen;
  length(ytso)<-maxlen;

  ## save default graphics state, for resetting at the end...
  def.par <- par(no.readonly = TRUE);

  ## make room for the secondary (right-hand) axis, if any offset is in use
  if(offset!=0) {
    par(mar=c(5,4,4,4)+.1);
  }

  ## plot query + shifted reference in a single panel; axes are suppressed
  ## here and drawn manually below
  matplot(cbind(xts,ytso),
          type=ts.type,pch=pch,
          xlab=xlab, ylab=ylab,
          axes=FALSE,
          ...);

  ## box and main axes; left-axis tick positions come from the query values
  box();
  axis(1);
  axis(2,at=pretty(xts));

  ## display secondary axis if offset: labels show the *unshifted*
  ## reference values at their shifted plot positions
  if(offset!=0) {
    rightTicks <- pretty(yts);
    axis(4,at=rightTicks+offset,labels=rightTicks);
  }

  ## choose which warping-curve steps get a visual match guide
  # par(par.match);
  if(is.null(match.indices)) {
    ## default: one guide per step of the warping curve
    ml<-length(d$index1);
    idx<-1:ml;
  } else if(length(match.indices)==1) {
    ## scalar: that many guides, evenly spaced along the warping curve
    idx <- seq(from=1,
               to=length(d$index1),
               length.out=match.indices);
  } else {
    ## vector: caller-supplied indices into the warping curve
    idx <- match.indices;
  }

  ## x0, y0 coordinates of points from which to draw.
  ## x1, y1 coordinates of points to which to draw.
  segments(d$index1[idx],xts[d$index1[idx]],
           d$index2[idx],ytso[d$index2[idx]],
           col=match.col,lty=match.lty);

  par(def.par)#- reset to default
}
## ##################################################
## Global distance density plot
# for each plot, we should set: color, width, style, type
# for match lines: color, width, style
#' Plotting of dynamic time warp results: annotated warping function
#'
#' Display the query and reference time series and their warping curve,
#' arranged for visual inspection.
#'
#'
#' The query time series is plotted in the bottom panel, with indices growing
#' rightwards and values upwards. Reference is in the left panel, indices
#' growing upwards and values leftwards. The warping curve panel matches
#' indices, and therefore element (1,1) will be at the lower left, (N,M) at the
#' upper right.
#'
#' Argument `match.indices` is used to draw a visual guide to matches; if
#' a vector is given, guides are drawn for the corresponding indices in the
#' warping curve (match lines). If integer, it is used as the number of guides
#' to be plotted. The corresponding style is customized via the
#' `match.col` and `match.lty` arguments.
#'
#' If `xts` and `yts` are not supplied, they will be recovered from
#' `d`, as long as it was created with the two-argument call of
#' [dtw()] with `keep.internals=TRUE`. Only single-variate time
#' series can be plotted.
#'
#' @param d an alignment result, object of class `dtw`
#' @param xts query vector
#' @param yts reference vector
#' @param xlab label for the query axis
#' @param ylab label for the reference axis
#' @param main main title
#' @param type.align line style for warping curve plot
#' @param type.ts line style for timeseries plot
#' @param match.indices indices for which to draw a visual guide
#' @param margin outer figure margin
#' @param inner.margin inner figure margin
#' @param title.margin space on the top of figure
#' @param ... additional arguments, used for the warping curve
#' @family plot
#' @section Warning: The function is incompatible with mechanisms for arranging
#' plots on a device: `par(mfrow)`, `layout` and `split.screen`.
#' Appearance of the match lines and timeseries currently can not be
#' customized.
#' @author Toni Giorgino
#' @keywords hplot
#' @examples
#'
#'
#' ## A noisy sine wave as query
#' ## A cosine is for reference; sin and cos are offset by 25 samples
#'
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#' dtw(query,reference,keep=TRUE)->alignment;
#'
#'
#' ## Beware of the reference's y axis, may be confusing
#' ## Equivalent to plot(alignment,type="three");
#' dtwPlotThreeWay(alignment);
#'
#'
#' ## Highlight matches of chosen QUERY indices. We will do some index
#' ## arithmetics to recover the corresponding indices along the warping
#' ## curve
#'
#' hq <- (0:8)/8
#' hq <- round(hq*100) # indices in query for pi/4 .. 7/4 pi
#'
#' hw <- (alignment$index1 %in% hq) # where are they on the w. curve?
#' hi <- (1:length(alignment$index1))[hw]; # get the indices of TRUE elems
#'
#' dtwPlotThreeWay(alignment,match.indices=hi);
#'
#' @export
dtwPlotThreeWay <- function(d,xts=NULL,yts=NULL,
                            type.align="l",type.ts="l",
                            match.indices=NULL,
                            margin=4, inner.margin=0.2, title.margin=1.5,
                            xlab="Query index",ylab="Reference index",main="Timeseries alignment",
                            ... ) {

  ## Recover the original timeseries from the alignment object if not
  ## supplied; available only when dtw() was run with keep.internals=TRUE.
  if(is.null(xts) || is.null(yts))  {
    xts <- d$query;
    yts <- d$reference;
  }

  # Sanity check
  if(is.null(xts) || is.null(yts))
    stop("Original timeseries are required");

  # Coerce to matrices so the column count can be checked uniformly
  xts <- as.matrix(xts);
  yts <- as.matrix(yts);

  # Reject multivariate input: only single-column series can be drawn
  if( ncol(xts)>1 || ncol(yts)>1 )
    stop("Only single-variate timeseries can be displayed. (You may want to extract a column for visualization purposes.)");

  def.par <- par(no.readonly = TRUE) # save default, for resetting...

  ## Panel arrangement (layout matrix, filled by row):
  ##   3 1     3 = reference (left), 1 = warping curve (upper right),
  ##   0 2     2 = query (bottom),   0 = empty corner.
  ## Column widths 1:3 and row heights 3:1 give the curve the big panel.
  layout(matrix(c(3,1,0,2),2,2,byrow=TRUE), c(1,3), c(3,1), TRUE);

  ## margin shorthands: inner (between panels), bottom, left,
  ## top (incl. title space), right; mlab/mtex feed mgp= below to place
  ## axis titles and tick labels
  imar<-inner.margin;

  bmar<-margin;
  lmar<-margin;
  tmar<-margin+title.margin;
  rmar<-margin;

  mlab=margin/2;
  mtex=margin/6;

  nn<-length(xts);
  mm<-length(yts);

  # Plot the warping function (panel 1, upper right)
  par(mar=c(imar,imar,tmar,rmar));

  # todo: plot over segments
  plot(d$index1,d$index2,type=type.align,
       xlim=c(1,nn),ylim=c(1,mm),
       ax=FALSE,main=main, ...
       ); # fake a diagonal, to set the axes

  # match guides along the warping curve:
  # 1 value: plot that many guides, evenly spaced along the curve
  if(length(match.indices)==1) {
    match.indices <- seq(from=1,
                         to=length(d$index1),
                         length.out=match.indices);
  }

  # vector: use specified indices
  if(! is.null(match.indices) ) {       # vertical match segments
    idx <- match.indices;
    segments(d$index1[idx],0,
             d$index1[idx],d$index2[idx],
             col="grey60",lty=3);
    # horz. guides from the left edge to the same points on the curve
    segments(0,d$index2[idx],
             d$index1[idx],d$index2[idx],
             col="grey60",lty=3);
  }

  box();

  # axis sides are 1- bot; 2- left; 3- top; 4- right
  # Plot query (horizontal, bottom; panel 2)
  par(mar=c(bmar,imar,imar,rmar));
  plot(xts ~ c(1:nn), type=type.ts,
       xlab=xlab ,mgp=c(mlab,mtex,0) ,ax=FALSE,
       );
  axis(1);
  axis(2);
  box();

  # Plot reference (vertical, left; panel 3)
  par(mar=c(imar,lmar,tmar,imar));
  # reverse the horiz. axis so that rotation is more natural
  plot(c(1:mm) ~ yts, xlim=rev(range(yts)), type=type.ts,
       ylab=ylab, mgp=c(mlab,mtex,0) , ax=FALSE,
       );
  axis(3);
  axis(2);
  box();

  par(def.par)#- reset to default
}
#' Display the cumulative cost density with the warping path overimposed
#'
#' The plot is based on the cumulative cost matrix. It displays the optimal alignment
#' as a "ridge" in the global cost landscape.
#'
#' The alignment must have been
#' constructed with the `keep.internals=TRUE` parameter set.
#'
#' If `normalize` is `TRUE`, the *average* cost per step is
#' plotted instead of the cumulative one. Step averaging depends on the
#' [stepPattern()] used.
#'
#' @param d an alignment result, object of class `dtw`
#' @param normalize show per-step average cost instead of cumulative cost
#' @param xlab label for the query axis
#' @param ylab label for the reference axis
#' @param ... additional parameters forwarded to plotting functions
#' @family plot
#' @examples
#'
#' ## A study of the "Itakura" parallelogram
#' ##
#' ## A widely held misconception is that the "Itakura parallelogram" (as
#' ## described in the original article) is a global constraint. Instead,
#' ## it arises from local slope restrictions. Anyway, an "itakuraWindow",
#' ## is provided in this package. A comparison between the two follows.
#'
#' ## The local constraint: three sides of the parallelogram are seen
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#'
#' ita <- dtw(query,reference,keep=TRUE,step=typeIIIc)
#' dtwPlotDensity(ita, main="Slope-limited asymmetric step (Itakura)")
#'
#' ## Symmetric step with global parallelogram-shaped constraint. Note how
#' ## long (>2 steps) horizontal stretches are allowed within the window.
#'
#' dtw(query,reference,keep=TRUE,window=itakuraWindow)->ita;
#' dtwPlotDensity(ita,
#' main="Symmetric step with Itakura parallelogram window")
#'
#' @export
dtwPlotDensity <- function(d, normalize=FALSE,
                           xlab="Query index", ylab="Reference index", ...) {
  ## Cumulative cost landscape with the optimal path drawn on top.
  acc <- d$costMatrix
  if (is.null(acc))
    stop("dtwPlotDensity requires dtw internals (set keep.internals=TRUE on dtw() call)")

  ## Optionally show the per-step average cost instead of the cumulative
  ## one; the divisor depends on the step pattern's "norm" attribute.
  if (normalize) {
    norm <- attr(d$stepPattern, "norm")
    if (is.na(norm))
      stop("No normalization known for step pattern used")
    divisor <- switch(norm,
                      "N"   = row(acc),
                      "N+M" = row(acc) + col(acc),
                      "M"   = col(acc),
                      NULL)
    if (!is.null(divisor))
      acc <- acc / divisor
  }

  nx <- nrow(acc)
  ny <- ncol(acc)
  image(acc, col = grDevices::terrain.colors(100), x = 1:nx, y = 1:ny,
        xlab = xlab, ylab = ylab, ...)
  contour(acc, x = 1:nx, y = 1:ny, add = TRUE)
  ## Overlay the optimal warping path.
  lines(d$index1, d$index2, col = "blue", lwd = 2)
}
| /R/plot.dtw.R | no_license | cran/dtw | R | false | false | 17,058 | r |
##
## Copyright (c) 2006-2019 of Toni Giorgino
##
## This file is part of the DTW package.
##
## DTW is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## DTW is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with DTW. If not, see <http://www.gnu.org/licenses/>.
##
#' Plotting of dynamic time warp results
#'
#' Methods for plotting dynamic time warp alignment objects returned by
#' [dtw()].
#'
#' `dtwPlot` displays alignment contained in `dtw` objects.
#'
#' Various plotting styles are available, passing strings to the `type`
#' argument (may be abbreviated):
#'
#' * `alignment` plots the warping curve in `d`;
#' * `twoway` plots a point-by-point comparison, with matching lines; see [dtwPlotTwoWay()];
#' * `threeway` vis-a-vis inspection of the timeseries and their warping curve; see [dtwPlotThreeWay()];
#' * `density` displays the cumulative cost landscape with the warping path overimposed; see [dtwPlotDensity()]
#'
#' Additional parameters are passed to the plotting functions: use with
#' care.
#'
#' @aliases dtwPlot dtwPlotAlignment plot.dtw
#' @param x,d `dtw` object, usually result of call to [dtw()]
#' @param xlab label for the query axis
#' @param ylab label for the reference axis
#' @param type general style for the plot, see below
#' @param plot.type type of line to be drawn, used as the `type` argument
#' in the underlying `plot` call
#' @param ... additional arguments, passed to plotting functions
#' @section Warning: These functions are incompatible with mechanisms for
#' arranging plots on a device: `par(mfrow)`, `layout` and
#' `split.screen`.
#' @author Toni Giorgino
#' @family plot
#' @seealso [dtwPlotTwoWay()], [dtwPlotThreeWay()] and [dtwPlotDensity()] for details
#' on the respective plotting styles.
#' @keywords ts hplot
#' @examples
#'
#' ## Same example as in dtw
#'
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#'
#' alignment<-dtw(query,reference,keep=TRUE);
#'
#' # A sample of the plot styles. See individual plotting functions for details
#'
#' plot(alignment, type="alignment",
#' main="DTW sine/cosine: simple alignment plot")
#'
#' plot(alignment, type="twoway",
#' main="DTW sine/cosine: dtwPlotTwoWay")
#'
#' plot(alignment, type="threeway",
#' main="DTW sine/cosine: dtwPlotThreeWay")
#'
#' plot(alignment, type="density",
#' main="DTW sine/cosine: dtwPlotDensity")
#'
#' @name dtwPlot
#' @import graphics
#' @export
plot.dtw <- function(x, type="alignment", ...) {
  ## Dispatch to the requested plotting style. match.arg() keeps the
  ## original partial-matching behavior ("two" -> "twoway") but, unlike
  ## the previous pmatch()+switch(), raises a clear error when `type`
  ## is unknown or ambiguous instead of failing silently/obscurely.
  type <- match.arg(type, c("alignment",
                            "twoway",
                            "threeway",
                            "density"))
  switch(type,
         alignment = dtwPlotAlignment(x, ...),
         twoway    = dtwPlotTwoWay(x, ...),
         threeway  = dtwPlotThreeWay(x, ...),
         density   = dtwPlotDensity(x, ...))
}
## An exported alias: `dtwPlot(x, ...)` behaves exactly like the S3 method
## `plot.dtw`, for callers who prefer an explicit function name.
#' @export
dtwPlot <- plot.dtw;
#' @rdname dtwPlot
#' @export
dtwPlotAlignment <- function(d, xlab="Query index", ylab="Reference index", plot.type="l", ...) {
  ## Draw the warping curve (index1 vs index2), with axis limits covering
  ## the full extent of the query (1..N) and reference (1..M) series.
  query_limits <- c(1, d$N)
  reference_limits <- c(1, d$M)
  plot(x = d$index1, y = d$index2,
       type = plot.type,
       xlim = query_limits, ylim = reference_limits,
       xlab = xlab, ylab = ylab,
       ...)
}
## Well-known and much-copied pairwise matching
#' Plotting of dynamic time warp results: pointwise comparison
#'
#' Display the query and reference time series and their alignment, arranged
#' for visual inspection.
#'
#' The two vectors are displayed via the [matplot()] functions; their
#' appearance can be customized via the `type` and `pch` arguments
#' (constants or vectors of two elements). If `offset` is set, the
#' reference is shifted vertically by the given amount; this will be reflected
#' by the *right-hand* axis.
#'
#' Argument `match.indices` is used to draw a visual guide to matches; if
#' a vector is given, guides are drawn for the corresponding indices in the
#' warping curve (match lines). If integer, it is used as the number of guides
#' to be plotted. The corresponding style is customized via the
#' `match.col` and `match.lty` arguments.
#'
#' If `xts` and `yts` are not supplied, they will be recovered from
#' `d`, as long as it was created with the two-argument call of
#' [dtw()] with `keep.internals=TRUE`. Only single-variate time
#' series can be plotted this way.
#'
#' @param d an alignment result, object of class `dtw`
#' @param xts query vector
#' @param yts reference vector
#' @param xlab,ylab axis labels
#' @param offset displacement between the timeseries, summed to reference
#' @param match.col,match.lty color and line type of the match guide lines
#' @param match.indices indices for which to draw a visual guide
#' @param ts.type,pch graphical parameters for timeseries plotting, passed to
#' `matplot`
#' @param ... additional arguments, passed to `matplot`
#' @note When `offset` is set values on the left axis only apply to the
#' query.
#' @section Warning: The function is incompatible with mechanisms for arranging
#' plots on a device: `par(mfrow)`, `layout` and `split.screen`.
#' @author Toni Giorgino
#' @family plot
#' @seealso [dtwPlot()] for other dtw plotting functions,
#' [matplot()] for graphical parameters.
#' @keywords hplot
#' @examples
#'
#'
#' ## A noisy sine wave as query
#' ## A cosine is for reference; sin and cos are offset by 25 samples
#'
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#' dtw(query,reference,step=asymmetricP1,keep=TRUE)->alignment;
#'
#'
#' ## Equivalent to plot(alignment,type="two");
#' dtwPlotTwoWay(alignment);
#'
#'
#' ## Highlight matches of chosen QUERY indices. We will do some index
#' ## arithmetics to recover the corresponding indices along the warping
#' ## curve
#'
#' hq <- (0:8)/8
#' hq <- round(hq*100) # indices in query for pi/4 .. 7/4 pi
#'
#' hw <- (alignment$index1 %in% hq) # where are they on the w. curve?
#' hi <- (1:length(alignment$index1))[hw]; # get the indices of TRUE elems
#'
#'
#' ## Beware of the reference's y axis, may be confusing
#' plot(alignment,offset=-2,type="two", lwd=3, match.col="grey50",
#' match.indices=hi,main="Match lines shown every pi/4 on query");
#'
#' legend("topright",c("Query","Reference (rt. axis)"), pch=21, col=1:6)
#'
#'
#'
#' @export
dtwPlotTwoWay <- function(d,xts=NULL,yts=NULL, offset=0,
                          ts.type="l",pch=21,
                          match.indices=NULL,
                          match.col="gray70", match.lty=3,
                          xlab="Index", ylab="Query value",
                          ... ) {
    ## Recover the original series from the alignment object when not given
    ## explicitly (requires dtw(..., keep.internals=TRUE)).
    if(is.null(xts) || is.null(yts)) {
        xts <- d$query;
        yts <- d$reference;
    }
    if(is.null(xts) || is.null(yts))
        stop("Original timeseries are required");
    ## Shift the reference vertically so the two curves do not overlap;
    ## the secondary (right-hand) axis drawn below compensates for it.
    ytso<-yts+offset;
    ## pad to longest
    maxlen<-max(length(xts),length(ytso));
    length(xts)<-maxlen;
    length(ytso)<-maxlen;
    ## save default, for resetting...
    def.par <- par(no.readonly = TRUE);
    ## make room for secondary axis, if any
    if(offset!=0) {
        par(mar=c(5,4,4,4)+.1);
    }
    ## plot q+t
    matplot(cbind(xts,ytso),
            type=ts.type,pch=pch,
            xlab=xlab, ylab=ylab,
            axes=FALSE,
            ...);
    ## box and main axis
    ## compute range covering all values
    box();
    axis(1);
    axis(2,at=pretty(xts));
    ## display secondary axis if offset
    if(offset!=0) {
        rightTicks <- pretty(yts);
        axis(4,at=rightTicks+offset,labels=rightTicks);
    }
    ## plot the matching
    # par(par.match);
    ## Which warping-path points get a match line: all (default), an
    ## evenly spaced subset of size match.indices (scalar), or exactly
    ## the supplied indices (vector).
    if(is.null(match.indices)) {
        ml<-length(d$index1);
        idx<-1:ml;
    } else if(length(match.indices)==1) {
        idx <- seq(from=1,
                   to=length(d$index1),
                   length.out=match.indices);
    } else {
        idx <- match.indices;
    }
    ## x0, y0 coordinates of points from which to draw.
    ## x1, y1 coordinates of points to which to draw.
    segments(d$index1[idx],xts[d$index1[idx]],
             d$index2[idx],ytso[d$index2[idx]],
             col=match.col,lty=match.lty);
    par(def.par)#- reset to default
}
## ##################################################
## Global distance density plot
# for each plot, we should set: color, width, style, type
# for match lines: color, width, style
#' Plotting of dynamic time warp results: annotated warping function
#'
#' Display the query and reference time series and their warping curve,
#' arranged for visual inspection.
#'
#'
#' The query time series is plotted in the bottom panel, with indices growing
#' rightwards and values upwards. Reference is in the left panel, indices
#' growing upwards and values leftwards. The warping curve panel matches
#' indices, and therefore element (1,1) will be at the lower left, (N,M) at the
#' upper right.
#'
#' Argument `match.indices` is used to draw a visual guide to matches; if
#' a vector is given, guides are drawn for the corresponding indices in the
#' warping curve (match lines). If integer, it is used as the number of guides
#' to be plotted. The corresponding style is customized via the
#' `match.col` and `match.lty` arguments.
#'
#' If `xts` and `yts` are not supplied, they will be recovered from
#' `d`, as long as it was created with the two-argument call of
#' [dtw()] with `keep.internals=TRUE`. Only single-variate time
#' series can be plotted.
#'
#' @param d an alignment result, object of class `dtw`
#' @param xts query vector
#' @param yts reference vector
#' @param xlab label for the query axis
#' @param ylab label for the reference axis
#' @param main main title
#' @param type.align line style for warping curve plot
#' @param type.ts line style for timeseries plot
#' @param match.indices indices for which to draw a visual guide
#' @param margin outer figure margin
#' @param inner.margin inner figure margin
#' @param title.margin space on the top of figure
#' @param ... additional arguments, used for the warping curve
#' @family plot
#' @section Warning: The function is incompatible with mechanisms for arranging
#' plots on a device: `par(mfrow)`, `layout` and `split.screen`.
#' Appearance of the match lines and timeseries currently can not be
#' customized.
#' @author Toni Giorgino
#' @keywords hplot
#' @examples
#'
#'
#' ## A noisy sine wave as query
#' ## A cosine is for reference; sin and cos are offset by 25 samples
#'
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#' dtw(query,reference,keep=TRUE)->alignment;
#'
#'
#' ## Beware of the reference's y axis, may be confusing
#' ## Equivalent to plot(alignment,type="three");
#' dtwPlotThreeWay(alignment);
#'
#'
#' ## Highlight matches of chosen QUERY indices. We will do some index
#' ## arithmetics to recover the corresponding indices along the warping
#' ## curve
#'
#' hq <- (0:8)/8
#' hq <- round(hq*100) # indices in query for pi/4 .. 7/4 pi
#'
#' hw <- (alignment$index1 %in% hq) # where are they on the w. curve?
#' hi <- (1:length(alignment$index1))[hw]; # get the indices of TRUE elems
#'
#' dtwPlotThreeWay(alignment,match.indices=hi);
#'
#' @export
dtwPlotThreeWay <- function(d,xts=NULL,yts=NULL,
                            type.align="l",type.ts="l",
                            match.indices=NULL,
                            margin=4, inner.margin=0.2, title.margin=1.5,
                            xlab="Query index",ylab="Reference index",main="Timeseries alignment",
                            ... ) {
    ## Recover the original series from the alignment object when not given
    ## explicitly (requires dtw(..., keep.internals=TRUE)).
    if(is.null(xts) || is.null(yts)) {
        xts <- d$query;
        yts <- d$reference;
    }
    # Sanity check
    if(is.null(xts) || is.null(yts))
        stop("Original timeseries are required");
    # Coerce to plain vectors
    xts <- as.matrix(xts);
    yts <- as.matrix(yts);
    # Verify if not multivariate
    if( ncol(xts)>1 || ncol(yts)>1 )
        stop("Only single-variate timeseries can be displayed. (You may want to extract a column for visualization purposes.)");
    def.par <- par(no.readonly = TRUE) # save default, for resetting...
    ## 2x2 layout: panel 1 = warping curve (top right), panel 2 = query
    ## (bottom right), panel 3 = reference (top left); top-left of the
    ## matrix is the empty cell (0).
    layout(matrix(c(3,1,0,2),2,2,byrow=TRUE), c(1,3), c(3,1), TRUE);
    ## Margin shorthands (inner gap, bottom, left, top incl. title, right)
    ## plus axis-label placement distances derived from `margin`.
    imar<-inner.margin;
    bmar<-margin;
    lmar<-margin;
    tmar<-margin+title.margin;
    rmar<-margin;
    mlab=margin/2;
    mtex=margin/6;
    nn<-length(xts);
    mm<-length(yts);
    # Plot the warping function
    par(mar=c(imar,imar,tmar,rmar));
    # todo: plot over segments
    # NOTE(review): `ax=FALSE` looks like it was meant to be `axes=FALSE`
    # (partial matching does not apply to args passed through `...`) -- confirm.
    plot(d$index1,d$index2,type=type.align,
         xlim=c(1,nn),ylim=c(1,mm),
         ax=FALSE,main=main, ...
    ); # fake a diagonal, to set the axes
    # vertical match segments
    # 1 value: plot total of N elements
    if(length(match.indices)==1) {
        match.indices <- seq(from=1,
                             to=length(d$index1),
                             length.out=match.indices);
    }
    # vector: use specified indices
    if(! is.null(match.indices) ) { # vertical match segments
        idx <- match.indices;
        segments(d$index1[idx],0,
                 d$index1[idx],d$index2[idx],
                 col="grey60",lty=3);
        # horz.
        segments(0,d$index2[idx],
                 d$index1[idx],d$index2[idx],
                 col="grey60",lty=3);
    }
    box();
    # axis are 1- bot; 2- left; 3- top; 4- right
    # Plot query (horizontal, bottom)
    par(mar=c(bmar,imar,imar,rmar));
    plot(xts ~ c(1:nn), type=type.ts,
         xlab=xlab ,mgp=c(mlab,mtex,0) ,ax=FALSE,
    );
    axis(1);
    axis(2);
    box();
    # Plot reference (vertical, left)
    par(mar=c(imar,lmar,tmar,imar));
    # reverse the horiz. axis so that rotation is more natural
    plot(c(1:mm) ~ yts, xlim=rev(range(yts)), type=type.ts,
         ylab=ylab, mgp=c(mlab,mtex,0) , ax=FALSE,
    );
    axis(3);
    axis(2);
    box();
    par(def.par)#- reset to default
}
#' Display the cumulative cost density with the warping path overimposed
#'
#' The plot is based on the cumulative cost matrix. It displays the optimal alignment
#' as a "ridge" in the global cost landscape.
#'
#' The alignment must have been
#' constructed with the `keep.internals=TRUE` parameter set.
#'
#' If `normalize` is `TRUE`, the *average* cost per step is
#' plotted instead of the cumulative one. Step averaging depends on the
#' [stepPattern()] used.
#'
#' @param d an alignment result, object of class `dtw`
#' @param normalize show per-step average cost instead of cumulative cost
#' @param xlab label for the query axis
#' @param ylab label for the reference axis
#' @param ... additional parameters forwarded to plotting functions
#' @family plot
#' @examples
#'
#' ## A study of the "Itakura" parallelogram
#' ##
#' ## A widely held misconception is that the "Itakura parallelogram" (as
#' ## described in the original article) is a global constraint. Instead,
#' ## it arises from local slope restrictions. Anyway, an "itakuraWindow",
#' ## is provided in this package. A comparison between the two follows.
#'
#' ## The local constraint: three sides of the parallelogram are seen
#' idx<-seq(0,6.28,len=100);
#' query<-sin(idx)+runif(100)/10;
#' reference<-cos(idx)
#'
#' ita <- dtw(query,reference,keep=TRUE,step=typeIIIc)
#' dtwPlotDensity(ita, main="Slope-limited asymmetric step (Itakura)")
#'
#' ## Symmetric step with global parallelogram-shaped constraint. Note how
#' ## long (>2 steps) horizontal stretches are allowed within the window.
#'
#' dtw(query,reference,keep=TRUE,window=itakuraWindow)->ita;
#' dtwPlotDensity(ita,
#' main="Symmetric step with Itakura parallelogram window")
#'
#' @export
dtwPlotDensity <- function(d, normalize=FALSE,
                           xlab="Query index", ylab="Reference index", ...) {
  ## Cumulative cost landscape with the optimal path drawn on top.
  acc <- d$costMatrix
  if (is.null(acc))
    stop("dtwPlotDensity requires dtw internals (set keep.internals=TRUE on dtw() call)")

  ## Optionally show the per-step average cost instead of the cumulative
  ## one; the divisor depends on the step pattern's "norm" attribute.
  if (normalize) {
    norm <- attr(d$stepPattern, "norm")
    if (is.na(norm))
      stop("No normalization known for step pattern used")
    divisor <- switch(norm,
                      "N"   = row(acc),
                      "N+M" = row(acc) + col(acc),
                      "M"   = col(acc),
                      NULL)
    if (!is.null(divisor))
      acc <- acc / divisor
  }

  nx <- nrow(acc)
  ny <- ncol(acc)
  image(acc, col = grDevices::terrain.colors(100), x = 1:nx, y = 1:ny,
        xlab = xlab, ylab = ylab, ...)
  contour(acc, x = 1:nx, y = 1:ny, add = TRUE)
  ## Overlay the optimal warping path.
  lines(d$index1, d$index2, col = "blue", lwd = 2)
}
|
library(peekds)
library(readxl)
library(tidyverse)
library(here)
source(here("helper/pod.R"))
# Directory (relative to the project root) holding this lab's raw and
# processed pilot data.
lab_dir = "pilot_data/pilot_data_LMU"
# parsing errors for adults
# Participant sheet for the children; the adult sheet is commented out
# because it fails to parse (see note above).
p <- bind_rows(readxl::read_xlsx(here(lab_dir, "raw_data/LMU_Munich_participantsheet_children.xlsx")))
#readxl::read_xlsx(here(lab_dir, "raw_data/LMU_Munich_participantsheet_adults.xlsx"))
# Raw Tobii eyetracking export (one row per sample) for the children.
d = bind_rows(read_tsv(here(lab_dir, "raw_data/LMU_Munich_rawdata_children.tsv")))
#read_tsv(here(lab_dir, "raw_data/LMU_Munich_rawdata_adults.tsv"))
# datasets
# dataset_id, monitor_size_x, monitor_size_y, sample_rate, tracker, lab_dataset_id
# One-row table describing this lab's recording setup (peekds schema);
# validation call is commented out below, presumably pending schema fixes.
datasets <- tibble(dataset_id = 4,
                   monitor_size_x = 1280,
                   monitor_size_y = 1024,
                   sample_rate = 60,
                   tracker = "tobii",
                   lab_dataset_id = "lmu_babylab")
#peekds::validate_table(df_table = datasets,
#                      table_type = "datasets")
write_csv(datasets, here(lab_dir, "processed_data/datasets.csv") )
# subjects
# subject_id, age, sex, lab_subject_id
# One row per participant; subject_id is zero-indexed, and session_error
# is collapsed into a logical `error` flag.
subjects <- p %>%
  select(age = age_days,
         sex = participant_gender,
         lab_subject_id = subid,
         session_error) %>%
  mutate(subject_id = 0:(nrow(p) -1 ),
         error = session_error == "error",
         dataset_id = 4) %>%
  select(-session_error)
#peekds::validate_table(df_table = subjects,
#                      table_type = "subjects")
write_csv(subjects, here(lab_dir, "processed_data/subjects.csv") )
# aoi_regions
# aoi_region_id, l_x_max, l_x_min, l_y_max, l_y_min, r_x_max, r_x_min, r_y_max,
# r_y_min
# AOI rectangles derived from the monitor and stimulus-video geometry;
# video dimensions are hard-coded here (see TODO).
source(here("metadata/generate_AOIs.R"))
aoi_regions = generate_aoi_regions(screen_width = datasets$monitor_size_x,
                                   screen_height = datasets$monitor_size_y,
                                   video_width = 1280, # from data #TODO: how do we get this from data?
                                   video_height = 960
                                   )
#peekds::validate_table(df_table = aoi_regions,
#                      table_type = "aoi_regions")
write_csv(aoi_regions, here(lab_dir, "processed_data/aoi_regions.csv"))
# trials
# trial_id, aoi_region, dataset, lab_trial_id, distractor_image, distractor_label,
# full_phrase, point_of_disambiguation, target_image, target_label, target_side
# get the trial_num based on timestamp, for each subject
# assign trial_id based on subject/MediaName combo
# Only familiarization ("FAM") media with a valid eyetracker timestamp are
# kept; trial order is recovered by ranking each trial's first timestamp.
# NOTE(review): `== F` should be spelled `== FALSE` (or `!is.na(...)`).
trials <- filter(d, grepl("FAM", d$MediaName),
                 is.na(EyeTrackerTimestamp) == F) %>%
  group_by(ParticipantName, MediaName) %>%
  summarise(firsttime = min(EyeTrackerTimestamp)) %>%
  rename(lab_trial_id = MediaName,
         lab_subject_id = ParticipantName) %>%
  # condition is encoded in characters 5-6 of the media name; its second
  # character ("L"/"R") gives the target side.
  mutate(trial_num = rank(firsttime),
         condition = substr(lab_trial_id, 5, 6),
         experiment_num = "pilot_1a",
         aoi_region_id = 0,
         dataset_id = 4,
         distractor_image = "distractor",
         distractor_label = "distractor",
         full_phrase = NA,
         point_of_disambiguation = pod_pilot_1a,
         target_image = "target",
         target_label = "target",
         target_side = ifelse(str_sub(condition, start = 2, end = 2) == "L",
                              "left", "right"),
         distractor_id = 0,
         target_id = 0) %>%
  ungroup() %>%
  mutate(trial_id = 0:(n()-1)) %>%
  select(-firsttime)
# TODO: this fails because it is looking for aoi_region and not aoi_region_id
#peekds::validate_table(df_table = trials,
#                      table_type = "trials")
write_csv(trials, here(lab_dir, "processed_data/trials.csv"))
# from https://www.tobiipro.com/siteassets/tobii-pro/user-manuals/tobii-pro-studio-user-manual.pdf
# we want ADCSpx coordinates - those are display coordinates
# note tobii gives upper-left indexed coordinates
# Per-sample gaze data: timestamps converted from microseconds to ms
# relative to the first sample, restricted to FAM trials, joined to the
# trial/subject tables, and re-centered on the point of disambiguation.
xy_data <- tibble(lab_subject_id = d$ParticipantName,
                  x = d$`GazePointX (ADCSpx)`,
                  y = d$`GazePointY (ADCSpx)`,
                  t = (d$EyeTrackerTimestamp - d$EyeTrackerTimestamp[1])/1000,
                  lab_trial_id = d$MediaName) %>%
  filter(str_detect(lab_trial_id, "FAM"),
         !is.na(t),
         !is.na(lab_trial_id)) %>%
  mutate(xy_data_id = 0:(n() - 1)) %>%
  left_join(trials) %>%
  left_join(subjects) %>%
  select(xy_data_id, subject_id, trial_id, x, y, t, point_of_disambiguation) %>%
  center_time_on_pod() %>%
  xy_trim(datasets)
#peekds::validate_table(df_table = xy_data,
#                      table_type = "xy_data")
write_csv(xy_data, here(lab_dir, "processed_data/xy_data.csv"))
# aoi_data
# aoi_data_id, aoi, subject, t, trial
# Derive per-sample AOI hits from the processed tables written above.
aoi_data <- generate_aoi_small(here(lab_dir, "processed_data/"))
#peekds::validate_table(df_table = aoi_data,
#                      table_type = "aoi_data")
write_csv(aoi_data, here(lab_dir, "processed_data/aoi_data.csv"))
| /pilot_data/pilot_data_LMU/import_scripts/import.R | permissive | GalRaz/mb2-analysis | R | false | false | 4,889 | r | library(peekds)
library(readxl)
library(tidyverse)
library(here)
source(here("helper/pod.R"))
lab_dir = "pilot_data/pilot_data_LMU"
# parsing errors for adults
p <- bind_rows(readxl::read_xlsx(here(lab_dir, "raw_data/LMU_Munich_participantsheet_children.xlsx")))
#readxl::read_xlsx(here(lab_dir, "raw_data/LMU_Munich_participantsheet_adults.xlsx"))
d = bind_rows(read_tsv(here(lab_dir, "raw_data/LMU_Munich_rawdata_children.tsv")))
#read_tsv(here(lab_dir, "raw_data/LMU_Munich_rawdata_adults.tsv"))
# datasets
# dataset_id, monitor_size_x, monitor_size_y, sample_rate, tracker, lab_dataset_id
datasets <- tibble(dataset_id = 4,
monitor_size_x = 1280,
monitor_size_y = 1024,
sample_rate = 60,
tracker = "tobii",
lab_dataset_id = "lmu_babylab")
#peekds::validate_table(df_table = datasets,
# table_type = "datasets")
write_csv(datasets, here(lab_dir, "processed_data/datasets.csv") )
# subjects
# subject_id, age, sex, lab_subject_id
subjects <- p %>%
select(age = age_days,
sex = participant_gender,
lab_subject_id = subid,
session_error) %>%
mutate(subject_id = 0:(nrow(p) -1 ),
error = session_error == "error",
dataset_id = 4) %>%
select(-session_error)
#peekds::validate_table(df_table = subjects,
# table_type = "subjects")
write_csv(subjects, here(lab_dir, "processed_data/subjects.csv") )
# aoi_regions
# aoi_region_id, l_x_max, l_x_min, l_y_max, l_y_min, r_x_max, r_x_min, r_y_max,
# r_y_min
source(here("metadata/generate_AOIs.R"))
aoi_regions = generate_aoi_regions(screen_width = datasets$monitor_size_x,
screen_height = datasets$monitor_size_y,
video_width = 1280, # from data #TODO: how do we get this from data?
video_height = 960
)
#peekds::validate_table(df_table = aoi_regions,
# table_type = "aoi_regions")
write_csv(aoi_regions, here(lab_dir, "processed_data/aoi_regions.csv"))
# trials
# trial_id, aoi_region, dataset, lab_trial_id, distractor_image, distractor_label,
# full_phrase, point_of_disambiguation, target_image, target_label, target_side
# get the trial_num based on timestamp, for each subject
# assign trial_id based on subject/MediaName combo
trials <- filter(d, grepl("FAM", d$MediaName),
is.na(EyeTrackerTimestamp) == F) %>%
group_by(ParticipantName, MediaName) %>%
summarise(firsttime = min(EyeTrackerTimestamp)) %>%
rename(lab_trial_id = MediaName,
lab_subject_id = ParticipantName) %>%
mutate(trial_num = rank(firsttime),
condition = substr(lab_trial_id, 5, 6),
experiment_num = "pilot_1a",
aoi_region_id = 0,
dataset_id = 4,
distractor_image = "distractor",
distractor_label = "distractor",
full_phrase = NA,
point_of_disambiguation = pod_pilot_1a,
target_image = "target",
target_label = "target",
target_side = ifelse(str_sub(condition, start = 2, end = 2) == "L",
"left", "right"),
distractor_id = 0,
target_id = 0) %>%
ungroup() %>%
mutate(trial_id = 0:(n()-1)) %>%
select(-firsttime)
# TODO: this fails because it is looking for aoi_region and not aoi_region_id
#peekds::validate_table(df_table = trials,
# table_type = "trials")
write_csv(trials, here(lab_dir, "processed_data/trials.csv"))
# from https://www.tobiipro.com/siteassets/tobii-pro/user-manuals/tobii-pro-studio-user-manual.pdf
# we want ADCSpx coordinates - those are display coordinates
# note tobii gives upper-left indexed coordinates
xy_data <- tibble(lab_subject_id = d$ParticipantName,
x = d$`GazePointX (ADCSpx)`,
y = d$`GazePointY (ADCSpx)`,
t = (d$EyeTrackerTimestamp - d$EyeTrackerTimestamp[1])/1000,
lab_trial_id = d$MediaName) %>%
filter(str_detect(lab_trial_id, "FAM"),
!is.na(t),
!is.na(lab_trial_id)) %>%
mutate(xy_data_id = 0:(n() - 1)) %>%
left_join(trials) %>%
left_join(subjects) %>%
select(xy_data_id, subject_id, trial_id, x, y, t, point_of_disambiguation) %>%
center_time_on_pod() %>%
xy_trim(datasets)
#peekds::validate_table(df_table = xy_data,
# table_type = "xy_data")
write_csv(xy_data, here(lab_dir, "processed_data/xy_data.csv"))
# aoi_data
# aoi_data_id, aoi, subject, t, trial
aoi_data <- generate_aoi_small(here(lab_dir, "processed_data/"))
#peekds::validate_table(df_table = aoi_data,
# table_type = "aoi_data")
write_csv(aoi_data, here(lab_dir, "processed_data/aoi_data.csv"))
|
#' @title Get model parameters from htest-objects
#' @name get_parameters.htest
#'
#' @description Returns the parameters from a hypothesis test.
#'
#' @param ... Currently not used.
#' @inheritParams find_parameters
#'
#' @return A data frame with two columns: the parameter names and the related
#' point estimates.
#'
#' @examples
#' get_parameters(t.test(1:10, y = c(7:20)))
#' @export
get_parameters.htest <- function(x, ...) {
  info <- model_info(x)

  ## Pick the extractor matching the detected family of hypothesis test;
  ## each helper returns a Parameter/Estimate data frame (or NULL for
  ## one-way tests, which carry no point estimate).
  params <-
    if (info$is_correlation) {
      .extract_htest_correlation(x)
    } else if (info$is_levenetest) {
      .extract_htest_levenetest(x)
    } else if (info$is_ttest) {
      .extract_htest_ttest(x)
    } else if (info$is_ranktest) {
      .extract_htest_ranktest(x)
    } else if (info$is_onewaytest) {
      .extract_htest_oneway(x)
    } else if (info$is_chi2test) {
      .extract_htest_chi2(x)
    } else if (info$is_proptest) {
      .extract_htest_prop(x)
    } else if (info$is_binomtest) {
      .extract_htest_binom(x)
    } else {
      format_error("`get_parameters()` not implemented for such hypothesis tests yet.")
    }

  ## Drop any row names picked up from named estimate vectors.
  row.names(params) <- NULL
  params
}
# extract htest correlation ----------------------
.extract_htest_correlation <- function(model) {
  # Correlation tests report their estimate (r, rho, tau); the special
  # "Pearson's Chi-squared test" method falls back to the test statistic.
  result <- data.frame(
    Parameter = model$data.name,
    stringsAsFactors = FALSE
  )
  use_statistic <- model$method == "Pearson's Chi-squared test"
  result$Estimate <- if (use_statistic) model$statistic else model$estimate
  result
}
# extract htest ranktest ----------------------
.extract_htest_ranktest <- function(model) {
  # Rank-based tests (Wilcoxon signed-rank / rank-sum, Kruskal-Wallis)
  # report the raw test statistic as the estimate. The two method
  # branches previously contained identical, duplicated code; they are
  # merged here with no behavior change (methods matching neither name
  # still yield a frame without an Estimate column, as before).
  out <- data.frame(
    Parameter = model$data.name,
    stringsAsFactors = FALSE
  )
  if (grepl("Wilcoxon", model$method, fixed = TRUE) ||
      grepl("Kruskal-Wallis", model$method, fixed = TRUE)) {
    out$Estimate <- model$statistic
  }
  out
}
# extract htest leveneTest ----------------------
.extract_htest_levenetest <- function(model) {
  # Levene test results (car::leveneTest) are anova-like tables; the
  # first entry of the "F value" column is the statistic of interest.
  data.frame(
    Parameter = "Parameter",
    Estimate = model[["F value"]][1],
    stringsAsFactors = FALSE
  )
}
# extract htest ttest ----------------------
.extract_htest_ttest <- function(model, standardized_d = NULL, hedges_g = NULL) {
  # One-sample tests carry a single estimate; two-sample tests report the
  # difference between the two group estimates. `standardized_d` and
  # `hedges_g` are accepted for interface compatibility but unused here.
  est <- model$estimate
  out <- data.frame(Parameter = model$data.name, stringsAsFactors = FALSE)
  out$Estimate <- if (length(est) == 1) est else est[1] - est[2]
  out
}
# extract htest oneway ----------------------
.extract_htest_oneway <- function(model) {
  # One-way tests (e.g. oneway.test()) expose no point estimate in the
  # htest object, so there is nothing to extract; the caller gets NULL.
  NULL
}
# extract htest chi2 ----------------------
.extract_htest_chi2 <- function(model) {
  # Fisher-type tests expose an odds-ratio estimate; all other chi-square
  # family tests fall back to the test statistic.
  out <- data.frame(Parameter = model$data.name, stringsAsFactors = FALSE)
  has_odds_ratio <- !is.null(model$estimate) &&
    identical(names(model$estimate), "odds ratio")
  out$Estimate <- if (has_odds_ratio) model$estimate else model$statistic
  out
}
# extract htest prop ----------------------
.extract_htest_prop <- function(model) {
  # Proportion tests: the estimated proportion(s) are the parameters
  # (for multi-group tests, "probability" is recycled across rows).
  # The previous version ended in a dangling assignment (`out <- ...`),
  # returning the value invisibly and inconsistently with the sibling
  # .extract_htest_binom(); return the data frame directly instead.
  data.frame(
    Parameter = "probability",
    Estimate = model$estimate,
    stringsAsFactors = FALSE
  )
}
# extract htest binom ----------------------
.extract_htest_binom <- function(model) {
  # Binomial test: the estimated success probability is the parameter.
  data.frame(
    Parameter = "probability",
    Estimate = model$estimate,
    stringsAsFactors = FALSE
  )
}
| /R/get_parameters_htest.R | no_license | cran/insight | R | false | false | 3,523 | r | #' @title Get model parameters from htest-objects
#' @name get_parameters.htest
#'
#' @description Returns the parameters from a hypothesis test.
#'
#' @param ... Currently not used.
#' @inheritParams find_parameters
#'
#' @return A data frame with two columns: the parameter names and the related
#' point estimates.
#'
#' @examples
#' get_parameters(t.test(1:10, y = c(7:20)))
#' @export
get_parameters.htest <- function(x, ...) {
m_info <- model_info(x)
if (m_info$is_correlation) {
out <- .extract_htest_correlation(x)
} else if (m_info$is_levenetest) {
out <- .extract_htest_levenetest(x)
} else if (m_info$is_ttest) {
out <- .extract_htest_ttest(x)
} else if (m_info$is_ranktest) {
out <- .extract_htest_ranktest(x)
} else if (m_info$is_onewaytest) {
out <- .extract_htest_oneway(x)
} else if (m_info$is_chi2test) {
out <- .extract_htest_chi2(x)
} else if (m_info$is_proptest) {
out <- .extract_htest_prop(x)
} else if (m_info$is_binomtest) {
out <- .extract_htest_binom(x)
} else {
format_error("`get_parameters()` not implemented for such hypothesis tests yet.")
}
row.names(out) <- NULL
out
}
# extract htest correlation ----------------------
.extract_htest_correlation <- function(model) {
out <- data.frame(
Parameter = model$data.name,
stringsAsFactors = FALSE
)
if (model$method == "Pearson's Chi-squared test") {
out$Estimate <- model$statistic
} else {
out$Estimate <- model$estimate
}
out
}
# extract htest ranktest ----------------------
.extract_htest_ranktest <- function(model) {
out <- data.frame(
Parameter = model$data.name,
stringsAsFactors = FALSE
)
if (grepl("Wilcoxon", model$method, fixed = TRUE)) {
out$Estimate <- model$statistic
} else if (grepl("Kruskal-Wallis", model$method, fixed = TRUE)) {
out$Estimate <- model$statistic
}
out
}
# extract htest leveneTest ----------------------
.extract_htest_levenetest <- function(model) {
data.frame(
Parameter = "Parameter",
Estimate = model$`F value`[1],
stringsAsFactors = FALSE
)
}
# extract htest ttest ----------------------

# Parameter table for a t-test. One-sample/paired tests carry a single
# estimate; two-sample tests carry both group means, which are collapsed
# into their difference. `standardized_d` and `hedges_g` are accepted for
# interface compatibility but are not used in this extractor.
.extract_htest_ttest <- function(model, standardized_d = NULL, hedges_g = NULL) {
  out <- data.frame(
    Parameter = model$data.name,
    stringsAsFactors = FALSE
  )
  est <- model$estimate
  out$Estimate <- if (length(est) == 1) est else est[1] - est[2]
  out
}
# extract htest oneway ----------------------

# One-way tests provide no point estimate to report here, so no parameter
# table is built; the NULL is passed through by `get_parameters.htest()`.
.extract_htest_oneway <- function(model) {
  NULL
}
# extract htest chi2 ----------------------

# Parameter table for chi-squared-type tests. When the test carries an
# estimate named "odds ratio", that estimate is reported; otherwise the
# test statistic is used.
.extract_htest_chi2 <- function(model) {
  has_odds_ratio <- !is.null(model$estimate) &&
    identical(names(model$estimate), "odds ratio")
  out <- data.frame(
    Parameter = model$data.name,
    stringsAsFactors = FALSE
  )
  out$Estimate <- if (has_odds_ratio) model$estimate else model$statistic
  out
}
# extract htest prop ----------------------

# Extract the estimated proportion(s) from a proportion test.
#
# Returns a data frame with a fixed "probability" parameter label and the
# estimated proportion(s). The previous version ended in an assignment
# (`out <- data.frame(...)`), which returned the value invisibly and left
# a dead local binding; the result is now returned explicitly, matching
# the sibling extractor `.extract_htest_binom`.
.extract_htest_prop <- function(model) {
  out <- data.frame(
    Parameter = "probability",
    Estimate = model$estimate,
    stringsAsFactors = FALSE
  )
  out
}
# extract htest binom ----------------------

# Extract the estimated probability of success from a binomial test,
# labelled with a fixed "probability" parameter name.
.extract_htest_binom <- function(model) {
  data.frame(
    Parameter = "probability",
    Estimate = model$estimate,
    stringsAsFactors = FALSE
  )
}
|
notes:
- replace bootstrapped 95pct CIs with Bayesian credibility interval?
- we should make sure to cite the Weisleder intervention paper (since it’s among the only pieces of causal evidence on early parenting)
- ICCs for JA overall are high but very low for sub measures: We should mention that this is a reason why we may see fewer findings on episodes
- Note that figure on JA has non-matching y labels for Experiment 2 (total vs mean)
- say in E1 that low reliability led us to follow up and refine coding procedure:
E1 ICCs might be a flag for some folks, so minimally we should think about explaining that these low reliability limits interpretability of those findings
For consistency, although we didn’t have a preregistration for e1, we used the same analyses that we used in e2.
Kat: clarify "scientific justification" in videos. Did the videos focus on language, JA?
- cite Leung2018 HomeView: https://www.sciencedirect.com/science/article/pii/S0885200618301534
Under such expert guidance, children are encouraged and motivated to engage in more advanced play, undertaking explorations that push the boundaries of what they would be able to do unaided [@Vygotsky1980].
What are digital interventions
With the widespread use of smartphones and tablets worldwide, digitally-delivered interventions could address many of the logistical barriers that have limited scaling up face-to-face delivery methods.
However, the parent and child outcomes assessed in the review (e.g.infant positive behaviors, satisfaction, emotional symptoms etc.) did not address the nature or quality of parent-infant interactions at a detailed level, for example whether the interventions lead to children paying more attention, or vocabulary changes in parents' language usages.
Although digitally-delivered activities are designed to promote learning and cognitive development, it is unclear how they might affect these dimensions of parent-child interactions.
Thus, we want to conduct this experiment to explore if and how digital scaffolding of activities affects the social and linguistic characteristics of parent-child interactions.
The quality of parent-child interactions can be measured by both the social engagement of parents (e.g., joint attention to objects in the environment)[@Bigelow2004] and the quality of language (e.g., vocabulary diversity) [@Malvern2004].
Communication and message content
In our study, we focus on videos that provide suggested play activities to parents.
These videos constitute a short-term "guided play" intervention.
Young children spend a large portion of their waking time at play, variously manipulating objects, exploring their environment, and interacting with caregivers and peers.
Playing with objects allows them to discover hidden object properties and relations, and to build a causal understanding of how objects interact [e.g., @Schulz2007].
Meanwhile, play also gives children an opportunity to set and achieve goals (e.g., build a tower) and to practice a wide range of motor skills (e.g., stacking) that will help them navigate the world [@Singer2006].
Social play can help children learn about human relationships, both through imitation of adult behaviors and by experiencing and learning to process emotional events such as failures [@Singer2006].
Of course, young children are rarely playing in isolation: caregivers often provide encouragement and guidance while scaffolding a child's play [@Kaye1970; @Wood1976].
The quality of interactions during such guided play has been shown to influence language learning: parents' joint attention to objects that their child was focused on was positively correlated with the child's subsequent vocabulary growth [@Tomasello1986; @Carpenter1998].
Episodes of joint attention during guided play have also been found to contain more age-appropriate advanced forms of play [@Bigelow2004].
More generally, parenting practices early in childhood have been shown to play an important role in shaping the future outcomes of young children [@Hart1995; @Heckman2006].
While interventions often have trouble reaching many parents of very young children, the proliferation of mobile devices offers a good avenue for the digital delivery of parenting advice [@Breitenstein2014].
However, the efficacy of digital parenting advice has not been widely proven.
## Guided Play Scaffolds Learning
Children's early play behaviors are often assisted by more skilled and knowledgeable play partners such as their caregivers and older siblings [@Kaye1970].
Under such expert guidance, children are encouraged and motivated to engage in more advanced play, undertaking explorations that push the boundaries of what they would be able to do unaided [@Vygotsky1980].
These tutorial interactions have been shown to be important components of child development [@Wood1976].
Thus, with the knowledge of both play and tutorial interactions, guided play, which consists of both active and enjoyable activities as well as close guidance of adults [@Hirsh2008] has drawn researchers' interest.
A study of preschoolers showed that guided play scaffolds the environment while still allowing children to maintain a large degree of control,
and it outperforms direct-instruction approaches in encouraging a variety of positive academic outcomes [@Weisberg2013].
Another study found that guided play could facilitate children's vocabulary and comprehensive language development and subsequent literacy skills [@Massey2013].
## Improving Parenting Practices and Language Use
Thus, including guided play in parenting practices from an early age may boost children's language and educational outcomes.
A recent home-visit parenting practices intervention targeting children of low socioeconomic status found that parents in the intervention group gained knowledge of language development, and that this effect sustained four months after the intervention [@Suskind2015].
However, although a number of interactive measures increased during the experiment, including the number of word tokens, conversational turns, and child vocalizations, these increases did not sustain after the intervention.
That changes did not sustain could be due to the intervention itself,
or could merely be that the methods of home-visiting is not sustainable enough for parents to easily and constantly get parenting advice.
Thus, new methods of delivering parenting advice should be considered.
## Effectiveness of Digital Delivery
But does providing such information change parents' behavior towards young children?
And is this advice effective in producing change along the desired dimension?
An alternative might be that asking parents to focus on a specific activity that may be novel to them may tax their cognitive resources, resulting in decreases in the quality of their child-directed speech, or in less flexible, lower-quality social interactions.
Exp 1 Discussion
Demographics and EPAQ do not interact with condition, although there was a marginal effect of RR score on lexical diversity (lower diversity for higher RR scores), and marginal effects of parent education on word types and tokens (higher education yielding more types and tokens).
Discussion
Parents in the control conditions received either no video (Experiment 1) or a video of a recent finding in developmental psychology (Experiment 2), and then played with their child for three minutes using the same sets of toys given to those in the experimental conditions.
| /paper/extra_text.R | no_license | mcfrank/parenting_obs | R | false | false | 7,531 | r | notes:
- replace bootstrapped 95pct CIs with Bayesian credibility interval?
- we should make sure to cite the Weisleder intervention paper (since it’s among the only pieces of causal evidence on early parenting)
- ICCs for JA overall are high but very low for sub measures: We should mention that this is a reason why we may see fewer findings on episodes
- Note that figure on JA has non-matching y labels for Experiment 2 (total vs mean)
- say in E1 that low reliability led us to follow up and refine coding procedure:
E1 ICCs might be a flag for some folks, so minimally we should think about explaining that these low reliability limits interpretability of those findings
For consistency, although we didn’t have a preregistration for e1, we used the same analyses that we used in e2.
Kat: clarify "scientific justification" in videos. Did the videos focus on language, JA?
- cite Leung2018 HomeView: https://www.sciencedirect.com/science/article/pii/S0885200618301534
Under such expert guidance, children are encouraged and motivated to engage in more advanced play, undertaking explorations that push the boundaries of what they would be able to do unaided [@Vygotsky1980].
What are digital interventions
With the widespread use of smartphones and tablets worldwide, digitally-delivered interventions could address many of the logistical barriers that have limited scaling up face-to-face delivery methods.
However, the parent and child outcomes assessed in the review (e.g.infant positive behaviors, satisfaction, emotional symptoms etc.) did not address the nature or quality of parent-infant interactions at a detailed level, for example whether the interventions lead to children paying more attention, or vocabulary changes in parents' language usages.
Although digitally-delivered activities are designed to promote learning and cognitive development, it is unclear how they might affect these dimensions of parent-child interactions.
Thus, we want to conduct this experiment to explore if and how digital scaffolding of activities affects the social and linguistic characteristics of parent-child interactions.
The quality of parent-child interactions can be measured by both the social engagement of parents (e.g., joint attention to objects in the environment)[@Bigelow2004] and the quality of language (e.g., vocabulary diversity) [@Malvern2004].
Communication and message content
In our study, we focus on videos that provide suggested play activities to parents.
These videos constitute a short-term "guided play" intervention.
Young children spend a large portion of their waking time at play, variously manipulating objects, exploring their environment, and interacting with caregivers and peers.
Playing with objects allows them to discover hidden object properties and relations, and to build a causal understanding of how objects interact [e.g., @Schulz2007].
Meanwhile, play also gives children an opportunity to set and achieve goals (e.g., build a tower) and to practice a wide range of motor skills (e.g., stacking) that will help them navigate the world [@Singer2006].
Social play can help children learn about human relationships, both through imitation of adult behaviors and by experiencing and learning to process emotional events such as failures [@Singer2006].
Of course, young children are rarely playing in isolation: caregivers often provide encouragement and guidance while scaffolding a child's play [@Kaye1970; @Wood1976].
The quality of interactions during such guided play has been shown to influence language learning: parents' joint attention to objects that their child was focused on was positively correlated with the child's subsequent vocabulary growth [@Tomasello1986; @Carpenter1998].
Episodes of joint attention during guided play have also been found to contain more age-appropriate advanced forms of play [@Bigelow2004].
More generally, parenting practices early in childhood have been shown to play an important role in shaping the future outcomes of young children [@Hart1995; @Heckman2006].
While interventions often have trouble reaching many parents of very young children, the proliferation of mobile devices offers a good avenue for the digital delivery of parenting advice [@Breitenstein2014].
However, the efficacy of digital parenting advice has not been widely proven.
## Guided Play Scaffolds Learning
Children's early play behaviors are often assisted by more skilled and knowledgeable play partners such as their caregivers and older siblings [@Kaye1970].
Under such expert guidance, children are encouraged and motivated to engage in more advanced play, undertaking explorations that push the boundaries of what they would be able to do unaided [@Vygotsky1980].
These tutorial interactions have been shown to be important components of child development [@Wood1976].
Thus, with the knowledge of both play and tutorial interactions, guided play, which consists of both active and enjoyable activities as well as close guidance of adults [@Hirsh2008] has drawn researchers' interest.
A study of preschoolers showed that guided play scaffolds the environment while still allowing children to maintain a large degree of control,
and it outperforms direct-instruction approaches in encouraging a variety of positive academic outcomes [@Weisberg2013].
Another study found that guided play could facilitate children's vocabulary and comprehensive language development and subsequent literacy skills [@Massey2013].
## Improving Parenting Practices and Language Use
Thus, including guided play in parenting practices from an early age may boost children's language and educational outcomes.
A recent home-visit parenting practices intervention targeting children of low socioeconomic status found that parents in the intervention group gained knowledge of language development, and that this effect sustained four months after the intervention [@Suskind2015].
However, although a number of interactive measures increased during the experiment, including the number of word tokens, conversational turns, and child vocalizations, these increases did not sustain after the intervention.
That changes did not sustain could be due to the intervention itself,
or could merely be that the methods of home-visiting is not sustainable enough for parents to easily and constantly get parenting advice.
Thus, new methods of delivering parenting advice should be considered.
## Effectiveness of Digital Delivery
But does providing such information change parents' behavior towards young children?
And is this advice effective in producing change along the desired dimension?
An alternative might be that asking parents to focus on a specific activity that may be novel to them may tax their cognitive resources, resulting in decreases in the quality of their child-directed speech, or in less flexible, lower-quality social interactions.
Exp 1 Discussion
Demographics and EPAQ do not interact with condition, although there was a marginal effect of RR score on lexical diversity (lower diversity for higher RR scores), and marginal effects of parent education on word types and tokens (higher education yielding more types and tokens).
Discussion
Parents in the control conditions received either no video (Experiment 1) or a video of a recent finding in developmental psychology (Experiment 2), and then played with their child for three minutes using the same sets of toys given to those in the experimental conditions.
|
# Demo script for the TreeMasterShiny helper functions: builds random
# forests and decision trees on two mlbench data sets and inspects the
# returned plots and metrics.
library(randomForest)
library(ggplot2)
library(ROCit)
library(reshape2)
library(caret)
library(e1071)
library(mlbench)
library(rpart)
library(rpart.plot)

# ================================ Random Forest ===============================

# ---- Regression problem: Boston housing prices ----
data("BostonHousing")
data <- BostonHousing

# Forest with impurity-based variable importance.
result1 <- randomforest_build(
  data,
  m = .75,
  response_name = "medv",
  indicator = "regression",
  num_trees = 100,
  importance_measure = "impurity"
)
result1$VarImportancePlot
result1$RMSE

# Forest with permutation-accuracy variable importance.
result2 <- randomforest_build(
  data,
  m = .75,
  response_name = "medv",
  indicator = "regression",
  num_trees = 100,
  importance_measure = "accuracy"
)
result2$VarImportancePlot
result2$RMSE

# Tune the number of trees for the regression forest.
result3 <- num_trees_tune(
  data,
  m = .75,
  response_name = "medv",
  indicator = "regression",
  max_trees = 75,
  min_trees = 20,
  importance_measure = "accuracy"
)
result3

# ---- Classification problem: Pima Indians diabetes ----
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes

# Forest with impurity-based variable importance.
result4 <- randomforest_build(
  data,
  m = .75,
  response_name = "diabetes",
  indicator = "classification",
  num_trees = 100,
  importance_measure = "impurity",
  positive_class_name = "pos"
)
result4$VarImportancePlot
result4$ROCPlot
result4$KSPlot
confusion_matrix_plot(cm_matrix = result4$ConfusionMatrix, positive = "Disease", negative = "Health")
result4$Accuracy

# Forest with permutation-accuracy variable importance.
result5 <- randomforest_build(
  data,
  m = .75,
  response_name = "diabetes",
  indicator = "classification",
  num_trees = 100,
  importance_measure = "accuracy",
  positive_class_name = "pos"
)
result5$VarImportancePlot
result5$ROCPlot
result5$KSPlot
confusion_matrix_plot(cm_matrix = result5$ConfusionMatrix, positive = "Disease", negative = "Health")
result5$Accuracy

# Tune the number of trees for the classification forest.
result6 <- num_trees_tune(
  data,
  m = .75,
  response_name = "diabetes",
  indicator = "classification",
  max_trees = 75,
  min_trees = 20,
  importance_measure = "accuracy",
  positive_class_name = "pos"
)
result6

# ================================ Decision Tree ===============================

# ---- Regression problem ----
data("BostonHousing")
data <- BostonHousing

result7 <- decision_tree(data, m = .75, response_name = "medv", indicator = "regression")
result7$RMSE
Tree_plot(result7$PlotObject)

# ---- Classification problem ----
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes

result8 <- decision_tree(
  data,
  m = .75,
  response_name = "diabetes",
  indicator = "classification",
  positive_class_name = "pos",
  maxdepth = 4
)
result8$ROCPlot
result8$KSPlot
confusion_matrix_plot(cm_matrix = result8$ConfusionMatrix, positive = "Disease", negative = "Health")
result8$Accuracy
Tree_plot(result8$PlotObject)
| /R_functions/Example_of_using_package_App.R | no_license | linchen-deng/TreeMasterShiny | R | false | false | 2,804 | r | library(randomForest)
library(ggplot2)
library(ROCit)
library(reshape2)
library(caret)
library(e1071)
library(mlbench)
library(rpart)
library(rpart.plot)
################################Random Forest########################################################
# Regression problem
data("BostonHousing")
data <- BostonHousing
###
result1 <- randomforest_build(data, m = .75, response_name = "medv", indicator = "regression", num_trees = 100,
importance_measure = "impurity")
result1$VarImportancePlot
result1$RMSE
###
result2 <- randomforest_build(data, m = .75, response_name = "medv", indicator = "regression", num_trees = 100,
importance_measure = "accuracy")
result2$VarImportancePlot
result2$RMSE
###
result3 <- num_trees_tune(data, m = .75, response_name = "medv", indicator = "regression", max_trees = 75, min_trees = 20,
importance_measure = "accuracy")
result3
# Classification problem
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes
###
result4 <- randomforest_build(data, m = .75, response_name = "diabetes", indicator = "classification", num_trees = 100,
importance_measure = "impurity", positive_class_name = "pos")
result4$VarImportancePlot
result4$ROCPlot
result4$KSPlot
confusion_matrix_plot(cm_matrix = result4$ConfusionMatrix, positive = "Disease", negative = "Health")
result4$Accuracy
###
result5 <- randomforest_build(data, m = .75, response_name = "diabetes", indicator = "classification", num_trees = 100,
importance_measure = "accuracy", positive_class_name = "pos")
result5$VarImportancePlot
result5$ROCPlot
result5$KSPlot
confusion_matrix_plot(cm_matrix = result5$ConfusionMatrix, positive = "Disease", negative = "Health")
result5$Accuracy
###
result6 <- num_trees_tune(data, m = .75, response_name = "diabetes", indicator = "classification", max_trees = 75,
min_trees = 20, importance_measure = "accuracy", positive_class_name = "pos")
result6
########################################Decision Tree###############################################
# Regression problem
data("BostonHousing")
data <- BostonHousing
###
result7 <- decision_tree(data, m = .75, response_name = "medv", indicator = "regression")
result7$RMSE
Tree_plot(result7$PlotObject)
# Classification problem
data("PimaIndiansDiabetes")
data <- PimaIndiansDiabetes
###
result8 <- decision_tree(data, m = .75, response_name = "diabetes", indicator = "classification",
positive_class_name = "pos", maxdepth = 4)
result8$ROCPlot
result8$KSPlot
confusion_matrix_plot(cm_matrix = result8$ConfusionMatrix, positive = "Disease", negative = "Health")
result8$Accuracy
Tree_plot(result8$PlotObject)
|
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641268162e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) | /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926953-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 1,101 | r | testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641268162e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result) |
#' get_top_coord_urls
#'
#' A function to get the top n URLs shared in a coordinated way
#'
#' @param output the output list resulting from the function get_coord_shares
#' @param order_by name of the column used to order the top news. Defaults to "engagement". Other possible values are: "statistics.actual.likeCount", "statistics.actual.shareCount", "statistics.actual.commentCount", "statistics.actual.loveCount", "statistics.actual.wowCount", "statistics.actual.hahaCount", "statistics.actual.sadCount", "statistics.actual.angryCount"
#' @param component return the top URLs by network component (TRUE, default) or just the top URLs (FALSE)
#' @param top number of the top URLs to be retrieved
#'
#' @return A data frame (grouped_df) containing the top URLs shared in a coordinated way by the highly coordinated entities, with a set of attributes
#'
#' @examples
#' # get the top ten URLs shared in a coordinated way by each network component, by engagement
#' df <- get_top_coord_urls(output, order_by = "engagement", component = TRUE, top = 10)
#'
#' # get the top ten URLs shared in a coordinated way, by engagement
#' df <- get_top_coord_urls(output, order_by = "engagement", component = FALSE, top = 10)
#'
#' @export
get_top_coord_urls <- function(output, order_by = "engagement", component = TRUE, top = 10) {
  # NOTE(review): require() inside a function is discouraged in package code
  # (namespaced dplyr:: calls would be preferable); kept here to preserve the
  # original loading behavior.
  require(dplyr) # 0.8.3

  # metric columns that are summed into a single "engagement" score per share
  engagement_cols <- c("statistics.actual.likeCount",
                       "statistics.actual.shareCount",
                       "statistics.actual.commentCount",
                       "statistics.actual.loveCount",
                       "statistics.actual.wowCount",
                       "statistics.actual.hahaCount",
                       "statistics.actual.sadCount",
                       "statistics.actual.angryCount")

  # keep only the coordinated shares posted by accounts that are part of the
  # coordinated network (output[[3]])
  coord_shares_urls <- output[[1]][output[[1]]$iscoordinated == TRUE &
                                     output[[1]]$account.url %in% output[[3]]$name, ]
  coord_shares_urls$engagement <- apply(coord_shares_urls[, engagement_cols], 1, sum)

  # attach the network component of each sharing account
  colnames(output[[3]])[1] <- "account.url"
  coord_shares_urls <- merge(coord_shares_urls, output[[3]][, c("account.url", "component")],
                             by = "account.url", all.x = TRUE)
  coord_shares_urls <- coord_shares_urls[, c("account.url", "date", "title", "description",
                                             "message", "link", "postUrl", "account.name",
                                             "account.handle", "account.subscriberCount",
                                             "expanded", engagement_cols,
                                             "engagement", "component")]

  # lookup table: which accounts shared which (expanded) URL
  urls_account_id <- unique(coord_shares_urls[, c("expanded", "account.name", "account.url")])

  # Append, for every top URL, comma-separated lists of the unique names and
  # URLs of the accounts that shared it. This replaces the original nested
  # loop (which tested the same condition twice and recomputed the same
  # subset for every matching row) with a single pass per top URL; the
  # output is identical. URLs with no match keep NA, as before.
  add_sharing_accounts <- function(top_urls) {
    top_urls$account.names <- NA
    top_urls$account.urls <- NA
    for (i in seq_len(nrow(top_urls))) {
      idx <- which(urls_account_id$expanded == top_urls$expanded[i])
      if (length(idx) > 0) {
        top_urls$account.names[i] <- paste(unique(urls_account_id$account.name[idx]), collapse = ", ")
        top_urls$account.urls[i] <- paste(unique(urls_account_id$account.url[idx]), collapse = ", ")
      }
    }
    top_urls
  }

  if (isTRUE(component)) {
    # top URLs per network component: count shares and sum the ordering
    # metric per (component, URL), then keep the top rows per component
    suppressMessages(top_urls <-
                       coord_shares_urls %>%
                       group_by(component, expanded) %>%
                       summarize(n_shares = n(),
                                 eng = sum(!!sym(order_by))) %>%
                       top_n(top) %>%
                       arrange(.by_group = TRUE))
    colnames(top_urls)[4] <- order_by
  } else {
    # overall top URLs, ordered by the summed metric
    suppressMessages(top_urls <-
                       coord_shares_urls %>%
                       group_by(expanded) %>%
                       summarize(n_shares = n(),
                                 eng = sum(!!sym(order_by))) %>%
                       top_n(top) %>%
                       arrange(desc(eng)))
    colnames(top_urls)[3] <- order_by
  }
  add_sharing_accounts(top_urls)
}
| /R/get_top_coord_urls.R | permissive | mathiasfls/CooRnet | R | false | false | 5,395 | r | #' get_top_coord_urls
#'
#' A function to get the top n URLs shared in a coordinated way
#'
#' @param output the output list resulting from the function get_coord_shares
#' @param order_by name of the column used to order the top news. Defaults to "engagement". Other possible values are: "statistics.actual.likeCount", "statistics.actual.shareCount", "statistics.actual.commentCount", "statistics.actual.loveCount", "statistics.actual.wowCount", "statistics.actual.hahaCount", "statistics.actual.sadCount", "statistics.actual.angryCount"
#' @param component return the top URLs by network component (TRUE, default) or just the top URLs (FALSE)
#' @param top number of the top URLs to be retrieved
#'
#' @return A data frame (grouped_df) containing the top URLs shared in a coordinated way by the highly coordinated entities, with a set of attributes
#'
#' @examples
#' # get the top ten URLs shared in a coordinated way by each network component, by engagement
#' df <- get_top_coord_urls(output, order_by = "engagement", component = TRUE, top=10)
#'
#' # get the top ten URLs shared in a coordinated way, by engagement
#' df <- get_top_coord_urls(output, order_by = "engagement", component = FALSE, top=10)
#'
#' @export
get_top_coord_urls <- function(output, order_by = "engagement", component=TRUE, top=10){
  # NOTE(review): require() inside a function is discouraged in package code;
  # namespaced dplyr:: calls would be preferable.
  require(dplyr) # 0.8.3
  # keep only coordinated shares posted by accounts present in the
  # coordinated network (output[[3]])
  coord_shares_urls <- output[[1]][output[[1]]$iscoordinated==TRUE & output[[1]]$account.url %in% output[[3]]$name,]
  # total engagement per share = sum of all reaction/share/comment counts
  coord_shares_urls$engagement <- apply(coord_shares_urls[,c("statistics.actual.likeCount",
                                                             "statistics.actual.shareCount",
                                                             "statistics.actual.commentCount",
                                                             "statistics.actual.loveCount",
                                                             "statistics.actual.wowCount",
                                                             "statistics.actual.hahaCount",
                                                             "statistics.actual.sadCount",
                                                             "statistics.actual.angryCount")], 1, sum)
  # attach each sharing account's network component
  colnames(output[[3]])[1] <- "account.url"
  coord_shares_urls <- merge(coord_shares_urls, output[[3]][,c("account.url","component")], by="account.url", all.x=T)
  coord_shares_urls <- coord_shares_urls[,c("account.url","date","title","description","message","link","postUrl",
                                            "account.name","account.handle","account.subscriberCount","expanded",
                                            "statistics.actual.likeCount", "statistics.actual.shareCount",
                                            "statistics.actual.commentCount", "statistics.actual.loveCount",
                                            "statistics.actual.wowCount", "statistics.actual.hahaCount",
                                            "statistics.actual.sadCount","statistics.actual.angryCount",
                                            "engagement","component")]
  if(component==TRUE){
    # top URLs per network component: count shares and sum the ordering
    # metric per (component, expanded URL), then keep the top rows
    suppressMessages(top_urls <-
                       coord_shares_urls %>%
                       group_by(component, expanded) %>%
                       summarize(n_shares = n(),
                                 eng = sum(!!sym(order_by))) %>%
                       top_n(top) %>%
                       arrange(.by_group=T))
    colnames(top_urls)[4] <- order_by
    # lookup of which accounts shared which URL
    urls_account_id <- unique(coord_shares_urls[, c("expanded", "account.name", "account.url")])
    top_urls$account.names <- NA
    top_urls$account.urls <- NA
    # for every top URL, collect the unique names/URLs of the accounts that
    # shared it as comma-separated strings (URLs with no match keep NA)
    for (i in 1:nrow(top_urls)){
      for (j in 1:nrow(urls_account_id)){
        if (urls_account_id$expanded[j] == top_urls$expanded[i]) {
          urls_account_id_names_sub <- unique(urls_account_id$account.name[urls_account_id$expanded == urls_account_id$expanded[j]])
          top_urls$account.names[i] <- paste(urls_account_id_names_sub, collapse = ", ")
        }
        if (urls_account_id$expanded[j] == top_urls$expanded[i]) {
          urls_account_id_urls_sub <- unique(urls_account_id$account.url[urls_account_id$expanded == urls_account_id$expanded[j]])
          top_urls$account.urls[i] <- paste(urls_account_id_urls_sub, collapse = ", ")
        }
      }
    }
    return(top_urls)
  }
  if(component==FALSE){
    # overall top URLs (components ignored), ordered by the summed metric
    suppressMessages(top_urls <-
                       coord_shares_urls %>%
                       group_by(expanded) %>%
                       summarize(n_shares = n(),
                                 eng = sum(!!sym(order_by))) %>%
                       top_n(top) %>%
                       arrange(desc(eng)))
    colnames(top_urls)[3] <- order_by
    # same per-URL account annotation as in the component branch
    urls_account_id <- unique(coord_shares_urls[, c("expanded", "account.name", "account.url")])
    top_urls$account.names <- NA
    top_urls$account.urls <- NA
    for (i in 1:nrow(top_urls)){
      for (j in 1:nrow(urls_account_id)){
        if (urls_account_id$expanded[j] == top_urls$expanded[i]) {
          urls_account_id_names_sub <- unique(urls_account_id$account.name[urls_account_id$expanded == urls_account_id$expanded[j]])
          top_urls$account.names[i] <- paste(urls_account_id_names_sub, collapse = ", ")
        }
        if (urls_account_id$expanded[j] == top_urls$expanded[i]) {
          urls_account_id_urls_sub <- unique(urls_account_id$account.url[urls_account_id$expanded == urls_account_id$expanded[j]])
          top_urls$account.urls[i] <- paste(urls_account_id_urls_sub, collapse = ", ")
        }
      }
    }
    return(top_urls)
  }
}
|
structure(list(url = "https://api.twitter.com/2/tweets?tweet.fields=attachments%2Cauthor_id%2Cconversation_id%2Ccreated_at%2Centities%2Cgeo%2Cid%2Cin_reply_to_user_id%2Clang%2Cpublic_metrics%2Cpossibly_sensitive%2Creferenced_tweets%2Csource%2Ctext%2Cwithheld&user.fields=created_at%2Cdescription%2Centities%2Cid%2Clocation%2Cname%2Cpinned_tweet_id%2Cprofile_image_url%2Cprotected%2Cpublic_metrics%2Curl%2Cusername%2Cverified%2Cwithheld&expansions=author_id%2Centities.mentions.username%2Cgeo.place_id%2Cin_reply_to_user_id%2Creferenced_tweets.id%2Creferenced_tweets.id.author_id&place.fields=contained_within%2Ccountry%2Ccountry_code%2Cfull_name%2Cgeo%2Cid%2Cname%2Cplace_type&ids=1%2C2%2C1266867327079002121%2C1266866660713127936%2C1266864490446012418%2C1266860737244336129%2C1266859737615826944%2C1266859455586676736%2C1266858090143588352%2C1266857669157097473%2C1266856357954756609%2C1266855807699861506%2C1266855344086663169%2C1266854627758276608%2C1266854586188476421%2C1266854533889757187%2C1266853931247906816%2C1266853419291234312%2C1266852781526302722%2C1266852099163291650",
status_code = 200L, headers = structure(list(date = "Sun, 19 Dec 2021 20:45:17 UTC",
server = "tsa_o", `api-version` = "2.32", `content-type` = "application/json; charset=utf-8",
`cache-control` = "no-cache, no-store, max-age=0", `content-length` = "10146",
`x-access-level` = "read", `x-frame-options` = "SAMEORIGIN",
`content-encoding` = "gzip", `x-xss-protection` = "0",
`x-rate-limit-limit` = "300", `x-rate-limit-reset` = "1639947531",
`content-disposition` = "attachment; filename=json.json",
`x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "287",
`strict-transport-security` = "max-age=631138519", `x-response-time` = "458",
`x-connection-hash` = "7d54fc61481fbbf761f96e7412fcce2a506bf85407f0f6725ae81f8d24a33064"), class = c("insensitive",
"list")), all_headers = list(list(status = 200L, version = "HTTP/2",
headers = structure(list(date = "Sun, 19 Dec 2021 20:45:17 UTC",
server = "tsa_o", `api-version` = "2.32", `content-type` = "application/json; charset=utf-8",
`cache-control` = "no-cache, no-store, max-age=0",
`content-length` = "10146", `x-access-level` = "read",
`x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip",
`x-xss-protection` = "0", `x-rate-limit-limit` = "300",
`x-rate-limit-reset` = "1639947531", `content-disposition` = "attachment; filename=json.json",
`x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "287",
`strict-transport-security` = "max-age=631138519",
`x-response-time` = "458", `x-connection-hash` = "7d54fc61481fbbf761f96e7412fcce2a506bf85407f0f6725ae81f8d24a33064"), class = c("insensitive",
"list")))), cookies = structure(list(domain = c(".twitter.com",
".twitter.com", ".twitter.com", ".twitter.com"), flag = c(TRUE,
TRUE, TRUE, TRUE), path = c("/", "/", "/", "/"), secure = c(TRUE,
TRUE, TRUE, TRUE), expiration = structure(c(1702744284, 1702744284,
1702744284, 1702744284), class = c("POSIXct", "POSIXt")),
name = c("guest_id_marketing", "guest_id_ads", "personalization_id",
"guest_id"), value = c("REDACTED", "REDACTED", "REDACTED",
"REDACTED")), row.names = c(NA, -4L), class = "data.frame"),
content = charToRaw("{\"data\":[{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266867327079002121\",\"attachments\":{\"media_keys\":[\"3_1266867313988509697\"]},\"text\":\"Diese Rücksichtslosigkeit kotzt mich langsam an: Nein, es ist total uncool, seinen Müll wegzuwerfen. \\n\\nIch sagte schon zu Zeiten von #FridaysForFuture , dass es sinnvoller wäre, am FR Nachmittag mit Müllbeuteln bewaffnet die REGIONALE Natur zu säubern, um ein Gefühl zu erhalten. https://t.co/lT2YVFpXit\",\"created_at\":\"2020-05-30T23:01:22.000Z\",\"entities\":{\"hashtags\":[{\"start\":133,\"end\":150,\"tag\":\"FridaysForFuture\"}],\"urls\":[{\"start\":280,\"end\":303,\"url\":\"https://t.co/lT2YVFpXit\",\"expanded_url\":\"https://twitter.com/Mr_Twoflower/status/1266867327079002121/photo/1\",\"display_url\":\"pic.twitter.com/lT2YVFpXit\"}]},\"lang\":\"de\",\"author_id\":\"3850413016\",\"id\":\"1266867327079002121\",\"possibly_sensitive\":true,\"public_metrics\":{\"retweet_count\":0,\"reply_count\":1,\"like_count\":5,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266866660713127936\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"bremenforfuture\",\"id\":\"1080927540422815744\"},{\"start\":70,\"end\":85,\"username\":\"polizei_nrw_ge\",\"id\":\"2389272182\"}],\"hashtags\":[{\"start\":53,\"end\":62,\"tag\":\"Datteln4\"}]},\"text\":\"RT @bremenforfuture: Bei d. friedlichen Protesten um #Datteln4 hat d. @polizei_nrw_ge gerade 5 Aktivist*innen aus d. 
Masse gezogen, nieder…\",\"created_at\":\"2020-05-30T22:58:43.000Z\",\"lang\":\"de\",\"author_id\":\"179629368\",\"id\":\"1266866660713127936\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266704570190790656\"}],\"public_metrics\":{\"retweet_count\":153,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266864490446012418\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"bremenforfuture\",\"id\":\"1080927540422815744\"},{\"start\":70,\"end\":85,\"username\":\"polizei_nrw_ge\",\"id\":\"2389272182\"}],\"hashtags\":[{\"start\":53,\"end\":62,\"tag\":\"Datteln4\"}]},\"text\":\"RT @bremenforfuture: Bei d. friedlichen Protesten um #Datteln4 hat d. @polizei_nrw_ge gerade 5 Aktivist*innen aus d. Masse gezogen, nieder…\",\"created_at\":\"2020-05-30T22:50:06.000Z\",\"lang\":\"de\",\"author_id\":\"728209251190120449\",\"id\":\"1266864490446012418\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266704570190790656\"}],\"public_metrics\":{\"retweet_count\":153,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266860737244336129\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:35:11.000Z\",\"lang\":\"de\",\"author_id\":\"3844151483\",\"id\":\"1266860737244336129\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266859737615826944\",\"text\":\"RT @fff_2209: Das Kohleausstiegsgesetz darf so nicht verabschiedet werden. Macht auf das Thema aufmerksam und schreibt Euren Bundestagsabge…\",\"created_at\":\"2020-05-30T22:31:13.000Z\",\"lang\":\"de\",\"author_id\":\"61153698\",\"id\":\"1266859737615826944\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266817176687779846\"}],\"public_metrics\":{\"retweet_count\":33,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266859455586676736\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:30:05.000Z\",\"lang\":\"de\",\"author_id\":\"871879419559903232\",\"id\":\"1266859455586676736\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266858090143588352\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":12,\"username\":\"wlkutsch\",\"id\":\"2816669118\"},{\"start\":49,\"end\":57,\"username\":\"ICOS_RI\",\"id\":\"3216719140\"}]},\"text\":\"RT @wlkutsch: Gestern hatten wir ein CO2FFEE mit @ICOS_RI über die Arktis: Wissenschaft: wir müssen Emissionen schnell reduzieren. Politik:…\",\"created_at\":\"2020-05-30T22:24:40.000Z\",\"lang\":\"de\",\"author_id\":\"1180995990284853248\",\"id\":\"1266858090143588352\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266779244484575232\"}],\"public_metrics\":{\"retweet_count\":2,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266857669157097473\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:23:00.000Z\",\"lang\":\"de\",\"author_id\":\"1242872854925975552\",\"id\":\"1266857669157097473\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266856357954756609\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:17:47.000Z\",\"lang\":\"de\",\"author_id\":\"1027180824335208448\",\"id\":\"1266856357954756609\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266855807699861506\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:15:36.000Z\",\"lang\":\"de\",\"author_id\":\"817842489814118401\",\"id\":\"1266855807699861506\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266855344086663169\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:13:45.000Z\",\"lang\":\"de\",\"author_id\":\"1007979497965178880\",\"id\":\"1266855344086663169\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266854627758276608\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:10:54.000Z\",\"lang\":\"de\",\"author_id\":\"1220059857535086597\",\"id\":\"1266854627758276608\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266854586188476421\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":14,\"username\":\"FFFDresden\",\"id\":\"1081603778699296770\"},{\"start\":79,\"end\":95,\"username\":\"StudisForFuture\",\"id\":\"1121882665064501248\"},{\"start\":97,\"end\":110,\"username\":\"ADFC_Dresden\",\"id\":\"125625502\"},{\"start\":112,\"end\":124,\"username\":\"P4F_Dresden\",\"id\":\"1136652369859682305\"}]},\"text\":\"RT @FFFDresden: Neben unserer Fahrraddemo finden am Dienstag auch Aktionen von @StudisForFuture, @ADFC_Dresden, @P4F_Dresden und @BundSachs…\",\"created_at\":\"2020-05-30T22:10:45.000Z\",\"lang\":\"de\",\"author_id\":\"1180995990284853248\",\"id\":\"1266854586188476421\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266819115236364290\"}],\"public_metrics\":{\"retweet_count\":6,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266854533889757187\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:10:32.000Z\",\"lang\":\"de\",\"author_id\":\"255637301\",\"id\":\"1266854533889757187\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266677828801695744\",\"entities\":{\"mentions\":[{\"start\":0,\"end\":15,\"username\":\"Bastian_Atzger\",\"id\":\"107357635\"},{\"start\":16,\"end\":27,\"username\":\"LudgerWess\",\"id\":\"989716693\"}],\"hashtags\":[{\"start\":152,\"end\":169,\"tag\":\"FridaysForFuture\"}]},\"text\":\"@Bastian_Atzger @LudgerWess Wer nicht darauf angewiesen ist, Hetze zu betreiben, hätte vielleicht erkannt, daß diese Ausschnitte aus Plakaten weder von #FridaysForFuture noch aus Deutschland stammen.\",\"created_at\":\"2020-05-30T22:08:08.000Z\",\"lang\":\"de\",\"in_reply_to_user_id\":\"107357635\",\"author_id\":\"126978307\",\"id\":\"1266853931247906816\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"replied_to\",\"id\":\"1266831085582913538\"}],\"public_metrics\":{\"retweet_count\":0,\"reply_count\":1,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPad\",\"conversation_id\":\"1266853419291234312\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:06:06.000Z\",\"lang\":\"de\",\"author_id\":\"1743316638\",\"id\":\"1266853419291234312\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266852781526302722\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":11,\"username\":\"Manya_G\",\"id\":\"98816776\"}],\"hashtags\":[{\"start\":61,\"end\":78,\"tag\":\"FridaysForFuture\"}]},\"text\":\"RT @Manya_G: Super Schildermeer am Heldenplatz heute! \\uD83D\\uDCAA\\uD83C\\uDFFB\\uD83D\\uDC4F\\uD83C\\uDFFB\\uD83D\\uDC9A\\n\\n#FridaysForFuture sind kreativ wie eh und je, schön, euch wieder \\\"live\\\" zu seh…\",\"created_at\":\"2020-05-30T22:03:34.000Z\",\"lang\":\"de\",\"author_id\":\"593841338\",\"id\":\"1266852781526302722\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266436838190911497\"}],\"public_metrics\":{\"retweet_count\":121,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266852099163291650\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":10,\"username\":\"addnme\",\"id\":\"67984352\"}],\"hashtags\":[{\"start\":57,\"end\":71,\"tag\":\"Verkehrswende\"},{\"start\":72,\"end\":93,\"tag\":\"VerkehrswendeDresden\"},{\"start\":94,\"end\":102,\"tag\":\"Dresden\"},{\"start\":103,\"end\":111,\"tag\":\"Sachsen\"},{\"start\":112,\"end\":127,\"tag\":\"Fahrradprotest\"},{\"start\":128,\"end\":133,\"tag\":\"ADFC\"}]},\"text\":\"RT @addnme: Fahrradprotest für sichere Karl-Marx-Straße\\n\\n#Verkehrswende #VerkehrswendeDresden #Dresden #Sachsen #Fahrradprotest #ADFC 
#Frid…\",\"created_at\":\"2020-05-30T22:00:52.000Z\",\"lang\":\"de\",\"author_id\":\"1180995990284853248\",\"id\":\"1266852099163291650\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266722835252129794\"}],\"public_metrics\":{\"retweet_count\":2,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}}],\"includes\":{\"users\":[{\"location\":\"Am Liebsten am Gipfelkreuz.\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1470089012748636168/Z2uCmxwU_normal.jpg\",\"created_at\":\"2015-10-03T07:33:16.000Z\",\"pinned_tweet_id\":\"1365455686289793026\",\"verified\":false,\"username\":\"Mr_Twoflower\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/Vq9r6Da3Ba\",\"expanded_url\":\"https://zwischenmenschliches.wordpress.com/2013/02/27/wie-mein-leben-zum-ersten-mal-verandert-wurde/\",\"display_url\":\"zwischenmenschliches.wordpress.com/2013/02/27/wie…\"}]}},\"name\":\"derWandersmann\",\"protected\":false,\"description\":\"Nicht reden! Nicht denken! Nicht fragen! Einfach machen! So funktioniert das auch mit dem Küssen! ◽️ Wenn es Dich traurig macht, warum machst Du es dann?\",\"id\":\"3850413016\",\"url\":\"https://t.co/Vq9r6Da3Ba\",\"public_metrics\":{\"followers_count\":2168,\"following_count\":449,\"tweet_count\":76064,\"listed_count\":37}},{\"location\":\"Essen\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1435303060897738758/uwAgQFEz_normal.jpg\",\"created_at\":\"2010-08-17T19:18:29.000Z\",\"pinned_tweet_id\":\"1233101631107010562\",\"verified\":false,\"username\":\"MaddyLina25\",\"name\":\"Madeleine Montberg\",\"protected\":false,\"description\":\"I'm a German GTA (= Gestaltungstechnische Assistentin in Grafik- und Objektdesign/ Assistant of Graphic and Design) and an artist. 
she/her (Insta: maddylina25)\",\"id\":\"179629368\",\"url\":\"\",\"public_metrics\":{\"followers_count\":495,\"following_count\":65,\"tweet_count\":151074,\"listed_count\":1}},{\"location\":\"Bremen, Germany\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1245682869068017665/BpEWcCOX_normal.jpg\",\"created_at\":\"2019-01-03T20:43:03.000Z\",\"pinned_tweet_id\":\"1441386050308427778\",\"verified\":false,\"username\":\"bremenforfuture\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/KHkwU9Jan5\",\"expanded_url\":\"https://fridaysforfuture-bremen.de/\",\"display_url\":\"fridaysforfuture-bremen.de\"}]}},\"name\":\"Fridays for Future Bremen\",\"protected\":false,\"description\":\"Wir streiken für Klimagerechtigkeit!\",\"id\":\"1080927540422815744\",\"url\":\"https://t.co/KHkwU9Jan5\",\"public_metrics\":{\"followers_count\":2970,\"following_count\":130,\"tweet_count\":606,\"listed_count\":41}},{\"location\":\"Gelsenkirchen\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1421028257558843394/EK1_tRYc_normal.jpg\",\"created_at\":\"2014-03-14T15:12:47.000Z\",\"verified\":true,\"username\":\"polizei_nrw_ge\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/OzAD0DAAhq\",\"expanded_url\":\"http://gelsenkirchen.polizei.nrw\",\"display_url\":\"gelsenkirchen.polizei.nrw\"}]},\"description\":{\"urls\":[{\"start\":95,\"end\":118,\"url\":\"https://t.co/vigzJs5xAR\",\"expanded_url\":\"http://url.nrw/43N\",\"display_url\":\"url.nrw/43N\"}]}},\"name\":\"Polizei NRW GE\",\"protected\":false,\"description\":\"Polizei Gelsenkirchen. Rathausplatz 4, 45877 Gelsenkirchen, Tel.: 0209 / 365 - 0. 
Datenschutz: https://t.co/vigzJs5xAR\",\"id\":\"2389272182\",\"url\":\"https://t.co/OzAD0DAAhq\",\"public_metrics\":{\"followers_count\":8796,\"following_count\":135,\"tweet_count\":4622,\"listed_count\":138}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/862004484540682240/HbuGRQYo_normal.jpg\",\"created_at\":\"2016-05-05T13:06:29.000Z\",\"pinned_tweet_id\":\"1267156926208540677\",\"verified\":false,\"username\":\"SiggiSGE\",\"name\":\"Siegfried\",\"protected\":false,\"description\":\"Jeder verdient es mit Respekt behandelt zu werden, auch wenn wir nicht der gleichen Meinung sind.\\nNie wieder Krieg in Europa & weltweite Kriege beenden\",\"id\":\"728209251190120449\",\"url\":\"\",\"public_metrics\":{\"followers_count\":522,\"following_count\":2289,\"tweet_count\":17719,\"listed_count\":0}},{\"location\":\"Hamburg\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1439299606513397763/ZC9sVoJM_normal.jpg\",\"created_at\":\"2015-10-02T15:42:30.000Z\",\"verified\":false,\"username\":\"L2279600\",\"name\":\"Rita\",\"protected\":false,\"description\":\"Der Nürnberger Kodex wurde eingeführt,damit Menschen nie wieder zu medizinischen Behandlungen gezwungen oder genötigt werden‼️\",\"id\":\"3844151483\",\"url\":\"\",\"public_metrics\":{\"followers_count\":1072,\"following_count\":1024,\"tweet_count\":27833,\"listed_count\":12}},{\"location\":\"Cologne \",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1190023297494376452/trykKe9U_normal.jpg\",\"created_at\":\"2019-07-29T14:30:49.000Z\",\"pinned_tweet_id\":\"1330566567839588357\",\"verified\":false,\"username\":\"TheReal32492440\",\"name\":\"Inspector Reality \\uD83D\\uDD75️♂️ \\uD83C\\uDDE9\\uD83C\\uDDEA \\uD83C\\uDDEC\\uD83C\\uDDF7 \\uD83C\\uDDFA\\uD83C\\uDDF8\",\"protected\":false,\"description\":\"Konservativ. Gastarbeiter Kind. \\uD83D\\uDD0E Make Germany great again! Gedanken sind Energie. 
Coronologe™ My immune system is created by god.\",\"id\":\"1155848155893837825\",\"url\":\"\",\"public_metrics\":{\"followers_count\":3897,\"following_count\":3207,\"tweet_count\":36497,\"listed_count\":29}},{\"location\":\"Ysmault\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/851757824845770752/glCaQdzZ_normal.jpg\",\"created_at\":\"2009-07-29T09:07:53.000Z\",\"verified\":false,\"username\":\"Scarcloud\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":54,\"end\":71,\"tag\":\"Klimakatastrophe\"}]}},\"name\":\"Red Lantern\",\"protected\":false,\"description\":\"'I'll always be angry.'\\nLebensziel: 2040 nicht an der #Klimakatastrophe krepieren.\\nUnited we stand, divided we fall.\",\"id\":\"61153698\",\"url\":\"\",\"public_metrics\":{\"followers_count\":80,\"following_count\":205,\"tweet_count\":3553,\"listed_count\":3}},{\"location\":\"Hochneukirch \",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1346147544368369672/UN8jezCT_normal.jpg\",\"created_at\":\"2018-12-14T08:18:50.000Z\",\"verified\":false,\"username\":\"fffHochneukirch\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":80,\"end\":97,\"tag\":\"FridaysForFuture\"},{\"start\":128,\"end\":144,\"tag\":\"LützerathBleibt\"}]}},\"name\":\"FFF Hochneukirch\",\"protected\":false,\"description\":\"Wir sind junge Menschen, die sich für ihre Zukunft einsetzen. 
Hier twittert die #FridaysForFuture Ortsgruppe aus Hochneukirch.\\uD83C\\uDF0D #LützerathBleibt\",\"id\":\"1073492498050416640\",\"url\":\"\",\"public_metrics\":{\"followers_count\":1957,\"following_count\":286,\"tweet_count\":2139,\"listed_count\":42}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1173750408537419781/iinbdQ3w_normal.jpg\",\"created_at\":\"2017-06-06T00:00:27.000Z\",\"pinned_tweet_id\":\"1354963867118563346\",\"verified\":false,\"username\":\"RehBln50\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":134,\"end\":148,\"tag\":\"genugistgenug\"}]}},\"name\":\"RehBln50 ❌\",\"protected\":false,\"description\":\"Definitiv wbl., gegen Vieles, z. B. Political Correctness, Betreutes Denken. Für Meinungsfreiheit, allerdings auch mit Manieren ! ;-)\\n#genugistgenug\",\"id\":\"871879419559903232\",\"url\":\"\",\"public_metrics\":{\"followers_count\":864,\"following_count\":1280,\"tweet_count\":47166,\"listed_count\":0}},{\"location\":\"Berlin, Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1441895077610143767/gXY0jbrg_normal.jpg\",\"created_at\":\"2019-10-06T23:59:42.000Z\",\"pinned_tweet_id\":\"1421396422180712449\",\"verified\":false,\"username\":\"ClaudiaP4F\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":2,\"end\":16,\"tag\":\"Berlin4Future\"},{\"start\":19,\"end\":29,\"tag\":\"P4FBerlin\"},{\"start\":32,\"end\":45,\"tag\":\"bornat330ppm\"}]}},\"name\":\"\\uD83D\\uDC9AClaudia\\uD83D\\uDC9A\",\"protected\":false,\"description\":\"\\uD83D\\uDC9A #Berlin4Future \\uD83D\\uDC9A #P4FBerlin \\uD83D\\uDC9A #bornat330ppm \\uD83D\\uDC9A \\uD83D\\uDCAF 
linksgrünglänzend\",\"id\":\"1180995990284853248\",\"url\":\"\",\"public_metrics\":{\"followers_count\":198,\"following_count\":616,\"tweet_count\":10060,\"listed_count\":1}},{\"location\":\"Helsinki\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1424676001523585024/5k5Yl21g_normal.jpg\",\"created_at\":\"2014-09-18T09:31:13.000Z\",\"verified\":false,\"username\":\"wlkutsch\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/l44b9mw6iF\",\"expanded_url\":\"http://www.icos-ri.eu\",\"display_url\":\"icos-ri.eu\"}]},\"description\":{\"mentions\":[{\"start\":20,\"end\":28,\"username\":\"ICOS_RI\"}]}},\"name\":\"Werner Leo Kutsch\",\"protected\":false,\"description\":\"Director General of @ICOS_RI\",\"id\":\"2816669118\",\"url\":\"https://t.co/l44b9mw6iF\",\"public_metrics\":{\"followers_count\":716,\"following_count\":318,\"tweet_count\":1349,\"listed_count\":16}},{\"location\":\"Helsinki, Finnland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1237365121527222272/2OdKGWIv_normal.jpg\",\"created_at\":\"2015-04-28T10:37:40.000Z\",\"verified\":false,\"username\":\"ICOS_RI\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/jaOULcoPFy\",\"expanded_url\":\"https://www.icos-ri.eu/\",\"display_url\":\"icos-ri.eu\"}]}},\"name\":\"ICOS RI\",\"protected\":false,\"description\":\"European research infrastructure providing open & standardised greenhouse gas data to prevent climate change.\",\"id\":\"3216719140\",\"url\":\"https://t.co/jaOULcoPFy\",\"public_metrics\":{\"followers_count\":2185,\"following_count\":737,\"tweet_count\":2658,\"listed_count\":47}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1265724538593202178/44uO4Q3L_normal.jpg\",\"created_at\":\"2020-03-25T17:56:52.000Z\",\"pinned_tweet_id\":\"1410688500798722050\",\"verified\":false,\"username\":\"Charlyle7\",\"name\":\"Charlyle\",\"protected\":false,\"description\":\"Ich benutze meinen Verstand und bilde mir 
meine eigene Meinung.\",\"id\":\"1242872854925975552\",\"url\":\"\",\"public_metrics\":{\"followers_count\":427,\"following_count\":308,\"tweet_count\":8551,\"listed_count\":0}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1418612911371702274/rS70orqB_normal.jpg\",\"created_at\":\"2018-08-08T13:12:47.000Z\",\"verified\":false,\"username\":\"EllyB53292373\",\"name\":\"EllyB \\uD83C\\uDDF3\\uD83C\\uDDF1\\uD83C\\uDDEE\\uD83C\\uDDF1\\uD83C\\uDDE9\\uD83C\\uDDEA\",\"protected\":false,\"description\":\"\",\"id\":\"1027180824335208448\",\"url\":\"\",\"public_metrics\":{\"followers_count\":893,\"following_count\":487,\"tweet_count\":82609,\"listed_count\":2}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/847833492721467393/UzHloCb9_normal.jpg\",\"created_at\":\"2017-01-07T21:16:59.000Z\",\"pinned_tweet_id\":\"1272473885536137218\",\"verified\":false,\"username\":\"frankenzicke\",\"name\":\"enisa\",\"protected\":false,\"description\":\"Wir leben in einer Diktatur der Verblödeten, das ist die höchste und edelste Form der Sklaverei.// Gott stehe uns bei \\uD83D\\uDE4F\",\"id\":\"817842489814118401\",\"url\":\"\",\"public_metrics\":{\"followers_count\":2553,\"following_count\":1554,\"tweet_count\":91107,\"listed_count\":2}},{\"location\":\"NRW, Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1348577897393610757/7qHem2PP_normal.jpg\",\"created_at\":\"2018-06-16T13:33:34.000Z\",\"verified\":false,\"username\":\"RTErsc\",\"entities\":{\"description\":{\"mentions\":[{\"start\":112,\"end\":119,\"username\":\"RTErsc\"}]}},\"name\":\"RTErs\\uD83C\\uDDE9\\uD83C\\uDDEA\\uD83C\\uDDFA\\uD83C\\uDDF8\\uD83C\\uDDEA\\uD83C\\uDDF8MAGA*IFB*\",\"protected\":false,\"description\":\"Es ist nie zu spät - neu anzufangen! Wer aufgibt, hat den Kampf bereits verloren! Att.! 
\\nNEU - GETTR - unter : @RTErsc\",\"id\":\"1007979497965178880\",\"url\":\"\",\"public_metrics\":{\"followers_count\":6536,\"following_count\":6570,\"tweet_count\":155208,\"listed_count\":4}},{\"location\":\"\\uD83D\\uDE4B\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1220060457207242753/de4ZrMWa_normal.jpg\",\"created_at\":\"2020-01-22T19:05:37.000Z\",\"verified\":false,\"username\":\"Conny96418914\",\"name\":\"Conny\",\"protected\":false,\"description\":\"Lieber stehende sterben als kriechend leben\",\"id\":\"1220059857535086597\",\"url\":\"\",\"public_metrics\":{\"followers_count\":113,\"following_count\":145,\"tweet_count\":2711,\"listed_count\":0}},{\"location\":\"Dresden, Sachsen\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1444628319295709184/xoLenFzS_normal.jpg\",\"created_at\":\"2019-01-05T17:30:10.000Z\",\"pinned_tweet_id\":\"1441425967617089536\",\"verified\":false,\"username\":\"FFFDresden\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/mdPiW5NsEv\",\"expanded_url\":\"http://fffutu.re/dresden\",\"display_url\":\"fffutu.re/dresden\"}]},\"description\":{\"hashtags\":[{\"start\":116,\"end\":132,\"tag\":\"UprootTheSystem\"}]}},\"name\":\"Fridays for Future Dresden\",\"protected\":false,\"description\":\"Wir sind Schüler:innen und junge Menschen aus Dresden, die freitags für eine sozial-ökologische Wende streiken. 
\\n⋅ #UprootTheSystem ✊ \\n⋅\",\"id\":\"1081603778699296770\",\"url\":\"https://t.co/mdPiW5NsEv\",\"public_metrics\":{\"followers_count\":4189,\"following_count\":499,\"tweet_count\":2827,\"listed_count\":52}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1121882984020361217/MLdMP6eb_normal.jpg\",\"created_at\":\"2019-04-26T21:04:05.000Z\",\"verified\":false,\"username\":\"StudisForFuture\",\"name\":\"StudisForFuture Dresden\",\"protected\":false,\"description\":\"Wir sind eine Gruppe Studierender aus Dresden und solidarisieren uns mit der Fridays For Future Bewegung\",\"id\":\"1121882665064501248\",\"url\":\"\",\"public_metrics\":{\"followers_count\":86,\"following_count\":14,\"tweet_count\":13,\"listed_count\":2}},{\"location\":\"Dresden, Saxony\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/770030243/logo_normal.jpg\",\"created_at\":\"2010-03-23T11:04:00.000Z\",\"verified\":false,\"username\":\"ADFC_Dresden\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":22,\"url\":\"http://t.co/57lk74oIJe\",\"expanded_url\":\"http://www.adfc-dresden.de\",\"display_url\":\"adfc-dresden.de\"}]},\"description\":{\"urls\":[{\"start\":53,\"end\":76,\"url\":\"https://t.co/QC9HlftOPE\",\"expanded_url\":\"http://adfc-dresden.de/impressum\",\"display_url\":\"adfc-dresden.de/impressum\"}]}},\"name\":\"ADFC Dresden\",\"protected\":false,\"description\":\"Unterwegs für aktive Mobilität in Dresden seit 
1987.\\nhttps://t.co/QC9HlftOPE\",\"id\":\"125625502\",\"url\":\"http://t.co/57lk74oIJe\",\"public_metrics\":{\"followers_count\":2862,\"following_count\":198,\"tweet_count\":5354,\"listed_count\":67}},{\"location\":\"Dresden\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1367921039418744834/6mvom25w_normal.jpg\",\"created_at\":\"2019-06-06T15:13:37.000Z\",\"pinned_tweet_id\":\"1471446694109847556\",\"verified\":false,\"username\":\"P4F_Dresden\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/CvlWkCOhRW\",\"expanded_url\":\"http://dresden.parentsforfuture.de\",\"display_url\":\"dresden.parentsforfuture.de\"}]},\"description\":{\"mentions\":[{\"start\":50,\"end\":61,\"username\":\"FFFDresden\"}]}},\"name\":\"Parents For Future Dresden\",\"protected\":false,\"description\":\"Eltern aus Dresden und Umgebung. Wir unterstützen @FFFDresden. Wir suchen Helfer - sprich uns auf den Demos an und komm zu unseren Treffen.\",\"id\":\"1136652369859682305\",\"url\":\"https://t.co/CvlWkCOhRW\",\"public_metrics\":{\"followers_count\":1106,\"following_count\":913,\"tweet_count\":1640,\"listed_count\":26}},{\"location\":\"Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/738821341416181760/IFbSKtJ5_normal.jpg\",\"created_at\":\"2011-02-21T19:31:20.000Z\",\"verified\":false,\"username\":\"savannaSeKinner\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/R4O8AdFIvj\",\"expanded_url\":\"http://blog.t-originals.de\",\"display_url\":\"blog.t-originals.de\"}]}},\"name\":\"gab.ai/Katja 
\\uD83D\\uDC38\",\"protected\":false,\"description\":\"\",\"id\":\"255637301\",\"url\":\"https://t.co/R4O8AdFIvj\",\"public_metrics\":{\"followers_count\":807,\"following_count\":680,\"tweet_count\":75900,\"listed_count\":27}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1385992649157582848/-0HNRr2A_normal.jpg\",\"created_at\":\"2010-03-27T16:38:04.000Z\",\"verified\":false,\"username\":\"Kl_Stone\",\"name\":\"Klaus Steinfelder\",\"protected\":false,\"description\":\"Wenn man sich jeden Tag als 1 qmm vorstellt, dann reicht ein 3/4-DIN A4-Blatt Millimeterpapier für 80 Jahre Leben. Schon beunruhigend überschaubar irgendwie...\",\"id\":\"126978307\",\"url\":\"\",\"public_metrics\":{\"followers_count\":1030,\"following_count\":1557,\"tweet_count\":84652,\"listed_count\":5}},{\"location\":\"Baden-Württemberg & Minho, PT\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1060484934492667906/i6xHY5lm_normal.jpg\",\"created_at\":\"2010-01-22T09:03:27.000Z\",\"verified\":false,\"username\":\"Bastian_Atzger\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/QI1nTwuWsa\",\"expanded_url\":\"http://www.bastian-atzger.de\",\"display_url\":\"bastian-atzger.de\"}]}},\"name\":\"Bastian Atzger\",\"protected\":false,\"description\":\"\\uD83D\\uDCC8 Marketer\\n\\uD83D\\uDD0E Wirtschaftswissenschaftler / economic scientist\\n\\uD83D\\uDDF3 MITtelstandsunion BW / middle class politics\\n\\uD83D\\uDD2CSceptic and critical realist\\n\\uD83C\\uDDE9\\uD83C\\uDDEA \\uD83C\\uDDEA\\uD83C\\uDDFA\",\"id\":\"107357635\",\"url\":\"https://t.co/QI1nTwuWsa\",\"public_metrics\":{\"followers_count\":578,\"following_count\":476,\"tweet_count\":7564,\"listed_count\":34}},{\"location\":\"Hamburg, 
Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1222199878325088264/rkeWxQQb_normal.jpg\",\"created_at\":\"2012-12-04T22:28:03.000Z\",\"pinned_tweet_id\":\"1361621295079817222\",\"verified\":false,\"username\":\"LudgerWess\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/X1JeSrZgXC\",\"expanded_url\":\"https://www.matthes-seitz-berlin.de/autor/ludger-wess.html\",\"display_url\":\"matthes-seitz-berlin.de/autor/ludger-w…\"}]}},\"name\":\"Ludger Wess\",\"protected\":false,\"description\":\"Drawn into biology since my first encounter with a microscope. Research in molecular biology, now science writer: fiction, non-fiction, commentaries.\",\"id\":\"989716693\",\"url\":\"https://t.co/X1JeSrZgXC\",\"public_metrics\":{\"followers_count\":3293,\"following_count\":616,\"tweet_count\":28042,\"listed_count\":62}},{\"profile_image_url\":\"https://abs.twimg.com/sticky/default_profile_images/default_profile_normal.png\",\"created_at\":\"2013-09-07T08:17:54.000Z\",\"verified\":false,\"username\":\"HansBrunner1\",\"name\":\"Hans Brunner\",\"protected\":false,\"description\":\"\",\"id\":\"1743316638\",\"url\":\"\",\"public_metrics\":{\"followers_count\":343,\"following_count\":670,\"tweet_count\":18423,\"listed_count\":0}},{\"location\":\"Amsterdam\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1366306118218170373/5RQ3TK8X_normal.jpg\",\"created_at\":\"2012-05-29T16:28:09.000Z\",\"pinned_tweet_id\":\"1284128128210796545\",\"verified\":false,\"username\":\"UrsulaBrinkmann\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/PCoLqPOgEI\",\"expanded_url\":\"http://www.interculturalreadiness.com\",\"display_url\":\"interculturalreadiness.com\"}]}},\"name\":\"Ursula Brinkmann\",\"protected\":false,\"description\":\"Author 'Intercultural Readiness' (Palgrave). Intercultural Readiness Certification September October 2+3 December 2021. 
Contact us for more information\",\"id\":\"593841338\",\"url\":\"https://t.co/PCoLqPOgEI\",\"public_metrics\":{\"followers_count\":1358,\"following_count\":2249,\"tweet_count\":5740,\"listed_count\":50}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1251530661304942592/STby4pV6_normal.jpg\",\"created_at\":\"2009-12-23T06:38:26.000Z\",\"verified\":false,\"username\":\"Manya_G\",\"name\":\"Manya G.\",\"protected\":false,\"description\":\"\",\"id\":\"98816776\",\"url\":\"\",\"public_metrics\":{\"followers_count\":329,\"following_count\":260,\"tweet_count\":6168,\"listed_count\":4}},{\"location\":\"Dresden\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1123289805994045440/CjvvZnzW_normal.png\",\"created_at\":\"2009-08-22T21:11:53.000Z\",\"pinned_tweet_id\":\"1466837840080351241\",\"verified\":false,\"username\":\"addnme\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/6WCLELV28u\",\"expanded_url\":\"http://www.addn.me\",\"display_url\":\"addn.me\"}]}},\"name\":\"Alternative Dresden News\",\"protected\":false,\"description\":\"Nachrichtenportal für eine kritische Öffentlichkeit in Dresden und Umgebung mit Schwerpunkten auf Antifa, Nazis, Freiräumen, Kultur und sozialen Themen.\",\"id\":\"67984352\",\"url\":\"https://t.co/6WCLELV28u\",\"public_metrics\":{\"followers_count\":4554,\"following_count\":647,\"tweet_count\":8420,\"listed_count\":0}}],\"tweets\":[{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266704570190790656\",\"entities\":{\"mentions\":[{\"start\":49,\"end\":64,\"username\":\"polizei_nrw_ge\",\"id\":\"2389272182\"}],\"hashtags\":[{\"start\":32,\"end\":41,\"tag\":\"Datteln4\"},{\"start\":253,\"end\":266,\"tag\":\"StopDatteln4\"},{\"start\":267,\"end\":284,\"tag\":\"FridaysForFuture\"}]},\"text\":\"Bei d. friedlichen Protesten um #Datteln4 hat d. @polizei_nrw_ge gerade 5 Aktivist*innen aus d. Masse gezogen, nieder geprügelt & m. Kabelbindern gefesselt. Wir bleiben sitzen bis d. 
Gefangenen freigelassen wurden. Wir sind friedlich? Was seid ihr? #StopDatteln4 #FridaysForFuture\",\"created_at\":\"2020-05-30T12:14:38.000Z\",\"lang\":\"de\",\"author_id\":\"1080927540422815744\",\"id\":\"1266704570190790656\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":153,\"reply_count\":15,\"like_count\":486,\"quote_count\":14}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266816466512297984\",\"attachments\":{\"media_keys\":[\"3_1266816456924299264\"]},\"text\":\"#Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. Dieser Teufel finanziert #NGOs, Zeitungen und #FridaysForFuture. Er gehört hinter Gittern!!! #GeorgeFloyd #mineapolis\\n#AntifaTerrorists #riots2020 https://t.co/UESkGHg3QG\",\"created_at\":\"2020-05-30T19:39:16.000Z\",\"entities\":{\"hashtags\":[{\"start\":0,\"end\":6,\"tag\":\"Soros\"},{\"start\":31,\"end\":36,\"tag\":\"Geld\"},{\"start\":99,\"end\":104,\"tag\":\"NGOs\"},{\"start\":120,\"end\":137,\"tag\":\"FridaysForFuture\"},{\"start\":167,\"end\":179,\"tag\":\"GeorgeFloyd\"},{\"start\":180,\"end\":191,\"tag\":\"mineapolis\"},{\"start\":192,\"end\":209,\"tag\":\"AntifaTerrorists\"},{\"start\":210,\"end\":220,\"tag\":\"riots2020\"}],\"urls\":[{\"start\":221,\"end\":244,\"url\":\"https://t.co/UESkGHg3QG\",\"expanded_url\":\"https://twitter.com/TheReal32492440/status/1266816466512297984/photo/1\",\"display_url\":\"pic.twitter.com/UESkGHg3QG\"}]},\"lang\":\"de\",\"author_id\":\"1155848155893837825\",\"id\":\"1266816466512297984\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":100,\"reply_count\":29,\"like_count\":148,\"quote_count\":9}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266817176687779846\",\"attachments\":{\"media_keys\":[\"3_1266817169398038529\",\"3_1266817169460953089\",\"3_1266817169477730306\",\"3_1266817170455101441\"]},\"text\":\"Das Kohleausstiegsgesetz darf so nicht verabschiedet werden. 
Macht auf das Thema aufmerksam und schreibt Euren Bundestagsabgeordneten!\\nhttps://t.co/EpLthvP2MF\\n#FridaysForFuture #StopDatteln4 #Kohlestopp #KeinKonsens https://t.co/vPdW00UVB0\",\"created_at\":\"2020-05-30T19:42:05.000Z\",\"entities\":{\"hashtags\":[{\"start\":159,\"end\":176,\"tag\":\"FridaysForFuture\"},{\"start\":177,\"end\":190,\"tag\":\"StopDatteln4\"},{\"start\":191,\"end\":202,\"tag\":\"Kohlestopp\"},{\"start\":203,\"end\":215,\"tag\":\"KeinKonsens\"}],\"urls\":[{\"start\":135,\"end\":158,\"url\":\"https://t.co/EpLthvP2MF\",\"expanded_url\":\"https://www.bundestag.de/abgeordnete\",\"display_url\":\"bundestag.de/abgeordnete\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"}]},\"lang\":\"de\",\"author_id\":\"1073492498050416640\",\"id\":\"1266817176687779846\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":33,\"reply_count\":0,\"like_count\":57,\"quote_count\":1}},{\"source\":\"Twitter for 
Android\",\"conversation_id\":\"1266779244484575232\",\"entities\":{\"mentions\":[{\"start\":35,\"end\":43,\"username\":\"ICOS_RI\",\"id\":\"3216719140\"},{\"start\":161,\"end\":165,\"username\":\"CDU\",\"id\":\"20429858\"},{\"start\":166,\"end\":172,\"username\":\"spdde\",\"id\":\"26458162\"}],\"hashtags\":[{\"start\":126,\"end\":138,\"tag\":\"Abfckprämie\"},{\"start\":139,\"end\":148,\"tag\":\"Datteln4\"},{\"start\":173,\"end\":190,\"tag\":\"FridaysforFuture\"},{\"start\":191,\"end\":211,\"tag\":\"ClimateStrikeOnline\"},{\"start\":212,\"end\":225,\"tag\":\"StopDatteln4\"}],\"urls\":[{\"start\":226,\"end\":249,\"url\":\"https://t.co/JtEyiQLd2H\",\"expanded_url\":\"https://youtu.be/oOEyQtgRd54\",\"display_url\":\"youtu.be/oOEyQtgRd54\",\"status\":200,\"unwound_url\":\"https://www.youtube.com/watch?v=oOEyQtgRd54&feature=youtu.be\"}]},\"text\":\"Gestern hatten wir ein CO2FFEE mit @ICOS_RI über die Arktis: Wissenschaft: wir müssen Emissionen schnell reduzieren. Politik: #Abfckprämie #Datteln4 Super Job! 
@CDU @spdde\\n#FridaysforFuture #ClimateStrikeOnline\\n#StopDatteln4 https://t.co/JtEyiQLd2H\",\"created_at\":\"2020-05-30T17:11:22.000Z\",\"lang\":\"de\",\"author_id\":\"2816669118\",\"id\":\"1266779244484575232\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":2,\"reply_count\":1,\"like_count\":3,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266067475155869696\",\"attachments\":{\"media_keys\":[\"3_1266819103513337857\"]},\"entities\":{\"mentions\":[{\"start\":63,\"end\":79,\"username\":\"StudisForFuture\",\"id\":\"1121882665064501248\"},{\"start\":81,\"end\":94,\"username\":\"ADFC_Dresden\",\"id\":\"125625502\"},{\"start\":96,\"end\":108,\"username\":\"P4F_Dresden\",\"id\":\"1136652369859682305\"},{\"start\":113,\"end\":125,\"username\":\"BundSachsen\",\"id\":\"1003560485315383296\"}],\"hashtags\":[{\"start\":160,\"end\":173,\"tag\":\"Abfckpraemie\"},{\"start\":238,\"end\":255,\"tag\":\"FridaysForFuture\"}],\"urls\":[{\"start\":256,\"end\":279,\"url\":\"https://t.co/PpdX7djZBs\",\"expanded_url\":\"https://twitter.com/FFFDresden/status/1266819115236364290/photo/1\",\"display_url\":\"pic.twitter.com/PpdX7djZBs\"}]},\"text\":\"Neben unserer Fahrraddemo finden am Dienstag auch Aktionen von @StudisForFuture, @ADFC_Dresden, @P4F_Dresden und @BundSachsen statt. Seid am 02.0. dabei! Weder #Abfckpraemie, noch unweltschädliche Subventionen dürfen ins Konjunkturpaket! 
#FridaysForFuture https://t.co/PpdX7djZBs\",\"created_at\":\"2020-05-30T19:49:48.000Z\",\"lang\":\"de\",\"in_reply_to_user_id\":\"1081603778699296770\",\"author_id\":\"1081603778699296770\",\"id\":\"1266819115236364290\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"replied_to\",\"id\":\"1266067475155869696\"}],\"public_metrics\":{\"retweet_count\":6,\"reply_count\":3,\"like_count\":12,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266677828801695744\",\"entities\":{\"mentions\":[{\"start\":0,\"end\":11,\"username\":\"LudgerWess\",\"id\":\"989716693\"}],\"hashtags\":[{\"start\":55,\"end\":72,\"tag\":\"FridaysForFuture\"}]},\"text\":\"@LudgerWess Jetzt ist mir klar, weshalb die Kinder von #FridaysForFuture immer behaupten, sie würden \\\"die Meinung der Wissenschaft\\\" repräsentieren, was in sich gesehen doppelter Nonsens ist. \\uD83D\\uDE44\",\"created_at\":\"2020-05-30T20:37:22.000Z\",\"lang\":\"de\",\"in_reply_to_user_id\":\"989716693\",\"author_id\":\"107357635\",\"id\":\"1266831085582913538\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"replied_to\",\"id\":\"1266677828801695744\"}],\"public_metrics\":{\"retweet_count\":0,\"reply_count\":1,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266436838190911497\",\"attachments\":{\"media_keys\":[\"3_1266436070302003201\"]},\"text\":\"Super Schildermeer am Heldenplatz heute! 
\\uD83D\\uDCAA\\uD83C\\uDFFB\\uD83D\\uDC4F\\uD83C\\uDFFB\\uD83D\\uDC9A\\n\\n#FridaysForFuture sind kreativ wie eh und je, schön, euch wieder \\\"live\\\" zu sehen \\uD83D\\uDE0D https://t.co/sQFsra6LH2\",\"created_at\":\"2020-05-29T18:30:46.000Z\",\"entities\":{\"hashtags\":[{\"start\":48,\"end\":65,\"tag\":\"FridaysForFuture\"}],\"urls\":[{\"start\":131,\"end\":154,\"url\":\"https://t.co/sQFsra6LH2\",\"expanded_url\":\"https://twitter.com/Manya_G/status/1266436838190911497/photo/1\",\"display_url\":\"pic.twitter.com/sQFsra6LH2\"}]},\"lang\":\"de\",\"author_id\":\"98816776\",\"id\":\"1266436838190911497\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":121,\"reply_count\":6,\"like_count\":799,\"quote_count\":2}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266722835252129794\",\"text\":\"Fahrradprotest für sichere Karl-Marx-Straße\\n\\n#Verkehrswende #VerkehrswendeDresden #Dresden #Sachsen #Fahrradprotest #ADFC #FridaysForFuture #FFFDresden #Autogipfel #Abwrackprämie #GhostBike #StVO 
\\n\\nhttps://t.co/OjUBpx4PIK\",\"created_at\":\"2020-05-30T13:27:13.000Z\",\"entities\":{\"hashtags\":[{\"start\":45,\"end\":59,\"tag\":\"Verkehrswende\"},{\"start\":60,\"end\":81,\"tag\":\"VerkehrswendeDresden\"},{\"start\":82,\"end\":90,\"tag\":\"Dresden\"},{\"start\":91,\"end\":99,\"tag\":\"Sachsen\"},{\"start\":100,\"end\":115,\"tag\":\"Fahrradprotest\"},{\"start\":116,\"end\":121,\"tag\":\"ADFC\"},{\"start\":122,\"end\":139,\"tag\":\"FridaysForFuture\"},{\"start\":140,\"end\":151,\"tag\":\"FFFDresden\"},{\"start\":152,\"end\":163,\"tag\":\"Autogipfel\"},{\"start\":164,\"end\":178,\"tag\":\"Abwrackprämie\"},{\"start\":179,\"end\":189,\"tag\":\"GhostBike\"},{\"start\":190,\"end\":195,\"tag\":\"StVO\"}],\"urls\":[{\"start\":198,\"end\":221,\"url\":\"https://t.co/OjUBpx4PIK\",\"expanded_url\":\"https://www.addn.me/soziales/fahrradprotest-fuer-sichere-karl-marx-strasse/\",\"display_url\":\"addn.me/soziales/fahrr…\",\"status\":200,\"unwound_url\":\"https://www.addn.me/soziales/fahrradprotest-fuer-sichere-karl-marx-strasse/\"}]},\"lang\":\"de\",\"author_id\":\"67984352\",\"id\":\"1266722835252129794\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":2,\"reply_count\":1,\"like_count\":4,\"quote_count\":0}}]},\"errors\":[{\"value\":\"1\",\"detail\":\"Could not find tweet with ids: [1].\",\"title\":\"Not Found Error\",\"resource_type\":\"tweet\",\"parameter\":\"ids\",\"resource_id\":\"1\",\"type\":\"https://api.twitter.com/2/problems/resource-not-found\"},{\"value\":\"2\",\"detail\":\"Could not find tweet with ids: [2].\",\"title\":\"Not Found Error\",\"resource_type\":\"tweet\",\"parameter\":\"ids\",\"resource_id\":\"2\",\"type\":\"https://api.twitter.com/2/problems/resource-not-found\"}]}"),
date = structure(1639946717, class = c("POSIXct", "POSIXt"
), tzone = "GMT"), times = c(redirect = 0, namelookup = 3.1e-05,
connect = 3.3e-05, pretransfer = 0.000121, starttransfer = 0.474505,
total = 0.475566)), class = "response")
| /tests/testthat/api.twitter.com/2/tweets-0ffabd.R | permissive | cjbarrie/academictwitteR | R | false | false | 51,278 | r | structure(list(url = "https://api.twitter.com/2/tweets?tweet.fields=attachments%2Cauthor_id%2Cconversation_id%2Ccreated_at%2Centities%2Cgeo%2Cid%2Cin_reply_to_user_id%2Clang%2Cpublic_metrics%2Cpossibly_sensitive%2Creferenced_tweets%2Csource%2Ctext%2Cwithheld&user.fields=created_at%2Cdescription%2Centities%2Cid%2Clocation%2Cname%2Cpinned_tweet_id%2Cprofile_image_url%2Cprotected%2Cpublic_metrics%2Curl%2Cusername%2Cverified%2Cwithheld&expansions=author_id%2Centities.mentions.username%2Cgeo.place_id%2Cin_reply_to_user_id%2Creferenced_tweets.id%2Creferenced_tweets.id.author_id&place.fields=contained_within%2Ccountry%2Ccountry_code%2Cfull_name%2Cgeo%2Cid%2Cname%2Cplace_type&ids=1%2C2%2C1266867327079002121%2C1266866660713127936%2C1266864490446012418%2C1266860737244336129%2C1266859737615826944%2C1266859455586676736%2C1266858090143588352%2C1266857669157097473%2C1266856357954756609%2C1266855807699861506%2C1266855344086663169%2C1266854627758276608%2C1266854586188476421%2C1266854533889757187%2C1266853931247906816%2C1266853419291234312%2C1266852781526302722%2C1266852099163291650",
status_code = 200L, headers = structure(list(date = "Sun, 19 Dec 2021 20:45:17 UTC",
server = "tsa_o", `api-version` = "2.32", `content-type` = "application/json; charset=utf-8",
`cache-control` = "no-cache, no-store, max-age=0", `content-length` = "10146",
`x-access-level` = "read", `x-frame-options` = "SAMEORIGIN",
`content-encoding` = "gzip", `x-xss-protection` = "0",
`x-rate-limit-limit` = "300", `x-rate-limit-reset` = "1639947531",
`content-disposition` = "attachment; filename=json.json",
`x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "287",
`strict-transport-security` = "max-age=631138519", `x-response-time` = "458",
`x-connection-hash` = "7d54fc61481fbbf761f96e7412fcce2a506bf85407f0f6725ae81f8d24a33064"), class = c("insensitive",
"list")), all_headers = list(list(status = 200L, version = "HTTP/2",
headers = structure(list(date = "Sun, 19 Dec 2021 20:45:17 UTC",
server = "tsa_o", `api-version` = "2.32", `content-type` = "application/json; charset=utf-8",
`cache-control` = "no-cache, no-store, max-age=0",
`content-length` = "10146", `x-access-level` = "read",
`x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip",
`x-xss-protection` = "0", `x-rate-limit-limit` = "300",
`x-rate-limit-reset` = "1639947531", `content-disposition` = "attachment; filename=json.json",
`x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "287",
`strict-transport-security` = "max-age=631138519",
`x-response-time` = "458", `x-connection-hash` = "7d54fc61481fbbf761f96e7412fcce2a506bf85407f0f6725ae81f8d24a33064"), class = c("insensitive",
"list")))), cookies = structure(list(domain = c(".twitter.com",
".twitter.com", ".twitter.com", ".twitter.com"), flag = c(TRUE,
TRUE, TRUE, TRUE), path = c("/", "/", "/", "/"), secure = c(TRUE,
TRUE, TRUE, TRUE), expiration = structure(c(1702744284, 1702744284,
1702744284, 1702744284), class = c("POSIXct", "POSIXt")),
name = c("guest_id_marketing", "guest_id_ads", "personalization_id",
"guest_id"), value = c("REDACTED", "REDACTED", "REDACTED",
"REDACTED")), row.names = c(NA, -4L), class = "data.frame"),
content = charToRaw("{\"data\":[{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266867327079002121\",\"attachments\":{\"media_keys\":[\"3_1266867313988509697\"]},\"text\":\"Diese Rücksichtslosigkeit kotzt mich langsam an: Nein, es ist total uncool, seinen Müll wegzuwerfen. \\n\\nIch sagte schon zu Zeiten von #FridaysForFuture , dass es sinnvoller wäre, am FR Nachmittag mit Müllbeuteln bewaffnet die REGIONALE Natur zu säubern, um ein Gefühl zu erhalten. https://t.co/lT2YVFpXit\",\"created_at\":\"2020-05-30T23:01:22.000Z\",\"entities\":{\"hashtags\":[{\"start\":133,\"end\":150,\"tag\":\"FridaysForFuture\"}],\"urls\":[{\"start\":280,\"end\":303,\"url\":\"https://t.co/lT2YVFpXit\",\"expanded_url\":\"https://twitter.com/Mr_Twoflower/status/1266867327079002121/photo/1\",\"display_url\":\"pic.twitter.com/lT2YVFpXit\"}]},\"lang\":\"de\",\"author_id\":\"3850413016\",\"id\":\"1266867327079002121\",\"possibly_sensitive\":true,\"public_metrics\":{\"retweet_count\":0,\"reply_count\":1,\"like_count\":5,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266866660713127936\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"bremenforfuture\",\"id\":\"1080927540422815744\"},{\"start\":70,\"end\":85,\"username\":\"polizei_nrw_ge\",\"id\":\"2389272182\"}],\"hashtags\":[{\"start\":53,\"end\":62,\"tag\":\"Datteln4\"}]},\"text\":\"RT @bremenforfuture: Bei d. friedlichen Protesten um #Datteln4 hat d. @polizei_nrw_ge gerade 5 Aktivist*innen aus d. 
Masse gezogen, nieder…\",\"created_at\":\"2020-05-30T22:58:43.000Z\",\"lang\":\"de\",\"author_id\":\"179629368\",\"id\":\"1266866660713127936\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266704570190790656\"}],\"public_metrics\":{\"retweet_count\":153,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266864490446012418\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"bremenforfuture\",\"id\":\"1080927540422815744\"},{\"start\":70,\"end\":85,\"username\":\"polizei_nrw_ge\",\"id\":\"2389272182\"}],\"hashtags\":[{\"start\":53,\"end\":62,\"tag\":\"Datteln4\"}]},\"text\":\"RT @bremenforfuture: Bei d. friedlichen Protesten um #Datteln4 hat d. @polizei_nrw_ge gerade 5 Aktivist*innen aus d. Masse gezogen, nieder…\",\"created_at\":\"2020-05-30T22:50:06.000Z\",\"lang\":\"de\",\"author_id\":\"728209251190120449\",\"id\":\"1266864490446012418\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266704570190790656\"}],\"public_metrics\":{\"retweet_count\":153,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266860737244336129\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:35:11.000Z\",\"lang\":\"de\",\"author_id\":\"3844151483\",\"id\":\"1266860737244336129\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266859737615826944\",\"text\":\"RT @fff_2209: Das Kohleausstiegsgesetz darf so nicht verabschiedet werden. Macht auf das Thema aufmerksam und schreibt Euren Bundestagsabge…\",\"created_at\":\"2020-05-30T22:31:13.000Z\",\"lang\":\"de\",\"author_id\":\"61153698\",\"id\":\"1266859737615826944\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266817176687779846\"}],\"public_metrics\":{\"retweet_count\":33,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266859455586676736\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:30:05.000Z\",\"lang\":\"de\",\"author_id\":\"871879419559903232\",\"id\":\"1266859455586676736\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266858090143588352\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":12,\"username\":\"wlkutsch\",\"id\":\"2816669118\"},{\"start\":49,\"end\":57,\"username\":\"ICOS_RI\",\"id\":\"3216719140\"}]},\"text\":\"RT @wlkutsch: Gestern hatten wir ein CO2FFEE mit @ICOS_RI über die Arktis: Wissenschaft: wir müssen Emissionen schnell reduzieren. Politik:…\",\"created_at\":\"2020-05-30T22:24:40.000Z\",\"lang\":\"de\",\"author_id\":\"1180995990284853248\",\"id\":\"1266858090143588352\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266779244484575232\"}],\"public_metrics\":{\"retweet_count\":2,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266857669157097473\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:23:00.000Z\",\"lang\":\"de\",\"author_id\":\"1242872854925975552\",\"id\":\"1266857669157097473\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266856357954756609\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:17:47.000Z\",\"lang\":\"de\",\"author_id\":\"1027180824335208448\",\"id\":\"1266856357954756609\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266855807699861506\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:15:36.000Z\",\"lang\":\"de\",\"author_id\":\"817842489814118401\",\"id\":\"1266855807699861506\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266855344086663169\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:13:45.000Z\",\"lang\":\"de\",\"author_id\":\"1007979497965178880\",\"id\":\"1266855344086663169\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266854627758276608\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:10:54.000Z\",\"lang\":\"de\",\"author_id\":\"1220059857535086597\",\"id\":\"1266854627758276608\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266854586188476421\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":14,\"username\":\"FFFDresden\",\"id\":\"1081603778699296770\"},{\"start\":79,\"end\":95,\"username\":\"StudisForFuture\",\"id\":\"1121882665064501248\"},{\"start\":97,\"end\":110,\"username\":\"ADFC_Dresden\",\"id\":\"125625502\"},{\"start\":112,\"end\":124,\"username\":\"P4F_Dresden\",\"id\":\"1136652369859682305\"}]},\"text\":\"RT @FFFDresden: Neben unserer Fahrraddemo finden am Dienstag auch Aktionen von @StudisForFuture, @ADFC_Dresden, @P4F_Dresden und @BundSachs…\",\"created_at\":\"2020-05-30T22:10:45.000Z\",\"lang\":\"de\",\"author_id\":\"1180995990284853248\",\"id\":\"1266854586188476421\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266819115236364290\"}],\"public_metrics\":{\"retweet_count\":6,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266854533889757187\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:10:32.000Z\",\"lang\":\"de\",\"author_id\":\"255637301\",\"id\":\"1266854533889757187\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266677828801695744\",\"entities\":{\"mentions\":[{\"start\":0,\"end\":15,\"username\":\"Bastian_Atzger\",\"id\":\"107357635\"},{\"start\":16,\"end\":27,\"username\":\"LudgerWess\",\"id\":\"989716693\"}],\"hashtags\":[{\"start\":152,\"end\":169,\"tag\":\"FridaysForFuture\"}]},\"text\":\"@Bastian_Atzger @LudgerWess Wer nicht darauf angewiesen ist, Hetze zu betreiben, hätte vielleicht erkannt, daß diese Ausschnitte aus Plakaten weder von #FridaysForFuture noch aus Deutschland stammen.\",\"created_at\":\"2020-05-30T22:08:08.000Z\",\"lang\":\"de\",\"in_reply_to_user_id\":\"107357635\",\"author_id\":\"126978307\",\"id\":\"1266853931247906816\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"replied_to\",\"id\":\"1266831085582913538\"}],\"public_metrics\":{\"retweet_count\":0,\"reply_count\":1,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPad\",\"conversation_id\":\"1266853419291234312\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":19,\"username\":\"TheReal32492440\",\"id\":\"1155848155893837825\"}],\"hashtags\":[{\"start\":21,\"end\":27,\"tag\":\"Soros\"},{\"start\":52,\"end\":57,\"tag\":\"Geld\"},{\"start\":120,\"end\":125,\"tag\":\"NGOs\"}]},\"text\":\"RT @TheReal32492440: #Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. 
Dieser Teufel finanziert #NGOs, Zeitungen un…\",\"created_at\":\"2020-05-30T22:06:06.000Z\",\"lang\":\"de\",\"author_id\":\"1743316638\",\"id\":\"1266853419291234312\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266816466512297984\"}],\"public_metrics\":{\"retweet_count\":100,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266852781526302722\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":11,\"username\":\"Manya_G\",\"id\":\"98816776\"}],\"hashtags\":[{\"start\":61,\"end\":78,\"tag\":\"FridaysForFuture\"}]},\"text\":\"RT @Manya_G: Super Schildermeer am Heldenplatz heute! \\uD83D\\uDCAA\\uD83C\\uDFFB\\uD83D\\uDC4F\\uD83C\\uDFFB\\uD83D\\uDC9A\\n\\n#FridaysForFuture sind kreativ wie eh und je, schön, euch wieder \\\"live\\\" zu seh…\",\"created_at\":\"2020-05-30T22:03:34.000Z\",\"lang\":\"de\",\"author_id\":\"593841338\",\"id\":\"1266852781526302722\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266436838190911497\"}],\"public_metrics\":{\"retweet_count\":121,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266852099163291650\",\"entities\":{\"mentions\":[{\"start\":3,\"end\":10,\"username\":\"addnme\",\"id\":\"67984352\"}],\"hashtags\":[{\"start\":57,\"end\":71,\"tag\":\"Verkehrswende\"},{\"start\":72,\"end\":93,\"tag\":\"VerkehrswendeDresden\"},{\"start\":94,\"end\":102,\"tag\":\"Dresden\"},{\"start\":103,\"end\":111,\"tag\":\"Sachsen\"},{\"start\":112,\"end\":127,\"tag\":\"Fahrradprotest\"},{\"start\":128,\"end\":133,\"tag\":\"ADFC\"}]},\"text\":\"RT @addnme: Fahrradprotest für sichere Karl-Marx-Straße\\n\\n#Verkehrswende #VerkehrswendeDresden #Dresden #Sachsen #Fahrradprotest #ADFC 
#Frid…\",\"created_at\":\"2020-05-30T22:00:52.000Z\",\"lang\":\"de\",\"author_id\":\"1180995990284853248\",\"id\":\"1266852099163291650\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"retweeted\",\"id\":\"1266722835252129794\"}],\"public_metrics\":{\"retweet_count\":2,\"reply_count\":0,\"like_count\":0,\"quote_count\":0}}],\"includes\":{\"users\":[{\"location\":\"Am Liebsten am Gipfelkreuz.\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1470089012748636168/Z2uCmxwU_normal.jpg\",\"created_at\":\"2015-10-03T07:33:16.000Z\",\"pinned_tweet_id\":\"1365455686289793026\",\"verified\":false,\"username\":\"Mr_Twoflower\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/Vq9r6Da3Ba\",\"expanded_url\":\"https://zwischenmenschliches.wordpress.com/2013/02/27/wie-mein-leben-zum-ersten-mal-verandert-wurde/\",\"display_url\":\"zwischenmenschliches.wordpress.com/2013/02/27/wie…\"}]}},\"name\":\"derWandersmann\",\"protected\":false,\"description\":\"Nicht reden! Nicht denken! Nicht fragen! Einfach machen! So funktioniert das auch mit dem Küssen! ◽️ Wenn es Dich traurig macht, warum machst Du es dann?\",\"id\":\"3850413016\",\"url\":\"https://t.co/Vq9r6Da3Ba\",\"public_metrics\":{\"followers_count\":2168,\"following_count\":449,\"tweet_count\":76064,\"listed_count\":37}},{\"location\":\"Essen\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1435303060897738758/uwAgQFEz_normal.jpg\",\"created_at\":\"2010-08-17T19:18:29.000Z\",\"pinned_tweet_id\":\"1233101631107010562\",\"verified\":false,\"username\":\"MaddyLina25\",\"name\":\"Madeleine Montberg\",\"protected\":false,\"description\":\"I'm a German GTA (= Gestaltungstechnische Assistentin in Grafik- und Objektdesign/ Assistant of Graphic and Design) and an artist. 
she/her (Insta: maddylina25)\",\"id\":\"179629368\",\"url\":\"\",\"public_metrics\":{\"followers_count\":495,\"following_count\":65,\"tweet_count\":151074,\"listed_count\":1}},{\"location\":\"Bremen, Germany\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1245682869068017665/BpEWcCOX_normal.jpg\",\"created_at\":\"2019-01-03T20:43:03.000Z\",\"pinned_tweet_id\":\"1441386050308427778\",\"verified\":false,\"username\":\"bremenforfuture\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/KHkwU9Jan5\",\"expanded_url\":\"https://fridaysforfuture-bremen.de/\",\"display_url\":\"fridaysforfuture-bremen.de\"}]}},\"name\":\"Fridays for Future Bremen\",\"protected\":false,\"description\":\"Wir streiken für Klimagerechtigkeit!\",\"id\":\"1080927540422815744\",\"url\":\"https://t.co/KHkwU9Jan5\",\"public_metrics\":{\"followers_count\":2970,\"following_count\":130,\"tweet_count\":606,\"listed_count\":41}},{\"location\":\"Gelsenkirchen\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1421028257558843394/EK1_tRYc_normal.jpg\",\"created_at\":\"2014-03-14T15:12:47.000Z\",\"verified\":true,\"username\":\"polizei_nrw_ge\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/OzAD0DAAhq\",\"expanded_url\":\"http://gelsenkirchen.polizei.nrw\",\"display_url\":\"gelsenkirchen.polizei.nrw\"}]},\"description\":{\"urls\":[{\"start\":95,\"end\":118,\"url\":\"https://t.co/vigzJs5xAR\",\"expanded_url\":\"http://url.nrw/43N\",\"display_url\":\"url.nrw/43N\"}]}},\"name\":\"Polizei NRW GE\",\"protected\":false,\"description\":\"Polizei Gelsenkirchen. Rathausplatz 4, 45877 Gelsenkirchen, Tel.: 0209 / 365 - 0. 
Datenschutz: https://t.co/vigzJs5xAR\",\"id\":\"2389272182\",\"url\":\"https://t.co/OzAD0DAAhq\",\"public_metrics\":{\"followers_count\":8796,\"following_count\":135,\"tweet_count\":4622,\"listed_count\":138}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/862004484540682240/HbuGRQYo_normal.jpg\",\"created_at\":\"2016-05-05T13:06:29.000Z\",\"pinned_tweet_id\":\"1267156926208540677\",\"verified\":false,\"username\":\"SiggiSGE\",\"name\":\"Siegfried\",\"protected\":false,\"description\":\"Jeder verdient es mit Respekt behandelt zu werden, auch wenn wir nicht der gleichen Meinung sind.\\nNie wieder Krieg in Europa & weltweite Kriege beenden\",\"id\":\"728209251190120449\",\"url\":\"\",\"public_metrics\":{\"followers_count\":522,\"following_count\":2289,\"tweet_count\":17719,\"listed_count\":0}},{\"location\":\"Hamburg\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1439299606513397763/ZC9sVoJM_normal.jpg\",\"created_at\":\"2015-10-02T15:42:30.000Z\",\"verified\":false,\"username\":\"L2279600\",\"name\":\"Rita\",\"protected\":false,\"description\":\"Der Nürnberger Kodex wurde eingeführt,damit Menschen nie wieder zu medizinischen Behandlungen gezwungen oder genötigt werden‼️\",\"id\":\"3844151483\",\"url\":\"\",\"public_metrics\":{\"followers_count\":1072,\"following_count\":1024,\"tweet_count\":27833,\"listed_count\":12}},{\"location\":\"Cologne \",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1190023297494376452/trykKe9U_normal.jpg\",\"created_at\":\"2019-07-29T14:30:49.000Z\",\"pinned_tweet_id\":\"1330566567839588357\",\"verified\":false,\"username\":\"TheReal32492440\",\"name\":\"Inspector Reality \\uD83D\\uDD75️♂️ \\uD83C\\uDDE9\\uD83C\\uDDEA \\uD83C\\uDDEC\\uD83C\\uDDF7 \\uD83C\\uDDFA\\uD83C\\uDDF8\",\"protected\":false,\"description\":\"Konservativ. Gastarbeiter Kind. \\uD83D\\uDD0E Make Germany great again! Gedanken sind Energie. 
Coronologe™ My immune system is created by god.\",\"id\":\"1155848155893837825\",\"url\":\"\",\"public_metrics\":{\"followers_count\":3897,\"following_count\":3207,\"tweet_count\":36497,\"listed_count\":29}},{\"location\":\"Ysmault\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/851757824845770752/glCaQdzZ_normal.jpg\",\"created_at\":\"2009-07-29T09:07:53.000Z\",\"verified\":false,\"username\":\"Scarcloud\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":54,\"end\":71,\"tag\":\"Klimakatastrophe\"}]}},\"name\":\"Red Lantern\",\"protected\":false,\"description\":\"'I'll always be angry.'\\nLebensziel: 2040 nicht an der #Klimakatastrophe krepieren.\\nUnited we stand, divided we fall.\",\"id\":\"61153698\",\"url\":\"\",\"public_metrics\":{\"followers_count\":80,\"following_count\":205,\"tweet_count\":3553,\"listed_count\":3}},{\"location\":\"Hochneukirch \",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1346147544368369672/UN8jezCT_normal.jpg\",\"created_at\":\"2018-12-14T08:18:50.000Z\",\"verified\":false,\"username\":\"fffHochneukirch\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":80,\"end\":97,\"tag\":\"FridaysForFuture\"},{\"start\":128,\"end\":144,\"tag\":\"LützerathBleibt\"}]}},\"name\":\"FFF Hochneukirch\",\"protected\":false,\"description\":\"Wir sind junge Menschen, die sich für ihre Zukunft einsetzen. 
Hier twittert die #FridaysForFuture Ortsgruppe aus Hochneukirch.\\uD83C\\uDF0D #LützerathBleibt\",\"id\":\"1073492498050416640\",\"url\":\"\",\"public_metrics\":{\"followers_count\":1957,\"following_count\":286,\"tweet_count\":2139,\"listed_count\":42}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1173750408537419781/iinbdQ3w_normal.jpg\",\"created_at\":\"2017-06-06T00:00:27.000Z\",\"pinned_tweet_id\":\"1354963867118563346\",\"verified\":false,\"username\":\"RehBln50\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":134,\"end\":148,\"tag\":\"genugistgenug\"}]}},\"name\":\"RehBln50 ❌\",\"protected\":false,\"description\":\"Definitiv wbl., gegen Vieles, z. B. Political Correctness, Betreutes Denken. Für Meinungsfreiheit, allerdings auch mit Manieren ! ;-)\\n#genugistgenug\",\"id\":\"871879419559903232\",\"url\":\"\",\"public_metrics\":{\"followers_count\":864,\"following_count\":1280,\"tweet_count\":47166,\"listed_count\":0}},{\"location\":\"Berlin, Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1441895077610143767/gXY0jbrg_normal.jpg\",\"created_at\":\"2019-10-06T23:59:42.000Z\",\"pinned_tweet_id\":\"1421396422180712449\",\"verified\":false,\"username\":\"ClaudiaP4F\",\"entities\":{\"description\":{\"hashtags\":[{\"start\":2,\"end\":16,\"tag\":\"Berlin4Future\"},{\"start\":19,\"end\":29,\"tag\":\"P4FBerlin\"},{\"start\":32,\"end\":45,\"tag\":\"bornat330ppm\"}]}},\"name\":\"\\uD83D\\uDC9AClaudia\\uD83D\\uDC9A\",\"protected\":false,\"description\":\"\\uD83D\\uDC9A #Berlin4Future \\uD83D\\uDC9A #P4FBerlin \\uD83D\\uDC9A #bornat330ppm \\uD83D\\uDC9A \\uD83D\\uDCAF 
linksgrünglänzend\",\"id\":\"1180995990284853248\",\"url\":\"\",\"public_metrics\":{\"followers_count\":198,\"following_count\":616,\"tweet_count\":10060,\"listed_count\":1}},{\"location\":\"Helsinki\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1424676001523585024/5k5Yl21g_normal.jpg\",\"created_at\":\"2014-09-18T09:31:13.000Z\",\"verified\":false,\"username\":\"wlkutsch\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/l44b9mw6iF\",\"expanded_url\":\"http://www.icos-ri.eu\",\"display_url\":\"icos-ri.eu\"}]},\"description\":{\"mentions\":[{\"start\":20,\"end\":28,\"username\":\"ICOS_RI\"}]}},\"name\":\"Werner Leo Kutsch\",\"protected\":false,\"description\":\"Director General of @ICOS_RI\",\"id\":\"2816669118\",\"url\":\"https://t.co/l44b9mw6iF\",\"public_metrics\":{\"followers_count\":716,\"following_count\":318,\"tweet_count\":1349,\"listed_count\":16}},{\"location\":\"Helsinki, Finnland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1237365121527222272/2OdKGWIv_normal.jpg\",\"created_at\":\"2015-04-28T10:37:40.000Z\",\"verified\":false,\"username\":\"ICOS_RI\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/jaOULcoPFy\",\"expanded_url\":\"https://www.icos-ri.eu/\",\"display_url\":\"icos-ri.eu\"}]}},\"name\":\"ICOS RI\",\"protected\":false,\"description\":\"European research infrastructure providing open & standardised greenhouse gas data to prevent climate change.\",\"id\":\"3216719140\",\"url\":\"https://t.co/jaOULcoPFy\",\"public_metrics\":{\"followers_count\":2185,\"following_count\":737,\"tweet_count\":2658,\"listed_count\":47}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1265724538593202178/44uO4Q3L_normal.jpg\",\"created_at\":\"2020-03-25T17:56:52.000Z\",\"pinned_tweet_id\":\"1410688500798722050\",\"verified\":false,\"username\":\"Charlyle7\",\"name\":\"Charlyle\",\"protected\":false,\"description\":\"Ich benutze meinen Verstand und bilde mir 
meine eigene Meinung.\",\"id\":\"1242872854925975552\",\"url\":\"\",\"public_metrics\":{\"followers_count\":427,\"following_count\":308,\"tweet_count\":8551,\"listed_count\":0}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1418612911371702274/rS70orqB_normal.jpg\",\"created_at\":\"2018-08-08T13:12:47.000Z\",\"verified\":false,\"username\":\"EllyB53292373\",\"name\":\"EllyB \\uD83C\\uDDF3\\uD83C\\uDDF1\\uD83C\\uDDEE\\uD83C\\uDDF1\\uD83C\\uDDE9\\uD83C\\uDDEA\",\"protected\":false,\"description\":\"\",\"id\":\"1027180824335208448\",\"url\":\"\",\"public_metrics\":{\"followers_count\":893,\"following_count\":487,\"tweet_count\":82609,\"listed_count\":2}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/847833492721467393/UzHloCb9_normal.jpg\",\"created_at\":\"2017-01-07T21:16:59.000Z\",\"pinned_tweet_id\":\"1272473885536137218\",\"verified\":false,\"username\":\"frankenzicke\",\"name\":\"enisa\",\"protected\":false,\"description\":\"Wir leben in einer Diktatur der Verblödeten, das ist die höchste und edelste Form der Sklaverei.// Gott stehe uns bei \\uD83D\\uDE4F\",\"id\":\"817842489814118401\",\"url\":\"\",\"public_metrics\":{\"followers_count\":2553,\"following_count\":1554,\"tweet_count\":91107,\"listed_count\":2}},{\"location\":\"NRW, Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1348577897393610757/7qHem2PP_normal.jpg\",\"created_at\":\"2018-06-16T13:33:34.000Z\",\"verified\":false,\"username\":\"RTErsc\",\"entities\":{\"description\":{\"mentions\":[{\"start\":112,\"end\":119,\"username\":\"RTErsc\"}]}},\"name\":\"RTErs\\uD83C\\uDDE9\\uD83C\\uDDEA\\uD83C\\uDDFA\\uD83C\\uDDF8\\uD83C\\uDDEA\\uD83C\\uDDF8MAGA*IFB*\",\"protected\":false,\"description\":\"Es ist nie zu spät - neu anzufangen! Wer aufgibt, hat den Kampf bereits verloren! Att.! 
\\nNEU - GETTR - unter : @RTErsc\",\"id\":\"1007979497965178880\",\"url\":\"\",\"public_metrics\":{\"followers_count\":6536,\"following_count\":6570,\"tweet_count\":155208,\"listed_count\":4}},{\"location\":\"\\uD83D\\uDE4B\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1220060457207242753/de4ZrMWa_normal.jpg\",\"created_at\":\"2020-01-22T19:05:37.000Z\",\"verified\":false,\"username\":\"Conny96418914\",\"name\":\"Conny\",\"protected\":false,\"description\":\"Lieber stehende sterben als kriechend leben\",\"id\":\"1220059857535086597\",\"url\":\"\",\"public_metrics\":{\"followers_count\":113,\"following_count\":145,\"tweet_count\":2711,\"listed_count\":0}},{\"location\":\"Dresden, Sachsen\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1444628319295709184/xoLenFzS_normal.jpg\",\"created_at\":\"2019-01-05T17:30:10.000Z\",\"pinned_tweet_id\":\"1441425967617089536\",\"verified\":false,\"username\":\"FFFDresden\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/mdPiW5NsEv\",\"expanded_url\":\"http://fffutu.re/dresden\",\"display_url\":\"fffutu.re/dresden\"}]},\"description\":{\"hashtags\":[{\"start\":116,\"end\":132,\"tag\":\"UprootTheSystem\"}]}},\"name\":\"Fridays for Future Dresden\",\"protected\":false,\"description\":\"Wir sind Schüler:innen und junge Menschen aus Dresden, die freitags für eine sozial-ökologische Wende streiken. 
\\n⋅ #UprootTheSystem ✊ \\n⋅\",\"id\":\"1081603778699296770\",\"url\":\"https://t.co/mdPiW5NsEv\",\"public_metrics\":{\"followers_count\":4189,\"following_count\":499,\"tweet_count\":2827,\"listed_count\":52}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1121882984020361217/MLdMP6eb_normal.jpg\",\"created_at\":\"2019-04-26T21:04:05.000Z\",\"verified\":false,\"username\":\"StudisForFuture\",\"name\":\"StudisForFuture Dresden\",\"protected\":false,\"description\":\"Wir sind eine Gruppe Studierender aus Dresden und solidarisieren uns mit der Fridays For Future Bewegung\",\"id\":\"1121882665064501248\",\"url\":\"\",\"public_metrics\":{\"followers_count\":86,\"following_count\":14,\"tweet_count\":13,\"listed_count\":2}},{\"location\":\"Dresden, Saxony\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/770030243/logo_normal.jpg\",\"created_at\":\"2010-03-23T11:04:00.000Z\",\"verified\":false,\"username\":\"ADFC_Dresden\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":22,\"url\":\"http://t.co/57lk74oIJe\",\"expanded_url\":\"http://www.adfc-dresden.de\",\"display_url\":\"adfc-dresden.de\"}]},\"description\":{\"urls\":[{\"start\":53,\"end\":76,\"url\":\"https://t.co/QC9HlftOPE\",\"expanded_url\":\"http://adfc-dresden.de/impressum\",\"display_url\":\"adfc-dresden.de/impressum\"}]}},\"name\":\"ADFC Dresden\",\"protected\":false,\"description\":\"Unterwegs für aktive Mobilität in Dresden seit 
1987.\\nhttps://t.co/QC9HlftOPE\",\"id\":\"125625502\",\"url\":\"http://t.co/57lk74oIJe\",\"public_metrics\":{\"followers_count\":2862,\"following_count\":198,\"tweet_count\":5354,\"listed_count\":67}},{\"location\":\"Dresden\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1367921039418744834/6mvom25w_normal.jpg\",\"created_at\":\"2019-06-06T15:13:37.000Z\",\"pinned_tweet_id\":\"1471446694109847556\",\"verified\":false,\"username\":\"P4F_Dresden\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/CvlWkCOhRW\",\"expanded_url\":\"http://dresden.parentsforfuture.de\",\"display_url\":\"dresden.parentsforfuture.de\"}]},\"description\":{\"mentions\":[{\"start\":50,\"end\":61,\"username\":\"FFFDresden\"}]}},\"name\":\"Parents For Future Dresden\",\"protected\":false,\"description\":\"Eltern aus Dresden und Umgebung. Wir unterstützen @FFFDresden. Wir suchen Helfer - sprich uns auf den Demos an und komm zu unseren Treffen.\",\"id\":\"1136652369859682305\",\"url\":\"https://t.co/CvlWkCOhRW\",\"public_metrics\":{\"followers_count\":1106,\"following_count\":913,\"tweet_count\":1640,\"listed_count\":26}},{\"location\":\"Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/738821341416181760/IFbSKtJ5_normal.jpg\",\"created_at\":\"2011-02-21T19:31:20.000Z\",\"verified\":false,\"username\":\"savannaSeKinner\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/R4O8AdFIvj\",\"expanded_url\":\"http://blog.t-originals.de\",\"display_url\":\"blog.t-originals.de\"}]}},\"name\":\"gab.ai/Katja 
\\uD83D\\uDC38\",\"protected\":false,\"description\":\"\",\"id\":\"255637301\",\"url\":\"https://t.co/R4O8AdFIvj\",\"public_metrics\":{\"followers_count\":807,\"following_count\":680,\"tweet_count\":75900,\"listed_count\":27}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1385992649157582848/-0HNRr2A_normal.jpg\",\"created_at\":\"2010-03-27T16:38:04.000Z\",\"verified\":false,\"username\":\"Kl_Stone\",\"name\":\"Klaus Steinfelder\",\"protected\":false,\"description\":\"Wenn man sich jeden Tag als 1 qmm vorstellt, dann reicht ein 3/4-DIN A4-Blatt Millimeterpapier für 80 Jahre Leben. Schon beunruhigend überschaubar irgendwie...\",\"id\":\"126978307\",\"url\":\"\",\"public_metrics\":{\"followers_count\":1030,\"following_count\":1557,\"tweet_count\":84652,\"listed_count\":5}},{\"location\":\"Baden-Württemberg & Minho, PT\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1060484934492667906/i6xHY5lm_normal.jpg\",\"created_at\":\"2010-01-22T09:03:27.000Z\",\"verified\":false,\"username\":\"Bastian_Atzger\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/QI1nTwuWsa\",\"expanded_url\":\"http://www.bastian-atzger.de\",\"display_url\":\"bastian-atzger.de\"}]}},\"name\":\"Bastian Atzger\",\"protected\":false,\"description\":\"\\uD83D\\uDCC8 Marketer\\n\\uD83D\\uDD0E Wirtschaftswissenschaftler / economic scientist\\n\\uD83D\\uDDF3 MITtelstandsunion BW / middle class politics\\n\\uD83D\\uDD2CSceptic and critical realist\\n\\uD83C\\uDDE9\\uD83C\\uDDEA \\uD83C\\uDDEA\\uD83C\\uDDFA\",\"id\":\"107357635\",\"url\":\"https://t.co/QI1nTwuWsa\",\"public_metrics\":{\"followers_count\":578,\"following_count\":476,\"tweet_count\":7564,\"listed_count\":34}},{\"location\":\"Hamburg, 
Deutschland\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1222199878325088264/rkeWxQQb_normal.jpg\",\"created_at\":\"2012-12-04T22:28:03.000Z\",\"pinned_tweet_id\":\"1361621295079817222\",\"verified\":false,\"username\":\"LudgerWess\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/X1JeSrZgXC\",\"expanded_url\":\"https://www.matthes-seitz-berlin.de/autor/ludger-wess.html\",\"display_url\":\"matthes-seitz-berlin.de/autor/ludger-w…\"}]}},\"name\":\"Ludger Wess\",\"protected\":false,\"description\":\"Drawn into biology since my first encounter with a microscope. Research in molecular biology, now science writer: fiction, non-fiction, commentaries.\",\"id\":\"989716693\",\"url\":\"https://t.co/X1JeSrZgXC\",\"public_metrics\":{\"followers_count\":3293,\"following_count\":616,\"tweet_count\":28042,\"listed_count\":62}},{\"profile_image_url\":\"https://abs.twimg.com/sticky/default_profile_images/default_profile_normal.png\",\"created_at\":\"2013-09-07T08:17:54.000Z\",\"verified\":false,\"username\":\"HansBrunner1\",\"name\":\"Hans Brunner\",\"protected\":false,\"description\":\"\",\"id\":\"1743316638\",\"url\":\"\",\"public_metrics\":{\"followers_count\":343,\"following_count\":670,\"tweet_count\":18423,\"listed_count\":0}},{\"location\":\"Amsterdam\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1366306118218170373/5RQ3TK8X_normal.jpg\",\"created_at\":\"2012-05-29T16:28:09.000Z\",\"pinned_tweet_id\":\"1284128128210796545\",\"verified\":false,\"username\":\"UrsulaBrinkmann\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/PCoLqPOgEI\",\"expanded_url\":\"http://www.interculturalreadiness.com\",\"display_url\":\"interculturalreadiness.com\"}]}},\"name\":\"Ursula Brinkmann\",\"protected\":false,\"description\":\"Author 'Intercultural Readiness' (Palgrave). Intercultural Readiness Certification September October 2+3 December 2021. 
Contact us for more information\",\"id\":\"593841338\",\"url\":\"https://t.co/PCoLqPOgEI\",\"public_metrics\":{\"followers_count\":1358,\"following_count\":2249,\"tweet_count\":5740,\"listed_count\":50}},{\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1251530661304942592/STby4pV6_normal.jpg\",\"created_at\":\"2009-12-23T06:38:26.000Z\",\"verified\":false,\"username\":\"Manya_G\",\"name\":\"Manya G.\",\"protected\":false,\"description\":\"\",\"id\":\"98816776\",\"url\":\"\",\"public_metrics\":{\"followers_count\":329,\"following_count\":260,\"tweet_count\":6168,\"listed_count\":4}},{\"location\":\"Dresden\",\"profile_image_url\":\"https://pbs.twimg.com/profile_images/1123289805994045440/CjvvZnzW_normal.png\",\"created_at\":\"2009-08-22T21:11:53.000Z\",\"pinned_tweet_id\":\"1466837840080351241\",\"verified\":false,\"username\":\"addnme\",\"entities\":{\"url\":{\"urls\":[{\"start\":0,\"end\":23,\"url\":\"https://t.co/6WCLELV28u\",\"expanded_url\":\"http://www.addn.me\",\"display_url\":\"addn.me\"}]}},\"name\":\"Alternative Dresden News\",\"protected\":false,\"description\":\"Nachrichtenportal für eine kritische Öffentlichkeit in Dresden und Umgebung mit Schwerpunkten auf Antifa, Nazis, Freiräumen, Kultur und sozialen Themen.\",\"id\":\"67984352\",\"url\":\"https://t.co/6WCLELV28u\",\"public_metrics\":{\"followers_count\":4554,\"following_count\":647,\"tweet_count\":8420,\"listed_count\":0}}],\"tweets\":[{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266704570190790656\",\"entities\":{\"mentions\":[{\"start\":49,\"end\":64,\"username\":\"polizei_nrw_ge\",\"id\":\"2389272182\"}],\"hashtags\":[{\"start\":32,\"end\":41,\"tag\":\"Datteln4\"},{\"start\":253,\"end\":266,\"tag\":\"StopDatteln4\"},{\"start\":267,\"end\":284,\"tag\":\"FridaysForFuture\"}]},\"text\":\"Bei d. friedlichen Protesten um #Datteln4 hat d. @polizei_nrw_ge gerade 5 Aktivist*innen aus d. Masse gezogen, nieder geprügelt & m. Kabelbindern gefesselt. Wir bleiben sitzen bis d. 
Gefangenen freigelassen wurden. Wir sind friedlich? Was seid ihr? #StopDatteln4 #FridaysForFuture\",\"created_at\":\"2020-05-30T12:14:38.000Z\",\"lang\":\"de\",\"author_id\":\"1080927540422815744\",\"id\":\"1266704570190790656\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":153,\"reply_count\":15,\"like_count\":486,\"quote_count\":14}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266816466512297984\",\"attachments\":{\"media_keys\":[\"3_1266816456924299264\"]},\"text\":\"#Soros hat es angekündigt: mit #Geld sorgt er für Unruhen und Zerstörung. Dieser Teufel finanziert #NGOs, Zeitungen und #FridaysForFuture. Er gehört hinter Gittern!!! #GeorgeFloyd #mineapolis\\n#AntifaTerrorists #riots2020 https://t.co/UESkGHg3QG\",\"created_at\":\"2020-05-30T19:39:16.000Z\",\"entities\":{\"hashtags\":[{\"start\":0,\"end\":6,\"tag\":\"Soros\"},{\"start\":31,\"end\":36,\"tag\":\"Geld\"},{\"start\":99,\"end\":104,\"tag\":\"NGOs\"},{\"start\":120,\"end\":137,\"tag\":\"FridaysForFuture\"},{\"start\":167,\"end\":179,\"tag\":\"GeorgeFloyd\"},{\"start\":180,\"end\":191,\"tag\":\"mineapolis\"},{\"start\":192,\"end\":209,\"tag\":\"AntifaTerrorists\"},{\"start\":210,\"end\":220,\"tag\":\"riots2020\"}],\"urls\":[{\"start\":221,\"end\":244,\"url\":\"https://t.co/UESkGHg3QG\",\"expanded_url\":\"https://twitter.com/TheReal32492440/status/1266816466512297984/photo/1\",\"display_url\":\"pic.twitter.com/UESkGHg3QG\"}]},\"lang\":\"de\",\"author_id\":\"1155848155893837825\",\"id\":\"1266816466512297984\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":100,\"reply_count\":29,\"like_count\":148,\"quote_count\":9}},{\"source\":\"Twitter for iPhone\",\"conversation_id\":\"1266817176687779846\",\"attachments\":{\"media_keys\":[\"3_1266817169398038529\",\"3_1266817169460953089\",\"3_1266817169477730306\",\"3_1266817170455101441\"]},\"text\":\"Das Kohleausstiegsgesetz darf so nicht verabschiedet werden. 
Macht auf das Thema aufmerksam und schreibt Euren Bundestagsabgeordneten!\\nhttps://t.co/EpLthvP2MF\\n#FridaysForFuture #StopDatteln4 #Kohlestopp #KeinKonsens https://t.co/vPdW00UVB0\",\"created_at\":\"2020-05-30T19:42:05.000Z\",\"entities\":{\"hashtags\":[{\"start\":159,\"end\":176,\"tag\":\"FridaysForFuture\"},{\"start\":177,\"end\":190,\"tag\":\"StopDatteln4\"},{\"start\":191,\"end\":202,\"tag\":\"Kohlestopp\"},{\"start\":203,\"end\":215,\"tag\":\"KeinKonsens\"}],\"urls\":[{\"start\":135,\"end\":158,\"url\":\"https://t.co/EpLthvP2MF\",\"expanded_url\":\"https://www.bundestag.de/abgeordnete\",\"display_url\":\"bundestag.de/abgeordnete\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"},{\"start\":216,\"end\":239,\"url\":\"https://t.co/vPdW00UVB0\",\"expanded_url\":\"https://twitter.com/fff_2209/status/1266817176687779846/photo/1\",\"display_url\":\"pic.twitter.com/vPdW00UVB0\"}]},\"lang\":\"de\",\"author_id\":\"1073492498050416640\",\"id\":\"1266817176687779846\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":33,\"reply_count\":0,\"like_count\":57,\"quote_count\":1}},{\"source\":\"Twitter for 
Android\",\"conversation_id\":\"1266779244484575232\",\"entities\":{\"mentions\":[{\"start\":35,\"end\":43,\"username\":\"ICOS_RI\",\"id\":\"3216719140\"},{\"start\":161,\"end\":165,\"username\":\"CDU\",\"id\":\"20429858\"},{\"start\":166,\"end\":172,\"username\":\"spdde\",\"id\":\"26458162\"}],\"hashtags\":[{\"start\":126,\"end\":138,\"tag\":\"Abfckprämie\"},{\"start\":139,\"end\":148,\"tag\":\"Datteln4\"},{\"start\":173,\"end\":190,\"tag\":\"FridaysforFuture\"},{\"start\":191,\"end\":211,\"tag\":\"ClimateStrikeOnline\"},{\"start\":212,\"end\":225,\"tag\":\"StopDatteln4\"}],\"urls\":[{\"start\":226,\"end\":249,\"url\":\"https://t.co/JtEyiQLd2H\",\"expanded_url\":\"https://youtu.be/oOEyQtgRd54\",\"display_url\":\"youtu.be/oOEyQtgRd54\",\"status\":200,\"unwound_url\":\"https://www.youtube.com/watch?v=oOEyQtgRd54&feature=youtu.be\"}]},\"text\":\"Gestern hatten wir ein CO2FFEE mit @ICOS_RI über die Arktis: Wissenschaft: wir müssen Emissionen schnell reduzieren. Politik: #Abfckprämie #Datteln4 Super Job! 
@CDU @spdde\\n#FridaysforFuture #ClimateStrikeOnline\\n#StopDatteln4 https://t.co/JtEyiQLd2H\",\"created_at\":\"2020-05-30T17:11:22.000Z\",\"lang\":\"de\",\"author_id\":\"2816669118\",\"id\":\"1266779244484575232\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":2,\"reply_count\":1,\"like_count\":3,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266067475155869696\",\"attachments\":{\"media_keys\":[\"3_1266819103513337857\"]},\"entities\":{\"mentions\":[{\"start\":63,\"end\":79,\"username\":\"StudisForFuture\",\"id\":\"1121882665064501248\"},{\"start\":81,\"end\":94,\"username\":\"ADFC_Dresden\",\"id\":\"125625502\"},{\"start\":96,\"end\":108,\"username\":\"P4F_Dresden\",\"id\":\"1136652369859682305\"},{\"start\":113,\"end\":125,\"username\":\"BundSachsen\",\"id\":\"1003560485315383296\"}],\"hashtags\":[{\"start\":160,\"end\":173,\"tag\":\"Abfckpraemie\"},{\"start\":238,\"end\":255,\"tag\":\"FridaysForFuture\"}],\"urls\":[{\"start\":256,\"end\":279,\"url\":\"https://t.co/PpdX7djZBs\",\"expanded_url\":\"https://twitter.com/FFFDresden/status/1266819115236364290/photo/1\",\"display_url\":\"pic.twitter.com/PpdX7djZBs\"}]},\"text\":\"Neben unserer Fahrraddemo finden am Dienstag auch Aktionen von @StudisForFuture, @ADFC_Dresden, @P4F_Dresden und @BundSachsen statt. Seid am 02.0. dabei! Weder #Abfckpraemie, noch unweltschädliche Subventionen dürfen ins Konjunkturpaket! 
#FridaysForFuture https://t.co/PpdX7djZBs\",\"created_at\":\"2020-05-30T19:49:48.000Z\",\"lang\":\"de\",\"in_reply_to_user_id\":\"1081603778699296770\",\"author_id\":\"1081603778699296770\",\"id\":\"1266819115236364290\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"replied_to\",\"id\":\"1266067475155869696\"}],\"public_metrics\":{\"retweet_count\":6,\"reply_count\":3,\"like_count\":12,\"quote_count\":0}},{\"source\":\"Twitter for Android\",\"conversation_id\":\"1266677828801695744\",\"entities\":{\"mentions\":[{\"start\":0,\"end\":11,\"username\":\"LudgerWess\",\"id\":\"989716693\"}],\"hashtags\":[{\"start\":55,\"end\":72,\"tag\":\"FridaysForFuture\"}]},\"text\":\"@LudgerWess Jetzt ist mir klar, weshalb die Kinder von #FridaysForFuture immer behaupten, sie würden \\\"die Meinung der Wissenschaft\\\" repräsentieren, was in sich gesehen doppelter Nonsens ist. \\uD83D\\uDE44\",\"created_at\":\"2020-05-30T20:37:22.000Z\",\"lang\":\"de\",\"in_reply_to_user_id\":\"989716693\",\"author_id\":\"107357635\",\"id\":\"1266831085582913538\",\"possibly_sensitive\":false,\"referenced_tweets\":[{\"type\":\"replied_to\",\"id\":\"1266677828801695744\"}],\"public_metrics\":{\"retweet_count\":0,\"reply_count\":1,\"like_count\":0,\"quote_count\":0}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266436838190911497\",\"attachments\":{\"media_keys\":[\"3_1266436070302003201\"]},\"text\":\"Super Schildermeer am Heldenplatz heute! 
\\uD83D\\uDCAA\\uD83C\\uDFFB\\uD83D\\uDC4F\\uD83C\\uDFFB\\uD83D\\uDC9A\\n\\n#FridaysForFuture sind kreativ wie eh und je, schön, euch wieder \\\"live\\\" zu sehen \\uD83D\\uDE0D https://t.co/sQFsra6LH2\",\"created_at\":\"2020-05-29T18:30:46.000Z\",\"entities\":{\"hashtags\":[{\"start\":48,\"end\":65,\"tag\":\"FridaysForFuture\"}],\"urls\":[{\"start\":131,\"end\":154,\"url\":\"https://t.co/sQFsra6LH2\",\"expanded_url\":\"https://twitter.com/Manya_G/status/1266436838190911497/photo/1\",\"display_url\":\"pic.twitter.com/sQFsra6LH2\"}]},\"lang\":\"de\",\"author_id\":\"98816776\",\"id\":\"1266436838190911497\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":121,\"reply_count\":6,\"like_count\":799,\"quote_count\":2}},{\"source\":\"Twitter Web App\",\"conversation_id\":\"1266722835252129794\",\"text\":\"Fahrradprotest für sichere Karl-Marx-Straße\\n\\n#Verkehrswende #VerkehrswendeDresden #Dresden #Sachsen #Fahrradprotest #ADFC #FridaysForFuture #FFFDresden #Autogipfel #Abwrackprämie #GhostBike #StVO 
\\n\\nhttps://t.co/OjUBpx4PIK\",\"created_at\":\"2020-05-30T13:27:13.000Z\",\"entities\":{\"hashtags\":[{\"start\":45,\"end\":59,\"tag\":\"Verkehrswende\"},{\"start\":60,\"end\":81,\"tag\":\"VerkehrswendeDresden\"},{\"start\":82,\"end\":90,\"tag\":\"Dresden\"},{\"start\":91,\"end\":99,\"tag\":\"Sachsen\"},{\"start\":100,\"end\":115,\"tag\":\"Fahrradprotest\"},{\"start\":116,\"end\":121,\"tag\":\"ADFC\"},{\"start\":122,\"end\":139,\"tag\":\"FridaysForFuture\"},{\"start\":140,\"end\":151,\"tag\":\"FFFDresden\"},{\"start\":152,\"end\":163,\"tag\":\"Autogipfel\"},{\"start\":164,\"end\":178,\"tag\":\"Abwrackprämie\"},{\"start\":179,\"end\":189,\"tag\":\"GhostBike\"},{\"start\":190,\"end\":195,\"tag\":\"StVO\"}],\"urls\":[{\"start\":198,\"end\":221,\"url\":\"https://t.co/OjUBpx4PIK\",\"expanded_url\":\"https://www.addn.me/soziales/fahrradprotest-fuer-sichere-karl-marx-strasse/\",\"display_url\":\"addn.me/soziales/fahrr…\",\"status\":200,\"unwound_url\":\"https://www.addn.me/soziales/fahrradprotest-fuer-sichere-karl-marx-strasse/\"}]},\"lang\":\"de\",\"author_id\":\"67984352\",\"id\":\"1266722835252129794\",\"possibly_sensitive\":false,\"public_metrics\":{\"retweet_count\":2,\"reply_count\":1,\"like_count\":4,\"quote_count\":0}}]},\"errors\":[{\"value\":\"1\",\"detail\":\"Could not find tweet with ids: [1].\",\"title\":\"Not Found Error\",\"resource_type\":\"tweet\",\"parameter\":\"ids\",\"resource_id\":\"1\",\"type\":\"https://api.twitter.com/2/problems/resource-not-found\"},{\"value\":\"2\",\"detail\":\"Could not find tweet with ids: [2].\",\"title\":\"Not Found Error\",\"resource_type\":\"tweet\",\"parameter\":\"ids\",\"resource_id\":\"2\",\"type\":\"https://api.twitter.com/2/problems/resource-not-found\"}]}"),
date = structure(1639946717, class = c("POSIXct", "POSIXt"
), tzone = "GMT"), times = c(redirect = 0, namelookup = 3.1e-05,
connect = 3.3e-05, pretransfer = 0.000121, starttransfer = 0.474505,
total = 0.475566)), class = "response")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.