blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a8d4e776c6597fc133ecc73c57b3a5cecd7607df | 52b727996e50601cd4bbdb42c659521cfd9658d1 | /hESC/comparison_hESC_monkey.r | 7aa0dc669dff4f659699d010c752c46c851c3a43 | [] | no_license | kenichi-shimada/Nakanishi_single_cell | 16f25321d02be6456c72b7e527314d113d9c23f8 | 10ddf79523eea1cb986698fecca07a83c9051112 | refs/heads/master | 2020-04-06T15:22:21.485517 | 2018-11-14T16:20:16 | 2018-11-14T16:20:16 | 157,576,297 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,348 | r | comparison_hESC_monkey.r | ##
## Compare human ESC single-cell clusters with monkey embryo lineages via
## Spearman correlation over shared orthologous genes, then plot heatmaps.
## Load hESC cluster-level expression (7 clusters, 2018-06-28 version).
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/hESC_data")
# hesc.exp.lin <- readRDS(file="uniq.exp.10clust.rds")
hesc.exp.lin <- readRDS(file="uniq.exp.7clust_062818.rds")
##
library(openxlsx)
## Human-macaque ortholog table from the supplementary file nature19096-s2.xlsx.
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/monkey/")
genes <- read.xlsx("nature19096-s2.xlsx",sheet=1,colNames=TRUE,startRow=2)
hs.genes <- genes[[2]]   # hg19 gene symbols (column 2)
table(table(hs.genes)) ## all unique
mf.genes <- genes[[4]]   # macFas5 gene symbols (column 4)
table(table(mf.genes)) ## all unique
## Lookup vector mapping macaque symbol -> human symbol.
mac2hum <- hs.genes
names(mac2hum) <- mf.genes ##17936
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/monkey")
## Monkey lineage-level expression; translate rownames to human symbols and
## drop genes with no ortholog mapping (NA rowname after translation).
mf.exp.lin <- readRDS(file="exprs.14lin_all_062818.rds")
rownames(mf.exp.lin) <- mac2hum[rownames(mf.exp.lin)]
has.gene <- !is.na(rownames(mf.exp.lin))
# has.gene
# FALSE TRUE
# 11093 17821
mf.exp.lin <- mf.exp.lin[has.gene,]
## Genes measured in both datasets (in human symbol space), and the
## corresponding macaque symbols.
overlapped <- intersect(rownames(hesc.exp.lin),rownames(mf.exp.lin)) ## 15170 genes -> 14893
overlapped.mf.genes <- genes$macFas5_gene_symbol[match(overlapped,genes$hg19_gene_symbol)]
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/monkey")
if(0)saveRDS(overlapped.mf.genes,file="used.mf.genes_062818.rds")
overlapped.mf.genes <- readRDS("used.mf.genes_062818.rds")
## DEG
## Restrict to differentially expressed genes from Extended Data Fig. 5b.
deg.tmp <- read.xlsx("GeneList_EDFig5b.xlsx",1)
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/monkey")
overlapped.mf.genes.1 <- intersect(overlapped.mf.genes,deg.tmp$MF5Symbol) ## 5423
if(0)saveRDS(overlapped.mf.genes.1,file="used.mf.genes.1_062818.rds") ## 5423 -> 5410
## exclusive biomarkers
## Lineage-exclusive up-regulated biomarkers (Kruskal-Wallis / Wilcoxon set).
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/monkey")
bm.mf.genes <- readRDS(file="biomarkers_kw_wilcox_up_062818.rds")
overlapped.1 <- mac2hum[bm.mf.genes]
## compare Spearman correlation
## Subset both matrices to the biomarker genes, then correlate every hESC
## cluster (column of hesc.exp.lin) against every monkey lineage (column of
## mf.exp.lin); cor.mat is lineages x clusters.
mf.exp.lin <- mf.exp.lin[overlapped.1,]
hesc.exp.lin <- hesc.exp.lin[overlapped.1,]
cor.mat <- apply(hesc.exp.lin,2,function(hs){
apply(mf.exp.lin,2,function(mf){
cors <- cor(hs,mf,use="everything",method="spearman")
})
})
library(gplots)
library(RColorBrewer)
# display.brewer.all()
cols <- colorRampPalette(brewer.pal(9,"YlOrRd"))(20)
# cols <- colorRampPalette(c("black","yellow"))(20)
## Clustered heatmap of the correlation matrix.
## NOTE(review): the three PNGs written below all plot the same cor.mat;
## presumably the script was re-run interactively with different gene subsets
## (all overlapped / DEG / exclusive-up) before each png() call -- confirm.
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_mf_hsESC.png",width=500,height=500)
heatmap.2(cor.mat,trace="none",margins=c(5,10),col=cols)
dev.off()
##
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_mf_hsESC_DEG.png",width=500,height=500)
heatmap.2(cor.mat,trace="none",margins=c(5,10),col=cols)
dev.off()
##
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_mf_hsESC_exclusive_up.png",width=500,height=500)
heatmap.2(cor.mat,trace="none",margins=c(5,10),col=cols)
dev.off()
if(0){
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_mf_hsESC_exclusive_up-woEXMC.png",width=500,height=500)
distfun <- function(x)dist(x,method="euclidean")
# distfun <- function(x)as.dist(1-cor(t(x),method="pearson"))
hclustfun <- function(x)hclust(x,method="average")
heatmap.2(cor.mat[!rownames(cor.mat) %in% "EXMC",],trace="none",margins=c(5,10),col=cols,
hclustfun=hclustfun,distfun=distfun)
dev.off()
}else{
}
##
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_mf_hsESC_DEG_exclusive_up.png",width=500,height=500)
heatmap.2(cor.mat,trace="none",margins=c(5,10),col=cols)
dev.off()
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_mf_hsESC_DEG_exclusive_up-woEXMC.png",width=500,height=500)
heatmap.2(cor.mat[!rownames(cor.mat) %in% "EXMC",],trace="none",margins=c(5,10),col=cols)
dev.off()
##
png("spearman_correlation_barplot_hypoblast.png",width=300,height=500)
barplot(cor.mat["Hypoblast",],ylab="Spearman correlation against Hypoblast")
dev.off()
##
barplot(cor.mat["VE/YE",],las=2)
if(0){
sub.cor.mat <- cor.mat[c("PreE-TE","PreL-TE","Post-paTE","Hypoblast","VE/YE","ICM",
"Pre-EPI","PostE-EPI","PostL-EPI","Gast1","Gast2a","Gast2b"),
c("3","1","2","7","4","5","6","10","8","9")]
colnames(sub.cor.mat) <- 1:10
rowsidecols <- rep(brewer.pal(5,"Set2"),c(3,2,1,3,3))
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_noclust_15lins_1.png",width=500,height=500)
heatmap.2(sub.cor.mat,trace="none",margins=c(5,10),col=cols,
hclustfun=hclustfun,distfun=distfun,Rowv=F,Colv=F,dendrogram="none",RowSideColors=rowsidecols)
dev.off()
}else{
sub.cor.mat <- cor.mat[c("PreE-TE","PreL-TE","Post-paTE","Hypoblast","VE/YE","ICM",
"Pre-EPI","PostE-EPI","PostL-EPI","Gast1","Gast2"),]
# c("3","1","2","7","4","5","6","10","8","9")]
colnames(sub.cor.mat) <- 1:7
rowsidecols <- rep(brewer.pal(3,"Set2"),c(3,2,6))
##
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_noclust_14lins.png",width=500,height=500)
heatmap.2(sub.cor.mat,trace="none",margins=c(5,10),col=cols,
hclustfun=hclustfun,distfun=distfun,Rowv=F,Colv=F,dendrogram="none")
dev.off()
##
setwd("/n/groups/mitchison/Kenichi/collaboration/Nakanishi_single_cell/Shared_Shimada_harvard/20180520_hESC/plots")
png("spearman_correlation_noclust_14lins_1_062818.png",width=500,height=500)
heatmap.2(sub.cor.mat,trace="none",margins=c(5,10),col=cols,
hclustfun=hclustfun,distfun=distfun,Rowv=F,Colv=F,dendrogram="none",RowSideColors=rowsidecols)
dev.off()
pdf("spearman_correlation_noclust_14lins_1_062818.pdf",width=5,height=5)
heatmap.2(sub.cor.mat,trace="none",margins=c(5,10),col=cols,
hclustfun=hclustfun,distfun=distfun,Rowv=F,Colv=F,dendrogram="none",RowSideColors=rowsidecols)
dev.off()
} |
6cc4b7ab5f1a6a635414e756009140dc3e37bb58 | d308acfdf2319f7669eca6af94ec1a6d8e935fd4 | /man-roxygen/param_chromacity.R | f01eab7295fb807d89984df6d0317990b0c3ae42 | [] | no_license | jiho/chroma | c6c41076bf40adac8027782272c609d82e2d171d | f098464515954b1a35106735d510c5ef8f3d7818 | refs/heads/master | 2022-12-06T18:02:58.002216 | 2022-11-18T23:30:48 | 2022-11-18T23:32:09 | 52,235,679 | 14 | 3 | null | 2022-02-15T06:58:20 | 2016-02-22T00:21:36 | R | UTF-8 | R | false | false | 78 | r | param_chromacity.R | #' @param c chromacity, number in \code{[0,~1]}; 0 is grey, ~1 is full color.
|
4aa07bd3a36450ad511ec5135a82464b8e57829c | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/binaryGP/R/likelihood.R | 33c02164b77b4dc51d6a609e6167ac188126556d | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 397 | r | likelihood.R | likelihood <- function(omega, nugget, M.mx, X, w, eta_tilde, T_val, corr, orthogonalGP){
# Body of likelihood(): evaluate the GP likelihood at the parameter vector
# omega = (sigma, rho_1, ..., rho_k) for design matrix X and data w.
# corr_matrix(), orthogonalize() and likelihood_fun() are package-internal
# helpers defined elsewhere (not visible in this file).
sigma_hat <- omega[1]
sigma2 <- sigma_hat^2
# Remaining entries of omega are the correlation parameters.
rho_hat <- omega[2:length(omega)]
# Correlation matrix plus a nugget scaled by 1/sigma2 on the diagonal
# (nugget is supplied on the variance scale).
R.mx <- corr_matrix(X, rho_hat, corr) + diag(nugget/sigma2, nrow(X))
# Orthogonal-GP variant: subtract the orthogonalizing correction matrix.
if(orthogonalGP) R.mx <- R.mx - orthogonalize(X, rho_hat, corr)
out <- likelihood_fun(M.mx, X, w, eta_tilde, R.mx, sigma2, T_val)
return(out)
}
|
f8821627b5d2fc2ea0e014e28059e32093e10125 | 83b0857680678c239a1028c9253dff36f8c4e981 | /demo/tir92_Rasterlize.R | f2c1d80ada8e39665051c078bdcd18d121c5eae7 | [
"MIT"
] | permissive | bwtian/TIR | 079369d8778327d905a3d62f527fb95dcefdffa9 | 7746da88146032b1eb7ff2f5ef9d0399d32438db | refs/heads/master | 2020-06-04T06:25:05.800938 | 2015-02-25T18:04:24 | 2015-02-25T18:04:24 | 24,012,165 | 4 | 1 | null | 2014-09-14T09:27:48 | 2014-09-14T02:59:21 | R | UTF-8 | R | false | false | 1,712 | r | tir92_Rasterlize.R | source("~/SparkleShare/TIR/demo/tirSettings.R")
### Import df to spdf
## Turn the AG100B point data frame (Lon/Lat columns) into a
## SpatialPointsDataFrame in WGS84. wgs84GRS and dir.AG100B are expected to
## come from the sourced tirSettings.R.
hkdAG100df <- readRDS("~/Share500sda/AG100B/hkdAG100B.Rds")
head(hkdAG100df)
hkdAG100spdf <- hkdAG100df
coordinates(hkdAG100spdf) <- ~Lon+Lat
proj4string(hkdAG100spdf) <- wgs84GRS
setwd(dir.AG100B)
getwd()
### Subset spdf
## NOTE(review): this assigns the readRDS *function* itself (no file
## argument), so hkdBoudary is not a boundary object and the subset on the
## next line cannot work as intended. Also "Boudary" vs "Boundary" typo.
hkdBoudary <- readRDS
sub <- hkdAG100spdf[hkdBoudary]
#phd.saveshp.geo(hkdAG100spdf) ## This will take a lot of time
####sgdf Crop spdf using Vector
###
## Rasterize the LSTm field at 100 m cells, then crop/resample/mask it to the
## Hokkaido mask raster, writing GeoTIFF outputs along the way.
hkdAG100sgdfLSTm <- vect2rast(hkdAG100spdf, fname = "LSTm", cell.size = 100)
summary(hkdAG100sgdfLSTm)
##mask
hkdAG100sgdfLSTmR <- raster(hkdAG100sgdfLSTm)
hkdmaskb <- readRDS("~/SparkleShare/TIR/hkdmskb_grdi2d1h.Rds")
proj4string(hkdmaskb)
LSTcrop <- crop(hkdAG100sgdfLSTmR, hkdmaskb, filename = "hkdAG100sgdfLSTm.tif", overwrite=TRUE)
# plot(LSTcrop)
# proj4string(hkdAG100sgdfLSTmR)
# proj4string(hkdmaskb)
# extent(LSTcrop)
# extent(hkdmaskb)
## Align to the mask grid via two routes (crop-then-resample vs direct
## resample); compareRaster checks they yield the same geometry.
LSTrsp <- resample(LSTcrop, hkdmaskb)
LSTrsp2 <- resample(hkdAG100sgdfLSTmR, hkdmaskb)
compareRaster(LSTrsp, LSTrsp2)
extent(LSTrsp2)
extent(hkdmaskb)
plot(LSTrsp)
summary(LSTrsp)
LSTmask <- mask(LSTrsp2, hkdmaskb, filename = "hkdAG100LSTmask.tif", overwrite=TRUE)
plot(LSTmask, zlim =c(252, 320))
levelplot(LSTmask, par.settings = BuRdTheme)
levelplot(LSTmask, par.settings = BuRdTheme, at = seq(252,320, 2))
plotKML(LSTmask)
## Mean-center the masked LST values (252-320 range above suggests Kelvin --
## confirm units).
LSTCenter <- scale(LSTmask, center = TRUE, scale = FALSE)
summary(LSTCenter)
#### rasterlize
plot(hkdmaskb)
proj4string(hkdmaskb)
extent(hkdmaskb)
hkdAG100rb <- rasterize(hkdAG100spdf, hkdmaskb)
levelplot(hkdAG100rb)
plot(hkdAG100rb)
## NOTE(review): "hkdAG100r" is never defined in this script; the two lines
## below presumably meant "hkdAG100rb" -- verify before running.
class(hkdAG100r)
saveRDS(hkdAG100r, file = "hkdAG100Br.Rds")
hkdAG100rb[[1]]
|
da28c56ccdad13fd465a1544f423df59937de9c0 | 3aa50cc3eaeb92e64b4668873f48ae06518aece7 | /08.Fst.heatmap.R | e04bbe1f0844fa4eceeadc650580be9036c0dcaf | [] | no_license | mattingsdal/corkwing_1 | d7301cbf734f9ba5900135be576e19d374feced5 | 81ea47c85d4e816b3df5dfe659285b7feaad9cab | refs/heads/master | 2020-06-23T07:49:19.916928 | 2016-11-24T10:01:47 | 2016-11-24T10:01:47 | 74,658,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,461 | r | 08.Fst.heatmap.R | setwd("C:/Users/mortenma/Google Drive/post doc/essential files/paper 1/test")
## Pairwise Weir & Cockerham (1984) Fst between 8 populations computed with
## SNPRelate from PLINK bed/bim/fam input, rendered as a clustered heatmap.
library(SNPRelate)
library(pheatmap)
library(Cairo)
## Convert the PLINK binary trio to the GDS format SNPRelate operates on.
bed.fn <- "SNP.QC.population.bed"
fam.fn <- "SNP.QC.population.fam"
bim.fn <- "SNP.QC.population.bim"
snpgdsBED2GDS(bed.fn, fam.fn, bim.fn, "SNP.QC.population.gds")
snpgdsSummary("SNP.QC.population.gds")
genofile <- snpgdsOpen("SNP.QC.population.gds")
id <- read.gdsn(index.gdsn(genofile, "sample.id"))
## Fst, F
## Population labels per sample (65 samples total: 7+8+8+10+8+8+8+8);
## pop holds letter codes, pop2 the two-letter display codes in same order.
pop=c(rep("F",7),rep("A",8),rep("E",8),rep("H",10),rep("C",8),rep("B",8),rep("D",8),rep("G",8))
pop2=c(rep("AR",7),rep("UK",8),rep("EG",8),rep("GM",10),rep("NH",8),rep("SM",8),rep("ST",8),rep("TV",8))
res=matrix(ncol=8,nrow=8)
index=unique(pop)
## Fill res[i,j] with Fst between populations i and j. The ifelse guard puts
## 0 on the diagonal (only one population in the subset).
for (j in 1:8){
for (i in 1:8){
subpop=pop[pop%in%index[j]|pop%in%index[i]]
subsample=id[pop%in%index[j]|pop%in%index[i]]
res[i,j]=ifelse(length(unique(subpop))==2,snpgdsFst(genofile, sample.id=subsample, population=as.factor(subpop),method="W&C84",autosome.only=FALSE,verbose=FALSE)$Fst,c(0))
}
}
colnames(res)= unique(pop2)
row.names(res)= unique(pop2)
FST=res
library(pheatmap)
library(RColorBrewer)
## Write the clustered Fst heatmap (with 3-decimal cell labels) to PDF.
Cairo(file="FST_heatmap.pdf",type="pdf",width=80,height=80,units="mm")
par(mar = rep(0, 4))
pheatmap(FST,border="white",color = brewer.pal(9,"Blues"),cluster_rows=TRUE,cluster_cols=TRUE,display_numbers=T,number_color="gray60",number_format="%.3f",fontsize_number=3,fontsize_row=6,fontsize_col=6,legend=FALSE,treeheight_row=25,treeheight_col=25)
dev.off()
|
4ec71f953c43436f0d57db4fa944262fae21dd16 | 864e2ada04ce6b35099cc91afdd13f6627f63d2a | /Gov1347/Narrative.R | 64b9b7d541a5e91744d7be8a22d050ce3899631c | [] | no_license | SerhiyS1/electionblog | f0dfbc7c524fa0d963000ae7b56566059202e27e | 97983ba77f8bdf2db7dd1ebfe30cd6afdc2c29b7 | refs/heads/master | 2023-01-28T20:20:50.571823 | 2020-12-11T01:49:49 | 2020-12-11T01:49:49 | 293,930,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,264 | r | Narrative.R | ## Reflection - Serhiy Sokhan
## Setup: load 2020 state popular-vote results and exit-poll data, and
## compute the margins plotted below.
library(tidyverse)
library(ggplot2)
library(reshape2)
library(usmap)
library(ggrepel)
## Setting working directory
setwd("/Users/serhiysokhan/Desktop/SeniorYear/Gov1347")
## Reading in data
## Two-party popular vote by state, 2020 only; GOP margin in percentage
## points; state names replaced with their abbreviations.
results_per_state <- read_csv("popvote_bystate_1948-2020.csv") %>%
  filter(year == "2020") %>%
  mutate(gop_margin = (R_pv2p - D_pv2p)*100)
results_per_state$state <- state.abb[match(results_per_state$state, state.name)]
## Exit polls: Democratic minus Republican share per group.
exit_polls <- read_csv("exitpolls.csv")
exits <- exit_polls %>%
  mutate(dem_margin = Democrat - Republican)
ggplot(exits, aes(x=race_gender, y= dem_margin, fill = as.factor(year))) +
geom_bar(stat = "identity", position = "dodge") +
scale_fill_manual(name = "Election Year", values =c("purple", "orange")) +
xlab("Race and Sex") +
ylab("Democratic Margin (%)") +
ggtitle("Democratic Margin by Race and Sex in the 2016 and 2020 Elections") +
scale_x_discrete(labels=c(black_men = "Black Men", black_women = "Black Women", latina_women = "Latina Women", latino_men =
"Latino Men", white_men = "White Men", white_women = "White Women")) +
geom_text(aes(label = round(dem_margin, digits = 2)), position = position_dodge(width = 1),
vjust = -0.5, size = 3.0) |
ade68bfe37eab96b01d53ec6e610302206e47ddb | 81d0bf32f78b27eade9b612714dfc74457b43ad1 | /analysis2.R | dcd58a2185ea94959da2ac4b4e9322503355ffcc | [] | no_license | michaelrgaffney/SignalingExperiment2018 | 36954e55935ccc4a75d0b87ed0d9df4276558eff | e7eed6ee68932bac87f8097bbe0de15e1f666ce4 | refs/heads/master | 2021-06-15T07:11:56.788725 | 2021-03-25T19:29:40 | 2021-03-25T19:29:40 | 166,730,725 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,301 | r | analysis2.R |
#+ message=F,warning=F
library(signalingdata2018)
library(tidyverse)
library(mediation)
library(visreg)
library(car)
library(effects)
library(glmnet)
library(MASS)
library(ggmosaic)
library(ggcorrplot)
library(ggfortify)
library(naniar)
library(broom)
library(hagenutils)
library(gapmap)
library(UpSetR)
library(patchwork)
#+ message=F,warning=F,fig.width=10,fig.height=10
# Recoding MC2.3: How does your sister feel?
# Clean and recode the raw survey data.
# - Drops respondents who do not reside in the US.
# - Renames MC1.1_1 (closeness-to-sister slider) to closesistert1.
# - Expands the multi-select item MC2.3 ("How does your sister feel?") into
#   0/1 indicator columns; missing responses are imputed as 'None' so they
#   register under NoneOfAbove.
# - Recodes Siblings to numeric, top-coding "More than 10" as 11.
# - Builds an ordered education factor (Ed2) and reverses the angryt1,
#   howsadt1 and daughterharmt1 scales.
#   NOTE(review): howsadt1 and daughterharmt1 are reversed AGAIN in the d0
#   pipeline further down, which undoes the reversal done here (angryt1's
#   second reversal is commented out there) -- confirm which is intended.
signalingdata2018 <-
  signalingdata2018 %>%
  dplyr::filter(State != 'I do not reside in the United States') %>%
  mutate(
    closesistert1 = MC1.1_1,
    MC2.3 = ifelse(is.na(MC2.3), 'None', MC2.3),
    Angry = ifelse(str_detect(MC2.3, 'Angry'), 1, 0),
    Sad = ifelse(str_detect(MC2.3, 'Sad'), 1, 0),
    Suicidal = ifelse(str_detect(MC2.3, 'Suicidal'), 1, 0),
    MentallyIll = ifelse(str_detect(MC2.3, 'Mentally ill'), 1, 0),
    Depressed = ifelse(str_detect(MC2.3, 'Depressed'), 1, 0),
    OutOfOptions = ifelse(str_detect(MC2.3, 'Out of alternative options'), 1, 0),
    Happy = ifelse(str_detect(MC2.3, 'Happy'), 1, 0),
    Neutral = ifelse(str_detect(MC2.3, 'Neutral'), 1, 0),
    Scared = ifelse(str_detect(MC2.3, 'Scared'), 1, 0),
    Tired = ifelse(str_detect(MC2.3, 'Tired'), 1, 0),
    Distressed = ifelse(str_detect(MC2.3, 'Distressed'), 1, 0),
    NoneOfAbove = ifelse(str_detect(MC2.3, 'None'), 1, 0),
    Siblings2 = as.numeric(ifelse(Siblings == "More than 10", "11", Siblings)),
    # BUG FIX: this previously read `Ed2 <- ordered(...)`. Inside mutate(),
    # `<-` does not create a column named Ed2 (dplyr auto-names the column
    # after the whole expression); `=` creates the intended column.
    Ed2 = ordered(
      Ed,
      levels = c(
        "Some high school (secondary school), no diploma",
        "GED",
        "High school (secondary school)",
        "Associate degree",
        "Bachelor's degree",
        "Professional degree",
        "Master's degree",
        "Doctorate"
      )
    ),
    angryt1 = 100 - angryt1,
    howsadt1 = 100 - howsadt1,
    daughterharmt1 = 100 - daughterharmt1
  ) %>%
  dplyr::select(-MC2.3)

# Years of education: map each credential to an approximate years-of-schooling
# value, then attach it as a new column.
ed2years <-
  c(
    "Some high school (secondary school), no diploma" = 11,
    "GED" = 13,
    "High school (secondary school)" = 13,
    "Associate degree" = 15,
    "Bachelor's degree" = 17,
    "Professional degree" = 19,
    "Master's degree" = 19,
    "Doctorate" = 24
  )
signalingdata2018$years_education <- ed2years[signalingdata2018$Ed]
# All signals and conditions
signaldict <-
c(
'Schizophrenia:Sister' = 'Schizophrenia',
'VerbalRequest:Sister' = 'VerbalRequest',
'Anger:Sister' = 'Anger',
'FacialSadnesswithCrying:Sister' = 'Crying',
'Depression:Sister' = 'Depression',
'DepressionwithSuicideThreat:Sister' = 'Depression&Suicidal',
'SuicideAttempt:Sister' = 'Suicide attempt',
'Control:Sister' = 'Control'
)
# Build the main analysis frame d0:
# - Drops excluded participants and identifying/attention-check columns.
# - Maps the signal:Sister condition labels to short names via signaldict and
#   sets factor/ordered levels for the experimental manipulations.
# - Imputes the scale midpoint (50) for missing time-2 ratings, then centers
#   all time-2 ratings at 0 by subtracting 50, so positive values mean an
#   increase relative to the midpoint.
# NOTE(review): howsadt1 and daughterharmt1 were already reversed
# (100 - x) in the recode pipeline above; reversing them again here restores
# the original orientation, while angryt1's reversal here is commented out --
# confirm which orientation is intended for these three t1 scales.
d0 <-
  signalingdata2018 %>%
  filter(!exclusionopt1) %>%
  dplyr::select(
    -starts_with('Recipient'),
    -ExternalReference,
    -starts_with('AC'),
    -starts_with('FL_')
  ) %>%
  mutate(
    signal = factor(signaldict[signal]),
    p_info = ordered(p_info, levels = c('Cheating', 'PrivateInformation', 'Honest')),
    conflict = factor(conflict, levels = c('Conflict', 'Support')),
    #angryt1 = 100 - angryt1,
    howsadt1 = 100 - howsadt1,
    daughterharmt1 = 100 - daughterharmt1,
    needsmoneyt2 = ifelse(is.na(needsmoneyt2), 50, needsmoneyt2),
    likelylendmoneyt2 = ifelse(is.na(likelylendmoneyt2), 50, likelylendmoneyt2),
    angryt2 = ifelse(is.na(angryt2), 50, angryt2),
    satisfactiont2 = ifelse(is.na(satisfactiont2), 50, satisfactiont2),
    howreasonablet2 = ifelse(is.na(howreasonablet2), 50, howreasonablet2),
    believehealtht2 = ifelse(is.na(believehealtht2), 50, believehealtht2),
    # believeneedt2 is additionally reversed so higher = more perceived need.
    believeneedt2 = ifelse(is.na(believeneedt2), 50, believeneedt2),
    believeneedt2 = 100 - believeneedt2,
    sisterbenefitt2 = ifelse(is.na(sisterbenefitt2), 50, sisterbenefitt2),
    trustrepayt2 = ifelse(is.na(trustrepayt2), 50, trustrepayt2),
    daughterharmt2 = ifelse(is.na(daughterharmt2), 50, daughterharmt2),
    howsadt2 = ifelse(is.na(howsadt2), 50, howsadt2),
  ) %>%
  # Center the time-2 ratings at 0 (midpoint 50 -> 0).
  mutate_at(
    vars(needsmoneyt2:trustrepayt2),
    function(x) x - 50
  ) %>%
  # Convenience aliases: "change" outcomes used by the models below.
  mutate(
    delta_needs_money = needsmoneyt2,
    delta_lend = likelylendmoneyt2
  )
df_na <-
signalingdata2018 %>%
filter(!exclusionopt1) %>%
dplyr::select(needsmoneyt2:trustrepayt2) %>%
summarise_all(function(x) signif(100*sum(is.na(x))/length(x), 2)) %>%
gather(key = 'Variable', value = "Percent missing")
df_na_sum <- summary(df_na$`Percent missing`)
df_na_sum
m <- lm(delta_needs_money ~ signal - 1, d0)
mc <- names(sort(coef(m)))
mc <- str_replace(mc, 'signal', '')
d0$signal2 <- factor(d0$signal, levels = mc)
mT2need <- lm(delta_needs_money ~ signal2 - 1, d0)
pT2need <- visreg(mT2need, partial=F, gg = T, rug = F)
# PCA of t1 vars
needvarsT1 <-
c(
"needsmoneyt1",
"likelylendmoneyt1",
"angryt1",
"satisfactiont1",
"howsadt1",
"howreasonablet1",
"believehealtht1",
# "daughterharmt1",
"believeneedt1",
"sisterbenefitt1",
"trustrepayt1",
"comfortablelendingt1",
"closesistert1"
)
cc <- complete.cases(d0[needvarsT1])
pcaT1 <- prcomp(d0[cc, needvarsT1], scale. = T)
d0$PC1t1 <- NA
d0$PC1t1[cc] <- pcaT1$x[,1]
# likelylendmoneyt2 = likelylendmoneyt2/100,
# needsmoneyt1 = needsmoneyt1/100,
# needsmoneyt2 = needsmoneyt2/100,
# pca_loadings_plot(m)
biplotT1 <-
autoplot(
pcaT1,
loadings = T,
loadings.label = T,
data = d0[cc,],
alpha = 0.25
) +
theme_bw()
autoplot(
pcaT1,
loadings = T,
loadings.label = T,
data = d0[cc,],
colour = 'conflict',
frame.type = 'norm'
) +
theme_bw()
autoplot(
pcaT1,
loadings = T,
loadings.label = T,
data = d0[cc,],
colour = 'p_info',
frame.type = 'norm'
) +
theme_bw()
# Effect of t1 conflict and private info on perceived need and PC1t1
mT1manipulation <- lm(-PC1t1 ~ conflict + p_info, d0)
Anova(mT1manipulation)
pT1a <- visreg(mT1manipulation, xvar = 'conflict', gg = T) +
scale_y_continuous(limits = c(-8, 5)) +
labs(title = 'Conflict effect', x = '', y = 'PC1 (time 1)') +
theme_bw()
pT1b <- visreg(mT1manipulation, xvar = 'p_info', gg = T) +
scale_y_continuous(limits = c(-8, 5)) +
labs(title = 'Information effect', x = '', y = '') +
theme_bw()
mT1interaction <- lm(-PC1t1 ~ conflict * p_info, d0[d0$p_info != 'Cheating',])
mT1interaction_anova <- Anova(mT1interaction, type = 3)
m <- lm(delta_needs_money ~ conflict + p_info, d0)
Anova(m)
plot(allEffects(m))
# PCA of t2 vars
needvarsT2 <-
c(
"needsmoneyt2",
"likelylendmoneyt2",
"angryt2",
"satisfactiont2",
# "howsadt2",
"howreasonablet2",
"believehealtht2",
# "daughterharmt2",
"believeneedt2",
"sisterbenefitt2",
"trustrepayt2",
"comfortablelendingt2",
"MC2.1_1"
)
cc <- complete.cases(d0[needvarsT2])
m <- prcomp(d0[cc, needvarsT2], scale. = T)
d0$PC1t2all <- NA
d0$PC1t2all[cc] <- -m$x[,1]
mT2pc1 <- lm(PC1t2all ~ PC1t1 + signal2, d0)
pT2pc1 <- visreg(mT2pc1, xvar = 'signal2', partial=F, gg = T, rug = F)
pT2pc1 <-
pT2pc1 +
# geom_hline(yintercept = mean(d0$comfortablelendingt1, na.rm=T), linetype = 'dotted') +
labs(
title = "D. Signal effect on PC1",
# subtitle = paste('N =', nobs(m)),
x = '',
y = "") +
coord_flip() +
theme_bw() +
theme(axis.title.y = element_blank(), axis.text.y=element_blank())
pT2pc1
# Main effects plots
pT2need <-
pT2need +
geom_hline(yintercept = 0, linetype = 'dotted') +
labs(
title = "C. Change in perception of sister's need",
# subtitle = paste('N =', nobs(m)),
x = '',
y = "Change in perception of sister's need for money") +
coord_flip() +
theme_bw()
pT2need
mT2lend <- lm(delta_lend ~ signal2 - 1, d0)
# mc <- names(sort(coef(m)))
# mc <- str_replace(mc, 'signal', '')
# d0$signal2 <- factor(d0$signal, levels = mc)
# m <- lm(delta_lend ~ signal2 - 1, d0)
pT2lend <- visreg(mT2lend, partial=F, gg = T, rug = F)
pT2lend <-
pT2lend +
geom_hline(yintercept = 0, linetype = 'dotted') +
labs(
title = "B. Change in likelihood of lending money",
# subtitle = paste('N =', nobs(m)),
x = '',
y = "Change in likelihood of lending money") +
coord_flip() +
theme_bw() +
theme(axis.title.y = element_blank(), axis.text.y=element_blank())
pT2lend
mT2comfort <- lm(comfortablelendingt2 ~ comfortablelendingt1 + signal2 - 1, d0)
pT2comfort <- visreg(mT2comfort, xvar = 'signal2', partial=F, gg = T, rug = F)
pT2comfort <-
pT2comfort +
geom_hline(yintercept = mean(d0$comfortablelendingt1, na.rm=T), linetype = 'dotted') +
labs(
title = "A. Amount of money comfortable lending",
# subtitle = paste('N =', nobs(m)),
x = '',
y = "Amount comofortable lending in US dollars") +
coord_flip() +
theme_bw()
pT2comfort
# Preregistered interaction models
# Fit a linear model and draw a visreg-style interaction plot of signal2 by a
# moderator: dodged point estimates with CI lineranges, flipped coordinates.
#
# Args:
#   f:            model formula passed to lm(); must involve `signal2` and
#                 the moderator named by `trm`.
#   trm:          moderator variable name as a string (e.g. "p_info").
#   ylab:         plot title text.
#   removeLegend: if TRUE, suppress the colour legend.
#   removeY:      if TRUE, suppress the y-axis title and labels.
#   data:         data frame used to fit the model; defaults to the global
#                 `d0` for backward compatibility with existing calls, but
#                 can now be any frame with the required columns.
# Returns: list(plot = ggplot object, model = fitted lm object).
interactplot <- function(f, trm, ylab, removeLegend = FALSE, removeY = FALSE,
                         data = d0){
  m <- lm(formula = f, data = data)
  # plot = FALSE returns the fit data frame instead of drawing immediately.
  vdf <- visreg(m, xvar = 'signal2', by = trm, plot = FALSE)
  trm <- sym(trm)  # convert to symbol for tidy-eval injection into aes()
  p <- ggplot(vdf$fit, aes(signal2, visregFit, colour = !!trm)) +
    geom_point(position = position_dodge(width = 0.3)) +
    geom_linerange(aes(ymin = visregLwr, ymax = visregUpr),
                   position = position_dodge(width = 0.3)) +
    geom_hline(yintercept = 0, linetype = 'dotted') +
    labs(title = ylab, x = '', y = '') +
    coord_flip() +
    theme_bw()
  if (removeLegend) {
    p <- p + theme(legend.position = "none")
  }
  if (removeY) {
    p <- p + theme(axis.title.y = element_blank(), axis.text.y = element_blank())
  }
  return(list(plot = p, model = m))
}
# signal X p_info interactions
p_comfort_signal_pinfo <-
interactplot(comfortablelendingt2 ~ comfortablelendingt1 + signal2 * p_info - 1, 'p_info', '\nA. Amount comfortable lending at T2', removeLegend=T)
p_lend_signal_pinfo <-
interactplot(delta_lend ~ signal2 * p_info - 1, 'p_info', '\nB. Change in likelihood of lending money', removeY = T)
p_pc1_signal_pinfo <-
interactplot(PC1t2all ~ PC1t1 +signal2 * p_info - 1, 'p_info', '\nC. PC1 at T2', removeLegend = T)
p_money_signal_pinfo <-
interactplot(delta_needs_money ~ signal2 * p_info - 1, 'p_info', '\nD. Change in perceived need for money', removeY = T)
# (p_comfort_signal_pinfo + p_lend_signal_pinfo + scale_y_continuous(limits = c(-40, 20)))/(p_pc1_signal_pinfo + p_money_signal_pinfo + scale_y_continuous(limits = c(-40, 20)))
# signal X conflict interactions
p_comfort_signal_conflict <-
interactplot(comfortablelendingt2 ~ comfortablelendingt1 + signal2 * conflict - 1, 'conflict', '\nA. Amount comfortable lending at T2', removeLegend=T)
p_lend_signal_conflict <-
interactplot(delta_lend ~ signal2 * conflict - 1, 'conflict', '\nB. Change in likelihood of lending money', removeY = T)
p_pc1_signal_conflict <-
interactplot(PC1t2all ~ PC1t1 + signal2 * conflict - 1, 'conflict', '\nC. PC1 at T2', removeLegend = T)
p_money_signal_conflict <-
interactplot(delta_needs_money ~ signal2 * conflict - 1, 'conflict', '\nD. Change in perceived need for money', removeY = T)
# (p_comfort_signal_conflict + p_lend_signal_conflict + scale_y_continuous(limits = c(-40, 20)))/(p_pc1_signal_conflict + p_money_signal_conflict + scale_y_continuous(limits = c(-40, 20)))
# Filter out Schizophrenia, Anger, Control, Depression&Suicidal, Cheating
d <-
d0 %>%
dplyr::filter(
! signal %in% c('Schizophrenia', 'Anger', 'Control', 'Depression&Suicidal', 'Suicidal')
) %>%
mutate(signal = fct_drop(signal))
cc <- complete.cases(d[needvarsT2])
m <- prcomp(d[cc, needvarsT2], scale. = T)
d$PC1t2 <- NA
d$PC1t2[cc] <- -m$x[,1]
autoplot(m, loadings = T, loadings.label = T, data = d[cc,], colour = 'signal', frame.type = 'norm') + theme_bw()
# PCA of all need vars
df <- d0 %>% dplyr::filter(signal %in% c('VerbalRequest', 'Crying', 'Depression'))
# df <- d %>% dplyr::filter(signal %in% c('VerbalRequest', 'Suicidal'))
cc <- complete.cases(df[c(needvarsT1, needvarsT2)])
m <- prcomp(df[cc, c(needvarsT1, needvarsT2)], scale. = T)
# pca_loadings_plot(m)
autoplot(m, loadings = T, loadings.label = T, data = df[cc,], colour = 'signal', frame.type = 'norm') + theme_bw()
# Models
# Retain only VerbalRequest, Crying, Depression
d2 <-
d %>%
dplyr::select(
p_info,
conflict,
signal,
needsmoneyt1,
delta_needs_money,
delta_lend,
PC1t1,
PC1t2
) %>%
dplyr::filter(
p_info != 'Cheating',
signal %in% c(
'VerbalRequest',
'Crying',
'Depression')
) %>%
mutate(
p_info = ordered(as.character(p_info), levels = c('PrivateInformation', 'Honest')),
signal = ordered(signal, levels = c('VerbalRequest', 'Crying', 'Depression')),
delta_need = case_when(
delta_needs_money < -1 ~ -1,
delta_needs_money > 1 ~ 1,
TRUE ~ 0
),
delta_need2 = case_when(
PC1t2 < (mean(PC1t2, na.rm=T) - sd(PC1t2, na.rm=T)/2) ~ -1,
PC1t2 > mean(PC1t2, na.rm=T) + sd(PC1t2, na.rm=T)/2 ~ 1,
TRUE ~ 0
),
delta_need = ordered(delta_need),
delta_need2 = ordered(delta_need2)
) %>%
na.omit
# Scatterplots
ggplot(d2, aes(needsmoneyt1, delta_needs_money, colour = signal)) +
geom_point() +
geom_smooth(span = 2) +
scale_color_discrete() +
facet_wrap(~conflict) +
theme_bw()
p_pc1_t1t2 <-
ggplot(d2, aes(-PC1t1, PC1t2, colour = signal)) +
geom_point() +
geom_smooth(span = 2) +
scale_color_discrete() +
facet_wrap(~conflict) +
theme_bw()
# Mediation
# Retain only Depression and VerbalRequest as treatment and control, resp.
d2b <-
d0 %>%
dplyr::select(
signal,
needsmoneyt1,
delta_needs_money,
delta_lend
) %>%
dplyr::filter(
signal %in% c(
'VerbalRequest',
'Depression')
) %>%
mutate(
signal = factor(signal, levels = c('VerbalRequest', 'Depression'))
) %>%
na.omit
## d2b data set restricted to 2 signals
model.y <- lm(delta_lend ~ needsmoneyt1 + delta_needs_money + signal, data = d2b)
model.m <- lm(delta_needs_money ~ needsmoneyt1 + signal, data = d2b)
mediation_model <- mediate(model.m, model.y, treat = 'signal', mediator = 'delta_needs_money', boot = T)
summary(mediation_model)
plot(mediation_model)
# Retain only Depression and Crying as treatment and control, resp.
d2b <-
d0 %>%
dplyr::select(
signal,
needsmoneyt1,
delta_needs_money,
delta_lend
) %>%
dplyr::filter(
signal %in% c(
'Crying',
'Depression')
) %>%
mutate(
signal = factor(signal, levels = c('Crying', 'Depression'))
) %>%
na.omit
## d2b data set restricted to 2 signals
model.y <- lm(delta_lend ~ needsmoneyt1 + delta_needs_money + signal, data = d2b)
model.m <- lm(delta_needs_money ~ needsmoneyt1 + signal, data = d2b)
mediation_model2 <- mediate(model.m, model.y, treat = 'signal', mediator = 'delta_needs_money', boot = T)
summary(mediation_model2)
plot(mediation_model2)
# Retain only Depression and Control as treatment and control, resp.
d2b <-
d0 %>%
dplyr::select(
signal,
needsmoneyt1,
delta_needs_money,
delta_lend
) %>%
dplyr::filter(
signal %in% c(
'Control',
'Depression')
) %>%
mutate(
signal = factor(signal, levels = c('Control', 'Depression'))
) %>%
na.omit
## d2b data set restricted to 2 signals
model.y <- lm(delta_lend ~ needsmoneyt1 + delta_needs_money + signal, data = d2b)
model.m <- lm(delta_needs_money ~ needsmoneyt1 + signal, data = d2b)
mediation_model3 <- mediate(model.m, model.y, treat = 'signal', mediator = 'delta_needs_money', boot = T)
summary(mediation_model3)
plot(mediation_model3)
# Retain only Depression and Suicide as treatment and control, resp.
d2b <-
d0 %>%
dplyr::select(
signal,
needsmoneyt1,
delta_needs_money,
delta_lend
) %>%
dplyr::filter(
signal %in% c(
'Suicide attempt',
'Depression')
) %>%
mutate(
signal = factor(signal, levels = c('Suicide attempt', 'Depression'))
) %>%
na.omit
## d2b data set restricted to 2 signals
model.y <- lm(delta_lend ~ needsmoneyt1 + delta_needs_money + signal, data = d2b)
model.m <- lm(delta_needs_money ~ needsmoneyt1 + signal, data = d2b)
mediation_model4 <- mediate(model.m, model.y, treat = 'signal', mediator = 'delta_needs_money', boot = T)
summary(mediation_model4)
plot(mediation_model4)
# d2, but with cheating
# Retain only VerbalRequest, Crying, Depression
d2c <-
d %>%
dplyr::select(
p_info,
conflict,
signal,
needsmoneyt1,
delta_needs_money,
delta_lend,
PC1t1,
PC1t2
) %>%
dplyr::filter(
signal %in% c(
'VerbalRequest',
'Crying',
'Depression')
) %>%
mutate(
p_info = ordered(as.character(p_info), levels = c('Cheating', 'PrivateInformation', 'Honest')),
signal = ordered(signal, levels = c('VerbalRequest', 'Crying', 'Depression')),
delta_need = case_when(
delta_needs_money < -1 ~ -1,
delta_needs_money > 1 ~ 1,
TRUE ~ 0
),
delta_need2 = case_when(
PC1t2 < (mean(PC1t2, na.rm=T) - sd(PC1t2, na.rm=T)/2) ~ -1,
PC1t2 > mean(PC1t2, na.rm=T) + sd(PC1t2, na.rm=T)/2 ~ 1,
TRUE ~ 0
),
delta_need = ordered(delta_need),
delta_need2 = ordered(delta_need2)
) %>%
na.omit
# Exploratory models
# Main effects on PC1t2 only
m <- lm(PC1t2 ~ signal + conflict + p_info, data = d2)
Anova(m)
plot(allEffects(m))
# Main effects on PC1t2 only, controlling for PC1t1
m <- lm(PC1t2 ~ PC1t1 + signal + conflict + p_info, data = d2)
Anova(m)
plot(allEffects(m))
# Full interaction model
m <- lm(PC1t2 ~ signal * conflict * p_info, data = d2)
Anova(m, type = 3)
plot(allEffects(m))
# Full interaction model, controlling for PC1t1
m <- lm(PC1t2 ~ PC1t1 + signal * conflict * p_info, data = d2)
Anova(m, type = 3)
plot(Effect(c("signal", "conflict", "p_info"), m))
# Final exploratory model of PC1t2?
# signal * conflict interaction
# p_info main effect only
m_exp <- lm(PC1t2 ~ signal * conflict + p_info, data = d2) #pca after conditions filtered
summary(m_exp)
Anova(m_exp, type = 3)
plot(allEffects(m_exp))
Plot_Exploratory <- visreg(m_exp, xvar= "signal", by = "conflict", partial = F, rug = F, gg = T) + theme_bw() + labs(y = "PC1 Time 2", x = "")
# With cheating too
m_exp <- lm(delta_needs_money ~ signal * p_info + conflict, data = d2c)
summary(m_exp)
Anova(m_exp, type = 3)
plot(allEffects(m_exp))
Plot_Exploratory <- visreg(m_exp, xvar= "signal", by = "conflict", partial = F, rug = F, gg = T) + theme_bw() + labs(y = "PC1 Time 2", x = "")
# m <- lm(PC1t2all ~ signal * conflict, data = d2b)
# Anova(m, type = 3)
# plot(allEffects(m))
# Ordinal logistic of delta_need (based on needsmoneyt2)
# Main effects only
m <- polr(delta_need ~ signal+p_info+conflict, d2, Hess = T)
plot(Effect('signal', m))
# Full interaction model
m <- polr(delta_need ~ signal*conflict + p_info, d2, Hess = T)
plot(allEffects(m))
# Mosaic plots
ggplot(data = d2) +
geom_mosaic(aes(x = product(delta_need, signal), fill = delta_need)) +
labs(x='', y='', title='Post signal reaction')
ggplot(data = d2) +
geom_mosaic(aes(x = product(delta_need, signal), fill = delta_need)) +
facet_grid(conflict~p_info) +
labs(x='', y='', title='Post signal reaction')
# Heatmap
p <-
d %>%
dplyr::select(
needvarsT1,
needvarsT2,
signal,
conflict,
p_info,
Age,
Sex,
Siblings,
`Duration (in seconds)`,
LocationLatitude
) %>%
dplyr::filter(
signal %in% c(
'VerbalRequest',
'Crying',
'Depression'
)
) %>%
mutate(
conflict = as.numeric(conflict),
p_info = as.numeric(p_info),
signal = as.numeric(ordered(signal, levels = c('VerbalRequest', 'Crying', 'Depression'))),
Sex = ifelse(Sex == 'Female', 0, 1),
Siblings = ifelse(Siblings == '0', 0, 1),
LocationLatitude = abs(LocationLatitude)
) %>%
na.omit %>%
cor(use = 'pairwise.complete.obs') %>%
ggcorrplot(hc.order = T, hc.method = 'ward')
p
# Note: p_info correlates with T2 vars more than does conflict
# Elasticnet model of likelylendmoneyt2 as function of T1 need vars
x <- d[c(needvarsT1, 'likelylendmoneyt2')]
x <- as.matrix(na.omit(x))
cv <- cv.glmnet(x[,1:11], x[,12])
coef(cv)
d0 %>%
dplyr::select(Angry:NoneOfAbove) %>%
cor(use = 'pairwise.complete.obs') %>%
ggcorrplot(hc.order = T, hc.method = 'ward')
# Mean endorsement of each perceived-emotion item within each signal condition.
d_mean_feel <-
  d0 %>%
  dplyr::select(signal, Angry:NoneOfAbove) %>%
  group_by(signal) %>%
  summarise_all(mean, na.rm=T)
# Condition (rows) x emotion (columns) matrix of means for the heatmaps.
signalmat <- as.matrix(d_mean_feel[-1])
rownames(signalmat) <- d_mean_feel$signal
heatmap(signalmat, scale = 'none')
## my attempt
# Cluster conditions (rows) and emotions (columns) separately so both
# dendrograms can be handed to gapmap() below.
distxy <- dist(signalmat) #gets rid of checked answers
hc <- hclust(distxy)
dend <- as.dendrogram(hc)
distxy2 <- dist(t(signalmat))
hc2 <- hclust(distxy2)
dend2 <- as.dendrogram(hc2)
# Candidate colour palettes for the gap heatmap (only yellow_red is used).
grey_scale =c("#333333", "#5C5C5C", "#757575", "#8A8A8A", "#9B9B9B", "#AAAAAA", "#B8B8B8", "#C5C5C5", "#D0D0D0", "#DBDBDB", "#E6E6E6")
blue_red =c("#053061", "#2166AC", "#4393C3", "#92C5DE", "#D1E5F0", "#F7F7F7","#FDDBC7", "#F4A582", "#D6604D", "#B2182B", "#67001F")
yellow_red =c("#ffff00", "#ffea00", "#ffd400", "#ffbf00", "#ffaa00", "#ff9500", "#ff8000", "#ff6a00", "#ff5500", "#ff4000", "#ff2b00", "#ff1500", "#ff0000")
navajowhite_navy =c("#000080", "#151284", "#2b2587", "#40388b", "#554a8f", "#6a5d93", "#806f97", "#95819a", "#aa949e", "#bfa7a2", "#d4b9a5", "#eacba9", "#ffdead")
#gapmap(m = as.matrix(hclust), d_row= rev(dend), d_col=dend, col = grey_scale)
gapmap(m = signalmat, d_row= rev(dend), d_col=dend2, mode = "quantitative", mapping="linear", col = yellow_red)
## end my attempt
# Order the emotion factor by the means in the second row of d_mean_feel
# (i.e. the second signal condition), then plot per-condition profiles.
lvls = names(d_mean_feel[-1])[order(as.numeric(d_mean_feel[2,-1]))]
d_mean_long <-
  d_mean_feel %>%
  gather(key = Emotion, value = Mean, -signal) %>%
  mutate(
    Emotion = factor(Emotion, levels = lvls)
  )
ggplot(d_mean_long, aes(Emotion, Mean, group = signal)) +
  geom_line() +
  facet_wrap(~signal, ncol=1)
# PCA of perceived emotions
# Unscaled principal components of the emotion checkbox matrix; the first
# two component scores are attached back onto the data for plotting.
emotions <-
  d0 %>%
  dplyr::select(signal,Angry:NoneOfAbove)
Epca <- prcomp(emotions[-1], scale. = F)
emotions$PC1 <- Epca$x[,1]
emotions$PC2 <- Epca$x[,2]
plot(Epca)
# NOTE(review): pca_loadings_plot() is not a base/ggfortify function --
# presumably a helper defined elsewhere in the project; confirm.
pca_loadings_plot(Epca)
emotions_biplot <- autoplot(
  Epca,
  loadings = T,
  loadings.label = T,
  data = emotions
) + theme_bw()
# Order signals by median of PC1
emotions$signal <- fct_reorder(emotions$signal, emotions$PC1)
autoplot(
  Epca,
  data = emotions,
  colour = 'signal',
  frame.type = 'norm'
) + facet_wrap(~signal) + theme_bw() + labs(title = 'Ordered by PC1')
epc1 <- ggplot(emotions, aes(signal, PC1)) + geom_boxplot() + geom_point() + theme_bw() + coord_flip()
# Order signals by median of PC2
emotions$signal <- fct_reorder(emotions$signal, emotions$PC2)
autoplot(
  Epca,
  data = emotions,
  colour = 'signal',
  frame.type = 'norm'
) + facet_wrap(~signal) + theme_bw() + labs(title = 'Ordered by PC2')
ggplot(emotions, aes(signal, PC2)) + geom_boxplot() + geom_point() + theme_bw() + coord_flip()
# Combined biplot with loadings and per-condition normal ellipses.
autoplot(
  Epca,
  loadings = T,
  loadings.label = T,
  data = emotions,
  colour = 'signal',
  frame.type = 'norm'
) +
  theme_bw()
# Perceived emotions vs. PC1t2
# Regress the T2 composite (PC1t2all, computed upstream) on three of the
# perceived-emotion checkboxes, treated as factors.
d3 <-
  d0 %>%
  mutate_at(vars(Angry:NoneOfAbove), factor)
m <- lm(PC1t2all ~ Depressed + Sad + MentallyIll, d3)
Anova(m)
plot(allEffects(m))
# Summary of correlation of all T1 outcome vars except anger
# (positions 3 and 5 of needvarsT1 are dropped; presumably the anger items --
# confirm against the definition of needvarsT1 upstream).
# FIX: removed the accidentally duplicated assignment (`cm <- cm <- ...`).
cm <- cor(d0[needvarsT1[-c(3,5)]], use = 'pairwise.complete.obs')
# Unique pairwise correlations (upper triangle, diagonal excluded).
cmu <- cm[upper.tri(cm)]
# discussion explorations
# It is also a possibility that conflict might increase one's suspicion of cheating in others,
# however, because our measure for this is so closely correlated to our other pc1 variables,
# something which is seen pre-signal with and without controlling for the level of information (c in analysis 2).
# T1 suspicion as a function of conflict, with and without p_info as a control.
c1 <- glm(believeneedt1 ~ conflict + p_info, data = d0)
summary(c1)
plot(allEffects(c1))
c2 <- glm(believeneedt1 ~ conflict, data = d0)
summary(c2)
plot(allEffects(c2))
# Although we did not ask if participants viewed the sister's request to be manipulative directly,
# those in the suicide attempt condition perceived the request as more indicative of an attempt to get
# the money for other purposes than those who saw less costly signals.
# bm <- glm(believeneedt2/100 ~ conflict * signal * p_info, family = binomial, data = d0)
# summary(bm)
# Anova(bm, type = 3)
# plot(allEffects(bm))
# importance of perceived need on instrumental outcomes
# going for r-squareds. Is this viable? Should anything else be included?
nm <- lm(likelylendmoneyt2 ~ needsmoneyt2 + conflict + p_info + signal, data = d0)
summary(nm)
plot(allEffects(nm))
nm2 <- lm(comfortablelendingt2 ~ needsmoneyt2, data = d0)
summary(nm2)
# then mention the effect of signal in predicting needsmoney is small
nm3 <- lm(needsmoneyt2 ~ signal, data = d0)
summary(nm3)
plot(allEffects(nm3))
# testing for demographic effects at time one
d0$Sex <- as.factor(d0$Sex)
d0$Ed <- as.factor(d0$Ed)
d0$Siblings <- as.factor(d0$Siblings)
# Sex interactions with every manipulation/demographic; the outcome is the
# (sign-flipped) T1 composite.
mdem <- lm(-PC1t1 ~ Sex * conflict + Sex * p_info + Sex * years_education + Sex * Age + Sex * Children + Sex * Siblings2, d0)
summary(mdem)
plot(allEffects(mdem))
# (p_comfort_signal_pinfo + p_lend_signal_pinfo + scale_y_continuous(limits = c(-40, 20)))/(p_pc1_signal_pinfo + p_money_signal_pinfo + scale_y_continuous(limits = c(-40, 20)))
# NOTE(review): interactplot() is a project-defined helper (returns a list
# with $plot and $model elements, per usage here and at int1sig below).
p_ease <- interactplot(MC2.4_1 ~ signal2 * p_info - 1, 'p_info', '\nA. Information', removeY = F)
p_ease$plot
p_ease2 <- interactplot(MC2.4_1 ~ signal2 * conflict - 1, 'conflict', '\nB. Conflict', removeY = F)
p_ease2$plot
p_anger <- interactplot(angryt2 ~ angryt1 + signal2 * p_info - 1, 'p_info', '\nB. Ease of putting in scenario Reported ease of imagining (signal by conflict)', removeY = F)
p_anger$plot
p_believeneed <- interactplot(believeneedt2 ~ believeneedt1 + signal2 * p_info - 1, 'p_info', '\nD. Ease of putting in scenario', removeY = F)
p_believeneed$plot
# for analysis2
# Re-level the signal factor so conditions appear in order of signal cost.
d0$signal <- factor(d0$signal, levels = c('Schizophrenia', 'Control', 'VerbalRequest', 'Crying', 'Anger', 'Depression', 'Depression&Suicidal', 'Suicide attempt'))
full_int_overview <- lm(needsmoneyt2 ~ signal * conflict * p_info, data = d0) #changed from delta_lend
plot(allEffects(full_int_overview))
# Partial-effect plots of the three-way model, one panel per conflict level.
# NOTE(review): "Liklihood" in the y label below is a typo in a user-facing
# string; left unchanged here to preserve output, but worth fixing upstream.
overviewa <- visreg(full_int_overview, xvar= "signal", by = "p_info", cond = list(conflict = "Conflict"), partial = F, rug = F, gg = T) +
  theme_bw() + labs(y = "", x = "", title = "A. Conflict") + coord_flip()
overviewb <- visreg(full_int_overview, xvar= "signal", by = "p_info", cond = list(conflict = "Support"), partial = F, rug = F, gg = T) +
  theme_bw() + labs(y = "Liklihood of lending the money at T2", x = "", title = "B. Support") +coord_flip()
(overviewa + overviewb + plot_layout(ncol = 1))
# Custom ggplot
# Re-extract the visreg fits so both conflict levels can be drawn manually.
vis_conflict <- visreg(full_int_overview, xvar= "signal", by = "p_info", cond = list(conflict = "Conflict"), plot = F)
vis_support <- visreg(full_int_overview, xvar= "signal", by = "p_info", cond = list(conflict = "Support"), plot = F)
vis_overview <- rbind(vis_conflict$fit, vis_support$fit)
p_overview <-
  ggplot(vis_overview, aes(signal, visregFit, colour=p_info)) +
  geom_point(position = position_dodge(width = 0.3)) +
  geom_linerange(aes(ymin = visregLwr, ymax = visregUpr), position = position_dodge(width = 0.3)) +
  geom_hline(yintercept = 0, linetype = 'dotted') +
  facet_wrap(~conflict) +
  labs(title = 'Overview', x = '', y = '') +
  coord_flip() +
  theme_bw()
# NOTE(review): this prints the *earlier* `p` (the correlation heatmap), not
# the p_overview object just created -- possibly `p_overview` was intended.
p
p <-
  ggplot(vis_overview, aes(signal, visregFit, colour=p_info, shape=conflict)) +
  geom_point(position = position_dodge(width = 0.7), size = 3) +
  geom_linerange(aes(ymin = visregLwr, ymax = visregUpr), position = position_dodge(width = 0.7), size = 1) +
  geom_hline(yintercept = 0, linetype = 'dotted') +
  labs(title = 'Overview', x = '', y = '\nChange in perceived need (time 2)') +
  coord_flip() +
  theme_bw(15)
p
#Plot_full_int
#full_int_overview2 <- glm(likelylendmoneyt2 ~ signal * conflict * p_info, data = d0)
#plot(allEffects(full_int_overview2))
# signal vs. mentally ill
# Proportion of respondents in each condition who ticked "MentallyIll",
# with conditions ordered by that proportion.
p_mentallyill <-
  emotions %>%
  group_by(signal) %>%
  summarise(mentallyill = sum(MentallyIll)/n()) %>%
  ungroup %>%
  mutate(signal = fct_reorder(signal, mentallyill)) %>%
  ggplot(aes(mentallyill, signal)) +
  geom_point() +
  labs(x = '\nProportion categorizing sister as mentally ill') +
  theme_bw()
p_mentallyill
# Upset plot
# Co-occurrence of the emotion checkboxes across all conditions; upset()
# needs a plain data.frame of 0/1 integer columns.
d0 %>%
  dplyr::select(signal, Angry:NoneOfAbove) %>%
  mutate_if(is.numeric, as.integer) %>%
  as.data.frame %>%
  upset(order.by = 'freq', nsets = 12)
# Control condition only
d0 %>%
  dplyr::select(signal, Angry:NoneOfAbove) %>%
  dplyr::filter(signal == 'Control') %>%
  mutate_if(is.numeric, as.integer) %>%
  as.data.frame %>%
  upset(order.by = 'freq', nsets = 12, nintersect = 10)
# Depression & suicidal condition only.
d0 %>%
  dplyr::select(signal, Angry:NoneOfAbove) %>%
  dplyr::filter(signal == 'Depression&Suicidal') %>%
  mutate_if(is.numeric, as.integer) %>%
  as.data.frame %>%
  upset(order.by = 'freq', nsets = 12, nintersect = 10)
# Anovas to draw p-values from for Rmd
# Type-III ANOVAs on models stored inside earlier interactplot() results.
int1sig <- Anova(p_comfort_signal_pinfo$model, type = 3)
int2sig <- Anova(p_lend_signal_conflict$model, type = 3)
# Summary table of only T1 and T2 vars
# Stack the T1 and T2 measurements long-wise, stripping the t1/t2 suffixes
# so the two waves share column names.
# NOTE(review): str_replace() replaces the *first* occurrence of "t1"/"t2"
# anywhere in the name, not only a trailing suffix -- safe only as long as
# no variable name contains "t1"/"t2" elsewhere; confirm.
dft1 <-
  d0 %>%
  dplyr::select(needsmoneyt1:comfortablelendingt1, MC1.1_1) %>%
  rename(MC1 = MC1.1_1) %>%
  rename_all(function(x)str_replace(x, 't1', '')) %>%
  mutate(Time = 'T1')
dft2 <-
  d0 %>%
  dplyr::select(needsmoneyt2:angryt2, satisfactiont2, howsadt2, howreasonablet2:comfortablelendingt2, MC1.2_1) %>%
  rename(MC1 = MC1.2_1) %>%
  rename_all(function(x)str_replace(x, 't2', '')) %>%
  mutate(Time = 'T2')
dft12 <- rbind(dft1, dft2)
# Human-readable labels for the summary table, keyed by stripped name.
demovars <-
  c(
    "needsmoney" = "Perceived need of money",
    "likelylendmoney" = "How likely to lend money",
    "angry" = "How angry are you about the request",
    "satisfaction" = "How satisfied to help",
    "howsad" = "How sad about health problem",
    "howreasonable" = "How reasonable is the amount requested",
    "believehealth" = "How much do believe the niece is ill",
    "daughterharm" = "How much harm will your daughter suffer",
    "believeneed" = "Suspicion of sister",
    "sisterbenefit" = "Perceived benefit to sister",
    "trustrepay" = "Trust that sister will repay",
    "comfortablelending" = "Amount conformtable lending in dollars",
    "MC1" = "How close to sister"
  )
# custom.summarize() is a project-defined helper (not base/tidyverse).
df_t1t2_sum <- custom.summarize(dft12, vars = demovars, facvar = 'Time', statscol = F)
|
a56d036429119682ae827cafba7aed46b826f8b4 | 099552f15fc47a5b988684d22126ef799d578e03 | /man/add_node.Rd | b34d09ba477ee4661d720fc02ef344d428ff7824 | [] | no_license | statwonk/DiagrammeR | 261ff3f9e74035172ad5029b0e13b16813db16a5 | 75253f412daeb87c1c04f69659e4a6e05b681df8 | refs/heads/master | 2021-01-23T18:46:54.192760 | 2015-11-08T16:42:44 | 2015-11-08T16:42:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,483 | rd | add_node.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_node.R
\name{add_node}
\alias{add_node}
\title{Add a node to an existing graph object}
\usage{
add_node(graph, node, from = NULL, to = NULL, label = TRUE, type = NULL)
}
\arguments{
\item{graph}{a graph object of class \code{dgr_graph} that is created using
\code{create_graph}.}
\item{node}{a node ID for the newly connected node.}
\item{from}{an optional vector containing node IDs from which edges will be
directed to the new node.}
\item{to}{an optional vector containing node IDs to which edges will be
directed from the new node.}
\item{label}{a character object for supplying an optional label to the node.
Setting to \code{TRUE} ascribes the node ID to the label. Setting to
\code{FALSE} yields a blank label.}
\item{type}{an optional string that describes the entity type for the node
to be added.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
With a graph object of class \code{dgr_graph}, add a new node
of a specified type to extant nodes within the graph.
}
\examples{
\dontrun{
# Create an empty graph
graph <- create_graph()
# Add two nodes
graph <- add_node(graph, node = "a")
graph <- add_node(graph, node = "b")
get_nodes(graph)
#> [1] "a" "b"
# Add a node with 'type' defined
graph <- add_node(graph, node = "c", type = "letter")
get_node_df(graph)
#> nodes type label
#> 1 a a
#> 2 b b
#> 3 c letter c
}
}
|
dcdeb48ce520b9ad70c52638195dc05505bd0a42 | 91760defb6326b38cd2ee069ef44044a6067e616 | /regularization.R | d89fa02850e677ce27a02bd598e575425394062c | [] | no_license | avnishsingh93/recommendersystem | 7cee2ded184e29701968e1c375cf899dc87ea5a4 | 7a7746020c040d53206c8a79972a3ea47d449899 | refs/heads/master | 2022-05-25T07:44:17.508407 | 2020-05-01T09:31:40 | 2020-05-01T09:31:40 | 260,242,581 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,444 | r | regularization.R | lamdas=seq(0,25,1)
# Tune the regularization penalty (lambda) for the movie/user/release-year/
# genre/rating-delay effects model by computing the hold-out RMSE for each
# candidate value in `lamdas` (defined upstream, together with `edx`,
# `validation` and `RMSE()`).
#
# NOTE(review): inside this tuning loop the year, genre and rating-delay
# effects are NOT penalised (plain n() denominators), whereas the final
# refit below penalises all five effects with n() + l. This inconsistency is
# preserved because changing it would change the selected lambda -- confirm
# which form was intended.
error <- sapply(lamdas, function(i) {
  # Global mean rating of the training set.
  m <- mean(edx$rating)
  # Movie effect, shrunk towards 0 by the penalty i.
  movie <- edx %>%
    group_by(movieId) %>%
    summarise(m_i = sum(rating - m) / (n() + i))
  # User effect on the residuals after the movie effect.
  user <- edx %>%
    left_join(movie, by = "movieId") %>%
    group_by(userId) %>%
    summarise(u_i = sum(rating - m - m_i) / (n() + i))
  # Release-year effect (unpenalised -- see note above).
  year <- edx %>%
    left_join(movie, by = "movieId") %>%
    left_join(user, by = "userId") %>%
    group_by(releaseyear) %>%
    summarise(y_i = sum(rating - m - m_i - u_i) / (n()))
  # Genre effect (unpenalised).
  genre <- edx %>%
    left_join(movie, by = "movieId") %>%
    left_join(user, by = "userId") %>%
    left_join(year, by = "releaseyear") %>%
    group_by(genres) %>%
    summarise(g_i = sum(rating - m - m_i - u_i - y_i) / (n()))
  # Effect of the delay between release and rating (unpenalised).
  yearsafter <- edx %>%
    left_join(movie, by = "movieId") %>%
    left_join(user, by = "userId") %>%
    left_join(year, by = "releaseyear") %>%
    left_join(genre, by = "genres") %>%
    group_by(ratedafter) %>%
    summarise(ya_i = sum(rating - m - m_i - u_i - y_i - g_i) / (n()))
  # Predict on the hold-out set by summing all fitted effects.
  pred_regularize <- validation %>%
    left_join(movie, by = "movieId") %>%
    left_join(user, by = "userId") %>%
    left_join(year, by = "releaseyear") %>%
    left_join(genre, by = "genres") %>%
    left_join(yearsafter, by = "ratedafter") %>%
    mutate(prediction = m + m_i + u_i + y_i + g_i + ya_i)
  # BUG FIX: the RMSE was previously computed against `test$rating` (a data
  # set never defined in this script) while the predictions above are built
  # on `validation`; compare against the ratings of the same data set.
  RMSE(validation$rating, pred_regularize$prediction)
})
# Inspect the tuning curve and keep the lambda with the lowest RMSE.
qplot(lamdas, error)
l <- lamdas[which.min(error)]
# Refit the full model with the chosen penalty l, also keeping the per-group
# observation counts (n_*) for later inspection.
# BUG FIX: `m` was previously only defined inside the sapply() above and was
# therefore not in scope here; define it at top level before first use.
m <- mean(edx$rating)
movie <- edx %>%
  group_by(movieId) %>%
  summarise(m_i = sum(rating - m) / (n() + l), n_m = n())
user <- edx %>%
  left_join(movie, by = "movieId") %>%
  group_by(userId) %>%
  summarise(u_i = sum(rating - m - m_i) / (n() + l), n_u = n())
year <- edx %>%
  left_join(movie, by = "movieId") %>%
  left_join(user, by = "userId") %>%
  group_by(releaseyear) %>%
  summarise(y_i = sum(rating - m - m_i - u_i) / (n() + l), n_y = n())
genre <- edx %>%
  left_join(movie, by = "movieId") %>%
  left_join(user, by = "userId") %>%
  left_join(year, by = "releaseyear") %>%
  group_by(genres) %>%
  summarise(g_i = sum(rating - m - m_i - u_i - y_i) / (n() + l), n_g = n())
yearsafter <- edx %>%
  left_join(movie, by = "movieId") %>%
  left_join(user, by = "userId") %>%
  left_join(year, by = "releaseyear") %>%
  left_join(genre, by = "genres") %>%
  group_by(ratedafter) %>%
  summarise(ya_i = sum(rating - m - m_i - u_i - y_i - g_i) / (n() + l), n_ya = n())
pred_regularize <- validation %>%
  left_join(movie, by = "movieId") %>%
  left_join(user, by = "userId") %>%
  left_join(year, by = "releaseyear") %>%
  left_join(genre, by = "genres") %>%
  left_join(yearsafter, by = "ratedafter") %>%
  mutate(prediction = m + m_i + u_i + y_i + g_i + ya_i) %>%
  .$prediction
# BUG FIX: as above, evaluate against the hold-out set the predictions were
# made for.
model_6 <- RMSE(validation$rating, pred_regularize)
|
76d55840d0c44df6b482692b0047d8e964954574 | 0aaecd6991a7f16759a1f8d2b3be6093f8a183af | /inst/snippet/domedata.R | 30175e7397d8c47defd408c213c6bfdfe91ad25d | [] | no_license | cran/fastR | 3f0e3959dad4e5d361c341eb6bea670eab6bfdcc | 572a5dc31e5aa85af4126662f95268329179c87b | refs/heads/master | 2021-01-21T04:55:14.927487 | 2017-07-27T19:52:06 | 2017-07-27T19:52:06 | 17,695,981 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 49 | r | domedata.R | require(alr3); data(domedata)
summary(domedata)
|
70c335fa34f600af2e8244b64cf65a85551a7984 | 66e04f24259a07363ad8da7cd47872f75abbaea0 | /Correlation and Regression/Chapter 1-Visualizing two variables/3.R | 2abfe9ab0b92b19c479882c287ac981066cc0f62 | [
"MIT"
] | permissive | artileda/Datacamp-Data-Scientist-with-R-2019 | 19d64729a691880228f5a18994ad7b58d3e7b40e | a8b3f8f64cc5756add7ec5cae0e332101cb00bd9 | refs/heads/master | 2022-02-24T04:18:28.860980 | 2019-08-28T04:35:32 | 2019-08-28T04:35:32 | 325,043,594 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,176 | r | 3.R | # Creating scatterplots
# Creating scatterplots is simple and they are so useful that is it worthwhile to expose yourself to many examples. Over time, you will gain familiarity with the types of patterns that you see. You will begin to recognize how scatterplots can reveal the nature of the relationship between two variables.
#
# In this exercise, and throughout this chapter, we will be using several datasets listed below. These data are available through the openintro package. Briefly:
#
# The mammals dataset contains information about 39 different species of mammals, including their body weight, brain weight, gestation time, and a few other variables.
# The mlbBat10 dataset contains batting statistics for 1,199 Major League Baseball players during the 2010 season.
# The bdims dataset contains body girth and skeletal diameter measurements for 507 physically active individuals.
# The smoking dataset contains information on the smoking habits of 1,691 citizens of the United Kingdom.
# To see more thorough documentation, use the ? or help() functions.
#
# Instructions 1/4
# 25 XP
# 1
# Using the mammals dataset, create a scatterplot illustrating how the brain weight of a mammal varies as a function of its body weight.
#
# Take Hint (-7 XP)
# 2
# Using the mlbBat10 dataset, create a scatterplot illustrating how the slugging percentage (SLG) of a player varies as a function of his on-base percentage (OBP).
#
# 3
# Using the bdims dataset, create a scatterplot illustrating how a person's weight varies as a function of their height. Use color to separate by sex, which you'll need to coerce to a factor with factor().
#
# 4
# Using the smoking dataset, create a scatterplot illustrating how the amount that a person smokes on weekdays varies as a function of their age.
# Mammals scatterplot
# Brain weight as a function of body weight (openintro::mammals).
ggplot(mammals, aes(x = BodyWt, y = BrainWt )) +
  geom_point()
# Baseball player scatterplot
# Slugging percentage vs. on-base percentage (openintro::mlbBat10).
ggplot(mlbBat10, aes(x = OBP, y = SLG)) +
  geom_point()
# Body dimensions scatterplot
# Weight vs. height, coloured by sex; sex is coerced to a factor so ggplot
# treats it as a discrete colour scale (openintro::bdims).
ggplot(bdims, aes(x = hgt, y = wgt, color = factor(sex))) +
  geom_point()
# Smoking scatterplot
# Weekday smoking amount vs. age (openintro::smoking).
ggplot(smoking, aes(x = age, y = amtWeekdays)) +
  geom_point()
|
cf283e73e4b93aff7ac73f864fd4e619c4dd89ed | bb14506c83541c183a7edd97c6e40d8cc94f3a29 | /BDEEPZillow/man/db_type_converter.Rd | e0d95c68ed6759bed7f86c7dbc22ac848c7bd04b | [] | no_license | uiuc-bdeep/Zillow_Housing_Database | a43885af38ed4eb61902829a30ae6e4135b08329 | fa379796e2503d7f6723e2bc1fff34c19787380c | refs/heads/master | 2021-07-24T11:09:28.742501 | 2020-04-10T12:17:57 | 2020-04-10T12:17:57 | 136,386,596 | 5 | 7 | null | null | null | null | UTF-8 | R | false | true | 778 | rd | db_type_converter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/converter.R
\name{db_type_converter}
\alias{db_type_converter}
\title{db_type_converter}
\usage{
db_type_converter(data, dbname)
}
\arguments{
\item{data}{The actual data.frame to convert.}
\item{dbname}{The name of the database. Used to distinguish data.}
}
\value{
The modified data.frame
}
\description{
This function converts the column types of the data to align with the database type requirements. See the requirements documentation online.
}
\examples{
# data is the output from any get_from_db function
data <- get_from_db_usr("SELECT loadid FROM hedonics_new.sd_hedonics_new")
# convert the type of the columns
data <- db_type_converter(data)
# ... or you can specify the database
data <- db_type_converter(data, dbname = "zillow_2017_nov")
}
|
dd37d2c1a2144886572dc99451526ca4d01299d5 | 9bc8edece669ca4054d6cdec72e7332c1ac82957 | /man/lonlat_to_tilenum.Rd | e8b88913d3c29bde5f97eac85c6356b37edcfa60 | [
"MIT"
] | permissive | EBukin/slippymath | f1bd041fa2214a9d6a981f6f22df3594d905ca83 | 1e4c8bde183884d63b7db0d726138bc9ac12c26f | refs/heads/master | 2023-03-15T19:03:41.786483 | 2019-06-28T15:21:41 | 2019-06-28T15:21:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 889 | rd | lonlat_to_tilenum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slippymath.R
\name{lonlat_to_tilenum}
\alias{lonlat_to_tilenum}
\title{lonlat_to_tilenum}
\usage{
lonlat_to_tilenum(lon_deg, lat_deg, zoom)
}
\arguments{
\item{lon_deg}{degrees longitude for point}
\item{lat_deg}{degrees latitude for point}
\item{zoom}{zoom level for tile calculation. Increasing zoom increases the
number of tiles.}
}
\value{
a list containing `x` and `y` - the tile numbers.
}
\description{
Convert longitude and latitude to slippy tile numbers
}
\details{
Returns the Open Street Map slippy map tile numbers (x, y) the
supplied latitude and longitude fall on, for a given zoom level.
The point specified by `lon_deg` and `lat_deg` is assumed to be in the
EPSG:4326 coordinate reference system.
}
\examples{
lonlat_to_tilenum(
lon = 13.37771496361961,
lat = 52.51628011262304,
zoom = 17
)
}
|
7520873afb43a342be24c16046fa90a623c6f577 | 97b38346e7bb6b2a9c7736e30bc8facbeeb5dbfe | /Martha HW2.R | 05cfd75b15b51750fa2a0b23ad32557c7ad2a23a | [] | no_license | mtgizaw/Intro_R_Programming | 756da03b957a394d27c6048d63af4706da17a647 | 9aa1ae50eebd399884c476271ff4eb3047edb839 | refs/heads/master | 2022-02-17T12:19:30.312139 | 2019-08-29T15:29:25 | 2019-08-29T15:29:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,686 | r | Martha HW2.R | #Assignment 2
#Intro to R & Statistical Programming
#Please label the file with your name and upload to blackboard.
#Name: MARTHA GIZAW
#Directions:
#Fill out this short assignment using your own knowledge.
#Please feel free to consult our lecture notes and code or ask us for help.
#Question 1
#Using the iris dataset create a boxplot visualizing Sepal width according to species.
#Be sure to include axis labels and a title
# One box per species; the formula interface groups Sepal.Width by Species.
boxplot(Sepal.Width~Species, data=iris,
        xlab="Species", ylab="Sepal Width", main="Iris Boxplot")
#Question 2
#Using the iris dataset create a new variable for the ratio between Petal Length
#and Petal width, then provide the five-number summary for the new variable.
# Ratio of petal length to petal width for each flower.
new_var <- with(iris, Petal.Length / Petal.Width)
fivenum(new_var)
#For a good breakdown on what the five numbers (excluding the average) mean:
# summary() reports the same quantiles plus the mean.
summary(new_var)
#Question 3
#Using an if else statement write a code chunk that prints if a number is odd or even and test it.
# Read a number interactively and report its parity.
# NOTE(review): readline() returns "" when the script is run non-interactively,
# so as.integer() yields NA and the `if` condition errors -- this only works
# when sourced in an interactive session.
num <- readline(prompt="Enter number: ")
num <- as.integer(num)
if (num %% 2 == 0) {
  print(paste(num, "is even."))
} else {
  print(paste(num, "is odd."))
}
#Question 4
#Using a for loop write a code block that prints 1 to 20 advancing by one each time and then
#back down from 20 to 1. The output should look something like this...
# [1] 1
# [1] 1 2
# [1] 1 2 3
# ...
# [1] 1 2 3
# [1] 1 2
# [1] 1
forward <- seq(from=2, to=20, by=1)
backward <- seq(from=20, to=2, by=-1)
# x accumulates the sequence 1..k; the first loop prints then appends
# (lengths 1..19), the second prints then drops the last element
# (lengths 20..2; x[-i] removes position i, which equals the current
# length), and the final print emits the remaining single 1.
x <- 1
# NOTE(review): `if (x)` is always TRUE here since x == 1 at this point;
# the guard appears redundant and relies on numeric truthiness.
if (x) {
  for (i in forward) {
    print(x)
    x <- c(x, i)
  }
  for (i in backward) {
    print(x)
    x <- x[-i]
  }
  print(x)
}
|
1a0d8702fc05bd73b2a6b8b9f189c3d29c1cafda | bd073304fb38f0e878bccb4bc1855e25ede3922c | /R/flatten_table_list.R | dbd9056554ff77117f2186306f7daeb43d107435 | [] | no_license | cran/flattabler | bb7b58bee53167d607eda7ccfe64c4faf1bca2fd | 78045611c6319d0be84c60fe46a7a730e0f8467a | refs/heads/master | 2023-01-16T06:00:41.658005 | 2020-11-15T18:00:02 | 2020-11-15T18:00:02 | 276,685,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,162 | r | flatten_table_list.R | #' Transform a pivot table list into a flat table
#'
#' Given a list of pivot tables and a transformation function that flattens a
#' `pivot_table` object, transforms each table using the function and merges the
#' results into a flat table.
#'
#' @param lpt A list of `pivot_table` objects.
#' @param FUN A function, transformation function that flattens a `pivot_table`
#'   object (it returns a `tibble`).
#'
#' @return A `tibble`, a flat table implemented by a `tibble`. An empty input
#'   list yields an empty `tibble`.
#'
#' @family flat table generation functions
#'
#' @examples
#' library(tidyr)
#'
#' f <- function(pt) {
#'   pt %>%
#'     set_page(1, 1) %>%
#'     remove_top(1) %>%
#'     define_labels(n_col = 2, n_row = 2) %>%
#'     remove_k() %>%
#'     replace_dec() %>%
#'     fill_values() %>%
#'     fill_labels() %>%
#'     remove_agg() %>%
#'     unpivot()
#' }
#'
#' ft <- flatten_table_list(list_pt, f)
#'
#' @export
flatten_table_list <- function(lpt = list(), FUN) {
  # Flatten every pivot table and row-bind all results in a single pass.
  # FIX: the previous loop seeded the result with lft[[1]] (erroring on an
  # empty list) and bound one table at a time, which is quadratic in the
  # number of rows; bind_rows() on the whole list is equivalent and handles
  # the empty case.
  dplyr::bind_rows(lapply(lpt, FUN))
}
|
90ccdde02adcba08d3bdb251095adf58a6a62ad8 | 62213a596ff3ca746d3d73fecc492323a3c0269a | /man/values.DTSg.Rd | 0088acefb1883e32fb8ddc191e1c3e34bd84a80b | [
"MIT"
] | permissive | gisler/DTSg | dc0c9e42a636987b5c538a8856fc479328df2f28 | 3e2c29d1c5dc9c71f913de014d4278d17ba039c6 | refs/heads/main | 2023-02-17T08:09:40.867100 | 2023-02-04T11:49:29 | 2023-02-04T12:02:56 | 206,717,690 | 6 | 0 | NOASSERTION | 2022-04-03T17:57:14 | 2019-09-06T05:13:16 | R | UTF-8 | R | false | true | 1,968 | rd | values.DTSg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Swrappers.R
\name{values.DTSg}
\alias{values.DTSg}
\alias{values}
\title{Get values}
\usage{
\method{values}{DTSg}(
x,
reference = FALSE,
drop = FALSE,
class = c("data.table", "data.frame"),
...
)
}
\arguments{
\item{x}{A \code{\link{DTSg}} object (S3 method only).}
\item{reference}{A logical specifying if a copy of the values or a reference
to the values shall be returned. See corresponding section for further
information.}
\item{drop}{A logical specifying if the object and all references to it shall
be removed from the global (and only the global) environment after
successfully returning its values. This feature allows for a resource
efficient destruction of a \code{\link{DTSg}} object while preserving its values.}
\item{class}{A character string specifying the class of the returned values.
\code{"data.frame"} only works when either a copy of the values is returned or
the object is dropped.}
\item{...}{Not used (S3 method only).}
}
\value{
Returns a \code{\link[data.table:data.table]{data.table::data.table}}, a reference to a
\code{\link[data.table:data.table]{data.table::data.table}} or a \code{\link{data.frame}}.
}
\description{
Returns the values of a \code{\link{DTSg}} object.
}
\note{
The original name of the \emph{.dateTime} column is restored when not returned as
a reference or when dropped.
}
\section{Reference to the values}{
A reference to the values of a \code{\link{DTSg}} object can be used to modify them in
place. This includes the \emph{.dateTime} column, which serves as the object's
time index. Modifying this column can therefore endanger the object's
integrity. Should the need to do so ever arise, \code{\link{refresh}} should be called
immediately afterwards in order to check the object's integrity.
}
\examples{
# new DTSg object
x <- DTSg$new(values = flow)
# get values
## R6 method
x$values()
## S3 method
values(x = x)
}
|
4f61b23fdbf2da4a5739a194014e4801c7183e3c | 970bfbd2ed88a08638a1f139cbfcda626376f0a6 | /tests/testthat/test_summarystats.R | 1e2f37137a9b837712fad1a2d249e9c89c6b39fe | [] | no_license | ComputationalRegulatoryGenomicsICL/GenomicInteractions | f294458b3f4d89885545f299aa7071b8df872404 | 5879ff0f529d774c4e10e13cc6d2d1a7859a5345 | refs/heads/master | 2022-05-02T13:06:35.443231 | 2022-04-08T15:58:13 | 2022-04-08T15:58:13 | 26,012,214 | 4 | 2 | null | 2016-04-14T17:09:10 | 2014-10-31T10:39:44 | R | UTF-8 | R | false | false | 1,902 | r | test_summarystats.R | gi <- new("GenomicInteractions",
metadata = list(experiment_name="test", description = "this is a test"),
anchor1 = as.integer(c(1, 7, 8, 4, 5)),
anchor2 = as.integer(c(6, 2, 10, 9, 3)),
regions = GRanges(seqnames = S4Vectors::Rle(factor(c("chr1", "chr2")), c(6, 4)),
ranges = IRanges(start = c(1,6,3,4,5,7,2,3,4,5),
width = c(10,9,6,7,6,10,9,8,7,8)),
strand = S4Vectors::Rle(c("+", "-", "+", "-"), c(2,4,1,3)),
seqinfo = Seqinfo(seqnames = paste("chr", 1:2, sep=""))),
elementMetadata = DataFrame(counts = 1:5))
# Three promoter regions on chr1/chr2 used to annotate the interaction
# anchors; each carries an id (P1..P3).
promoters <- GRanges(seqnames = S4Vectors::Rle(factor(c("chr1", "chr2")), c(2, 1)),
                     ranges = IRanges(c(9, 14, 11), width = 4:6),
                     strand = S4Vectors::Rle(strand(c("+", "-", "-"))),
                     seqinfo = Seqinfo(seqnames = paste("chr", 1:2, sep="")),
                     id = paste0("P", 1:3))
## Annotation by overlaps
# Tag each anchor of `gi` as "promoter" (overlapping a promoter) or
# "distal"; messages from the annotation step are suppressed.
suppressMessages(annotateInteractions(gi, list(promoter = promoters)))
test_that("categoriseInteraction returns correct result", {
  # Expected counts per interaction category for the annotated object, with
  # and without restricting to promoter viewpoints/node classes.
  res <- data.frame(category = c("promoter-promoter", "promoter-distal", "distal-distal"),
                    count = c(1, 2, 2),
                    stringsAsFactors = FALSE)
  expect_equal(categoriseInteractions(gi), res)
  expect_equal(categoriseInteractions(gi, viewpoints = "promoter"), res[1:2,])
  expect_equal(categoriseInteractions(gi, node.classes = "promoter"), res[1,])
})
# test_that("plotSummaryStats works as previously", {
# expect_equal_to_reference(plotSummaryStats(gi),
# file = "plotsummarystats.rds")
# resetAnnotations(gi)
# expect_equal_to_reference(plotSummaryStats(gi),
# file = "plotsummarystats_reset.rds")
# })
|
5232c03e20c6374e6c276acc235d018eb1249e96 | 34dcaf94af67b8ce3c85dd0f55f865fecb0cdd9e | /lacharts.R | b4af9334886372dc920b8f18d792158d612bb4fe | [] | no_license | frankpopham/side | 5bfeebdb16cf3651ac7f121db579e10390580050 | 9c5248e38e30748fb3f30ead0309642324b39b86 | refs/heads/master | 2023-01-19T05:58:22.284841 | 2020-11-23T11:47:26 | 2020-11-23T11:47:26 | 315,090,319 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,568 | r | lacharts.R | library("tidyverse")
library("lubridate")
library("parallaxr")

# Daily COVID-19 case counts per Scottish council area (NHS Scotland open data).
download.file("https://www.opendata.nhs.scot/dataset/b318bddf-a4dc-4262-971f-0ba329e09b87/resource/427f9a25-db22-4014-a3bc-893b68243055/download/trend_ca_20201117.csv", "ladata.csv")

# read data, keeping observations from August 2020 onwards
dflachart <- read_csv("ladata.csv") %>%
  mutate(date2 = ymd(Date)) %>%
  filter(month(date2) > 7 & year(date2) > 2019)

# Bar chart of daily positive cases for one council area; `ca_code` is the
# S12 council-area code (Glasgow = S12000049, Edinburgh = S12000036).
# Extracted as a helper because the two charts below differed only in the code.
daily_cases_chart <- function(ca_code) {
  dflachart %>%
    filter(CA == ca_code) %>%
    ggplot(aes(x = date2, y = DailyPositive)) +
    geom_col(width = 1) +
    ylab("Daily Cases") +
    xlab("") +
    scale_x_date(date_labels = "%B") +
    theme(axis.text.x = element_text(size = 5, angle = 45, hjust = 1),
          axis.text.y = element_text(size = 5),
          axis.title.y = element_blank())
}

plotglachart <- daily_cases_chart("S12000049")
# FIX: pass the plot explicitly; ggsave() otherwise saves the last plot that
# was *printed*, and these plots are assigned without ever being printed.
ggsave("gla.png",
       plot = plotglachart,
       width = 5,
       height = 5,
       units = c("cm"))

plotedchart <- daily_cases_chart("S12000036")
ggsave("ed.png",
       plot = plotedchart,
       width = 5,
       height = 5,
       units = c("cm"))

## MD files
# FIX: anchor the pattern so only files *ending* in ".md" are picked up
# (the unescaped "." previously matched any character).
all_md_str <- list.files(pattern = "\\.md$", full.names = FALSE)
## Loop through each MD file, parse, and return a single tibble
md_tibble <-
  all_md_str %>%
  purrr::map_dfr(parse_md)
## Output HTML file
generate_scroll_doc(path = "index.html",
                    inputs = md_tibble)
|
ec84af17f3c9ad31585a07bd65dc16ed89dc0f87 | 3e4c6a79d1e799f44bcf4b3edf2d39a2e293695b | /R/KBinfo.R | b10f8e131b4520a10d5410cd1c6983e4cf11891f | [
"MIT"
] | permissive | phenoscape/rphenoscape | f28f23ecb4952f9dc61230cfd9274b4e796fa4ad | c766a46e315e086bf60f538cb8a136febc2123da | refs/heads/master | 2023-07-20T01:23:53.432028 | 2023-07-17T12:28:21 | 2023-07-17T13:33:18 | 42,216,360 | 2 | 6 | NOASSERTION | 2023-06-20T15:54:55 | 2015-09-10T02:14:11 | R | UTF-8 | R | false | false | 1,525 | r | KBinfo.R | #' Summary and metadata about the Phenoscape KB
#'
#' Returns the date of the current release of the Phenoscape Knowledgebase (KB)
#' and counts of annotated matrices, taxa, phenotypes, characters, and states.
#'
#' @return A list of class "KBinfo" with summary statistics of annotation counts
#'   and other KB metadata (specifically, a timestamp for the current KB build).
#' @examples
#' kbmeta <- get_KBinfo()
#' names(kbmeta)
#' kbmeta
#' @export
get_KBinfo <- function() {
  # Fetch the annotation summary from the KB API endpoint.
  info <- get_json_data(pkb_api("/kb/annotation_summary"), query = list())
  # Parse every "*_time" element (ISO 8601, "%FT%T") into a timestamp in UTC.
  time_fields <- grep(".*_time", names(info), value = TRUE)
  for (field in time_fields) {
    info[[field]] <- strptime(info[[field]], format = "%FT%T", tz = "UTC")
  }
  # Tag the list so the KBinfo print method is dispatched.
  class(info) <- c(class(info), "KBinfo")
  info
}
#' Print method for objects of class KBinfo
#'
#' Prints one "Label: value" line per element of the object, title-casing the
#' element name and replacing the first underscore with a space. Timestamp
#' elements (POSIXct/POSIXlt) are formatted in the requested timezone.
#'
#' @param x the object of class "KBinfo" to be printed
#' @param tz the timezone in which to print timestamp values, defaults to
#'   local timezone
#' @param ... additional parameters that might be passed to print methods,
#'   ignored here
#' @rdname get_KBinfo
#' @export
print.KBinfo <- function(x, ..., tz = "") {
  format_entry <- function(nm) {
    value <- x[[nm]]
    # Render timestamps in the requested timezone, with the zone shown.
    if (any(grepl("POSIX[c,l]t", class(value)))) {
      value <- format(as.POSIXct(value), usetz = TRUE, tz = tz)
    }
    label <- paste0(toupper(substring(nm, 1, 1)),
                    sub("_", " ", substring(nm, 2)))
    paste0(label, ": ", value)
  }
  lines <- unlist(lapply(names(x), format_entry), use.names = FALSE)
  cat(lines, sep = "\n")
  # Return the object invisibly, as print methods conventionally do.
  invisible(x)
}
|
d6907647b3cf7320744c9d4a710d4f01ef92e2a9 | 2302ef052ed1e38e0c693b099810009a25667bc5 | /cachematrix.R | aa0489199e213d03f4edaff02a2b43994824c780 | [] | no_license | gibarto/ProgrammingAssignment2 | d57e254228be9cb6a39e8d5ff4e7fd9f924b0d32 | bb983cd58eb99cbedbb2601c112dd23223904f34 | refs/heads/master | 2021-01-17T04:56:51.043948 | 2014-04-26T20:40:29 | 2014-04-26T20:40:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,044 | r | cachematrix.R | ## Caching the inverse of a matrix to avoid having to re-solve
## Make and store a matrix together with a cache for its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setsol/getsol for the cached solution (inverse). Setting a new matrix
## invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the stored matrix and drop any stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setsol = function(solve) cached_inverse <<- solve,
    getsol = function() cached_inverse
  )
}
## after setting matrix with makeCacheMatrix, use this to get the solution
## so that if it's already solved, you don't have to wait
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', using the cache when possible.
  cached <- x$getsol()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, store it, and return it.
  inverse <- solve(x$get(), ...)
  x$setsol(inverse)
  inverse
}
|
9c07871f693a90022ab1edd63be60e711600b588 | bfdd68b1fa3e092fa393a02b967ae93cf56be777 | /Rscripts/create_hulk_heatmap.r | 7c9369220e01c3964b74cb416753d6d7cd928e5e | [
"BSD-3-Clause"
] | permissive | NorwegianVeterinaryInstitute/Talos | 3cbad76974fab764890a5424144032237ecd415f | a5b0bfd24423d76e5478d5f56a3895bc70d92e46 | refs/heads/master | 2022-09-25T20:23:30.197425 | 2022-08-31T08:28:20 | 2022-08-31T08:28:20 | 227,598,476 | 1 | 2 | BSD-3-Clause | 2022-08-31T08:28:21 | 2019-12-12T12:10:47 | Nextflow | UTF-8 | R | false | false | 1,455 | r | create_hulk_heatmap.r | #!/usr/bin/env Rscript
# a script to process the output from hulk smash
# And vizualize them as a heatmap and a ordination plot
#functions
# check.packages function: install and load multiple R packages.
# (downloaded from: https://gist.github.com/smithdanielle/9913897)
# Check to see if packages are installed. Install them if they are not, then load them into the R session.
# NOTE CRAN MIRROR SELECT IS norway MIRROR !!!
# Install any packages in `pkg` that are missing, then attach every one.
# Returns (invisibly via sapply/require) a named logical vector of attach
# successes. NOTE: installs come from the Norwegian CRAN mirror.
check.packages <- function(pkg){
  installed <- installed.packages()[, "Package"]
  missing_pkgs <- pkg[!pkg %in% installed]
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs, dependencies = TRUE,
                     repos = "https://cran.uib.no/")
  }
  # attach all requested packages and report which loaded successfully
  sapply(pkg, require, character.only = TRUE)
}
# Libraries to load (and install first if needed).
packages <- c("heatmap3")
check.packages(packages)

# Load the pairwise weighted-Jaccard matrix produced by `hulk smash`.
all_samples <- read.csv("all_samples.Weighted_Jaccard.hulk-matrix.csv", header = TRUE)

# Strip the sketch-file suffix from the sample names.
# fixed = TRUE makes the match literal: the dots in the pattern would otherwise
# act as regex wildcards and could clip unintended fragments of sample names.
colnames(all_samples) <- sub(".clean.fq.gz.json", "", colnames(all_samples), fixed = TRUE)
rownames(all_samples) <- colnames(all_samples)
samples <- ncol(all_samples)

# heatmap3 requires a matrix, not a data frame.
all_samples_matrix <- as.matrix(all_samples)

# Draw a heatmap of the Jaccard distances and save it to PDF.
colors <- colorRampPalette(c("steelblue", "white", "orange2"))(100)
pdf(file = "Heatmap.all_samples.WeightedJaccard.pdf",
    width = 12, height = 9, bg = "white")
heatmap3(all_samples_matrix, scale = "none", col = colors,
         cexRow = 0.8, cexCol = 0.8, margins = c(10, 6))
dev.off()
58185b1577fb05aab7f979bc96e15b69af193860 | ec1404568b3961a8c95a122998712f6f69930d2e | /hw5/task1.R | 970de56f7a8882dc21c5e279e7f31b038cc8f425 | [] | no_license | gnayoaixgnaw/Data_Analytics | ec5c69e7dfc1ce3f79b8b485a0266a125c40e133 | 019ff5a57deb81c48236df3413767b607148c475 | refs/heads/main | 2023-02-07T05:48:21.753877 | 2020-12-29T23:31:43 | 2020-12-29T23:31:43 | 325,407,497 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 753 | r | task1.R | install.packages("syuzhet")
library("syuzhet")

# Load the harvested news articles (one text per row, "text" column).
Google_News_Entry <-read.table("/Users/wangxiaoyang/Desktop/cs_code/cs688/hw5_xiaoyang_wang/result.csv", header=TRUE, sep=",")
Google_News_Entry

df <- as.vector(Google_News_Entry$text)

# Score each article with three sentiment lexicons.
# vapply() preallocates the result vectors instead of growing them with c()
# inside a loop (which is quadratic) and guarantees one numeric score per text.
affin_list <- vapply(df, function(doc) get_sentiment(doc, method = "afinn"),
                     numeric(1), USE.NAMES = FALSE)
nrc_list <- vapply(df, function(doc) get_sentiment(doc, method = "nrc"),
                   numeric(1), USE.NAMES = FALSE)
bing_list <- vapply(df, function(doc) get_sentiment(doc, method = "bing"),
                    numeric(1), USE.NAMES = FALSE)

# Combine texts and scores; data.frame() names the columns after the variables,
# matching the original output schema (df, affin_list, nrc_list, bing_list).
a <- data.frame(df, affin_list, nrc_list, bing_list)
write.csv(a, file = "/Users/wangxiaoyang/Desktop/cs_code/cs688/hw5_xiaoyang_wang/mydata.csv", row.names = FALSE)
|
71ad0c285373d5561366253d6584695bb7843c20 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/Rfast2/man/kumar.mle.Rd | b8e8c57d47463590f3ee7fd1222f4a425137af4d | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,338 | rd | kumar.mle.Rd | \name{MLE of distributions defined for proportions}
\alias{kumar.mle}
\alias{zil.mle}
\alias{simplex.mle}
\title{
MLE of distributions defined for proportions
}
\description{
MLE of distributions defined for proportions.
}
\usage{
kumar.mle(x, tol = 1e-07, maxiters = 50)
simplex.mle(x, tol = 1e-07)
zil.mle(x)
}
\arguments{
\item{x}{
A vector with proportions or percentages. Zeros are allowed only for the zero inflated logistic normal distribution (zil.mle).
}
\item{tol}{
The tolerance level up to which the maximisation stops; set to 1e-07 by default.
}
\item{maxiters}{
The maximum number of iterations the Newton-Raphson will perform.
}
}
\details{
Instead of maximising the log-likelihood via a numerical optimiser we have used a Newton-Raphson algorithm which is faster.
See wikipedia for the equations to be solved. The distributions are Kumaraswamy, zero inflated logistic normal and simplex.
}
\value{
Usually a list with three elements, but this is not for all cases.
\item{iters}{The number of iterations required for the Newton-Raphson to converge.
}
\item{param}{
The two parameters (shape and scale) of the Kumaraswamy distribution or the means and sigma of the simplex distribution.
For the zero inflated logistic normal, the probability of non-zero values, the mean and the unbiased variance.
}
\item{loglik}{The value of the maximised log-likelihood.
}
}
\references{
Kumaraswamy, P. (1980). A generalized probability density function for double-bounded random processes.
Journal of Hydrology. 46 (1-2): 79-88.
Jones, M.C. (2009). Kumaraswamy's distribution: A beta-type distribution with some tractability advantages.
Statistical Methodology. 6(1): 70-81.
Zhang, W. & Wei, H. (2008). Maximum likelihood estimation for simplex distribution nonlinear mixed models
via the stochastic approximation algorithm. The Rocky Mountain Journal of Mathematics, 38(5): 1863-1875.
You can also check the relevant wikipedia pages.
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris \email{mtsagris@yahoo.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{zigamma.mle}, \link{censweibull.mle}
}
}
\examples{
u <- runif(1000)
a <- 0.4 ; b <- 1
x <- ( 1 - (1 - u)^(1/b) )^(1/a)
kumar.mle(x)
}
|
8930c98de52e9cc0a0391df7274af3441cc95beb | 9ec89425032154c1e1bf3142f85bdbe48ee1d688 | /create_word_images.R | c5cf4d87d402304976df1713e57d361b381558b1 | [] | no_license | SebastianKorinth/imWo | 2d1aea76a9009aeeab89a13ff9e12eb66051376a | 3ae0116886791bbb3e31b7333c3a1e08803b1813 | refs/heads/master | 2020-06-23T17:52:51.500739 | 2019-07-25T14:03:35 | 2019-07-25T14:03:35 | 198,706,116 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,584 | r | create_word_images.R |
# Render each item (word) in `items.df` as a PNG image in ./itemImages/
# (relative to the current working directory).
#
# items.df            data frame with one row per item to render
# image.width/height  device size in inches
# image.resolution    device resolution in dpi
# print.color         colour of the printed word
# background.color    image background colour
# item.column         column index holding the text to draw
# font.type           font family passed to par()
# file.name.extension image file extension (".png" by default)
# file.name.column    column index holding the output file name (no extension)
# file.name.encoding  "UTF-8" or "native" re-encoding of the output file name
CreateWordImages <- function(items.df,
                             image.width = 5,
                             image.height = 1,
                             image.resolution = 150,
                             print.color = "black",
                             background.color = "white",
                             item.column = 1,
                             font.type = "Courier",
                             file.name.extension = ".png",
                             file.name.column = 2,
                             file.name.encoding = "UTF-8"){
  # Folder that receives the word image files (created only when missing,
  # which also avoids dir.create()'s "already exists" warning on reruns).
  item.image.path <- "itemImages/"
  if (!dir.exists(item.image.path)) {
    dir.create(item.image.path)
  }
  # Render one image per row of the item list (seq_len is safe for 0 rows).
  for (item.counter in seq_len(nrow(items.df))) {
    # Assemble the output file name and re-encode it as requested.
    item.image.name <- paste0(item.image.path,
                              items.df[item.counter, file.name.column],
                              file.name.extension)
    if (file.name.encoding == "native") {
      item.image.name <- enc2native(item.image.name)
    }
    if (file.name.encoding == "UTF-8") {
      item.image.name <- enc2utf8(item.image.name)
    }
    # Open a PNG device, draw the word centred on an empty plot, close it.
    png(item.image.name, width = image.width, height = image.height,
        units = "in", res = image.resolution, bg = background.color)
    par(mar = c(0, 0, 0, 0), family = font.type)
    plot(c(0, 1), c(0, 1), ann = FALSE, bty = 'n', type = 'n',
         xaxt = 'n', yaxt = 'n')
    text(x = 0.5, y = 0.8, items.df[item.counter, item.column], cex = 4,
         col = print.color, pos = 1)
    dev.off()
  }
}
}
|
cbd97f94ce694b6ac5bae926045a7d8ca16b626b | aa69b22f4534b6c7facc2f4385c0658ca45ce7c0 | /R/PERCA2_Shiny_ls_full.R | 0ce383cdfedb7fc582e0b498d698e15ff00d0855 | [
"MIT"
] | permissive | DRF-TARPA1/PERCA2_Shiny_ls_full | 02b8cff326f5e0af3cba08673d1fbe51683ebc12 | 55290e245ea13d18c034aaf9f3fb15a2f86833e0 | refs/heads/master | 2020-09-10T17:35:17.029321 | 2019-11-14T21:36:14 | 2019-11-14T21:36:14 | 221,762,936 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,573 | r | PERCA2_Shiny_ls_full.R | # File or Package Name: PERCA2_Shiny_ls_full.R
# Title: Filtering an AD Listing and write CSV file
# Description: Script for filtering list from ./data
# where a listing from AD output is processed and export in ./ouput
# Version: 1.0.0
# Author: Patrice Tardif
# Maintainer: Patrice Tardif
# License: MIT
# Date: 2019-11-14
# Parameters for data location and output in the project
PATH_AD <- "./data"
PATH_OUT <- "./output"

# Use the full AD export when present, otherwise fall back to the dummy file.
fNameIn <- file.path(PATH_AD, "Shiny_AD_full.txt")
if (!file.exists(fNameIn)) {
  fNameIn <- file.path(PATH_AD, "Shiny_AD_dummy.txt")
}
if (!dir.exists(PATH_OUT)) {
  dir.create(PATH_OUT)
}
fNameOut <- file.path(PATH_OUT, paste0("Shiny_ls_full", ".csv"))

# Load, Extract, Transform (ETL) the AD file list.
# The file is imported as "UTF-8" since a Latin alphabet underlies the text.
# Lines are comma-delimited; two fields are further sub-split (on =, \\ or
# non-printable characters, i.e. ^[:print:]) with regular expressions.
Shiny_AD <- read.csv(fNameIn, encoding = "UTF-8", header = FALSE, stringsAsFactors = FALSE)

# Keep only the rows flagged as users ("Utilisateurs") and the useful columns.
Shiny_ls_full <- Shiny_AD[stringr::str_detect(Shiny_AD[, 5], "Utilisateurs"), c(1, 2, 4)]
names(Shiny_ls_full) <- c("Nom", "Prenom_(Region)", "Centre")

# "Centre" holds "key=value" pairs; keep the value part (2nd token).
# vapply guarantees one character result per row (NA when no 2nd token exists).
Shiny_ls_full[, "Centre"] <- vapply(
  stringr::str_split(Shiny_ls_full[, "Centre"], "="),
  function(parts) parts[2], character(1))

# "Nom" mixes separators (=, backslash, non-printables); keep the 2nd token.
Shiny_ls_full[, "Nom"] <- vapply(
  stringr::str_split(Shiny_ls_full[, "Nom"], "=|[\\\\]|[^[:print:]]"),
  function(parts) parts[2], character(1))

# Export the data frame in CSV format.
write.csv(Shiny_ls_full, fNameOut, row.names = FALSE)
|
bd46159aa6d8db97f99935a2e4b957e942b2e7a2 | 31c34fe5fd15c02c84f396d22b2f3965afd4f0c8 | /WG45P2.R | 0baa28b2945f29ff1b14ed482af0cc6f3717fac9 | [] | no_license | Louhokseson/22nd-October | 4cc37ace9edbf1eeaa764b4be2de03b3c96a2985 | b0fee154c50bc8578553e425b344d78a1a87437d | refs/heads/main | 2023-09-02T09:39:44.332879 | 2021-10-21T11:54:31 | 2021-10-21T11:54:31 | 416,526,722 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,750 | r | WG45P2.R | # Group 45 Xuexun Lu and Siying Zhu Yeah only two people.
# https://github.com/Louhokseson/22nd-October
## Overview
# This code use the SEIR model to simulate the covid propagation.
# We generate a data with 5.5million size with respect to three groups.
# Then we store the daily number of people who transfer from S -> E and standardize the data.
# Plot the Original graph verus Standardized graph. And we also simulate the model 10 times
# Visualize the variability in the results
# Finally, give a comment to our results and ZOE issues.
# SEIR stochastic simulation of covid spread, tracking the daily number of
# new exposures (S -> E transitions) for the whole population and for two
# sub-groups: a 0.1% random sample and the 10% of people with the lowest
# individual infection rates ("the cautious").
#
# n     = population size;  ni = number initially exposed;  nt = days simulated
# gamma = daily prob E -> I;  delta = daily prob I -> R
# State coding: S = 0, E = 1, I = 2, R = 3
#
# Returns a list of three length-nt vectors of daily new S -> E counts:
# Whole_newinfect, random_newinfect, lowbeta_newinfect.
seir_covid <- function(n=5500000,ni=10,nt=150,gamma=1/3,delta=1/5) {
  infect_rate <- 0.4 / n
  state <- in_random <- in_cautious <- rep(0, n)
  state[1:ni] <- 1  # seed the epidemic with ni exposed people
  # Individual infection rates, log-normal and normalised to mean 1.
  beta <- rlnorm(n, 0, 0.5)
  beta <- beta / mean(beta)
  # Mark the 10% of individuals with the smallest beta ("the cautious").
  n_cautious <- n * 0.1
  in_cautious[order(beta)[1:n_cautious]] <- 1
  # Mark a 0.1% random sample of the population.
  n_sample <- n * 0.001
  in_random[sample(n, n_sample)] <- 1
  # Daily counts of new S -> E transitions for each group (day 1 stays 0).
  whole_new <- random_new <- cautious_new <- rep(0, nt)
  for (day in 2:nt) {
    # Per-person daily probability of S -> E, driven by current infectives
    # (computed before today's I -> R transitions, matching the model spec).
    p_expose <- infect_rate * beta * sum(beta[state == 2])
    u <- runif(n)              # one uniform deviate per person for this day
    state[state == 2 & u < delta] <- 3  # I -> R with prob delta
    # Who gets newly exposed today, overall and within each sub-group.
    newly_exposed <- state == 0 & u < p_expose
    whole_new[day] <- sum(newly_exposed)
    random_new[day] <- sum(newly_exposed & in_random)
    cautious_new[day] <- sum(newly_exposed & in_cautious)
    state[state == 1 & u < gamma] <- 2  # E -> I with prob gamma
    state[newly_exposed] <- 1           # S -> E
  }
  list(Whole_newinfect = whole_new,
       random_newinfect = random_new,
       lowbeta_newinfect = cautious_new)
} ## seir
# Run one stochastic simulation of the full model with the default settings.
SEIR <- seir_covid()
# Standardize the vectors
# (divide each series by its own group size, so the three curves become
# per-capita rates and are directly comparable)
whole_stan <- SEIR$Whole_newinfect/(5.5*10^6)
random_stan <- SEIR$random_newinfect/(0.001*(5.5*10^6))
lowbeta_stan <- SEIR$lowbeta_newinfect/(0.1*(5.5*10^6))
# Plot the Original and Standardized graph of three groups
# (side by side: raw counts on the left, per-capita curves on the right)
par(mfrow = c(1,2))
plot(SEIR$Whole_newinfect,type = "l",ylim = c(0,max(SEIR$Whole_newinfect)),xlab="day",ylab="the number of new infections")
lines(SEIR$random_newinfect,col=4)
lines(SEIR$lowbeta_newinfect,col=2) ## Random (blue) and lowbeta (red)
legend <- legend("topleft",
                 c("whole population","the 'cautious'","the random sample"),
                 col=c("black","red","blue"),
                 lty = 1,lwd = 2,
                 cex = 0.6)
title(main = "Original graph")
#The comparison plot between three group which including whole population(Whole_newinfect), the 'cautious 10%'(lowbeta_newinfect) and the 0.1%random sample(random_newinfect)
plot(random_stan,type = "l",cex = 0.5,col = 4,xlab="day",ylab="the number of new infections")
points(whole_stan,type = "l",cex = 0.5)
points(lowbeta_stan,type = "l",cex = 0.5,col = 2) ## Random (blue) and lowbeta (red)
legend <- legend("topleft",
                 c("whole population","the 'cautious'","the random sample"),
                 col=c("black","red","blue"),
                 lty = 1,lwd = 2,
                 cex = 0.6)
title(main = "Standardized graph")
#Label the peak of whole population(Whole_newinfect)
W_y = max(whole_stan);
W_x = which.max(whole_stan);
point1 <- points(W_x,W_y,pch=8,col="black",cex = 0.5)
#Label the peak of the 0.1%random sample(random_newinfect)
R_y = max(random_stan);
R_x = which.max(random_stan);
point2 <- points(R_x,R_y,pch=8,col="blue",cex = 0.5)
#Label the peak of the 'cautious 10%'(lowbeta_newinfect)
L_y = max(lowbeta_stan);
L_x = which.max(lowbeta_stan);
point3 <- points(L_x,L_y,pch=8,col="red",cex = 0.5)
# for loop for 10 stimulations
# Each iteration re-runs the stochastic model; Peak_* records the DAY of the
# maximum daily new-infection count for each group, *_list the per-capita
# trajectories, so the variability across runs can be visualised below.
Peak_whole <- Peak_random <- Peak_lowbeta <- rep(0,10)
# create the empty list to store the result
whole_list <- vector(mode = "list", length = 10)
random_list <- vector(mode = "list", length = 10)
lowbeta_list <- vector(mode = "list", length = 10)
for (i in 1:10){
  SEIR <- seir_covid()
  Peak_whole[i] <- which.max(SEIR$Whole_newinfect)
  Peak_random[i] <- which.max(SEIR$random_newinfect)
  Peak_lowbeta[i] <- which.max(SEIR$lowbeta_newinfect)
  whole_list[[i]] <- SEIR$Whole_newinfect/(5.5*10^6)
  random_list[[i]] <- SEIR$random_newinfect/(0.001*(5.5*10^6))
  lowbeta_list[[i]] <- SEIR$lowbeta_newinfect/(0.1*(5.5*10^6))
}
# plots of 10 simulation
# (one panel per group; the first run is drawn with plot(), the remaining
# nine are overlaid with points(type = "l"))
# NOTE(review): par(new=TRUE) inside these loops appears redundant -- points()
# is a low-level call that overlays regardless -- confirm before removing.
par(new=FALSE)
# 10 simulations of whole population.png
plot(whole_list[[1]],type = "l",cex = 0.5,xlab="day",ylab="the number of new infections",main = "whole infection")
for (j in 2:10){
  points(whole_list[[j]],type = "l",cex = 0.5)
  par(new=TRUE)
}
par(new=FALSE)
# 10 simulations of random population.png
plot(random_list[[1]],type = "l",cex = 0.5,col = 4,xlab="day",ylab="the number of new infections",main = "random infection")
for (j in 2:10) {
  points(random_list[[j]],type = "l",cex = 0.5,col = 4)
  par(new=TRUE)
}
par(new=FALSE)
# 10 simulations of lowest beta population.png
plot(lowbeta_list[[1]],type = "l",cex = 0.5,col = 2,xlab="day",ylab="the number of new infections",main = "lowest beta infection")
for (j in 2:10) {
  points(lowbeta_list[[j]],type = "l",cex = 0.5,col = 2)
  par(new=TRUE)
}
par(new = FALSE)
# Peak of three groups
# (peak DAY per run for each group; a later peak means a delayed outbreak)
plot(Peak_whole,ylim = c(0,110),main="Peak of three groups",xlab="i",ylab="peak",col = "green",pch=3,cex=1.2,lwd=2,type='o')
points(Peak_random,col = "blue",pch=3,cex=1.2,lwd=2,type='o')
points(Peak_lowbeta,col = "red",pch=3,cex=1.2,lwd=2,type='o')
legend <- legend("bottomleft",
                 c("whole population","the 'cautious'","the random sample"),
                 col=c("green","red","blue"),
                 lty = 1,lwd = 2,
                 cex = 0.6)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#XXXXXXXXXXXXXXXXXXXXXXXXXX CODE COMMENT XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#We know that people who download the ZOE symptom tracker app are more concerned about Covid than the average person,
#so they correspond to the 'cautious 10%' population, namely the population with the lowest beta_i values in our SEIR model.
#1. The standardized graph shows that the trends of the 0.1% random sample (random_newinfect) and the whole population are similar,
#because the 0.1% random sample consists of 5,500 people drawn at random from the whole population, so to some extent it represents the situation in the whole population.
#2. The 'cautious 10%' (lowbeta_newinfect) lags behind the other two groups in the outbreak.
#3. Its number of new S -> E infections is much smaller than the whole population's, even though the 'cautious 10%' is one-tenth of it.
#Conclusion: if we used the ZOE application to reconstruct the daily infection trajectories,
#we would underestimate the severity of Covid, which would facilitate the spread of the epidemic.
|
0387bfe8fbe544bbaf9ab8dbad16dc99922f8e97 | caee885bdb4c5be1e38fbb062df52bfbaa54167f | /man/yb2007_ascov.Rd | 9a4339c5ef337110ce7bf0431d0f285e48a6c960 | [] | no_license | mlgiordano1/MIIVmsem | d97af3df7370f8eb1f60433c3f262c65760fa83c | 919847eb962404aadadbc78773a40165275f6e31 | refs/heads/master | 2020-09-25T02:29:37.899797 | 2019-12-04T15:30:40 | 2019-12-04T15:30:40 | 225,898,079 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 804 | rd | yb2007_ascov.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yb2007_ascov.R
\name{yb2007_ascov}
\alias{yb2007_ascov}
\title{Calculating the asymptotic covariance matrix of the saturated model;
using only the level-2 units as building blocks;}
\usage{
yb2007_ascov(beta, p, dup, nlevel1, NG, smatw, ymean, vsmatL1, varNames)
}
\arguments{
\item{beta}{description tbd}
\item{p}{description tbd}
\item{dup}{description tbd}
\item{nlevel1}{description tbd}
\item{NG}{description tbd}
\item{smatw}{description tbd}
\item{ymean}{description tbd}
\item{vsmatL1}{description tbd}
}
\value{
list of amat and gamma
}
\description{
Calculating the asymptotic covariance matrix of the saturated model;
using only the level-2 units as building blocks;
}
\examples{
yb2007_ascov(df, 2018, 08)
}
|
8d719fc8e5cc3f77ae19d29cdaa4c9a7add99d94 | 282fbd5459030bc3df7100b19dfba513ecea007e | /OfstedMap/app9.R | 44d162ac1398f0088d293b6aa17d85adffd80258 | [] | no_license | andrewcartledge/Ofsted-Outcome-Predictor | 89a7cf6b3d49d7fc3dcd2d1299d84d52f0b3248a | f192a28929b4897a15d0753191d51b78cb29bcd6 | refs/heads/main | 2023-02-02T21:00:42.084439 | 2020-12-17T12:00:33 | 2020-12-17T12:00:33 | 302,334,234 | 1 | 2 | null | 2020-12-17T09:23:22 | 2020-10-08T12:33:37 | HTML | UTF-8 | R | false | false | 11,206 | r | app9.R | #Ofsted Outcome Predictor App
#This is the nearly final (potentially final) version of this app
#The idea is to take the provided file that has already undergone modelling and then spit out an interactive map and table
#The map will have popups for each school and the markers for each school will be colour coded according to risk
#Comments will be brief but are designed to give you (and me) an idea what each section is trying to achieve!
# Check packages are installed, probably not necessary here but just in case!
if(!require("rgdal")) install.packages("sp", dependencies = TRUE)
if(!require("shiny")) install.packages("shiny", dependencies = TRUE)
if(!require("shinyWidgets")) install.packages("shinyWidgets", dependencies = TRUE)
if(!require("tidyverse")) install.packages("tidyverse", dependencies = TRUE)
if(!require("leaflet")) install.packages("leaflet", dependencies = TRUE)
if(!require("sp")) install.packages("sp", dependencies = TRUE)
if(!require("DT")) install.packages("DT", dependencies = TRUE)
if(!require("crosstalk")) install.packages("DT", dependencies = TRUE)
# Load packages
library(shiny)
library(shinyWidgets)
library(tidyverse)
library(leaflet)
library(sp)
library(DT)
library(crosstalk)
# Load in csv of data and predictions and mutate it to have the right data types and form
# (the CSV already carries the modelled probabilities; this block only recodes
# columns and derives the display fields used by the map and the table).
# NOTE(review): Current_OE is wrapped in as.factor() while Previous_OE is left
# as character -- confirm whether that asymmetry is intentional.
ofsted_data <- read_csv(
  "data/Inspection Outcome Map Data - Logistic Regression.csv") %>%
  rename(lat = Northing, long = Easting) %>%
  mutate(
    URN = as.integer(URN),
    LAESTAB = as.integer(LAESTAB),
    DfE_number = as.integer(LAESTAB - 9370000),
    Ofstedphase = as.factor(Ofstedphase),
    Typeofeducation = as.factor(Typeofeducation),
    Publicationdate = as.integer(Publicationdate),
    Previouspublicationdate = as.integer(Previouspublicationdate),
    Overalleffectiveness = as.integer(Overalleffectiveness),
    Previousfullinspectionoveralleffectiveness = as.integer(Previousfullinspectionoveralleffectiveness),
    # Recode the 1-4 Ofsted grades to their official labels.
    Current_OE = as.factor(ifelse(Overalleffectiveness == 1, "Outstanding",
                                  ifelse(Overalleffectiveness == 2, "Good",
                                         ifelse(Overalleffectiveness == 3, "Requires Improvement",
                                                "Inadequate")))),
    Previous_OE = ifelse(Previousfullinspectionoveralleffectiveness == 1, "Outstanding",
                         ifelse(Previousfullinspectionoveralleffectiveness == 2, "Good",
                                ifelse(Previousfullinspectionoveralleffectiveness == 3, "Requires Improvement",
                                       "Inadequate"))),
    Academy = as.factor(ifelse(Academy == 1, "Yes", "No")),
    LACode = as.integer(LACode),
    daysbetween = as.integer(daysbetween),
    IDACI = as.integer(IDACI),
    num_pupils = as.integer(num_pupils),
    # Convert model probabilities to percentages (bad rounded to 2 d.p.).
    bad_out_chance = round(bad_out_chance*100,2),
    good_out_chance = good_out_chance*100) %>%
  # Bin the poor-outcome percentage into 5-point bands; these bands drive the
  # red colour palette of the map markers (factpal below).
  mutate(chance_category = case_when(
    bad_out_chance <= 5 ~ "00-05",
    bad_out_chance <= 10 ~ "05-10",
    bad_out_chance <= 15 ~ "10-15",
    bad_out_chance <= 20 ~ "15-20",
    bad_out_chance <= 25 ~ "20-25",
    bad_out_chance <= 30 ~ "25-30",
    bad_out_chance <= 35 ~ "30-35",
    bad_out_chance <= 40 ~ "35-40",
    bad_out_chance <= 45 ~ "40-45",
    bad_out_chance > 45 ~ "45+")) %>%
  # Human-readable inspection dates plus days elapsed since the last one
  # (the raw columns are integer day counts since the 1970-01-01 epoch).
  mutate(
    Pub_date = format(as.Date(Publicationdate, origin = "1970-01-01"),"%d/%m/%Y"),
    Prev_Pub_date = format(as.Date(Previouspublicationdate, origin = "1970-01-01"),"%d/%m/%Y"),
    days_since = as.integer(Sys.Date() - as.Date(Publicationdate, origin = "1970-01-01"))
  ) %>%
  # Highest-risk schools first.
  arrange(desc(bad_out_chance))
#Now create a colour factor set with one shade of red for each of the chance_category groups
# (10 hex shades from pale to saturated red, matched to the 10 risk bands
# defined above; used by addCircleMarkers to colour each school's marker).
factpal <- colorFactor(c("#ffe6e6","#ffcccc","#ffb3b3","#ff9999","#ff8080","#ff6666","#ff4d4d","#ff3333","#ff1a1a","#ff0000"),
                       ofsted_data$chance_category)
# UI layout: a sidebar on the left holding the school-count input above the
# leaflet map, and the interactive data table filling the main panel.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      numericInput(
        inputId = "poorschools",
        label = "Enter how many at risk schools to display (default is maximum)",
        value = nrow(ofsted_data)
      ),
      leafletOutput(outputId = "schoolmap", width = "100%", height = 850)
    ),
    mainPanel(DTOutput(outputId = "datatable"))
  )
)
#Create server function: filters the data reactively, renders the map and the
#table, and wires up their interactions (popup on marker click, map zoom on
#table row selection).
server <- function(input, output, session) {
  # Reactive dataset feeding both the map and the table.
  map_data <- reactive({
    temp_data <- ofsted_data
    # The numeric input caps how many of the highest-risk schools are shown;
    # it defaults to the full data set, this branch handles any user change.
    if (isTruthy(input$poorschools)) {
      temp_data <- temp_data %>%
        arrange(desc(bad_out_chance)) %>%
        slice(1:input$poorschools)
    }
    # Split off the coordinates (still British National Grid at this point)...
    school_location <- temp_data %>%
      select(long, lat)
    # ...from the attributes shown in the popups and the table.
    school_data <- temp_data %>%
      select(
        URN, DfE_number, Schoolname, Ofstedphase, Typeofeducation, Academy, IDACI, bad_out_chance,
        Current_OE, Pub_date, days_since, chance_category
      )
    # Build a spatial data frame in BNG (EPSG:27700) and reproject to WGS84
    # (EPSG:4326) so leaflet can place the markers.
    ofsted_map <- SpatialPointsDataFrame(
      coords = school_location,
      data = school_data,
      proj4string = CRS("+init=epsg:27700")) %>%
      spTransform(CRS("+init=epsg:4326")
      )
    # Convert back to a normal data frame with the reprojected coordinates
    # appended (the coordinate columns keep their names, long and lat).
    cbind(school_data, ofsted_map@coords)
  })
  # Shared data frame (crosstalk) linking the leaflet map and the DT table.
  shared_map <- SharedData$new(map_data)
  # Marker click: pop up the clicked school's name and DfE number.
  observeEvent(input$schoolmap_marker_click, {
    # The click event carries the marker position and its layer id.
    loc <- input$schoolmap_marker_click
    # The layer id was set to the school's URN when the markers were drawn.
    school_id <- input$schoolmap_marker_click$id
    # Look up the school; slice(1) guards against any duplicated URN.
    school_details <- ofsted_data %>%
      filter(URN == school_id) %>%
      slice(1)
    # Draw the popup at the clicked location.
    leafletProxy("schoolmap") %>%
      addPopups(loc$lng, loc$lat, paste0("<b>School name:</b> ", school_details$Schoolname,
                                         "<br><b>DfE Number:</b> ", school_details$DfE_number
      ))
  })
  # Table row selection: fly the map to the selected school.
  # Bug fix: the previous code assigned to `zoomToSchool()` -- an invalid
  # replacement-function call that errors at run time -- and piped into
  # `output$schoolmap` instead of a leaflet proxy, so the zoom never worked.
  observeEvent(input$datatable_rows_selected, {
    school_selected <- input$datatable_rows_selected
    # Find the selected row's (already reprojected) coordinates.
    temp_pos <- shared_map$origData() %>%
      mutate(rn = row_number()) %>%
      filter(rn == school_selected) %>%
      select(long, lat)
    leafletProxy("schoolmap") %>%
      flyTo(lng = temp_pos$long, lat = temp_pos$lat, zoom = 13)
  })
  # Draw the base map with one colour-coded circle marker per school.
  output$schoolmap <- renderLeaflet({
    leaflet() %>%
      # NOTE: some corporate firewalls (e.g. WCC's) block OpenStreetMap tiles.
      addProviderTiles(providers$OpenStreetMap) %>%
      # Circle markers accept arbitrary hex colours; colour/fill encode the
      # school's risk band and layerId carries its URN for the click handler.
      addCircleMarkers(data = shared_map,
                       radius = 5,
                       layerId = shared_map$origData()$URN,
                       opacity = 0.75,
                       color = ~ factpal(chance_category),
                       fillColor = ~ factpal(chance_category),
                       fillOpacity = 0.9
      )
  })
  # Interactive table of the same shared data: per-column filters at the top,
  # friendly column names, single-row selection, and the coordinate/band
  # helper columns (0-indexed targets 11:13) hidden from view.
  output$datatable <- renderDT(shared_map,
                               class = "cell-border stripe",
                               filter = "top",
                               colnames = c("URN", "DfE number", "School name", "School Phase", "Type of Education", "Academy", "IDACI Quintile", "Chance of less than good outcome", "Current Overall Effectiveness", "Inspection Published", "Days since last full inspection"),
                               rownames = FALSE,
                               options = list(sDom = '<"top">rt<"bottom">ip', columnDefs = list(list(visible = FALSE, targets = 11:13))),
                               selection = "single",
                               server = FALSE
  )
}
#Finally call the app and get to using the data
# (shinyApp() blocks while the application is running)
shinyApp(ui, server)
48def31d6ee9d0b0499e960f5b5a1e41ffb0f39f | b2ad74cbe2c3e5eaf710dde0a3937d1a025cdfed | /R/stream_from_files.R | 969238f6af88c84c76d27e213cbcd87d002b912a | [
"MIT"
] | permissive | sevvandi/eventstream | 4fad9b0e48693e1fa3a0e99e11247da6989dc90b | 41ff2a68c6f40a75a10cc68789c0c3c9540fc9be | refs/heads/master | 2022-05-28T10:41:33.247890 | 2022-05-16T07:20:16 | 2022-05-16T07:20:16 | 162,342,215 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 966 | r | stream_from_files.R | #' Generates a two dimensional data stream from data files in a given folder.
#'
#' Reads every file in \code{folder} whose name matches "csv" (in the order
#' returned by \code{list.files}) and stacks their rows into a single
#' data frame.
#'
#' @param folder The folder with the data files. A trailing slash is allowed.
#'
#' @return A data frame containing the rows of all CSV files in \code{folder}.
#'
#' @examples
#' \dontrun{
#' folder <- tempdir()
#' out <- gen_stream(2, folder = folder)
#' stream <- stream_from_files(paste(folder, "/data", sep=""))
#' dim(stream)
#' unlink(folder, recursive = TRUE)
#' }
#'@seealso \code{\link{gen_stream}}.
#'@export
stream_from_files <- function(folder){
  # Tolerate a trailing slash in the supplied path.
  last_char <- substr(folder, nchar(folder), nchar(folder))
  if (last_char == "/") {
    folder <- substr(folder, 1, nchar(folder) - 1)
  }
  if (!dir.exists(folder)) {
    stop("Invalid directory!")
  }
  files_list <- list.files(folder, pattern = "csv")
  if (length(files_list) == 0) {
    stop("No data files in directory!")
  }
  # Read every file first, then bind once. This avoids the quadratic cost of
  # growing a data frame with rbind inside a loop.
  pieces <- lapply(file.path(folder, files_list), read.csv)
  stream <- do.call(rbind, pieces)
  return(stream)
}
|
c2ed8c1d069a8eb8ead26af9f0ba8f3bc4523585 | 8cea013a81ea4babcfe9001ebadb79e781b1e8ed | /R/SDF2mvspectrum.R | e8315049a4a4720fa95994cde552aacc9504e904 | [] | no_license | cran/ForeCA | ebac795ffb39aefa245a11c5f756e4776bf35653 | 056b1ce29a5d3926cc22cd357386c387ff216858 | refs/heads/master | 2021-01-18T22:35:29.383005 | 2020-06-29T11:40:42 | 2020-06-29T11:40:42 | 17,679,292 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,722 | r | SDF2mvspectrum.R | # # @rdname mvspectrum
# #
# # @description
# # \code{.SDF2mvspectrum} converts the output of \code{\link[sapa]{SDF}} to a 3D array with
# # frequencies in the firs and the spectral density matrix at a given frequency in the latter
# # two dimensions.
# #
# # @details
# # The \code{\link[sapa]{SDF}} function only returns the upper diagonal matrix
# # (including diagonal), since spectrum matrices are Hermitian. For fast
# # vectorized computations, however, the full matrices are required.
# # Thus \code{.SDF2mvspectrum} converts SDF output to a \eqn{3D} array with
# # number of frequencies in the first dimension and the spectral density matrix
# # in the latter two.
# #
# # \code{.SDF2mvspectrum} is typically not called by the user, but by
# # \code{\link{mvspectrum}}.
# #
# # @param sdf.output an object of class \code{"SDF"} from \code{\link[sapa]{SDF}}
# # @keywords ts manip
.SDF2mvspectrum <- function(sdf.output) {
  # Dimensions of the SDF output, read from its attributes: number of
  # component series and number of Fourier frequencies.
  num.series <- attr(sdf.output, "n.series")
  num.freqs <- length(attr(sdf.output, "frequency"))
  # Target: (frequency x series x series) array holding the full spectral
  # density matrix at each frequency.
  f.lambda <- array(NA, dim = c(num.freqs, num.series, num.series))
  if (num.series > 1) {
    col.sels <- seq_len(num.series)
    # TODO: make this faster/vectorized
    # SDF stores only the upper triangle (incl. diagonal): copy row ii's
    # remaining columns, then advance/shrink the column selector for row ii+1.
    for (ii in seq_len(num.series)) {
      f.lambda[, ii, ii:num.series] <- sdf.output[, col.sels]
      col.sels <- col.sels + (num.series - ii)
      col.sels <- col.sels[-1]
    }
    # Complete each per-frequency matrix from its upper triangle via
    # fill_hermitian (defined elsewhere in the package); apply() flattens the
    # result, so reshape back into the 3D layout afterwards.
    f.lambda <- apply(f.lambda, 1, fill_hermitian)
    f.lambda <- array(t(f.lambda), dim = c(num.freqs, num.series, num.series))
  } else {
    # Univariate case: the SDF values are already the full 1x1 "matrices".
    f.lambda[, 1, 1] <- c(sdf.output)
  }
  # remove frequency 0
  f.lambda <- f.lambda[-1, , ]
  # Frequencies are scaled by pi here -- presumably converting to the
  # [0, pi] radian convention used elsewhere in the package; TODO confirm.
  attr(f.lambda, "frequency") <- attr(sdf.output, "frequency")[-1] * pi
  return(f.lambda)
}
36d1fd73c8b9cedcb5ec32501e4a8542e5ae1ce2 | 29585dff702209dd446c0ab52ceea046c58e384e | /phyloTop/R/nConfig.R | 9873f5a450ee43e1cae2ef31c6625a66367ca378 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,390 | r | nConfig.R | #' Configuration sizes in a tree
#'
#' Finds the sizes of configurations in the tree.
#'
#' @author Caroline Colijn \email{c.colijn@imperial.ac.uk}
#' @author Michelle Kendall \email{michelle.louise.kendall@@gmail.com}
#'
#' @param tree a tree of class \code{phylo} or \code{phylo4}. The tree should be binary and rooted; if not it will be coerced into a binary rooted tree using multi2di, if possible.
#' @param maxClade An integer between 1 and the number of tips (the default), specifying the maximum clade size of interest.
#'
#' @return A list with 2 entries:
#' \itemize{
#' \item cladeSizes is a vector giving the size of the clade descending at each node. Tips all have the value 1.
#' Internal nodes have their number of tip descendants.
#' \item numClades is a vector where numClades[[i]] is the number of clades of size i in the tree.
#' All clade sizes are calculated, but the output can be restricted using \code{maxClade} to just those of size up to 'maxClade'.
#' }
#'
#' @import ape
#'
#' @examples
#' ## Configuration sizes on a random tree with 10 tips:
#' tree <- rtree(10)
#' plot(tree)
#' nodelabels()
#' nConfig(tree)
#'
#'
#' @export
nConfig <- function(tree,maxClade=NULL) {
  # Validate/coerce the input into a binary rooted phylo object.
  tree <- phyloCheck(tree)
  if (is.null(tree$tip.label))
    stop("This tree has no tip labels")
  num.tips=length(tree$tip.label)
  # Cap maxClade at the tip count: larger clade sizes are impossible and
  # would only append zero counts to the output.
  if (is.null(maxClade)) {maxClade <- num.tips}
  else if(maxClade > num.tips) {maxClade <- num.tips} # maxClade greater than number of tips makes no sense and would append unnecessary zeroes to output
  # One label per node; in ape's numbering the tips come first (1..num.tips),
  # then the internal nodes.
  labels <- rep(NA, nrow(tree$edge)+1)
  names(labels)[1:num.tips]=tree$tip.label;
  names(labels)[(num.tips+1): length(labels)]=paste0("node",(num.tips+1): length(labels))
  labels[1:num.tips]=1 # tips are 1 here.
  # Internal node ids for a rooted binary tree with num.tips tips.
  NodeIDS= (num.tips + 1) : (2*num.tips -1)
  # fill in the configuration sizes of internal nodes
  # Bottom-up sweep: in each pass, label every node whose children are all
  # labelled (sum of child clade sizes), until no NA labels remain.
  while (any(is.na(labels))) {
    IsReady = NodeIDS[ vapply(NodeIDS,function(x) !any(is.na(labels[tree$edge[which(tree$edge[,1]==x),2]])) & is.na(labels[x]) ,FUN.VALUE=TRUE) ]
    TheseLabels = unlist(sapply(IsReady, function(x) sum(labels[tree$edge[tree$edge[,1]==x,2]])))
    labels[IsReady]=TheseLabels
  }
  # numClades[i] = number of clades of exactly size i, for i = 1..maxClade.
  numClades=vapply(1:maxClade, function(x) sum(labels==x),FUN.VALUE=1)
  names(numClades) <- 1:maxClade
  return(list(cladeSizes=labels,numClades=numClades))
}
25354b4a838821a576d6001367ff25e36b728c0a | b8ba06f8a51d1b89813e6b3e1e2cd86efc2ed059 | /Code/test.R | 8acd4d6d02028523158d20e95ef2391302d6c96f | [] | no_license | michielvandijk/HAZES | 4cfa54201f7c52b1fa517fffc0315d20de7c7834 | a6db9086db3f07d84bb89aa799ad5e59fb203638 | refs/heads/master | 2021-01-20T21:16:10.372168 | 2016-08-13T22:26:05 | 2016-08-13T22:26:05 | 65,639,312 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,283 | r | test.R | # http://quantifyingmemory.blogspot.co.uk/2014/02/web-scraping-part2-digging-deeper.html
# NOTE(review): exploratory scratchpad for scraping songteksten.net with
# RCurl + XML, following the linked blog tutorial. The statements are meant
# to be run interactively; most of them just print intermediate results.
library(RCurl)
library(XML)
# Fetch one lyric page as raw HTML.
web_page <- getURL("http://songteksten.net/lyric/142/80194/andre-hazes/nou-ben-ik-aan-de-beurt.html")
check<- htmlTreeParse(web_page) # do not know what this does but is the only way to visualise with head
summary(check)
substring(check,1,20000)
# Parse the page into an XML document for XPath queries.
PARSED <- htmlParse(web_page)
head(check)
# Probe various XPath selectors to find where the lyrics live.
xpathApply(PARSED, "///li")
xpathSApply(PARSED, "//h1",xmlValue)
xpathSApply(PARSED, "//h3",xmlValue)
length(xpathSApply(PARSED, "//a/@href"))
head(xpathSApply(PARSED, "//ul[@class='lyrics']",xmlValue))
head(xpathSApply(PARSED, "//span[@class='citation news']/a/@href"))
xpathSApply(PARSED, "//div[@id='body_right']",xmlValue) # works!!
xpathSApply(PARSED, "//div[@id='body_right']/p",xmlValue) # does not work; it should split the text into separate lists.
# Webscraper function
# Same probing on the artist's song-list page.
songs1<- getURL("http://songteksten.net/artist/lyrics/142/andre-hazes.html")
PARSED <- htmlParse(songs1)
xpathSApply(PARSED, "//ul[@class='lyrics']",xmlValue, recursive=T)
xpathSApply(PARSED, "//ul[@class='lyrics']",xmlName)
getNodeSet(PARSED, "//ul[@class='lyrics']")
# Inspect the earlier tree-parsed document's root node.
doc<-xmlRoot(check)
xmlName(doc)
xmlSize(doc)
doc[[2]]
# Minimal example of constructing an XML node by hand.
node <- xmlNode("foo", "Some text")
xmlValue(node)
xmlAttrs(doc)
|
1b8334cd3fc86540c99d46d2cbfe41d987710495 | d7862cf1e1f976ef00a0713edc2ee3e759ea8694 | /glmm/R/pql.R | 76da1e917c9483b4b24f7d57bd097a59e661a22b | [] | no_license | knudson1/glmm | fadea742cec3b745cac705a3eef68e58a4dea69b | 5bbbc65dfc96e3e0094603d3db3cd70017e8160a | refs/heads/master | 2022-10-18T08:51:39.501070 | 2022-10-07T18:28:20 | 2022-10-07T18:28:20 | 20,006,591 | 10 | 10 | null | 2019-08-01T20:06:19 | 2014-05-21T03:26:25 | TeX | UTF-8 | R | false | false | 5,798 | r | pql.R | pql <- function(mod.mcml,family.mcml, wts,cache){
	# Guard: if a cache environment is supplied it must really be an
	# environment; it is threaded through to fn.outer/fn.inner.trust so the
	# inner optimization can warm-start from the previous (beta, s).
	if (! missing(cache))
		stopifnot(is.environment(cache))
	# One indicator vector E_k per variance component of the random effects.
	eek <- getEk(mod.mcml$z)
	#need inits for parameters
	sigma <- rep(1,length(mod.mcml$z))
	beta <- rep(0,ncol(mod.mcml$x))
	#need inits for random effects
	nrand<-lapply(mod.mcml$z,ncol)
	nrandom<-unlist(nrand)
	totnrandom<-sum(nrandom)
	s<-rep(1,totnrandom)
	#need to give this
	# Stack all random-effect design matrices side by side.
	Z = do.call(cbind,mod.mcml$z)
	# Outer optimization over the variance components sigma; fn.outer performs
	# the inner optimization over (beta, s) itself.  optim() warnings are
	# suppressed explicitly.
	outer.optim<-suppressWarnings(optim(par=sigma, fn=fn.outer, beta=beta,
		s=s, Y=mod.mcml$y , X = mod.mcml$x ,Z=Z, eek=eek, family.mcml=family.mcml,
		cache=cache, ntrials = mod.mcml$ntrials, wts=wts))
	# Only the PQL estimates of the variance components are returned.
	list(sigma = outer.optim$par)
}
fn.outer <- function(par, beta, s, Y, X, Z, eek, family.mcml, cache, ntrials, wts){
	# PQL outer objective: for a trial value of the variance components
	# (par = sigma), maximize the penalized log-likelihood over (beta, s) via
	# trust(), then return the Laplace-approximate criterion that optim()
	# minimizes in pql().
	sigma <- par
	# A = diagonal random-effect scaling implied by sigma: sum_k sigma_k * E_k.
	Aks <- Map("*",eek,sigma)
	A <- addVecs(Aks) #at this point still a vector
	A <- diag(A) #takes the vector and makes it a diag matrix
	# Warm-start (beta, s) from the cache left by the previous outer iteration.
	if (! missing(cache)) {
		stopifnot(is.environment(cache))
		if (exists("s.twid", envir = cache, inherits = FALSE)) {
			stopifnot(is.numeric(cache$s.twid))
			stopifnot(is.finite(cache$s.twid))
			stopifnot(length(cache$s.twid) == length(s))
			s <- cache$s.twid
		}
		#
		if (exists("beta.twid", envir = cache, inherits = FALSE)) {
			stopifnot(is.numeric(cache$beta.twid))
			stopifnot(is.finite(cache$beta.twid))
			stopifnot(length(cache$beta.twid) == length(beta))
			beta <- cache$beta.twid
		}
	}
	nbeta<-length(beta)
	#run trust
	# Inner maximization over c(beta, s); fn.inner.trust records its current
	# iterate in cache$beta.twid / cache$s.twid at every evaluation, so after
	# convergence the cache holds the mode.
	inner.optim<-trust(fn.inner.trust,parinit=c(beta,s), rinit=5, rmax=10000, minimize=F,
		Y=Y, X=X, Z=Z, A=A, nbeta=nbeta, family.mcml=family.mcml, cache=cache, ntrials = ntrials, wts=wts)
	#get beta and s
	beta.twid <- cache$beta.twid
	s.twid <- cache$s.twid
	#calculate eta.twid
	eta.twid <- (X %*% beta.twid + Z %*% A %*% s.twid)
	#calculate el using eta.twid. this is piece1
	family.mcml <- getFamily(family.mcml)
	# C_elc evaluates the log-likelihood; the integer code selects the family
	# (1 = bernoulli, 2 = poisson, 3 = binomial).
	if(family.mcml$family.glmm == "bernoulli.glmm"){
		piece1 <- .C(C_elc, as.double(Y), as.double(X), as.integer(nrow(X)),
			as.integer(ncol(X)), as.double(eta.twid), as.integer(1), as.integer(ntrials), wts = as.double(wts),
			value=double(1), gradient=double(ncol(X)), hessian=double((ncol(X)^2)))$value}
	if(family.mcml$family.glmm=="poisson.glmm"){
		piece1 <- .C(C_elc, as.double(Y), as.double(X), as.integer(nrow(X)),
			as.integer(ncol(X)), as.double(eta.twid), as.integer(2), as.integer(ntrials), wts = as.double(wts),
			value=double(1), gradient=double(ncol(X)), hessian=double((ncol(X)^2)))$value}
	if(family.mcml$family.glmm=="binomial.glmm"){
		piece1 <- .C(C_elc, as.double(Y), as.double(X), as.integer(nrow(X)),
			as.integer(ncol(X)), as.double(eta.twid), as.integer(3), as.integer(ntrials), wts = as.double(wts),
			value=double(1), gradient=double(ncol(X)), hessian=double((ncol(X)^2)))$value}
	#calculate W = c''(eta.twid)
	if(family.mcml$family.glmm == "bernoulli.glmm") {dubya <- family.mcml$cpp(eta.twid)}
	if(family.mcml$family.glmm == "poisson.glmm"){dubya <- family.mcml$cpp(eta.twid)}
	if(family.mcml$family.glmm == "binomial.glmm"){dubya <- family.mcml$cpp(eta.twid, ntrials)}
	W <- diag(as.vector(dubya))
	#calculate thatthing = A t(Z)WZA+I
	thatthing <- A %*% t(Z) %*% W %*% Z %*% A + diag(nrow(A))
	# The Cholesky factor gives the log-determinant cheaply and stably:
	# log det = 2 * sum(log(diag(L))), so .5*log det = sum(log(diag(L))).
	L <- chol(thatthing)
	#calculate piece2a = .5 log det(thatthing)
	piece2a <- sum(log(diag(L)))
	#calculate piece 2b = .5 s.twid's.twid
	# BUG FIX: this penalty must be evaluated at the inner-optimization mode
	# s.twid (as the comment above states), not at the starting value `s`,
	# which is stale once trust() has run.
	piece2b <- .5*s.twid%*%s.twid
	#then minvalue= piece2a+piece2b-piece1
	minvalue <- piece2a+piece2b-piece1
	as.vector(minvalue)
}
fn.inner.trust <-
function(mypar,Y,X,Z,A,family.mcml,nbeta,cache, ntrials, wts)
{
	# Objective for trust(): the penalized log-likelihood in c(beta, s) for a
	# fixed random-effect scaling matrix A, returning the value, gradient and
	# Hessian that trust() requires.  The current iterate is stored in `cache`
	# so the caller (fn.outer) can retrieve the mode after convergence.
	beta <- mypar[1:nbeta]
	s <- mypar[-(1:nbeta)]
	# Linear predictor with scaled standardized random effects.
	eta <- X%*%beta + Z%*%A%*%s
	family.mcml<-getFamily(family.mcml)
	# mu = c'(eta), the exponential-family mean; binomial also needs ntrials.
	if(family.mcml$family.glmm == "bernoulli.glmm") {mu <- family.mcml$cp(eta)}
	if(family.mcml$family.glmm == "poisson.glmm"){mu <- family.mcml$cp(eta)}
	if(family.mcml$family.glmm == "binomial.glmm"){mu <- family.mcml$cp(eta, ntrials)}
	#value<- ellikelihood(Y,X,eta,family.mcml)$value-.5*s%*%s
	# Value: log-likelihood from the compiled routine C_elc (family chosen by
	# the integer code 1/2/3) minus the .5*s's penalty on the random effects.
	if(family.mcml$family.glmm=="bernoulli.glmm"){
	value<-.C(C_elc, as.double(Y), as.double(X), as.integer(nrow(X)),
		as.integer(ncol(X)), as.double(eta),as.integer(1),as.integer(ntrials), wts = as.double(wts),
		value=double(1), gradient=double(ncol(X)), hessian=double((ncol(X)^2)))$value-.5*s%*%s}
	if(family.mcml$family.glmm=="poisson.glmm"){
	value<-.C(C_elc, as.double(Y), as.double(X), as.integer(nrow(X)),
		as.integer(ncol(X)), as.double(eta), as.integer(2), as.integer(ntrials), wts = as.double(wts),
		value=double(1), gradient=double(ncol(X)), hessian=double((ncol(X)^2)))$value-.5*s%*%s}
	if(family.mcml$family.glmm=="binomial.glmm"){
	value <- .C(C_elc, as.double(Y), as.double(X), as.integer(nrow(X)),
		as.integer(ncol(X)), as.double(eta), as.integer(3), as.integer(ntrials), wts = as.double(wts),
		value=double(1), gradient=double(ncol(X)), hessian=double((ncol(X)^2)))$value-.5*s%*%s}
	#gradient calculation
	# Score vector: d/dbeta = X'(Y - mu); d/ds = A Z'(Y - mu) - s.
	db <- t(X)%*%(Y-mu)
	ds <- A%*%t(Z)%*%(Y-mu)-s
	gradient <- c(db,ds)
	#hessian calculation
	# cdub = c''(eta), the family variance function evaluated at eta.
	if(family.mcml$family.glmm == "bernoulli.glmm") {cdub <- family.mcml$cpp(eta)}
	if(family.mcml$family.glmm == "poisson.glmm"){cdub <- family.mcml$cpp(eta)}
	if(family.mcml$family.glmm == "binomial.glmm"){cdub <- family.mcml$cpp(eta, ntrials)}
	cdub <- as.vector(cdub)
	cdub <- diag(cdub)
	kyoo <- nrow(A)
	# Blocks of the (negative-definite) Hessian of the penalized objective.
	piece1 <- (-t(X)%*%cdub%*%X)
	piece2 <- (-t(X)%*%cdub%*%Z%*%A)
	piece3 <- (-A%*%t(Z)%*%cdub%*%X)
	piece4 <- (-A%*%t(Z)%*%cdub%*%Z%*%A -diag(kyoo))
	hessian <- rbind(cbind(piece1,piece2),cbind(piece3,piece4))
	# Record the current iterate; after trust() converges these hold the mode.
	cache$s.twid <- s
	cache$beta.twid <- beta
	list(value=value, gradient=gradient, hessian=hessian)
}
|
4b368329f8f5306bfcfced8abe7b7b81086bf2f0 | 0572208a7c6b5202440f747b1f1f3d30ba2d820d | /R_workshop.R | 1d4457af78a343955bd1443d6850356b02beb1fb | [] | no_license | mteguchi/R_mini_course_2019 | 8b66a9d66699c4545129837afeb2725522c75d47 | fdd81b01359304f99203a0a10e3b672e40c19455 | refs/heads/master | 2020-05-22T18:11:08.069546 | 2019-06-05T20:59:19 | 2019-06-05T20:59:19 | 186,467,794 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 684 | r | R_workshop.R | # Example script for R workshop for the turtle groups
library(ggplot2)
#max <- 120
set.seed(10)
x <- runif(n = 10,
min = 44,
max = 115)
set.seed(10)
y <- 0.96 + 0.04 * x + rnorm(n=length(x), 0, 0.1)
mass <- exp(y)
df1 <- data.frame(length = x, mass = mass)
summary(df1)
lm1 <- lm(log(mass) ~ length, data = df1)
summary(lm1)
df1$predict <- predict(lm1)
#plot(x, y)
p1 <- ggplot(data = df1, aes(x = length)) +
geom_point(aes(y = log(mass)),
colour = "red",
size = 3) +
geom_line(aes(y = predict), colour = "black") +
theme(axis.text = element_text(size = 12)) +
xlab("Length (cm)") +
ylab("ln(Mass) (kg)")
print(p1)
|
20312d384b8c96e4ef3189e0cb085411b7fca04c | 0cbb895c4ae0dcf7053b7618a5cf23bcc503061a | /R/create_index.R | 369b9b1cd37dcab6c4c55169144f9b22d0d6eaed | [] | no_license | cran/rDataPipeline | 9e729f6e5c1fdb775d4bd833b02e060313f53e23 | 495e47def394d7026e465ea6ebda392259b55280 | refs/heads/master | 2023-09-03T09:45:45.611148 | 2021-11-17T20:00:06 | 2021-11-17T20:00:06 | 429,284,686 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 255 | r | create_index.R | #' create_index
#'
#' @param self self
#'
create_index <- function(self) {
tmp <- 1
if (!is.null(self$inputs))
tmp <- c(tmp, max(self$inputs$index) + 1)
if (!is.null(self$outputs))
tmp <- c(tmp, max(self$outputs$index) + 1)
max(tmp)
}
|
7d1ddeb9e36681070a850681bd461658ccf1249f | e8bb53f264224f2b72b9b6e2f715080f98914fdf | /04_ExploratoryDataAnalysis/Assignment/Project1/plot3.R | d69336587f36a9fe3897a1365a241f1a126ed6c4 | [] | no_license | pritraj90/DataScienceR | 0ef2550590f101bd0886ba7db22c6aa6df755ce0 | 54a67ad080756699138d083bd495da1dfa100d09 | refs/heads/master | 2020-03-21T16:31:22.158157 | 2018-03-07T16:52:26 | 2018-03-07T16:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,119 | r | plot3.R | # Getting full dataset
electric <- read.csv("./data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
electric$Date <- as.Date(electric$Date, format="%d/%m/%Y")
# Subsetting the data
jan_feb_07 <- subset(electric, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(electric)
# Week days
week <- paste(as.Date(jan_feb_07$Date), jan_feb_07$Time)
jan_feb_07$Datetime <- as.POSIXct(week)
# use the png driver as dev.copy is cutting the legend when I save it
png("plot3.png", width=480, height=480)
# Plot 3
#par(bg = 'light gray')
with (jan_feb_07, {
plot(Datetime, Sub_metering_1,
xlab = "",ylab = "Energy sub metering", type = "l")
lines(Datetime, Sub_metering_2, type = "l", col = "red")
lines(Datetime, Sub_metering_3, type = "l", col = "blue")
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=1.5,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Save
#dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
381d0f29d2f295d8f43c655cc30443e62f2d060a | a4edf1eaab938eda9ee21d63acdbabff4a91d908 | /R/hpmods.R | 7d6166fad025a2f59e9a887f10a91b7dcf7a5c69 | [] | no_license | dmbates/hpmods | a7e460dccb474ef335864ca6b94302eb863f5d5d | bf04a6038a56d91cb3d78b3b11f195b7c8c92a22 | refs/heads/master | 2016-09-06T12:21:22.395499 | 2012-07-18T17:53:18 | 2012-07-18T17:53:18 | 5,057,163 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,542 | r | hpmods.R | ##' Package to determine hierarchy-preserving models
##'
##' @name hpmods-package
##' @docType package
##' @author Douglas Bates \email{bates@@stat.wisc.edu}
##' @keywords package
NA
##' Determine hierarchy-preserving models
##'
##' Determine the hierarchy-preserving models from a formula.  A model is
##' hierarchy-preserving when every term it contains is accompanied by all
##' lower-order terms marginal to it (e.g. an interaction implies its
##' main effects).
##' @title Hierarchy-preserving models
##' @param formula as in \code{"\link{lm}"}
##' @param data as in \code{"\link{lm}"}
##' @param subset as in \code{"\link{lm}"}
##' @param weights as in \code{"\link{lm}"}
##' @param na.action as in \code{"\link{lm}"}
##' @param offset as in \code{"\link{lm}"}
##' @param \dots optional, additional arguments.  At present, none are used.
##' @return an object of class \code{"hpmods"}
##' @author Douglas Bates
##' @keywords models
##' @examples
##' set.seed(12321)
##' fr <- data.frame(y = rnorm(900), x1 = rnorm(900), x2 = rnorm(900),
##'                  x3 = gl(3,10,900), x4 = gl(10,3,900))
##' (hpm <- hpmods(y ~ (x1 + x2 + x3 + x4)^4, fr))
##' @export
hpmods <- function(formula, data, subset, weights, na.action, offset, ...) {
    cl <- match.call()
    ## Build and evaluate a model.frame call from the matched arguments,
    ## exactly as lm() does.
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "subset", "weights", "na.action",
                 "offset"), names(mf), 0L)
    mf <- mf[c(1L, m)]
    mf$drop.unused.levels <- TRUE
    mf[[1L]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    atts <- attributes(terms(mf))
    ## inci[i, j] is TRUE when term i is marginal to (contained in) term j:
    ## crossprod counts the variables shared by terms i and j; equality with
    ## the order of term i means all of term i's variables occur in term j.
    inci <- crossprod(atts$factor) == atts$order # incidence matrix for terms
    ## Starting from the null model, add each term together with the terms
    ## marginal to it to every model found so far, dropping duplicates.
    ## Models are stored as columns until the final transpose.
    mods <- array(FALSE, c(nrow(inci), 1))
    rownames(mods) <- rownames(inci)
    for (j in seq_len(ncol(inci)))      # seq_len() is safe when there are no terms
        mods <- cbind(mods, t(unique(t(array(inci[,j], dim(mods)) | mods))))
    mods <- t(mods)
    ## Row names encode each model as the integer with bit i set exactly when
    ## term i is included.
    rownames(mods) <- mods %*% 2^((seq_len(ncol(inci)))-1)
    res <- list(call=cl, incidence=inci, models=mods,
                frame=mf, X=model.matrix(terms(mf), mf))
    class(res) <- "hpmods"
    res
}
##' @S3method print hpmods
print.hpmods <- function(x, ...) {
    ## Load (but do not attach) Matrix so the sparseMatrix coercion and its
    ## show() method are registered; fail with a clear message otherwise.
    ## The original ignored require()'s return value and attached the package
    ## as a side effect of printing.
    if (!requireNamespace("Matrix", quietly = TRUE))
        stop("package 'Matrix' is needed to print 'hpmods' objects",
             call. = FALSE)
    cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
        "\n\n", sep = "")
    ## A sparse pattern matrix prints the model/term incidence compactly.
    print(methods::as(x$models, "sparseMatrix"))
    invisible(x)
}
##' Extract model subsets from an hpmods object
##'
##' After fitting a model with several terms the residual sums of
##' squares (and the coefficient estimates, if needed) for any leading
##' subset of those terms can be determined directly.  This function
##' extracts distinct subsets of the hierarchy-preserving models
##' where each subset can be analyzed from a single model fit.
##' @title Extract model subsets
##' @param x an object of class \code{"hpmods"}
##' @param \dots optional, additional arguments. At present, none are used.
##' @return a list of model subsets
##' @author Douglas Bates
##' @examples
##' set.seed(12321)
##' fr <- data.frame(y = rnorm(900), x1 = rnorm(900), x2 = rnorm(900),
##'                  x3 = gl(3,10,900), x4 = gl(10,3,900))
##' msubs(hpmods(y ~ (x1 + x2 + x3 + x4)^4, fr))
##' @export
msubs <- function(x, ...) {
    ## inherits() is the robust class test (class(x) may have length > 1).
    stopifnot(inherits(x, "hpmods"))
    mm <- x$models
    ans <- vector(mode="list", length=nrow(mm)-ncol(mm))
    i <- 1L
    ## Binary weights matching the bit-encoded row names of the models matrix.
    p2 <- 2^(seq_len(ncol(mm)) - 1)
    while (nrow(mm) > 0) {
        ## Take the largest remaining model; the cumulative sums of its
        ## included bits name every leading subset of its terms, all of which
        ## can be analyzed from one fit of that model.
        cr <- as.character(cumsum(p2[mm[nrow(mm),]]))
        if (i == 1L) cr <- c("0", cr) # null model goes in first group
        ans[[i]] <- mm[intersect(rownames(mm), cr),,drop=FALSE]
        i <- i + 1L
        mm <- mm[setdiff(rownames(mm), cr),,drop=FALSE]
    }
    ## vapply() guarantees a logical vector even when ans is empty.
    ans[!vapply(ans, is.null, logical(1))]
}
|
f1819207be86f2407ef123018624485dee2ac8f5 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/mapi/man/MAPI_RunOnGrid.Rd | 63a29cff2b975e6a6d61074b596699b151ed3e79 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 5,100 | rd | MAPI_RunOnGrid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAPI_RunOnGrid.R
\name{MAPI_RunOnGrid}
\alias{MAPI_RunOnGrid}
\title{Function MAPI_RunOnGrid}
\usage{
MAPI_RunOnGrid(samples, metric, grid, isMatrix = FALSE, ecc = 0.975,
errRad = 10, nbPermuts = 0, dMin = 0, dMax = Inf,
nbCores = ifelse(base::requireNamespace("parallel", quietly = TRUE),
parallel::detectCores() - 1, 1), N = 8)
}
\arguments{
\item{samples}{a data.frame with names and geographical coordinates of samples.
Column names must be: 'ind', 'x', 'y'. Optional column 'errRad' with an error radius for sample locations (eg. GPS uncertainty).
Coordinates must be projected (not latitude/longitude).}
\item{metric}{a data.frame or a square matrix with the pairwise metric computed for all pairs of samples.
If data.frame, column names must be: 'ind1', 'ind2', 'value'.
If matrix, sample names must be the row- and column names.}
\item{grid}{a spatial object of class 'sf' with the geometry of each cell.
When using your own grid, please check that the object structure is the same as returned by
\code{\link{MAPI_GridAuto}} or \code{\link{MAPI_GridHexagonal}}.}
\item{isMatrix}{Boolean. Depends on the 'metric' data:\cr
TRUE if 'metric' is a square matrix with column names = row names and standing for sample names.\cr
FALSE if 'metric is a three columns data.frame ('ind1', 'ind2', 'value'). \cr
The default value is determined using a "matrix" class detection for metric as well as identity between row and column number.}
\item{ecc}{ellipse eccentricity value (0.975 by default).}
\item{errRad}{global error radius for sample locations (same radius for all samples, 10 by default).
Units are in the same reference system as the sample geographical coordinates.
To use different error radius values for sample locations, add a column 'errRad' in the 'sample' data (see \code{\link{mapi}}).}
\item{nbPermuts}{number of permutations of sample locations (0 by default).}
\item{dMin}{minimum distance between individuals. 0 by default.}
\item{dMax}{maximal distance between individuals. +Inf by default.}
\item{nbCores}{number of CPU cores you want to use during parallel computation.
The default value is estimated as the number of available cores minus 1, suitable for a personal computer.
On a cluster you might have to set it to a reasonable value (eg. 8) in order to keep resources for other tasks.}
\item{N}{number of points used per quarter of ellipse, 8 by default.
Don't change it unless you really know what you are doing.}
}
\value{
a spatial object of class 'sf' providing for each cell: \cr
\itemize{
\item gid: Cell ID \cr
\item x and y coordinates of cell center \cr
\item nb_ell: number of ellipses used to compute the weighted mean \cr
\item avg_value: weighted mean of the pairwise metric \cr
\item sum_wgts: sum of weights of ellipses used to compute the weighted mean \cr
\item w_stdev: weighted standard deviation of the pairwise metric \cr
\item swQ: percentile of the sum of weights \cr
\item geometry \cr
When permutations are performed: \cr
\item permuts: list of the weighted mean values obtained from all permutations \cr
\item proba: proportion of the permuted weighted means below the observed weighted mean \cr
\item ltP: lower-tail p-value adjusted using the FDR procedure of Benjamini and Yekutieli \cr
\item utP: upper-tail p-value adjusted using the FDR procedure of Benjamini and Yekutieli \cr
}
}
\description{
Launch a MAPI analysis for a given grid computed with \code{\link{MAPI_GridAuto}}
or \code{\link{MAPI_GridHexagonal}} or provided by users.
}
\details{
To test whether the pairwise metric values associated with the ellipses are independent of the sample locations, those are permuted 'nbPermuts' times.
At each permutation, new cell values are computed and stored to build a cumulative null distribution for each cell of the grid.
Each cell value from the observed data set is then ranked against its null distribution.
For each cell, the proportion of permuted values that are smaller or greater
than the observed value provides a lower-tailed (ltP) and upper-tailed (utP) test p-value.
A false discovery rate (FDR) procedure (Benjamini and Yekutieli, 2001) is applied to account for multiple
testing (number of cells) under positive dependency conditions (spatial autocorrelation). An adjusted
p-value is computed for each cell using the function \code{p.adjust} from the 'stats' package with the method 'BY'.
}
\examples{
\dontrun{
data(metric)
data(samples)
my.grid <- MAPI_GridHexagonal(samples, crs=3857, 500) # 500m halfwidth
# Note: 10 permutations is only for test purpose, increase to >=1000 in real life!
my.results <- MAPI_RunOnGrid(samples, metric, grid=my.grid, nbPermuts=10, nbCores=1)
# eg. Export results to shapefile "myFirstMapiResult" in current directory
library(sf)
st_write(my.results, dsn=".", layer="myFirstMapiResult", driver="ESRI Shapefile")
}
}
\references{
Benjamini, Y. and Yekutieli, D. (2001). The control of the false discovery rate in multiple testing under dependency. Annals of Statistics 29, 1165–1188.
}
|
e98baf1672f1cb292e9727cd7aee2dd6b1575cca | caf6f15fe311eb9e8a70465054b2f47d6729cfc0 | /man/pairSort.Rd | a4e1a05826dd50513feb31f00c804e28cd1f1ad4 | [] | no_license | cran/alR | 8f4f84656fc2bbed4381444a70f6b442da903d59 | c9d857d9bfdea576eb3bbe7ca5dcb67ed7a5728b | refs/heads/master | 2021-01-21T12:46:29.775345 | 2017-12-07T11:02:24 | 2017-12-07T11:02:24 | 102,097,008 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 559 | rd | pairSort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{pairSort}
\alias{pairSort}
\title{Sorted vector index.}
\usage{
pairSort(x)
}
\arguments{
\item{x}{A real-valued vector to be sorted.}
}
\value{
A list with two components:
\itemize{
\item sorted: The sorted version of \code{x}.
\item index: The index of the \eqn{i^{th}} element in \code{x}.
}
}
\description{
The sorted vector is returned, along with the position each element occupied in the original (unsorted) vector.
}
\examples{
pairSort(c(5, 2, 6))
}
|
1a5656695ac6ae0889f1451d9de08980555ed971 | 7b74f00cd80694634e6925067aaeb6572b09aef8 | /2019/Assignment/FE8828-Low Wei-Ning, Raelynn/Assignment 1/Assignment 1.R | 0059e7e102fb6f354bc455683aa3299618043715 | [] | no_license | leafyoung/fe8828 | 64c3c52f1587a8e55ef404e8cedacbb28dd10f3f | ccd569c1caed8baae8680731d4ff89699405b0f9 | refs/heads/master | 2023-01-13T00:08:13.213027 | 2020-11-08T14:08:10 | 2020-11-08T14:08:10 | 107,782,106 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,987 | r | Assignment 1.R | #
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
# UI: a navbar layout with three tabs (purpose, products, people) and a
# banner image.  Image files are expected in the app's www/ directory.
ui <- fluidPage(
   navbarPage(title = "Clim8te",
              # Banner image shown above the tab content.
              mainPanel(
                img(src = "earth.jpg")),
   # Tab 1: mission statement with a call-to-action button.
   tabPanel("Our Purpose",
            titlePanel("Save the Earth"),
            "According to a recent think tank report, climate change could pose
            significant existential threat by 2050. If global temperatures rise
            3 degree Celcius, 55% of the world's population would experience heat
            beyond the threshold of human survivability.",
            titlePanel("Take action now"),
            "Our products are manufactured sustainably with 20% of the sales donated to
            NPOs fighting climate change. Do your part by choosing sustainability.",
            actionButton("yesButton","Yes!",icon("laugh")
            )),
   # Tab 2: product catalogue, three equal-width columns (4/12 each),
   # each with a heading, an image, and its own buy button.
   tabPanel("Our Products",
            titlePanel("Our Products"),
            fluidRow(
              column(4,
                     h1("Sustainable bottle",style = "font-size:25px"),
                     (img(src="bottle.jpg",width="25%")),
                     actionButton("buyButton1", "Buy now!",icon("shopping-cart"))),
              column(4,
                     h2("Sustainable tshirt",style = "font-size:25px"),
                     (img(src="shirt.jpg",width="45%")),
                     actionButton("buyButton2", "Buy now!",icon("shopping-cart"))),
              column(4,
                     h3("Sustainable bag",style = "font-size:25px"),
                     (img(src="bag.jpg",width="40%")),
                     actionButton("buyButton3", "Buy now!",icon("shopping-cart"))
              )
            )
   ),
   # Tab 3: about/contact sidebar (placed on the right) next to a team photo.
   tabPanel("Our People",
            sidebarLayout(
              sidebarPanel(
                h1 ("Our People", style = "font-size:25px"),
                "We are a team of youths passionate about living (on Earth).",
                h2 ("Contact us", style = "font-size:25px"),
                "Address: SomewhereOnEarth",
                "Phone: +12345678"
              ),
              mainPanel(
                img(src="people.jpg",width="153%")
              ),
              position = c("right")
            )
   )
   )
)
# Server logic: intentionally empty -- the app is purely static UI; the
# action buttons have no observers attached.
server <- function(input, output) {
}
# Run the application
shinyApp(ui = ui, server = server)
3a6f125249c95f8c38fb6d6ea535effe96eef413 | 1df5320b29a831aea4d6954b715c7d4d470c9cd2 | /run_analysis.R | 43907c8cefe491fe3f1d3f4ca38dd89d70be632a | [] | no_license | 000Nelson000/Gitub-Getting-and-cleaning-data-project-assignement | 20560a9fbedc7fa98e9446c050aa47ac25003705 | 93c764f4f8c03691a0c10bfaa670071d1e09409a | refs/heads/master | 2016-09-01T16:42:00.950510 | 2015-11-23T00:14:34 | 2015-11-23T00:14:34 | 46,580,874 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,934 | r | run_analysis.R |
# You should create one R script called run_analysis.R that does the following.
# 1.Merges the training and the test sets to create one data set.
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# 3.Uses descriptive activity names to name the activities in the data set
# 4.Appropriately labels the data set with descriptive variable names.
# 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
###Note: f_codebook calls should be interpreed as comments: These functon calls ar suposed to build the coodbook dynmically
## Path of the Markdown file that receives the dynamically-built code book.
CodebookFilePath<- ".\\CodeBook.md"
## Append one line (the concatenation of ...) to the code book file.
## `file` defaults to the global CodebookFilePath but can be overridden,
## which removes the hard dependency on a global and makes the helper
## testable in isolation (backward compatible: positional args still all
## go to `...`).
f_codebook <- function(..., file = CodebookFilePath){
  cat(..., "\n",file=file,append=TRUE, sep="")
}
## Code book header lines.
f_codebook("# Code Book for the script `run_analysis.R`")
f_codebook("Generated ",as.character(Sys.time())," during the script excution")
f_codebook("")
###################################################
## Source URL of the raw data, local name for the download, and the folder
## the archive is extracted into.
zip_file_url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zip_file_destination<-"data.zip"
data_path<-".\\data"
#IF TRUE the directory will be deleted an recreated
f_codebook("Downlaod zip file` ",zip_file_url, "` to the working directory if variable `flag_download_zip_file` =TRUE")
force_overwrite<-FALSE
##If TRUE the zip file will be downloaded
f_codebook("Extract the content of the zip file `",zip_file_url, "`to the folder" ,data_path, "`,if flag_extract_data_from_zip_file=TRUE")
flag_download_zip_file<-TRUE
## IF TRUE the content of th zip file will be extracted
flag_extract_data_from_zip_file<-TRUE
f_codebook("Drop and recreate the destination folder if variable `force_overwrite`=TRUE")
## BUG FIX: the original tested !file.exists(data_path), so an *existing*
## folder was never removed even with force_overwrite=TRUE; deletion only
## makes sense when the folder exists.  `&&` is the correct scalar operator
## inside if().
if(file.exists(data_path) && force_overwrite){unlink(data_path,recursive=TRUE)}
if(!file.exists(data_path)){
	dir.create(data_path)
}
if(flag_download_zip_file){
	zip_file<-download.file(zip_file_url,zip_file_destination)
}
if (flag_extract_data_from_zip_file){
	unzip("data.zip",exdir = data_path)
}
###################################################
## Reset the code book at the start of a run, optionally keeping a backup
## of the previous one.
flag_reset_coodebok<-TRUE
flag_codebook_backup_old_file<-TRUE
if (flag_reset_coodebok && file.exists(CodebookFilePath)) {
  if (flag_codebook_backup_old_file) {
    ## Time-stamped backup, so earlier runs are never overwritten.
    file.rename(CodebookFilePath,paste0(".\\codebook","_backup_",gsub(":","_",gsub(" ","_",Sys.time())),".md"))
  } else {
    ## BUG FIX: this rename previously ran unconditionally (on a hard-coded
    ## path) right after the time-stamped backup had already moved the file
    ## away, so it always failed with a warning.  It is now the
    ## non-time-stamped fallback and reuses CodebookFilePath.
    file.rename(CodebookFilePath,".\\CodeBook_backup.md")
  }
}
f_codebook("## Actions performed on data:")
f_codebook("")
f_codebook("Load data.table package")
library(data.table)
# X_train<-read.table(".\\train\\X_train.txt",header = F,)
# Record in the code book which raw files are read, then read them below.
f_codebook("* Read files using the fread function from data.table package :"," \n")
f_codebook("* Files to be read:"," \n")
f_codebook("* activity_labels.txt")
f_codebook("* features.txt")
f_codebook("* features_info.txt")
f_codebook("* README.txt")
f_codebook("* train\\X_train.txt")
f_codebook("* train\\Y_train.txt")
f_codebook("* train\\Subject_train.txt")
f_codebook("* test\\X_test.txt")
f_codebook("* test\\Y_test.txt")
f_codebook("* test\\Subject_test.txt")
# Lookup tables: activity code -> label, and feature number -> feature name.
activity_labels<-fread(input =".\\data\\UCI HAR Dataset\\activity_labels.txt",header=F)
features<-fread(input =".\\data\\UCI HAR Dataset\\features.txt",header=F)
# Free-text documentation files, kept only for reference.
features_info<-readLines(".\\data\\UCI HAR Dataset\\features_info.txt")
README<-readLines(".\\data\\UCI HAR Dataset\\README.txt")
# Training partition: measurements (X), activity codes (Y), subject ids.
X_train<-fread(input =".\\data\\UCI HAR Dataset\\train\\X_train.txt",header=F)
Y_train<-fread(input =".\\data\\UCI HAR Dataset\\train\\Y_train.txt",header=F)
subject_train<-fread(input =".\\data\\UCI HAR Dataset\\train\\subject_train.txt",header=F)
# names(X_train)
# Test partition: the same three files.
X_test<-fread(input =".\\data\\UCI HAR Dataset\\test\\X_test.txt",header=F)
Y_test<-fread(input =".\\data\\UCI HAR Dataset\\test\\Y_test.txt",header=F)
subject_test<-fread(input =".\\data\\UCI HAR Dataset\\test\\subject_test.txt",header=F)
## Rename the X columns: fread's default names are V1..Vn; stripping the "V"
## recovers the feature number, which is matched against the features table
## to obtain the measurement name.
f_codebook("Get the column names for the X(train and test) files, matching with features file"," \n")
f_codebook("* Note: The `V` was removed from the column names before matching"," \n")
names(X_train)<-
	features[match(
		as.integer(gsub("V","",names(X_train)))
		,features$V1)
	,V2]
names(X_test)<-
	features[match(
		as.integer(gsub("V","",names(X_test)))
		,features$V1)
	,V2]
f_codebook("* Setting the Y files column names to `Target`"," \n")
names(Y_train)<-"Target"
names(Y_test)<-"Target"
f_codebook("* Binding the train data sets (X,Y,Subject) into a single data set named `train`"," \n")
## BUG FIX: the original line was `train<-cbind`, which assigned the cbind
## *function* to `train` and made the later rbind(train, test) fail.  The
## call now mirrors the construction of `test` below.
train<-cbind(X_train,Y_train,Subject=subject_train$V1,aux_source=c("Train"))
f_codebook("* Binding the train/test data sets (X,Y,Subject) into a single data set named `test`"," \n")
test<-cbind(X_test,Y_test,Subject=subject_test$V1,aux_source=c("Test"))
f_codebook("* Note: A new column `aux_source` (equal to 'train' or 'test') was included so we can identify where each row came from ")
f_codebook("* Binding the train/test full data sets into a single data set names `dt`")
dt<-rbind(train,test)
# table(full[,aux_source])
f_codebook("* `dt` data set:", nrow(dt)," x " ,ncol(dt))
f_codebook("* Store the `dt` colum names into a vector names `aux_names`")
aux_names<-names(dt)
# table(dt$Target)
# length(aux_names)
#
# grep("[a-b]|[x-z]", letters)
## Features of interest: names containing mean() or std() (case-insensitive).
mean_std_features<-aux_names[grep("mean\\(\\)|std\\(\\)" ,aux_names,ignore.case =T)]
f_codebook("* Getting a vector with the colum names containing `mean()` or `std()` :", length(mean_std_features)," features")
f_codebook("* Create a column names `Target_activity` containing the activity description")
## Map the numeric activity code to its descriptive label.
dt$Target_activity<-activity_labels[match (dt$Target,activity_labels$V1),V2]
# f_codebook("* `dt` data set columns: \n")
# f_codebook("Variable name | Desription")
# f_codebook("--------------------|------------")
#
# for (i in 1:length(names(dt))){
# f_codebook(
# "`",names(dt)[i]
# ,"` | Average value for this column,",class(data.frame(dt)[,i])
# # ," range : from ",paste(as.character(range(data.frame(aux_summary)[,i])),sep=" to ")
#
# )
# }
f_codebook("* Getting a vector with columns names of interest")
## Columns kept in the reduced data set: the mean/std features plus the
## activity label and the subject identifier.
interest_features<-c(mean_std_features,"Target_activity","Subject")
## BUG FIX: the original passed `length,(interest_features)` -- i.e. the
## length *function* itself as one argument -- which makes cat() error
## ("argument ... cannot be handled by 'cat'").  The intended call is
## length(interest_features).
f_codebook("* Creating a new data set,dt_2, containing only the columns of interest : ", length(interest_features), " features ,including target and subject")
dt_2<-dt[,interest_features,with=FALSE]
# f_codebook("* `dt_2` data set columns: \n")
# f_codebook("Variable name | Desription")
# f_codebook("--------------------|------------")
#
#
#
# for (i in 1:length(names(dt_2))){
# f_codebook(
# "`",names(dt_2)[i]
# ,"` | Average value for this column,",class(data.frame(dt_2)[,i])
# # ," range : from ",paste(as.character(range(data.frame(aux_summary)[,i])),sep=" to ")
#
# )
# }
# names(dt)
# names(dt_2)
f_codebook("* Getting a data frame with the original ('actual') colum names of the dt_2 data set, including a new column for the new column names ('new')")
f_codebook("* `dt_2` data set:" ,nrow(dt_2)," x ", ncol(dt_2))
# Two-column lookup: 'actual' (current names) and 'new' (to be rewritten).
aux_names_2<-data.frame(cbind(actual=names(dt_2),new=names(dt_2)))
aux_names_2$actual<-as.character(aux_names_2$actual)
aux_names_2$new<-as.character(aux_names_2$new)
# Document the renaming rules as a Markdown table in the code book.
f_codebook("* Changing the column names according with the folowing set of rules:\n")
f_codebook("### Repace applied to column names\n")
f_codebook("actual | new")
f_codebook("--------------------|------------")
f_codebook("Start with 't' | 'Time'")
f_codebook("Start with 'f' | 'Frequency'")
f_codebook("'acc' | 'Accelerometer'")
f_codebook("'Gyro' | 'Gyroscope'")
f_codebook("'BodyBody' | 'Body'")
f_codebook("'mean' | Mean")
f_codebook("'std' | Std")
f_codebook("'-' | ''")
f_codebook("'()' | ''")
f_codebook("")
# Apply the rules in order; later substitutions see the results of earlier
# ones (e.g. "Acc" is expanded before "-" is stripped).
aux_names_2$new<-gsub("^t","Time",aux_names_2$new)
aux_names_2$new<-gsub("^f","Frequency",aux_names_2$new)
aux_names_2$new<-gsub("Acc","Accelerometer",aux_names_2$new)
aux_names_2$new<-gsub("Gyro","Gyroscope",aux_names_2$new)
aux_names_2$new<-gsub("BodyBody","Body",aux_names_2$new)
aux_names_2$new<-gsub("-","",aux_names_2$new)
aux_names_2$new<-gsub("mean","Mean",aux_names_2$new)
aux_names_2$new<-gsub("std","Std",aux_names_2$new)
aux_names_2$new<-gsub("\\(\\)","",aux_names_2$new)
# Echo the rewritten names (visible only when run interactively).
aux_names_2$new
f_codebook("* Changing the column names according with the rules above")
names(dt_2)<-aux_names_2[match(names(dt_2),aux_names_2$actual),"new"]
f_codebook("* Identifying the coluns to be aggregated and stores their names in a vector called `colstoagg`")
colstoagg<-grep("Mean|Std",names(dt_2),value = T)
f_codebook("* Calculating the mean for each variable stores in the vector `colstoagg`ans tore the result in the `aux_summary` data set")
# NOTE(review): colstoagg is not actually passed to the aggregation below;
# the result is the same here only because .SD excludes the by-columns and
# dt_2 contains nothing but the Mean/Std features plus the grouping columns.
aux_summary<-dt_2[,lapply(.SD,mean),by=c("Target_activity","Subject")]
# dt_resumo<-dt_2[,lapply(.SD,mean,.SDCols=colstoagg),by=c("Target_activity","Subject")]
f_codebook("* Write the data set `aux_summary` into the `TidyDataSet.txt` file - ",nrow(aux_summary)," x ",ncol(aux_summary))
write.table(x=aux_summary,file="TidyDataSet.txt",row.names = FALSE)
##Checking if the file is readable as we expect
f_codebook("* Check if the `TidyDataSet.txt` file exists and is readable")
## Round-trip check: re-read the file that was just written.
check_file<-read.table("TidyDataSet.txt",header = T)
f_codebook("* Check if the `TidyDataSet.txt` has the expected structure, calling the head function ")
head(check_file)
# names(aux_summary)
## Append the final column dictionary to the code book, one row per column.
f_codebook("* `TidyDataSet.txt` / 'tidydata' final columns: \n")
f_codebook("Variable name | Desription")
f_codebook("--------------------|------------")
## seq_along() replaces 1:length(...) (safe for empty input, and idiomatic).
for (i in seq_along(names(aux_summary))){
	f_codebook(
		"`",names(aux_summary)[i]
		,"` | Average value for this column,",class(data.frame(aux_summary)[,i])
		# ," range : from ",paste(as.character(range(data.frame(aux_summary)[,i])),sep=" to ")
	)
}
|
c74f7f7fd0363b666a938a125870587842079fd1 | a8148ecf3d3478ac5c41de1759c2d0bd9c8a5aea | /evaluate_tree_score.r | 822742efb4df3e7f55c14addd652b01b1ec29088 | [] | no_license | imadk/basicML | bed72d2cee945ea950d425c2601da19b777d540d | 05cfdbbe495771ef22b106a827fd3353f6bd5f84 | refs/heads/master | 2020-04-03T13:09:52.645365 | 2018-10-29T20:24:25 | 2018-10-29T20:24:25 | 155,275,951 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,485 | r | evaluate_tree_score.r | evaluate_tree_score <- function (all = FALSE, ind=1){
#Score a dataset
# Scores the held-out test partition with the fitted rpart model stored in
# the global `proj` list, then writes ids + predictions to a CSV under `ftest`.
# NOTE(review): assigning `proj$testset <- ...` inside a function modifies a
# *local* copy of `proj`; the global object is untouched (the only lasting
# side effect is the written CSV) -- confirm this is intended.
# NOTE(review): the parameter `all` shadows base::all inside this function.
ftest<-"C:/Users/Imad/Documents/CKI/CKI Inc/workspaces/BramPaperman_workspace/OUTPUTS/"
#proj$testset <- read.csv(ftest, na.strings=c(".", "NA", "", "?"), header=TRUE, sep="\t", encoding="UTF-8")
# Take the test rows (indices in proj$test) from the full dataset.
proj$testset <- proj$dataset[proj$test,];
# Ensure the levels are the same as the training data for variable `summary'.
#levels(crs$testset[["summary"]]) <-
# c(levels(crs$testset[["summary"]]),
# setdiff(levels(crs$dataset[["summary"]]),
# levels(crs$testset[["summary"]])))
# Predict with the decision tree using only the model's input columns.
proj$pr <- predict(proj$rpart, proj$testset[,c(proj$input)])
# `all = FALSE`: export only identifier columns; otherwise export every column.
if(!all){ #ident only
sdata <- subset(proj$testset[,], select=c(proj$ident))
}else{
sdata <- proj$testset[,]
}
# Output columns: selected data, the first dataset column looked up by the
# numeric value of the first id column (presumably a row id -- TODO confirm),
# and the predictions. File name is prefixed with `ind` and the target name.
write.csv(cbind(sdata, proj$dataset[as.vector(as.numeric(sdata[,1])),1], proj$pr), file=paste(ftest,ind,as.character(proj$target),"test_score_tree_id.csv",sep=""), row.names=FALSE)
}
# Obtain predictions for the DT model on the validation dataset
# proj$pr <- predict(proj$rpart, proj$dataset[proj$validate, c(proj$input)])
# sdata <- subset(proj$dataset[proj$validate,], select=c(proj$target))
# sdata2 <- proj$dataset[proj$validate,]
# write.csv(cbind(sdata, proj$pr), file="C:\\Users\\Imad\\Documents\\CKI\\CKI Inc\\workspaces\\iPerceptions2_workspace\\Code\\workspace\\Dataset Prediction Project 1\\data_validate_score_idents.csv", row.names=FALSE)
# write.csv(cbind(sdata2, proj$pr), file="C:\\Users\\Imad\\Documents\\CKI\\CKI Inc\\workspaces\\iPerceptions2_workspace\\Code\\workspace\\Dataset Prediction Project 1\\data_validate_score_all.csv", row.names=FALSE)
# Obtain predictions for the DT model on the test dataset
# proj$pr <- predict(proj$rpart, proj$dataset[proj$test, c(proj$input)])
# sdata <- subset(proj$dataset[proj$test,], select=c(proj$target))
# sdata2 <- proj$dataset[proj$test,]
# write.csv(cbind(sdata, proj$pr), file="C:\\Users\\Imad\\Documents\\CKI\\CKI Inc\\workspaces\\iPerceptions2_workspace\\Code\\workspace\\Dataset Prediction Project 1\\data_test_score_idents.csv", row.names=FALSE)
# write.csv(cbind(sdata2, proj$pr), file="C:\\Users\\Imad\\Documents\\CKI\\CKI Inc\\workspaces\\iPerceptions2_workspace\\Code\\workspace\\Dataset Prediction Project 1\\data_test_score_all.csv", row.names=FALSE)
|
00142479d510788162857f4f281994c70b98dcb9 | 618b0785e4630752fa1f30b03756930a2d490c99 | /1_code/_0_overview.R | 82b4c0a597278e653966c26af7cd608cd7f68db7 | [] | no_license | tarasevic-r/time-series | 072b6ea60e2e6df70473d1599dcd564a9bec19c8 | 45366153838bbfb38fbde646373068b2cec4801f | refs/heads/master | 2022-04-18T19:56:53.452191 | 2020-04-20T06:02:02 | 2020-04-20T06:02:02 | 257,186,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 913 | r | _0_overview.R |
# Driver script: configure sensor ids, then source the pipeline steps.
# The `sensor_id_*` (and, when uncommented, `period_*`) globals are consumed
# by the sourced scripts, so their names must not change.
# Write sensor id
sensor_id_1 <- 'T7_800_R6292' # define 1st sensor id
# NOTE(review): sensor_id_2 is identical to sensor_id_1 -- confirm intended.
sensor_id_2 <- 'T7_800_R6292' # define 2nd sensor id
#### Required packages ####
source("packages.R")
##### Load data ####
# Expected to create `d1` (and `d2`) with columns `value` and `timestamp`.
source("0_load_rhdf5.R")
#### Full signals statistics ####
source("0_statistics_overview.R")
#### discretiez signals ####
# source("0_discretize.R")
# Interactive line plot of the raw signal, used to pick a subset period.
# plot SENSOR 1 signal to choose subset
plotly::plot_ly(
y = d1$value
,x = d1$timestamp
, type = 'scatter'
, mode = 'lines'
)
# # plot SENSOR 2 signal to choose subset
# plotly::plot_ly(
# y = d2$value
# ,x = d2$timestamp
# , type = 'scatter'
# , mode = 'lines'
# )
#
#
# # Define period for first signal YYYY-MM-DD hh:mm:ss
# period_start_1 <- "2019-02-08 11:10:00"
# period_end_1 <- "2019-02-08 18:00:00"
#
# # Define period for second signal YYYY-MM-DD hh:mm:ss
# period_start_2 <- "2019-02-06 16:00:00"
# period_end_2 <- "2019-02-07 18:00:00" |
6adcefa14a40fffbe59d3c4a1ff059fec70d0469 | 870ca1a284af713c56e3d6c97f0cd0c2a92f2303 | /scripts/supplementary.R | e867c08d0f1f86cc958d5951dcebef15c8feaa44 | [] | no_license | GrosseLab/BGSC | 6b59da54edebb284b506687dc5270111eb65429a | f7a68ce1f29a1d2b8de2d5d1b3e08d5fc5cc89d4 | refs/heads/master | 2020-03-15T05:20:12.235915 | 2019-09-04T08:59:59 | 2019-09-04T08:59:59 | 131,986,682 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,184 | r | supplementary.R | plotOUT <- './PaperPlot/'
# Shared ggplot settings: side-by-side bar dodge and base font size.
dodge <- position_dodge(width = 0.9)
base_size <- 20
# PlotsPaper mRNA G0G1 Proliferation -------------------------------------
# One CSV per measured quantity; all lookup vectors below are keyed by the
# short assay name so later sections can index them by name.
ExcelFilesNames <- c("CCND1", "EGFRall", "EGFRv1", "EGFRv4", "G0G1",
                     "GAPDH", "MMP2", "proliferation48H", "SVV")
ExcelFiles <- setNames(
  c("./rawdata/supplementary/RNA_Daten_zur_SF767_CCND1.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_EGFRall.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_EGFRv1.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_EGFRv4.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_G0G1.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_GAPDH.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_MMP2.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_proliferation48H.csv",
    "./rawdata/supplementary/RNA_Daten_zur_SF767_SVV.csv"),
  ExcelFilesNames)
# Read every table up front (semicolon separated, decimal comma).
ExcelFilesList <- setNames(
  lapply(ExcelFilesNames, function(nm) read.csv(ExcelFiles[nm], sep = ';', dec = ',')),
  ExcelFilesNames)
# Human-readable labels for plot axes ("SVV" is displayed as "BIRC5").
OutNames <- setNames(
  c("CCND1", "EGFR variants I-IV ", "EGFR variant I", "EGFR variant IV",
    "G0G1", "GAPDH", "MMP2", "proliferation48H", "BIRC5"),
  ExcelFilesNames)
# PlotsPaper mRNA -------------------------------------
# For every mRNA assay: build a 4x4 treatment-by-replicate matrix, summarise
# per treatment (geometric/arithmetic mean, std. error, sd), accumulate the
# summaries into `PData`, and save a mean+sd bar plot per gene.
mRNANames <- c("EGFRall","EGFRv1","EGFRv4","GAPDH","MMP2","SVV","CCND1")
PData <- data.frame()
for(mRNA in mRNANames){
# Columns 5:8 hold the four replicate measurements; 4 rows = treatments.
mat <- matrix(as.double(unlist(ExcelFilesList[[mRNA]][,c(5:8)])),4,4)
dimnames(mat) <- list(as.character(ExcelFilesList[[mRNA]][,'Behandlung']),c('rep1','rep2','rep3','rep4'))
# rownames(mat)[2] <- 'siRNA-luci'
# Rename the 2nd treatment to the label used in the paper.
rownames(mat)[2] <- 'siRNA-nonsense'
print(mat)
# Per-treatment summary statistics across replicates (NAs ignored).
pdata <- data.frame( "Treat" = rownames(mat),
"Gene" = mRNA,
"Out" = OutNames[mRNA],
"GeoMean" = (2^rowMeans(log2(mat),na.rm = T )),
"Mean" = rowMeans(mat,na.rm = T),
"stderr" = apply(mat,1,function(row) plotrix::std.error((row),na.rm = T)),
"sd" = apply(mat,1,function(row) sd((row),na.rm = T)))
PData <- rbind(PData,pdata)
# Reorder treatment levels: control, siRNA-nonsense, siRNA-ALL, siRNA-I
# (indices refer to the alphabetically sorted factor levels).
rns <- levels(pdata$Treat)[c(1,4,2,3)] ; pdata$Treat <- factor(pdata$Treat, levels = rns)
# Precompute error-bar aesthetics and rounded y-axis maxima for each
# mean/geometric-mean x stderr/sd combination (only Mean+sd is plotted).
limitsMeanErr <- aes(ymax = pdata$Mean + pdata$stderr , ymin=pdata$Mean - pdata$stderr) ; limitsMeanErrMAX <- round(max(pdata$Mean + pdata$stderr)+10)
limitsGeoMeanErr <- aes(ymax = pdata$GeoMean + pdata$stderr , ymin=pdata$GeoMean - pdata$stderr); limitsGeoMeanErrMAX <- round(max(pdata$GeoMean + pdata$stderr)+10)
limitsMeanSd <- aes(ymax = pdata$Mean + pdata$sd , ymin=pdata$Mean - pdata$sd); limitsMeanSdMAX <- round(max(pdata$Mean + pdata$sd)+10)
limitsGeoMeanSd <- aes(ymax = pdata$GeoMean + pdata$sd , ymin=pdata$GeoMean - pdata$sd); limitsGeoMeanSdMAX <- round(max(pdata$GeoMean + pdata$sd)+10)
# Bar plot of the arithmetic mean with +/- sd error bars, one PDF per gene.
PlotMeanSd <- ggplot(pdata,aes(x=factor(Treat),y=Mean)) +
geom_bar(position="dodge",stat="identity") +
geom_errorbar(limitsMeanSd, position=dodge, width=0.4) +
#ylim(c(0 ,limitsMeanSdMAX)) +
labs(x = "", y = paste0("relative mRNA level of ",OutNames[mRNA]," copies / \n HPRT copies"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
scale_y_continuous(breaks=seq(0,limitsMeanSdMAX,20))
#theme(legend.position="bottom")
# scale_fill_manual(values = cols,name="") +
print(PlotMeanSd)
ggsave(plot = PlotMeanSd,filename = paste0(plotOUT,'mRNA_',mRNA,'_MeanSd.pdf'),width = 12,height = 8)
# PlotMeanErr <- ggplot(pdata,aes(x=factor(Treat),y=Mean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsMeanErr, position=dodge, width=0.4) +
# # ylim(c(0 ,limitsMeanErrMAX)) +
# labs(x = "", y = paste0("relative mRNA level of ",OutNames[mRNA]," copies / \n HPRT copies"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
# scale_y_continuous(breaks=seq(0,limitsMeanErrMAX,20))
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotMeanErr)
# ggsave(plot = PlotMeanErr,filename = paste0(plotOUT,'mRNA_',mRNA,'_MeanErr.pdf'),width = 12,height = 8)
#
# PlotGeoMeanSd <- ggplot(pdata,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanSd, position=dodge, width=0.4) +
# #ylim(c(0 ,limitsGeoMeanSdMAX)) +
# labs(x = "", y = paste0("relative mRNA level of ",OutNames[mRNA]," copies / \n HPRT copies"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
# scale_y_continuous(breaks=seq(0,limitsGeoMeanSdMAX,20))
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanSd)
# ggsave(plot = PlotGeoMeanSd,filename = paste0(plotOUT,'mRNA_',mRNA,'_GeoMeanSd.pdf'),width = 12,height = 8)
#
# PlotGeoMeanErr <- ggplot(pdata,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanErr, position=dodge, width=0.4) +
# #ylim(c(0 ,limitsGeoMeanErrMAX)) +
# labs(x = "", y = paste0("relative mRNA level of ",OutNames[mRNA]," copies / \n HPRT copies"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
# scale_y_continuous(breaks=seq(0,limitsGeoMeanErrMAX,20))
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanErr)
# ggsave(plot = PlotGeoMeanErr,filename = paste0(plotOUT,'mRNA_',mRNA,'_GeoMeanErr.pdf'),width = 12,height = 8)
}
# Combined figure: all genes in one faceted bar plot, then per-gene-group
# subsets. Treatment levels are reordered as in the per-gene loop above.
rns <- levels(PData$Treat)[c(1,4,2,3)] ; PData$Treat <- factor(PData$Treat, levels = rns)
limitsMeanErr <- aes(ymax = PData$Mean + PData$stderr , ymin=PData$Mean - PData$stderr) ; limitsMeanErrMAX <- round(max(PData$Mean + PData$stderr)+10)
limitsGeoMeanErr <- aes(ymax = PData$GeoMean + PData$stderr , ymin=PData$GeoMean - PData$stderr); limitsGeoMeanErrMAX <- round(max(PData$GeoMean + PData$stderr)+10)
limitsMeanSd <- aes(ymax = PData$Mean + PData$sd , ymin=PData$Mean - PData$sd); limitsMeanSdMAX <- round(max(PData$Mean + PData$sd)+10)
limitsGeoMeanSd <- aes(ymax = PData$GeoMean + PData$sd , ymin=PData$GeoMean - PData$sd); limitsGeoMeanSdMAX <- round(max(PData$GeoMean + PData$sd)+10)
PlotMeanSd <- ggplot(PData,aes(x=factor(Treat),y=Mean)) +
geom_bar(position="dodge",stat="identity") +
geom_errorbar(limitsMeanSd, position=dodge, width=0.4) +
#ylim(c(0 ,limitsMeanSdMAX)) +
labs(x = "", y = paste0("relative mRNA level of gene copies / HPRT copies")) +
thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
scale_y_continuous(breaks=seq(0,limitsMeanSdMAX,20)) +
facet_grid(Out ~ .)
#theme(legend.position="bottom")
# scale_fill_manual(values = cols,name="") +
print(PlotMeanSd)
ggsave(plot = PlotMeanSd,filename = paste0(plotOUT,'mRNA_ALL_MeanSd.pdf'),width = 12,height = 20)
# Single fill colour used for all remaining bar plots.
barCol <- RColorBrewer::brewer.pal(11,'Spectral')[6]
# Gene groups are picked by *position* in the alphabetically sorted factor
# levels (CCND1, EGFRall, EGFRv1, EGFRv4, GAPDH, MMP2, SVV).
# NOTE(review): "EGFR" = levels 1:3 therefore includes CCND1 but not EGFRv4;
# verify these index ranges select the intended genes.
for(i in c("EGFR",'Control','Proli','EGFRControl')){
set <- switch(i,
"EGFR" = as.character( levels(PData$Gene)[1:3] ),
'Control' = as.character( levels(PData$Gene)[4:5] ),
'Proli' = as.character( levels(PData$Gene)[6:7] ),
'EGFRControl'= as.character( levels(PData$Gene)[1:5] )
)
# Subset the summary table to the genes of this group and re-plot.
tmp <- PData[PData$Gene %in% set,]
tmp_limitsMeanSd <- aes(ymax = tmp$Mean + tmp$sd , ymin=tmp$Mean - tmp$sd); limitsMeanSdMAX <- round(max(tmp$Mean + tmp$sd)+10)
PlotMeanSd <- ggplot(tmp,aes(x=factor(Treat),y=Mean)) +
geom_bar(position="dodge",stat="identity",fill=barCol,color='black') +
geom_errorbar(tmp_limitsMeanSd, position=dodge, width=0.4) +
labs(x = "", y = paste0("relative mRNA level of gene copies / HPRT copies")) +
thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
scale_y_continuous(breaks=seq(0,limitsMeanSdMAX,20)) +
facet_grid(Out ~ .)
print(PlotMeanSd)
ggsave(plot = PlotMeanSd,filename = paste0(plotOUT,'mRNA_ALL_MeanSd_',i,'.pdf'),width = 8,height = 15)
}
# PlotMeanErr <- ggplot(PData,aes(x=factor(Treat),y=Mean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsMeanErr, position=dodge, width=0.4) +
# # ylim(c(0 ,limitsMeanErrMAX)) +
# labs(x = "", y = paste0("relative mRNA level of gene copies / HPRT copies")) +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
# scale_y_continuous(breaks=seq(0,limitsMeanErrMAX,20)) +
# facet_grid(Out ~ .)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotMeanErr)
# ggsave(plot = PlotMeanErr,filename = paste0(plotOUT,'mRNA_ALL_MeanErr.pdf'),width = 12,height = 20)
#
# PlotGeoMeanSd <- ggplot(PData,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanSd, position=dodge, width=0.4) +
# #ylim(c(0 ,limitsGeoMeanSdMAX)) +
# labs(x = "", y = paste0("relative mRNA level of gene copies / HPRT copies")) +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
# scale_y_continuous(breaks=seq(0,limitsGeoMeanSdMAX,20)) +
# facet_grid(Out ~ .)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanSd)
# ggsave(plot = PlotGeoMeanSd,filename = paste0(plotOUT,'mRNA_ALL_GeoMeanSd.pdf'),width = 12,height = 20)
#
# PlotGeoMeanErr <- ggplot(PData,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanErr, position=dodge, width=0.4) +
# #ylim(c(0 ,limitsGeoMeanErrMAX)) +
# labs(x = "", y = paste0("relative mRNA level of gene copies / HPRT copies")) +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6) +
# scale_y_continuous(breaks=seq(0,limitsGeoMeanErrMAX,20)) +
# facet_grid(Out ~ .)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanErr)
# ggsave(plot = PlotGeoMeanErr,filename = paste0(plotOUT,'mRNA_ALL_GeoMeanErr.pdf'),width = 12,height = 20)
# PlotsPaper proliferation48H -------------------------------------
# Cell counts after 48 h of proliferation, 5 replicates per treatment.
set <- "proliferation48H"
# Columns 3:7 hold the five replicate measurements; 10 rows = all treatments.
mat <- matrix(as.double(unlist(ExcelFilesList[[set]][,c(3:7)])),10,5)
dimnames(mat) <- list(as.character(ExcelFilesList[[set]][,'Behandlung']),c('rep1','rep2','rep3','rep4','rep5'))
# Keep only the four treatments shown in the paper figure.
mat <- mat[c("K","Lu","All",'Full'),]
# rownames(mat) <- c('control','siRNA-luci','siRNA-ALL','siRNA-I')
rownames(mat) <- c('control','siRNA-nonsense','siRNA-ALL','siRNA-I')
# Per-treatment summary statistics across replicates (NAs ignored).
pdata <- data.frame( "Treat" = rownames(mat),
# "Out" = OutNames[mRNA],
"GeoMean" = (2^rowMeans(log2(mat),na.rm = T )),
"Mean" = rowMeans(mat,na.rm = T),
"stderr" = apply(mat,1,function(row) plotrix::std.error((row),na.rm = T)),
"sd" = apply(mat,1,function(row) sd((row),na.rm = T)))
# Reorder levels: control, siRNA-nonsense, siRNA-ALL, siRNA-I.
rns <- levels(pdata$Treat)[c(1,4,2,3)] ; pdata$Treat <- factor(pdata$Treat, levels = rns)
limitsMeanErr <- aes(ymax = pdata$Mean + pdata$stderr , ymin=pdata$Mean - pdata$stderr) ; limitsMeanErrMAX <- round(max(pdata$Mean + pdata$stderr)+10)
limitsGeoMeanErr <- aes(ymax = pdata$GeoMean + pdata$stderr , ymin=pdata$GeoMean - pdata$stderr); limitsGeoMeanErrMAX <- round(max(pdata$GeoMean + pdata$stderr)+10)
limitsMeanSd <- aes(ymax = pdata$Mean + pdata$sd , ymin=pdata$Mean - pdata$sd); limitsMeanSdMAX <- round(max(pdata$Mean + pdata$sd)+10)
limitsGeoMeanSd <- aes(ymax = pdata$GeoMean + pdata$sd , ymin=pdata$GeoMean - pdata$sd); limitsGeoMeanSdMAX <- round(max(pdata$GeoMean + pdata$sd)+10)
# Mean +/- sd bar plot, saved as a single PDF.
PlotMeanSd <- ggplot(pdata,aes(x=factor(Treat),y=Mean)) +
geom_bar(position="dodge",stat="identity",fill=barCol,color='black') +
geom_errorbar(limitsMeanSd, position=dodge, width=0.4) +
ylim(c(0 ,limitsMeanSdMAX)) +
# scale_y_continuous(breaks=seq(0,limitsMeanSdMAX,20)) +
labs(x = "", y = paste0("cell number after 48 hours \n of proliferation"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
thememapBarplot(base_size = base_size,legend_key_size = 0.6)
#theme(legend.position="bottom")
# scale_fill_manual(values = cols,name="") +
print(PlotMeanSd)
ggsave(plot = PlotMeanSd,filename = paste0(plotOUT,'proliferation48H_MeanSd.pdf'),width = 12,height = 8)
# PlotMeanErr <- ggplot(pdata,aes(x=factor(Treat),y=Mean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsMeanErr, position=dodge, width=0.4) +
# ylim(c(0 ,limitsMeanErrMAX)) +
# labs(x = "", y = paste0("cell number after 48 hours of proliferation"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotMeanErr)
# ggsave(plot = PlotMeanErr,filename = paste0(plotOUT,'proliferation48H_MeanErr.pdf'),width = 12,height = 8)
#
# PlotGeoMeanSd <- ggplot(pdata,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanSd, position=dodge, width=0.4) +
# ylim(c(0 ,limitsGeoMeanSdMAX)) +
# labs(x = "", y = paste0("cell number after 48 hours of proliferation"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanSd)
# ggsave(plot = PlotGeoMeanSd,filename = paste0(plotOUT,'proliferation48H_GeoMeanSd.pdf'),width = 12,height = 8)
#
# PlotGeoMeanErr <- ggplot(pdata,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanErr, position=dodge, width=0.4) +
# ylim(c(0 ,limitsGeoMeanErrMAX)) +
# labs(x = "", y = paste0("cell number after 48 hours of proliferation"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanErr)
# ggsave(plot = PlotGeoMeanErr,filename = paste0(plotOUT,'proliferation48H_GeoMeanErr.pdf'),width = 12,height = 8)
# PlotsPaper G0G1 -------------------------------------
# Share of cells in G0/G1 phase, 6 replicates per treatment.
set <- "G0G1"
# Columns 3:8 hold the six replicate measurements; 10 rows = all treatments.
mat <- matrix(as.double(unlist(ExcelFilesList[[set]][,c(3:8)])),10,6)
dimnames(mat) <- list(as.character(ExcelFilesList[[set]][,'Bezeichnung']),c('rep1','rep2','rep3','rep4','rep5','rep6'))
# Keep only the four treatments shown in the paper figure.
mat <- mat[c("Ko","LU","All",'Full'),]
# FIX: the first rownames() assignment was dead code (immediately overwritten
# by the next line); keep it commented for reference, matching the style of
# the mRNA and proliferation sections above.
# rownames(mat) <- c('control','siRNA-luci','siRNA-ALL','siRNA-I')
rownames(mat) <- c('control','siRNA-nonsense','siRNA-ALL','siRNA-I')
# Per-treatment summary statistics across replicates (NAs ignored).
pdata <- data.frame( "Treat" = rownames(mat),
# "Out" = OutNames[mRNA],
"GeoMean" = (2^rowMeans(log2(mat),na.rm = TRUE )),
"Mean" = rowMeans(mat,na.rm = TRUE),
"stderr" = apply(mat,1,function(row) plotrix::std.error((row),na.rm = TRUE)),
"sd" = apply(mat,1,function(row) sd((row),na.rm = TRUE)))
# Reorder levels: control, siRNA-nonsense, siRNA-ALL, siRNA-I.
rns <- levels(pdata$Treat)[c(1,4,2,3)] ; pdata$Treat <- factor(pdata$Treat, levels = rns)
limitsMeanErr <- aes(ymax = pdata$Mean + pdata$stderr , ymin=pdata$Mean - pdata$stderr) ; limitsMeanErrMAX <- round(max(pdata$Mean + pdata$stderr)+10)
limitsGeoMeanErr <- aes(ymax = pdata$GeoMean + pdata$stderr , ymin=pdata$GeoMean - pdata$stderr); limitsGeoMeanErrMAX <- round(max(pdata$GeoMean + pdata$stderr)+10)
limitsMeanSd <- aes(ymax = pdata$Mean + pdata$sd , ymin=pdata$Mean - pdata$sd); limitsMeanSdMAX <- round(max(pdata$Mean + pdata$sd)+10)
limitsGeoMeanSd <- aes(ymax = pdata$GeoMean + pdata$sd , ymin=pdata$GeoMean - pdata$sd); limitsGeoMeanSdMAX <- round(max(pdata$GeoMean + pdata$sd)+10)
# Mean +/- sd bar plot, saved as a single PDF.
PlotMeanSd <- ggplot(pdata,aes(x=factor(Treat),y=Mean)) +
geom_bar(position="dodge",stat="identity",fill=barCol,color='black') +
geom_errorbar(limitsMeanSd, position=dodge, width=0.4) +
# ylim(c(0 ,limitsMeanSdMAX)) +
scale_y_continuous(breaks=seq(0,limitsMeanSdMAX,10)) +
labs(x = "", y = paste0("relative level of G0/G1 cells"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
thememapBarplot(base_size = base_size,legend_key_size = 0.6)
print(PlotMeanSd)
ggsave(plot = PlotMeanSd,filename = paste0(plotOUT,'G0G1_MeanSd.pdf'),width = 12,height = 8)
# PlotMeanErr <- ggplot(pdata,aes(x=factor(Treat),y=Mean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsMeanErr, position=dodge, width=0.4) +
# scale_y_continuous(breaks=seq(0,limitsMeanErrMAX,10)) +
# labs(x = "", y = paste0("relative level of G0/G1 cells"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotMeanErr)
# ggsave(plot = PlotMeanErr,filename = paste0(plotOUT,'G0G1_MeanErr.pdf'),width = 12,height = 8)
#
# PlotGeoMeanSd <- ggplot(pdata,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanSd, position=dodge, width=0.4) +
# scale_y_continuous(breaks=seq(0,limitsGeoMeanSdMAX,10)) +
# labs(x = "", y = paste0("relative level of G0/G1 cells"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanSd)
# ggsave(plot = PlotGeoMeanSd,filename = paste0(plotOUT,'G0G1_GeoMeanSd.pdf'),width = 12,height = 8)
#
# PlotGeoMeanErr <- ggplot(pdata,aes(x=factor(Treat),y=GeoMean)) +
# geom_bar(position="dodge",stat="identity") +
# geom_errorbar(limitsGeoMeanErr, position=dodge, width=0.4) +
# scale_y_continuous(breaks=seq(0,limitsGeoMeanErrMAX,10)) +
# labs(x = "", y = paste0("relative level of G0/G1 cells"))+ # ,title="PAPER PLOT - Comparison of SF767 - Illumina data and RT-qPCR") +
# thememapBarplot(base_size = base_size,legend_key_size = 0.6)
# #theme(legend.position="bottom")
# # scale_fill_manual(values = cols,name="") +
# print(PlotGeoMeanErr)
# ggsave(plot = PlotGeoMeanErr,filename = paste0(plotOUT,'G0G1_GeoMeanErr.pdf'),width = 12,height = 8)
|
5ab804ba894b44c05d11ff8fef40a985f27e3ef4 | b3357449a175852145c9099327649df50046d0c4 | /src/recommender/man/recommendation-functions.Rd | 7d0a4d3a3a55fbc8261690fbc119653855fef352 | [] | no_license | savchukndr/temat14 | acb2e5c28beee21c67c7deca942b4edca34ca6b5 | 3fb63c9c2128de0784047be72d9daf2090bf098c | refs/heads/master | 2021-05-14T17:21:29.412546 | 2018-01-15T20:55:35 | 2018-01-15T20:55:35 | 116,044,592 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 302 | rd | recommendation-functions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recommendation-functions.R
\name{recommendation-functions}
\alias{recommendation-functions}
\alias{help.get_cosine}
\title{Recommendation functions.}
\usage{
help.get_cosine(x, y)
}
\description{
Recommendation functions.
}
|
c72bd70d3444f350caa9b03e723427c240214e18 | 5ea60fbfab431e91d9762c9d0b7f9cb7ea8791b6 | /R/globals.R | 39f2d124e18c913c170b9c9e6126147512c87545 | [
"Apache-2.0"
] | permissive | r-spark/sparkhail | 8eaf7f5dc8afcd392377e147fbe50e42676a000b | 30cdfc91a03ad0a3fed6f56262a00427348aa0a5 | refs/heads/master | 2021-07-25T09:28:47.898536 | 2020-01-22T17:21:24 | 2020-01-22T17:21:24 | 193,734,242 | 12 | 4 | Apache-2.0 | 2021-07-08T20:19:34 | 2019-06-25T15:24:35 | R | UTF-8 | R | false | false | 227 | r | globals.R | utils::globalVariables(c(".",
"entries.Call",
"entries.AD",
"entries.DP",
"entries.GQ",
"entries.PL")) |
5189feb9e8873e85be730694023a7529b0744bd6 | 42dda69bec18f6448b414334a3ecddc43e3fd2f0 | /man/Broadstone.nodes.Rd | f82c758541097afc55a345918c445f1ac23b56c3 | [] | no_license | cran/econullnetr | 3a8514e88899baad2fde511985e3fd83a0029dd2 | d0073240fb271179225ef0bf9f3feec93a2edda0 | refs/heads/master | 2023-05-13T23:03:23.505618 | 2021-06-02T07:10:05 | 2021-06-02T07:10:05 | 104,781,799 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 949 | rd | Broadstone.nodes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Broadstone_nodes.R
\docType{data}
\name{Broadstone.nodes}
\alias{Broadstone.nodes}
\title{Abundance and body mass data to accompany the \code{Broadstone} data set}
\format{
A data frame with three columns and 19 rows:
\describe{
\item{node}{The name of the macroinvertebrate taxon (node) in the network}
\item{M}{The mean body mass of an individual of the taxon, from
\code{cheddar}'s \code{BroadstoneStream} object}
\item{N}{The total abundance of each taxon from the 30 Surber samples (see
\code{\link{Broadstone.prey}} for details)}
}
}
\usage{
Broadstone.nodes
}
\description{
Provides additional information about each macroinvertebrate taxon in the
\code{\link{Broadstone}} food web for use alongside null model results in the
foodweb package \code{cheddar} (Hudson \emph{et al}. 2013).
}
\keyword{datasets}
|
a80026c8b6cdcf2f215f0bdea59beef4513e2418 | 494c629798ff86f58759fd3b91bf0e6bba1ff12a | /GTEx_codes/opposite_eQTL.R | 030df856d8ffd22e07167d2aa80ca765d85a769c | [] | no_license | fawnshao/UTHealthMut2Loop | ced2334d86ed779cd2ee930e0ebb602c02c0d0f1 | 541868f6f89837d621d12e062af19fde39824342 | refs/heads/master | 2021-01-23T21:13:43.230880 | 2019-07-05T19:00:06 | 2019-07-05T19:00:06 | 102,887,142 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,494 | r | opposite_eQTL.R | args <- commandArgs(TRUE)
# Per-tissue enrichment of motif counts among eQTL variant classes:
# one-sided hypergeometric tails and Fisher's exact tests for each motif.
# Input files are derived from the tissue name passed as the first CLI arg.
evarfile <- paste(args[1], "eVar.counts.tsv", sep = ".")
motiffile <- paste(args[1], "motif.cmp.txt", sep = ".")
# $tissue.motif.cmp.txt
# $tissue.eVar.counts.tsv
# do hypergeometric exact test for motif count
# Usage:
# dhyper(x, m, n, k, log = FALSE)
# phyper(q, m, n, k, lower.tail = TRUE, log.p = FALSE)
# qhyper(p, m, n, k, lower.tail = TRUE, log.p = FALSE)
# rhyper(nn, m, n, k)
# Arguments:
# x, q: vector of quantiles representing the number of white balls
# drawn without replacement from an urn which contains both
# black and white balls.
# m: the number of white balls in the urn.
# n: the number of black balls in the urn.
# k: the number of balls drawn from the urn.
evarcount <- read.table(evarfile, sep = "\t", row.names = 1)
# Reorder/duplicate the eVar totals so position j lines up with motif-count
# column j below (rows 9-12 = foreground classes, 5-6 = background classes).
# NOTE(review): with a single-column input this yields a plain numeric
# vector, which the `usedevarcount[j]` indexing below relies on; with more
# columns it would return data.frame columns instead -- confirm input shape.
usedevarcount <- evarcount[c(9,9,10,10,11,11,12,12,5,5,6,6),]
motifcount <- read.table(motiffile, sep = "\t", header = T, row.names = 1, na.strings = "/")
# Result matrices: 4 comparisons against each of two backgrounds per motif.
motifpvalue <- matrix(nrow = nrow(motifcount), ncol = 8)
motiffisher <- matrix(nrow = nrow(motifcount), ncol = 8)
for(i in 1:nrow(motifcount)){
for(j in 1:4){
# Upper-tail hypergeometric: observed motif hits (col j) vs background 1
# (cols j+4) and background 2 (cols j+8).
motifpvalue[i, j] <- phyper(motifcount[i, j], motifcount[i, j+4],
usedevarcount[j+4] - motifcount[i, j+4], usedevarcount[j],
lower.tail = FALSE)
motifpvalue[i, j+4] <- phyper(motifcount[i, j], motifcount[i, j+8],
usedevarcount[j+8] - motifcount[i, j+8], usedevarcount[j],
lower.tail = FALSE)
# Fisher's exact test on the 2x2 contingency table (hit/miss x fg/bg);
# skipped whenever any cell is NA.
cmat <- matrix(c(motifcount[i, j],usedevarcount[j]-motifcount[i, j],
motifcount[i, j+4],usedevarcount[j+4] - motifcount[i, j+4]), nrow = 2, byrow = TRUE)
if(sum(is.na(cmat)) == 0){
motiffisher[i, j] <- fisher.test(cmat)$p.value
}
cmat <- matrix(c(motifcount[i, j],usedevarcount[j]-motifcount[i, j],
motifcount[i, j+8],usedevarcount[j+8] - motifcount[i, j+8]), nrow = 2, byrow = TRUE)
if(sum(is.na(cmat)) == 0){
motiffisher[i, j+4] <- fisher.test(cmat)$p.value
}
}
}
# Label result matrices after the motif-count columns they were derived from.
colnames(motifpvalue) <- colnames(motifcount)[5:12]
colnames(motiffisher) <- colnames(motifcount)[5:12]
rownames(motifpvalue) <- rownames(motifcount)
rownames(motiffisher) <- rownames(motifcount)
# Write counts and both p-value tables, one TSV per statistic per tissue.
write.table(rbind(usedevarcount,motifcount), file = paste("final.count", args[1], "tsv", sep = "."), quote = F, sep = "\t")
write.table(motifpvalue, file = paste("final.phyper", args[1], "tsv", sep = "."), quote = F, sep = "\t")
write.table(motiffisher, file = paste("final.fisher", args[1], "tsv", sep = "."), quote = F, sep = "\t")
8d5972b82ba5c77c9ff3d29825ae4b869c1f72ef | 9cc7423f4a94698df5173188b63c313a7df99b0e | /man/analyze.fa.Rd | da67d37972539f8493295536b74bc4d012050fac | [
"MIT"
] | permissive | HugoNjb/psycho.R | 71a16406654b11007f0d2f84b8d36587c5c8caec | 601eef008ec463040c68bf72ac1ed8d4a8f7751f | refs/heads/master | 2020-03-27T01:24:23.389884 | 2018-07-19T13:08:53 | 2018-07-19T13:08:53 | 145,707,311 | 1 | 0 | null | 2018-08-22T12:39:27 | 2018-08-22T12:39:27 | null | UTF-8 | R | false | true | 775 | rd | analyze.fa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze.fa.R
\name{analyze.fa}
\alias{analyze.fa}
\title{Analyze fa objects.}
\usage{
\method{analyze}{fa}(x, labels = NULL, treshold = "max", ...)
}
\arguments{
\item{x}{An psych object.}
\item{labels}{Supply a additional column with e.g. item labels.}
\item{treshold}{'max' or numeric. The threshold over which to associate an item with its component.}
\item{...}{Arguments passed to or from other methods.}
}
\value{
output
}
\description{
Analyze fa objects.
}
\examples{
library(psycho)
library(psych)
x <- psych::fa(psych::Thurstone.33, 2)
results <- analyze(x)
print(results)
summary(results)
plot(results)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
551d608f31bd209877b14c0431064d8287db2dd7 | 7944b44a3e0b62fc2e66a2f7687b8d947dd900b4 | /R/date_ranges.R | 8025f299799d711d7a972b0744416a64e3eaefd5 | [] | no_license | selesnow/ryandexdirect | 3316d3ba092b299b411d69b935d37c4313550482 | 04f8b84d5513d430e2f01cf5ce61c2810264f928 | refs/heads/master | 2023-04-03T08:38:45.572545 | 2023-03-17T14:57:18 | 2023-03-17T14:57:18 | 64,333,780 | 59 | 40 | null | 2019-05-02T05:56:51 | 2016-07-27T18:50:53 | R | UTF-8 | R | false | false | 376 | r | date_ranges.R | # FetchBy
# Split a date interval into consecutive sub-ranges.
#
# start, end: interval bounds; anything as.Date() accepts.
# by: step size, one of "day", "week", "month", "quarter", "year"
#     (case-insensitive; partial matching via match.arg).
# Returns a data.frame with character columns `start` and `end`; each row is
# one sub-range, ranges abut without overlap, and the last row ends at `end`.
date_ranges <- function(start, end, by) {
  start <- as.Date(start)
  end <- as.Date(end)
  by <- match.arg(tolower(by), c("day", "week", "month", "quarter", "year"))
  # Left edge of every sub-range.
  dates <- seq.Date(start, end, by = by)
  # Each range ends the day before the next one starts; the final range is
  # truncated at `end`. (For a single range, dates[-1] is empty and the end
  # is simply `end`.)
  # FIX: build the data.frame directly instead of round-tripping through a
  # character matrix, and use FALSE rather than the reassignable alias F.
  data.frame(start = as.character(dates),
             end = as.character(c(dates[-1] - 1, end)),
             stringsAsFactors = FALSE)
}
|
b4c2ccbad4e4c7d2503ffe2fdf266c19c523ae9a | 07ba78332e0b99c2d688a2bc781302908334a92d | /R/calc_params.R | 40ebd749922bee51f11d022decf4197247fb0589 | [] | no_license | rasenior/SolarCalc | 878a21b5cc9bcf725e39328704568df7f6530323 | f9048c07ee02605d1e0d4760d48dec6997f81dc1 | refs/heads/master | 2020-12-07T15:36:59.883958 | 2017-07-08T20:17:33 | 2017-07-08T20:17:33 | 95,547,018 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,735 | r | calc_params.R |
# Convert a Julian day number to Julian centuries since the J2000.0 epoch
# (Julian day 2451545); one Julian century = 36525 days.
julian_century <- function(julian_day) {
    days_per_century <- 36525
    (julian_day - 2451545) / days_per_century
}
# Geometric mean longitude of the sun, in degrees, wrapped to [0, 360).
geom_mean_long <- function(julian_century) {
    unwrapped_deg <- 280.46646 + (julian_century * (36000.76983 + julian_century * 0.0003032))
    unwrapped_deg %% 360
}
# Geometric mean anomaly of the sun, in degrees (not wrapped).
geom_mean_anom <- function(julian_century) {
    357.52911 + julian_century * (35999.05029 - (0.0001537 * julian_century))
}
# Eccentricity of Earth's orbit (dimensionless, slowly decreasing).
eccent_earth_orbit <- function(julian_century) {
    0.016708634 - julian_century * (4.2037e-05 + (1.267e-07 * julian_century))
}
# Convert radians to degrees.
rad2deg <- function(rad) {
    180 * rad / pi
}
# Convert degrees to radians.
deg2rad <- function(deg) {
    pi * deg / 180
}
# Equation of center for the sun, in degrees: the correction taking the
# mean anomaly to the true anomaly (three-term sine series).
sun_eq_ctr <- function(geom_mean_anom, julian_century) {
    term1 <- sin(deg2rad(geom_mean_anom)) * (1.914602 - julian_century * (0.004817 + (0.00014 * julian_century)))
    term2 <- sin(deg2rad(2 * geom_mean_anom)) * (0.019993 - (0.000101 * julian_century))
    term3 <- sin(deg2rad(3 * geom_mean_anom)) * 0.000289
    term1 + term2 + term3
}
# True longitude of the sun (degrees): mean longitude plus equation of center.
sun_true_long <- function(geom_mean_long, sun_eq_ctr) {
    geom_mean_long + sun_eq_ctr
}
# True anomaly of the sun (degrees): mean anomaly plus equation of center.
sun_true_anom <- function(geom_mean_anom, sun_eq_ctr) {
    geom_mean_anom + sun_eq_ctr
}
# Sun-Earth distance in astronomical units, from the ellipse equation
# r = a(1 - e^2) / (1 + e cos(nu)) with the true anomaly in degrees.
sun_rad_vector <- function(eccent_earth_orbit, sun_true_anom) {
# Semi-latus rectum scaled by the mean distance constant 1.000001018 AU.
numerator <- 1.000001018 * (1 - (eccent_earth_orbit^2))
denominator <- 1 + (eccent_earth_orbit * (cos(deg2rad(sun_true_anom))))
result <- numerator/denominator
return(result)
}
# Apparent longitude of the sun (degrees): true longitude corrected for
# aberration and nutation (125.04 - 1934.136*T is the lunar node longitude).
sun_app_long <- function(sun_true_long, julian_century) {
    sun_true_long - 0.00569 - (0.00478 * (sin(deg2rad(125.04 - (1934.136 * julian_century)))))
}
# Mean obliquity of the ecliptic, in degrees: 23 deg 26' plus a polynomial
# arc-second term, assembled via sexagesimal (base-60) conversion.
mean_obliq_ecliptic <- function(julian_century) {
    arc_seconds <- 21.448 - (julian_century * (46.815 + (julian_century * (0.00059 - (julian_century * 0.001813)))))
    arc_minutes <- arc_seconds/60
    degree_fraction <- (26 + arc_minutes)/60
    23 + degree_fraction
}
# Obliquity of the ecliptic corrected for nutation, in degrees.
obliq_corr <- function(mean_obliq_ecliptic, julian_century) {
    mean_obliq_ecliptic + (0.00256 * (cos(deg2rad(125.04 - (julian_century * 1934.136)))))
}
# Right ascension of the sun, in degrees in (-180, 180], via atan2 of the
# equatorial-plane projections of the apparent longitude.
sun_rt_ascen <- function(sun_app_long, obliq_corr) {
    cos_component <- cos(deg2rad(sun_app_long))
    sin_component <- cos(deg2rad(obliq_corr)) * sin(deg2rad(sun_app_long))
    rad2deg(atan2(y = sin_component, x = cos_component))
}
# Declination of the sun, in degrees: asin(sin(obliquity) * sin(app. longitude)).
sun_declin <- function(obliq_corr, sun_app_long) {
    sin_declination <- sin(deg2rad(obliq_corr)) * sin(deg2rad(sun_app_long))
    rad2deg(asin(sin_declination))
}
# Auxiliary quantity y = tan^2(obliquity / 2) used by the equation of time.
var_y <- function(obliq_corr) {
    (tan(deg2rad(obliq_corr/2)))^2
}
# Equation of time, in minutes: difference between apparent and mean solar
# time, from the standard five-term series in y = tan^2(obliquity/2),
# orbital eccentricity, mean longitude and mean anomaly (all in degrees).
eq_time <- function(var_y, geom_mean_long, eccent_earth_orbit, geom_mean_anom) {
var1 <- var_y * (sin(2 * deg2rad(geom_mean_long)))
var2 <- 2 * (eccent_earth_orbit * (sin(deg2rad(geom_mean_anom))))
var3 <- 4 * eccent_earth_orbit * var_y * (sin(deg2rad(geom_mean_anom))) * cos(2 * deg2rad(geom_mean_long))
var4 <- 0.5 * (var_y^2) * sin(4 * deg2rad(geom_mean_long))
var5 <- 1.25 * (eccent_earth_orbit^2) * sin(2 * deg2rad(geom_mean_anom))
# Factor 4 converts degrees of hour angle to minutes of time.
result <- 4 * rad2deg(var1 - var2 + var3 - var4 - var5)
return(result)
}
# Hour angle of sunrise, in degrees, for a given latitude and solar
# declination (both in degrees). The 90.833 deg zenith accounts for
# atmospheric refraction and the solar disc radius.
# NOTE(review): acos() returns NaN when the argument is outside [-1, 1],
# i.e. for polar day/night conditions -- callers must handle that case.
ha_sunrise <- function(latitude, sun_declin) {
x <- cos(deg2rad(90.833))/(cos(deg2rad(latitude)) * cos(deg2rad(sun_declin))) - tan(deg2rad(latitude)) *
tan(deg2rad(sun_declin))
result <- rad2deg(acos(x))
return(result)
}
# Solar noon, in minutes after local midnight. `longitude` is in degrees
# (4 minutes per degree; sign convention must match `tz`), `eq_time` is the
# equation of time in minutes, `tz` the UTC offset in hours.
solar_noon <- function(longitude, eq_time, tz) {
    720 - (4 * longitude) - eq_time + (tz * 60)
}
|
3cfc7d0a3e76354733e29425ffaf73a5e8f2b836 | ffd9b736a674ebe15a0cc90ab7ba19a1fa9c0518 | /run_analysis.R | 65c412a1120aea9acc1e0a3c3ef6be85abc2ac48 | [] | no_license | jedieudonne/getting-and-cleaning-data-project | ad9c02c84f058c81def433bd16179229c8091663 | ac8c4db08b979dfea1729cc319fa044f93f860ca | refs/heads/master | 2021-01-20T06:22:52.096160 | 2015-08-21T11:32:20 | 2015-08-21T11:32:20 | 40,888,489 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,379 | r | run_analysis.R | library(dplyr)
#setwd("./UCI HAR Dataset")
# Step 1: read the activity labels, the feature names, and the
# test/train splits of the UCI HAR data set.
activity_labels <- read.table("activity_labels.txt")
features_txt <- read.table("features.txt")
subject_test <- read.table("./test/subject_test.txt")
X_test <- read.table("./test/X_test.txt")
activity_test <- read.table("./test/y_test.txt")
subject_train <- read.table("./train/subject_train.txt")
X_train <- read.table("./train/X_train.txt")
activity_train <- read.table("./train/y_train.txt")
# Step 2: flag the features whose name contains "mean" or "std"
# (note this also captures the meanFreq() features).
featuresmean <- grepl("mean", features_txt[, 2])
featuresstd <- grepl("std", features_txt[, 2])
name_of_features_selected <- features_txt[featuresmean | featuresstd, ]
features_selected_logical <- featuresmean | featuresstd
# Step 3: keep only the selected feature columns in each split.
X_test_selected <- X_test[, features_selected_logical]
X_train_selected <- X_train[, features_selected_logical]
# Step 4: stack test on top of train and name the columns after the
# selected feature names.
features_total_selected <- rbind(X_test_selected, X_train_selected)
colnames(features_total_selected) <- features_txt[features_selected_logical, 2]
# Step 5: build the activity (descriptive labels) and subject columns.
activity <- rbind(activity_test, activity_train)
# Vectorized label lookup: the activity codes index rows of activity_labels.
# (Replaces a while loop that grew the vector with c(), which is O(n^2).)
activity <- as.character(activity_labels$V2[activity[, 1]])
subject <- rbind(subject_test, subject_train)
activity_and_subject <- cbind(activity, subject)
colnames(activity_and_subject) <- c("activity", "subject_id")
# Step 6: assemble the final frame, average every measure per
# (subject, activity) pair, and write the tidy result to disk.
final_frame <- cbind(activity_and_subject, features_total_selected)
# Columns 1:2 are subject/activity; everything after them is a measure.
result_frame <- aggregate(final_frame[, 3:ncol(final_frame)],
                          by = list(final_frame$subject_id, final_frame$activity),
                          FUN = mean)
colnames(result_frame)[which(names(result_frame) == "Group.1")] <- "subject_id"
colnames(result_frame)[which(names(result_frame) == "Group.2")] <- "activity"
write.table(result_frame, file = "tidy_data_mean_measures.txt", row.names = FALSE)
# Step 7: drop the intermediate objects from the workspace.
rm(activity_labels, features_txt, subject_test, X_test, activity_test, X_test_selected, X_train_selected, activity_and_subject, activity, subject_train, X_train, activity_train, subject, name_of_features_selected)
|
184037c4c4906e83ad885883e741a48692f3b96b | 63caf4d9e0f4b9c9cb5ab101f5795a94f27d575d | /man/oce.spectrum.Rd | 4e6451a61b598dd04e5064ee08caf905a1e5d847 | [] | no_license | marie-geissler/oce | b2e596c29050c5e2076d02730adfc0c4f4b07bb4 | 2206aaef7c750d6c193b9c6d6b171a1bdec4f93d | refs/heads/develop | 2021-01-17T20:13:33.429798 | 2015-12-24T15:38:23 | 2015-12-24T15:38:23 | 48,561,769 | 1 | 0 | null | 2015-12-25T01:36:30 | 2015-12-25T01:36:30 | null | UTF-8 | R | false | false | 937 | rd | oce.spectrum.Rd | \name{oce.spectrum}
\alias{oce.spectrum}
\alias{oceSpectrum}
\title{Wrapper to give normalized spectrum}
\description{This is a wrapper around the R \code{\link{spectrum}}
function, which returns spectral values that are adjusted so that the
integral of those values equals the variance of the input \code{x}.
}
\usage{oce.spectrum(x, ...)}
\arguments{
\item{x}{As for \code{\link{spectrum}}, a univariate or multivariate
time series.}
\item{\dots}{extra arguments passed on to \code{\link{spectrum}}.}
}
\examples{
x <- rnorm(1e3)
s <- spectrum(x, plot=FALSE)
ss <- oce.spectrum(x, plot=FALSE)
cat("variance of x=", var(x), "\n")
cat("integral of spectrum=", sum(s$spec)*diff(s$freq[1:2]), "\n")
cat("integral of oce.spectrum=", sum(ss$spec)*diff(ss$freq[1:2]), "\n")
}
\value{A spectrum that has values that integrate to the variance.}
\seealso{\code{\link{spectrum}}.}
\author{Dan Kelley}
\keyword{misc}
|
de4d30986dc9c3e5db824753ef4835a9bfe33116 | e4755d1e2207edc616f4f20eb2d4e5fb65a71c42 | /man/equation2function.Rd | e06bf0e625849a4a7592111e94c625acb9941257 | [
"CC0-1.0"
] | permissive | dwbapst/paleotree | 14bbfd5b312848c109a5fc539a1e82978a760538 | 95c2f57e91c4204c04cd59d9662ba94c43c87a60 | refs/heads/master | 2022-09-23T03:57:35.959138 | 2022-08-25T18:29:50 | 2022-08-25T18:29:50 | 3,827,289 | 20 | 11 | null | 2022-08-25T18:30:48 | 2012-03-25T21:01:38 | R | UTF-8 | R | false | true | 1,772 | rd | equation2function.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equation2function.R
\name{equation2function}
\alias{equation2function}
\title{Turn a Character String of the Right-Hand Side of an Equation into an R Function}
\usage{
equation2function(equation, envir = parent.frame(), notName = "XXXXXXXXXXX")
}
\arguments{
\item{equation}{The right-hand-side (RHS) of an equation, given as a character string.
If not of type character, \code{equation2function} attempts to coerce \code{equation} to
type character and fails if it cannot.}
\item{envir}{The environment the resulting function will be evaluated in.
See \code{\link{as.function}}.}
\item{notName}{A placeholder string used internally while turning \code{equation}
into a function; it should not match any actual variable in \code{equation}. Only
supplied as an argument in case the default value happens to collide with a
variable name in \code{equation}.}
}
\value{
A function, with named blank (i.e. no default value) arguments.
}
\description{
\code{equation2function} converts the right-hand side of an equation that can be written
as a single line (like the right-hand side of an object of class \code{formula}) and
creates an R function which calls the variables within as arguments and returns values
consistent with the parameters of the input equation as written.
}
\details{
This simple little function is rather 'hacky' but seems to get the job done, for a
functionality that does not seem to be otherwise exist elsewhere in R.
}
\examples{
# some simple examples
foo <- equation2function("x+y")
foo
foo(x = 4,y = 0.1)
foo <- equation2function("x+2*sqrt(2*y+3)^2")
foo
foo(x = 4,y = 0.1)
# what about weird long argument names and spaces
foo <- equation2function("stegosaur + 0.4 * P")
foo
foo(stegosaur = 5,P = 0.3)
}
\author{
David W. Bapst
}
|
13f79b332c983aa8bef6c16ff4b11fa92a2f889c | b2485a466b338d7ead6321bb0d0a9e1ddea3092b | /man/plot.AccurateGLM.Rd | cd0ddc5a614fb8ea721cd1bace3e4b9cbf6b886b | [] | no_license | cran/aglm | c9c3f405c72611ffa67f602d7c73196fd64bf0c0 | 51664a6d2ed959c0f10d078bfef41f13c206c72d | refs/heads/master | 2023-05-31T06:59:33.524925 | 2021-06-09T11:10:02 | 2021-06-09T11:10:02 | 375,409,407 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,494 | rd | plot.AccurateGLM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-aglm.R
\name{plot.AccurateGLM}
\alias{plot.AccurateGLM}
\title{Plot contribution of each variable and residuals}
\usage{
\method{plot}{AccurateGLM}(
x,
vars = NULL,
verbose = TRUE,
s = NULL,
resid = FALSE,
smooth_resid = TRUE,
smooth_resid_fun = NULL,
ask = TRUE,
layout = c(2, 2),
only_plot = FALSE,
main = "",
add_rug = FALSE,
...
)
}
\arguments{
\item{x}{A model object obtained from \code{aglm()} or \code{cv.aglm()}.}
\item{vars}{Used to specify variables to be plotted (\code{NULL} means all the variables).
This parameter may have one of the following classes:
\itemize{
\item \code{integer}: specifying variables by index.
\item \code{character}: specifying variables by name.
}}
\item{verbose}{Set to \code{FALSE} if textual outputs are not needed.}
\item{s}{A numeric value specifying \eqn{\lambda} at which plotting is required.
Note that plotting for multiple \eqn{\lambda}'s are not allowed and \code{s} always should be a single value.
When the model is trained with only a single \eqn{\lambda} value, just set it to \code{NULL} to plot for that value.}
\item{resid}{Used to display residuals in plots.
This parameter may have one of the following classes:
\itemize{
\item \code{logical}(single value): If \code{TRUE}, working residuals are plotted.
\item \code{character}(single value): type of residual to be plotted. See \link{residuals.AccurateGLM} for more details on types of residuals.
\item \code{numerical}(vector): residual values to be plotted.
}}
\item{smooth_resid}{Used to display smoothing lines of residuals for quantitative variables.
This parameter may have one of the following classes:
\itemize{
\item \code{logical}: If \code{TRUE}, smoothing lines are drawn.
\item \code{character}:
\itemize{
\item \code{smooth_resid="both"}: Balls and smoothing lines are drawn.
\item \code{smooth_resid="smooth_only"}: Only smoothing lines are drawn.
}
}}
\item{smooth_resid_fun}{Set if users need custom smoothing functions.}
\item{ask}{By default, \code{plot()} stops and waits inputs each time plotting for each variable is completed.
Users can set \code{ask=FALSE} to avoid this.
It is useful, for example, when using devices as \code{bmp} to create image files.}
\item{layout}{Plotting multiple variables for each page is allowed.
To achieve this, set it to a pair of integer, which indicating number of rows and columns, respectively.}
\item{only_plot}{Set to \code{TRUE} if no automatic graphical configurations are needed.}
\item{main}{Used to specify the title of plotting.}
\item{add_rug}{Set to \code{TRUE} for rug plots.}
\item{...}{Other arguments are currently not used and just discarded.}
}
\value{
No return value, called for side effects.
}
\description{
Plot contribution of each variable and residuals
}
\examples{
#################### using plot() and predict() ####################
library(MASS) # For Boston
library(aglm)
## Read data
xy <- Boston # xy is a data.frame to be processed.
colnames(xy)[ncol(xy)] <- "y" # Let medv be the objective variable, y.
## Split data into train and test
n <- nrow(xy) # Sample size.
set.seed(2018) # For reproducibility.
test.id <- sample(n, round(n/4)) # ID numbders for test data.
test <- xy[test.id,] # test is the data.frame for testing.
train <- xy[-test.id,] # train is the data.frame for training.
x <- train[-ncol(xy)]
y <- train$y
newx <- test[-ncol(xy)]
y_true <- test$y
## With the result of aglm()
model <- aglm(x, y)
lambda <- 0.1
plot(model, s=lambda, resid=TRUE, add_rug=TRUE,
verbose=FALSE, layout=c(3, 3))
y_pred <- predict(model, newx=newx, s=lambda)
plot(y_true, y_pred)
## With the result of cv.aglm()
model <- cv.aglm(x, y)
lambda <- model@lambda.min
plot(model, s=lambda, resid=TRUE, add_rug=TRUE,
verbose=FALSE, layout=c(3, 3))
y_pred <- predict(model, newx=newx, s=lambda)
plot(y_true, y_pred)
}
\references{
Suguru Fujita, Toyoto Tanaka, Kenji Kondo and Hirokazu Iwasawa. (2020)
\emph{AGLM: A Hybrid Modeling Method of GLM and Data Science Techniques}, \cr
\url{https://www.institutdesactuaires.com/global/gene/link.php?doc_id=16273&fg=1} \cr
\emph{Actuarial Colloquium Paris 2020}
}
\author{
\itemize{
\item Kenji Kondo,
\item Kazuhisa Takahashi and Hikari Banno (worked on L-Variable related features)
}
}
|
50d32e1b2c4b3fd99058b9c3129b48238f4032e6 | 236b5da714ed2ee47aaa0036230ba811701bd7ff | /man/fars_read.Rd | 75bbe3c0b2a7616fe1b0807a3c2324125ecb7971 | [] | no_license | RaphaelFred/softdev | b7480ad980ef60700c145369bd2b3cc0c1c530bb | 80a5dfee250cbfd2980d39a6f9665cfc198e48cd | refs/heads/master | 2020-05-02T15:33:26.904705 | 2019-03-31T15:38:41 | 2019-03-31T15:38:41 | 178,044,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 543 | rd | fars_read.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read}
\alias{fars_read}
\title{Reads data and creates a data frame tbl}
\usage{
fars_read(filename)
}
\arguments{
\item{filename}{Name of the file that is read into R}
}
\value{
This function will return a data frame of class \code{tbl_df}. (For further information see \code{\link[dplyr]{tbl_df}}.)
}
\description{
This function reads a csv file if existant and creates a data frame tbl.
}
\examples{
\dontrun{
fars_read("accident_2015.csv.bz2")
}
}
|
9a4852b906ed5615a40737bd5ab79afbc26ba9d5 | b176c1f22279ca79d735231c694d2dc702e8e8dc | /man/getRC.Rd | cfc521ef0af2317fe6b15a4f8bd5588ae49049f6 | [] | no_license | EricDugan/gaitFeature | 6c2548d8cac07776023656839b4b3872c99e9bab | 72391e832293f2d72a715291e2af8ad4d599ef47 | refs/heads/master | 2021-09-07T12:45:04.018373 | 2018-02-23T03:19:34 | 2018-02-23T03:19:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 408 | rd | getRC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getRC.R
\name{getRC}
\alias{getRC}
\title{Get reference curve}
\usage{
getRC(.joint, .plane)
}
\arguments{
\item{.joint}{joint name}
\item{.plane}{plane name}
}
\value{
a dataframe of referene curve filtered to the specified joint and plane.
}
\description{
Get reference curve
}
\examples{
getRC("Ankle","sag")
}
\seealso{
rc
}
|
7e8a031eb47e257a5e02105a472c6e79f71c90a9 | 97f1e3e6e908a83489e4243268ba539316196176 | /R/antsRegistration.R | ec31eaa355473ace511782023636abf7b16865a3 | [
"Apache-2.0"
] | permissive | ANTsX/ANTsRCore | 1c3d1da3bea84859da7d18f54c34ae13d2af8619 | 8e234fd1363c0d618f9dc21c9566f3d5464655a2 | refs/heads/master | 2023-05-24T23:53:30.886217 | 2023-05-22T02:52:39 | 2023-05-22T02:52:39 | 83,897,912 | 8 | 22 | null | 2023-05-22T02:52:40 | 2017-03-04T14:09:48 | C++ | UTF-8 | R | false | false | 52,101 | r | antsRegistration.R | #' Perform registration between two images.
#'
#' Register a pair of images either through the full or simplified interface
#' to the ANTs registration method.
#'
#' @param fixed fixed image to which we register the moving image.
#' @param moving moving image to be mapped to fixed space.
#' @param typeofTransform A linear or non-linear registration type. Mutual
#' information metric by default. See \code{Details.}
#' @param initialTransform transforms to prepend
#' @param outprefix output will be named with this prefix.
#' @param mask Registration metric mask in the fixed image space.
#' @param movingMask Registration metric mask in the moving image space.
#' @param maskAllStages If true, apply metric mask(s) to all registration stages, instead of just the final stage.
#' @param gradStep gradient step size (not for all tx)
#' @param flowSigma smoothing for update field
#' @param totalSigma smoothing for total field
#' @param affMetric the metric for the affine part (GC, mattes, meansquares)
#' @param affSampling the sampling parameter for the affine metric
#' @param synMetric the metric for the syn part (CC, mattes, meansquares, demons)
#' @param synSampling the nbins or radius parameter for the syn metric
#' @param affIterations vector of iterations for low-dimensional registration.
#' we will set the smoothing and multi-resolution parameters based on the
#' length of this vector.
#' @param regIterations vector of iterations for syn. we will set the smoothing
#' and multi-resolution parameters based on the length of this vector.
#' @param multivariateExtras list of additional images and metrics which will
#' trigger the use of multiple metrics in the registration process
#' in the deformable stage. Multivariate metrics needs 5 entries:
#' name of metric, fixed, moving, weight, samplingParam.
#' the list should be of the form
#' \code{ list( list( "nameOfMetric2", img, img, weight, metricParam ) ) }.
#' Another example would be \code{ list( list( "MeanSquares", f2, m2, 0.5, 0 ),
#' list( "CC", f2, m2, 0.5, 2 ) ) }. This is only compatible with the
#' \code{SyNOnly}, \code{ElasticOnly}, or \code{antsRegistrationSyN*} transformations.
#' @param restrictTransformation This option allows the user to restrict the
#' optimization of the displacement field, translation, rigid or affine
#' transform on a per-component basis. For example, if one wants to limit
#' the deformation or rotation of 3-D volume to the first two dimensions,
#' this is possible by specifying a weight vector of \code{c(1,1,0)} for a
#' 3D deformation field or \code{c(1,1,0,1,1,0)} for a rigid transformation.
#' Restriction currently only works if there are no preceding transformations.
#' @param writeCompositeTransform if \code{TRUE}, will write transformations to h5 format. Defaults to FALSE.
#' @param randomSeed integer random seed. combine with setting
#' ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS environment variable to limit the
#' impact of numerical differences.
#' @param samplingPercentage value between zero and one that allows the percentage
#' of points sampled to be controlled in low-dimensional metric estimation.
#' @param verbose request verbose output (useful for debugging)
#' @param printArgs print raw command line (useful for debugging)
#' @param ... additional options see antsRegistration in ANTs
#' @details
#' typeofTransform can be one of:
#' \itemize{
#' \item{"Translation": }{Translation transformation.}
#' \item{"Rigid": }{Rigid transformation: Only rotation and translation.}
#' \item{"Similarity": }{Similarity transformation: scaling, rotation and
#' translation.}
#' \item{"QuickRigid": }{Rigid transformation: Only rotation and translation.
#' May be useful for quick visualization fixes.'}
#' \item{"DenseRigid": }{Rigid transformation: Only rotation and translation.
#' Employs dense sampling during metric estimation.'}
#' \item{"BOLDRigid": }{Rigid transformation: Parameters typical for BOLD
#' to BOLD intrasubject registration'.'}
#' \item{"Affine": }{Affine transformation: Rigid + scaling.}
#' \item{"AffineFast": }{Fast version of \code{Affine}.}
#' \item{"BOLDAffine": }{Affine transformation: Parameters typical for BOLD
#' to BOLD intrasubject registration'.'}
#' \item{"TRSAA": }{translation, rigid, similarity, affine (twice). please set
#' \code{regIterations} if using this option. this would be used in cases
#' where you want a really high quality affine mapping (perhaps with mask).}
#' \item{"ElasticSyN": }{Symmetric normalization: Affine + deformable transformation.
#' Uses \code{synMetric} as optimization metric and elastic regularization.}
#' \item{"SyN": }{Symmetric normalization: Affine + deformable transformation.
#' Uses \code{synMetric} as optimization metric.}
#' \item{"SyNRA": }{Symmetric normalization: Rigid + Affine + deformable transformation.
#' Uses \code{synMetric} as optimization metric.}
#' \item{"SyNOnly": }{Symmetric normalization: no initial transformation.
#' Uses \code{synMetric} as optimization metric. Assumes images are
#' aligned by an inital transformation. Can be useful if you want to run
#' an unmasked affine followed by masked deformable registration.}
#' \item{"ElasticOnly": }{Elastic normalization: no initial transformation.}
#' \item{"SyNCC": }{SyN, but with cross-correlation as the metric.
#' Note, the default or chosen parameters will be replaced with
#' \code{synMetric="CC", synSampling=4, synits="2100x1200x1200x20",
#' smoothingsigmas="3x2x1x0", shrinkfactors="4x3x2x1"}. }
#' \item{"SyNabp": }{SyN optimized for abpBrainExtraction, forces mutual information
#' as optimization metric.}
#' \item{"SyNBold": }{SyN, but optimized for registrations between
#' BOLD and T1 images.}
#' \item{"SyNBoldAff": }{SyN, but optimized for registrations between
#' BOLD and T1 images, with additional affine step.}
#' \item{"SyNAggro": }{SyN, but with more aggressive registration
#' (fine-scale matching and more deformation). Takes more time than \code{SyN}.}
#' \item{"TV[n]": }{time-varying diffeomorphism with where 'n' indicates number of
#' time points in velocity field discretization. The initial transform
#' should be computed, if needed, in a separate call to ants.registration.}
#' \item{"TVMSQ": }{time-varying diffeomorphism with mean square metric}
#' \item{"TVMSQC": }{time-varying diffeomorphism with mean square metric
#' for very large deformation}
#' \item{"Elastic": }{simple elastic deformation. one might want to run an
#' affine transformation before this. may not produce diffeomorphic transformations.
#' user may need to explore gradient and sigma parameters. this will not produce a valid inverse deformation. \code{totalSigma} should be greater than zero.}
#' \item{"antsRegistrationSyN[x]":}{recreation of the antsRegistrationSyN.sh script in ANTs
#' where 'x' is one of the transforms available (e.g., 't', 'b', 's')}
#' \item{"antsRegistrationSyNQuick[x]":}{recreation of the antsRegistrationSyNQuick.sh script in ANTs
#' where 'x' is one of the transforms available (e.g., 't', 'b', 's')}
#' \item{"antsRegistrationSyNRepro[x]":}{reproducible registration. x options as above.}
#' \item{"antsRegistrationSyNQuickRepro[x]":}{quick reproducible registration. x options as above.}
#' }
#' @return outputs a list containing:
#' \itemize{
#' \item{warpedmovout: }{Moving image warped to space of fixed image.}
#' \item{warpedfixout: }{Fixed image warped to space of moving image.}
#' \item{fwdtransforms: }{Transforms to move from moving to fixed image.}
#' \item{invtransforms: }{Transforms to move from fixed to moving image.}
#' }
#' Output of 1 indicates failure
#' @author Shrinidhi KL, Tustison NJ, Avants BB
#' @examples
#'
#' #print help
#' antsRegistration()
#' fi <- antsImageRead(getANTsRData("r16") )
#' mi <- antsImageRead(getANTsRData("r64") )
#' mytx2 <- antsRegistration(fixed=fi, typeofTransform = '')
#' rig <- antsRegistration(fixed=fi, moving = mi,
#' typeofTransform = 'Rigid', verbose = TRUE)
#' trans = readAntsrTransform(rig$fwdtransforms, 2)
#' postrig <- antsRegistration(fixed=fi, moving=mi,
#' typeofTransform = "Affine", initialTransform = trans)
#' for (itype in c("AffineFast", "BOLDAffine")) {
#' print(itype)
#' mytx2 <- antsRegistration(fixed=fi, moving=mi,
#' typeofTransform = itype)
#' }
#' mytx2 <- antsRegistration(fixed=fi, moving=mi,
#' typeofTransform = "SyNOnly",
#' multivariateExtras = list(list( "MeanSquares", fi, mi, 0.5, 0 )) )
#' testthat::expect_error(
#' antsRegistration(fixed=fi, moving=mi, typeofTransform = "sdf")
#' )
#' bad <- antsRegistration(fixed=fi, moving=mi, regIterations = 40)
#' affIterations = c(3, 2, 1, 0)
#' mytx2 <- antsRegistration(fixed=fi, moving=mi,
#' affIterations = affIterations)
#' # set below for slower but numerically repeatable results
#' # these should be set in .Renviron not by sys calls
#' # Sys.setenv(ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS = 1)
#' # Sys.setenv(ANTS_RANDOM_SEED = 20180716)
#' fi <- antsImageRead(getANTsRData("r16") )
#' mi <- antsImageRead(getANTsRData("r64") )
#' fi<-resampleImage(fi,c(60,60),1,0)
#' mi<-resampleImage(mi,c(50, 50),1,0) # speed up
#' mytx <- antsRegistration(fixed=fi, moving=mi, typeofTransform = c('SyN') )
#' mywarpedimage <- antsApplyTransforms( fixed=fi, moving=mi,
#' transformlist=mytx$fwdtransforms )
#' mytx2 <- antsRegistration(fixed=fi, moving=mi, typeofTransform = c('SyN') )
#' mywarpedimage2 <- antsApplyTransforms( fixed=fi, moving=mi,
#' transformlist=mytx2$fwdtransforms )
#' # testthat::expect_equal(as.array(mywarpedimage), as.array(mywarpedimage2))
#'
#' \dontrun{ # quick visualization fix for images with odd orientation
#' mni = antsImageRead( getANTsRData( "mni" ) )
#' strokt1=antsImageRead('strokt1.nii.gz')
#' strokt1reg=antsRegistration(
#' fixed=mni,
#' moving=strokt1,
#' typeofTransform = "QuickRigid",verbose=TRUE )
#' plot( strokt1reg$warpedmovout, axis=3, nslices=20)
#' # now - how to use a mask
#' fi <- antsImageRead(getANTsRData("r16") )
#' fiseg = kmeansSegmentation( fi, 3 )
#' mi <- antsImageRead(getANTsRData("r64") )
#' msk = thresholdImage(fiseg$segmentation, 0, 0 )
#' mytx <- antsRegistration(fixed=fi, moving=mi, typeofTransform = c('SyNCC'),
#' mask=msk, verbose=F )
#' jac = createJacobianDeterminantImage( fi, mytx$fwdtransforms[1] )
#' }
#'
#' @export antsRegistration
antsRegistration <- function(
fixed = NULL,
moving = NULL,
typeofTransform = "SyN",
initialTransform = NA,
outprefix = NULL,
mask = NULL,
movingMask = NULL,
maskAllStages = FALSE,
gradStep = 0.2,
flowSigma = 3,
totalSigma = 0,
affMetric = "mattes",
affSampling=32,
synMetric = "mattes",
synSampling=32,
affIterations,
regIterations = c(40,20,0),
multivariateExtras,
restrictTransformation,
writeCompositeTransform = FALSE,
randomSeed,
samplingPercentage=0.2,
verbose=FALSE,
printArgs = FALSE, ... ) {
numargs <- nargs()
if (numargs == 1 & typeof(fixed) == "list") {
.Call("antsRegistration",
.int_antsProcessArguments(c(fixed)), PACKAGE = "ANTsRCore")
return(0)
}
if (nchar(typeofTransform) == 0)
typeofTransform = "SyN"
if (nchar(outprefix) == 0 || length(outprefix) == 0
|| is.null(outprefix)) {
outprefix = tempfile()
}
  # Collect any transform files already written under this output prefix.
  # Returns a list with: alltx (all numbered transform files, excluding
  # velocity fields), three logical masks over alltx flagging inverse
  # warps (findinv), forward warps (findfwd), and affine .mat files
  # (findaff), plus velocityfield (paths of any *VelocityField.nii.gz).
  find_tx = function(outprefix) {
    alltx = Sys.glob( paste0( outprefix, "*", "[0-9]*") )
    # velocity-field files are globbed separately below
    alltx = alltx[!grepl( "VelocityField", alltx )]
    findinv = grepl( "[0-9]InverseWarp.nii.gz", alltx )
    findaff = grepl( "[0-9]GenericAffine.mat", alltx )
    findfwd = grepl( "[0-9]Warp.nii.gz", alltx )
    velocityfield = Sys.glob( paste0( outprefix, "*VelocityField.nii.gz" ) )
    L = list(alltx = alltx,
             findinv = findinv,
             findfwd = findfwd,
             findaff = findaff,
             velocityfield = velocityfield)
    return(L)
  }
all_tx = find_tx(outprefix)
pre_transform = all_tx$alltx[ all_tx$findinv | all_tx$findfwd | all_tx$findaff]
rm(list = "all_tx")
if ( numargs < 1 | missing(fixed) | missing(moving) )
{
cat("for simplified mode: \n")
cat(paste0(" antsRegistration( fixed , moving , ",
"typeofTransform = c(\"Rigid\",\"Affine\",\"AffineFast\",",
"\"SyN\",\"SyNCC\"), outputPrefix=\"./antsRegOut\" \n"))
cat("")
cat("For full mode: use standard ants call , e.g. : \n")
cat(paste0(" ANTsR::antsRegistration( list( d=2,m=\"mi[r16slice.nii.gz,",
"r64slice.nii.gz,1,20,Regular,0.05]\", t=\"affine[1.0]\", ",
"c=\"2100x1200x1200x0\", s=\"3x2x1x0\", f=\"4x3x2x1\", ",
"u=\"1\", o=\"[xtest,xtest.nii.gz,xtest_inv.nii.gz]\" ) )\n"))
cat("full help: \n")
.Call("antsRegistration", .int_antsProcessArguments(c(list("--help"))),
PACKAGE = "ANTsRCore")
return(0)
}
args <- list(fixed, moving, typeofTransform, outprefix, ...)
myfAff = "6x4x2x1"
mysAff = "3x2x1x0"
metsam = 0.2
myiterations <- "2100x1200x1200x10"
if ( ! missing( affIterations ) ) {
myiterations = paste0( affIterations, collapse='x' )
itlen = length( affIterations )-1
if ( itlen == 0 ) {
mysAff = 0
myfAff = 1
myiterations = affIterations
} else {
mysAff = itlen:0
myfAff = 2^mysAff
mysAff = paste( mysAff, collapse='x' )
myfAff = paste( myfAff, collapse='x' )
}
}
if ( typeofTransform == "AffineFast" ) {
typeofTransform <- "Affine"
myiterations <- "2100x1200x0x0"
}
if ( typeofTransform == "BOLDAffine" ) {
typeofTransform <- "Affine"
myfAff="2x1"
mysAff="1x0"
myiterations <- "100x20"
}
if ( typeofTransform == "QuickRigid" ) {
typeofTransform <- "Rigid"
myiterations <- "20x20x0x0"
}
if ( typeofTransform == "DenseRigid" ) {
typeofTransform <- "Rigid"
metsam = 0.8
}
if ( typeofTransform == "BOLDRigid" ) {
typeofTransform <- "Rigid"
myfAff="2x1"
mysAff="1x0"
myiterations <- "100x20"
}
mysyn = paste("SyN[",gradStep,",",flowSigma,",",totalSigma,"]", sep = "")
itlen = length( regIterations )-1
if ( itlen == 0 ) {
smoothingsigmas = 0
shrinkfactors = 1
synits = regIterations
} else {
smoothingsigmas = itlen:0
shrinkfactors = 2^smoothingsigmas
smoothingsigmas = paste( smoothingsigmas, collapse='x' )
shrinkfactors = paste( shrinkfactors, collapse='x' )
synits = paste( regIterations, collapse='x')
}
if (!is.null(fixed)) {
fixed = check_ants(fixed)
}
if (!is.null(moving)) {
moving = check_ants(moving)
}
if (fixed@dimension != moving@dimension) {
stop("Fixed and moving image dimensions are not the same.")
}
if (!is.character(fixed)) {
fixed = check_ants(fixed)
error_not_antsImage(fixed, "fixed")
moving = check_ants(moving)
error_not_antsImage(moving, "moving")
if (is.antsImage(fixed) & is.antsImage(moving)) {
inpixeltype <- fixed@pixeltype
tvTypes <- c( paste0( "TV[", 1:8, "]" ) )
ttexists <- FALSE
# change this to a match.arg
allowableTx <- c("Translation","Rigid", "Similarity", "Affine", "TRSAA",
"SyN","SyNRA","SyNOnly","SyNCC","SyNabp", "SyNBold", "SyNBoldAff",
"SyNAggro", "SyNLessAggro", tvTypes,
"TVMSQ","TVMSQC","ElasticSyN","Elastic","ElasticOnly"
)
# Perform checking of antsRegistrationSyN transforms later
ttexists <- typeofTransform %in% allowableTx || grepl( "antsRegistrationSyN", typeofTransform )
if (ttexists) {
initx = initialTransform
if ( class( initx ) == "antsrTransform" )
{
tempTXfilename = tempfile( fileext = ".mat" )
initx = invertAntsrTransform( initialTransform )
initx = invertAntsrTransform( initx )
writeAntsrTransform( initx, tempTXfilename )
initx = tempTXfilename
}
moving <- antsImageClone(moving, "float")
fixed <- antsImageClone(fixed, "float")
warpedfixout <- antsImageClone(moving)
warpedmovout <- antsImageClone(fixed)
f <- antsrGetPointerName(fixed)
m <- antsrGetPointerName(moving)
wfo <- antsrGetPointerName(warpedfixout)
wmo <- antsrGetPointerName(warpedmovout)
fMaskStr <- "NA"
mMaskStr <- "NA"
if( is.list( mask ) && !is.null( movingMask ) )
{
stop( "Ambiguous definition for the moving mask." )
}
if( is.list( mask ) )
{
fixedMaskBinary <- antsImageClone( thresholdImage( mask[[1]], 0, 0, 0, 1 ), "unsigned char" )
fMaskStr <- antsrGetPointerName( fixedMaskBinary )
movingMaskBinary <- antsImageClone( thresholdImage( mask[[2]], 0, 0, 0, 1 ), "unsigned char" )
mMaskStr <- antsrGetPointerName( movingMaskBinary )
} else {
if( !is.null( mask ) )
{
fixedMaskBinary <- antsImageClone( thresholdImage( mask, 0, 0, 0, 1 ), "unsigned char" )
fMaskStr <- antsrGetPointerName( fixedMaskBinary )
}
if( !is.null( movingMask ) )
{
movingMaskBinary <- antsImageClone( thresholdImage( movingMask, 0, 0, 0, 1 ), "unsigned char" )
mMaskStr <- antsrGetPointerName( movingMaskBinary )
}
}
maskOption <- paste0( "[", fMaskStr, ",", mMaskStr, "]" )
if( maskAllStages )
{
earlyMaskOption <- maskOption
} else {
earlyMaskOption <- "[NA,NA]"
}
if (is.na(initx)) {
initx = paste("[", f, ",", m, ",1]", sep = "")
}
if (typeofTransform == "SyNBold") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",
affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Rigid[0.25]", "-c", "[1200x1200x100,1e-6,5]", "-s", "2x1x0",
"-f", "4x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn, "-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if (typeofTransform == "SyNBoldAff") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,
",regular,",samplingPercentage,"]", sep = ""),
"-t", "Rigid[0.25]", "-c", "[1200x1200x100,1e-6,5]", "-s", "2x1x0",
"-f", "4x2x1",
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,
",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.25]", "-c", "[200x20,1e-6,5]", "-s", "1x0",
"-f", "2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,
"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption)
}
if (typeofTransform == "ElasticSyN") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.25]", "-c", "2100x1200x200x0", "-s", "3x2x1x0",
"-f", "4x2x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors,
"-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",",wmo, ",", wfo, "]", sep = ""),
"-x", maskOption)
}
if (typeofTransform == "SyN") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.25]", "-c", "2100x1200x1200x0", "-s", "3x2x1x0",
"-f", "4x2x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption
)
}
if (typeofTransform == "SyNRA") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Rigid[0.25]", "-c", "2100x1200x1200x0", "-s", "3x2x1x0",
"-f", "4x2x2x1",
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.25]", "-c", "2100x1200x1200x0", "-s", "3x2x1x0",
"-f", "4x2x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors, "-u",
"0", "-z", "1", "-o", paste("[", outprefix, ",",
wmo, ",", wfo, "]", sep = ""),
"-x", maskOption)
}
if (typeofTransform == "ElasticOnly") {
mysyn = paste("GaussianDisplacementField[",gradStep,",",flowSigma,",",totalSigma,"]", sep = "")
}
if (typeofTransform == "SyNOnly" | typeofTransform == "ElasticOnly" ) {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors,
"-u", "0", "-z", "1", "-o", paste("[", outprefix, ",",
wmo, ",", wfo, "]", sep = ""))
if ( ! missing( multivariateExtras ) ) {
args0 <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(synMetric,"[", f, ",", m,
",1,",synSampling,"]", sep = "") )
args1 = list( )
for ( mm in 1:length( multivariateExtras ) ) {
if ( length( multivariateExtras[[mm]] ) != 5 )
stop(paste0("multivariate metric needs 5 entries: ",
"name of metric, fixed, moving, weight, ",
"samplingParam"))
args1 <- lappend( args1, list(
"-m", paste(
as.character(multivariateExtras[[mm]][[1]]),"[",
antsrGetPointerName(multivariateExtras[[mm]][[2]]), ",",
antsrGetPointerName(multivariateExtras[[mm]][[3]]), ",",
as.character(multivariateExtras[[mm]][[4]]), ",",
as.character(multivariateExtras[[mm]][[5]]),"]", sep = "") ) )
}
args2 <- list(
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas, "-f", shrinkfactors,
"-u", "0", "-z", "1", "-o", paste("[", outprefix, ",",
wmo, ",", wfo, "]", sep = ""))
args=lappend(args0,args1)
args=lappend(args,args2)
}
args=lappend( args, list( "-x", maskOption ) )
}
if (typeofTransform == "SyNAggro") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.25]", "-c", "2100x1200x1200x100", "-s", "3x2x1x0",
"-f", "4x2x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas,
"-f", shrinkfactors, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if (typeofTransform == "SyNCC") {
synMetric="CC"
synSampling=4
synits="2100x1200x1200x20"
smoothingsigmas="3x2x1x0"
shrinkfactors="4x3x2x1"
mysyn=paste("SyN[0.15,3,0]", sep = "")
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Rigid[1]",
"-c", "2100x1200x1200x0",
"-s", "3x2x1x0",
"-f", "4x4x2x1",
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[1]", "-c", "1200x1200x100", "-s", "2x1x0",
"-f", "4x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas,
"-f", shrinkfactors, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if (typeofTransform == "TRSAA") {
itlen = length( regIterations )
itlenlow = round( itlen/2 + 0.0001 )
dlen = itlen - itlenlow
myconvlow = paste(
c( rep( 2000, itlenlow ), rep( 0, dlen ) ), collapse = 'x' )
myconvhi = paste( regIterations, collapse='x')
myconvhi = paste("[",myconvhi,",1.e-7,10]",sep='')
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,0.3]", sep = ""),
"-t", "Translation[1]",
"-c", myconvlow,
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,0.3]", sep = ""),
"-t", "Rigid[1]",
"-c", myconvlow,
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,0.3]", sep = ""),
"-t", "Similarity[1]",
"-c", myconvlow,
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,0.3]", sep = ""),
"-t", "Affine[1]",
"-c", myconvhi,
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-x", earlyMaskOption,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,0.3]", sep = ""),
"-t", "Affine[1]",
"-c", myconvhi,
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if (typeofTransform == "SyNabp") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste("mattes[", f, ",", m, ",1,32,regular,",samplingPercentage,"]", sep = ""),
"-t", "Rigid[0.1]", "-c", "1000x500x250x100", "-s", "4x2x1x0",
"-f", "8x4x2x1",
"-x", earlyMaskOption,
"-m", paste("mattes[", f, ",", m, ",1,32,regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.1]", "-c", "1000x500x250x100", "-s", "4x2x1x0",
"-f", "8x4x2x1",
"-x", earlyMaskOption,
"-m", paste("CC[", f, ",", m, ",0.5,4]", sep = ""),
"-t", paste("SyN[0.1,3,0]", sep = ""), "-c", "50x10x0",
"-s", "2x1x0", "-f", "4x2x1", "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if (typeofTransform == "SyNLessAggro") {
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",samplingPercentage,"]", sep = ""),
"-t", "Affine[0.25]", "-c", "2100x1200x1200x100", "-s", "3x2x1x0",
"-f", "4x2x2x1",
"-x", earlyMaskOption,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", mysyn,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas,
"-f", shrinkfactors, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if ( typeofTransform == "TVMSQ" ) {
if ( is.na(gradStep) ) gradStep=1.0
tvtx=paste("TimeVaryingVelocityField[",
gradStep,", 4,",flowSigma,",0.0,",totalSigma,",0 ]",sep='')
args <- list("-d", as.character(fixed@dimension), # "-r", initx,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", tvtx,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-u", "0", "-z", "0",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if ( typeofTransform == "TVMSQC" ) {
if ( is.na(gradStep) ) gradStep=2.0
tvtx=paste("TimeVaryingVelocityField[",
gradStep,", 8, 1.0,0.0, 0.05,0 ]",sep='')
args <- list("-d", as.character(fixed@dimension), # "-r", initx,
"-m", paste("demons[", f, ",", m, ",0.5,0]", sep = ""),
"-m", paste("meansquares[", f, ",", m, ",1,0]", sep = ""),
"-t", tvtx,
"-c", "[1200x1200x100x20x0,0,5]",
"-s", "8x6x4x2x1vox",
"-f", "8x6x4x2x1",
"-u", "0", "-z", "0",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if( typeofTransform %in% tvTypes )
{
if( is.na( gradStep ) )
{
gradStep <- 1.0
}
nTimePoints <- as.numeric( strsplit( strsplit( typeofTransform, "\\[" )[[1]][2], "\\]" )[[1]][1] )
if ( is.na(gradStep) ) gradStep=1.0
tvtx=paste("TimeVaryingVelocityField[",
gradStep,",", nTimePoints, ",",flowSigma,",0.0,",totalSigma,",0 ]",sep='')
args <- list("-d", as.character(fixed@dimension), # "-r", initx,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", tvtx,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-u", "0", "-z", "0",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if ( typeofTransform == "Elastic" ) {
if ( is.na(gradStep) ) gradStep=0.25
tvtx=paste("GaussianDisplacementField[",
gradStep,",",flowSigma,",",totalSigma,"]",sep='')
args <- list("-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(synMetric,"[", f, ",", m, ",1,",synSampling,"]", sep = ""),
"-t", tvtx,
"-c", paste("[",synits,",1e-7,8]",collapse=''),
"-s", smoothingsigmas,
"-f", shrinkfactors,
"-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if ( typeofTransform == "Rigid" |
typeofTransform == "Similarity" |
typeofTransform == "Translation" |
typeofTransform == "Affine" )
{
args <- list(
"-d", as.character(fixed@dimension), "-r", initx,
"-m", paste(affMetric,"[", f, ",", m, ",1,",affSampling,",regular,",metsam,"]", sep = ""),
"-t", paste(typeofTransform, "[0.25]", sep = ""), "-c", myiterations,
"-s", mysAff, "-f", myfAff, "-u", "0", "-z", "1",
"-o", paste("[", outprefix, ",", wmo, ",", wfo, "]", sep = ""),
"-x", maskOption )
}
if( grepl( "antsRegistrationSyN", typeofTransform ) )
{
doQuick <- FALSE
if( grepl( "Quick", typeofTransform ) )
{
doQuick <- TRUE
}
subtypeOfTransform <- 's'
splineDistance <- 26
metricParameter <- 4
if( doQuick )
{
metricParameter <- 32
}
if( grepl( '\\[', typeofTransform ) && grepl( '\\]', typeofTransform ) )
{
subtypeOfTransform <- strsplit( strsplit( typeofTransform, "\\[" )[[1]][2], "\\]" )[[1]][1]
if( grepl( ',', subtypeOfTransform ) )
{
subtypeOfTransformArgs <- strsplit( subtypeOfTransform, "," )[[1]]
subtypeOfTransform <- subtypeOfTransformArgs[1]
if( !( subtypeOfTransform == 'b' ||
subtypeOfTransform == 'br' ||
subtypeOfTransform == 'bo' ||
subtypeOfTransform == 's' ||
subtypeOfTransform == 'sr' ||
subtypeOfTransform == 'so' ) )
{
stop( "Extra parameters are only valid for B-spline SyN transform." )
}
metricParameter <- subtypeOfTransformArgs[2]
if( length( subtypeOfTransformArgs ) > 2 )
{
splineDistance <- subtypeOfTransformArgs[3]
}
}
}
doRepro <- FALSE
if( grepl( "Repro", typeofTransform ) )
{
doRepro <- TRUE
}
if( doQuick == TRUE )
{
rigidConvergence <- "[1000x500x250x0,1e-6,10]"
} else {
rigidConvergence <- "[1000x500x250x100,1e-6,10]"
}
rigidShrinkFactors <- "8x4x2x1"
rigidSmoothingSigmas <- "3x2x1x0vox"
if( doQuick == TRUE )
{
affineConvergence <- "[1000x500x250x0,1e-6,10]"
} else {
affineConvergence <- "[1000x500x250x100,1e-6,10]"
}
affineShrinkFactors <- "8x4x2x1"
affineSmoothingSigmas <- "3x2x1x0vox"
linearMetric <- "MI"
linearMetricParameter <- "32"
if( doRepro == TRUE )
{
linearMetric <- "GC"
linearMetricParameter <- "1"
}
if( doQuick == TRUE )
{
synConvergence <- "[100x70x50x0,1e-6,10]"
synMetric <- paste0( "MI[", f, ",", m, ",1,", metricParameter, "]" )
} else {
synConvergence <- "[100x70x50x20,1e-6,10]"
synMetric <- paste0( "CC[", f, ",", m, ",1,", metricParameter, "]" )
}
synShrinkFactors <- "8x4x2x1"
synSmoothingSigmas <- "3x2x1x0vox"
tx <- "Rigid"
if( subtypeOfTransform == "t" )
{
tx <- "Translation"
}
if( doQuick == TRUE && doRepro == TRUE )
{
synConvergence <- "[100x70x50x0,1e-6,10]"
synMetric <- paste0( "CC[", f, ",", m, ",1", metricParameter, "]" )
}
if( missing( randomSeed ) && doRepro == TRUE )
{
randomSeed <- 1
}
rigidStage <- list( "--transform", paste0( tx, "[0.1]" ),
"--metric", paste0( linearMetric, "[", f, ",", m, ",1,", linearMetricParameter, ",regular,",samplingPercentage,"]" ),
"--convergence", rigidConvergence,
"--shrink-factors", rigidShrinkFactors,
"--smoothing-sigmas", rigidSmoothingSigmas
)
affineStage <- list( "--transform", "Affine[0.1]",
"--metric", paste0( linearMetric, "[", f, ",", m, ",1,", linearMetricParameter, ",regular,",samplingPercentage,"]" ),
"--convergence", affineConvergence,
"--shrink-factors", affineShrinkFactors,
"--smoothing-sigmas", affineSmoothingSigmas
)
if( subtypeOfTransform == "sr" || subtypeOfTransform == "br" )
{
if( doQuick == TRUE )
{
synConvergence <- "[50x0,1e-6,10]"
} else {
synConvergence <- "[50x20,1e-6,10]"
}
synShrinkFactors <- "2x1"
synSmoothingSigmas <- "1x0vox"
}
synTransform <- "SyN[0.1,3,0]"
if( subtypeOfTransform == "b" || subtypeOfTransform == "br" || subtypeOfTransform == "bo" )
{
synTransform <- paste0( "BSplineSyN[0.1,", splineDistance, ",0,3]" )
}
synStage <- list( "--transform", synTransform,
"--metric", synMetric )
if( ! missing( multivariateExtras ) )
{
for( mm in seq.int( length( multivariateExtras ) ) )
{
if( length( multivariateExtras[[mm]] ) != 5 )
stop(paste0("multivariate metric needs 5 entries: ",
"name of metric, fixed, moving, weight, ",
"samplingParam"))
synStage <- lappend( synStage, list(
"--metric", paste(
as.character( multivariateExtras[[mm]][[1]] ), "[",
antsrGetPointerName( multivariateExtras[[mm]][[2]] ), ",",
antsrGetPointerName( multivariateExtras[[mm]][[3]] ), ",",
as.character( multivariateExtras[[mm]][[4]] ), ",",
as.character( multivariateExtras[[mm]][[5]] ), "]", sep = "") ) )
}
}
synStage <- lappend( synStage, list(
"--convergence", synConvergence,
"--shrink-factors", synShrinkFactors,
"--smoothing-sigmas", synSmoothingSigmas
) )
args <- list(
"-d", as.character( fixed@dimension ),
"-r", initx,
"-o", paste0("[", outprefix, ",", wmo, ",", wfo, "]" ) )
if( subtypeOfTransform == "r" || subtypeOfTransform == "t" )
{
args <- lappend( args, rigidStage )
} else if( subtypeOfTransform == "a" ) {
args <- lappend( args, rigidStage )
args <- lappend( args, affineStage )
} else if( subtypeOfTransform == "b" || subtypeOfTransform == "s" ) {
args <- lappend( args, rigidStage )
args <- lappend( args, affineStage )
args <- lappend( args, synStage )
} else if( subtypeOfTransform == "br" || subtypeOfTransform == "sr" ) {
args <- lappend( args, rigidStage )
args <- lappend( args, synStage )
} else if( subtypeOfTransform == "bo" || subtypeOfTransform == "so" ) {
args <- lappend( args, synStage )
}
args <- lappend( args, list( "-x", maskOption ) )
args <- as.list( unlist( args ) )
}
if ( !missing( restrictTransformation ) ) {
args[[ length(args)+1]]="-g"
args[[ length(args)+1]]=paste( restrictTransformation, collapse='x')
}
args[[ length(args)+1]]="--float"
args[[ length(args)+1]]="1"
# set the random seed
if ( missing( randomSeed )) {
myseed=Sys.getenv("ANTS_RANDOM_SEED")
if ( nchar(myseed) == 0 ) myseed = "1234"
}
args[[ length(args)+1]]="--random-seed"
args[[ length(args)+1]]="1"
args[[ length(args)+1]]="--write-composite-transform"
args[[ length(args)+1]]=as.character(as.numeric( writeCompositeTransform ))
if ( verbose ) {
args[[ length(args)+1]]="-v"
args[[ length(args)+1]]="1"
}
if ( printArgs )
{
cat( "antsRegistration", paste( unlist( args ) ), "\n" )
}
args = .int_antsProcessArguments(c(args))
.Call("antsRegistration", args, PACKAGE = "ANTsRCore")
all_tx = find_tx(outprefix)
alltx = all_tx$alltx
findinv = all_tx$findinv
findfwd = all_tx$findfwd
findaff = all_tx$findaff
velocityfield = all_tx$velocityfield
rm(list = "all_tx")
# this will make it so other file naming don't mess this up
alltx = alltx[ findinv | findfwd | findaff]
if ( any( findinv )) {
fwdtransforms = rev( alltx[ findfwd | findaff ] )
invtransforms = alltx[ findinv | findaff ]
if ( length( fwdtransforms ) != length( invtransforms ) ) {
message(paste0("transform composite list may not be ",
"invertible - return all transforms and ",
"leave it to user to figure it out"))
invtransforms = alltx
}
} else {
fwdtransforms = rev( alltx )
invtransforms = ( alltx )
}
if ( writeCompositeTransform ) {
fwdtransforms = paste0( outprefix , 'Composite.h5' )
invtransforms = paste0( outprefix , 'InverseComposite.h5' )
}
if ( sum( fixed - warpedmovout ) == 0 ) # FIXME better error catching
stop( "Registration failed. Use verbose mode to diagnose." )
if( length( velocityfield ) == 0 )
{
return(
list( warpedmovout = antsImageClone(warpedmovout, inpixeltype),
warpedfixout = antsImageClone(warpedfixout, inpixeltype),
fwdtransforms = fwdtransforms,
invtransforms = invtransforms,
prev_transforms = pre_transform))
} else {
return(
list( warpedmovout = antsImageClone(warpedmovout, inpixeltype),
warpedfixout = antsImageClone(warpedfixout, inpixeltype),
fwdtransforms = fwdtransforms,
invtransforms = invtransforms,
prev_transforms = pre_transform,
velocityfield = velocityfield))
}
}
if (!ttexists) {
stop("Unrecognized transform type.")
}
}
return(0)
}
args[[ length(args)+1]]="--float"
args[[ length(args)+1]]="1"
if ( verbose ) {
args[[ length(args)+1]]="-v"
args[[ length(args)+1]]="1"
}
if ( printArgs )
{
cat( "antsRegistration", paste( unlist( args ) ), "\n" )
}
args = .int_antsProcessArguments(c(args))
.Call("antsRegistration", args, PACKAGE = "ANTsRCore")
}
################################
# .antsrmakeRandomString(n, length) function generates a random string random
# string of the length (length), made up of numbers, small and capital letters
# helper function
################################
.antsrmakeRandomString <- function(n = 1, mylength = 12) {
  # Generate `n` random alphanumeric strings, each `mylength` characters long.
  # Characters are drawn with replacement from digits and lower/upper-case
  # letters.
  # BUG FIX: uses seq_len(n) so that n = 0 yields character(0); the old
  # `for (i in 1:n)` loop ran for i in c(1, 0) and returned two strings.
  vapply(
    seq_len(n),
    function(i) {
      paste(sample(c(0:9, letters, LETTERS), mylength, replace = TRUE),
            collapse = "")
    },
    character(1)
  )
}
#' Return the antsImage pointer string
#'
#' This is a low-level function that may be useful for debugging. It will
#' return the pointer name itself from an antsImage object.
#'
#' @param img image whose pointer we want
#' @return string
#' @author Avants BB
#' @examples
#'
#' img = antsImageRead( getANTsRData("r16") )
#' antsrGetPointerName( img )
#' antsrGetPointerName( antsImageClone( img ))
#'
#' @export antsrGetPointerName
antsrGetPointerName <- function(img) {
  # Serialize the image argument and pull out the second space-separated
  # token, which carries the "<pointer>"-style name.
  serialized <- .int_antsProcessArguments(list(img))
  pointer_token <- strsplit(serialized, " ")[[1]][2]
  # Split off the trailing ">" delimiter and return the bare pointer name.
  strsplit(pointer_token, ">")[[1]]
}
#' ANTs template building.
#'
#' Iteratively estimate a population shape and intensity average image. This
#' can be computationally intensive and currently is not parallelized. Perhaps
#' better to use official \code{antsMultivariateTemplateConstruction*} in ANTs.
#' However, this code can be useful for smaller problems/populations.
#'
#' @param initialTemplate fixed image to which we register the population.
#' @param imgList moving image list from which template will be built.
#' @param typeofTransform A linear or non-linear registration type. Mutual
#' information metric by default. See \code{antsRegistration.}
#' @param iterations should be greater than 1 less than 10.
#' @param gradientStep speed of template shape update step, less than 1.
#' @param blendingWeight parameter between zero and one, default 0.5; higher
#' values lead to sharper intensity but may also cause artifacts.
#' @param segList segmentations for each target image, this will trigger a
#' joint label fusion call for each iteration and use masks during registration.
#' @param weights numeric vector, one weight per image; defaults to equal
#' weighting across all images.
#' @param verbose print diagnostic messages,
#' passed to \code{\link{antsRegistration}}
#' @param ... Additional options to pass to \code{\link{antsRegistration}}
#' @return template antsImage
#' @author Avants BB
#' @examples
#' pop = getANTsRData( "population" )
#' avg = antsAverageImages( pop ) # this is in ANTsR
#' template = buildTemplate( avg, pop, 'SyN', iterations = 1)
#' @export buildTemplate
buildTemplate <- function(
  initialTemplate,
  imgList,
  typeofTransform,
  iterations = 3,
  gradientStep = 0.25,
  blendingWeight = 0.5,
  segList,
  weights,
  verbose = TRUE,
  ...
) {
  template <- antsImageClone(initialTemplate)
  # Clamp the blending weight into [0, 1].
  if (blendingWeight < 0) blendingWeight <- 0
  if (blendingWeight > 1) blendingWeight <- 1
  # Segmentations trigger joint label fusion and masked registration.
  doJif <- !missing(segList)
  # BUG FIX: the old default was as.numeric(1:length(imgList)), which after
  # normalization weighted later images more heavily for no apparent reason.
  # Default to equal weights instead.
  if (missing(weights)) weights <- rep(1, length(imgList))
  weights <- weights / sum(weights)
  nImages <- length(imgList)
  for (i in seq_len(iterations)) {
    if (verbose) {
      message(paste0("Iteration: ", i))
    }
    # Pre-allocated per-image results for this iteration.
    avgIlist <- vector("list", nImages)  # warped intensity images
    avgWlist <- character(nImages)       # composed warp-field file names
    avgSlist <- vector("list", nImages)  # warped segmentations (if doJif)
    for (k in seq_len(nImages)) {
      if (doJif && (i > 1)) {
        # After the first pass, restrict registration to the foreground masks
        # derived from the current template segmentation and this subject's.
        segreglist <- list(
          thresholdImage(segmentation, 1, Inf),
          thresholdImage(segList[[k]], 1, Inf)
        )
        w1 <- antsRegistration(
          template,
          imgList[[k]], typeofTransform = typeofTransform,
          verbose = verbose > 1,
          mask = segreglist,
          ...)
      } else {
        w1 <- antsRegistration(
          template,
          imgList[[k]], typeofTransform = typeofTransform,
          verbose = verbose > 1,
          ...)
      }
      avgIlist[[k]] <- w1$warpedmovout
      # With `compose = ...` this returns the composed transform file name.
      avgWlist[k] <- antsApplyTransforms(
        initialTemplate, imgList[[k]],
        w1$fwdtransforms, compose = w1$fwdtransforms[1])
      if (doJif) {
        avgSlist[[k]] <- antsApplyTransforms(
          initialTemplate, segList[[k]],
          w1$fwdtransforms, interpolator = 'nearestNeighbor')
      }
    }
    if (verbose) {
      message("Averaging images")
    }
    template <- antsAverageImages(avgIlist, weights = weights)
    if (doJif) {
      # Voxels labelled in at least one warped segmentation form the mask.
      tempmask <- thresholdImage(
        antsAverageImages(avgSlist, weights = weights),
        1 / length(avgSlist), Inf)
      jlf <- jointLabelFusion(template, tempmask, rSearch = 3,
                              avgIlist, labelList = avgSlist)
      template <- jlf$intensity
      segmentation <- antsImageClone(jlf$segmentation, "float")
    }
    if (verbose) {
      message("Averaging warped composed transforms")
    }
    # Negated, scaled average warp: the shape-update step that moves the
    # template toward the population mean shape.
    wavg <- antsAverageImages(avgWlist, weights = weights) * (-1.0 * gradientStep)
    wmag <- sqrt(mean(wavg^2))
    wavgfn <- tempfile(fileext = ".nii.gz")
    antsImageWrite(wavg, wavgfn)
    template <- antsApplyTransforms(template, template, wavgfn)
    if (doJif) {
      segmentation <- antsApplyTransforms(segmentation, segmentation, wavgfn,
                                          interpolator = 'nearestNeighbor')
    }
    if (verbose) {
      message(paste0("Blending template image", wmag))
    }
    # Unsharp-mask style blend to counteract the blurring from averaging.
    template <- template * (1.0 - blendingWeight) +
      iMath(template, "Sharpen") * blendingWeight
  }
  if (doJif) return(list(template = template, segmentation = segmentation))
  template
}
|
5895c61b9200f028188c437c58c205282e9a4fc2 | 5693d3497d8d4e61f5ffa0becca905a1074de569 | /TA/Sentiment/Sentiment.R | 05edca8870944959a82f0f70803a85b365bfcf7a | [] | no_license | souviksamanta95/R_Personal | 65a6272253792839c3b44cfab3cf934227609ae8 | 56597d51b620a587e097c8b669d014a807481f39 | refs/heads/master | 2023-02-16T11:10:15.966371 | 2021-01-20T11:12:20 | 2021-01-20T11:12:20 | 260,390,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,236 | r | Sentiment.R | # Date - 21/08/2020 - 2:30 pm - Guest Lecture - Sentiment Analysis
# Sentiment analysis of TripAdvisor hotel reviews (JW Marriott).
# NOTE: machine-specific working directory; adjust before running.
setwd("D:/Analytics/R/TA/Sentiment")

# Load libraries ----
library(tm)         # corpus construction and text cleaning
library(stringr)
library(rvest)
library(SnowballC)  # word stemming

# Review data: one review per row in column `review`.
trip <- read.csv("tripadvisor_reviews.csv")
# BUG FIX: removed a stray top-level `N` token here, which aborted the
# script with "object 'N' not found".

# sentiment, echo=FALSE}
# Sentiment Analysis of Hotel Reviews on Trip Advisor - JW Marriott
hotel_reviews <- as.character(trip$review)

# Load the positive and negative opinion lexicons (one word per line).
positive_lexicon <- read.csv("positive-lexicon.txt")
negative_lexicon <- read.csv("negative-lexicon.txt")

# External stop-word list (supplements tm's built-in English list).
stop_words <- read.csv("stopwords_en.txt")

# Build a tm corpus with one document per review and peek at review 2.
vector_source_reviews <- VectorSource(hotel_reviews)
reviews_corpus <- Corpus(vector_source_reviews)
#inspect(reviews_corpus)
reviews_corpus[[2]]$content
# Text pre-processing ----
# Clean the review corpus in the same order as before: built-in English stop
# words, punctuation, digits, surplus whitespace, lower-casing, then the
# external stop-word file, then whitespace again. Each tm_map() on a
# SimpleCorpus emits a "transformation drops documents" warning; these are
# expected and harmless.
clean_corpus <- tm_map(reviews_corpus, removeWords, stopwords('english'))
clean_corpus <- tm_map(clean_corpus, removePunctuation)
clean_corpus <- tm_map(clean_corpus, removeNumbers)
clean_corpus <- tm_map(clean_corpus, stripWhitespace)
clean_corpus <- tm_map(clean_corpus, content_transformer(tolower))

# Remove the words from the external stop-word file as well, then tidy
# whitespace one final time.
custom_stopwords <- as.data.frame(stop_words)
final_corpus <- tm_map(clean_corpus, removeWords, custom_stopwords[, 1])
final_corpus <- tm_map(final_corpus, stripWhitespace)

# Compare the cleaned first review against the raw text.
final_corpus[[1]]$content
hotel_reviews[1]

# Reduce every word to its stem (SnowballC) and inspect the first review.
stemmed_corpus <- tm_map(final_corpus, stemDocument)
stemmed_corpus[[1]]$content
# Term-document matrix & per-review sentiment ----
TDM_corpus <- TermDocumentMatrix(stemmed_corpus)
# terms occurring with a minimum frequency of 5
findFreqTerms(TDM_corpus, 5)

size <- length(stemmed_corpus)
# BUG FIX (performance/idiom): pre-allocate per-review count vectors; the
# original grew them with append() inside the loop (quadratic copying).
pos_count_vector <- integer(size)
neg_count_vector <- integer(size)

for (i in seq_len(size)) {
  # Words of review i that appear in the positive / negative lexicons.
  corpus_words <- list(strsplit(stemmed_corpus[[i]]$content, split = " "))
  pos_count <- length(intersect(unlist(corpus_words), unlist(positive_lexicon)))
  neg_count <- length(intersect(unlist(corpus_words), unlist(negative_lexicon)))

  # Share of positive / negative words in the current review.
  # NOTE: a review with no lexicon words gives 0/0 = NaN, as in the original.
  total_count_for_current_review <- pos_count + neg_count
  pos_percentage <- (pos_count * 100) / total_count_for_current_review
  neg_percentage <- (neg_count * 100) / total_count_for_current_review

  pos_count_vector[i] <- pos_count
  neg_count_vector[i] <- neg_count
}

# Overall counts, computed once instead of accumulating inside the loop.
total_pos_count <- sum(pos_count_vector)
total_neg_count <- sum(neg_count_vector)

print(pos_percentage)  # positive share of the *last* review only
print(neg_percentage)  # negative share of the *last* review only

# Sentiment score of each review in [-1, 1]; boxplot of the first five
# (the 0 in 0:5 indexes nothing in R, so this is effectively reviews 1-5).
counts <- data.frame(pos_count_vector, neg_count_vector)
sentiment <- data.frame(
  c(1:size),
  (pos_count_vector - neg_count_vector) / (pos_count_vector + neg_count_vector)
)
names(sentiment) <- c('review_id', 'SentimentScore')
boxplot(sentiment$SentimentScore[0:5] ~ sentiment$review_id[0:5])
# Visualization ----
# Positive vs negative word count for a single review (here: review 8).
singe_review <- c(counts$pos_count_vector[8], counts$neg_count_vector[8])
barplot(t(as.data.frame(singe_review)), ylab = "Count",
        xlab = "Positve v/s Negative", main = "Positive")

# Overall percentage of positive and negative words across all reviews.
total_pos_count
total_count <- total_pos_count + total_neg_count
overall_positive_percentage <- (total_pos_count * 100) / total_count
overall_negative_percentage <- (total_neg_count * 100) / total_count
overall_positive_percentage
overall_negative_percentage

# Per-review positive word counts.
# BUG FIX: nrow was hard-coded to 100; with any other number of reviews the
# counts were silently recycled. Size the matrix from the data instead.
review_count_frame <- data.frame(
  matrix(c(pos_count_vector, neg_count_vector),
         nrow = length(pos_count_vector), ncol = 2))
colnames(review_count_frame) <- c("Positive Word Count", "Negative Word Count")
barplot(review_count_frame$`Positive Word Count`,
        ylab = "Positive Word Count", xlab = "Reviews from 1 ")

# Overall sentiment split.
percent_vec <- c(overall_positive_percentage, overall_negative_percentage)
percent_frame <- as.data.frame(percent_vec)
rownames(percent_frame) <- c("Positive Reviews", "Negative Reviews")
colnames(percent_frame) <- c("Percentage")
percentage <- t(percent_frame)
barplot(percentage, ylab = "Percentage",
        main = "Sentiment Analysis of JW Marriot Reviews on TripAdvisor")

# Word clouds ----
library(wordcloud)
set.seed(1234)  # reproducible word-cloud layout
textM <- as.matrix(TDM_corpus)
textFreqs <- rowSums(textM)
freqTab <- data.frame(term = names(textFreqs), num = textFreqs)
write.csv(freqTab, "freq.csv", row.names = FALSE)

wordcloud(freqTab$term, freqTab$num, max.words = 500, color = "green")
wordcloud(words = freqTab$term, freq = freqTab$num, min.freq = 4,
          max.words = 500, random.order = FALSE, rot.per = 0.35,
          colors = brewer.pal(8, "Dark2"))

# Reload the saved table and render interactive clouds with wordcloud2.
freqTab <- read.csv('freq.csv', stringsAsFactors = FALSE)
dim(freqTab)
head(freqTab)

library(wordcloud2)
#wordcloud2(data=freqTab)
wordcloud2(data = freqTab[0:100, ], size = 1.6, color = 'random-dark')
wordcloud2(data = freqTab[100:200, ], size = 0.7, shape = 'pentagon')
|
9438403d255a61bdfda9f0231a5edb2a9c242e58 | 55c8b901565745a5876eb7dd6a207b6bae5ceea3 | /BSImpliedVolatility.R | 8b8d0c931ef29fa4b7865767081f18ddaebd2d28 | [] | no_license | tituschirchir/ComputationalMethods | e51c201d6647d40cae4e8eada7e8d886ff42565f | 36c59761796e424b989ad4d304ae79eb1069c6f8 | refs/heads/master | 2021-01-20T14:43:00.390763 | 2017-02-23T00:30:14 | 2017-02-23T00:30:14 | 82,706,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,164 | r | BSImpliedVolatility.R | library(curl)
library(quantmod)       # market data retrieval (getSymbols etc.)
library(scatterplot3d)  # 3-D plotting, presumably for a vol surface -- TODO confirm
# Valuation date used for times to expiry -- assumes downstream code parses it as a Date.
today <- "2017-02-10"
# Numeric tolerance shared by the implied-volatility root finders below.
tolerance <- 0.0001
#Black Scholes option pricer ----
# Closed-form Black-Scholes price of a European option.
#   isCall - TRUE for a call, FALSE for a put (vectorised via ifelse)
#   S spot, K strike, tau time to expiry (years), r risk-free rate, vol volatility
BlackScholesPricer <- function(isCall, S, K, tau, r, vol)
{
  volSqrtTau <- vol * sqrt(tau)
  d1 <- (log(S / K) + (r + 0.5 * vol ^ 2) * tau) / volSqrtTau
  d2 <- d1 - volSqrtTau
  discountedK <- K * exp(-r * tau)
  callPrice <- S * pnorm(d1) - discountedK * pnorm(d2)
  putPrice <- discountedK * pnorm(-d2) - S * pnorm(-d1)
  ifelse(isCall, callPrice, putPrice)
}
#Put-Call Parity Checker ----
# Check put-call parity C - P == S - K*exp(-r*tau) for the Black-Scholes
# pricer, within the tolerance applied by equalswithinTolerance (assumed to
# be defined elsewhere in this file -- TODO confirm).
# BUG FIX: removed the dead `isMet` assignment, which compared the two
# floating-point sides with exact `==` and was never used.
PutCallParityCheck <- function(S, K, tau, r, vol)
{
  C <- BlackScholesPricer(TRUE, S, K, tau, r, vol)
  P <- BlackScholesPricer(FALSE, S, K, tau, r, vol)
  equalswithinTolerance(C - P, S - K * exp(-r * tau))
}
#Implied Volatility Calculator: Will decide between bisection and secant based on flag ----
# Inverts the Black-Scholes price Cmkt into an implied volatility.
#   isCall      : TRUE for a call, FALSE for a put
#   isBisection : TRUE -> bisection (capped at 2000 iterations),
#                 FALSE -> secant with seed vols c(0, 3)
#   Cmkt        : observed market price
#   S, K, tau, r: spot, strike, time to maturity (years), risk-free rate
#   tolerance   : convergence tolerance on the price error
# Returns the implied volatility, or NA if bisection cannot bracket a root.
# Fix: replaced ifelse() used as scalar control flow with if/else.
# NOTE(review): the secant seed passes the raw Black-Scholes price at
# vol = 1 as fn_0, not Cmkt - price(vols[1]); confirm this seeding is
# intended.
calcImpliedVols <-
  function(isCall,
           isBisection,
           Cmkt,
           S,
           K,
           tau,
           r,
           tolerance)
  {
    if (isBisection) {
      bisectionImpliedVols(isCall, Cmkt, S, K, tau, r, tolerance, 2000)
    } else {
      secantImpliedVols(
        isCall,
        Cmkt,
        S,
        K,
        tau,
        r,
        c(0, 3),
        BlackScholesPricer(isCall, S, K, tau, r, 1)
      )
    }
  }
#Calculate implied volatilities using bisection method ----
# Bisection root-finder for the implied volatility over the bracket [0, 5].
#   Cmkt      : observed market price
#   tolerance : convergence tolerance on the price error
#   maxIter   : iteration cap
# Returns NA when the price error has the same sign at both bracket ends
# (root not bracketed).
# Fixes:
#  * maxIter was accepted but never used, so a pathological input could
#    loop forever; the loop is now capped.
#  * if the very first midpoint was already within tolerance the original
#    skipped the loop and returned the initial value 0 instead of the
#    midpoint; impliedVol is now initialized to the midpoint.
bisectionImpliedVols <-
  function(isCall,
           Cmkt,
           S,
           K,
           tau,
           r,
           tolerance,
           maxIter)
  {
    a <- 0
    b <- 5
    CHigh <- Cmkt - BlackScholesPricer(isCall, S, K, tau, r, b)
    CLow <- Cmkt - BlackScholesPricer(isCall, S, K, tau, r, a)
    if (CHigh * CLow > 0.0) {
      # No sign change on [a, b]: the market price is outside the range of
      # model prices reachable with vol in [0, 5].
      return(NA)
    }
    midp <- (a + b) / 2
    midCdiff <- Cmkt - BlackScholesPricer(isCall, S, K, tau, r, midp)
    impliedVol <- midp
    iter <- 0
    while (midCdiff ^ 2 > tolerance ^ 2 && iter < maxIter)
    {
      # Keep the half-interval on which the price error changes sign.
      if (midCdiff > 0.0) a <- midp else b <- midp
      midp <- (a + b) / 2
      midCdiff <- Cmkt - BlackScholesPricer(isCall, S, K, tau, r, midp)
      impliedVol <- midp
      iter <- iter + 1
    }
    impliedVol
  }
#Calculate implied volatilities using secant method ----
# Recursive secant iteration for the implied volatility.
#   vols : c(previous, current) volatility iterates
#   fn_0 : price error associated with vols[1] from the previous step
# Stops when two successive iterates agree within the global `tolerance`.
# NOTE(review): there is no divergence guard -- a flat secant step
# (fn_1 == fn_0) divides by zero, and a slowly converging input can
# exhaust the call stack. The ifelse() on a scalar condition works here
# but a plain if/else would be clearer.
secantImpliedVols <-
  function(isCall, Cmkt, S, K, tau, r, vols, fn_0) {
    fn_1 <- Cmkt - BlackScholesPricer(isCall, S, K, tau, r, vols[2])
    # Standard secant update: vol3 = v2 - f(v2) * (v2 - v1) / (f(v2) - f(v1))
    vol3 <- vols[2] - fn_1 * (vols[2] - vols[1]) / (fn_1 - fn_0)
    ifelse (
      equalswithinTolerance(vol3, vols[2]),
      vol3,
      secantImpliedVols(isCall, Cmkt, S, K, tau, r, c(vols[2], vol3), fn_1)
    )
  }
#Calculate d1 for B-S formulation ----
# d1 term of the Black-Scholes formula:
#   d1 = (ln(S/K) + (r + vol^2/2) * tau) / (vol * sqrt(tau))
# Vectorized over all arguments.
# Fix: the value is returned directly instead of through a redundant
# assignment to a local variable.
getD1 <- function(S, K, tau, r, vol)
{
  (log(S / K) + (r + 0.5 * vol ^ 2) * tau) / (vol * sqrt(tau))
}
#Calculate Delta exactly ----
# Closed-form Black-Scholes delta, N(d1). Note this is the CALL delta;
# the corresponding put delta would be N(d1) - 1.
delta <- function(S, K, tau, r, vol)
{
  pnorm(getD1(S, K, tau, r, vol))
}
#Calculate Delta approximately ----
# Forward finite-difference approximation of the call delta:
# (C(S + del) - C(S)) / del, where del is the bump size.
# Error is O(del); a central difference would be O(del^2).
CallDeltaApproximation <- function(S, K, tau, r, vol, del)
{
  (BlackScholesPricer(T, S + del, K, tau, r, vol) - BlackScholesPricer(T, S, K, tau, r, vol)) /
    del
}
#Calculate Vega Exactly ----
# Closed-form Black-Scholes vega: S * sqrt(tau) * phi(d1), where phi is the
# standard normal density (ndprime). Identical for calls and puts.
vega <- function(S, K, tau, r, vol) {
  ndprime(getD1(S, K, tau, r, vol)) * S * sqrt(tau)
}
#Calculate Vega approximately ----
# Forward finite-difference approximation of vega:
# (C(vol + del) - C(vol)) / del, where del is the volatility bump.
CallVegaApproximation <- function(S, K, tau, r, vol, del)
{
  (BlackScholesPricer(T, S, K, tau, r, vol + del) - BlackScholesPricer(T, S, K, tau, r, vol)) /
    del
}
#Calculate Gamma Exactly ----
# Closed-form Black-Scholes gamma: phi(d1) / (S * vol * sqrt(tau)).
# NOTE(review): this definition masks base R's gamma() (the Gamma function)
# for the rest of the session; consider renaming, e.g. bsGamma().
gamma <- function(S, K, tau, r, vol) {
  ndprime(getD1(S, K, tau, r, vol)) / (S * vol * sqrt(tau))
}
#Calculate Gamma approximately ----
# Central second-difference approximation of gamma:
# (C(S + del) - 2*C(S) + C(S - del)) / del^2, error O(del^2).
CallGammaApproximation <- function(S, K, tau, r, vol, del)
{
  (
    BlackScholesPricer(T, S + del, K, tau, r, vol) -
      2 * BlackScholesPricer(T, S, K, tau, r, vol) +
      BlackScholesPricer(T, S - del, K, tau, r, vol)
  ) / (del ^ 2)
}
# Standard normal probability density N'(d1), used by the closed-form
# Greeks. The previous hand-rolled exp(-d1^2/2)/sqrt(2*pi) is exactly the
# standard-library dnorm(); using it avoids re-implementing the formula.
ndprime <- function(d1)
{
  dnorm(d1)
}
#Checks for equality between two variables within 0.0001 ----
# TRUE when |x - y| <= tol. The tolerance previously came only from the
# hidden script-level global `tolerance`; it is now an explicit parameter
# whose default preserves the original behavior, so callers can override
# it without touching global state.
equalswithinTolerance <- function(x, y, tol = tolerance)
{
  (x - y) ^ 2 <= tol ^ 2
}
#Generates Implied Volatilities by 2 methods (Bisection and Secant)
# returns a dataframe of Strike, Time to Maturity, Implied Vols ----
# Reads the option quotes for `security` (see getOptionsData), inverts each
# mid price with both root-finders and appends moneyness. Rows where either
# method fails to converge (NA) are dropped.
#   r        : risk-free rate. Fix: `r` is now used consistently -- the
#              inversion previously hard-coded r = 0.05 while the moneyness
#              column used the `r` argument, an internal inconsistency.
#   security : ticker used to locate the csv files
#   matDates : maturity date passed through to getOptionsData()
#   today    : valuation date
#   S        : spot price; generalized from the former hard-coded 130,
#              defaulted to 130 for backward compatibility.
GetImpliedVolatilities <- function(r, security, matDates, today, S = 130)
{
  calls <- getOptionsData(security, matDates , today)
  # Preallocate instead of growing c(0) element by element; seq_len() is
  # also safe when the quote file is empty (1:nrow would iterate c(1, 0)).
  Imp.Vol.Bisection <- numeric(nrow(calls))
  Imp.Vol.Secant <- numeric(nrow(calls))
  for (i in seq_len(nrow(calls)))
  {
    rowq <- calls[i, ]
    isCall <- rowq[, "Type"] == "Call"
    Imp.Vol.Bisection[i] <-
      calcImpliedVols(
        isCall,
        isBisection = TRUE,
        Cmkt = rowq[, "Market.Price"],
        S = S,
        K = rowq[, "Strike"],
        tau = rowq[, "Time"],
        r = r,
        tolerance
      )
    Imp.Vol.Secant[i] <-
      calcImpliedVols(
        isCall,
        isBisection = FALSE,
        Cmkt = rowq[, "Market.Price"],
        S = S,
        K = rowq[, "Strike"],
        tau = rowq[, "Time"],
        r = r,
        tolerance
      )
  }
  calls["Imp.Vol.Bisection"] <- Imp.Vol.Bisection
  calls["Imp.Vol.Secant"] <- Imp.Vol.Secant
  calls["Moneyness"] <-
    Moneyness(S, calls[, 1], calls[, "Time"], rep(r, nrow(calls)))
  # Keep only quotes for which both methods produced an implied vol.
  calls <-
    subset(calls, !is.na(Imp.Vol.Bisection) &
             !is.na(Imp.Vol.Secant))
  calls[, c(1, 2, 4, 5, 6, 7)]
}
#Accepts a dataframe with volatilities, strikes and maturity date and evaluates
#Calculates Moneyness
# Moneyness as the ratio of the discounted strike to the spot price:
# K * exp(-r * tau) / S. Vectorized over all arguments.
Moneyness <- function(S, K, tau, r) {
  discounted_strike <- K * exp(-r * tau)
  discounted_strike / S
}
#Gamma, Vega and Delta ----
# Builds a data frame of closed-form Greeks (Delta, Vega, Gamma) for each
# row of `dataframe`, which is accessed positionally: column 1 = strike,
# column 2 = time to maturity, column 4 = implied volatility.
# NOTE(review): the positional indexing assumes the column layout produced
# upstream (e.g. by GetImpliedVolatilities); it silently breaks if that
# order changes -- confirm before reuse.
#   S : spot price, r : risk-free rate
PopulateGreeks <- function(dataframe, S, r)
{
  df <-
    data.frame("Strike" = dataframe[, 1],
               "Time" = dataframe[, 2],
               "Imp.Vol" = dataframe[, 4])
  df[, "Delta"] <-
    delta(S, dataframe[, 1], dataframe[, 2], r, dataframe[, 4])
  df[, "Vega"] <-
    vega(S, dataframe[, 1], dataframe[, 2], r, dataframe[, 4])
  df[, "Gamma"] <-
    gamma(S, dataframe[, 1], dataframe[, 2], r, dataframe[, 4])
  df
}
#Utility to download options data to Csv from Yahoo! Finance ----
# Downloads the CALL option chain for `security` at each maturity in
# `matDates` (via quantmod::getOptionChain) and writes one csv per
# maturity, named "<security>_<date>.csv" in the working directory.
# Side effects only (network + file writes).
# Fix: seq_along() replaces 1:length(matDates), which would iterate over
# c(1, 0) and fail when matDates is empty; the function now also returns
# invisibly so it composes cleanly.
DownloadOptionsDataToCsv <- function(security, matDates)
{
  for (i in seq_along(matDates)) {
    df <-
      getOptionChain(security, src = "yahoo", Exp = matDates[i])$calls
    write.table(
      df,
      file = paste0(security, "_", matDates[i], ".csv"),
      sep = ",",
      qmethod = "double"
    )
  }
  invisible(NULL)
}
#Loads options data for a security from .csv file; Format: security_matdate.csv e.g. AAPL_2015-04-17.csv ----
# Reads "<security>_<matDate>.csv" and returns a data frame with columns
# Strike (first csv column), Time (years to maturity from currentDate on a
# 360-day basis), Type (always "Call") and Market.Price (bid/ask midpoint).
getOptionsData <- function(security, matDate, currentDate)
{
  quotes <- read.csv(paste0(security, "_", matDate, ".csv"))
  yearsToMaturity <-
    as.numeric(difftime(matDate, currentDate), units = "days") / 360
  data.frame(
    "Strike" = quotes[, 1],
    "Time" = rep(yearsToMaturity, nrow(quotes)),
    "Type" = rep("Call", nrow(quotes)),
    "Market.Price" = (quotes[, "Bid"] + quotes[, "Ask"]) / 2
  )
}
#Calculate area under a function f between a and b with N steps using trapezoid Rule ----
# Composite trapezoid rule for the integral of f over [a, b] with N equal
# panels; f must accept a scalar argument.
# Fix: the loop now uses seq_len(N - 1). The old 1:(N - 1) iterated over
# c(1, 0) when N == 1, silently adding two spurious panels and producing a
# wrong answer for single-panel integration.
TrapezoidRule <- function(f, a, b, N)
{
  h <- (b - a) / N
  theSum <- 0.5 * h * (f(a) + f(a + h))   # first panel
  for (i in seq_len(N - 1)) {
    theSum <- theSum + 0.5 * h * (f(a + i * h) + f(a + (i + 1) * h))
  }
  theSum
}
#Calculate area under a function f between a and b with N steps using Simpson's Rule ----
# Composite Simpson's rule over [a, b] with N panels; f must be vectorized.
# Each panel [left, right] contributes (right - left)/6 *
# (f(left) + 4 f(mid) + f(right)).
SimpsonRule <- function(f, a, b, N)
{
  h <- (b - a) / N
  left <- seq(from = a, to = b - h, by = h)
  right <- left + h
  mid <- left + h / 2
  sum((right - left) / 6 * (f(left) + 4 * f(mid) + f(right)))
}
#Gets Truncation Error for area calculation from Simpson Rule and Trapezoid Rule ----
# Tabulates the truncation error of both quadrature rules against the
# assumed true value pi, (a) varying N at fixed half-width a = as[length(as)],
# and (b) varying the half-width at fixed n = Ns[length(Ns)].
# Assumes the integral of f over (-a, a) tends to pi -- TODO confirm the
# integrand used by callers.
# NOTE(review): the first row of dfA reuses SimpError/TrapError, which were
# computed with a = as[length(as)] and N = Ns[1], not with a = as[1] and
# N = n as its header columns claim; verify this is intended.
# NOTE(review): the parameter name `as` shadows base::as(), and rbind()
# inside a loop grows the data frames one row at a time.
TruncationError <- function(f, as, Ns) {
  a <- as[length(as)]
  n <- Ns[length(Ns)]
  TrapError <- TrapezoidRule(f, -a, a, Ns[1]) - pi
  SimpError <- SimpsonRule(f, -a, a, Ns[1]) - pi
  # Table 1: error as a function of N at fixed half-width a.
  dfN <-
    data.frame(
      "a" = a,
      "N" = Ns[1],
      "Simpson Error" = SimpError,
      "Trapezoid Error" = TrapError
    )
  for (i in 2:length(Ns))
  {
    dfN <-
      rbind(dfN, c(
        a,
        Ns[i],
        SimpsonRule(f, -a, a, Ns[i]) - pi,
        TrapezoidRule(f, -a, a, Ns[i]) - pi
      ))
  }
  # Table 2: error as a function of the half-width at fixed N = n.
  dfA <-
    data.frame(
      "a" = as[1],
      "N" = n,
      "Simpson Error" = SimpError,
      "Trapezoid Error" = TrapError
    )
  for (i in 2:length(as))
  {
    dfA <-
      rbind(dfA, c(
        as[i],
        n,
        SimpsonRule(f, -as[i], as[i], n) - pi,
        TrapezoidRule(f, -as[i], as[i], n) - pi
      ))
  }
  list(dfN, dfA)
}
#Iterative approximation of area under a function f using Simpson's Rule ----
# Refines Simpson's rule with an increasing panel count (N = 4, 9, 16, ...,
# i.e. k^2 for k = 3, 4, ...) until two successive estimates differ by at
# most epsilon. Returns c(Value, Iterations, Time).
# Fix: Time was `Sys.time() - begin`, a difftime whose unit (secs vs mins)
# depends on the elapsed time, making the numeric value ambiguous; it is
# now always reported in seconds. Also `=` replaced by `<-` for assignment.
SimpsonIterativeApproximation <- function(func, a, b, epsilon)
{
  begin <- Sys.time()
  k <- 3
  v1 <- SimpsonRule(func, a, b, 2)
  v2 <- SimpsonRule(func, a, b, 4)
  while (abs(v2 - v1) > epsilon)
  {
    v1 <- v2
    v2 <- SimpsonRule(func, a, b, k ^ 2)
    k <- k + 1
  }
  c(
    "Value" = v2,
    "Iterations" = (k - 3),
    "Time" = as.numeric(difftime(Sys.time(), begin, units = "secs"))
  )
}
#Iterative approximation of area under a function f using Trapezoid Rule ----
# Refines the trapezoid rule with an increasing panel count (N = 4, 9, 16,
# ..., i.e. k^2 for k = 3, 4, ...) until two successive estimates differ by
# at most epsilon. Returns c(Value, Iterations, Time).
# Fix (mirrors SimpsonIterativeApproximation): Time is now always reported
# in seconds instead of a magnitude-dependent difftime unit, and `=`
# assignment replaced by `<-`.
TrapezoidIterativeApproximation <- function(func, a, b, epsilon)
{
  begin <- Sys.time()
  k <- 3
  v1 <- TrapezoidRule(func, a, b, 2)
  v2 <- TrapezoidRule(func, a, b, 4)
  while (abs(v2 - v1) > epsilon)
  {
    v1 <- v2
    v2 <- TrapezoidRule(func, a, b, k ^ 2)
    k <- k + 1
  }
  c(
    "Value" = v2,
    "Iterations" = (k - 3),
    "Time" = as.numeric(difftime(Sys.time(), begin, units = "secs"))
  )
}
#Heston Model Implementation ----
# European call prices under the Heston stochastic-volatility model,
# computed for every (strike, kappa) pair and returned as a matrix with
# strikes in rows and kappa values in columns. The structure follows the
# semi-closed-form solution of Heston (1993): the price is
#   C = S0 * P1 - K * exp(-(r - q) * tau) * P2,
# where P1 and P2 are probabilities obtained by numerically integrating a
# characteristic-function integrand over phi in (0, Inf) -- here truncated
# to [1e-20, 743] and evaluated with the iterative Simpson routine.
#   theta  : long-run variance        sigma : vol of variance
#   S0     : spot                     lambda: variance risk premium
#   rho    : spot/vol correlation     V0    : initial variance
#   r, q   : risk-free rate, dividend yield
#   tau    : time to maturity         Ks    : strikes
#   Kappas : mean-reversion speeds
# NOTE(review): K and kappa are NOT parameters of HestonIntegrand -- they
# are assigned in the loops below and picked up by lexical scoping, so the
# inner functions must only be evaluated inside those loops.
# NOTE(review): local `a` is computed but never used (C uses kappa * theta
# directly); cNames and rowM are dead variables; colnames/rownames are
# re-assigned on every outer-loop pass (harmless but wasteful).
Call.Heston <-
  function(theta = 0.1,
           sigma = 0.2,
           S0 = 1,
           lambda = 0,
           rho = -0.3,
           V0 = 0.1,
           r = 0,
           q = 0,
           tau = 5,
           Ks = c(0.5, 0.75, 1, 1.25, 1.5),
           Kappas = c(4, 2, 1)) {
    # Real part of the Heston characteristic-function integrand; j = 1
    # gives the P1 integrand, j = 2 the P2 integrand.
    HestonIntegrand <- function(j, phi) {
      a <- kappa * theta
      b <- kappa + lambda
      if (j == 1)
      {
        b <- b - rho * sigma
        u <- 0.5
      }
      else
      {
        u <- -0.5
      }
      d <-
        sqrt((rho * sigma * phi * 1i - b) ^ 2 - (sigma ^ 2) * (2 * u * phi * 1i - phi ^ 2))
      g <-
        (b - rho * sigma * phi * 1i + d) / (b - rho * sigma * phi * 1i - d)
      C <-
        (r - q) * phi * 1i * tau + (kappa * theta / (sigma ^ 2)) * ((b - rho * sigma * phi * 1i + d) * tau - 2 * log((1 - g * exp(d * tau)) / (1 - g)))
      D <-
        ((b - rho * sigma * phi * 1i + d) / (sigma ^ 2)) * ((1 - exp(d * tau)) / (1 - g * exp(d * tau)))
      psi <- exp(C + D * V0 + 1i * phi * log(S0 * exp(r * tau)))
      Re((exp(-1i * phi * log(K)) * psi) / (1i * phi))
    }
    # Single-argument wrappers so the quadrature routine can call them.
    P1Function <- function(psi)
    {
      HestonIntegrand(1, psi)
    }
    P2Function <- function(psi)
    {
      HestonIntegrand(2, psi)
    }
    mat <- matrix(nrow = length(Ks), ncol = length(Kappas))
    cNames <- c(0)
    for (m in 1:length(Ks))
    {
      K <- Ks[m]   # captured by HestonIntegrand via lexical scoping
      rowM <- c(0)
      for (n in 1:length(Kappas))
      {
        kappa <- Kappas[n]   # captured by HestonIntegrand via lexical scoping
        # P_j = 1/2 + (1/pi) * integral of the j-th integrand over phi.
        P1 <-
          0.5 + SimpsonIterativeApproximation(P1Function, 10 ^ -20, 743, 10 ^ (-4))["Value"] / pi
        P2 <-
          0.5 + SimpsonIterativeApproximation(P2Function, 10 ^ -20, 743, 10 ^ (-4))["Value"] / pi
        C <- S0 * P1 - K * exp(-(r - q) * tau) * P2
        mat[m, n] <- C
      }
      colnames(mat) <- paste("Kappa =", Kappas)
      rownames(mat) <- paste("K =", Ks)
    }
    mat
  }
#Utility to check the deviation of one value from another as a percentage ----
# Absolute percentage deviation of b relative to the reference value a.
deviationFrom <- function(a, b) {
  100 * abs(b - a) / a
}
|
015cd947cb8ec316b1064692e90b087d7c669d2b | 0a906cf8b1b7da2aea87de958e3662870df49727 | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610555215-test.R | 18d6c22c51d09d834b4b5163a7fda1f58703e96e | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 249 | r | 1610555215-test.R | testlist <- list(data = structure(c(1.51719503792547e-307, 1.03613125137291e-317, 1.0321387553943e-310, 4.86146168521645e-299, 2.4173716589517e+35, 0), .Dim = c(1L, 6L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
06299ff008b2a5d5359007fdf9b5ed6f15b6f292 | d7fa73e9d43081aab992eb518a0644d09ea9fb76 | /ui.R | 9cf2c4e73b0cdc766c31e5f1ac18543004f47404 | [] | no_license | james504s/20170501 | 284c5163902f637fb12d7a86af632a0037ef51bd | 3827e908500997e0e7be5847a6793ab0f884d51f | refs/heads/master | 2021-01-20T06:38:36.352374 | 2017-06-28T12:15:56 | 2017-06-28T12:15:56 | 89,905,861 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,254 | r | ui.R |
shinyUI(fluidPage
(navbarPage("Shinytest"
,navbarMenu("Lesson6")
,navbarMenu("Lesson7",
tabPanel("檔案分享"
,navlistPanel(("Share Your Shiny Apps")
,tabPanel("檔案分享"
,mainPanel(titlePanel("分享你的ShinyApp")
,helpText("1.分享你檔案給用戶,讓用戶在自己的電腦執行。")
,helpText("2.上傳至網頁,將網頁傳給用戶使用。")))
,tabPanel("runUrl"
,mainPanel(titlePanel("runUrl")
,helpText("從網路連結下載並啟動ShinyApp")
,helpText("1.將存放檔案的資料夾存成zip")
,helpText("2.在網頁上以自己的連結託管該zip檔案。")
,helpText('任何有權訪問連結的用戶都可以通過運行以下內容從R啟動此App:
"runUrl(weblink)"')))
,tabPanel("runGitHub"
,mainPanel(titlePanel("runGitHub")
,helpText("在www.github.com免費存放,
GitHub是R開發人員的熱門存放站點,
有許多功能來支持運作。")
,verbatimTextOutput("Hub_Text")))
,tabPanel("runGist"
,mainPanel(titlePanel("runGist")
,helpText("GitHub提供網路上共享文件的簡易服務(匿名),
此服務不需要註冊GitHub會員")
,helpText("1.將需要的檔案的程式碼ㄊ複製到Gist網頁上")
,helpText("2.注意GitHub給予Gist的URL")
,helpText('3.runGist("網址末端數字")')))))
,tabPanel("網頁分享",
pageWithSidebar(
headerPanel("testestest")
,sidebarPanel("三種方式")
,mainPanel(
tabsetPanel(
tabPanel("RStudio提供的三種方式"
,mainPanel(
img(src="pic1.png", height = 400, width = 600)
))
,tabPanel("Shinyapps.io"
,mainPanel(
img(src="pic2.png", height = 400, width = 600)
))
,tabPanel("Shiny Server"
,mainPanel(
img(src="pic3.png", height = 400, width = 600)
))
,tabPanel("Shiny Server Pro"
,mainPanel(
img(src="pic4.png", height = 400, width = 600)
))
))
)))
))) |
25dfff3090888380940181f87a1e614de2c0709c | 2c74b9a588ee8df696ab8afd21bd9761738d027c | /Random Forest.r | 262af36b7a422cfb3840bb27d41bedbc840e8703 | [] | no_license | FelipeLelis/MachineLearning | 286ee70c717de28243a82ceca06c9a39be0771d5 | 0dbf2734b45990f9908f0d6907b6684de1b669fe | refs/heads/master | 2020-11-30T22:27:40.784605 | 2019-12-27T18:37:36 | 2019-12-27T18:37:36 | 230,496,669 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | Random Forest.r |
install.packages("randomForest")
library(randomForest)
credito = read.csv(file.choose(), sep=",", header=T)
modelo = randomForest(class ~ ., data = credito, ntree=500 )
#previsao baseado nos dados out of bag
modelo$predicted
#importancia dos atributos no modelo
modelo$importance
#proporção de votos na classificação de cada instancia
modelo$votes
#arvores induzidas
modelo$forest
#matriz de confusão baseado nos dados out of bag
modelo$confusion
plot(modelo)
#fazendo previsões com um registro
predict(modelo, newdata = credito[154,])
|
e2c343172828433cac390ccc101aad6ec43931df | 4c24ab304de9ac6d547c52e78f3af2d79e8a1474 | /BIOS624-A2-HRTexp.R | 80c3cbf10bd2c747537e88b28eca99e1098862fb | [] | no_license | k-milbers/BIOS624-A2 | f824c378a3037b42fbd86588ee1e54702494e30b | 1830309579dbdc5cc93d8960548e752c822907b6 | refs/heads/master | 2020-08-27T09:43:35.171099 | 2019-10-23T18:12:31 | 2019-10-23T18:12:31 | 217,321,031 | 0 | 0 | null | 2019-10-24T14:36:05 | 2019-10-24T14:36:04 | null | UTF-8 | R | false | false | 5,907 | r | BIOS624-A2-HRTexp.R |
# HRT / colon-cancer case-control data cleaning script.
data <- read.csv("HRTdata2019.csv")
# NOTE(review): attach() places every column of `data` on the search path;
# masking bugs are easy to introduce this way -- with(data, ...) or
# data$column access is safer.
attach(data)
### DATA CLEANING
#----Colon Cancer Indicator----
#Classify cases as 1, controls as 0
cCASE <- ifelse(CASE == 'Control', 0, 1)
#----Blackhole effect----
# *Note there is likely a cleaner way to do this step.
#STEP 1: For any year effected by the black hole (BH), if they were exposed in the year prior to BH or the year after to the BH, then code them as 1 for the missing year
# NOTE(review): every outer ifelse() below omits its `no` argument, so R
# raises 'argument "no" is missing' as soon as any BHxx value is not '1'.
# If the intended result for non-black-hole years is NA (or 0), that third
# argument must be supplied explicitly -- confirm against the data.
#Code for year 3:
BH_EXP5_03 <- ifelse(BH03 =='1',
                     ifelse(EXP5_02 =='1' | EXP5_04 =='1', 1,0))
#For all other years 1 to 15 (Note years 16-21 for the black hole are not included as the exposure data only goes to year 15)
BH_EXP5_01 <- ifelse(BH01 =='1',
                     ifelse(EXP5_02 =='1', 1,0))
BH_EXP5_02 <- ifelse(BH02 =='1',
                     ifelse(EXP5_01 =='1' | EXP5_03 =='1', 1,0))
BH_EXP5_04 <- ifelse(BH04 =='1',
                     ifelse(EXP5_03 =='1' | EXP5_05 =='1', 1,0))
BH_EXP5_05 <- ifelse(BH05 =='1',
                     ifelse(EXP5_04 =='1' | EXP5_06 =='1', 1,0))
BH_EXP5_06 <- ifelse(BH06 =='1',
                     ifelse(EXP5_05 =='1' | EXP5_07 =='1', 1,0))
BH_EXP5_07 <- ifelse(BH07 =='1',
                     ifelse(EXP5_06 =='1' | EXP5_08 =='1', 1,0))
BH_EXP5_08 <- ifelse(BH08 =='1',
                     ifelse(EXP5_07 =='1' | EXP5_09 =='1', 1,0))
BH_EXP5_09 <- ifelse(BH09 =='1',
                     ifelse(EXP5_08 =='1' | EXP5_10 =='1', 1,0))
BH_EXP5_10 <- ifelse(BH10 =='1',
                     ifelse(EXP5_09 =='1' | EXP5_11 =='1', 1,0))
BH_EXP5_11 <- ifelse(BH11 =='1',
                     ifelse(EXP5_10 =='1' | EXP5_12 =='1', 1,0))
BH_EXP5_12 <- ifelse(BH12 =='1',
                     ifelse(EXP5_11 =='1' | EXP5_13 =='1', 1,0))
BH_EXP5_13 <- ifelse(BH13 =='1',
                     ifelse(EXP5_12 =='1' | EXP5_14 =='1', 1,0))
BH_EXP5_14 <- ifelse(BH14 =='1',
                     ifelse(EXP5_13 =='1' | EXP5_15 =='1', 1,0))
BH_EXP5_15 <- ifelse(BH15 =='1',
                     ifelse(EXP5_14 =='1', 1,0))
#Step 2: Create new columns for the exposure years - if they were exposed in year x or if they were classified as exposed for year x due to the black hole
# NOTE(review): these combined flags depend on the BH_EXP5_xx vectors
# above; any NA in either operand of `|` propagates into the result.
cEXP5_01 <- ifelse(EXP5_01 == '1' | BH_EXP5_01 == '1', 1, 0)
cEXP5_02 <- ifelse(EXP5_02 == '1' | BH_EXP5_02 == '1', 1, 0)
cEXP5_03 <- ifelse(EXP5_03 == '1' | BH_EXP5_03 == '1', 1, 0)
cEXP5_04 <- ifelse(EXP5_04 == '1' | BH_EXP5_04 == '1', 1, 0)
cEXP5_05 <- ifelse(EXP5_05 == '1' | BH_EXP5_05 == '1', 1, 0)
cEXP5_06 <- ifelse(EXP5_06 == '1' | BH_EXP5_06 == '1', 1, 0)
cEXP5_07 <- ifelse(EXP5_07 == '1' | BH_EXP5_07 == '1', 1, 0)
cEXP5_08 <- ifelse(EXP5_08 == '1' | BH_EXP5_08 == '1', 1, 0)
cEXP5_09 <- ifelse(EXP5_09 == '1' | BH_EXP5_09 == '1', 1, 0)
cEXP5_10 <- ifelse(EXP5_10 == '1' | BH_EXP5_10 == '1', 1, 0)
cEXP5_11 <- ifelse(EXP5_11 == '1' | BH_EXP5_11 == '1', 1, 0)
cEXP5_12 <- ifelse(EXP5_12 == '1' | BH_EXP5_12 == '1', 1, 0)
cEXP5_13 <- ifelse(EXP5_13 == '1' | BH_EXP5_13 == '1', 1, 0)
cEXP5_14 <- ifelse(EXP5_14 == '1' | BH_EXP5_14 == '1', 1, 0)
cEXP5_15 <- ifelse(EXP5_15 == '1' | BH_EXP5_15 == '1', 1, 0)
#----Reclassify exposure groups----
#Group <5 years, excluding year 1 and 2. If they are exposed in either year 3 or 4 then they are categorized exposed.
#Cat 1 is exposed, 0 is not exposed
cEXP5_Cat_less <- ifelse(cEXP5_03=='1',1,
                         ifelse(cEXP5_04=='1',1,0))
#Group >=5 years. If they are exposed in any year 5 to 15 then they are categorized exposed.
#Cat 1 is exposed, 0 is not exposed
cEXP5_Cat_greater <- ifelse(cEXP5_05=='1',1,
                            ifelse(cEXP5_06=='1',1,
                                   ifelse(cEXP5_07=='1',1,
                                          ifelse(cEXP5_08=='1',1,
                                                 ifelse(cEXP5_09=='1',1,
                                                        ifelse(cEXP5_10=='1',1,
                                                               ifelse(cEXP5_11=='1',1,
                                                                      ifelse(cEXP5_12=='1',1,
                                                                             ifelse(cEXP5_13=='1',1,
                                                                                    ifelse(cEXP5_14=='1',1,
                                                                                           ifelse(cEXP5_15=='1',1,0)))))))))))
#Group no exposure.
#Cat 1 is not exposed (never exposed), 0 is exposed (ever exposed)
# NOTE(review): both branches below test cEXP5_Cat_less; the inner
# condition was presumably meant to be cEXP5_Cat_greater, so a subject
# exposed only in years >= 5 is currently classified as never exposed.
# Confirm and fix before analysis.
cEXP5_Cat_Non <- ifelse(cEXP5_Cat_less=='1', 0,
                        ifelse(cEXP5_Cat_less=='1',0, 1))
#----Compressing the drug data----
#Y/N to having ever taken X drug are split into 3 categories: Y1-5, Y6-10, Y11-15. In this step we compress Y6-10 and Y11-15 into one.
# *Note: I realize that this cleaning doesn't distinguish between: Non or Unknown, did we want to distinguish?
# NOTE(review): the NUMxx columns are compared against the string
# "1 or more" while NS_/OC columns use '1'; verify each coding against the
# study codebook.
#NSAIDS
cNS_EXP1 <- NS_EXP1
cNS_EXP2 <- ifelse(NS_EXP2 == '1' | NS_EXP3 =='1', 1, 0)
#CVD drugs
cNUM56_1 <- NUM56_1
cNUM56_2 <- ifelse(NUM56_2 == "1 or more" | NUM56_3 =="1 or more", 1, 0)
#CNS
cNUM57_1 <- NUM57_1
cNUM57_2 <- ifelse(NUM57_2 == "1 or more" | NUM57_3 =="1 or more", 1, 0)
#Other hormones
cNUM61_1 <- NUM61_1
cNUM61_2 <- ifelse(NUM61_2 == "1 or more" | NUM61_3 =="1 or more", 1, 0)
#Vitamins
cNUM63_1 <- NUM63_1
cNUM63_2 <- ifelse(NUM63_2 == "1 or more" | NUM63_3 =="1 or more", 1, 0)
#Oral Contraception
cOC1<- OC1
cOC2 <- ifelse(OC2 == '1' | OC3 =='1', 1, 0)
#Sigmonoscopy
cSIG2EVR <- SIG2EVR
#Doctor Visits
cTOTDOC <- TOTDOC
#----Cleaned dataset----
# Assemble the cleaned analysis dataset from the derived vectors above.
cdata <- cbind(STUD_ID, CASE_ID, cCASE, COLON, cEXP5_Cat_Non, cEXP5_Cat_less, cEXP5_Cat_greater, cNS_EXP1, cNS_EXP2,
               cNUM56_1, cNUM56_2, cNUM57_1, cNUM57_2, cNUM61_1, cNUM61_2, cNUM63_1, cNUM63_2, cOC1, cOC2, cSIG2EVR, cTOTDOC)
# NOTE(review): cbind() of vectors builds a matrix, coercing every column
# to a single common type before as.data.frame(); constructing the
# data.frame directly would preserve per-column types.
cdata <- as.data.frame(cdata)
detach(data)
#----Conditional logistic regression----
|
201e17096c4b68226a41f1baff1a5fd009c71526 | 604ae9dbc4bf79cdf00b4c22bb086a36df067b98 | /man/angleCells.Rd | 55ef25f335d65be5f9bd8d32a34267d725a75319 | [] | no_license | Amish7525/celltrackR | fb1f0988451e6b5bb893bd954761174eac03331a | fafa12fbf6f41e7383e06b17c8a7c0dcc59a4036 | refs/heads/master | 2023-07-12T03:34:58.568268 | 2021-08-20T16:20:50 | 2021-08-20T16:20:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 843 | rd | angleCells.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/angle-functions.R
\name{angleCells}
\alias{angleCells}
\title{Angle between Two Tracks}
\usage{
angleCells(X, cellids, degrees = TRUE)
}
\arguments{
\item{X}{a tracks object}
\item{cellids}{a vector of two indices specifying the tracks to get steps from.}
\item{degrees}{logical; should angle be returned in degrees instead of radians? (defaults to \code{TRUE})}
}
\value{
A single angle.
}
\description{
Compute the angle between the displacement vectors of two tracks in the dataset.
}
\examples{
## Find the angle between the tracks with ids 1 and 3
angleCells( TCells, c("1","3") )
}
\seealso{
\code{\link{distanceCells}} to compute the minimum distance between the tracks,
and \code{\link{AngleAnalysis}} for other methods to compute angles and distances.
}
|
017e6f9714ccc5d1798de357173e31729827fa90 | 068edb35f376dc3eb572b0763d62b800dfe172a9 | /batchHelperFunctions.R | e737bf7072dbe07c23926322a61ac652c7cb9555 | [] | no_license | wdwatkins/loadflexBatch | 8888cb0ba8d4852640d274a8ce651898175a57a7 | 9e9093b2d0ac2f0510685328fd6db771f9831b6e | refs/heads/master | 2021-01-11T23:14:01.846607 | 2017-02-14T20:45:07 | 2017-02-14T20:45:07 | 78,558,430 | 0 | 2 | null | 2017-01-23T20:52:45 | 2017-01-10T17:47:59 | R | UTF-8 | R | false | false | 3,804 | r | batchHelperFunctions.R | #read-in function
# make a data.frame describing the data files that exist in this directory for
# the given constituents
makeFileDF <- function(input.folder, constits, discharge.folder) {
#TODO:check that all constituent files have matching discharge records
#df of corresponding files
allFolders <- file.path(input.folder, c(constits, discharge.folder))
if(!all(dir.exists(allFolders))) {
stop("Input or constituent folder does not exist")
}
#get all constituent files
constitFiles <- list.files(file.path(input.folder, constits), full.names = TRUE)
constitNameOnly <- basename(constitFiles)
qFiles <- file.path(input.folder, dischargeFolder, constitNameOnly)
#TODO: warning if not matching dischargeFolder, will be skipped
#should deal with if a discharge file doesn't exist?
fileDF <- data.frame(constitFile = constitFiles, qFile=qFiles, stringsAsFactors = FALSE)
return(fileDF)
}
# recombine summaries into single dfs
summarizeCsvs <- function(csvType=c('inputs','annual','multiYear', 'modelMetrics'),
fileDF, outputFolder) {
csvType <- match.arg(csvType)
allCsvs <- bind_rows(lapply(seq_len(nrow(fileDF)), function(i) {
constitStation <- basename(file_path_sans_ext(fileDF$constitFile[i]))
constitName <- basename(dirname(fileDF$constitFile[i]))
csvFile <- file.path(outputFolder, constitName, csvType, paste0(constitStation, '.csv'))
tryCatch({
suppressWarnings(read.csv(csvFile, header=TRUE, stringsAsFactors=FALSE))
}, error=function(e) {
message(paste0('could not read ', csvFile), call.=FALSE)
NULL
})
}))
#write separate csvs for each constituent
constits <- unique(allCsvs$constituent)
sapply(constits, function(con) {
writeFile = file.path(outputFolder, con, paste0(con,"_", csvType,".csv"))
write.csv(filter(allCsvs, constituent == con),
file = writeFile,
row.names = FALSE)
message('the ', con, ' ', csvType, ' summary has been written to ', writeFile)
})
return(allCsvs)
}
#write plots to pdfs for a single site/constituent pair
#handles "tall" preds data frame of multiple models
writePDFreport <- function(file, intdat, estdat, allPreds, meta, inputCSV, annualCSV) {
#pdf(file, height = 11, width = 8.5)
#write csv data to pretty table
#TODO: add titles for different models
# input <- tableGrob(inputCSV)
# annual <- tableGrob(annualCSV)
# grid.arrange(input, annual, ncol= 1)
#plots
#are some of these going to be redundant with multiple models?
modelNames <- data.frame(short = c("rloadest", "interp", "composite"),
long = c("rloadest 5 parameter model",
"Interpolation Model",
"Composite rloadest and interpolation model"),
stringsAsFactors = FALSE)
for(m in unique(allPreds$model)) {
preds <- filter(allPreds, model == m)
modelLong <- modelNames$long[modelNames$short == m]
#page 1
par(omi = c(2,2,2,2))
plotEGRET("multiPlotDataOverview", intdat, estdat, preds, meta, preds.type='Conc')
title(paste("Input data:", getInfo(siteMeta, "site.id"), modelLong))
#page 2
par(mfrow=c(2,1))
plotEGRET("plotConcTimeDaily", intdat, estdat, preds, meta, mgp = c(4,1,0), preds.type='Conc')
title(paste("Predictions:", getInfo(siteMeta, "site.id"), modelLong), line = 6)
plotEGRET("plotFluxTimeDaily", intdat, estdat, preds, meta, mgp = c(4,1,0), preds.type='Conc')
#page 3
par(mfrow=c(1,1))
plotEGRET("fluxBiasMulti", intdat, estdat, preds, meta, moreTitle = modelLong, preds.type='Conc')
title(paste("Diagnostics:", getInfo(siteMeta, "site.id"), modelLong), line = 3)
}
#dev.off()
}
|
d77fe817025e79065e9584a9936d0e918bd88fcd | 94768e1a146bfd65c1a42fb289aa93d13645ace6 | /man/paramp.Rd | ae4d04d02bd5c01512138c1f5a6c2484bcad911c | [] | no_license | cran/normalp | 44694a8f4141f00682cae19b311c67e5cd67cdaa | f97ad4035397e9d345a1aedbfa0b80f6aec69cb2 | refs/heads/master | 2021-01-22T10:09:18.196723 | 2020-02-14T09:50:08 | 2020-02-14T09:50:08 | 17,697,943 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,294 | rd | paramp.Rd | \name{paramp}
\alias{paramp}
\alias{paramp.default}
\title{Estimation of location and scale parameters}
\description{The function \code{paramp} returns a list with six elements: the arithmetic mean, \eqn{M_p}{Mp},
the standard deviation, \eqn{S_p}{Sp}, and the shape parameter \eqn{p}{p}, estimated on a sample, together
with a convergence indicator.
}
\usage{
paramp(x, p)
}
\arguments{
\item{x}{A vector of observations.}
\item{p}{If specified, the algorithm uses this value for \code{p},
i.e. the algorithm does not use an estimate of \code{p}.}
}
\value{The estimation of \eqn{\mu}{%
Mp} and \eqn{p}{p} is based on an iterative method. To show the differences between the
least squares method and the maximum likelihood method, the function also reports the sample mean and standard deviation.
\item{Mean}{Arithmetic mean.}
\item{Mp}{The estimated value of the location parameter.}
\item{Sd}{Standard deviation.}
\item{Sp}{The estimated value of the scale parameter.}
\item{p}{The estimated value of the shape parameter.}
\item{iter}{Convergence indicator: a value of 1 means the iterative algorithm had convergence problems.}
}
\references{Mineo, A.M. (1996) \emph{La migliore combinazione delle osservazioni: curve normali di ordine p e
stimatori di norma Lp}. PhD thesis.
}
\examples{
x<-rnormp(1000,2,3,4.2)
paramp(x)
}
\keyword{univar}
\author{Angelo M. Mineo}
|
ae0b79dd39b4af608d6c39153dac90f148c779be | 8d1d227b70e235723d75e023345b3902cbd5e25f | /script/reads_stat_by_fastqcr.R | 9ffe19cdcc03c16bf74dc50b385ac957e909ca32 | [] | no_license | bioxfu/RNA-Seq | 30abf3032a0854768c198da7fab52bd7391947d1 | d49f93bcac3ff09707eee47382ceecf71d37d9c8 | refs/heads/master | 2021-06-02T02:37:33.381359 | 2019-12-02T04:15:16 | 2019-12-02T04:15:16 | 109,210,286 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 415 | r | reads_stat_by_fastqcr.R | library(fastqcr)
library(magrittr)
# Aggregate the FastQC reports for the raw and the cleaned reads and build
# one summary row per sample.
qc_raw <- qc_aggregate('fastqc/raw') %>% qc_stats()
qc_clean <- qc_aggregate('fastqc/clean') %>% qc_stats()
# Column 1 = sample name, column 4 = total raw reads, column 9 = total
# reads after cleaning (positions within the column-bound raw/clean table).
qc <- cbind(qc_raw, qc_clean)[, c(1, 4, 9)]
colnames(qc) <- c('sample', 'tot.seq', 'tot.clean.seq')
# Percentage of reads that survived cleaning, to one decimal place.
qc$percentage <- round(100 * as.numeric(qc$tot.clean.seq) / as.numeric(qc$tot.seq), 1)
write.table(qc, 'stat/fastqc_stat.tsv', sep='\t', row.names = F, quote=F)
|
e3e27e3291eae87a7f160334a81f004615fa4de0 | 359724449f3606b892e2f973e03d0adddfafdc6e | /src/test_eu1.R | 69842463f979f3639a404ea1c55e511853a343d2 | [] | no_license | Christinepeng/USPopularMusicConcertMarketDemandPrediction | f91beba0a7694e6bc97872a1df2711f51c693bdd | 3df144bb2fd1da8a015b62f07577dfa7ea5a696a | refs/heads/master | 2020-07-21T00:35:30.285188 | 2019-09-06T09:04:32 | 2019-09-06T09:04:32 | 206,731,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,789 | r | test_eu1.R | library(xlsx)
# Concert pricing data for Europe: first sheet of the workbook.
df <- read.xlsx('../dat/Europe.xlsx', 1)
df$state_ = factor(df$state)
# Linear model of average ticket price (USD) on genre indicators, state,
# and their two- and three-way interactions with the sold_out_80 indicator,
# plus a sold-out x capacity interaction.
model = lm(ave_price_USD~genre_Alternative+genre_Blues+genre_Classical+genre_Comedy+genre_Country+genre_Dance+genre_Folk+genre_HipHop_Rap+genre_Jazz+genre_Latin+genre_Pop+genre_RnB_Soul+genre_Rock_Metal+genre_Other+state_+genre_Alternative:state_+genre_Blues:state_+genre_Classical:state_+genre_Comedy:state_+genre_Country:state_+genre_Dance:state_+genre_Folk:state_+genre_HipHop_Rap:state_+genre_Jazz:state_+genre_Latin:state_+genre_Pop:state_+genre_RnB_Soul:state_+genre_Rock_Metal:state_+genre_Other:state_+genre_Alternative:sold_out_80+genre_Blues:sold_out_80+genre_Classical:sold_out_80+genre_Comedy:sold_out_80+genre_Country:sold_out_80+genre_Dance:sold_out_80+genre_Folk:sold_out_80+genre_HipHop_Rap:sold_out_80+genre_Jazz:sold_out_80+genre_Latin:sold_out_80+genre_Pop:sold_out_80+genre_RnB_Soul:sold_out_80+genre_Rock_Metal:sold_out_80+genre_Other:sold_out_80+state_:sold_out_80+genre_Alternative:state_:sold_out_80+genre_Blues:state_:sold_out_80+genre_Classical:state_:sold_out_80+genre_Comedy:state_:sold_out_80+genre_Country:state_:sold_out_80+genre_Dance:state_:sold_out_80+genre_Folk:state_:sold_out_80+genre_HipHop_Rap:state_:sold_out_80+genre_Jazz:state_:sold_out_80+genre_Latin:state_:sold_out_80+genre_Pop:state_:sold_out_80+genre_RnB_Soul:state_:sold_out_80+genre_Rock_Metal:state_:sold_out_80+genre_Other:state_:sold_out_80+sold_out_80:capacity, data=df)
x <- summary(model)
# Report coefficients grouped by p-value band: (0, 0.001], (0.001, 0.01],
# (0.01, 0.05].
arr = c(0, 0.001, 0.01, 0.05)
for (idx in 1:(length(arr)-1)) {
  lb = arr[idx]
  rb = arr[idx+1]
  # x$coeff[-1, 4] drops the intercept row; column 4 holds the p-values.
  to_select <- lb < x$coeff[-1,4] & x$coeff[-1,4] <= rb
  relevant <- names(to_select)[to_select==TRUE]
  print(c(lb, rb))
  # Estimates (column 1) for the coefficients whose p-value falls in the band.
  print(x$coeff[-1,1][relevant])
}
print(x$r.squared)
print(x$adj.r.squared)
|
187e31e90b83fe1858b2c785375178072dc059b7 | b4dbbaecfca03144b1a01b93f8a1d3d92a2446d3 | /Data Visualization with ggplot2/sixth-ggplot-second.R | f845a542602cee9c97ff50f1428d4a37c16dca43 | [] | no_license | Nischal2015/R-for-Data-Science | b9785e9c4fb6452ce291fd8daa0564d97036ee9c | b90ac2bd9048bfa6b7399f3b139d88f8f5f82d50 | refs/heads/master | 2022-11-20T21:49:08.441774 | 2022-11-17T04:27:00 | 2022-11-17T04:27:00 | 264,099,070 | 1 | 0 | null | 2022-11-17T04:27:01 | 2020-05-15T04:53:34 | Jupyter Notebook | UTF-8 | R | false | false | 164 | r | sixth-ggplot-second.R | library(ggplot2)
#2nd
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth(mapping = aes(group = drv), size = 1.5 ,se = F) |
368e862f0136bf67199f3474dda1e45439cf4888 | 50819e94eaf31ca6360a1038b9079fc60abe738f | /fgwas/R/fg.report.r | b9f4610887b815cda3ec86e2e4367a34ed3127c9 | [] | no_license | wzhy2000/R | a2176e5ff2260fed0213821406bea64ecd974866 | 2ce4d03e1ab8cffe2b59d8e7fee24e7a13948b88 | refs/heads/master | 2021-05-01T11:49:17.856170 | 2017-12-11T04:44:15 | 2017-12-11T04:44:15 | 23,846,343 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,266 | r | fg.report.r | fg.report<-function( fg.dat, fg.scan=NULL, fg.perm=NULL, file.pdf=NULL )
{
check_FG_DAT( fg.dat );
if( !missing(fg.scan) ) check_FG_SCAN( fg.scan );
if( !missing(fg.perm) ) check_FG_PERM( fg.perm );
if( !missing(file.pdf) ) check_file_pdf( file.pdf );
if (is.null(file.pdf))
file.pdf <- paste(fg.dat$file.pheY.csv, ".PDF", sep="");
Report.new( file.pdf, options );
Report.title( "Functional GWAS Report", "fGWAS", "http://ccb.bjfu.edu.cn/" );
Report.par( "dat.file", fg.dat$file.pheY.csv);
Report.AddHeadline( "Data Summary", level=1 );
proc_report_dat(fg.dat);
if( !is.null( fg.scan) )
{
Report.AddHeadline( "SNP Scan", level=1 );
proc_report_snpscan(fg.dat, fg.scan, fg.perm) ;
}
if( !is.null( fg.perm) )
{
Report.AddHeadline( "SNP Permutation", level=1 );
proc_report_perm(fg.dat, fg.perm) ;
}
if( !is.null( fg.perm) && !is.null( fg.scan) )
{
r.sig <- fg_detect_sig(fg.dat, fg.scan, fg.perm)
Report.AddHeadline( "Significant SNP", level=1 );
proc_report_sig( fg.dat, r.sig$sig.05, r.sig$sig.01 ) ;
}
if( is.null( fg.perm) && !is.null( fg.scan) )
{
Report.AddHeadline( "Top SNP", level=1 );
proc_report_topn(fg.dat, fg.scan) ;
}
Report.Output( file.pdf );
}
|
a9f046af02f6341f065c70b48c2c9133e28ac83f | 3c2ca82db20efda37f9161959a8fc85964ae7e85 | /day14/day14_1.R | 29bfcc8da337a9aa69f1717c78abad2e759696d4 | [] | no_license | quoctran98/Advent-of-Code-2020 | 036ab16b6767f76f36b205d38808a704d1faaf8b | 7616ad230bf089cfef2959743d4c2e762f057fc2 | refs/heads/master | 2023-01-29T22:55:58.980251 | 2020-12-14T06:45:57 | 2020-12-14T06:45:57 | 318,958,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,492 | r | day14_1.R | library(stringr)
input <- readLines("day14/day14_input.txt")
# using this from https://stackoverflow.com/questions/12892348/convert-binary-string-to-binary-or-decimal-value
# the base "strtoi" function returns NA sometimes with my input (BUT ONLY SOMETIMES?)
base2decimal = function(base_number, base = 2) {
split_base = strsplit(as.character(base_number), split = "")
return(sapply(split_base, function(x) sum(as.numeric(x) * base^(rev(seq_along(x) - 1)))))
}
writeMasked <- function (value, mask) {
maskedBin <- sapply(str_split(paste(rev(intToBits(value))), ""), `[[`, 2)
# ^ thanks https://stackoverflow.com/questions/6614283/converting-decimal-to-binary-in-r
while (length(maskedBin) < 36) {maskedBin <- append(maskedBin, "0", after = 0)}
maskedBin[mask != "X"] <- mask[mask != "X"]
return(base2decimal(paste(maskedBin, collapse = ""), base = 2))
}
# memory is array of dec int; mask is vector; val is dec int
mask <- rep.int("X", 36)
memory <- NULL # there's no 0 address in my input, so we're good :)
for (command in input) {
if (str_detect(command, "mask")) {
mask <- str_split(str_split(command," = ")[[1]][2], "")[[1]]
} else if (str_detect(command, "mem")) {
address <- as.numeric(str_remove(str_sub(str_split(command," = ")[[1]][1], 5), "]"))
value <- as.numeric(str_split(command," = ")[[1]][2])
memory[address] <- writeMasked(value, mask)
} else {
print("uh oh")
}
}
print(format(sum(memory, na.rm = T), scientific = F))
|
ae92ba1b5f8fc384a76ec26fd416277961f6d487 | b33735d157848984bc57288b41ce2bded79e2710 | /R/Parametre.R | 1ea2dd7d304b72cfef44a7a688affb8610c6200e | [] | no_license | bachirtadde/CInLPN | 9a00a0cabf5af34ba8403b17d81763db6067b990 | bb51492aa116635c4bf70afd29de7bdd58de15a5 | refs/heads/master | 2023-06-30T15:21:04.684650 | 2023-06-20T14:31:49 | 2023-06-20T14:31:49 | 163,929,733 | 2 | 1 | null | 2021-03-28T14:58:25 | 2019-01-03T06:00:38 | R | UTF-8 | R | false | false | 10,552 | r | Parametre.R | #' Function to initialize parameters kin multivariate CInLPN model
#'
#' @param K number of the markers
#' @param nD number of the latent processes
#' @param vec_ncol_x0n vector of number of columns of model.matrix for baseline's submodel
#' @param n_col_x number of overall columns of model.matrix for change's submodel
#' @param nb_RE number of random effects
#' @param stochErr indicates if the structural model contain stochastique components
#' @param indexparaFixeUser position of parameters to be constrained
#' @param paraFixeUser values associated to the index of parameters to be constrained
#' @param L number of columns of model.matrix for temporal infuences model
#' @param paras.ini initial values for parameters, default values is NULL
#' @param ncolMod.MatrixY vector of number of columns of model.matrix for transformation submodel
#'
#' @return a list
#'
#'
Parametre <- function(K, nD, vec_ncol_x0n, n_col_x, nb_RE, stochErr=FALSE, indexparaFixeUser =NULL,
paraFixeUser=NULL, L = 1, paras.ini, ncolMod.MatrixY){
cl <- match.call()
# require(MASS)
#initialisation des parametres
# L = number of parameters for each coefficient a of matrix A
# K = number of outcomes
#======================================================================================
nb_paraD = nb_RE*(nb_RE+1)/2
indexparaFixeForIden <- NULL
# if user not specified initial parameters
if(is.null(paras.ini)){
p <- 0 # position in the initialize parameters
cpt1 <- 0 # compteur pour tous les paras
cpt2<-0 # compteur de boucle
#alpha_mu0
alpha_mu0 <-rep(.1,sum(vec_ncol_x0n))
p <- p+ sum(vec_ncol_x0n)
index_paraFixe_mu0_constraint <- NULL
for(n in 1:nD){
alpha_mu0[(cpt2+1)] <- 0
cpt2 <- cpt2 + vec_ncol_x0n[n]
cpt1 <- cpt1 + vec_ncol_x0n[n]
}
#alpha_mu
alpha_mu <-rep(.3,n_col_x)
p <- p+n_col_x
cpt1 <- cpt1 + n_col_x
alpha_D <-rep(.1,nb_paraD)
to_nrow <- nb_RE
i_alpha_D <- 0
index_paraFixeDconstraint <- NULL
for(n in 1:nD){
alpha_D[i_alpha_D+1] <- 1
i_alpha_D <- i_alpha_D + to_nrow
cpt1 <- cpt1 + to_nrow
to_nrow <- to_nrow -1
}
p <- p+nb_paraD
# para of transition matrix vec_alpha_ij
vec_alpha_ij <- rep(0.4, L*nD*nD)
cpt1 <- cpt1 + L*nD*nD
p <- p + L*nD*nD
#paraB
paraB <- NULL
if(stochErr==TRUE){
paraB <- rep(.15,nD)
cpt1 <- cpt1 + nD
p <- p + nD
}
#paraSig
paraSig <- rep(.5, K)
cpt1 <- cpt1 + K
p <- p + K
### parameters of the link function
ParaTransformY <- rep(1, ncolMod.MatrixY)
cpt1 <- cpt1 + ncolMod.MatrixY
p <- p + ncolMod.MatrixY
}
# if user specified initial parameters
if(!is.null(paras.ini)){
p <- 0 # position in the initialize parameters
cpt1 <-0 # counter for parameterd
cpt2<-0 # loop counter
#alpha_mu0
alpha_mu0 <- paras.ini[(p+1):(p+sum(vec_ncol_x0n))]
p <- p+ sum(vec_ncol_x0n)
index_paraFixe_mu0_constraint <-NULL
for(n in 1:nD){
# CPL : not necessary
#alpha_mu0[(cpt2+1)] <- 0
cpt2 <- cpt2 + vec_ncol_x0n[n]
cpt1 <- cpt1 + vec_ncol_x0n[n]
}
paraFixe_mu0_constraint <- rep(1,nD)
#alpha_mu
alpha_mu <- paras.ini[(p+1):(p+n_col_x)]
p <- p+n_col_x
cpt1 <- cpt1 + n_col_x
#alpha_D parameters for cholesky of all random effects
alpha_D <- paras.ini[(p+1):(p+nb_paraD)]
to_nrow <- nb_RE
i_alpha_D <- 0
index_paraFixeDconstraint <- NULL
for(n in 1:nD){
# CPL : not necessary
#alpha_D[i_alpha_D+1] <- 1
i_alpha_D <- i_alpha_D + to_nrow
cpt1 <- cpt1 + to_nrow
to_nrow <- to_nrow -1
}
p <- p+nb_paraD
paraFixeDconstraint <- rep(1,nD)
# para of transition matrix vec_alpha_ij
vec_alpha_ij <- paras.ini[(p+1):(p + L*nD*nD)]
p <- p + L*nD*nD
cpt1 <- cpt1 + L*nD*nD
# paraB
paraB <- NULL
if(stochErr==TRUE){
paraB <- paras.ini[(p+1):(p + nD)]
p <- p + nD
cpt1 <- cpt1 + nD
}
#paraSig
paraSig <- paras.ini[(p+1):(p + K)]
p <- p + K
cpt1 <- cpt1 + K
### para of link function
ParaTransformY <- paras.ini[(p+1):(p + ncolMod.MatrixY)]
cpt1 <- cpt1 + ncolMod.MatrixY
p <- p + ncolMod.MatrixY
}
#final vector of initial parameters
paras <- c(alpha_mu0, alpha_mu, alpha_D, vec_alpha_ij, paraB, paraSig, ParaTransformY )
#initialisation
# paraOpt <- paras
posfix <- rep(0,length(paras)) # 0 = non fixe 1 = fixe # initialisation
# constraining of parameters==============
indexFixe <- indexparaFixeForIden
if(!is.null(indexparaFixeUser)){
if(length(indexparaFixeUser) != length(paraFixeUser)){
stop("The length of paraFixe does not correspond with the length of indexparaFixe")
}
indexFixe <- sort(unique(c(indexFixe,indexparaFixeUser)))
}
paraFixe <- rep(NA, length(posfix))
if(!is.null(paraFixeUser)){
paraFixe[c(indexparaFixeUser)]<- paraFixeUser
}
paraFixe[index_paraFixe_mu0_constraint]<- rep(0,K)
paraFixe[index_paraFixeDconstraint]<- rep(1,K)
if(sum(!is.na(paraFixe))==0){
paraFixe = -1
paraOpt <- paras
}else{
paraFixe <- paraFixe[!is.na(paraFixe)]
posfix[indexFixe] <- 1 # fixiation des paras d'indexes dans indexparaFixe
paras[indexFixe] <- paraFixe
paraOpt <- paras[-indexFixe]
}
return(list(para = paras, paraOpt = paraOpt, paraFixe = paraFixe, posfix = posfix, L = L))
}
#' Initialisation of parameters
#'
#' Obtains starting values for the multivariate CInLPN model by fitting one
#' univariate model per outcome and concatenating the resulting estimates
#' into a single parameter vector (in the order expected by the
#' multivariate estimation).
#'
#' @param data indicates the data frame containing all the variables for estimating the model
#' @param outcomes names of the outcomes
#' @param mapped.to.LP indicates which outcome measured which latent process, it is a mapping table between
#' outcomes and latents processes
#' @param fixed_X0.models fixed effects in the submodel for the baseline level of processes
#' @param fixed_DeltaX.models a two-sided linear formula object for specifying the response outcomes (one the left part of ~ symbol)
#' and the covariates with fixed-effects (on the right part of ~ symbol)
#' @param randoms_DeltaX.models random effects in the submodel for change over time of latent processes
#' @param randoms_X0.models random effects in the submodel for the baseline level of processes
#' @param nb_RE number of random effects
#' @param mod_trans.model model for elements of the temporal transition matrix, which captures
#' the temporal influences between latent processes
#' @param subject indicates the name of the covariate representing the grouping structure
#' @param Time indicates the name of the covariate representing the time
#' @param link indicates link used to transform outcome
#' @param knots indicates position of knots used to transform outcomes
#' @param DeltaT indicates the discretization step
#' @param maxiter maximum iteration
#' @param epsa threshold for the convergence criterion on the parameters, default value is 1.e-4
#' @param epsb threshold for the convergence criterion on the likelihood, default value is 1.e-4
#' @param epsd threshold for the convergence criterion on the derivatives, default value is 1.e-4
#' @param nproc number of processor to be used for running this package
#' @param print.info to print information during the liklihood optimization, default value is TRUE
#'
#' @return a numeric vector of initial parameter values
f_paras.ini <- function(data, outcomes, mapped.to.LP, fixed_X0.models, fixed_DeltaX.models, randoms_DeltaX.models,
                        randoms_X0.models, nb_RE, mod_trans.model, subject,
                        Time, link, knots, DeltaT, maxiter = 25, epsa = .0001, epsb = .0001,
                        epsd = .0001, nproc = 1, print.info = TRUE)
{
  K <- length(unlist(outcomes))  # number of markers
  nD <- length(fixed_X0.models)  # number of latent processes

  # Accumulators for each component of the final parameter vector
  para.fixed_X0 <- NULL
  para.fixed_DeltaX <- NULL
  para.trans <- NULL
  para.Sig <- NULL
  ParaTransformY <- NULL

  for(k in 1 : K){
    # NOTE(review): `data` is filtered in place, so rows with NA on outcome k
    # remain excluded when fitting all subsequent outcomes -- confirm intended.
    data <- data[!is.na(data[,outcomes[k]]),]

    # Build the univariate model specification for outcome k
    fixed_DeltaX <- as.formula(paste(outcomes[k],"~",fixed_DeltaX.models[mapped.to.LP[k]], sep=""))
    fixed_X0 <- as.formula(paste("~", fixed_X0.models[mapped.to.LP[k]], sep=""))
    n_col_x_k <- ncol(model.matrix(object = fixed_DeltaX,data = data ))
    n_col_x0_k <- ncol(model.matrix(object = fixed_X0,data = data ))
    mod_randoms_DeltaX <- as.formula(paste("~", randoms_DeltaX.models[mapped.to.LP[k]], sep=""))
    mod_trans <- as.formula(paste("~", mod_trans.model, sep=" "))

    # Fix the baseline intercept to 0 and the first covariance parameter to 1
    # for identifiability of the univariate fit
    indexparaFixeUser <- c(1,(n_col_x0_k+n_col_x_k+1))
    paraFixeUser <- c(0,1)

    structural.model <- list(fixed.LP0 = fixed_X0,
                             fixed.DeltaLP = fixed_DeltaX,
                             random.DeltaLP = mod_randoms_DeltaX,
                             trans.matrix = mod_trans,
                             delta.time = DeltaT)
    measurement.model <- list(link.functions = list(links = link[k], knots = knots[k]))
    parameters <- list(paras.ini = NULL, Fixed.para.index = indexparaFixeUser, Fixed.para.values = paraFixeUser)
    option <- list(nproc = nproc, print.info = print.info, maxiter = maxiter)

    # Fit the univariate model for outcome k
    mod <- CInLPN(structural.model = structural.model, measurement.model = measurement.model, parameters = parameters,
                  option = option, Time = Time, subject = subject, data = data, TimeDiscretization = FALSE)
    L <- ncol(mod$modA_mat)

    # Walk the estimated coefficient vector with a running index i1
    coefficients <- as.vector(mod$coefficients)
    i1 <- 0
    # Baseline/change fixed effects are harvested only once per latent process
    # (scalar condition, hence || / && rather than the vectorized | / &)
    if(k==1 || (k>1 && (mapped.to.LP[k-1] != mapped.to.LP[k]))){
      para.fixed_X0 <- c(para.fixed_X0, coefficients[(i1+1):(i1+n_col_x0_k)])
      i1 <- i1+n_col_x0_k
      para.fixed_DeltaX <- c(para.fixed_DeltaX, coefficients[(i1+1):(i1+n_col_x_k)])
      i1 <- i1 + n_col_x_k + mod$nb_paraD
    }
    else{
      # NOTE(review): this branch skips n_col_x0_k + nb_paraD positions but not
      # n_col_x_k, unlike the branch above -- confirm the offset is correct.
      i1 <- i1 + n_col_x0_k + mod$nb_paraD
    }
    # Transition-matrix parameters; when moving to a new latent process, pad
    # with nD*L zeros before appending the new block
    if(k==1){
      para.trans <- c(para.trans, coefficients[(i1+1):(i1+L)])
    }
    if(k!=1 && (mapped.to.LP[k-1] != mapped.to.LP[k])){
      para.trans <- c(para.trans, rep(0,nD*L), coefficients[(i1+1):(i1+L)])
    }
    i1 <- i1+L
    # Measurement-error parameter for outcome k.
    # NOTE(review): taken from mod$b while the other components come from
    # mod$coefficients -- confirm this asymmetry is intentional.
    para.Sig <- c(para.Sig, mod$b[(i1+1):(i1+1)])
    i1 <- i1+1
    # Link-function (transformation) parameters for outcome k
    ParaTransformY <- c(ParaTransformY, coefficients[(i1+1):(i1+mod$length_para_trY)])
    i1 <- i1+mod$length_para_trY
  }
  # Random-effect parameters all start at a small common value
  para.RE <- rep(0.1, (nb_RE*(nb_RE+1)/2))
  # Assemble the pieces in the canonical order
  paras.ini <- c(para.fixed_X0, para.fixed_DeltaX, para.RE, para.trans, para.Sig, ParaTransformY)
  return(paras.ini)
}
|
2463b202a3be859b5ca26b10dc9dec6d9c88b1f7 | eaa0b720232f33314a32206393c89892b3c9e950 | /create_cluster.R | f85b5d9c63592c08f37c09c48e746c8958f3914c | [] | no_license | juliancabezas/random_functions | 8ccf218a30f2f18469d4e47fa06783617a8464fb | a8c55c477dd603556d102d2c81eff2465eabdfbe | refs/heads/master | 2021-07-13T00:30:32.457297 | 2020-06-07T12:45:58 | 2020-06-07T12:45:58 | 154,214,345 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,653 | r | create_cluster.R | # Function to create a cluster for paralelizing in Windows machines
# Function to create a cluster for parallelizing in Windows machines.
# It creates a cluster with n nodes and loads the session's packages on every
# node, so it can use A LOT of RAM.
# You can use the returned cluster with the parallel package functions
# (e.g. parLapply). Use with precaution: remember to destroy the cluster
# (parallel::stopCluster) after using it.
# Based on parallelsugar package https://github.com/nathanvan/parallelsugar
create_cluster <- function(nnodes, libroute = NULL) {
  # nnodes: Number of nodes
  # libroute: Provided library route (different of the default library);
  #           NULL means the default library paths (.libPaths())

  # Start the worker processes first so they can be shut down if any of the
  # setup steps below fails (the original wrapped everything in a tryCatch
  # with no handler, which leaked the workers on error).
  cl <- parallel::makeCluster(nnodes)
  tryCatch({
    # Packages loaded in the current session: base packages plus the
    # additionally attached ones
    loaded.package.names <- c(
      sessionInfo()$basePkgs,
      names(sessionInfo()$otherPkgs)
    )
    # Ship the package list to every node
    parallel::clusterExport(cl,
                            "loaded.package.names",
                            envir = environment())
    # Attach the same packages on every node. library()'s default is
    # lib.loc = NULL (the standard library paths), so passing libroute
    # directly covers both the default and the custom-library case
    # (useful when working in remote machines).
    parallel::parLapply(cl, seq_along(cl), function(xx) {
      lapply(loaded.package.names, function(yy) {
        library(yy, character.only = TRUE, lib.loc = libroute)
      })
    })
  }, error = function(e) {
    # Do not leak worker processes when setup fails
    parallel::stopCluster(cl)
    stop(e)
  })
  return(cl)  # The result is the ready-to-use cluster
}
|
3c6914c801e8ceff0f789eb136b652005bec9682 | a0201022b3185ea7f295c6902950764fa8052418 | /Scripts/22_assemble_partner_data.R | 56468624037e18ed87becca6857f941d33ca0323 | [] | no_license | USAID-OHA-SI/pump_up_the_jam | 78d117ca04e0217f3cf73a5072de9edbb322b1ec | edb897c073e678f554888949950fd71f56f7e170 | refs/heads/master | 2023-02-07T02:14:37.601194 | 2023-02-01T02:03:50 | 2023-02-01T02:03:50 | 246,078,974 | 0 | 0 | null | 2020-06-26T18:34:12 | 2020-03-09T15:55:57 | R | UTF-8 | R | false | false | 7,987 | r | 22_assemble_partner_data.R | ## PROJECT: Pump up the jam
## AUTHOR: achafetz | USAID
## LICENSE: MIT
## PURPOSE: assemble MER and Partner HFR data
## DATE: 2020-05-06
## UPDATED: 2020-05-09
# DEPENDENCIES ------------------------------------------------------------
library(tidyverse)
library(vroom)
library(lubridate)
library(Wavelength)
library(tidylog, warn.conflicts = FALSE)
# GLOBAL VARIABLES --------------------------------------------------------

start_date <- "2020-01-20" # first date of the weekly series built below
weeks <- 12 # number of weekly dates generated from start_date
datim_folder <- "Data" # folder with DATIM downloads and lookup tables
processed_folder <- "Dataout/PartnerProcessed" # processed partner HFR submissions
out_folder <- "Dataout" # destination folder for the assembled output
# APPEND MER RESULTS/TARGETS ----------------------------------------------

# unzip Google Drive download of all MER FY20 data
list.files(datim_folder, "DATIM", full.names = TRUE) %>% unzip(exdir = datim_folder)

# store file paths as a vector for import
files <- list.files(datim_folder, "HFR_FY20Q1.*csv", full.names = TRUE)

# import and bind all files together
df_datim <- map_dfr(.x = files,
                    .f = ~readr::read_csv(.x))

# derive TX_MMD rows from TX_CURR: use the Q1 results as the stand-in
# "targets" and blank out the results column
df_datim_mmd <- df_datim %>%
  filter(indicator == "TX_CURR") %>%
  mutate(indicator = "TX_MMD",
         mer_targets = mer_results,
         mer_results = NA)

df_datim <- bind_rows(df_datim, df_datim_mmd)
rm(df_datim_mmd)

# aggregate to site (orgunituid) x mechanism x indicator level
# (summarise_at is superseded by across() in dplyr >= 1.0; kept for
# compatibility with the dplyr version this script was written against)
df_datim <- df_datim %>%
  group_by(orgunituid, mech_code, fy, indicator) %>%
  summarise_at(vars(starts_with("mer")), sum, na.rm = TRUE) %>%
  ungroup()

# weekly dates across which to repeat df_datim
dates <- as_date(start_date) %>% seq(by = 7, length.out = weeks)

# repeat df_datim so there is a full target/result set for each week
df_datim_rpt <- map_dfr(.x = dates,
                        .f = ~mutate(df_datim, date = .x))

# change mech code to character (so the later joins by mech_code align
# with the HFR data)
df_datim_rpt <- mutate(df_datim_rpt, mech_code = as.character(mech_code))

# add an (empty) otherdisaggregate column so the HFR join keys line up
df_datim_rpt <- mutate(df_datim_rpt, otherdisaggregate = NA_character_)

# remove unzipped files (just keep zipped folder)
unlink(files)

# remove extra objects
rm(df_datim, dates, files)
# IMPORT + AGGREGATE HFR --------------------------------------------------

# import partner-submitted data (hfr_read comes from the Wavelength package)
df_hfr_ptnr <- list.files(processed_folder, full.names = TRUE) %>%
  hfr_read()

# limit partner file to just HFR indicators
df_hfr_ptnr <- df_hfr_ptnr %>%
  filter(indicator %in% c("HTS_TST", "HTS_TST_POS",
                          "TX_NEW", "PrEP_NEW",
                          "VMMC_CIRC", "TX_CURR",
                          "TX_MMD"))

# identify partner mechanisms, used below to limit the country file
partner_mechs <- unique(df_hfr_ptnr$mech_code)

# collapse MMD disaggregates to under/over 3 months; anything else -> NA
df_hfr_ptnr <- df_hfr_ptnr %>%
  mutate(otherdisaggregate = case_when(otherdisaggregate == "<3 months" ~ "<3 months",
                                       otherdisaggregate %in% c("3-5 months", "6 months or more") ~ "+3 months"))

# add a total row per group by summing across the MMD disaggregates
# (grouping on every column except the disagg and the value)
mmd_grp <- setdiff(names(df_hfr_ptnr), c("otherdisaggregate", "val"))

df_mmd_ptnr <- df_hfr_ptnr %>%
  filter(!is.na(otherdisaggregate)) %>%
  group_by_at(vars(all_of(mmd_grp))) %>%
  summarise_at(vars(val), sum, na.rm = TRUE) %>%
  ungroup()

df_hfr_ptnr <- bind_rows(df_hfr_ptnr, df_mmd_ptnr)

# grouping vars shared by all aggregations and joins below
grp_vars <- c("orgunituid", "mech_code", "fy", "date", "indicator", "otherdisaggregate")

# aggregate partner values to the grp_vars level
df_hfr_ptnr <- df_hfr_ptnr %>%
  group_by_at(vars(all_of(grp_vars))) %>%
  summarise(hfr_results_ptnr = sum(val, na.rm = TRUE)) %>%
  ungroup()

# import country-submitted data
df_hfr_ctry <- file.path(datim_folder, "HFR_2020.07_Tableau_20200507.zip") %>%
  hfr_read()

# keep only mechanisms present in the partner submissions
df_hfr_ctry <- df_hfr_ctry %>%
  filter(mech_code %in% partner_mechs)

# same MMD cleanup as above; the country file has extra spelling variants
df_hfr_ctry <- df_hfr_ctry %>%
  mutate(otherdisaggregate = case_when(otherdisaggregate %in% c("<3 months", "<3months") ~ "<3 months",
                                       otherdisaggregate %in% c("3-5 months", "6months","6 months or more") ~ "+3 months"))

mmd_grp <- setdiff(names(df_hfr_ctry), c("otherdisaggregate", "val"))

df_mmd_ctry <- df_hfr_ctry %>%
  filter(!is.na(otherdisaggregate)) %>%
  group_by_at(vars(all_of(mmd_grp))) %>%
  summarise_at(vars(val), sum, na.rm = TRUE) %>%
  ungroup()

df_hfr_ctry <- bind_rows(df_hfr_ctry, df_mmd_ctry)

# aggregate country values to the grp_vars level
df_hfr_ctry <- df_hfr_ctry %>%
  group_by_at(vars(all_of(grp_vars))) %>%
  summarise(hfr_results_ctry = sum(val, na.rm = TRUE)) %>%
  ungroup()

# replace the literal "\N" that SQL exports use for missing values with NA
df_hfr_ctry <- df_hfr_ctry %>%
  mutate_if(is.character, ~na_if(., "\\N"))
# MERGE HFR + DATIM -------------------------------------------------------

# merge both HFR datasets (partner- and country-submitted) on the shared keys
df_hfr <- df_hfr_ptnr %>%
  full_join(df_hfr_ctry, by = grp_vars)

# merge HFR with the weekly-replicated DATIM results/targets,
# limited to the partner mechanisms
df_joint <- df_datim_rpt %>%
  filter(mech_code %in% partner_mechs) %>%
  full_join(df_hfr, by = grp_vars)

# make sure all rows have the HFR period (missing for DATIM-only rows);
# hfr_fix_date/hfr_assign_pds come from the Wavelength package
df_joint <- df_joint %>%
  hfr_fix_date() %>%
  hfr_assign_pds()

# arrange var order
df_joint <- df_joint %>%
  select(fy, hfr_pd, date, everything())

# MERGE META --------------------------------------------------------------

# import the org hierarchy lookup (drop the unused level column)
df_hierarchy <- file.path(datim_folder, "HFR_FY20_GLOBAL_orghierarchy_20200306.csv") %>%
  read_csv() %>%
  select(-level)

# merge hierarchy onto joint file by site UID
df_joint <- left_join(df_joint, df_hierarchy, by = "orgunituid")

# import mechanism info (all columns as character; drop operatingunit to
# avoid a duplicate column on join)
df_mech <- file.path(datim_folder, "HFR_FY20_GLOBAL_mechanisms_20200306.csv") %>%
  read_csv(col_types = c(.default = "c")) %>%
  select(-operatingunit)

# merge mech info onto joint file
df_joint <- left_join(df_joint, df_mech, by = "mech_code")

# arrange var order
df_joint <- df_joint %>%
  select(operatingunit:facility, orgunit, orgunituid, latitude, longitude,
         fundingagency, mech_code, mech_name, primepartner,
         fy, hfr_pd, date,
         indicator, otherdisaggregate,
         hfr_results_ptnr, hfr_results_ctry, mer_results, mer_targets)

# fix orphan OUs b/c of inaccurate/missing UIDs: pull the mechanism->OU map
# straight from the DATIM SQL view and backfill missing operatingunit
df_mech_ou <- read_csv("https://www.datim.org/api/sqlViews/fgUtV6e9YIX/data.csv")

df_joint <- df_mech_ou %>%
  select(mech_code = code, ou) %>%
  left_join(df_joint, ., by = "mech_code")

df_joint <- df_joint %>%
  mutate(operatingunit = ifelse(is.na(operatingunit), ou, operatingunit)) %>%
  select(-ou)

# map on ISO codes (iso_map comes from Wavelength): once for the OU...
df_joint <- df_joint %>%
  left_join(iso_map, by = "operatingunit") %>%
  select(-regional) %>%
  rename(iso_ou = iso)

# ...and once for the country
df_joint <- df_joint %>%
  left_join(iso_map, by = c("countryname" = "operatingunit")) %>%
  select(-regional) %>%
  rename(iso_ctry = iso)

df_joint <- df_joint %>%
  select(operatingunit, countryname, iso_ou, iso_ctry, everything())

# add in the name of the central partner that submitted each mechanism
df_ptnr_mechs <- read_csv("Dataout/HFR_CentralPartnerMechs.csv",
                          col_types = c(.default = "c"))

df_joint <- df_ptnr_mechs %>%
  distinct(sub_partner = partner, mech_code) %>%
  left_join(df_joint, ., by = "mech_code")

# EXPORT ------------------------------------------------------------------

# store file name, stamped with today's date
filename <- paste0("HFR_PARTNER_FY20PD0507_", format(Sys.Date(), "%Y%m%d"), ".csv")

# save (blank cells rather than "NA" strings)
write_csv(df_joint, file.path(out_folder, filename), na = "")
|
cbdc3d097dea56b61bd8e0dfe5d7f6848b1d24eb | c75aa783354f4a3c896f1aaff4da517de88bdc5d | /Interrupted Time Series Analysis.R | 4cdf423b8db16a3a635b109dbdf116979252adea | [] | no_license | KyriacosLeonidou/MSIN0114-DISSERTATION | 7c97ba9c176c04171f3f9ada624bea9ce1fcbbb4 | 06f20d1bda9851a773dbe47ca8cf99f6ccefe3e4 | refs/heads/main | 2023-06-29T18:00:37.150933 | 2021-08-01T12:16:59 | 2021-08-01T12:16:59 | 391,590,640 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,691 | r | Interrupted Time Series Analysis.R | #install.packages( "dplyr" )
# One-time package installation, kept commented out on purpose
#install.packages( "dplyr" )
#install.packages( "pander" )
#install.packages( "stargazer" )
#install.packages( "scales" )
#install.packages("stargazer")

# load the packages
# NOTE(review): stargazer and dplyr end up attached more than once
# (via p_load and/or repeated library() calls) -- harmless but redundant
pacman::p_load(data.table, ggplot2,psych,dplyr)
library( "scales" )
library( "stargazer" )
library( "dplyr" )
library( "pander" )
library( "Wats" )
library(stargazer)

# load the data (absolute Windows paths: adjust when running elsewhere)
# data including the first intervention
li<- fread("C:/working/MAP PROJECT/DATA/li48894114 - 1st change.csv",
           data.table = T,
           stringsAsFactors = F)
# data including the second intervention
li2<- fread("C:/working/MAP PROJECT/DATA/li48894114 - 2nd change.csv",
            data.table = T,
            stringsAsFactors = F)
plot( li$T, li$Y,
bty="n", pch=19, col="gray",
ylim = c(0, 30), xlim=c(0,85),
xlab = "Time (days)",
ylab = "Cumulative CPA" )
# Line marking the interventions
#first intervention --> day 14
abline( v=14, col="firebrick", lty=3 )
text( 14, 30, "1st Trader Intervention", col="firebrick", cex=1, pos=4 )
abline(h=10.15, col="black", lty=3)
#second intervention --> day 42
abline( v=42, col="darkgreen", lty=3 )
text( 42, 30, "2nd Trader Intervention", col="darkgreen", cex=1, pos=4 )
abline(h=7.12, col="black", lty=3)
#add the last day --> day 82
abline( v=81, col="orange", lty=3 )
text(68, 25, "Last Day", col="orange", cex=1, pos=4 )
#abline(h=7.12, col="black", lty=3)
# Add the regression line
ts <- lm( li$Y ~ li$T + li$D + li$P, data=li )
lines( li$T, ts$fitted.values, col="steelblue", lwd=2 )
#run the regression and provide the regression summary
regli = lm ( Y ~ T + D + P, data=li)
stargazer( regli,
type = "html",
dep.var.labels = ("Cumulative CPA"),
column.labels = ("Model results"),
covariate.labels = c("Time", "Treatment",
"Time Since Treatment"),
omit.stat = "all",
digits = 2,
out="models.html")
# Predicted value at day 15: one day after the first intervention
# (T = 15, D = 1 treatment active, P = 1 day since treatment)
data1 <- as.data.frame( cbind( T = 15, D = 1, P = 1 ))
y1 <- predict( regli, data1 )

# We plot our initial observations, the column Y in our dataset
plot( li$T,li$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# We add a point showing the cumulative cpa at day 15
points( 15, y1, col = "dodgerblue4",
        pch = 19, bg = "dodgerblue4", cex = 2 )
text( 15, y1, labels = "t = 15", pos = 4, cex = 1 )

# Line marking the intervention (day 14)
abline( v=14, col="red", lty=2 )
# Predicted value at day 19: five days after the first intervention
# (T = 19, D = 1, P = 5)
data2 <- as.data.frame( cbind( T = 19, D = 1, P = 5 ))
y2 <- predict( regli, data2 )

# We plot our initial observations, the column Y in our dataset
plot( li$T,li$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# We add a point showing the cumulative cpa at day 15
points(15, y1, col = "dodgerblue4", pch = 19, bg = "red", cex = 2)
# We add a new point showing the cumulative cpa at day 19
points(19, y2, col = "dodgerblue4", pch = 19, bg = "red", cex = 2)
# Labels for the two predicted outcomes
text(15, y1, labels = "t = 15", pos = 4, cex = 1)
text(19, y2, labels = "t = 19", pos = 4, cex = 1)

# Line marking the intervention (day 14)
abline( v=14, col="red", lty=2 )
# Counterfactual at day 19: the model's prediction had the intervention not
# occurred (treatment terms switched off: D = 0, P = 0)
data3 <- as.data.frame(cbind( T= 19, D = 0, P = 0))
y3 <- predict(regli, data3)

# Observed points
plot( li$T,li$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# Predicted value at day 19 (with treatment)
points(19, y2, col = "dodgerblue4", pch = 19, bg = "red", cex = 2)
# Counterfactual value at day 19 (without treatment)
points(19, y3, col = "darkorange2", pch = 19, bg = "red",cex = 2)
# Labels
text(19, y2, labels = "Y at t = 19", pos = 4, cex = 1)
text(19, y3, labels = "C at t = 19", pos = 4, cex = 1)

# Line marking the intervention at day 14 (the original drew it at v=13
# here, inconsistent with the day-14 markers used elsewhere in this script)
abline( v=14, col="red", lty=2 )
text( 14, 30, "1st Trader Intervention", col="firebrick", cex=1, pos=4 )
# Compute predicted and counterfactual values at every time point
pred1 <- predict(regli, li)
# Counterfactual design: same time grid but treatment never occurs
# (D = 0, P = 0 throughout; the scalars recycle across the 81 rows)
datanew <- as.data.frame(cbind(T = 1:81, D = 0, P = 0))
pred2 <- predict(regli, datanew)

# Observed points
plot( li$T,li$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# Fitted trajectory (solid, split at the intervention) and the
# counterfactual trajectory (dashed)
lines( 1:14, pred1[1:14], col="dodgerblue4", lwd = 3 )
lines( 14:81, pred1[14:81], col="dodgerblue4", lwd = 3 )
lines( 14:81, pred2[14:81], col="darkorange2", lwd = 3, lty = 5 )

# Annotate the lines
text(0, 17, labels = "Predicted values", pos = 4, cex = 1, col = "dodgerblue4")
text(22, 27, labels = "Counterfactual values", pos = 4, cex = 1, col = "darkorange2")

# Line marking the intervention at day 14 (the original used v=13 while the
# adjacent label was placed at day 14)
abline( v=14, col="darkorange2", lty=2 )
text( 14, 30, "1st Trader Intervention", col="firebrick", cex=1, pos=4 )
# SECOND INTERVENTION ------------------------------------------------------
# Same ITS analysis on the dataset coded around the day-42 intervention
plot( li2$T, li2$Y,
      bty="n", pch=19, col="gray",
      ylim = c(0, 30), xlim=c(0,60),
      xlab = "Time (days)",
      ylab = "Cumulative CPA" )

# Line marking the intervention
abline( v=42, col="darkgreen", lty=3 )
text( 42, 30, "2nd Trader Intervention", col="darkgreen", cex=1, pos=4 )

# Fit the segmented regression once and reuse it for the overlaid fit line
# and the summary table (the original fit the identical model twice, as
# `ts2` and again as `regli2` with `=` assignment)
regli2 <- lm(Y ~ T + D + P, data = li2)
lines( li2$T, regli2$fitted.values, col="brown", lwd=2 )

# Regression summary as an HTML table
stargazer( regli2,
           type = "html",
           dep.var.labels = ("Cumulative CPA"),
           column.labels = ("Model results"),
           covariate.labels = c("Time", "Treatment",
                                "Time Since Treatment"),
           omit.stat = "all",
           digits = 2,
           out="models.html")
# Predicted value at day 43: one day after the second intervention
# (T = 43, D = 1, P = 1).
# NOTE(review): data1/y1 (and data2/y2 below) overwrite the objects from the
# first-intervention section -- this script is order-dependent.
data1 <- as.data.frame( cbind( T = 43, D = 1, P = 1 ))
y1 <- predict( regli2, data1 )

# We plot our initial observations, the column Y in our dataset
plot( li2$T,li2$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# at the predicted value
points( 43, y1, col = "dodgerblue4",
        pch = 19, bg = "dodgerblue4", cex = 2 )
text( 43, y1, labels = "t = 43", pos = 4, cex = 1 )

# Line marking the intervention (day 42)
abline( v=42, col="red", lty=2 )

# Predicted value at day 47: five days after the second intervention
# (T = 47, D = 1, P = 5)
data2 <- as.data.frame( cbind( T = 47, D = 1, P = 5 )) # New data
y2 <- predict( regli2, data2 )

# We plot our initial observations, the column Y in our dataset
plot( li2$T,li2$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# plot the predicted value at day 43
points(43, y1, col = "dodgerblue4", pch = 19, bg = "red", cex = 2)
# plot the predicted value at day 47
points(47, y2, col = "dodgerblue4", pch = 19, bg = "red", cex = 2)
# Labels for the predicted outcomes
text(43, y1, labels = "t = 43", pos = 4, cex = 1)
text(47, y2, labels = "t = 47", pos = 4, cex = 1)

# Line marking the intervention (day 42)
abline( v=42, col="red", lty=2 )
# Counterfactual at day 47: prediction had the second intervention not
# occurred (D = 0, P = 0)
data3 <- as.data.frame(cbind( T= 47, D = 0, P = 0))
y3 <- predict(regli2, data3)

# Observed points
plot( li2$T,li2$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# Predicted value at day 47 (with treatment)
points(47, y2, col = "dodgerblue4", pch = 19, bg = "red", cex = 2)
# Counterfactual value at day 47 (without treatment)
points(47, y3, col = "darkorange2", pch = 19, bg = "red",cex = 2)
# Labels -- the original carried over "t = 19" (and an x-position of 19)
# from the first-intervention code; corrected to day 47
text(47, y2, labels = "Y at t = 47", pos = 4, cex = 1)
text(47, y3, labels = "C at t = 47", pos = 4, cex = 1)

# Line marking the intervention
abline( v=42, col="darkgreen", lty=2 )
text(42, 30, "2nd Trader Intervention", col="darkgreen", cex=1, pos=4 )
# Compute predicted and counterfactual values at every time point for the
# second-intervention model
pred1 <- predict(regli2, li2)
# Counterfactual design: treatment never occurs (D = 0, P = 0; the scalars
# recycle across the 81 rows)
datanew <- as.data.frame(cbind(T = 1:81, D = 0, P = 0))
pred2 <- predict(regli2, datanew)

# Observed points
plot( li2$T,li2$Y,
      bty="n",
      col = gray(0.5,0.5), pch=19,
      xlim = c(1, 60),
      ylim = c(0, 30),
      xlab = "Time (days)",
      ylab = "Cumulative CPA")

# Fitted trajectory (solid, split at the day-42 intervention) and the
# counterfactual trajectory (dashed)
lines( 1:42, pred1[1:42], col="brown", lwd = 3 )
lines( 42:81, pred1[42:81], col="brown", lwd = 3 )
lines( 42:81, pred2[42:81], col="darkorange2", lwd = 3, lty = 5 )

# Annotate the lines
text(0, 17, labels = "Predicted values", pos = 4, cex = 1, col = "brown")
text(43, 2, labels = "Counterfactual values", pos = 4, cex = 1, col = "darkorange2")

# Line marking the intervention
abline( v=42, col="darkgreen", lty=2 )
text( 42, 30, "2nd Trader Intervention", col="darkgreen", cex=1, pos=4 )
|
e0aa0cf53d1e1b0ba245993b75d505da3ef1a38c | 49e6913808a44eeacf17f6f12b7eaa98a4500355 | /SamplingData.R | a496aa0bfda279510914d39e4da840da96509d15 | [] | no_license | grundc/DataScience_CapStone | 096628badb24e04878f832bd336acf9fc1fc4d4f | 9b62880c28cd046e7bfdbc2f864a84da3c1c3ece | refs/heads/master | 2021-01-11T23:41:36.928022 | 2017-09-04T09:54:10 | 2017-09-04T09:54:10 | 78,621,739 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,430 | r | SamplingData.R | # setwd("U://My Documents/GitHub/DataScience_CapStone") # ROCHE
# setwd("C:/Users/Grund/Documents/GitHub/DataScience_CapStone") # HOME
# --------------------------------------------------------------------------
# ?connections
# file(description = "", open = "", blocking = TRUE,
# encoding = getOption("encoding"), raw = FALSE,
# method = getOption("url.method", "default"))
#
# url(description, open = "", blocking = TRUE,
# encoding = getOption("encoding"),
# method = getOption("url.method", "default"))
#
# gzfile(description, open = "", encoding = getOption("encoding"),
# compression = 6)
#
# bzfile(description, open = "", encoding = getOption("encoding"),
# compression = 9)
#
# xzfile(description, open = "", encoding = getOption("encoding"),
# compression = 6)
#
# unz(description, filename, open = "", encoding = getOption("encoding"))
#
# pipe(description, open = "", encoding = getOption("encoding"))
#
# fifo(description, open = "", blocking = FALSE,
# encoding = getOption("encoding"))
#
# socketConnection(host = "localhost", port, server = FALSE,
# blocking = FALSE, open = "a+",
# encoding = getOption("encoding"),
# timeout = getOption("timeout"))
#
# open(con, ...)
# ## S3 method for class 'connection'
# open(con, open = "r", blocking = TRUE, ...)
#
# close(con, ...)
# ## S3 method for class 'connection'
# close(con, type = "rw", ...)
#
# flush(con)
#
# isOpen(con, rw = "")
# isIncomplete(con)
# ---------------------------------------------------------------------------------------
# Draw an ~10% Bernoulli sample of the first 10,000 lines of the twitter
# corpus and write it to a smaller file for faster exploration.
con <- file("data/en_US.twitter.txt", "r")
lines <- readLines(con, 10000)
close(con)
# One Bernoulli(0.1) draw per line, vectorised -- the original grew `rows`
# with c() inside a loop, which is O(n^2) in the number of kept lines, and
# also initialised an unused `count` variable.
keep <- rbinom(length(lines), 1, prob = 0.1) == 1
rows <- lines[keep]
write(rows, "data/sample_en_US_twitter.txt")
# ------------------------------------------
# Count amount rows
con <- file("data/en_US.twitter.txt", "r")
rows <- character()
count <- 0
for (row in readLines(con) )
{
count <- count + 1
}
close(con)
count
# ------------------------------------------
# ------------------------------------------
# Count amount rows
con <- file("data/en_US.twitter.txt", "r")
rows <- character()
count <- 0
for (row in readLines(con) )
{
count <- count + 1
}
close(con)
count
# ------------------------------------------
|
4ff37f35b8394b582a63769288857c8864a7430b | d9daae750182699f8b405d279e37ee872c7befeb | /my_first_r.R | 5df25fde13557eeefffb64be9a18c3b761b812cb | [] | no_license | shah1mohit/Intro_RRepo | 95ae7084942a0c7ea0278cc07a58a70370180e4e | 9fd2be4a6523d15d32f53cc7726631fc8c0973c4 | refs/heads/master | 2020-12-24T11:45:17.644998 | 2016-11-06T21:20:00 | 2016-11-06T21:20:00 | 73,020,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | my_first_r.R | # Creates a simple plot
# Draw a basic scatter plot of three points.
point_x <- c(1, 2, 3)
point_y <- c(5, 2, 1)
plot(point_x, point_y)
|
a31630e10e45e797243869a5787b03f818fc29f1 | 6e96cb0b6bb88d41e62a8fba15e7d7bfe6e351ec | /write_metadata.R | e0342ad0605a16f331b5969b07a638d67c1b4cdd | [
"MIT"
] | permissive | SrivastavaLab/cardoso2008 | f5d467de2bb2f1b5c41bdd9ce8cfd98bdb8b73cb | d5050f1cc4c59cf7f5ed2f5b427cd715b6402c0c | refs/heads/master | 2021-01-12T11:07:37.452454 | 2016-11-22T13:47:59 | 2016-11-22T13:47:59 | 72,834,681 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 259 | r | write_metadata.R | ## collect column headers for metadata writing
## Collect the column headers of every CSV under data/ into a blank
## metadata template: one row per (dataset, variable) pair.
csv_paths <- dir("data", full.names = TRUE)
names(csv_paths) <- basename(csv_paths)
datasets <- map(csv_paths, read_csv)
header_tbl <- map_df(datasets,
                     ~ data_frame(variable_name = names(.x)),
                     .id = "dataset_name")
write_csv(header_tbl, "metadata_blank.csv")
|
54d75af5ab0796d3e0f410bc2cf25c2b1572e9bb | 4fe27c2b7392caf3f2c22947a8076b28f468e059 | /Functions/Old/SwimReportFunction.R | d63a473555e67135732ac861f127da5a09dda19c | [] | no_license | rmcdonnell/Daphnia-Project | 26c7f36a47fcb9dbaca9d6c2176acf73b0b7a81e | a8d675e28590666499b6134942b09a0f6bd5e266 | refs/heads/master | 2020-04-01T12:52:11.135160 | 2015-07-14T00:15:31 | 2015-07-14T00:15:31 | 37,035,092 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,118 | r | SwimReportFunction.R | # To use this function, you need to import a .csv file of the raw
# ImageJ data. To prepare the .csv file, you need to take the
# individual animal observation periods and collect them into a
# single .csv file. Then, depending on how you concatinate the
# collected data you take note of what the interval is between
# individual animal observations is and put that value into the
# DataInterval position of the function. For example, if you have
# two observation periods and two animals in the same drug
# concentration, you may have recorded, using ImageJ, the ID, Area,
# X, Y, and Slice. If you recorded this information, in this order,
# you have an interval of 5 since you have 5 categories of data.
# Only the Area and X data are examined here. The ID has no meaning
# here and the Y displacements are minimal and add very little to
# the force measurements being made. Slice only has importance when
# the time is needed for a time series analysis, which this script
# does not address. The output is an array of force measurements
# that can be compared at various drug concentrations. This script
# also removes the feeding oscilations (which is only important here
# because it skewes the mean force measuments).
SwimReport <- function(InputData,DataInterval,CalibrationData_DispVariable)
{
# A simple difference function taking in an array and outputs an
# array of differences (final - initial)
dx <- function(x)
{
y <- c(0,x[1:length(x)-1])
z <- c(0,x[2:length(x)])
out <- z-y
print("dx function used")
return(out)
}
zero <- function(zeroInput, zeroColumn)
{
# 640 is the width of the image. This conversion is done to
# correct the x-y coordinate system.
numbers <- 640 - data.matrix(zeroInput[zeroColumn])
# a 20% trimmed mean results in a better central fiber position
# while removing values that deviate from the last more than 20
# pixels will not be included.
central <- mean(numbers[abs(dx(numbers)) < 10], na.rm = TRUE,
trim = 0.2)
out <- as.vector(numbers - central, mode = "numeric")
print("zero function used")
return(out)
}
area_dev <- function(areaInput, areaColumn)
{
g <- data.matrix(areaInput[areaColumn])
z_area <- abs(g - mean(g, na.rm = TRUE)) / sd(g, na.rm=TRUE)
# 'z_area' uses the trimmed mean to find the absolute value of
# the z-score for the fiber's area values, the z-score is then
# used to remove any valuses that deviate too far from the
# area's trimmed mean.
out <- as.vector(ifelse(z_area <= 2, 1, NA), mode = "numeric")
print("area_dev function used")
return(out)
}
filter <- function(Input, Element, Interval)
{
col_zero <- (Element) * Interval
# The column to be entered into the zero() function
col_area <- (Element) * Interval - 1
# The column to be intered into the area_dev() function
out <- as.vector(zero(Input, col_zero) *
area_dev(Input, col_area), mode = "numeric")
print("filter function used")
return(out)
}
# This function concatenates the different columns of X's
# The inputs:
# Input = the raw .csv file,
# Intervals = the number of columns for each filming period (if
# there are two columns until the next filming period,
# e.g. area and x, then the Intervals value is 2).
# The output:
# An array of normalized distance values (units are in pixels)
# that are concatinated together.
concat <- function(Inputs,Intervals)
{
final <- NULL
Max <- dim(Inputs)[2]/2
elementSequence <- seq(1,Max,1)
for (i in elementSequence)
{
final <- c(final,
filter(Inputs, elementSequence[i], Intervals))
print("concat function used: ")
print(i)
}
return(final)
}
# This function finds the max force values by removing values around
# a local maximum value. The input is the data to be filtered
# (preferably after being concatinated) with a bound that is simply
# the lower limit of which you want no values below it. The output
# is an array of the filtered data.
ForceFilter <- function(input,bound)
{
for(i in 1:length(input))
{
x <- ifelse(input[i] >= bound, input, NA)
}
y <- c(NA, x[1:length(x)-1])
z <- c(x[2:length(x)], NA)
out <- numeric(length(x))
for (i in 1:length(x))
{
if (!is.na(x[i]))
{
if (is.na(y[i]))
{
if (is.na(z[i]) || x[i] > z[i])
{
out[i] <- x[i]
}
}
else
{
if (x[i] > y[i])
{
if (is.na(z[i]) || x[i] > z[i])
{
out[i] <- x[i]
}
}
}
}
else
{
out[i] <- NA
}
}
print("forcefilter used")
return(out)
}
# Takes in input data, and goes therough a series of possible lower
# bounds to create an array of means. The rate of change of the
# array of means is then taken. The mean of this array is found and
# that mean is then compared to the rest of the array to find the
# number of points in the array are less than the mean. This is the
# best lower bound for the data set.
# Input: data and a number of lower bounds you want to test
# Output: the best lower bound
lowerbounds <- function(input,MAX)
{
v <- seq(0,MAX,1)
out <- NULL
for(i in v)
{
data <- ForceFilter(concat(input,2),i)
newMean <- mean(data, na.rm=TRUE)
out <- c(out, newMean)
}
best <- sum(dx(out) < mean(dx(out)))
print("lowerbound used")
return(best)
}
maximumForceReportable <- max(CalibrationData_DispVariable)
MaxReport <- function(inData,inMax)
{
x <- inData
x[x>inMax] <- NA
x[x==0] <- NA
print("maxreport used")
return(x)
}
ForceData <- ForceFilter(concat(InputData, DataInterval),
lowerbounds(InputData,100))
outData <- MaxReport(ForceData,maximumForceReportable)
return(ForceData)
}
|
f0139e261d5f2ef3d506775f11d8bd6a0ce75d16 | 4149a4cc99850c923bff4d12edabc0be276aab04 | /man/statepolicy.Rd | f0780d5bf283896a465fe12a146d5db0f6449ba9 | [] | no_license | vladtarko/ppeldata | a8bc26ed14e59c6d9713fe7c91c7ab242a676c71 | ce668c239c150e2a0ecd4363b6cda9812967e0fc | refs/heads/master | 2020-08-22T02:38:17.101454 | 2019-10-21T07:49:24 | 2019-10-21T07:49:24 | 216,300,700 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 970 | rd | statepolicy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ppeldata.R
\docType{data}
\name{statepolicy}
\alias{statepolicy}
\title{United States State Policies}
\format{An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 4258 rows and 709 columns.}
\source{
\href{http://www.statepolicyindex.com/data/}{State Policy Database}
}
\usage{
data(statepolicy)
data(statepolicy_meta)
}
\description{
The Ruger and Sorens Database of State and Local Public Policies and Policy Ideology Indices (2018)
}
\details{
The `statepolicy_meta` contains the variable descriptions and original sources.
The `statepolicy` contains the data.
}
\examples{
data(statepolicy)
data(statepolicy_meta)
}
\references{
Jason Sorens, Fait Muedini and William P. Ruger. 2008. "U.S. State and Local Public Policies in 2006: A New Database." _State Politics and Policy Quarterly_
vol 8, no 3, pp 309-326.
}
\keyword{datasets}
|
2fa469dfc16b5d8bd9ac35154b73d5a7eb5a597f | 3eeb5ee6e7b43bb4a1ed950c883b3c7af4af2a17 | /app.R | 7dce44b159924cc6bf861ab33ecda6e587252d06 | [] | no_license | raghunandanalugubelli/CASAS | d59b655174a0b211a943e110db4e464c1f4133c7 | ed3000e878816e89482feba48fd76f2e2943e912 | refs/heads/master | 2021-09-03T07:14:34.453033 | 2018-01-06T20:38:56 | 2018-01-06T20:38:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 162,747 | r | app.R | library(shiny)
library(shinyjs)
library(shinythemes)
library(DT)
library(cequre)
library(survival)
library(survMisc)
#library(survplot)
library(gplots)
library(ggplot2)
library(survminer)
library(gridExtra)
library(cmprsk)
library(dynpred)
library(reshape2)
library(RColorBrewer)
# CASAS user interface: a navbarPage with one tab per analysis module
# (standard KM/Cox, competing-risk, landmark, quantile survival, optimal
# cutoff finder, significance grid), plus a tutorial and About pages.
# Each analysis tab follows the same layout: a 2-column sidebar (file
# input, analysis options, downloads) and a 10-column main panel of
# result tabs.  The column(12, ...) block at the end of each tab renders
# a fixed "Loading..." banner while the server is busy.
ui <- fluidPage(
           navbarPage("CASAS: Cancer Survival Analysis Suite",
             # --- Tab 1: Standard survival analysis (KM plot + univariate Cox) ---
             tabPanel("Standard Survival Analysis",
                      fluidRow(
                        column(2,
                               wellPanel(
                                 h4(strong("Input your file")),
                                 selectInput("file2",label= "Select an example ds or upload your own with 'Load my own'",
                                             choices = c("Example ds File"="Example2", "Load my own data" = "load_my_own2")),
                                 conditionalPanel("input.file2 == 'load_my_own2'",
                                                  fileInput('file22', 'Choose file to upload (maximum size 500 MB).', accept=c('.xlsx','text/csv', 'text/comma-separated-values,text/plain', '.csv'))),
                                 conditionalPanel("input.file2 == 'Example2'",
                                                  downloadButton('downloadEx2', 'Download Example data')
                                 )),
                               # Options shown only on the "KM Analysis and Plot" result tab (value 2).
                               conditionalPanel("input.cPanels1 == 2",
                                                wellPanel(
                                                  h4(strong("KM Analysis")),
                                                  selectInput("select21", "Select a Variable of Interest as Cohort Group",
                                                              choices=c("AGE", "RACE")),
                                                  selectInput("binary",label= "The variable of interest is categorical or continuous",
                                                              choices = c("Categorical Variable" = "categorical", "Continuous Variable"="continuous"), selected= "continuous"),
                                                  conditionalPanel("input.binary == 'continuous'",
                                                                   radioButtons("cutoff2", "Choose Cutoff Point for Continuous Variable:", list("Optimal Cutoff" = 1,"25th Percentile" = 25,"50th Percentile" = 50, "75th Percentile" = 75), selected = 25)),
                                                  selectInput("select22", "Select a Time Variable to Visualize KM Plot",
                                                              choices=c("os")),
                                                  selectInput("select23", "Select a Censoring Variable to Visualize KM Plot",
                                                              choices=c("os_censor")),
                                                  radioButtons("time", "Time Unit:", list("Years" = 1, "Months" = 2, "Days" = 3), selected = 1),
                                                  radioButtons("riskt", "Risk Table:", list("Yes" = TRUE, "No" = FALSE), selected = TRUE),
                                                  hr()
                                                )),
                               # Options shown only on the univariate Cox result tab (value 3).
                               conditionalPanel("input.cPanels1 == 3",
                                                wellPanel(
                                                  h4(strong("Univariate Association Analysis")),
                                                  selectInput("select24", "Select a Time Variable for Survival Analysis",
                                                              choices=c("os")),
                                                  selectInput("select25", "Select a Censoring Variable for Survival Analysis",
                                                              choices=c("os_censor")),
                                                  selectInput("show_vars26", "Select multiple Variables to Generate Univariate Survival Association Table",
                                                              c("AGE", "RACE"), choices=c("AGE", "RACE"), multiple = TRUE),
                                                  radioButtons("assum", "Test for Proportional Hazards Assumption:", list("Yes" = 1,"No" = 0), selected = 0),
                                                  hr()
                                                )),
                               conditionalPanel("input.cPanels1 == 3",
                                                h4(strong("Downloads")),
                                                wellPanel(
                                                  textInput("fname22", "Type the file name you would like to save as", value = "survivaltable"),
                                                  downloadButton('x1', 'Download Survival Report')
                                                )),
                               conditionalPanel("input.cPanels1 == 2",
                                                h4(strong("Downloads")),
                                                wellPanel(
                                                  textInput("fname21", "Type the file name you would like to save as", value = "kmplot"),
                                                  downloadButton('downloadKM', 'Download KM Plot')
                                                )
                               )),
                        column(10,
                               tabsetPanel(
                                 tabPanel("Read Me", htmlOutput("ReadMe2"), value =1),
                                 tabPanel("KM Analysis and Plot", htmlOutput("pv21"), plotOutput("kmplot", height= 600, width = 800), value =2),
                                 tabPanel("Univariate Survival Association (Cox Model)", htmlOutput("pv22"),
                                          DT::dataTableOutput("out2"), value = 3),
                                 id = "cPanels1"
                               ) #uiOutput("out2")
                        ),
                        column(12,
                               tags$head(tags$style(type="text/css", "
                                                    #loadmessage {
                                                    position: fixed;
                                                    bottom: 0px;
                                                    right: 0px;
                                                    width: 100%;
                                                    padding: 5px 0px 5px 0px;
                                                    text-align: center;
                                                    font-weight: bold;
                                                    font-size: 100%;
                                                    color: #000000;
                                                    background-color: #b8b8b8;
                                                    z-index: 105;
                                                    }
                                                    ")),
                               conditionalPanel(condition="$('html').hasClass('shiny-busy')",
                                                tags$div("Loading...",id="loadmessage"))
                               )
                               )),
             # --- Tab 2: Competing-risk analysis (CIF plot + Fine and Gray model) ---
             tabPanel("Competing Risk Survival Analysis",
                      fluidRow(
                        column(2,
                               wellPanel(
                                 h4(strong("Input your file")),
                                 selectInput("file3",label= "Select an example ds or upload your own with 'Load my own'",
                                             choices = c("Example ds File"="Example3", "Load my own data" = "load_my_own3")),
                                 conditionalPanel("input.file3 == 'load_my_own3'",
                                                  fileInput('file32', 'Choose file to upload (maximum size 500 MB).', accept=c('.xlsx','text/csv', 'text/comma-separated-values,text/plain', '.csv'))),
                                 conditionalPanel("input.file3 == 'Example3'",
                                                  downloadButton('downloadEx3', 'Download Example data')
                                 )),
                               conditionalPanel("input.cPanels2 == 2",
                                                wellPanel(
                                                  h4(strong("CIF Analysis")),
                                                  selectInput("select31", "Select a Variable of Interest as Cohort Group",
                                                              choices=c("AGE", "RACE")),
                                                  selectInput("binary2",label= "The variable of interest is categorical or continuous",
                                                              choices = c("Categorical Variable" = "categorical", "Continuous Variable"="continuous")),
                                                  conditionalPanel("input.binary2 == 'continuous'",
                                                                   radioButtons("cutoff3", "Choose Cutoff Point for Continuous Variable:", list("25th Percentile" = 25,"50th Percentile" = 50, "75th Percentile" = 75), selected = 25)),
                                                  selectInput("select32", "Select a Time Variable to Visualize CIF Plot",
                                                              choices=c("os")),
                                                  selectInput("select33", "Select a Censoring Variable to Visualize CIF Plot",
                                                              choices=c("os_censor")),
                                                  radioButtons("points", "Input Time Points:", list("Yes" = 1, "No" = 0), selected = 0),
                                                  textInput("text", label = "Time Points Input", value = "0, 10, 20, 30"),
                                                  radioButtons("time2", "Time Unit:", list("Years" = 1, "Months" = 2, "Days" = 3), selected = 2),
                                                  radioButtons("event", "Event Code:", list("1" = 1, "2" = 2, "0" = 0), selected = 2),
                                                  radioButtons("censor", "Censor Code:", list("1" = 1, "2" = 2, "0" = 0), selected = 0),
                                                  hr()
                                                )),
                               conditionalPanel("input.cPanels2 == 3",
                                                wellPanel(
                                                  h4(strong("Univariate Association Analysis")),
                                                  selectInput("select34", "Select a Time Variable for Competing Risk Survival Analysis",
                                                              choices=c("os")),
                                                  selectInput("select35", "Select a Censoring Variable for Competing Risk Survival Analysis",
                                                              choices=c("os_censor")),
                                                  selectInput("show_vars36", "Select multiple Variables to Generate Univariate Survival Association Table",
                                                              c("AGE", "RACE"), choices=c("AGE", "RACE"), multiple = TRUE),
                                                  radioButtons("event2", "Event Code:", list("1" = 1, "2" = 2, "0" = 0), selected = 2),
                                                  radioButtons("censor2", "Censor Code:", list("1" = 1, "2" = 2, "0" = 0), selected = 0),
                                                  hr()
                                                )),
                               conditionalPanel("input.cPanels2 == 3",
                                                h4(strong("Downloads")),
                                                wellPanel(
                                                  textInput("fname32", "Type the file name you would like to save as", value = "crrtable"),
                                                  downloadButton('x2', 'Download Competing Risk Report')
                                                )),
                               conditionalPanel("input.cPanels2 == 2",
                                                h4(strong("Downloads")),
                                                wellPanel(
                                                  textInput("fname31", "Type the file name you would like to save as", value = "cifplot"),
                                                  downloadButton('downloadcif', 'Download CIF Plot')
                                                ),
                                                wellPanel(
                                                  textInput("fname33", "Type the file name you would like to save as", value = "ciftable"),
                                                  downloadButton('downloadciftable', 'Download CIF Report')
                                                )
                               )
                        ),
                        column(10,
                               tabsetPanel(
                                 tabPanel("Read Me", htmlOutput("ReadMe3"), value = 1),
                                 tabPanel("CIF Analysis and Plot", htmlOutput("pv31"),
                                          plotOutput("cifplot", height= 600, width = 800),
                                          DT::dataTableOutput("ciftable"), value = 2),
                                 tabPanel("Univariate Survival Association (Fine and Gray Model)", htmlOutput("pv32"),
                                          DT::dataTableOutput("out3"), value = 3),
                                 id = "cPanels2") #uiOutput("out2")
                               #uiOutput("out2")
                        ),
                        column(12,
                               tags$head(tags$style(type="text/css", "
                                                    #loadmessage {
                                                    position: fixed;
                                                    bottom: 0px;
                                                    right: 0px;
                                                    width: 100%;
                                                    padding: 5px 0px 5px 0px;
                                                    text-align: center;
                                                    font-weight: bold;
                                                    font-size: 100%;
                                                    color: #000000;
                                                    background-color: #b8b8b8;
                                                    z-index: 105;
                                                    }
                                                    ")),
                               conditionalPanel(condition="$('html').hasClass('shiny-busy')",
                                                tags$div("Loading...",id="loadmessage"))
                               )
                               )),
             # --- Tab 3: Landmark survival analysis (KM or CIF from a landmark time) ---
             tabPanel("Landmark Survival Analysis",
                      fluidRow(
                        column(2,
                               wellPanel(
                                 h4(strong("Input your file")),
                                 selectInput("file4",label= "Select an example ds or upload your own with 'Load my own'",
                                             choices = c("Example ds File"="Example4", "Load my own data" = "load_my_own4")),
                                 conditionalPanel("input.file4 == 'load_my_own4'",
                                                  fileInput('file42', 'Choose file to upload (maximum size 500 MB).', accept=c('.xlsx','text/csv', 'text/comma-separated-values,text/plain', '.csv'))),
                                 conditionalPanel("input.file4 == 'Example4'",
                                                  downloadButton('downloadEx4', 'Download Example data')
                                 )),
                               wellPanel(
                                 h4(strong("KM Analysis")),
                                 selectInput("select41", "Select a Variable of Interest as Cohort Group",
                                             choices=c("AGE", "RACE")),
                                 selectInput("binary3",label= "The variable of interest is categorical or continuous",
                                             choices = c("Categorical Variable" = "categorical", "Continuous Variable"="continuous")),
                                 conditionalPanel("input.binary3 == 'continuous'",
                                                  radioButtons("cutoff4", "Choose Cutoff Point for Continuous Variable:", list("Optimal Cutoff" = 1,"25th Percentile" = 25,"50th Percentile" = 50, "75th Percentile" = 75), selected = 25)),
                                 selectInput("select42", "Select a Time Variable to Visualize KM/CIF Plot",
                                             choices=c("os")),
                                 selectInput("select43", "Select a Censoring Variable to Visualize KM/CIF Plot",
                                             choices=c("os_censor")),
                                 selectInput("select44", "Select a Time Dependent Variable to Visualize KM/CIF Plot",
                                             choices=c("wtime")),
                                 textInput("text2", label = "Input Time Point for Landmark Analysis", value = "200"),
                                 radioButtons("time3", "Time Unit:", list("Years" = 1, "Months" = 2, "Days" = 3), selected = 3),
                                 radioButtons("option", "KM or CIF:", list("KM" = TRUE, "CIF" = FALSE), selected = TRUE),
                                 radioButtons("riskt2", "Risk Table:", list("Yes" = TRUE, "No" = FALSE), selected = TRUE),
                                 hr()
                               ),
                               h4(strong("Downloads")),
                               wellPanel(
                                 textInput("fname41", "Type the file name you would like to save as", value = "landmarkplot"),
                                 downloadButton('downloadlandmarkplot', 'Download Landmark Plot')
                               )
                        ),
                        column(10,
                               tabsetPanel(
                                 tabPanel("Read Me", htmlOutput("ReadMe4") ),
                                 tabPanel("Landmark Survival Plot", htmlOutput("pv41"),
                                          plotOutput("landmarkplot", height= 600, width = 800))
                               ) #uiOutput("out2")
                               #uiOutput("out2")
                        ),
                        column(12,
                               tags$head(tags$style(type="text/css", "
                                                    #loadmessage {
                                                    position: fixed;
                                                    bottom: 0px;
                                                    right: 0px;
                                                    width: 100%;
                                                    padding: 5px 0px 5px 0px;
                                                    text-align: center;
                                                    font-weight: bold;
                                                    font-size: 100%;
                                                    color: #000000;
                                                    background-color: #b8b8b8;
                                                    z-index: 105;
                                                    }
                                                    ")),
                               conditionalPanel(condition="$('html').hasClass('shiny-busy')",
                                                tags$div("Loading...",id="loadmessage"))
                               )
                               )),
             # --- Tab 4: Quantile survival analysis (plots, forest plot, Cox output) ---
             tabPanel("Quantile Survival Analysis",
                      fluidRow(
                        column(2,
                               wellPanel(
                                 h4(strong("Input your file")),
                                 selectInput("file1",label= "Select an example ds or upload your own with 'Load my own'",
                                             choices = c("Example ds File"="Example", "Load my own data" = "load_my_own")),
                                 conditionalPanel("input.file1 == 'load_my_own'",
                                                  fileInput('file12', 'Choose file to upload (maximum size 500 MB).', accept=c('.xlsx','text/csv', 'text/comma-separated-values,text/plain', '.csv'))),
                                 conditionalPanel("input.file1 == 'Example'",
                                                  downloadButton('downloadEx', 'Download Example data')
                                 )),
                               wellPanel(
                                 h4(strong("Quantile Survival Analysis")),
                                 selectInput("select", "Select a Variable of Interest",
                                             choices=c("AGE", "RACE")),
                                 selectInput("binary4",label= "The variable of interest is categorical or continuous",
                                             choices = c("Categorical Variable" = "categorical", "Continuous Variable"="continuous")),
                                 conditionalPanel("input.binary4 == 'continuous'",
                                                  radioButtons("cutoff", "Choose Cutoff Point:", list("Optimal Cutoff" = 1,"25th Percentile" = 25,"50th Percentile" = 50, "75th Percentile" = 75), selected = 25),
                                                  radioButtons("exp_ref", "Choose Reference Level:", list("High" = "High","Low" = "Low"), selected = "High")
                                 ),
                                 selectInput("select12", "Select a Time Variable for Quantile Analysis",
                                             choices=c("os")),
                                 selectInput("select13", "Select a Censoring Variable for Quantile Analysis",
                                             choices=c("os_censor")),
                                 radioButtons("time4", "Time Unit:", list("Years" = 1, "Months" = 2, "Days" = 3), selected = 1),
                                 actionButton("numb", "Run & generate random numbers"),
                                 h5("Hit Run & Generate random number button above EACH time to display output on main panel."),
                                 hr() # Copy the line below to make a select box
                               ),
                               h4(strong("Downloads")),
                               wellPanel(
                                 textInput("fname11", "Type the file name you would like to save as", value = "QAplot"),
                                 downloadButton('downloadQA', 'Download Quantile Survival Plot')),
                               wellPanel(
                                 textInput("fname12", "Type the file name you would like to save as", value = "forestplot"),
                                 downloadButton('downloadFP', 'Download Forest Plot')),
                               wellPanel(
                                 textInput("fname13", "Type the file name you would like to save as", value = "Data for Grid"),
                                 downloadButton('downloadTB', 'Download Grid Data')
                               )
                        ),
                        column(10,
                               tabsetPanel(
                                 tabPanel("Read Me", htmlOutput("ReadMe1")),
                                 tabPanel("Quantile Survival Plots and Output",
                                          htmlOutput("pv15"),
                                          splitLayout(cellWidths = c("60%", "40%"), htmlOutput("pv11"), htmlOutput("pv12")),
                                          splitLayout(cellWidths = c("60%", "40%"), plotOutput("QAplot", height= 600, width = 800),
                                                      plotOutput("forestplot", height= 500, width = 600)),
                                          htmlOutput("pv13"),
                                          DT::dataTableOutput("coxout"),
                                          htmlOutput("pv14"),
                                          DT::dataTableOutput("CIout")
                                 )
                               ) #uiOutput("out2")
                               #htmlOutput("pv"),
                               #plotOutput("QAplot", height= 600, width = 800),
                               #htmlOutput("pv2"),
                               #plotOutput("forestplot", height= 600, width = 800)
                               #htmlOutput("blurp"))
                               #h5("Descriptive Statistics"),
                               #verbatimTextOutput("out")
                        ),
                        column(12,
                               tags$head(tags$style(type="text/css", "
                                                    #loadmessage {
                                                    position: fixed;
                                                    bottom: 0px;
                                                    right: 0px;
                                                    width: 100%;
                                                    padding: 5px 0px 5px 0px;
                                                    text-align: center;
                                                    font-weight: bold;
                                                    font-size: 100%;
                                                    color: #000000;
                                                    background-color: #b8b8b8;
                                                    z-index: 105;
                                                    }
                                                    ")),
                               conditionalPanel(condition="$('html').hasClass('shiny-busy')",
                                                tags$div("Loading...",id="loadmessage"))
                               )
                               )),
             # --- Tab 5: Optimal cutoff finder (martingale residual plot + table) ---
             tabPanel("Optimal Cutoff Point Finder",
                      fluidRow(
                        column(2,
                               wellPanel(
                                 h4(strong("Input your file")),
                                 selectInput("file5",label= "Select an example ds or upload your own with 'Load my own'",
                                             choices = c("Example ds File"="Example5", "Load my own data" = "load_my_own5")),
                                 conditionalPanel("input.file5 == 'load_my_own5'",
                                                  fileInput('file51', 'Choose file to upload (maximum size 500 MB).', accept=c('.xlsx','text/csv', 'text/comma-separated-values,text/plain', '.csv'))),
                                 conditionalPanel("input.file5 == 'Example5'",
                                                  downloadButton('downloadEx5', 'Download Example data')
                                 )),
                               wellPanel(
                                 h4(strong("Choose Variable")),
                                 selectInput("select51", "Select Input",
                                             choices=c("AGE", "RACE")),
                                 selectInput("select52", "Select a Time Variable",
                                             choices=c("os")),
                                 selectInput("select53", "Select a Censoring Variable",
                                             choices=c("os_censor")),
                                 hr() # Copy the line below to make a select box
                               ),
                               h4(strong("Downloads")),
                               wellPanel(
                                 textInput("fname51", "Type the file name you would like to save as", value = "MRplot"),
                                 downloadButton('downloadMR', 'Download Martingale Residual Plot')
                               )
                        ),
                        column(10,
                               tabsetPanel(
                                 tabPanel("Read Me", htmlOutput("ReadMe5")),
                                 tabPanel("Optimal Cutpoint Finder Output)", plotOutput("MRplot", height= 600, width = 800),
                                          htmlOutput("pv51"),
                                          DT::dataTableOutput("Optimal"))
                               ) #uiOutput("out2")
                               #htmlOutput("pv4"),
                               #htmlOutput("blurp"))
                               #h5("Descriptive Statistics"),
                               #verbatimTextOutput("out")
                        ),
                        column(12,
                               tags$head(tags$style(type="text/css", "
                                                    #loadmessage {
                                                    position: fixed;
                                                    bottom: 0px;
                                                    right: 0px;
                                                    width: 100%;
                                                    padding: 5px 0px 5px 0px;
                                                    text-align: center;
                                                    font-weight: bold;
                                                    font-size: 100%;
                                                    color: #000000;
                                                    background-color: #b8b8b8;
                                                    z-index: 105;
                                                    }
                                                    ")),
                               conditionalPanel(condition="$('html').hasClass('shiny-busy')",
                                                tags$div("Loading...",id="loadmessage"))
                               )
                               )),
             # --- Tab 6: Grid summarizing significance across analyses ---
             tabPanel("Grid for Summarized Significance",
                      fluidRow(
                        column(2,
                               wellPanel(
                                 h4(strong("Input your file")),
                                 selectInput("file6",label= "Select an example ds or upload your own with 'Load my own'",
                                             choices = c("Example ds File"="Example6", "Load my own data" = "load_my_own6")),
                                 conditionalPanel("input.file6 == 'load_my_own6'",
                                                  fileInput('file61', 'Choose file to upload (maximum size 500 MB).', accept=c('.xlsx','text/csv', 'text/comma-separated-values,text/plain', '.csv'))),
                                 conditionalPanel("input.file6 == 'Example6'",
                                                  downloadButton('downloadEx6', 'Download Example data')
                                 )),
                               wellPanel(
                                 radioButtons("exp_ref2", "Choose Reference Level:", list("High" = "High","Low" = "Low"), selected = "High")
                               ),
                               h4(strong("Downloads")),
                               wellPanel(
                                 textInput("fname61", "Type the file name you would like to save as", value = "Grid"),
                                 downloadButton('downloadHM', 'Download Grid Plot')
                               )
                        ),
                        column(10,
                               tabsetPanel(
                                 tabPanel("Read Me", htmlOutput("ReadMe6")),
                                 tabPanel("Summarized Significance Grid", htmlOutput("pv61"),
                                          plotOutput("heatmapplot", height= 800, width = 800))
                               ) #uiOutput("out2")
                               #htmlOutput("pv3"),
                               #htmlOutput("title"),
                               #DT::dataTableOutput("SuperPC")
                        ),
                        column(12,
                               tags$head(tags$style(type="text/css", "
                                                    #loadmessage {
                                                    position: fixed;
                                                    bottom: 0px;
                                                    right: 0px;
                                                    width: 100%;
                                                    padding: 5px 0px 5px 0px;
                                                    text-align: center;
                                                    font-weight: bold;
                                                    font-size: 100%;
                                                    color: #000000;
                                                    background-color: #b8b8b8;
                                                    z-index: 105;
                                                    }
                                                    ")),
                               conditionalPanel(condition="$('html').hasClass('shiny-busy')",
                                                tags$div("Loading...",id="loadmessage"))
                               )
                               )),
             # --- Tab 7: Embedded PDF tutorial ---
             tabPanel("Tutorial",
                      tags$iframe(src= "CASAS_Tutorial.pdf", width = 1800, height = 1000)),
             # --- Tab 8: About Us menu (citation, contact, feedback) ---
             navbarMenu("About Us",
                        tabPanel("How to Cite",
                                 fluidRow(
                                   column(8, offset = 2,
                                          "Rupji M, Zhang X and Kowalski J. CASAS: Cancer Survival Analysis Suite, a web based application [version 1; referees: awaiting peer review]. F1000Research 2017, 6:919 (doi: 10.12688/f1000research.11830.1)",
                                          br(),
                                          br(),
                                          "The National Cancer Institute (NCI) requires that publications acknowledge the Winship Cancer Institute CCSG support, and they are tracking compliance. When using this tool to report results in your publication, please include the following statement in the acknowledgment section of your publication(s):",
                                          br(),
                                          br(),
                                          em("Research reported in this publication was supported in part by the Biostatistics and Bioinformatics Shared Resource of Winship Cancer Institute of Emory University and NIH/NCI under award number P30CA138292. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health.")
                                   ))),
                        tabPanel("Contact Us",
                                 fluidRow(
                                   column(8, offset = 2,
                                          "This tool was prepared by members of the Winship Biostatistics and Bioinformatics Shared Resource (BBISR) of Emory University.",
                                          br(),
                                          a(href="https://bbisr.winship.emory.edu/", "https://bbisr.winship.emory.edu/"),
                                          br(),
                                          br(),
                                          "Authors- Manali Rupji, dual M.S., Xinyan (Abby) Zhang, MPH. & Jeanne Kowalski Ph.D.",
                                          br(),
                                          "Maintainer- Manali Rupji 'manali(dot)rupji(at)emory(dot)edu'")
                                 )),
                        tabPanel("Feedback",
                                 fluidRow(
                                   column(8, offset = 2,
                                          #br(),
                                          "As a Biostatistics and Bioinformatics core, we are actively improving and expanding our NGS analysis services and analysis products. For any questions, comments, or suggestions, please email the developer at manali(dot)rupji(at)emory(dot)edu."
                                   )))
             )
  ))
################################################
################ SERVER.R ######################
################################################
# Allow uploads up to 500 MB and sanitize error messages shown to users.
options(shiny.maxRequestSize= 500*1024^2)
options(shiny.sanitize.errors = TRUE)
#options(bitmapType='cairo')
# Load the analysis helper scripts used by the server logic below
# (quantile survival, forest plot, cutoff finder, cumulative incidence, etc.).
source("QSE.R")
source("forestplot.R")
source("significant.R")
source("boxcolor.R")
source("survplot.R")
source("cutpoint finder.R")
source("optimalcut_plot.R")
source("optimalcut_table.R")
source("CumIncidence.R")
source("factor2ind.R")
server <- function(input, output, session){
data_input <- reactive({
if(input$file1 == 'Example'){
d <- read.csv("data/BRCA_for_quantile_survival_analysis.csv", header =T, sep =",")
}
else if(input$file1 == 'load_my_own'){
inFile <- input$file12
if (is.null(inFile))
return(NULL)
else if(grepl(".xlsx", inFile[1])) { d = read.xlsx(as.character(inFile$datapath), colNames = TRUE, rowNames = F, as.is = T) }
else if(grepl(".csv", inFile[1])) { d = read.csv(as.character(inFile$datapath), header = TRUE, sep = ",", stringsAsFactors = F, as.is = T, fill = T) }
else if(grepl(".txt", inFile[1])) { d = read.table(as.character(inFile$datapath), header = TRUE, sep = "\t", stringsAsFactors = F, as.is = T, fill = T) }
}
else
return(NULL)
# dim(data)
Dataset <- data.frame(d)
return(as.data.frame(Dataset))
})
output$downloadEx <- downloadHandler(
filename <- function() {
paste('Example ds', Sys.time(),'.csv', sep='')
},
content <- function(file) {
ds <- data_input()#res()$CI_Data2
write.csv(ds, file, row.names = FALSE)
}
)
observe({
dsnames <- colnames(data_input())
cb_options <- dsnames
updateSelectInput(session, "select", label = "Select Variable of Interest",
choices = cb_options,
selected = tail(cb_options))
updateSelectInput(session, "select12", label = "Select a Time Variable for Quantile Analysis",
choices = cb_options,
selected=cb_options[3])
updateSelectInput(session, "select13", label = "Select a Censoring Variable for Quantile Analysis",
choices = cb_options,
selected =cb_options[2] )
dsnames2 <- colnames(data_input2())
cb_options2 <- dsnames2
updateSelectInput(session, "select21", label = "Select a Variable of Interest as Cohort Group",
choices = c(cb_options2, "All Patients"),
selected = cb_options2[5])
updateSelectInput(session, "select22", label = "Select a Time Variable to Visualize KM Plot",
choices = cb_options2,
selected=cb_options2[3])
updateSelectInput(session, "select23", label = "Select a Censoring Variable to Visualize KM Plot",
choices = cb_options2,
selected =cb_options2[2] )
updateSelectInput(session, "select24", label = "Select a Time Variable for Survival Analysis",
choices = cb_options2,
selected=cb_options2[3])
updateSelectInput(session, "select25", label = "Select a Censoring Variable for Survival Analysis",
choices = cb_options2,
selected =cb_options2[2] )
updateSelectInput(session, "show_vars26", label = "Select multiple Variables to Generate Univariate Survival Association Table",
choices = cb_options2,
selected = cb_options2[c(5,6)])
dsnames3 <- colnames(data_input3())
cb_options3 <- dsnames3
updateSelectInput(session, "select31", label = "Select a Variable of Interest as Cohort Group",
choices = c(cb_options3, "All Patients"),
selected = cb_options3[length(cb_options3)])
updateSelectInput(session, "select32", label = "Select a Time Variable to Visualize CIF Plot",
choices = cb_options3,
selected=cb_options3[1])
updateSelectInput(session, "select33", label = "Select a Censoring Variable to Visualize CIF Plot",
choices = cb_options3,
selected =cb_options3[2] )
updateSelectInput(session, "select34", label = "Select a Time Variable for Competing Risk Survival Analysis",
choices = cb_options3,
selected=cb_options3[1])
updateSelectInput(session, "select35", label = "Select a Censoring Variable for Competing Risk Survival Analysis",
choices = cb_options3,
selected =cb_options3[2] )
updateSelectInput(session, "show_vars36", label = "Select multiple Variables to Generate Univariate Survival Association Table",
choices = cb_options3,
selected = cb_options3[c(3,5)])
dsnames4 <- colnames(data_input4())
cb_options4 <- dsnames4
updateSelectInput(session, "select41", label = "Select a Variable of Interest as Cohort Group",
choices = cb_options4,
selected = cb_options4[3])
updateSelectInput(session, "select42", label = "Select a Time Variable to Visualize Landmark Plot",
choices = cb_options4,
selected=cb_options4[1])
updateSelectInput(session, "select43", label = "Select a Censoring Variable to Visualize landmark Plot",
choices = cb_options4,
selected =cb_options4[2] )
updateSelectInput(session, "select44", label = "Select a Time Dependent Variable to Visualize landmark Plot",
choices = cb_options4,
selected =cb_options4[6] )
dsnames5 <- colnames(data_input5())
cb_options5 <- dsnames5
updateSelectInput(session, "select51", label = "Select a Continuous Variable of Interest",
choices = cb_options5,
selected = cb_options5[5])
updateSelectInput(session, "select52", label = "Select a Time Variable",
choices = cb_options5,
selected=cb_options5[3])
updateSelectInput(session, "select53", label = "Select a Censoring Variable",
choices = cb_options5,
selected =cb_options5[2] )
})
# Quantile survival analysis results, recomputed when the user presses the
# action button (input$numb).  Builds a two-group comparison -- "High"/"Low"
# for a continuous variable of interest, or the existing levels for a
# categorical one -- and delegates the survival computations to QSE().
# Returns NULL when the selected variable/type combination is unsupported.
res <- eventReactive(input$numb, {
data <- data_input()
xvar <- data[, input$select]                 # variable of interest
time <- as.numeric(data[, input$select12])   # survival time
censor <- data[, input$select13]             # censoring indicator
dat <- cbind.data.frame(time, censor, xvar)
dat <- dat[!is.na(dat$time), ]               # drop rows with missing time
if (is.numeric(xvar) && input$binary4 == "continuous") {
# Cutoff value 1 is the sentinel for "use the data-driven optimal
# cutpoint".  Written with the same idiom used for input$cutoff2
# elsewhere in this file; the original coerced the *comparison*
# (as.numeric(as.integer(input$cutoff == 1))), which was equivalent
# but needlessly convoluted and inconsistent.
if (as.numeric(as.integer(input$cutoff)) == 1) {
perc <- optimalcut(dat)
} else {
perc <- as.numeric(as.integer(input$cutoff))
}
QS1 <- dat
# Dichotomize at the chosen percentile: below it -> "Low", else "High".
QS1$Group <- ifelse(QS1[, 'xvar'] < quantile(QS1[, 'xvar'], perc/100), "Low", "High")
QSE(QS1, perc, input$exp_ref)
} else if (is.factor(xvar) && input$binary4 == 'categorical') {
dat$Group <- dat$xvar
ref <- levels(factor(dat$Group))[1]          # first level = reference group
QSE(dat, percentile = 1, ref)
} else if (is.character(xvar) && input$binary4 == 'categorical') {
dat$Group <- as.factor(dat$xvar)
ref <- levels(factor(dat$Group))[1]
QSE(dat, percentile = 1, ref)
} else {
NULL
}
})
# Render the quantile-survival composite figure from the cached res()
# results: a Kaplan-Meier panel (with number-at-risk annotations and the
# log-rank p-value) followed by one survival-time-difference-by-quantile
# panel per non-reference group.
output$QAplot <- renderPlot({
xlabel <- c("Years", "Months", "Days")
#b <- c(1, 12, 365)
TopLegend = "F"   # "F": legend at bottom-left; "T": legend at top-right
color=c(3,2,1)
upperQ = 1
KM_xlab= paste0("Survival Time (", xlabel[as.numeric(input$time4)], ")")
Q_ylab= "Survival Time Diff:"
Dlabel = "T"      # "T": print group survival times under each quantile panel
# Unpack the pieces computed by the res() eventReactive.
surv = res()$surv
exp= res()$exp
fit = res()$fit
m_survt = res()$m_survt
cvt.length = res()$cvt.length
NR = res()$NR           # number-at-risk matrix (one row per group)
p.val = res()$p.val     # log-rank p-value shown in the legend title
pos = res()$pos         # x positions for axis ticks and risk counts
char.cvt = res()$char.cvt
taus = res()$taus       # quantile grid, labeled Q1..Q10 on the x axis
exp.ref = res()$exp.ref # reference group label
op <- par(mfrow = c(1, cvt.length),mar = c(5,4,2,2), oma=c(4,0,0,0))
plot(surv, lty =c(1,1), ylim=c(-0.05,1), xlim=c(-0.05*max(surv$time), 1.05*max(surv$time)),col=color, lwd=2, xlab= KM_xlab, ylab="Survival Probability", xaxt="n")
axis(1,at=round(pos,2))
abline(v=m_survt, col = "lightgray", lty=3)
# Number-at-risk rows, one per group, stacked below the curves.
for (n in 1: dim(NR)[1]){text(x=pos,y= 0,labels= NR[n,],cex=0.8,pos=3, offset=(-0.8*n), col=color[n],font=3)}
text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=0.8, font=4)
if (TopLegend == "F")legend(0,0.1*cvt.length, char.cvt,lty =c(1,1), col=color,bty = "n" ,cex=1,seg.len=2,lwd =2, title=paste("logrank P = ", p.val, sep = ""))
if (TopLegend == "T") legend("topright", char.cvt,lty =c(1,1), col=color,bty = "n" ,cex=1,seg.len=2,lwd =2, title=paste("logrank P = ", p.val, sep = ""))
# One difference panel per comparison group (index 1 is the reference).
for (k in 2:cvt.length){
est<-fit$bt[k,]
va <- fit$va[k,k,]
va <- sapply(va,function(x) ifelse(x<0,0,x))  # clamp negative variances to 0
#This assumes that the length of exposure is the n, i.e. there are no missing outcomes
low <-fit$bt[k,]-qt(0.975,df=c(length(exp)-1))*sqrt(va)
up <-fit$bt[k,]+qt(0.975,df=c(length(exp)-1))*sqrt(va)
yrange<-max(up)-min(low)
plot(est~taus,xlab="Quantile",xlim=c(-0.03*upperQ,upperQ), ylim=c(min(low,-1)-0.1*yrange,max(up,0)),lwd=2, col="blue",type="l",
ylab=paste("Survival Time Diff: ", char.cvt[k], " vs. ", exp.ref, sep= " "),xaxt = "n")
axis(1,at=round(taus,3), labels = paste("Q", 1:10, sep = ""), las=2)
# Shaded 95% confidence band around the difference curve.
polygon(c(taus,rev(taus)), c(low, rev(up)),col="grey", density = 20,ylim=c(min(low),max(up)))
abline(h=0,col="red", lty= 2)
if (Dlabel == "T") {
# Annotate the per-quantile survival times for both groups below the panel.
text(x=c(-0.005*upperQ,taus),y=min(low,-1)-0.1*yrange,labels=c(char.cvt[k],round(fit$bt[k,]+ fit$bt[1,],1)),cex=0.8,pos=3, offset=0.1, col=color[k],font=3)
text(x=c(-0.005*upperQ,taus),y=min(low,-1)-0.1*yrange,labels=c(char.cvt[1],round(fit$bt[1,],1)),cex=0.8,pos =1, offset=0.1, col=color[1],font=3)
text(x=0.2*upperQ,y=min(low,-1)-0.1*yrange , label="Survival Time at Quantiles", pos=3, offset=0.8, cex=0.8, font=4)
}
}
#title(sub = Title,line = 1, outer=T,cex.sub =1.1, font.sub = 2)
par(op)
})
# Forest plot summarizing survival-time differences across quantiles,
# built from the CI data cached in res().
output$forestplot <- renderPlot({
forestplot(res()$CI_Data)
})
# Univariate Cox model summary table, rounded to 4 decimal places.
output$coxout <- DT::renderDataTable({
DT::datatable(round(res()$coxout, 4))
})
# Table backing the forest plot: per-quantile mean time differences with
# 95% CI limits and a coded significance column.
output$CIout <- DT::renderDataTable({
ci <- res()$CI_Data3
ci <- cbind.data.frame(ci[, 1], round(ci[, 2:4], 4), ci[, 5])
grp <- c("High", "Low")
other <- grp[grp != input$exp_ref]   # the non-reference group label
colnames(ci) <- c("Quantiles",
"Mean Time Difference/Transformed Hazard Ratio(log(1/HR))",
"CI Lower Limit",
"CI Upper Limit",
paste0("Significance(0: Non-sig, 1:", input$exp_ref, " Better, 2:", other, " Better, 3:Non-estimable)"))
DT::datatable(ci, options = list(
lengthMenu = list(c(5, 10, -1), c('5', '10', 'All')),
pageLength = 20))
})
# Section heading above the quantile-analysis plots; hidden until results
# exist.
output$pv11 <- renderUI({
if (is.null(res())) return(NULL)
spacer <- " "
heading <- "Quantile Survival Analysis Plots"
HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Section heading above the forest plot; hidden until results exist.
output$pv12 <- renderUI({
if (is.null(res())) return(NULL)
spacer <- " "
heading <- "Forest Plot for Survival Time Difference"
HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Section heading above the Cox analysis table; hidden until results exist.
output$pv13 <- renderUI({
if (is.null(res())) return(NULL)
spacer <- " "
heading <- "Univariate Cox Survival Analysis Table"
HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Section heading above the forest-plot data table; hidden until results
# exist.
output$pv14 <- renderUI({
if (is.null(res())) return(NULL)
spacer <- " "
heading <- "Data for Forest Plot"
HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Explanatory help text below the quantile-analysis outputs; shown only
# once results exist.  Typos in the user-facing strings were corrected:
# "plot the the summary" -> "plot is the summary", "will be interpret" ->
# "will be interpreted", and the unbalanced quote around the grid tab name.
output$pv15 <- renderUI({
if (is.null(res())) {
return(NULL)
} else {
str0 <- paste(" ")
str9 <- paste("The three plots from left to right are as following:")
str3 <- paste("  1. The first plot is KM plot with number at risk table for overall survival and log rank test p-value.")
str4 <- paste("  2. The second plot is the survival time difference with 95% CI between two dichotomized groups at 10 quantiles as Q1 to Q10 (defined as 10 percentile to 100 percentile by 10 percentile at mean survival time among all patients).")
str5 <- paste("  3. The third plot is the summary using forest plot for the survival time difference at 10 quantiles. The overall in forest plot corresponds to the transformed HR and 95% CI for overall survival (log(1/HR)).
The transformed HR will be interpreted as follows: if 0 is included then it is not significant.")
str10 <- paste("NOTE1: The first table contains hazard ratio and 95% CI with cox proportional hazard model for the overall survival with the variable of interest in the original scale.")
str11 <- paste("NOTE2: The second table 'Data for Forest Plot' contains information to generate the forest plot. For item 'overall', it shows the transformed HR and 95% CI. The last column in this table includes information of direction.
It is also needed to generate a summarized grid if you have multiple cancer types or subgroups.")
str12 <- paste("NOTE3: To generate a summarized grid if you have multiple cancer types or subgroups, you can download the data for grid in the left side panel. The percentile in the output table will be 1 if your variable of interest is categorical.
You only need to keep the first two columns and transpose the data. Repeat this for different cancer types or subgroups and combine as a new datafile to generate a grid in the tab:
'Grid for summarized significance'.")
HTML(paste(str0, strong(str9),str0, str3, str4, str5, str0,str0,strong((str10)), str0,str0,strong((str11)), str0,str0, strong((str12)), str0,str0, sep = '<br/>'))
}
})
# Download the quantile-survival composite figure as a PDF.
# Fix vs. the original: the PDF is written directly to the temp path Shiny
# supplies (`file`) instead of a scratch file in the app's working
# directory followed by file.copy() -- the old pattern fails on read-only
# deployments and races between concurrent sessions.  dev.off() is
# guaranteed via on.exit() so an error mid-plot cannot leave the device
# open.  The plotting code itself mirrors output$QAplot.
output$downloadQA <- downloadHandler(
filename <- function() {
pdf_file <<- as.character(input$fname11)
paste('QA_', pdf_file, Sys.time(),'.pdf', sep='')
},
content <- function(file) {
pdf(file = file, height= 8, width=12)
on.exit(dev.off(), add = TRUE)  # always close the device, even on error
xlabel <- c("Years", "Months", "Days")
TopLegend = "F"   # "F": legend bottom-left; "T": legend top-right
color=c(3,2,1)
upperQ = 1
KM_xlab= paste0("Survival Time (", xlabel[as.numeric(input$time4)], ")")
Q_ylab= "Survival Time Diff:"
Dlabel = "T"      # annotate survival times at each quantile
# Unpack cached results from the res() eventReactive.
surv = res()$surv
exp= res()$exp
fit = res()$fit
m_survt = res()$m_survt
cvt.length = res()$cvt.length
NR = res()$NR
p.val = res()$p.val
pos = res()$pos
char.cvt = res()$char.cvt
taus = res()$taus
exp.ref = res()$exp.ref
op <- par(mfrow = c(1, cvt.length),mar = c(5,4,2,2), oma=c(4,0,0,0))
plot(surv, lty =c(1,1), ylim=c(-0.05,1), xlim=c(-0.05*max(surv$time), 1.05*max(surv$time)),col=color, lwd=2, xlab= KM_xlab, ylab="Survival Probability", xaxt="n")
axis(1,at=round(pos,2))
abline(v=m_survt, col = "lightgray", lty=3)
for (n in 1: dim(NR)[1]){text(x=pos,y= 0,labels= NR[n,],cex=0.8,pos=3, offset=(-0.8*n), col=color[n],font=3)}
text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=0.8, font=4)
if (TopLegend == "F")legend(0,0.1*cvt.length, char.cvt,lty =c(1,1), col=color,bty = "n" ,cex=1,seg.len=2,lwd =2, title=paste("logrank P = ", p.val, sep = ""))
if (TopLegend == "T") legend("topright", char.cvt,lty =c(1,1), col=color,bty = "n" ,cex=1,seg.len=2,lwd =2, title=paste("logrank P = ", p.val, sep = ""))
# One difference panel per comparison group (index 1 is the reference).
for (k in 2:cvt.length){
est<-fit$bt[k,]
va <- fit$va[k,k,]
va <- sapply(va,function(x) ifelse(x<0,0,x))  # clamp negative variances
#This assumes that the length of exposure is the n, i.e. there are no missing outcomes
low <-fit$bt[k,]-qt(0.975,df=c(length(exp)-1))*sqrt(va)
up <-fit$bt[k,]+qt(0.975,df=c(length(exp)-1))*sqrt(va)
yrange<-max(up)-min(low)
plot(est~taus,xlab="Quantile",xlim=c(-0.03*upperQ,upperQ), ylim=c(min(low,-1)-0.1*yrange,max(up,0)),lwd=2, col="blue",type="l",
ylab=paste("Survival Time Diff: ", char.cvt[k], " vs. ", exp.ref, sep= " "),xaxt = "n")
axis(1,at=round(taus,3), labels = paste("Q", 1:10, sep = ""), las=2)
polygon(c(taus,rev(taus)), c(low, rev(up)),col="grey", density = 20,ylim=c(min(low),max(up)))
abline(h=0,col="red", lty= 2)
if (Dlabel == "T") {
text(x=c(-0.005*upperQ,taus),y=min(low,-1)-0.1*yrange,labels=c(char.cvt[k],round(fit$bt[k,]+ fit$bt[1,],1)),cex=0.8,pos=3, offset=0.1, col=color[k],font=3)
text(x=c(-0.005*upperQ,taus),y=min(low,-1)-0.1*yrange,labels=c(char.cvt[1],round(fit$bt[1,],1)),cex=0.8,pos =1, offset=0.1, col=color[1],font=3)
text(x=0.2*upperQ,y=min(low,-1)-0.1*yrange , label="Survival Time at Quantiles", pos=3, offset=0.8, cex=0.8, font=4)
}
}
par(op)
})
# Download the forest plot as a PDF.
# Fix vs. the original: write straight to the temp path Shiny supplies
# instead of creating a scratch PDF in the working directory and copying
# it (fails on read-only deployments, races between sessions).  dev.off()
# is guaranteed via on.exit().
output$downloadFP <- downloadHandler(
filename <- function() {
pdf_file <<- as.character(input$fname12)
paste('FP_', pdf_file, Sys.time(),'.pdf', sep='')
},
content <- function(file) {
pdf(file = file, height= 12, width=8)
on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
forestplot(res()$CI_Data)
})
# CSV download of the forest-plot data: per-quantile differences, CI
# limits, the coded significance column, plus a trailing row carrying the
# dichotomization percentile.
output$downloadTB <- downloadHandler(
filename <- function() {
csv_file <<- as.character(input$fname13)
paste('TB_', csv_file, Sys.time(),'.csv', sep='')
},
content <- function(file) {
# Reorder columns so the significance code sits next to the quantile.
tab <- res()$CI_Data3[, c(1, 5, 2, 3, 4)]
tab[, 1] <- as.character(c(paste("Q", 1:10, sep = ""), "Overall"))
# Append a footer row recording which percentile was used.
tab[12, 1] <- "Percentile"
tab[12, 2] <- res()$percentile
grp <- c("High", "Low")
other <- grp[grp != input$exp_ref]
colnames(tab) <- c("Quantiles",
paste0("Significance(0: Non-sig, 1:", input$exp_ref, " Better, 2:", other, " Better, 3:Non-estimable)"),
"Mean Time Difference/Transformed Hazard Ratio(log(1/HR))",
"CI Lower Limit",
"CI Upper Limit")
write.csv(tab, file, row.names = FALSE)
})
# Load the dataset for the KM / univariate-association tab: either the
# bundled BRCA example or a user-uploaded .xlsx/.csv/.txt file.
# Fixes vs. the original:
#  * an upload with an unrecognized extension left `d2` undefined and the
#    fall-through to data.frame(d2) errored; we now return NULL instead;
#  * the file name is read explicitly from inFile$name (the original
#    coerced the whole fileInput data.frame via inFile[1]);
#  * grepl(..., fixed = TRUE) so "." is matched literally rather than as
#    the regex any-character.
data_input2 <- reactive({
d2 <- NULL
if(input$file2 == 'Example2'){
d2 <- read.csv("data/BRCA_for_quantile_survival_analysis.csv", header =T, sep =",", stringsAsFactors = F)
}
else if(input$file2 == 'load_my_own2'){
inFile <- input$file22
if (is.null(inFile))
return(NULL)
fname <- as.character(inFile$name)
if(grepl(".xlsx", fname, fixed = TRUE)) { d2 = read.xlsx(as.character(inFile$datapath), colNames = TRUE, rowNames = F, as.is = T) }
else if(grepl(".csv", fname, fixed = TRUE)) { d2 = read.csv(as.character(inFile$datapath), header = TRUE, sep = ",", stringsAsFactors = F, as.is = T, fill = T) }
else if(grepl(".txt", fname, fixed = TRUE)) { d2 = read.table(as.character(inFile$datapath), header = TRUE, sep = "\t", stringsAsFactors = F, as.is = T, fill = T) }
}
# Unsupported source selection or extension: nothing was loaded.
if (is.null(d2))
return(NULL)
Dataset2 <- data.frame(d2)
return(as.data.frame(Dataset2))
})
# Let the user download the currently loaded dataset as a CSV.
output$downloadEx2 <- downloadHandler(
filename <- function() {
paste('Example ds data', Sys.time(),'.csv', sep='')
},
content <- function(file) {
write.csv(data_input2(), file, row.names = FALSE)
}
)
# Render the Kaplan-Meier plot for the selected cohort variable.
# "All Patients" fits a single overall curve; a factor variable fits one
# curve per level; a numeric variable (with "continuous" chosen) is first
# dichotomized into High/Low at the requested percentile (cutoff value 1
# is the sentinel for optimalcut()'s data-driven cutpoint).  The risk
# table is toggled by input$riskt.
output$kmplot <- renderPlot({
data2 <- data_input2()
if(!is.null(data2)){
# Model character columns as factors; other columns are kept as-is
# (the else branch is a deliberate no-op).
for (j in 1:ncol(data2)) {
if (class(data2[, j]) %in% c("character"))
data2[,j] <- as.factor(data2[,j])
else data2[,j] = data2[, j]
}
if (input$select21 == "All Patients") {
# Single overall survival curve, no grouping variable.
time <- data2[,input$select22]
censor <- data2[,input$select23]
dat <- cbind.data.frame(time, censor)
fit <- survfit(Surv(as.numeric(time), as.numeric(factor(censor))) ~ 1, data = dat)
# Drawing curves
xlabel <- c("Years", "Months", "Days")
b <- c(1, 12, 365)   # axis break spacing per time unit
#TF <- c(TRUE, FALSE)
if (input$riskt == TRUE) {
res <- ggsurvplot(fit, data = dat, break.time.by = b[as.numeric(input$time)], font.main = 18,
font.x = 14,
font.y = 14, xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9), pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12,legend = c(0.2, 0.2), legend.title = input$select21,
color = "#2E9FDF", risk.table = TRUE, risk.table.y.text.col = TRUE)#, fun = function(y) y*100)
return(res)
} else {
res <- ggsurvplot(fit, data = dat, break.time.by = b[as.numeric(input$time)], font.main = 18,
font.x = 14,
font.y = 14, xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9), pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12,legend = c(0.2, 0.2), legend.title = input$select21,
color = "#2E9FDF", risk.table = FALSE, risk.table.y.text.col = FALSE)#, fun = function(y) y*100)
return(res)
}
} else {
xvar <- data2[, input$select21]
time <- data2[,input$select22]
censor <- data2[,input$select23]
dat <- cbind.data.frame(time, censor, xvar)
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
# Too many levels to plot meaningfully.
return(NULL)
} else if (class(xvar) == "factor") {
# One curve per factor level.
fit <- survfit(Surv(as.numeric(time), as.numeric(factor(censor))) ~ xvar, data = dat)
# Drawing curves
xlabel <- c("Years", "Months", "Days")
b <- c(1, 12, 365)
#TF <- c(TRUE, FALSE)
if (input$riskt == TRUE) {
res <- ggsurvplot(fit, data = dat, break.time.by = b[as.numeric(input$time)], font.main = 18,
font.x = 14,
font.y = 14, xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9), pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12,legend = c(0.2, 0.2), legend.title = input$select21, legend.labs = levels(xvar),
color = "#2E9FDF", risk.table = TRUE, risk.table.y.text.col = TRUE)#, fun = function(y) y*100)
return(res)
} else {
res <- ggsurvplot(fit, data = dat, break.time.by = b[as.numeric(input$time)], font.main = 18,
font.x = 14,
font.y = 14, xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9), pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12,legend = c(0.2, 0.2), legend.title = input$select21, legend.labs = levels(xvar),
color = "#2E9FDF", risk.table = FALSE, risk.table.y.text.col = FALSE)#, fun = function(y) y*100)
return(res)
}
}else if (class(xvar) %in% c("integer", "numeric") & input$binary == "continuous") {
# Continuous variable: dichotomize at the chosen percentile
# (cutoff 1 -> data-driven optimal cutpoint via optimalcut()).
if (as.numeric(as.integer(input$cutoff2)) == 1) {
perc <- optimalcut(dat)
} else {perc <- as.numeric(as.integer(input$cutoff2))}
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100, na.rm= TRUE), "Low", "High")
Group <- as.factor(dat$Group)
fit <- survfit(Surv(as.numeric(time), as.numeric(factor(censor))) ~ Group, data = dat)
# Drawing curves
xlabel <- c("Years", "Months", "Days")
b <- c(1, 12, 365)
#TF <- c(TRUE, FALSE)
if (input$riskt == TRUE) {
res <- ggsurvplot(fit, data = dat, break.time.by = b[as.numeric(input$time)], font.main = 18,
font.x = 14,
font.y = 14, xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9), pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12,legend = c(0.2, 0.2), legend.title = input$select21, legend.labs = levels(Group),
color = "#2E9FDF", risk.table = TRUE, risk.table.y.text.col = TRUE)#, fun = function(y) y*100, type="cairo")
return(res)
} else {
res <- ggsurvplot(fit, data = dat, break.time.by = b[as.numeric(input$time)], font.main = 18,
font.x = 14,
font.y = 14, xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9), pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12,legend = c(0.2, 0.2), legend.title = input$select21, legend.labs = levels(Group),
color = "#2E9FDF", risk.table = FALSE, risk.table.y.text.col = FALSE)#, fun = function(y) y*100, type="cairo")
return(res)
}
} else {
# Unsupported variable/type combination.
return(NULL)
}
}
}
})
# Univariate survival-association table: for each selected variable, fit a
# univariate Cox model (dichotomizing continuous variables at the chosen
# percentile first) and report counts, HR with 95% CI, type-3 and log-rank
# p-values, optionally plus the proportional-hazards assumption p-value.
output$out2 <- renderDataTable({
data2 <- data_input2()
# Character columns are modeled as factors; other columns untouched.
for (j in 1:ncol(data2)) {
if (class(data2[, j]) %in% c("character"))
data2[,j] <- as.factor(data2[,j])
else data2[,j] = data2[, j]
}
xvar <- data2[,input$show_vars26, drop = FALSE]
time <- data2[,input$select24]
censor <- data2[,input$select25]
options(warn=-1)   # NOTE(review): suppresses warnings globally for the session
res <- list()      # one result table per selected variable
for (i in 1: ncol(xvar)){
x <- xvar[, i]
if (is.null(x) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(x) > 30) {
# Too many levels for a sensible univariate table.
return(NULL)
} else if (class(x) == "factor" | class(x) %in% c("integer", "numeric")){
if (class(x) %in% c("integer", "numeric")){
# Continuous variable: dichotomize at the chosen percentile
# (cutoff 1 is the sentinel for the data-driven optimal cutpoint).
dat <- cbind.data.frame(time, censor, xvar=x)
if (as.numeric(as.integer(input$cutoff2)) == 1) {
perc <- optimalcut(dat)
} else {perc <- as.numeric(as.integer(input$cutoff2))}
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100, na.rm= TRUE), "Low", "High")
Group <- as.factor(dat$Group)
x <- Group
}
# Variable name on the first row only; blanks for the other levels.
Variable <- c(colnames(xvar)[i], rep("", (length(levels(x))-1)))
#x <- C(x, contr.treatment, base=3)
fit <- coxph(Surv(as.numeric(time), as.numeric(factor(censor))) ~ x)
temp <- cox.zph(fit)
assum.p.value <- ifelse(temp$table[3] < 0.001, "<0.001", paste0(round(temp$table[3], 4)))
assump <- c(assum.p.value, rep("", (length(levels(x))-1)))
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[, 1], 2)
lower95 = round(sum$conf.int[, 3], 2)
upper95 = round(sum$conf.int[, 4], 2)
logrank.p.value = ifelse(sum$sctest[3] < 0.001, "<0.001", paste0(round(sum$sctest[3], 4)))
logrankp <- c(logrank.p.value, rep("", (length(levels(x))-1)))
type3.p.value = ifelse(sum$coefficients[, 5] < 0.001, "<0.001", paste0(round(sum$coefficients[, 5], 4)))
counts <- data.frame(table(x))
# Move the reference level to the last row so it lines up with the
# blank HR cell appended below.
counts <- rbind.data.frame(counts[2:nrow(counts),], counts[1, ])
coxres <- cbind.data.frame(Variable, counts, c(paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), ""), c(type3.p.value, ""), logrankp)
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Log-rank P-value")
if (input$assum == 0) {
res[[i]] <- coxres
} else if (input$assum == 1) {
# Optionally append the PH-assumption p-value column.
coxres2 <- cbind.data.frame(coxres, assump)
colnames(coxres2)[7] <- "P-value for Proportional Hazards Assumption"
res[[i]] <- coxres2
}
} else
return(NULL)
}
# Stack the per-variable tables into one display table.
res_table <- do.call("rbind", res)
}, rownames = FALSE) #, options = list(dom = 'tip'))
# Static heading above the Kaplan-Meier plot controls.
output$pv21 <- renderUI({
spacer <- " "
heading <- "To Visualize the Kaplan Meier Plot:"
HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Static heading above the univariate association table.
output$pv22 <- renderUI({
spacer <- " "
heading <- "Univariate Survival Association Analysis for Multiple Selected Variables"
HTML(paste(spacer, h2(strong(heading)), spacer, sep = '<br/>'))
})
#output$out2 = renderUI ({
# tagList(
# htmlOutput("pv22"),
# dataTableOutput("out2")
# )
#})
# Download the Kaplan-Meier plot (mirroring output$kmplot) as a PDF.
# Fixes vs. the original:
#  * the PDF is written directly to the temp path Shiny supplies instead
#    of a scratch file in the working directory + file.copy();
#  * dev.off() is guaranteed via on.exit();
#  * with risk.table = FALSE, ggsurvplot returns no $table component, so
#    grid.arrange(res$plot, res$table, ...) errored -- those branches now
#    print res$plot alone;
#  * grid.arrange() already draws on the device, so the old
#    "p <- grid.arrange(...); plot(p)" produced a duplicate page;
#  * quantile() uses na.rm = TRUE, matching output$kmplot.
# The four near-identical ggsurvplot calls are collapsed into one helper.
output$downloadKM <- downloadHandler(
filename <- function() {
pdf_file <<- as.character(input$fname21)
paste('KM_', pdf_file, Sys.time(),'.pdf', sep='')
},
content <- function(file) {
pdf(file = file, height= 8, width=12)
on.exit(dev.off(), add = TRUE)
data2 <- data_input2()
# Character columns are modeled as factors.
for (j in 1:ncol(data2)) {
if (class(data2[, j]) %in% c("character"))
data2[,j] <- as.factor(data2[,j])
}
# Helper: draw one KM page for a fitted survfit object, with or without
# the number-at-risk table depending on input$riskt.
draw_km <- function(fit, dat, time, legend.labs = NULL) {
xlabel <- c("Years", "Months", "Days")
b <- c(1, 12, 365)
with.table <- (input$riskt == TRUE)
args <- list(fit, data = dat, break.time.by = b[as.numeric(input$time)],
font.main = 18, font.x = 14, font.y = 14,
xlab = xlabel[as.numeric(input$time)], pval = T, pval.method = T,
pval.coord = c(max(time)*0.9, 0.9),
pval.method.coord = c(max(time)*0.9, 0.95),
font.tickslab = 12, legend = c(0.2, 0.2),
legend.title = input$select21, color = "#2E9FDF",
risk.table = with.table, risk.table.y.text.col = with.table)
if (!is.null(legend.labs)) args$legend.labs <- legend.labs
res <- do.call(ggsurvplot, args)
if (with.table) {
grid.arrange(res$plot, res$table, nrow = 2, heights = c(0.7, 0.3))
} else {
print(res$plot)
}
}
if (input$select21 == "All Patients") {
# Single overall curve, no grouping variable.
time <- data2[,input$select22]
censor <- data2[,input$select23]
dat <- cbind.data.frame(time, censor)
fit <- survfit(Surv(as.numeric(time), as.numeric(factor(censor))) ~ 1, data = dat)
draw_km(fit, dat, time)
} else {
xvar <- data2[, input$select21]
time <- data2[,input$select22]
censor <- data2[,input$select23]
dat <- cbind.data.frame(time, censor, xvar)
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
# Too many levels to plot meaningfully.
return(NULL)
} else if (class(xvar) == "factor") {
# One curve per factor level.
fit <- survfit(Surv(as.numeric(time), as.numeric(factor(censor))) ~ xvar, data = dat)
draw_km(fit, dat, time, legend.labs = levels(xvar))
} else if (class(xvar) %in% c("integer", "numeric") & input$binary == "continuous") {
# Continuous variable: dichotomize at the chosen percentile
# (cutoff 1 -> optimalcut()'s data-driven cutpoint).
if (as.numeric(as.integer(input$cutoff2)) == 1) {
perc <- optimalcut(dat)
} else {perc <- as.numeric(as.integer(input$cutoff2))}
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100, na.rm = TRUE), "Low", "High")
Group <- as.factor(dat$Group)
fit <- survfit(Surv(as.numeric(time), as.numeric(factor(censor))) ~ Group, data = dat)
draw_km(fit, dat, time, legend.labs = levels(Group))
} else {
return(NULL)
}
}
})
# CSV download of the univariate survival association table.  This
# duplicates the computation in output$out2 (same Cox fits, same table
# layout) and writes the stacked result to the download path.
output$x1 = downloadHandler(
filename <- function() {
csv_file <<- as.character(input$fname22)
paste('ST_', csv_file, Sys.time(),'.csv', sep='')
},
content = function(file) {
data2 <- data_input2()
# Character columns are modeled as factors; other columns untouched.
for (j in 1:ncol(data2)) {
if (class(data2[, j]) %in% c("character"))
data2[,j] <- as.factor(data2[,j])
else data2[,j] = data2[, j]
}
xvar <- data2[,input$show_vars26, drop = FALSE]
time <- data2[,input$select24]
censor <- data2[,input$select25]
options(warn=-1)   # NOTE(review): suppresses warnings globally for the session
res <- list()      # one result table per selected variable
for (i in 1: ncol(xvar)){
x <- xvar[, i]
if (is.null(x) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(x) > 30) {
return(NULL)
} else if (class(x) == "factor" | class(x) %in% c("integer", "numeric")){
if (class(x) %in% c("integer", "numeric")){
# Continuous variable: dichotomize at the chosen percentile
# (cutoff 1 is the sentinel for the data-driven optimal cutpoint).
dat <- cbind.data.frame(time, censor, xvar=x)
if (as.numeric(as.integer(input$cutoff2)) == 1) {
perc <- optimalcut(dat)
} else {perc <- as.numeric(as.integer(input$cutoff2))}
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100, na.rm= TRUE), "Low", "High")
Group <- as.factor(dat$Group)
x <- Group
}
# Variable name on the first row only; blanks for the other levels.
Variable <- c(colnames(xvar)[i], rep("", (length(levels(x))-1)))
#x <- C(x, contr.treatment, base=3)
fit <- coxph(Surv(as.numeric(time), as.numeric(factor(censor))) ~ x)
temp <- cox.zph(fit)
assum.p.value <- ifelse(temp$table[3] < 0.001, "<0.001", paste0(round(temp$table[3], 4)))
assump <- c(assum.p.value, rep("", (length(levels(x))-1)))
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[, 1], 2)
lower95 = round(sum$conf.int[, 3], 2)
upper95 = round(sum$conf.int[, 4], 2)
logrank.p.value = ifelse(sum$sctest[3] < 0.001, "<0.001", paste0(round(sum$sctest[3], 4)))
logrankp <- c(logrank.p.value, rep("", (length(levels(x))-1)))
type3.p.value = ifelse(sum$coefficients[, 5] < 0.001, "<0.001", paste0(round(sum$coefficients[, 5], 4)))
counts <- data.frame(table(x))
# Move the reference level to the last row so it lines up with the
# blank HR cell appended below.
counts <- rbind.data.frame(counts[2:nrow(counts),], counts[1, ])
coxres <- cbind.data.frame(Variable, counts, c(paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), ""), c(type3.p.value, ""), logrankp)
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Log-rank P-value")
if (input$assum == 0) {
res[[i]] <- coxres
} else if (input$assum == 1) {
# Optionally append the PH-assumption p-value column.
coxres2 <- cbind.data.frame(coxres, assump)
colnames(coxres2)[7] <- "P-value for Proportional Hazards Assumption"
res[[i]] <- coxres2
}
} else
return(NULL)
}
res_table <- do.call("rbind", res)
write.csv(res_table, file, row.names = F)
})
# Load the dataset for the competing-risk tab: either the bundled bmtcrr
# example or a user-uploaded .xlsx/.csv/.txt file.
# Same fixes as data_input2: an unrecognized extension left `d3` undefined
# and errored (now returns NULL); the file name is read from inFile$name;
# grepl(..., fixed = TRUE) matches "." literally.  The original
# stringsAsFactors = T behavior for uploads is preserved.
data_input3 <- reactive({
d3 <- NULL
if(input$file3 == 'Example3'){
d3 <- read.csv("data/bmtcrr.csv", header =T, sep =",")
}
else if(input$file3 == 'load_my_own3'){
inFile <- input$file32
if (is.null(inFile))
return(NULL)
fname <- as.character(inFile$name)
if(grepl(".xlsx", fname, fixed = TRUE)) { d3 = read.xlsx(as.character(inFile$datapath), colNames = TRUE, rowNames = F, as.is = T) }
else if(grepl(".csv", fname, fixed = TRUE)) { d3 = read.csv(as.character(inFile$datapath), header = TRUE, sep = ",", stringsAsFactors = T, as.is = T, fill = T) }
else if(grepl(".txt", fname, fixed = TRUE)) { d3 = read.table(as.character(inFile$datapath), header = TRUE, sep = "\t", stringsAsFactors = T, as.is = T, fill = T) }
}
# Unsupported source selection or extension: nothing was loaded.
if (is.null(d3))
return(NULL)
Dataset3 <- data.frame(d3)
return(as.data.frame(Dataset3))
})
# Let the user download the competing-risk dataset as a CSV.
output$downloadEx3 <- downloadHandler(
filename <- function() {
paste('Example ds data', Sys.time(),'.csv', sep='')
},
content <- function(file) {
write.csv(data_input3(), file, row.names = FALSE)
}
)
# Render the cumulative-incidence (competing-risk) plot via CumIncidence().
# "All Patients" plots the overall CIF; a factor/character variable plots
# one curve per level; a numeric variable (with "continuous" chosen) is
# dichotomized into High/Low at the requested percentile.  When
# input$points is set, the user-supplied comma-separated time points
# (input$text) are passed as the evaluation grid `t`.
output$cifplot <- renderPlot({
data3 <- data_input3()
if (input$select31 == "All Patients") {
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor)
if (is.null(time) | is.null(censor)) {
return(NULL)
} else if (class(time)=="numeric"){
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
# User supplied explicit evaluation time points.
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
}else {
# NOTE(review): integer time columns are rejected here although the
# grouped branches below accept them -- confirm if intentional.
return(NULL)
}
} else {
xvar <- data3[, input$select31]
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor, xvar)
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
# Too many levels to plot meaningfully.
return(NULL)
} else if (class(xvar) == "factor" & (class(time)=="numeric"|class(time)=="integer")){
# One cumulative-incidence curve per factor level.
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
}else if (class(xvar) == "character" & (class(time)=="numeric"|class(time)=="integer")){
# Character grouping variable: coerce to factor first.
xvar <- as.factor(xvar)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
} else if (class(xvar) %in% c("integer", "numeric") & input$binary2 == "continuous") {
# Continuous grouping variable: dichotomize at the chosen percentile.
perc <- as.numeric(as.integer(input$cutoff3))
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100), "Low", "High")
Group <- as.factor(dat$Group)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
}
}
})
# Render the cumulative-incidence function (CIF) estimate table.
# Mirrors the branching used by the CIF plot: depending on whether a grouping
# variable is selected ("All Patients" vs. a column), its class (factor /
# character / numeric), and whether user-specified time points were entered
# (input$points), it refits CumIncidence() and tabulates estimates with 95% CIs.
output$ciftable <- renderDataTable({
data3 <- data_input3()
if (input$select31 == "All Patients") {
# No grouping variable: overall CIF only.
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor)
if (is.null(time) | is.null(censor)) {
return(NULL)
} else if (class(time)=="numeric" |class(time)=="integer"){
if (input$points == 0) {
# input$time2 indexes the time unit (1=Years, 2=Months, 3=Days).
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
# input$text is a comma-separated list of time points at which to report the CIF.
fit <- CumIncidence (time, censor, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
}else {
return(NULL)
}
} else {
# A grouping variable was chosen; CIF is estimated per group level.
xvar <- data3[, input$select31]
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor, xvar)
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
# Too many levels to tabulate sensibly.
return(NULL)
} else if (class(xvar) == "factor" & (class(time)=="numeric"|class(time)=="integer")){
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
} else if(class(xvar) == "character" & (class(time)=="numeric"|class(time)=="integer")){
# Character columns are treated as factors.
xvar <- as.factor(xvar)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
} else if (class(xvar) %in% c("integer", "numeric") & input$binary2 == "continuous") {
# Continuous grouping variable: dichotomize at the chosen percentile cutoff.
perc <- as.numeric(as.integer(input$cutoff3))
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100), "Low", "High")
Group <- as.factor(dat$Group)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
} else {
return(NULL)
}
}
# 95% CI on the complementary log-log scale: est^exp(+/- z*se/(est*log(est))).
z <- qnorm(1-(1-0.95)/2)
lower <- fit$est ^ exp(-z*fit$se/(fit$est*log(fit$est)))
lower <- melt(round(lower, 4), id.vars=rownames(lower))
lower <- lower[order(lower[,1]),]
upper <- fit$est ^ exp(z*fit$se/(fit$est*log(fit$est)))
upper <- melt(round(upper, 4), id.vars=rownames(upper))
upper <- upper[order(upper[,1]),]
est <- melt(round(fit$est, 4), id.vars=rownames(fit$est))
est <- est[order(est[,1]),]
# Format "estimate (lower, upper)" per group/time row.
ci <- data.frame(paste0(est[, 3], " (", lower[, 3], ", ", upper[, 3], ")"))
Variable <- as.character(est[, 1])
table <- cbind.data.frame(Variable, data.frame(est[, 2]), ci, stringsAsFactors=FALSE)
colnames(table) <- c(input$select31, paste0("Time (", xlabel, ")"), "CIF Estimate (95% CI)")
# Blank out repeated group labels: keep the label only on each group's first
# time point (rows whose time equals the first unique time value).
vec <- as.vector(table[, 2])
seq <- 1:nrow(table)
seq <- seq[-which(vec == unique(vec)[1])]
table[seq, 1] <- ""
table
}, rownames = FALSE, options = list(pageLength = 100))
# Render the univariate competing-risk association table.
# For each selected covariate, fits a Fine-Gray model (crr) and, for
# categorical covariates, also reports Gray's test p-value from cuminc().
# Rows are stacked per covariate into one data table.
output$out3 <- renderDataTable({
data3 <- data_input3()
xvar <- data3[,input$show_vars36, drop = FALSE]
time <- data3[,input$select34]
censor <- data3[,input$select35]
res <- list()
for (i in 1: ncol(xvar)){
x <- xvar[, i]
if (is.null(x) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(x) > 30) {
# Too many factor levels for a readable table.
return(NULL)
} else if (class(x) == "factor" & (class(time)=="numeric"|class(time)=="integer")){
# Variable name on the first row only; blanks for remaining level rows.
Variable <- c(colnames(xvar)[i], rep("", (length(levels(x))-1)))
#x <- C(x, contr.treatment, base=3)
# factor2ind() expands the factor into indicator columns for crr().
fit <- crr(time, censor, factor2ind(x), failcode = input$event2, cencode = input$censor2)
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[, 1], 2)
lower95 = round(sum$conf.int[, 3], 2)
upper95 = round(sum$conf.int[, 4], 2)
# Gray's test p-value for the event of interest (row input$event2 of $Tests).
gray.p.value = ifelse(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"] < 0.001, "<0.001", paste0(round(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"], 4)))
grayp <- c(gray.p.value, rep("", (length(levels(x))-1)))
type3.p.value = ifelse(sum$coef[, 5] < 0.001, "<0.001", paste0(round(sum$coef[, 5], 4)))
counts <- data.frame(table(x))
# Rotate so the reference level (first) is listed last.
counts <- rbind.data.frame(counts[2:nrow(counts),], counts[1, ])
coxres <- cbind.data.frame(Variable, counts, c(paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), ""), c(type3.p.value, ""), grayp)
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Gray's P-value")
res[[i]] <- coxres
}else if(class(x) == "character" & (class(time)=="numeric"|class(time)=="integer")){
# Same as the factor branch after coercing character to factor.
x <- as.factor(x)
Variable <- c(colnames(xvar)[i], rep("", (length(levels(x))-1)))
#x <- C(x, contr.treatment, base=3)
fit <- crr(time, censor, factor2ind(x), failcode = input$event2, cencode = input$censor2)
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[, 1], 2)
lower95 = round(sum$conf.int[, 3], 2)
upper95 = round(sum$conf.int[, 4], 2)
gray.p.value = ifelse(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"] < 0.001, "<0.001", paste0(round(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"], 4)))
grayp <- c(gray.p.value, rep("", (length(levels(x))-1)))
type3.p.value = ifelse(sum$coef[, 5] < 0.001, "<0.001", paste0(round(sum$coef[, 5], 4)))
counts <- data.frame(table(x))
counts <- rbind.data.frame(counts[2:nrow(counts),], counts[1, ])
coxres <- cbind.data.frame(Variable, counts, c(paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), ""), c(type3.p.value, ""), grayp)
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Gray's P-value")
res[[i]] <- coxres
}else if (class(x) %in% c("integer", "numeric") & (class(time)=="numeric"|class(time)=="integer")){
# Continuous covariate: single-row result, no Gray's test ("-").
Variable <- colnames(xvar)[i]
fit <- crr(time, censor, x, failcode = input$event2, cencode = input$censor2)
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[1], 2)
lower95 = round(sum$conf.int[3], 2)
upper95 = round(sum$conf.int[4], 2)
#logrank.p.value = sum$sctest[3]
type3.p.value = ifelse(sum$coef[5] < 0.001, "<0.001", paste0(round(sum$coef[5], 4)))
# NOTE(review): length(!is.na(x)) equals length(x), not the non-NA count;
# sum(!is.na(x)) is presumably intended — confirm before changing.
coxres <- cbind.data.frame(Variable, NA, length(!is.na(x)), paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), type3.p.value, "-")
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Gray's P-value")
res[[i]] <- coxres
} else
return(NULL)
}
# Stack the per-covariate tables into one result.
res_table <- do.call("rbind", res)
}, rownames = FALSE) #, options = list(dom = 'tip'))
# Section heading shown above the CIF plot controls.
output$pv31 <- renderUI({
blank_spacer <- paste(" ")
heading_text <- paste("To Visualize the Cumulative Incidence Function Plot:")
HTML(paste(h2(strong(heading_text)), blank_spacer, sep = '<br/>'))
})
# Section heading for the univariate competing-risk analysis table.
output$pv32 <- renderUI({
blank_spacer <- paste(" ")
heading_text <- paste("Univariate Competing Risk Survival Association Analysis for Multiple Selected Variables")
HTML(paste(blank_spacer, h2(strong(heading_text)), blank_spacer, sep = '<br/>'))
})
# Download handler for the CIF plot as a PDF.
# Re-runs the same CumIncidence() branching as the on-screen plot inside a
# pdf() device, then copies the written file to the download target.
# NOTE(review): 'filename <- function()' inside the call passes the function
# positionally while also assigning 'filename' in the enclosing environment;
# named arguments (filename = , content = ) are the conventional form.
# 'pdf_file <<-' deliberately shares the chosen name between the two closures.
output$downloadcif <- downloadHandler(
filename <- function() {
pdf_file <<- as.character(input$fname31)
paste('CIF_', pdf_file, Sys.time(),'.pdf', sep='')
},
content <- function(file) {
# Plot into a local PDF file named after the user's chosen name.
pdf(file=paste(pdf_file,".pdf",sep="") , height= 8, width=12)
#plot_fp()
data3 <- data_input3()
if (input$select31 == "All Patients") {
# No grouping variable: overall CIF only.
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor)
if (is.null(time) | is.null(censor)) {
return(NULL)
} else if (class(time)=="numeric"){
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
# input$text holds comma-separated evaluation time points.
fit <- CumIncidence (time, censor, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
}else {
return(NULL)
}
} else {
# Grouped CIF: branch on the grouping variable's class, as in the plot.
xvar <- data3[, input$select31]
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor, xvar)
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
return(NULL)
} else if (class(xvar) == "factor" & (class(time)=="numeric"|class(time)=="integer")){
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
}else if (class(xvar) == "character" & (class(time)=="numeric"|class(time)=="integer")){
# Character grouping variables are coerced to factor first.
xvar <- as.factor(xvar)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
} else if (class(xvar) %in% c("integer", "numeric") & input$binary2 == "continuous") {
# Continuous variable: split into High/Low at the chosen percentile.
perc <- as.numeric(as.integer(input$cutoff3))
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100), "Low", "High")
Group <- as.factor(dat$Group)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
}
}
# Close the device and hand the finished PDF to the browser download.
dev.off()
file.copy(paste(pdf_file,'.pdf', sep='') ,file, overwrite=TRUE)
})
# Download handler for the CIF estimate table as a CSV.
# Recomputes the same table as output$ciftable (CumIncidence fit plus
# cloglog-transformed 95% CIs) and writes it with write.csv().
output$downloadciftable = downloadHandler(
filename <- function() {
# NOTE(review): '<<-' shares the chosen file name with the content closure.
csv_file <<- as.character(input$fname33)
paste('ciftable_', csv_file, Sys.time(),'.csv', sep='')
},
content = function(file) {
data3 <- data_input3()
if (input$select31 == "All Patients") {
# Overall CIF (no grouping variable).
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor)
if (is.null(time) | is.null(censor)) {
return(NULL)
} else if (class(time)=="numeric" |class(time)=="integer"){
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
}else {
return(NULL)
}
} else {
# Grouped CIF: branch on the grouping variable's class.
xvar <- data3[, input$select31]
time <- data3[,input$select32]
censor <- data3[,input$select33]
dat <- cbind.data.frame(time, censor, xvar)
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
return(NULL)
} else if (class(xvar) == "factor" & (class(time)=="numeric"|class(time)=="integer")){
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
## Drawing curves
} else if(class(xvar) == "character" & (class(time)=="numeric"|class(time)=="integer")){
xvar <- as.factor(xvar)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (time, censor, xvar, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
}else if (class(xvar) %in% c("integer", "numeric") & input$binary2 == "continuous") {
# Continuous grouping variable: dichotomize at the percentile cutoff.
perc <- as.numeric(as.integer(input$cutoff3))
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100), "Low", "High")
Group <- as.factor(dat$Group)
if (input$points == 0) {
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
} else {
#xlabel <- c("Years", "Months", "Days")[input$time2]
xlabel <- c("Years", "Months", "Days")[as.numeric(input$time2)]
fit <- CumIncidence (as.numeric(time), censor, Group, t = as.numeric(as.vector(unlist(strsplit(input$text, ", ")))), cencode = input$censor, xlab=xlabel, event = as.numeric(input$event))
}
} else {
return(NULL)
}
}
# 95% CI on the complementary log-log scale, then long-format via melt().
z <- qnorm(1-(1-0.95)/2)
lower <- fit$est ^ exp(-z*fit$se/(fit$est*log(fit$est)))
lower <- melt(round(lower, 4), id.vars=rownames(lower))
lower <- lower[order(lower[,1]),]
upper <- fit$est ^ exp(z*fit$se/(fit$est*log(fit$est)))
upper <- melt(round(upper, 4), id.vars=rownames(upper))
upper <- upper[order(upper[,1]),]
est <- melt(round(fit$est, 4), id.vars=rownames(fit$est))
est <- est[order(est[,1]),]
ci <- data.frame(paste0(est[, 3], " (", lower[, 3], ", ", upper[, 3], ")"))
Variable <- as.character(est[, 1])
table <- cbind.data.frame(Variable, data.frame(est[, 2]), ci, stringsAsFactors=FALSE)
colnames(table) <- c(input$select31, paste0("Time (", xlabel, ")"), "CIF Estimate (95% CI)")
# Blank out repeated group labels except at the first time point per group.
vec <- as.vector(table[, 2])
seq <- 1:nrow(table)
seq <- seq[-which(vec == unique(vec)[1])]
table[seq, 1] <- ""
table
write.csv(table, file, row.names = F)
})
# Download handler: export the univariate competing-risk (Fine-Gray) table
# as a CSV. The table-building logic mirrors output$out3 exactly.
output$x2 = downloadHandler(
filename <- function() {
# NOTE(review): '<<-' shares the chosen file name with the content closure.
csv_file <<- as.character(input$fname32)
paste('ciftable_', csv_file, Sys.time(),'.csv', sep='')
},
content = function(file) {
data3 <- data_input3()
xvar <- data3[,input$show_vars36, drop = FALSE]
time <- data3[,input$select34]
censor <- data3[,input$select35]
res <- list()
for (i in 1: ncol(xvar)){
x <- xvar[, i]
if (is.null(x) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(x) > 30) {
return(NULL)
} else if (class(x) == "factor" & (class(time)=="numeric"|class(time)=="integer")){
# Factor covariate: Fine-Gray model on indicator columns, plus Gray's test.
Variable <- c(colnames(xvar)[i], rep("", (length(levels(x))-1)))
#x <- C(x, contr.treatment, base=3)
fit <- crr(time, censor, factor2ind(x), failcode = input$event2, cencode = input$censor2)
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[, 1], 2)
lower95 = round(sum$conf.int[, 3], 2)
upper95 = round(sum$conf.int[, 4], 2)
gray.p.value = ifelse(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"] < 0.001, "<0.001", paste0(round(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"], 4)))
grayp <- c(gray.p.value, rep("", (length(levels(x))-1)))
type3.p.value = ifelse(sum$coef[, 5] < 0.001, "<0.001", paste0(round(sum$coef[, 5], 4)))
counts <- data.frame(table(x))
# Rotate so the reference level (first) is listed last.
counts <- rbind.data.frame(counts[2:nrow(counts),], counts[1, ])
coxres <- cbind.data.frame(Variable, counts, c(paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), ""), c(type3.p.value, ""), grayp)
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Gray's P-value")
res[[i]] <- coxres
}else if(class(x) == "character" & (class(time)=="numeric"|class(time)=="integer")){
# Same as the factor branch after coercion.
x <- as.factor(x)
Variable <- c(colnames(xvar)[i], rep("", (length(levels(x))-1)))
#x <- C(x, contr.treatment, base=3)
fit <- crr(time, censor, factor2ind(x), failcode = input$event2, cencode = input$censor2)
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[, 1], 2)
lower95 = round(sum$conf.int[, 3], 2)
upper95 = round(sum$conf.int[, 4], 2)
gray.p.value = ifelse(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"] < 0.001, "<0.001", paste0(round(cuminc(time, censor, factor2ind(x))$Tests[as.integer(input$event2), "pv"], 4)))
grayp <- c(gray.p.value, rep("", (length(levels(x))-1)))
type3.p.value = ifelse(sum$coef[, 5] < 0.001, "<0.001", paste0(round(sum$coef[, 5], 4)))
counts <- data.frame(table(x))
counts <- rbind.data.frame(counts[2:nrow(counts),], counts[1, ])
coxres <- cbind.data.frame(Variable, counts, c(paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), ""), c(type3.p.value, ""), grayp)
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Gray's P-value")
res[[i]] <- coxres
}else if (class(x) %in% c("integer", "numeric") & (class(time)=="numeric"|class(time)=="integer")){
# Continuous covariate: single-row result; Gray's test not applicable ("-").
Variable <- colnames(xvar)[i]
fit <- crr(time, censor, x, failcode = input$event2, cencode = input$censor2)
sum <- summary(fit)
hazard.ratio = round(sum$conf.int[1], 2)
lower95 = round(sum$conf.int[3], 2)
upper95 = round(sum$conf.int[4], 2)
#logrank.p.value = sum$sctest[3]
type3.p.value = ifelse(sum$coef[5] < 0.001, "<0.001", paste0(round(sum$coef[5], 4)))
# NOTE(review): length(!is.na(x)) equals length(x); sum(!is.na(x)) is
# presumably intended for the non-missing count — confirm before changing.
coxres <- cbind.data.frame(Variable, NA, length(!is.na(x)), paste0(hazard.ratio, " (", lower95,
"-", upper95, ")"), type3.p.value, "-")
colnames(coxres) <- c("Variable", "Level", "N", "Hazard Ratio (95% CI)", "Type 3 P-value", "Gray's P-value")
res[[i]] <- coxres
} else
return(NULL)
}
# Stack per-covariate tables and write to the download target.
res_table <- do.call("rbind", res)
write.csv(res_table, file, row.names = F)
})
# Load the landmark-analysis dataset: either the bundled example CSV or a
# user-uploaded .xlsx/.csv/.txt file. Returns a data.frame, or NULL when no
# data source is available or the uploaded file type is unsupported.
data_input4 <- reactive({
if(input$file4 == 'Example4'){
d4 <- read.csv("data/example_landmark_Stan.csv", header =T, sep =",",stringsAsFactors = F)
}
else if(input$file4 == 'load_my_own4'){
inFile <- input$file42
if (is.null(inFile))
return(NULL)
# Detect the file type from the uploaded file's name. fixed = TRUE matches
# the extension literally (the original regex "." was a wildcard, so names
# like "my_csv_notes.dat" could be mis-detected as CSV).
fname <- as.character(inFile$name[1])
if (grepl(".xlsx", fname, fixed = TRUE)) { d4 = read.xlsx(as.character(inFile$datapath), colNames = TRUE, rowNames = F, as.is = T) }
else if (grepl(".csv", fname, fixed = TRUE)) { d4 = read.csv(as.character(inFile$datapath), header = TRUE, sep = ",", stringsAsFactors = F, as.is = T, fill = T) }
else if (grepl(".txt", fname, fixed = TRUE)) { d4 = read.table(as.character(inFile$datapath), header = TRUE, sep = "\t", stringsAsFactors = F, as.is = T, fill = T) }
# Unsupported extension: previously fell through with 'd4' undefined and
# crashed at data.frame(d4); return NULL instead.
else return(NULL)
}
else
return(NULL)
# dim(data)
Dataset4 <- data.frame(d4)
return(as.data.frame(Dataset4))
})
# Download handler: serve the currently loaded landmark dataset as a
# timestamped CSV. Uses named arguments (filename =, content =): the original
# 'filename <- function()' form passed the closures positionally while also
# leaking 'filename'/'content' bindings into the enclosing environment.
output$downloadEx4 <- downloadHandler(
filename = function() {
paste('Example ds data', Sys.time(),'.csv', sep='')
},
content = function(file) {
ds4 <- data_input4()
write.csv(ds4, file, row.names = FALSE)
}
)
# Render the landmark-analysis plot: two side-by-side survival (or cumulative
# incidence) curves split at the landmark time (input$text2) — the left panel
# shows all patients from time 0 to the landmark, the right panel shows only
# patients still at risk after the landmark. Handles a factor grouping
# variable directly, or dichotomizes a continuous one at a percentile cutoff.
# input$riskt2 toggles the number-at-risk rows; input$option toggles survival
# probability vs. cumulative incidence (1 - S(t)) display.
output$landmarkplot <- renderPlot({
data4 <- data_input4()
if(!is.null(data4)){
# Coerce character columns to factors so class-based branching works below.
for (j in 1:ncol(data4)) {
if (class(data4[, j]) %in% c("character"))
data4[,j] <- as.factor(data4[,j])
else data4[,j] = data4[, j]
}
xvar <- data4[,input$select41]
time <- as.numeric(data4[,input$select42])
censor <- as.numeric(factor(data4[,input$select43]))
rtime <- as.numeric(data4[,input$select44])
dat <- cbind.data.frame(time, censor, xvar, rtime)
dat <- as.data.frame(dat[!is.na(dat$xvar),]) ###
# NOTE(review): '<<-' exposes the working data globally; also used below
# as the basis for the landmark subset.
check_data <<- dat
if (is.null(xvar) | is.null(time) | is.null(censor)) {
return(NULL)
} else if (nlevels(xvar) > 30) {
return(NULL)
} else if (class(xvar) == "factor" & input$text2 != "") {
#test <- cutLM(dat, outcome=list(time="time", status="censor"),
# LM=as.numeric(input$text2), horizon=max(time), covs=list(varying = colnames(dat)[3]))
# Build the landmark cohort: patients with follow-up beyond the landmark.
test0 <- check_data[order(check_data$time),]
test <- test0[test0$time > as.numeric(input$text2), ]
# Patients whose group change (rtime) happens after the landmark are
# reassigned to the level of those with no recorded change time.
change_level <- as.character(unique(test[is.na(test$rtime), "xvar"]))
for(j in 1:nrow(test)) {
check_xvar <- as.character(test$xvar[j])
test$xvar[j] <- ifelse(is.na(test$rtime[j])|test$rtime[j] > as.numeric(input$text2), change_level, as.character(test$xvar[j]))
}
# fit1/p1: full cohort (left panel); fit2/p2: landmark cohort (right panel).
fit1 <- survfit(Surv(time, censor) ~ xvar, data = dat)
fit2 <- survfit(Surv(time, censor) ~ xvar, data = test)
p1 <- survdiff(Surv(time, censor) ~ xvar, data = dat)
p2 <- survdiff(Surv(time, censor) ~ xvar, data = test)
p.val1 <- 1 - pchisq(p1$chisq, length(p1$n) - 1)
p.val2 <- 1 - pchisq(p2$chisq, length(p2$n) - 1)
darkcols <- c("blue", "red", "green", "black", "yellow", "orange", "pink", "purple", "grey")
# Drawing curves
# b: axis tick spacing matched to the time unit (Years/Months/Days).
xlabel <- c("Years", "Months", "Days")
b <- c(0.25, 3, 90)
pos1<-c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]), as.numeric(input$text2))
NR1<-nrisk(fit1, times=pos1)
# Suppress the duplicated at-risk count at the landmark boundary.
NR1[, ncol(NR1)] <- rep(NA, nrow(NR1))
pos2<-seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)])
NR2<-nrisk(fit2, times=pos2)
if (input$riskt2 == TRUE) {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
# Survival-probability display with number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(dat$xvar))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$xvar))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(test$xvar))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, col=darkcols[1:length(levels(dat$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(test$xvar))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
# Cumulative-incidence display (1 - S(t)) with number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(dat$xvar))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(test$xvar))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(test$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(test$xvar))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
} else {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
# Survival-probability display without number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(dat$xvar))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$xvar))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
plot(fit2, col=darkcols[1:length(levels(dat$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
# Cumulative-incidence display without number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(dat$xvar))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(test$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
}
} else if (class(xvar) %in% c("integer", "numeric") & input$text2 != "" & input$binary3 == "continuous") {
# Continuous grouping variable: cutoff4 == 1 triggers an optimal cutpoint
# search; otherwise cutoff4 is used as a percentile.
if (as.numeric(as.integer(input$cutoff4)) == 1) {
perc <- optimalcut(dat)
} else {perc <- as.numeric(as.integer(input$cutoff4))}
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100), "Low", "High")
#Group <- as.factor(dat$Group)
# Drop the raw continuous column; keep the derived Group.
dat <- dat[, -3]
# cutLM builds the landmark dataset at the chosen landmark time.
test <- cutLM(dat, outcome=list(time="time", status="censor"),
LM=as.numeric(input$text2), horizon=max(time), covs=list(fixed=colnames(dat)[3]))
fit1 <- survfit(Surv(time, censor) ~ Group, data = dat)
fit2 <- survfit(Surv(time, censor) ~ Group, data = test)
p1 <- survdiff(Surv(time, censor) ~ Group, data = dat)
p2 <- survdiff(Surv(time, censor) ~ Group, data = test)
p.val1 <- 1 - pchisq(p1$chisq, length(p1$n) - 1)
p.val2 <- 1 - pchisq(p2$chisq, length(p2$n) - 1)
darkcols <- c("blue", "red", "green", "black", "yellow", "orange", "pink", "purple", "grey")
# Drawing curves
xlabel <- c("Years", "Months", "Days")
b <- c(0.25, 3, 90)
pos1<-c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]), as.numeric(input$text2))
NR1<-nrisk(fit1, times=pos1)
NR1[, ncol(NR1)] <- rep(NA, nrow(NR1))
pos2<-seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)])
NR2<-nrisk(fit2, times=pos2)
if (input$riskt2 == TRUE) {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
# Survival-probability display with number-at-risk rows (High/Low groups).
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$Group))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(factor(test$Group)))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(factor(test$Group)))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
# Cumulative-incidence display with number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(dat$Group)))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(factor(test$Group)))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(test$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(factor(test$Group)))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
} else {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
# Survival-probability display without number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$Group))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
plot(fit2, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
# Cumulative-incidence display without number-at-risk rows.
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(dat$Group)))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(test$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
}
} else {
return(NULL)
}
}
})
# Section heading shown above the landmark survival plot panel.
output$pv41 <- renderUI({
  heading <- paste("To Visualize the Landmark Survival Plot:")
  spacer <- paste(" ")
  HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Download handler for the landmark survival plot.  Rebuilds the landmark
# analysis from the current inputs and writes the same two-panel figure shown
# on screen to a PDF.
# NOTE(review): the plotting code below intentionally duplicates the on-screen
# renderPlot for this tab -- keep the two copies in sync.
output$downloadlandmarkplot <- downloadHandler(
# Arguments are named with '=' (not '<-') so they are matched by name by
# downloadHandler() instead of also creating 'filename'/'content' bindings in
# the server environment.  'pdf_file' is deliberately assigned with '<<-'
# because content() reads it.
filename = function() {
pdf_file <<- as.character(input$fname41)
paste('landmarkplot_', pdf_file, Sys.time(),'.pdf', sep='')
},
content = function(file) {
pdf(file=paste(pdf_file,".pdf",sep="") , height= 8, width=12)
#plot_fp()
# Coerce character columns to factors so they can serve as grouping variables.
data4 <- data_input4()
for (j in 1:ncol(data4)) {
if (class(data4[, j]) %in% c("character"))
data4[,j] <- as.factor(data4[,j])
else data4[,j] = data4[, j]
}
xvar <- data4[,input$select41]
time <- as.numeric(data4[,input$select42])
censor <- as.numeric(factor(data4[,input$select43]))
rtime <- as.numeric(data4[,input$select44])
dat <- cbind.data.frame(time, censor, xvar, rtime)
dat <- as.data.frame(dat[!is.na(dat$xvar),]) ###
check_data <<- dat
if (is.null(xvar) | is.null(time) | is.null(censor)) {
# Close the PDF device before bailing out so it is not left open.
dev.off()
return(NULL)
} else if (nlevels(xvar) > 30) {
dev.off()
return(NULL)
} else if (class(xvar) == "factor" & input$text2 != "") {
# --- Factor covariate: build the landmark data set by hand ---------------
#test <- cutLM(dat, outcome=list(time="time", status="censor"),
#              LM=as.numeric(input$text2), horizon=max(time), covs=list(varying = colnames(dat)[3]))
test0 <- check_data[order(check_data$time),]
test <- test0[test0$time > as.numeric(input$text2), ]
change_level <- as.character(unique(test[is.na(test$rtime), "xvar"]))
for(j in 1:nrow(test)) {
check_xvar <- as.character(test$xvar[j])
test$xvar[j] <- ifelse(is.na(test$rtime[j])|test$rtime[j] > as.numeric(input$text2), change_level, as.character(test$xvar[j]))
}
# fit1/p.val1: full cohort; fit2/p.val2: landmarked subset.
fit1 <- survfit(Surv(time, censor) ~ xvar, data = dat)
fit2 <- survfit(Surv(time, censor) ~ xvar, data = test)
p1 <- survdiff(Surv(time, censor) ~ xvar, data = dat)
p2 <- survdiff(Surv(time, censor) ~ xvar, data = test)
p.val1 <- 1 - pchisq(p1$chisq, length(p1$n) - 1)
p.val2 <- 1 - pchisq(p2$chisq, length(p2$n) - 1)
darkcols <- c("blue", "red", "green", "black", "yellow", "orange", "pink", "purple", "grey")
# Drawing curves
xlabel <- c("Years", "Months", "Days")
b <- c(0.25, 3, 90)
pos1<-c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]), as.numeric(input$text2))
NR1<-nrisk(fit1, times=pos1)
NR1[, ncol(NR1)] <- rep(NA, nrow(NR1))
pos2<-seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)])
NR2<-nrisk(fit2, times=pos2)
# Four variants: with/without the number-at-risk table (riskt2), and KM
# curves (option TRUE) vs cumulative incidence 1 - S(t) (option FALSE).
if (input$riskt2 == TRUE) {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(dat$xvar))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$xvar))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(test$xvar))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, col=darkcols[1:length(levels(dat$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(test$xvar))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(dat$xvar))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(test$xvar))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(test$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(test$xvar))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
} else {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(dat$xvar))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$xvar))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
plot(fit2, col=darkcols[1:length(levels(dat$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(dat$xvar))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(test$xvar))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$xvar)),
lty = c(1:1), col=darkcols[1:length(levels(test$xvar))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
}
} else if (class(xvar) %in% c("integer", "numeric") & input$text2 != "" & input$binary3 == "continuous") {
# --- Continuous covariate: dichotomize at the chosen cutoff, then build the
# landmark data set with cutLM() -----------------------------------------
if (as.numeric(as.integer(input$cutoff4)) == 1) {
perc <- optimalcut(dat)
} else {perc <- as.numeric(as.integer(input$cutoff4))}
dat$Group <- ifelse(dat[, 'xvar'] < quantile(dat[, 'xvar'], perc/100), "Low", "High")
#Group <- as.factor(dat$Group)
dat <- dat[, -3]
test <- cutLM(dat, outcome=list(time="time", status="censor"),
LM=as.numeric(input$text2), horizon=max(time), covs=list(fixed=colnames(dat)[3]))
fit1 <- survfit(Surv(time, censor) ~ Group, data = dat)
fit2 <- survfit(Surv(time, censor) ~ Group, data = test)
p1 <- survdiff(Surv(time, censor) ~ Group, data = dat)
p2 <- survdiff(Surv(time, censor) ~ Group, data = test)
p.val1 <- 1 - pchisq(p1$chisq, length(p1$n) - 1)
p.val2 <- 1 - pchisq(p2$chisq, length(p2$n) - 1)
darkcols <- c("blue", "red", "green", "black", "yellow", "orange", "pink", "purple", "grey")
# Drawing curves
xlabel <- c("Years", "Months", "Days")
b <- c(0.25, 3, 90)
pos1<-c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]), as.numeric(input$text2))
NR1<-nrisk(fit1, times=pos1)
NR1[, ncol(NR1)] <- rep(NA, nrow(NR1))
pos2<-seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)])
NR2<-nrisk(fit2, times=pos2)
if (input$riskt2 == TRUE) {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$Group))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(factor(test$Group)))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(factor(test$Group)))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(dat$Group)))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
for (n in 1: dim(NR1)[1]) {mtext(text = NR1[n, ], side = 1, line = (6+2*n), outer = F, at = pos1, col=darkcols[1:length(levels(factor(test$Group)))][n])}
mtext("Number at Risk", side = 1, line = 5, at = 0, outer = T)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(test$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
for (n in 1: dim(NR2)[1]) {mtext(text = NR2[n, ], side = 1, line = (6+2*n), outer = F, at = pos2, col=darkcols[1:length(levels(factor(test$Group)))][n])}
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
} else {
#TF <- c(TRUE, FALSE)
if (input$option == TRUE) {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(0,as.numeric(input$text2)), axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
#for (n in 1: dim(NR1)[1]){text(x=pos1,y= 0,labels= NR1[n,],cex=1.5,pos=3, offset=(-0.8*n), col=darkcols[1:length(levels(test$Group))],font=3)}
#text(x=0,y=0.01, label="Number at Risk", pos=4, offset=-0.5, cex=1.5, font=4)
plot(fit2, col=darkcols[1:length(levels(factor(dat$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Survival Probability",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
} else {
op <- par(mar=c(0.5, 0, 0.2, 0), mfrow=c(1,2),
oma = c(20, 5, 0.6, 0.2))
layout(matrix(c(1,2,2), 1, 3, byrow = TRUE))
plot(fit1, xaxs='i', xlim=c(0,as.numeric(input$text2)), ylim =c(0,1), fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(dat$Group)))], axes = F)
text(0.45*as.numeric(input$text2), 1, paste0("Log-rank p-value: ", round(p.val1, 4)), cex = 1.5)
axis(side = 1, cex.axis = 1.5, at=c(seq(0,as.numeric(input$text2), b[as.numeric(input$time3)]),as.numeric(input$text2)))
axis(side = 2)
plot(fit2, xaxs='i', fun = function(x) {1-x}, col=darkcols[1:length(levels(factor(test$Group)))], xlim=c(as.numeric(input$text2), 1.02*max(time)), ylim =c(0,1), axes = F)
text(0.7*1.02*max(dat$time), 1, paste0("Log-rank p-value: ", round(p.val2, 4)), cex = 1.5)
axis(side = 1, cex.axis=1.5, at=seq(as.numeric(input$text2), max(time), b[as.numeric(input$time3)]))
title(xlab = xlabel[as.numeric(input$time3)],
ylab = "Cumulative Incidence Rate",
outer = TRUE, line = 3, cex.lab= 2)
legend(0.85*1.02*max(dat$time), 0.1, legend = levels(factor(test$Group)),
lty = c(1:1), col=darkcols[1:length(levels(factor(test$Group)))], cex = 1.5)
abline(v=as.numeric(input$text2), lty = 2)
par(op)
}
}
} else {
# Unsupported input combination: close the device before returning.
dev.off()
return(NULL)
}
dev.off()
file.copy(paste(pdf_file,'.pdf', sep='') ,file, overwrite=TRUE)
})
# Reactive data source for the optimal-cutpoint tab: either the bundled
# example data set or a user-uploaded file (.xlsx / .csv / .txt).
# Returns a data.frame, or NULL when no usable input is available.
data_input5 <- reactive({
  if (input$file5 == 'Example5') {
    d5 <- read.csv("data/BRCA_for_quantile_survival_analysis.csv", header = TRUE, sep = ",")
  } else if (input$file5 == 'load_my_own5') {
    inFile <- input$file51
    if (is.null(inFile)) {
      return(NULL)
    } else if (grepl("\\.xlsx$", inFile[1])) {
      # Extension matched at the end of the name with the dot escaped, so
      # "." is not treated as a regex wildcard.
      d5 <- read.xlsx(as.character(inFile$datapath), colNames = TRUE, rowNames = F, as.is = T)
    } else if (grepl("\\.csv$", inFile[1])) {
      d5 <- read.csv(as.character(inFile$datapath), header = TRUE, sep = ",", stringsAsFactors = F, as.is = T, fill = T)
    } else if (grepl("\\.txt$", inFile[1])) {
      d5 <- read.table(as.character(inFile$datapath), header = TRUE, sep = "\t", stringsAsFactors = F, as.is = T, fill = T)
    } else {
      # Unsupported extension: bail out instead of erroring on an undefined d5.
      return(NULL)
    }
  } else {
    return(NULL)
  }
  Dataset5 <- data.frame(d5)
  return(as.data.frame(Dataset5))
})
# Download handler for the example optimal-cutpoint data set.
# Arguments are named with '=' (not '<-') so they are matched by name by
# downloadHandler() and do not also create bindings in the server environment.
output$downloadEx5 <- downloadHandler(
  filename = function() {
    paste('Example ds cutpoint PC', Sys.time(),'.csv', sep='')
  },
  content = function(file) {
    ds5 <- data_input5()
    write.csv(ds5, file, row.names = FALSE)
  }
)
# Martingale-residual plot for the optimal-cutpoint tab.
output$MRplot <- renderPlot({
  uploaded <- data_input5()
  if (is.null(uploaded)) {
    return(NULL)
  }
  # Assemble (time, censor, xvar) and drop rows with a missing survival time.
  time <- uploaded[, input$select52]
  censor <- uploaded[, input$select53]
  xvar <- uploaded[, input$select51]
  surv_dat <- cbind.data.frame(time, censor, xvar)
  surv_dat <- as.data.frame(surv_dat[!is.na(surv_dat$time), ])
  optimalcut_plot(surv_dat, input$select51)
})
# Candidate-cutpoint table for the selected continuous variable.
output$Optimal <- DT::renderDataTable({
  uploaded <- data_input5()
  if (!is.null(uploaded)) {
    # Assemble (time, censor, xvar) and drop rows with a missing survival time.
    time <- uploaded[, input$select52]
    censor <- uploaded[, input$select53]
    xvar <- uploaded[, input$select51]
    surv_dat <- cbind.data.frame(time, censor, xvar)
    surv_dat <- as.data.frame(surv_dat[!is.na(surv_dat$time), ])
    DT::datatable(
      optimalcut_table(surv_dat, input$select51),
      options = list(
        lengthMenu = list(c(5, 10, -1), c('5', '10', 'All')),
        pageLength = 10))
  }
})
# Section heading shown above the optimal-cutpoint output.
output$pv51 <- renderUI({
  heading <- paste("Optimal Cut Point Output")
  spacer <- paste(" ")
  HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Download handler for the martingale-residual (optimal cutpoint) plot.
# Arguments are named with '=' (not '<-') so they do not leak bindings into
# the server environment; 'pdf_file' is deliberately assigned with '<<-'
# because content() reads it.
output$downloadMR <- downloadHandler(
  filename = function() {
    pdf_file <<- as.character(input$fname51)
    paste('MR_', pdf_file, Sys.time(),'.pdf', sep='')
  },
  content = function(file) {
    pdf(file=paste(pdf_file,".pdf",sep="") , height= 8, width=12)
    data5 <- data_input5()
    xvar <- data5[, input$select51]
    time <- data5[,input$select52]
    censor <- data5[,input$select53]
    dat <- cbind.data.frame(time, censor, xvar)
    dat <- as.data.frame(dat[!is.na(dat$time),])
    optimalcut_plot(dat, input$select51)
    dev.off()
    file.copy(paste(pdf_file,'.pdf', sep='') ,file, overwrite=TRUE)
  })
# Reactive data source for the significance-grid (heatmap) tab: either the
# bundled example data set or a user-uploaded file (.xlsx / .csv / .txt).
# Returns a data.frame, or NULL when no usable input is available.
data_input6 <- reactive({
  if (input$file6 == 'Example6') {
    d6 <- read.csv("data/heatmap.csv", header = TRUE, sep = ",")
  } else if (input$file6 == 'load_my_own6') {
    inFile <- input$file61
    if (is.null(inFile)) {
      return(NULL)
    } else if (grepl("\\.xlsx$", inFile[1])) {
      # Extension matched at the end of the name with the dot escaped, so
      # "." is not treated as a regex wildcard.
      d6 <- read.xlsx(as.character(inFile$datapath), colNames = TRUE, rowNames = F, as.is = T)
    } else if (grepl("\\.csv$", inFile[1])) {
      d6 <- read.csv(as.character(inFile$datapath), header = TRUE, sep = ",", stringsAsFactors = F, as.is = T, fill = T)
    } else if (grepl("\\.txt$", inFile[1])) {
      d6 <- read.table(as.character(inFile$datapath), header = TRUE, sep = "\t", stringsAsFactors = F, as.is = T, fill = T)
    } else {
      # Unsupported extension: bail out instead of erroring on an undefined d6.
      return(NULL)
    }
  } else {
    return(NULL)
  }
  Dataset6 <- data.frame(d6)
  return(as.data.frame(Dataset6))
})
# Download handler for the example significance-grid data set.
# Arguments are named with '=' (not '<-') so they are matched by name by
# downloadHandler() and do not also create bindings in the server environment.
output$downloadEx6 <- downloadHandler(
  filename = function() {
    paste('Example ds grid PC', Sys.time(),'.csv', sep='')
  },
  content = function(file) {
    ds6 <- data_input6()
    write.csv(ds6, file, row.names = FALSE)
  }
)
# Significance-grid heatmap across cancer types / subgroups.
# NOTE(review): this plotting code is duplicated in output$downloadHM below --
# keep the two copies in sync.
output$heatmapplot <- renderPlot({
data6 <- data_input6()
# First column holds the row labels (cancer types); the rest are coerced to a
# numeric matrix of significance codes.
hm_p <- data.matrix(data6[, -1])
class(hm_p) <- "numeric"
rownames(hm_p) <- data6[, 1]
colnames(hm_p) <- colnames(data6[, -1])
# Force the last column to code 3 ("white" in the palette below); its actual
# values are shown as cell text instead -- presumably the cutoff percentile
# described in ReadMe6 (TODO confirm against the input format).
hm_p[, ncol(hm_p)] <- 3
# Cell annotations: NA everywhere except the last column.
hm_p_values <- cbind(matrix(NA, nrow(hm_p), (ncol(hm_p)-1)), data6[, ncol(data6)])
colnames(hm_p_values) <- colnames(data6[, -1])
par(oma=c(2,1,1,0), mar=c(0, 0, 0, 2), xpd=TRUE, cex.main=0.9)
r <- c("High", "Low")
# Static grid: no clustering, no dendrograms, no color key; the palette maps
# codes 0:3 -> grey/blue/red/white.
p <- heatmap.2(hm_p, cellnote = hm_p_values, notecol = "black", Rowv=F, Colv=FALSE, scale="none", dendrogram="none", key = F,
trace="none", col = c("grey", "blue", "red", "white"), xlab="", ylab="", margins=c(5,13), main = "")
# Legend wording depends on which expression level the user chose as reference.
legend("topleft", inset=c(0,0.15), cex = 0.8, legend=c("Non-significant", paste0(r[r != input$exp_ref2], " Expression Better Survival"), paste0(input$exp_ref2, " Expression Better Survival"), "Non-estimable"), fill=c("grey", "red", "blue", "white"), title="Color Key")
})
# Download handler for the significance-grid heatmap.
# Arguments are named with '=' (not '<-') so they do not leak bindings into
# the server environment; 'pdf_file' is deliberately assigned with '<<-'
# because content() reads it.
# NOTE(review): the plotting code duplicates output$heatmapplot above -- keep
# the two copies in sync.
output$downloadHM <- downloadHandler(
  filename = function() {
    pdf_file <<- as.character(input$fname61)
    paste('HM_', pdf_file, Sys.time(),'.pdf', sep='')
  },
  content = function(file) {
    pdf(file=paste(pdf_file,".pdf",sep="") , height= 8, width=12)
    data6 <- data_input6()
    hm_p <- data.matrix(data6[, -1])
    class(hm_p) <- "numeric"
    rownames(hm_p) <- data6[, 1]
    colnames(hm_p) <- colnames(data6[, -1])
    hm_p[, ncol(hm_p)] <- 3
    hm_p_values <- cbind(matrix(NA, nrow(hm_p), (ncol(hm_p)-1)), data6[, ncol(data6)])
    colnames(hm_p_values) <- colnames(data6[, -1])
    par(oma=c(2,1,1,0), mar=c(0, 0, 0, 2), xpd=TRUE, cex.main=0.9)
    r <- c("High", "Low")
    p <- heatmap.2(hm_p, cellnote = hm_p_values, notecol = "black", Rowv=F, Colv=FALSE, scale="none", dendrogram="none", key = F,
    trace="none", col = c("grey", "blue", "red", "white"), xlab="", ylab="", margins=c(5,13), main = "")
    legend("topleft", inset=c(0,0.15), cex = 0.8, legend=c("Non-significant", paste0(r[r != input$exp_ref2], " Expression Better Survival"), paste0(input$exp_ref2, " Expression Better Survival"), "Non-estimable"), fill=c("grey", "red", "blue", "white"), title="Color Key")
    dev.off()
    file.copy(paste(pdf_file,'.pdf', sep='') ,file, overwrite=TRUE)
  })
# Section heading shown above the significance-grid heatmap.
output$pv61 <- renderUI({
  heading <- paste("Summarized Significance conclusion over all cancer types")
  spacer <- paste(" ")
  HTML(paste(h2(strong(heading)), spacer, sep = '<br/>'))
})
# Static help/README text for the standard survival analysis tab.
output$ReadMe2 <- renderUI({
str0 <- paste(" ")
str1 <- paste("DATA FORMAT")
str2 <- paste("  1. Data should be input as a .txt or .xlsx or .csv file. The first row of the data file has information about the variables.")
str3 <- paste("  2. The remaining lines contain measurements one line per each subject/sample, described in the format below.")
str4 <- paste("  3. The first column of the file contains the survival time 'time' (column 1), survival status 'status' (column 2) followed by the other variables of interest.")
str6 <- paste("   a) Column_1. This should contain the survival time, for the user's reference.")
str7 <- paste("   b) Column_2. This should contain the Survival status, input as 'censored' vs 'dead'.")
str8 <- paste("   c) Remaining Columns. These columns should contain information with variables of interest, such as age, race, gender and patient id kept as reference." )
str9 <- paste("NOTE1: In this tab, you are able to carry out standard survival analysis. On the left side panel, you have the options to upload data; select variables for univariate
survival association analysis with cox proportional hazard model; meanwhile, select the variables for Kaplan Meier analysis; download the output table and plot accordingly.")
str10 <- paste("NOTE2: To carry out univariate survival association analysis, it is based on cox proportional hazards model with entering one variable in the model each time. Select the variables of interest to generate the output table.
You can choose the option to be 'Yes' if you want to test the proportional hazard assumption using Schoenfeld residuals test.")
str11 <- paste("NOTE3: To carry out Kaplan Meier Analysis, the variable of interest can be All Patients or a categorical variable. You also need to specify the time unit corresponding to the data in order for the plot to display correctly.")
HTML(paste(str0, strong(str9), str0,str0,str0,strong((str10)), str0,str0,str0,strong((str11)), str0, str0,h5(strong(str1)), str0, str2, str3, str4, str6, str7,str8, str0, sep = '<br/>'))
})
# Static help/README text for the quantile survival analysis tab.
output$ReadMe1 <- renderUI({
str0 <- paste(" ")
str1 <- paste("DATA FORMAT")
str2 <- paste("  1. Data should be input as a .txt or .xlsx or .csv file. The first row of the data file has information about the variables.")
str3 <- paste("  2. The remaining lines contain measurements one line per each subject/sample, described in the format below.")
str4 <- paste("  3. The first column of the file contains sample id, the second column includes the survival status 'os_censor' (column 2), survival time 'os_time' (column 3) followed by the other variables of interest.")
str6 <- paste("   a) Column_1. This should contain the Survival status, input as 0 for 'censored' vs 1 for 'dead'.")
str7 <- paste("   b) Column_2. This should contain the survival time, for the user's reference.")
str8 <- paste("   c) Remaining Columns. These columns should contain information with variables of interest, such as age, race, gender and patient id kept as reference." )
str9 <- paste("NOTE1: In this tab, you are able to carry out quantile survival analysis. The quantile survival analysis in this shiny app is based on the method developed by Huang (2010 & 2016).
For each quantile, the survival time difference will be estimated with 95% CI. If it includes 0, then that quantile is not significant. Otherwise, it is significant in that quantile.")
str10 <- paste("NOTE2: On the left side panel, you have the options to upload data; select a continuous variable of interest to be dichotomized with optimal cutoff, 25 percentile,
50 percentile, 75 percentile; the option to choose reference level needs to be specified by the user with respect to interest; download the output table and plot accordingly.")
str11 <- paste("NOTE3: Once you click the Run & Generate a random number button, the app will start to run the quantile survival analysis. You can then check the output plots and tables in the second subtab.")
HTML(paste(str0, strong(str9), str0,str0,str0,strong((str10)), str0,str0,str0,strong((str11)), str0, str0,h5(strong(str1)), str0, str2, str3, str4, str6, str7,str8, str0, sep = '<br/>'))
})
# Static help/README text for the competing-risk analysis tab.
output$ReadMe3 <- renderUI({
str0 <- paste(" ")
str1 <- paste("DATA FORMAT")
str2 <- paste("  1. Data should be input as a .txt or .xlsx or .csv file. The first row of the data file has information about the variables.")
str3 <- paste("  2. The remaining lines contain measurements one line per each subject/sample, described in the format below.")
str4 <- paste("  3. The first column contains survival time, the second column includes the survival status, followed by the other variables of interest.")
str6 <- paste("   a) Column_1. This should contain the Survival time, for the user's reference.")
str7 <- paste("   b) Column_2. This should contain the survival status, input as 0 for 'censored' vs 1 for 'dead'.")
str8 <- paste("   c) Remaining Columns. These columns should contain information with variables of interest, such as age, race, gender and patient id kept as reference." )
str9 <- paste("NOTE1: In this tab, you are able to carry out a competing risk survival analysis. The method is based on
Fine and Gray's Model. On the left side panel, you have the options to upload data; select variables for univariate
survival association analysis with Fine and Gray model; meanwhile, select the variables for cumulative incidence function analysis; download the output table and plot accordingly.")
str10 <- paste("NOTE2: For Univariate analysis, choose variables of interest to enter the analysis. Meanwhile, you need to specify event code and censor code.")
str11 <- paste("NOTE3: To generate a CIF plot and table, choose a categorical variable to compare or All Patient for overall. You can select the correct time unit based on the data.
Also, you can choose to input time points of interest or not. The censor code also needs to be specified.")
HTML(paste(str0, strong(str9), str0,str0,strong((str10)), str0,str0,strong((str11)), str0, str0,h5(strong(str1)), str0, str2, str3, str4, str6, str7,str8, str0, sep = '<br/>'))
})
# Static help/README text for the landmark survival analysis tab.
output$ReadMe4 <- renderUI({
str0 <- paste(" ")
str1 <- paste("DATA FORMAT")
str2 <- paste("  1. Data should be input as a .txt or .xlsx or .csv file. The first row of the data file has information about the variables.")
str3 <- paste("  2. The remaining lines contain measurements one line per each subject/sample, described in the format below.")
str4 <- paste("  3. The first column of the file contains the survival time 'time' (column 1), survival status 'status' (column 2) followed by the other variables of interest.")
str6 <- paste("   a) Column_1. This should contain the survival time, for the user's reference.")
str7 <- paste("   b) Column_2. This should contain the Survival status, input as 'censored' vs 'dead'.")
str8 <- paste("   c) Remaining Columns. These columns should contain information with variables of interest, such as age, race, gender and patient id kept as reference." )
str9 <- paste("NOTE1: In this tab, you are able to carry out landmark survival analysis. On the left side panel, you have the options to upload data; select variables for landmark analysis; you also need to specify the value for landmark analysis; download the output plot accordingly.")
str10 <- paste("NOTE2: The analysis is based on the function from R package 'dynpred' to generate the landmark dataset.")
str11 <- paste("NOTE3: You also need to specify the time unit corresponding to the data in order for the plot to display correctly. You can choose to get KM curves or CIF curves per research interest.")
HTML(paste(str0, strong(str9), str0,str0,str0,strong((str10)), str0,str0,str0,strong((str11)), str0, str0,h5(strong(str1)), str0, str2, str3, str4, str6, str7,str8, str0, sep = '<br/>'))
})
# Static help/README text for the optimal-cutpoint tab.
output$ReadMe5 <- renderUI({
str0 <- paste(" ")
str1 <- paste("DATA FORMAT")
str2 <- paste("  1. Data should be input as a .txt or .xlsx or .csv file. The first row of the data file has information about the variables.")
str3 <- paste("  2. The remaining lines contain measurements one line per each subject/sample, described in the format below.")
str4 <- paste("  3. The first column of the file contains sample id, the second column includes the survival status 'os_censor' (column 2), survival time 'os_time' (column 3) followed by the other variables of interest.")
str6 <- paste("   a) Column_1. This should contain the Survival status, input as 0 for 'censored' vs 1 for 'dead'.")
str7 <- paste("   b) Column_2. This should contain the survival time, for the user's reference.")
str8 <- paste("   c) Remaining Columns. These columns should contain information with variables of interest, such as age, race, gender and patient id kept as reference." )
str9 <- paste("NOTE1: In this tab, you are able to search for optimal cutpoint for a continuous variable of interest. The method is based on
Contal and O'Quigley (1999) and has also been implemented in SAS by Mandrekar et al. (2003).")
str10 <- paste("NOTE2: In the output, a martingale residual plot is included. In the following table, you can find the cutpoint value of the variable and corresponding percentile.")
HTML(paste(str0, strong(str9), str0,str0,str0,strong((str10)), str0, str0,h5(strong(str1)), str0, str2, str3, str4, str6, str7,str8, str0, sep = '<br/>'))
})
# Static help/README text for the significance-grid (heatmap) tab.
output$ReadMe6 <- renderUI({
str0 <- paste(" ")
str1 <- paste("DATA FORMAT")
str2 <- paste("  1. Data should be input as a .txt or .xlsx or .csv file. The first row of the data file has information about the variables.")
str3 <- paste("  2. The remaining lines contain measurements one line per each cancer type or subgroup, described in the format below.")
str4 <- paste("  3. The data is designed to have 13 columns.")
str6 <- paste("   a) Column_1. The column contains names of different cancer types or subgroups.")
str7 <- paste("   b) Column_13. This should contain the percentile for each cutpoint you used in quantile survival analysis.")
str8 <- paste("   c) Column_2 to Column 12. from 2nd to 12th column, the significance indications are included for 10 quantiles and overall." )
str9 <- paste("NOTE1: In this tab, you are able to generate a grid if you have multiple cancer or groups of interest to run multiple quantile survival analysis separately.")
str10 <- paste("NOTE2: On the left side panel, you have the options to upload data; select the reference level corresponding to the reference level chosen in quantile survival analysis;
download the output plot accordingly.")
str11 <- paste("NOTE3: To generate the input data, you can refer to the 'Quantile Survival Analysis' tab for more information.")
HTML(paste(str0, strong(str9), str0,str0,str0,strong((str10)), str0,str0,str0,strong((str11)), str0, str0,h5(strong(str1)), str0, str2, str3, str4, str6, str7,str8, str0, sep = '<br/>'))
})
}
shinyApp(ui= ui, server= server) |
3947add107c3e3d118f102f3626d4cb60752f8cd | bcbf4ae41cb39706540cea093d8ee6865641f7dc | /man/find_bins.Rd | 4d16e4111ea01f04e362cd658b0751b3b87507b0 | [
"MIT"
] | permissive | c1au6i0/rvent | 5bdb7db9f1a4f4ebda7900780072526ced23c596 | 5ccc790afb87d10ec612d2376c6064604736bba0 | refs/heads/master | 2022-05-08T16:21:48.960384 | 2022-04-06T17:32:40 | 2022-04-06T17:32:40 | 218,093,444 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 804 | rd | find_bins.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_bins.R
\name{find_bins}
\alias{find_bins}
\title{find_bins in vent dataframes.}
\usage{
find_bins(dat, bin, filter_vals = TRUE)
}
\arguments{
\item{dat}{dataframe as returned by \code{\link{import_session}}.}
\item{bin}{bin duration in minutes.}
\item{filter_vals}{if TRUE, filters the data to eliminate values that are impossible for rat physiology.}
}
\value{
a dataframe with a column "int_min" indicating the left limit of the interval in minutes, and a
column "int_sec" indicating the left and right limits of the interval in seconds. Time of injection is 0.
}
\description{
find_bins creates left closed duration intervals and filters the dataframe returned by import_vent.
}
\note{
this function is used by summarize_vent
}
|
018b5d06c02e3b1148091057ebf6758a856e5cd2 | e43e0d9b80fb33919ec9b253b5ebdb0dda7e12f8 | /R/compute_sharpe.R | 53d0966198146a6c894c1b917a2dac5924e93f5d | [] | no_license | jmoss13148/sandpr | ce1034e864e73155fd41f3193eb5f213f0e66ade | 6e4bd6ca262e13f2fcf76133d353040056f4313e | refs/heads/master | 2021-08-23T21:22:20.999081 | 2017-12-06T16:09:41 | 2017-12-06T16:09:41 | 110,973,284 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,962 | r | compute_sharpe.R | ##' @description compute the Sharpe ratio based upon daily returns
##'
##' @param data a list with elements \code{test_stocks} and \code{test_dates} identifying the stocks of interest
##' @param preds a list with element \code{predictions} giving model predictions for those stocks
##' @param prices_data daily price/return data used to compute returns (defaults to \code{cleaned_price_data})
##' @param type portfolio construction, either "long short" (default) or "long only"
##'
##' @export
compute_sharpe = function(data,
preds,
prices_data = cleaned_price_data,
type = "long short") {
## Build first datafame - stocks and weights
## Want top and bottom 10% of stocks
stocks = data.frame(ticker = data[["test_stocks"]],
test_date = data[["test_dates"]],
prediction = preds[["predictions"]])
stocks$ticker = as.character(stocks$ticker)
if(type == "long short") {
## Lowest to highest
stocks = arrange(stocks, prediction)
## 5 % of the total number of stocks in portfolio
number = floor(0.05 * nrow(stocks))
## Get indices
short_indices = 1:number
long_indices = (nrow(stocks) - number + 1):nrow(stocks)
## Get weight value
weight_value = 0.5 / number
weights = c(rep(-weight_value, number), rep(weight_value, number))
## Subset for indices
sharpe_data_one = stocks[c(short_indices, long_indices), ] %>%
mutate(weight = weights)
} else if(type == "long only") {
## Highest to lowest
stocks = arrange(stocks, desc(prediction))
## 10% of the total number of stocks in portfolio
number = floor(0.10 * nrow(stocks))
## Highest 10% of predictions
index = 1:(number)
## Get weight value
weight_value = 1 / number
weights = rep(weight_value, number)
## Subset for indices
sharpe_data_one = stocks[index, ] %>%
mutate(weight = weights)
}
sharpe_data = sharpe_data_one
## Only keep information from this date
weights_data = sharpe_data %>% arrange(ticker)
tickers = weights_data$ticker
weights = weights_data$weight
## Get return matrix with one year of returns and stocks we want
returns = prices_data %>% filter(date >= as.Date("2015-12-31") & date <= as.Date("2015-12-31") + 365,
symbol %in% tickers) %>%
arrange(symbol)
returns_ = returns %>% select(-close) %>%
spread(returns, key = date, value = Percentage_Change)
## Allocate vector for returns
return_values = numeric()
for(i in 2:ncol(returns_)) {
test = data.frame(ticker = tickers,
myweights = weights,
returns = returns_[, i] %>% unlist()) %>%
mutate(adjusted_returns = ifelse(myweights >= 0, returns + 1, returns - 1)) %>%
mutate(real_return = myweights * adjusted_returns) %>%
mutate(new_weight = ifelse(myweights >= 0, real_return, -real_return))
## Return for the day
day_return = ( sum(test$real_return) - sum(abs(test$myweights)) ) / sum(abs(test$myweights))
return_values = c(return_values, day_return)
## Update the weights
weights = test$new_weight
}
num_days = length(return_values)
## Compute the Sharpe ratio
#sharpe = mean(return_values) / sd(return_values) * sqrt(num_days)
## Use return value from preds
yearly_return = preds$average_returns
daily_sd = sd(return_values)
yearly_sd = daily_sd * sqrt(num_days)
sharpe = yearly_return / yearly_sd
return(list("yearly_return" = yearly_return,
"yearly_sd" = yearly_sd,
"sharpe" = sharpe,
"data" = sharpe_data_one))
}
|
b51dafef8e226a4ac0b02a16a19d177f64b81198 | ae80b4e9dd33d68b9876a34e7af91fa22d9b8b26 | /man/createGrid.Rd | 03f357358a9fe4d42f439679e5be4d20c5a17294 | [] | no_license | cran/redlistr | 5132c46912666919308c63d4cc4cbdff63d04d21 | 5b1ce794ecf494f31b16f030f26cacc45a1b301c | refs/heads/master | 2021-06-04T05:55:09.823568 | 2019-07-11T12:42:58 | 2019-07-11T12:42:58 | 96,505,437 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,371 | rd | createGrid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AOO_functions.R
\name{createGrid}
\alias{createGrid}
\title{Create empty Area of Occupancy (AOO) Grid.}
\usage{
createGrid(input.data, grid.size)
}
\arguments{
\item{input.data}{Object of an ecosystem or species distribution. Accepts either
raster or spatial points formats. Please use a CRS with units measured in
metres.}
\item{grid.size}{A number specifying the width of the desired grid square (in
same units as your coordinate reference system)}
}
\value{
A regular grid raster with extent \code{input.data} and grid size
\code{grid.size}. Each grid square has a unique identification number.
}
\description{
\code{createGrid} produces empty grid which can be used as the basis to help
compute AOO.
}
\references{
Bland, L.M., Keith, D.A., Miller, R.M., Murray, N.J. and
Rodriguez, J.P. (eds.) 2016. Guidelines for the application of IUCN Red
List of Ecosystems Categories and Criteria, Version 1.0. Gland,
Switzerland: IUCN. ix + 94pp. Available at the following web site:
\url{https://iucnrle.org/}
}
\seealso{
Other AOO functions: \code{\link{getAOOSilent}},
\code{\link{getAOO}}, \code{\link{makeAOOGrid}}
}
\author{
Nicholas Murray \email{murr.nick@gmail.com}, Calvin Lee
\email{calvinkflee@gmail.com}
}
\concept{AOO functions}
|
49173c6b36d1b264d0ab2cd958f71b69512afb7e | 5e094991872cd54c38fa10f8527030df89af6438 | /R/ordmasks.R | f757f86d5cca96414ce2fb194522cf270c5854bb | [] | no_license | lshtm-gis/PHSM_SI_package | 39b0c34f0f2190c39f20a5f745eee53e89352462 | 43c41f6da721e68748777bbcf9689aca4d1b8413 | refs/heads/main | 2023-03-31T05:19:38.416560 | 2021-04-09T07:14:45 | 2021-04-09T07:14:45 | 353,692,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,983 | r | ordmasks.R | #' Compute the Ordinal Scope Scale for Masks
#'
#' This function reads the output of the \code{binaryindex} function
#' and compute the Ordinal scope scale for Indicator 1.4.
#'
#' @param x Output file from previous run - \code{binaryindex} function
#'
#' @return In addition to the input columns, an `ord` variable
#'
#' @export
#' @examples
#' ordmasks(x)
#'
ordmasks <- function(x){
  # Score mask records (who_code '1.4') on an ordinal 0-3 scale based on
  # enforcement level and targeted settings, appending the result as `x$ord`.
  #
  # Assign 0 as default for all 1.4; other rows get ''.
  # NOTE(review): mixing numeric 0 and character '' makes `ord` a character
  # column, so the `>` comparison further down is lexical (fine for the
  # single digits 0-3, but worth knowing).
  #
  x$ord <- ifelse(x$who_code=='1.4', 0,'')
  #
  # Assign 1 if enforcement is recommended
  #
  x$ord <- ifelse(x$who_code=='1.4' &
                    x$enforcement == 'recommended', 1,x$ord)
  #
  # Assign 2 if enforcement is required or monitored and
  # target is not public spaces
  #
  # NOTE(review): `&` binds tighter than `|` in R, so this condition groups as
  # (who_code & enforcement & !public) | transport | businesses -- any row
  # whose target mentions "transport" or "businesses" gets 2 regardless of
  # who_code or enforcement. Confirm whether parentheses were intended.
  #
  x$ord <- ifelse(x$who_code=='1.4' & x$enforcement %in% c('required','monitored') &
                    !grepl('public spaces', tolower(x$targeted)) |
                    grepl('transport', tolower(x$targeted)) |
                    grepl('businesses', tolower(x$targeted)), 2,x$ord)
  #
  # Assign 3 if enforcement is required or monitored and target
  # is public spaces or indoor spaces or (indoor spaces & outdoor spaces)
  #
  # NOTE(review): same precedence caveat as above -- the grepl() alternatives
  # after the first `|` are not restricted to who_code '1.4' rows.
  #
  x$ord <- ifelse(x$who_code=='1.4' &
                    x$enforcement %in% c('required','monitored') &
                    !x$enforcement == 'recommended' &
                    grepl('public spaces', tolower(x$targeted)) |
                    grepl('indoor spaces', tolower(x$targeted)) |
                    grepl('outdoor spaces', tolower(x$targeted)) |
                    (grepl('indoor spaces', tolower(x$targeted)) &
                       grepl('outdoor spaces', tolower(x$targeted))), 3,x$ord)
  #
  # Phase-out or finish can not go up: cap at the previous row's score.
  # NOTE(review): lag() compares with the previous row of the whole frame,
  # with no grouping -- verify rows are ordered by entity/time upstream.
  #
  x$ord <- ifelse(x$who_code=='1.4'&
                    x$measure_stage %in% c("phase-out","finish") &
                    x$ord > lag(x$ord),lag(x$ord),x$ord)
  #
  # Any NA scores (e.g. from the lag() comparison on the first row) become 1.
  x <- replace_na(x, list(ord = 1))
  #
  return(x)
}
#ad<-ordmasks(ac)
|
d6c421fca14d2e4285aa220c134c73b92268cdfb | c6cae6c491f680faa421032da2b6a2d9c3086e1d | /man/impute.rfsrc.Rd | d556de95dd2941d28b0bc5987bd5c4810c4187a2 | [] | no_license | cran/randomForestSRC | 2bf244f2a3382f07832c095aa1974d459d4be25e | b4d099e262423362a8872c13c468e6dbe2f9e9da | refs/heads/master | 2023-06-10T03:39:05.442106 | 2023-05-23T22:12:03 | 2023-05-23T22:12:03 | 17,698,986 | 8 | 7 | null | null | null | null | UTF-8 | R | false | false | 9,144 | rd | impute.rfsrc.Rd | \name{impute.rfsrc}
\alias{impute.rfsrc}
\alias{impute}
\title{Impute Only Mode}
\description{
Fast imputation mode. A random forest is grown and used to impute
missing data. No ensemble estimates or error rates are calculated.
}
\usage{\method{impute}{rfsrc}(formula, data,
ntree = 100, nodesize = 1, nsplit = 10,
nimpute = 2, fast = FALSE, blocks,
mf.q, max.iter = 10, eps = 0.01,
ytry = NULL, always.use = NULL, verbose = TRUE,
...)
}
\arguments{
\item{formula}{A symbolic description of the model to be fit. Can be
left unspecified if there are no outcomes or we don't care to
distinguish between y-outcomes and x-variables in the imputation.
Ignored when using multivariate missForest imputation.}
\item{data}{Data frame containing the data to be imputed.}
\item{ntree}{Number of trees to grow.}
\item{nodesize}{Forest average terminal node size.}
\item{nsplit}{Non-negative integer value used to specify random splitting.}
\item{nimpute}{Number of iterations of the missing data algorithm.
Ignored for multivariate missForest; in which case the algorithm
iterates until a convergence criteria is achieved (users can
however enforce a maximum number of iterations with the option
\code{max.iter}).}
\item{fast}{Use fast random forests, \code{rfsrcFast}, in place of
\code{rfsrc}? Improves speed but is less accurate.}
\item{blocks}{Integer value specifying the number of blocks the data
should be broken up into (by rows). This can improve computational
efficiency when the sample size is large but imputation efficiency
decreases. By default, no action is taken if left unspecified.}
\item{mf.q}{Use this to turn on missForest (which is off by default).
Specifies fraction of variables (between 0 and 1) used as responses
in multivariate missForest imputation. When set to 1 this
corresponds to missForest, otherwise multivariate missForest is
used. Can also be an integer, in
which case this equals the number of multivariate responses.}
\item{max.iter}{Maximum number of iterations used when implementing
multivariate missForest imputation.}
\item{eps}{Tolerance value used to determine convergence of
multivariate missForest imputation.}
\item{ytry}{Number of variables used as pseudo-responses in
unsupervised forests. See details below.}
\item{always.use}{Character vector of variable names to always
be included as a response in multivariate missForest imputation.
Does not apply for other imputation methods.}
\item{verbose}{Send verbose output to terminal (only applies to
multivariate missForest imputation).}
\item{...}{Further arguments passed to or from other methods.}
}
\details{
\enumerate{
\item Grow a forest and use this to impute data. All external
calculations such as ensemble calculations, error rates, etc. are
turned off. Use this function if your only interest is imputing the
data.
\item Split statistics are calculated using non-missing data only.
If a node splits on a variable with missing data, the variable's
missing data is imputed by randomly drawing values from non-missing
in-bag data. The purpose of this is to make it possible to assign
cases to daughter nodes based on the split.
\item If no formula is specified, unsupervised splitting is
implemented using a \code{ytry} value of sqrt(\code{p}) where
\code{p} equals the number of variables. More precisely,
\code{mtry} variables are selected at random, and for each of these
a random subset of \code{ytry} variables are selected and defined as
the multivariate pseudo-responses. A multivariate composite
splitting rule of dimension \code{ytry} is then applied to each of
the \code{mtry} multivariate regression problems and the node split
on the variable leading to the best split (Tang and Ishwaran, 2017).
\item If \code{mf.q} is specified, a multivariate version of
missForest imputation (Stekhoven and Buhlmann, 2012) is applied.
Specifically, a fraction \code{mf.q} of variables are used as
multivariate responses and split by the remaining variables using
multivariate composite splitting (Tang and Ishwaran, 2017). Missing
data for responses are imputed by prediction. The process is
repeated using a new set of variables for responses (mutually
exclusive to the previous fit), until all variables have been
imputed. This is one iteration. The entire process is repeated,
and the algorithm iterated until a convergence criteria is met
(specified using options \code{max.iter} and \code{eps}). Integer
values for \code{mf.q} are allowed and interpreted as a request that
\code{mf.q} variables be selected for the multivariate response. If
\code{mf.q=1}, the algorithm reverts to the original missForest
procedure. This is generally the most accurate of all the
imputation procedures, but also the most computationally demanding.
See examples below for strategies to increase speed.
\item Prior to imputation, the data is processed and records with
all values missing are removed, as are variables having all missing
values.
\item If there is no missing data, either before or after processing
of the data, the algorithm returns the processed data and no
imputation is performed.
\item All options are the same as \command{rfsrc} and the user should
consult the \command{rfsrc} help file for details.
}
}
\value{
Invisibly, the data frame containing the original data with imputed
data overlaid.
}
\author{
Hemant Ishwaran and Udaya B. Kogalur
}
\references{
Ishwaran H., Kogalur U.B., Blackstone E.H. and Lauer M.S.
(2008). Random survival forests, \emph{Ann. App.
Statist.}, 2:841-860.
Stekhoven D.J. and Buhlmann P. (2012). MissForest--non-parametric
missing value imputation for mixed-type data.
\emph{Bioinformatics}, 28(1):112-118.
Tang F. and Ishwaran H. (2017). Random forest missing data
algorithms. \emph{Statistical Analysis and Data Mining}, 10:363-377.
}
\seealso{
\command{\link{rfsrc}},
\command{\link{rfsrc.fast}}
}
\examples{
\donttest{
## ------------------------------------------------------------
## example of survival imputation
## ------------------------------------------------------------
## default everything - unsupervised splitting
data(pbc, package = "randomForestSRC")
pbc1.d <- impute(data = pbc)
## imputation using outcome splitting
f <- as.formula(Surv(days, status) ~ .)
pbc2.d <- impute(f, data = pbc, nsplit = 3)
## random splitting can be reasonably good
pbc3.d <- impute(f, data = pbc, splitrule = "random", nimpute = 5)
## ------------------------------------------------------------
## example of regression imputation
## ------------------------------------------------------------
air1.d <- impute(data = airquality, nimpute = 5)
air2.d <- impute(Ozone ~ ., data = airquality, nimpute = 5)
air3.d <- impute(Ozone ~ ., data = airquality, fast = TRUE)
## ------------------------------------------------------------
## multivariate missForest imputation
## ------------------------------------------------------------
data(pbc, package = "randomForestSRC")
## missForest algorithm - uses 1 variable at a time for the response
pbc.d <- impute(data = pbc, mf.q = 1)
## multivariate missForest - use 10 percent of variables as responses
## i.e. multivariate missForest
pbc.d <- impute(data = pbc, mf.q = .01)
## missForest but faster by using random splitting
pbc.d <- impute(data = pbc, mf.q = 1, splitrule = "random")
## missForest but faster by increasing nodesize
pbc.d <- impute(data = pbc, mf.q = 1, nodesize = 20, splitrule = "random")
## missForest but faster by using rfsrcFast
pbc.d <- impute(data = pbc, mf.q = 1, fast = TRUE)
## ------------------------------------------------------------
## another example of multivariate missForest imputation
## (suggested by John Sheffield)
## ------------------------------------------------------------
test_rows <- 1000
set.seed(1234)
a <- rpois(test_rows, 500)
b <- a + rnorm(test_rows, 50, 50)
c <- b + rnorm(test_rows, 50, 50)
d <- c + rnorm(test_rows, 50, 50)
e <- d + rnorm(test_rows, 50, 50)
f <- e + rnorm(test_rows, 50, 50)
g <- f + rnorm(test_rows, 50, 50)
h <- g + rnorm(test_rows, 50, 50)
i <- h + rnorm(test_rows, 50, 50)
fake_data <- data.frame(a, b, c, d, e, f, g, h, i)
fake_data_missing <- data.frame(lapply(fake_data, function(x) {
x[runif(test_rows) <= 0.4] <- NA
x
}))
imputed_data <- impute(
data = fake_data_missing,
mf.q = 0.2,
ntree = 100,
fast = TRUE,
verbose = TRUE
)
par(mfrow=c(3,3))
o=lapply(1:ncol(imputed_data), function(j) {
pt <- is.na(fake_data_missing[, j])
x <- fake_data[pt, j]
y <- imputed_data[pt, j]
plot(x, y, pch = 16, cex = 0.8, xlab = "raw data",
ylab = "imputed data", col = 2)
points(x, y, pch = 1, cex = 0.8, col = gray(.9))
lines(supsmu(x, y, span = .25), lty = 1, col = 4, lwd = 4)
mtext(colnames(imputed_data)[j])
NULL
})
}
}
\keyword{missing data}
|
be73371b230d3d318a163cd6a1c787e33e3acdac | f8bb211f20c7067fe79bc158dabcba314f88985f | /src/R/thermal.R | b4827c1e6aae1f3cb0de30ac43d91704f50a00d5 | [] | no_license | zhangyaqi1989/Benchmark-for-Different-Programming-Language | 05d3357721fd584a6352745b3bd08a5c43d70ebe | 42fd9e70803088b6f8dacc0b157e50d04b279c15 | refs/heads/master | 2020-03-07T18:48:32.414386 | 2018-06-06T02:19:05 | 2018-06-06T02:19:05 | 127,652,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,178 | r | thermal.R | ##################################
# University of Wisconsin-Madison
# Author: Yaqi Zhang
##################################
# Thermal analysis
##################################
# Thermal analysis benchmark: simulates the cooling of sequentially deposited
# filament segments via convection, radiation, and conduction to neighbouring
# segments, using explicit (Euler) time stepping.
#
# Args:
#   N:  number of time steps / deposited segments (default keeps the original
#       benchmark size, so main() behaves exactly as before).
#   dt: time-step size in seconds.
#
# Returns a string reporting the maximum temperature reached by segment 1.
main <- function(N = 40000L, dt = 0.2)
{
  DEPOSIT_T <- 543.15   # initial deposit temperature [K]
  ENVELOP_T <- 343.15   # build-envelope (ambient) temperature [K]
  RHO <- 1050.0         # material density [kg/m^3]
  C <- 2080.0           # specific heat capacity [J/(kg K)]
  k <- 0.177            # thermal conductivity [W/(m K)]
  h <- 75               # convective heat-transfer coefficient [W/(m^2 K)]
  e <- 0.95             # emissivity
  S_B <- 5.67e-8        # Stefan-Boltzmann constant [W/(m^2 K^4)]
  DIAMETER <- 0.004     # filament diameter [m]
  LENGTH <- 0.005       # segment length [m]

  Ts <- rep(DEPOSIT_T, N)    # segment temperatures
  Q_convs <- rep(0, N)       # convective losses
  Q_radis <- rep(0, N)       # radiative losses
  Q_pres <- rep(0, N)        # conduction from the previous neighbour
  Q_sucs <- rep(0, N)        # conduction to the succeeding neighbour

  free_area <- pi * DIAMETER * LENGTH            # lateral surface exposed to air
  cross_area <- pi * DIAMETER * DIAMETER / 4.0   # conduction cross-section
  # Segment mass is loop-invariant, so compute it once (the original
  # recomputed it every iteration with an identical value).
  m <- RHO * pi * DIAMETER * DIAMETER / 4.0 * LENGTH

  max_temp <- 0.0
  for (i in seq_len(N))
  {
    # Heat losses for the first i deposited segments.
    Q_convs[1:i] <- (Ts[1:i] - ENVELOP_T) * free_area * h
    Q_radis[1:i] <- (Ts[1:i] ^ 4 - ENVELOP_T ^ 4) * e * S_B * free_area
    if (i > 1)
    {
      # Conduction with neighbours. The original wrote `1:i-1`, which parses
      # as `(1:i) - 1` = 0:(i-1); the zero index is silently dropped, so it
      # happened to equal the intended 1:(i-1). Made explicit here.
      Q_pres[2:i] <- (Ts[2:i] - Ts[1:(i - 1)]) * k * cross_area / LENGTH
      Q_sucs[1:(i - 1)] <- (Ts[1:(i - 1)] - Ts[2:i]) * k * cross_area / LENGTH
    }
    # Explicit Euler update of segment temperatures.
    Ts[1:i] <- Ts[1:i] - (Q_convs[1:i] + Q_radis[1:i] + Q_pres[1:i] + Q_sucs[1:i]) * dt / (m * C)
    if (Ts[1] > max_temp)
      max_temp <- Ts[1]
  }
  sprintf("max temp = %f", max_temp)
}
system.time(main())
|
2919716e5cb3f07475713d34901008ba31a5ec9e | 1440f3df0bcfc149817e0b079263f497152c4de3 | /man/check_class.Rd | 02d952ca1992c41ce9f18e11be75e5f6297168fa | [] | no_license | thfuchs/testr | cdcf8c1d6e684a8ec918c0b49a2bb66be449e040 | 12845ff02f593922a71d4aed8324293bf5077c12 | refs/heads/master | 2023-02-17T20:10:24.959857 | 2021-01-13T11:43:56 | 2021-01-13T11:43:56 | 287,491,910 | 0 | 0 | null | 2021-01-13T11:43:57 | 2020-08-14T09:09:57 | R | UTF-8 | R | false | true | 1,022 | rd | check_class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_class.R
\name{check_class}
\alias{check_class}
\title{Check variable class}
\usage{
check_class(var, type, fun_name = NULL, n = NULL, allowNULL = FALSE)
}
\arguments{
\item{var}{variable to check}
\item{type}{class to check var for}
\item{fun_name}{required function name for error message, as character}
\item{n}{numeric to also check length var}
\item{allowNULL}{allow to pass check if var NULL? FALSE by default}
}
\value{
silent if check is passed, rlang error otherwise
}
\description{
Match variable class with the one specified and abort by
`\link[rlang]{abort}` otherwise.
}
\examples{
id <- "chr"
check_class(id, "character")
id <- 2
check_class(id, "numeric")
# check variable type and length
check_class(id, "numeric", n = 1)
# pass check if variable NULL
id <- NULL
check_class(id, "character", allowNULL = TRUE)
\dontrun{
# generates error
id <- 2
check_class(id, "character")
}
}
|
68f4715f73e3e46cfda3cfda8dfe5be41a1f1502 | 8b209188d063cd3c173e0fb1262b11ef16ec6cfe | /man/projecting_tm.Rd | d8d47380ea74783c55d70dd34d4cd0cdc3fb0bdb | [] | no_license | cran/tnet | 26e7a2f9e737b9cde34e29a9918315d183a2ca31 | 15bd16db1192d2ff0712aa916785fd50d6927fd0 | refs/heads/master | 2021-01-15T12:26:21.300405 | 2020-02-24T17:00:02 | 2020-02-24T17:00:02 | 17,700,542 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,394 | rd | projecting_tm.Rd | \name{projecting_tm}
\alias{projecting_tm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Projecting binary and weighted two-mode networks onto weighted one-mode networks. }
\description{ This function is the implementation of the procedure outlined on \cr
http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ }
\usage{projecting_tm(net, method = "sum")}
\arguments{
\item{net}{A two-mode edgelist}
\item{method}{ The method-switch control the method used to calculate the weights.\cr
binary sets all weights to 1\cr
sum sets the weights to the number of cooccurances\cr
Newman bases the weights on Newman's (2001) method of discounting for the size of collaborations. }
}
\value{Returns a one-mode network}
\references{Opsahl. T., 2009. Projecting two-mode networks onto weighted one-mode networks. Available at: http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ }
\author{ Tore Opsahl; http://toreopsahl.com }
\note{ version 1.0.0 }
\examples{
## define two-mode network
two.mode.net <- cbind(
i=c(1,1,2,2,2,2,2,3,4,5,5,5,6),
p=c(1,2,1,2,3,4,5,2,3,4,5,6,6))
## Run the function
projecting_tm(two.mode.net, method="Newman")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ networks } |
4dda9f8bc000c99c598d0bd69c5533a16f231a59 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/multinma/man/nma.Rd | 9a2f7e9a6e4260dba48031fdd7bc3de723731a11 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 10,704 | rd | nma.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nma.R
\name{nma}
\alias{nma}
\title{Network meta-analysis models}
\usage{
nma(
network,
consistency = c("consistency", "ume"),
trt_effects = c("fixed", "random"),
regression = NULL,
class_interactions = c("common", "exchangeable", "independent"),
likelihood = NULL,
link = NULL,
...,
prior_intercept = .default(normal(scale = 100)),
prior_trt = .default(normal(scale = 10)),
prior_het = .default(half_normal(scale = 5)),
prior_het_type = c("sd", "var", "prec"),
prior_reg = .default(normal(scale = 10)),
prior_aux = .default(),
QR = FALSE,
center = TRUE,
adapt_delta = NULL,
int_thin = max(network$n_int\%/\%10, 1)
)
}
\arguments{
\item{network}{An \code{nma_data} object, as created by the functions \verb{set_*()},
\code{combine_network()}, or \code{add_integration()}}
\item{consistency}{Character string specifying the type of (in)consistency
model to fit, currently either \code{"consistency"} or \code{"ume"}}
\item{trt_effects}{Character string specifying either \code{"fixed"} or \code{"random"} effects}
\item{regression}{A one-sided model formula, specifying the prognostic and
effect-modifying terms for a regression model. Any references to treatment
should use the \code{.trt} special variable, for example specifying effect
modifier interactions as \code{variable:.trt} (see details).}
\item{class_interactions}{Character string specifying whether effect modifier
interactions are specified as \code{"common"}, \code{"exchangeable"}, or
\code{"independent"}.}
\item{likelihood}{Character string specifying a likelihood, if unspecified
will be inferred from the data}
\item{link}{Character string specifying a link function, if unspecified will
default to the canonical link}
\item{...}{Further arguments passed to
\code{\link[rstan:stanmodel-method-sampling]{sampling()}}, such as \code{iter},
\code{chains}, \code{cores}, etc.}
\item{prior_intercept}{Specification of prior distribution for the intercept}
\item{prior_trt}{Specification of prior distribution for the treatment effects}
\item{prior_het}{Specification of prior distribution for the heterogeneity
(if \code{trt_effects = "random"})}
\item{prior_het_type}{Character string specifying whether the prior
distribution \code{prior_het} is placed on the heterogeneity standard deviation
\eqn{\tau} (\code{"sd"}, the default), variance \eqn{\tau^2} (\code{"var"}), or
precision \eqn{1/\tau^2} (\code{"prec"}).}
\item{prior_reg}{Specification of prior distribution for the regression
coefficients (if \code{regression} formula specified)}
\item{prior_aux}{Specification of prior distribution for the auxiliary
parameter, if applicable (see details)}
\item{QR}{Logical scalar (default \code{FALSE}), whether to apply a QR
decomposition to the model design matrix}
\item{center}{Logical scalar (default \code{TRUE}), whether to center the
(numeric) regression terms about the overall means}
\item{adapt_delta}{See \link{adapt_delta} for details}
\item{int_thin}{A single integer value, the thinning factor for returning
cumulative estimates of integration error}
}
\value{
\code{nma()} returns a \link{stan_nma} object, \code{nma.fit()} returns a \link{stanfit}
object.
}
\description{
The \code{nma} function fits network meta-analysis and (multilevel) network
meta-regression models in Stan.
}
\details{
When specifying a model formula in the \code{regression} argument, the
usual formula syntax is available (as interpreted by \code{\link[=model.matrix]{model.matrix()}}). The
only additional requirement here is that the special variable \code{.trt} should
be used to refer to treatment. For example, effect modifier interactions
should be specified as \code{variable:.trt}. Prognostic (main) effects and
interactions can be included together compactly as \code{variable*.trt}, which
expands to \code{variable + variable:.trt} (plus \code{.trt}, which is already in the
NMA model).
For the advanced user, the additional specials \code{.study} and \code{.trtclass} are
also available, and refer to studies and (if specified) treatment classes
respectively.
See \code{\link[multinma:priors]{?priors}} for details on prior
specification. Default prior distributions are available, but may not be
appropriate for the particular setting and will raise a warning if used. No
attempt is made to tailor these defaults to the data provided. Please
consider appropriate prior distributions for the particular setting,
accounting for the scales of outcomes and covariates, etc. The function
\code{\link[=plot_prior_posterior]{plot_prior_posterior()}} may be useful in examining the influence of the
chosen prior distributions on the posterior distributions, and the
\code{\link[multinma:summary.nma_prior]{summary()}} method for \code{nma_prior}
objects prints prior intervals.
}
\section{Auxiliary parameters}{
Auxiliary parameters are only present in the following models.
\subsection{Normal likelihood with IPD}{
When a Normal likelihood is fitted to IPD, the auxiliary parameters are the
arm-level standard deviations \eqn{\sigma_{jk}} on treatment \eqn{k} in
study \eqn{j}.
}
\subsection{Ordered multinomial likelihood}{
When fitting a model to \eqn{M} ordered outcomes, the auxiliary parameters
are the latent cutoffs between each category, \eqn{c_0 < c_1 < \dots <
c_M}. Only \eqn{c_2} to \eqn{c_{M-1}} are estimated; we fix \eqn{c_0 =
-\infty}, \eqn{c_1 = 0}, and \eqn{c_M = \infty}. When specifying priors for
these latent cutoffs, we choose to specify priors on the \emph{differences}
\eqn{c_{m+1} - c_m}. Stan automatically truncates any priors so that the
ordering constraints are satisfied.
}
}
\examples{
## Smoking cessation NMA
# Set up network of smoking cessation data
head(smoking)
smk_net <- set_agd_arm(smoking,
study = studyn,
trt = trtc,
r = r,
n = n,
trt_ref = "No intervention")
# Print details
smk_net
\donttest{
# Fitting a fixed effect model
smk_fit_FE <- nma(smk_net, \dontshow{refresh = if (interactive()) 200 else 0,}
trt_effects = "fixed",
prior_intercept = normal(scale = 100),
prior_trt = normal(scale = 100))
smk_fit_FE
}
\donttest{
# Fitting a random effects model
smk_fit_RE <- nma(smk_net, \dontshow{refresh = if (interactive()) 200 else 0,}
trt_effects = "random",
prior_intercept = normal(scale = 100),
prior_trt = normal(scale = 100),
prior_het = normal(scale = 5))
smk_fit_RE
}
\donttest{
# Fitting an unrelated mean effects (inconsistency) model
smk_fit_RE_UME <- nma(smk_net, \dontshow{refresh = if (interactive()) 200 else 0,}
consistency = "ume",
trt_effects = "random",
prior_intercept = normal(scale = 100),
prior_trt = normal(scale = 100),
prior_het = normal(scale = 5))
smk_fit_RE_UME
}
## Plaque psoriasis ML-NMR
# Set up plaque psoriasis network combining IPD and AgD
library(dplyr)
pso_ipd <- filter(plaque_psoriasis_ipd,
studyc \%in\% c("UNCOVER-1", "UNCOVER-2", "UNCOVER-3"))
pso_agd <- filter(plaque_psoriasis_agd,
studyc == "FIXTURE")
head(pso_ipd)
head(pso_agd)
pso_ipd <- pso_ipd \%>\%
mutate(# Variable transformations
bsa = bsa / 100,
prevsys = as.numeric(prevsys),
psa = as.numeric(psa),
weight = weight / 10,
durnpso = durnpso / 10,
# Treatment classes
trtclass = case_when(trtn == 1 ~ "Placebo",
trtn \%in\% c(2, 3, 5, 6) ~ "IL blocker",
trtn == 4 ~ "TNFa blocker"),
# Check complete cases for covariates of interest
complete = complete.cases(durnpso, prevsys, bsa, weight, psa)
)
pso_agd <- pso_agd \%>\%
mutate(
# Variable transformations
bsa_mean = bsa_mean / 100,
bsa_sd = bsa_sd / 100,
prevsys = prevsys / 100,
psa = psa / 100,
weight_mean = weight_mean / 10,
weight_sd = weight_sd / 10,
durnpso_mean = durnpso_mean / 10,
durnpso_sd = durnpso_sd / 10,
# Treatment classes
trtclass = case_when(trtn == 1 ~ "Placebo",
trtn \%in\% c(2, 3, 5, 6) ~ "IL blocker",
trtn == 4 ~ "TNFa blocker")
)
# Exclude small number of individuals with missing covariates
pso_ipd <- filter(pso_ipd, complete)
pso_net <- combine_network(
set_ipd(pso_ipd,
study = studyc,
trt = trtc,
r = pasi75,
trt_class = trtclass),
set_agd_arm(pso_agd,
study = studyc,
trt = trtc,
r = pasi75_r,
n = pasi75_n,
trt_class = trtclass)
)
# Print network details
pso_net
# Add integration points to the network
pso_net <- add_integration(pso_net,
durnpso = distr(qgamma, mean = durnpso_mean, sd = durnpso_sd),
prevsys = distr(qbern, prob = prevsys),
bsa = distr(qlogitnorm, mean = bsa_mean, sd = bsa_sd),
weight = distr(qgamma, mean = weight_mean, sd = weight_sd),
psa = distr(qbern, prob = psa),
n_int = 1000)
\donttest{
# Fitting a ML-NMR model.
# Specify a regression model to include effect modifier interactions for five
# covariates, along with main (prognostic) effects. We use a probit link and
# specify that the two-parameter Binomial approximation for the aggregate-level
# likelihood should be used. We set treatment-covariate interactions to be equal
# within each class. We narrow the possible range for random initial values with
# init_r = 0.1, since probit models in particular are often hard to initialise.
# Using the QR decomposition greatly improves sampling efficiency here, as is
# often the case for regression models.
pso_fit <- nma(pso_net, \dontshow{refresh = if (interactive()) 200 else 0,}
trt_effects = "fixed",
link = "probit",
likelihood = "bernoulli2",
regression = ~(durnpso + prevsys + bsa + weight + psa)*.trt,
class_interactions = "common",
prior_intercept = normal(scale = 10),
prior_trt = normal(scale = 10),
prior_reg = normal(scale = 10),
init_r = 0.1,
QR = TRUE)
pso_fit
}
}
|
1dcaed973e6ebb178a6538bc44e521c729217104 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612803010-test.R | 51c4a627e6df6063df42c558bcedc5a7210b6ddf | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 583 | r | 1612803010-test.R | testlist <- list(bytes1 = c(1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1879048192L, 0L, -256L, -54529L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L ), pmutation = 4.72145650504351e-140)
# Replay a fuzzer-generated argument list (`testlist`, built above) against
# mcga:::ByteCodeMutation and dump the result's structure. Requires the
# 'mcga' package to be installed.
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
b24ed137f5fb597e2d672e90c141c9f484b3eefc | e04a933dbabf3c483e754bbd42fe021960675537 | /R/plots.R | 4a3131a81bb4701b1444df16d2cdbbc4fd2c81fb | [
"CC0-1.0"
] | permissive | cboettig/wrightscape | f3b18135b14d86e50efcc20c3ee4704648e3cda7 | 93947673f4342266acf5af667141bd466de13b3a | refs/heads/master | 2020-06-05T02:41:56.112379 | 2017-10-17T03:44:07 | 2017-10-17T03:44:07 | 672,381 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,443 | r | plots.R | # File: plots.R
# Author: Carl Boettiger <cboettig@gmail.com>
# Date: 2011-11-16
# License: BSD
#' Creates a color-coded plot of how well each tip fits its predicted value
#' @param modelfit a fit from multiTypeOU (class multiOU)
#' @param n number of replicates to use in simulation
#' @param ... additional arguments to plot.phylo
#' @details
#' this function is a proof of principle that could be made more generic
#' Note that this function also illustrates tree conversion with regime data.
#'
#' @export
tip_plot <- function(modelfit, n=100, ...){
# Observed trait data and the phylogeny stored on the fitted model object.
dat <- modelfit$data
tree <- modelfit$tree
# Simulate n replicate datasets from the fitted model; after the transpose
# each column of 'reps' is one replicate and rows correspond to tips.
reps <- sapply(1:n, function(i) t(simulate(modelfit)))
# Per-tip mean and standard deviation across the n simulated replicates.
m <- rowMeans(reps)
sder <- sapply(1:(dim(reps)[1]), function(i) sd(reps[i,]))
# Color each tip by how far its observed value falls from the simulated
# mean: blue < 1 sd, green < 2 sd, orange < 3 sd, red > 3 sd. Later
# assignments overwrite earlier ones, so the tightest matching band wins.
# NOTE(review): a tip exactly 3 sd away matches none of these masks and
# keeps its numeric value from dat[[1]] -- confirm this is intended.
colorcode <- dat[[1]]
colorcode[ abs(dat - m) < 3*sder ] <- "orange"
colorcode[ abs(dat - m) < 2*sder ] <- "green"
colorcode[ abs(dat - m) < sder ] <- "blue"
colorcode[ abs(dat - m) > 3*sder ] <- "red"
# Attach the color codes to the tree as regimes via the project helper
# convert(), then plot the tree with colored tip markers.
tr <- convert(tree, colorcode)
col <- tr$regimes[!is.na(tr$regimes)]
plot(tr, ...)
tiplabels(col=col, pch=19)
# Return the regime-annotated tree together with the per-tip colors.
list(tree=tr, tipcolors=col)
}
# These are data-format particular and could use updating.
# Also consider ggplot-based utilities
# Meanwhile, these functions are not exported and plotting parameters is up to the user
# Plot posterior densities for every parameter column matching 'parname'.
#
# parname:  regex matched against colnames(par_dist); each matching column
#           gets its density drawn as a filled polygon on one set of axes.
# par_dist: matrix of posterior draws (rows = samples, cols = parameters).
# xlim:     optional named list; xlim[[parname]] overrides the x-range
#           otherwise computed from the matched densities.
# xlab:     optional named list; xlab[[parname]] overrides the default
#           label (the parameter name parsed as a plotmath expression).
# ...:      further arguments passed to plot().
subplot <- function(parname, par_dist, xlim=NULL, xlab=NULL, ...){
  # NOTE(review): the 1st and 4th entries lack an alpha channel --
  # rgb(0,1,0.5) is an opaque blue-green; rgb(0,1,0,.5) / rgb(1,1,0,.5)
  # may have been intended. Left unchanged to preserve current output.
  colors <- c( rgb(0,1,0.5), rgb(0,0,1,.5), rgb(1,0,0,.5), rgb(1,1,0.5) )
  id <- grep(parname, colnames(par_dist))
  # Guard: previously an unmatched name failed later with an opaque
  # subscript error; fail early with a clear message instead.
  if (length(id) == 0)
    stop("no column of 'par_dist' matches '", parname, "'", call. = FALSE)
  posterior <- vector("list", dim(par_dist)[2])
  for (i in id) {
    posterior[[i]] <- density(par_dist[, i])
  }
  # Fix: the original tested is.null(xlim[parname]); single-bracket list
  # subsetting never returns NULL, so a supplied list missing 'parname'
  # fell through to xlim <- xlim[[parname]] == NULL. Use [[ ]] so the
  # computed range is the fallback in that case too.
  if (is.null(xlim) || is.null(xlim[[parname]]))
    xlim <- range(sapply(posterior[id], function(P) P$x))
  else
    xlim <- xlim[[parname]]
  if (is.null(xlab))
    xlab <- parse(text = parname)
  else
    xlab <- xlab[[parname]]
  # Draw the axes from the first matched density, then overlay each
  # density as a filled polygon, cycling through the color palette.
  plot(posterior[[id[1]]], xlab = xlab, xlim = xlim, ...)
  cindex <- 1
  for (i in id) {
    polygon(posterior[[i]], col = colors[cindex])
    cindex <- cindex + 1
  }
}
# Draw the three standard parameter panels (alpha, sigma, theta) side by
# side from a matrix of posterior draws.
#
# par_dist: matrix of posterior parameter draws; duplicated rows are
#           dropped before plotting.
# ...:      further arguments forwarded to subplot() (e.g. xlim, cex).
plot_par <- function(par_dist, ...){
  # Example:
  #   plot_par(boots$null_par_dist, xlim = list(alpha = c(0, 10)), cex = 2)
  # Drop duplicated draws, then transpose so subplot() sees parameters
  # as columns of the resulting matrix.
  par_dist <- t(par_dist[!duplicated(par_dist), ])
  # Fix: restore the caller's graphical parameters on exit instead of
  # leaving the device stuck in a 1x3 layout.
  old <- par(mfrow = c(1, 3))
  on.exit(par(old), add = TRUE)
  subplot("alpha", par_dist, main = "Selection Strength", ...)
  subplot("sigma", par_dist, main = "Trait Diversification", ...)
  subplot("theta", par_dist, main = "Optimum", ...)
}
|
eef4933520dc0dc09892a3dfeca113ffcdec82f4 | 1ed3625d3377647bdaa64b4b12d1a8ac19529067 | /man/plot_segm.Rd | 72687cd12a638805cc9ec91dd67a550f8617483e | [] | no_license | cynsky/segtools | 12f24111685ccd8b5999d45fe9df5c24e92ec92e | 7680204e269ed5c3fc6538d44d7846f59d9583f4 | refs/heads/master | 2021-08-17T18:59:20.532469 | 2017-11-21T15:21:02 | 2017-11-21T15:21:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 856 | rd | plot_segm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_series_plot.R
\name{plot_segm}
\alias{plot_segm}
\title{Plot segmentation on time series}
\usage{
plot_segm(data, output, interactive = F, diag.var, x_col = "expectTime",
html = F, order = F, stationarity = F)
}
\arguments{
\item{data}{the data.frame with the different variables}
\item{output}{outputs of the segmentation or segclust algorithm for one number of segment}
\item{interactive}{should graph be interactive through leaflet ?}
\item{diag.var}{names of the variables on which statistics are calculated}
\item{x_col}{column name for time}
\item{html}{should the graph be incorporated in a markdown file through htmltools::tagList()}
\item{order}{should cluster be ordered}
}
\value{
a graph
}
\description{
\code{plot_segm} plots a segmented time series.
}
|
f849b907336332a53028dd76c7bab7629abea864 | bc30a809c131ef5b554be31ca224cb9e8ee59a2b | /cv_by_animal_run.r | 33933a82f172fb89bd7b69f449a8954dd714073f | [] | no_license | aptperson/behaviour_accelerometry | b3b396c04d1d171441e4ffb69bfdb36ba545c2be | 32ba173420b8663f999a5ab7e0a439114f0db067 | refs/heads/master | 2021-01-11T06:05:25.867365 | 2016-09-25T05:54:23 | 2016-09-25T05:54:23 | 69,069,007 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,297 | r | cv_by_animal_run.r | ##### run script for testing the cv-by aninmal
# NOTE(review): rm(list = ls()) and setwd() in scripts are generally
# discouraged; kept as-is to preserve this run script's behaviour.
rm(list = ls())
setwd("~/Documents/repos/behaviour_accelerometry/")
# Modelling libraries and project helper scripts.
library(caret)
library(randomForest)
library(glmnet)
library(e1071)
require(devtools)
library(xgboost)
source("parallelCVxgb.r")
source("ml_run.r")
source("seals_data_prep_for_ml.r")
##### load the data
# load("processed_data/featData4_13.RData")
load("processed_data/outputData_13.RData")
# head(featData7_13)
# unique(featData7_13$SealName)
# [1] abbey bella groucho malie mav maxine miri nelson rocky ronnie sly teiko
# SL: abbey, miri, maxine, rocky, malie, teiko
# FS: bella, groucho, mav, ronnie, sly, nelson
# Cross-tabulate the behaviour labels (EventIds) by animal and by place.
print(table(outputData$EventIds, outputData$SealName))
print(table(outputData$EventIds, outputData$Place))
##### remove land Foraging
# names(outputData)
#
# Recode Place as an integer factor code.
outputData$Place <- as.numeric(as.factor(outputData$Place))
# table(outputData$Place)
#
# feat_data_less_feat <- outputData[, !grepl(pattern = "[0-9]", x = names(outputData))]
# feat_data_less_feat <- feat_data_less_feat[, !grepl(pattern = "LQ", x = names(feat_data_less_feat))]
# feat_data_less_feat <- feat_data_less_feat[, !grepl(pattern = "UQ", x = names(feat_data_less_feat))]
# names(feat_data_less_feat)
##### define class maxes for down sampling
# these class maxes should give roughly balanced overall class sizes
calss_max_table_train = data.frame(behaviours = c("Foraging", "Grooming", "Resting", "Travelling"),
                                   classMax = c(550, Inf, 550, 450))
calss_max_table_test = data.frame(behaviours = c("Foraging", "Grooming", "Resting", "Travelling"),
                                  classMax = c(300, Inf, 300, 300))
##### prep the data for ml
# save this data for uploading as minimal data
# Hold out two animals ("maxine", "groucho") for testing; down-sample each
# behaviour class to the caps defined above.
processed_feat_data <- seals_data_prep_for_ml(featureData = outputData,
                                              test_animal_names = c("maxine", "groucho"),
                                              codeTest = FALSE, # when this is TRUE the class max is forced to be 20
                                              classMaxTrain = calss_max_table_train,
                                              classMaxTest = calss_max_table_test)
print(table(processed_feat_data$trainDataSplit$EventIds))
print(table(processed_feat_data$testDataSplit$EventIds))
save(processed_feat_data, file = "processed_data/processed_feat_data.RData")
load(file = "processed_data/processed_feat_data.RData")
##### look at the classes by fold
trainEventIds <- as.factor(processed_feat_data$trainDataSplit[, "EventIds"])
testEventIds <- as.factor(processed_feat_data$testDataSplit[, "EventIds"])
# Class counts per fold: each table drops the indices in folds_list[[i]].
trainFoldTable <- lapply(1:10, function(i){
  table(trainEventIds[-processed_feat_data$folds_list[[i]]])
})
testFoldTable <- lapply(1:10, function(i){
  table(testEventIds[-processed_feat_data$folds_list[[i]]])
})
print(do.call(rbind, trainFoldTable))
print(do.call(rbind, testFoldTable))
##### run the ml model
# Fit XGBoost with the pre-computed folds and save the run.
xgb_test_run <- ml_run(trainData = processed_feat_data$trainDataSplit,
                       # codeTest = F,
                       testData = processed_feat_data$testDataSplit,
                       folds_list = processed_feat_data$folds_list,
                       Model = "XGB")
save(xgb_test_run, file = "model_outputs/xgb_test_run.RData")
|
6c3a90e5dcc92a93b46478aea0e0a208f8613a6e | 8e85e0e71f18c2da27c51c5737c20b89349bbe54 | /assignment 5/4b.R | dfd7e12ae2fd37916fe0267d01afe83ef2639b74 | [] | no_license | trungnnguyen/time-series-analysis | c3d5b144b86701782ce3bf6242faf51930b78ca1 | 9c752d59b3838c1911a9a1f6b16212df0299aafb | refs/heads/master | 2021-01-21T08:15:37.691621 | 2016-12-19T04:23:23 | 2016-12-19T04:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 448 | r | 4b.R | load('sarima_data.Rdata')
#plot(yk)
#acf(yk,xlab="Lag",ylab='ACF')
#pacf(yk)
# First-difference the series 'yk' and inspect the ACF/PACF of the result.
vk=diff(yk)
acf(vk)
pacf(vk)
# ARMA(3,5) plus a seasonal MA(1) term at period 12, fitted to the
# differenced series with the mean term excluded.
mod_sarima = arima(vk,order=c(3,0,5),seasonal=list(order=c(0,0,1),period=12),include.mean=F)
print(mod_sarima)
# Residual diagnostics: standardized residuals, residual ACF, and
# Ljung-Box p-values up to lag 20.
tsdiag(mod_sarima,gof.lag=20)
#acf(a1)
#pacf(a1)
#plot(a1)
#tre=yk-a1# difference is 1
#plot(tre)
#vk=arma(a1,order=c(2,5))
#acf(vk$residuals[10:999], type="covariance") # residuals is white
#x=auto.arima(yk)
|
230f2f0edcaa04b46ea998784f1cad352b178cd3 | cef3b5e2588a7377281a8f627a552350059ca68b | /paws/man/iotwireless_get_wireless_gateway.Rd | f5adecbf0b7a6b6756adc4295328e0360bf531da | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sanchezvivi/paws | b1dc786a9229e0105f0f128d5516c46673cb1cb5 | 2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05 | refs/heads/main | 2023-02-16T11:18:31.772786 | 2021-01-17T23:50:41 | 2021-01-17T23:50:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 721 | rd | iotwireless_get_wireless_gateway.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iotwireless_operations.R
\name{iotwireless_get_wireless_gateway}
\alias{iotwireless_get_wireless_gateway}
\title{Gets information about a wireless gateway}
\usage{
iotwireless_get_wireless_gateway(Identifier, IdentifierType)
}
\arguments{
\item{Identifier}{[required] The identifier of the wireless gateway to get.}
\item{IdentifierType}{[required] The type of identifier used in \code{identifier}.}
}
\description{
Gets information about a wireless gateway.
}
\section{Request syntax}{
\preformatted{svc$get_wireless_gateway(
Identifier = "string",
IdentifierType = "GatewayEui"|"WirelessGatewayId"|"ThingName"
)
}
}
\keyword{internal}
|
eb885d5980d0d6b5b18f087579a7868820b88a0d | 685051f8112068e4f847519ad5d602f516208bc0 | /other_files/data_exploration_albina.R | d5deb14b605f3b264d3a1e1e758a080e8c51f438 | [] | no_license | cako-green-zhang/stock_prices | 043fda5c1d29759f8b842a7e048870fb993e63e2 | 257a484b82fe1544032a95d9882d4f65bbf87ca1 | refs/heads/main | 2023-01-06T16:47:38.883013 | 2020-11-13T02:29:27 | 2020-11-13T02:29:27 | 308,722,760 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,076 | r | data_exploration_albina.R | #load dataset
# Data exploration and a baseline decision-tree model for Market.Cap.
# Load the raw dataset (contains missing values).
data <- read.csv("exploration_data.csv")
# Data summary.
summary(data)
# Small-multiples histograms to eyeball distributions and missing data.
library(ggplot2)
library(reshape2)
ggplot(melt(data), aes(x = value)) + geom_histogram() + facet_wrap(~variable)
# Data manipulation helpers.
library(dplyr)
# Load the imputed dataset.
data_imp <- read.csv("full_set.csv")
summary(data_imp)
# Drop the bookkeeping index columns that are not model features.
data_clean <- select(data_imp, -c(X.1, X))
# Libraries for fitting the decision tree through caret.
library(caret)
library(rpart)
library(xgboost)
library(tidyverse)
# Summary of the cleaned modelling data.
summary(data_clean)
# Reproducible resampling.
set.seed(100)
# 10-fold cross-validated CART regression tree predicting Market.Cap.
dec_tree <- train(Market.Cap ~ ., data = data_clean,
                  trControl = trainControl("cv", number = 10, savePredictions = 'all'),
                  method = 'rpart')
# Inspect the fitted model.
summary(dec_tree)
print(dec_tree)
plot(dec_tree)
attributes(dec_tree)
# Cross-validated accuracy over the tuning grid.
min(dec_tree$results$RMSE)
max(dec_tree$results$Rsquared)
# Fix: was dec_tree$resuls$MAE (typo); $resuls is NULL, so max() returned
# -Inf with a warning instead of the MAE.
# NOTE(review): min() may be intended here -- lower MAE is better, as with
# RMSE above -- but max() is kept to match the original author's pattern.
max(dec_tree$results$MAE)
# Persist the fitted model.
saveRDS(dec_tree, 'decisiontree_model_updated_albina.rds')
|
498f02876a2a7158d570cae9c6608fc7abd599f9 | 0f54bd5a4fd9c51f30896cd1c070ba1fe5709095 | /man/prediction.Rd | 52d591b880fbe9c3232b5953258868bb71e401ec | [
"MIT"
] | permissive | AlineTalhouk/splendid | 643905b988fa48fe5ed1e889bd745654c9f7a31f | c1062d167227afb3b9f8f70d924e81b8bd7843dd | refs/heads/master | 2021-12-10T13:40:24.727988 | 2021-12-03T02:22:38 | 2021-12-03T02:22:38 | 86,625,708 | 1 | 0 | NOASSERTION | 2021-02-10T22:34:00 | 2017-03-29T20:19:30 | R | UTF-8 | R | false | true | 2,943 | rd | prediction.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prediction.R
\name{prediction}
\alias{prediction}
\alias{prediction.default}
\alias{prediction.pamrtrained}
\alias{prediction.knn}
\title{Class prediction on OOB set}
\usage{
prediction(
mod,
data,
class = NULL,
test.id = NULL,
train.id = NULL,
threshold = 0,
standardize = FALSE,
...
)
\method{prediction}{default}(
mod,
data,
class = NULL,
test.id = NULL,
train.id = NULL,
threshold = 0,
standardize = FALSE,
...
)
\method{prediction}{pamrtrained}(
mod,
data,
class = NULL,
test.id = NULL,
train.id = NULL,
threshold = 0,
standardize = FALSE,
...
)
\method{prediction}{knn}(
mod,
data,
class = NULL,
test.id = NULL,
train.id = NULL,
threshold = 0,
standardize = FALSE,
...
)
}
\arguments{
\item{mod}{model object from \code{\link[=classification]{classification()}}}
\item{data}{data frame with rows as samples, columns as features}
\item{class}{true/reference class vector used for supervised learning}
\item{test.id}{integer vector of indices for test set. If \code{NULL} (default),
all samples are used.}
\item{train.id}{integer vector of indices for training set. If \code{NULL}
(default), all samples are used.}
\item{threshold}{a number between 0 and 1 indicating the lowest maximum class
probability below which a sample will be unclassified.}
\item{standardize}{logical; if \code{TRUE}, the training sets are standardized on
features to have mean zero and unit variance. The test sets are
standardized using the vectors of centers and standard deviations used in
corresponding training sets.}
\item{...}{additional arguments to be passed to or from methods}
}
\value{
A factor of predicted classes with labels in the same order as true
class. If \code{mod} is a \code{"pamr"} classifier, the return value is a list of
length 2: the predicted class, and the threshold value.
}
\description{
Functions to predict class labels on the Out-Of-Bag (test) set for different
classifiers.
}
\details{
The \code{knn} and \code{pamr} prediction methods use the \code{train.id} and \code{class}
arguments for additional modelling steps before prediction. For \code{knn}, the
modelling and prediction are performed in one step, so the function takes in
both training and test set identifiers. For \code{pamr}, the classifier needs to
be cross-validated on the training set in order to find a shrinkage threshold
with the minimum CV error to use in prediction on the test set. The other
prediction methods make use of the default method.
}
\examples{
data(hgsc)
class <- attr(hgsc, "class.true")
set.seed(1)
training.id <- sample(seq_along(class), replace = TRUE)
test.id <- which(!seq_along(class) \%in\% training.id)
mod <- classification(hgsc[training.id, ], class[training.id], "slda")
pred <- prediction(mod, hgsc, class, test.id)
table(true = class[test.id], pred)
}
\author{
Derek Chiu
}
|
b3b659996c89444246af0e5f0e0532cd8727e637 | 29e74b9b3a5a3228d5de27090f5b0e6728d77ee7 | /70207_Lab7_ExerciseAnswer.R | 9ea9f60ad36e706627d6cd93fa29c130ce158887 | [] | no_license | typark99/TeachingLab_ProbabilityStatistics | c6e9f64122d809d51ace55b3618330ee8e9cef2d | 9bd8d2397c73bfa73916a1304aaebdc310eb1334 | refs/heads/master | 2022-11-22T11:26:39.672726 | 2020-07-26T15:57:23 | 2020-07-26T15:57:23 | 282,683,309 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,832 | r | 70207_Lab7_ExerciseAnswer.R | #####################
#
# Population Means
#
#####################
############
# Exercise 1
############
# The Consumer Reports Restaurant Customer Satisfaction Survey
# studies full-service restaurant chains.
# One of the variables in the study is meal price, the average amount paid per
# person for dinner and drinks, minus the tip.
# The GrandStrand.csv data show the meal prices obtained from 24 restaurants
# in the Grand Strand section in a city of the US.
# Use .05 significance level to test if there is a significant
# difference among the mean meal price for the three types of restaurants.
# Answer the following questions:
# 1. What is the between-groups estimate of population variance?
# 2. What is the within-groups estimate of population variance?
# 3. What is the F statistic?
# 4. What is the p value?
# 5. What is your conclusion about the difference among the mean meal prices for the three types of restaurants?
grand = read.csv("GrandStrand.csv")
colnames(grand)
View(grand)
# Reshape the wide data (one column per restaurant type) into long format
# (values + type indicator) so aov() can model price by type.
stacked.grand = stack(grand)
View(stacked.grand)
# One-way ANOVA of meal price on restaurant type.
anova.grand = aov(values ~ ind, data=stacked.grand)
summary(anova.grand)
# between-groups estimate: 104
# within-groups estimate: 14.19
# F stat = 104/14.19 = 7.329
# p-value = 0.00385
# We found evidence supporting that the mean meal prices for the three types of restaurants are not the same.
############
# Exercise 2
############
# Pairwise two-sample t-tests between the restaurant types (t.test defaults).
# 1. Test for the mean difference between the price for Italian and that for Seafood
t.test(grand$Italian, grand$Seafood, conf.level = .95)
# 2. Test for the mean difference between the price for Italian and that for Steakhouse
t.test(grand$Italian, grand$Steakhouse, conf.level = .95)
# 3. Test for the mean difference between the price for Seafood and that for Steakhouse
t.test(grand$Seafood, grand$Steakhouse, conf.level = .95)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.